Diffstat (limited to 'compiler/rustc_middle')
-rw-r--r--  compiler/rustc_middle/Cargo.toml | 40
-rw-r--r--  compiler/rustc_middle/README.md | 3
-rw-r--r--  compiler/rustc_middle/benches/lib.rs | 54
-rw-r--r--  compiler/rustc_middle/src/arena.rs | 108
-rw-r--r--  compiler/rustc_middle/src/dep_graph/dep_node.rs | 435
-rw-r--r--  compiler/rustc_middle/src/dep_graph/mod.rs | 140
-rw-r--r--  compiler/rustc_middle/src/hir/map/mod.rs | 1405
-rw-r--r--  compiler/rustc_middle/src/hir/mod.rs | 182
-rw-r--r--  compiler/rustc_middle/src/hir/nested_filter.rs | 31
-rw-r--r--  compiler/rustc_middle/src/hir/place.rs | 117
-rw-r--r--  compiler/rustc_middle/src/infer/canonical.rs | 363
-rw-r--r--  compiler/rustc_middle/src/infer/mod.rs | 32
-rw-r--r--  compiler/rustc_middle/src/infer/unify_key.rs | 162
-rw-r--r--  compiler/rustc_middle/src/lib.rs | 106
-rw-r--r--  compiler/rustc_middle/src/lint.rs | 443
-rw-r--r--  compiler/rustc_middle/src/macros.rs | 232
-rw-r--r--  compiler/rustc_middle/src/metadata.rs | 26
-rw-r--r--  compiler/rustc_middle/src/middle/codegen_fn_attrs.rs | 146
-rw-r--r--  compiler/rustc_middle/src/middle/dependency_format.rs | 28
-rw-r--r--  compiler/rustc_middle/src/middle/exported_symbols.rs | 72
-rw-r--r--  compiler/rustc_middle/src/middle/lang_items.rs | 61
-rw-r--r--  compiler/rustc_middle/src/middle/limits.rs | 85
-rw-r--r--  compiler/rustc_middle/src/middle/mod.rs | 37
-rw-r--r--  compiler/rustc_middle/src/middle/privacy.rs | 64
-rw-r--r--  compiler/rustc_middle/src/middle/region.rs | 443
-rw-r--r--  compiler/rustc_middle/src/middle/resolve_lifetime.rs | 54
-rw-r--r--  compiler/rustc_middle/src/middle/stability.rs | 591
-rw-r--r--  compiler/rustc_middle/src/mir/basic_blocks.rs | 147
-rw-r--r--  compiler/rustc_middle/src/mir/coverage.rs | 186
-rw-r--r--  compiler/rustc_middle/src/mir/generic_graph.rs | 69
-rw-r--r--  compiler/rustc_middle/src/mir/generic_graphviz.rs | 173
-rw-r--r--  compiler/rustc_middle/src/mir/graph_cyclic_cache.rs | 63
-rw-r--r--  compiler/rustc_middle/src/mir/graphviz.rs | 134
-rw-r--r--  compiler/rustc_middle/src/mir/interpret/allocation.rs | 1300
-rw-r--r--  compiler/rustc_middle/src/mir/interpret/error.rs | 551
-rw-r--r--  compiler/rustc_middle/src/mir/interpret/mod.rs | 633
-rw-r--r--  compiler/rustc_middle/src/mir/interpret/pointer.rs | 307
-rw-r--r--  compiler/rustc_middle/src/mir/interpret/queries.rs | 217
-rw-r--r--  compiler/rustc_middle/src/mir/interpret/value.rs | 651
-rw-r--r--  compiler/rustc_middle/src/mir/mod.rs | 2900
-rw-r--r--  compiler/rustc_middle/src/mir/mono.rs | 527
-rw-r--r--  compiler/rustc_middle/src/mir/patch.rs | 196
-rw-r--r--  compiler/rustc_middle/src/mir/predecessors.rs | 78
-rw-r--r--  compiler/rustc_middle/src/mir/pretty.rs | 1067
-rw-r--r--  compiler/rustc_middle/src/mir/query.rs | 476
-rw-r--r--  compiler/rustc_middle/src/mir/spanview.rs | 691
-rw-r--r--  compiler/rustc_middle/src/mir/switch_sources.rs | 78
-rw-r--r--  compiler/rustc_middle/src/mir/syntax.rs | 1168
-rw-r--r--  compiler/rustc_middle/src/mir/tcx.rs | 307
-rw-r--r--  compiler/rustc_middle/src/mir/terminator.rs | 448
-rw-r--r--  compiler/rustc_middle/src/mir/traversal.rs | 388
-rw-r--r--  compiler/rustc_middle/src/mir/type_foldable.rs | 240
-rw-r--r--  compiler/rustc_middle/src/mir/type_visitable.rs | 190
-rw-r--r--  compiler/rustc_middle/src/mir/visit.rs | 1330
-rw-r--r--  compiler/rustc_middle/src/query/mod.rs | 2060
-rw-r--r--  compiler/rustc_middle/src/tests.rs | 13
-rw-r--r--  compiler/rustc_middle/src/thir.rs | 821
-rw-r--r--  compiler/rustc_middle/src/thir/visit.rs | 244
-rw-r--r--  compiler/rustc_middle/src/traits/chalk.rs | 403
-rw-r--r--  compiler/rustc_middle/src/traits/mod.rs | 1026
-rw-r--r--  compiler/rustc_middle/src/traits/query.rs | 230
-rw-r--r--  compiler/rustc_middle/src/traits/select.rs | 312
-rw-r--r--  compiler/rustc_middle/src/traits/specialization_graph.rs | 261
-rw-r--r--  compiler/rustc_middle/src/traits/structural_impls.rs | 135
-rw-r--r--  compiler/rustc_middle/src/traits/util.rs | 49
-rw-r--r--  compiler/rustc_middle/src/ty/_match.rs | 124
-rw-r--r--  compiler/rustc_middle/src/ty/abstract_const.rs | 194
-rw-r--r--  compiler/rustc_middle/src/ty/adjustment.rs | 198
-rw-r--r--  compiler/rustc_middle/src/ty/adt.rs | 569
-rw-r--r--  compiler/rustc_middle/src/ty/assoc.rs | 195
-rw-r--r--  compiler/rustc_middle/src/ty/binding.rs | 22
-rw-r--r--  compiler/rustc_middle/src/ty/cast.rs | 73
-rw-r--r--  compiler/rustc_middle/src/ty/closure.rs | 454
-rw-r--r--  compiler/rustc_middle/src/ty/codec.rs | 527
-rw-r--r--  compiler/rustc_middle/src/ty/consts.rs | 326
-rw-r--r--  compiler/rustc_middle/src/ty/consts/int.rs | 483
-rw-r--r--  compiler/rustc_middle/src/ty/consts/kind.rs | 239
-rw-r--r--  compiler/rustc_middle/src/ty/consts/valtree.rs | 104
-rw-r--r--  compiler/rustc_middle/src/ty/context.rs | 3018
-rw-r--r--  compiler/rustc_middle/src/ty/diagnostics.rs | 501
-rw-r--r--  compiler/rustc_middle/src/ty/erase_regions.rs | 74
-rw-r--r--  compiler/rustc_middle/src/ty/error.rs | 965
-rw-r--r--  compiler/rustc_middle/src/ty/fast_reject.rs | 405
-rw-r--r--  compiler/rustc_middle/src/ty/flags.rs | 342
-rw-r--r--  compiler/rustc_middle/src/ty/fold.rs | 797
-rw-r--r--  compiler/rustc_middle/src/ty/generics.rs | 349
-rw-r--r--  compiler/rustc_middle/src/ty/impls_ty.rs | 135
-rw-r--r--  compiler/rustc_middle/src/ty/inhabitedness/def_id_forest.rs | 145
-rw-r--r--  compiler/rustc_middle/src/ty/inhabitedness/mod.rs | 234
-rw-r--r--  compiler/rustc_middle/src/ty/instance.rs | 746
-rw-r--r--  compiler/rustc_middle/src/ty/layout.rs | 3504
-rw-r--r--  compiler/rustc_middle/src/ty/list.rs | 215
-rw-r--r--  compiler/rustc_middle/src/ty/mod.rs | 2518
-rw-r--r--  compiler/rustc_middle/src/ty/normalize_erasing_regions.rs | 283
-rw-r--r--  compiler/rustc_middle/src/ty/parameterized.rs | 119
-rw-r--r--  compiler/rustc_middle/src/ty/print/mod.rs | 327
-rw-r--r--  compiler/rustc_middle/src/ty/print/pretty.rs | 2789
-rw-r--r--  compiler/rustc_middle/src/ty/query.rs | 386
-rw-r--r--  compiler/rustc_middle/src/ty/relate.rs | 841
-rw-r--r--  compiler/rustc_middle/src/ty/rvalue_scopes.rs | 57
-rw-r--r--  compiler/rustc_middle/src/ty/structural_impls.rs | 1304
-rw-r--r--  compiler/rustc_middle/src/ty/sty.rs | 2295
-rw-r--r--  compiler/rustc_middle/src/ty/subst.rs | 785
-rw-r--r--  compiler/rustc_middle/src/ty/trait_def.rs | 272
-rw-r--r--  compiler/rustc_middle/src/ty/util.rs | 1294
-rw-r--r--  compiler/rustc_middle/src/ty/visit.rs | 745
-rw-r--r--  compiler/rustc_middle/src/ty/vtable.rs | 117
-rw-r--r--  compiler/rustc_middle/src/ty/walk.rs | 207
-rw-r--r--  compiler/rustc_middle/src/util/bug.rs | 54
-rw-r--r--  compiler/rustc_middle/src/util/common.rs | 67
-rw-r--r--  compiler/rustc_middle/src/util/common/tests.rs | 14
111 files changed, 54941 insertions, 0 deletions
diff --git a/compiler/rustc_middle/Cargo.toml b/compiler/rustc_middle/Cargo.toml
new file mode 100644
index 000000000..008d2c709
--- /dev/null
+++ b/compiler/rustc_middle/Cargo.toml
@@ -0,0 +1,40 @@
+[package]
+name = "rustc_middle"
+version = "0.0.0"
+edition = "2021"
+
+[lib]
+doctest = false
+
+[dependencies]
+rustc_arena = { path = "../rustc_arena" }
+bitflags = "1.2.1"
+either = "1.5.0"
+gsgdt = "0.1.2"
+tracing = "0.1"
+rustc-rayon = { version = "0.4.0", optional = true }
+rustc-rayon-core = { version = "0.4.0", optional = true }
+polonius-engine = "0.13.0"
+rustc_apfloat = { path = "../rustc_apfloat" }
+rustc_attr = { path = "../rustc_attr" }
+rustc_feature = { path = "../rustc_feature" }
+rustc_hir = { path = "../rustc_hir" }
+rustc_target = { path = "../rustc_target" }
+rustc_macros = { path = "../rustc_macros" }
+rustc_data_structures = { path = "../rustc_data_structures" }
+rustc_query_system = { path = "../rustc_query_system" }
+rustc_errors = { path = "../rustc_errors" }
+rustc_graphviz = { path = "../rustc_graphviz" }
+rustc_index = { path = "../rustc_index" }
+rustc_serialize = { path = "../rustc_serialize" }
+rustc_ast = { path = "../rustc_ast" }
+rustc_span = { path = "../rustc_span" }
+chalk-ir = "0.80.0"
+smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
+rustc_session = { path = "../rustc_session" }
+rustc_type_ir = { path = "../rustc_type_ir" }
+rand = "0.8.4"
+rand_xoshiro = "0.6.0"
+
+[features]
+rustc_use_parallel_compiler = ["rustc-rayon", "rustc-rayon-core"]
diff --git a/compiler/rustc_middle/README.md b/compiler/rustc_middle/README.md
new file mode 100644
index 000000000..de58f546c
--- /dev/null
+++ b/compiler/rustc_middle/README.md
@@ -0,0 +1,3 @@
+For more information about how rustc works, see the [rustc dev guide].
+
+[rustc dev guide]: https://rustc-dev-guide.rust-lang.org/
diff --git a/compiler/rustc_middle/benches/lib.rs b/compiler/rustc_middle/benches/lib.rs
new file mode 100644
index 000000000..237751bcb
--- /dev/null
+++ b/compiler/rustc_middle/benches/lib.rs
@@ -0,0 +1,54 @@
+#![feature(test)]
+
+extern crate test;
+
+use test::Bencher;
+
+// Static/dynamic method dispatch
+
+struct Struct {
+ field: isize,
+}
+
+trait Trait {
+ fn method(&self) -> isize;
+}
+
+impl Trait for Struct {
+ fn method(&self) -> isize {
+ self.field
+ }
+}
+
+#[bench]
+fn trait_vtable_method_call(b: &mut Bencher) {
+ let s = Struct { field: 10 };
+ let t = &s as &dyn Trait;
+ b.iter(|| t.method());
+}
+
+#[bench]
+fn trait_static_method_call(b: &mut Bencher) {
+ let s = Struct { field: 10 };
+ b.iter(|| s.method());
+}
+
+// Overhead of various match forms
+
+#[bench]
+fn option_some(b: &mut Bencher) {
+ let x = Some(10);
+ b.iter(|| match x {
+ Some(y) => y,
+ None => 11,
+ });
+}
+
+#[bench]
+fn vec_pattern(b: &mut Bencher) {
+ let x = [1, 2, 3, 4, 5, 6];
+ b.iter(|| match x {
+ [1, 2, 3, ..] => 10,
+ _ => 11,
+ });
+}
diff --git a/compiler/rustc_middle/src/arena.rs b/compiler/rustc_middle/src/arena.rs
new file mode 100644
index 000000000..b94de537d
--- /dev/null
+++ b/compiler/rustc_middle/src/arena.rs
@@ -0,0 +1,108 @@
+/// This higher-order macro declares a list of types which can be allocated by `Arena`.
+///
+/// Specifying the `decode` modifier will add decode impls for `&T` and `&[T]` where `T` is the type
+/// listed. These impls will appear in the implement_ty_decoder! macro.
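+///
+/// For example (illustrative), the entry `[decode] mir: rustc_middle::mir::Body<'tcx>`
+/// below both registers `Body<'tcx>` as arena-allocatable and generates decode
+/// impls for `&Body<'tcx>` and `&[Body<'tcx>]`.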
+#[macro_export]
+macro_rules! arena_types {
+ ($macro:path) => (
+ $macro!([
+ [] layout: rustc_target::abi::LayoutS<'tcx>,
+ [] fn_abi: rustc_target::abi::call::FnAbi<'tcx, rustc_middle::ty::Ty<'tcx>>,
+ // AdtDef are interned and compared by address
+ [decode] adt_def: rustc_middle::ty::AdtDefData,
+ [] steal_thir: rustc_data_structures::steal::Steal<rustc_middle::thir::Thir<'tcx>>,
+ [] steal_mir: rustc_data_structures::steal::Steal<rustc_middle::mir::Body<'tcx>>,
+ [decode] mir: rustc_middle::mir::Body<'tcx>,
+ [] steal_promoted:
+ rustc_data_structures::steal::Steal<
+ rustc_index::vec::IndexVec<
+ rustc_middle::mir::Promoted,
+ rustc_middle::mir::Body<'tcx>
+ >
+ >,
+ [decode] promoted:
+ rustc_index::vec::IndexVec<
+ rustc_middle::mir::Promoted,
+ rustc_middle::mir::Body<'tcx>
+ >,
+ [decode] typeck_results: rustc_middle::ty::TypeckResults<'tcx>,
+ [decode] borrowck_result:
+ rustc_middle::mir::BorrowCheckResult<'tcx>,
+ [decode] unsafety_check_result: rustc_middle::mir::UnsafetyCheckResult,
+ [decode] code_region: rustc_middle::mir::coverage::CodeRegion,
+ [] const_allocs: rustc_middle::mir::interpret::Allocation,
+ [] region_scope_tree: rustc_middle::middle::region::ScopeTree,
+ // Required for the incremental on-disk cache
+ [] mir_keys: rustc_hir::def_id::DefIdSet,
+ [] dropck_outlives:
+ rustc_middle::infer::canonical::Canonical<'tcx,
+ rustc_middle::infer::canonical::QueryResponse<'tcx,
+ rustc_middle::traits::query::DropckOutlivesResult<'tcx>
+ >
+ >,
+ [] normalize_projection_ty:
+ rustc_middle::infer::canonical::Canonical<'tcx,
+ rustc_middle::infer::canonical::QueryResponse<'tcx,
+ rustc_middle::traits::query::NormalizationResult<'tcx>
+ >
+ >,
+ [] implied_outlives_bounds:
+ rustc_middle::infer::canonical::Canonical<'tcx,
+ rustc_middle::infer::canonical::QueryResponse<'tcx,
+ Vec<rustc_middle::traits::query::OutlivesBound<'tcx>>
+ >
+ >,
+ [] dtorck_constraint: rustc_middle::traits::query::DropckConstraint<'tcx>,
+ [] candidate_step: rustc_middle::traits::query::CandidateStep<'tcx>,
+ [] autoderef_bad_ty: rustc_middle::traits::query::MethodAutoderefBadTy<'tcx>,
+ [] query_region_constraints: rustc_middle::infer::canonical::QueryRegionConstraints<'tcx>,
+ [] type_op_subtype:
+ rustc_middle::infer::canonical::Canonical<'tcx,
+ rustc_middle::infer::canonical::QueryResponse<'tcx, ()>
+ >,
+ [] type_op_normalize_poly_fn_sig:
+ rustc_middle::infer::canonical::Canonical<'tcx,
+ rustc_middle::infer::canonical::QueryResponse<'tcx, rustc_middle::ty::PolyFnSig<'tcx>>
+ >,
+ [] type_op_normalize_fn_sig:
+ rustc_middle::infer::canonical::Canonical<'tcx,
+ rustc_middle::infer::canonical::QueryResponse<'tcx, rustc_middle::ty::FnSig<'tcx>>
+ >,
+ [] type_op_normalize_predicate:
+ rustc_middle::infer::canonical::Canonical<'tcx,
+ rustc_middle::infer::canonical::QueryResponse<'tcx, rustc_middle::ty::Predicate<'tcx>>
+ >,
+ [] type_op_normalize_ty:
+ rustc_middle::infer::canonical::Canonical<'tcx,
+ rustc_middle::infer::canonical::QueryResponse<'tcx, rustc_middle::ty::Ty<'tcx>>
+ >,
+ [] all_traits: Vec<rustc_hir::def_id::DefId>,
+ [] privacy_access_levels: rustc_middle::middle::privacy::AccessLevels,
+ [] foreign_module: rustc_session::cstore::ForeignModule,
+ [] foreign_modules: Vec<rustc_session::cstore::ForeignModule>,
+ [] upvars_mentioned: rustc_data_structures::fx::FxIndexMap<rustc_hir::HirId, rustc_hir::Upvar>,
+ [] object_safety_violations: rustc_middle::traits::ObjectSafetyViolation,
+ [] codegen_unit: rustc_middle::mir::mono::CodegenUnit<'tcx>,
+ [decode] attribute: rustc_ast::Attribute,
+ [] name_set: rustc_data_structures::fx::FxHashSet<rustc_span::symbol::Symbol>,
+ [] hir_id_set: rustc_hir::HirIdSet,
+
+ // Interned types
+ [] tys: rustc_data_structures::intern::WithStableHash<rustc_middle::ty::TyS<'tcx>>,
+ [] predicates: rustc_middle::ty::PredicateS<'tcx>,
+ [] consts: rustc_middle::ty::ConstS<'tcx>,
+
+ // Note that this deliberately duplicates items in the `rustc_hir::arena`,
+ // since we need to allocate this type on both the `rustc_hir` arena
+ // (during lowering) and the `librustc_middle` arena (for decoding MIR)
+ [decode] asm_template: rustc_ast::InlineAsmTemplatePiece,
+ [decode] used_trait_imports: rustc_data_structures::fx::FxHashSet<rustc_hir::def_id::LocalDefId>,
+ [decode] is_late_bound_map: rustc_data_structures::fx::FxIndexSet<rustc_hir::def_id::LocalDefId>,
+ [decode] impl_source: rustc_middle::traits::ImplSource<'tcx, ()>,
+
+ [] dep_kind: rustc_middle::dep_graph::DepKindStruct,
+ ]);
+ )
+}
+
+arena_types!(rustc_arena::declare_arena);
diff --git a/compiler/rustc_middle/src/dep_graph/dep_node.rs b/compiler/rustc_middle/src/dep_graph/dep_node.rs
new file mode 100644
index 000000000..2d095438f
--- /dev/null
+++ b/compiler/rustc_middle/src/dep_graph/dep_node.rs
@@ -0,0 +1,435 @@
+//! Nodes in the dependency graph.
+//!
+//! A node in the [dependency graph] is represented by a [`DepNode`].
+//! A `DepNode` consists of a [`DepKind`] (which
+//! specifies the kind of thing it represents, like a piece of HIR, MIR, etc.)
+//! and a [`Fingerprint`], a 128-bit hash value, the exact meaning of which
+//! depends on the node's `DepKind`. Together, the kind and the fingerprint
+//! fully identify a dependency node, even across multiple compilation sessions.
+//! In other words, the value of the fingerprint does not depend on anything
+//! that is specific to a given compilation session, like an unpredictable
+//! interning key (e.g., `NodeId`, `DefId`, `Symbol`) or the numeric value of a
+//! pointer. The concept behind this could be compared to how git commit hashes
+//! uniquely identify a given commit. The fingerprinting approach has
+//! a few advantages:
+//!
+//! * A `DepNode` can simply be serialized to disk and loaded in another session
+//! without the need to do any "rebasing" (like we have to do for Spans and
+//! NodeIds) or "retracing" (like we had to do for `DefId` in earlier
+//! implementations of the dependency graph).
+//! * A `Fingerprint` is just a bunch of bits, which allows `DepNode` to
+//! implement `Copy`, `Sync`, `Send`, `Freeze`, etc.
+//! * Since we just have a bit pattern, `DepNode` can be mapped from disk into
+//! memory without any post-processing (e.g., "abomination-style" pointer
+//! reconstruction).
+//! * Because a `DepNode` is self-contained, we can instantiate `DepNodes` that
+//! refer to things that do not exist anymore. In previous implementations
+//! `DepNode` contained a `DefId`. A `DepNode` referring to something that
+//! had been removed between the previous and the current compilation session
+//! could not be instantiated because the current compilation session
+//! contained no `DefId` for the thing that had been removed.
+//!
+//! `DepNode` definition happens in the `define_dep_nodes!()` macro. This macro
+//! defines the `DepKind` enum. Each `DepKind` has its own parameters that are
+//! needed at runtime in order to construct a valid `DepNode` fingerprint.
+//! However, only `CompileCodegenUnit` and `CompileMonoItem` are constructed
+//! explicitly (with `make_compile_codegen_unit` and `make_compile_mono_item`).
+//!
+//! Because the macro sees what parameters a given `DepKind` requires, it can
+//! "infer" some properties for each kind of `DepNode`:
+//!
+//! * Whether a `DepNode` of a given kind has any parameters at all. Some
+//! `DepNode`s could represent global concepts with only one value.
+//! * Whether it is possible, in principle, to reconstruct a query key from a
+//! given `DepNode`. Many `DepKind`s only require a single `DefId` parameter,
+//! in which case it is possible to map the node's fingerprint back to the
+//! `DefId` it was computed from. In other cases, too much information gets
+//! lost during fingerprint computation.
+//!
+//! `make_compile_codegen_unit` and `make_compile_mono_item`, together with
+//! `DepNode::new()`, ensure that only valid `DepNode` instances can be
+//! constructed. For example, the API does not allow for constructing
+//! parameterless `DepNode`s with anything other than a zeroed out fingerprint.
+//! More generally speaking, it relieves the user of the `DepNode` API of
+//! having to know how to compute the expected fingerprint for a given set of
+//! node parameters.
+//!
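+//! As a sketch (illustrative only, not a doctest), for a `kind` whose
+//! fingerprint style is `DefPathHash`:
+//!
+//! ```ignore (illustrative)
+//! // The node's fingerprint *is* the stable `DefPathHash` of the definition...
+//! let node = DepNode::from_def_path_hash(tcx, def_path_hash, kind);
+//! // ...so the `DefId` can be recovered, provided the definition still exists:
+//! let def_id: Option<DefId> = node.extract_def_id(tcx);
+//! ```
+//!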
+//! [dependency graph]: https://rustc-dev-guide.rust-lang.org/query.html
+
+use crate::mir::mono::MonoItem;
+use crate::ty::TyCtxt;
+
+use rustc_data_structures::fingerprint::Fingerprint;
+use rustc_hir::def_id::{CrateNum, DefId, LocalDefId};
+use rustc_hir::definitions::DefPathHash;
+use rustc_hir::HirId;
+use rustc_query_system::dep_graph::FingerprintStyle;
+use rustc_span::symbol::Symbol;
+use std::hash::Hash;
+
+pub use rustc_query_system::dep_graph::{DepContext, DepNodeParams};
+
+/// This struct stores metadata about each DepKind.
+///
+/// Information is retrieved by indexing the `DEP_KINDS` array using the integer value
+/// of the `DepKind`. Overall, this allows implementing `DepContext` using this manual
+/// jump table instead of large matches.
+pub struct DepKindStruct {
+ /// Anonymous queries cannot be replayed from one compiler invocation to the next.
+ /// When their result is needed, it is recomputed. They are useful for fine-grained
+ /// dependency tracking, and caching within one compiler invocation.
+ pub is_anon: bool,
+
+ /// Eval-always queries do not track their dependencies, and are always recomputed, even if
+ /// their inputs have not changed since the last compiler invocation. The result is still
+ /// cached within one compiler invocation.
+ pub is_eval_always: bool,
+
+ /// Whether the query key can be recovered from the hashed fingerprint.
+ /// See [DepNodeParams] trait for the behaviour of each key type.
+ pub fingerprint_style: FingerprintStyle,
+
+ /// The red/green evaluation system will try to mark a specific DepNode in the
+ /// dependency graph as green by recursively trying to mark the dependencies of
+ /// that `DepNode` as green. While doing so, it will sometimes encounter a `DepNode`
+ /// where we don't know if it is red or green and we therefore actually have
+ /// to recompute its value in order to find out. Since the only piece of
+ /// information that we have at that point is the `DepNode` we are trying to
+ /// re-evaluate, we need some way to re-run a query from just that. This is what
+ /// `force_from_dep_node()` implements.
+ ///
+ /// In the general case, a `DepNode` consists of a `DepKind` and an opaque
+ /// GUID/fingerprint that will uniquely identify the node. This GUID/fingerprint
+ /// is usually constructed by computing a stable hash of the query-key that the
+ /// `DepNode` corresponds to. Consequently, it is not in general possible to go
+ /// back from hash to query-key (since hash functions are not reversible). For
+ /// this reason `force_from_dep_node()` is expected to fail from time to time
+ /// because we just cannot find out, from the `DepNode` alone, what the
+ /// corresponding query-key is and therefore cannot re-run the query.
+ ///
+ /// The system deals with this case by letting `try_mark_green` fail, which forces
+ /// the root query to be re-evaluated.
+ ///
+ /// Now, if `force_from_dep_node()` always failed, it would be pretty useless.
+ /// Fortunately, we can use some contextual information that will allow us to
+ /// reconstruct query-keys for certain kinds of `DepNode`s. In particular, we
+ /// enforce by construction that the GUID/fingerprint of certain `DepNode`s is a
+ /// valid `DefPathHash`. Since we also always build a huge table that maps every
+ /// `DefPathHash` in the current codebase to the corresponding `DefId`, we have
+ /// everything we need to re-run the query.
+ ///
+ /// Take the `mir_promoted` query as an example. Like many other queries, it
+ /// just has a single parameter: the `DefId` of the item it will compute the
+ /// validated MIR for. Now, when we call `force_from_dep_node()` on a `DepNode`
+ /// with kind `MirValidated`, we know that the GUID/fingerprint of the `DepNode`
+ /// is actually a `DefPathHash`, and can therefore just look up the corresponding
+ /// `DefId` in `tcx.def_path_hash_to_def_id`.
+ pub force_from_dep_node: Option<fn(tcx: TyCtxt<'_>, dep_node: DepNode) -> bool>,
+
+ /// Invoke a query to put the on-disk cached value in memory.
+ pub try_load_from_on_disk_cache: Option<fn(TyCtxt<'_>, DepNode)>,
+}
+
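+// A hypothetical sketch (illustrative only; the field values are assumptions,
+// not real defaults) of the `DepKindStruct` one might build for a
+// `DefId`-keyed query:
+//
+//     DepKindStruct {
+//         is_anon: false,
+//         is_eval_always: false,
+//         fingerprint_style: FingerprintStyle::DefPathHash,
+//         force_from_dep_node: Some(|tcx, dep_node| { /* recover the DefId, re-run the query */ true }),
+//         try_load_from_on_disk_cache: Some(|tcx, dep_node| { /* warm the in-memory cache */ }),
+//     }
+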
+impl DepKind {
+ #[inline(always)]
+ pub fn fingerprint_style(self, tcx: TyCtxt<'_>) -> FingerprintStyle {
+ // Only fetch the DepKindStruct once.
+ let data = tcx.query_kind(self);
+ if data.is_anon {
+ return FingerprintStyle::Opaque;
+ }
+ data.fingerprint_style
+ }
+}
+
+macro_rules! define_dep_nodes {
+ (<$tcx:tt>
+ $(
+ [$($attrs:tt)*]
+ $variant:ident $(( $tuple_arg_ty:ty $(,)? ))*
+ ,)*
+ ) => (
+ #[macro_export]
+ macro_rules! make_dep_kind_array {
+ ($mod:ident) => {[ $($mod::$variant()),* ]};
+ }
+
+ /// This enum serves as an index into arrays built by `make_dep_kind_array`.
+ #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Encodable, Decodable)]
+ #[allow(non_camel_case_types)]
+ pub enum DepKind {
+ $($variant),*
+ }
+
+ fn dep_kind_from_label_string(label: &str) -> Result<DepKind, ()> {
+ match label {
+ $(stringify!($variant) => Ok(DepKind::$variant),)*
+ _ => Err(()),
+ }
+ }
+
+ /// Contains variant => str representations for constructing
+ /// DepNode groups for tests.
+ #[allow(dead_code, non_upper_case_globals)]
+ pub mod label_strs {
+ $(
+ pub const $variant: &str = stringify!($variant);
+ )*
+ }
+ );
+}
+
+rustc_dep_node_append!([define_dep_nodes!][ <'tcx>
+ // We use this for most things when incr. comp. is turned off.
+ [] Null,
+
+ // We use this to create a forever-red node.
+ [] Red,
+
+ [anon] TraitSelect,
+
+ // WARNING: if `Symbol` is changed, make sure you update `make_compile_codegen_unit` below.
+ [] CompileCodegenUnit(Symbol),
+
+ // WARNING: if `MonoItem` is changed, make sure you update `make_compile_mono_item` below.
+ // Only used by rustc_codegen_cranelift
+ [] CompileMonoItem(MonoItem),
+]);
+
+// WARNING: `construct` is generic and does not know that `CompileCodegenUnit` takes `Symbol`s as keys.
+// Be very careful changing this type signature!
+pub(crate) fn make_compile_codegen_unit(tcx: TyCtxt<'_>, name: Symbol) -> DepNode {
+ DepNode::construct(tcx, DepKind::CompileCodegenUnit, &name)
+}
+
+// WARNING: `construct` is generic and does not know that `CompileMonoItem` takes `MonoItem`s as keys.
+// Be very careful changing this type signature!
+pub(crate) fn make_compile_mono_item<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ mono_item: &MonoItem<'tcx>,
+) -> DepNode {
+ DepNode::construct(tcx, DepKind::CompileMonoItem, mono_item)
+}
+
+pub type DepNode = rustc_query_system::dep_graph::DepNode<DepKind>;
+
+// We keep a lot of `DepNode`s in memory during compilation. It's not
+// required that their size stay the same, but we don't want to change
+// it inadvertently. This assert just ensures we're aware of any change.
+#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
+static_assert_size!(DepNode, 18);
+
+#[cfg(not(any(target_arch = "x86", target_arch = "x86_64")))]
+static_assert_size!(DepNode, 24);
+
+pub trait DepNodeExt: Sized {
+ /// Construct a DepNode from the given DepKind and DefPathHash. This
+ /// method will assert that the given DepKind actually requires a
+ /// single DefId/DefPathHash parameter.
+ fn from_def_path_hash(tcx: TyCtxt<'_>, def_path_hash: DefPathHash, kind: DepKind) -> Self;
+
+ /// Extracts the DefId corresponding to this DepNode. This will work
+ /// if two conditions are met:
+ ///
+ /// 1. The Fingerprint of the DepNode actually is a DefPathHash, and
+ /// 2. the item that the DefPath refers to exists in the current tcx.
+ ///
+ /// Condition (1) is determined by the DepKind variant of the
+ /// DepNode. Condition (2) might not be fulfilled if a DepNode
+ /// refers to something from the previous compilation session that
+ /// has been removed.
+ fn extract_def_id(&self, tcx: TyCtxt<'_>) -> Option<DefId>;
+
+ /// Used in testing
+ fn from_label_string(
+ tcx: TyCtxt<'_>,
+ label: &str,
+ def_path_hash: DefPathHash,
+ ) -> Result<Self, ()>;
+
+ /// Used in testing
+ fn has_label_string(label: &str) -> bool;
+}
+
+impl DepNodeExt for DepNode {
+ /// Construct a DepNode from the given DepKind and DefPathHash. This
+ /// method will assert that the given DepKind actually requires a
+ /// single DefId/DefPathHash parameter.
+ fn from_def_path_hash(tcx: TyCtxt<'_>, def_path_hash: DefPathHash, kind: DepKind) -> DepNode {
+ debug_assert!(kind.fingerprint_style(tcx) == FingerprintStyle::DefPathHash);
+ DepNode { kind, hash: def_path_hash.0.into() }
+ }
+
+ /// Extracts the DefId corresponding to this DepNode. This will work
+ /// if two conditions are met:
+ ///
+ /// 1. The Fingerprint of the DepNode actually is a DefPathHash, and
+ /// 2. the item that the DefPath refers to exists in the current tcx.
+ ///
+ /// Condition (1) is determined by the DepKind variant of the
+ /// DepNode. Condition (2) might not be fulfilled if a DepNode
+ /// refers to something from the previous compilation session that
+ /// has been removed.
+ fn extract_def_id<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Option<DefId> {
+ if self.kind.fingerprint_style(tcx) == FingerprintStyle::DefPathHash {
+ Some(tcx.def_path_hash_to_def_id(DefPathHash(self.hash.into()), &mut || {
+ panic!("Failed to extract DefId: {:?} {}", self.kind, self.hash)
+ }))
+ } else {
+ None
+ }
+ }
+
+ /// Used in testing
+ fn from_label_string(
+ tcx: TyCtxt<'_>,
+ label: &str,
+ def_path_hash: DefPathHash,
+ ) -> Result<DepNode, ()> {
+ let kind = dep_kind_from_label_string(label)?;
+
+ match kind.fingerprint_style(tcx) {
+ FingerprintStyle::Opaque => Err(()),
+ FingerprintStyle::Unit => Ok(DepNode::new_no_params(tcx, kind)),
+ FingerprintStyle::DefPathHash => {
+ Ok(DepNode::from_def_path_hash(tcx, def_path_hash, kind))
+ }
+ }
+ }
+
+ /// Used in testing
+ fn has_label_string(label: &str) -> bool {
+ dep_kind_from_label_string(label).is_ok()
+ }
+}
+
+impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for () {
+ #[inline(always)]
+ fn fingerprint_style() -> FingerprintStyle {
+ FingerprintStyle::Unit
+ }
+
+ #[inline(always)]
+ fn to_fingerprint(&self, _: TyCtxt<'tcx>) -> Fingerprint {
+ Fingerprint::ZERO
+ }
+
+ #[inline(always)]
+ fn recover(_: TyCtxt<'tcx>, _: &DepNode) -> Option<Self> {
+ Some(())
+ }
+}
+
+impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for DefId {
+ #[inline(always)]
+ fn fingerprint_style() -> FingerprintStyle {
+ FingerprintStyle::DefPathHash
+ }
+
+ #[inline(always)]
+ fn to_fingerprint(&self, tcx: TyCtxt<'tcx>) -> Fingerprint {
+ tcx.def_path_hash(*self).0
+ }
+
+ #[inline(always)]
+ fn to_debug_str(&self, tcx: TyCtxt<'tcx>) -> String {
+ tcx.def_path_str(*self)
+ }
+
+ #[inline(always)]
+ fn recover(tcx: TyCtxt<'tcx>, dep_node: &DepNode) -> Option<Self> {
+ dep_node.extract_def_id(tcx)
+ }
+}
+
+impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for LocalDefId {
+ #[inline(always)]
+ fn fingerprint_style() -> FingerprintStyle {
+ FingerprintStyle::DefPathHash
+ }
+
+ #[inline(always)]
+ fn to_fingerprint(&self, tcx: TyCtxt<'tcx>) -> Fingerprint {
+ self.to_def_id().to_fingerprint(tcx)
+ }
+
+ #[inline(always)]
+ fn to_debug_str(&self, tcx: TyCtxt<'tcx>) -> String {
+ self.to_def_id().to_debug_str(tcx)
+ }
+
+ #[inline(always)]
+ fn recover(tcx: TyCtxt<'tcx>, dep_node: &DepNode) -> Option<Self> {
+ dep_node.extract_def_id(tcx).map(|id| id.expect_local())
+ }
+}
+
+impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for CrateNum {
+ #[inline(always)]
+ fn fingerprint_style() -> FingerprintStyle {
+ FingerprintStyle::DefPathHash
+ }
+
+ #[inline(always)]
+ fn to_fingerprint(&self, tcx: TyCtxt<'tcx>) -> Fingerprint {
+ let def_id = self.as_def_id();
+ def_id.to_fingerprint(tcx)
+ }
+
+ #[inline(always)]
+ fn to_debug_str(&self, tcx: TyCtxt<'tcx>) -> String {
+ tcx.crate_name(*self).to_string()
+ }
+
+ #[inline(always)]
+ fn recover(tcx: TyCtxt<'tcx>, dep_node: &DepNode) -> Option<Self> {
+ dep_node.extract_def_id(tcx).map(|id| id.krate)
+ }
+}
+
+impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for (DefId, DefId) {
+ #[inline(always)]
+ fn fingerprint_style() -> FingerprintStyle {
+ FingerprintStyle::Opaque
+ }
+
+ // We would not actually need to specialize the implementation of this
+ // method, but it's faster to combine the hashes than to instantiate a full
+ // hashing context and stable-hashing state.
+ #[inline(always)]
+ fn to_fingerprint(&self, tcx: TyCtxt<'tcx>) -> Fingerprint {
+ let (def_id_0, def_id_1) = *self;
+
+ let def_path_hash_0 = tcx.def_path_hash(def_id_0);
+ let def_path_hash_1 = tcx.def_path_hash(def_id_1);
+
+ def_path_hash_0.0.combine(def_path_hash_1.0)
+ }
+
+ #[inline(always)]
+ fn to_debug_str(&self, tcx: TyCtxt<'tcx>) -> String {
+ let (def_id_0, def_id_1) = *self;
+
+ format!("({}, {})", tcx.def_path_debug_str(def_id_0), tcx.def_path_debug_str(def_id_1))
+ }
+}
+
+impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for HirId {
+ #[inline(always)]
+ fn fingerprint_style() -> FingerprintStyle {
+ FingerprintStyle::Opaque
+ }
+
+ // We would not actually need to specialize the implementation of this
+ // method, but it's faster to combine the hashes than to instantiate a full
+ // hashing context and stable-hashing state.
+ #[inline(always)]
+ fn to_fingerprint(&self, tcx: TyCtxt<'tcx>) -> Fingerprint {
+ let HirId { owner, local_id } = *self;
+
+ let def_path_hash = tcx.def_path_hash(owner.to_def_id());
+ let local_id = Fingerprint::from_smaller_hash(local_id.as_u32().into());
+
+ def_path_hash.0.combine(local_id)
+ }
+}
diff --git a/compiler/rustc_middle/src/dep_graph/mod.rs b/compiler/rustc_middle/src/dep_graph/mod.rs
new file mode 100644
index 000000000..c8b3b52b0
--- /dev/null
+++ b/compiler/rustc_middle/src/dep_graph/mod.rs
@@ -0,0 +1,140 @@
+use crate::ty::{self, TyCtxt};
+use rustc_data_structures::profiling::SelfProfilerRef;
+use rustc_query_system::ich::StableHashingContext;
+use rustc_session::Session;
+
+#[macro_use]
+mod dep_node;
+
+pub use rustc_query_system::dep_graph::{
+ debug::DepNodeFilter, hash_result, DepContext, DepNodeColor, DepNodeIndex,
+ SerializedDepNodeIndex, WorkProduct, WorkProductId,
+};
+
+pub use dep_node::{label_strs, DepKind, DepKindStruct, DepNode, DepNodeExt};
+pub(crate) use dep_node::{make_compile_codegen_unit, make_compile_mono_item};
+
+pub type DepGraph = rustc_query_system::dep_graph::DepGraph<DepKind>;
+pub type TaskDeps = rustc_query_system::dep_graph::TaskDeps<DepKind>;
+pub type TaskDepsRef<'a> = rustc_query_system::dep_graph::TaskDepsRef<'a, DepKind>;
+pub type DepGraphQuery = rustc_query_system::dep_graph::DepGraphQuery<DepKind>;
+pub type SerializedDepGraph = rustc_query_system::dep_graph::SerializedDepGraph<DepKind>;
+pub type EdgeFilter = rustc_query_system::dep_graph::debug::EdgeFilter<DepKind>;
+
+impl rustc_query_system::dep_graph::DepKind for DepKind {
+ const NULL: Self = DepKind::Null;
+ const RED: Self = DepKind::Red;
+
+ fn debug_node(node: &DepNode, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ write!(f, "{:?}(", node.kind)?;
+
+ ty::tls::with_opt(|opt_tcx| {
+ if let Some(tcx) = opt_tcx {
+ if let Some(def_id) = node.extract_def_id(tcx) {
+ write!(f, "{}", tcx.def_path_debug_str(def_id))?;
+ } else if let Some(ref s) = tcx.dep_graph.dep_node_debug_str(*node) {
+ write!(f, "{}", s)?;
+ } else {
+ write!(f, "{}", node.hash)?;
+ }
+ } else {
+ write!(f, "{}", node.hash)?;
+ }
+ Ok(())
+ })?;
+
+ write!(f, ")")
+ }
+
+ fn with_deps<OP, R>(task_deps: TaskDepsRef<'_>, op: OP) -> R
+ where
+ OP: FnOnce() -> R,
+ {
+ ty::tls::with_context(|icx| {
+ let icx = ty::tls::ImplicitCtxt { task_deps, ..icx.clone() };
+
+ ty::tls::enter_context(&icx, |_| op())
+ })
+ }
+
+ fn read_deps<OP>(op: OP)
+ where
+ OP: for<'a> FnOnce(TaskDepsRef<'a>),
+ {
+ ty::tls::with_context_opt(|icx| {
+ let Some(icx) = icx else { return };
+ op(icx.task_deps)
+ })
+ }
+}
+
+impl<'tcx> DepContext for TyCtxt<'tcx> {
+ type DepKind = DepKind;
+
+ #[inline]
+ fn with_stable_hashing_context<R>(&self, f: impl FnOnce(StableHashingContext<'_>) -> R) -> R {
+ TyCtxt::with_stable_hashing_context(*self, f)
+ }
+
+ #[inline]
+ fn dep_graph(&self) -> &DepGraph {
+ &self.dep_graph
+ }
+
+ #[inline(always)]
+ fn profiler(&self) -> &SelfProfilerRef {
+ &self.prof
+ }
+
+ #[inline(always)]
+ fn sess(&self) -> &Session {
+ self.sess
+ }
+
+ #[inline(always)]
+ fn fingerprint_style(&self, kind: DepKind) -> rustc_query_system::dep_graph::FingerprintStyle {
+ kind.fingerprint_style(*self)
+ }
+
+ #[inline(always)]
+ fn is_eval_always(&self, kind: DepKind) -> bool {
+ self.query_kind(kind).is_eval_always
+ }
+
+ fn try_force_from_dep_node(&self, dep_node: DepNode) -> bool {
+ debug!("try_force_from_dep_node({:?}) --- trying to force", dep_node);
+
+ // We must avoid ever having to call `force_from_dep_node()` for a
+ // `DepNode::codegen_unit`:
+ // Since we cannot reconstruct the query key of a `DepNode::codegen_unit`, we
+ // would always end up having to evaluate the first caller of the
+ // `codegen_unit` query that *is* reconstructible. This might very well be
+ // the `compile_codegen_unit` query, thus re-codegenning the whole CGU just
+ // to re-trigger calling the `codegen_unit` query with the right key. At
+ // that point we would already have re-done all the work we are trying to
+ // avoid doing in the first place.
+ // The solution is simple: Just explicitly call the `codegen_unit` query for
+ // each CGU, right after partitioning. This way `try_mark_green` will always
+ // hit the cache instead of having to go through `force_from_dep_node`.
+ // This assertion makes sure we actually keep applying the solution above.
+ debug_assert!(
+ dep_node.kind != DepKind::codegen_unit,
+ "calling force_from_dep_node() on DepKind::codegen_unit"
+ );
+
+ let cb = self.query_kind(dep_node.kind);
+ if let Some(f) = cb.force_from_dep_node {
+ f(*self, dep_node);
+ true
+ } else {
+ false
+ }
+ }
+
+ fn try_load_from_on_disk_cache(&self, dep_node: DepNode) {
+ let cb = self.query_kind(dep_node.kind);
+ if let Some(f) = cb.try_load_from_on_disk_cache {
+ f(*self, dep_node)
+ }
+ }
+}
diff --git a/compiler/rustc_middle/src/hir/map/mod.rs b/compiler/rustc_middle/src/hir/map/mod.rs
new file mode 100644
index 000000000..47b04c33e
--- /dev/null
+++ b/compiler/rustc_middle/src/hir/map/mod.rs
@@ -0,0 +1,1405 @@
+use crate::hir::{ModuleItems, Owner};
+use crate::ty::{DefIdTree, TyCtxt};
+use rustc_ast as ast;
+use rustc_data_structures::fingerprint::Fingerprint;
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_data_structures::svh::Svh;
+use rustc_data_structures::sync::{par_for_each_in, Send, Sync};
+use rustc_hir::def::{DefKind, Res};
+use rustc_hir::def_id::{CrateNum, DefId, LocalDefId, CRATE_DEF_ID, LOCAL_CRATE};
+use rustc_hir::definitions::{DefKey, DefPath, DefPathHash};
+use rustc_hir::intravisit::{self, Visitor};
+use rustc_hir::*;
+use rustc_index::vec::Idx;
+use rustc_middle::hir::nested_filter;
+use rustc_span::def_id::StableCrateId;
+use rustc_span::symbol::{kw, sym, Ident, Symbol};
+use rustc_span::Span;
+use rustc_target::spec::abi::Abi;
+
+fn fn_decl<'hir>(node: Node<'hir>) -> Option<&'hir FnDecl<'hir>> {
+ match node {
+ Node::Item(Item { kind: ItemKind::Fn(sig, _, _), .. })
+ | Node::TraitItem(TraitItem { kind: TraitItemKind::Fn(sig, _), .. })
+ | Node::ImplItem(ImplItem { kind: ImplItemKind::Fn(sig, _), .. }) => Some(&sig.decl),
+ Node::Expr(Expr { kind: ExprKind::Closure(Closure { fn_decl, .. }), .. })
+ | Node::ForeignItem(ForeignItem { kind: ForeignItemKind::Fn(fn_decl, ..), .. }) => {
+ Some(fn_decl)
+ }
+ _ => None,
+ }
+}
+
+pub fn fn_sig<'hir>(node: Node<'hir>) -> Option<&'hir FnSig<'hir>> {
+ match &node {
+ Node::Item(Item { kind: ItemKind::Fn(sig, _, _), .. })
+ | Node::TraitItem(TraitItem { kind: TraitItemKind::Fn(sig, _), .. })
+ | Node::ImplItem(ImplItem { kind: ImplItemKind::Fn(sig, _), .. }) => Some(sig),
+ _ => None,
+ }
+}
+
+#[inline]
+pub fn associated_body<'hir>(node: Node<'hir>) -> Option<BodyId> {
+ match node {
+ Node::Item(Item {
+ kind: ItemKind::Const(_, body) | ItemKind::Static(.., body) | ItemKind::Fn(.., body),
+ ..
+ })
+ | Node::TraitItem(TraitItem {
+ kind:
+ TraitItemKind::Const(_, Some(body)) | TraitItemKind::Fn(_, TraitFn::Provided(body)),
+ ..
+ })
+ | Node::ImplItem(ImplItem {
+ kind: ImplItemKind::Const(_, body) | ImplItemKind::Fn(_, body),
+ ..
+ })
+ | Node::Expr(Expr { kind: ExprKind::Closure(Closure { body, .. }), .. }) => Some(*body),
+
+ Node::AnonConst(constant) => Some(constant.body),
+
+ _ => None,
+ }
+}
+
+fn is_body_owner<'hir>(node: Node<'hir>, hir_id: HirId) -> bool {
+ match associated_body(node) {
+ Some(b) => b.hir_id == hir_id,
+ None => false,
+ }
+}
+
+#[derive(Copy, Clone)]
+pub struct Map<'hir> {
+ pub(super) tcx: TyCtxt<'hir>,
+}
+
+/// An iterator that walks up the ancestor tree of a given `HirId`.
+/// Constructed using `tcx.hir().parent_iter(hir_id)`.
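+///
+/// A usage sketch (illustrative, not a doctest): walk outward from `id` to the
+/// closest enclosing item.
+///
+/// ```ignore (illustrative)
+/// for (hir_id, node) in tcx.hir().parent_iter(id) {
+///     if let Node::Item(item) = node {
+///         // `hir_id` / `item` identify the closest enclosing item of `id`.
+///         break;
+///     }
+/// }
+/// ```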
+pub struct ParentHirIterator<'hir> {
+ current_id: HirId,
+ map: Map<'hir>,
+}
+
+impl<'hir> Iterator for ParentHirIterator<'hir> {
+ type Item = (HirId, Node<'hir>);
+
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.current_id == CRATE_HIR_ID {
+ return None;
+ }
+ loop {
+ // There are nodes that do not have entries, so we need to skip them.
+ let parent_id = self.map.get_parent_node(self.current_id);
+
+ if parent_id == self.current_id {
+ self.current_id = CRATE_HIR_ID;
+ return None;
+ }
+
+ self.current_id = parent_id;
+ if let Some(node) = self.map.find(parent_id) {
+ return Some((parent_id, node));
+ }
+ // If this `HirId` doesn't have an entry, skip it and look for its `parent_id`.
+ }
+ }
+}
+
+/// An iterator that walks up the ancestor tree of a given `HirId`.
+/// Constructed using `tcx.hir().parent_owner_iter(hir_id)`.
+pub struct ParentOwnerIterator<'hir> {
+ current_id: HirId,
+ map: Map<'hir>,
+}
+
+impl<'hir> Iterator for ParentOwnerIterator<'hir> {
+ type Item = (LocalDefId, OwnerNode<'hir>);
+
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.current_id.local_id.index() != 0 {
+ self.current_id.local_id = ItemLocalId::new(0);
+ if let Some(node) = self.map.tcx.hir_owner(self.current_id.owner) {
+ return Some((self.current_id.owner, node.node));
+ }
+ }
+ if self.current_id == CRATE_HIR_ID {
+ return None;
+ }
+ loop {
+ // There are nodes that do not have entries, so we need to skip them.
+ let parent_id = self.map.def_key(self.current_id.owner).parent;
+
+ let parent_id = parent_id.map_or(CRATE_HIR_ID.owner, |local_def_index| {
+ let def_id = LocalDefId { local_def_index };
+ self.map.local_def_id_to_hir_id(def_id).owner
+ });
+ self.current_id = HirId::make_owner(parent_id);
+
+ // If this `HirId` doesn't have an entry, skip it and look for its `parent_id`.
+ if let Some(node) = self.map.tcx.hir_owner(self.current_id.owner) {
+ return Some((self.current_id.owner, node.node));
+ }
+ }
+ }
+}
+
+impl<'hir> Map<'hir> {
+ pub fn krate(self) -> &'hir Crate<'hir> {
+ self.tcx.hir_crate(())
+ }
+
+ pub fn root_module(self) -> &'hir Mod<'hir> {
+ match self.tcx.hir_owner(CRATE_DEF_ID).map(|o| o.node) {
+ Some(OwnerNode::Crate(item)) => item,
+ _ => bug!(),
+ }
+ }
+
+ pub fn items(self) -> impl Iterator<Item = ItemId> + 'hir {
+ self.tcx.hir_crate_items(()).items.iter().copied()
+ }
+
+ pub fn module_items(self, module: LocalDefId) -> impl Iterator<Item = ItemId> + 'hir {
+ self.tcx.hir_module_items(module).items()
+ }
+
+ pub fn par_for_each_item(self, f: impl Fn(ItemId) + Sync + Send) {
+ par_for_each_in(&self.tcx.hir_crate_items(()).items[..], |id| f(*id));
+ }
+
+ pub fn def_key(self, def_id: LocalDefId) -> DefKey {
+ // Accessing the DefKey is ok, since it is part of DefPathHash.
+ self.tcx.definitions_untracked().def_key(def_id)
+ }
+
+ pub fn def_path_from_hir_id(self, id: HirId) -> Option<DefPath> {
+ self.opt_local_def_id(id).map(|def_id| self.def_path(def_id))
+ }
+
+ pub fn def_path(self, def_id: LocalDefId) -> DefPath {
+ // Accessing the DefPath is ok, since it is part of DefPathHash.
+ self.tcx.definitions_untracked().def_path(def_id)
+ }
+
+ #[inline]
+ pub fn def_path_hash(self, def_id: LocalDefId) -> DefPathHash {
+ // Accessing the DefPathHash is ok, it is incr. comp. stable.
+ self.tcx.definitions_untracked().def_path_hash(def_id)
+ }
+
+ #[inline]
+ pub fn local_def_id(self, hir_id: HirId) -> LocalDefId {
+ self.opt_local_def_id(hir_id).unwrap_or_else(|| {
+ bug!(
+ "local_def_id: no entry for `{:?}`, which has a map of `{:?}`",
+ hir_id,
+ self.find(hir_id)
+ )
+ })
+ }
+
+ #[inline]
+ pub fn opt_local_def_id(self, hir_id: HirId) -> Option<LocalDefId> {
+ if hir_id.local_id == ItemLocalId::new(0) {
+ Some(hir_id.owner)
+ } else {
+ self.tcx
+ .hir_owner_nodes(hir_id.owner)
+ .as_owner()?
+ .local_id_to_def_id
+ .get(&hir_id.local_id)
+ .copied()
+ }
+ }
+
+ #[inline]
+ pub fn local_def_id_to_hir_id(self, def_id: LocalDefId) -> HirId {
+ self.tcx.local_def_id_to_hir_id(def_id)
+ }
+
+ /// Do not call this function directly. The `opt_def_kind` query should be called instead.
+ pub(super) fn opt_def_kind(self, local_def_id: LocalDefId) -> Option<DefKind> {
+ let hir_id = self.local_def_id_to_hir_id(local_def_id);
+ let def_kind = match self.find(hir_id)? {
+ Node::Item(item) => match item.kind {
+ ItemKind::Static(_, mt, _) => DefKind::Static(mt),
+ ItemKind::Const(..) => DefKind::Const,
+ ItemKind::Fn(..) => DefKind::Fn,
+ ItemKind::Macro(_, macro_kind) => DefKind::Macro(macro_kind),
+ ItemKind::Mod(..) => DefKind::Mod,
+ ItemKind::OpaqueTy(..) => DefKind::OpaqueTy,
+ ItemKind::TyAlias(..) => DefKind::TyAlias,
+ ItemKind::Enum(..) => DefKind::Enum,
+ ItemKind::Struct(..) => DefKind::Struct,
+ ItemKind::Union(..) => DefKind::Union,
+ ItemKind::Trait(..) => DefKind::Trait,
+ ItemKind::TraitAlias(..) => DefKind::TraitAlias,
+ ItemKind::ExternCrate(_) => DefKind::ExternCrate,
+ ItemKind::Use(..) => DefKind::Use,
+ ItemKind::ForeignMod { .. } => DefKind::ForeignMod,
+ ItemKind::GlobalAsm(..) => DefKind::GlobalAsm,
+ ItemKind::Impl { .. } => DefKind::Impl,
+ },
+ Node::ForeignItem(item) => match item.kind {
+ ForeignItemKind::Fn(..) => DefKind::Fn,
+ ForeignItemKind::Static(_, mt) => DefKind::Static(mt),
+ ForeignItemKind::Type => DefKind::ForeignTy,
+ },
+ Node::TraitItem(item) => match item.kind {
+ TraitItemKind::Const(..) => DefKind::AssocConst,
+ TraitItemKind::Fn(..) => DefKind::AssocFn,
+ TraitItemKind::Type(..) => DefKind::AssocTy,
+ },
+ Node::ImplItem(item) => match item.kind {
+ ImplItemKind::Const(..) => DefKind::AssocConst,
+ ImplItemKind::Fn(..) => DefKind::AssocFn,
+ ImplItemKind::TyAlias(..) => DefKind::AssocTy,
+ },
+ Node::Variant(_) => DefKind::Variant,
+ Node::Ctor(variant_data) => {
+ // FIXME(eddyb) is this even possible, if we have a `Node::Ctor`?
+ assert_ne!(variant_data.ctor_hir_id(), None);
+
+ let ctor_of = match self.find(self.get_parent_node(hir_id)) {
+ Some(Node::Item(..)) => def::CtorOf::Struct,
+ Some(Node::Variant(..)) => def::CtorOf::Variant,
+ _ => unreachable!(),
+ };
+ DefKind::Ctor(ctor_of, def::CtorKind::from_hir(variant_data))
+ }
+ Node::AnonConst(_) => {
+ let inline = match self.find(self.get_parent_node(hir_id)) {
+ Some(Node::Expr(&Expr {
+ kind: ExprKind::ConstBlock(ref anon_const), ..
+ })) if anon_const.hir_id == hir_id => true,
+ _ => false,
+ };
+ if inline { DefKind::InlineConst } else { DefKind::AnonConst }
+ }
+ Node::Field(_) => DefKind::Field,
+ Node::Expr(expr) => match expr.kind {
+ ExprKind::Closure(Closure { movability: None, .. }) => DefKind::Closure,
+ ExprKind::Closure(Closure { movability: Some(_), .. }) => DefKind::Generator,
+ _ => bug!("def_kind: unsupported node: {}", self.node_to_string(hir_id)),
+ },
+ Node::GenericParam(param) => match param.kind {
+ GenericParamKind::Lifetime { .. } => DefKind::LifetimeParam,
+ GenericParamKind::Type { .. } => DefKind::TyParam,
+ GenericParamKind::Const { .. } => DefKind::ConstParam,
+ },
+ Node::Crate(_) => DefKind::Mod,
+ Node::Stmt(_)
+ | Node::PathSegment(_)
+ | Node::Ty(_)
+ | Node::TypeBinding(_)
+ | Node::Infer(_)
+ | Node::TraitRef(_)
+ | Node::Pat(_)
+ | Node::Local(_)
+ | Node::Param(_)
+ | Node::Arm(_)
+ | Node::Lifetime(_)
+ | Node::Block(_) => return None,
+ };
+ Some(def_kind)
+ }
+
+ pub fn find_parent_node(self, id: HirId) -> Option<HirId> {
+ if id.local_id == ItemLocalId::from_u32(0) {
+ Some(self.tcx.hir_owner_parent(id.owner))
+ } else {
+ let owner = self.tcx.hir_owner_nodes(id.owner).as_owner()?;
+ let node = owner.nodes[id.local_id].as_ref()?;
+ let hir_id = HirId { owner: id.owner, local_id: node.parent };
+ Some(hir_id)
+ }
+ }
+
+ pub fn get_parent_node(self, hir_id: HirId) -> HirId {
+ self.find_parent_node(hir_id)
+ .unwrap_or_else(|| bug!("No parent for node {:?}", self.node_to_string(hir_id)))
+ }
+
+ /// Retrieves the `Node` corresponding to `id`, returning `None` if it cannot be found.
+ pub fn find(self, id: HirId) -> Option<Node<'hir>> {
+ if id.local_id == ItemLocalId::from_u32(0) {
+ let owner = self.tcx.hir_owner(id.owner)?;
+ Some(owner.node.into())
+ } else {
+ let owner = self.tcx.hir_owner_nodes(id.owner).as_owner()?;
+ let node = owner.nodes[id.local_id].as_ref()?;
+ Some(node.node)
+ }
+ }
+
+ /// Retrieves the `Node` corresponding to `id`, returning `None` if it cannot be found.
+ #[inline]
+ pub fn find_by_def_id(self, id: LocalDefId) -> Option<Node<'hir>> {
+ self.find(self.local_def_id_to_hir_id(id))
+ }
+
+ /// Retrieves the `Node` corresponding to `id`, panicking if it cannot be found.
+ pub fn get(self, id: HirId) -> Node<'hir> {
+ self.find(id).unwrap_or_else(|| bug!("couldn't find hir id {} in the HIR map", id))
+ }
+
+ /// Retrieves the `Node` corresponding to `id`, panicking if it cannot be found.
+ #[inline]
+ pub fn get_by_def_id(self, id: LocalDefId) -> Node<'hir> {
+ self.find_by_def_id(id).unwrap_or_else(|| bug!("couldn't find {:?} in the HIR map", id))
+ }
+
+ pub fn get_if_local(self, id: DefId) -> Option<Node<'hir>> {
+ id.as_local().and_then(|id| self.find(self.local_def_id_to_hir_id(id)))
+ }
+
+ pub fn get_generics(self, id: LocalDefId) -> Option<&'hir Generics<'hir>> {
+ let node = self.tcx.hir_owner(id)?;
+ node.node.generics()
+ }
+
+ pub fn item(self, id: ItemId) -> &'hir Item<'hir> {
+ self.tcx.hir_owner(id.def_id).unwrap().node.expect_item()
+ }
+
+ pub fn trait_item(self, id: TraitItemId) -> &'hir TraitItem<'hir> {
+ self.tcx.hir_owner(id.def_id).unwrap().node.expect_trait_item()
+ }
+
+ pub fn impl_item(self, id: ImplItemId) -> &'hir ImplItem<'hir> {
+ self.tcx.hir_owner(id.def_id).unwrap().node.expect_impl_item()
+ }
+
+ pub fn foreign_item(self, id: ForeignItemId) -> &'hir ForeignItem<'hir> {
+ self.tcx.hir_owner(id.def_id).unwrap().node.expect_foreign_item()
+ }
+
+ pub fn body(self, id: BodyId) -> &'hir Body<'hir> {
+ self.tcx.hir_owner_nodes(id.hir_id.owner).unwrap().bodies[&id.hir_id.local_id]
+ }
+
+ pub fn fn_decl_by_hir_id(self, hir_id: HirId) -> Option<&'hir FnDecl<'hir>> {
+ if let Some(node) = self.find(hir_id) {
+ fn_decl(node)
+ } else {
+ bug!("no node for hir_id `{}`", hir_id)
+ }
+ }
+
+ pub fn fn_sig_by_hir_id(self, hir_id: HirId) -> Option<&'hir FnSig<'hir>> {
+ if let Some(node) = self.find(hir_id) {
+ fn_sig(node)
+ } else {
+ bug!("no node for hir_id `{}`", hir_id)
+ }
+ }
+
+ pub fn enclosing_body_owner(self, hir_id: HirId) -> LocalDefId {
+ for (parent, _) in self.parent_iter(hir_id) {
+ if let Some(body) = self.find(parent).and_then(associated_body) {
+ return self.body_owner_def_id(body);
+ }
+ }
+
+ bug!("no `enclosing_body_owner` for hir_id `{}`", hir_id);
+ }
+
+ /// Returns the `HirId` that corresponds to the definition of
+ /// which this is the body of, i.e., a `fn`, `const` or `static`
+ /// item (possibly associated), a closure, or a `hir::AnonConst`.
+ pub fn body_owner(self, BodyId { hir_id }: BodyId) -> HirId {
+ let parent = self.get_parent_node(hir_id);
+ assert!(self.find(parent).map_or(false, |n| is_body_owner(n, hir_id)));
+ parent
+ }
+
+ pub fn body_owner_def_id(self, id: BodyId) -> LocalDefId {
+ self.local_def_id(self.body_owner(id))
+ }
+
+ /// Given a `LocalDefId`, returns the `BodyId` associated with it,
+ /// if the node is a body owner, otherwise returns `None`.
+ pub fn maybe_body_owned_by(self, id: LocalDefId) -> Option<BodyId> {
+ self.get_if_local(id.to_def_id()).and_then(associated_body)
+ }
+
+ /// Given a body owner's id, returns the `BodyId` associated with it.
+ pub fn body_owned_by(self, id: LocalDefId) -> BodyId {
+ self.maybe_body_owned_by(id).unwrap_or_else(|| {
+ let hir_id = self.local_def_id_to_hir_id(id);
+ span_bug!(
+ self.span(hir_id),
+ "body_owned_by: {} has no associated body",
+ self.node_to_string(hir_id)
+ );
+ })
+ }
+
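+ // A hypothetical usage sketch (illustrative): fetch the HIR body of a known
+ // body owner by chaining `body_owned_by` and `body`.
+ //
+ //     let body_id = tcx.hir().body_owned_by(def_id);
+ //     let body = tcx.hir().body(body_id);
+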
+ pub fn body_param_names(self, id: BodyId) -> impl Iterator<Item = Ident> + 'hir {
+ self.body(id).params.iter().map(|arg| match arg.pat.kind {
+ PatKind::Binding(_, _, ident, _) => ident,
+ _ => Ident::empty(),
+ })
+ }
+
+ /// Returns the `BodyOwnerKind` of this `LocalDefId`.
+ ///
+ /// Panics if `LocalDefId` does not have an associated body.
+ pub fn body_owner_kind(self, def_id: LocalDefId) -> BodyOwnerKind {
+ match self.tcx.def_kind(def_id) {
+ DefKind::Const | DefKind::AssocConst | DefKind::InlineConst | DefKind::AnonConst => {
+ BodyOwnerKind::Const
+ }
+ DefKind::Ctor(..) | DefKind::Fn | DefKind::AssocFn => BodyOwnerKind::Fn,
+ DefKind::Closure | DefKind::Generator => BodyOwnerKind::Closure,
+ DefKind::Static(mt) => BodyOwnerKind::Static(mt),
+ dk => bug!("{:?} is not a body node: {:?}", def_id, dk),
+ }
+ }
+
+ /// Returns the `ConstContext` of the body associated with this `LocalDefId`.
+ ///
+ /// Panics if `LocalDefId` does not have an associated body.
+ ///
+ /// This should only be used for determining the context of a body; a return
+ /// value of `Some` does not necessarily mean that the owner of the body is `const`,
+ /// just that it has to be checked as if it were.
+ pub fn body_const_context(self, def_id: LocalDefId) -> Option<ConstContext> {
+ let ccx = match self.body_owner_kind(def_id) {
+ BodyOwnerKind::Const => ConstContext::Const,
+ BodyOwnerKind::Static(mt) => ConstContext::Static(mt),
+
+ BodyOwnerKind::Fn if self.tcx.is_constructor(def_id.to_def_id()) => return None,
+ BodyOwnerKind::Fn if self.tcx.is_const_fn_raw(def_id.to_def_id()) => {
+ ConstContext::ConstFn
+ }
+ BodyOwnerKind::Fn if self.tcx.is_const_default_method(def_id.to_def_id()) => {
+ ConstContext::ConstFn
+ }
+ BodyOwnerKind::Fn | BodyOwnerKind::Closure => return None,
+ };
+
+ Some(ccx)
+ }
+
+ /// Returns an iterator of the `LocalDefId`s for all body-owners in this
+ /// crate. If you would prefer to iterate over the bodies themselves,
+ /// map each owner through `tcx.hir().body_owned_by` and `tcx.hir().body`.
+ pub fn body_owners(self) -> impl Iterator<Item = LocalDefId> + 'hir {
+ self.tcx.hir_crate_items(()).body_owners.iter().copied()
+ }
+
+ pub fn par_body_owners<F: Fn(LocalDefId) + Sync + Send>(self, f: F) {
+ par_for_each_in(&self.tcx.hir_crate_items(()).body_owners[..], |&def_id| f(def_id));
+ }
+
+ pub fn ty_param_owner(self, def_id: LocalDefId) -> LocalDefId {
+ let def_kind = self.tcx.def_kind(def_id);
+ match def_kind {
+ DefKind::Trait | DefKind::TraitAlias => def_id,
+ DefKind::TyParam | DefKind::ConstParam => self.tcx.local_parent(def_id),
+ _ => bug!("ty_param_owner: {:?} is a {:?} not a type parameter", def_id, def_kind),
+ }
+ }
+
+ pub fn ty_param_name(self, def_id: LocalDefId) -> Symbol {
+ let def_kind = self.tcx.def_kind(def_id);
+ match def_kind {
+ DefKind::Trait | DefKind::TraitAlias => kw::SelfUpper,
+ DefKind::TyParam | DefKind::ConstParam => self.tcx.item_name(def_id.to_def_id()),
+ _ => bug!("ty_param_name: {:?} is a {:?} not a type parameter", def_id, def_kind),
+ }
+ }
+
+ pub fn trait_impls(self, trait_did: DefId) -> &'hir [LocalDefId] {
+ self.tcx.all_local_trait_impls(()).get(&trait_did).map_or(&[], |xs| &xs[..])
+ }
+
+ /// Gets the attributes on the crate. This is preferable to
+ /// invoking `krate.attrs` because it registers a tighter
+ /// dep-graph access.
+ pub fn krate_attrs(self) -> &'hir [ast::Attribute] {
+ self.attrs(CRATE_HIR_ID)
+ }
+
+ pub fn rustc_coherence_is_core(self) -> bool {
+ self.krate_attrs().iter().any(|attr| attr.has_name(sym::rustc_coherence_is_core))
+ }
+
+ pub fn get_module(self, module: LocalDefId) -> (&'hir Mod<'hir>, Span, HirId) {
+ let hir_id = HirId::make_owner(module);
+ match self.tcx.hir_owner(module).map(|o| o.node) {
+ Some(OwnerNode::Item(&Item { span, kind: ItemKind::Mod(ref m), .. })) => {
+ (m, span, hir_id)
+ }
+ Some(OwnerNode::Crate(item)) => (item, item.spans.inner_span, hir_id),
+ node => panic!("not a module: {:?}", node),
+ }
+ }
+
+ /// Walks the contents of the local crate. See also `visit_all_item_likes_in_crate`.
+ pub fn walk_toplevel_module(self, visitor: &mut impl Visitor<'hir>) {
+ let (top_mod, span, hir_id) = self.get_module(CRATE_DEF_ID);
+ visitor.visit_mod(top_mod, span, hir_id);
+ }
+
+ /// Walks the attributes in a crate.
+ pub fn walk_attributes(self, visitor: &mut impl Visitor<'hir>) {
+ let krate = self.krate();
+ for info in krate.owners.iter() {
+ if let MaybeOwner::Owner(info) = info {
+ for attrs in info.attrs.map.values() {
+ for a in *attrs {
+ visitor.visit_attribute(a)
+ }
+ }
+ }
+ }
+ }
+
+ /// Visits all item-likes in the crate in some deterministic (but unspecified) order. If you
+ /// need to process every item-like and don't care about visiting nested items in a particular
+ /// order, then this method is the best choice. If you do care about this nesting, you should
+ /// use `tcx.hir().walk_toplevel_module` instead.
+ ///
+ /// Note that this function will access HIR for all the item-likes in the crate. If you only
+ /// need to access some of them, it is usually better to manually loop on the iterators
+ /// provided by `tcx.hir_crate_items(())`.
+ ///
+ /// Please see the notes in `intravisit.rs` for more information.
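+ ///
+ /// A minimal visitor sketch (illustrative, not a doctest):
+ ///
+ /// ```ignore (illustrative)
+ /// struct ItemCounter { count: usize }
+ ///
+ /// impl<'hir> Visitor<'hir> for ItemCounter {
+ ///     fn visit_item(&mut self, _item: &'hir Item<'hir>) {
+ ///         self.count += 1;
+ ///     }
+ /// }
+ ///
+ /// let mut counter = ItemCounter { count: 0 };
+ /// tcx.hir().visit_all_item_likes_in_crate(&mut counter);
+ /// ```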
+ pub fn visit_all_item_likes_in_crate<V>(self, visitor: &mut V)
+ where
+ V: Visitor<'hir>,
+ {
+ let krate = self.tcx.hir_crate_items(());
+
+ for id in krate.items() {
+ visitor.visit_item(self.item(id));
+ }
+
+ for id in krate.trait_items() {
+ visitor.visit_trait_item(self.trait_item(id));
+ }
+
+ for id in krate.impl_items() {
+ visitor.visit_impl_item(self.impl_item(id));
+ }
+
+ for id in krate.foreign_items() {
+ visitor.visit_foreign_item(self.foreign_item(id));
+ }
+ }
+
+ /// This method is the equivalent of `visit_all_item_likes_in_crate` but restricted to
+ /// item-likes in a single module.
+ pub fn visit_item_likes_in_module<V>(self, module: LocalDefId, visitor: &mut V)
+ where
+ V: Visitor<'hir>,
+ {
+ let module = self.tcx.hir_module_items(module);
+
+ for id in module.items() {
+ visitor.visit_item(self.item(id));
+ }
+
+ for id in module.trait_items() {
+ visitor.visit_trait_item(self.trait_item(id));
+ }
+
+ for id in module.impl_items() {
+ visitor.visit_impl_item(self.impl_item(id));
+ }
+
+ for id in module.foreign_items() {
+ visitor.visit_foreign_item(self.foreign_item(id));
+ }
+ }
+
+ pub fn for_each_module(self, mut f: impl FnMut(LocalDefId)) {
+ let crate_items = self.tcx.hir_crate_items(());
+ for module in crate_items.submodules.iter() {
+ f(*module)
+ }
+ }
+
+ #[cfg(not(parallel_compiler))]
+ #[inline]
+ pub fn par_for_each_module(self, f: impl Fn(LocalDefId)) {
+ self.for_each_module(f)
+ }
+
+ #[cfg(parallel_compiler)]
+ pub fn par_for_each_module(self, f: impl Fn(LocalDefId) + Sync) {
+ use rustc_data_structures::sync::{par_iter, ParallelIterator};
+ par_iter_submodules(self.tcx, CRATE_DEF_ID, &f);
+
+ fn par_iter_submodules<F>(tcx: TyCtxt<'_>, module: LocalDefId, f: &F)
+ where
+ F: Fn(LocalDefId) + Sync,
+ {
+ (*f)(module);
+ let items = tcx.hir_module_items(module);
+ par_iter(&items.submodules[..]).for_each(|&sm| par_iter_submodules(tcx, sm, f));
+ }
+ }
+
+ /// Returns an iterator for the nodes in the ancestor tree of the `current_id`
+ /// until the crate root is reached. Prefer this over your own loop using `get_parent_node`.
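+    ///
+    /// A sketch of typical use (`hir_id` is assumed to be in scope):
+    ///
+    /// ```rust,ignore (illustrative)
+    /// for (ancestor_id, node) in tcx.hir().parent_iter(hir_id) {
+    ///     if let Node::Item(_) = node {
+    ///         // Reached the enclosing item; stop climbing.
+    ///         break;
+    ///     }
+    /// }
+    /// ```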
+ pub fn parent_iter(self, current_id: HirId) -> ParentHirIterator<'hir> {
+ ParentHirIterator { current_id, map: self }
+ }
+
+    /// Returns an iterator for the HIR owner nodes in the ancestor tree of the `current_id`
+    /// until the crate root is reached. Prefer this over your own loop using `get_parent_node`.
+ pub fn parent_owner_iter(self, current_id: HirId) -> ParentOwnerIterator<'hir> {
+ ParentOwnerIterator { current_id, map: self }
+ }
+
+ /// Checks if the node is left-hand side of an assignment.
+ pub fn is_lhs(self, id: HirId) -> bool {
+ match self.find(self.get_parent_node(id)) {
+ Some(Node::Expr(expr)) => match expr.kind {
+ ExprKind::Assign(lhs, _rhs, _span) => lhs.hir_id == id,
+ _ => false,
+ },
+ _ => false,
+ }
+ }
+
+ /// Whether the expression pointed at by `hir_id` belongs to a `const` evaluation context.
+    /// Used exclusively for diagnostics, to avoid suggesting function calls.
+ pub fn is_inside_const_context(self, hir_id: HirId) -> bool {
+ self.body_const_context(self.enclosing_body_owner(hir_id)).is_some()
+ }
+
+ /// Retrieves the `HirId` for `id`'s enclosing method, unless there's a
+ /// `while` or `loop` before reaching it, as block tail returns are not
+ /// available in them.
+ ///
+ /// ```
+ /// fn foo(x: usize) -> bool {
+ /// if x == 1 {
+ /// true // If `get_return_block` gets passed the `id` corresponding
+ /// } else { // to this, it will return `foo`'s `HirId`.
+ /// false
+ /// }
+ /// }
+ /// ```
+ ///
+ /// ```compile_fail,E0308
+ /// fn foo(x: usize) -> bool {
+ /// loop {
+ /// true // If `get_return_block` gets passed the `id` corresponding
+ /// } // to this, it will return `None`.
+ /// false
+ /// }
+ /// ```
+ pub fn get_return_block(self, id: HirId) -> Option<HirId> {
+ let mut iter = self.parent_iter(id).peekable();
+ let mut ignore_tail = false;
+ if let Some(node) = self.find(id) {
+ if let Node::Expr(Expr { kind: ExprKind::Ret(_), .. }) = node {
+ // When dealing with `return` statements, we don't care about climbing only tail
+ // expressions.
+ ignore_tail = true;
+ }
+ }
+ while let Some((hir_id, node)) = iter.next() {
+ if let (Some((_, next_node)), false) = (iter.peek(), ignore_tail) {
+ match next_node {
+ Node::Block(Block { expr: None, .. }) => return None,
+ // The current node is not the tail expression of its parent.
+ Node::Block(Block { expr: Some(e), .. }) if hir_id != e.hir_id => return None,
+ _ => {}
+ }
+ }
+ match node {
+ Node::Item(_)
+ | Node::ForeignItem(_)
+ | Node::TraitItem(_)
+ | Node::Expr(Expr { kind: ExprKind::Closure { .. }, .. })
+ | Node::ImplItem(_) => return Some(hir_id),
+ // Ignore `return`s on the first iteration
+ Node::Expr(Expr { kind: ExprKind::Loop(..) | ExprKind::Ret(..), .. })
+ | Node::Local(_) => {
+ return None;
+ }
+ _ => {}
+ }
+ }
+ None
+ }
+
+    /// Retrieves the `LocalDefId` of `id`'s parent item, or the crate root if no
+    /// parent item is in this map. The "parent item" is the closest parent node
+    /// in the HIR which is recorded by the map and is an item, either an item
+    /// in a module, trait, or impl.
+ pub fn get_parent_item(self, hir_id: HirId) -> LocalDefId {
+ if let Some((def_id, _node)) = self.parent_owner_iter(hir_id).next() {
+ def_id
+ } else {
+ CRATE_DEF_ID
+ }
+ }
+
+    /// Returns the `LocalDefId` of `id`'s nearest module parent, or the crate root
+    /// if no module parent is in this map.
+ pub(super) fn get_module_parent_node(self, hir_id: HirId) -> LocalDefId {
+ for (def_id, node) in self.parent_owner_iter(hir_id) {
+ if let OwnerNode::Item(&Item { kind: ItemKind::Mod(_), .. }) = node {
+ return def_id;
+ }
+ }
+ CRATE_DEF_ID
+ }
+
+    /// When on an if expression, a match arm tail expression, or a match arm,
+    /// gives back the enclosing `if` or `match` expression.
+ ///
+ /// Used by error reporting when there's a type error in an if or match arm caused by the
+ /// expression needing to be unit.
+ pub fn get_if_cause(self, hir_id: HirId) -> Option<&'hir Expr<'hir>> {
+ for (_, node) in self.parent_iter(hir_id) {
+ match node {
+ Node::Item(_)
+ | Node::ForeignItem(_)
+ | Node::TraitItem(_)
+ | Node::ImplItem(_)
+ | Node::Stmt(Stmt { kind: StmtKind::Local(_), .. }) => break,
+ Node::Expr(expr @ Expr { kind: ExprKind::If(..) | ExprKind::Match(..), .. }) => {
+ return Some(expr);
+ }
+ _ => {}
+ }
+ }
+ None
+ }
+
+ /// Returns the nearest enclosing scope. A scope is roughly an item or block.
+ pub fn get_enclosing_scope(self, hir_id: HirId) -> Option<HirId> {
+ for (hir_id, node) in self.parent_iter(hir_id) {
+ if let Node::Item(Item {
+ kind:
+ ItemKind::Fn(..)
+ | ItemKind::Const(..)
+ | ItemKind::Static(..)
+ | ItemKind::Mod(..)
+ | ItemKind::Enum(..)
+ | ItemKind::Struct(..)
+ | ItemKind::Union(..)
+ | ItemKind::Trait(..)
+ | ItemKind::Impl { .. },
+ ..
+ })
+ | Node::ForeignItem(ForeignItem { kind: ForeignItemKind::Fn(..), .. })
+ | Node::TraitItem(TraitItem { kind: TraitItemKind::Fn(..), .. })
+ | Node::ImplItem(ImplItem { kind: ImplItemKind::Fn(..), .. })
+ | Node::Block(_) = node
+ {
+ return Some(hir_id);
+ }
+ }
+ None
+ }
+
+ /// Returns the defining scope for an opaque type definition.
+ pub fn get_defining_scope(self, id: HirId) -> HirId {
+ let mut scope = id;
+ loop {
+ scope = self.get_enclosing_scope(scope).unwrap_or(CRATE_HIR_ID);
+ if scope == CRATE_HIR_ID || !matches!(self.get(scope), Node::Block(_)) {
+ return scope;
+ }
+ }
+ }
+
+ pub fn get_foreign_abi(self, hir_id: HirId) -> Abi {
+ let parent = self.get_parent_item(hir_id);
+ if let Some(node) = self.tcx.hir_owner(parent) {
+ if let OwnerNode::Item(Item { kind: ItemKind::ForeignMod { abi, .. }, .. }) = node.node
+ {
+ return *abi;
+ }
+ }
+ bug!(
+ "expected foreign mod or inlined parent, found {}",
+ self.node_to_string(HirId::make_owner(parent))
+ )
+ }
+
+ pub fn expect_owner(self, id: LocalDefId) -> OwnerNode<'hir> {
+ self.tcx.hir_owner(id).unwrap_or_else(|| bug!("expected owner for {:?}", id)).node
+ }
+
+ pub fn expect_item(self, id: LocalDefId) -> &'hir Item<'hir> {
+ match self.tcx.hir_owner(id) {
+ Some(Owner { node: OwnerNode::Item(item), .. }) => item,
+ _ => bug!("expected item, found {}", self.node_to_string(HirId::make_owner(id))),
+ }
+ }
+
+ pub fn expect_impl_item(self, id: LocalDefId) -> &'hir ImplItem<'hir> {
+ match self.tcx.hir_owner(id) {
+ Some(Owner { node: OwnerNode::ImplItem(item), .. }) => item,
+ _ => bug!("expected impl item, found {}", self.node_to_string(HirId::make_owner(id))),
+ }
+ }
+
+ pub fn expect_trait_item(self, id: LocalDefId) -> &'hir TraitItem<'hir> {
+ match self.tcx.hir_owner(id) {
+ Some(Owner { node: OwnerNode::TraitItem(item), .. }) => item,
+ _ => bug!("expected trait item, found {}", self.node_to_string(HirId::make_owner(id))),
+ }
+ }
+
+ pub fn expect_variant(self, id: HirId) -> &'hir Variant<'hir> {
+ match self.find(id) {
+ Some(Node::Variant(variant)) => variant,
+ _ => bug!("expected variant, found {}", self.node_to_string(id)),
+ }
+ }
+
+ pub fn expect_foreign_item(self, id: LocalDefId) -> &'hir ForeignItem<'hir> {
+ match self.tcx.hir_owner(id) {
+ Some(Owner { node: OwnerNode::ForeignItem(item), .. }) => item,
+ _ => {
+ bug!("expected foreign item, found {}", self.node_to_string(HirId::make_owner(id)))
+ }
+ }
+ }
+
+ pub fn expect_expr(self, id: HirId) -> &'hir Expr<'hir> {
+ match self.find(id) {
+ Some(Node::Expr(expr)) => expr,
+ _ => bug!("expected expr, found {}", self.node_to_string(id)),
+ }
+ }
+
+ #[inline]
+ fn opt_ident(self, id: HirId) -> Option<Ident> {
+ match self.get(id) {
+ Node::Pat(&Pat { kind: PatKind::Binding(_, _, ident, _), .. }) => Some(ident),
+ // A `Ctor` doesn't have an identifier itself, but its parent
+ // struct/variant does. Compare with `hir::Map::opt_span`.
+ Node::Ctor(..) => match self.find(self.get_parent_node(id))? {
+ Node::Item(item) => Some(item.ident),
+ Node::Variant(variant) => Some(variant.ident),
+ _ => unreachable!(),
+ },
+ node => node.ident(),
+ }
+ }
+
+ #[inline]
+ pub(super) fn opt_ident_span(self, id: HirId) -> Option<Span> {
+ self.opt_ident(id).map(|ident| ident.span)
+ }
+
+ #[inline]
+ pub fn opt_name(self, id: HirId) -> Option<Symbol> {
+ self.opt_ident(id).map(|ident| ident.name)
+ }
+
+ pub fn name(self, id: HirId) -> Symbol {
+ self.opt_name(id).unwrap_or_else(|| bug!("no name for {}", self.node_to_string(id)))
+ }
+
+    /// Given a node ID, gets the list of attributes associated with the AST
+    /// node corresponding to that node ID.
+ pub fn attrs(self, id: HirId) -> &'hir [ast::Attribute] {
+ self.tcx.hir_attrs(id.owner).get(id.local_id)
+ }
+
+ /// Gets the span of the definition of the specified HIR node.
+ /// This is used by `tcx.def_span`.
+ pub fn span(self, hir_id: HirId) -> Span {
+ self.opt_span(hir_id)
+ .unwrap_or_else(|| bug!("hir::map::Map::span: id not in map: {:?}", hir_id))
+ }
+
+ pub fn opt_span(self, hir_id: HirId) -> Option<Span> {
+ fn until_within(outer: Span, end: Span) -> Span {
+ if let Some(end) = end.find_ancestor_inside(outer) {
+ outer.with_hi(end.hi())
+ } else {
+ outer
+ }
+ }
+
+ fn named_span(item_span: Span, ident: Ident, generics: Option<&Generics<'_>>) -> Span {
+ if ident.name != kw::Empty {
+ let mut span = until_within(item_span, ident.span);
+ if let Some(g) = generics
+ && !g.span.is_dummy()
+ && let Some(g_span) = g.span.find_ancestor_inside(item_span)
+ {
+ span = span.to(g_span);
+ }
+ span
+ } else {
+ item_span
+ }
+ }
+
+ let span = match self.find(hir_id)? {
+ // Function-like.
+ Node::Item(Item { kind: ItemKind::Fn(sig, ..), .. })
+ | Node::TraitItem(TraitItem { kind: TraitItemKind::Fn(sig, ..), .. })
+ | Node::ImplItem(ImplItem { kind: ImplItemKind::Fn(sig, ..), .. }) => sig.span,
+ // Constants and Statics.
+ Node::Item(Item {
+ kind:
+ ItemKind::Const(ty, ..)
+ | ItemKind::Static(ty, ..)
+ | ItemKind::Impl(Impl { self_ty: ty, .. }),
+ span: outer_span,
+ ..
+ })
+ | Node::TraitItem(TraitItem {
+ kind: TraitItemKind::Const(ty, ..),
+ span: outer_span,
+ ..
+ })
+ | Node::ImplItem(ImplItem {
+ kind: ImplItemKind::Const(ty, ..),
+ span: outer_span,
+ ..
+ })
+ | Node::ForeignItem(ForeignItem {
+ kind: ForeignItemKind::Static(ty, ..),
+ span: outer_span,
+ ..
+ }) => until_within(*outer_span, ty.span),
+ // With generics and bounds.
+ Node::Item(Item {
+ kind: ItemKind::Trait(_, _, generics, bounds, _),
+ span: outer_span,
+ ..
+ })
+ | Node::TraitItem(TraitItem {
+ kind: TraitItemKind::Type(bounds, _),
+ generics,
+ span: outer_span,
+ ..
+ }) => {
+ let end = if let Some(b) = bounds.last() { b.span() } else { generics.span };
+ until_within(*outer_span, end)
+ }
+ // Other cases.
+ Node::Item(item) => match &item.kind {
+ ItemKind::Use(path, _) => path.span,
+ _ => named_span(item.span, item.ident, item.kind.generics()),
+ },
+ Node::Variant(variant) => named_span(variant.span, variant.ident, None),
+ Node::ImplItem(item) => named_span(item.span, item.ident, Some(item.generics)),
+ Node::ForeignItem(item) => match item.kind {
+ ForeignItemKind::Fn(decl, _, _) => until_within(item.span, decl.output.span()),
+ _ => named_span(item.span, item.ident, None),
+ },
+ Node::Ctor(_) => return self.opt_span(self.get_parent_node(hir_id)),
+ Node::Expr(Expr { kind: ExprKind::Closure(Closure { fn_decl_span, .. }), .. }) => {
+ *fn_decl_span
+ }
+ _ => self.span_with_body(hir_id),
+ };
+ Some(span)
+ }
+
+ /// Like `hir.span()`, but includes the body of items
+    /// (instead of just the item header).
+ pub fn span_with_body(self, hir_id: HirId) -> Span {
+ match self.get(hir_id) {
+ Node::Param(param) => param.span,
+ Node::Item(item) => item.span,
+ Node::ForeignItem(foreign_item) => foreign_item.span,
+ Node::TraitItem(trait_item) => trait_item.span,
+ Node::ImplItem(impl_item) => impl_item.span,
+ Node::Variant(variant) => variant.span,
+ Node::Field(field) => field.span,
+ Node::AnonConst(constant) => self.body(constant.body).value.span,
+ Node::Expr(expr) => expr.span,
+ Node::Stmt(stmt) => stmt.span,
+ Node::PathSegment(seg) => {
+ let ident_span = seg.ident.span;
+ ident_span
+ .with_hi(seg.args.map_or_else(|| ident_span.hi(), |args| args.span_ext.hi()))
+ }
+ Node::Ty(ty) => ty.span,
+ Node::TypeBinding(tb) => tb.span,
+ Node::TraitRef(tr) => tr.path.span,
+ Node::Pat(pat) => pat.span,
+ Node::Arm(arm) => arm.span,
+ Node::Block(block) => block.span,
+ Node::Ctor(..) => self.span_with_body(self.get_parent_node(hir_id)),
+ Node::Lifetime(lifetime) => lifetime.span,
+ Node::GenericParam(param) => param.span,
+ Node::Infer(i) => i.span,
+ Node::Local(local) => local.span,
+ Node::Crate(item) => item.spans.inner_span,
+ }
+ }
+
+ pub fn span_if_local(self, id: DefId) -> Option<Span> {
+ if id.is_local() { Some(self.tcx.def_span(id)) } else { None }
+ }
+
+ pub fn res_span(self, res: Res) -> Option<Span> {
+ match res {
+ Res::Err => None,
+ Res::Local(id) => Some(self.span(id)),
+ res => self.span_if_local(res.opt_def_id()?),
+ }
+ }
+
+ /// Get a representation of this `id` for debugging purposes.
+ /// NOTE: Do NOT use this in diagnostics!
+ pub fn node_to_string(self, id: HirId) -> String {
+ hir_id_to_string(self, id)
+ }
+
+    /// Returns the `HirId` of `N` in `struct Foo<const N: usize = { ... }>` when
+    /// called with the `HirId` for the `{ ... }` anon const.
+ pub fn opt_const_param_default_param_hir_id(self, anon_const: HirId) -> Option<HirId> {
+ match self.get(self.get_parent_node(anon_const)) {
+ Node::GenericParam(GenericParam {
+ hir_id: param_id,
+ kind: GenericParamKind::Const { .. },
+ ..
+ }) => Some(*param_id),
+ _ => None,
+ }
+ }
+}
+
+impl<'hir> intravisit::Map<'hir> for Map<'hir> {
+ fn find(&self, hir_id: HirId) -> Option<Node<'hir>> {
+ (*self).find(hir_id)
+ }
+
+ fn body(&self, id: BodyId) -> &'hir Body<'hir> {
+ (*self).body(id)
+ }
+
+ fn item(&self, id: ItemId) -> &'hir Item<'hir> {
+ (*self).item(id)
+ }
+
+ fn trait_item(&self, id: TraitItemId) -> &'hir TraitItem<'hir> {
+ (*self).trait_item(id)
+ }
+
+ fn impl_item(&self, id: ImplItemId) -> &'hir ImplItem<'hir> {
+ (*self).impl_item(id)
+ }
+
+ fn foreign_item(&self, id: ForeignItemId) -> &'hir ForeignItem<'hir> {
+ (*self).foreign_item(id)
+ }
+}
+
+pub(super) fn crate_hash(tcx: TyCtxt<'_>, crate_num: CrateNum) -> Svh {
+ debug_assert_eq!(crate_num, LOCAL_CRATE);
+ let krate = tcx.hir_crate(());
+ let hir_body_hash = krate.hir_hash;
+
+ let upstream_crates = upstream_crates(tcx);
+
+ let resolutions = tcx.resolutions(());
+
+ // We hash the final, remapped names of all local source files so we
+    // don't have to include the path prefix remapping command-line args.
+ // If we included the full mapping in the SVH, we could only have
+ // reproducible builds by compiling from the same directory. So we just
+ // hash the result of the mapping instead of the mapping itself.
+ let mut source_file_names: Vec<_> = tcx
+ .sess
+ .source_map()
+ .files()
+ .iter()
+ .filter(|source_file| source_file.cnum == LOCAL_CRATE)
+ .map(|source_file| source_file.name_hash)
+ .collect();
+
+ source_file_names.sort_unstable();
+
+ let crate_hash: Fingerprint = tcx.with_stable_hashing_context(|mut hcx| {
+ let mut stable_hasher = StableHasher::new();
+ hir_body_hash.hash_stable(&mut hcx, &mut stable_hasher);
+ upstream_crates.hash_stable(&mut hcx, &mut stable_hasher);
+ source_file_names.hash_stable(&mut hcx, &mut stable_hasher);
+ if tcx.sess.opts.unstable_opts.incremental_relative_spans {
+ let definitions = tcx.definitions_untracked();
+ let mut owner_spans: Vec<_> = krate
+ .owners
+ .iter_enumerated()
+ .filter_map(|(def_id, info)| {
+ let _ = info.as_owner()?;
+ let def_path_hash = definitions.def_path_hash(def_id);
+ let span = resolutions.source_span[def_id];
+ debug_assert_eq!(span.parent(), None);
+ Some((def_path_hash, span))
+ })
+ .collect();
+ owner_spans.sort_unstable_by_key(|bn| bn.0);
+ owner_spans.hash_stable(&mut hcx, &mut stable_hasher);
+ }
+ tcx.sess.opts.dep_tracking_hash(true).hash_stable(&mut hcx, &mut stable_hasher);
+ tcx.sess.local_stable_crate_id().hash_stable(&mut hcx, &mut stable_hasher);
+ // Hash visibility information since it does not appear in HIR.
+ resolutions.visibilities.hash_stable(&mut hcx, &mut stable_hasher);
+ resolutions.has_pub_restricted.hash_stable(&mut hcx, &mut stable_hasher);
+ stable_hasher.finish()
+ });
+
+ Svh::new(crate_hash.to_smaller_hash())
+}
+
+fn upstream_crates(tcx: TyCtxt<'_>) -> Vec<(StableCrateId, Svh)> {
+ let mut upstream_crates: Vec<_> = tcx
+ .crates(())
+ .iter()
+ .map(|&cnum| {
+ let stable_crate_id = tcx.stable_crate_id(cnum);
+ let hash = tcx.crate_hash(cnum);
+ (stable_crate_id, hash)
+ })
+ .collect();
+ upstream_crates.sort_unstable_by_key(|&(stable_crate_id, _)| stable_crate_id);
+ upstream_crates
+}
+
+fn hir_id_to_string(map: Map<'_>, id: HirId) -> String {
+ let id_str = format!(" (hir_id={})", id);
+
+ let path_str = || {
+        // This functionality is used for debugging: try to use `TyCtxt` to get
+        // the user-friendly path, otherwise fall back to stringifying `DefPath`.
+ crate::ty::tls::with_opt(|tcx| {
+ if let Some(tcx) = tcx {
+ let def_id = map.local_def_id(id);
+ tcx.def_path_str(def_id.to_def_id())
+ } else if let Some(path) = map.def_path_from_hir_id(id) {
+ path.data.into_iter().map(|elem| elem.to_string()).collect::<Vec<_>>().join("::")
+ } else {
+ String::from("<missing path>")
+ }
+ })
+ };
+
+ let span_str = || map.tcx.sess.source_map().span_to_snippet(map.span(id)).unwrap_or_default();
+ let node_str = |prefix| format!("{} {}{}", prefix, span_str(), id_str);
+
+ match map.find(id) {
+ Some(Node::Item(item)) => {
+ let item_str = match item.kind {
+ ItemKind::ExternCrate(..) => "extern crate",
+ ItemKind::Use(..) => "use",
+ ItemKind::Static(..) => "static",
+ ItemKind::Const(..) => "const",
+ ItemKind::Fn(..) => "fn",
+ ItemKind::Macro(..) => "macro",
+ ItemKind::Mod(..) => "mod",
+ ItemKind::ForeignMod { .. } => "foreign mod",
+ ItemKind::GlobalAsm(..) => "global asm",
+ ItemKind::TyAlias(..) => "ty",
+ ItemKind::OpaqueTy(..) => "opaque type",
+ ItemKind::Enum(..) => "enum",
+ ItemKind::Struct(..) => "struct",
+ ItemKind::Union(..) => "union",
+ ItemKind::Trait(..) => "trait",
+ ItemKind::TraitAlias(..) => "trait alias",
+ ItemKind::Impl { .. } => "impl",
+ };
+ format!("{} {}{}", item_str, path_str(), id_str)
+ }
+ Some(Node::ForeignItem(_)) => format!("foreign item {}{}", path_str(), id_str),
+ Some(Node::ImplItem(ii)) => match ii.kind {
+ ImplItemKind::Const(..) => {
+ format!("assoc const {} in {}{}", ii.ident, path_str(), id_str)
+ }
+ ImplItemKind::Fn(..) => format!("method {} in {}{}", ii.ident, path_str(), id_str),
+ ImplItemKind::TyAlias(_) => {
+ format!("assoc type {} in {}{}", ii.ident, path_str(), id_str)
+ }
+ },
+ Some(Node::TraitItem(ti)) => {
+ let kind = match ti.kind {
+ TraitItemKind::Const(..) => "assoc constant",
+ TraitItemKind::Fn(..) => "trait method",
+ TraitItemKind::Type(..) => "assoc type",
+ };
+
+ format!("{} {} in {}{}", kind, ti.ident, path_str(), id_str)
+ }
+ Some(Node::Variant(ref variant)) => {
+ format!("variant {} in {}{}", variant.ident, path_str(), id_str)
+ }
+ Some(Node::Field(ref field)) => {
+ format!("field {} in {}{}", field.ident, path_str(), id_str)
+ }
+ Some(Node::AnonConst(_)) => node_str("const"),
+ Some(Node::Expr(_)) => node_str("expr"),
+ Some(Node::Stmt(_)) => node_str("stmt"),
+ Some(Node::PathSegment(_)) => node_str("path segment"),
+ Some(Node::Ty(_)) => node_str("type"),
+ Some(Node::TypeBinding(_)) => node_str("type binding"),
+ Some(Node::TraitRef(_)) => node_str("trait ref"),
+ Some(Node::Pat(_)) => node_str("pat"),
+ Some(Node::Param(_)) => node_str("param"),
+ Some(Node::Arm(_)) => node_str("arm"),
+ Some(Node::Block(_)) => node_str("block"),
+ Some(Node::Infer(_)) => node_str("infer"),
+ Some(Node::Local(_)) => node_str("local"),
+ Some(Node::Ctor(..)) => format!("ctor {}{}", path_str(), id_str),
+ Some(Node::Lifetime(_)) => node_str("lifetime"),
+ Some(Node::GenericParam(ref param)) => format!("generic_param {:?}{}", param, id_str),
+ Some(Node::Crate(..)) => String::from("root_crate"),
+ None => format!("unknown node{}", id_str),
+ }
+}
+
+pub(super) fn hir_module_items(tcx: TyCtxt<'_>, module_id: LocalDefId) -> ModuleItems {
+ let mut collector = ItemCollector::new(tcx, false);
+
+ let (hir_mod, span, hir_id) = tcx.hir().get_module(module_id);
+ collector.visit_mod(hir_mod, span, hir_id);
+
+ let ItemCollector {
+ submodules,
+ items,
+ trait_items,
+ impl_items,
+ foreign_items,
+ body_owners,
+ ..
+ } = collector;
+ return ModuleItems {
+ submodules: submodules.into_boxed_slice(),
+ items: items.into_boxed_slice(),
+ trait_items: trait_items.into_boxed_slice(),
+ impl_items: impl_items.into_boxed_slice(),
+ foreign_items: foreign_items.into_boxed_slice(),
+ body_owners: body_owners.into_boxed_slice(),
+ };
+}
+
+pub(crate) fn hir_crate_items(tcx: TyCtxt<'_>, _: ()) -> ModuleItems {
+ let mut collector = ItemCollector::new(tcx, true);
+
+    // Both a crate collector and a module collector start at a
+    // module item (the former starts at the crate root), but only
+    // the former needs to collect the root module itself; `ItemCollector` does not do this for us.
+ collector.submodules.push(CRATE_DEF_ID);
+ tcx.hir().walk_toplevel_module(&mut collector);
+
+ let ItemCollector {
+ submodules,
+ items,
+ trait_items,
+ impl_items,
+ foreign_items,
+ body_owners,
+ ..
+ } = collector;
+
+ return ModuleItems {
+ submodules: submodules.into_boxed_slice(),
+ items: items.into_boxed_slice(),
+ trait_items: trait_items.into_boxed_slice(),
+ impl_items: impl_items.into_boxed_slice(),
+ foreign_items: foreign_items.into_boxed_slice(),
+ body_owners: body_owners.into_boxed_slice(),
+ };
+}
+
+struct ItemCollector<'tcx> {
+    // When `true`, it collects all items in the crate;
+    // otherwise it collects items in some module.
+ crate_collector: bool,
+ tcx: TyCtxt<'tcx>,
+ submodules: Vec<LocalDefId>,
+ items: Vec<ItemId>,
+ trait_items: Vec<TraitItemId>,
+ impl_items: Vec<ImplItemId>,
+ foreign_items: Vec<ForeignItemId>,
+ body_owners: Vec<LocalDefId>,
+}
+
+impl<'tcx> ItemCollector<'tcx> {
+ fn new(tcx: TyCtxt<'tcx>, crate_collector: bool) -> ItemCollector<'tcx> {
+ ItemCollector {
+ crate_collector,
+ tcx,
+ submodules: Vec::default(),
+ items: Vec::default(),
+ trait_items: Vec::default(),
+ impl_items: Vec::default(),
+ foreign_items: Vec::default(),
+ body_owners: Vec::default(),
+ }
+ }
+}
+
+impl<'hir> Visitor<'hir> for ItemCollector<'hir> {
+ type NestedFilter = nested_filter::All;
+
+ fn nested_visit_map(&mut self) -> Self::Map {
+ self.tcx.hir()
+ }
+
+ fn visit_item(&mut self, item: &'hir Item<'hir>) {
+ if associated_body(Node::Item(item)).is_some() {
+ self.body_owners.push(item.def_id);
+ }
+
+ self.items.push(item.item_id());
+
+ // Items that are modules are handled here instead of in visit_mod.
+ if let ItemKind::Mod(module) = &item.kind {
+ self.submodules.push(item.def_id);
+ // A module collector does not recurse inside nested modules.
+ if self.crate_collector {
+ intravisit::walk_mod(self, module, item.hir_id());
+ }
+ } else {
+ intravisit::walk_item(self, item)
+ }
+ }
+
+ fn visit_foreign_item(&mut self, item: &'hir ForeignItem<'hir>) {
+ self.foreign_items.push(item.foreign_item_id());
+ intravisit::walk_foreign_item(self, item)
+ }
+
+ fn visit_anon_const(&mut self, c: &'hir AnonConst) {
+ self.body_owners.push(self.tcx.hir().local_def_id(c.hir_id));
+ intravisit::walk_anon_const(self, c)
+ }
+
+ fn visit_expr(&mut self, ex: &'hir Expr<'hir>) {
+ if matches!(ex.kind, ExprKind::Closure { .. }) {
+ self.body_owners.push(self.tcx.hir().local_def_id(ex.hir_id));
+ }
+ intravisit::walk_expr(self, ex)
+ }
+
+ fn visit_trait_item(&mut self, item: &'hir TraitItem<'hir>) {
+ if associated_body(Node::TraitItem(item)).is_some() {
+ self.body_owners.push(item.def_id);
+ }
+
+ self.trait_items.push(item.trait_item_id());
+ intravisit::walk_trait_item(self, item)
+ }
+
+ fn visit_impl_item(&mut self, item: &'hir ImplItem<'hir>) {
+ if associated_body(Node::ImplItem(item)).is_some() {
+ self.body_owners.push(item.def_id);
+ }
+
+ self.impl_items.push(item.impl_item_id());
+ intravisit::walk_impl_item(self, item)
+ }
+}
diff --git a/compiler/rustc_middle/src/hir/mod.rs b/compiler/rustc_middle/src/hir/mod.rs
new file mode 100644
index 000000000..211a61471
--- /dev/null
+++ b/compiler/rustc_middle/src/hir/mod.rs
@@ -0,0 +1,182 @@
+//! HIR datatypes. See the [rustc dev guide] for more info.
+//!
+//! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/hir.html
+
+pub mod map;
+pub mod nested_filter;
+pub mod place;
+
+use crate::ty::query::Providers;
+use crate::ty::{DefIdTree, ImplSubject, TyCtxt};
+use rustc_data_structures::fingerprint::Fingerprint;
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_data_structures::sync::{par_for_each_in, Send, Sync};
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_hir::*;
+use rustc_query_system::ich::StableHashingContext;
+use rustc_span::{ExpnId, DUMMY_SP};
+
+/// Top-level HIR node for current owner. This only contains the node for which
+/// `HirId::local_id == 0`, and excludes bodies.
+///
+/// This struct exists to encapsulate all access to the hir_owner query in this module, and to
+/// implement HashStable without hashing bodies.
+#[derive(Copy, Clone, Debug)]
+pub struct Owner<'tcx> {
+ node: OwnerNode<'tcx>,
+ hash_without_bodies: Fingerprint,
+}
+
+impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for Owner<'tcx> {
+ #[inline]
+ fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+ let Owner { node: _, hash_without_bodies } = self;
+ hash_without_bodies.hash_stable(hcx, hasher)
+ }
+}
+
+/// Gathers the `LocalDefId`s for each item-like within a module, including items contained within
+/// bodies. The Ids are in visitor order. This is used to partition a pass between modules.
+#[derive(Debug, HashStable, Encodable, Decodable)]
+pub struct ModuleItems {
+ submodules: Box<[LocalDefId]>,
+ items: Box<[ItemId]>,
+ trait_items: Box<[TraitItemId]>,
+ impl_items: Box<[ImplItemId]>,
+ foreign_items: Box<[ForeignItemId]>,
+ body_owners: Box<[LocalDefId]>,
+}
+
+impl ModuleItems {
+ pub fn items(&self) -> impl Iterator<Item = ItemId> + '_ {
+ self.items.iter().copied()
+ }
+
+ pub fn trait_items(&self) -> impl Iterator<Item = TraitItemId> + '_ {
+ self.trait_items.iter().copied()
+ }
+
+ pub fn impl_items(&self) -> impl Iterator<Item = ImplItemId> + '_ {
+ self.impl_items.iter().copied()
+ }
+
+ pub fn foreign_items(&self) -> impl Iterator<Item = ForeignItemId> + '_ {
+ self.foreign_items.iter().copied()
+ }
+
+ pub fn definitions(&self) -> impl Iterator<Item = LocalDefId> + '_ {
+ self.items
+ .iter()
+ .map(|id| id.def_id)
+ .chain(self.trait_items.iter().map(|id| id.def_id))
+ .chain(self.impl_items.iter().map(|id| id.def_id))
+ .chain(self.foreign_items.iter().map(|id| id.def_id))
+ }
+
+ pub fn par_items(&self, f: impl Fn(ItemId) + Send + Sync) {
+ par_for_each_in(&self.items[..], |&id| f(id))
+ }
+
+ pub fn par_trait_items(&self, f: impl Fn(TraitItemId) + Send + Sync) {
+ par_for_each_in(&self.trait_items[..], |&id| f(id))
+ }
+
+ pub fn par_impl_items(&self, f: impl Fn(ImplItemId) + Send + Sync) {
+ par_for_each_in(&self.impl_items[..], |&id| f(id))
+ }
+
+ pub fn par_foreign_items(&self, f: impl Fn(ForeignItemId) + Send + Sync) {
+ par_for_each_in(&self.foreign_items[..], |&id| f(id))
+ }
+}
+
+impl<'tcx> TyCtxt<'tcx> {
+ #[inline(always)]
+ pub fn hir(self) -> map::Map<'tcx> {
+ map::Map { tcx: self }
+ }
+
+ pub fn parent_module(self, id: HirId) -> LocalDefId {
+ self.parent_module_from_def_id(id.owner)
+ }
+
+ pub fn impl_subject(self, def_id: DefId) -> ImplSubject<'tcx> {
+ self.impl_trait_ref(def_id)
+ .map(ImplSubject::Trait)
+ .unwrap_or_else(|| ImplSubject::Inherent(self.type_of(def_id)))
+ }
+}
+
+pub fn provide(providers: &mut Providers) {
+ providers.parent_module_from_def_id = |tcx, id| {
+ let hir = tcx.hir();
+ hir.get_module_parent_node(hir.local_def_id_to_hir_id(id))
+ };
+ providers.hir_crate_items = map::hir_crate_items;
+ providers.crate_hash = map::crate_hash;
+ providers.hir_module_items = map::hir_module_items;
+ providers.hir_owner = |tcx, id| {
+ let owner = tcx.hir_crate(()).owners.get(id)?.as_owner()?;
+ let node = owner.node();
+ Some(Owner { node, hash_without_bodies: owner.nodes.hash_without_bodies })
+ };
+ providers.local_def_id_to_hir_id = |tcx, id| {
+ let owner = tcx.hir_crate(()).owners[id].map(|_| ());
+ match owner {
+ MaybeOwner::Owner(_) => HirId::make_owner(id),
+ MaybeOwner::Phantom => bug!("No HirId for {:?}", id),
+ MaybeOwner::NonOwner(hir_id) => hir_id,
+ }
+ };
+ providers.hir_owner_nodes = |tcx, id| tcx.hir_crate(()).owners[id].map(|i| &i.nodes);
+ providers.hir_owner_parent = |tcx, id| {
+ // Accessing the local_parent is ok since its value is hashed as part of `id`'s DefPathHash.
+ tcx.opt_local_parent(id).map_or(CRATE_HIR_ID, |parent| {
+ let mut parent_hir_id = tcx.hir().local_def_id_to_hir_id(parent);
+ if let Some(local_id) =
+ tcx.hir_crate(()).owners[parent_hir_id.owner].unwrap().parenting.get(&id)
+ {
+ parent_hir_id.local_id = *local_id;
+ }
+ parent_hir_id
+ })
+ };
+ providers.hir_attrs =
+ |tcx, id| tcx.hir_crate(()).owners[id].as_owner().map_or(AttributeMap::EMPTY, |o| &o.attrs);
+ providers.source_span =
+ |tcx, def_id| tcx.resolutions(()).source_span.get(def_id).copied().unwrap_or(DUMMY_SP);
+ providers.def_span = |tcx, def_id| {
+ let def_id = def_id.expect_local();
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+ tcx.hir().opt_span(hir_id).unwrap_or(DUMMY_SP)
+ };
+ providers.def_ident_span = |tcx, def_id| {
+ let def_id = def_id.expect_local();
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+ tcx.hir().opt_ident_span(hir_id)
+ };
+ providers.fn_arg_names = |tcx, id| {
+ let hir = tcx.hir();
+ let def_id = id.expect_local();
+ let hir_id = hir.local_def_id_to_hir_id(def_id);
+ if let Some(body_id) = hir.maybe_body_owned_by(def_id) {
+ tcx.arena.alloc_from_iter(hir.body_param_names(body_id))
+ } else if let Node::TraitItem(&TraitItem {
+ kind: TraitItemKind::Fn(_, TraitFn::Required(idents)),
+ ..
+ }) = hir.get(hir_id)
+ {
+ tcx.arena.alloc_slice(idents)
+ } else {
+ span_bug!(hir.span(hir_id), "fn_arg_names: unexpected item {:?}", id);
+ }
+ };
+ providers.opt_def_kind = |tcx, def_id| tcx.hir().opt_def_kind(def_id.expect_local());
+ providers.all_local_trait_impls = |tcx, ()| &tcx.resolutions(()).trait_impls;
+ providers.expn_that_defined = |tcx, id| {
+ let id = id.expect_local();
+ tcx.resolutions(()).expn_that_defined.get(&id).copied().unwrap_or(ExpnId::root())
+ };
+ providers.in_scope_traits_map =
+ |tcx, id| tcx.hir_crate(()).owners[id].as_owner().map(|owner_info| &owner_info.trait_map);
+}
diff --git a/compiler/rustc_middle/src/hir/nested_filter.rs b/compiler/rustc_middle/src/hir/nested_filter.rs
new file mode 100644
index 000000000..6896837aa
--- /dev/null
+++ b/compiler/rustc_middle/src/hir/nested_filter.rs
@@ -0,0 +1,31 @@
+use rustc_hir::intravisit::nested_filter::NestedFilter;
+
+/// Do not visit nested item-like things, but visit nested things
+/// that are inside an item-like.
+///
+/// Notably, possible occurrences of bodies in non-item-like things
+/// include: closures/generators, inline `const {}` blocks, and
+/// constant arguments of types, e.g. in `let _: [(); /* HERE */];`.
+///
+/// **This is the most common choice.** A very common pattern is
+/// to use `visit_all_item_likes_in_crate()` as an outer loop,
+/// and to have the visitor that visits the contents of each item
+/// using this setting.
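+///
+/// A sketch of that pattern (`MyVisitor` is hypothetical):
+///
+/// ```rust,ignore (illustrative)
+/// impl<'hir> intravisit::Visitor<'hir> for MyVisitor<'hir> {
+///     type NestedFilter = nested_filter::OnlyBodies;
+///
+///     fn nested_visit_map(&mut self) -> Self::Map {
+///         self.tcx.hir()
+///     }
+/// }
+/// ```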
+pub struct OnlyBodies(());
+impl<'hir> NestedFilter<'hir> for OnlyBodies {
+ type Map = crate::hir::map::Map<'hir>;
+ const INTER: bool = false;
+ const INTRA: bool = true;
+}
+
+/// Visits all nested things, including item-likes.
+///
+/// **This is an unusual choice.** It is used when you want to
+/// process all nested item-likes within their lexical context. Typically you
+/// kick off the visit by calling `walk_krate()`.
+pub struct All(());
+impl<'hir> NestedFilter<'hir> for All {
+ type Map = crate::hir::map::Map<'hir>;
+ const INTER: bool = true;
+ const INTRA: bool = true;
+}
diff --git a/compiler/rustc_middle/src/hir/place.rs b/compiler/rustc_middle/src/hir/place.rs
new file mode 100644
index 000000000..83d3b0100
--- /dev/null
+++ b/compiler/rustc_middle/src/hir/place.rs
@@ -0,0 +1,117 @@
+use crate::ty;
+use crate::ty::Ty;
+
+use rustc_hir::HirId;
+use rustc_target::abi::VariantIdx;
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, TyEncodable, TyDecodable, HashStable)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub enum PlaceBase {
+ /// A temporary variable.
+ Rvalue,
+ /// A named `static` item.
+ StaticItem,
+ /// A named local variable.
+ Local(HirId),
+ /// An upvar referenced by closure env.
+ Upvar(ty::UpvarId),
+}
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, TyEncodable, TyDecodable, HashStable)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub enum ProjectionKind {
+ /// A dereference of a pointer, reference or `Box<T>` of the given type.
+ Deref,
+
+ /// `B.F` where `B` is the base expression and `F` is
+ /// the field. The field is identified by which variant
+ /// it appears in along with a field index. The variant
+ /// is used for enums.
+ Field(u32, VariantIdx),
+
+ /// Some index like `B[x]`, where `B` is the base
+ /// expression. We don't preserve the index `x` because
+ /// we won't need it.
+ Index,
+
+ /// A subslice covering a range of values like `B[x..y]`.
+ Subslice,
+}
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, TyEncodable, TyDecodable, HashStable)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub struct Projection<'tcx> {
+ /// Type after the projection is applied.
+ pub ty: Ty<'tcx>,
+
+ /// Defines the kind of access made by the projection.
+ pub kind: ProjectionKind,
+}
+
+/// A `Place` represents how a value is located in memory.
+///
+/// This is an HIR version of [`rustc_middle::mir::Place`].
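+///
+/// For illustration (informal notation): the place for the expression `*x.f`
+/// has the local `x` as its base and two projections, applied in order:
+///
+/// ```text
+/// base:        Local(x)
+/// projections: [Field(f), Deref]
+/// ```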
+#[derive(Clone, Debug, PartialEq, Eq, Hash, TyEncodable, TyDecodable, HashStable)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub struct Place<'tcx> {
+ /// The type of the `PlaceBase`
+ pub base_ty: Ty<'tcx>,
+ /// The "outermost" place that holds this value.
+ pub base: PlaceBase,
+ /// How this place is derived from the base place.
+ pub projections: Vec<Projection<'tcx>>,
+}
+
+/// A `PlaceWithHirId` pairs a [`Place`] with the `HirId` of the expression
+/// or pattern producing the value.
+///
+/// Like `Place`, this is an HIR version of [`rustc_middle::mir::Place`].
+#[derive(Clone, Debug, PartialEq, Eq, Hash, TyEncodable, TyDecodable, HashStable)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub struct PlaceWithHirId<'tcx> {
+ /// `HirId` of the expression or pattern producing this value.
+ pub hir_id: HirId,
+
+ /// Information about the `Place`.
+ pub place: Place<'tcx>,
+}
+
+impl<'tcx> PlaceWithHirId<'tcx> {
+ pub fn new(
+ hir_id: HirId,
+ base_ty: Ty<'tcx>,
+ base: PlaceBase,
+ projections: Vec<Projection<'tcx>>,
+ ) -> PlaceWithHirId<'tcx> {
+ PlaceWithHirId { hir_id, place: Place { base_ty, base, projections } }
+ }
+}
+
+impl<'tcx> Place<'tcx> {
+ /// Returns an iterator of the types that have to be dereferenced to access
+ /// the `Place`.
+ ///
+ /// The types are in the reverse order that they are applied. So if
+ /// `x: &*const u32` and the `Place` is `**x`, then the types returned are
+/// `*const u32` then `&*const u32`.
+ pub fn deref_tys(&self) -> impl Iterator<Item = Ty<'tcx>> + '_ {
+ self.projections.iter().enumerate().rev().filter_map(move |(index, proj)| {
+ if ProjectionKind::Deref == proj.kind {
+ Some(self.ty_before_projection(index))
+ } else {
+ None
+ }
+ })
+ }
+
+ /// Returns the type of this `Place` after all projections have been applied.
+ pub fn ty(&self) -> Ty<'tcx> {
+ self.projections.last().map_or(self.base_ty, |proj| proj.ty)
+ }
+
+ /// Returns the type of this `Place` immediately before `projection_index`th projection
+ /// is applied.
+ pub fn ty_before_projection(&self, projection_index: usize) -> Ty<'tcx> {
+ assert!(projection_index < self.projections.len());
+ if projection_index == 0 { self.base_ty } else { self.projections[projection_index - 1].ty }
+ }
+}
diff --git a/compiler/rustc_middle/src/infer/canonical.rs b/compiler/rustc_middle/src/infer/canonical.rs
new file mode 100644
index 000000000..200de9079
--- /dev/null
+++ b/compiler/rustc_middle/src/infer/canonical.rs
@@ -0,0 +1,363 @@
+//! **Canonicalization** is the key to constructing a query in the
+//! middle of type inference. Ordinarily, it is not possible to store
+//! types from type inference in query keys, because they contain
+//! references to inference variables whose lifetimes are too short
+//! and so forth. Canonicalizing a value T1 using `canonicalize_query`
+//! produces two things:
+//!
+//! - a value T2 where each unbound inference variable has been
+//! replaced with a **canonical variable**;
+//! - a map M (of type `CanonicalVarValues`) from those canonical
+//! variables back to the original.
+//!
+//! We can then do queries using T2. These will give back constraints
+//! on the canonical variables which can be translated, using the map
+//! M, into constraints in our source context. This process of
+//! translating the results back is done by the
+//! `instantiate_query_result` method.
+//!
+//! For a more detailed look at what is happening here, check
+//! out the [chapter in the rustc dev guide][c].
+//!
+//! [c]: https://rust-lang.github.io/chalk/book/canonical_queries/canonicalization.html
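+//!
+//! As a tiny illustration (notation informal, not actual rustc output),
+//! canonicalizing `Vec<?x>`, where `?x` is an unbound inference variable,
+//! might produce:
+//!
+//! ```text
+//! T2 = Vec<^0>          // ?x replaced by the canonical variable ^0
+//! M  = { ^0 -> ?x }     // CanonicalVarValues mapping ^0 back to ?x
+//! ```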
+
+use crate::infer::MemberConstraint;
+use crate::ty::subst::GenericArg;
+use crate::ty::{self, BoundVar, List, Region, Ty, TyCtxt};
+use rustc_index::vec::IndexVec;
+use rustc_macros::HashStable;
+use smallvec::SmallVec;
+use std::iter;
+use std::ops::Index;
+
+/// A "canonicalized" type `V` is one where all free inference
+/// variables have been rewritten to "canonical vars". These are
+/// numbered starting from 0 in order of first appearance.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, TyDecodable, TyEncodable)]
+#[derive(HashStable, TypeFoldable, TypeVisitable, Lift)]
+pub struct Canonical<'tcx, V> {
+ pub max_universe: ty::UniverseIndex,
+ pub variables: CanonicalVarInfos<'tcx>,
+ pub value: V,
+}
+
+pub type CanonicalVarInfos<'tcx> = &'tcx List<CanonicalVarInfo<'tcx>>;
+
+/// A set of values corresponding to the canonical variables from some
+/// `Canonical`. You can give these values to
+/// `canonical_value.substitute` to substitute them into the canonical
+/// value at the right places.
+///
+/// When you canonicalize a value `V`, you get back one of these
+/// vectors with the original values that were replaced by canonical
+/// variables. You will need to supply it later to instantiate the
+/// canonicalized query response.
+#[derive(Clone, Debug, PartialEq, Eq, Hash, TyDecodable, TyEncodable)]
+#[derive(HashStable, TypeFoldable, TypeVisitable, Lift)]
+pub struct CanonicalVarValues<'tcx> {
+ pub var_values: IndexVec<BoundVar, GenericArg<'tcx>>,
+}
+
+/// When we canonicalize a value to form a query, we wind up replacing
+/// various parts of it with canonical variables. This struct stores
+/// those replaced bits so we can recall them when we process the query
+/// result.
+#[derive(Clone, Debug)]
+pub struct OriginalQueryValues<'tcx> {
+ /// Map from the universes that appear in the query to the universes in the
+ /// caller context. For all queries except `evaluate_goal` (used by Chalk),
+ /// we only ever put ROOT values into the query, so this map is very
+ /// simple.
+ pub universe_map: SmallVec<[ty::UniverseIndex; 4]>,
+
+ /// This is equivalent to `CanonicalVarValues`, but using a
+ /// `SmallVec` yields a significant performance win.
+ pub var_values: SmallVec<[GenericArg<'tcx>; 8]>,
+}
+
+impl<'tcx> Default for OriginalQueryValues<'tcx> {
+ fn default() -> Self {
+ let mut universe_map = SmallVec::default();
+ universe_map.push(ty::UniverseIndex::ROOT);
+
+ Self { universe_map, var_values: SmallVec::default() }
+ }
+}
+
+/// Information about a canonical variable that is included with the
+/// canonical value. This is sufficient information for code to create
+/// a copy of the canonical value in some other inference context,
+/// with fresh inference variables replacing the canonical values.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, TyDecodable, TyEncodable, HashStable)]
+pub struct CanonicalVarInfo<'tcx> {
+ pub kind: CanonicalVarKind<'tcx>,
+}
+
+impl<'tcx> CanonicalVarInfo<'tcx> {
+ pub fn universe(&self) -> ty::UniverseIndex {
+ self.kind.universe()
+ }
+
+ pub fn is_existential(&self) -> bool {
+ match self.kind {
+ CanonicalVarKind::Ty(_) => true,
+ CanonicalVarKind::PlaceholderTy(_) => false,
+ CanonicalVarKind::Region(_) => true,
+ CanonicalVarKind::PlaceholderRegion(..) => false,
+ CanonicalVarKind::Const(..) => true,
+ CanonicalVarKind::PlaceholderConst(_, _) => false,
+ }
+ }
+}
+
+/// Describes the "kind" of the canonical variable. This is a "kind"
+/// in the type-theory sense of the term -- i.e., a "meta" type system
+/// that analyzes type-like values.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, TyDecodable, TyEncodable, HashStable)]
+pub enum CanonicalVarKind<'tcx> {
+ /// Some kind of type inference variable.
+ Ty(CanonicalTyVarKind),
+
+ /// A "placeholder" that represents "any type".
+ PlaceholderTy(ty::PlaceholderType),
+
+ /// Region variable `'?R`.
+ Region(ty::UniverseIndex),
+
+ /// A "placeholder" that represents "any region". Created when you
+ /// are solving a goal like `for<'a> T: Foo<'a>` to represent the
+ /// bound region `'a`.
+ PlaceholderRegion(ty::PlaceholderRegion),
+
+ /// Some kind of const inference variable.
+ Const(ty::UniverseIndex, Ty<'tcx>),
+
+ /// A "placeholder" that represents "any const".
+ PlaceholderConst(ty::PlaceholderConst<'tcx>, Ty<'tcx>),
+}
+
+impl<'tcx> CanonicalVarKind<'tcx> {
+ pub fn universe(self) -> ty::UniverseIndex {
+ match self {
+ CanonicalVarKind::Ty(kind) => match kind {
+ CanonicalTyVarKind::General(ui) => ui,
+ CanonicalTyVarKind::Float | CanonicalTyVarKind::Int => ty::UniverseIndex::ROOT,
+ },
+
+ CanonicalVarKind::PlaceholderTy(placeholder) => placeholder.universe,
+ CanonicalVarKind::Region(ui) => ui,
+ CanonicalVarKind::PlaceholderRegion(placeholder) => placeholder.universe,
+ CanonicalVarKind::Const(ui, _) => ui,
+ CanonicalVarKind::PlaceholderConst(placeholder, _) => placeholder.universe,
+ }
+ }
+}
+
+/// Rust actually has more than one category of type variables;
+/// notably, the type variables we create for literals (e.g., 22 or
+/// 22.) can only be instantiated with integral/float types (e.g.,
+/// usize or f32). In order to faithfully reproduce a type, we need to
+/// know what set of types a given type variable can be unified with.
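+///
+/// For example (illustrative): in `let x = 22;` the literal gets an `Int`
+/// variable, which may later unify with `i32`, `u8`, etc., but never with `f32`.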
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, TyDecodable, TyEncodable, HashStable)]
+pub enum CanonicalTyVarKind {
+ /// General type variable `?T` that can be unified with arbitrary types.
+ General(ty::UniverseIndex),
+
+ /// Integral type variable `?I` (that can only be unified with integral types).
+ Int,
+
+ /// Floating-point type variable `?F` (that can only be unified with float types).
+ Float,
+}
+
+/// After we execute a query with a canonicalized key, we get back a
+/// `Canonical<QueryResponse<..>>`. You can use
+/// `instantiate_query_result` to access the data in this result.
+#[derive(Clone, Debug, HashStable, TypeFoldable, TypeVisitable, Lift)]
+pub struct QueryResponse<'tcx, R> {
+ pub var_values: CanonicalVarValues<'tcx>,
+ pub region_constraints: QueryRegionConstraints<'tcx>,
+ pub certainty: Certainty,
+ /// List of opaque types which we tried to compare to another type.
+ /// Inside the query we don't know yet whether the opaque type actually
+ /// should get its hidden type inferred. So we bubble the opaque type
+ /// and the type it was compared against upwards and let the query caller
+ /// handle it.
+ pub opaque_types: Vec<(Ty<'tcx>, Ty<'tcx>)>,
+ pub value: R,
+}
+
+#[derive(Clone, Debug, Default, HashStable, TypeFoldable, TypeVisitable, Lift)]
+pub struct QueryRegionConstraints<'tcx> {
+ pub outlives: Vec<QueryOutlivesConstraint<'tcx>>,
+ pub member_constraints: Vec<MemberConstraint<'tcx>>,
+}
+
+impl QueryRegionConstraints<'_> {
+ /// Represents an empty (trivially true) set of region
+ /// constraints.
+ pub fn is_empty(&self) -> bool {
+ self.outlives.is_empty() && self.member_constraints.is_empty()
+ }
+}
+
+pub type Canonicalized<'tcx, V> = Canonical<'tcx, V>;
+
+pub type CanonicalizedQueryResponse<'tcx, T> = &'tcx Canonical<'tcx, QueryResponse<'tcx, T>>;
+
+/// Indicates whether or not we were able to prove the query to be
+/// true.
+#[derive(Copy, Clone, Debug, HashStable)]
+pub enum Certainty {
+ /// The query is known to be true, presuming that you apply the
+ /// given `var_values` and the region-constraints are satisfied.
+ Proven,
+
+ /// The query is not known to be true, but also not known to be
+ /// false. The `var_values` represent *either* values that must
+ /// hold in order for the query to be true, or helpful tips that
+ /// *might* make it true. Currently rustc's trait solver cannot
+ /// distinguish the two (e.g., due to our preference for where
+ /// clauses over impls).
+ ///
+    /// After some unification and other inference work has been done, it makes
+    /// sense to try to prove it again -- of course, at that point, the
+ /// canonical form will be different, making this a distinct
+ /// query.
+ Ambiguous,
+}
+
+impl Certainty {
+ pub fn is_proven(&self) -> bool {
+ match self {
+ Certainty::Proven => true,
+ Certainty::Ambiguous => false,
+ }
+ }
+}
+
+impl<'tcx, R> QueryResponse<'tcx, R> {
+ pub fn is_proven(&self) -> bool {
+ self.certainty.is_proven()
+ }
+}
+
+impl<'tcx, R> Canonical<'tcx, QueryResponse<'tcx, R>> {
+ pub fn is_proven(&self) -> bool {
+ self.value.is_proven()
+ }
+
+ pub fn is_ambiguous(&self) -> bool {
+ !self.is_proven()
+ }
+}
+
+impl<'tcx, R> Canonical<'tcx, ty::ParamEnvAnd<'tcx, R>> {
+ #[inline]
+ pub fn without_const(mut self) -> Self {
+ self.value = self.value.without_const();
+ self
+ }
+}
+
+impl<'tcx, V> Canonical<'tcx, V> {
+ /// Allows you to map the `value` of a canonical while keeping the
+ /// same set of bound variables.
+ ///
+ /// **WARNING:** This function is very easy to mis-use, hence the
+ /// name! In particular, the new value `W` must use all **the
+ /// same type/region variables** in **precisely the same order**
+ /// as the original! (The ordering is defined by the
+ /// `TypeFoldable` implementation of the type in question.)
+ ///
+ /// An example of a **correct** use of this:
+ ///
+ /// ```rust,ignore (not real code)
+ /// let a: Canonical<'_, T> = ...;
+ /// let b: Canonical<'_, (T,)> = a.unchecked_map(|v| (v, ));
+ /// ```
+ ///
+ /// An example of an **incorrect** use of this:
+ ///
+ /// ```rust,ignore (not real code)
+ /// let a: Canonical<'tcx, T> = ...;
+ /// let ty: Ty<'tcx> = ...;
+ /// let b: Canonical<'tcx, (T, Ty<'tcx>)> = a.unchecked_map(|v| (v, ty));
+ /// ```
+ pub fn unchecked_map<W>(self, map_op: impl FnOnce(V) -> W) -> Canonical<'tcx, W> {
+ let Canonical { max_universe, variables, value } = self;
+ Canonical { max_universe, variables, value: map_op(value) }
+ }
+}
+
+pub type QueryOutlivesConstraint<'tcx> =
+ ty::Binder<'tcx, ty::OutlivesPredicate<GenericArg<'tcx>, Region<'tcx>>>;
+
+TrivialTypeTraversalAndLiftImpls! {
+ for <'tcx> {
+ crate::infer::canonical::Certainty,
+ crate::infer::canonical::CanonicalVarInfo<'tcx>,
+ crate::infer::canonical::CanonicalVarKind<'tcx>,
+ }
+}
+
+TrivialTypeTraversalImpls! {
+ for <'tcx> {
+ crate::infer::canonical::CanonicalVarInfos<'tcx>,
+ }
+}
+
+impl<'tcx> CanonicalVarValues<'tcx> {
+ #[inline]
+ pub fn len(&self) -> usize {
+ self.var_values.len()
+ }
+
+ /// Makes an identity substitution from this one: each bound var
+ /// is matched to the same bound var, preserving the original kinds.
+ /// For example, if we have:
+ /// `self.var_values == [Type(u32), Lifetime('a), Type(u64)]`
+ /// we'll return a substitution `subst` with:
+ /// `subst.var_values == [Type(^0), Lifetime(^1), Type(^2)]`.
+ pub fn make_identity(&self, tcx: TyCtxt<'tcx>) -> Self {
+ use crate::ty::subst::GenericArgKind;
+
+ CanonicalVarValues {
+ var_values: iter::zip(&self.var_values, 0..)
+ .map(|(kind, i)| match kind.unpack() {
+ GenericArgKind::Type(..) => {
+ tcx.mk_ty(ty::Bound(ty::INNERMOST, ty::BoundVar::from_u32(i).into())).into()
+ }
+ GenericArgKind::Lifetime(..) => {
+ let br =
+ ty::BoundRegion { var: ty::BoundVar::from_u32(i), kind: ty::BrAnon(i) };
+ tcx.mk_region(ty::ReLateBound(ty::INNERMOST, br)).into()
+ }
+ GenericArgKind::Const(ct) => tcx
+ .mk_const(ty::ConstS {
+ ty: ct.ty(),
+ kind: ty::ConstKind::Bound(ty::INNERMOST, ty::BoundVar::from_u32(i)),
+ })
+ .into(),
+ })
+ .collect(),
+ }
+ }
+}
+
+impl<'a, 'tcx> IntoIterator for &'a CanonicalVarValues<'tcx> {
+ type Item = GenericArg<'tcx>;
+ type IntoIter = ::std::iter::Cloned<::std::slice::Iter<'a, GenericArg<'tcx>>>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.var_values.iter().cloned()
+ }
+}
+
+impl<'tcx> Index<BoundVar> for CanonicalVarValues<'tcx> {
+ type Output = GenericArg<'tcx>;
+
+ fn index(&self, value: BoundVar) -> &GenericArg<'tcx> {
+ &self.var_values[value]
+ }
+}
diff --git a/compiler/rustc_middle/src/infer/mod.rs b/compiler/rustc_middle/src/infer/mod.rs
new file mode 100644
index 000000000..38868c210
--- /dev/null
+++ b/compiler/rustc_middle/src/infer/mod.rs
@@ -0,0 +1,32 @@
+pub mod canonical;
+pub mod unify_key;
+
+use crate::ty::Region;
+use crate::ty::{OpaqueTypeKey, Ty};
+use rustc_data_structures::sync::Lrc;
+use rustc_span::Span;
+
+/// Requires that `region` be equal to one of the regions in `choice_regions`.
+/// We often denote this using the syntax:
+///
+/// ```text
+/// R0 member of [O1..On]
+/// ```
+#[derive(Debug, Clone, HashStable, TypeFoldable, TypeVisitable, Lift)]
+pub struct MemberConstraint<'tcx> {
+ /// The `DefId` and substs of the opaque type causing this constraint.
+ /// Used for error reporting.
+ pub key: OpaqueTypeKey<'tcx>,
+
+ /// The span where the hidden type was instantiated.
+ pub definition_span: Span,
+
+ /// The hidden type in which `member_region` appears: used for error reporting.
+ pub hidden_ty: Ty<'tcx>,
+
+ /// The region `R0`.
+ pub member_region: Region<'tcx>,
+
+ /// The options `O1..On`.
+ pub choice_regions: Lrc<Vec<Region<'tcx>>>,
+}
diff --git a/compiler/rustc_middle/src/infer/unify_key.rs b/compiler/rustc_middle/src/infer/unify_key.rs
new file mode 100644
index 000000000..f2627885d
--- /dev/null
+++ b/compiler/rustc_middle/src/infer/unify_key.rs
@@ -0,0 +1,162 @@
+use crate::ty::{self, Ty, TyCtxt};
+use rustc_data_structures::unify::{NoError, UnifyKey, UnifyValue};
+use rustc_span::def_id::DefId;
+use rustc_span::symbol::Symbol;
+use rustc_span::Span;
+use std::cmp;
+use std::marker::PhantomData;
+
+pub trait ToType {
+ fn to_type<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
+}
+
+#[derive(PartialEq, Copy, Clone, Debug)]
+pub struct UnifiedRegion<'tcx>(pub Option<ty::Region<'tcx>>);
+
+#[derive(PartialEq, Copy, Clone, Debug)]
+pub struct RegionVidKey<'tcx> {
+ pub vid: ty::RegionVid,
+ pub phantom: PhantomData<UnifiedRegion<'tcx>>,
+}
+
+impl<'tcx> From<ty::RegionVid> for RegionVidKey<'tcx> {
+ fn from(vid: ty::RegionVid) -> Self {
+ RegionVidKey { vid, phantom: PhantomData }
+ }
+}
+
+impl<'tcx> UnifyKey for RegionVidKey<'tcx> {
+ type Value = UnifiedRegion<'tcx>;
+ #[inline]
+ fn index(&self) -> u32 {
+ self.vid.as_u32()
+ }
+ #[inline]
+ fn from_index(i: u32) -> Self {
+ RegionVidKey::from(ty::RegionVid::from_u32(i))
+ }
+ fn tag() -> &'static str {
+ "RegionVidKey"
+ }
+}
+
+impl<'tcx> UnifyValue for UnifiedRegion<'tcx> {
+ type Error = NoError;
+
+ fn unify_values(value1: &Self, value2: &Self) -> Result<Self, NoError> {
+ Ok(match (value1.0, value2.0) {
+ // Here we can just pick one value, because the full constraints graph
+ // will be handled later. Ideally, we might want a `MultipleValues`
+ // variant or something. For now though, this is fine.
+ (Some(_), Some(_)) => *value1,
+
+ (Some(_), _) => *value1,
+ (_, Some(_)) => *value2,
+
+ (None, None) => *value1,
+ })
+ }
+}
+
+impl ToType for ty::IntVarValue {
+ fn to_type<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
+ match *self {
+ ty::IntType(i) => tcx.mk_mach_int(i),
+ ty::UintType(i) => tcx.mk_mach_uint(i),
+ }
+ }
+}
+
+impl ToType for ty::FloatVarValue {
+ fn to_type<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
+ tcx.mk_mach_float(self.0)
+ }
+}
+
+// Generic consts.
+
+#[derive(Copy, Clone, Debug)]
+pub struct ConstVariableOrigin {
+ pub kind: ConstVariableOriginKind,
+ pub span: Span,
+}
+
+/// Reasons to create a const inference variable.
+#[derive(Copy, Clone, Debug)]
+pub enum ConstVariableOriginKind {
+ MiscVariable,
+ ConstInference,
+ ConstParameterDefinition(Symbol, DefId),
+ SubstitutionPlaceholder,
+}
+
+#[derive(Copy, Clone, Debug)]
+pub enum ConstVariableValue<'tcx> {
+ Known { value: ty::Const<'tcx> },
+ Unknown { universe: ty::UniverseIndex },
+}
+
+impl<'tcx> ConstVariableValue<'tcx> {
+ /// If this value is known, returns the const it is known to be.
+ /// Otherwise, `None`.
+ pub fn known(&self) -> Option<ty::Const<'tcx>> {
+ match *self {
+ ConstVariableValue::Unknown { .. } => None,
+ ConstVariableValue::Known { value } => Some(value),
+ }
+ }
+}
+
+#[derive(Copy, Clone, Debug)]
+pub struct ConstVarValue<'tcx> {
+ pub origin: ConstVariableOrigin,
+ pub val: ConstVariableValue<'tcx>,
+}
+
+impl<'tcx> UnifyKey for ty::ConstVid<'tcx> {
+ type Value = ConstVarValue<'tcx>;
+ #[inline]
+ fn index(&self) -> u32 {
+ self.index
+ }
+ #[inline]
+ fn from_index(i: u32) -> Self {
+ ty::ConstVid { index: i, phantom: PhantomData }
+ }
+ fn tag() -> &'static str {
+ "ConstVid"
+ }
+}
+
+impl<'tcx> UnifyValue for ConstVarValue<'tcx> {
+ type Error = (ty::Const<'tcx>, ty::Const<'tcx>);
+
+ fn unify_values(&value1: &Self, &value2: &Self) -> Result<Self, Self::Error> {
+ Ok(match (value1.val, value2.val) {
+ (ConstVariableValue::Known { .. }, ConstVariableValue::Known { .. }) => {
+ bug!("equating two const variables, both of which have known values")
+ }
+
+ // If one side is known, prefer that one.
+ (ConstVariableValue::Known { .. }, ConstVariableValue::Unknown { .. }) => value1,
+ (ConstVariableValue::Unknown { .. }, ConstVariableValue::Known { .. }) => value2,
+
+ // If both sides are *unknown*, it hardly matters, does it?
+ (
+ ConstVariableValue::Unknown { universe: universe1 },
+ ConstVariableValue::Unknown { universe: universe2 },
+ ) => {
+ // If we unify two unbound variables, ?T and ?U, then whatever
+ // value they wind up taking (which must be the same value) must
+ // be nameable by both universes. Therefore, the resulting
+ // universe is the minimum of the two universes, because that is
+ // the one which contains the fewest names in scope.
+ let universe = cmp::min(universe1, universe2);
+ ConstVarValue {
+ val: ConstVariableValue::Unknown { universe },
+ origin: value1.origin,
+ }
+ }
+ })
+ }
+}
diff --git a/compiler/rustc_middle/src/lib.rs b/compiler/rustc_middle/src/lib.rs
new file mode 100644
index 000000000..ef06c457b
--- /dev/null
+++ b/compiler/rustc_middle/src/lib.rs
@@ -0,0 +1,106 @@
+//! The "main crate" of the Rust compiler. This crate contains common
+//! type definitions that are used by the other crates in the rustc
+//! "family". Some prominent examples (note that each of these modules
+//! has its own README with further details).
+//!
+//! - **HIR.** The "high-level (H) intermediate representation (IR)" is
+//! defined in the `hir` module.
+//! - **MIR.** The "mid-level (M) intermediate representation (IR)" is
+//! defined in the `mir` module. This module contains only the
+//! *definition* of the MIR; the passes that transform and operate
+//! on MIR are found in the `rustc_const_eval` crate.
+//! - **Types.** The internal representation of types used in rustc is
+//! defined in the `ty` module. This includes the **type context**
+//! (or `tcx`), which is the central context during most of
+//! compilation, containing the interners and other things.
+//!
+//! For more information about how rustc works, see the [rustc dev guide].
+//!
+//! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/
+//!
+//! # Note
+//!
+//! This API is completely unstable and subject to change.
+
+#![doc(html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/")]
+#![feature(allocator_api)]
+#![feature(array_windows)]
+#![feature(assert_matches)]
+#![feature(backtrace)]
+#![feature(box_patterns)]
+#![feature(core_intrinsics)]
+#![feature(discriminant_kind)]
+#![feature(exhaustive_patterns)]
+#![feature(get_mut_unchecked)]
+#![feature(generic_associated_types)]
+#![feature(if_let_guard)]
+#![feature(map_first_last)]
+#![feature(negative_impls)]
+#![feature(never_type)]
+#![feature(extern_types)]
+#![feature(new_uninit)]
+#![feature(once_cell)]
+#![feature(let_chains)]
+#![feature(let_else)]
+#![feature(min_specialization)]
+#![feature(trusted_len)]
+#![feature(type_alias_impl_trait)]
+#![feature(associated_type_bounds)]
+#![feature(rustc_attrs)]
+#![feature(half_open_range_patterns)]
+#![feature(control_flow_enum)]
+#![feature(associated_type_defaults)]
+#![feature(trusted_step)]
+#![feature(try_blocks)]
+#![feature(try_reserve_kind)]
+#![feature(nonzero_ops)]
+#![feature(unwrap_infallible)]
+#![feature(decl_macro)]
+#![feature(drain_filter)]
+#![feature(intra_doc_pointers)]
+#![feature(yeet_expr)]
+#![feature(const_option)]
+#![recursion_limit = "512"]
+#![allow(rustc::potential_query_instability)]
+
+#[macro_use]
+extern crate bitflags;
+#[macro_use]
+extern crate rustc_macros;
+#[macro_use]
+extern crate rustc_data_structures;
+#[macro_use]
+extern crate tracing;
+#[macro_use]
+extern crate smallvec;
+
+#[cfg(test)]
+mod tests;
+
+#[macro_use]
+mod macros;
+
+#[macro_use]
+pub mod query;
+
+#[macro_use]
+pub mod arena;
+#[macro_use]
+pub mod dep_graph;
+pub mod hir;
+pub mod infer;
+pub mod lint;
+pub mod metadata;
+pub mod middle;
+pub mod mir;
+pub mod thir;
+pub mod traits;
+pub mod ty;
+
+pub mod util {
+ pub mod bug;
+ pub mod common;
+}
+
+// Allows macros to refer to this crate as `::rustc_middle`
+extern crate self as rustc_middle;
diff --git a/compiler/rustc_middle/src/lint.rs b/compiler/rustc_middle/src/lint.rs
new file mode 100644
index 000000000..2f45222de
--- /dev/null
+++ b/compiler/rustc_middle/src/lint.rs
@@ -0,0 +1,443 @@
+use std::cmp;
+
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_errors::{Diagnostic, DiagnosticId, LintDiagnosticBuilder, MultiSpan};
+use rustc_hir::HirId;
+use rustc_index::vec::IndexVec;
+use rustc_query_system::ich::StableHashingContext;
+use rustc_session::lint::{
+ builtin::{self, FORBIDDEN_LINT_GROUPS},
+ FutureIncompatibilityReason, Level, Lint, LintExpectationId, LintId,
+};
+use rustc_session::Session;
+use rustc_span::hygiene::MacroKind;
+use rustc_span::source_map::{DesugaringKind, ExpnKind};
+use rustc_span::{symbol, Span, Symbol, DUMMY_SP};
+
+/// How a lint level was set.
+#[derive(Clone, Copy, PartialEq, Eq, HashStable, Debug)]
+pub enum LintLevelSource {
+ /// Lint is at the default level as declared
+ /// in rustc or a plugin.
+ Default,
+
+ /// Lint level was set by an attribute.
+ Node(Symbol, Span, Option<Symbol> /* RFC 2383 reason */),
+
+ /// Lint level was set by a command-line flag.
+ /// The provided `Level` is the level specified on the command line.
+ /// (The actual level may be lower due to `--cap-lints`.)
+ CommandLine(Symbol, Level),
+}
+
+impl LintLevelSource {
+ pub fn name(&self) -> Symbol {
+ match *self {
+ LintLevelSource::Default => symbol::kw::Default,
+ LintLevelSource::Node(name, _, _) => name,
+ LintLevelSource::CommandLine(name, _) => name,
+ }
+ }
+
+ pub fn span(&self) -> Span {
+ match *self {
+ LintLevelSource::Default => DUMMY_SP,
+ LintLevelSource::Node(_, span, _) => span,
+ LintLevelSource::CommandLine(_, _) => DUMMY_SP,
+ }
+ }
+}
+
+/// A tuple of a lint level and its source.
+pub type LevelAndSource = (Level, LintLevelSource);
+
+#[derive(Debug, HashStable)]
+pub struct LintLevelSets {
+ pub list: IndexVec<LintStackIndex, LintSet>,
+ pub lint_cap: Level,
+}
+
+rustc_index::newtype_index! {
+ #[derive(HashStable)]
+ pub struct LintStackIndex {
+ const COMMAND_LINE = 0,
+ }
+}
+
+#[derive(Debug, HashStable)]
+pub struct LintSet {
+ // -A,-W,-D flags: a `Symbol` for the flag itself and a `Level` for the
+ // level that flag requested.
+ pub specs: FxHashMap<LintId, LevelAndSource>,
+
+ pub parent: LintStackIndex,
+}
+
+impl LintLevelSets {
+ pub fn new() -> Self {
+ LintLevelSets { list: IndexVec::new(), lint_cap: Level::Forbid }
+ }
+
+ pub fn get_lint_level(
+ &self,
+ lint: &'static Lint,
+ idx: LintStackIndex,
+ aux: Option<&FxHashMap<LintId, LevelAndSource>>,
+ sess: &Session,
+ ) -> LevelAndSource {
+ let (level, mut src) = self.get_lint_id_level(LintId::of(lint), idx, aux);
+
+ // If `level` is `None`, then we assume the default level for this
+ // lint.
+ let mut level = level.unwrap_or_else(|| lint.default_level(sess.edition()));
+
+ // If we're about to issue a warning, check at the last minute for any
+ // directives against the warnings "lint". If, for example, there's an
+ // `allow(warnings)` in scope then we want to respect that instead.
+ //
+ // We exempt `FORBIDDEN_LINT_GROUPS` from this because it specifically
+ // triggers in cases (like #80988) where you have `forbid(warnings)`,
+ // and so if we turned that into an error, it'd defeat the purpose of the
+ // future compatibility warning.
+ if level == Level::Warn && LintId::of(lint) != LintId::of(FORBIDDEN_LINT_GROUPS) {
+ let (warnings_level, warnings_src) =
+ self.get_lint_id_level(LintId::of(builtin::WARNINGS), idx, aux);
+ if let Some(configured_warning_level) = warnings_level {
+ if configured_warning_level != Level::Warn {
+ level = configured_warning_level;
+ src = warnings_src;
+ }
+ }
+ }
+
+ // Ensure that we never exceed the `--cap-lints` argument
+ // unless the source is a --force-warn
+ level = if let LintLevelSource::CommandLine(_, Level::ForceWarn(_)) = src {
+ level
+ } else {
+ cmp::min(level, self.lint_cap)
+ };
+
+ if let Some(driver_level) = sess.driver_lint_caps.get(&LintId::of(lint)) {
+ // Ensure that we never exceed driver level.
+ level = cmp::min(*driver_level, level);
+ }
+
+ (level, src)
+ }
+
+ pub fn get_lint_id_level(
+ &self,
+ id: LintId,
+ mut idx: LintStackIndex,
+ aux: Option<&FxHashMap<LintId, LevelAndSource>>,
+ ) -> (Option<Level>, LintLevelSource) {
+ if let Some(specs) = aux {
+ if let Some(&(level, src)) = specs.get(&id) {
+ return (Some(level), src);
+ }
+ }
+ loop {
+ let LintSet { ref specs, parent } = self.list[idx];
+ if let Some(&(level, src)) = specs.get(&id) {
+ return (Some(level), src);
+ }
+ if idx == COMMAND_LINE {
+ return (None, LintLevelSource::Default);
+ }
+ idx = parent;
+ }
+ }
+}
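`get_lint_id_level` is a parent-pointer walk: start at a stack index, check the innermost set, and follow `parent` links until the command-line root. A self-contained sketch of the same shape, with toy types (index 0 plays the role of `COMMAND_LINE`):

```rust
use std::collections::HashMap;

struct Set {
    specs: HashMap<String, u8>, // lint name -> level set at this node
    parent: usize,              // index of the enclosing set
}

/// Walks outward from `idx` and returns the innermost level set for `id`,
/// or `None` once the command-line root is exhausted.
fn lookup(sets: &[Set], mut idx: usize, id: &str) -> Option<u8> {
    loop {
        if let Some(&level) = sets[idx].specs.get(id) {
            return Some(level);
        }
        if idx == 0 {
            return None; // caller falls back to the lint's default level
        }
        idx = sets[idx].parent;
    }
}
```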
+
+#[derive(Debug)]
+pub struct LintLevelMap {
+ /// This is a collection of lint expectations, as described in RFC 2383, that
+ /// can be fulfilled during this compilation session. This means that at least
+ /// one expected lint is currently registered in the lint store.
+ ///
+ /// The [`LintExpectationId`] is stored as a part of the [`Expect`](Level::Expect)
+ /// lint level.
+ pub lint_expectations: Vec<(LintExpectationId, LintExpectation)>,
+ pub sets: LintLevelSets,
+ pub id_to_set: FxHashMap<HirId, LintStackIndex>,
+}
+
+impl LintLevelMap {
+ /// If the `id` was previously registered with `register_id` when building
+ /// this `LintLevelMap`, this returns the corresponding lint level and source
+ /// of the lint level for the lint provided.
+ ///
+ /// If the `id` was not previously registered, returns `None`. If `None` is
+ /// returned then the parent of `id` should be acquired and this function
+ /// should be called again.
+ pub fn level_and_source(
+ &self,
+ lint: &'static Lint,
+ id: HirId,
+ session: &Session,
+ ) -> Option<LevelAndSource> {
+ self.id_to_set.get(&id).map(|idx| self.sets.get_lint_level(lint, *idx, None, session))
+ }
+}
+
+impl<'a> HashStable<StableHashingContext<'a>> for LintLevelMap {
+ #[inline]
+ fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+ let LintLevelMap { ref sets, ref id_to_set, ref lint_expectations } = *self;
+
+ id_to_set.hash_stable(hcx, hasher);
+ lint_expectations.hash_stable(hcx, hasher);
+
+ hcx.while_hashing_spans(true, |hcx| sets.hash_stable(hcx, hasher))
+ }
+}
+
+/// This struct represents a lint expectation and holds all required information
+/// to emit the `unfulfilled_lint_expectations` lint if it is unfulfilled after
+/// the `LateLintPass` has completed.
+#[derive(Clone, Debug, HashStable)]
+pub struct LintExpectation {
+ /// The reason for this expectation that can optionally be added as part of
+ /// the attribute. It will be displayed as part of the lint message.
+ pub reason: Option<Symbol>,
+ /// The [`Span`] of the attribute that this expectation originated from.
+ pub emission_span: Span,
+ /// Lint messages for the `unfulfilled_lint_expectations` lint itself will be
+ /// adjusted to include an additional note. Therefore, we have to track whether
+ /// the expectation is for that lint.
+ pub is_unfulfilled_lint_expectations: bool,
+ /// This will hold the name of the tool that this lint belongs to. For
+ /// the lint `clippy::some_lint` the tool would be `clippy`; the same
+ /// goes for `rustdoc`. This will be `None` for rustc lints.
+ pub lint_tool: Option<Symbol>,
+}
+
+impl LintExpectation {
+ pub fn new(
+ reason: Option<Symbol>,
+ emission_span: Span,
+ is_unfulfilled_lint_expectations: bool,
+ lint_tool: Option<Symbol>,
+ ) -> Self {
+ Self { reason, emission_span, is_unfulfilled_lint_expectations, lint_tool }
+ }
+}
+
+pub fn explain_lint_level_source(
+ lint: &'static Lint,
+ level: Level,
+ src: LintLevelSource,
+ err: &mut Diagnostic,
+) {
+ let name = lint.name_lower();
+ match src {
+ LintLevelSource::Default => {
+ err.note_once(&format!("`#[{}({})]` on by default", level.as_str(), name));
+ }
+ LintLevelSource::CommandLine(lint_flag_val, orig_level) => {
+ let flag = match orig_level {
+ Level::Warn => "-W",
+ Level::Deny => "-D",
+ Level::Forbid => "-F",
+ Level::Allow => "-A",
+ Level::ForceWarn(_) => "--force-warn",
+ Level::Expect(_) => {
+ unreachable!("the expect level does not have a commandline flag")
+ }
+ };
+ let hyphen_case_lint_name = name.replace('_', "-");
+ if lint_flag_val.as_str() == name {
+ err.note_once(&format!(
+ "requested on the command line with `{} {}`",
+ flag, hyphen_case_lint_name
+ ));
+ } else {
+ let hyphen_case_flag_val = lint_flag_val.as_str().replace('_', "-");
+ err.note_once(&format!(
+ "`{} {}` implied by `{} {}`",
+ flag, hyphen_case_lint_name, flag, hyphen_case_flag_val
+ ));
+ }
+ }
+ LintLevelSource::Node(lint_attr_name, src, reason) => {
+ if let Some(rationale) = reason {
+ err.note(rationale.as_str());
+ }
+ err.span_note_once(src, "the lint level is defined here");
+ if lint_attr_name.as_str() != name {
+ let level_str = level.as_str();
+ err.note_once(&format!(
+ "`#[{}({})]` implied by `#[{}({})]`",
+ level_str, name, level_str, lint_attr_name
+ ));
+ }
+ }
+ }
+}
+
+pub fn struct_lint_level<'s, 'd>(
+ sess: &'s Session,
+ lint: &'static Lint,
+ level: Level,
+ src: LintLevelSource,
+ span: Option<MultiSpan>,
+ decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a, ()>) + 'd,
+) {
+ // Avoid codegen bloat from monomorphization by immediately doing dyn dispatch of `decorate` to
+ // the "real" work.
+ fn struct_lint_level_impl<'s, 'd>(
+ sess: &'s Session,
+ lint: &'static Lint,
+ level: Level,
+ src: LintLevelSource,
+ span: Option<MultiSpan>,
+ decorate: Box<dyn for<'b> FnOnce(LintDiagnosticBuilder<'b, ()>) + 'd>,
+ ) {
+ // Check for future incompatibility lints and issue a stronger warning.
+ let future_incompatible = lint.future_incompatible;
+
+ let has_future_breakage = future_incompatible.map_or(
+ // Default allow lints trigger too often for testing.
+ sess.opts.unstable_opts.future_incompat_test && lint.default_level != Level::Allow,
+ |incompat| {
+ matches!(incompat.reason, FutureIncompatibilityReason::FutureReleaseErrorReportNow)
+ },
+ );
+
+ let mut err = match (level, span) {
+ (Level::Allow, span) => {
+ if has_future_breakage {
+ if let Some(span) = span {
+ sess.struct_span_allow(span, "")
+ } else {
+ sess.struct_allow("")
+ }
+ } else {
+ return;
+ }
+ }
+ (Level::Expect(expect_id), _) => {
+ // This case is special as we actually allow the lint itself in this context, but
+ // we can't return early like in the case for `Level::Allow` because we still
+ // need the lint diagnostic to be emitted to `rustc_error::HandlerInner`.
+ //
+ // We can also not mark the lint expectation as fulfilled here right away, as it
+ // can still be cancelled in the decorate function. All of this means that we simply
+ // create a `DiagnosticBuilder` and continue as we would for warnings.
+ sess.struct_expect("", expect_id)
+ }
+ (Level::ForceWarn(Some(expect_id)), Some(span)) => {
+ sess.struct_span_warn_with_expectation(span, "", expect_id)
+ }
+ (Level::ForceWarn(Some(expect_id)), None) => {
+ sess.struct_warn_with_expectation("", expect_id)
+ }
+ (Level::Warn | Level::ForceWarn(None), Some(span)) => sess.struct_span_warn(span, ""),
+ (Level::Warn | Level::ForceWarn(None), None) => sess.struct_warn(""),
+ (Level::Deny | Level::Forbid, Some(span)) => {
+ let mut builder = sess.diagnostic().struct_err_lint("");
+ builder.set_span(span);
+ builder
+ }
+ (Level::Deny | Level::Forbid, None) => sess.diagnostic().struct_err_lint(""),
+ };
+
+ // If this code originates in a foreign macro, aka something that this crate
+ // did not itself author, then it's likely that there's nothing this crate
+ // can do about it. We probably want to skip the lint entirely.
+ if err.span.primary_spans().iter().any(|s| in_external_macro(sess, *s)) {
+ // Any suggestions made here are likely to be incorrect, so anything we
+ // emit shouldn't be automatically fixed by rustfix.
+ err.disable_suggestions();
+
+ // If this is a future incompatible that is not an edition fixing lint
+ // it'll become a hard error, so we have to emit *something*. Also,
+ // if this lint occurs in the expansion of a macro from an external crate,
+ // allow individual lints to opt-out from being reported.
+ let not_future_incompatible =
+ future_incompatible.map(|f| f.reason.edition().is_some()).unwrap_or(true);
+ if not_future_incompatible && !lint.report_in_external_macro {
+ err.cancel();
+ // Don't continue further, since we don't want to have
+ // `diag_span_note_once` called for a diagnostic that isn't emitted.
+ return;
+ }
+ }
+
+ // Lint diagnostics that are covered by the expect level will not be emitted outside
+ // the compiler. It is therefore not necessary to add any information for the user.
+ // This will therefore directly call the decorate function which will in turn emit
+ // the `Diagnostic`.
+ if let Level::Expect(_) = level {
+ let name = lint.name_lower();
+ err.code(DiagnosticId::Lint { name, has_future_breakage, is_force_warn: false });
+ decorate(LintDiagnosticBuilder::new(err));
+ return;
+ }
+
+ explain_lint_level_source(lint, level, src, &mut err);
+
+ let name = lint.name_lower();
+ let is_force_warn = matches!(level, Level::ForceWarn(_));
+ err.code(DiagnosticId::Lint { name, has_future_breakage, is_force_warn });
+
+ if let Some(future_incompatible) = future_incompatible {
+ let explanation = match future_incompatible.reason {
+ FutureIncompatibilityReason::FutureReleaseError
+ | FutureIncompatibilityReason::FutureReleaseErrorReportNow => {
+ "this was previously accepted by the compiler but is being phased out; \
+ it will become a hard error in a future release!"
+ .to_owned()
+ }
+ FutureIncompatibilityReason::FutureReleaseSemanticsChange => {
+ "this will change its meaning in a future release!".to_owned()
+ }
+ FutureIncompatibilityReason::EditionError(edition) => {
+ let current_edition = sess.edition();
+ format!(
+ "this is accepted in the current edition (Rust {}) but is a hard error in Rust {}!",
+ current_edition, edition
+ )
+ }
+ FutureIncompatibilityReason::EditionSemanticsChange(edition) => {
+ format!("this changes meaning in Rust {}", edition)
+ }
+ FutureIncompatibilityReason::Custom(reason) => reason.to_owned(),
+ };
+
+ if future_incompatible.explain_reason {
+ err.warn(&explanation);
+ }
+ if !future_incompatible.reference.is_empty() {
+ let citation =
+ format!("for more information, see {}", future_incompatible.reference);
+ err.note(&citation);
+ }
+ }
+
+ // Finally, run `decorate`. This function is also responsible for emitting the diagnostic.
+ decorate(LintDiagnosticBuilder::new(err));
+ }
+ struct_lint_level_impl(sess, lint, level, src, span, Box::new(decorate))
+}
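The shim-plus-inner-function shape is a deliberate pattern for containing monomorphization bloat: the generic outer function only boxes the closure, so the large body is compiled once rather than once per closure type. A minimal sketch of the pattern (names invented):

```rust
pub fn with_callback<'d>(x: u32, callback: impl FnOnce(u32) + 'd) {
    // Non-generic inner function: compiled exactly once, no matter how many
    // distinct closure types the outer shim is instantiated with.
    fn with_callback_impl<'d>(x: u32, callback: Box<dyn FnOnce(u32) + 'd>) {
        // ... all the heavy shared work would live here ...
        callback(x);
    }
    with_callback_impl(x, Box::new(callback))
}
```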
+
+/// Returns whether `span` originates in a foreign crate's external macro.
+///
+/// This is used to decide whether a lint should even bother to figure out whether it should
+/// be reported on the current node.
+pub fn in_external_macro(sess: &Session, span: Span) -> bool {
+ let expn_data = span.ctxt().outer_expn_data();
+ match expn_data.kind {
+ ExpnKind::Inlined
+ | ExpnKind::Root
+ | ExpnKind::Desugaring(DesugaringKind::ForLoop | DesugaringKind::WhileLoop) => false,
+ ExpnKind::AstPass(_) | ExpnKind::Desugaring(_) => true, // well, it's "external"
+ ExpnKind::Macro(MacroKind::Bang, _) => {
+ // Dummy span for the `def_site` means it's an external macro.
+ expn_data.def_site.is_dummy() || sess.source_map().is_imported(expn_data.def_site)
+ }
+ ExpnKind::Macro { .. } => true, // definitely a plugin
+ }
+}
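A sketch of how lint code typically uses this check as an early-out (the `LateLintPass`-style context here is illustrative, not from this file):

```rust
fn check_expr(&mut self, cx: &LateContext<'_>, expr: &Expr<'_>) {
    if in_external_macro(cx.sess(), expr.span) {
        // Not code this crate authored; nothing actionable to report.
        return;
    }
    // ... the actual lint logic ...
}
```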
diff --git a/compiler/rustc_middle/src/macros.rs b/compiler/rustc_middle/src/macros.rs
new file mode 100644
index 000000000..0e85c60a3
--- /dev/null
+++ b/compiler/rustc_middle/src/macros.rs
@@ -0,0 +1,232 @@
+#[macro_export]
+macro_rules! bug {
+ () => ( $crate::bug!("impossible case reached") );
+ ($msg:expr) => ({ $crate::util::bug::bug_fmt(::std::format_args!($msg)) });
+ ($msg:expr,) => ({ $crate::bug!($msg) });
+ ($fmt:expr, $($arg:tt)+) => ({
+ $crate::util::bug::bug_fmt(::std::format_args!($fmt, $($arg)+))
+ });
+}
+
+#[macro_export]
+macro_rules! span_bug {
+ ($span:expr, $msg:expr) => ({ $crate::util::bug::span_bug_fmt($span, ::std::format_args!($msg)) });
+ ($span:expr, $msg:expr,) => ({ $crate::span_bug!($span, $msg) });
+ ($span:expr, $fmt:expr, $($arg:tt)+) => ({
+ $crate::util::bug::span_bug_fmt($span, ::std::format_args!($fmt, $($arg)+))
+ });
+}
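Both macros use the same trick: every arm normalizes its tokens and delegates to a single `format_args!`-taking function, keeping the code expanded at each call site tiny. A self-contained sketch of that pattern:

```rust
use std::fmt;

/// Stand-in for `util::bug::bug_fmt`: one cold function does all the work.
fn fail_fmt(args: fmt::Arguments<'_>) -> ! {
    panic!("internal error: {}", args)
}

macro_rules! fail {
    () => ( fail!("impossible case reached") );
    ($msg:expr) => ( fail_fmt(format_args!($msg)) );
    ($msg:expr,) => ( fail!($msg) );
    ($fmt:expr, $($arg:tt)+) => ( fail_fmt(format_args!($fmt, $($arg)+)) );
}

fn main() {
    let kind = "unexpected";
    // Aborts the process with the formatted message.
    fail!("unhandled kind: {}", kind);
}
```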
+
+///////////////////////////////////////////////////////////////////////////
+// Lift and TypeFoldable/TypeVisitable macros
+//
+// When possible, use one of these (relatively) convenient macros to write
+// the impls for you.
+
+#[macro_export]
+macro_rules! CloneLiftImpls {
+ (for <$tcx:lifetime> { $($ty:ty,)+ }) => {
+ $(
+ impl<$tcx> $crate::ty::Lift<$tcx> for $ty {
+ type Lifted = Self;
+ fn lift_to_tcx(self, _: $crate::ty::TyCtxt<$tcx>) -> Option<Self> {
+ Some(self)
+ }
+ }
+ )+
+ };
+
+ ($($ty:ty,)+) => {
+ CloneLiftImpls! {
+ for <'tcx> {
+ $($ty,)+
+ }
+ }
+ };
+}
+
+/// Used for types that are `Copy` and which **do not care about
+/// arena-allocated data** (i.e., don't need to be folded).
+#[macro_export]
+macro_rules! TrivialTypeTraversalImpls {
+ (for <$tcx:lifetime> { $($ty:ty,)+ }) => {
+ $(
+ impl<$tcx> $crate::ty::fold::TypeFoldable<$tcx> for $ty {
+ fn try_fold_with<F: $crate::ty::fold::FallibleTypeFolder<$tcx>>(
+ self,
+ _: &mut F
+ ) -> ::std::result::Result<$ty, F::Error> {
+ Ok(self)
+ }
+ }
+
+ impl<$tcx> $crate::ty::visit::TypeVisitable<$tcx> for $ty {
+ fn visit_with<F: $crate::ty::visit::TypeVisitor<$tcx>>(
+ &self,
+ _: &mut F)
+ -> ::std::ops::ControlFlow<F::BreakTy>
+ {
+ ::std::ops::ControlFlow::CONTINUE
+ }
+ }
+ )+
+ };
+
+ ($($ty:ty,)+) => {
+ TrivialTypeTraversalImpls! {
+ for <'tcx> {
+ $($ty,)+
+ }
+ }
+ };
+}
+
+#[macro_export]
+macro_rules! TrivialTypeTraversalAndLiftImpls {
+ ($($t:tt)*) => {
+ TrivialTypeTraversalImpls! { $($t)* }
+ CloneLiftImpls! { $($t)* }
+ }
+}
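Call sites hand these macros a comma-separated list of types; illustrative usage (the exact type list here is an example, not quoted from the source):

```rust
TrivialTypeTraversalAndLiftImpls! {
    String,
    ::rustc_span::Span,
    crate::middle::region::Scope,
}
```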
+
+#[macro_export]
+macro_rules! EnumTypeTraversalImpl {
+ (impl<$($p:tt),*> TypeFoldable<$tcx:tt> for $s:path {
+ $($variants:tt)*
+ } $(where $($wc:tt)*)*) => {
+ impl<$($p),*> $crate::ty::fold::TypeFoldable<$tcx> for $s
+ $(where $($wc)*)*
+ {
+ fn try_fold_with<V: $crate::ty::fold::FallibleTypeFolder<$tcx>>(
+ self,
+ folder: &mut V,
+ ) -> ::std::result::Result<Self, V::Error> {
+ EnumTypeTraversalImpl!(@FoldVariants(self, folder) input($($variants)*) output())
+ }
+ }
+ };
+
+ (impl<$($p:tt),*> TypeVisitable<$tcx:tt> for $s:path {
+ $($variants:tt)*
+ } $(where $($wc:tt)*)*) => {
+ impl<$($p),*> $crate::ty::visit::TypeVisitable<$tcx> for $s
+ $(where $($wc)*)*
+ {
+ fn visit_with<V: $crate::ty::visit::TypeVisitor<$tcx>>(
+ &self,
+ visitor: &mut V,
+ ) -> ::std::ops::ControlFlow<V::BreakTy> {
+ EnumTypeTraversalImpl!(@VisitVariants(self, visitor) input($($variants)*) output())
+ }
+ }
+ };
+
+ (@FoldVariants($this:expr, $folder:expr) input() output($($output:tt)*)) => {
+ Ok(match $this {
+ $($output)*
+ })
+ };
+
+ (@FoldVariants($this:expr, $folder:expr)
+ input( ($variant:path) ( $($variant_arg:ident),* ) , $($input:tt)*)
+ output( $($output:tt)*) ) => {
+ EnumTypeTraversalImpl!(
+ @FoldVariants($this, $folder)
+ input($($input)*)
+ output(
+ $variant ( $($variant_arg),* ) => {
+ $variant (
+ $($crate::ty::fold::TypeFoldable::try_fold_with($variant_arg, $folder)?),*
+ )
+ }
+ $($output)*
+ )
+ )
+ };
+
+ (@FoldVariants($this:expr, $folder:expr)
+ input( ($variant:path) { $($variant_arg:ident),* $(,)? } , $($input:tt)*)
+ output( $($output:tt)*) ) => {
+ EnumTypeTraversalImpl!(
+ @FoldVariants($this, $folder)
+ input($($input)*)
+ output(
+ $variant { $($variant_arg),* } => {
+ $variant {
+ $($variant_arg: $crate::ty::fold::TypeFoldable::try_fold_with(
+ $variant_arg, $folder
+ )?),* }
+ }
+ $($output)*
+ )
+ )
+ };
+
+ (@FoldVariants($this:expr, $folder:expr)
+ input( ($variant:path), $($input:tt)*)
+ output( $($output:tt)*) ) => {
+ EnumTypeTraversalImpl!(
+ @FoldVariants($this, $folder)
+ input($($input)*)
+ output(
+ $variant => { $variant }
+ $($output)*
+ )
+ )
+ };
+
+ (@VisitVariants($this:expr, $visitor:expr) input() output($($output:tt)*)) => {
+ match $this {
+ $($output)*
+ }
+ };
+
+ (@VisitVariants($this:expr, $visitor:expr)
+ input( ($variant:path) ( $($variant_arg:ident),* ) , $($input:tt)*)
+ output( $($output:tt)*) ) => {
+ EnumTypeTraversalImpl!(
+ @VisitVariants($this, $visitor)
+ input($($input)*)
+ output(
+ $variant ( $($variant_arg),* ) => {
+ $($crate::ty::visit::TypeVisitable::visit_with(
+ $variant_arg, $visitor
+ )?;)*
+ ::std::ops::ControlFlow::CONTINUE
+ }
+ $($output)*
+ )
+ )
+ };
+
+ (@VisitVariants($this:expr, $visitor:expr)
+ input( ($variant:path) { $($variant_arg:ident),* $(,)? } , $($input:tt)*)
+ output( $($output:tt)*) ) => {
+ EnumTypeTraversalImpl!(
+ @VisitVariants($this, $visitor)
+ input($($input)*)
+ output(
+ $variant { $($variant_arg),* } => {
+ $($crate::ty::visit::TypeVisitable::visit_with(
+ $variant_arg, $visitor
+ )?;)*
+ ::std::ops::ControlFlow::CONTINUE
+ }
+ $($output)*
+ )
+ )
+ };
+
+ (@VisitVariants($this:expr, $visitor:expr)
+ input( ($variant:path), $($input:tt)*)
+ output( $($output:tt)*) ) => {
+ EnumTypeTraversalImpl!(
+ @VisitVariants($this, $visitor)
+ input($($input)*)
+ output(
+ $variant => { ::std::ops::ControlFlow::CONTINUE }
+ $($output)*
+ )
+ )
+ };
+}
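The `@FoldVariants` rules accumulate one match arm per variant, prepending each new arm to `output`, so the generated `match` lists the variants in reverse declaration order. For a hypothetical two-variant enum, the expansion looks roughly like this (a sketch, not the literal macro output):

```rust
// EnumTypeTraversalImpl!(impl<'tcx> TypeFoldable<'tcx> for MyEnum<'tcx> {
//     (MyEnum::A)(ty),
//     (MyEnum::B),
// });
// expands, roughly, to:
impl<'tcx> crate::ty::fold::TypeFoldable<'tcx> for MyEnum<'tcx> {
    fn try_fold_with<V: crate::ty::fold::FallibleTypeFolder<'tcx>>(
        self,
        folder: &mut V,
    ) -> Result<Self, V::Error> {
        Ok(match self {
            MyEnum::B => MyEnum::B,
            MyEnum::A(ty) => {
                MyEnum::A(crate::ty::fold::TypeFoldable::try_fold_with(ty, folder)?)
            }
        })
    }
}
```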
diff --git a/compiler/rustc_middle/src/metadata.rs b/compiler/rustc_middle/src/metadata.rs
new file mode 100644
index 000000000..c8e78747d
--- /dev/null
+++ b/compiler/rustc_middle/src/metadata.rs
@@ -0,0 +1,26 @@
+use crate::ty;
+
+use rustc_hir::def::Res;
+use rustc_macros::HashStable;
+use rustc_span::symbol::Ident;
+use rustc_span::Span;
+
+/// This structure is supposed to keep enough data to re-create `NameBinding`s for other crates
+/// during name resolution. Right now the bindings are not recreated entirely precisely, so we may
+/// need to add more data in the future to correctly support macros 2.0, for example.
+/// A module child can be either a proper item or a reexport (including private imports).
+/// In the case of a reexport, all the fields describe the reexport item itself, not what it refers to.
+#[derive(Copy, Clone, Debug, TyEncodable, TyDecodable, HashStable)]
+pub struct ModChild {
+ /// Name of the item.
+ pub ident: Ident,
+ /// Resolution result corresponding to the item.
+ /// Local variables cannot be exported, so this `Res` doesn't need the ID parameter.
+ pub res: Res<!>,
+ /// Visibility of the item.
+ pub vis: ty::Visibility,
+ /// Span of the item.
+ pub span: Span,
+ /// A proper `macro_rules` item (not a reexport).
+ pub macro_rules: bool,
+}
diff --git a/compiler/rustc_middle/src/middle/codegen_fn_attrs.rs b/compiler/rustc_middle/src/middle/codegen_fn_attrs.rs
new file mode 100644
index 000000000..45d33a165
--- /dev/null
+++ b/compiler/rustc_middle/src/middle/codegen_fn_attrs.rs
@@ -0,0 +1,146 @@
+use crate::mir::mono::Linkage;
+use rustc_attr::{InlineAttr, InstructionSetAttr, OptimizeAttr};
+use rustc_span::symbol::Symbol;
+use rustc_target::spec::SanitizerSet;
+
+#[derive(Clone, TyEncodable, TyDecodable, HashStable, Debug)]
+pub struct CodegenFnAttrs {
+ pub flags: CodegenFnAttrFlags,
+ /// Parsed representation of the `#[inline]` attribute
+ pub inline: InlineAttr,
+ /// Parsed representation of the `#[optimize]` attribute
+ pub optimize: OptimizeAttr,
+ /// The `#[export_name = "..."]` attribute, indicating a custom symbol a
+ /// function should be exported under
+ pub export_name: Option<Symbol>,
+ /// The `#[link_name = "..."]` attribute, indicating a custom symbol an
+ /// imported function should be imported as. Note that `export_name`
+ /// probably isn't set when this is set; this is for foreign items, while
+ /// `#[export_name]` is for Rust-defined functions.
+ pub link_name: Option<Symbol>,
+ /// The `#[link_ordinal = "..."]` attribute, indicating an ordinal an
+ /// imported function has in the dynamic library. Note that this must not
+ /// be set when `link_name` is set. This is for foreign items with the
+ /// "raw-dylib" kind.
+ pub link_ordinal: Option<u16>,
+ /// The `#[target_feature(enable = "...")]` attribute and the enabled
+ /// features (only enabled features are supported right now).
+ pub target_features: Vec<Symbol>,
+ /// The `#[linkage = "..."]` attribute and the value we found.
+ pub linkage: Option<Linkage>,
+ /// The `#[link_section = "..."]` attribute, or what executable section this
+ /// should be placed in.
+ pub link_section: Option<Symbol>,
+ /// The `#[no_sanitize(...)]` attribute. Indicates sanitizers for which
+ /// instrumentation should be disabled inside the annotated function.
+ pub no_sanitize: SanitizerSet,
+ /// The `#[instruction_set(set)]` attribute. Indicates whether the code should
+ /// be generated against a specific instruction set. Only usable on architectures which allow
+ /// switching between multiple instruction sets.
+ pub instruction_set: Option<InstructionSetAttr>,
+ /// The `#[repr(align(...))]` attribute. Indicates the alignment the
+ /// function should be emitted with.
+ pub alignment: Option<u32>,
+}
+
+bitflags! {
+ #[derive(TyEncodable, TyDecodable, HashStable)]
+ pub struct CodegenFnAttrFlags: u32 {
+ /// `#[cold]`: a hint to LLVM that this function, when called, is never on
+ /// the hot path.
+ const COLD = 1 << 0;
+ /// `#[rustc_allocator]`: a hint to LLVM that the pointer returned from this
+ /// function is never null and the function has no side effects other than allocating.
+ const ALLOCATOR = 1 << 1;
+ /// An indicator that the function will never unwind. Will become obsolete
+ /// once C-unwind is fully stabilized.
+ const NEVER_UNWIND = 1 << 3;
+ /// `#[naked]`: an indicator to LLVM that no function prologue/epilogue
+ /// should be generated.
+ const NAKED = 1 << 4;
+ /// `#[no_mangle]`: an indicator that the function's name should be the same
+ /// as its symbol.
+ const NO_MANGLE = 1 << 5;
+ /// `#[rustc_std_internal_symbol]`: an indicator that this symbol is a
+ /// "weird symbol" for the standard library in that it has slightly
+ /// different linkage, visibility, and reachability rules.
+ const RUSTC_STD_INTERNAL_SYMBOL = 1 << 6;
+ /// `#[thread_local]`: indicates a static is actually a thread local
+ /// piece of memory.
+ const THREAD_LOCAL = 1 << 8;
+ /// `#[used]`: indicates that LLVM can't eliminate this function (but the
+ /// linker can!).
+ const USED = 1 << 9;
+ /// `#[ffi_returns_twice]`: indicates that an extern function can return
+ /// multiple times.
+ const FFI_RETURNS_TWICE = 1 << 10;
+ /// `#[track_caller]`: allow access to the caller location
+ const TRACK_CALLER = 1 << 11;
+ /// #[ffi_pure]: applies clang's `pure` attribute to a foreign function
+ /// declaration.
+ const FFI_PURE = 1 << 12;
+ /// #[ffi_const]: applies clang's `const` attribute to a foreign function
+ /// declaration.
+ const FFI_CONST = 1 << 13;
+ /// #[cmse_nonsecure_entry]: with a TrustZone-M extension, declare a
+ /// function as an entry function from Non-Secure code.
+ const CMSE_NONSECURE_ENTRY = 1 << 14;
+ /// `#[no_coverage]`: indicates that the function should be ignored by
+ /// the MIR `InstrumentCoverage` pass and not added to the coverage map
+ /// during codegen.
+ const NO_COVERAGE = 1 << 15;
+ /// `#[used(linker)]`: indicates that neither LLVM nor the linker may eliminate this function.
+ const USED_LINKER = 1 << 16;
+ /// `#[rustc_deallocator]`: a hint to LLVM that the function only deallocates memory.
+ const DEALLOCATOR = 1 << 17;
+ /// `#[rustc_reallocator]`: a hint to LLVM that the function only reallocates memory.
+ const REALLOCATOR = 1 << 18;
+ /// `#[rustc_allocator_zeroed]`: a hint to LLVM that the function only allocates zeroed memory.
+ const ALLOCATOR_ZEROED = 1 << 19;
+ }
+}
+
+impl CodegenFnAttrs {
+ pub const EMPTY: &'static Self = &Self::new();
+
+ pub const fn new() -> CodegenFnAttrs {
+ CodegenFnAttrs {
+ flags: CodegenFnAttrFlags::empty(),
+ inline: InlineAttr::None,
+ optimize: OptimizeAttr::None,
+ export_name: None,
+ link_name: None,
+ link_ordinal: None,
+ target_features: vec![],
+ linkage: None,
+ link_section: None,
+ no_sanitize: SanitizerSet::empty(),
+ instruction_set: None,
+ alignment: None,
+ }
+ }
+
+ /// Returns `true` if `#[inline]` or `#[inline(always)]` is present.
+ pub fn requests_inline(&self) -> bool {
+ match self.inline {
+ InlineAttr::Hint | InlineAttr::Always => true,
+ InlineAttr::None | InlineAttr::Never => false,
+ }
+ }
+
+ /// Returns `true` if it looks like this symbol needs to be exported, for example:
+ ///
+ /// * `#[no_mangle]` is present
+ /// * `#[export_name(...)]` is present
+ /// * `#[linkage]` is present
+ pub fn contains_extern_indicator(&self) -> bool {
+ self.flags.contains(CodegenFnAttrFlags::NO_MANGLE)
+ || self.export_name.is_some()
+ || match self.linkage {
+ // These are private, so make sure we don't try to consider
+ // them external.
+ None | Some(Linkage::Internal | Linkage::Private) => false,
+ Some(_) => true,
+ }
+ }
+}
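A sketch of how downstream code typically consults these attributes (the helper function is invented; `contains` is part of the `bitflags!`-generated API):

```rust
fn wants_custom_symbol(attrs: &CodegenFnAttrs) -> bool {
    // `#[no_mangle]` and `#[export_name]` both pin down the symbol name.
    attrs.flags.contains(CodegenFnAttrFlags::NO_MANGLE) || attrs.export_name.is_some()
}
```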
diff --git a/compiler/rustc_middle/src/middle/dependency_format.rs b/compiler/rustc_middle/src/middle/dependency_format.rs
new file mode 100644
index 000000000..e079843bf
--- /dev/null
+++ b/compiler/rustc_middle/src/middle/dependency_format.rs
@@ -0,0 +1,28 @@
+//! Type definitions for learning about the dependency formats of all upstream
+//! crates (rlibs/dylibs/oh my).
+//!
+//! For all the gory details, see the provider of the `dependency_formats`
+//! query.
+
+use rustc_session::config::CrateType;
+
+/// A list of dependencies for a certain crate type.
+///
+/// The length of this vector is the same as the number of external crates used.
+/// The value is `None` if the crate does not need to be linked (it was found
+/// statically in another dylib), or `Some(kind)` if it needs to be linked as
+/// `kind` (either static or dynamic).
+pub type DependencyList = Vec<Linkage>;
+
+/// A mapping of all required dependencies for a particular flavor of output.
+///
+/// This is local to the tcx, and is generally relevant to one session.
+pub type Dependencies = Vec<(CrateType, DependencyList)>;
+
+#[derive(Copy, Clone, PartialEq, Debug, HashStable, Encodable, Decodable)]
+pub enum Linkage {
+ NotLinked,
+ IncludedFromDylib,
+ Static,
+ Dynamic,
+}
diff --git a/compiler/rustc_middle/src/middle/exported_symbols.rs b/compiler/rustc_middle/src/middle/exported_symbols.rs
new file mode 100644
index 000000000..631fd09ec
--- /dev/null
+++ b/compiler/rustc_middle/src/middle/exported_symbols.rs
@@ -0,0 +1,72 @@
+use crate::ty::subst::SubstsRef;
+use crate::ty::{self, Ty, TyCtxt};
+use rustc_hir::def_id::{DefId, LOCAL_CRATE};
+use rustc_macros::HashStable;
+
+/// The `SymbolExportLevel` of a symbol specifies from which kinds of crates
+/// the symbol will be exported. `C` symbols will be exported from any
+/// kind of crate, including cdylibs, which export very few things.
+/// `Rust` symbols will only be exported if the crate produced is a Rust
+/// dylib.
+#[derive(Eq, PartialEq, Debug, Copy, Clone, TyEncodable, TyDecodable, HashStable)]
+pub enum SymbolExportLevel {
+ C,
+ Rust,
+}
+
+impl SymbolExportLevel {
+ pub fn is_below_threshold(self, threshold: SymbolExportLevel) -> bool {
+ threshold == SymbolExportLevel::Rust // export everything from Rust dylibs
+ || self == SymbolExportLevel::C
+ }
+}
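The threshold check is a two-by-two decision table; a hypothetical test (not present in the source) spells it out:

```rust
#[test]
fn export_threshold_table() {
    use SymbolExportLevel::*;
    assert!(C.is_below_threshold(C));       // C symbols are always exported
    assert!(C.is_below_threshold(Rust));
    assert!(!Rust.is_below_threshold(C));   // Rust symbols stay out of cdylib-style outputs
    assert!(Rust.is_below_threshold(Rust)); // Rust dylibs export everything
}
```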
+
+/// Kind of exported symbols.
+#[derive(Eq, PartialEq, Debug, Copy, Clone, Encodable, Decodable, HashStable)]
+pub enum SymbolExportKind {
+ Text,
+ Data,
+ Tls,
+}
+
+/// The `SymbolExportInfo` of a symbol specifies symbol-related information
+/// that is relevant to code generation and linking.
+#[derive(Eq, PartialEq, Debug, Copy, Clone, TyEncodable, TyDecodable, HashStable)]
+pub struct SymbolExportInfo {
+ pub level: SymbolExportLevel,
+ pub kind: SymbolExportKind,
+ pub used: bool,
+}
+
+#[derive(Eq, PartialEq, Debug, Copy, Clone, TyEncodable, TyDecodable, HashStable)]
+pub enum ExportedSymbol<'tcx> {
+ NonGeneric(DefId),
+ Generic(DefId, SubstsRef<'tcx>),
+ DropGlue(Ty<'tcx>),
+ NoDefId(ty::SymbolName<'tcx>),
+}
+
+impl<'tcx> ExportedSymbol<'tcx> {
+ /// This is the symbol name of an instance if it is instantiated in the
+ /// local crate.
+ pub fn symbol_name_for_local_instance(&self, tcx: TyCtxt<'tcx>) -> ty::SymbolName<'tcx> {
+ match *self {
+ ExportedSymbol::NonGeneric(def_id) => tcx.symbol_name(ty::Instance::mono(tcx, def_id)),
+ ExportedSymbol::Generic(def_id, substs) => {
+ tcx.symbol_name(ty::Instance::new(def_id, substs))
+ }
+ ExportedSymbol::DropGlue(ty) => {
+ tcx.symbol_name(ty::Instance::resolve_drop_in_place(tcx, ty))
+ }
+ ExportedSymbol::NoDefId(symbol_name) => symbol_name,
+ }
+ }
+}
+
+pub fn metadata_symbol_name(tcx: TyCtxt<'_>) -> String {
+ format!(
+ "rust_metadata_{}_{:08x}",
+ tcx.crate_name(LOCAL_CRATE),
+ tcx.sess.local_stable_crate_id().to_u64(),
+ )
+}
diff --git a/compiler/rustc_middle/src/middle/lang_items.rs b/compiler/rustc_middle/src/middle/lang_items.rs
new file mode 100644
index 000000000..cc9706f2d
--- /dev/null
+++ b/compiler/rustc_middle/src/middle/lang_items.rs
@@ -0,0 +1,61 @@
+//! Detecting language items.
+//!
+//! Language items are items that represent concepts intrinsic to the language
+//! itself. Examples are:
+//!
+//! * Traits that specify "kinds"; e.g., `Sync`, `Send`.
+//! * Traits that represent operators; e.g., `Add`, `Sub`, `Index`.
+//! * Functions called by the compiler itself.
+
+use crate::ty::{self, TyCtxt};
+
+use rustc_hir::def_id::DefId;
+use rustc_hir::LangItem;
+use rustc_span::Span;
+use rustc_target::spec::PanicStrategy;
+
+impl<'tcx> TyCtxt<'tcx> {
+ /// Returns the `DefId` for a given `LangItem`.
+ /// If not found, fatally aborts compilation.
+ pub fn require_lang_item(self, lang_item: LangItem, span: Option<Span>) -> DefId {
+ self.lang_items().require(lang_item).unwrap_or_else(|msg| {
+ if let Some(span) = span {
+ self.sess.span_fatal(span, &msg)
+ } else {
+ self.sess.fatal(&msg)
+ }
+ })
+ }
+
+ pub fn fn_trait_kind_from_lang_item(self, id: DefId) -> Option<ty::ClosureKind> {
+ let items = self.lang_items();
+ match Some(id) {
+ x if x == items.fn_trait() => Some(ty::ClosureKind::Fn),
+ x if x == items.fn_mut_trait() => Some(ty::ClosureKind::FnMut),
+ x if x == items.fn_once_trait() => Some(ty::ClosureKind::FnOnce),
+ _ => None,
+ }
+ }
+
+ pub fn is_weak_lang_item(self, item_def_id: DefId) -> bool {
+ self.lang_items().is_weak_lang_item(item_def_id)
+ }
+}
+
+/// Returns `true` if the specified `lang_item` must be present for this
+/// compilation.
+///
+/// Not all lang items are always required for each compilation, particularly in
+/// the case of panic=abort. In these situations some lang items are injected by
+/// crates and don't actually need to be defined in libstd.
+pub fn required(tcx: TyCtxt<'_>, lang_item: LangItem) -> bool {
+ // If we're not compiling with unwinding, we won't actually need these
+ // symbols. Other panic runtimes ensure that the relevant symbols are
+ // available to link things together, but they're never exercised.
+ match tcx.sess.panic_strategy() {
+ PanicStrategy::Abort => {
+ lang_item != LangItem::EhPersonality && lang_item != LangItem::EhCatchTypeinfo
+ }
+ PanicStrategy::Unwind => true,
+ }
+}
diff --git a/compiler/rustc_middle/src/middle/limits.rs b/compiler/rustc_middle/src/middle/limits.rs
new file mode 100644
index 000000000..acced0492
--- /dev/null
+++ b/compiler/rustc_middle/src/middle/limits.rs
@@ -0,0 +1,85 @@
+//! Registering limits:
+//! * recursion_limit,
+//! * move_size_limit,
+//! * type_length_limit, and
+//! * const_eval_limit
+//!
+//! There are various parts of the compiler that must impose arbitrary limits
+//! on how deeply they recurse to prevent stack overflow. Users can override
+//! this via an attribute on the crate like `#![recursion_limit="22"]`. This pass
+//! just peeks and looks for that attribute.
+
+use crate::bug;
+use crate::ty;
+use rustc_ast::Attribute;
+use rustc_session::Session;
+use rustc_session::{Limit, Limits};
+use rustc_span::symbol::{sym, Symbol};
+
+use std::num::IntErrorKind;
+
+pub fn provide(providers: &mut ty::query::Providers) {
+ providers.limits = |tcx, ()| Limits {
+ recursion_limit: get_recursion_limit(tcx.hir().krate_attrs(), tcx.sess),
+ move_size_limit: get_limit(
+ tcx.hir().krate_attrs(),
+ tcx.sess,
+ sym::move_size_limit,
+ tcx.sess.opts.unstable_opts.move_size_limit.unwrap_or(0),
+ ),
+ type_length_limit: get_limit(
+ tcx.hir().krate_attrs(),
+ tcx.sess,
+ sym::type_length_limit,
+ 1048576,
+ ),
+ const_eval_limit: get_limit(
+ tcx.hir().krate_attrs(),
+ tcx.sess,
+ sym::const_eval_limit,
+ 1_000_000,
+ ),
+ }
+}
+
+pub fn get_recursion_limit(krate_attrs: &[Attribute], sess: &Session) -> Limit {
+ get_limit(krate_attrs, sess, sym::recursion_limit, 128)
+}
+
+fn get_limit(krate_attrs: &[Attribute], sess: &Session, name: Symbol, default: usize) -> Limit {
+ for attr in krate_attrs {
+ if !attr.has_name(name) {
+ continue;
+ }
+
+ if let Some(s) = attr.value_str() {
+ match s.as_str().parse() {
+ Ok(n) => return Limit::new(n),
+ Err(e) => {
+ let mut err =
+ sess.struct_span_err(attr.span, "`limit` must be a non-negative integer");
+
+ let value_span = attr
+ .meta()
+ .and_then(|meta| meta.name_value_literal_span())
+ .unwrap_or(attr.span);
+
+ let error_str = match e.kind() {
+ IntErrorKind::PosOverflow => "`limit` is too large",
+ IntErrorKind::Empty => "`limit` must be a non-negative integer",
+ IntErrorKind::InvalidDigit => "not a valid integer",
+ IntErrorKind::NegOverflow => {
+ bug!("`limit` should never negatively overflow")
+ }
+ IntErrorKind::Zero => bug!("zero is a valid `limit`"),
+ kind => bug!("unimplemented IntErrorKind variant: {:?}", kind),
+ };
+
+ err.span_label(value_span, error_str);
+ err.emit();
+ }
+ }
+ }
+ }
+ return Limit::new(default);
+}
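For reference, these are the crate attributes the pass parses, set at the root of the crate being compiled, e.g.:

```rust
#![recursion_limit = "256"]
#![type_length_limit = "1048576"]
```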
diff --git a/compiler/rustc_middle/src/middle/mod.rs b/compiler/rustc_middle/src/middle/mod.rs
new file mode 100644
index 000000000..8dc68b1f5
--- /dev/null
+++ b/compiler/rustc_middle/src/middle/mod.rs
@@ -0,0 +1,37 @@
+pub mod codegen_fn_attrs;
+pub mod dependency_format;
+pub mod exported_symbols;
+pub mod lang_items;
+pub mod lib_features {
+ use rustc_data_structures::fx::FxHashMap;
+ use rustc_span::{symbol::Symbol, Span};
+
+ #[derive(HashStable, Debug)]
+ pub struct LibFeatures {
+ /// A map from feature to stabilisation version.
+ pub stable: FxHashMap<Symbol, (Symbol, Span)>,
+ pub unstable: FxHashMap<Symbol, Span>,
+ }
+
+ impl LibFeatures {
+ pub fn to_vec(&self) -> Vec<(Symbol, Option<Symbol>)> {
+ let mut all_features: Vec<_> = self
+ .stable
+ .iter()
+ .map(|(f, (s, _))| (*f, Some(*s)))
+ .chain(self.unstable.iter().map(|(f, _)| (*f, None)))
+ .collect();
+ all_features.sort_unstable_by(|a, b| a.0.as_str().partial_cmp(b.0.as_str()).unwrap());
+ all_features
+ }
+ }
+}
+pub mod limits;
+pub mod privacy;
+pub mod region;
+pub mod resolve_lifetime;
+pub mod stability;
+
+pub fn provide(providers: &mut crate::ty::query::Providers) {
+ limits::provide(providers);
+}
diff --git a/compiler/rustc_middle/src/middle/privacy.rs b/compiler/rustc_middle/src/middle/privacy.rs
new file mode 100644
index 000000000..751c7f464
--- /dev/null
+++ b/compiler/rustc_middle/src/middle/privacy.rs
@@ -0,0 +1,64 @@
+//! A pass that checks to make sure private fields and methods aren't used
+//! outside their scopes. This pass will also generate a set of exported items
+//! which are available for use externally when compiled as a library.
+
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_macros::HashStable;
+use rustc_query_system::ich::StableHashingContext;
+use rustc_span::def_id::LocalDefId;
+use std::hash::Hash;
+
+/// Represents the levels of accessibility an item can have.
+///
+/// The variants are sorted in ascending order of accessibility.
+#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, HashStable)]
+pub enum AccessLevel {
+ /// Superset of `AccessLevel::Reachable` used to mark `impl Trait` items.
+ ReachableFromImplTrait,
+ /// Exported items + items participating in various kinds of public interfaces,
+ /// but not directly nameable. For example, if function `fn f() -> T {...}` is
+ /// public, then type `T` is reachable. Its values can be obtained by other crates
+ /// even if the type itself is not nameable.
+ Reachable,
+ /// Public items + items accessible to other crates with the help of `pub use` re-exports.
+ Exported,
+ /// Items accessible to other crates directly, without the help of re-exports.
+ Public,
+}
+
+/// Holds a map of accessibility levels for reachable HIR nodes.
+#[derive(Debug, Clone)]
+pub struct AccessLevels<Id = LocalDefId> {
+ pub map: FxHashMap<Id, AccessLevel>,
+}
+
+impl<Id: Hash + Eq> AccessLevels<Id> {
+ /// See `AccessLevel::Reachable`.
+ pub fn is_reachable(&self, id: Id) -> bool {
+ self.map.get(&id) >= Some(&AccessLevel::Reachable)
+ }
+
+ /// See `AccessLevel::Exported`.
+ pub fn is_exported(&self, id: Id) -> bool {
+ self.map.get(&id) >= Some(&AccessLevel::Exported)
+ }
+
+ /// See `AccessLevel::Public`.
+ pub fn is_public(&self, id: Id) -> bool {
+ self.map.get(&id) >= Some(&AccessLevel::Public)
+ }
+}
+
+impl<Id> Default for AccessLevels<Id> {
+ fn default() -> Self {
+ AccessLevels { map: Default::default() }
+ }
+}
+
+impl<'a> HashStable<StableHashingContext<'a>> for AccessLevels {
+ fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+ let AccessLevels { ref map } = *self;
+ map.hash_stable(hcx, hasher);
+ }
+}
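The `>=` comparisons above rely entirely on the derived `Ord` (variants are declared in ascending accessibility) plus `Option`'s ordering, where `None` sorts below every `Some`. A hypothetical test (not in the source) spelling that out:

```rust
#[test]
fn access_level_ordering() {
    use AccessLevel::*;
    // Declaration order is ascending accessibility.
    assert!(ReachableFromImplTrait < Reachable);
    assert!(Reachable < Exported);
    assert!(Exported < Public);
    // An unmapped id yields `None`, which fails every `is_*` check.
    assert!(None::<&AccessLevel> < Some(&Reachable));
}
```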
diff --git a/compiler/rustc_middle/src/middle/region.rs b/compiler/rustc_middle/src/middle/region.rs
new file mode 100644
index 000000000..c886175c6
--- /dev/null
+++ b/compiler/rustc_middle/src/middle/region.rs
@@ -0,0 +1,443 @@
+//! This file declares the `ScopeTree` type, which describes
+//! the parent links in the region hierarchy.
+//!
+//! For more information about how MIR-based region-checking works,
+//! see the [rustc dev guide].
+//!
+//! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/borrow_check.html
+
+use crate::ty::TyCtxt;
+use rustc_data_structures::fx::{FxHashMap, FxIndexMap};
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_hir as hir;
+use rustc_hir::Node;
+use rustc_macros::HashStable;
+use rustc_query_system::ich::StableHashingContext;
+use rustc_span::{Span, DUMMY_SP};
+
+use std::fmt;
+use std::ops::Deref;
+
+/// Represents a statically-describable scope that can be used to
+/// bound the lifetime/region for values.
+///
+/// `Node(node_id)`: Any AST node that has any scope at all has the
+/// `Node(node_id)` scope. Other variants represent special cases not
+/// immediately derivable from the abstract syntax tree structure.
+///
+/// `DestructionScope(node_id)` represents the scope of destructors
+/// implicitly-attached to `node_id` that run immediately after the
+/// expression for `node_id` itself. Not every AST node carries a
+/// `DestructionScope`, but those that are `terminating_scopes` do;
+/// see discussion with `ScopeTree`.
+///
+/// `Remainder { block, statement_index }` represents
+/// the scope of user code running immediately after the initializer
+/// expression for the indexed statement, until the end of the block.
+///
+/// So: the following code can be broken down into the scopes beneath:
+///
+/// ```text
+/// let a = f().g( 'b: { let x = d(); let y = d(); x.h(y) } ) ;
+///
+/// +-+ (D12.)
+/// +-+ (D11.)
+/// +---------+ (R10.)
+/// +-+ (D9.)
+/// +----------+ (M8.)
+/// +----------------------+ (R7.)
+/// +-+ (D6.)
+/// +----------+ (M5.)
+/// +-----------------------------------+ (M4.)
+/// +--------------------------------------------------+ (M3.)
+/// +--+ (M2.)
+/// +-----------------------------------------------------------+ (M1.)
+///
+/// (M1.): Node scope of the whole `let a = ...;` statement.
+/// (M2.): Node scope of the `f()` expression.
+/// (M3.): Node scope of the `f().g(..)` expression.
+/// (M4.): Node scope of the block labeled `'b:`.
+/// (M5.): Node scope of the `let x = d();` statement
+/// (D6.): DestructionScope for temporaries created during M5.
+/// (R7.): Remainder scope for block `'b:`, stmt 0 (let x = ...).
+/// (M8.): Node scope of the `let y = d();` statement.
+/// (D9.): DestructionScope for temporaries created during M8.
+/// (R10.): Remainder scope for block `'b:`, stmt 1 (let y = ...).
+/// (D11.): DestructionScope for temporaries and bindings from block `'b:`.
+/// (D12.): DestructionScope for temporaries created during M1 (e.g., f()).
+/// ```
+///
+/// Note that while the above picture shows the destruction scopes
+/// as following their corresponding node scopes, in the internal
+/// data structures of the compiler the destruction scopes are
+/// represented as enclosing parents. This is sound because we use the
+/// enclosing parent relationship just to ensure that referenced
+/// values live long enough; phrased another way, the starting point
+/// of each range is not really the important thing in the above
+/// picture, but rather the ending point.
+//
+// FIXME(pnkfelix): this currently derives `PartialOrd` and `Ord` to
+// placate the same deriving in `ty::FreeRegion`, but we may want to
+// actually attach a more meaningful ordering to scopes than the one
+// generated via deriving here.
+#[derive(Clone, PartialEq, PartialOrd, Eq, Ord, Hash, Copy, TyEncodable, TyDecodable)]
+#[derive(HashStable)]
+pub struct Scope {
+ pub id: hir::ItemLocalId,
+ pub data: ScopeData,
+}
+
+impl fmt::Debug for Scope {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self.data {
+ ScopeData::Node => write!(fmt, "Node({:?})", self.id),
+ ScopeData::CallSite => write!(fmt, "CallSite({:?})", self.id),
+ ScopeData::Arguments => write!(fmt, "Arguments({:?})", self.id),
+ ScopeData::Destruction => write!(fmt, "Destruction({:?})", self.id),
+ ScopeData::IfThen => write!(fmt, "IfThen({:?})", self.id),
+ ScopeData::Remainder(fsi) => write!(
+ fmt,
+ "Remainder {{ block: {:?}, first_statement_index: {}}}",
+ self.id,
+ fsi.as_u32(),
+ ),
+ }
+ }
+}
+
+#[derive(Clone, PartialEq, PartialOrd, Eq, Ord, Hash, Debug, Copy, TyEncodable, TyDecodable)]
+#[derive(HashStable)]
+pub enum ScopeData {
+ Node,
+
+ /// Scope of the call-site for a function or closure
+ /// (outlives the arguments as well as the body).
+ CallSite,
+
+ /// Scope of arguments passed to a function or closure
+ /// (they outlive its body).
+ Arguments,
+
+ /// Scope of destructors for temporaries of node-id.
+ Destruction,
+
+ /// Scope of the condition and then-block of an `if` expression.
+ /// Used for variables introduced in an if-let expression.
+ IfThen,
+
+ /// Scope following a `let id = expr;` binding in a block.
+ Remainder(FirstStatementIndex),
+}
+
+rustc_index::newtype_index! {
+ /// Represents a subscope of `block` for a binding that is introduced
+ /// by `block.stmts[first_statement_index]`. Such subscopes represent
+ /// a suffix of the block. Note that each subscope does not include
+ /// the initializer expression, if any, for the statement indexed by
+ /// `first_statement_index`.
+ ///
+ /// For example, given `{ let (a, b) = EXPR_1; let c = EXPR_2; ... }`:
+ ///
+ /// * The subscope with `first_statement_index == 0` is scope of both
+ /// `a` and `b`; it does not include EXPR_1, but does include
+ /// everything after that first `let`. (If you want a scope that
+ /// includes EXPR_1 as well, then do not use `Scope::Remainder`,
+ /// but instead another `Scope` that encompasses the whole block,
+ /// e.g., `Scope::Node`.)
+ ///
+ /// * The subscope with `first_statement_index == 1` is scope of `c`,
+ /// and thus does not include EXPR_2, but covers the `...`.
+ pub struct FirstStatementIndex {
+ derive [HashStable]
+ }
+}
+
+// compilation error if size of `ScopeData` is not the same as a `u32`
+static_assert_size!(ScopeData, 4);
+
+impl Scope {
+ /// Returns an item-local ID associated with this scope.
+ ///
+ /// N.B., likely to be replaced as API is refined; e.g., pnkfelix
+ /// anticipates `fn entry_node_id` and `fn each_exit_node_id`.
+ pub fn item_local_id(&self) -> hir::ItemLocalId {
+ self.id
+ }
+
+ pub fn hir_id(&self, scope_tree: &ScopeTree) -> Option<hir::HirId> {
+ scope_tree
+ .root_body
+ .map(|hir_id| hir::HirId { owner: hir_id.owner, local_id: self.item_local_id() })
+ }
+
+ /// Returns the span of this `Scope`. Note that in general the
+ /// returned span may not correspond to the span of any `NodeId` in
+ /// the AST.
+ pub fn span(&self, tcx: TyCtxt<'_>, scope_tree: &ScopeTree) -> Span {
+ let Some(hir_id) = self.hir_id(scope_tree) else {
+ return DUMMY_SP;
+ };
+ let span = tcx.hir().span(hir_id);
+ if let ScopeData::Remainder(first_statement_index) = self.data {
+ if let Node::Block(ref blk) = tcx.hir().get(hir_id) {
+ // Want span for scope starting after the
+ // indexed statement and ending at end of
+ // `blk`; reuse span of `blk` and shift `lo`
+ // forward to end of indexed statement.
+ //
+ // (This is the special case alluded to in the
+ // doc-comment for this method)
+
+ let stmt_span = blk.stmts[first_statement_index.index()].span;
+
+ // To avoid issues with macro-generated spans, the span
+ // of the statement must be nested in that of the block.
+ if span.lo() <= stmt_span.lo() && stmt_span.lo() <= span.hi() {
+ return span.with_lo(stmt_span.lo());
+ }
+ }
+ }
+ span
+ }
+}
+
+pub type ScopeDepth = u32;
+
+/// The region scope tree encodes information about region relationships.
+#[derive(TyEncodable, TyDecodable, Default, Debug)]
+pub struct ScopeTree {
+ /// If `Some`, this is the body that forms the root of this region hierarchy.
+ pub root_body: Option<hir::HirId>,
+
+ /// Maps from a scope ID to the enclosing scope id;
+ /// this usually corresponds to the lexical nesting, though
+ /// in the case of closures the parent scope is the innermost
+ /// conditional expression or repeating block. (Note that the
+ /// enclosing scope ID for the block associated with a closure is
+ /// the closure itself.)
+ pub parent_map: FxIndexMap<Scope, (Scope, ScopeDepth)>,
+
+ /// Maps from a variable or binding ID to the block in which that
+ /// variable is declared.
+ var_map: FxIndexMap<hir::ItemLocalId, Scope>,
+
+ /// Maps from a `NodeId` to the associated destruction scope (if any).
+ destruction_scopes: FxIndexMap<hir::ItemLocalId, Scope>,
+
+ /// Identifies expressions which, if captured into a temporary, ought to
+ /// have a temporary whose lifetime extends to the end of the enclosing *block*,
+ /// and not the enclosing *statement*. Expressions that are not present in this
+ /// table are not rvalue candidates. The set of rvalue candidates is computed
+ /// during type check based on a traversal of the AST.
+ pub rvalue_candidates: FxHashMap<hir::HirId, RvalueCandidateType>,
+
+ /// If there are any `yield` expressions nested within a scope, this map
+ /// stores the `Span` of the last one and its index in the
+ /// postorder of the Visitor traversal on the HIR.
+ ///
+ /// HIR Visitor postorder indexes might seem like a peculiar
+ /// thing to care about, but it turns out that HIR bindings
+ /// and the temporary results of HIR expressions are never
+ /// storage-live at the end of HIR nodes with postorder indexes
+ /// lower than theirs, and therefore don't need to be suspended
+ /// at yield-points at these indexes.
+ ///
+ /// For an example, suppose we have some code such as:
+ /// ```rust,ignore (example)
+ /// foo(f(), yield y, bar(g()))
+ /// ```
+ ///
+ /// With the HIR tree (calls numbered for expository purposes)
+ ///
+ /// ```text
+ /// Call#0(foo, [Call#1(f), Yield(y), Call#2(bar, Call#3(g))])
+ /// ```
+ ///
+ /// Obviously, the result of `f()` was created before the yield
+ /// (and therefore needs to be kept valid over the yield) while
+ /// the result of `g()` occurs after the yield (and therefore
+ /// doesn't). If we want to infer that, we can look at the
+ /// postorder traversal:
+ /// ```plain,ignore
+ /// `foo` `f` Call#1 `y` Yield `bar` `g` Call#3 Call#2 Call#0
+ /// ```
+ ///
+ /// In which we can easily see that `Call#1` occurs before the yield,
+ /// and `Call#3` after it.
+ ///
+ /// To see that this method works, consider:
+ ///
+ /// Let `D` be our binding/temporary and `U` be our other HIR node, with
+ /// `HIR-postorder(U) < HIR-postorder(D)`. Suppose, as in our example,
+ /// U is the yield and D is one of the calls.
+ /// Let's show that `D` is storage-dead at `U`.
+ ///
+ /// Remember that storage-live/storage-dead refers to the state of
+ /// the *storage*, and does not consider moves/drop flags.
+ ///
+ /// Then:
+ ///
+ /// 1. From the ordering guarantee of HIR visitors (see
+ /// `rustc_hir::intravisit`), `D` does not dominate `U`.
+ ///
+ /// 2. Therefore, `D` is *potentially* storage-dead at `U` (because
+ /// we might visit `U` without ever getting to `D`).
+ ///
+ /// 3. However, we guarantee that at each HIR point, each
+ /// binding/temporary is always either always storage-live
+ /// or always storage-dead. This is what is being guaranteed
+ /// by `terminating_scopes` including all blocks where the
+ /// count of executions is not guaranteed.
+ ///
+ /// 4. By `2.` and `3.`, `D` is *statically* storage-dead at `U`,
+ /// QED.
+ ///
+ /// This property ought not to depend on (3) in an essential way -- it
+ /// is probably still correct even if we have "unrestricted" terminating
+ /// scopes. However, why use the complicated proof when a simple one
+ /// works?
+ ///
+ /// A subtle thing: `box` expressions, such as `box (&x, yield 2, &y)`. It
+ /// might seem that a `box` expression creates a `Box<T>` temporary
+ /// when it *starts* executing, at `HIR-preorder(BOX-EXPR)`. That might
+ /// be true in the MIR desugaring, but it is not important in the semantics.
+ ///
+ /// The reason is that semantically, until the `box` expression returns,
+ /// the values are still owned by their containing expressions. So
+ /// we'll see that `&x` is still owned by its containing expression at the yield.
+ pub yield_in_scope: FxHashMap<Scope, Vec<YieldData>>,
+
+ /// The number of visit_expr and visit_pat calls done in the body.
+ /// Used to sanity check visit_expr/visit_pat call count when
+ /// calculating generator interiors.
+ pub body_expr_count: FxHashMap<hir::BodyId, usize>,
+}
+
+/// Identifies the reason that a given expression is an rvalue candidate
+/// (see the `rvalue_candidates` field for more information on rvalue
+/// candidates in general). In constants, the `lifetime` field is `None`
+/// to indicate that certain expressions escape into 'static and
+/// should have no local cleanup scope.
+#[derive(Debug, Copy, Clone, TyEncodable, TyDecodable, HashStable)]
+pub enum RvalueCandidateType {
+ Borrow { target: hir::ItemLocalId, lifetime: Option<Scope> },
+ Pattern { target: hir::ItemLocalId, lifetime: Option<Scope> },
+}
+
+#[derive(Debug, Copy, Clone, TyEncodable, TyDecodable, HashStable)]
+pub struct YieldData {
+ /// The `Span` of the yield.
+ pub span: Span,
+ /// The number of expressions and patterns appearing before the `yield` in the body, plus one.
+ pub expr_and_pat_count: usize,
+ pub source: hir::YieldSource,
+}
+
+impl ScopeTree {
+ pub fn record_scope_parent(&mut self, child: Scope, parent: Option<(Scope, ScopeDepth)>) {
+ debug!("{:?}.parent = {:?}", child, parent);
+
+ if let Some(p) = parent {
+ let prev = self.parent_map.insert(child, p);
+ assert!(prev.is_none());
+ }
+
+ // Record the destruction scopes for later so we can query them.
+ if let ScopeData::Destruction = child.data {
+ self.destruction_scopes.insert(child.item_local_id(), child);
+ }
+ }
+
+ pub fn opt_destruction_scope(&self, n: hir::ItemLocalId) -> Option<Scope> {
+ self.destruction_scopes.get(&n).cloned()
+ }
+
+ pub fn record_var_scope(&mut self, var: hir::ItemLocalId, lifetime: Scope) {
+ debug!("record_var_scope(sub={:?}, sup={:?})", var, lifetime);
+ assert!(var != lifetime.item_local_id());
+ self.var_map.insert(var, lifetime);
+ }
+
+ pub fn record_rvalue_candidate(
+ &mut self,
+ var: hir::HirId,
+ candidate_type: RvalueCandidateType,
+ ) {
+ debug!("record_rvalue_candidate(var={var:?}, type={candidate_type:?})");
+ match &candidate_type {
+ RvalueCandidateType::Borrow { lifetime: Some(lifetime), .. }
+ | RvalueCandidateType::Pattern { lifetime: Some(lifetime), .. } => {
+ assert!(var.local_id != lifetime.item_local_id())
+ }
+ _ => {}
+ }
+ self.rvalue_candidates.insert(var, candidate_type);
+ }
+
+ /// Returns the narrowest scope that encloses `id`, if any.
+ pub fn opt_encl_scope(&self, id: Scope) -> Option<Scope> {
+ self.parent_map.get(&id).cloned().map(|(p, _)| p)
+ }
+
+ /// Returns the lifetime of the local variable `var_id`, if any.
+ pub fn var_scope(&self, var_id: hir::ItemLocalId) -> Option<Scope> {
+ self.var_map.get(&var_id).cloned()
+ }
+
+ /// Returns `true` if `subscope` is equal to or is lexically nested inside `superscope`, and
+ /// `false` otherwise.
+ ///
+ /// Used by clippy.
+ pub fn is_subscope_of(&self, subscope: Scope, superscope: Scope) -> bool {
+ let mut s = subscope;
+ debug!("is_subscope_of({:?}, {:?})", subscope, superscope);
+ while superscope != s {
+ match self.opt_encl_scope(s) {
+ None => {
+ debug!("is_subscope_of({:?}, {:?}, s={:?})=false", subscope, superscope, s);
+ return false;
+ }
+ Some(scope) => s = scope,
+ }
+ }
+
+ debug!("is_subscope_of({:?}, {:?})=true", subscope, superscope);
+
+ true
+ }
+
+ /// Checks whether the given scope contains a `yield`. If so, returns
+ /// `Some` with the `YieldData` for each `yield` within; if not, returns `None`.
+ pub fn yield_in_scope(&self, scope: Scope) -> Option<&[YieldData]> {
+ self.yield_in_scope.get(&scope).map(Deref::deref)
+ }
+
+ /// Gives the number of expressions visited in a body.
+ /// Used to sanity check visit_expr call count when
+ /// calculating generator interiors.
+ pub fn body_expr_count(&self, body_id: hir::BodyId) -> Option<usize> {
+ self.body_expr_count.get(&body_id).copied()
+ }
+}
+
+impl<'a> HashStable<StableHashingContext<'a>> for ScopeTree {
+ fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+ let ScopeTree {
+ root_body,
+ ref body_expr_count,
+ ref parent_map,
+ ref var_map,
+ ref destruction_scopes,
+ ref rvalue_candidates,
+ ref yield_in_scope,
+ } = *self;
+
+ root_body.hash_stable(hcx, hasher);
+ body_expr_count.hash_stable(hcx, hasher);
+ parent_map.hash_stable(hcx, hasher);
+ var_map.hash_stable(hcx, hasher);
+ destruction_scopes.hash_stable(hcx, hasher);
+ rvalue_candidates.hash_stable(hcx, hasher);
+ yield_in_scope.hash_stable(hcx, hasher);
+ }
+}
diff --git a/compiler/rustc_middle/src/middle/resolve_lifetime.rs b/compiler/rustc_middle/src/middle/resolve_lifetime.rs
new file mode 100644
index 000000000..9b2f44567
--- /dev/null
+++ b/compiler/rustc_middle/src/middle/resolve_lifetime.rs
@@ -0,0 +1,54 @@
+//! Name resolution for lifetimes: type declarations.
+
+use crate::ty;
+
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_hir::ItemLocalId;
+use rustc_macros::HashStable;
+
+#[derive(Clone, Copy, PartialEq, Eq, Hash, TyEncodable, TyDecodable, Debug, HashStable)]
+pub enum Region {
+ Static,
+ EarlyBound(/* index */ u32, /* lifetime decl */ DefId),
+ LateBound(ty::DebruijnIndex, /* late-bound index */ u32, /* lifetime decl */ DefId),
+ Free(DefId, /* lifetime decl */ DefId),
+}
+
+/// A set containing, at most, one known element.
+/// If two distinct values are inserted into the set, then it
+/// becomes `Many`, which can be used to detect ambiguities.
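+///
+/// A minimal sketch of the insertion semantics (illustrative example, not
+/// from the original source):
+/// ```rust,ignore (illustrative)
+/// let mut s = Set1::Empty;
+/// s.insert(1); // Set1::One(1)
+/// s.insert(1); // unchanged: the same value again
+/// s.insert(2); // Set1::Many: two distinct values were seen
+/// ```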
+#[derive(Copy, Clone, PartialEq, Eq, TyEncodable, TyDecodable, Debug, HashStable)]
+pub enum Set1<T> {
+ Empty,
+ One(T),
+ Many,
+}
+
+impl<T: PartialEq> Set1<T> {
+ pub fn insert(&mut self, value: T) {
+ *self = match self {
+ Set1::Empty => Set1::One(value),
+ Set1::One(old) if *old == value => return,
+ _ => Set1::Many,
+ };
+ }
+}
+
+pub type ObjectLifetimeDefault = Set1<Region>;
+
+/// Maps the id of each lifetime reference to the lifetime decl
+/// that it corresponds to.
+#[derive(Default, HashStable, Debug)]
+pub struct ResolveLifetimes {
+ /// Maps from every use of a named (not anonymous) lifetime to a
+ /// `Region` describing how that region is bound.
+ pub defs: FxHashMap<LocalDefId, FxHashMap<ItemLocalId, Region>>,
+
+ /// Set of lifetime def ids that are late-bound; a region can
+ /// be late-bound if (a) it does NOT appear in a where-clause and
+ /// (b) it DOES appear in the arguments.
+ pub late_bound: FxHashMap<LocalDefId, FxHashSet<LocalDefId>>,
+
+ pub late_bound_vars: FxHashMap<LocalDefId, FxHashMap<ItemLocalId, Vec<ty::BoundVariableKind>>>,
+}
diff --git a/compiler/rustc_middle/src/middle/stability.rs b/compiler/rustc_middle/src/middle/stability.rs
new file mode 100644
index 000000000..414912dd0
--- /dev/null
+++ b/compiler/rustc_middle/src/middle/stability.rs
@@ -0,0 +1,591 @@
+//! A pass that annotates every item and method with its stability level,
+//! propagating default levels lexically from parent to child AST nodes.
+
+pub use self::StabilityLevel::*;
+
+use crate::ty::{self, DefIdTree, TyCtxt};
+use rustc_ast::NodeId;
+use rustc_attr::{self as attr, ConstStability, Deprecation, Stability};
+use rustc_data_structures::fx::FxHashMap;
+use rustc_errors::{Applicability, Diagnostic};
+use rustc_feature::GateIssue;
+use rustc_hir::def::DefKind;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_hir::{self as hir, HirId};
+use rustc_middle::ty::print::with_no_trimmed_paths;
+use rustc_session::lint::builtin::{DEPRECATED, DEPRECATED_IN_FUTURE, SOFT_UNSTABLE};
+use rustc_session::lint::{BuiltinLintDiagnostics, Level, Lint, LintBuffer};
+use rustc_session::parse::feature_err_issue;
+use rustc_session::Session;
+use rustc_span::symbol::{sym, Symbol};
+use rustc_span::Span;
+use std::num::NonZeroU32;
+
+#[derive(PartialEq, Clone, Copy, Debug)]
+pub enum StabilityLevel {
+ Unstable,
+ Stable,
+}
+
+/// An entry in the `depr_map`.
+#[derive(Copy, Clone, HashStable, Debug, Encodable, Decodable)]
+pub struct DeprecationEntry {
+ /// The metadata of the attribute associated with this entry.
+ pub attr: Deprecation,
+ /// The `DefId` where the attr was originally attached. `None` for non-local
+ /// `DefId`s.
+ origin: Option<LocalDefId>,
+}
+
+impl DeprecationEntry {
+ pub fn local(attr: Deprecation, def_id: LocalDefId) -> DeprecationEntry {
+ DeprecationEntry { attr, origin: Some(def_id) }
+ }
+
+ pub fn external(attr: Deprecation) -> DeprecationEntry {
+ DeprecationEntry { attr, origin: None }
+ }
+
+ pub fn same_origin(&self, other: &DeprecationEntry) -> bool {
+ match (self.origin, other.origin) {
+ (Some(o1), Some(o2)) => o1 == o2,
+ _ => false,
+ }
+ }
+}
+
+/// A stability index, giving the stability level for items and methods.
+#[derive(HashStable, Debug)]
+pub struct Index {
+ /// This is mostly a cache, except the stabilities of local items
+ /// are filled by the annotator.
+ pub stab_map: FxHashMap<LocalDefId, Stability>,
+ pub const_stab_map: FxHashMap<LocalDefId, ConstStability>,
+ pub depr_map: FxHashMap<LocalDefId, DeprecationEntry>,
+ /// Mapping from feature name to feature name based on the `implied_by` field of `#[unstable]`
+ /// attributes. If a `#[unstable(feature = "implier", implied_by = "impliee")]` attribute
+ /// exists, then this map will have a `impliee -> implier` entry.
+ ///
+ /// This mapping would be unnecessary if both the `#[stable]` and `#[unstable]` attributes
+ /// specified their implications (both `implies` and `implied_by`). Since only one of the two
+ /// attributes does (in the current implementation, `implied_by` in `#[unstable]`), this
+ /// mapping is needed for diagnostics. When an "unnecessary feature attribute" error is
+ /// reported, only the `#[stable]` attribute information is available, so the map is needed
+ /// to know that the feature implies another feature. If it were reversed, and the `#[stable]`
+ /// attribute had an `implies` meta item, then a map would be needed when avoiding a "use of
+ /// unstable feature" error for a feature that was implied.
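+ ///
+ /// For example, `#[unstable(feature = "new_feat", implied_by = "old_feat")]`
+ /// (hypothetical feature names) would produce an `old_feat -> new_feat` entry here.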
+ pub implications: FxHashMap<Symbol, Symbol>,
+}
+
+impl Index {
+ pub fn local_stability(&self, def_id: LocalDefId) -> Option<Stability> {
+ self.stab_map.get(&def_id).copied()
+ }
+
+ pub fn local_const_stability(&self, def_id: LocalDefId) -> Option<ConstStability> {
+ self.const_stab_map.get(&def_id).copied()
+ }
+
+ pub fn local_deprecation_entry(&self, def_id: LocalDefId) -> Option<DeprecationEntry> {
+ self.depr_map.get(&def_id).cloned()
+ }
+}
+
+pub fn report_unstable(
+ sess: &Session,
+ feature: Symbol,
+ reason: Option<Symbol>,
+ issue: Option<NonZeroU32>,
+ suggestion: Option<(Span, String, String, Applicability)>,
+ is_soft: bool,
+ span: Span,
+ soft_handler: impl FnOnce(&'static Lint, Span, &str),
+) {
+ let msg = match reason {
+ Some(r) => format!("use of unstable library feature '{}': {}", feature, r),
+ None => format!("use of unstable library feature '{}'", &feature),
+ };
+
+ if is_soft {
+ soft_handler(SOFT_UNSTABLE, span, &msg)
+ } else {
+ let mut err =
+ feature_err_issue(&sess.parse_sess, feature, span, GateIssue::Library(issue), &msg);
+ if let Some((inner_types, ref msg, sugg, applicability)) = suggestion {
+ err.span_suggestion(inner_types, msg, sugg, applicability);
+ }
+ err.emit();
+ }
+}
+
+/// Checks whether an item marked with `deprecated(since="X")` is currently
+/// deprecated (i.e., whether X is not greater than the current rustc version).
+pub fn deprecation_in_effect(depr: &Deprecation) -> bool {
+ let is_since_rustc_version = depr.is_since_rustc_version;
+ let since = depr.since.as_ref().map(Symbol::as_str);
+
+ fn parse_version(ver: &str) -> Vec<u32> {
+ // We ignore non-integer components of the version (e.g., "nightly").
+ ver.split(|c| c == '.' || c == '-').flat_map(|s| s.parse()).collect()
+ }
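+ // E.g., parse_version("1.63.0") yields [1, 63, 0]; `Vec<u32>` comparison is
+ // lexicographic, so [1, 62, 0] <= [1, 63, 0] holds as expected.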
+
+ if !is_since_rustc_version {
+ // The `since` field doesn't have semantic purpose without `#![staged_api]`.
+ return true;
+ }
+
+ if let Some(since) = since {
+ if since == "TBD" {
+ return false;
+ }
+
+ if let Some(rustc) = option_env!("CFG_RELEASE") {
+ let since: Vec<u32> = parse_version(&since);
+ let rustc: Vec<u32> = parse_version(rustc);
+ // We simply treat invalid `since` attributes as relating to a previous
+ // Rust version, thus always displaying the warning.
+ if since.len() != 3 {
+ return true;
+ }
+ return since <= rustc;
+ }
+ };
+
+ // Assume deprecation is in effect if "since" field is missing
+ // or if we can't determine the current Rust version.
+ true
+}
+
+pub fn deprecation_suggestion(
+ diag: &mut Diagnostic,
+ kind: &str,
+ suggestion: Option<Symbol>,
+ span: Span,
+) {
+ if let Some(suggestion) = suggestion {
+ diag.span_suggestion_verbose(
+ span,
+ &format!("replace the use of the deprecated {}", kind),
+ suggestion,
+ Applicability::MachineApplicable,
+ );
+ }
+}
+
+fn deprecation_lint(is_in_effect: bool) -> &'static Lint {
+ if is_in_effect { DEPRECATED } else { DEPRECATED_IN_FUTURE }
+}
+
+fn deprecation_message(
+ is_in_effect: bool,
+ since: Option<Symbol>,
+ note: Option<Symbol>,
+ kind: &str,
+ path: &str,
+) -> String {
+ let message = if is_in_effect {
+ format!("use of deprecated {} `{}`", kind, path)
+ } else {
+ let since = since.as_ref().map(Symbol::as_str);
+
+ if since == Some("TBD") {
+ format!("use of {} `{}` that will be deprecated in a future Rust version", kind, path)
+ } else {
+ format!(
+ "use of {} `{}` that will be deprecated in future version {}",
+ kind,
+ path,
+ since.unwrap()
+ )
+ }
+ };
+
+ match note {
+ Some(reason) => format!("{}: {}", message, reason),
+ None => message,
+ }
+}
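+// E.g., an in-effect deprecation of a function `foo::bar` with the note "use `baz`
+// instead" produces: "use of deprecated function `foo::bar`: use `baz` instead".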
+
+pub fn deprecation_message_and_lint(
+ depr: &Deprecation,
+ kind: &str,
+ path: &str,
+) -> (String, &'static Lint) {
+ let is_in_effect = deprecation_in_effect(depr);
+ (
+ deprecation_message(is_in_effect, depr.since, depr.note, kind, path),
+ deprecation_lint(is_in_effect),
+ )
+}
+
+pub fn early_report_deprecation<'a>(
+ lint_buffer: &'a mut LintBuffer,
+ message: &str,
+ suggestion: Option<Symbol>,
+ lint: &'static Lint,
+ span: Span,
+ node_id: NodeId,
+) {
+ if span.in_derive_expansion() {
+ return;
+ }
+
+ let diag = BuiltinLintDiagnostics::DeprecatedMacro(suggestion, span);
+ lint_buffer.buffer_lint_with_diagnostic(lint, node_id, span, message, diag);
+}
+
+fn late_report_deprecation(
+ tcx: TyCtxt<'_>,
+ message: &str,
+ suggestion: Option<Symbol>,
+ lint: &'static Lint,
+ span: Span,
+ method_span: Option<Span>,
+ hir_id: HirId,
+ def_id: DefId,
+) {
+ if span.in_derive_expansion() {
+ return;
+ }
+ let method_span = method_span.unwrap_or(span);
+ tcx.struct_span_lint_hir(lint, hir_id, method_span, |lint| {
+ let mut diag = lint.build(message);
+ if let hir::Node::Expr(_) = tcx.hir().get(hir_id) {
+ let kind = tcx.def_kind(def_id).descr(def_id);
+ deprecation_suggestion(&mut diag, kind, suggestion, method_span);
+ }
+ diag.emit();
+ });
+}
+
+/// Result of `TyCtxt::eval_stability`.
+pub enum EvalResult {
+ /// We can use the item because it is stable or we provided the
+ /// corresponding feature gate.
+ Allow,
+ /// We cannot use the item because it is unstable and we did not provide the
+ /// corresponding feature gate.
+ Deny {
+ feature: Symbol,
+ reason: Option<Symbol>,
+ issue: Option<NonZeroU32>,
+ suggestion: Option<(Span, String, String, Applicability)>,
+ is_soft: bool,
+ },
+ /// The item does not have the `#[stable]` or `#[unstable]` marker assigned.
+ Unmarked,
+}
+
+// See issue #38412.
+fn skip_stability_check_due_to_privacy(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
+ if tcx.def_kind(def_id) == DefKind::TyParam {
+ // Type parameters have no visibility; they are considered public for the purpose of this check.
+ return false;
+ }
+ match tcx.visibility(def_id) {
+ // Must check stability for `pub` items.
+ ty::Visibility::Public => false,
+
+ // These are not visible outside the crate; therefore
+ // stability markers are irrelevant, if even present.
+ ty::Visibility::Restricted(..) | ty::Visibility::Invisible => true,
+ }
+}
+
+// See issue #83250.
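+// E.g., a user who writes `Vec<u32, u32>` (probably intending `Vec<(u32, u32)>`)
+// hits the unstable `allocator_api` type parameter; we suggest wrapping the
+// inner types in a tuple.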
+fn suggestion_for_allocator_api(
+ tcx: TyCtxt<'_>,
+ def_id: DefId,
+ span: Span,
+ feature: Symbol,
+) -> Option<(Span, String, String, Applicability)> {
+ if feature == sym::allocator_api {
+ if let Some(trait_) = tcx.opt_parent(def_id) {
+ if tcx.is_diagnostic_item(sym::Vec, trait_) {
+ let sm = tcx.sess.parse_sess.source_map();
+ let inner_types = sm.span_extend_to_prev_char(span, '<', true);
+ if let Ok(snippet) = sm.span_to_snippet(inner_types) {
+ return Some((
+ inner_types,
+ "consider wrapping the inner types in tuple".to_string(),
+ format!("({})", snippet),
+ Applicability::MaybeIncorrect,
+ ));
+ }
+ }
+ }
+ }
+ None
+}
+
+/// An override option for eval_stability.
+pub enum AllowUnstable {
+ /// Don't emit an unstable error for the item
+ Yes,
+ /// Handle the item normally
+ No,
+}
+
+impl<'tcx> TyCtxt<'tcx> {
+ /// Evaluates the stability of an item.
+ ///
+ /// Returns `EvalResult::Allow` if the item is stable, or unstable but the corresponding
+ /// `#![feature]` has been provided. Otherwise, returns `EvalResult::Deny`, which
+ /// describes the offending unstable feature.
+ ///
+ /// If `id` is `Some(_)`, this function will also check if the item at `def_id` has been
+ /// deprecated. If the item is indeed deprecated, we will emit a deprecation lint attached to
+ /// `id`.
+ pub fn eval_stability(
+ self,
+ def_id: DefId,
+ id: Option<HirId>,
+ span: Span,
+ method_span: Option<Span>,
+ ) -> EvalResult {
+ self.eval_stability_allow_unstable(def_id, id, span, method_span, AllowUnstable::No)
+ }
+
+ /// Evaluates the stability of an item.
+ ///
+ /// Returns `EvalResult::Allow` if the item is stable, or unstable but the corresponding
+ /// `#![feature]` has been provided. Otherwise, returns `EvalResult::Deny`, which
+ /// describes the offending unstable feature.
+ ///
+ /// If `id` is `Some(_)`, this function will also check if the item at `def_id` has been
+ /// deprecated. If the item is indeed deprecated, we will emit a deprecation lint attached to
+ /// `id`.
+ ///
+ /// Pass `AllowUnstable::Yes` to `allow_unstable` to force an unstable item to be allowed. Deprecation warnings will be emitted normally.
+ pub fn eval_stability_allow_unstable(
+ self,
+ def_id: DefId,
+ id: Option<HirId>,
+ span: Span,
+ method_span: Option<Span>,
+ allow_unstable: AllowUnstable,
+ ) -> EvalResult {
+ // Deprecated attributes apply in-crate and cross-crate.
+ if let Some(id) = id {
+ if let Some(depr_entry) = self.lookup_deprecation_entry(def_id) {
+ let parent_def_id = self.hir().get_parent_item(id);
+ let skip = self
+ .lookup_deprecation_entry(parent_def_id.to_def_id())
+ .map_or(false, |parent_depr| parent_depr.same_origin(&depr_entry));
+
+ // #[deprecated] doesn't emit a notice if we're not on the
+ // topmost deprecation. For example, if a struct is deprecated,
+ // the use of a field won't be linted.
+ //
+ // With #![staged_api], we want to emit the lint down the
+ // whole hierarchy.
+ let depr_attr = &depr_entry.attr;
+ if !skip || depr_attr.is_since_rustc_version {
+ // Calculating the message for the lint involves calling `self.def_path_str`,
+ // which by default invokes the expensive `visible_parent_map` query to
+ // compute the visible path. So we skip the message calculation altogether
+ // if the lint is allowed.
+ let is_in_effect = deprecation_in_effect(depr_attr);
+ let lint = deprecation_lint(is_in_effect);
+ if self.lint_level_at_node(lint, id).0 != Level::Allow {
+ let def_path = with_no_trimmed_paths!(self.def_path_str(def_id));
+ let def_kind = self.def_kind(def_id).descr(def_id);
+
+ late_report_deprecation(
+ self,
+ &deprecation_message(
+ is_in_effect,
+ depr_attr.since,
+ depr_attr.note,
+ def_kind,
+ &def_path,
+ ),
+ depr_attr.suggestion,
+ lint,
+ span,
+ method_span,
+ id,
+ def_id,
+ );
+ }
+ }
+ };
+ }
+
+ let is_staged_api = self.lookup_stability(def_id.krate.as_def_id()).is_some();
+ if !is_staged_api {
+ return EvalResult::Allow;
+ }
+
+ let stability = self.lookup_stability(def_id);
+ debug!(
+ "stability: \
+ inspecting def_id={:?} span={:?} of stability={:?}",
+ def_id, span, stability
+ );
+
+ // Only the cross-crate scenario matters when checking unstable APIs
+ let cross_crate = !def_id.is_local();
+ if !cross_crate {
+ return EvalResult::Allow;
+ }
+
+ // Issue #38412: private items lack stability markers.
+ if skip_stability_check_due_to_privacy(self, def_id) {
+ return EvalResult::Allow;
+ }
+
+ match stability {
+ Some(Stability {
+ level: attr::Unstable { reason, issue, is_soft, implied_by },
+ feature,
+ ..
+ }) => {
+ if span.allows_unstable(feature) {
+ debug!("stability: skipping span={:?} since it is internal", span);
+ return EvalResult::Allow;
+ }
+ if self.features().active(feature) {
+ return EvalResult::Allow;
+ }
+
+ // If this item was previously part of a now-stabilized feature which is still
+ // active (i.e. the user hasn't removed the attribute for the stabilized feature
+ // yet) then allow use of this item.
+ if let Some(implied_by) = implied_by && self.features().active(implied_by) {
+ return EvalResult::Allow;
+ }
+
+ // When we're compiling the compiler itself we may pull in
+ // crates from crates.io, but those crates may depend on other
+ // crates also pulled in from crates.io. We want to ideally be
+ // able to compile everything without requiring upstream
+ // modifications, so in the case that this looks like a
+ // `rustc_private` crate (e.g., a compiler crate) and we also have
+ // the `-Z force-unstable-if-unmarked` flag present (we're
+ // compiling a compiler crate), then let this missing feature
+ // annotation slide.
+ if feature == sym::rustc_private && issue == NonZeroU32::new(27812) {
+ if self.sess.opts.unstable_opts.force_unstable_if_unmarked {
+ return EvalResult::Allow;
+ }
+ }
+
+ if matches!(allow_unstable, AllowUnstable::Yes) {
+ return EvalResult::Allow;
+ }
+
+ let suggestion = suggestion_for_allocator_api(self, def_id, span, feature);
+ EvalResult::Deny {
+ feature,
+ reason: reason.to_opt_reason(),
+ issue,
+ suggestion,
+ is_soft,
+ }
+ }
+ Some(_) => {
+ // Stable APIs are always ok to call and deprecated APIs are
+ // handled by the lint emitting logic above.
+ EvalResult::Allow
+ }
+ None => EvalResult::Unmarked,
+ }
+ }
+
+ /// Checks if an item is stable, erroring out otherwise.
+ ///
+ /// If the item defined by `def_id` is unstable and the corresponding `#![feature]` does not
+ /// exist, emits an error.
+ ///
+ /// This function will also check if the item is deprecated.
+ /// If so, and `id` is not `None`, a deprecated lint attached to `id` will be emitted.
+ ///
+ /// Returns `true` if the item is allowed, i.e., stable or unstable under an enabled feature.
+ pub fn check_stability(
+ self,
+ def_id: DefId,
+ id: Option<HirId>,
+ span: Span,
+ method_span: Option<Span>,
+ ) -> bool {
+ self.check_stability_allow_unstable(def_id, id, span, method_span, AllowUnstable::No)
+ }
+
+ /// Checks if an item is stable, erroring out otherwise.
+ ///
+ /// If the item defined by `def_id` is unstable and the corresponding `#![feature]` does not
+ /// exist, emits an error.
+ ///
+ /// This function will also check if the item is deprecated.
+ /// If so, and `id` is not `None`, a deprecated lint attached to `id` will be emitted.
+ ///
+ /// Pass `AllowUnstable::Yes` to `allow_unstable` to force an unstable item to be allowed. Deprecation warnings will be emitted normally.
+ ///
+ /// Returns `true` if the item is allowed, i.e., stable or unstable under an enabled feature.
+ pub fn check_stability_allow_unstable(
+ self,
+ def_id: DefId,
+ id: Option<HirId>,
+ span: Span,
+ method_span: Option<Span>,
+ allow_unstable: AllowUnstable,
+ ) -> bool {
+ self.check_optional_stability(
+ def_id,
+ id,
+ span,
+ method_span,
+ allow_unstable,
+ |span, def_id| {
+ // The API could be uncallable for other reasons, for example when a private module
+ // was referenced.
+ self.sess.delay_span_bug(span, &format!("encountered unmarked API: {:?}", def_id));
+ },
+ )
+ }
+
+ /// Like `check_stability`, except that we permit items to have custom behaviour for
+ /// missing stability attributes (not necessarily just emit a `bug!`). This is necessary
+ /// for default generic parameters, which only have stability attributes if they were
+ /// added after the type on which they're defined.
+ ///
+ /// Returns `true` if the item is allowed, i.e., stable or unstable under an enabled feature.
+ pub fn check_optional_stability(
+ self,
+ def_id: DefId,
+ id: Option<HirId>,
+ span: Span,
+ method_span: Option<Span>,
+ allow_unstable: AllowUnstable,
+ unmarked: impl FnOnce(Span, DefId),
+ ) -> bool {
+ let soft_handler = |lint, span, msg: &_| {
+ self.struct_span_lint_hir(lint, id.unwrap_or(hir::CRATE_HIR_ID), span, |lint| {
+ lint.build(msg).emit();
+ })
+ };
+ let eval_result =
+ self.eval_stability_allow_unstable(def_id, id, span, method_span, allow_unstable);
+ let is_allowed = matches!(eval_result, EvalResult::Allow);
+ match eval_result {
+ EvalResult::Allow => {}
+ EvalResult::Deny { feature, reason, issue, suggestion, is_soft } => report_unstable(
+ self.sess,
+ feature,
+ reason,
+ issue,
+ suggestion,
+ is_soft,
+ span,
+ soft_handler,
+ ),
+ EvalResult::Unmarked => unmarked(span, def_id),
+ }
+
+ is_allowed
+ }
+
+ pub fn lookup_deprecation(self, id: DefId) -> Option<Deprecation> {
+ self.lookup_deprecation_entry(id).map(|depr| depr.attr)
+ }
+}
diff --git a/compiler/rustc_middle/src/mir/basic_blocks.rs b/compiler/rustc_middle/src/mir/basic_blocks.rs
new file mode 100644
index 000000000..78080fcd5
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/basic_blocks.rs
@@ -0,0 +1,147 @@
+use crate::mir::graph_cyclic_cache::GraphIsCyclicCache;
+use crate::mir::predecessors::{PredecessorCache, Predecessors};
+use crate::mir::switch_sources::{SwitchSourceCache, SwitchSources};
+use crate::mir::traversal::PostorderCache;
+use crate::mir::{BasicBlock, BasicBlockData, Successors, START_BLOCK};
+
+use rustc_data_structures::graph;
+use rustc_data_structures::graph::dominators::{dominators, Dominators};
+use rustc_index::vec::IndexVec;
+
+#[derive(Clone, TyEncodable, TyDecodable, Debug, HashStable, TypeFoldable, TypeVisitable)]
+pub struct BasicBlocks<'tcx> {
+ basic_blocks: IndexVec<BasicBlock, BasicBlockData<'tcx>>,
+ predecessor_cache: PredecessorCache,
+ switch_source_cache: SwitchSourceCache,
+ is_cyclic: GraphIsCyclicCache,
+ postorder_cache: PostorderCache,
+}
+
+impl<'tcx> BasicBlocks<'tcx> {
+ #[inline]
+ pub fn new(basic_blocks: IndexVec<BasicBlock, BasicBlockData<'tcx>>) -> Self {
+ BasicBlocks {
+ basic_blocks,
+ predecessor_cache: PredecessorCache::new(),
+ switch_source_cache: SwitchSourceCache::new(),
+ is_cyclic: GraphIsCyclicCache::new(),
+ postorder_cache: PostorderCache::new(),
+ }
+ }
+
+ /// Returns `true` if the control-flow graph contains a cycle reachable from `START_BLOCK`.
+ #[inline]
+ pub fn is_cfg_cyclic(&self) -> bool {
+ self.is_cyclic.is_cyclic(self)
+ }
+
+ #[inline]
+ pub fn dominators(&self) -> Dominators<BasicBlock> {
+ dominators(&self)
+ }
+
+ /// Returns predecessors for each basic block.
+ #[inline]
+ pub fn predecessors(&self) -> &Predecessors {
+ self.predecessor_cache.compute(&self.basic_blocks)
+ }
+
+ /// Returns the basic blocks in postorder.
+ #[inline]
+ pub fn postorder(&self) -> &[BasicBlock] {
+ self.postorder_cache.compute(&self.basic_blocks)
+ }
+
+ /// `switch_sources()[&(target, switch)]` returns a list of switch
+ /// values that lead to a `target` block from a `switch` block.
+ #[inline]
+ pub fn switch_sources(&self) -> &SwitchSources {
+ self.switch_source_cache.compute(&self.basic_blocks)
+ }
+
+ /// Returns mutable reference to basic blocks. Invalidates CFG cache.
+ #[inline]
+ pub fn as_mut(&mut self) -> &mut IndexVec<BasicBlock, BasicBlockData<'tcx>> {
+ self.invalidate_cfg_cache();
+ &mut self.basic_blocks
+ }
+
+ /// Get mutable access to basic blocks without invalidating the CFG cache.
+ ///
+ /// By calling this method instead of e.g. [`BasicBlocks::as_mut`] you promise not to change
+ /// the CFG. This means that
+ ///
+ /// 1) The number of basic blocks remains unchanged.
+ /// 2) The set of successors of each terminator remains unchanged.
+ /// 3) For each `TerminatorKind::SwitchInt`, the `targets` remains the same and the terminator
+ /// kind is not changed.
+ ///
+ /// If any of these conditions cannot be upheld, you should call [`BasicBlocks::invalidate_cfg_cache`].
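+ ///
+ /// For example (a sketch, assuming a `blocks: &mut BasicBlocks<'_>` in scope),
+ /// rewriting statements in place leaves the CFG untouched:
+ /// ```rust,ignore (illustrative)
+ /// for data in blocks.as_mut_preserves_cfg().iter_mut() {
+ ///     for stmt in &mut data.statements {
+ ///         // Replacing a statement never touches a terminator, so the cached
+ ///         // predecessors, postorder, and cyclicity stay valid.
+ ///         let _ = stmt;
+ ///     }
+ /// }
+ /// ```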
+ #[inline]
+ pub fn as_mut_preserves_cfg(&mut self) -> &mut IndexVec<BasicBlock, BasicBlockData<'tcx>> {
+ &mut self.basic_blocks
+ }
+
+ /// Invalidates cached information about the CFG.
+ ///
+ /// You will only ever need this if you have also called [`BasicBlocks::as_mut_preserves_cfg`].
+ /// All other methods that allow you to mutate the basic blocks also call this method
+ /// themselves, thereby avoiding any risk of accidentally missing a cache invalidation.
+ pub fn invalidate_cfg_cache(&mut self) {
+ self.predecessor_cache.invalidate();
+ self.switch_source_cache.invalidate();
+ self.is_cyclic.invalidate();
+ self.postorder_cache.invalidate();
+ }
+}
+
+impl<'tcx> std::ops::Deref for BasicBlocks<'tcx> {
+ type Target = IndexVec<BasicBlock, BasicBlockData<'tcx>>;
+
+ #[inline]
+ fn deref(&self) -> &IndexVec<BasicBlock, BasicBlockData<'tcx>> {
+ &self.basic_blocks
+ }
+}
+
+impl<'tcx> graph::DirectedGraph for BasicBlocks<'tcx> {
+ type Node = BasicBlock;
+}
+
+impl<'tcx> graph::WithNumNodes for BasicBlocks<'tcx> {
+ #[inline]
+ fn num_nodes(&self) -> usize {
+ self.basic_blocks.len()
+ }
+}
+
+impl<'tcx> graph::WithStartNode for BasicBlocks<'tcx> {
+ #[inline]
+ fn start_node(&self) -> Self::Node {
+ START_BLOCK
+ }
+}
+
+impl<'tcx> graph::WithSuccessors for BasicBlocks<'tcx> {
+ #[inline]
+ fn successors(&self, node: Self::Node) -> <Self as graph::GraphSuccessors<'_>>::Iter {
+ self.basic_blocks[node].terminator().successors()
+ }
+}
+
+impl<'a, 'b> graph::GraphSuccessors<'b> for BasicBlocks<'a> {
+ type Item = BasicBlock;
+ type Iter = Successors<'b>;
+}
+
+impl<'tcx, 'graph> graph::GraphPredecessors<'graph> for BasicBlocks<'tcx> {
+ type Item = BasicBlock;
+ type Iter = std::iter::Copied<std::slice::Iter<'graph, BasicBlock>>;
+}
+
+impl<'tcx> graph::WithPredecessors for BasicBlocks<'tcx> {
+ #[inline]
+ fn predecessors(&self, node: Self::Node) -> <Self as graph::GraphPredecessors<'_>>::Iter {
+ self.predecessors()[node].iter().copied()
+ }
+}
diff --git a/compiler/rustc_middle/src/mir/coverage.rs b/compiler/rustc_middle/src/mir/coverage.rs
new file mode 100644
index 000000000..efa946452
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/coverage.rs
@@ -0,0 +1,186 @@
+//! Metadata from source code coverage analysis and instrumentation.
+
+use rustc_macros::HashStable;
+use rustc_span::Symbol;
+
+use std::cmp::Ord;
+use std::fmt::{self, Debug, Formatter};
+
+rustc_index::newtype_index! {
+ /// An ExpressionOperandId value is assigned directly from either a
+ /// CounterValueReference.as_u32() (which ascends from 1) or an ExpressionOperandId.as_u32()
+ /// (which *descends* from u32::MAX). Id value `0` (zero) represents a virtual counter with a
+ /// constant value of `0`.
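+ ///
+ /// For example, under this scheme operand id 1 refers to the first counter,
+ /// while operand id u32::MAX refers to the first injected expression.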
+ pub struct ExpressionOperandId {
+ derive [HashStable]
+ DEBUG_FORMAT = "ExpressionOperandId({})",
+ MAX = 0xFFFF_FFFF,
+ }
+}
+
+impl ExpressionOperandId {
+ /// An expression operand for a "zero counter", as described in the following references:
+ ///
+ /// * <https://github.com/rust-lang/llvm-project/blob/rustc/13.0-2021-09-30/llvm/docs/CoverageMappingFormat.rst#counter>
+ /// * <https://github.com/rust-lang/llvm-project/blob/rustc/13.0-2021-09-30/llvm/docs/CoverageMappingFormat.rst#tag>
+ /// * <https://github.com/rust-lang/llvm-project/blob/rustc/13.0-2021-09-30/llvm/docs/CoverageMappingFormat.rst#counter-expressions>
+ ///
+ /// This operand can be used to count two or more separate code regions with a single counter,
+ /// if they run sequentially with no branches, by injecting the `Counter` in a `BasicBlock` for
+ /// one of the code regions, and inserting `CounterExpression`s ("add ZERO to the counter") in
+ /// the coverage map for the other code regions.
+ pub const ZERO: Self = Self::from_u32(0);
+}
+
+rustc_index::newtype_index! {
+ pub struct CounterValueReference {
+ derive [HashStable]
+ DEBUG_FORMAT = "CounterValueReference({})",
+ MAX = 0xFFFF_FFFF,
+ }
+}
+
+impl CounterValueReference {
+ /// Counters start at 1 to reserve 0 for ExpressionOperandId::ZERO.
+ pub const START: Self = Self::from_u32(1);
+
+ /// Returns the explicitly-requested zero-based version of the counter id, used
+ /// during codegen. LLVM expects zero-based indexes.
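+ ///
+ /// E.g., the first counter (`START`, one-based id 1) maps to LLVM index 0.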
+ pub fn zero_based_index(self) -> u32 {
+ let one_based_index = self.as_u32();
+ debug_assert!(one_based_index > 0);
+ one_based_index - 1
+ }
+}
+
+rustc_index::newtype_index! {
+ /// InjectedExpressionId.as_u32() converts to ExpressionOperandId.as_u32()
+ ///
+ /// Values descend from u32::MAX.
+ pub struct InjectedExpressionId {
+ derive [HashStable]
+ DEBUG_FORMAT = "InjectedExpressionId({})",
+ MAX = 0xFFFF_FFFF,
+ }
+}
+
+rustc_index::newtype_index! {
+ /// InjectedExpressionIndex.as_u32() translates to u32::MAX - ExpressionOperandId.as_u32()
+ ///
+ /// Values ascend from 0.
+ pub struct InjectedExpressionIndex {
+ derive [HashStable]
+ DEBUG_FORMAT = "InjectedExpressionIndex({})",
+ MAX = 0xFFFF_FFFF,
+ }
+}
+
+rustc_index::newtype_index! {
+ /// MappedExpressionIndex values ascend from zero, and are recalculated indexes based on their
+ /// array position in the LLVM coverage map "Expressions" array, which is assembled during the
+ /// "mapgen" process. They cannot be computed algorithmically from the other `newtype_index`s.
+ pub struct MappedExpressionIndex {
+ derive [HashStable]
+ DEBUG_FORMAT = "MappedExpressionIndex({})",
+ MAX = 0xFFFF_FFFF,
+ }
+}
+
+impl From<CounterValueReference> for ExpressionOperandId {
+ #[inline]
+ fn from(v: CounterValueReference) -> ExpressionOperandId {
+ ExpressionOperandId::from(v.as_u32())
+ }
+}
+
+impl From<InjectedExpressionId> for ExpressionOperandId {
+ #[inline]
+ fn from(v: InjectedExpressionId) -> ExpressionOperandId {
+ ExpressionOperandId::from(v.as_u32())
+ }
+}
+
+#[derive(Clone, PartialEq, TyEncodable, TyDecodable, Hash, HashStable, TypeFoldable, TypeVisitable)]
+pub enum CoverageKind {
+ Counter {
+ function_source_hash: u64,
+ id: CounterValueReference,
+ },
+ Expression {
+ id: InjectedExpressionId,
+ lhs: ExpressionOperandId,
+ op: Op,
+ rhs: ExpressionOperandId,
+ },
+ Unreachable,
+}
+
+impl CoverageKind {
+ pub fn as_operand_id(&self) -> ExpressionOperandId {
+ use CoverageKind::*;
+ match *self {
+ Counter { id, .. } => ExpressionOperandId::from(id),
+ Expression { id, .. } => ExpressionOperandId::from(id),
+ Unreachable => bug!("Unreachable coverage cannot be part of an expression"),
+ }
+ }
+
+ pub fn is_expression(&self) -> bool {
+ matches!(self, Self::Expression { .. })
+ }
+}
+
+impl Debug for CoverageKind {
+ fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
+ use CoverageKind::*;
+ match self {
+ Counter { id, .. } => write!(fmt, "Counter({:?})", id.index()),
+ Expression { id, lhs, op, rhs } => write!(
+ fmt,
+ "Expression({:?}) = {} {} {}",
+ id.index(),
+ lhs.index(),
+ if *op == Op::Add { "+" } else { "-" },
+ rhs.index(),
+ ),
+ Unreachable => write!(fmt, "Unreachable"),
+ }
+ }
+}
+
+#[derive(Clone, TyEncodable, TyDecodable, Hash, HashStable, PartialEq, Eq, PartialOrd, Ord)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub struct CodeRegion {
+ pub file_name: Symbol,
+ pub start_line: u32,
+ pub start_col: u32,
+ pub end_line: u32,
+ pub end_col: u32,
+}
+
+impl Debug for CodeRegion {
+ fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
+ write!(
+ fmt,
+ "{}:{}:{} - {}:{}",
+ self.file_name, self.start_line, self.start_col, self.end_line, self.end_col
+ )
+ }
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, TyEncodable, TyDecodable, Hash, HashStable)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub enum Op {
+ Subtract,
+ Add,
+}
+
+impl Op {
+ pub fn is_add(&self) -> bool {
+ matches!(self, Self::Add)
+ }
+
+ pub fn is_subtract(&self) -> bool {
+ matches!(self, Self::Subtract)
+ }
+}
diff --git a/compiler/rustc_middle/src/mir/generic_graph.rs b/compiler/rustc_middle/src/mir/generic_graph.rs
new file mode 100644
index 000000000..f3621cd99
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/generic_graph.rs
@@ -0,0 +1,69 @@
+use gsgdt::{Edge, Graph, Node, NodeStyle};
+use rustc_hir::def_id::DefId;
+use rustc_middle::mir::*;
+use rustc_middle::ty::TyCtxt;
+
+/// Converts a MIR function into a gsgdt `Graph`.
+pub fn mir_fn_to_generic_graph<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'_>) -> Graph {
+ let def_id = body.source.def_id();
+ let def_name = graphviz_safe_def_name(def_id);
+ let graph_name = format!("Mir_{}", def_name);
+ let dark_mode = tcx.sess.opts.unstable_opts.graphviz_dark_mode;
+
+ // Nodes
+ let nodes: Vec<Node> = body
+ .basic_blocks()
+ .iter_enumerated()
+ .map(|(block, _)| bb_to_graph_node(block, body, dark_mode))
+ .collect();
+
+ // Edges
+ let mut edges = Vec::new();
+ for (source, _) in body.basic_blocks().iter_enumerated() {
+ let def_id = body.source.def_id();
+ let terminator = body[source].terminator();
+ let labels = terminator.kind.fmt_successor_labels();
+
+ for (target, label) in terminator.successors().zip(labels) {
+ let src = node(def_id, source);
+ let trg = node(def_id, target);
+ edges.push(Edge::new(src, trg, label.to_string()));
+ }
+ }
+
+ Graph::new(graph_name, nodes, edges)
+}
+
+fn bb_to_graph_node(block: BasicBlock, body: &Body<'_>, dark_mode: bool) -> Node {
+ let def_id = body.source.def_id();
+ let data = &body[block];
+ let label = node(def_id, block);
+
+ let (title, bgcolor) = if data.is_cleanup {
+ let color = if dark_mode { "royalblue" } else { "lightblue" };
+ (format!("{} (cleanup)", block.index()), color)
+ } else {
+ let color = if dark_mode { "dimgray" } else { "gray" };
+ (format!("{}", block.index()), color)
+ };
+
+ let style = NodeStyle { title_bg: Some(bgcolor.to_owned()), ..Default::default() };
+ let mut stmts: Vec<String> = data.statements.iter().map(|x| format!("{:?}", x)).collect();
+
+ // add the terminator to the stmts, gsgdt can print it out separately
+ let mut terminator_head = String::new();
+ data.terminator().kind.fmt_head(&mut terminator_head).unwrap();
+ stmts.push(terminator_head);
+
+ Node::new(stmts, label, title, style)
+}
+
+// Must match `[0-9A-Za-z_]*`. This does not appear in the rendered graph, so
+// it does not have to be user-friendly.
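+// E.g., a `DefId` with crate index 0 and item index 42 renders as "0_42".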
+pub fn graphviz_safe_def_name(def_id: DefId) -> String {
+ format!("{}_{}", def_id.krate.index(), def_id.index.index())
+}
+
+fn node(def_id: DefId, block: BasicBlock) -> String {
+ format!("bb{}__{}", block.index(), graphviz_safe_def_name(def_id))
+}
diff --git a/compiler/rustc_middle/src/mir/generic_graphviz.rs b/compiler/rustc_middle/src/mir/generic_graphviz.rs
new file mode 100644
index 000000000..11ac45943
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/generic_graphviz.rs
@@ -0,0 +1,173 @@
+use rustc_data_structures::graph::{self, iterate};
+use rustc_graphviz as dot;
+use rustc_middle::ty::TyCtxt;
+use std::io::{self, Write};
+
+pub struct GraphvizWriter<
+ 'a,
+ G: graph::DirectedGraph + graph::WithSuccessors + graph::WithStartNode + graph::WithNumNodes,
+ NodeContentFn: Fn(<G as graph::DirectedGraph>::Node) -> Vec<String>,
+ EdgeLabelsFn: Fn(<G as graph::DirectedGraph>::Node) -> Vec<String>,
+> {
+ graph: &'a G,
+ is_subgraph: bool,
+ graphviz_name: String,
+ graph_label: Option<String>,
+ node_content_fn: NodeContentFn,
+ edge_labels_fn: EdgeLabelsFn,
+}
+
+impl<
+ 'a,
+ G: graph::DirectedGraph + graph::WithSuccessors + graph::WithStartNode + graph::WithNumNodes,
+ NodeContentFn: Fn(<G as graph::DirectedGraph>::Node) -> Vec<String>,
+ EdgeLabelsFn: Fn(<G as graph::DirectedGraph>::Node) -> Vec<String>,
+> GraphvizWriter<'a, G, NodeContentFn, EdgeLabelsFn>
+{
+ pub fn new(
+ graph: &'a G,
+ graphviz_name: &str,
+ node_content_fn: NodeContentFn,
+ edge_labels_fn: EdgeLabelsFn,
+ ) -> Self {
+ Self {
+ graph,
+ is_subgraph: false,
+ graphviz_name: graphviz_name.to_owned(),
+ graph_label: None,
+ node_content_fn,
+ edge_labels_fn,
+ }
+ }
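+
+ // A hypothetical use (illustrative only): render each node's debug form as
+ // its content and emit no edge labels:
+ //
+ //     let writer = GraphvizWriter::new(
+ //         &graph,
+ //         "my_graph",
+ //         |node| vec![format!("{:?}", node)],
+ //         |_node| vec![],
+ //     );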
+
+ pub fn set_graph_label(&mut self, graph_label: &str) {
+ self.graph_label = Some(graph_label.to_owned());
+ }
+
+ /// Writes a graphviz DOT representation of the graph.
+ pub fn write_graphviz<'tcx, W>(&self, tcx: TyCtxt<'tcx>, w: &mut W) -> io::Result<()>
+ where
+ W: Write,
+ {
+ let kind = if self.is_subgraph { "subgraph" } else { "digraph" };
+ let cluster = if self.is_subgraph { "cluster_" } else { "" }; // Print border around graph
+ // FIXME(richkadel): If/when migrating the MIR graphviz to this generic implementation,
+ // prepend "Mir_" to the graphviz_safe_def_name(def_id)
+ writeln!(w, "{} {}{} {{", kind, cluster, self.graphviz_name)?;
+
+ // Global graph properties
+ let font = format!(r#"fontname="{}""#, tcx.sess.opts.unstable_opts.graphviz_font);
+ let mut graph_attrs = vec![&font[..]];
+ let mut content_attrs = vec![&font[..]];
+
+ let dark_mode = tcx.sess.opts.unstable_opts.graphviz_dark_mode;
+ if dark_mode {
+ graph_attrs.push(r#"bgcolor="black""#);
+ graph_attrs.push(r#"fontcolor="white""#);
+ content_attrs.push(r#"color="white""#);
+ content_attrs.push(r#"fontcolor="white""#);
+ }
+
+ writeln!(w, r#" graph [{}];"#, graph_attrs.join(" "))?;
+ let content_attrs_str = content_attrs.join(" ");
+ writeln!(w, r#" node [{}];"#, content_attrs_str)?;
+ writeln!(w, r#" edge [{}];"#, content_attrs_str)?;
+
+ // Graph label
+ if let Some(graph_label) = &self.graph_label {
+ self.write_graph_label(graph_label, w)?;
+ }
+
+ // Nodes
+ for node in iterate::post_order_from(self.graph, self.graph.start_node()) {
+ self.write_node(node, dark_mode, w)?;
+ }
+
+ // Edges
+ for source in iterate::post_order_from(self.graph, self.graph.start_node()) {
+ self.write_edges(source, w)?;
+ }
+ writeln!(w, "}}")
+ }
+
+ /// Write a graphviz DOT node for the given node.
+ pub fn write_node<W>(&self, node: G::Node, dark_mode: bool, w: &mut W) -> io::Result<()>
+ where
+ W: Write,
+ {
+ // Start a new node with the label to follow, in one of DOT's pseudo-HTML tables.
+ write!(w, r#" {} [shape="none", label=<"#, self.node(node))?;
+
+ write!(w, r#"<table border="0" cellborder="1" cellspacing="0">"#)?;
+
+ // FIXME(richkadel): If/when migrating the MIR graphviz to this generic implementation,
+ // we need a generic way to know if the node header should have a different color. For example,
+ // for MIR:
+ //
+ // let (blk, bgcolor) = if data.is_cleanup {
+ // let color = if dark_mode { "royalblue" } else { "lightblue" };
+ // (format!("{:?} (cleanup)", node), color)
+ // } else {
+ // let color = if dark_mode { "dimgray" } else { "gray" };
+ // (format!("{:?}", node), color)
+ // };
+ let color = if dark_mode { "dimgray" } else { "gray" };
+ let (blk, bgcolor) = (format!("{:?}", node), color);
+ write!(
+ w,
+ r#"<tr><td bgcolor="{bgcolor}" {attrs} colspan="{colspan}">{blk}</td></tr>"#,
+ attrs = r#"align="center""#,
+ colspan = 1,
+ blk = blk,
+ bgcolor = bgcolor
+ )?;
+
+ for section in (self.node_content_fn)(node) {
+ write!(
+ w,
+ r#"<tr><td align="left" balign="left">{}</td></tr>"#,
+ dot::escape_html(&section).replace('\n', "<br/>")
+ )?;
+ }
+
+ // Close the table
+ write!(w, "</table>")?;
+
+ // Close the node label and the node itself.
+ writeln!(w, ">];")
+ }
+
+ /// Write graphviz DOT edges with labels between the given node and all of its successors.
+ fn write_edges<W>(&self, source: G::Node, w: &mut W) -> io::Result<()>
+ where
+ W: Write,
+ {
+ let edge_labels = (self.edge_labels_fn)(source);
+ for (index, target) in self.graph.successors(source).enumerate() {
+ let src = self.node(source);
+ let trg = self.node(target);
+ let escaped_edge_label = if let Some(edge_label) = edge_labels.get(index) {
+ dot::escape_html(edge_label).replace('\n', r#"<br align="left"/>"#)
+ } else {
+ "".to_owned()
+ };
+ writeln!(w, r#" {} -> {} [label=<{}>];"#, src, trg, escaped_edge_label)?;
+ }
+ Ok(())
+ }
+
+ /// Write the graphviz DOT label for the overall graph. This is essentially a block of text that
+ /// will appear below the graph.
+ fn write_graph_label<W>(&self, label: &str, w: &mut W) -> io::Result<()>
+ where
+ W: Write,
+ {
+ let lines = label.split('\n').map(|s| dot::escape_html(s)).collect::<Vec<_>>();
+ let escaped_label = lines.join(r#"<br align="left"/>"#);
+ writeln!(w, r#" label=<<br/><br/>{}<br align="left"/><br/><br/><br/>>;"#, escaped_label)
+ }
+
+ fn node(&self, node: G::Node) -> String {
+ format!("{:?}__{}", node, self.graphviz_name)
+ }
+}
diff --git a/compiler/rustc_middle/src/mir/graph_cyclic_cache.rs b/compiler/rustc_middle/src/mir/graph_cyclic_cache.rs
new file mode 100644
index 000000000..f97bf2883
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/graph_cyclic_cache.rs
@@ -0,0 +1,63 @@
+use rustc_data_structures::graph::{
+ self, DirectedGraph, WithNumNodes, WithStartNode, WithSuccessors,
+};
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_data_structures::sync::OnceCell;
+use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
+
+/// Helper type to cache the result of `graph::is_cyclic`.
+#[derive(Clone, Debug)]
+pub(super) struct GraphIsCyclicCache {
+ cache: OnceCell<bool>,
+}
+
+impl GraphIsCyclicCache {
+ #[inline]
+ pub(super) fn new() -> Self {
+ GraphIsCyclicCache { cache: OnceCell::new() }
+ }
+
+ pub(super) fn is_cyclic<G>(&self, graph: &G) -> bool
+ where
+ G: ?Sized + DirectedGraph + WithStartNode + WithSuccessors + WithNumNodes,
+ {
+ *self.cache.get_or_init(|| graph::is_cyclic(graph))
+ }
+
+ /// Invalidates the cache.
+ #[inline]
+ pub(super) fn invalidate(&mut self) {
+ // Invalidating the cache requires mutating the MIR, which in turn requires a unique
+ // reference (`&mut`) to the `mir::Body`. Because of this, we can assume that all
+ // callers of `invalidate` have a unique reference to the MIR and thus to the
+ // cache. This means we never need to do synchronization when `invalidate` is called,
+ // we can simply reinitialize the `OnceCell`.
+ self.cache = OnceCell::new();
+ }
+}
+
+impl<S: Encoder> Encodable<S> for GraphIsCyclicCache {
+ #[inline]
+ fn encode(&self, s: &mut S) {
+ Encodable::encode(&(), s);
+ }
+}
+
+impl<D: Decoder> Decodable<D> for GraphIsCyclicCache {
+ #[inline]
+ fn decode(d: &mut D) -> Self {
+ let () = Decodable::decode(d);
+ Self::new()
+ }
+}
+
+impl<CTX> HashStable<CTX> for GraphIsCyclicCache {
+ #[inline]
+ fn hash_stable(&self, _: &mut CTX, _: &mut StableHasher) {
+ // do nothing
+ }
+}
+
+TrivialTypeTraversalAndLiftImpls! {
+ GraphIsCyclicCache,
+}
diff --git a/compiler/rustc_middle/src/mir/graphviz.rs b/compiler/rustc_middle/src/mir/graphviz.rs
new file mode 100644
index 000000000..5de56dad0
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/graphviz.rs
@@ -0,0 +1,134 @@
+use gsgdt::GraphvizSettings;
+use rustc_graphviz as dot;
+use rustc_hir::def_id::DefId;
+use rustc_middle::mir::*;
+use rustc_middle::ty::{self, TyCtxt};
+use std::fmt::Debug;
+use std::io::{self, Write};
+
+use super::generic_graph::mir_fn_to_generic_graph;
+use super::pretty::dump_mir_def_ids;
+
+/// Write a graphviz DOT graph of a list of MIRs.
+pub fn write_mir_graphviz<W>(tcx: TyCtxt<'_>, single: Option<DefId>, w: &mut W) -> io::Result<()>
+where
+ W: Write,
+{
+ let def_ids = dump_mir_def_ids(tcx, single);
+
+ let mirs =
+ def_ids
+ .iter()
+ .flat_map(|def_id| {
+ if tcx.is_const_fn_raw(*def_id) {
+ vec![tcx.optimized_mir(*def_id), tcx.mir_for_ctfe(*def_id)]
+ } else {
+ vec![tcx.instance_mir(ty::InstanceDef::Item(ty::WithOptConstParam::unknown(
+ *def_id,
+ )))]
+ }
+ })
+ .collect::<Vec<_>>();
+
+ let use_subgraphs = mirs.len() > 1;
+ if use_subgraphs {
+ writeln!(w, "digraph __crate__ {{")?;
+ }
+
+ for mir in mirs {
+ write_mir_fn_graphviz(tcx, mir, use_subgraphs, w)?;
+ }
+
+ if use_subgraphs {
+ writeln!(w, "}}")?;
+ }
+
+ Ok(())
+}
+
+/// Write a graphviz DOT graph of the MIR.
+pub fn write_mir_fn_graphviz<'tcx, W>(
+ tcx: TyCtxt<'tcx>,
+ body: &Body<'_>,
+ subgraph: bool,
+ w: &mut W,
+) -> io::Result<()>
+where
+ W: Write,
+{
+ // Global graph properties
+ let font = format!(r#"fontname="{}""#, tcx.sess.opts.unstable_opts.graphviz_font);
+ let mut graph_attrs = vec![&font[..]];
+ let mut content_attrs = vec![&font[..]];
+
+ let dark_mode = tcx.sess.opts.unstable_opts.graphviz_dark_mode;
+ if dark_mode {
+ graph_attrs.push(r#"bgcolor="black""#);
+ graph_attrs.push(r#"fontcolor="white""#);
+ content_attrs.push(r#"color="white""#);
+ content_attrs.push(r#"fontcolor="white""#);
+ }
+
+ // Graph label
+ let mut label = String::from("");
+ // FIXME: remove this unwrap
+ write_graph_label(tcx, body, &mut label).unwrap();
+ let g = mir_fn_to_generic_graph(tcx, body);
+ let settings = GraphvizSettings {
+ graph_attrs: Some(graph_attrs.join(" ")),
+ node_attrs: Some(content_attrs.join(" ")),
+ edge_attrs: Some(content_attrs.join(" ")),
+ graph_label: Some(label),
+ };
+ g.to_dot(w, &settings, subgraph)
+}
+
+/// Write the graphviz DOT label for the overall graph. This is essentially a block of text that
+/// will appear below the graph, showing the type of the `fn` this MIR represents and the types of
+/// all the variables and temporaries.
+fn write_graph_label<'tcx, W: std::fmt::Write>(
+ tcx: TyCtxt<'tcx>,
+ body: &Body<'_>,
+ w: &mut W,
+) -> std::fmt::Result {
+ let def_id = body.source.def_id();
+
+ write!(w, "fn {}(", dot::escape_html(&tcx.def_path_str(def_id)))?;
+
+ // fn argument types.
+ for (i, arg) in body.args_iter().enumerate() {
+ if i > 0 {
+ write!(w, ", ")?;
+ }
+ write!(w, "{:?}: {}", Place::from(arg), escape(&body.local_decls[arg].ty))?;
+ }
+
+ write!(w, ") -&gt; {}", escape(&body.return_ty()))?;
+ write!(w, r#"<br align="left"/>"#)?;
+
+ for local in body.vars_and_temps_iter() {
+ let decl = &body.local_decls[local];
+
+ write!(w, "let ")?;
+ if decl.mutability == Mutability::Mut {
+ write!(w, "mut ")?;
+ }
+
+ write!(w, r#"{:?}: {};<br align="left"/>"#, Place::from(local), escape(&decl.ty))?;
+ }
+
+ for var_debug_info in &body.var_debug_info {
+ write!(
+ w,
+ r#"debug {} =&gt; {};<br align="left"/>"#,
+ var_debug_info.name,
+ escape(&var_debug_info.value),
+ )?;
+ }
+
+ Ok(())
+}
+
+fn escape<T: Debug>(t: &T) -> String {
+ dot::escape_html(&format!("{:?}", t))
+}
diff --git a/compiler/rustc_middle/src/mir/interpret/allocation.rs b/compiler/rustc_middle/src/mir/interpret/allocation.rs
new file mode 100644
index 000000000..db7e0fb8a
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/interpret/allocation.rs
@@ -0,0 +1,1300 @@
+//! The virtual memory representation of the MIR interpreter.
+
+use std::borrow::Cow;
+use std::convert::{TryFrom, TryInto};
+use std::fmt;
+use std::hash;
+use std::iter;
+use std::ops::{Deref, Range};
+use std::ptr;
+
+use rustc_ast::Mutability;
+use rustc_data_structures::intern::Interned;
+use rustc_data_structures::sorted_map::SortedMap;
+use rustc_span::DUMMY_SP;
+use rustc_target::abi::{Align, HasDataLayout, Size};
+
+use super::{
+ read_target_uint, write_target_uint, AllocId, InterpError, InterpResult, Pointer, Provenance,
+ ResourceExhaustionInfo, Scalar, ScalarMaybeUninit, ScalarSizeMismatch, UndefinedBehaviorInfo,
+ UninitBytesAccess, UnsupportedOpInfo,
+};
+use crate::ty;
+
+/// This type represents an Allocation in the Miri/CTFE core engine.
+///
+/// Its public API is rather low-level, working directly with allocation offsets and a custom error
+/// type to account for the lack of an AllocId on this level. The Miri/CTFE core engine `memory`
+/// module provides higher-level access.
+// Note: for performance reasons when interning, some of the `Allocation` fields can be partially
+// hashed. (see the `Hash` impl below for more details), so the impl is not derived.
+#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable)]
+#[derive(HashStable)]
+pub struct Allocation<Prov = AllocId, Extra = ()> {
+ /// The actual bytes of the allocation.
+ /// Note that the bytes of a pointer represent the offset of the pointer.
+ bytes: Box<[u8]>,
+ /// Maps from byte addresses to extra data for each pointer.
+ /// Only the first byte of a pointer is inserted into the map; i.e.,
+ /// every entry in this map applies to `pointer_size` consecutive bytes starting
+ /// at the given offset.
+ relocations: Relocations<Prov>,
+ /// Denotes which part of this allocation is initialized.
+ init_mask: InitMask,
+ /// The alignment of the allocation to detect unaligned reads.
+ /// (`Align` guarantees that this is a power of two.)
+ pub align: Align,
+ /// `true` if the allocation is mutable.
+ /// Also used by codegen to determine if a static should be put into mutable memory,
+ /// which happens for `static mut` and `static` with interior mutability.
+ pub mutability: Mutability,
+ /// Extra state for the machine.
+ pub extra: Extra,
+}
+
+/// This is the maximum size we will hash at a time when interning an `Allocation` and its
+/// `InitMask`. Note that we hash that many bytes twice: once at the start and once at the end
+/// of the buffer. Used when these two structures are large: we only partially hash the larger
+/// fields in that situation. See the comment at the top of their respective `Hash` impl for
+/// more details.
+const MAX_BYTES_TO_HASH: usize = 64;
+
+/// This is the maximum size (in bytes) for which a buffer will be fully hashed, when interning.
+/// Otherwise, it will be partially hashed in two slices, requiring at least
+/// `2 * MAX_BYTES_TO_HASH` bytes.
+const MAX_HASHED_BUFFER_LEN: usize = 2 * MAX_BYTES_TO_HASH;
+
+// Const allocations are only hashed for interning. However, they can be large, making the hashing
+// expensive especially since it uses `FxHash`: it's better suited to short keys, not potentially
+// big buffers like the actual bytes of an allocation. We can partially hash some fields when they're
+// large.
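+// E.g., with `MAX_BYTES_TO_HASH == 64`, a 1 MiB buffer hashes as its length
+// plus its first and last 64 bytes rather than the full megabyte.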
+impl hash::Hash for Allocation {
+ fn hash<H: hash::Hasher>(&self, state: &mut H) {
+ // Partially hash the `bytes` buffer when it is large. To limit collisions with common
+ // prefixes and suffixes, we hash the length and some slices of the buffer.
+ let byte_count = self.bytes.len();
+ if byte_count > MAX_HASHED_BUFFER_LEN {
+ // Hash the buffer's length.
+ byte_count.hash(state);
+
+ // And its head and tail.
+ self.bytes[..MAX_BYTES_TO_HASH].hash(state);
+ self.bytes[byte_count - MAX_BYTES_TO_HASH..].hash(state);
+ } else {
+ self.bytes.hash(state);
+ }
+
+ // Hash the other fields as usual.
+ self.relocations.hash(state);
+ self.init_mask.hash(state);
+ self.align.hash(state);
+ self.mutability.hash(state);
+ self.extra.hash(state);
+ }
+}
+
+/// Interned types generally have an `Outer` type and an `Inner` type, where
+/// `Outer` is a newtype around `Interned<Inner>`, and all the operations are
+/// done on `Outer`, because all occurrences are interned. E.g. `Ty` is an
+/// outer type and `TyS` is its inner type.
+///
+/// Here things are different because only const allocations are interned. This
+/// means that both the inner type (`Allocation`) and the outer type
+/// (`ConstAllocation`) are used quite a bit.
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, HashStable)]
+#[rustc_pass_by_value]
+pub struct ConstAllocation<'tcx, Prov = AllocId, Extra = ()>(
+ pub Interned<'tcx, Allocation<Prov, Extra>>,
+);
+
+impl<'tcx> fmt::Debug for ConstAllocation<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // This matches how `Allocation` is printed. We print it like this to
+ // avoid having to update expected output in a lot of tests.
+ write!(f, "{:?}", self.inner())
+ }
+}
+
+impl<'tcx, Prov, Extra> ConstAllocation<'tcx, Prov, Extra> {
+ pub fn inner(self) -> &'tcx Allocation<Prov, Extra> {
+ self.0.0
+ }
+}
+
+/// We have our own error type that does not know about the `AllocId`; that information
+/// is added when converting to `InterpError`.
+#[derive(Debug)]
+pub enum AllocError {
+ /// A scalar had the wrong size.
+ ScalarSizeMismatch(ScalarSizeMismatch),
+ /// Encountered a pointer where we needed raw bytes.
+ ReadPointerAsBytes,
+ /// Partially overwriting a pointer.
+ PartialPointerOverwrite(Size),
+ /// Using uninitialized data where it is not allowed.
+ InvalidUninitBytes(Option<UninitBytesAccess>),
+}
+pub type AllocResult<T = ()> = Result<T, AllocError>;
+
+impl From<ScalarSizeMismatch> for AllocError {
+ fn from(s: ScalarSizeMismatch) -> Self {
+ AllocError::ScalarSizeMismatch(s)
+ }
+}
+
+impl AllocError {
+ pub fn to_interp_error<'tcx>(self, alloc_id: AllocId) -> InterpError<'tcx> {
+ use AllocError::*;
+ match self {
+ ScalarSizeMismatch(s) => {
+ InterpError::UndefinedBehavior(UndefinedBehaviorInfo::ScalarSizeMismatch(s))
+ }
+ ReadPointerAsBytes => InterpError::Unsupported(UnsupportedOpInfo::ReadPointerAsBytes),
+ PartialPointerOverwrite(offset) => InterpError::Unsupported(
+ UnsupportedOpInfo::PartialPointerOverwrite(Pointer::new(alloc_id, offset)),
+ ),
+ InvalidUninitBytes(info) => InterpError::UndefinedBehavior(
+ UndefinedBehaviorInfo::InvalidUninitBytes(info.map(|b| (alloc_id, b))),
+ ),
+ }
+ }
+}
+
+/// The information that makes up a memory access: offset and size.
+#[derive(Copy, Clone)]
+pub struct AllocRange {
+ pub start: Size,
+ pub size: Size,
+}
+
+impl fmt::Debug for AllocRange {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "[{:#x}..{:#x}]", self.start.bytes(), self.end().bytes())
+ }
+}
+
+/// Free-standing constructor for less syntactic overhead.
+#[inline(always)]
+pub fn alloc_range(start: Size, size: Size) -> AllocRange {
+ AllocRange { start, size }
+}
+
+impl AllocRange {
+ #[inline]
+ pub fn from(r: Range<Size>) -> Self {
+ alloc_range(r.start, r.end - r.start) // `Size` subtraction (overflow-checked)
+ }
+
+ #[inline(always)]
+ pub fn end(self) -> Size {
+ self.start + self.size // This does overflow checking.
+ }
+
+ /// Returns the `subrange` within this range; panics if it is not a subrange.
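+ ///
+ /// A sketch of the intended semantics (illustrative): the subrange is given
+ /// relative to `self.start`.
+ ///
+ /// ```ignore (illustrative)
+ /// let outer = alloc_range(Size::from_bytes(8), Size::from_bytes(8));
+ /// let sub = outer.subrange(alloc_range(Size::from_bytes(2), Size::from_bytes(4)));
+ /// assert_eq!(sub.start, Size::from_bytes(10));
+ /// ```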
+ #[inline]
+ pub fn subrange(self, subrange: AllocRange) -> AllocRange {
+ let sub_start = self.start + subrange.start;
+ let range = alloc_range(sub_start, subrange.size);
+ assert!(range.end() <= self.end(), "access outside the bounds for given AllocRange");
+ range
+ }
+}
+
+// The constructors are all without extra; the extra gets added by a machine hook later.
+impl<Prov> Allocation<Prov> {
+ /// Creates an allocation initialized by the given bytes.
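+ ///
+ /// A minimal sketch (illustrative):
+ ///
+ /// ```ignore (illustrative)
+ /// let alloc = Allocation::<AllocId>::from_bytes(&[1u8, 2, 3][..], Align::ONE, Mutability::Not);
+ /// assert_eq!(alloc.len(), 3);
+ /// // All bytes of a `from_bytes` allocation start out initialized.
+ /// assert!(alloc.init_mask().is_range_initialized(Size::ZERO, alloc.size()).is_ok());
+ /// ```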
+ pub fn from_bytes<'a>(
+ slice: impl Into<Cow<'a, [u8]>>,
+ align: Align,
+ mutability: Mutability,
+ ) -> Self {
+ let bytes = Box::<[u8]>::from(slice.into());
+ let size = Size::from_bytes(bytes.len());
+ Self {
+ bytes,
+ relocations: Relocations::new(),
+ init_mask: InitMask::new(size, true),
+ align,
+ mutability,
+ extra: (),
+ }
+ }
+
+ pub fn from_bytes_byte_aligned_immutable<'a>(slice: impl Into<Cow<'a, [u8]>>) -> Self {
+ Allocation::from_bytes(slice, Align::ONE, Mutability::Not)
+ }
+
+ /// Tries to create an `Allocation` of `size` bytes, failing if there is not enough memory
+ /// available to the compiler to do so.
+ ///
+ /// If `panic_on_fail` is true, this will never return `Err`.
+ pub fn uninit<'tcx>(size: Size, align: Align, panic_on_fail: bool) -> InterpResult<'tcx, Self> {
+ let bytes = Box::<[u8]>::try_new_zeroed_slice(size.bytes_usize()).map_err(|_| {
+ // This results in an error that can happen non-deterministically, since the memory
+ // available to the compiler can change between runs. Normally queries are always
+ // deterministic. However, we can be non-deterministic here because all uses of const
+ // evaluation (including ConstProp!) will make compilation fail (via hard error
+ // or ICE) upon encountering a `MemoryExhausted` error.
+ if panic_on_fail {
+ panic!("Allocation::uninit called with panic_on_fail had allocation failure")
+ }
+ ty::tls::with(|tcx| {
+ tcx.sess.delay_span_bug(DUMMY_SP, "exhausted memory during interpretation")
+ });
+ InterpError::ResourceExhaustion(ResourceExhaustionInfo::MemoryExhausted)
+ })?;
+ // SAFETY: the box was zero-allocated, which is a valid initial value for Box<[u8]>
+ let bytes = unsafe { bytes.assume_init() };
+ Ok(Allocation {
+ bytes,
+ relocations: Relocations::new(),
+ init_mask: InitMask::new(size, false),
+ align,
+ mutability: Mutability::Mut,
+ extra: (),
+ })
+ }
+}
+
+impl Allocation {
+ /// Adjusts an allocation taken from `tcx` for use by a custom `Machine` instance
+ /// with different `Provenance` and `Extra` types.
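+ ///
+ /// A sketch of a no-op adjustment (illustrative; a real `Machine` would map
+ /// the provenance and attach its own `extra` data):
+ ///
+ /// ```ignore (illustrative)
+ /// let adjusted: Allocation<AllocId, ()> =
+ ///     alloc.adjust_from_tcx(&tcx, (), |ptr| Ok::<_, std::convert::Infallible>(ptr))?;
+ /// ```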
+ pub fn adjust_from_tcx<Prov, Extra, Err>(
+ self,
+ cx: &impl HasDataLayout,
+ extra: Extra,
+ mut adjust_ptr: impl FnMut(Pointer<AllocId>) -> Result<Pointer<Prov>, Err>,
+ ) -> Result<Allocation<Prov, Extra>, Err> {
+ // Compute new pointer provenance, which also adjusts the bytes.
+ let mut bytes = self.bytes;
+ let mut new_relocations = Vec::with_capacity(self.relocations.0.len());
+ let ptr_size = cx.data_layout().pointer_size.bytes_usize();
+ let endian = cx.data_layout().endian;
+ for &(offset, alloc_id) in self.relocations.iter() {
+ let idx = offset.bytes_usize();
+ let ptr_bytes = &mut bytes[idx..idx + ptr_size];
+ let bits = read_target_uint(endian, ptr_bytes).unwrap();
+ let (ptr_prov, ptr_offset) =
+ adjust_ptr(Pointer::new(alloc_id, Size::from_bytes(bits)))?.into_parts();
+ write_target_uint(endian, ptr_bytes, ptr_offset.bytes().into()).unwrap();
+ new_relocations.push((offset, ptr_prov));
+ }
+ // Create allocation.
+ Ok(Allocation {
+ bytes,
+ relocations: Relocations::from_presorted(new_relocations),
+ init_mask: self.init_mask,
+ align: self.align,
+ mutability: self.mutability,
+ extra,
+ })
+ }
+}
+
+/// Raw accessors. Provide access to otherwise private bytes.
+impl<Prov, Extra> Allocation<Prov, Extra> {
+ pub fn len(&self) -> usize {
+ self.bytes.len()
+ }
+
+ pub fn size(&self) -> Size {
+ Size::from_bytes(self.len())
+ }
+
+ /// Looks at a slice which may contain uninitialized bytes or relocations. This differs
+ /// from `get_bytes_with_uninit_and_ptr` in that it does no relocation checks (even on the
+ /// edges) at all.
+ /// This must not be used for reads affecting the interpreter execution.
+ pub fn inspect_with_uninit_and_ptr_outside_interpreter(&self, range: Range<usize>) -> &[u8] {
+ &self.bytes[range]
+ }
+
+ /// Returns the mask indicating which bytes are initialized.
+ pub fn init_mask(&self) -> &InitMask {
+ &self.init_mask
+ }
+
+ /// Returns the relocation list.
+ pub fn relocations(&self) -> &Relocations<Prov> {
+ &self.relocations
+ }
+}
+
+/// Byte accessors.
+impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
+ /// This is the entirely abstraction-violating way to just grab the raw bytes without
+ /// caring about relocations. It just deduplicates some code between `read_scalar`
+ /// and `get_bytes_internal`.
+ fn get_bytes_even_more_internal(&self, range: AllocRange) -> &[u8] {
+ &self.bytes[range.start.bytes_usize()..range.end().bytes_usize()]
+ }
+
+ /// The last argument controls whether we error out when there are uninitialized or pointer
+ /// bytes. However, we *always* error when there are relocations overlapping the edges of the
+ /// range.
+ ///
+ /// You should never call this; call `get_bytes` or `get_bytes_with_uninit_and_ptr` instead.
+ ///
+ /// This function also guarantees that the resulting pointer will remain stable
+ /// even when new allocations are pushed to the `HashMap`. `mem_copy_repeatedly` relies
+ /// on that.
+ ///
+ /// It is the caller's responsibility to check bounds and alignment beforehand.
+ fn get_bytes_internal(
+ &self,
+ cx: &impl HasDataLayout,
+ range: AllocRange,
+ check_init_and_ptr: bool,
+ ) -> AllocResult<&[u8]> {
+ if check_init_and_ptr {
+ self.check_init(range)?;
+ self.check_relocations(cx, range)?;
+ } else {
+ // We still don't want relocations on the *edges*.
+ self.check_relocation_edges(cx, range)?;
+ }
+
+ Ok(self.get_bytes_even_more_internal(range))
+ }
+
+ /// Checks that these bytes are initialized and not pointer bytes, and then returns them
+ /// as a slice.
+ ///
+ /// It is the caller's responsibility to check bounds and alignment beforehand.
+ /// Most likely, you want to use the `PlaceTy` and `OperandTy`-based methods
+ /// on `InterpCx` instead.
+ #[inline]
+ pub fn get_bytes(&self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult<&[u8]> {
+ self.get_bytes_internal(cx, range, true)
+ }
+
+ /// It is the caller's responsibility to handle uninitialized and pointer bytes.
+ /// However, this still checks that there are no relocations on the *edges*.
+ ///
+ /// It is the caller's responsibility to check bounds and alignment beforehand.
+ #[inline]
+ pub fn get_bytes_with_uninit_and_ptr(
+ &self,
+ cx: &impl HasDataLayout,
+ range: AllocRange,
+ ) -> AllocResult<&[u8]> {
+ self.get_bytes_internal(cx, range, false)
+ }
+
+ /// Just calling this already marks everything as initialized and removes relocations,
+ /// so be sure to actually put data there!
+ ///
+ /// It is the caller's responsibility to check bounds and alignment beforehand.
+ /// Most likely, you want to use the `PlaceTy` and `OperandTy`-based methods
+ /// on `InterpCx` instead.
+ pub fn get_bytes_mut(
+ &mut self,
+ cx: &impl HasDataLayout,
+ range: AllocRange,
+ ) -> AllocResult<&mut [u8]> {
+ self.mark_init(range, true);
+ self.clear_relocations(cx, range)?;
+
+ Ok(&mut self.bytes[range.start.bytes_usize()..range.end().bytes_usize()])
+ }
+
+ /// A raw pointer variant of `get_bytes_mut` that avoids invalidating existing aliases into this memory.
+ pub fn get_bytes_mut_ptr(
+ &mut self,
+ cx: &impl HasDataLayout,
+ range: AllocRange,
+ ) -> AllocResult<*mut [u8]> {
+ self.mark_init(range, true);
+ self.clear_relocations(cx, range)?;
+
+ assert!(range.end().bytes_usize() <= self.bytes.len()); // need to do our own bounds-check
+ let begin_ptr = self.bytes.as_mut_ptr().wrapping_add(range.start.bytes_usize());
+ let len = range.end().bytes_usize() - range.start.bytes_usize();
+ Ok(ptr::slice_from_raw_parts_mut(begin_ptr, len))
+ }
+}
+
+/// Reading and writing.
+impl<Prov: Provenance, Extra> Allocation<Prov, Extra> {
+ /// Validates that `ptr.offset` and `ptr.offset + size` do not point to the middle of a
+ /// relocation. If `allow_uninit`/`allow_ptr` is `false`, also enforces that the memory in the
+ /// given range contains no uninitialized bytes/relocations.
+ pub fn check_bytes(
+ &self,
+ cx: &impl HasDataLayout,
+ range: AllocRange,
+ allow_uninit: bool,
+ allow_ptr: bool,
+ ) -> AllocResult {
+ // Check bounds and relocations on the edges.
+ self.get_bytes_with_uninit_and_ptr(cx, range)?;
+ // Check uninit and ptr.
+ if !allow_uninit {
+ self.check_init(range)?;
+ }
+ if !allow_ptr {
+ self.check_relocations(cx, range)?;
+ }
+ Ok(())
+ }
+
+ /// Reads a *non-ZST* scalar.
+ ///
+ /// If `read_provenance` is `true`, this will also read provenance; otherwise (if the machine
+ /// supports that) provenance is entirely ignored.
+ ///
+ /// ZSTs can't be read because in order to obtain a `Pointer`, we need to check
+ /// for ZSTness anyway due to integer pointers being valid for ZSTs.
+ ///
+ /// It is the caller's responsibility to check bounds and alignment beforehand.
+ /// Most likely, you want to call `InterpCx::read_scalar` instead of this method.
+ pub fn read_scalar(
+ &self,
+ cx: &impl HasDataLayout,
+ range: AllocRange,
+ read_provenance: bool,
+ ) -> AllocResult<ScalarMaybeUninit<Prov>> {
+ if read_provenance {
+ assert_eq!(range.size, cx.data_layout().pointer_size);
+ }
+
+ // First and foremost, if anything is uninit, bail.
+ if self.is_init(range).is_err() {
+ // This inflates uninitialized bytes to the entire scalar, even if only a few
+ // bytes are uninitialized.
+ return Ok(ScalarMaybeUninit::Uninit);
+ }
+
+ // If we are doing a pointer read, and there is a relocation exactly where we
+ // are reading, then we can put data and relocation back together and return that.
+ if read_provenance && let Some(&prov) = self.relocations.get(&range.start) {
+ // We already checked init and relocations, so we can use this function.
+ let bytes = self.get_bytes_even_more_internal(range);
+ let bits = read_target_uint(cx.data_layout().endian, bytes).unwrap();
+ let ptr = Pointer::new(prov, Size::from_bytes(bits));
+ return Ok(ScalarMaybeUninit::from_pointer(ptr, cx));
+ }
+
+ // If we are *not* reading a pointer, and we can just ignore relocations,
+ // then do exactly that.
+ if !read_provenance && Prov::OFFSET_IS_ADDR {
+ // We just strip provenance.
+ let bytes = self.get_bytes_even_more_internal(range);
+ let bits = read_target_uint(cx.data_layout().endian, bytes).unwrap();
+ return Ok(ScalarMaybeUninit::Scalar(Scalar::from_uint(bits, range.size)));
+ }
+
+ // It's complicated. Better make sure there is no provenance anywhere.
+ // FIXME: If !OFFSET_IS_ADDR, this is the best we can do. But if OFFSET_IS_ADDR, then
+ // `read_provenance` is true and we ideally would distinguish the following two cases:
+ // - The entire `range` is covered by 2 relocations for the same provenance.
+ // Then we should return a pointer with that provenance.
+ // - The range has inhomogeneous provenance. Then we should return just the
+ // underlying bits.
+ let bytes = self.get_bytes(cx, range)?;
+ let bits = read_target_uint(cx.data_layout().endian, bytes).unwrap();
+ Ok(ScalarMaybeUninit::Scalar(Scalar::from_uint(bits, range.size)))
+ }
+
+ /// Writes a *non-ZST* scalar.
+ ///
+ /// ZSTs can't be written because in order to obtain a `Pointer`, we need to check
+ /// for ZSTness anyway due to integer pointers being valid for ZSTs.
+ ///
+ /// It is the caller's responsibility to check bounds and alignment beforehand.
+ /// Most likely, you want to call `InterpCx::write_scalar` instead of this method.
+ #[instrument(skip(self, cx), level = "debug")]
+ pub fn write_scalar(
+ &mut self,
+ cx: &impl HasDataLayout,
+ range: AllocRange,
+ val: ScalarMaybeUninit<Prov>,
+ ) -> AllocResult {
+ assert!(self.mutability == Mutability::Mut);
+
+ let val = match val {
+ ScalarMaybeUninit::Scalar(scalar) => scalar,
+ ScalarMaybeUninit::Uninit => {
+ return self.write_uninit(cx, range);
+ }
+ };
+
+ // `to_bits_or_ptr_internal` is the right method because we just want to store this data
+ // as-is into memory.
+ let (bytes, provenance) = match val.to_bits_or_ptr_internal(range.size)? {
+ Err(val) => {
+ let (provenance, offset) = val.into_parts();
+ (u128::from(offset.bytes()), Some(provenance))
+ }
+ Ok(data) => (data, None),
+ };
+
+ let endian = cx.data_layout().endian;
+ let dst = self.get_bytes_mut(cx, range)?;
+ write_target_uint(endian, dst, bytes).unwrap();
+
+ // See if we have to also write a relocation.
+ if let Some(provenance) = provenance {
+ self.relocations.0.insert(range.start, provenance);
+ }
+
+ Ok(())
+ }
+
+ /// Write "uninit" to the given memory range.
+ pub fn write_uninit(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult {
+ self.mark_init(range, false);
+ self.clear_relocations(cx, range)?;
+ Ok(())
+ }
+}
+
+/// Relocations.
+impl<Prov: Copy, Extra> Allocation<Prov, Extra> {
+ /// Returns all relocations overlapping with the given pointer-offset pair.
+ fn get_relocations(&self, cx: &impl HasDataLayout, range: AllocRange) -> &[(Size, Prov)] {
+ // We have to go back `pointer_size - 1` bytes, as that one would still overlap with
+ // the beginning of this range.
+ let start = range.start.bytes().saturating_sub(cx.data_layout().pointer_size.bytes() - 1);
+ self.relocations.range(Size::from_bytes(start)..range.end())
+ }
+
+ /// Returns whether this allocation has relocations overlapping with the given range.
+ ///
+ /// Note: this function exists to allow `get_relocations` to be private, in order to somewhat
+ /// limit access to relocations outside of the `Allocation` abstraction.
+ pub fn has_relocations(&self, cx: &impl HasDataLayout, range: AllocRange) -> bool {
+ !self.get_relocations(cx, range).is_empty()
+ }
+
+ /// Checks that there are no relocations overlapping with the given range.
+ #[inline(always)]
+ fn check_relocations(&self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult {
+ if self.has_relocations(cx, range) { Err(AllocError::ReadPointerAsBytes) } else { Ok(()) }
+ }
+
+ /// Removes all relocations inside the given range.
+ /// If there are relocations overlapping with the edges, they
+ /// are removed as well *and* the bytes they cover are marked as
+ /// uninitialized. This is a somewhat odd "spooky action at a distance",
+ /// but it allows strictly more code to run than if we were to error
+ /// immediately in that case.
+ fn clear_relocations(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult
+ where
+ Prov: Provenance,
+ {
+ // Find the start and end of the given range and its outermost relocations.
+ let (first, last) = {
+ // Find all relocations overlapping the given range.
+ let relocations = self.get_relocations(cx, range);
+ if relocations.is_empty() {
+ return Ok(());
+ }
+
+ (
+ relocations.first().unwrap().0,
+ relocations.last().unwrap().0 + cx.data_layout().pointer_size,
+ )
+ };
+ let start = range.start;
+ let end = range.end();
+
+ // We need to handle clearing the relocations from parts of a pointer.
+ // FIXME: Miri should preserve partial relocations; see
+ // https://github.com/rust-lang/miri/issues/2181.
+ if first < start {
+ if Prov::ERR_ON_PARTIAL_PTR_OVERWRITE {
+ return Err(AllocError::PartialPointerOverwrite(first));
+ }
+ warn!(
+ "Partial pointer overwrite! De-initializing memory at offsets {first:?}..{start:?}."
+ );
+ self.init_mask.set_range(first, start, false);
+ }
+ if last > end {
+ if Prov::ERR_ON_PARTIAL_PTR_OVERWRITE {
+ return Err(AllocError::PartialPointerOverwrite(
+ last - cx.data_layout().pointer_size,
+ ));
+ }
+ warn!(
+ "Partial pointer overwrite! De-initializing memory at offsets {end:?}..{last:?}."
+ );
+ self.init_mask.set_range(end, last, false);
+ }
+
+ // Forget all the relocations.
+ // Since relocations do not overlap, we know that removing until `last` (exclusive) is fine,
+ // i.e., this will not remove any other relocations just after the ones we care about.
+ self.relocations.0.remove_range(first..last);
+
+ Ok(())
+ }
+
+ /// Errors if there are relocations overlapping with the edges of the
+ /// given memory range.
+ #[inline]
+ fn check_relocation_edges(&self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult {
+ self.check_relocations(cx, alloc_range(range.start, Size::ZERO))?;
+ self.check_relocations(cx, alloc_range(range.end(), Size::ZERO))?;
+ Ok(())
+ }
+}
+
+/// "Relocations" stores the provenance information of pointers stored in memory.
+#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)]
+pub struct Relocations<Prov = AllocId>(SortedMap<Size, Prov>);
+
+impl<Prov> Relocations<Prov> {
+ pub fn new() -> Self {
+ Relocations(SortedMap::new())
+ }
+
+ /// The caller must guarantee that the given relocations are already sorted
+ /// by address and contain no duplicates.
+ pub fn from_presorted(r: Vec<(Size, Prov)>) -> Self {
+ Relocations(SortedMap::from_presorted_elements(r))
+ }
+}
+
+impl<Prov> Deref for Relocations<Prov> {
+ type Target = SortedMap<Size, Prov>;
+
+ fn deref(&self) -> &Self::Target {
+ &self.0
+ }
+}
+
+/// A partial, owned list of relocations to transfer into another allocation.
+///
+/// Offsets are already adjusted to the destination allocation.
+pub struct AllocationRelocations<Prov> {
+ dest_relocations: Vec<(Size, Prov)>,
+}
+
+impl<Prov: Copy, Extra> Allocation<Prov, Extra> {
+ pub fn prepare_relocation_copy(
+ &self,
+ cx: &impl HasDataLayout,
+ src: AllocRange,
+ dest: Size,
+ count: u64,
+ ) -> AllocationRelocations<Prov> {
+ let relocations = self.get_relocations(cx, src);
+ if relocations.is_empty() {
+ return AllocationRelocations { dest_relocations: Vec::new() };
+ }
+
+ let size = src.size;
+ let mut new_relocations = Vec::with_capacity(relocations.len() * (count as usize));
+
+ // If `count` is large, this is rather wasteful -- we are allocating a big array here, which
+ // is mostly filled with redundant information since it's just N copies of the same `Prov`s
+ // at slightly adjusted offsets. The reason we do this is so that in `mark_relocation_range`
+ // we can use `insert_presorted`. That wouldn't work with an `Iterator` that just produces
+ // the right sequence of relocations for all N copies.
+ for i in 0..count {
+ new_relocations.extend(relocations.iter().map(|&(offset, reloc)| {
+ // compute offset for current repetition
+ let dest_offset = dest + size * i; // `Size` operations
+ (
+ // shift offsets from source allocation to destination allocation
+ (offset + dest_offset) - src.start, // `Size` operations
+ reloc,
+ )
+ }));
+ }
+
+ AllocationRelocations { dest_relocations: new_relocations }
+ }
+
+ /// Applies a relocation copy.
+ /// The affected range, as defined in the parameters to `prepare_relocation_copy` is expected
+ /// to be clear of relocations.
+ ///
+ /// This is dangerous to use as it can violate internal `Allocation` invariants!
+ /// It only exists to support an efficient implementation of `mem_copy_repeatedly`.
+ pub fn mark_relocation_range(&mut self, relocations: AllocationRelocations<Prov>) {
+ self.relocations.0.insert_presorted(relocations.dest_relocations);
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Uninitialized byte tracking
+////////////////////////////////////////////////////////////////////////////////
+
+type Block = u64;
+
+/// A bitmask where each bit refers to the byte with the same index. If the bit is `true`, the byte
+/// is initialized. If it is `false` the byte is uninitialized.
+// Note: for performance reasons when interning, some of the `InitMask` fields can be partially
+ // hashed (see the `Hash` impl below for more details), so the impl is not derived.
+#[derive(Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable)]
+#[derive(HashStable)]
+pub struct InitMask {
+ blocks: Vec<Block>,
+ len: Size,
+}
+
+// Const allocations are only hashed for interning. However, they can be large, making the hashing
+// expensive especially since it uses `FxHash`: it's better suited to short keys, not potentially
+// big buffers like the allocation's init mask. We can partially hash some fields when they're
+// large.
+impl hash::Hash for InitMask {
+ fn hash<H: hash::Hasher>(&self, state: &mut H) {
+ const MAX_BLOCKS_TO_HASH: usize = MAX_BYTES_TO_HASH / std::mem::size_of::<Block>();
+ const MAX_BLOCKS_LEN: usize = MAX_HASHED_BUFFER_LEN / std::mem::size_of::<Block>();
+
+ // Partially hash the `blocks` buffer when it is large. To limit collisions with common
+ // prefixes and suffixes, we hash the length and some slices of the buffer.
+ let block_count = self.blocks.len();
+ if block_count > MAX_BLOCKS_LEN {
+ // Hash the buffer's length.
+ block_count.hash(state);
+
+ // And its head and tail.
+ self.blocks[..MAX_BLOCKS_TO_HASH].hash(state);
+ self.blocks[block_count - MAX_BLOCKS_TO_HASH..].hash(state);
+ } else {
+ self.blocks.hash(state);
+ }
+
+ // Hash the other fields as usual.
+ self.len.hash(state);
+ }
+}
+
+impl InitMask {
+ pub const BLOCK_SIZE: u64 = 64;
+
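+ /// Maps a byte offset within the mask to `(block index, bit index within
+ /// that block)`. For example (illustrative): with 64-bit blocks, byte
+ /// offset 70 lands in block 1 at bit 6, since `70 = 1 * 64 + 6`.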
+ #[inline]
+ fn bit_index(bits: Size) -> (usize, usize) {
+ // BLOCK_SIZE is the number of bits that can fit in a `Block`.
+ // Each bit in a `Block` represents the initialization state of one byte of an allocation,
+ // so we use `.bytes()` here.
+ let bits = bits.bytes();
+ let a = bits / InitMask::BLOCK_SIZE;
+ let b = bits % InitMask::BLOCK_SIZE;
+ (usize::try_from(a).unwrap(), usize::try_from(b).unwrap())
+ }
+
+ #[inline]
+ fn size_from_bit_index(block: impl TryInto<u64>, bit: impl TryInto<u64>) -> Size {
+ let block = block.try_into().ok().unwrap();
+ let bit = bit.try_into().ok().unwrap();
+ Size::from_bytes(block * InitMask::BLOCK_SIZE + bit)
+ }
+
+ pub fn new(size: Size, state: bool) -> Self {
+ let mut m = InitMask { blocks: vec![], len: Size::ZERO };
+ m.grow(size, state);
+ m
+ }
+
+ pub fn set_range(&mut self, start: Size, end: Size, new_state: bool) {
+ let len = self.len;
+ if end > len {
+ self.grow(end - len, new_state);
+ }
+ self.set_range_inbounds(start, end, new_state);
+ }
+
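+ /// Sets `start..end` (end-exclusive) to `new_state`; the range must already
+ /// be in bounds. As a worked example (illustrative, with 8-bit blocks for
+ /// brevity): setting bits `2..5` within one block builds the mask
+ /// `(MAX << 2) & (MAX >> (8 - 5)) = 0b1111_1100 & 0b0001_1111 = 0b0001_1100`,
+ /// i.e. exactly bits 2, 3, and 4.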
+ pub fn set_range_inbounds(&mut self, start: Size, end: Size, new_state: bool) {
+ let (blocka, bita) = Self::bit_index(start);
+ let (blockb, bitb) = Self::bit_index(end);
+ if blocka == blockb {
+ // First set all bits except the first `bita`,
+ // then unset the last `64 - bitb` bits.
+ let range = if bitb == 0 {
+ u64::MAX << bita
+ } else {
+ (u64::MAX << bita) & (u64::MAX >> (64 - bitb))
+ };
+ if new_state {
+ self.blocks[blocka] |= range;
+ } else {
+ self.blocks[blocka] &= !range;
+ }
+ return;
+ }
+ // across block boundaries
+ if new_state {
+ // Set `bita..64` to `1`.
+ self.blocks[blocka] |= u64::MAX << bita;
+ // Set `0..bitb` to `1`.
+ if bitb != 0 {
+ self.blocks[blockb] |= u64::MAX >> (64 - bitb);
+ }
+ // Fill in all the other blocks (much faster than one bit at a time).
+ for block in (blocka + 1)..blockb {
+ self.blocks[block] = u64::MAX;
+ }
+ } else {
+ // Set `bita..64` to `0`.
+ self.blocks[blocka] &= !(u64::MAX << bita);
+ // Set `0..bitb` to `0`.
+ if bitb != 0 {
+ self.blocks[blockb] &= !(u64::MAX >> (64 - bitb));
+ }
+ // Fill in all the other blocks (much faster than one bit at a time).
+ for block in (blocka + 1)..blockb {
+ self.blocks[block] = 0;
+ }
+ }
+ }
+
+ #[inline]
+ pub fn get(&self, i: Size) -> bool {
+ let (block, bit) = Self::bit_index(i);
+ (self.blocks[block] & (1 << bit)) != 0
+ }
+
+ #[inline]
+ pub fn set(&mut self, i: Size, new_state: bool) {
+ let (block, bit) = Self::bit_index(i);
+ self.set_bit(block, bit, new_state);
+ }
+
+ #[inline]
+ fn set_bit(&mut self, block: usize, bit: usize, new_state: bool) {
+ if new_state {
+ self.blocks[block] |= 1 << bit;
+ } else {
+ self.blocks[block] &= !(1 << bit);
+ }
+ }
+
+ pub fn grow(&mut self, amount: Size, new_state: bool) {
+ if amount.bytes() == 0 {
+ return;
+ }
+ let unused_trailing_bits =
+ u64::try_from(self.blocks.len()).unwrap() * Self::BLOCK_SIZE - self.len.bytes();
+ if amount.bytes() > unused_trailing_bits {
+ let additional_blocks = amount.bytes() / Self::BLOCK_SIZE + 1;
+ self.blocks.extend(
+ // FIXME(oli-obk): optimize this by repeating `new_state as Block`.
+ iter::repeat(0).take(usize::try_from(additional_blocks).unwrap()),
+ );
+ }
+ let start = self.len;
+ self.len += amount;
+ self.set_range_inbounds(start, start + amount, new_state); // `Size` operation
+ }
+
+ /// Returns the index of the first bit in `start..end` (end-exclusive) that is equal to `is_init`.
+ fn find_bit(&self, start: Size, end: Size, is_init: bool) -> Option<Size> {
+ /// A fast implementation of `find_bit`,
+ /// which skips over an entire block at a time if it's all 0s (resp. 1s),
+ /// and finds the first 1 (resp. 0) bit inside a block using `trailing_zeros` instead of a loop.
+ ///
+ /// Note that all examples below are written with 8 (instead of 64) bit blocks for simplicity,
+ /// and with the least significant bit (and lowest block) first:
+ /// ```text
+ /// 00000000|00000000
+ /// ^ ^ ^ ^
+ /// index: 0 7 8 15
+ /// ```
+ /// Also, if not stated, assume that `is_init = true`, that is, we are searching for the first 1 bit.
+ fn find_bit_fast(
+ init_mask: &InitMask,
+ start: Size,
+ end: Size,
+ is_init: bool,
+ ) -> Option<Size> {
+ /// Search one block, returning the index of the first bit equal to `is_init`.
+ fn search_block(
+ bits: Block,
+ block: usize,
+ start_bit: usize,
+ is_init: bool,
+ ) -> Option<Size> {
+ // For the following examples, assume this function was called with:
+ // bits = 0b00111011
+ // start_bit = 3
+ // is_init = false
+ // Note that, for the examples in this function, the most significant bit is written first,
+ // which is backwards compared to the comments in `find_bit`/`find_bit_fast`.
+
+ // Invert bits so we're always looking for the first set bit.
+ // ! 0b00111011
+ // bits = 0b11000100
+ let bits = if is_init { bits } else { !bits };
+ // Mask off unused start bits.
+ // 0b11000100
+ // & 0b11111000
+ // bits = 0b11000000
+ let bits = bits & (!0 << start_bit);
+ // Find set bit, if any.
+ // bit = trailing_zeros(0b11000000)
+ // bit = 6
+ if bits == 0 {
+ None
+ } else {
+ let bit = bits.trailing_zeros();
+ Some(InitMask::size_from_bit_index(block, bit))
+ }
+ }
+
+ if start >= end {
+ return None;
+ }
+
+ // Convert `start` and `end` to block indexes and bit indexes within each block.
+ // We must convert `end` to an inclusive bound to handle block boundaries correctly.
+ //
+ // For example:
+ //
+ // (a) 00000000|00000000 (b) 00000000|
+ // ^~~~~~~~~~~^ ^~~~~~~~~^
+ // start end start end
+ //
+ // In both cases, the block index of `end` is 1.
+ // But we do want to search block 1 in (a), and we don't in (b).
+ //
+ // We subtract 1 from both end positions to make them inclusive:
+ //
+ // (a) 00000000|00000000 (b) 00000000|
+ // ^~~~~~~~~~^ ^~~~~~~^
+ // start end_inclusive start end_inclusive
+ //
+ // For (a), the block index of `end_inclusive` is 1, and for (b), it's 0.
+ // This provides the desired behavior of searching blocks 0 and 1 for (a),
+ // and searching only block 0 for (b).
+ // There is no concern of overflows since we checked for `start >= end` above.
+ let (start_block, start_bit) = InitMask::bit_index(start);
+ let end_inclusive = Size::from_bytes(end.bytes() - 1);
+ let (end_block_inclusive, _) = InitMask::bit_index(end_inclusive);
+
+ // Handle first block: need to skip `start_bit` bits.
+ //
+ // We need to handle the first block separately,
+ // because there may be bits earlier in the block that should be ignored,
+ // such as the bit marked (1) in this example:
+ //
+ // (1)
+ // -|------
+ // (c) 01000000|00000000|00000001
+ // ^~~~~~~~~~~~~~~~~~^
+ // start end
+ if let Some(i) =
+ search_block(init_mask.blocks[start_block], start_block, start_bit, is_init)
+ {
+ // If the range is less than a block, we may find a matching bit after `end`.
+ //
+ // For example, we shouldn't successfully find bit (2), because it's after `end`:
+ //
+ // (2)
+ // -------|
+ // (d) 00000001|00000000|00000001
+ // ^~~~~^
+ // start end
+ //
+ // An alternative would be to mask off end bits in the same way as we do for start bits,
+ // but performing this check afterwards is faster and simpler to implement.
+ if i < end {
+ return Some(i);
+ } else {
+ return None;
+ }
+ }
+
+ // Handle remaining blocks.
+ //
+ // We can skip over an entire block at once if it's all 0s (resp. 1s).
+ // The block marked (3) in this example is the first block that will be handled by this loop,
+ // and it will be skipped for that reason:
+ //
+ // (3)
+ // --------
+ // (e) 01000000|00000000|00000001
+ // ^~~~~~~~~~~~~~~~~~^
+ // start end
+ if start_block < end_block_inclusive {
+ // This loop is written in a specific way for performance.
+ // Notably: `..end_block_inclusive + 1` is used for an inclusive range instead of `..=end_block_inclusive`,
+ // and `.zip(start_block + 1..)` is used to track the index instead of `.enumerate().skip().take()`,
+ // because both alternatives result in significantly worse codegen.
+ // `end_block_inclusive + 1` is guaranteed not to wrap, because `end_block_inclusive <= end / BLOCK_SIZE`,
+ // and `BLOCK_SIZE` (the number of bits per block) will always be at least 8 (1 byte).
+ for (&bits, block) in init_mask.blocks[start_block + 1..end_block_inclusive + 1]
+ .iter()
+ .zip(start_block + 1..)
+ {
+ if let Some(i) = search_block(bits, block, 0, is_init) {
+ // If this is the last block, we may find a matching bit after `end`.
+ //
+ // For example, we shouldn't successfully find bit (4), because it's after `end`:
+ //
+ // (4)
+ // -------|
+ // (f) 00000001|00000000|00000001
+ // ^~~~~~~~~~~~~~~~~~^
+ // start end
+ //
+ // As above with example (d), we could handle the end block separately and mask off end bits,
+ // but unconditionally searching an entire block at once and performing this check afterwards
+ // is faster and much simpler to implement.
+ if i < end {
+ return Some(i);
+ } else {
+ return None;
+ }
+ }
+ }
+ }
+
+ None
+ }
+
+ #[cfg_attr(not(debug_assertions), allow(dead_code))]
+ fn find_bit_slow(
+ init_mask: &InitMask,
+ start: Size,
+ end: Size,
+ is_init: bool,
+ ) -> Option<Size> {
+ (start..end).find(|&i| init_mask.get(i) == is_init)
+ }
+
+ let result = find_bit_fast(self, start, end, is_init);
+
+ debug_assert_eq!(
+ result,
+ find_bit_slow(self, start, end, is_init),
+ "optimized implementation of find_bit is wrong for start={:?} end={:?} is_init={} init_mask={:#?}",
+ start,
+ end,
+ is_init,
+ self
+ );
+
+ result
+ }
+}
+
+/// A contiguous chunk of initialized or uninitialized memory.
+pub enum InitChunk {
+ Init(Range<Size>),
+ Uninit(Range<Size>),
+}
+
+impl InitChunk {
+ #[inline]
+ pub fn is_init(&self) -> bool {
+ match self {
+ Self::Init(_) => true,
+ Self::Uninit(_) => false,
+ }
+ }
+
+ #[inline]
+ pub fn range(&self) -> Range<Size> {
+ match self {
+ Self::Init(r) => r.clone(),
+ Self::Uninit(r) => r.clone(),
+ }
+ }
+}
+
+impl InitMask {
+ /// Checks whether the range `start..end` (end-exclusive) is entirely initialized.
+ ///
+ /// Returns `Ok(())` if it's initialized. Otherwise returns a range of byte
+ /// indexes for the first contiguous span of the uninitialized access.
+ #[inline]
+ pub fn is_range_initialized(&self, start: Size, end: Size) -> Result<(), AllocRange> {
+ if end > self.len {
+ return Err(AllocRange::from(self.len..end));
+ }
+
+ let uninit_start = self.find_bit(start, end, false);
+
+ match uninit_start {
+ Some(uninit_start) => {
+ let uninit_end = self.find_bit(uninit_start, end, true).unwrap_or(end);
+ Err(AllocRange::from(uninit_start..uninit_end))
+ }
+ None => Ok(()),
+ }
+ }
+
+ /// Returns an iterator, yielding a range of byte indexes for each contiguous region
+ /// of initialized or uninitialized bytes inside the range `start..end` (end-exclusive).
+ ///
+ /// The iterator guarantees the following:
+ /// - Chunks are nonempty.
+ /// - Chunks are adjacent (each range's start is equal to the previous range's end).
+ /// - Chunks span exactly `start..end` (the first starts at `start`, the last ends at `end`).
+ /// - Chunks alternate between [`InitChunk::Init`] and [`InitChunk::Uninit`].
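+ ///
+ /// A sketch of consuming the iterator (illustrative):
+ ///
+ /// ```ignore (illustrative)
+ /// let mask = InitMask::new(Size::from_bytes(16), false);
+ /// for chunk in mask.range_as_init_chunks(Size::ZERO, Size::from_bytes(16)) {
+ ///     // With a freshly uninitialized mask this yields a single
+ ///     // `InitChunk::Uninit(0..16)` chunk.
+ ///     assert!(!chunk.is_init());
+ /// }
+ /// ```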
+ #[inline]
+ pub fn range_as_init_chunks(&self, start: Size, end: Size) -> InitChunkIter<'_> {
+ assert!(end <= self.len);
+
+ let is_init = if start < end {
+ self.get(start)
+ } else {
+ // `start..end` is empty: there are no chunks, so use some arbitrary value
+ false
+ };
+
+ InitChunkIter { init_mask: self, is_init, start, end }
+ }
+}
+
+/// Yields [`InitChunk`]s. See [`InitMask::range_as_init_chunks`].
+#[derive(Clone)]
+pub struct InitChunkIter<'a> {
+ init_mask: &'a InitMask,
+ /// Whether the next chunk we will return is initialized.
+ /// If there are no more chunks, contains some arbitrary value.
+ is_init: bool,
+ /// The current byte index into `init_mask`.
+ start: Size,
+ /// The end byte index into `init_mask`.
+ end: Size,
+}
+
+impl<'a> Iterator for InitChunkIter<'a> {
+ type Item = InitChunk;
+
+ #[inline]
+ fn next(&mut self) -> Option<Self::Item> {
+ if self.start >= self.end {
+ return None;
+ }
+
+ let end_of_chunk =
+ self.init_mask.find_bit(self.start, self.end, !self.is_init).unwrap_or(self.end);
+ let range = self.start..end_of_chunk;
+
+ let ret =
+ Some(if self.is_init { InitChunk::Init(range) } else { InitChunk::Uninit(range) });
+
+ self.is_init = !self.is_init;
+ self.start = end_of_chunk;
+
+ ret
+ }
+}
+
+/// Uninitialized bytes.
+impl<Prov: Copy, Extra> Allocation<Prov, Extra> {
+ /// Checks whether the given range is entirely initialized.
+ ///
+ /// Returns `Ok(())` if it's initialized. Otherwise returns the range of byte
+ /// indexes of the first contiguous uninitialized access.
+ fn is_init(&self, range: AllocRange) -> Result<(), AllocRange> {
+ self.init_mask.is_range_initialized(range.start, range.end()) // `Size` addition
+ }
+
+ /// Checks that a range of bytes is initialized. If not, returns the `InvalidUninitBytes`
+ /// error, which reports the first range of bytes that is uninitialized.
+ fn check_init(&self, range: AllocRange) -> AllocResult {
+ self.is_init(range).map_err(|uninit_range| {
+ AllocError::InvalidUninitBytes(Some(UninitBytesAccess {
+ access: range,
+ uninit: uninit_range,
+ }))
+ })
+ }
+
+ fn mark_init(&mut self, range: AllocRange, is_init: bool) {
+ if range.size.bytes() == 0 {
+ return;
+ }
+ assert!(self.mutability == Mutability::Mut);
+ self.init_mask.set_range(range.start, range.end(), is_init);
+ }
+}
+
+/// Run-length encoding of the uninit mask.
+/// Used to copy parts of a mask multiple times to another allocation.
+pub struct InitMaskCompressed {
+ /// Whether the first range is initialized.
+ initial: bool,
+ /// The lengths of ranges that are run-length encoded.
+ /// The initialization state of the ranges alternates, starting with `initial`.
+ ranges: smallvec::SmallVec<[u64; 1]>,
+}
+
+impl InitMaskCompressed {
+ pub fn no_bytes_init(&self) -> bool {
+ // The `ranges` are run-length encoded and of alternating initialization state.
+ // So if `ranges.len() > 1` then the second block is an initialized range.
+ !self.initial && self.ranges.len() == 1
+ }
+}
+
+/// Transferring the initialization mask to other allocations.
+impl<Prov, Extra> Allocation<Prov, Extra> {
+ /// Creates a run-length encoding of the initialization mask; panics if the range is empty.
+ ///
+ /// This is essentially a more space-efficient version of
+ /// `InitMask::range_as_init_chunks(...).collect::<Vec<_>>()`.
+ pub fn compress_uninit_range(&self, range: AllocRange) -> InitMaskCompressed {
+ // Since we are copying `size` bytes from `src` to `dest + i * size` (`for i in 0..repeat`),
+ // a naive initialization mask copying algorithm would repeatedly have to read the initialization mask from
+ // the source and write it to the destination. Even if we optimized the memory accesses,
+ // we'd be doing all of this `repeat` times.
+ // Therefore we precompute a compressed version of the initialization mask of the source value and
+ // then write it back `repeat` times without computing any more information from the source.
+
+ // A precomputed cache for ranges of initialized / uninitialized bits
+ // 0000010010001110 will become
+ // `[5, 1, 2, 1, 3, 3, 1]`,
+ // where each element toggles the state.
+
+ let mut ranges = smallvec::SmallVec::<[u64; 1]>::new();
+
+ let mut chunks = self.init_mask.range_as_init_chunks(range.start, range.end()).peekable();
+
+ let initial = chunks.peek().expect("range should be nonempty").is_init();
+
+ // Here we rely on `range_as_init_chunks` to yield alternating init/uninit chunks.
+ for chunk in chunks {
+ let len = chunk.range().end.bytes() - chunk.range().start.bytes();
+ ranges.push(len);
+ }
+
+ InitMaskCompressed { ranges, initial }
+ }
+
+ /// Applies multiple instances of the run-length encoding to the initialization mask.
+ ///
+ /// This is dangerous to use as it can violate internal `Allocation` invariants!
+ /// It only exists to support an efficient implementation of `mem_copy_repeatedly`.
+ pub fn mark_compressed_init_range(
+ &mut self,
+ defined: &InitMaskCompressed,
+ range: AllocRange,
+ repeat: u64,
+ ) {
+ // An optimization where we can just overwrite an entire range of initialization
+ // bits if they are going to be uniformly `1` or `0`.
+ if defined.ranges.len() <= 1 {
+ self.init_mask.set_range_inbounds(
+ range.start,
+ range.start + range.size * repeat, // `Size` operations
+ defined.initial,
+ );
+ return;
+ }
+
+ for mut j in 0..repeat {
+ j *= range.size.bytes();
+ j += range.start.bytes();
+ let mut cur = defined.initial;
+ for range in &defined.ranges {
+ let old_j = j;
+ j += range;
+ self.init_mask.set_range_inbounds(
+ Size::from_bytes(old_j),
+ Size::from_bytes(j),
+ cur,
+ );
+ cur = !cur;
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_middle/src/mir/interpret/error.rs b/compiler/rustc_middle/src/mir/interpret/error.rs
new file mode 100644
index 000000000..cecb55578
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/interpret/error.rs
@@ -0,0 +1,551 @@
+use super::{AllocId, AllocRange, ConstAlloc, Pointer, Scalar};
+
+use crate::mir::interpret::ConstValue;
+use crate::ty::{layout, query::TyCtxtAt, tls, Ty, ValTree};
+
+use rustc_data_structures::sync::Lock;
+use rustc_errors::{pluralize, struct_span_err, DiagnosticBuilder, ErrorGuaranteed};
+use rustc_macros::HashStable;
+use rustc_session::CtfeBacktrace;
+use rustc_span::def_id::DefId;
+use rustc_target::abi::{call, Align, Size};
+use std::{any::Any, backtrace::Backtrace, fmt};
+
+#[derive(Debug, Copy, Clone, PartialEq, Eq, HashStable, TyEncodable, TyDecodable)]
+pub enum ErrorHandled {
+ /// Already reported an error for this evaluation, and the compilation is
+ /// *guaranteed* to fail. Warnings/lints *must not* produce `Reported`.
+ Reported(ErrorGuaranteed),
+ /// Already emitted a lint for this evaluation.
+ Linted,
+ /// Don't emit an error, the evaluation failed because the MIR was generic
+ /// and the substs didn't fully monomorphize it.
+ TooGeneric,
+}
+
+impl From<ErrorGuaranteed> for ErrorHandled {
+ fn from(err: ErrorGuaranteed) -> ErrorHandled {
+ ErrorHandled::Reported(err)
+ }
+}
+
+TrivialTypeTraversalAndLiftImpls! {
+ ErrorHandled,
+}
+
+pub type EvalToAllocationRawResult<'tcx> = Result<ConstAlloc<'tcx>, ErrorHandled>;
+pub type EvalToConstValueResult<'tcx> = Result<ConstValue<'tcx>, ErrorHandled>;
+pub type EvalToValTreeResult<'tcx> = Result<Option<ValTree<'tcx>>, ErrorHandled>;
+
+pub fn struct_error<'tcx>(
+ tcx: TyCtxtAt<'tcx>,
+ msg: &str,
+) -> DiagnosticBuilder<'tcx, ErrorGuaranteed> {
+ struct_span_err!(tcx.sess, tcx.span, E0080, "{}", msg)
+}
+
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+static_assert_size!(InterpErrorInfo<'_>, 8);
+
+/// Packages the kind of error we got from the const code interpreter
+/// up with a Rust-level backtrace of where the error occurred.
+/// These should always be constructed by calling `.into()` on
+/// an `InterpError`. In `rustc_mir::interpret`, we have `throw_*`
+/// macros for this.
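+///
+/// A sketch of the intended construction (illustrative):
+///
+/// ```ignore (illustrative)
+/// let err: InterpErrorInfo<'_> = err_ub!(DeadLocal).into();
+/// assert!(matches!(err.kind(), InterpError::UndefinedBehavior(_)));
+/// ```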
+#[derive(Debug)]
+pub struct InterpErrorInfo<'tcx>(Box<InterpErrorInfoInner<'tcx>>);
+
+#[derive(Debug)]
+struct InterpErrorInfoInner<'tcx> {
+ kind: InterpError<'tcx>,
+ backtrace: Option<Box<Backtrace>>,
+}
+
+impl fmt::Display for InterpErrorInfo<'_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{}", self.0.kind)
+ }
+}
+
+impl<'tcx> InterpErrorInfo<'tcx> {
+ pub fn print_backtrace(&self) {
+ if let Some(backtrace) = self.0.backtrace.as_ref() {
+ print_backtrace(backtrace);
+ }
+ }
+
+ pub fn into_kind(self) -> InterpError<'tcx> {
+ let InterpErrorInfo(box InterpErrorInfoInner { kind, .. }) = self;
+ kind
+ }
+
+ #[inline]
+ pub fn kind(&self) -> &InterpError<'tcx> {
+ &self.0.kind
+ }
+}
+
+fn print_backtrace(backtrace: &Backtrace) {
+ eprintln!("\n\nAn error occurred in miri:\n{}", backtrace);
+}
+
+impl From<ErrorHandled> for InterpErrorInfo<'_> {
+ fn from(err: ErrorHandled) -> Self {
+ match err {
+ ErrorHandled::Reported(ErrorGuaranteed { .. }) | ErrorHandled::Linted => {
+ err_inval!(ReferencedConstant)
+ }
+ ErrorHandled::TooGeneric => err_inval!(TooGeneric),
+ }
+ .into()
+ }
+}
+
+impl From<ErrorGuaranteed> for InterpErrorInfo<'_> {
+ fn from(err: ErrorGuaranteed) -> Self {
+ InterpError::InvalidProgram(InvalidProgramInfo::AlreadyReported(err)).into()
+ }
+}
+
+impl<'tcx> From<InterpError<'tcx>> for InterpErrorInfo<'tcx> {
+ fn from(kind: InterpError<'tcx>) -> Self {
+ let capture_backtrace = tls::with_opt(|tcx| {
+ if let Some(tcx) = tcx {
+ *Lock::borrow(&tcx.sess.ctfe_backtrace)
+ } else {
+ CtfeBacktrace::Disabled
+ }
+ });
+
+ let backtrace = match capture_backtrace {
+ CtfeBacktrace::Disabled => None,
+ CtfeBacktrace::Capture => Some(Box::new(Backtrace::force_capture())),
+ CtfeBacktrace::Immediate => {
+ // Print it now.
+ let backtrace = Backtrace::force_capture();
+ print_backtrace(&backtrace);
+ None
+ }
+ };
+
+ InterpErrorInfo(Box::new(InterpErrorInfoInner { kind, backtrace }))
+ }
+}
+
+/// Error information for when the program we executed turned out not to actually be a valid
+/// program. This cannot happen in stand-alone Miri, but it can happen during CTFE/ConstProp
+/// where we work on generic code or execution does not have all information available.
+pub enum InvalidProgramInfo<'tcx> {
+ /// Resolution can fail if we are in a context that is too generic.
+ TooGeneric,
+ /// Cannot compute this constant because it depends on another one
+ /// which already produced an error.
+ ReferencedConstant,
+ /// Abort in case errors are already reported.
+ AlreadyReported(ErrorGuaranteed),
+ /// An error occurred during layout computation.
+ Layout(layout::LayoutError<'tcx>),
+ /// An error occurred during FnAbi computation: the passed --target lacks FFI support
+ /// (which unfortunately typeck does not reject).
+ /// Not using `FnAbiError` as that contains a nested `LayoutError`.
+ FnAbiAdjustForForeignAbi(call::AdjustForForeignAbiError),
+ /// SizeOf of unsized type was requested.
+ SizeOfUnsizedType(Ty<'tcx>),
+}
+
+impl fmt::Display for InvalidProgramInfo<'_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ use InvalidProgramInfo::*;
+ match self {
+ TooGeneric => write!(f, "encountered overly generic constant"),
+ ReferencedConstant => write!(f, "referenced constant has errors"),
+ AlreadyReported(ErrorGuaranteed { .. }) => {
+ write!(f, "encountered constants with type errors, stopping evaluation")
+ }
+ Layout(ref err) => write!(f, "{err}"),
+ FnAbiAdjustForForeignAbi(ref err) => write!(f, "{err}"),
+ SizeOfUnsizedType(ty) => write!(f, "size_of called on unsized type `{ty}`"),
+ }
+ }
+}
+
+/// Details of why a pointer had to be in-bounds.
+#[derive(Debug, Copy, Clone, TyEncodable, TyDecodable, HashStable)]
+pub enum CheckInAllocMsg {
+ /// We are dereferencing a pointer (i.e., creating a place).
+ DerefTest,
+ /// We are accessing memory.
+ MemoryAccessTest,
+ /// We are doing pointer arithmetic.
+ PointerArithmeticTest,
+ /// We are doing pointer offset_from.
+ OffsetFromTest,
+ /// None of the above -- generic/unspecific inbounds test.
+ InboundsTest,
+}
+
+impl fmt::Display for CheckInAllocMsg {
+ /// When this is printed as an error, the context looks like this:
+ /// "{msg}{pointer} is a dangling pointer".
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(
+ f,
+ "{}",
+ match *self {
+ CheckInAllocMsg::DerefTest => "dereferencing pointer failed: ",
+ CheckInAllocMsg::MemoryAccessTest => "memory access failed: ",
+ CheckInAllocMsg::PointerArithmeticTest => "out-of-bounds pointer arithmetic: ",
+ CheckInAllocMsg::OffsetFromTest => "out-of-bounds offset_from: ",
+ CheckInAllocMsg::InboundsTest => "out-of-bounds pointer use: ",
+ }
+ )
+ }
+}
+
+/// Details of an access to uninitialized bytes where it is not allowed.
+#[derive(Debug)]
+pub struct UninitBytesAccess {
+ /// Range of the original memory access.
+ pub access: AllocRange,
+ /// Range of the uninit memory that was encountered. (Might not be maximal.)
+ pub uninit: AllocRange,
+}
+
+/// Information about a size mismatch.
+#[derive(Debug)]
+pub struct ScalarSizeMismatch {
+ pub target_size: u64,
+ pub data_size: u64,
+}
+
+/// Error information for when the program caused Undefined Behavior.
+pub enum UndefinedBehaviorInfo {
+ /// Free-form case. Only for errors that are never caught!
+ Ub(String),
+ /// Unreachable code was executed.
+ Unreachable,
+ /// A slice/array index projection went out-of-bounds.
+ BoundsCheckFailed {
+ len: u64,
+ index: u64,
+ },
+ /// Something was divided by 0 (x / 0).
+ DivisionByZero,
+ /// Something was "remainded" by 0 (x % 0).
+ RemainderByZero,
+ /// Signed division overflowed (INT_MIN / -1).
+ DivisionOverflow,
+ /// Signed remainder overflowed (INT_MIN % -1).
+ RemainderOverflow,
+ /// Overflowing inbounds pointer arithmetic.
+ PointerArithOverflow,
+ /// Invalid metadata in a wide pointer (using `str` to avoid allocations).
+ InvalidMeta(&'static str),
+ /// Reading a C string that does not end within its allocation.
+ UnterminatedCString(Pointer),
+ /// Dereferencing a dangling pointer after it got freed.
+ PointerUseAfterFree(AllocId),
+ /// Used a pointer outside the bounds it is valid for.
+ /// (If `ptr_size > 0`, determines the size of the memory range that was expected to be in-bounds.)
+ PointerOutOfBounds {
+ alloc_id: AllocId,
+ alloc_size: Size,
+ ptr_offset: i64,
+ ptr_size: Size,
+ msg: CheckInAllocMsg,
+ },
+ /// Using an integer as a pointer in the wrong way.
+ DanglingIntPointer(u64, CheckInAllocMsg),
+ /// Used a pointer with bad alignment.
+ AlignmentCheckFailed {
+ required: Align,
+ has: Align,
+ },
+ /// Writing to read-only memory.
+ WriteToReadOnly(AllocId),
+ /// Trying to access the data behind a function pointer.
+ DerefFunctionPointer(AllocId),
+ /// Trying to access the data behind a vtable pointer.
+ DerefVTablePointer(AllocId),
+ /// The value validity check found a problem.
+ /// Should only be thrown by `validity.rs` and always point out which part of the value
+ /// is the problem.
+ ValidationFailure {
+ /// The "path" to the value in question, e.g. `.0[5].field` for a struct
+ /// field in the 6th element of an array that is the first element of a tuple.
+ path: Option<String>,
+ msg: String,
+ },
+ /// Using a non-boolean `u8` as bool.
+ InvalidBool(u8),
+ /// Using a non-character `u32` as character.
+ InvalidChar(u32),
+ /// The tag of an enum does not encode an actual discriminant.
+ InvalidTag(Scalar),
+ /// Using a pointer-not-to-a-function as function pointer.
+ InvalidFunctionPointer(Pointer),
+ /// Using a pointer-not-to-a-vtable as vtable pointer.
+ InvalidVTablePointer(Pointer),
+ /// Using a string that is not valid UTF-8.
+ InvalidStr(std::str::Utf8Error),
+ /// Using uninitialized data where it is not allowed.
+ InvalidUninitBytes(Option<(AllocId, UninitBytesAccess)>),
+ /// Working with a local that is not currently live.
+ DeadLocal,
+ /// Data size is not equal to target size.
+ ScalarSizeMismatch(ScalarSizeMismatch),
+ /// A discriminant of an uninhabited enum variant is written.
+ UninhabitedEnumVariantWritten,
+}
+
+impl fmt::Display for UndefinedBehaviorInfo {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ use UndefinedBehaviorInfo::*;
+ match self {
+ Ub(msg) => write!(f, "{msg}"),
+ Unreachable => write!(f, "entering unreachable code"),
+ BoundsCheckFailed { ref len, ref index } => {
+ write!(f, "indexing out of bounds: the len is {len} but the index is {index}")
+ }
+ DivisionByZero => write!(f, "dividing by zero"),
+ RemainderByZero => write!(f, "calculating the remainder with a divisor of zero"),
+ DivisionOverflow => write!(f, "overflow in signed division (dividing MIN by -1)"),
+ RemainderOverflow => write!(f, "overflow in signed remainder (dividing MIN by -1)"),
+ PointerArithOverflow => write!(f, "overflowing in-bounds pointer arithmetic"),
+ InvalidMeta(msg) => write!(f, "invalid metadata in wide pointer: {msg}"),
+ UnterminatedCString(p) => write!(
+ f,
+ "reading a null-terminated string starting at {p:?} with no null found before end of allocation",
+ ),
+ PointerUseAfterFree(a) => {
+ write!(f, "pointer to {a:?} was dereferenced after this allocation got freed")
+ }
+ PointerOutOfBounds { alloc_id, alloc_size, ptr_offset, ptr_size: Size::ZERO, msg } => {
+ write!(
+ f,
+ "{msg}{alloc_id:?} has size {alloc_size}, so pointer at offset {ptr_offset} is out-of-bounds",
+ alloc_size = alloc_size.bytes(),
+ )
+ }
+ PointerOutOfBounds { alloc_id, alloc_size, ptr_offset, ptr_size, msg } => write!(
+ f,
+ "{msg}{alloc_id:?} has size {alloc_size}, so pointer to {ptr_size} byte{ptr_size_p} starting at offset {ptr_offset} is out-of-bounds",
+ alloc_size = alloc_size.bytes(),
+ ptr_size = ptr_size.bytes(),
+ ptr_size_p = pluralize!(ptr_size.bytes()),
+ ),
+ DanglingIntPointer(i, msg) => {
+ write!(
+ f,
+ "{msg}{pointer} is a dangling pointer (it has no provenance)",
+ pointer = Pointer::<Option<AllocId>>::from_addr(*i),
+ )
+ }
+ AlignmentCheckFailed { required, has } => write!(
+ f,
+ "accessing memory with alignment {has}, but alignment {required} is required",
+ has = has.bytes(),
+ required = required.bytes()
+ ),
+ WriteToReadOnly(a) => write!(f, "writing to {a:?} which is read-only"),
+ DerefFunctionPointer(a) => write!(f, "accessing {a:?} which contains a function"),
+ DerefVTablePointer(a) => write!(f, "accessing {a:?} which contains a vtable"),
+ ValidationFailure { path: None, msg } => {
+ write!(f, "constructing invalid value: {msg}")
+ }
+ ValidationFailure { path: Some(path), msg } => {
+ write!(f, "constructing invalid value at {path}: {msg}")
+ }
+ InvalidBool(b) => {
+ write!(f, "interpreting an invalid 8-bit value as a bool: 0x{b:02x}")
+ }
+ InvalidChar(c) => {
+ write!(f, "interpreting an invalid 32-bit value as a char: 0x{c:08x}")
+ }
+ InvalidTag(val) => write!(f, "enum value has invalid tag: {val:x}"),
+ InvalidFunctionPointer(p) => {
+ write!(f, "using {p:?} as function pointer but it does not point to a function")
+ }
+ InvalidVTablePointer(p) => {
+ write!(f, "using {p:?} as vtable pointer but it does not point to a vtable")
+ }
+ InvalidStr(err) => write!(f, "this string is not valid UTF-8: {err}"),
+ InvalidUninitBytes(Some((alloc, info))) => write!(
+ f,
+ "reading memory at {alloc:?}{access:?}, \
+ but memory is uninitialized at {uninit:?}, \
+ and this operation requires initialized memory",
+ access = info.access,
+ uninit = info.uninit,
+ ),
+ InvalidUninitBytes(None) => write!(
+ f,
+ "using uninitialized data, but this operation requires initialized memory"
+ ),
+ DeadLocal => write!(f, "accessing a dead local variable"),
+ ScalarSizeMismatch(self::ScalarSizeMismatch { target_size, data_size }) => write!(
+ f,
+ "scalar size mismatch: expected {target_size} bytes but got {data_size} bytes instead",
+ ),
+ UninhabitedEnumVariantWritten => {
+ write!(f, "writing discriminant of an uninhabited enum")
+ }
+ }
+ }
+}
+
+/// Error information for when the program did something that might (or might not) be correct
+/// to do according to the Rust spec, but due to limitations in the interpreter, the
+/// operation could not be carried out. These limitations can differ between CTFE and the
+/// Miri engine, e.g., CTFE does not support dereferencing pointers at integral addresses.
+pub enum UnsupportedOpInfo {
+ /// Free-form case. Only for errors that are never caught!
+ Unsupported(String),
+ /// Encountered a pointer where we needed raw bytes.
+ ReadPointerAsBytes,
+ /// Overwriting parts of a pointer; the resulting state cannot be represented in our
+ /// `Allocation` data structure. See <https://github.com/rust-lang/miri/issues/2181>.
+ PartialPointerOverwrite(Pointer<AllocId>),
+ //
+ // The variants below are only reachable from CTFE/const prop; Miri will never emit them.
+ //
+ /// Accessing thread local statics
+ ThreadLocalStatic(DefId),
+ /// Accessing an unsupported extern static.
+ ReadExternStatic(DefId),
+}
+
+impl fmt::Display for UnsupportedOpInfo {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ use UnsupportedOpInfo::*;
+ match self {
+ Unsupported(ref msg) => write!(f, "{msg}"),
+ ReadPointerAsBytes => write!(f, "unable to turn pointer into raw bytes"),
+ PartialPointerOverwrite(ptr) => {
+ write!(f, "unable to overwrite parts of a pointer in memory at {ptr:?}")
+ }
+ ThreadLocalStatic(did) => write!(f, "cannot access thread local static ({did:?})"),
+ ReadExternStatic(did) => write!(f, "cannot read from extern static ({did:?})"),
+ }
+ }
+}
+
+/// Error information for when the program exhausted the resources granted to it
+/// by the interpreter.
+pub enum ResourceExhaustionInfo {
+ /// The stack grew too big.
+ StackFrameLimitReached,
+ /// The program ran for too long.
+ ///
+ /// The exact limit is set by the `const_eval_limit` attribute.
+ StepLimitReached,
+ /// There is not enough memory to perform an allocation.
+ MemoryExhausted,
+}
+
+impl fmt::Display for ResourceExhaustionInfo {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ use ResourceExhaustionInfo::*;
+ match self {
+ StackFrameLimitReached => {
+ write!(f, "reached the configured maximum number of stack frames")
+ }
+ StepLimitReached => {
+ write!(f, "exceeded interpreter step limit (see `#[const_eval_limit]`)")
+ }
+ MemoryExhausted => {
+ write!(f, "tried to allocate more memory than available to compiler")
+ }
+ }
+ }
+}
+
+/// A trait to work around not having trait object upcasting.
+pub trait AsAny: Any {
+ fn as_any(&self) -> &dyn Any;
+}
+impl<T: Any> AsAny for T {
+ #[inline(always)]
+ fn as_any(&self) -> &dyn Any {
+ self
+ }
+}
+
+/// A trait for machine-specific errors (or other "machine stop" conditions).
+pub trait MachineStopType: AsAny + fmt::Display + Send {
+ /// If `true`, emit a hard error instead of going through the `CONST_ERR` lint.
+ fn is_hard_err(&self) -> bool {
+ false
+ }
+}
+
+impl dyn MachineStopType {
+ #[inline(always)]
+ pub fn downcast_ref<T: Any>(&self) -> Option<&T> {
+ self.as_any().downcast_ref()
+ }
+}
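+
+// A sketch of a machine-specific stop condition (illustrative; `MyStop` is a
+// hypothetical type, not part of this crate):
+//
+// #[derive(Debug)]
+// struct MyStop;
+// impl fmt::Display for MyStop {
+//     fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+//         write!(f, "machine asked to stop")
+//     }
+// }
+// impl MachineStopType for MyStop {}
+//
+// let err: Box<dyn MachineStopType> = Box::new(MyStop);
+// assert!(err.downcast_ref::<MyStop>().is_some());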
+
+pub enum InterpError<'tcx> {
+ /// The program caused undefined behavior.
+ UndefinedBehavior(UndefinedBehaviorInfo),
+ /// The program did something the interpreter does not support (some of these *might* be UB
+ /// but the interpreter is not sure).
+ Unsupported(UnsupportedOpInfo),
+ /// The program was invalid (ill-typed, bad MIR, not sufficiently monomorphized, ...).
+ InvalidProgram(InvalidProgramInfo<'tcx>),
+ /// The program exhausted the interpreter's resources (stack/heap too big,
+ /// execution takes too long, ...).
+ ResourceExhaustion(ResourceExhaustionInfo),
+ /// Stop execution for a machine-controlled reason. This is never raised by
+ /// the core engine itself.
+ MachineStop(Box<dyn MachineStopType>),
+}
+
+pub type InterpResult<'tcx, T = ()> = Result<T, InterpErrorInfo<'tcx>>;
+
+impl fmt::Display for InterpError<'_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ use InterpError::*;
+ match *self {
+ Unsupported(ref msg) => write!(f, "{msg}"),
+ InvalidProgram(ref msg) => write!(f, "{msg}"),
+ UndefinedBehavior(ref msg) => write!(f, "{msg}"),
+ ResourceExhaustion(ref msg) => write!(f, "{msg}"),
+ MachineStop(ref msg) => write!(f, "{msg}"),
+ }
+ }
+}
+
+// Forward `Debug` to `Display`, so it does not look awful.
+impl fmt::Debug for InterpError<'_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(self, f)
+ }
+}
+
+impl InterpError<'_> {
+ /// Some errors do string formatting even if the error is never printed.
+ /// To avoid performance issues, there are places where we want to be sure these
+ /// formatting errors are never raised, so this method lets us detect them and `bug!`
+ /// on unexpected ones.
+ pub fn formatted_string(&self) -> bool {
+ matches!(
+ self,
+ InterpError::Unsupported(UnsupportedOpInfo::Unsupported(_))
+ | InterpError::UndefinedBehavior(UndefinedBehaviorInfo::ValidationFailure { .. })
+ | InterpError::UndefinedBehavior(UndefinedBehaviorInfo::Ub(_))
+ )
+ }
+
+ /// Should this error be reported as a hard error, preventing compilation, or a soft error,
+ /// causing a deny-by-default lint?
+ pub fn is_hard_err(&self) -> bool {
+ use InterpError::*;
+ match *self {
+ MachineStop(ref err) => err.is_hard_err(),
+ UndefinedBehavior(_) => true,
+ ResourceExhaustion(ResourceExhaustionInfo::MemoryExhausted) => true,
+ _ => false,
+ }
+ }
+}
diff --git a/compiler/rustc_middle/src/mir/interpret/mod.rs b/compiler/rustc_middle/src/mir/interpret/mod.rs
new file mode 100644
index 000000000..967f8ece1
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/interpret/mod.rs
@@ -0,0 +1,633 @@
+//! An interpreter for MIR used in CTFE and by miri.
+
+#[macro_export]
+macro_rules! err_unsup {
+ ($($tt:tt)*) => {
+ $crate::mir::interpret::InterpError::Unsupported(
+ $crate::mir::interpret::UnsupportedOpInfo::$($tt)*
+ )
+ };
+}
+
+#[macro_export]
+macro_rules! err_unsup_format {
+ ($($tt:tt)*) => { err_unsup!(Unsupported(format!($($tt)*))) };
+}
+
+#[macro_export]
+macro_rules! err_inval {
+ ($($tt:tt)*) => {
+ $crate::mir::interpret::InterpError::InvalidProgram(
+ $crate::mir::interpret::InvalidProgramInfo::$($tt)*
+ )
+ };
+}
+
+#[macro_export]
+macro_rules! err_ub {
+ ($($tt:tt)*) => {
+ $crate::mir::interpret::InterpError::UndefinedBehavior(
+ $crate::mir::interpret::UndefinedBehaviorInfo::$($tt)*
+ )
+ };
+}
+
+#[macro_export]
+macro_rules! err_ub_format {
+ ($($tt:tt)*) => { err_ub!(Ub(format!($($tt)*))) };
+}
+
+#[macro_export]
+macro_rules! err_exhaust {
+ ($($tt:tt)*) => {
+ $crate::mir::interpret::InterpError::ResourceExhaustion(
+ $crate::mir::interpret::ResourceExhaustionInfo::$($tt)*
+ )
+ };
+}
+
+#[macro_export]
+macro_rules! err_machine_stop {
+ ($($tt:tt)*) => {
+ $crate::mir::interpret::InterpError::MachineStop(Box::new($($tt)*))
+ };
+}
+
+// In the `throw_*` macros, avoid `return` to make them work with `try {}`.
+#[macro_export]
+macro_rules! throw_unsup {
+ ($($tt:tt)*) => { do yeet err_unsup!($($tt)*) };
+}
+
+#[macro_export]
+macro_rules! throw_unsup_format {
+ ($($tt:tt)*) => { throw_unsup!(Unsupported(format!($($tt)*))) };
+}
+
+#[macro_export]
+macro_rules! throw_inval {
+ ($($tt:tt)*) => { do yeet err_inval!($($tt)*) };
+}
+
+#[macro_export]
+macro_rules! throw_ub {
+ ($($tt:tt)*) => { do yeet err_ub!($($tt)*) };
+}
+
+#[macro_export]
+macro_rules! throw_ub_format {
+ ($($tt:tt)*) => { throw_ub!(Ub(format!($($tt)*))) };
+}
+
+#[macro_export]
+macro_rules! throw_exhaust {
+ ($($tt:tt)*) => { do yeet err_exhaust!($($tt)*) };
+}
+
+#[macro_export]
+macro_rules! throw_machine_stop {
+ ($($tt:tt)*) => { do yeet err_machine_stop!($($tt)*) };
+}
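+// Illustrative usage sketch (not part of this diff): inside an interpreter function
+// returning `InterpResult`, the `throw_*` macros `do yeet` the corresponding error.
+// The function and its parameters below are made-up names for the example.
+//
+//     fn check_align<'tcx>(offset: u64, align: u64) -> InterpResult<'tcx> {
+//         if offset % align != 0 {
+//             throw_ub_format!("pointer at offset {offset} is not aligned to {align} bytes");
+//         }
+//         Ok(())
+//     }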
+
+mod allocation;
+mod error;
+mod pointer;
+mod queries;
+mod value;
+
+use std::convert::TryFrom;
+use std::fmt;
+use std::io;
+use std::io::{Read, Write};
+use std::num::{NonZeroU32, NonZeroU64};
+use std::sync::atomic::{AtomicU32, Ordering};
+
+use rustc_ast::LitKind;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::sync::{HashMapExt, Lock};
+use rustc_data_structures::tiny_list::TinyList;
+use rustc_hir::def_id::DefId;
+use rustc_macros::HashStable;
+use rustc_middle::ty::print::with_no_trimmed_paths;
+use rustc_serialize::{Decodable, Encodable};
+use rustc_target::abi::Endian;
+
+use crate::mir;
+use crate::ty::codec::{TyDecoder, TyEncoder};
+use crate::ty::subst::GenericArgKind;
+use crate::ty::{self, Instance, Ty, TyCtxt};
+
+pub use self::error::{
+ struct_error, CheckInAllocMsg, ErrorHandled, EvalToAllocationRawResult, EvalToConstValueResult,
+ EvalToValTreeResult, InterpError, InterpErrorInfo, InterpResult, InvalidProgramInfo,
+ MachineStopType, ResourceExhaustionInfo, ScalarSizeMismatch, UndefinedBehaviorInfo,
+ UninitBytesAccess, UnsupportedOpInfo,
+};
+
+pub use self::value::{get_slice_bytes, ConstAlloc, ConstValue, Scalar, ScalarMaybeUninit};
+
+pub use self::allocation::{
+ alloc_range, AllocRange, Allocation, ConstAllocation, InitChunk, InitChunkIter, InitMask,
+ Relocations,
+};
+
+pub use self::pointer::{Pointer, PointerArithmetic, Provenance};
+
+/// Uniquely identifies one of the following:
+/// - A constant
+/// - A static
+#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, TyEncodable, TyDecodable)]
+#[derive(HashStable, Lift)]
+pub struct GlobalId<'tcx> {
+ /// For a constant or static, the `Instance` of the item itself.
+ /// For a promoted global, the `Instance` of the function they belong to.
+ pub instance: ty::Instance<'tcx>,
+
+ /// The index for promoted globals within their function's `mir::Body`.
+ pub promoted: Option<mir::Promoted>,
+}
+
+impl<'tcx> GlobalId<'tcx> {
+ pub fn display(self, tcx: TyCtxt<'tcx>) -> String {
+ let instance_name = with_no_trimmed_paths!(tcx.def_path_str(self.instance.def.def_id()));
+ if let Some(promoted) = self.promoted {
+ format!("{}::{:?}", instance_name, promoted)
+ } else {
+ instance_name
+ }
+ }
+}
+
+/// Input argument for `tcx.lit_to_const`.
+#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, HashStable)]
+pub struct LitToConstInput<'tcx> {
+ /// The absolute value of the resultant constant.
+ pub lit: &'tcx LitKind,
+ /// The type of the constant.
+ pub ty: Ty<'tcx>,
+ /// If the constant is negative.
+ pub neg: bool,
+}
+
+/// Error type for `tcx.lit_to_const`.
+#[derive(Copy, Clone, Debug, Eq, PartialEq, HashStable)]
+pub enum LitToConstError {
+ /// The literal's inferred type did not match the expected `ty` in the input.
+ /// This is used for graceful error handling (`delay_span_bug`) in
+ /// type checking (`Const::from_anon_const`).
+ TypeError,
+ Reported,
+}
+
+#[derive(Copy, Clone, Eq, Hash, Ord, PartialEq, PartialOrd)]
+pub struct AllocId(pub NonZeroU64);
+
+// We want the `Debug` output to be readable as it is used by `derive(Debug)` for
+// all the Miri types.
+impl fmt::Debug for AllocId {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ if f.alternate() { write!(f, "a{}", self.0) } else { write!(f, "alloc{}", self.0) }
+ }
+}
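+// E.g., an `AllocId` wrapping 42 prints as `alloc42` with `{:?}` and as the shorter
+// `a42` with the alternate form `{:#?}`.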
+
+// No "Display" since AllocIds are not usually user-visible.
+
+#[derive(TyDecodable, TyEncodable)]
+enum AllocDiscriminant {
+ Alloc,
+ Fn,
+ VTable,
+ Static,
+}
+
+pub fn specialized_encode_alloc_id<'tcx, E: TyEncoder<I = TyCtxt<'tcx>>>(
+ encoder: &mut E,
+ tcx: TyCtxt<'tcx>,
+ alloc_id: AllocId,
+) {
+ match tcx.global_alloc(alloc_id) {
+ GlobalAlloc::Memory(alloc) => {
+ trace!("encoding {:?} with {:#?}", alloc_id, alloc);
+ AllocDiscriminant::Alloc.encode(encoder);
+ alloc.encode(encoder);
+ }
+ GlobalAlloc::Function(fn_instance) => {
+ trace!("encoding {:?} with {:#?}", alloc_id, fn_instance);
+ AllocDiscriminant::Fn.encode(encoder);
+ fn_instance.encode(encoder);
+ }
+ GlobalAlloc::VTable(ty, poly_trait_ref) => {
+ trace!("encoding {:?} with {ty:#?}, {poly_trait_ref:#?}", alloc_id);
+ AllocDiscriminant::VTable.encode(encoder);
+ ty.encode(encoder);
+ poly_trait_ref.encode(encoder);
+ }
+ GlobalAlloc::Static(did) => {
+ assert!(!tcx.is_thread_local_static(did));
+ // References to statics don't need to know about their allocations,
+ // just about their `DefId`.
+ AllocDiscriminant::Static.encode(encoder);
+ did.encode(encoder);
+ }
+ }
+}
+
+// Used to avoid infinite recursion when decoding cyclic allocations.
+type DecodingSessionId = NonZeroU32;
+
+#[derive(Clone)]
+enum State {
+ Empty,
+ InProgressNonAlloc(TinyList<DecodingSessionId>),
+ InProgress(TinyList<DecodingSessionId>, AllocId),
+ Done(AllocId),
+}
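+// Rough per-slot lifecycle, as a sketch: `Empty` moves to `InProgress` for `Alloc`
+// entries (an `AllocId` is reserved up front so cyclic data can refer back to it)
+// or to `InProgressNonAlloc` for fn/static/vtable entries, and ends in `Done`.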
+
+pub struct AllocDecodingState {
+ // For each `AllocId`, we keep track of which decoding state it's currently in.
+ decoding_state: Vec<Lock<State>>,
+ // The offsets of each allocation in the data stream.
+ data_offsets: Vec<u32>,
+}
+
+impl AllocDecodingState {
+ #[inline]
+ pub fn new_decoding_session(&self) -> AllocDecodingSession<'_> {
+ static DECODER_SESSION_ID: AtomicU32 = AtomicU32::new(0);
+ let counter = DECODER_SESSION_ID.fetch_add(1, Ordering::SeqCst);
+
+ // Make sure this is never zero.
+ let session_id = DecodingSessionId::new((counter & 0x7FFFFFFF) + 1).unwrap();
+
+ AllocDecodingSession { state: self, session_id }
+ }
+
+ pub fn new(data_offsets: Vec<u32>) -> Self {
+ let decoding_state = vec![Lock::new(State::Empty); data_offsets.len()];
+
+ Self { decoding_state, data_offsets }
+ }
+}
+
+#[derive(Copy, Clone)]
+pub struct AllocDecodingSession<'s> {
+ state: &'s AllocDecodingState,
+ session_id: DecodingSessionId,
+}
+
+impl<'s> AllocDecodingSession<'s> {
+ /// Decodes an `AllocId` in a thread-safe way.
+ pub fn decode_alloc_id<'tcx, D>(&self, decoder: &mut D) -> AllocId
+ where
+ D: TyDecoder<I = TyCtxt<'tcx>>,
+ {
+ // Read the index of the allocation.
+ let idx = usize::try_from(decoder.read_u32()).unwrap();
+ let pos = usize::try_from(self.state.data_offsets[idx]).unwrap();
+
+ // Decode the `AllocDiscriminant` now so that we know if we have to reserve an
+ // `AllocId`.
+ let (alloc_kind, pos) = decoder.with_position(pos, |decoder| {
+ let alloc_kind = AllocDiscriminant::decode(decoder);
+ (alloc_kind, decoder.position())
+ });
+
+ // Check the decoding state to see if it's already decoded or if we should
+ // decode it here.
+ let alloc_id = {
+ let mut entry = self.state.decoding_state[idx].lock();
+
+ match *entry {
+ State::Done(alloc_id) => {
+ return alloc_id;
+ }
+ ref mut entry @ State::Empty => {
+ // We are allowed to decode.
+ match alloc_kind {
+ AllocDiscriminant::Alloc => {
+ // If this is an allocation, we need to reserve an
+ // `AllocId` so we can decode cyclic graphs.
+ let alloc_id = decoder.interner().reserve_alloc_id();
+ *entry =
+ State::InProgress(TinyList::new_single(self.session_id), alloc_id);
+ Some(alloc_id)
+ }
+ AllocDiscriminant::Fn
+ | AllocDiscriminant::Static
+ | AllocDiscriminant::VTable => {
+ // Fns, statics, and vtables cannot be cyclic, and their `AllocId`
+ // is determined later by interning.
+ *entry =
+ State::InProgressNonAlloc(TinyList::new_single(self.session_id));
+ None
+ }
+ }
+ }
+ State::InProgressNonAlloc(ref mut sessions) => {
+ if sessions.contains(&self.session_id) {
+ bug!("this should be unreachable");
+ } else {
+ // Start decoding concurrently.
+ sessions.insert(self.session_id);
+ None
+ }
+ }
+ State::InProgress(ref mut sessions, alloc_id) => {
+ if sessions.contains(&self.session_id) {
+ // Don't recurse.
+ return alloc_id;
+ } else {
+ // Start decoding concurrently.
+ sessions.insert(self.session_id);
+ Some(alloc_id)
+ }
+ }
+ }
+ };
+
+ // Now decode the actual data.
+ let alloc_id = decoder.with_position(pos, |decoder| {
+ match alloc_kind {
+ AllocDiscriminant::Alloc => {
+ let alloc = <ConstAllocation<'tcx> as Decodable<_>>::decode(decoder);
+ // We already have a reserved `AllocId`.
+ let alloc_id = alloc_id.unwrap();
+ trace!("decoded alloc {:?}: {:#?}", alloc_id, alloc);
+ decoder.interner().set_alloc_id_same_memory(alloc_id, alloc);
+ alloc_id
+ }
+ AllocDiscriminant::Fn => {
+ assert!(alloc_id.is_none());
+ trace!("creating fn alloc ID");
+ let instance = ty::Instance::decode(decoder);
+ trace!("decoded fn alloc instance: {:?}", instance);
+ let alloc_id = decoder.interner().create_fn_alloc(instance);
+ alloc_id
+ }
+ AllocDiscriminant::VTable => {
+ assert!(alloc_id.is_none());
+ trace!("creating vtable alloc ID");
+ let ty = <Ty<'_> as Decodable<D>>::decode(decoder);
+ let poly_trait_ref =
+ <Option<ty::PolyExistentialTraitRef<'_>> as Decodable<D>>::decode(decoder);
+ trace!("decoded vtable alloc instance: {ty:?}, {poly_trait_ref:?}");
+ let alloc_id = decoder.interner().create_vtable_alloc(ty, poly_trait_ref);
+ alloc_id
+ }
+ AllocDiscriminant::Static => {
+ assert!(alloc_id.is_none());
+ trace!("creating extern static alloc ID");
+ let did = <DefId as Decodable<D>>::decode(decoder);
+ trace!("decoded static def-ID: {:?}", did);
+ let alloc_id = decoder.interner().create_static_alloc(did);
+ alloc_id
+ }
+ }
+ });
+
+ self.state.decoding_state[idx].with_lock(|entry| {
+ *entry = State::Done(alloc_id);
+ });
+
+ alloc_id
+ }
+}
+
+/// An allocation in the global (tcx-managed) memory can be either a function pointer,
+/// a vtable, a static, or a "real" allocation with some data in it.
+#[derive(Debug, Clone, Eq, PartialEq, Hash, TyDecodable, TyEncodable, HashStable)]
+pub enum GlobalAlloc<'tcx> {
+ /// The alloc ID is used as a function pointer.
+ Function(Instance<'tcx>),
+ /// This alloc ID points to a symbolic (not-reified) vtable.
+ VTable(Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>),
+ /// The alloc ID points to a "lazy" static variable that did not get computed (yet).
+ /// This is also used to break the cycle in recursive statics.
+ Static(DefId),
+ /// The alloc ID points to memory.
+ Memory(ConstAllocation<'tcx>),
+}
+
+impl<'tcx> GlobalAlloc<'tcx> {
+ /// Panics if the `GlobalAlloc` does not refer to a `GlobalAlloc::Memory`
+ #[track_caller]
+ #[inline]
+ pub fn unwrap_memory(&self) -> ConstAllocation<'tcx> {
+ match *self {
+ GlobalAlloc::Memory(mem) => mem,
+ _ => bug!("expected memory, got {:?}", self),
+ }
+ }
+
+ /// Panics if the `GlobalAlloc` is not `GlobalAlloc::Function`
+ #[track_caller]
+ #[inline]
+ pub fn unwrap_fn(&self) -> Instance<'tcx> {
+ match *self {
+ GlobalAlloc::Function(instance) => instance,
+ _ => bug!("expected function, got {:?}", self),
+ }
+ }
+
+ /// Panics if the `GlobalAlloc` is not `GlobalAlloc::VTable`
+ #[track_caller]
+ #[inline]
+ pub fn unwrap_vtable(&self) -> (Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>) {
+ match *self {
+ GlobalAlloc::VTable(ty, poly_trait_ref) => (ty, poly_trait_ref),
+ _ => bug!("expected vtable, got {:?}", self),
+ }
+ }
+}
+
+pub(crate) struct AllocMap<'tcx> {
+ /// Maps `AllocId`s to their corresponding allocations.
+ alloc_map: FxHashMap<AllocId, GlobalAlloc<'tcx>>,
+
+ /// Used to ensure that statics and functions only get one associated `AllocId`.
+ /// Should never contain a `GlobalAlloc::Memory`!
+ //
+ // FIXME: Should we just have two separate dedup maps, one each for statics and functions?
+ dedup: FxHashMap<GlobalAlloc<'tcx>, AllocId>,
+
+ /// The `AllocId` to assign to the next requested ID.
+ /// Always incremented; never gets smaller.
+ next_id: AllocId,
+}
+
+impl<'tcx> AllocMap<'tcx> {
+ pub(crate) fn new() -> Self {
+ AllocMap {
+ alloc_map: Default::default(),
+ dedup: Default::default(),
+ next_id: AllocId(NonZeroU64::new(1).unwrap()),
+ }
+ }
+ fn reserve(&mut self) -> AllocId {
+ let next = self.next_id;
+ self.next_id.0 = self.next_id.0.checked_add(1).expect(
+ "You overflowed a u64 by incrementing by 1... \
+ You've just earned yourself a free drink if we ever meet. \
+ Seriously, how did you do that?!",
+ );
+ next
+ }
+}
+
+impl<'tcx> TyCtxt<'tcx> {
+ /// Obtains a new allocation ID that can be referenced but does not
+ /// yet have an allocation backing it.
+ ///
+ /// Make sure to call `set_alloc_id_memory` or `set_alloc_id_same_memory` before returning such
+ /// an `AllocId` from a query.
+ pub fn reserve_alloc_id(self) -> AllocId {
+ self.alloc_map.lock().reserve()
+ }
+
+ /// Reserves a new ID *if* this allocation has not been dedup-reserved before.
+ /// Should only be used for "symbolic" allocations (function pointers, vtables, statics); we
+ /// don't want to dedup IDs for "real" memory!
+ fn reserve_and_set_dedup(self, alloc: GlobalAlloc<'tcx>) -> AllocId {
+ let mut alloc_map = self.alloc_map.lock();
+ match alloc {
+ GlobalAlloc::Function(..) | GlobalAlloc::Static(..) | GlobalAlloc::VTable(..) => {}
+ GlobalAlloc::Memory(..) => bug!("Trying to dedup-reserve memory with real data!"),
+ }
+ if let Some(&alloc_id) = alloc_map.dedup.get(&alloc) {
+ return alloc_id;
+ }
+ let id = alloc_map.reserve();
+ debug!("creating alloc {alloc:?} with id {id:?}");
+ alloc_map.alloc_map.insert(id, alloc.clone());
+ alloc_map.dedup.insert(alloc, id);
+ id
+ }
+
+ /// Generates an `AllocId` for a static, or returns a cached one if this function has been
+ /// called on the same static before.
+ pub fn create_static_alloc(self, static_id: DefId) -> AllocId {
+ self.reserve_and_set_dedup(GlobalAlloc::Static(static_id))
+ }
+
+ /// Generates an `AllocId` for a function. Depending on the function type,
+ /// this might get deduplicated or assigned a new ID each time.
+ pub fn create_fn_alloc(self, instance: Instance<'tcx>) -> AllocId {
+ // Functions cannot be identified by pointers, as asm-equal functions can get deduplicated
+ // by the linker (we set the "unnamed_addr" attribute for LLVM) and functions can be
+ // duplicated across crates.
+ // We thus generate a new `AllocId` for every mention of a function. This means that
+ // `main as fn() == main as fn()` is false, while `let x = main as fn(); x == x` is true.
+ // However, formatting code relies on function identity (see #58320), so we only do
+ // this for generic functions. Lifetime parameters are ignored.
+ let is_generic = instance
+ .substs
+ .into_iter()
+ .any(|kind| !matches!(kind.unpack(), GenericArgKind::Lifetime(_)));
+ if is_generic {
+ // Get a fresh ID.
+ let mut alloc_map = self.alloc_map.lock();
+ let id = alloc_map.reserve();
+ alloc_map.alloc_map.insert(id, GlobalAlloc::Function(instance));
+ id
+ } else {
+ // Deduplicate.
+ self.reserve_and_set_dedup(GlobalAlloc::Function(instance))
+ }
+ }
+
+ /// Generates an `AllocId` for a (symbolic, not-reified) vtable. Will get deduplicated.
+ pub fn create_vtable_alloc(
+ self,
+ ty: Ty<'tcx>,
+ poly_trait_ref: Option<ty::PolyExistentialTraitRef<'tcx>>,
+ ) -> AllocId {
+ self.reserve_and_set_dedup(GlobalAlloc::VTable(ty, poly_trait_ref))
+ }
+
+ /// Interns the `Allocation` and returns a new `AllocId`, even if there's already an identical
+ /// `Allocation` with a different `AllocId`.
+ /// Statics with identical content will still point to the same `Allocation`, i.e.,
+ /// their data will be deduplicated through `Allocation` interning -- but they
+ /// are different places in memory and as such need different IDs.
+ pub fn create_memory_alloc(self, mem: ConstAllocation<'tcx>) -> AllocId {
+ let id = self.reserve_alloc_id();
+ self.set_alloc_id_memory(id, mem);
+ id
+ }
+
+ /// Returns `None` in case the `AllocId` is dangling. An `InterpretCx` can still have a
+ /// local `Allocation` for that `AllocId`, but having such an `AllocId` in a constant is
+ /// illegal and will likely ICE.
+ /// This function exists to allow const eval to detect the difference between evaluation-
+ /// local dangling pointers and allocations in constants/statics.
+ #[inline]
+ pub fn try_get_global_alloc(self, id: AllocId) -> Option<GlobalAlloc<'tcx>> {
+ self.alloc_map.lock().alloc_map.get(&id).cloned()
+ }
+
+ #[inline]
+ #[track_caller]
+ /// Panics in case the `AllocId` is dangling. Since that is impossible for `AllocId`s in
+ /// constants (as all constants must pass interning and validation that check for dangling
+ /// ids), this function is frequently used throughout rustc, but should not be used within
+ /// the miri engine.
+ pub fn global_alloc(self, id: AllocId) -> GlobalAlloc<'tcx> {
+ match self.try_get_global_alloc(id) {
+ Some(alloc) => alloc,
+ None => bug!("could not find allocation for {id:?}"),
+ }
+ }
+
+ /// Freezes an `AllocId` created with `reserve` by pointing it at an `Allocation`. Trying to
+ /// call this function twice, even with the same `Allocation`, will ICE the compiler.
+ pub fn set_alloc_id_memory(self, id: AllocId, mem: ConstAllocation<'tcx>) {
+ if let Some(old) = self.alloc_map.lock().alloc_map.insert(id, GlobalAlloc::Memory(mem)) {
+ bug!("tried to set allocation ID {id:?}, but it was already existing as {old:#?}");
+ }
+ }
+
+ /// Freezes an `AllocId` created with `reserve` by pointing it at an `Allocation`. May be called
+ /// twice for the same `(AllocId, Allocation)` pair.
+ fn set_alloc_id_same_memory(self, id: AllocId, mem: ConstAllocation<'tcx>) {
+ self.alloc_map.lock().alloc_map.insert_same(id, GlobalAlloc::Memory(mem));
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Methods to access integers in the target endianness
+////////////////////////////////////////////////////////////////////////////////
+
+#[inline]
+pub fn write_target_uint(
+ endianness: Endian,
+ mut target: &mut [u8],
+ data: u128,
+) -> Result<(), io::Error> {
+ // This u128 holds an "any-size uint" (since smaller uints can fit in it),
+ // so we do not write all bytes of the u128, just the "payload".
+ match endianness {
+ Endian::Little => target.write(&data.to_le_bytes())?,
+ Endian::Big => target.write(&data.to_be_bytes()[16 - target.len()..])?,
+ };
+ debug_assert!(target.len() == 0); // We should have filled the target buffer.
+ Ok(())
+}
+
+#[inline]
+pub fn read_target_uint(endianness: Endian, mut source: &[u8]) -> Result<u128, io::Error> {
+ // This u128 holds an "any-size uint" (since smaller uints can fit in it).
+ let mut buf = [0u8; std::mem::size_of::<u128>()];
+ // So we do not read exactly 16 bytes into the u128, just the "payload".
+ let uint = match endianness {
+ Endian::Little => {
+ source.read(&mut buf)?;
+ Ok(u128::from_le_bytes(buf))
+ }
+ Endian::Big => {
+ source.read(&mut buf[16 - source.len()..])?;
+ Ok(u128::from_be_bytes(buf))
+ }
+ };
+ debug_assert!(source.len() == 0); // We should have consumed the source buffer.
+ uint
+}
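+// Worked example (sketch): writing `0x0102u128` into a 2-byte buffer stores
+// `[0x02, 0x01]` on little-endian targets (the leading LE bytes fill the buffer)
+// and `[0x01, 0x02]` on big-endian targets (only the last `target.len()` bytes of
+// the BE representation are written). Reading reverses this.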
diff --git a/compiler/rustc_middle/src/mir/interpret/pointer.rs b/compiler/rustc_middle/src/mir/interpret/pointer.rs
new file mode 100644
index 000000000..384954cbb
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/interpret/pointer.rs
@@ -0,0 +1,307 @@
+use super::{AllocId, InterpResult};
+
+use rustc_macros::HashStable;
+use rustc_target::abi::{HasDataLayout, Size};
+
+use std::convert::{TryFrom, TryInto};
+use std::fmt;
+
+////////////////////////////////////////////////////////////////////////////////
+// Pointer arithmetic
+////////////////////////////////////////////////////////////////////////////////
+
+pub trait PointerArithmetic: HasDataLayout {
+ // These are not supposed to be overridden.
+
+ #[inline(always)]
+ fn pointer_size(&self) -> Size {
+ self.data_layout().pointer_size
+ }
+
+ #[inline(always)]
+ fn max_size_of_val(&self) -> Size {
+ Size::from_bytes(self.machine_isize_max())
+ }
+
+ #[inline]
+ fn machine_usize_max(&self) -> u64 {
+ self.pointer_size().unsigned_int_max().try_into().unwrap()
+ }
+
+ #[inline]
+ fn machine_isize_min(&self) -> i64 {
+ self.pointer_size().signed_int_min().try_into().unwrap()
+ }
+
+ #[inline]
+ fn machine_isize_max(&self) -> i64 {
+ self.pointer_size().signed_int_max().try_into().unwrap()
+ }
+
+ #[inline]
+ fn machine_usize_to_isize(&self, val: u64) -> i64 {
+ let val = val as i64;
+ // Now wrap-around into the machine_isize range.
+ if val > self.machine_isize_max() {
+ // This can only happen if the ptr size is < 64, so we know max_usize_plus_1 fits into
+ // i64.
+ debug_assert!(self.pointer_size().bits() < 64);
+ let max_usize_plus_1 = 1u128 << self.pointer_size().bits();
+ val - i64::try_from(max_usize_plus_1).unwrap()
+ } else {
+ val
+ }
+ }
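+ // Worked example (sketch): on a hypothetical 16-bit target,
+ // `machine_usize_to_isize(0xFFFF)` exceeds `machine_isize_max()` (0x7FFF),
+ // so it wraps around by `1 << 16` and returns -1.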
+
+ /// Helper function: truncate given value-"overflowed flag" pair to pointer size and
+ /// update "overflowed flag" if there was an overflow.
+ /// This should be called by all the other methods before returning!
+ #[inline]
+ fn truncate_to_ptr(&self, (val, over): (u64, bool)) -> (u64, bool) {
+ let val = u128::from(val);
+ let max_ptr_plus_1 = 1u128 << self.pointer_size().bits();
+ (u64::try_from(val % max_ptr_plus_1).unwrap(), over || val >= max_ptr_plus_1)
+ }
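+ // Worked example (sketch): with 32-bit pointers,
+ // `truncate_to_ptr((0x1_0000_0005, false))` returns `(5, true)`: the value is
+ // reduced mod 2^32 and the overflow flag is set.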
+
+ #[inline]
+ fn overflowing_offset(&self, val: u64, i: u64) -> (u64, bool) {
+ // We do not need to check if i fits in a machine usize. If it doesn't,
+ // either the wrapping_add will wrap or res will not fit in a pointer.
+ let res = val.overflowing_add(i);
+ self.truncate_to_ptr(res)
+ }
+
+ #[inline]
+ fn overflowing_signed_offset(&self, val: u64, i: i64) -> (u64, bool) {
+ // We need to make sure that i fits in a machine isize.
+ let n = i.unsigned_abs();
+ if i >= 0 {
+ let (val, over) = self.overflowing_offset(val, n);
+ (val, over || i > self.machine_isize_max())
+ } else {
+ let res = val.overflowing_sub(n);
+ let (val, over) = self.truncate_to_ptr(res);
+ (val, over || i < self.machine_isize_min())
+ }
+ }
+
+ #[inline]
+ fn offset<'tcx>(&self, val: u64, i: u64) -> InterpResult<'tcx, u64> {
+ let (res, over) = self.overflowing_offset(val, i);
+ if over { throw_ub!(PointerArithOverflow) } else { Ok(res) }
+ }
+
+ #[inline]
+ fn signed_offset<'tcx>(&self, val: u64, i: i64) -> InterpResult<'tcx, u64> {
+ let (res, over) = self.overflowing_signed_offset(val, i);
+ if over { throw_ub!(PointerArithOverflow) } else { Ok(res) }
+ }
+}
+
+impl<T: HasDataLayout> PointerArithmetic for T {}
+
+/// This trait abstracts over the kind of provenance that is associated with a `Pointer`. It is
+/// mostly opaque; the `Machine` trait extends it with some more operations that also have access to
+/// some global state.
+/// We don't actually care about this `Debug` bound (we use `Provenance::fmt` to format the entire
+/// pointer), but `derive` adds some unnecessary bounds.
+pub trait Provenance: Copy + fmt::Debug {
+ /// Says whether the `offset` field of `Pointer`s with this provenance is the actual physical address.
+/// If `true`, ptr-to-int casts work by simply discarding the provenance.
+ /// If `false`, ptr-to-int casts are not supported. The offset *must* be relative in that case.
+ const OFFSET_IS_ADDR: bool;
+
+ /// We also use this trait to control whether to abort execution when a pointer is being partially overwritten
+ /// (this avoids a separate trait in `allocation.rs` just for this purpose).
+ const ERR_ON_PARTIAL_PTR_OVERWRITE: bool;
+
+ /// Determines how a pointer should be printed.
+ fn fmt(ptr: &Pointer<Self>, f: &mut fmt::Formatter<'_>) -> fmt::Result
+ where
+ Self: Sized;
+
+ /// If `OFFSET_IS_ADDR == false`, provenance must always be able to
+ /// identify the allocation this ptr points to (i.e., this must return `Some`).
+ /// Otherwise this function is best-effort (but must agree with `Machine::ptr_get_alloc`).
+ /// (Identifying the offset in that allocation, however, is harder -- use `Memory::ptr_get_alloc` for that.)
+ fn get_alloc_id(self) -> Option<AllocId>;
+}
+
+impl Provenance for AllocId {
+ // With the `AllocId` as provenance, the `offset` is interpreted *relative to the allocation*,
+ // so ptr-to-int casts are not possible (since we do not know the global physical offset).
+ const OFFSET_IS_ADDR: bool = false;
+
+ // For now, do not allow this, so that we keep our options open.
+ const ERR_ON_PARTIAL_PTR_OVERWRITE: bool = true;
+
+ fn fmt(ptr: &Pointer<Self>, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // Forward `alternate` flag to `alloc_id` printing.
+ if f.alternate() {
+ write!(f, "{:#?}", ptr.provenance)?;
+ } else {
+ write!(f, "{:?}", ptr.provenance)?;
+ }
+ // Print offset only if it is non-zero.
+ if ptr.offset.bytes() > 0 {
+ write!(f, "+{:#x}", ptr.offset.bytes())?;
+ }
+ Ok(())
+ }
+
+ fn get_alloc_id(self) -> Option<AllocId> {
+ Some(self)
+ }
+}
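+// E.g., a pointer 4 bytes into allocation 7 formats as `alloc7+0x4`; at offset 0
+// it prints as just `alloc7`.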
+
+/// Represents a pointer in the Miri engine.
+///
+/// Pointers are "tagged" with provenance information; typically the `AllocId` they belong to.
+#[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd, TyEncodable, TyDecodable, Hash)]
+#[derive(HashStable)]
+pub struct Pointer<Prov = AllocId> {
+ pub(super) offset: Size, // kept private to avoid accidental misinterpretation (meaning depends on `Prov` type)
+ pub provenance: Prov,
+}
+
+static_assert_size!(Pointer, 16);
+// `Option<Prov>` pointers are also passed around quite a bit
+// (but not stored in permanent machine state).
+static_assert_size!(Pointer<Option<AllocId>>, 16);
+
+// We want the `Debug` output to be readable as it is used by `derive(Debug)` for
+// all the Miri types.
+impl<Prov: Provenance> fmt::Debug for Pointer<Prov> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ Provenance::fmt(self, f)
+ }
+}
+
+impl<Prov: Provenance> fmt::Debug for Pointer<Option<Prov>> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self.provenance {
+ Some(prov) => Provenance::fmt(&Pointer::new(prov, self.offset), f),
+ None => write!(f, "{:#x}[noalloc]", self.offset.bytes()),
+ }
+ }
+}
+
+impl<Prov: Provenance> fmt::Display for Pointer<Option<Prov>> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ if self.provenance.is_none() && self.offset.bytes() == 0 {
+ write!(f, "null pointer")
+ } else {
+ fmt::Debug::fmt(self, f)
+ }
+ }
+}
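+// E.g., with `AllocId` provenance: `Some(alloc7)` at offset 4 debug-prints as
+// `alloc7+0x4`, a provenance-free pointer at address 0x10 prints as `0x10[noalloc]`,
+// and the all-zero pointer without provenance displays as `null pointer`.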
+
+/// Produces a `Pointer` that points to the beginning of the `Allocation`.
+impl From<AllocId> for Pointer {
+ #[inline(always)]
+ fn from(alloc_id: AllocId) -> Self {
+ Pointer::new(alloc_id, Size::ZERO)
+ }
+}
+
+impl<Prov> From<Pointer<Prov>> for Pointer<Option<Prov>> {
+ #[inline(always)]
+ fn from(ptr: Pointer<Prov>) -> Self {
+ let (prov, offset) = ptr.into_parts();
+ Pointer::new(Some(prov), offset)
+ }
+}
+
+impl<Prov> Pointer<Option<Prov>> {
+ /// Convert this pointer that *might* have a provenance into a pointer that *definitely* has a
+ /// provenance, or an absolute address.
+ ///
+ /// This is rarely what you want; call `ptr_try_get_alloc_id` instead.
+ pub fn into_pointer_or_addr(self) -> Result<Pointer<Prov>, Size> {
+ match self.provenance {
+ Some(prov) => Ok(Pointer::new(prov, self.offset)),
+ None => Err(self.offset),
+ }
+ }
+
+ /// Returns the absolute address the pointer points to.
+ /// Only works if `Prov::OFFSET_IS_ADDR` is true!
+ pub fn addr(self) -> Size
+ where
+ Prov: Provenance,
+ {
+ assert!(Prov::OFFSET_IS_ADDR);
+ self.offset
+ }
+}
+
+impl<Prov> Pointer<Option<Prov>> {
+ #[inline(always)]
+ pub fn from_addr(addr: u64) -> Self {
+ Pointer { provenance: None, offset: Size::from_bytes(addr) }
+ }
+
+ #[inline(always)]
+ pub fn null() -> Self {
+ Pointer::from_addr(0)
+ }
+}
+
+impl<'tcx, Prov> Pointer<Prov> {
+ #[inline(always)]
+ pub fn new(provenance: Prov, offset: Size) -> Self {
+ Pointer { provenance, offset }
+ }
+
+ /// Obtain the constituents of this pointer. Note that the meaning of the offset depends on the type `Prov`!
+ /// This function must only be used in the implementation of `Machine::ptr_get_alloc`,
+ /// and when a `Pointer` is taken apart to be stored efficiently in an `Allocation`.
+ #[inline(always)]
+ pub fn into_parts(self) -> (Prov, Size) {
+ (self.provenance, self.offset)
+ }
+
+ pub fn map_provenance(self, f: impl FnOnce(Prov) -> Prov) -> Self {
+ Pointer { provenance: f(self.provenance), ..self }
+ }
+
+ #[inline]
+ pub fn offset(self, i: Size, cx: &impl HasDataLayout) -> InterpResult<'tcx, Self> {
+ Ok(Pointer {
+ offset: Size::from_bytes(cx.data_layout().offset(self.offset.bytes(), i.bytes())?),
+ ..self
+ })
+ }
+
+ #[inline]
+ pub fn overflowing_offset(self, i: Size, cx: &impl HasDataLayout) -> (Self, bool) {
+ let (res, over) = cx.data_layout().overflowing_offset(self.offset.bytes(), i.bytes());
+ let ptr = Pointer { offset: Size::from_bytes(res), ..self };
+ (ptr, over)
+ }
+
+ #[inline(always)]
+ pub fn wrapping_offset(self, i: Size, cx: &impl HasDataLayout) -> Self {
+ self.overflowing_offset(i, cx).0
+ }
+
+ #[inline]
+ pub fn signed_offset(self, i: i64, cx: &impl HasDataLayout) -> InterpResult<'tcx, Self> {
+ Ok(Pointer {
+ offset: Size::from_bytes(cx.data_layout().signed_offset(self.offset.bytes(), i)?),
+ ..self
+ })
+ }
+
+ #[inline]
+ pub fn overflowing_signed_offset(self, i: i64, cx: &impl HasDataLayout) -> (Self, bool) {
+ let (res, over) = cx.data_layout().overflowing_signed_offset(self.offset.bytes(), i);
+ let ptr = Pointer { offset: Size::from_bytes(res), ..self };
+ (ptr, over)
+ }
+
+ #[inline(always)]
+ pub fn wrapping_signed_offset(self, i: i64, cx: &impl HasDataLayout) -> Self {
+ self.overflowing_signed_offset(i, cx).0
+ }
+}
diff --git a/compiler/rustc_middle/src/mir/interpret/queries.rs b/compiler/rustc_middle/src/mir/interpret/queries.rs
new file mode 100644
index 000000000..786927e2d
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/interpret/queries.rs
@@ -0,0 +1,217 @@
+use super::{ErrorHandled, EvalToConstValueResult, EvalToValTreeResult, GlobalId};
+
+use crate::mir;
+use crate::ty::subst::InternalSubsts;
+use crate::ty::visit::TypeVisitable;
+use crate::ty::{self, query::TyCtxtAt, query::TyCtxtEnsure, TyCtxt};
+use rustc_hir::def_id::DefId;
+use rustc_span::{Span, DUMMY_SP};
+
+impl<'tcx> TyCtxt<'tcx> {
+ /// Evaluates a constant without providing any substitutions. This is useful to evaluate consts
+ /// that can't take any generic arguments like statics, const items or enum discriminants. If a
+ /// generic parameter is used within the constant, `ErrorHandled::TooGeneric` will be returned.
+ #[instrument(skip(self), level = "debug")]
+ pub fn const_eval_poly(self, def_id: DefId) -> EvalToConstValueResult<'tcx> {
+ // In some situations def_id will have substitutions within scope, but they aren't allowed
+ // to be used. So we can't use `Instance::mono`, instead we feed unresolved substitutions
+ // into `const_eval`, which will return `ErrorHandled::TooGeneric` if any of them are
+ // encountered.
+ let substs = InternalSubsts::identity_for_item(self, def_id);
+ let instance = ty::Instance::new(def_id, substs);
+ let cid = GlobalId { instance, promoted: None };
+ let param_env = self.param_env(def_id).with_reveal_all_normalized(self);
+ self.const_eval_global_id(param_env, cid, None)
+ }
+
+ /// Resolves and evaluates a constant.
+ ///
+ /// The constant can be located on a trait like `<A as B>::C`, in which case the given
+ /// substitutions and environment are used to resolve the constant. Alternatively if the
+ /// constant has generic parameters in scope the substitutions are used to evaluate the value of
+ /// the constant. For example in `fn foo<T>() { let _ = [0; bar::<T>()]; }` the repeat count
+ /// constant `bar::<T>()` requires a substitution for `T`. If the substitution for `T` is still
+ /// too generic for the constant to be evaluated then `Err(ErrorHandled::TooGeneric)` is
+ /// returned.
+ #[instrument(level = "debug", skip(self))]
+ pub fn const_eval_resolve(
+ self,
+ param_env: ty::ParamEnv<'tcx>,
+ ct: ty::Unevaluated<'tcx>,
+ span: Option<Span>,
+ ) -> EvalToConstValueResult<'tcx> {
+ // Cannot resolve `Unevaluated` constants that contain inference
+ // variables. We reject those here since `resolve_opt_const_arg`
+ // would fail otherwise.
+ //
+ // When trying to evaluate constants containing inference variables,
+ // use `Infcx::const_eval_resolve` instead.
+ if ct.substs.has_infer_types_or_consts() {
+ bug!("did not expect inference variables here");
+ }
+
+ match ty::Instance::resolve_opt_const_arg(self, param_env, ct.def, ct.substs) {
+ Ok(Some(instance)) => {
+ let cid = GlobalId { instance, promoted: ct.promoted };
+ self.const_eval_global_id(param_env, cid, span)
+ }
+ Ok(None) => Err(ErrorHandled::TooGeneric),
+ Err(error_reported) => Err(ErrorHandled::Reported(error_reported)),
+ }
+ }
+
+ #[instrument(level = "debug", skip(self))]
+ pub fn const_eval_resolve_for_typeck(
+ self,
+ param_env: ty::ParamEnv<'tcx>,
+ ct: ty::Unevaluated<'tcx>,
+ span: Option<Span>,
+ ) -> EvalToValTreeResult<'tcx> {
+ // Cannot resolve `Unevaluated` constants that contain inference
+ // variables. We reject those here since `resolve_opt_const_arg`
+ // would fail otherwise.
+ //
+ // When trying to evaluate constants containing inference variables,
+ // use `Infcx::const_eval_resolve` instead.
+ if ct.substs.has_infer_types_or_consts() {
+ bug!("did not expect inference variables here");
+ }
+
+ match ty::Instance::resolve_opt_const_arg(self, param_env, ct.def, ct.substs) {
+ Ok(Some(instance)) => {
+ let cid = GlobalId { instance, promoted: ct.promoted };
+ self.const_eval_global_id_for_typeck(param_env, cid, span)
+ }
+ Ok(None) => Err(ErrorHandled::TooGeneric),
+ Err(error_reported) => Err(ErrorHandled::Reported(error_reported)),
+ }
+ }
+
+ pub fn const_eval_instance(
+ self,
+ param_env: ty::ParamEnv<'tcx>,
+ instance: ty::Instance<'tcx>,
+ span: Option<Span>,
+ ) -> EvalToConstValueResult<'tcx> {
+ self.const_eval_global_id(param_env, GlobalId { instance, promoted: None }, span)
+ }
+
+ /// Evaluate a constant to a `ConstValue`.
+ #[instrument(skip(self), level = "debug")]
+ pub fn const_eval_global_id(
+ self,
+ param_env: ty::ParamEnv<'tcx>,
+ cid: GlobalId<'tcx>,
+ span: Option<Span>,
+ ) -> EvalToConstValueResult<'tcx> {
+ let param_env = param_env.with_const();
+ // Const-eval shouldn't depend on lifetimes at all, so we can erase them, which should
+ // improve caching of queries.
+ let inputs = self.erase_regions(param_env.and(cid));
+ if let Some(span) = span {
+ self.at(span).eval_to_const_value_raw(inputs)
+ } else {
+ self.eval_to_const_value_raw(inputs)
+ }
+ }
+
+ /// Evaluate a constant to a type-level constant.
+ #[instrument(skip(self), level = "debug")]
+ pub fn const_eval_global_id_for_typeck(
+ self,
+ param_env: ty::ParamEnv<'tcx>,
+ cid: GlobalId<'tcx>,
+ span: Option<Span>,
+ ) -> EvalToValTreeResult<'tcx> {
+ let param_env = param_env.with_const();
+ debug!(?param_env);
+ // Const-eval shouldn't depend on lifetimes at all, so we can erase them, which should
+ // improve caching of queries.
+ let inputs = self.erase_regions(param_env.and(cid));
+ debug!(?inputs);
+ if let Some(span) = span {
+ self.at(span).eval_to_valtree(inputs)
+ } else {
+ self.eval_to_valtree(inputs)
+ }
+ }
+
+ /// Evaluate a static's initializer, returning the allocation of the initializer's memory.
+ #[inline(always)]
+ pub fn eval_static_initializer(
+ self,
+ def_id: DefId,
+ ) -> Result<mir::ConstAllocation<'tcx>, ErrorHandled> {
+ self.at(DUMMY_SP).eval_static_initializer(def_id)
+ }
+}
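+// A hypothetical caller's view (sketch, not part of this diff); `def_id` is an
+// assumed binding for some `const` item:
+//
+//     match tcx.const_eval_poly(def_id) {
+//         Ok(val) => debug!("evaluated to {:?}", val),
+//         Err(ErrorHandled::TooGeneric) => { /* needs concrete substitutions */ }
+//         Err(ErrorHandled::Reported(_)) => { /* a diagnostic was already emitted */ }
+//     }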
+
+impl<'tcx> TyCtxtAt<'tcx> {
+ /// Evaluate a static's initializer, returning the allocation of the initializer's memory.
+ pub fn eval_static_initializer(
+ self,
+ def_id: DefId,
+ ) -> Result<mir::ConstAllocation<'tcx>, ErrorHandled> {
+ trace!("eval_static_initializer: Need to compute {:?}", def_id);
+ assert!(self.is_static(def_id));
+ let instance = ty::Instance::mono(*self, def_id);
+ let gid = GlobalId { instance, promoted: None };
+ self.eval_to_allocation(gid, ty::ParamEnv::reveal_all())
+ }
+
+ /// Evaluate anything constant-like, returning the allocation of the final memory.
+ fn eval_to_allocation(
+ self,
+ gid: GlobalId<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ ) -> Result<mir::ConstAllocation<'tcx>, ErrorHandled> {
+ let param_env = param_env.with_const();
+ trace!("eval_to_allocation: Need to compute {:?}", gid);
+ let raw_const = self.eval_to_allocation_raw(param_env.and(gid))?;
+ Ok(self.global_alloc(raw_const.alloc_id).unwrap_memory())
+ }
+}
+
+impl<'tcx> TyCtxtEnsure<'tcx> {
+ /// Evaluates a constant without providing any substitutions. This is useful to evaluate consts
+ /// that can't take any generic arguments like statics, const items or enum discriminants. If a
+ /// generic parameter is used within the constant, `ErrorHandled::TooGeneric` will be returned.
+ #[instrument(skip(self), level = "debug")]
+ pub fn const_eval_poly(self, def_id: DefId) {
+ // In some situations def_id will have substitutions within scope, but they aren't allowed
+ // to be used. So we can't use `Instance::mono`, instead we feed unresolved substitutions
+ // into `const_eval`, which will return `ErrorHandled::TooGeneric` if any of them are
+ // encountered.
+ let substs = InternalSubsts::identity_for_item(self.tcx, def_id);
+ let instance = ty::Instance::new(def_id, substs);
+ let cid = GlobalId { instance, promoted: None };
+ let param_env =
+ self.tcx.param_env(def_id).with_reveal_all_normalized(self.tcx).with_const();
+ // Const-eval shouldn't depend on lifetimes at all, so we can erase them, which should
+ // improve caching of queries.
+ let inputs = self.tcx.erase_regions(param_env.and(cid));
+ self.eval_to_const_value_raw(inputs)
+ }
+
+ /// Evaluate a static's initializer, returning the allocation of the initializer's memory.
+ pub fn eval_static_initializer(self, def_id: DefId) {
+ trace!("eval_static_initializer: Need to compute {:?}", def_id);
+ assert!(self.tcx.is_static(def_id));
+ let instance = ty::Instance::mono(self.tcx, def_id);
+ let gid = GlobalId { instance, promoted: None };
+ let param_env = ty::ParamEnv::reveal_all().with_const();
+ trace!("eval_to_allocation: Need to compute {:?}", gid);
+ self.eval_to_allocation_raw(param_env.and(gid))
+ }
+}
+
+impl<'tcx> TyCtxt<'tcx> {
+ /// Destructure a mir constant ADT or array into its variant index and its field values.
+ /// Panics if the destructuring fails; use `try_destructure_mir_constant` for the fallible version.
+ pub fn destructure_mir_constant(
+ self,
+ param_env: ty::ParamEnv<'tcx>,
+ constant: mir::ConstantKind<'tcx>,
+ ) -> mir::DestructuredMirConstant<'tcx> {
+ self.try_destructure_mir_constant(param_env.and(constant)).unwrap()
+ }
+}
diff --git a/compiler/rustc_middle/src/mir/interpret/value.rs b/compiler/rustc_middle/src/mir/interpret/value.rs
new file mode 100644
index 000000000..834c114ee
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/interpret/value.rs
@@ -0,0 +1,651 @@
+use std::convert::{TryFrom, TryInto};
+use std::fmt;
+
+use rustc_apfloat::{
+ ieee::{Double, Single},
+ Float,
+};
+use rustc_macros::HashStable;
+use rustc_target::abi::{HasDataLayout, Size};
+
+use crate::ty::{Lift, ParamEnv, ScalarInt, Ty, TyCtxt};
+
+use super::{
+ AllocId, AllocRange, ConstAllocation, InterpResult, Pointer, PointerArithmetic, Provenance,
+ ScalarSizeMismatch,
+};
+
+/// Represents the result of const evaluation via the `eval_to_allocation` query.
+#[derive(Copy, Clone, HashStable, TyEncodable, TyDecodable, Debug, Hash, Eq, PartialEq)]
+pub struct ConstAlloc<'tcx> {
+ // the value lives here, at offset 0, and that allocation definitely is an `AllocKind::Memory`
+ // (so you can use `AllocMap::unwrap_memory`).
+ pub alloc_id: AllocId,
+ pub ty: Ty<'tcx>,
+}
+
+/// Represents a constant value in Rust. `Scalar` and `Slice` are optimizations for
+/// array length computations, enum discriminants and the pattern matching logic.
+#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable, Hash)]
+#[derive(HashStable)]
+pub enum ConstValue<'tcx> {
+ /// Used only for types with `layout::abi::Scalar` ABI.
+ ///
+ /// Not using the enum `Value` to encode that this must not be `Uninit`.
+ Scalar(Scalar),
+
+ /// Only used for ZSTs.
+ ZeroSized,
+
+ /// Used only for `&[u8]` and `&str`.
+ Slice { data: ConstAllocation<'tcx>, start: usize, end: usize },
+
+ /// A value not represented/representable by `Scalar` or `Slice`.
+ ByRef {
+ /// The backing memory of the value; it may contain more memory than needed for just the
+ /// value, in order to share `ConstAllocation`s between values.
+ alloc: ConstAllocation<'tcx>,
+ /// Offset into `alloc`.
+ offset: Size,
+ },
+}
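+// Representation sketch: a `&str` constant such as `"hi"` would typically be
+// `ConstValue::Slice { data: <allocation holding b"hi">, start: 0, end: 2 }`,
+// with `start..end` selecting the relevant bytes of the (possibly shared) allocation.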
+
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+static_assert_size!(ConstValue<'_>, 32);
+
+impl<'a, 'tcx> Lift<'tcx> for ConstValue<'a> {
+ type Lifted = ConstValue<'tcx>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<ConstValue<'tcx>> {
+ Some(match self {
+ ConstValue::Scalar(s) => ConstValue::Scalar(s),
+ ConstValue::ZeroSized => ConstValue::ZeroSized,
+ ConstValue::Slice { data, start, end } => {
+ ConstValue::Slice { data: tcx.lift(data)?, start, end }
+ }
+ ConstValue::ByRef { alloc, offset } => {
+ ConstValue::ByRef { alloc: tcx.lift(alloc)?, offset }
+ }
+ })
+ }
+}
+
+impl<'tcx> ConstValue<'tcx> {
+ #[inline]
+ pub fn try_to_scalar(&self) -> Option<Scalar<AllocId>> {
+ match *self {
+ ConstValue::ByRef { .. } | ConstValue::Slice { .. } | ConstValue::ZeroSized => None,
+ ConstValue::Scalar(val) => Some(val),
+ }
+ }
+
+ pub fn try_to_scalar_int(&self) -> Option<ScalarInt> {
+ Some(self.try_to_scalar()?.assert_int())
+ }
+
+ pub fn try_to_bits(&self, size: Size) -> Option<u128> {
+ self.try_to_scalar_int()?.to_bits(size).ok()
+ }
+
+ pub fn try_to_bool(&self) -> Option<bool> {
+ self.try_to_scalar_int()?.try_into().ok()
+ }
+
+ pub fn try_to_machine_usize(&self, tcx: TyCtxt<'tcx>) -> Option<u64> {
+ self.try_to_scalar_int()?.try_to_machine_usize(tcx).ok()
+ }
+
+ pub fn try_to_bits_for_ty(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ param_env: ParamEnv<'tcx>,
+ ty: Ty<'tcx>,
+ ) -> Option<u128> {
+ let size = tcx.layout_of(param_env.with_reveal_all_normalized(tcx).and(ty)).ok()?.size;
+ self.try_to_bits(size)
+ }
+
+ pub fn from_bool(b: bool) -> Self {
+ ConstValue::Scalar(Scalar::from_bool(b))
+ }
+
+ pub fn from_u64(i: u64) -> Self {
+ ConstValue::Scalar(Scalar::from_u64(i))
+ }
+
+ pub fn from_machine_usize(i: u64, cx: &impl HasDataLayout) -> Self {
+ ConstValue::Scalar(Scalar::from_machine_usize(i, cx))
+ }
+}
+
+/// A `Scalar` represents an immediate, primitive value existing outside of a
+/// `memory::Allocation`. It is in many ways like a small chunk of an `Allocation`, up to 16 bytes in
+/// size. Like a range of bytes in an `Allocation`, a `Scalar` can either represent the raw bytes
+/// of a simple value or a pointer into another `Allocation`.
+///
+/// These variants would be private if there were a convenient way to achieve that in Rust.
+/// Do *not* match on a `Scalar`! Use the various `to_*` methods instead.
+#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, TyEncodable, TyDecodable, Hash)]
+#[derive(HashStable)]
+pub enum Scalar<Prov = AllocId> {
+ /// The raw bytes of a simple value.
+ Int(ScalarInt),
+
+ /// A pointer into an `Allocation`. An `Allocation` in the `memory` module has a list of
+ /// relocations, but a `Scalar` is only large enough to contain one, so we just represent the
+ /// relocation and its associated offset together as a `Pointer` here.
+ ///
+ /// We also store the size of the pointer, such that a `Scalar` always knows how big it is.
+ /// The size is always the pointer size of the current target, but this is not information
+ /// that we always have readily available.
+ Ptr(Pointer<Prov>, u8),
+}
+
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+static_assert_size!(Scalar, 24);
+
+// We want the `Debug` output to be readable as it is used by `derive(Debug)` for
+// all the Miri types.
+impl<Prov: Provenance> fmt::Debug for Scalar<Prov> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ Scalar::Ptr(ptr, _size) => write!(f, "{:?}", ptr),
+ Scalar::Int(int) => write!(f, "{:?}", int),
+ }
+ }
+}
+
+impl<Prov: Provenance> fmt::Display for Scalar<Prov> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ Scalar::Ptr(ptr, _size) => write!(f, "pointer to {:?}", ptr),
+ Scalar::Int(int) => write!(f, "{}", int),
+ }
+ }
+}
+
+impl<Prov: Provenance> fmt::LowerHex for Scalar<Prov> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ Scalar::Ptr(ptr, _size) => write!(f, "pointer to {:?}", ptr),
+ Scalar::Int(int) => write!(f, "{:#x}", int),
+ }
+ }
+}
+
+impl<Prov> From<Single> for Scalar<Prov> {
+ #[inline(always)]
+ fn from(f: Single) -> Self {
+ Scalar::from_f32(f)
+ }
+}
+
+impl<Prov> From<Double> for Scalar<Prov> {
+ #[inline(always)]
+ fn from(f: Double) -> Self {
+ Scalar::from_f64(f)
+ }
+}
+
+impl<Prov> From<ScalarInt> for Scalar<Prov> {
+ #[inline(always)]
+ fn from(ptr: ScalarInt) -> Self {
+ Scalar::Int(ptr)
+ }
+}
+
+impl<Prov> Scalar<Prov> {
+ #[inline(always)]
+ pub fn from_pointer(ptr: Pointer<Prov>, cx: &impl HasDataLayout) -> Self {
+ Scalar::Ptr(ptr, u8::try_from(cx.pointer_size().bytes()).unwrap())
+ }
+
+ /// Create a Scalar from a pointer with an `Option<_>` provenance (where `None` represents a
+ /// plain integer / "invalid" pointer).
+ pub fn from_maybe_pointer(ptr: Pointer<Option<Prov>>, cx: &impl HasDataLayout) -> Self {
+ match ptr.into_parts() {
+ (Some(prov), offset) => Scalar::from_pointer(Pointer::new(prov, offset), cx),
+ (None, offset) => {
+ Scalar::Int(ScalarInt::try_from_uint(offset.bytes(), cx.pointer_size()).unwrap())
+ }
+ }
+ }
+
+ #[inline]
+ pub fn null_ptr(cx: &impl HasDataLayout) -> Self {
+ Scalar::Int(ScalarInt::null(cx.pointer_size()))
+ }
+
+ #[inline]
+ pub fn from_bool(b: bool) -> Self {
+ Scalar::Int(b.into())
+ }
+
+ #[inline]
+ pub fn from_char(c: char) -> Self {
+ Scalar::Int(c.into())
+ }
+
+ #[inline]
+ pub fn try_from_uint(i: impl Into<u128>, size: Size) -> Option<Self> {
+ ScalarInt::try_from_uint(i, size).map(Scalar::Int)
+ }
+
+ #[inline]
+ pub fn from_uint(i: impl Into<u128>, size: Size) -> Self {
+ let i = i.into();
+ Self::try_from_uint(i, size)
+ .unwrap_or_else(|| bug!("Unsigned value {:#x} does not fit in {} bits", i, size.bits()))
+ }
+
+ #[inline]
+ pub fn from_u8(i: u8) -> Self {
+ Scalar::Int(i.into())
+ }
+
+ #[inline]
+ pub fn from_u16(i: u16) -> Self {
+ Scalar::Int(i.into())
+ }
+
+ #[inline]
+ pub fn from_u32(i: u32) -> Self {
+ Scalar::Int(i.into())
+ }
+
+ #[inline]
+ pub fn from_u64(i: u64) -> Self {
+ Scalar::Int(i.into())
+ }
+
+ #[inline]
+ pub fn from_machine_usize(i: u64, cx: &impl HasDataLayout) -> Self {
+ Self::from_uint(i, cx.data_layout().pointer_size)
+ }
+
+ #[inline]
+ pub fn try_from_int(i: impl Into<i128>, size: Size) -> Option<Self> {
+ ScalarInt::try_from_int(i, size).map(Scalar::Int)
+ }
+
+ #[inline]
+ pub fn from_int(i: impl Into<i128>, size: Size) -> Self {
+ let i = i.into();
+ Self::try_from_int(i, size)
+ .unwrap_or_else(|| bug!("Signed value {:#x} does not fit in {} bits", i, size.bits()))
+ }
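+ // Round-trip sketch: `Scalar::from_int(-1, Size::from_bits(8))` stores the
+ // truncated bits `0xff`; `to_bits` hands those back, and `to_int` (below)
+ // sign-extends them to -1 again.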
+
+ #[inline]
+ pub fn from_i32(i: i32) -> Self {
+ Self::from_int(i, Size::from_bits(32))
+ }
+
+ #[inline]
+ pub fn from_i64(i: i64) -> Self {
+ Self::from_int(i, Size::from_bits(64))
+ }
+
+ #[inline]
+ pub fn from_machine_isize(i: i64, cx: &impl HasDataLayout) -> Self {
+ Self::from_int(i, cx.data_layout().pointer_size)
+ }
+
+ #[inline]
+ pub fn from_f32(f: Single) -> Self {
+ Scalar::Int(f.into())
+ }
+
+ #[inline]
+ pub fn from_f64(f: Double) -> Self {
+ Scalar::Int(f.into())
+ }
+
+ /// This is almost certainly not the method you want! You should dispatch on the type
+ /// and use `to_{u8,u16,...}`/`scalar_to_ptr` to perform ptr-to-int / int-to-ptr casts as needed.
+ ///
+ /// This method only exists for the benefit of low-level operations that truly need to treat the
+ /// scalar in whatever form it is.
+ ///
+ /// This throws UB (instead of ICEing) on a size mismatch since size mismatches can arise in
+ /// Miri when someone declares a function that we shim (such as `malloc`) with a wrong type.
+ #[inline]
+ pub fn to_bits_or_ptr_internal(
+ self,
+ target_size: Size,
+ ) -> Result<Result<u128, Pointer<Prov>>, ScalarSizeMismatch> {
+ assert_ne!(target_size.bytes(), 0, "you should never look at the bits of a ZST");
+ Ok(match self {
+ Scalar::Int(int) => Ok(int.to_bits(target_size).map_err(|size| {
+ ScalarSizeMismatch { target_size: target_size.bytes(), data_size: size.bytes() }
+ })?),
+ Scalar::Ptr(ptr, sz) => {
+ if target_size.bytes() != u64::from(sz) {
+ return Err(ScalarSizeMismatch {
+ target_size: target_size.bytes(),
+ data_size: sz.into(),
+ });
+ }
+ Err(ptr)
+ }
+ })
+ }
+}
+
+impl<'tcx, Prov: Provenance> Scalar<Prov> {
+ pub fn to_pointer(self, cx: &impl HasDataLayout) -> InterpResult<'tcx, Pointer<Option<Prov>>> {
+ match self
+ .to_bits_or_ptr_internal(cx.pointer_size())
+ .map_err(|s| err_ub!(ScalarSizeMismatch(s)))?
+ {
+ Err(ptr) => Ok(ptr.into()),
+ Ok(bits) => {
+ let addr = u64::try_from(bits).unwrap();
+ Ok(Pointer::from_addr(addr))
+ }
+ }
+ }
+
+ /// Fundamental scalar-to-int (cast) operation. Many convenience wrappers exist below, which you
+ /// likely want to use instead.
+ ///
+ /// Will perform ptr-to-int casts if needed and possible.
+ /// If that fails, we know the offset is relative, so we return an "erased" Scalar
+ /// (which is useful for error messages but not much else).
+ #[inline]
+ pub fn try_to_int(self) -> Result<ScalarInt, Scalar<AllocId>> {
+ match self {
+ Scalar::Int(int) => Ok(int),
+ Scalar::Ptr(ptr, sz) => {
+ if Prov::OFFSET_IS_ADDR {
+ Ok(ScalarInt::try_from_uint(ptr.offset.bytes(), Size::from_bytes(sz)).unwrap())
+ } else {
+ // We know `offset` is relative, since `OFFSET_IS_ADDR == false`.
+ let (prov, offset) = ptr.into_parts();
+ // Because `OFFSET_IS_ADDR == false`, this unwrap can never fail.
+ Err(Scalar::Ptr(Pointer::new(prov.get_alloc_id().unwrap(), offset), sz))
+ }
+ }
+ }
+ }
+
+ #[inline(always)]
+ pub fn assert_int(self) -> ScalarInt {
+ self.try_to_int().unwrap()
+ }
+
+ /// This throws UB (instead of ICEing) on a size mismatch since size mismatches can arise in
+ /// Miri when someone declares a function that we shim (such as `malloc`) with a wrong type.
+ #[inline]
+ pub fn to_bits(self, target_size: Size) -> InterpResult<'tcx, u128> {
+ assert_ne!(target_size.bytes(), 0, "you should never look at the bits of a ZST");
+ self.try_to_int().map_err(|_| err_unsup!(ReadPointerAsBytes))?.to_bits(target_size).map_err(
+ |size| {
+ err_ub!(ScalarSizeMismatch(ScalarSizeMismatch {
+ target_size: target_size.bytes(),
+ data_size: size.bytes(),
+ }))
+ .into()
+ },
+ )
+ }
+
+ #[inline(always)]
+ pub fn assert_bits(self, target_size: Size) -> u128 {
+ self.to_bits(target_size).unwrap()
+ }
+
+ pub fn to_bool(self) -> InterpResult<'tcx, bool> {
+ let val = self.to_u8()?;
+ match val {
+ 0 => Ok(false),
+ 1 => Ok(true),
+ _ => throw_ub!(InvalidBool(val)),
+ }
+ }
+
+ pub fn to_char(self) -> InterpResult<'tcx, char> {
+ let val = self.to_u32()?;
+ match std::char::from_u32(val) {
+ Some(c) => Ok(c),
+ None => throw_ub!(InvalidChar(val)),
+ }
+ }
+
+ /// Converts the scalar to produce an unsigned integer of the given size.
+ /// Fails if the scalar is a pointer.
+ #[inline]
+ pub fn to_uint(self, size: Size) -> InterpResult<'tcx, u128> {
+ self.to_bits(size)
+ }
+
+ /// Converts the scalar to produce a `u8`. Fails if the scalar is a pointer.
+ pub fn to_u8(self) -> InterpResult<'tcx, u8> {
+ self.to_uint(Size::from_bits(8)).map(|v| u8::try_from(v).unwrap())
+ }
+
+ /// Converts the scalar to produce a `u16`. Fails if the scalar is a pointer.
+ pub fn to_u16(self) -> InterpResult<'tcx, u16> {
+ self.to_uint(Size::from_bits(16)).map(|v| u16::try_from(v).unwrap())
+ }
+
+ /// Converts the scalar to produce a `u32`. Fails if the scalar is a pointer.
+ pub fn to_u32(self) -> InterpResult<'tcx, u32> {
+ self.to_uint(Size::from_bits(32)).map(|v| u32::try_from(v).unwrap())
+ }
+
+ /// Converts the scalar to produce a `u64`. Fails if the scalar is a pointer.
+ pub fn to_u64(self) -> InterpResult<'tcx, u64> {
+ self.to_uint(Size::from_bits(64)).map(|v| u64::try_from(v).unwrap())
+ }
+
+ /// Converts the scalar to produce a `u128`. Fails if the scalar is a pointer.
+ pub fn to_u128(self) -> InterpResult<'tcx, u128> {
+ self.to_uint(Size::from_bits(128))
+ }
+
+ /// Converts the scalar to produce a machine-pointer-sized unsigned integer.
+ /// Fails if the scalar is a pointer.
+ pub fn to_machine_usize(self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64> {
+ let b = self.to_uint(cx.data_layout().pointer_size)?;
+ Ok(u64::try_from(b).unwrap())
+ }
+
+ /// Converts the scalar to produce a signed integer of the given size.
+ /// Fails if the scalar is a pointer.
+ #[inline]
+ pub fn to_int(self, size: Size) -> InterpResult<'tcx, i128> {
+ let b = self.to_bits(size)?;
+ Ok(size.sign_extend(b) as i128)
+ }
+
+ /// Converts the scalar to produce an `i8`. Fails if the scalar is a pointer.
+ pub fn to_i8(self) -> InterpResult<'tcx, i8> {
+ self.to_int(Size::from_bits(8)).map(|v| i8::try_from(v).unwrap())
+ }
+
+ /// Converts the scalar to produce an `i16`. Fails if the scalar is a pointer.
+ pub fn to_i16(self) -> InterpResult<'tcx, i16> {
+ self.to_int(Size::from_bits(16)).map(|v| i16::try_from(v).unwrap())
+ }
+
+ /// Converts the scalar to produce an `i32`. Fails if the scalar is a pointer.
+ pub fn to_i32(self) -> InterpResult<'tcx, i32> {
+ self.to_int(Size::from_bits(32)).map(|v| i32::try_from(v).unwrap())
+ }
+
+ /// Converts the scalar to produce an `i64`. Fails if the scalar is a pointer.
+ pub fn to_i64(self) -> InterpResult<'tcx, i64> {
+ self.to_int(Size::from_bits(64)).map(|v| i64::try_from(v).unwrap())
+ }
+
+ /// Converts the scalar to produce an `i128`. Fails if the scalar is a pointer.
+ pub fn to_i128(self) -> InterpResult<'tcx, i128> {
+ self.to_int(Size::from_bits(128))
+ }
+
+ /// Converts the scalar to produce a machine-pointer-sized signed integer.
+ /// Fails if the scalar is a pointer.
+ pub fn to_machine_isize(self, cx: &impl HasDataLayout) -> InterpResult<'tcx, i64> {
+ let b = self.to_int(cx.data_layout().pointer_size)?;
+ Ok(i64::try_from(b).unwrap())
+ }
+
+ #[inline]
+ pub fn to_f32(self) -> InterpResult<'tcx, Single> {
+ // Going through `u32` to check size and truncation.
+ Ok(Single::from_bits(self.to_u32()?.into()))
+ }
+
+ #[inline]
+ pub fn to_f64(self) -> InterpResult<'tcx, Double> {
+ // Going through `u64` to check size and truncation.
+ Ok(Double::from_bits(self.to_u64()?.into()))
+ }
+}
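+
+// A minimal sketch (in comments, not a doctest) of how the conversions above
+// behave, assuming the `from_uint` constructor defined earlier in this file:
+// `to_uint` returns the raw bits, while `to_int` sign-extends them, so the
+// one-byte value `0xFF` reads back as `255` unsigned but `-1` signed.
+//
+// let s: Scalar = Scalar::from_uint(0xFF_u8, Size::from_bits(8));
+// assert_eq!(s.to_u8().unwrap(), 0xFF);
+// assert_eq!(s.to_i8().unwrap(), -1);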
+
+#[derive(Clone, Copy, Eq, PartialEq, TyEncodable, TyDecodable, HashStable, Hash)]
+pub enum ScalarMaybeUninit<Prov = AllocId> {
+ Scalar(Scalar<Prov>),
+ Uninit,
+}
+
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+static_assert_size!(ScalarMaybeUninit, 24);
+
+impl<Prov> From<Scalar<Prov>> for ScalarMaybeUninit<Prov> {
+ #[inline(always)]
+ fn from(s: Scalar<Prov>) -> Self {
+ ScalarMaybeUninit::Scalar(s)
+ }
+}
+
+// We want the `Debug` output to be readable as it is used by `derive(Debug)` for
+// all the Miri types.
+impl<Prov: Provenance> fmt::Debug for ScalarMaybeUninit<Prov> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ ScalarMaybeUninit::Uninit => write!(f, "<uninitialized>"),
+ ScalarMaybeUninit::Scalar(s) => write!(f, "{:?}", s),
+ }
+ }
+}
+
+impl<Prov: Provenance> fmt::LowerHex for ScalarMaybeUninit<Prov> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ ScalarMaybeUninit::Uninit => write!(f, "uninitialized bytes"),
+ ScalarMaybeUninit::Scalar(s) => write!(f, "{:x}", s),
+ }
+ }
+}
+
+impl<Prov> ScalarMaybeUninit<Prov> {
+ #[inline]
+ pub fn from_pointer(ptr: Pointer<Prov>, cx: &impl HasDataLayout) -> Self {
+ ScalarMaybeUninit::Scalar(Scalar::from_pointer(ptr, cx))
+ }
+
+ #[inline]
+ pub fn from_maybe_pointer(ptr: Pointer<Option<Prov>>, cx: &impl HasDataLayout) -> Self {
+ ScalarMaybeUninit::Scalar(Scalar::from_maybe_pointer(ptr, cx))
+ }
+
+ #[inline]
+ pub fn check_init<'tcx>(self) -> InterpResult<'tcx, Scalar<Prov>> {
+ match self {
+ ScalarMaybeUninit::Scalar(scalar) => Ok(scalar),
+ ScalarMaybeUninit::Uninit => throw_ub!(InvalidUninitBytes(None)),
+ }
+ }
+}
+
+impl<'tcx, Prov: Provenance> ScalarMaybeUninit<Prov> {
+ #[inline(always)]
+ pub fn to_pointer(self, cx: &impl HasDataLayout) -> InterpResult<'tcx, Pointer<Option<Prov>>> {
+ self.check_init()?.to_pointer(cx)
+ }
+
+ #[inline(always)]
+ pub fn to_bool(self) -> InterpResult<'tcx, bool> {
+ self.check_init()?.to_bool()
+ }
+
+ #[inline(always)]
+ pub fn to_char(self) -> InterpResult<'tcx, char> {
+ self.check_init()?.to_char()
+ }
+
+ #[inline(always)]
+ pub fn to_f32(self) -> InterpResult<'tcx, Single> {
+ self.check_init()?.to_f32()
+ }
+
+ #[inline(always)]
+ pub fn to_f64(self) -> InterpResult<'tcx, Double> {
+ self.check_init()?.to_f64()
+ }
+
+ #[inline(always)]
+ pub fn to_u8(self) -> InterpResult<'tcx, u8> {
+ self.check_init()?.to_u8()
+ }
+
+ #[inline(always)]
+ pub fn to_u16(self) -> InterpResult<'tcx, u16> {
+ self.check_init()?.to_u16()
+ }
+
+ #[inline(always)]
+ pub fn to_u32(self) -> InterpResult<'tcx, u32> {
+ self.check_init()?.to_u32()
+ }
+
+ #[inline(always)]
+ pub fn to_u64(self) -> InterpResult<'tcx, u64> {
+ self.check_init()?.to_u64()
+ }
+
+ #[inline(always)]
+ pub fn to_machine_usize(self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64> {
+ self.check_init()?.to_machine_usize(cx)
+ }
+
+ #[inline(always)]
+ pub fn to_i8(self) -> InterpResult<'tcx, i8> {
+ self.check_init()?.to_i8()
+ }
+
+ #[inline(always)]
+ pub fn to_i16(self) -> InterpResult<'tcx, i16> {
+ self.check_init()?.to_i16()
+ }
+
+ #[inline(always)]
+ pub fn to_i32(self) -> InterpResult<'tcx, i32> {
+ self.check_init()?.to_i32()
+ }
+
+ #[inline(always)]
+ pub fn to_i64(self) -> InterpResult<'tcx, i64> {
+ self.check_init()?.to_i64()
+ }
+
+ #[inline(always)]
+ pub fn to_machine_isize(self, cx: &impl HasDataLayout) -> InterpResult<'tcx, i64> {
+ self.check_init()?.to_machine_isize(cx)
+ }
+}
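+
+// A minimal sketch (in comments) of the check-then-convert pattern above: every
+// forwarding method first runs `check_init`, so reading an uninitialized scalar
+// reports undefined behavior instead of yielding a garbage value. Assuming the
+// `from_u32` constructor defined earlier in this file:
+//
+// let uninit: ScalarMaybeUninit = ScalarMaybeUninit::Uninit;
+// assert!(uninit.to_u32().is_err()); // `InvalidUninitBytes` UB error
+// let init: ScalarMaybeUninit = Scalar::from_u32(7).into();
+// assert_eq!(init.to_u32().unwrap(), 7);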
+
+/// Gets the bytes of a constant slice value.
+pub fn get_slice_bytes<'tcx>(cx: &impl HasDataLayout, val: ConstValue<'tcx>) -> &'tcx [u8] {
+ if let ConstValue::Slice { data, start, end } = val {
+ let len = end - start;
+ data.inner()
+ .get_bytes(
+ cx,
+ AllocRange { start: Size::from_bytes(start), size: Size::from_bytes(len) },
+ )
+ .unwrap_or_else(|err| bug!("const slice is invalid: {:?}", err))
+ } else {
+ bug!("expected const slice, but found another const value");
+ }
+}
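+
+// A hedged usage sketch (in comments): for a `&str` constant such as `"abc"`,
+// the interpreter produces a `ConstValue::Slice` whose `data` allocation holds
+// the string bytes and whose `start..end` range selects them, so
+// `get_slice_bytes(&tcx, val)` would return `b"abc"` here.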
diff --git a/compiler/rustc_middle/src/mir/mod.rs b/compiler/rustc_middle/src/mir/mod.rs
new file mode 100644
index 000000000..7ab71f900
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/mod.rs
@@ -0,0 +1,2900 @@
+//! MIR datatypes and passes. See the [rustc dev guide] for more info.
+//!
+//! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/mir/index.html
+
+use crate::mir::interpret::{
+ AllocRange, ConstAllocation, ConstValue, GlobalAlloc, LitToConstInput, Scalar,
+};
+use crate::mir::visit::MirVisitable;
+use crate::ty::codec::{TyDecoder, TyEncoder};
+use crate::ty::fold::{FallibleTypeFolder, TypeFoldable, TypeSuperFoldable};
+use crate::ty::print::{FmtPrinter, Printer};
+use crate::ty::subst::{GenericArg, InternalSubsts, Subst, SubstsRef};
+use crate::ty::visit::{TypeSuperVisitable, TypeVisitable, TypeVisitor};
+use crate::ty::{self, List, Ty, TyCtxt};
+use crate::ty::{AdtDef, InstanceDef, ScalarInt, UserTypeAnnotationIndex};
+
+use rustc_data_structures::captures::Captures;
+use rustc_errors::ErrorGuaranteed;
+use rustc_hir::def::{CtorKind, Namespace};
+use rustc_hir::def_id::{DefId, LocalDefId, CRATE_DEF_ID};
+use rustc_hir::{self, GeneratorKind};
+use rustc_hir::{self as hir, HirId};
+use rustc_session::Session;
+use rustc_target::abi::{Size, VariantIdx};
+
+use polonius_engine::Atom;
+pub use rustc_ast::Mutability;
+use rustc_data_structures::fx::FxHashSet;
+use rustc_data_structures::graph::dominators::Dominators;
+use rustc_index::bit_set::BitMatrix;
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_serialize::{Decodable, Encodable};
+use rustc_span::symbol::Symbol;
+use rustc_span::{Span, DUMMY_SP};
+
+use either::Either;
+
+use std::borrow::Cow;
+use std::convert::TryInto;
+use std::fmt::{self, Debug, Display, Formatter, Write};
+use std::ops::{ControlFlow, Index, IndexMut};
+use std::{iter, mem};
+
+pub use self::query::*;
+pub use basic_blocks::BasicBlocks;
+
+mod basic_blocks;
+pub mod coverage;
+mod generic_graph;
+pub mod generic_graphviz;
+mod graph_cyclic_cache;
+pub mod graphviz;
+pub mod interpret;
+pub mod mono;
+pub mod patch;
+mod predecessors;
+pub mod pretty;
+mod query;
+pub mod spanview;
+mod syntax;
+pub use syntax::*;
+mod switch_sources;
+pub mod tcx;
+pub mod terminator;
+pub use terminator::*;
+
+pub mod traversal;
+mod type_foldable;
+mod type_visitable;
+pub mod visit;
+
+pub use self::generic_graph::graphviz_safe_def_name;
+pub use self::graphviz::write_mir_graphviz;
+pub use self::pretty::{
+ create_dump_file, display_allocation, dump_enabled, dump_mir, write_mir_pretty, PassWhere,
+};
+
+/// Types for locals
+pub type LocalDecls<'tcx> = IndexVec<Local, LocalDecl<'tcx>>;
+
+pub trait HasLocalDecls<'tcx> {
+ fn local_decls(&self) -> &LocalDecls<'tcx>;
+}
+
+impl<'tcx> HasLocalDecls<'tcx> for LocalDecls<'tcx> {
+ #[inline]
+ fn local_decls(&self) -> &LocalDecls<'tcx> {
+ self
+ }
+}
+
+impl<'tcx> HasLocalDecls<'tcx> for Body<'tcx> {
+ #[inline]
+ fn local_decls(&self) -> &LocalDecls<'tcx> {
+ &self.local_decls
+ }
+}
+
+/// A streamlined trait that you can implement to create a pass; the
+/// pass will be named after the type, and it will consist of a main
+/// loop that goes over each available MIR and applies `run_pass`.
+pub trait MirPass<'tcx> {
+ fn name(&self) -> Cow<'_, str> {
+ let name = std::any::type_name::<Self>();
+ if let Some(tail) = name.rfind(':') {
+ Cow::from(&name[tail + 1..])
+ } else {
+ Cow::from(name)
+ }
+ }
+
+ /// Returns `true` if this pass is enabled with the current combination of compiler flags.
+ fn is_enabled(&self, _sess: &Session) -> bool {
+ true
+ }
+
+ fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>);
+
+ /// If this pass causes the MIR to enter a new phase, return that phase.
+ fn phase_change(&self) -> Option<MirPhase> {
+ None
+ }
+
+ fn is_mir_dump_enabled(&self) -> bool {
+ true
+ }
+}
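+
+// A minimal sketch of implementing this trait (illustrative only; `RemoveNops`
+// is a hypothetical pass name, not an actual rustc pass):
+//
+// struct RemoveNops;
+//
+// impl<'tcx> MirPass<'tcx> for RemoveNops {
+//     fn run_pass(&self, _tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
+//         for block in body.basic_blocks_mut() {
+//             // Drop all `Nop` statements; `name` defaults to "RemoveNops"
+//             // and `is_enabled` to `true`.
+//             block.statements.retain(|s| !matches!(s.kind, StatementKind::Nop));
+//         }
+//     }
+// }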
+
+impl MirPhase {
+ /// Gets the index of the current MirPhase within the set of all `MirPhase`s.
+ pub fn phase_index(&self) -> usize {
+ *self as usize
+ }
+}
+
+/// Where a specific `mir::Body` comes from.
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+#[derive(HashStable, TyEncodable, TyDecodable, TypeFoldable, TypeVisitable)]
+pub struct MirSource<'tcx> {
+ pub instance: InstanceDef<'tcx>,
+
+ /// If `Some`, this is a promoted rvalue within the parent function.
+ pub promoted: Option<Promoted>,
+}
+
+impl<'tcx> MirSource<'tcx> {
+ pub fn item(def_id: DefId) -> Self {
+ MirSource {
+ instance: InstanceDef::Item(ty::WithOptConstParam::unknown(def_id)),
+ promoted: None,
+ }
+ }
+
+ pub fn from_instance(instance: InstanceDef<'tcx>) -> Self {
+ MirSource { instance, promoted: None }
+ }
+
+ pub fn with_opt_param(self) -> ty::WithOptConstParam<DefId> {
+ self.instance.with_opt_param()
+ }
+
+ #[inline]
+ pub fn def_id(&self) -> DefId {
+ self.instance.def_id()
+ }
+}
+
+#[derive(Clone, TyEncodable, TyDecodable, Debug, HashStable, TypeFoldable, TypeVisitable)]
+pub struct GeneratorInfo<'tcx> {
+ /// The yield type of the function, if it is a generator.
+ pub yield_ty: Option<Ty<'tcx>>,
+
+ /// Generator drop glue.
+ pub generator_drop: Option<Body<'tcx>>,
+
+ /// The layout of a generator. Produced by the state transformation.
+ pub generator_layout: Option<GeneratorLayout<'tcx>>,
+
+ /// If this is a generator then record the type of source expression that caused this generator
+ /// to be created.
+ pub generator_kind: GeneratorKind,
+}
+
+/// The lowered representation of a single function.
+#[derive(Clone, TyEncodable, TyDecodable, Debug, HashStable, TypeFoldable, TypeVisitable)]
+pub struct Body<'tcx> {
+ /// A list of basic blocks. References to basic blocks use a newtyped index type [`BasicBlock`]
+ /// that indexes into this vector.
+ pub basic_blocks: BasicBlocks<'tcx>,
+
+ /// Records how far through the "desugaring and optimization" process this particular
+ /// MIR has traversed. This is particularly useful when inlining, since in that context
+ /// we instantiate the promoted constants and add them to our promoted vector -- but those
+ /// promoted items have already been optimized, whereas ours have not. This field allows
+ /// us to see the difference and forego optimization on the inlined promoted items.
+ pub phase: MirPhase,
+
+ pub source: MirSource<'tcx>,
+
+ /// A list of source scopes; these are referenced by statements
+ /// and used for debuginfo. Indexed by a `SourceScope`.
+ pub source_scopes: IndexVec<SourceScope, SourceScopeData<'tcx>>,
+
+ pub generator: Option<Box<GeneratorInfo<'tcx>>>,
+
+ /// Declarations of locals.
+ ///
+ /// The first local is the return value pointer, followed by `arg_count`
+ /// locals for the function arguments, followed by any user-declared
+ /// variables and temporaries.
+ pub local_decls: LocalDecls<'tcx>,
+
+ /// User type annotations.
+ pub user_type_annotations: ty::CanonicalUserTypeAnnotations<'tcx>,
+
+ /// The number of arguments this function takes.
+ ///
+ /// Starting at local 1, `arg_count` locals will be provided by the caller
+ /// and can be assumed to be initialized.
+ ///
+ /// If this MIR was built for a constant, this will be 0.
+ pub arg_count: usize,
+
+ /// Mark an argument local (which must be a tuple) as getting passed as
+ /// its individual components at the LLVM level.
+ ///
+ /// This is used for the "rust-call" ABI.
+ pub spread_arg: Option<Local>,
+
+ /// Debug information pertaining to user variables, including captures.
+ pub var_debug_info: Vec<VarDebugInfo<'tcx>>,
+
+ /// A span representing this MIR, for error reporting.
+ pub span: Span,
+
+ /// Constants that are required to evaluate successfully for this MIR to be well-formed.
+ /// We hold in this field all the constants we are not able to evaluate yet.
+ pub required_consts: Vec<Constant<'tcx>>,
+
+ /// Whether this body uses generic parameters. This is used for the `ConstEvaluatable` check.
+ ///
+ /// Note that this does not actually mean that this body is not computable right now.
+ /// The repeat count in the following example is polymorphic, but can still be evaluated
+ /// without knowing anything about the type parameter `T`.
+ ///
+ /// ```rust
+ /// fn test<T>() {
+ /// let _ = [0; std::mem::size_of::<*mut T>()];
+ /// }
+ /// ```
+ ///
+ /// **WARNING**: Do not change this flag after the MIR was originally created, even if an optimization
+ /// removed the last mention of all generic params. We do not want to rely on optimizations and
+ /// potentially allow things like `[u8; std::mem::size_of::<T>() * 0]` due to this.
+ pub is_polymorphic: bool,
+
+ pub tainted_by_errors: Option<ErrorGuaranteed>,
+}
+
+impl<'tcx> Body<'tcx> {
+ pub fn new(
+ source: MirSource<'tcx>,
+ basic_blocks: IndexVec<BasicBlock, BasicBlockData<'tcx>>,
+ source_scopes: IndexVec<SourceScope, SourceScopeData<'tcx>>,
+ local_decls: LocalDecls<'tcx>,
+ user_type_annotations: ty::CanonicalUserTypeAnnotations<'tcx>,
+ arg_count: usize,
+ var_debug_info: Vec<VarDebugInfo<'tcx>>,
+ span: Span,
+ generator_kind: Option<GeneratorKind>,
+ tainted_by_errors: Option<ErrorGuaranteed>,
+ ) -> Self {
+ // We need `arg_count` locals, and one for the return place.
+ assert!(
+ local_decls.len() > arg_count,
+ "expected at least {} locals, got {}",
+ arg_count + 1,
+ local_decls.len()
+ );
+
+ let mut body = Body {
+ phase: MirPhase::Built,
+ source,
+ basic_blocks: BasicBlocks::new(basic_blocks),
+ source_scopes,
+ generator: generator_kind.map(|generator_kind| {
+ Box::new(GeneratorInfo {
+ yield_ty: None,
+ generator_drop: None,
+ generator_layout: None,
+ generator_kind,
+ })
+ }),
+ local_decls,
+ user_type_annotations,
+ arg_count,
+ spread_arg: None,
+ var_debug_info,
+ span,
+ required_consts: Vec::new(),
+ is_polymorphic: false,
+ tainted_by_errors,
+ };
+ body.is_polymorphic = body.has_param_types_or_consts();
+ body
+ }
+
+ /// Returns a partially initialized MIR body containing only a list of basic blocks.
+ ///
+ /// The returned MIR contains no `LocalDecl`s (even for the return place) or source scopes. It
+ /// is only useful for testing but cannot be `#[cfg(test)]` because it is used in a different
+ /// crate.
+ pub fn new_cfg_only(basic_blocks: IndexVec<BasicBlock, BasicBlockData<'tcx>>) -> Self {
+ let mut body = Body {
+ phase: MirPhase::Built,
+ source: MirSource::item(CRATE_DEF_ID.to_def_id()),
+ basic_blocks: BasicBlocks::new(basic_blocks),
+ source_scopes: IndexVec::new(),
+ generator: None,
+ local_decls: IndexVec::new(),
+ user_type_annotations: IndexVec::new(),
+ arg_count: 0,
+ spread_arg: None,
+ span: DUMMY_SP,
+ required_consts: Vec::new(),
+ var_debug_info: Vec::new(),
+ is_polymorphic: false,
+ tainted_by_errors: None,
+ };
+ body.is_polymorphic = body.has_param_types_or_consts();
+ body
+ }
+
+ #[inline]
+ pub fn basic_blocks(&self) -> &IndexVec<BasicBlock, BasicBlockData<'tcx>> {
+ &self.basic_blocks
+ }
+
+ #[inline]
+ pub fn basic_blocks_mut(&mut self) -> &mut IndexVec<BasicBlock, BasicBlockData<'tcx>> {
+ self.basic_blocks.as_mut()
+ }
+
+ #[inline]
+ pub fn local_kind(&self, local: Local) -> LocalKind {
+ let index = local.as_usize();
+ if index == 0 {
+ debug_assert!(
+ self.local_decls[local].mutability == Mutability::Mut,
+ "return place should be mutable"
+ );
+
+ LocalKind::ReturnPointer
+ } else if index < self.arg_count + 1 {
+ LocalKind::Arg
+ } else if self.local_decls[local].is_user_variable() {
+ LocalKind::Var
+ } else {
+ LocalKind::Temp
+ }
+ }
+
+ /// Returns an iterator over all user-declared mutable locals.
+ #[inline]
+ pub fn mut_vars_iter<'a>(&'a self) -> impl Iterator<Item = Local> + Captures<'tcx> + 'a {
+ (self.arg_count + 1..self.local_decls.len()).filter_map(move |index| {
+ let local = Local::new(index);
+ let decl = &self.local_decls[local];
+ if decl.is_user_variable() && decl.mutability == Mutability::Mut {
+ Some(local)
+ } else {
+ None
+ }
+ })
+ }
+
+ /// Returns an iterator over all user-declared mutable arguments and locals.
+ #[inline]
+ pub fn mut_vars_and_args_iter<'a>(
+ &'a self,
+ ) -> impl Iterator<Item = Local> + Captures<'tcx> + 'a {
+ (1..self.local_decls.len()).filter_map(move |index| {
+ let local = Local::new(index);
+ let decl = &self.local_decls[local];
+ if (decl.is_user_variable() || index < self.arg_count + 1)
+ && decl.mutability == Mutability::Mut
+ {
+ Some(local)
+ } else {
+ None
+ }
+ })
+ }
+
+ /// Returns an iterator over all function arguments.
+ #[inline]
+ pub fn args_iter(&self) -> impl Iterator<Item = Local> + ExactSizeIterator {
+ (1..self.arg_count + 1).map(Local::new)
+ }
+
+ /// Returns an iterator over all user-defined variables and compiler-generated temporaries (all
+ /// locals that are neither arguments nor the return place).
+ #[inline]
+ pub fn vars_and_temps_iter(
+ &self,
+ ) -> impl DoubleEndedIterator<Item = Local> + ExactSizeIterator {
+ (self.arg_count + 1..self.local_decls.len()).map(Local::new)
+ }
+
+ #[inline]
+ pub fn drain_vars_and_temps<'a>(&'a mut self) -> impl Iterator<Item = LocalDecl<'tcx>> + 'a {
+ self.local_decls.drain(self.arg_count + 1..)
+ }
+
+ /// Returns the source info associated with `location`.
+ pub fn source_info(&self, location: Location) -> &SourceInfo {
+ let block = &self[location.block];
+ let stmts = &block.statements;
+ let idx = location.statement_index;
+ if idx < stmts.len() {
+ &stmts[idx].source_info
+ } else {
+ assert_eq!(idx, stmts.len());
+ &block.terminator().source_info
+ }
+ }
+
+ /// Returns the return type; it is always the first element of the `local_decls` array.
+ #[inline]
+ pub fn return_ty(&self) -> Ty<'tcx> {
+ self.local_decls[RETURN_PLACE].ty
+ }
+
+ /// Like `return_ty`, but wraps the return type in an `EarlyBinder`.
+ #[inline]
+ pub fn bound_return_ty(&self) -> ty::EarlyBinder<Ty<'tcx>> {
+ ty::EarlyBinder(self.local_decls[RETURN_PLACE].ty)
+ }
+
+ /// Gets the location of the terminator for the given block.
+ #[inline]
+ pub fn terminator_loc(&self, bb: BasicBlock) -> Location {
+ Location { block: bb, statement_index: self[bb].statements.len() }
+ }
+
+ pub fn stmt_at(&self, location: Location) -> Either<&Statement<'tcx>, &Terminator<'tcx>> {
+ let Location { block, statement_index } = location;
+ let block_data = &self.basic_blocks[block];
+ block_data
+ .statements
+ .get(statement_index)
+ .map(Either::Left)
+ .unwrap_or_else(|| Either::Right(block_data.terminator()))
+ }
+
+ #[inline]
+ pub fn yield_ty(&self) -> Option<Ty<'tcx>> {
+ self.generator.as_ref().and_then(|generator| generator.yield_ty)
+ }
+
+ #[inline]
+ pub fn generator_layout(&self) -> Option<&GeneratorLayout<'tcx>> {
+ self.generator.as_ref().and_then(|generator| generator.generator_layout.as_ref())
+ }
+
+ #[inline]
+ pub fn generator_drop(&self) -> Option<&Body<'tcx>> {
+ self.generator.as_ref().and_then(|generator| generator.generator_drop.as_ref())
+ }
+
+ #[inline]
+ pub fn generator_kind(&self) -> Option<GeneratorKind> {
+ self.generator.as_ref().map(|generator| generator.generator_kind)
+ }
+}
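+
+// A sketch (in comments) of the local index layout the accessors above rely on:
+// for `fn foo(a: i32, b: i32) -> i32` with one temporary, the locals are:
+//
+// _0: the return place (`LocalKind::ReturnPointer`, always mutable)
+// _1, _2: the arguments `a` and `b` (`LocalKind::Arg`), yielded by `args_iter`
+// _3: the temporary (`LocalKind::Temp`), yielded by `vars_and_temps_iter`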
+
+#[derive(Copy, Clone, PartialEq, Eq, Debug, TyEncodable, TyDecodable, HashStable)]
+pub enum Safety {
+ Safe,
+ /// Unsafe because of compiler-generated unsafe code, like `await` desugaring
+ BuiltinUnsafe,
+ /// Unsafe because of an unsafe fn
+ FnUnsafe,
+ /// Unsafe because of an `unsafe` block
+ ExplicitUnsafe(hir::HirId),
+}
+
+impl<'tcx> Index<BasicBlock> for Body<'tcx> {
+ type Output = BasicBlockData<'tcx>;
+
+ #[inline]
+ fn index(&self, index: BasicBlock) -> &BasicBlockData<'tcx> {
+ &self.basic_blocks()[index]
+ }
+}
+
+impl<'tcx> IndexMut<BasicBlock> for Body<'tcx> {
+ #[inline]
+ fn index_mut(&mut self, index: BasicBlock) -> &mut BasicBlockData<'tcx> {
+ &mut self.basic_blocks.as_mut()[index]
+ }
+}
+
+#[derive(Copy, Clone, Debug, HashStable, TypeFoldable, TypeVisitable)]
+pub enum ClearCrossCrate<T> {
+ Clear,
+ Set(T),
+}
+
+impl<T> ClearCrossCrate<T> {
+ pub fn as_ref(&self) -> ClearCrossCrate<&T> {
+ match self {
+ ClearCrossCrate::Clear => ClearCrossCrate::Clear,
+ ClearCrossCrate::Set(v) => ClearCrossCrate::Set(v),
+ }
+ }
+
+ pub fn assert_crate_local(self) -> T {
+ match self {
+ ClearCrossCrate::Clear => bug!("unwrapping cross-crate data"),
+ ClearCrossCrate::Set(v) => v,
+ }
+ }
+}
+
+const TAG_CLEAR_CROSS_CRATE_CLEAR: u8 = 0;
+const TAG_CLEAR_CROSS_CRATE_SET: u8 = 1;
+
+impl<E: TyEncoder, T: Encodable<E>> Encodable<E> for ClearCrossCrate<T> {
+ #[inline]
+ fn encode(&self, e: &mut E) {
+ if E::CLEAR_CROSS_CRATE {
+ return;
+ }
+
+ match *self {
+ ClearCrossCrate::Clear => TAG_CLEAR_CROSS_CRATE_CLEAR.encode(e),
+ ClearCrossCrate::Set(ref val) => {
+ TAG_CLEAR_CROSS_CRATE_SET.encode(e);
+ val.encode(e);
+ }
+ }
+ }
+}
+impl<D: TyDecoder, T: Decodable<D>> Decodable<D> for ClearCrossCrate<T> {
+ #[inline]
+ fn decode(d: &mut D) -> ClearCrossCrate<T> {
+ if D::CLEAR_CROSS_CRATE {
+ return ClearCrossCrate::Clear;
+ }
+
+ let discr = u8::decode(d);
+
+ match discr {
+ TAG_CLEAR_CROSS_CRATE_CLEAR => ClearCrossCrate::Clear,
+ TAG_CLEAR_CROSS_CRATE_SET => {
+ let val = T::decode(d);
+ ClearCrossCrate::Set(val)
+ }
+ tag => panic!("Invalid tag for ClearCrossCrate: {:?}", tag),
+ }
+ }
+}
+
+/// Grouped information about the source code origin of a MIR entity.
+/// Intended to be inspected by diagnostics and debuginfo.
+/// Most passes can work with it as a whole, within a single function.
+// The unofficial Cranelift backend, at least as of #65828, needs `SourceInfo` to implement `Eq` and
+// `Hash`. Please ping @bjorn3 if removing them.
+#[derive(Copy, Clone, Debug, Eq, PartialEq, TyEncodable, TyDecodable, Hash, HashStable)]
+pub struct SourceInfo {
+ /// The source span for the AST pertaining to this MIR entity.
+ pub span: Span,
+
+ /// The source scope, keeping track of which bindings can be
+ /// seen by debuginfo, active lint levels, `unsafe {...}`, etc.
+ pub scope: SourceScope,
+}
+
+impl SourceInfo {
+ #[inline]
+ pub fn outermost(span: Span) -> Self {
+ SourceInfo { span, scope: OUTERMOST_SOURCE_SCOPE }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Variables and temps
+
+rustc_index::newtype_index! {
+ pub struct Local {
+ derive [HashStable]
+ DEBUG_FORMAT = "_{}",
+ const RETURN_PLACE = 0,
+ }
+}
+
+impl Atom for Local {
+ fn index(self) -> usize {
+ Idx::index(self)
+ }
+}
+
+/// Classifies locals into categories. See `Body::local_kind`.
+#[derive(Clone, Copy, PartialEq, Eq, Debug, HashStable)]
+pub enum LocalKind {
+ /// User-declared variable binding.
+ Var,
+ /// Compiler-introduced temporary.
+ Temp,
+ /// Function argument.
+ Arg,
+ /// Location of function's return value.
+ ReturnPointer,
+}
+
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable)]
+pub struct VarBindingForm<'tcx> {
+ /// Is variable bound via `x`, `mut x`, `ref x`, or `ref mut x`?
+ pub binding_mode: ty::BindingMode,
+ /// If an explicit type was provided for this variable binding,
+ /// this holds the source Span of that type.
+ ///
+ /// NOTE: if you want to change this to a `HirId`, be wary that
+ /// doing so breaks incremental compilation (as of this writing),
+ /// while a `Span` does not cause our tests to fail.
+ pub opt_ty_info: Option<Span>,
+ /// Place of the RHS of the `=`, or the subject of the `match` where this
+ /// variable is initialized. `None` in the case of `let PATTERN;`.
+ /// `Some((None, ..))` in the case of `let [mut] x = ...`, because
+ /// (a) the right-hand side isn't evaluated as a place expression, and
+ /// (b) it gives a way to separate this case from the remaining cases
+ /// for diagnostics.
+ pub opt_match_place: Option<(Option<Place<'tcx>>, Span)>,
+ /// The span of the pattern in which this variable was bound.
+ pub pat_span: Span,
+}
+
+#[derive(Clone, Debug, TyEncodable, TyDecodable)]
+pub enum BindingForm<'tcx> {
+ /// This is a binding for a non-`self` binding, or a `self` that has an explicit type.
+ Var(VarBindingForm<'tcx>),
+ /// Binding for a `self`/`&self`/`&mut self` binding where the type is implicit.
+ ImplicitSelf(ImplicitSelfKind),
+ /// Reference used in a guard expression to ensure immutability.
+ RefForGuard,
+}
+
+/// Represents what type of implicit self a function has, if any.
+#[derive(Clone, Copy, PartialEq, Debug, TyEncodable, TyDecodable, HashStable)]
+pub enum ImplicitSelfKind {
+ /// Represents a `fn x(self);`.
+ Imm,
+ /// Represents a `fn x(mut self);`.
+ Mut,
+ /// Represents a `fn x(&self);`.
+ ImmRef,
+ /// Represents a `fn x(&mut self);`.
+ MutRef,
+ /// Represents when a function does not have a self argument or
+ /// when a function has a `self: X` argument.
+ None,
+}
+
+TrivialTypeTraversalAndLiftImpls! { BindingForm<'tcx>, }
+
+mod binding_form_impl {
+ use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+ use rustc_query_system::ich::StableHashingContext;
+
+ impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for super::BindingForm<'tcx> {
+ fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+ use super::BindingForm::*;
+ std::mem::discriminant(self).hash_stable(hcx, hasher);
+
+ match self {
+ Var(binding) => binding.hash_stable(hcx, hasher),
+ ImplicitSelf(kind) => kind.hash_stable(hcx, hasher),
+ RefForGuard => (),
+ }
+ }
+ }
+}
+
+/// `BlockTailInfo` is attached to the `LocalDecl` for temporaries
+/// created during evaluation of expressions in a block tail
+/// expression; that is, a block like `{ STMT_1; STMT_2; EXPR }`.
+///
+/// It is used to improve diagnostics when such temporaries are
+/// involved in borrow_check errors, e.g., explanations of where the
+/// temporaries come from, when their destructors are run, and/or how
+/// one might revise the code to satisfy the borrow checker's rules.
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable)]
+pub struct BlockTailInfo {
+ /// If `true`, then the value resulting from evaluating this tail
+ /// expression is ignored by the block's expression context.
+ ///
+ /// Examples include `{ ...; tail };` and `let _ = { ...; tail };`
+ /// but not e.g., `let _x = { ...; tail };`
+ pub tail_result_is_ignored: bool,
+
+ /// `Span` of the tail expression.
+ pub span: Span,
+}
+
+/// A MIR local.
+///
+/// This can be a binding declared by the user, a temporary inserted by the compiler, a function
+/// argument, or the return place.
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
+pub struct LocalDecl<'tcx> {
+ /// Whether this is a mutable binding (i.e., `let x` or `let mut x`).
+ ///
+ /// Temporaries and the return place are always mutable.
+ pub mutability: Mutability,
+
+ // FIXME(matthewjasper) Don't store this in `Body`
+ pub local_info: Option<Box<LocalInfo<'tcx>>>,
+
+ /// `true` if this is an internal local.
+ ///
+ /// These locals are not based on types in the source code and are only used
+ /// for a few desugarings at the moment.
+ ///
+ /// The generator transformation will sanity check the locals which are live
+ /// across a suspension point against the type components of the generator
+ /// which type checking knows are live across a suspension point. We need to
+ /// mark drop flags as internal to avoid triggering this check, as they are
+ /// introduced outside of type inference.
+ ///
+ /// This should be sound because the drop flags are fully algebraic, and
+ /// therefore don't affect the auto-trait or outlives properties of the
+ /// generator.
+ pub internal: bool,
+
+ /// If this local is a temporary and `is_block_tail` is `Some`,
+ /// then it is a temporary created for evaluation of some
+ /// subexpression of some block's tail expression (with no
+ /// intervening statement context).
+ // FIXME(matthewjasper) Don't store this in `Body`
+ pub is_block_tail: Option<BlockTailInfo>,
+
+ /// The type of this local.
+ pub ty: Ty<'tcx>,
+
+ /// If the user manually ascribed a type to this variable,
+ /// e.g., via `let x: T`, then we carry that type here. The MIR
+ /// borrow checker needs this information since it can affect
+ /// region inference.
+ // FIXME(matthewjasper) Don't store this in `Body`
+ pub user_ty: Option<Box<UserTypeProjections>>,
+
+ /// The *syntactic* (i.e., not visibility) source scope the local is defined
+ /// in. If the local was defined in a let-statement, this
+ /// is *within* the let-statement, rather than outside
+ /// of it.
+ ///
+ /// This is needed because the visibility source scope of locals within
+ /// a let-statement is weird.
+ ///
+ /// The reason is that we want the local to be *within* the let-statement
+ /// for lint purposes, but we want the local to be *after* the let-statement
+ /// for names-in-scope purposes.
+ ///
+ /// That is, if we have a let-statement like the one in this
+ /// function:
+ ///
+ /// ```
+ /// fn foo(x: &str) {
+ /// #[allow(unused_mut)]
+ /// let mut x: u32 = { // <- one unused mut
+ /// let mut y: u32 = x.parse().unwrap();
+ /// y + 2
+ /// };
+ /// drop(x);
+ /// }
+ /// ```
+ ///
+ /// Then, from a lint point of view, the declaration of `x: u32`
+ /// (and `y: u32`) are within the `#[allow(unused_mut)]` scope - the
+ /// lint scopes are the same as the AST/HIR nesting.
+ ///
+ /// However, from a name lookup point of view, the scopes look more like
+ /// as if the let-statements were `match` expressions:
+ ///
+ /// ```
+ /// fn foo(x: &str) {
+ /// match {
+ /// match x.parse::<u32>().unwrap() {
+ /// y => y + 2
+ /// }
+ /// } {
+ /// x => drop(x)
+ /// };
+ /// }
+ /// ```
+ ///
+ /// We care about the name-lookup scopes for debuginfo - if the
+ /// debuginfo instruction pointer is at the call to `x.parse()`, we
+ /// want `x` to refer to `x: &str`, but if it is at the call to
+ /// `drop(x)`, we want it to refer to `x: u32`.
+ ///
+ /// To allow both uses to work, we need to have more than a single scope
+ /// for a local. We have the `source_info.scope` represent the "syntactic"
+ /// lint scope (with a variable being under its let block) while the
+ /// `var_debug_info.source_info.scope` represents the "local variable"
+ /// scope (where the "rest" of a block is under all prior let-statements).
+ ///
+ /// The end result looks like this:
+ ///
+ /// ```text
+ /// ROOT SCOPE
+ /// │{ argument x: &str }
+ /// │
+ /// │ │{ #[allow(unused_mut)] } // This is actually split into 2 scopes
+ /// │ │ // in practice because I'm lazy.
+ /// │ │
+ /// │ │← x.source_info.scope
+ /// │ │← `x.parse().unwrap()`
+ /// │ │
+ /// │ │ │← y.source_info.scope
+ /// │ │
+ /// │ │ │{ let y: u32 }
+ /// │ │ │
+ /// │ │ │← y.var_debug_info.source_info.scope
+ /// │ │ │← `y + 2`
+ /// │
+ /// │ │{ let x: u32 }
+ /// │ │← x.var_debug_info.source_info.scope
+ /// │ │← `drop(x)` // This accesses `x: u32`.
+ /// ```
+ pub source_info: SourceInfo,
+}
+
+// `LocalDecl` is used a lot. Make sure it doesn't unintentionally get bigger.
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+static_assert_size!(LocalDecl<'_>, 56);
+
+/// Extra information about some locals that is used for diagnostics and for
+/// classifying variables into local variables, statics, etc., which is needed e.g.
+/// for unsafety checking.
+///
+/// Not used for non-StaticRef temporaries, the return place, or anonymous
+/// function parameters.
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
+pub enum LocalInfo<'tcx> {
+ /// A user-defined local variable or function parameter
+ ///
+ /// The `BindingForm` is solely used for local diagnostics when generating
+ /// warnings/errors when compiling the current crate, and therefore it need
+ /// not be visible across crates.
+ User(ClearCrossCrate<BindingForm<'tcx>>),
+ /// A temporary created that references the static with the given `DefId`.
+ StaticRef { def_id: DefId, is_thread_local: bool },
+ /// A temporary created that references the const with the given `DefId`
+ ConstRef { def_id: DefId },
+ /// A temporary created during the creation of an aggregate
+ /// (e.g. a temporary for `foo` in `MyStruct { my_field: foo }`)
+ AggregateTemp,
+ /// A temporary created during the `Derefer` pass to avoid its retagging
+ DerefTemp,
+}
+
+impl<'tcx> LocalDecl<'tcx> {
+ /// Returns `true` only if local is a binding that can itself be
+ /// made mutable via the addition of the `mut` keyword, namely
+ /// something like the occurrences of `x` in:
+ /// - `fn foo(x: Type) { ... }`,
+ /// - `let x = ...`,
+ /// - or `match ... { C(x) => ... }`
+ pub fn can_be_made_mutable(&self) -> bool {
+ matches!(
+ self.local_info,
+ Some(box LocalInfo::User(ClearCrossCrate::Set(
+ BindingForm::Var(VarBindingForm {
+ binding_mode: ty::BindingMode::BindByValue(_),
+ opt_ty_info: _,
+ opt_match_place: _,
+ pat_span: _,
+ }) | BindingForm::ImplicitSelf(ImplicitSelfKind::Imm),
+ )))
+ )
+ }
+
+ /// Returns `true` if local is definitely not a `ref ident` or
+ /// `ref mut ident` binding. (Such bindings cannot be made into
+ /// mutable bindings, but the inverse does not necessarily hold).
+ pub fn is_nonref_binding(&self) -> bool {
+ matches!(
+ self.local_info,
+ Some(box LocalInfo::User(ClearCrossCrate::Set(
+ BindingForm::Var(VarBindingForm {
+ binding_mode: ty::BindingMode::BindByValue(_),
+ opt_ty_info: _,
+ opt_match_place: _,
+ pat_span: _,
+ }) | BindingForm::ImplicitSelf(_),
+ )))
+ )
+ }
+
+ /// Returns `true` if this variable is a named variable or function
+ /// parameter declared by the user.
+ #[inline]
+ pub fn is_user_variable(&self) -> bool {
+ matches!(self.local_info, Some(box LocalInfo::User(_)))
+ }
+
+ /// Returns `true` if this is a reference to a variable bound in a `match`
+ /// expression that is used to access said variable for the guard of the
+ /// match arm.
+ pub fn is_ref_for_guard(&self) -> bool {
+ matches!(
+ self.local_info,
+ Some(box LocalInfo::User(ClearCrossCrate::Set(BindingForm::RefForGuard)))
+ )
+ }
+
+ /// Returns `true` if this is a reference to a static item that is used to
+ /// access that static.
+ pub fn is_ref_to_static(&self) -> bool {
+ matches!(self.local_info, Some(box LocalInfo::StaticRef { .. }))
+ }
+
+ /// Returns `true` if this is a reference to a thread-local static item that is used to
+ /// access that static.
+ pub fn is_ref_to_thread_local(&self) -> bool {
+ match self.local_info {
+ Some(box LocalInfo::StaticRef { is_thread_local, .. }) => is_thread_local,
+ _ => false,
+ }
+ }
+
+ /// Returns `true` if this is a `DerefTemp`.
+ pub fn is_deref_temp(&self) -> bool {
+ matches!(self.local_info, Some(box LocalInfo::DerefTemp))
+ }
+
+ /// Returns `true` if the local is from a compiler desugaring, e.g.,
+ /// `__next` from a `for` loop.
+ #[inline]
+ pub fn from_compiler_desugaring(&self) -> bool {
+ self.source_info.span.desugaring_kind().is_some()
+ }
+
+ /// Creates a new `LocalDecl` for a temporary: mutable, non-internal.
+ #[inline]
+ pub fn new(ty: Ty<'tcx>, span: Span) -> Self {
+ Self::with_source_info(ty, SourceInfo::outermost(span))
+ }
+
+ /// Like `LocalDecl::new`, but takes a `SourceInfo` instead of a `Span`.
+ #[inline]
+ pub fn with_source_info(ty: Ty<'tcx>, source_info: SourceInfo) -> Self {
+ LocalDecl {
+ mutability: Mutability::Mut,
+ local_info: None,
+ internal: false,
+ is_block_tail: None,
+ ty,
+ user_ty: None,
+ source_info,
+ }
+ }
+
+ /// Converts `self` into the same `LocalDecl`, except tagged as internal.
+ #[inline]
+ pub fn internal(mut self) -> Self {
+ self.internal = true;
+ self
+ }
+
+ /// Converts `self` into the same `LocalDecl`, except tagged as immutable.
+ #[inline]
+ pub fn immutable(mut self) -> Self {
+ self.mutability = Mutability::Not;
+ self
+ }
+
+ /// Converts `self` into the same `LocalDecl`, except tagged as a block-tail temporary.
+ #[inline]
+ pub fn block_tail(mut self, info: BlockTailInfo) -> Self {
+ assert!(self.is_block_tail.is_none());
+ self.is_block_tail = Some(info);
+ self
+ }
+}
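+
+// A minimal usage sketch (in comments) of the builder-style constructors above,
+// assuming some `ty` and `span` are in scope:
+//
+// let temp = LocalDecl::new(ty, span); // mutable, non-internal temporary
+// let imm = LocalDecl::new(ty, span).immutable(); // same, but immutable
+// let flag = LocalDecl::new(ty, span).internal(); // e.g. a generator drop flag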
+
+#[derive(Clone, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
+pub enum VarDebugInfoContents<'tcx> {
+ /// NOTE(eddyb) There's an unenforced invariant that this `Place` is
+ /// based on a `Local`, not a `Static`, and contains no indexing.
+ Place(Place<'tcx>),
+ Const(Constant<'tcx>),
+}
+
+impl<'tcx> Debug for VarDebugInfoContents<'tcx> {
+ fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
+ match self {
+ VarDebugInfoContents::Const(c) => write!(fmt, "{}", c),
+ VarDebugInfoContents::Place(p) => write!(fmt, "{:?}", p),
+ }
+ }
+}
+
+/// Debug information pertaining to a user variable.
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
+pub struct VarDebugInfo<'tcx> {
+ pub name: Symbol,
+
+ /// Source info of the user variable, including the scope
+ /// within which the variable is visible (to debuginfo)
+ /// (see `LocalDecl`'s `source_info` field for more details).
+ pub source_info: SourceInfo,
+
+ /// Where the data for this user variable is to be found.
+ pub value: VarDebugInfoContents<'tcx>,
+}
+
+///////////////////////////////////////////////////////////////////////////
+// BasicBlock
+
+rustc_index::newtype_index! {
+ /// A node in the MIR [control-flow graph][CFG].
+ ///
+ /// There are no branches (e.g., `if`s, function calls, etc.) within a basic block, which makes
+ /// it easier to do [data-flow analyses] and optimizations. Instead, branches are represented
+ /// as an edge in a graph between basic blocks.
+ ///
+ /// Basic blocks consist of a series of [statements][Statement], ending with a
+ /// [terminator][Terminator]. Basic blocks can have multiple predecessors and successors,
+ /// however there is a MIR pass ([`CriticalCallEdges`]) that removes *critical edges*, which
+ /// are edges that go from a multi-successor node to a multi-predecessor node. This pass is
+ /// needed because some analyses require that there are no critical edges in the CFG.
+ ///
+ /// Note that this type is just an index into [`Body.basic_blocks`](Body::basic_blocks);
+ /// the actual data that a basic block holds is in [`BasicBlockData`].
+ ///
+ /// Read more about basic blocks in the [rustc-dev-guide][guide-mir].
+ ///
+ /// [CFG]: https://rustc-dev-guide.rust-lang.org/appendix/background.html#cfg
+ /// [data-flow analyses]:
+ /// https://rustc-dev-guide.rust-lang.org/appendix/background.html#what-is-a-dataflow-analysis
+ /// [`CriticalCallEdges`]: ../../rustc_const_eval/transform/add_call_guards/enum.AddCallGuards.html#variant.CriticalCallEdges
+ /// [guide-mir]: https://rustc-dev-guide.rust-lang.org/mir/
+ pub struct BasicBlock {
+ derive [HashStable]
+ DEBUG_FORMAT = "bb{}",
+ const START_BLOCK = 0,
+ }
+}
+
+impl BasicBlock {
+ pub fn start_location(self) -> Location {
+ Location { block: self, statement_index: 0 }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// BasicBlockData
+
+/// Data for a basic block, including a list of its statements.
+///
+/// See [`BasicBlock`] for documentation on what basic blocks are at a high level.
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
+pub struct BasicBlockData<'tcx> {
+ /// List of statements in this block.
+ pub statements: Vec<Statement<'tcx>>,
+
+ /// Terminator for this block.
+ ///
+ /// N.B., this should generally ONLY be `None` during construction.
+ /// Therefore, you should generally access it via the
+ /// `terminator()` or `terminator_mut()` methods. The only
+ /// exception is that certain passes, such as `simplify_cfg`, swap
+ /// out the terminator temporarily with `None` while they continue
+ /// to recurse over the set of basic blocks.
+ pub terminator: Option<Terminator<'tcx>>,
+
+ /// If true, this block lies on an unwind path. This is used
+ /// during codegen where distinct kinds of basic blocks may be
+ /// generated (particularly for MSVC cleanup). Unwind blocks must
+ /// only branch to other unwind blocks.
+ pub is_cleanup: bool,
+}
+
+impl<'tcx> BasicBlockData<'tcx> {
+ pub fn new(terminator: Option<Terminator<'tcx>>) -> BasicBlockData<'tcx> {
+ BasicBlockData { statements: vec![], terminator, is_cleanup: false }
+ }
+
+ /// Accessor for terminator.
+ ///
+ /// Terminator may not be None after construction of the basic block is complete. This accessor
+ /// provides a convenient way to reach the terminator.
+ #[inline]
+ pub fn terminator(&self) -> &Terminator<'tcx> {
+ self.terminator.as_ref().expect("invalid terminator state")
+ }
+
+ #[inline]
+ pub fn terminator_mut(&mut self) -> &mut Terminator<'tcx> {
+ self.terminator.as_mut().expect("invalid terminator state")
+ }
+
+ pub fn retain_statements<F>(&mut self, mut f: F)
+ where
+ F: FnMut(&mut Statement<'_>) -> bool,
+ {
+ for s in &mut self.statements {
+ if !f(s) {
+ s.make_nop();
+ }
+ }
+ }
+
+ pub fn expand_statements<F, I>(&mut self, mut f: F)
+ where
+ F: FnMut(&mut Statement<'tcx>) -> Option<I>,
+ I: iter::TrustedLen<Item = Statement<'tcx>>,
+ {
+ // Gather all the iterators we'll need to splice in, and their positions.
+ let mut splices: Vec<(usize, I)> = vec![];
+ let mut extra_stmts = 0;
+ for (i, s) in self.statements.iter_mut().enumerate() {
+ if let Some(mut new_stmts) = f(s) {
+ if let Some(first) = new_stmts.next() {
+ // We can already store the first new statement.
+ *s = first;
+
+ // Save the other statements for optimized splicing.
+ let remaining = new_stmts.size_hint().0;
+ if remaining > 0 {
+ splices.push((i + 1 + extra_stmts, new_stmts));
+ extra_stmts += remaining;
+ }
+ } else {
+ s.make_nop();
+ }
+ }
+ }
+
+ // Splice in the new statements, from the end of the block.
+ // FIXME(eddyb) This could be more efficient with a "gap buffer"
+ // where a range of elements ("gap") is left uninitialized, with
+ // splicing adding new elements to the end of that gap and moving
+ // existing elements from before the gap to the end of the gap.
+ // For now, this is safe code, emulating a gap but initializing it.
+ let mut gap = self.statements.len()..self.statements.len() + extra_stmts;
+ self.statements.resize(
+ gap.end,
+ Statement { source_info: SourceInfo::outermost(DUMMY_SP), kind: StatementKind::Nop },
+ );
+ for (splice_start, new_stmts) in splices.into_iter().rev() {
+ let splice_end = splice_start + new_stmts.size_hint().0;
+ while gap.end > splice_end {
+ gap.start -= 1;
+ gap.end -= 1;
+ self.statements.swap(gap.start, gap.end);
+ }
+ self.statements.splice(splice_start..splice_end, new_stmts);
+ gap.end = splice_start;
+ }
+ }
+
+ pub fn visitable(&self, index: usize) -> &dyn MirVisitable<'tcx> {
+ if index < self.statements.len() { &self.statements[index] } else { &self.terminator }
+ }
+}
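+
+// A worked sketch (in comments) of `expand_statements`: given statements
+// `[s0, s1, s2]` and an `f` that expands `s1` into `[a, b, c]`, the loop stores
+// `a` in place of `s1` and records `[b, c]` as a splice at index 2. The second
+// phase grows the vector with `Nop` placeholders, slides the gap leftward so
+// `s2` is swapped past it, and splices `[b, c]` in, yielding `[s0, a, b, c, s2]`
+// while moving each pre-existing statement at most once.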
+
+impl<O> AssertKind<O> {
+ /// Getting a description does not require `O` to be printable, and does not
+ /// require allocation.
+ /// The caller is expected to handle `BoundsCheck` separately.
+ pub fn description(&self) -> &'static str {
+ use AssertKind::*;
+ match self {
+ Overflow(BinOp::Add, _, _) => "attempt to add with overflow",
+ Overflow(BinOp::Sub, _, _) => "attempt to subtract with overflow",
+ Overflow(BinOp::Mul, _, _) => "attempt to multiply with overflow",
+ Overflow(BinOp::Div, _, _) => "attempt to divide with overflow",
+ Overflow(BinOp::Rem, _, _) => "attempt to calculate the remainder with overflow",
+ OverflowNeg(_) => "attempt to negate with overflow",
+ Overflow(BinOp::Shr, _, _) => "attempt to shift right with overflow",
+ Overflow(BinOp::Shl, _, _) => "attempt to shift left with overflow",
+ Overflow(op, _, _) => bug!("{:?} cannot overflow", op),
+ DivisionByZero(_) => "attempt to divide by zero",
+ RemainderByZero(_) => "attempt to calculate the remainder with a divisor of zero",
+ ResumedAfterReturn(GeneratorKind::Gen) => "generator resumed after completion",
+ ResumedAfterReturn(GeneratorKind::Async(_)) => "`async fn` resumed after completion",
+ ResumedAfterPanic(GeneratorKind::Gen) => "generator resumed after panicking",
+ ResumedAfterPanic(GeneratorKind::Async(_)) => "`async fn` resumed after panicking",
+ BoundsCheck { .. } => bug!("Unexpected AssertKind"),
+ }
+ }
+
+ /// Format the message arguments for the `assert(cond, msg..)` terminator in MIR printing.
+ pub fn fmt_assert_args<W: Write>(&self, f: &mut W) -> fmt::Result
+ where
+ O: Debug,
+ {
+ use AssertKind::*;
+ match self {
+ BoundsCheck { ref len, ref index } => write!(
+ f,
+ "\"index out of bounds: the length is {{}} but the index is {{}}\", {:?}, {:?}",
+ len, index
+ ),
+
+ OverflowNeg(op) => {
+ write!(f, "\"attempt to negate `{{}}`, which would overflow\", {:?}", op)
+ }
+ DivisionByZero(op) => write!(f, "\"attempt to divide `{{}}` by zero\", {:?}", op),
+ RemainderByZero(op) => write!(
+ f,
+ "\"attempt to calculate the remainder of `{{}}` with a divisor of zero\", {:?}",
+ op
+ ),
+ Overflow(BinOp::Add, l, r) => write!(
+ f,
+ "\"attempt to compute `{{}} + {{}}`, which would overflow\", {:?}, {:?}",
+ l, r
+ ),
+ Overflow(BinOp::Sub, l, r) => write!(
+ f,
+ "\"attempt to compute `{{}} - {{}}`, which would overflow\", {:?}, {:?}",
+ l, r
+ ),
+ Overflow(BinOp::Mul, l, r) => write!(
+ f,
+ "\"attempt to compute `{{}} * {{}}`, which would overflow\", {:?}, {:?}",
+ l, r
+ ),
+ Overflow(BinOp::Div, l, r) => write!(
+ f,
+ "\"attempt to compute `{{}} / {{}}`, which would overflow\", {:?}, {:?}",
+ l, r
+ ),
+ Overflow(BinOp::Rem, l, r) => write!(
+ f,
+ "\"attempt to compute the remainder of `{{}} % {{}}`, which would overflow\", {:?}, {:?}",
+ l, r
+ ),
+ Overflow(BinOp::Shr, _, r) => {
+ write!(f, "\"attempt to shift right by `{{}}`, which would overflow\", {:?}", r)
+ }
+ Overflow(BinOp::Shl, _, r) => {
+ write!(f, "\"attempt to shift left by `{{}}`, which would overflow\", {:?}", r)
+ }
+ _ => write!(f, "\"{}\"", self.description()),
+ }
+ }
+}
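+
+// A sketch (in comments) of the MIR printing this feeds into: for
+// `AssertKind::Overflow(BinOp::Add, _2, _3)`, `fmt_assert_args` writes the
+// message and argument list of an `assert` terminator, so a dump shows e.g.
+//
+// assert(!move _4, "attempt to compute `{} + {}`, which would overflow", _2, _3)
+//
+// with the surrounding `assert(cond, ...)` wrapper printed by the caller
+// (`_2`, `_3`, and `_4` are example operands here).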
+
+impl<O: fmt::Debug> fmt::Debug for AssertKind<O> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ use AssertKind::*;
+ match self {
+ BoundsCheck { ref len, ref index } => write!(
+ f,
+ "index out of bounds: the length is {:?} but the index is {:?}",
+ len, index
+ ),
+ OverflowNeg(op) => write!(f, "attempt to negate `{:#?}`, which would overflow", op),
+ DivisionByZero(op) => write!(f, "attempt to divide `{:#?}` by zero", op),
+ RemainderByZero(op) => write!(
+ f,
+ "attempt to calculate the remainder of `{:#?}` with a divisor of zero",
+ op
+ ),
+ Overflow(BinOp::Add, l, r) => {
+ write!(f, "attempt to compute `{:#?} + {:#?}`, which would overflow", l, r)
+ }
+ Overflow(BinOp::Sub, l, r) => {
+ write!(f, "attempt to compute `{:#?} - {:#?}`, which would overflow", l, r)
+ }
+ Overflow(BinOp::Mul, l, r) => {
+ write!(f, "attempt to compute `{:#?} * {:#?}`, which would overflow", l, r)
+ }
+ Overflow(BinOp::Div, l, r) => {
+ write!(f, "attempt to compute `{:#?} / {:#?}`, which would overflow", l, r)
+ }
+ Overflow(BinOp::Rem, l, r) => write!(
+ f,
+ "attempt to compute the remainder of `{:#?} % {:#?}`, which would overflow",
+ l, r
+ ),
+ Overflow(BinOp::Shr, _, r) => {
+ write!(f, "attempt to shift right by `{:#?}`, which would overflow", r)
+ }
+ Overflow(BinOp::Shl, _, r) => {
+ write!(f, "attempt to shift left by `{:#?}`, which would overflow", r)
+ }
+ _ => write!(f, "{}", self.description()),
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Statements
+
+/// A statement in a basic block, including information about its source code.
+#[derive(Clone, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
+pub struct Statement<'tcx> {
+ pub source_info: SourceInfo,
+ pub kind: StatementKind<'tcx>,
+}
+
+// `Statement` is used a lot. Make sure it doesn't unintentionally get bigger.
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+static_assert_size!(Statement<'_>, 32);
+
+impl Statement<'_> {
+ /// Changes a statement to a nop. This is both faster than deleting instructions and avoids
+ /// invalidating statement indices in `Location`s.
+ pub fn make_nop(&mut self) {
+ self.kind = StatementKind::Nop
+ }
+
+ /// Changes a statement to a nop and returns the original statement.
+ #[must_use = "If you don't need the statement, use `make_nop` instead"]
+ pub fn replace_nop(&mut self) -> Self {
+ Statement {
+ source_info: self.source_info,
+ kind: mem::replace(&mut self.kind, StatementKind::Nop),
+ }
+ }
+}
+
+impl Debug for Statement<'_> {
+ fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
+ use self::StatementKind::*;
+ match self.kind {
+ Assign(box (ref place, ref rv)) => write!(fmt, "{:?} = {:?}", place, rv),
+ FakeRead(box (ref cause, ref place)) => {
+ write!(fmt, "FakeRead({:?}, {:?})", cause, place)
+ }
+ Retag(ref kind, ref place) => write!(
+ fmt,
+ "Retag({}{:?})",
+ match kind {
+ RetagKind::FnEntry => "[fn entry] ",
+ RetagKind::TwoPhase => "[2phase] ",
+ RetagKind::Raw => "[raw] ",
+ RetagKind::Default => "",
+ },
+ place,
+ ),
+ StorageLive(ref place) => write!(fmt, "StorageLive({:?})", place),
+ StorageDead(ref place) => write!(fmt, "StorageDead({:?})", place),
+ SetDiscriminant { ref place, variant_index } => {
+ write!(fmt, "discriminant({:?}) = {:?}", place, variant_index)
+ }
+ Deinit(ref place) => write!(fmt, "Deinit({:?})", place),
+ AscribeUserType(box (ref place, ref c_ty), ref variance) => {
+ write!(fmt, "AscribeUserType({:?}, {:?}, {:?})", place, variance, c_ty)
+ }
+ Coverage(box self::Coverage { ref kind, code_region: Some(ref rgn) }) => {
+ write!(fmt, "Coverage::{:?} for {:?}", kind, rgn)
+ }
+ Coverage(box ref coverage) => write!(fmt, "Coverage::{:?}", coverage.kind),
+ CopyNonOverlapping(box crate::mir::CopyNonOverlapping {
+ ref src,
+ ref dst,
+ ref count,
+ }) => {
+ write!(fmt, "copy_nonoverlapping(src={:?}, dst={:?}, count={:?})", src, dst, count)
+ }
+ Nop => write!(fmt, "nop"),
+ }
+ }
+}
+
+impl<'tcx> StatementKind<'tcx> {
+ pub fn as_assign_mut(&mut self) -> Option<&mut (Place<'tcx>, Rvalue<'tcx>)> {
+ match self {
+ StatementKind::Assign(x) => Some(x),
+ _ => None,
+ }
+ }
+
+ pub fn as_assign(&self) -> Option<&(Place<'tcx>, Rvalue<'tcx>)> {
+ match self {
+ StatementKind::Assign(x) => Some(x),
+ _ => None,
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Places
+
+impl<V, T> ProjectionElem<V, T> {
+ /// Returns `true` if the target of this projection may refer to a different region of memory
+ /// than the base.
+ fn is_indirect(&self) -> bool {
+ match self {
+ Self::Deref => true,
+
+ Self::Field(_, _)
+ | Self::Index(_)
+ | Self::ConstantIndex { .. }
+ | Self::Subslice { .. }
+ | Self::Downcast(_, _) => false,
+ }
+ }
+
+ /// Returns `true` if this is a `Downcast` projection with the given `VariantIdx`.
+ pub fn is_downcast_to(&self, v: VariantIdx) -> bool {
+ matches!(*self, Self::Downcast(_, x) if x == v)
+ }
+
+ /// Returns `true` if this is a `Field` projection with the given index.
+ pub fn is_field_to(&self, f: Field) -> bool {
+ matches!(*self, Self::Field(x, _) if x == f)
+ }
+}
+
+/// Alias for projections as they appear in `UserTypeProjection`, where we
+/// need neither the `V` parameter for `Index` nor the `T` for `Field`.
+pub type ProjectionKind = ProjectionElem<(), ()>;
+
+rustc_index::newtype_index! {
+ /// A [newtype'd][wrapper] index type in the MIR [control-flow graph][CFG]
+ ///
+ /// A field (e.g., `f` in `_1.f`) is one variant of [`ProjectionElem`]. Conceptually,
+ /// rustc can identify whether a field projection and its base refer to the same
+ /// region of memory or to two different ones.
+ /// Read more about projections in the [rustc-dev-guide][mir-datatypes].
+ ///
+ /// [wrapper]: https://rustc-dev-guide.rust-lang.org/appendix/glossary.html#newtype
+ /// [CFG]: https://rustc-dev-guide.rust-lang.org/appendix/background.html#cfg
+ /// [mir-datatypes]: https://rustc-dev-guide.rust-lang.org/mir/index.html#mir-data-types
+ pub struct Field {
+ derive [HashStable]
+ DEBUG_FORMAT = "field[{}]"
+ }
+}
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
+pub struct PlaceRef<'tcx> {
+ pub local: Local,
+ pub projection: &'tcx [PlaceElem<'tcx>],
+}
+
+// Once we stop implementing `Ord` for `DefId`,
+// this impl will be unnecessary. Until then, we'll
+// leave this impl in place to prevent re-adding a
+// dependency on the `Ord` impl for `DefId`.
+impl<'tcx> !PartialOrd for PlaceRef<'tcx> {}
+
+impl<'tcx> Place<'tcx> {
+ // FIXME change this to a const fn by also making List::empty a const fn.
+ pub fn return_place() -> Place<'tcx> {
+ Place { local: RETURN_PLACE, projection: List::empty() }
+ }
+
+ /// Returns `true` if this `Place` contains a `Deref` projection.
+ ///
+ /// If `Place::is_indirect` returns false, the caller knows that the `Place` refers to the
+ /// same region of memory as its base.
+ pub fn is_indirect(&self) -> bool {
+ self.projection.iter().any(|elem| elem.is_indirect())
+ }
+
+ /// Returns `true` if the first projection element is a `Deref`.
+ ///
+ /// If `MirPhase >= Derefered` and the projection contains a `Deref`,
+ /// it is guaranteed to be the first element.
+ pub fn has_deref(&self) -> bool {
+ // Make sure this is not accidentally used in the wrong MIR phase.
+ debug_assert!(!self.projection[1..].contains(&PlaceElem::Deref));
+ self.projection.first() == Some(&PlaceElem::Deref)
+ }
+
+ /// Finds the innermost `Local` from this `Place`, *if* it is either a local itself or
+ /// a single deref of a local.
+ #[inline(always)]
+ pub fn local_or_deref_local(&self) -> Option<Local> {
+ self.as_ref().local_or_deref_local()
+ }
+
+ /// If this place represents a local variable like `_X` with no
+ /// projections, return `Some(_X)`.
+ #[inline(always)]
+ pub fn as_local(&self) -> Option<Local> {
+ self.as_ref().as_local()
+ }
+
+ #[inline]
+ pub fn as_ref(&self) -> PlaceRef<'tcx> {
+ PlaceRef { local: self.local, projection: &self.projection }
+ }
+
+ /// Iterate over the projections in evaluation order, i.e., the first element is the base with
+ /// its projection and then subsequently more projections are added.
+ /// As a concrete example, given the place `a.b.c`, this would yield:
+ /// - (a, .b)
+ /// - (a.b, .c)
+ ///
+ /// Given a place without projections, the iterator is empty.
+ #[inline]
+ pub fn iter_projections(
+ self,
+ ) -> impl Iterator<Item = (PlaceRef<'tcx>, PlaceElem<'tcx>)> + DoubleEndedIterator {
+ self.as_ref().iter_projections()
+ }
+
+ /// Generates a new place by appending `more_projections` to the existing ones
+ /// and interning the result.
+ pub fn project_deeper(self, more_projections: &[PlaceElem<'tcx>], tcx: TyCtxt<'tcx>) -> Self {
+ if more_projections.is_empty() {
+ return self;
+ }
+
+ let mut v: Vec<PlaceElem<'tcx>>;
+
+ let new_projections = if self.projection.is_empty() {
+ more_projections
+ } else {
+ v = Vec::with_capacity(self.projection.len() + more_projections.len());
+ v.extend(self.projection);
+ v.extend(more_projections);
+ &v
+ };
+
+ Place { local: self.local, projection: tcx.intern_place_elems(new_projections) }
+ }
+}
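+
+// A minimal sketch (in comments) of `project_deeper`, assuming a `tcx` and a
+// place for `_1.0` are in scope: appending a `Deref` builds a place for `*(_1.0)`.
+//
+// let deeper = place.project_deeper(&[PlaceElem::Deref], tcx);
+// // `deeper.projection` is now `[Field(0, ty), Deref]`.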
+
+impl From<Local> for Place<'_> {
+ fn from(local: Local) -> Self {
+ Place { local, projection: List::empty() }
+ }
+}
+
+impl<'tcx> PlaceRef<'tcx> {
+ /// Finds the innermost `Local` from this `Place`, *if* it is either a local itself or
+ /// a single deref of a local.
+ pub fn local_or_deref_local(&self) -> Option<Local> {
+ match *self {
+ PlaceRef { local, projection: [] }
+ | PlaceRef { local, projection: [ProjectionElem::Deref] } => Some(local),
+ _ => None,
+ }
+ }
+
+ /// Returns `true` if the first projection element is a `Deref`.
+ ///
+ /// If `MirPhase >= Derefered` and the projection contains a `Deref`,
+ /// it is guaranteed to be the first element.
+ pub fn has_deref(&self) -> bool {
+ self.projection.first() == Some(&PlaceElem::Deref)
+ }
+
+ /// If this place represents a local variable like `_X` with no
+ /// projections, return `Some(_X)`.
+ #[inline]
+ pub fn as_local(&self) -> Option<Local> {
+ match *self {
+ PlaceRef { local, projection: [] } => Some(local),
+ _ => None,
+ }
+ }
+
+ #[inline]
+ pub fn last_projection(&self) -> Option<(PlaceRef<'tcx>, PlaceElem<'tcx>)> {
+ if let &[ref proj_base @ .., elem] = self.projection {
+ Some((PlaceRef { local: self.local, projection: proj_base }, elem))
+ } else {
+ None
+ }
+ }
+
+ /// Iterate over the projections in evaluation order, i.e., the first element is the base with
+ /// its projection and then subsequently more projections are added.
+ /// As a concrete example, given the place `a.b.c`, this would yield:
+ /// - (a, .b)
+ /// - (a.b, .c)
+ ///
+ /// Given a place without projections, the iterator is empty.
+ #[inline]
+ pub fn iter_projections(
+ self,
+ ) -> impl Iterator<Item = (PlaceRef<'tcx>, PlaceElem<'tcx>)> + DoubleEndedIterator {
+ self.projection.iter().enumerate().map(move |(i, proj)| {
+ let base = PlaceRef { local: self.local, projection: &self.projection[..i] };
+ (base, *proj)
+ })
+ }
+}
+
+impl Debug for Place<'_> {
+ fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
+ for elem in self.projection.iter().rev() {
+ match elem {
+ ProjectionElem::Downcast(_, _) | ProjectionElem::Field(_, _) => {
+ write!(fmt, "(").unwrap();
+ }
+ ProjectionElem::Deref => {
+ write!(fmt, "(*").unwrap();
+ }
+ ProjectionElem::Index(_)
+ | ProjectionElem::ConstantIndex { .. }
+ | ProjectionElem::Subslice { .. } => {}
+ }
+ }
+
+ write!(fmt, "{:?}", self.local)?;
+
+ for elem in self.projection.iter() {
+ match elem {
+ ProjectionElem::Downcast(Some(name), _index) => {
+ write!(fmt, " as {})", name)?;
+ }
+ ProjectionElem::Downcast(None, index) => {
+ write!(fmt, " as variant#{:?})", index)?;
+ }
+ ProjectionElem::Deref => {
+ write!(fmt, ")")?;
+ }
+ ProjectionElem::Field(field, ty) => {
+ write!(fmt, ".{:?}: {:?})", field.index(), ty)?;
+ }
+ ProjectionElem::Index(ref index) => {
+ write!(fmt, "[{:?}]", index)?;
+ }
+ ProjectionElem::ConstantIndex { offset, min_length, from_end: false } => {
+ write!(fmt, "[{:?} of {:?}]", offset, min_length)?;
+ }
+ ProjectionElem::ConstantIndex { offset, min_length, from_end: true } => {
+ write!(fmt, "[-{:?} of {:?}]", offset, min_length)?;
+ }
+ ProjectionElem::Subslice { from, to, from_end: true } if to == 0 => {
+ write!(fmt, "[{:?}:]", from)?;
+ }
+ ProjectionElem::Subslice { from, to, from_end: true } if from == 0 => {
+ write!(fmt, "[:-{:?}]", to)?;
+ }
+ ProjectionElem::Subslice { from, to, from_end: true } => {
+ write!(fmt, "[{:?}:-{:?}]", from, to)?;
+ }
+ ProjectionElem::Subslice { from, to, from_end: false } => {
+ write!(fmt, "[{:?}..{:?}]", from, to)?;
+ }
+ }
+ }
+
+ Ok(())
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Scopes
+
+rustc_index::newtype_index! {
+ pub struct SourceScope {
+ derive [HashStable]
+ DEBUG_FORMAT = "scope[{}]",
+ const OUTERMOST_SOURCE_SCOPE = 0,
+ }
+}
+
+impl SourceScope {
+ /// Finds the original HirId this MIR item came from.
+ /// This is necessary after MIR optimizations: otherwise we would get a
+ /// `HirId` from the inlined function rather than from the call site.
+ pub fn lint_root<'tcx>(
+ self,
+ source_scopes: &IndexVec<SourceScope, SourceScopeData<'tcx>>,
+ ) -> Option<HirId> {
+ let mut data = &source_scopes[self];
+ // FIXME(oli-obk): we should be able to just walk the `inlined_parent_scope`, but it
+ // does not work as I thought it would. Needs more investigation and documentation.
+ while data.inlined.is_some() {
+ trace!(?data);
+ data = &source_scopes[data.parent_scope.unwrap()];
+ }
+ trace!(?data);
+ match &data.local_data {
+ ClearCrossCrate::Set(data) => Some(data.lint_root),
+ ClearCrossCrate::Clear => None,
+ }
+ }
+
+ /// The instance this source scope was inlined from, if any.
+ #[inline]
+ pub fn inlined_instance<'tcx>(
+ self,
+ source_scopes: &IndexVec<SourceScope, SourceScopeData<'tcx>>,
+ ) -> Option<ty::Instance<'tcx>> {
+ let scope_data = &source_scopes[self];
+ if let Some((inlined_instance, _)) = scope_data.inlined {
+ Some(inlined_instance)
+ } else if let Some(inlined_scope) = scope_data.inlined_parent_scope {
+ Some(source_scopes[inlined_scope].inlined.unwrap().0)
+ } else {
+ None
+ }
+ }
+}
+
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
+pub struct SourceScopeData<'tcx> {
+ pub span: Span,
+ pub parent_scope: Option<SourceScope>,
+
+ /// Whether this scope is the root of a scope tree of another body,
+ /// inlined into this body by the MIR inliner.
+ /// `ty::Instance` is the callee, and the `Span` is the call site.
+ pub inlined: Option<(ty::Instance<'tcx>, Span)>,
+
+ /// Nearest (transitive) parent scope (if any) which is inlined.
+ /// This is an optimization over walking up `parent_scope`
+ /// until a scope with `inlined: Some(...)` is found.
+ pub inlined_parent_scope: Option<SourceScope>,
+
+ /// Crate-local information for this source scope, that can't (and
+ /// needn't) be tracked across crates.
+ pub local_data: ClearCrossCrate<SourceScopeLocalData>,
+}
+
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable)]
+pub struct SourceScopeLocalData {
+ /// An `HirId` with lint levels equivalent to this scope's lint levels.
+ pub lint_root: hir::HirId,
+ /// The unsafe block that contains this node.
+ pub safety: Safety,
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Operands
+
+impl<'tcx> Debug for Operand<'tcx> {
+ fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
+ use self::Operand::*;
+ match *self {
+ Constant(ref a) => write!(fmt, "{:?}", a),
+ Copy(ref place) => write!(fmt, "{:?}", place),
+ Move(ref place) => write!(fmt, "move {:?}", place),
+ }
+ }
+}
+
+impl<'tcx> Operand<'tcx> {
+ /// Convenience helper to make a constant that refers to the function
+ /// with the given `DefId` and substs. Since this is used to synthesize
+ /// MIR, it assumes `user_ty` is `None`.
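+ ///
+ /// A hedged usage sketch (illustrative, not a doctest; assumes `def_id`,
+ /// `substs`, and `span` are in scope):
+ /// ```ignore (illustrative)
+ /// let func = Operand::function_handle(tcx, def_id, substs, span);
+ /// ```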
+ pub fn function_handle(
+ tcx: TyCtxt<'tcx>,
+ def_id: DefId,
+ substs: SubstsRef<'tcx>,
+ span: Span,
+ ) -> Self {
+ let ty = tcx.bound_type_of(def_id).subst(tcx, substs);
+ Operand::Constant(Box::new(Constant {
+ span,
+ user_ty: None,
+ literal: ConstantKind::Val(ConstValue::ZeroSized, ty),
+ }))
+ }
+
+ pub fn is_move(&self) -> bool {
+ matches!(self, Operand::Move(..))
+ }
+
+ /// Convenience helper to make a literal-like constant from a given scalar value.
+ /// Since this is used to synthesize MIR, it assumes `user_ty` is `None`.
+ pub fn const_from_scalar(
+ tcx: TyCtxt<'tcx>,
+ ty: Ty<'tcx>,
+ val: Scalar,
+ span: Span,
+ ) -> Operand<'tcx> {
+ debug_assert!({
+ let param_env_and_ty = ty::ParamEnv::empty().and(ty);
+ let type_size = tcx
+ .layout_of(param_env_and_ty)
+ .unwrap_or_else(|e| panic!("could not compute layout for {:?}: {:?}", ty, e))
+ .size;
+ let scalar_size = match val {
+ Scalar::Int(int) => int.size(),
+ _ => panic!("Invalid scalar type {:?}", val),
+ };
+ scalar_size == type_size
+ });
+ Operand::Constant(Box::new(Constant {
+ span,
+ user_ty: None,
+ literal: ConstantKind::Val(ConstValue::Scalar(val), ty),
+ }))
+ }
+
+ pub fn to_copy(&self) -> Self {
+ match *self {
+ Operand::Copy(_) | Operand::Constant(_) => self.clone(),
+ Operand::Move(place) => Operand::Copy(place),
+ }
+ }
+
+ /// Returns the `Place` that is the target of this `Operand`, or `None` if this `Operand` is a
+ /// constant.
+ pub fn place(&self) -> Option<Place<'tcx>> {
+ match self {
+ Operand::Copy(place) | Operand::Move(place) => Some(*place),
+ Operand::Constant(_) => None,
+ }
+ }
+
+ /// Returns the `Constant` that is the target of this `Operand`, or `None` if this `Operand` is a
+ /// place.
+ pub fn constant(&self) -> Option<&Constant<'tcx>> {
+ match self {
+ Operand::Constant(x) => Some(&**x),
+ Operand::Copy(_) | Operand::Move(_) => None,
+ }
+ }
+
+ /// Gets the `ty::FnDef` from an operand if it's a constant function item.
+ ///
+ /// While this is unlikely in general, it's the normal case of what you'll
+ /// find as the `func` in a [`TerminatorKind::Call`].
+ pub fn const_fn_def(&self) -> Option<(DefId, SubstsRef<'tcx>)> {
+ let const_ty = self.constant()?.literal.ty();
+ if let ty::FnDef(def_id, substs) = *const_ty.kind() { Some((def_id, substs)) } else { None }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+/// Rvalues
+
+impl<'tcx> Rvalue<'tcx> {
+ /// Returns `true` if this rvalue can be safely removed when its result is unused.
+ #[inline]
+ pub fn is_safe_to_remove(&self) -> bool {
+ match self {
+ // Pointer-to-int casts may have side effects, because they expose the provenance.
+ // While the model is undecided, we should be conservative. See
+ // <https://www.ralfj.de/blog/2022/04/11/provenance-exposed.html>
+ Rvalue::Cast(CastKind::PointerExposeAddress, _, _) => false,
+
+ Rvalue::Use(_)
+ | Rvalue::CopyForDeref(_)
+ | Rvalue::Repeat(_, _)
+ | Rvalue::Ref(_, _, _)
+ | Rvalue::ThreadLocalRef(_)
+ | Rvalue::AddressOf(_, _)
+ | Rvalue::Len(_)
+ | Rvalue::Cast(
+ CastKind::Misc | CastKind::Pointer(_) | CastKind::PointerFromExposedAddress,
+ _,
+ _,
+ )
+ | Rvalue::BinaryOp(_, _)
+ | Rvalue::CheckedBinaryOp(_, _)
+ | Rvalue::NullaryOp(_, _)
+ | Rvalue::UnaryOp(_, _)
+ | Rvalue::Discriminant(_)
+ | Rvalue::Aggregate(_, _)
+ | Rvalue::ShallowInitBox(_, _) => true,
+ }
+ }
+}
+
+impl BorrowKind {
+ pub fn allows_two_phase_borrow(&self) -> bool {
+ match *self {
+ BorrowKind::Shared | BorrowKind::Shallow | BorrowKind::Unique => false,
+ BorrowKind::Mut { allow_two_phase_borrow } => allow_two_phase_borrow,
+ }
+ }
+
+ pub fn describe_mutability(&self) -> &str {
+ match *self {
+ BorrowKind::Shared | BorrowKind::Shallow | BorrowKind::Unique => "immutable",
+ BorrowKind::Mut { .. } => "mutable",
+ }
+ }
+}
+
+impl BinOp {
+ pub fn is_checkable(self) -> bool {
+ use self::BinOp::*;
+ matches!(self, Add | Sub | Mul | Shl | Shr)
+ }
+}
+
+impl<'tcx> Debug for Rvalue<'tcx> {
+ fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
+ use self::Rvalue::*;
+
+ match *self {
+ Use(ref place) => write!(fmt, "{:?}", place),
+ Repeat(ref a, b) => {
+ write!(fmt, "[{:?}; ", a)?;
+ pretty_print_const(b, fmt, false)?;
+ write!(fmt, "]")
+ }
+ Len(ref a) => write!(fmt, "Len({:?})", a),
+ Cast(ref kind, ref place, ref ty) => {
+ write!(fmt, "{:?} as {:?} ({:?})", place, ty, kind)
+ }
+ BinaryOp(ref op, box (ref a, ref b)) => write!(fmt, "{:?}({:?}, {:?})", op, a, b),
+ CheckedBinaryOp(ref op, box (ref a, ref b)) => {
+ write!(fmt, "Checked{:?}({:?}, {:?})", op, a, b)
+ }
+ UnaryOp(ref op, ref a) => write!(fmt, "{:?}({:?})", op, a),
+ Discriminant(ref place) => write!(fmt, "discriminant({:?})", place),
+ NullaryOp(ref op, ref t) => write!(fmt, "{:?}({:?})", op, t),
+ ThreadLocalRef(did) => ty::tls::with(|tcx| {
+ let muta = tcx.static_mutability(did).unwrap().prefix_str();
+ write!(fmt, "&/*tls*/ {}{}", muta, tcx.def_path_str(did))
+ }),
+ Ref(region, borrow_kind, ref place) => {
+ let kind_str = match borrow_kind {
+ BorrowKind::Shared => "",
+ BorrowKind::Shallow => "shallow ",
+ BorrowKind::Mut { .. } | BorrowKind::Unique => "mut ",
+ };
+
+ // When printing regions, add trailing space if necessary.
+ let print_region = ty::tls::with(|tcx| {
+ tcx.sess.verbose() || tcx.sess.opts.unstable_opts.identify_regions
+ });
+ let region = if print_region {
+ let mut region = region.to_string();
+ if !region.is_empty() {
+ region.push(' ');
+ }
+ region
+ } else {
+ // Do not even print 'static
+ String::new()
+ };
+ write!(fmt, "&{}{}{:?}", region, kind_str, place)
+ }
+
+ CopyForDeref(ref place) => write!(fmt, "deref_copy {:#?}", place),
+
+ AddressOf(mutability, ref place) => {
+ let kind_str = match mutability {
+ Mutability::Mut => "mut",
+ Mutability::Not => "const",
+ };
+
+ write!(fmt, "&raw {} {:?}", kind_str, place)
+ }
+
+ Aggregate(ref kind, ref places) => {
+ let fmt_tuple = |fmt: &mut Formatter<'_>, name: &str| {
+ let mut tuple_fmt = fmt.debug_tuple(name);
+ for place in places {
+ tuple_fmt.field(place);
+ }
+ tuple_fmt.finish()
+ };
+
+ match **kind {
+ AggregateKind::Array(_) => write!(fmt, "{:?}", places),
+
+ AggregateKind::Tuple => {
+ if places.is_empty() {
+ write!(fmt, "()")
+ } else {
+ fmt_tuple(fmt, "")
+ }
+ }
+
+ AggregateKind::Adt(adt_did, variant, substs, _user_ty, _) => {
+ ty::tls::with(|tcx| {
+ let variant_def = &tcx.adt_def(adt_did).variant(variant);
+ let substs = tcx.lift(substs).expect("could not lift for printing");
+ let name = FmtPrinter::new(tcx, Namespace::ValueNS)
+ .print_def_path(variant_def.def_id, substs)?
+ .into_buffer();
+
+ match variant_def.ctor_kind {
+ CtorKind::Const => fmt.write_str(&name),
+ CtorKind::Fn => fmt_tuple(fmt, &name),
+ CtorKind::Fictive => {
+ let mut struct_fmt = fmt.debug_struct(&name);
+ for (field, place) in iter::zip(&variant_def.fields, places) {
+ struct_fmt.field(field.name.as_str(), place);
+ }
+ struct_fmt.finish()
+ }
+ }
+ })
+ }
+
+ AggregateKind::Closure(def_id, substs) => ty::tls::with(|tcx| {
+ let name = if tcx.sess.opts.unstable_opts.span_free_formats {
+ let substs = tcx.lift(substs).unwrap();
+ format!(
+ "[closure@{}]",
+ tcx.def_path_str_with_substs(def_id.to_def_id(), substs),
+ )
+ } else {
+ let span = tcx.def_span(def_id);
+ format!(
+ "[closure@{}]",
+ tcx.sess.source_map().span_to_diagnostic_string(span)
+ )
+ };
+ let mut struct_fmt = fmt.debug_struct(&name);
+
+ // FIXME(project-rfc-2229#48): This should be a list of capture names/places
+ if let Some(upvars) = tcx.upvars_mentioned(def_id) {
+ for (&var_id, place) in iter::zip(upvars.keys(), places) {
+ let var_name = tcx.hir().name(var_id);
+ struct_fmt.field(var_name.as_str(), place);
+ }
+ }
+
+ struct_fmt.finish()
+ }),
+
+ AggregateKind::Generator(def_id, _, _) => ty::tls::with(|tcx| {
+ let name = format!("[generator@{:?}]", tcx.def_span(def_id));
+ let mut struct_fmt = fmt.debug_struct(&name);
+
+ // FIXME(project-rfc-2229#48): This should be a list of capture names/places
+ if let Some(upvars) = tcx.upvars_mentioned(def_id) {
+ for (&var_id, place) in iter::zip(upvars.keys(), places) {
+ let var_name = tcx.hir().name(var_id);
+ struct_fmt.field(var_name.as_str(), place);
+ }
+ }
+
+ struct_fmt.finish()
+ }),
+ }
+ }
+
+ ShallowInitBox(ref place, ref ty) => {
+ write!(fmt, "ShallowInitBox({:?}, {:?})", place, ty)
+ }
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+/// Constants
+///
+/// Two constants are equal if they are the same constant. Note that
+/// this does not necessarily mean that they are `==` in Rust. In
+/// particular, one must be wary of `NaN`: two constants both holding
+/// `f64::NAN` are equal here, even though `f64::NAN == f64::NAN` is
+/// `false` in Rust.
+
+#[derive(Clone, Copy, PartialEq, TyEncodable, TyDecodable, Hash, HashStable)]
+pub struct Constant<'tcx> {
+ pub span: Span,
+
+ /// Optional user-given type: for something like
+ /// `collect::<Vec<_>>`, this would be present and would
+ /// indicate that `Vec<_>` was explicitly specified.
+ ///
+ /// Needed for NLL to impose user-given type constraints.
+ pub user_ty: Option<UserTypeAnnotationIndex>,
+
+ pub literal: ConstantKind<'tcx>,
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, TyEncodable, TyDecodable, Hash, HashStable, Debug)]
+#[derive(Lift)]
+pub enum ConstantKind<'tcx> {
+ /// This constant came from the type system
+ Ty(ty::Const<'tcx>),
+ /// This constant cannot go back into the type system, as it represents
+ /// something the type system cannot handle (e.g. pointers).
+ Val(interpret::ConstValue<'tcx>, Ty<'tcx>),
+}
+
+impl<'tcx> Constant<'tcx> {
+ pub fn check_static_ptr(&self, tcx: TyCtxt<'_>) -> Option<DefId> {
+ match self.literal.try_to_scalar() {
+ Some(Scalar::Ptr(ptr, _size)) => match tcx.global_alloc(ptr.provenance) {
+ GlobalAlloc::Static(def_id) => {
+ assert!(!tcx.is_thread_local_static(def_id));
+ Some(def_id)
+ }
+ _ => None,
+ },
+ _ => None,
+ }
+ }
+ #[inline]
+ pub fn ty(&self) -> Ty<'tcx> {
+ self.literal.ty()
+ }
+}
+
+impl<'tcx> ConstantKind<'tcx> {
+ /// Returns `None` if the constant is not trivially safe for use in the type system.
+ #[inline]
+ pub fn const_for_ty(&self) -> Option<ty::Const<'tcx>> {
+ match self {
+ ConstantKind::Ty(c) => Some(*c),
+ ConstantKind::Val(..) => None,
+ }
+ }
+
+ #[inline(always)]
+ pub fn ty(&self) -> Ty<'tcx> {
+ match self {
+ ConstantKind::Ty(c) => c.ty(),
+ ConstantKind::Val(_, ty) => *ty,
+ }
+ }
+
+ #[inline]
+ pub fn try_to_value(self, tcx: TyCtxt<'tcx>) -> Option<interpret::ConstValue<'tcx>> {
+ match self {
+ ConstantKind::Ty(c) => match c.kind() {
+ ty::ConstKind::Value(valtree) => Some(tcx.valtree_to_const_val((c.ty(), valtree))),
+ _ => None,
+ },
+ ConstantKind::Val(val, _) => Some(val),
+ }
+ }
+
+ #[inline]
+ pub fn try_to_scalar(self) -> Option<Scalar> {
+ match self {
+ ConstantKind::Ty(c) => match c.kind() {
+ ty::ConstKind::Value(valtree) => match valtree {
+ ty::ValTree::Leaf(scalar_int) => Some(Scalar::Int(scalar_int)),
+ ty::ValTree::Branch(_) => None,
+ },
+ _ => None,
+ },
+ ConstantKind::Val(val, _) => val.try_to_scalar(),
+ }
+ }
+
+ #[inline]
+ pub fn try_to_scalar_int(self) -> Option<ScalarInt> {
+ Some(self.try_to_scalar()?.assert_int())
+ }
+
+ #[inline]
+ pub fn try_to_bits(self, size: Size) -> Option<u128> {
+ self.try_to_scalar_int()?.to_bits(size).ok()
+ }
+
+ #[inline]
+ pub fn try_to_bool(self) -> Option<bool> {
+ self.try_to_scalar_int()?.try_into().ok()
+ }
+
+ #[inline]
+ pub fn eval(self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> Self {
+ match self {
+ Self::Ty(c) => {
+ if let Some(val) = c.kind().try_eval_for_mir(tcx, param_env) {
+ match val {
+ Ok(val) => Self::Val(val, c.ty()),
+ Err(_) => Self::Ty(tcx.const_error(self.ty())),
+ }
+ } else {
+ self
+ }
+ }
+ Self::Val(_, _) => self,
+ }
+ }
+
+ /// Panics if the value cannot be evaluated or doesn't contain a valid integer of the given type.
+ #[inline]
+ pub fn eval_bits(self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>, ty: Ty<'tcx>) -> u128 {
+ self.try_eval_bits(tcx, param_env, ty)
+ .unwrap_or_else(|| bug!("expected bits of {:#?}, got {:#?}", ty, self))
+ }
+
+ #[inline]
+ pub fn try_eval_bits(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ ty: Ty<'tcx>,
+ ) -> Option<u128> {
+ match self {
+ Self::Ty(ct) => ct.try_eval_bits(tcx, param_env, ty),
+ Self::Val(val, t) => {
+ assert_eq!(*t, ty);
+ let size =
+ tcx.layout_of(param_env.with_reveal_all_normalized(tcx).and(ty)).ok()?.size;
+ val.try_to_bits(size)
+ }
+ }
+ }
+
+ #[inline]
+ pub fn try_eval_bool(&self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> Option<bool> {
+ match self {
+ Self::Ty(ct) => ct.try_eval_bool(tcx, param_env),
+ Self::Val(val, _) => val.try_to_bool(),
+ }
+ }
+
+ #[inline]
+ pub fn try_eval_usize(&self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> Option<u64> {
+ match self {
+ Self::Ty(ct) => ct.try_eval_usize(tcx, param_env),
+ Self::Val(val, _) => val.try_to_machine_usize(tcx),
+ }
+ }
+
+ #[inline]
+ pub fn from_value(val: ConstValue<'tcx>, ty: Ty<'tcx>) -> Self {
+ Self::Val(val, ty)
+ }
+
+ pub fn from_bits(
+ tcx: TyCtxt<'tcx>,
+ bits: u128,
+ param_env_ty: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
+ ) -> Self {
+ let size = tcx
+ .layout_of(param_env_ty)
+ .unwrap_or_else(|e| {
+ bug!("could not compute layout for {:?}: {:?}", param_env_ty.value, e)
+ })
+ .size;
+ let cv = ConstValue::Scalar(Scalar::from_uint(bits, size));
+
+ Self::Val(cv, param_env_ty.value)
+ }
+
+ #[inline]
+ pub fn from_bool(tcx: TyCtxt<'tcx>, v: bool) -> Self {
+ let cv = ConstValue::from_bool(v);
+ Self::Val(cv, tcx.types.bool)
+ }
+
+ #[inline]
+ pub fn zero_sized(ty: Ty<'tcx>) -> Self {
+ let cv = ConstValue::ZeroSized;
+ Self::Val(cv, ty)
+ }
+
+ pub fn from_usize(tcx: TyCtxt<'tcx>, n: u64) -> Self {
+ let ty = tcx.types.usize;
+ Self::from_bits(tcx, n as u128, ty::ParamEnv::empty().and(ty))
+ }
+
+ #[inline]
+ pub fn from_scalar(_tcx: TyCtxt<'tcx>, s: Scalar, ty: Ty<'tcx>) -> Self {
+ let val = ConstValue::Scalar(s);
+ Self::Val(val, ty)
+ }
+
+ /// Literals are converted to `ConstantKind::Val`; const generic parameters are eagerly
+ /// converted to a constant; everything else becomes `Unevaluated`.
+ pub fn from_anon_const(
+ tcx: TyCtxt<'tcx>,
+ def_id: LocalDefId,
+ param_env: ty::ParamEnv<'tcx>,
+ ) -> Self {
+ Self::from_opt_const_arg_anon_const(tcx, ty::WithOptConstParam::unknown(def_id), param_env)
+ }
+
+ #[instrument(skip(tcx), level = "debug")]
+ pub fn from_inline_const(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> Self {
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+ let body_id = match tcx.hir().get(hir_id) {
+ hir::Node::AnonConst(ac) => ac.body,
+ _ => span_bug!(
+ tcx.def_span(def_id.to_def_id()),
+ "from_inline_const can only process anonymous constants"
+ ),
+ };
+ let expr = &tcx.hir().body(body_id).value;
+ let ty = tcx.typeck(def_id).node_type(hir_id);
+
+ let lit_input = match expr.kind {
+ hir::ExprKind::Lit(ref lit) => Some(LitToConstInput { lit: &lit.node, ty, neg: false }),
+ hir::ExprKind::Unary(hir::UnOp::Neg, ref expr) => match expr.kind {
+ hir::ExprKind::Lit(ref lit) => {
+ Some(LitToConstInput { lit: &lit.node, ty, neg: true })
+ }
+ _ => None,
+ },
+ _ => None,
+ };
+ if let Some(lit_input) = lit_input {
+ // If an error occurred, ignore that it's a literal and leave reporting the error up to
+ // MIR.
+ match tcx.at(expr.span).lit_to_mir_constant(lit_input) {
+ Ok(c) => return c,
+ Err(_) => {}
+ }
+ }
+
+ let typeck_root_def_id = tcx.typeck_root_def_id(def_id.to_def_id());
+ let parent_substs =
+ tcx.erase_regions(InternalSubsts::identity_for_item(tcx, typeck_root_def_id));
+ let substs =
+ ty::InlineConstSubsts::new(tcx, ty::InlineConstSubstsParts { parent_substs, ty })
+ .substs;
+ let uneval_const = tcx.mk_const(ty::ConstS {
+ kind: ty::ConstKind::Unevaluated(ty::Unevaluated {
+ def: ty::WithOptConstParam::unknown(def_id).to_global(),
+ substs,
+ promoted: None,
+ }),
+ ty,
+ });
+ debug!(?uneval_const);
+ debug_assert!(!uneval_const.has_free_regions());
+
+ Self::Ty(uneval_const)
+ }
+
+ #[instrument(skip(tcx), level = "debug")]
+ fn from_opt_const_arg_anon_const(
+ tcx: TyCtxt<'tcx>,
+ def: ty::WithOptConstParam<LocalDefId>,
+ param_env: ty::ParamEnv<'tcx>,
+ ) -> Self {
+ let body_id = match tcx.hir().get_by_def_id(def.did) {
+ hir::Node::AnonConst(ac) => ac.body,
+ _ => span_bug!(
+ tcx.def_span(def.did.to_def_id()),
+ "from_anon_const can only process anonymous constants"
+ ),
+ };
+
+ let expr = &tcx.hir().body(body_id).value;
+ debug!(?expr);
+
+ // Unwrap a block, so that e.g. `{ P }` is recognised as a parameter. Const arguments
+ // currently have to be wrapped in curly brackets, so it's necessary to special-case.
+ let expr = match &expr.kind {
+ hir::ExprKind::Block(block, _) if block.stmts.is_empty() && block.expr.is_some() => {
+ block.expr.as_ref().unwrap()
+ }
+ _ => expr,
+ };
+ debug!("expr.kind: {:?}", expr.kind);
+
+ let ty = tcx.type_of(def.def_id_for_type_of());
+ debug!(?ty);
+
+ // FIXME(const_generics): We currently have to special case parameters because `min_const_generics`
+ // does not provide the parent's generics to anonymous constants. We still allow generic const
+ // parameters by themselves, however, e.g. `N`. These constants would cause an ICE if we were to
+ // ever try to substitute the generic parameters in their bodies.
+ //
+ // While this doesn't happen, as these constants are always used as `ty::ConstKind::Param`, it
+ // would cause issues if we were to remove that special case and try to evaluate the constant
+ // instead.
+ use hir::{def::DefKind::ConstParam, def::Res, ExprKind, Path, QPath};
+ match expr.kind {
+ ExprKind::Path(QPath::Resolved(_, &Path { res: Res::Def(ConstParam, def_id), .. })) => {
+ // Find the name and index of the const parameter by indexing the generics of
+ // the parent item and construct a `ParamConst`.
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
+ let item_id = tcx.hir().get_parent_node(hir_id);
+ let item_def_id = tcx.hir().local_def_id(item_id);
+ let generics = tcx.generics_of(item_def_id.to_def_id());
+ let index = generics.param_def_id_to_index[&def_id];
+ let name = tcx.hir().name(hir_id);
+ let ty_const = tcx.mk_const(ty::ConstS {
+ kind: ty::ConstKind::Param(ty::ParamConst::new(index, name)),
+ ty,
+ });
+ debug!(?ty_const);
+
+ return Self::Ty(ty_const);
+ }
+ _ => {}
+ }
+
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def.did);
+ let parent_substs = if let Some(parent_hir_id) = tcx.hir().find_parent_node(hir_id) {
+ if let Some(parent_did) = tcx.hir().opt_local_def_id(parent_hir_id) {
+ InternalSubsts::identity_for_item(tcx, parent_did.to_def_id())
+ } else {
+ tcx.mk_substs(Vec::<GenericArg<'tcx>>::new().into_iter())
+ }
+ } else {
+ tcx.mk_substs(Vec::<GenericArg<'tcx>>::new().into_iter())
+ };
+ debug!(?parent_substs);
+
+ let did = def.did.to_def_id();
+ let child_substs = InternalSubsts::identity_for_item(tcx, did);
+ let substs = tcx.mk_substs(parent_substs.into_iter().chain(child_substs.into_iter()));
+ debug!(?substs);
+
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def.did);
+ let span = tcx.hir().span(hir_id);
+ let uneval = ty::Unevaluated::new(def.to_global(), substs);
+ debug!(?span, ?param_env);
+
+ match tcx.const_eval_resolve(param_env, uneval, Some(span)) {
+ Ok(val) => {
+ debug!("evaluated const value: {:?}", val);
+ Self::Val(val, ty)
+ }
+ Err(_) => {
+ debug!("error encountered during evaluation");
+ // Error was handled in `const_eval_resolve`. Here we just create a
+ // new unevaluated const and error hard later in codegen
+ let ty_const = tcx.mk_const(ty::ConstS {
+ kind: ty::ConstKind::Unevaluated(ty::Unevaluated {
+ def: def.to_global(),
+ substs: InternalSubsts::identity_for_item(tcx, def.did.to_def_id()),
+ promoted: None,
+ }),
+ ty,
+ });
+ debug!(?ty_const);
+
+ Self::Ty(ty_const)
+ }
+ }
+ }
+
+ pub fn from_const(c: ty::Const<'tcx>, tcx: TyCtxt<'tcx>) -> Self {
+ match c.kind() {
+ ty::ConstKind::Value(valtree) => {
+ let const_val = tcx.valtree_to_const_val((c.ty(), valtree));
+ Self::Val(const_val, c.ty())
+ }
+ _ => Self::Ty(c),
+ }
+ }
+}
+
+/// A collection of projections into user types.
+///
+/// They are projections because a binding can occur as part of a
+/// parent pattern that has been ascribed a type.
+///
+/// It's a collection because there can be multiple type ascriptions on
+/// the path from the root of the pattern down to the binding itself.
+///
+/// An example:
+///
+/// ```ignore (illustrative)
+/// struct S<'a>((i32, &'a str), String);
+/// let S((_, w): (i32, &'static str), _): S = ...;
+/// // ------ ^^^^^^^^^^^^^^^^^^^ (1)
+/// // --------------------------------- ^ (2)
+/// ```
+///
+/// The highlights labelled `(1)` show the subpattern `(_, w)` being
+/// ascribed the type `(i32, &'static str)`.
+///
+/// The highlights labelled `(2)` show the whole pattern being
+/// ascribed the type `S`.
+///
+/// In this example, when we descend to `w`, we will have built up the
+/// following two projected types:
+///
+/// * base: `S`, projection: `(base.0).1`
+/// * base: `(i32, &'static str)`, projection: `base.1`
+///
+/// The first will lead to the constraint `w: &'1 str` (for some
+/// inferred region `'1`). The second will lead to the constraint `w:
+/// &'static str`.
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
+pub struct UserTypeProjections {
+ pub contents: Vec<(UserTypeProjection, Span)>,
+}
+
+impl<'tcx> UserTypeProjections {
+ pub fn none() -> Self {
+ UserTypeProjections { contents: vec![] }
+ }
+
+ pub fn is_empty(&self) -> bool {
+ self.contents.is_empty()
+ }
+
+ pub fn projections_and_spans(
+ &self,
+ ) -> impl Iterator<Item = &(UserTypeProjection, Span)> + ExactSizeIterator {
+ self.contents.iter()
+ }
+
+ pub fn projections(&self) -> impl Iterator<Item = &UserTypeProjection> + ExactSizeIterator {
+ self.contents.iter().map(|&(ref user_type, _span)| user_type)
+ }
+
+ pub fn push_projection(mut self, user_ty: &UserTypeProjection, span: Span) -> Self {
+ self.contents.push((user_ty.clone(), span));
+ self
+ }
+
+ fn map_projections(
+ mut self,
+ mut f: impl FnMut(UserTypeProjection) -> UserTypeProjection,
+ ) -> Self {
+ self.contents = self.contents.into_iter().map(|(proj, span)| (f(proj), span)).collect();
+ self
+ }
+
+ pub fn index(self) -> Self {
+ self.map_projections(|pat_ty_proj| pat_ty_proj.index())
+ }
+
+ pub fn subslice(self, from: u64, to: u64) -> Self {
+ self.map_projections(|pat_ty_proj| pat_ty_proj.subslice(from, to))
+ }
+
+ pub fn deref(self) -> Self {
+ self.map_projections(|pat_ty_proj| pat_ty_proj.deref())
+ }
+
+ pub fn leaf(self, field: Field) -> Self {
+ self.map_projections(|pat_ty_proj| pat_ty_proj.leaf(field))
+ }
+
+ pub fn variant(self, adt_def: AdtDef<'tcx>, variant_index: VariantIdx, field: Field) -> Self {
+ self.map_projections(|pat_ty_proj| pat_ty_proj.variant(adt_def, variant_index, field))
+ }
+}
+
+/// Encodes the effect of a user-supplied type annotation on the
+/// subcomponents of a pattern. The effect is determined by applying the
+/// given list of projections to some underlying base type. Often,
+/// the projection element list `projs` is empty, in which case this
+/// directly encodes a type in `base`. But in the case of complex patterns with
+/// subpatterns and bindings, we want to apply only a *part* of the type to a variable,
+/// in which case the `projs` vector is used.
+///
+/// Examples:
+///
+/// * `let x: T = ...` -- here, the `projs` vector is empty.
+///
+/// * `let (x, _): T = ...` -- here, the `projs` vector would contain
+/// `field[0]` (aka `.0`), indicating that the type of `x` is
+/// determined by finding the type of the `.0` field from `T`.
+#[derive(Clone, Debug, TyEncodable, TyDecodable, Hash, HashStable, PartialEq)]
+pub struct UserTypeProjection {
+ pub base: UserTypeAnnotationIndex,
+ pub projs: Vec<ProjectionKind>,
+}
+
+impl Copy for ProjectionKind {}
+
+impl UserTypeProjection {
+ pub(crate) fn index(mut self) -> Self {
+ self.projs.push(ProjectionElem::Index(()));
+ self
+ }
+
+ pub(crate) fn subslice(mut self, from: u64, to: u64) -> Self {
+ self.projs.push(ProjectionElem::Subslice { from, to, from_end: true });
+ self
+ }
+
+ pub(crate) fn deref(mut self) -> Self {
+ self.projs.push(ProjectionElem::Deref);
+ self
+ }
+
+ pub(crate) fn leaf(mut self, field: Field) -> Self {
+ self.projs.push(ProjectionElem::Field(field, ()));
+ self
+ }
+
+ pub(crate) fn variant(
+ mut self,
+ adt_def: AdtDef<'_>,
+ variant_index: VariantIdx,
+ field: Field,
+ ) -> Self {
+ self.projs.push(ProjectionElem::Downcast(
+ Some(adt_def.variant(variant_index).name),
+ variant_index,
+ ));
+ self.projs.push(ProjectionElem::Field(field, ()));
+ self
+ }
+}
+
+TrivialTypeTraversalAndLiftImpls! { ProjectionKind, }
+
+impl<'tcx> TypeFoldable<'tcx> for UserTypeProjection {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ Ok(UserTypeProjection {
+ base: self.base.try_fold_with(folder)?,
+ projs: self.projs.try_fold_with(folder)?,
+ })
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for UserTypeProjection {
+ fn visit_with<Vs: TypeVisitor<'tcx>>(&self, visitor: &mut Vs) -> ControlFlow<Vs::BreakTy> {
+ self.base.visit_with(visitor)
+ // Note: there's nothing in `self.proj` to visit.
+ }
+}
+
+rustc_index::newtype_index! {
+ pub struct Promoted {
+ derive [HashStable]
+ DEBUG_FORMAT = "promoted[{}]"
+ }
+}
+
+impl<'tcx> Debug for Constant<'tcx> {
+ fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
+ write!(fmt, "{}", self)
+ }
+}
+
+impl<'tcx> Display for Constant<'tcx> {
+ fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
+ match self.ty().kind() {
+ ty::FnDef(..) => {}
+ _ => write!(fmt, "const ")?,
+ }
+ Display::fmt(&self.literal, fmt)
+ }
+}
+
+impl<'tcx> Display for ConstantKind<'tcx> {
+ fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
+ match *self {
+ ConstantKind::Ty(c) => pretty_print_const(c, fmt, true),
+ ConstantKind::Val(val, ty) => pretty_print_const_value(val, ty, fmt, true),
+ }
+ }
+}
+
+fn pretty_print_const<'tcx>(
+ c: ty::Const<'tcx>,
+ fmt: &mut Formatter<'_>,
+ print_types: bool,
+) -> fmt::Result {
+ use crate::ty::print::PrettyPrinter;
+ ty::tls::with(|tcx| {
+ let literal = tcx.lift(c).unwrap();
+ let mut cx = FmtPrinter::new(tcx, Namespace::ValueNS);
+ cx.print_alloc_ids = true;
+ let cx = cx.pretty_print_const(literal, print_types)?;
+ fmt.write_str(&cx.into_buffer())?;
+ Ok(())
+ })
+}
+
+fn pretty_print_byte_str(fmt: &mut Formatter<'_>, byte_str: &[u8]) -> fmt::Result {
+ fmt.write_str("b\"")?;
+ for &c in byte_str {
+ for e in std::ascii::escape_default(c) {
+ fmt.write_char(e as char)?;
+ }
+ }
+ fmt.write_str("\"")?;
+
+ Ok(())
+}
+
+fn comma_sep<'tcx>(fmt: &mut Formatter<'_>, elems: Vec<ConstantKind<'tcx>>) -> fmt::Result {
+ let mut first = true;
+ for elem in elems {
+ if !first {
+ fmt.write_str(", ")?;
+ }
+ fmt.write_str(&format!("{}", elem))?;
+ first = false;
+ }
+ Ok(())
+}
+
+// FIXME: Move that into `mir/pretty.rs`.
+fn pretty_print_const_value<'tcx>(
+ ct: ConstValue<'tcx>,
+ ty: Ty<'tcx>,
+ fmt: &mut Formatter<'_>,
+ print_ty: bool,
+) -> fmt::Result {
+ use crate::ty::print::PrettyPrinter;
+
+ ty::tls::with(|tcx| {
+ let ct = tcx.lift(ct).unwrap();
+ let ty = tcx.lift(ty).unwrap();
+
+ if tcx.sess.verbose() {
+ fmt.write_str(&format!("ConstValue({:?}: {})", ct, ty))?;
+ return Ok(());
+ }
+
+ let u8_type = tcx.types.u8;
+ match (ct, ty.kind()) {
+ // Byte/string slices, printed as (byte) string literals.
+ (ConstValue::Slice { data, start, end }, ty::Ref(_, inner, _)) => {
+ match inner.kind() {
+ ty::Slice(t) => {
+ if *t == u8_type {
+ // The `inspect` here is okay since we checked the bounds, and there are
+ // no relocations (we have an active slice reference here). We don't use
+ // this result to affect interpreter execution.
+ let byte_str = data
+ .inner()
+ .inspect_with_uninit_and_ptr_outside_interpreter(start..end);
+ pretty_print_byte_str(fmt, byte_str)?;
+ return Ok(());
+ }
+ }
+ ty::Str => {
+ // The `inspect` here is okay since we checked the bounds, and there are no
+ // relocations (we have an active `str` reference here). We don't use this
+ // result to affect interpreter execution.
+ let slice = data
+ .inner()
+ .inspect_with_uninit_and_ptr_outside_interpreter(start..end);
+ fmt.write_str(&format!("{:?}", String::from_utf8_lossy(slice)))?;
+ return Ok(());
+ }
+ _ => {}
+ }
+ }
+ (ConstValue::ByRef { alloc, offset }, ty::Array(t, n)) if *t == u8_type => {
+ let n = n.kind().try_to_bits(tcx.data_layout.pointer_size).unwrap();
+ // cast is ok because we already checked for pointer size (32 or 64 bit) above
+ let range = AllocRange { start: offset, size: Size::from_bytes(n) };
+ let byte_str = alloc.inner().get_bytes(&tcx, range).unwrap();
+ fmt.write_str("*")?;
+ pretty_print_byte_str(fmt, byte_str)?;
+ return Ok(());
+ }
+ // Aggregates, printed as array/tuple/struct/variant construction syntax.
+ //
+ // NB: the `has_param_types_or_consts` check ensures that we can use
+ // the `destructure_const` query with an empty `ty::ParamEnv` without
+ // introducing ICEs (e.g. via `layout_of`) from missing bounds.
+ // E.g. `transmute([0usize; 2]): (u8, *mut T)` needs to know `T: Sized`
+ // to be able to destructure the tuple into `(0u8, *mut T)`.
+ //
+ // FIXME(eddyb) for `--emit=mir`/`-Z dump-mir`, we should provide the
+ // correct `ty::ParamEnv` to allow printing *all* constant values.
+ (_, ty::Array(..) | ty::Tuple(..) | ty::Adt(..)) if !ty.has_param_types_or_consts() => {
+ let ct = tcx.lift(ct).unwrap();
+ let ty = tcx.lift(ty).unwrap();
+ if let Some(contents) = tcx.try_destructure_mir_constant(
+ ty::ParamEnv::reveal_all().and(ConstantKind::Val(ct, ty)),
+ ) {
+ let fields = contents.fields.iter().copied().collect::<Vec<_>>();
+ match *ty.kind() {
+ ty::Array(..) => {
+ fmt.write_str("[")?;
+ comma_sep(fmt, fields)?;
+ fmt.write_str("]")?;
+ }
+ ty::Tuple(..) => {
+ fmt.write_str("(")?;
+ comma_sep(fmt, fields)?;
+ if contents.fields.len() == 1 {
+ fmt.write_str(",")?;
+ }
+ fmt.write_str(")")?;
+ }
+ ty::Adt(def, _) if def.variants().is_empty() => {
+ fmt.write_str(&format!("{{unreachable(): {}}}", ty))?;
+ }
+ ty::Adt(def, substs) => {
+ let variant_idx = contents
+ .variant
+ .expect("destructed mir constant of adt without variant idx");
+ let variant_def = &def.variant(variant_idx);
+ let substs = tcx.lift(substs).unwrap();
+ let mut cx = FmtPrinter::new(tcx, Namespace::ValueNS);
+ cx.print_alloc_ids = true;
+ let cx = cx.print_value_path(variant_def.def_id, substs)?;
+ fmt.write_str(&cx.into_buffer())?;
+
+ match variant_def.ctor_kind {
+ CtorKind::Const => {}
+ CtorKind::Fn => {
+ fmt.write_str("(")?;
+ comma_sep(fmt, fields)?;
+ fmt.write_str(")")?;
+ }
+ CtorKind::Fictive => {
+ fmt.write_str(" {{ ")?;
+ let mut first = true;
+ for (field_def, field) in iter::zip(&variant_def.fields, fields)
+ {
+ if !first {
+ fmt.write_str(", ")?;
+ }
+ fmt.write_str(&format!("{}: {}", field_def.name, field))?;
+ first = false;
+ }
+ fmt.write_str(" }}")?;
+ }
+ }
+ }
+ _ => unreachable!(),
+ }
+ return Ok(());
+ } else {
+ // Fall back to debug pretty printing for invalid constants.
+ fmt.write_str(&format!("{:?}", ct))?;
+ if print_ty {
+ fmt.write_str(&format!(": {}", ty))?;
+ }
+ return Ok(());
+ };
+ }
+ (ConstValue::Scalar(scalar), _) => {
+ let mut cx = FmtPrinter::new(tcx, Namespace::ValueNS);
+ cx.print_alloc_ids = true;
+ let ty = tcx.lift(ty).unwrap();
+ cx = cx.pretty_print_const_scalar(scalar, ty, print_ty)?;
+ fmt.write_str(&cx.into_buffer())?;
+ return Ok(());
+ }
+ (ConstValue::ZeroSized, ty::FnDef(d, s)) => {
+ let mut cx = FmtPrinter::new(tcx, Namespace::ValueNS);
+ cx.print_alloc_ids = true;
+ let cx = cx.print_value_path(*d, s)?;
+ fmt.write_str(&cx.into_buffer())?;
+ return Ok(());
+ }
+ // FIXME(oli-obk): also pretty print arrays and other aggregate constants by reading
+ // their fields instead of just dumping the memory.
+ _ => {}
+ }
+ // fallback
+ fmt.write_str(&format!("{:?}", ct))?;
+ if print_ty {
+ fmt.write_str(&format!(": {}", ty))?;
+ }
+ Ok(())
+ })
+}
+
+/// `Location` represents the position of the start of the statement; or, if
+/// `statement_index` equals the number of statements, then the start of the
+/// terminator.
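+///
+/// For example, in a block `bb2` with two statements, `bb2[0]` and `bb2[1]`
+/// are the statements and `bb2[2]` is the start of the terminator.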
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Ord, PartialOrd, HashStable)]
+pub struct Location {
+ /// The block that the location is within.
+ pub block: BasicBlock,
+
+ pub statement_index: usize,
+}
+
+impl fmt::Debug for Location {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(fmt, "{:?}[{}]", self.block, self.statement_index)
+ }
+}
+
+impl Location {
+ pub const START: Location = Location { block: START_BLOCK, statement_index: 0 };
+
+ /// Returns the location immediately after this one within the enclosing block.
+ ///
+ /// Note that if this location represents a terminator, then the
+ /// resulting location would be out of bounds and invalid.
+ pub fn successor_within_block(&self) -> Location {
+ Location { block: self.block, statement_index: self.statement_index + 1 }
+ }
+
+ /// Returns `true` if `other` is earlier in the control flow graph than `self`.
+ pub fn is_predecessor_of<'tcx>(&self, other: Location, body: &Body<'tcx>) -> bool {
+ // If we are in the same block as the other location and are an earlier statement
+ // then we are a predecessor of `other`.
+ if self.block == other.block && self.statement_index < other.statement_index {
+ return true;
+ }
+
+ let predecessors = body.basic_blocks.predecessors();
+
+ // If we're in another block, then we want to check that block is a predecessor of `other`.
+ let mut queue: Vec<BasicBlock> = predecessors[other.block].to_vec();
+ let mut visited = FxHashSet::default();
+
+ while let Some(block) = queue.pop() {
+ // If we haven't visited this block before, then make sure we visit its predecessors.
+ if visited.insert(block) {
+ queue.extend(predecessors[block].iter().cloned());
+ } else {
+ continue;
+ }
+
+ // If we found the block that `self` is in, then we are a predecessor of `other` (since
+ // we found that block by looking at the predecessors of `other`).
+ if self.block == block {
+ return true;
+ }
+ }
+
+ false
+ }
+
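+ /// Returns `true` if `self` dominates `other`: within the same block this
+ /// falls back to statement order, and across blocks it consults the
+ /// dominator tree.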
+ pub fn dominates(&self, other: Location, dominators: &Dominators<BasicBlock>) -> bool {
+ if self.block == other.block {
+ self.statement_index <= other.statement_index
+ } else {
+ dominators.is_dominated_by(other.block, self.block)
+ }
+ }
+}
diff --git a/compiler/rustc_middle/src/mir/mono.rs b/compiler/rustc_middle/src/mir/mono.rs
new file mode 100644
index 000000000..21ae121e1
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/mono.rs
@@ -0,0 +1,527 @@
+use crate::dep_graph::{DepNode, WorkProduct, WorkProductId};
+use crate::ty::{subst::InternalSubsts, Instance, InstanceDef, SymbolName, TyCtxt};
+use rustc_attr::InlineAttr;
+use rustc_data_structures::base_n;
+use rustc_data_structures::fingerprint::Fingerprint;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_hir::def_id::{CrateNum, DefId, LOCAL_CRATE};
+use rustc_hir::ItemId;
+use rustc_index::vec::Idx;
+use rustc_query_system::ich::StableHashingContext;
+use rustc_session::config::OptLevel;
+use rustc_span::source_map::Span;
+use rustc_span::symbol::Symbol;
+use std::fmt;
+use std::hash::Hash;
+
+/// Describes how a monomorphization will be instantiated in object files.
+#[derive(PartialEq)]
+pub enum InstantiationMode {
+ /// There will be exactly one instance of the given MonoItem. It will have
+ /// external linkage so that it can be linked to from other codegen units.
+ GloballyShared {
+ /// In some compilation scenarios we may decide to take functions that
+ /// are typically `LocalCopy` and instead move them to `GloballyShared`
+ /// to avoid codegenning them a bunch of times. In this situation,
+ /// however, our local copy may conflict with other crates also
+ /// inlining the same function.
+ ///
+ /// This flag indicates that this situation is occurring, and informs
+ /// symbol name calculation that some extra mangling is needed to
+ /// avoid conflicts. Note that this may eventually go away entirely if
+ /// ThinLTO enables us to *always* have a globally shared instance of a
+ /// function within one crate's compilation.
+ may_conflict: bool,
+ },
+
+ /// Each codegen unit containing a reference to the given MonoItem will
+ /// have its own private copy of the function (with internal linkage).
+ LocalCopy,
+}
+
+#[derive(PartialEq, Eq, Clone, Copy, Debug, Hash, HashStable)]
+pub enum MonoItem<'tcx> {
+ Fn(Instance<'tcx>),
+ Static(DefId),
+ GlobalAsm(ItemId),
+}
+
+impl<'tcx> MonoItem<'tcx> {
+ /// Returns `true` if the mono item is user-defined (i.e. not compiler-generated, like shims).
+ pub fn is_user_defined(&self) -> bool {
+ match *self {
+ MonoItem::Fn(instance) => matches!(instance.def, InstanceDef::Item(..)),
+ MonoItem::Static(..) | MonoItem::GlobalAsm(..) => true,
+ }
+ }
+
+ pub fn size_estimate(&self, tcx: TyCtxt<'tcx>) -> usize {
+ match *self {
+ MonoItem::Fn(instance) => {
+ // Estimate the size of a function based on how many statements
+ // it contains.
+ tcx.instance_def_size_estimate(instance.def)
+ }
+ // Conservatively estimate the size of a static declaration
+ // or assembly to be 1.
+ MonoItem::Static(_) | MonoItem::GlobalAsm(_) => 1,
+ }
+ }
+
+ pub fn is_generic_fn(&self) -> bool {
+ match *self {
+ MonoItem::Fn(ref instance) => instance.substs.non_erasable_generics().next().is_some(),
+ MonoItem::Static(..) | MonoItem::GlobalAsm(..) => false,
+ }
+ }
+
+ pub fn symbol_name(&self, tcx: TyCtxt<'tcx>) -> SymbolName<'tcx> {
+ match *self {
+ MonoItem::Fn(instance) => tcx.symbol_name(instance),
+ MonoItem::Static(def_id) => tcx.symbol_name(Instance::mono(tcx, def_id)),
+ MonoItem::GlobalAsm(item_id) => {
+ SymbolName::new(tcx, &format!("global_asm_{:?}", item_id.def_id))
+ }
+ }
+ }
+
+ pub fn instantiation_mode(&self, tcx: TyCtxt<'tcx>) -> InstantiationMode {
+ let generate_cgu_internal_copies = tcx
+ .sess
+ .opts
+ .unstable_opts
+ .inline_in_all_cgus
+ .unwrap_or_else(|| tcx.sess.opts.optimize != OptLevel::No)
+ && !tcx.sess.link_dead_code();
+
+ match *self {
+ MonoItem::Fn(ref instance) => {
+ let entry_def_id = tcx.entry_fn(()).map(|(id, _)| id);
+ // If this function isn't inlined or otherwise has an extern
+ // indicator, then we'll be creating a globally shared version.
+ if tcx.codegen_fn_attrs(instance.def_id()).contains_extern_indicator()
+ || !instance.def.generates_cgu_internal_copy(tcx)
+ || Some(instance.def_id()) == entry_def_id
+ {
+ return InstantiationMode::GloballyShared { may_conflict: false };
+ }
+
+ // At this point we don't have explicit linkage and we're an
+ // inlined function. If we're inlining into all CGUs then we'll
+ // be creating a local copy per CGU.
+ if generate_cgu_internal_copies {
+ return InstantiationMode::LocalCopy;
+ }
+
+ // Finally, if this is `#[inline(always)]` we're sure to respect
+ // that with an inline copy per CGU, but otherwise we'll be
+ // creating one copy of this `#[inline]` function which may
+ // conflict with upstream crates as it could be an exported
+ // symbol.
+ match tcx.codegen_fn_attrs(instance.def_id()).inline {
+ InlineAttr::Always => InstantiationMode::LocalCopy,
+ _ => InstantiationMode::GloballyShared { may_conflict: true },
+ }
+ }
+ MonoItem::Static(..) | MonoItem::GlobalAsm(..) => {
+ InstantiationMode::GloballyShared { may_conflict: false }
+ }
+ }
+ }
+
+ pub fn explicit_linkage(&self, tcx: TyCtxt<'tcx>) -> Option<Linkage> {
+ let def_id = match *self {
+ MonoItem::Fn(ref instance) => instance.def_id(),
+ MonoItem::Static(def_id) => def_id,
+ MonoItem::GlobalAsm(..) => return None,
+ };
+
+ let codegen_fn_attrs = tcx.codegen_fn_attrs(def_id);
+ codegen_fn_attrs.linkage
+ }
+
+ /// Returns `true` if this instance is instantiable, i.e., whether it has no unsatisfied
+ /// predicates.
+ ///
+ /// In order to codegen an item, all of its predicates must hold, because
+ /// otherwise the item does not make sense. Type-checking ensures that
+ /// the predicates of every item that is *used by* a valid item *do*
+ /// hold, so we can rely on that.
+ ///
+ /// However, we codegen collector roots (reachable items) and functions
+ /// in vtables when they are seen, even if they are not used, and so they
+ /// might not be instantiable. For example, a programmer can define this
+ /// public function:
+ ///
+ /// ```ignore (illustrative)
+ /// pub fn foo<'a>(s: &'a mut ()) where &'a mut (): Clone {
+ ///     <&mut () as Clone>::clone(&s);
+ /// }
+ /// ```
+ ///
+ /// That function can't be codegened, because the method `<&mut () as Clone>::clone`
+ /// does not exist. Luckily for us, that function can't ever be used,
+ /// because that would require `&'a mut (): Clone` to hold, so we
+ /// can just not emit any code, or even a linker reference for it.
+ ///
+ /// Similarly, if a vtable method has such a signature, and therefore can't
+ /// be used, we can just not emit it and have a placeholder (a null pointer,
+ /// which will never be accessed) in its place.
+ pub fn is_instantiable(&self, tcx: TyCtxt<'tcx>) -> bool {
+ debug!("is_instantiable({:?})", self);
+ let (def_id, substs) = match *self {
+ MonoItem::Fn(ref instance) => (instance.def_id(), instance.substs),
+ MonoItem::Static(def_id) => (def_id, InternalSubsts::empty()),
+ // global asm never has predicates
+ MonoItem::GlobalAsm(..) => return true,
+ };
+
+ !tcx.subst_and_check_impossible_predicates((def_id, &substs))
+ }
+
+ pub fn local_span(&self, tcx: TyCtxt<'tcx>) -> Option<Span> {
+ match *self {
+ MonoItem::Fn(Instance { def, .. }) => def.def_id().as_local(),
+ MonoItem::Static(def_id) => def_id.as_local(),
+ MonoItem::GlobalAsm(item_id) => Some(item_id.def_id),
+ }
+ .map(|def_id| tcx.def_span(def_id))
+ }
+
+ // Only used by rustc_codegen_cranelift
+ pub fn codegen_dep_node(&self, tcx: TyCtxt<'tcx>) -> DepNode {
+ crate::dep_graph::make_compile_mono_item(tcx, self)
+ }
+
+ /// Returns the item's `CrateNum`
+ pub fn krate(&self) -> CrateNum {
+ match self {
+ MonoItem::Fn(ref instance) => instance.def_id().krate,
+ MonoItem::Static(def_id) => def_id.krate,
+ MonoItem::GlobalAsm(..) => LOCAL_CRATE,
+ }
+ }
+}
+
+impl<'tcx> fmt::Display for MonoItem<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ MonoItem::Fn(instance) => write!(f, "fn {}", instance),
+ MonoItem::Static(def_id) => {
+ write!(f, "static {}", Instance::new(def_id, InternalSubsts::empty()))
+ }
+ MonoItem::GlobalAsm(..) => write!(f, "global_asm"),
+ }
+ }
+}
+
+#[derive(Debug)]
+pub struct CodegenUnit<'tcx> {
+ /// A name for this CGU. Incremental compilation requires that
+ /// this name be unique amongst **all** crates. Therefore, it should
+ /// contain something unique to this crate (e.g., a module path)
+ /// as well as the crate name and disambiguator.
+ name: Symbol,
+ items: FxHashMap<MonoItem<'tcx>, (Linkage, Visibility)>,
+ size_estimate: Option<usize>,
+ primary: bool,
+ /// True if this CGU is used to hold code coverage information for dead code,
+ /// false otherwise.
+ is_code_coverage_dead_code_cgu: bool,
+}
+
+/// Specifies the linkage type for a `MonoItem`.
+///
+/// See <https://llvm.org/docs/LangRef.html#linkage-types> for more details about these variants.
+#[derive(Copy, Clone, PartialEq, Debug, TyEncodable, TyDecodable, HashStable)]
+pub enum Linkage {
+ External,
+ AvailableExternally,
+ LinkOnceAny,
+ LinkOnceODR,
+ WeakAny,
+ WeakODR,
+ Appending,
+ Internal,
+ Private,
+ ExternalWeak,
+ Common,
+}
+
+#[derive(Copy, Clone, PartialEq, Debug, HashStable)]
+pub enum Visibility {
+ Default,
+ Hidden,
+ Protected,
+}
+
+impl<'tcx> CodegenUnit<'tcx> {
+ #[inline]
+ pub fn new(name: Symbol) -> CodegenUnit<'tcx> {
+ CodegenUnit {
+ name,
+ items: Default::default(),
+ size_estimate: None,
+ primary: false,
+ is_code_coverage_dead_code_cgu: false,
+ }
+ }
+
+ pub fn name(&self) -> Symbol {
+ self.name
+ }
+
+ pub fn set_name(&mut self, name: Symbol) {
+ self.name = name;
+ }
+
+ pub fn is_primary(&self) -> bool {
+ self.primary
+ }
+
+ pub fn make_primary(&mut self) {
+ self.primary = true;
+ }
+
+ pub fn items(&self) -> &FxHashMap<MonoItem<'tcx>, (Linkage, Visibility)> {
+ &self.items
+ }
+
+ pub fn items_mut(&mut self) -> &mut FxHashMap<MonoItem<'tcx>, (Linkage, Visibility)> {
+ &mut self.items
+ }
+
+ pub fn is_code_coverage_dead_code_cgu(&self) -> bool {
+ self.is_code_coverage_dead_code_cgu
+ }
+
+ /// Marks this CGU as the one used to contain code coverage information for dead code.
+ pub fn make_code_coverage_dead_code_cgu(&mut self) {
+ self.is_code_coverage_dead_code_cgu = true;
+ }
+
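+ /// Hashes a human-readable CGU name into a short string that is safe to
+ /// use as a file name (a case-insensitive base-n encoding of a truncated
+ /// 80-bit hash).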
+ pub fn mangle_name(human_readable_name: &str) -> String {
+ // We generate an 80-bit hash from the name. This should be enough to
+ // avoid collisions and is still reasonably short for filenames.
+ let mut hasher = StableHasher::new();
+ human_readable_name.hash(&mut hasher);
+ let hash: u128 = hasher.finish();
+ let hash = hash & ((1u128 << 80) - 1);
+ base_n::encode(hash, base_n::CASE_INSENSITIVE)
+ }
+
+ pub fn estimate_size(&mut self, tcx: TyCtxt<'tcx>) {
+ // Estimate the size of a codegen unit as (approximately) the number of MIR
+ // statements it corresponds to.
+ self.size_estimate = Some(self.items.keys().map(|mi| mi.size_estimate(tcx)).sum());
+ }
+
+ #[inline]
+ pub fn size_estimate(&self) -> usize {
+ // Should only be called if `estimate_size` has previously been called.
+ self.size_estimate.expect("estimate_size must be called before getting a size_estimate")
+ }
+
+ pub fn modify_size_estimate(&mut self, delta: usize) {
+ assert!(self.size_estimate.is_some());
+ if let Some(size_estimate) = self.size_estimate {
+ self.size_estimate = Some(size_estimate + delta);
+ }
+ }
+
+ pub fn contains_item(&self, item: &MonoItem<'tcx>) -> bool {
+ self.items().contains_key(item)
+ }
+
+ pub fn work_product_id(&self) -> WorkProductId {
+ WorkProductId::from_cgu_name(self.name().as_str())
+ }
+
+ pub fn previous_work_product(&self, tcx: TyCtxt<'_>) -> WorkProduct {
+ let work_product_id = self.work_product_id();
+ tcx.dep_graph
+ .previous_work_product(&work_product_id)
+ .unwrap_or_else(|| panic!("Could not find work-product for CGU `{}`", self.name()))
+ }
+
+ pub fn items_in_deterministic_order(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ ) -> Vec<(MonoItem<'tcx>, (Linkage, Visibility))> {
+ // The codegen tests rely on items being processed in the same order as
+ // they appear in the file, so for local items, we sort by node_id first.
+ #[derive(PartialEq, Eq, PartialOrd, Ord)]
+ pub struct ItemSortKey<'tcx>(Option<usize>, SymbolName<'tcx>);
+
+ fn item_sort_key<'tcx>(tcx: TyCtxt<'tcx>, item: MonoItem<'tcx>) -> ItemSortKey<'tcx> {
+ ItemSortKey(
+ match item {
+ MonoItem::Fn(ref instance) => {
+ match instance.def {
+ // We only want to take HirIds of user-defined
+ // instances into account. The others don't matter for
+ // the codegen tests and can even make item order
+ // unstable.
+ InstanceDef::Item(def) => def.did.as_local().map(Idx::index),
+ InstanceDef::VTableShim(..)
+ | InstanceDef::ReifyShim(..)
+ | InstanceDef::Intrinsic(..)
+ | InstanceDef::FnPtrShim(..)
+ | InstanceDef::Virtual(..)
+ | InstanceDef::ClosureOnceShim { .. }
+ | InstanceDef::DropGlue(..)
+ | InstanceDef::CloneShim(..) => None,
+ }
+ }
+ MonoItem::Static(def_id) => def_id.as_local().map(Idx::index),
+ MonoItem::GlobalAsm(item_id) => Some(item_id.def_id.index()),
+ },
+ item.symbol_name(tcx),
+ )
+ }
+
+ let mut items: Vec<_> = self.items().iter().map(|(&i, &l)| (i, l)).collect();
+ items.sort_by_cached_key(|&(i, _)| item_sort_key(tcx, i));
+ items
+ }
+
+ pub fn codegen_dep_node(&self, tcx: TyCtxt<'tcx>) -> DepNode {
+ crate::dep_graph::make_compile_codegen_unit(tcx, self.name())
+ }
+}
+
+impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for CodegenUnit<'tcx> {
+ fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+ let CodegenUnit {
+ ref items,
+ name,
+ // The size estimate is not relevant to the hash
+ size_estimate: _,
+ primary: _,
+ is_code_coverage_dead_code_cgu,
+ } = *self;
+
+ name.hash_stable(hcx, hasher);
+ is_code_coverage_dead_code_cgu.hash_stable(hcx, hasher);
+
+ let mut items: Vec<(Fingerprint, _)> = items
+ .iter()
+ .map(|(mono_item, &attrs)| {
+ let mut hasher = StableHasher::new();
+ mono_item.hash_stable(hcx, &mut hasher);
+ let mono_item_fingerprint = hasher.finish();
+ (mono_item_fingerprint, attrs)
+ })
+ .collect();
+
+ items.sort_unstable_by_key(|i| i.0);
+ items.hash_stable(hcx, hasher);
+ }
+}
+
+pub struct CodegenUnitNameBuilder<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ cache: FxHashMap<CrateNum, String>,
+}
+
+impl<'tcx> CodegenUnitNameBuilder<'tcx> {
+ pub fn new(tcx: TyCtxt<'tcx>) -> Self {
+ CodegenUnitNameBuilder { tcx, cache: Default::default() }
+ }
+
+ /// CGU names should fulfill the following requirements:
+ /// - They should be able to act as a file name on any kind of file system
+ /// - They should not collide with other CGU names, even for different versions
+ /// of the same crate.
+ ///
+ /// Consequently, we don't use special characters except for '.' and '-' and we
+ /// prefix each name with the crate-name and crate-disambiguator.
+ ///
+ /// This function will build CGU names of the form:
+ ///
+ /// ```text
+ /// <crate-name>.<crate-disambiguator>[-in-<local-crate-id>](-<component>)*[.<special-suffix>]
+ /// <local-crate-id> = <local-crate-name>.<local-crate-disambiguator>
+ /// ```
+ ///
+ /// The '.' before `<special-suffix>` makes sure that names with a special
+ /// suffix can never collide with a name built out of regular Rust
+ /// identifiers (e.g., module paths).
+ pub fn build_cgu_name<I, C, S>(
+ &mut self,
+ cnum: CrateNum,
+ components: I,
+ special_suffix: Option<S>,
+ ) -> Symbol
+ where
+ I: IntoIterator<Item = C>,
+ C: fmt::Display,
+ S: fmt::Display,
+ {
+ let cgu_name = self.build_cgu_name_no_mangle(cnum, components, special_suffix);
+
+ if self.tcx.sess.opts.unstable_opts.human_readable_cgu_names {
+ cgu_name
+ } else {
+ Symbol::intern(&CodegenUnit::mangle_name(cgu_name.as_str()))
+ }
+ }
+
+ /// Same as `CodegenUnit::build_cgu_name()` but will never mangle the
+ /// resulting name.
+ pub fn build_cgu_name_no_mangle<I, C, S>(
+ &mut self,
+ cnum: CrateNum,
+ components: I,
+ special_suffix: Option<S>,
+ ) -> Symbol
+ where
+ I: IntoIterator<Item = C>,
+ C: fmt::Display,
+ S: fmt::Display,
+ {
+ use std::fmt::Write;
+
+ let mut cgu_name = String::with_capacity(64);
+
+ // Start out with the crate name and disambiguator
+ let tcx = self.tcx;
+ let crate_prefix = self.cache.entry(cnum).or_insert_with(|| {
+ // Whenever the cnum is not LOCAL_CRATE we also mix in the
+ // local crate's ID. Otherwise there can be collisions between CGUs
+ // instantiating stuff for upstream crates.
+ let local_crate_id = if cnum != LOCAL_CRATE {
+ let local_stable_crate_id = tcx.sess.local_stable_crate_id();
+ format!(
+ "-in-{}.{:08x}",
+ tcx.crate_name(LOCAL_CRATE),
+ local_stable_crate_id.to_u64() as u32,
+ )
+ } else {
+ String::new()
+ };
+
+ let stable_crate_id = tcx.sess.local_stable_crate_id();
+ format!(
+ "{}.{:08x}{}",
+ tcx.crate_name(cnum),
+ stable_crate_id.to_u64() as u32,
+ local_crate_id,
+ )
+ });
+
+ write!(cgu_name, "{}", crate_prefix).unwrap();
+
+ // Add the components
+ for component in components {
+ write!(cgu_name, "-{}", component).unwrap();
+ }
+
+ if let Some(special_suffix) = special_suffix {
+ // We add a dot in here so it cannot clash with anything in a regular
+ // Rust identifier
+ write!(cgu_name, ".{}", special_suffix).unwrap();
+ }
+
+ Symbol::intern(&cgu_name)
+ }
+}
diff --git a/compiler/rustc_middle/src/mir/patch.rs b/compiler/rustc_middle/src/mir/patch.rs
new file mode 100644
index 000000000..15496842d
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/patch.rs
@@ -0,0 +1,196 @@
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_middle::mir::*;
+use rustc_middle::ty::Ty;
+use rustc_span::Span;
+
+/// This struct represents a patch to MIR, which can add
+/// new statements and basic blocks and patch over block
+/// terminators.
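+///
+/// A minimal usage sketch (the locals, location, and blocks are made up):
+///
+/// ```ignore (illustrative)
+/// let mut patch = MirPatch::new(body);
+/// let temp = patch.new_temp(ty, span);
+/// patch.add_assign(loc, Place::from(temp), rvalue);
+/// patch.patch_terminator(bb, TerminatorKind::Goto { target });
+/// patch.apply(body);
+/// ```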
+pub struct MirPatch<'tcx> {
+ patch_map: IndexVec<BasicBlock, Option<TerminatorKind<'tcx>>>,
+ new_blocks: Vec<BasicBlockData<'tcx>>,
+ new_statements: Vec<(Location, StatementKind<'tcx>)>,
+ new_locals: Vec<LocalDecl<'tcx>>,
+ resume_block: Option<BasicBlock>,
+ body_span: Span,
+ next_local: usize,
+}
+
+impl<'tcx> MirPatch<'tcx> {
+ pub fn new(body: &Body<'tcx>) -> Self {
+ let mut result = MirPatch {
+ patch_map: IndexVec::from_elem(None, body.basic_blocks()),
+ new_blocks: vec![],
+ new_statements: vec![],
+ new_locals: vec![],
+ next_local: body.local_decls.len(),
+ resume_block: None,
+ body_span: body.span,
+ };
+
+ // Check if we already have a resume block
+ for (bb, block) in body.basic_blocks().iter_enumerated() {
+ if let TerminatorKind::Resume = block.terminator().kind && block.statements.is_empty() {
+ result.resume_block = Some(bb);
+ break;
+ }
+ }
+
+ result
+ }
+
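+ /// Returns the resume block for this body, creating a fresh cleanup block
+ /// with a lone `Resume` terminator if none exists yet.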
+ pub fn resume_block(&mut self) -> BasicBlock {
+ if let Some(bb) = self.resume_block {
+ return bb;
+ }
+
+ let bb = self.new_block(BasicBlockData {
+ statements: vec![],
+ terminator: Some(Terminator {
+ source_info: SourceInfo::outermost(self.body_span),
+ kind: TerminatorKind::Resume,
+ }),
+ is_cleanup: true,
+ });
+ self.resume_block = Some(bb);
+ bb
+ }
+
+ pub fn is_patched(&self, bb: BasicBlock) -> bool {
+ self.patch_map[bb].is_some()
+ }
+
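+ /// Returns the location of the terminator of `bb`, taking any blocks added
+ /// by this patch into account.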
+ pub fn terminator_loc(&self, body: &Body<'tcx>, bb: BasicBlock) -> Location {
+ let offset = match bb.index().checked_sub(body.basic_blocks().len()) {
+ Some(index) => self.new_blocks[index].statements.len(),
+ None => body[bb].statements.len(),
+ };
+ Location { block: bb, statement_index: offset }
+ }
+
+ pub fn new_local_with_info(
+ &mut self,
+ ty: Ty<'tcx>,
+ span: Span,
+ local_info: Option<Box<LocalInfo<'tcx>>>,
+ ) -> Local {
+ let index = self.next_local;
+ self.next_local += 1;
+ let mut new_decl = LocalDecl::new(ty, span);
+ new_decl.local_info = local_info;
+ self.new_locals.push(new_decl);
+ Local::new(index as usize)
+ }
+
+ pub fn new_temp(&mut self, ty: Ty<'tcx>, span: Span) -> Local {
+ self.new_local_with_info(ty, span, None)
+ }
+
+ pub fn new_internal(&mut self, ty: Ty<'tcx>, span: Span) -> Local {
+ let index = self.next_local;
+ self.next_local += 1;
+ self.new_locals.push(LocalDecl::new(ty, span).internal());
+ Local::new(index as usize)
+ }
+
+ pub fn new_block(&mut self, data: BasicBlockData<'tcx>) -> BasicBlock {
+ let block = BasicBlock::new(self.patch_map.len());
+ debug!("MirPatch: new_block: {:?}: {:?}", block, data);
+ self.new_blocks.push(data);
+ self.patch_map.push(None);
+ block
+ }
+
+ pub fn patch_terminator(&mut self, block: BasicBlock, new: TerminatorKind<'tcx>) {
+ assert!(self.patch_map[block].is_none());
+ debug!("MirPatch: patch_terminator({:?}, {:?})", block, new);
+ self.patch_map[block] = Some(new);
+ }
+
+ pub fn add_statement(&mut self, loc: Location, stmt: StatementKind<'tcx>) {
+ debug!("MirPatch: add_statement({:?}, {:?})", loc, stmt);
+ self.new_statements.push((loc, stmt));
+ }
+
+ pub fn add_assign(&mut self, loc: Location, place: Place<'tcx>, rv: Rvalue<'tcx>) {
+ self.add_statement(loc, StatementKind::Assign(Box::new((place, rv))));
+ }
+
+ pub fn apply(self, body: &mut Body<'tcx>) {
+ debug!(
+ "MirPatch: {:?} new temps, starting from index {}: {:?}",
+ self.new_locals.len(),
+ body.local_decls.len(),
+ self.new_locals
+ );
+ debug!(
+ "MirPatch: {} new blocks, starting from index {}",
+ self.new_blocks.len(),
+ body.basic_blocks().len()
+ );
+ let bbs = if self.patch_map.is_empty() && self.new_blocks.is_empty() {
+ body.basic_blocks.as_mut_preserves_cfg()
+ } else {
+ body.basic_blocks.as_mut()
+ };
+ bbs.extend(self.new_blocks);
+ body.local_decls.extend(self.new_locals);
+ for (src, patch) in self.patch_map.into_iter_enumerated() {
+ if let Some(patch) = patch {
+ debug!("MirPatch: patching block {:?}", src);
+ bbs[src].terminator_mut().kind = patch;
+ }
+ }
+
+ let mut new_statements = self.new_statements;
+ new_statements.sort_by_key(|s| s.0);
+
+ let mut delta = 0;
+ let mut last_bb = START_BLOCK;
+ let mut stmts_and_targets: Vec<(Statement<'_>, BasicBlock)> = Vec::new();
+ for (mut loc, stmt) in new_statements {
+ if loc.block != last_bb {
+ delta = 0;
+ last_bb = loc.block;
+ }
+ debug!("MirPatch: adding statement {:?} at loc {:?}+{}", stmt, loc, delta);
+ loc.statement_index += delta;
+ let source_info = Self::source_info_for_index(&body[loc.block], loc);
+
+ // For the mir-opt `Derefer` to work in all cases we need to
+ // get the terminator's targets and apply the statement to all of them.
+ if loc.statement_index > body[loc.block].statements.len() {
+ let term = body[loc.block].terminator();
+ for i in term.successors() {
+ stmts_and_targets.push((Statement { source_info, kind: stmt.clone() }, i));
+ }
+ delta += 1;
+ continue;
+ }
+
+ body[loc.block]
+ .statements
+ .insert(loc.statement_index, Statement { source_info, kind: stmt });
+ delta += 1;
+ }
+
+ for (stmt, target) in stmts_and_targets.into_iter().rev() {
+ body[target].statements.insert(0, stmt);
+ }
+ }
+
+ pub fn source_info_for_index(data: &BasicBlockData<'_>, loc: Location) -> SourceInfo {
+ match data.statements.get(loc.statement_index) {
+ Some(stmt) => stmt.source_info,
+ None => data.terminator().source_info,
+ }
+ }
+
+ pub fn source_info_for_location(&self, body: &Body<'tcx>, loc: Location) -> SourceInfo {
+ let data = match loc.block.index().checked_sub(body.basic_blocks().len()) {
+ Some(new) => &self.new_blocks[new],
+ None => &body[loc.block],
+ };
+ Self::source_info_for_index(data, loc)
+ }
+}
diff --git a/compiler/rustc_middle/src/mir/predecessors.rs b/compiler/rustc_middle/src/mir/predecessors.rs
new file mode 100644
index 000000000..5f1fadaf3
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/predecessors.rs
@@ -0,0 +1,78 @@
+//! Lazily compute the reverse control-flow graph for the MIR.
+
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_data_structures::sync::OnceCell;
+use rustc_index::vec::IndexVec;
+use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
+use smallvec::SmallVec;
+
+use crate::mir::{BasicBlock, BasicBlockData};
+
+// Typically 95%+ of basic blocks have 4 or fewer predecessors.
+pub type Predecessors = IndexVec<BasicBlock, SmallVec<[BasicBlock; 4]>>;
+
+#[derive(Clone, Debug)]
+pub(super) struct PredecessorCache {
+ cache: OnceCell<Predecessors>,
+}
+
+impl PredecessorCache {
+ #[inline]
+ pub(super) fn new() -> Self {
+ PredecessorCache { cache: OnceCell::new() }
+ }
+
+ /// Invalidates the predecessor cache.
+ #[inline]
+ pub(super) fn invalidate(&mut self) {
+ // Invalidating the predecessor cache requires mutating the MIR, which in turn requires a
+ // unique reference (`&mut`) to the `mir::Body`. Because of this, we can assume that all
+ // callers of `invalidate` have a unique reference to the MIR and thus to the predecessor
+ // cache. This means we never need to do synchronization when `invalidate` is called, we can
+ // simply reinitialize the `OnceCell`.
+ self.cache = OnceCell::new();
+ }
+
+ /// Returns the predecessor graph for this MIR.
+ #[inline]
+ pub(super) fn compute(
+ &self,
+ basic_blocks: &IndexVec<BasicBlock, BasicBlockData<'_>>,
+ ) -> &Predecessors {
+ self.cache.get_or_init(|| {
+ let mut preds = IndexVec::from_elem(SmallVec::new(), basic_blocks);
+ for (bb, data) in basic_blocks.iter_enumerated() {
+ if let Some(term) = &data.terminator {
+ for succ in term.successors() {
+ preds[succ].push(bb);
+ }
+ }
+ }
+
+ preds
+ })
+ }
+}
+
+impl<S: Encoder> Encodable<S> for PredecessorCache {
+ #[inline]
+ fn encode(&self, _s: &mut S) {}
+}
+
+impl<D: Decoder> Decodable<D> for PredecessorCache {
+ #[inline]
+ fn decode(_: &mut D) -> Self {
+ Self::new()
+ }
+}
+
+impl<CTX> HashStable<CTX> for PredecessorCache {
+ #[inline]
+ fn hash_stable(&self, _: &mut CTX, _: &mut StableHasher) {
+ // do nothing
+ }
+}
+
+TrivialTypeTraversalAndLiftImpls! {
+ PredecessorCache,
+}
diff --git a/compiler/rustc_middle/src/mir/pretty.rs b/compiler/rustc_middle/src/mir/pretty.rs
new file mode 100644
index 000000000..0ce41337b
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/pretty.rs
@@ -0,0 +1,1067 @@
+use std::collections::BTreeSet;
+use std::fmt::Display;
+use std::fmt::Write as _;
+use std::fs;
+use std::io::{self, Write};
+use std::path::{Path, PathBuf};
+
+use super::graphviz::write_mir_fn_graphviz;
+use super::spanview::write_mir_fn_spanview;
+use either::Either;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_hir::def_id::DefId;
+use rustc_index::vec::Idx;
+use rustc_middle::mir::interpret::{
+ read_target_uint, AllocId, Allocation, ConstAllocation, ConstValue, GlobalAlloc, Pointer,
+ Provenance,
+};
+use rustc_middle::mir::visit::Visitor;
+use rustc_middle::mir::MirSource;
+use rustc_middle::mir::*;
+use rustc_middle::ty::{self, TyCtxt};
+use rustc_target::abi::Size;
+
+const INDENT: &str = " ";
+/// Alignment for lining up comments following MIR statements
+pub(crate) const ALIGN: usize = 40;
+
+/// An indication of where we are in the control flow graph. Used for printing
+/// extra information in `dump_mir`.
+pub enum PassWhere {
+ /// We have not started dumping the control flow graph, but we are about to.
+ BeforeCFG,
+
+ /// We just finished dumping the control flow graph. This is right before EOF.
+ AfterCFG,
+
+ /// We are about to start dumping the given basic block.
+ BeforeBlock(BasicBlock),
+
+ /// We are just about to dump the given statement or terminator.
+ BeforeLocation(Location),
+
+ /// We just dumped the given statement or terminator.
+ AfterLocation(Location),
+
+ /// We just dumped the terminator for a block but not the closing `}`.
+ AfterTerminator(BasicBlock),
+}
+
+/// If the session is properly configured, dumps a human-readable
+/// representation of the mir into:
+///
+/// ```text
+/// rustc.node<node_id>.<pass_num>.<pass_name>.<disambiguator>
+/// ```
+///
+/// Output from this function is controlled by passing `-Z dump-mir=<filter>`,
+/// where `<filter>` takes the following forms:
+///
+/// - `all` -- dump MIR for all fns, all passes, all everything
+/// - a filter defined by a set of substrings combined with `&` and `|`
+/// (`&` has higher precedence). At least one of the `|`-separated groups
+/// must match; an `|`-separated group matches if all of its `&`-separated
+/// substrings are matched.
+///
+/// Example:
+///
+/// - `nll` == match if `nll` appears in the name
+/// - `foo & nll` == match if `foo` and `nll` both appear in the name
+/// - `foo & nll | typeck` == match if `foo` and `nll` both appear in the name
+/// or `typeck` appears in the name.
+/// - `foo & nll | bar & typeck` == match if `foo` and `nll` both appear in the name
+/// or `typeck` and `bar` both appear in the name.
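+///
+/// A hypothetical call site (the pass name and disambiguator are made up):
+///
+/// ```ignore (illustrative)
+/// dump_mir(tcx, None, "my-pass", &"after", body, |_, _| Ok(()));
+/// ```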
+#[inline]
+pub fn dump_mir<'tcx, F>(
+ tcx: TyCtxt<'tcx>,
+ pass_num: Option<&dyn Display>,
+ pass_name: &str,
+ disambiguator: &dyn Display,
+ body: &Body<'tcx>,
+ extra_data: F,
+) where
+ F: FnMut(PassWhere, &mut dyn Write) -> io::Result<()>,
+{
+ if !dump_enabled(tcx, pass_name, body.source.def_id()) {
+ return;
+ }
+
+ dump_matched_mir_node(tcx, pass_num, pass_name, disambiguator, body, extra_data);
+}
+
+pub fn dump_enabled<'tcx>(tcx: TyCtxt<'tcx>, pass_name: &str, def_id: DefId) -> bool {
+ let Some(ref filters) = tcx.sess.opts.unstable_opts.dump_mir else {
+ return false;
+ };
+ // see notes on #41697 below
+ let node_path = ty::print::with_forced_impl_filename_line!(tcx.def_path_str(def_id));
+ filters.split('|').any(|or_filter| {
+ or_filter.split('&').all(|and_filter| {
+ let and_filter_trimmed = and_filter.trim();
+ and_filter_trimmed == "all"
+ || pass_name.contains(and_filter_trimmed)
+ || node_path.contains(and_filter_trimmed)
+ })
+ })
+}
+
+// #41697 -- we use `with_forced_impl_filename_line()` because
+// `def_path_str()` would otherwise trigger `type_of`, and this can
+// run while we are already attempting to evaluate `type_of`.
+
+fn dump_matched_mir_node<'tcx, F>(
+ tcx: TyCtxt<'tcx>,
+ pass_num: Option<&dyn Display>,
+ pass_name: &str,
+ disambiguator: &dyn Display,
+ body: &Body<'tcx>,
+ mut extra_data: F,
+) where
+ F: FnMut(PassWhere, &mut dyn Write) -> io::Result<()>,
+{
+ let _: io::Result<()> = try {
+ let mut file =
+ create_dump_file(tcx, "mir", pass_num, pass_name, disambiguator, body.source)?;
+ // see notes on #41697 above
+ let def_path =
+ ty::print::with_forced_impl_filename_line!(tcx.def_path_str(body.source.def_id()));
+ write!(file, "// MIR for `{}", def_path)?;
+ match body.source.promoted {
+ None => write!(file, "`")?,
+ Some(promoted) => write!(file, "::{:?}`", promoted)?,
+ }
+ writeln!(file, " {} {}", disambiguator, pass_name)?;
+ if let Some(ref layout) = body.generator_layout() {
+ writeln!(file, "/* generator_layout = {:#?} */", layout)?;
+ }
+ writeln!(file)?;
+ extra_data(PassWhere::BeforeCFG, &mut file)?;
+ write_user_type_annotations(tcx, body, &mut file)?;
+ write_mir_fn(tcx, body, &mut extra_data, &mut file)?;
+ extra_data(PassWhere::AfterCFG, &mut file)?;
+ };
+
+ if tcx.sess.opts.unstable_opts.dump_mir_graphviz {
+ let _: io::Result<()> = try {
+ let mut file =
+ create_dump_file(tcx, "dot", pass_num, pass_name, disambiguator, body.source)?;
+ write_mir_fn_graphviz(tcx, body, false, &mut file)?;
+ };
+ }
+
+ if let Some(spanview) = tcx.sess.opts.unstable_opts.dump_mir_spanview {
+ let _: io::Result<()> = try {
+ let file_basename =
+ dump_file_basename(tcx, pass_num, pass_name, disambiguator, body.source);
+ let mut file = create_dump_file_with_basename(tcx, &file_basename, "html")?;
+ if body.source.def_id().is_local() {
+ write_mir_fn_spanview(tcx, body, spanview, &file_basename, &mut file)?;
+ }
+ };
+ }
+}
+
+/// Returns the file basename portion (without extension) of the path
+/// where we should dump MIR representation output files.
+fn dump_file_basename<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ pass_num: Option<&dyn Display>,
+ pass_name: &str,
+ disambiguator: &dyn Display,
+ source: MirSource<'tcx>,
+) -> String {
+ let promotion_id = match source.promoted {
+ Some(id) => format!("-{:?}", id),
+ None => String::new(),
+ };
+
+ let pass_num = if tcx.sess.opts.unstable_opts.dump_mir_exclude_pass_number {
+ String::new()
+ } else {
+ match pass_num {
+ None => ".-------".to_string(),
+ Some(pass_num) => format!(".{}", pass_num),
+ }
+ };
+
+ let crate_name = tcx.crate_name(source.def_id().krate);
+ let item_name = tcx.def_path(source.def_id()).to_filename_friendly_no_crate();
+ // All drop shims have the same DefId, so we have to add the type
+ // to get unique file names.
+ let shim_disambiguator = match source.instance {
+ ty::InstanceDef::DropGlue(_, Some(ty)) => {
+ // Unfortunately, pretty-printed types are not very filename-friendly,
+ // so we do some filtering.
+ let mut s = ".".to_owned();
+ s.extend(ty.to_string().chars().filter_map(|c| match c {
+ ' ' => None,
+ ':' | '<' | '>' => Some('_'),
+ c => Some(c),
+ }));
+ s
+ }
+ _ => String::new(),
+ };
+
+ format!(
+ "{}.{}{}{}{}.{}.{}",
+ crate_name, item_name, shim_disambiguator, promotion_id, pass_num, pass_name, disambiguator,
+ )
+}
+
+/// Returns the path of the file where we should dump a given MIR.
+/// Also used by other bits of code (e.g., NLL inference) that dump
+/// graphviz data or other things.
+fn dump_path(tcx: TyCtxt<'_>, basename: &str, extension: &str) -> PathBuf {
+ let mut file_path = PathBuf::new();
+ file_path.push(Path::new(&tcx.sess.opts.unstable_opts.dump_mir_dir));
+
+ let file_name = format!("{}.{}", basename, extension,);
+
+ file_path.push(&file_name);
+
+ file_path
+}
+
+/// Attempts to open the MIR dump file with the given name and extension.
+fn create_dump_file_with_basename(
+ tcx: TyCtxt<'_>,
+ file_basename: &str,
+ extension: &str,
+) -> io::Result<io::BufWriter<fs::File>> {
+ let file_path = dump_path(tcx, file_basename, extension);
+ if let Some(parent) = file_path.parent() {
+ fs::create_dir_all(parent).map_err(|e| {
+ io::Error::new(
+ e.kind(),
+ format!("IO error creating MIR dump directory: {:?}; {}", parent, e),
+ )
+ })?;
+ }
+ Ok(io::BufWriter::new(fs::File::create(&file_path).map_err(|e| {
+ io::Error::new(e.kind(), format!("IO error creating MIR dump file: {:?}; {}", file_path, e))
+ })?))
+}
+
+/// Attempts to open a file where we should dump a given MIR or other
+/// bit of MIR-related data. Used by `mir-dump`, but also by other
+/// bits of code (e.g., NLL inference) that dump graphviz data or
+/// other things, and hence takes the extension as an argument.
+pub fn create_dump_file<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ extension: &str,
+ pass_num: Option<&dyn Display>,
+ pass_name: &str,
+ disambiguator: &dyn Display,
+ source: MirSource<'tcx>,
+) -> io::Result<io::BufWriter<fs::File>> {
+ create_dump_file_with_basename(
+ tcx,
+ &dump_file_basename(tcx, pass_num, pass_name, disambiguator, source),
+ extension,
+ )
+}
+
+/// Write out a human-readable textual representation for the given MIR.
+pub fn write_mir_pretty<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ single: Option<DefId>,
+ w: &mut dyn Write,
+) -> io::Result<()> {
+ writeln!(w, "// WARNING: This output format is intended for human consumers only")?;
+ writeln!(w, "// and is subject to change without notice. Knock yourself out.")?;
+
+ let mut first = true;
+ for def_id in dump_mir_def_ids(tcx, single) {
+ if first {
+ first = false;
+ } else {
+ // Put empty lines between all items
+ writeln!(w)?;
+ }
+
+ let render_body = |w: &mut dyn Write, body| -> io::Result<()> {
+ write_mir_fn(tcx, body, &mut |_, _| Ok(()), w)?;
+
+ for body in tcx.promoted_mir(def_id) {
+ writeln!(w)?;
+ write_mir_fn(tcx, body, &mut |_, _| Ok(()), w)?;
+ }
+ Ok(())
+ };
+
+ // For `const fn` we want to render both the optimized MIR and the MIR for ctfe.
+ if tcx.is_const_fn_raw(def_id) {
+ render_body(w, tcx.optimized_mir(def_id))?;
+ writeln!(w)?;
+ writeln!(w, "// MIR FOR CTFE")?;
+ // Do not use `render_body`, as that would render the promoteds again, but these
+ // are shared between mir_for_ctfe and optimized_mir
+ write_mir_fn(tcx, tcx.mir_for_ctfe(def_id), &mut |_, _| Ok(()), w)?;
+ } else {
+ let instance_mir =
+ tcx.instance_mir(ty::InstanceDef::Item(ty::WithOptConstParam::unknown(def_id)));
+ render_body(w, instance_mir)?;
+ }
+ }
+ Ok(())
+}
+
+/// Write out a human-readable textual representation for the given function.
+pub fn write_mir_fn<'tcx, F>(
+ tcx: TyCtxt<'tcx>,
+ body: &Body<'tcx>,
+ extra_data: &mut F,
+ w: &mut dyn Write,
+) -> io::Result<()>
+where
+ F: FnMut(PassWhere, &mut dyn Write) -> io::Result<()>,
+{
+ write_mir_intro(tcx, body, w)?;
+ for block in body.basic_blocks().indices() {
+ extra_data(PassWhere::BeforeBlock(block), w)?;
+ write_basic_block(tcx, block, body, extra_data, w)?;
+ if block.index() + 1 != body.basic_blocks().len() {
+ writeln!(w)?;
+ }
+ }
+
+ writeln!(w, "}}")?;
+
+ write_allocations(tcx, body, w)?;
+
+ Ok(())
+}
+
+/// Write out a human-readable textual representation for the given basic block.
+pub fn write_basic_block<'tcx, F>(
+ tcx: TyCtxt<'tcx>,
+ block: BasicBlock,
+ body: &Body<'tcx>,
+ extra_data: &mut F,
+ w: &mut dyn Write,
+) -> io::Result<()>
+where
+ F: FnMut(PassWhere, &mut dyn Write) -> io::Result<()>,
+{
+ let data = &body[block];
+
+ // Basic block label at the top.
+ let cleanup_text = if data.is_cleanup { " (cleanup)" } else { "" };
+ writeln!(w, "{}{:?}{}: {{", INDENT, block, cleanup_text)?;
+
+ // List of statements in the middle.
+ let mut current_location = Location { block, statement_index: 0 };
+ for statement in &data.statements {
+ extra_data(PassWhere::BeforeLocation(current_location), w)?;
+ let indented_body = format!("{0}{0}{1:?};", INDENT, statement);
+ writeln!(
+ w,
+ "{:A$} // {}{}",
+ indented_body,
+ if tcx.sess.verbose() { format!("{:?}: ", current_location) } else { String::new() },
+ comment(tcx, statement.source_info, body.span),
+ A = ALIGN,
+ )?;
+
+ write_extra(tcx, w, |visitor| {
+ visitor.visit_statement(statement, current_location);
+ })?;
+
+ extra_data(PassWhere::AfterLocation(current_location), w)?;
+
+ current_location.statement_index += 1;
+ }
+
+ // Terminator at the bottom.
+ extra_data(PassWhere::BeforeLocation(current_location), w)?;
+ let indented_terminator = format!("{0}{0}{1:?};", INDENT, data.terminator().kind);
+ writeln!(
+ w,
+ "{:A$} // {}{}",
+ indented_terminator,
+ if tcx.sess.verbose() { format!("{:?}: ", current_location) } else { String::new() },
+ comment(tcx, data.terminator().source_info, body.span),
+ A = ALIGN,
+ )?;
+
+ write_extra(tcx, w, |visitor| {
+ visitor.visit_terminator(data.terminator(), current_location);
+ })?;
+
+ extra_data(PassWhere::AfterLocation(current_location), w)?;
+ extra_data(PassWhere::AfterTerminator(block), w)?;
+
+ writeln!(w, "{}}}", INDENT)
+}
+
+/// After we print the main statement, we sometimes dump extra
+/// information. There are often a lot of little things "nuzzled up" in
+/// a statement.
+fn write_extra<'tcx, F>(tcx: TyCtxt<'tcx>, write: &mut dyn Write, mut visit_op: F) -> io::Result<()>
+where
+ F: FnMut(&mut ExtraComments<'tcx>),
+{
+ let mut extra_comments = ExtraComments { tcx, comments: vec![] };
+ visit_op(&mut extra_comments);
+ for comment in extra_comments.comments {
+ writeln!(write, "{:A$} // {}", "", comment, A = ALIGN)?;
+ }
+ Ok(())
+}
+
+struct ExtraComments<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ comments: Vec<String>,
+}
+
+impl<'tcx> ExtraComments<'tcx> {
+ fn push(&mut self, lines: &str) {
+ for line in lines.split('\n') {
+ self.comments.push(line.to_string());
+ }
+ }
+}
+
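+/// Returns whether constants of the given type warrant the verbose
+/// `mir::Constant` extra-comment block.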
+fn use_verbose<'tcx>(ty: Ty<'tcx>, fn_def: bool) -> bool {
+ match *ty.kind() {
+ ty::Int(_) | ty::Uint(_) | ty::Bool | ty::Char | ty::Float(_) => false,
+ // Unit type
+ ty::Tuple(g_args) if g_args.is_empty() => false,
+ ty::Tuple(g_args) => g_args.iter().any(|g_arg| use_verbose(g_arg, fn_def)),
+ ty::Array(ty, _) => use_verbose(ty, fn_def),
+ ty::FnDef(..) => fn_def,
+ _ => true,
+ }
+}
+
+impl<'tcx> Visitor<'tcx> for ExtraComments<'tcx> {
+ fn visit_constant(&mut self, constant: &Constant<'tcx>, _location: Location) {
+ let Constant { span, user_ty, literal } = constant;
+ if use_verbose(literal.ty(), true) {
+ self.push("mir::Constant");
+ self.push(&format!(
+ "+ span: {}",
+ self.tcx.sess.source_map().span_to_embeddable_string(*span)
+ ));
+ if let Some(user_ty) = user_ty {
+ self.push(&format!("+ user_ty: {:?}", user_ty));
+ }
+
+ // FIXME: this is a poor version of `pretty_print_const_value`.
+ let fmt_val = |val: &ConstValue<'tcx>| match val {
+ ConstValue::ZeroSized => format!("<ZST>"),
+ ConstValue::Scalar(s) => format!("Scalar({:?})", s),
+ ConstValue::Slice { .. } => format!("Slice(..)"),
+ ConstValue::ByRef { .. } => format!("ByRef(..)"),
+ };
+
+ let fmt_valtree = |valtree: &ty::ValTree<'tcx>| match valtree {
+ ty::ValTree::Leaf(leaf) => format!("ValTree::Leaf({:?})", leaf),
+ ty::ValTree::Branch(_) => format!("ValTree::Branch(..)"),
+ };
+
+ let val = match literal {
+ ConstantKind::Ty(ct) => match ct.kind() {
+ ty::ConstKind::Param(p) => format!("Param({})", p),
+ ty::ConstKind::Unevaluated(uv) => format!(
+ "Unevaluated({}, {:?}, {:?})",
+ self.tcx.def_path_str(uv.def.did),
+ uv.substs,
+ uv.promoted,
+ ),
+ ty::ConstKind::Value(val) => format!("Value({})", fmt_valtree(&val)),
+ ty::ConstKind::Error(_) => "Error".to_string(),
+ // These variants shouldn't exist in the MIR.
+ ty::ConstKind::Placeholder(_)
+ | ty::ConstKind::Infer(_)
+ | ty::ConstKind::Bound(..) => bug!("unexpected MIR constant: {:?}", literal),
+ },
+ // To keep the diffs small, we render this like we render `ty::Const::Value`.
+ //
+ // This will change once `ty::Const::Value` is represented using valtrees.
+ ConstantKind::Val(val, _) => format!("Value({})", fmt_val(&val)),
+ };
+
+ // This reflects what `Const` looked like before `val` was renamed
+ // to `kind`. We print it like this to avoid having to update
+ // expected output in a lot of tests.
+ self.push(&format!("+ literal: Const {{ ty: {}, val: {} }}", literal.ty(), val));
+ }
+ }
+
+ fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) {
+ self.super_rvalue(rvalue, location);
+ if let Rvalue::Aggregate(kind, _) = rvalue {
+ match **kind {
+ AggregateKind::Closure(def_id, substs) => {
+ self.push("closure");
+ self.push(&format!("+ def_id: {:?}", def_id));
+ self.push(&format!("+ substs: {:#?}", substs));
+ }
+
+ AggregateKind::Generator(def_id, substs, movability) => {
+ self.push("generator");
+ self.push(&format!("+ def_id: {:?}", def_id));
+ self.push(&format!("+ substs: {:#?}", substs));
+ self.push(&format!("+ movability: {:?}", movability));
+ }
+
+ AggregateKind::Adt(_, _, _, Some(user_ty), _) => {
+ self.push("adt");
+ self.push(&format!("+ user_ty: {:?}", user_ty));
+ }
+
+ _ => {}
+ }
+ }
+ }
+}
+
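+/// Renders the trailing "scope N at <span>" comment attached to each
+/// statement, terminator, and declaration.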
+fn comment(tcx: TyCtxt<'_>, SourceInfo { span, scope }: SourceInfo, function_span: Span) -> String {
+ let location = if tcx.sess.opts.unstable_opts.mir_pretty_relative_line_numbers {
+ tcx.sess.source_map().span_to_relative_line_string(span, function_span)
+ } else {
+ tcx.sess.source_map().span_to_embeddable_string(span)
+ };
+
+ format!("scope {} at {}", scope.index(), location,)
+}
+
+/// Prints local variables in a scope tree.
+fn write_scope_tree(
+ tcx: TyCtxt<'_>,
+ body: &Body<'_>,
+ scope_tree: &FxHashMap<SourceScope, Vec<SourceScope>>,
+ w: &mut dyn Write,
+ parent: SourceScope,
+ depth: usize,
+) -> io::Result<()> {
+ let indent = depth * INDENT.len();
+
+ // Local variable debuginfo.
+ for var_debug_info in &body.var_debug_info {
+ if var_debug_info.source_info.scope != parent {
+ // Not declared in this scope.
+ continue;
+ }
+
+ let indented_debug_info = format!(
+ "{0:1$}debug {2} => {3:?};",
+ INDENT, indent, var_debug_info.name, var_debug_info.value,
+ );
+
+ writeln!(
+ w,
+ "{0:1$} // in {2}",
+ indented_debug_info,
+ ALIGN,
+ comment(tcx, var_debug_info.source_info, body.span),
+ )?;
+ }
+
+ // Local variable types.
+ for (local, local_decl) in body.local_decls.iter_enumerated() {
+ if (1..body.arg_count + 1).contains(&local.index()) {
+ // Skip over argument locals, they're printed in the signature.
+ continue;
+ }
+
+ if local_decl.source_info.scope != parent {
+ // Not declared in this scope.
+ continue;
+ }
+
+ let mut_str = if local_decl.mutability == Mutability::Mut { "mut " } else { "" };
+
+ let mut indented_decl =
+ format!("{0:1$}let {2}{3:?}: {4:?}", INDENT, indent, mut_str, local, local_decl.ty);
+ if let Some(user_ty) = &local_decl.user_ty {
+ for user_ty in user_ty.projections() {
+ write!(indented_decl, " as {:?}", user_ty).unwrap();
+ }
+ }
+ indented_decl.push(';');
+
+ let local_name = if local == RETURN_PLACE { " return place" } else { "" };
+
+ writeln!(
+ w,
+ "{0:1$} //{2} in {3}",
+ indented_decl,
+ ALIGN,
+ local_name,
+ comment(tcx, local_decl.source_info, body.span),
+ )?;
+ }
+
+ let Some(children) = scope_tree.get(&parent) else {
+ return Ok(());
+ };
+
+ for &child in children {
+ let child_data = &body.source_scopes[child];
+ assert_eq!(child_data.parent_scope, Some(parent));
+
+ let (special, span) = if let Some((callee, callsite_span)) = child_data.inlined {
+ (
+ format!(
+ " (inlined {}{})",
+ if callee.def.requires_caller_location(tcx) { "#[track_caller] " } else { "" },
+ callee
+ ),
+ Some(callsite_span),
+ )
+ } else {
+ (String::new(), None)
+ };
+
+ let indented_header = format!("{0:1$}scope {2}{3} {{", "", indent, child.index(), special);
+
+ if let Some(span) = span {
+ writeln!(
+ w,
+ "{0:1$} // at {2}",
+ indented_header,
+ ALIGN,
+ tcx.sess.source_map().span_to_embeddable_string(span),
+ )?;
+ } else {
+ writeln!(w, "{}", indented_header)?;
+ }
+
+ write_scope_tree(tcx, body, scope_tree, w, child, depth + 1)?;
+ writeln!(w, "{0:1$}}}", "", depth * INDENT.len())?;
+ }
+
+ Ok(())
+}
+
+/// Write out a human-readable textual representation of the MIR's `fn` type and the types of its
+/// local variables (both user-defined bindings and compiler temporaries).
+pub fn write_mir_intro<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ body: &Body<'_>,
+ w: &mut dyn Write,
+) -> io::Result<()> {
+ write_mir_sig(tcx, body, w)?;
+ writeln!(w, "{{")?;
+
+ // construct a scope tree and write it out
+ let mut scope_tree: FxHashMap<SourceScope, Vec<SourceScope>> = Default::default();
+ for (index, scope_data) in body.source_scopes.iter().enumerate() {
+ if let Some(parent) = scope_data.parent_scope {
+ scope_tree.entry(parent).or_default().push(SourceScope::new(index));
+ } else {
+ // Only the argument scope has no parent, because it's the root.
+ assert_eq!(index, OUTERMOST_SOURCE_SCOPE.index());
+ }
+ }
+
+ write_scope_tree(tcx, body, &scope_tree, w, OUTERMOST_SOURCE_SCOPE, 1)?;
+
+ // Add an empty line before the first block is printed.
+ writeln!(w)?;
+
+ Ok(())
+}
+
+/// Find all `AllocId`s mentioned (recursively) in the MIR body and print their corresponding
+/// allocations.
+pub fn write_allocations<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ body: &Body<'_>,
+ w: &mut dyn Write,
+) -> io::Result<()> {
+ fn alloc_ids_from_alloc(
+ alloc: ConstAllocation<'_>,
+ ) -> impl DoubleEndedIterator<Item = AllocId> + '_ {
+ alloc.inner().relocations().values().map(|id| *id)
+ }
+
+ fn alloc_ids_from_const_val(val: ConstValue<'_>) -> impl Iterator<Item = AllocId> + '_ {
+ match val {
+ ConstValue::Scalar(interpret::Scalar::Ptr(ptr, _)) => {
+ Either::Left(Either::Left(std::iter::once(ptr.provenance)))
+ }
+ ConstValue::Scalar(interpret::Scalar::Int { .. }) => {
+ Either::Left(Either::Right(std::iter::empty()))
+ }
+ ConstValue::ZeroSized => Either::Left(Either::Right(std::iter::empty())),
+ ConstValue::ByRef { alloc, .. } | ConstValue::Slice { data: alloc, .. } => {
+ Either::Right(alloc_ids_from_alloc(alloc))
+ }
+ }
+ }
+ struct CollectAllocIds(BTreeSet<AllocId>);
+
+ impl<'tcx> Visitor<'tcx> for CollectAllocIds {
+ fn visit_constant(&mut self, c: &Constant<'tcx>, loc: Location) {
+ match c.literal {
+ ConstantKind::Ty(c) => self.visit_const(c, loc),
+ ConstantKind::Val(val, _) => {
+ self.0.extend(alloc_ids_from_const_val(val));
+ }
+ }
+ }
+ }
+
+ let mut visitor = CollectAllocIds(Default::default());
+ visitor.visit_body(body);
+
+ // `seen` contains all seen allocations, including the ones we have *not* printed yet.
+ // The protocol is to first `insert` into `seen`, and only if that returns `true`
+ // then push to `todo`.
+ let mut seen = visitor.0;
+ let mut todo: Vec<_> = seen.iter().copied().collect();
+ while let Some(id) = todo.pop() {
+ let mut write_allocation_track_relocs =
+ |w: &mut dyn Write, alloc: ConstAllocation<'tcx>| -> io::Result<()> {
+ // `.rev()` because we are popping them from the back of the `todo` vector.
+ for id in alloc_ids_from_alloc(alloc).rev() {
+ if seen.insert(id) {
+ todo.push(id);
+ }
+ }
+ write!(w, "{}", display_allocation(tcx, alloc.inner()))
+ };
+ write!(w, "\n{id:?}")?;
+ match tcx.try_get_global_alloc(id) {
+ // This can't really happen unless there are bugs, but it doesn't cost us anything to
+ // gracefully handle it and allow buggy rustc to be debugged via allocation printing.
+ None => write!(w, " (deallocated)")?,
+ Some(GlobalAlloc::Function(inst)) => write!(w, " (fn: {inst})")?,
+ Some(GlobalAlloc::VTable(ty, Some(trait_ref))) => {
+ write!(w, " (vtable: impl {trait_ref} for {ty})")?
+ }
+ Some(GlobalAlloc::VTable(ty, None)) => {
+ write!(w, " (vtable: impl <auto trait> for {ty})")?
+ }
+ Some(GlobalAlloc::Static(did)) if !tcx.is_foreign_item(did) => {
+ match tcx.eval_static_initializer(did) {
+ Ok(alloc) => {
+ write!(w, " (static: {}, ", tcx.def_path_str(did))?;
+ write_allocation_track_relocs(w, alloc)?;
+ }
+ Err(_) => write!(
+ w,
+ " (static: {}, error during initializer evaluation)",
+ tcx.def_path_str(did)
+ )?,
+ }
+ }
+ Some(GlobalAlloc::Static(did)) => {
+ write!(w, " (extern static: {})", tcx.def_path_str(did))?
+ }
+ Some(GlobalAlloc::Memory(alloc)) => {
+ write!(w, " (")?;
+ write_allocation_track_relocs(w, alloc)?
+ }
+ }
+ writeln!(w)?;
+ }
+ Ok(())
+}
+
+/// Dumps the size, metadata, and contents of an allocation to the given writer.
+/// The expectation is that the caller first prints other relevant metadata, so the exact
+/// format of this function is (*without* leading or trailing newline):
+///
+/// ```text
+/// size: {}, align: {}) {
+/// <bytes>
+/// }
+/// ```
+///
+/// The byte format is similar to how hex editors print bytes. Each line starts with the address of
+/// the start of the line, followed by all bytes in hex format (space separated).
+/// If the allocation is small enough to fit into a single line, no start address is given.
+/// After the hex dump, an ASCII dump follows, replacing all unprintable characters (control
+/// characters or characters whose value is larger than 127) with a `.`.
+/// This also prints relocations adequately.
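+///
+/// For example, a small four-byte allocation might render roughly like this
+/// (hypothetical contents, alignment padding elided):
+///
+/// ```text
+/// size: 4, align: 4) {
+///     68 65 6c 6c │ hell
+/// }
+/// ```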
+pub fn display_allocation<'a, 'tcx, Prov, Extra>(
+ tcx: TyCtxt<'tcx>,
+ alloc: &'a Allocation<Prov, Extra>,
+) -> RenderAllocation<'a, 'tcx, Prov, Extra> {
+ RenderAllocation { tcx, alloc }
+}
+
+#[doc(hidden)]
+pub struct RenderAllocation<'a, 'tcx, Prov, Extra> {
+ tcx: TyCtxt<'tcx>,
+ alloc: &'a Allocation<Prov, Extra>,
+}
+
+impl<'a, 'tcx, Prov: Provenance, Extra> std::fmt::Display
+ for RenderAllocation<'a, 'tcx, Prov, Extra>
+{
+ fn fmt(&self, w: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ let RenderAllocation { tcx, alloc } = *self;
+ write!(w, "size: {}, align: {})", alloc.size().bytes(), alloc.align.bytes())?;
+ if alloc.size() == Size::ZERO {
+ // We are done.
+ return write!(w, " {{}}");
+ }
+ // Write allocation bytes.
+ writeln!(w, " {{")?;
+ write_allocation_bytes(tcx, alloc, w, " ")?;
+ write!(w, "}}")?;
+ Ok(())
+ }
+}
+
+fn write_allocation_endline(w: &mut dyn std::fmt::Write, ascii: &str) -> std::fmt::Result {
+ for _ in 0..(BYTES_PER_LINE - ascii.chars().count()) {
+ write!(w, " ")?;
+ }
+ writeln!(w, " │ {}", ascii)
+}
+
+/// Number of bytes to print per allocation hex dump line.
+const BYTES_PER_LINE: usize = 16;
+
+/// Prints the line start address and returns the new line start address.
+fn write_allocation_newline(
+ w: &mut dyn std::fmt::Write,
+ mut line_start: Size,
+ ascii: &str,
+ pos_width: usize,
+ prefix: &str,
+) -> Result<Size, std::fmt::Error> {
+ write_allocation_endline(w, ascii)?;
+ line_start += Size::from_bytes(BYTES_PER_LINE);
+ write!(w, "{}0x{:02$x} │ ", prefix, line_start.bytes(), pos_width)?;
+ Ok(line_start)
+}
+
+/// The `prefix` argument allows callers to add an arbitrary prefix before each line (even if there
+/// is only one line). Note that your prefix should contain a trailing space as the lines are
+/// printed directly after it.
+fn write_allocation_bytes<'tcx, Prov: Provenance, Extra>(
+ tcx: TyCtxt<'tcx>,
+ alloc: &Allocation<Prov, Extra>,
+ w: &mut dyn std::fmt::Write,
+ prefix: &str,
+) -> std::fmt::Result {
+ let num_lines = alloc.size().bytes_usize().saturating_sub(BYTES_PER_LINE);
+ // Number of chars needed to represent all line numbers.
+ let pos_width = hex_number_length(alloc.size().bytes());
+
+ if num_lines > 0 {
+ write!(w, "{}0x{:02$x} │ ", prefix, 0, pos_width)?;
+ } else {
+ write!(w, "{}", prefix)?;
+ }
+
+ let mut i = Size::ZERO;
+ let mut line_start = Size::ZERO;
+
+ let ptr_size = tcx.data_layout.pointer_size;
+
+ let mut ascii = String::new();
+
+ let oversized_ptr = |target: &mut String, width| {
+ if target.len() > width {
+ write!(target, " ({} ptr bytes)", ptr_size.bytes()).unwrap();
+ }
+ };
+
+ while i < alloc.size() {
+ // The line start already has a space. While we could remove that space from the line start
+ // printing and unconditionally print a space here, that would cause the single-line case
+ // to have a single space before it, which looks weird.
+ if i != line_start {
+ write!(w, " ")?;
+ }
+ if let Some(&prov) = alloc.relocations().get(&i) {
+ // Memory with a relocation must be defined
+ assert!(alloc.init_mask().is_range_initialized(i, i + ptr_size).is_ok());
+ let j = i.bytes_usize();
+ let offset = alloc
+ .inspect_with_uninit_and_ptr_outside_interpreter(j..j + ptr_size.bytes_usize());
+ let offset = read_target_uint(tcx.data_layout.endian, offset).unwrap();
+ let offset = Size::from_bytes(offset);
+ let relocation_width = |bytes| bytes * 3;
+ let ptr = Pointer::new(prov, offset);
+ let mut target = format!("{:?}", ptr);
+ if target.len() > relocation_width(ptr_size.bytes_usize() - 1) {
+ // This is too long, try to save some space.
+ target = format!("{:#?}", ptr);
+ }
+ if ((i - line_start) + ptr_size).bytes_usize() > BYTES_PER_LINE {
+ // This branch handles the situation where a relocation starts in the current line
+ // but ends in the next one.
+ let remainder = Size::from_bytes(BYTES_PER_LINE) - (i - line_start);
+ let overflow = ptr_size - remainder;
+ let remainder_width = relocation_width(remainder.bytes_usize()) - 2;
+ let overflow_width = relocation_width(overflow.bytes_usize() - 1) + 1;
+ ascii.push('╾');
+ for _ in 0..remainder.bytes() - 1 {
+ ascii.push('─');
+ }
+ if overflow_width > remainder_width && overflow_width >= target.len() {
+ // The case where the relocation fits into the part in the next line
+ write!(w, "╾{0:─^1$}", "", remainder_width)?;
+ line_start =
+ write_allocation_newline(w, line_start, &ascii, pos_width, prefix)?;
+ ascii.clear();
+ write!(w, "{0:─^1$}╼", target, overflow_width)?;
+ } else {
+ oversized_ptr(&mut target, remainder_width);
+ write!(w, "╾{0:─^1$}", target, remainder_width)?;
+ line_start =
+ write_allocation_newline(w, line_start, &ascii, pos_width, prefix)?;
+ write!(w, "{0:─^1$}╼", "", overflow_width)?;
+ ascii.clear();
+ }
+ for _ in 0..overflow.bytes() - 1 {
+ ascii.push('─');
+ }
+ ascii.push('╼');
+ i += ptr_size;
+ continue;
+ } else {
+ // This branch handles a relocation that starts and ends in the current line.
+ let relocation_width = relocation_width(ptr_size.bytes_usize() - 1);
+ oversized_ptr(&mut target, relocation_width);
+ ascii.push('╾');
+ write!(w, "╾{0:─^1$}╼", target, relocation_width)?;
+ for _ in 0..ptr_size.bytes() - 2 {
+ ascii.push('─');
+ }
+ ascii.push('╼');
+ i += ptr_size;
+ }
+ } else if alloc.init_mask().is_range_initialized(i, i + Size::from_bytes(1)).is_ok() {
+ let j = i.bytes_usize();
+
+ // Checked definedness (and thus range) and relocations. This access also doesn't
+ // influence interpreter execution but is only for debugging.
+ let c = alloc.inspect_with_uninit_and_ptr_outside_interpreter(j..j + 1)[0];
+ write!(w, "{:02x}", c)?;
+ if c.is_ascii_control() || c >= 0x80 {
+ ascii.push('.');
+ } else {
+ ascii.push(char::from(c));
+ }
+ i += Size::from_bytes(1);
+ } else {
+ write!(w, "__")?;
+ ascii.push('░');
+ i += Size::from_bytes(1);
+ }
+ // Print a new line header if the next line still has some bytes to print.
+ if i == line_start + Size::from_bytes(BYTES_PER_LINE) && i != alloc.size() {
+ line_start = write_allocation_newline(w, line_start, &ascii, pos_width, prefix)?;
+ ascii.clear();
+ }
+ }
+ write_allocation_endline(w, &ascii)?;
+
+ Ok(())
+}
+
+fn write_mir_sig(tcx: TyCtxt<'_>, body: &Body<'_>, w: &mut dyn Write) -> io::Result<()> {
+ use rustc_hir::def::DefKind;
+
+ trace!("write_mir_sig: {:?}", body.source.instance);
+ let def_id = body.source.def_id();
+ let kind = tcx.def_kind(def_id);
+ let is_function = match kind {
+ DefKind::Fn | DefKind::AssocFn | DefKind::Ctor(..) => true,
+ _ => tcx.is_closure(def_id),
+ };
+ match (kind, body.source.promoted) {
+ (_, Some(i)) => write!(w, "{:?} in ", i)?,
+ (DefKind::Const | DefKind::AssocConst, _) => write!(w, "const ")?,
+ (DefKind::Static(hir::Mutability::Not), _) => write!(w, "static ")?,
+ (DefKind::Static(hir::Mutability::Mut), _) => write!(w, "static mut ")?,
+ (_, _) if is_function => write!(w, "fn ")?,
+ (DefKind::AnonConst | DefKind::InlineConst, _) => {} // things like anon const, not an item
+ _ => bug!("Unexpected def kind {:?}", kind),
+ }
+
+ ty::print::with_forced_impl_filename_line! {
+ // see notes on #41697 elsewhere
+ write!(w, "{}", tcx.def_path_str(def_id))?
+ }
+
+ if body.source.promoted.is_none() && is_function {
+ write!(w, "(")?;
+
+ // fn argument types.
+ for (i, arg) in body.args_iter().enumerate() {
+ if i != 0 {
+ write!(w, ", ")?;
+ }
+ write!(w, "{:?}: {}", Place::from(arg), body.local_decls[arg].ty)?;
+ }
+
+ write!(w, ") -> {}", body.return_ty())?;
+ } else {
+ assert_eq!(body.arg_count, 0);
+ write!(w, ": {} =", body.return_ty())?;
+ }
+
+ if let Some(yield_ty) = body.yield_ty() {
+ writeln!(w)?;
+ writeln!(w, "yields {}", yield_ty)?;
+ }
+
+ write!(w, " ")?;
+ // Next thing that gets printed is the opening {
+
+ Ok(())
+}
+
+fn write_user_type_annotations(
+ tcx: TyCtxt<'_>,
+ body: &Body<'_>,
+ w: &mut dyn Write,
+) -> io::Result<()> {
+ if !body.user_type_annotations.is_empty() {
+ writeln!(w, "| User Type Annotations")?;
+ }
+ for (index, annotation) in body.user_type_annotations.iter_enumerated() {
+ writeln!(
+ w,
+ "| {:?}: user_ty: {:?}, span: {}, inferred_ty: {:?}",
+ index.index(),
+ annotation.user_ty,
+ tcx.sess.source_map().span_to_embeddable_string(annotation.span),
+ annotation.inferred_ty,
+ )?;
+ }
+ if !body.user_type_annotations.is_empty() {
+ writeln!(w, "|")?;
+ }
+ Ok(())
+}
+
+pub fn dump_mir_def_ids(tcx: TyCtxt<'_>, single: Option<DefId>) -> Vec<DefId> {
+ if let Some(i) = single {
+ vec![i]
+ } else {
+ tcx.mir_keys(()).iter().map(|def_id| def_id.to_def_id()).collect()
+ }
+}
+
+/// Returns the number of characters needed to print `x` in hexadecimal.
+///
+/// ```ignore (cannot-test-private-function)
+/// assert_eq!(1, hex_number_length(0));
+/// assert_eq!(1, hex_number_length(1));
+/// assert_eq!(2, hex_number_length(16));
+/// ```
+fn hex_number_length(x: u64) -> usize {
+ if x == 0 {
+ return 1;
+ }
+ let mut length = 0;
+ let mut x_left = x;
+ while x_left > 0 {
+ x_left /= 16;
+ length += 1;
+ }
+ length
+}
diff --git a/compiler/rustc_middle/src/mir/query.rs b/compiler/rustc_middle/src/mir/query.rs
new file mode 100644
index 000000000..dd9f8795f
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/query.rs
@@ -0,0 +1,476 @@
+//! Values computed by queries that use MIR.
+
+use crate::mir::{Body, ConstantKind, Promoted};
+use crate::ty::{self, OpaqueHiddenType, Ty, TyCtxt};
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::vec_map::VecMap;
+use rustc_errors::ErrorGuaranteed;
+use rustc_hir as hir;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_index::bit_set::BitMatrix;
+use rustc_index::vec::IndexVec;
+use rustc_span::Span;
+use rustc_target::abi::VariantIdx;
+use smallvec::SmallVec;
+use std::cell::Cell;
+use std::fmt::{self, Debug};
+
+use super::{Field, SourceInfo};
+
+#[derive(Copy, Clone, PartialEq, TyEncodable, TyDecodable, HashStable, Debug)]
+pub enum UnsafetyViolationKind {
+ /// Unsafe operation outside `unsafe`.
+ General,
+ /// Unsafe operation in an `unsafe fn` but outside an `unsafe` block.
+ /// Has to be handled as a lint for backwards compatibility.
+ UnsafeFn,
+}
+
+#[derive(Copy, Clone, PartialEq, TyEncodable, TyDecodable, HashStable, Debug)]
+pub enum UnsafetyViolationDetails {
+ CallToUnsafeFunction,
+ UseOfInlineAssembly,
+ InitializingTypeWith,
+ CastOfPointerToInt,
+ UseOfMutableStatic,
+ UseOfExternStatic,
+ DerefOfRawPointer,
+ AccessToUnionField,
+ MutationOfLayoutConstrainedField,
+ BorrowOfLayoutConstrainedField,
+ CallToFunctionWith,
+}
+
+impl UnsafetyViolationDetails {
+ pub fn description_and_note(&self) -> (&'static str, &'static str) {
+ use UnsafetyViolationDetails::*;
+ match self {
+ CallToUnsafeFunction => (
+ "call to unsafe function",
+ "consult the function's documentation for information on how to avoid undefined \
+ behavior",
+ ),
+ UseOfInlineAssembly => (
+ "use of inline assembly",
+ "inline assembly is entirely unchecked and can cause undefined behavior",
+ ),
+ InitializingTypeWith => (
+ "initializing type with `rustc_layout_scalar_valid_range` attr",
+ "initializing a layout restricted type's field with a value outside the valid \
+ range is undefined behavior",
+ ),
+ CastOfPointerToInt => {
+ ("cast of pointer to int", "casting pointers to integers in constants")
+ }
+ UseOfMutableStatic => (
+ "use of mutable static",
+ "mutable statics can be mutated by multiple threads: aliasing violations or data \
+ races will cause undefined behavior",
+ ),
+ UseOfExternStatic => (
+ "use of extern static",
+ "extern statics are not controlled by the Rust type system: invalid data, \
+ aliasing violations or data races will cause undefined behavior",
+ ),
+ DerefOfRawPointer => (
+ "dereference of raw pointer",
+ "raw pointers may be null, dangling or unaligned; they can violate aliasing rules \
+ and cause data races: all of these are undefined behavior",
+ ),
+ AccessToUnionField => (
+ "access to union field",
+ "the field may not be properly initialized: using uninitialized data will cause \
+ undefined behavior",
+ ),
+ MutationOfLayoutConstrainedField => (
+ "mutation of layout constrained field",
+ "mutating layout constrained fields cannot statically be checked for valid values",
+ ),
+ BorrowOfLayoutConstrainedField => (
+ "borrow of layout constrained field with interior mutability",
+ "references to fields of layout constrained fields lose the constraints. Coupled \
+ with interior mutability, the field can be changed to invalid values",
+ ),
+ CallToFunctionWith => (
+ "call to function with `#[target_feature]`",
+ "can only be called if the required target features are available",
+ ),
+ }
+ }
+}
+
+#[derive(Copy, Clone, PartialEq, TyEncodable, TyDecodable, HashStable, Debug)]
+pub struct UnsafetyViolation {
+ pub source_info: SourceInfo,
+ pub lint_root: hir::HirId,
+ pub kind: UnsafetyViolationKind,
+ pub details: UnsafetyViolationDetails,
+}
+
+#[derive(Copy, Clone, PartialEq, TyEncodable, TyDecodable, HashStable, Debug)]
+pub enum UnusedUnsafe {
+ /// `unsafe` block contains no unsafe operations
+ /// > ``unnecessary `unsafe` block``
+ Unused,
+ /// `unsafe` block nested under another (used) `unsafe` block
+ /// > ``… because it's nested under this `unsafe` block``
+ InUnsafeBlock(hir::HirId),
+ /// `unsafe` block nested under `unsafe fn`
+ /// > ``… because it's nested under this `unsafe fn` ``
+ ///
+ /// the second HirId here indicates the first usage of the `unsafe` block,
+ /// which allows retrieval of the LintLevelSource for why that operation would
+ /// have been permitted without the block
+ InUnsafeFn(hir::HirId, hir::HirId),
+}
+
+#[derive(Copy, Clone, PartialEq, TyEncodable, TyDecodable, HashStable, Debug)]
+pub enum UsedUnsafeBlockData {
+ SomeDisallowedInUnsafeFn,
+ // the HirId here indicates the first usage of the `unsafe` block
+ // (i.e. the one that's first encountered in the MIR traversal of the unsafety check)
+ AllAllowedInUnsafeFn(hir::HirId),
+}
+
+#[derive(TyEncodable, TyDecodable, HashStable, Debug)]
+pub struct UnsafetyCheckResult {
+ /// Violations that are propagated *upwards* from this function.
+ pub violations: Vec<UnsafetyViolation>,
+
+ /// Used `unsafe` blocks in this function. This is used for the "unused_unsafe" lint.
+ ///
+ /// The keys are the used `unsafe` blocks; the `UsedUnsafeBlockData` value indicates
+ /// whether any of the usages happen at a place that doesn't allow `unsafe_op_in_unsafe_fn`.
+ pub used_unsafe_blocks: FxHashMap<hir::HirId, UsedUnsafeBlockData>,
+
+ /// This is `Some` iff the item is not a closure.
+ pub unused_unsafes: Option<Vec<(hir::HirId, UnusedUnsafe)>>,
+}
+
+rustc_index::newtype_index! {
+ pub struct GeneratorSavedLocal {
+ derive [HashStable]
+ DEBUG_FORMAT = "_{}",
+ }
+}
+
+/// The layout of generator state.
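+///
+/// As a made-up example: a generator that keeps a single `String` live across
+/// one `yield` would record the `String` type in `field_tys`, and the
+/// suspension variant in `variant_fields` would point at the corresponding
+/// `GeneratorSavedLocal`.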
+#[derive(Clone, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
+pub struct GeneratorLayout<'tcx> {
+ /// The type of every local stored inside the generator.
+ pub field_tys: IndexVec<GeneratorSavedLocal, Ty<'tcx>>,
+
+ /// Which of the above fields are in each variant. Note that one field may
+ /// be stored in multiple variants.
+ pub variant_fields: IndexVec<VariantIdx, IndexVec<Field, GeneratorSavedLocal>>,
+
+ /// The source that led to each variant being created (usually, a yield or
+ /// await).
+ pub variant_source_info: IndexVec<VariantIdx, SourceInfo>,
+
+ /// Which saved locals are storage-live at the same time. Locals that do not
+ /// have conflicts with each other are allowed to overlap in the computed
+ /// layout.
+ pub storage_conflicts: BitMatrix<GeneratorSavedLocal, GeneratorSavedLocal>,
+}
+
+impl Debug for GeneratorLayout<'_> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// Prints an iterator of (key, value) tuples as a map.
+ struct MapPrinter<'a, K, V>(Cell<Option<Box<dyn Iterator<Item = (K, V)> + 'a>>>);
+ impl<'a, K, V> MapPrinter<'a, K, V> {
+ fn new(iter: impl Iterator<Item = (K, V)> + 'a) -> Self {
+ Self(Cell::new(Some(Box::new(iter))))
+ }
+ }
+ impl<'a, K: Debug, V: Debug> Debug for MapPrinter<'a, K, V> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt.debug_map().entries(self.0.take().unwrap()).finish()
+ }
+ }
+
+ /// Prints the generator variant name.
+ struct GenVariantPrinter(VariantIdx);
+ impl From<VariantIdx> for GenVariantPrinter {
+ fn from(idx: VariantIdx) -> Self {
+ GenVariantPrinter(idx)
+ }
+ }
+ impl Debug for GenVariantPrinter {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let variant_name = ty::GeneratorSubsts::variant_name(self.0);
+ if fmt.alternate() {
+ write!(fmt, "{:9}({:?})", variant_name, self.0)
+ } else {
+ write!(fmt, "{}", variant_name)
+ }
+ }
+ }
+
+ /// Forces its contents to print in regular mode instead of alternate mode.
+ struct OneLinePrinter<T>(T);
+ impl<T: Debug> Debug for OneLinePrinter<T> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(fmt, "{:?}", self.0)
+ }
+ }
+
+ fmt.debug_struct("GeneratorLayout")
+ .field("field_tys", &MapPrinter::new(self.field_tys.iter_enumerated()))
+ .field(
+ "variant_fields",
+ &MapPrinter::new(
+ self.variant_fields
+ .iter_enumerated()
+ .map(|(k, v)| (GenVariantPrinter(k), OneLinePrinter(v))),
+ ),
+ )
+ .field("storage_conflicts", &self.storage_conflicts)
+ .finish()
+ }
+}
+
+#[derive(Debug, TyEncodable, TyDecodable, HashStable)]
+pub struct BorrowCheckResult<'tcx> {
+ /// All the opaque types that are restricted to concrete types
+ /// by this function. Unlike the value in `TypeckResults`, this has
+ /// unerased regions.
+ pub concrete_opaque_types: VecMap<LocalDefId, OpaqueHiddenType<'tcx>>,
+ pub closure_requirements: Option<ClosureRegionRequirements<'tcx>>,
+ pub used_mut_upvars: SmallVec<[Field; 8]>,
+ pub tainted_by_errors: Option<ErrorGuaranteed>,
+}
+
+/// The result of the `mir_const_qualif` query.
+///
+/// Each field (except `error_occurred`) corresponds to an implementer of the `Qualif` trait in
+/// `rustc_const_eval/src/transform/check_consts/qualifs.rs`. See that file for more information on each
+/// `Qualif`.
+#[derive(Clone, Copy, Debug, Default, TyEncodable, TyDecodable, HashStable)]
+pub struct ConstQualifs {
+ pub has_mut_interior: bool,
+ pub needs_drop: bool,
+ pub needs_non_const_drop: bool,
+ pub custom_eq: bool,
+ pub tainted_by_errors: Option<ErrorGuaranteed>,
+}
+
+/// After we borrow check a closure, we are left with various
+/// requirements that we have inferred between the free regions that
+/// appear in the closure's signature or on its field types. These
+/// requirements are then verified and proved by the closure's
+/// creating function. This struct encodes those requirements.
+///
+/// The requirements are listed as being between various `RegionVid`. The 0th
+/// region refers to `'static`; subsequent region vids refer to the free
+/// regions that appear in the closure's (or generator's) type, in order of
+/// appearance. (This numbering is actually defined by the `UniversalRegions`
+/// struct in the NLL region checker. See for example
+/// `UniversalRegions::closure_mapping`.) Note the free regions in the
+/// closure's signature and captures are erased.
+///
+/// Example: If type check produces a closure with the closure substs:
+///
+/// ```text
+/// ClosureSubsts = [
+/// 'a, // From the parent.
+/// 'b,
+/// i8, // the "closure kind"
+/// for<'x> fn(&'<erased> &'x u32) -> &'x u32, // the "closure signature"
+/// &'<erased> String, // some upvar
+/// ]
+/// ```
+///
+/// We would "renumber" each free region to a unique vid, as follows:
+///
+/// ```text
+/// ClosureSubsts = [
+/// '1, // From the parent.
+/// '2,
+/// i8, // the "closure kind"
+/// for<'x> fn(&'3 &'x u32) -> &'x u32, // the "closure signature"
+/// &'4 String, // some upvar
+/// ]
+/// ```
+///
+/// Now the code might impose a requirement like `'1: '2`. When an
+/// instance of the closure is created, the corresponding free regions
+/// can be extracted from its type and constrained to have the given
+/// outlives relationship.
+///
+/// In some cases, we have to record outlives requirements between types and
+/// regions as well. In that case, if those types include any regions, those
+/// regions are recorded using their external names (`ReStatic`,
+/// `ReEarlyBound`, `ReFree`). We use these because in a query response we
+/// cannot use `ReVar` (which is what we use internally within the rest of the
+/// NLL code).
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable)]
+pub struct ClosureRegionRequirements<'tcx> {
+ /// The number of external regions defined on the closure. In our
+ /// example above, it would be 3 -- one for `'static`, then `'1`
+ /// and `'2`. This is just used for a sanity check later on, to
+ /// make sure that the number of regions we see at the callsite
+ /// matches.
+ pub num_external_vids: usize,
+
+ /// Requirements between the various free regions defined in
+ /// indices.
+ pub outlives_requirements: Vec<ClosureOutlivesRequirement<'tcx>>,
+}
+
+/// Indicates an outlives-constraint, either between a type and a free
+/// region or between two free regions declared on the closure.
+#[derive(Copy, Clone, Debug, TyEncodable, TyDecodable, HashStable)]
+pub struct ClosureOutlivesRequirement<'tcx> {
+ // This region or type ...
+ pub subject: ClosureOutlivesSubject<'tcx>,
+
+ // ... must outlive this one.
+ pub outlived_free_region: ty::RegionVid,
+
+ // If not, report an error here ...
+ pub blame_span: Span,
+
+ // ... due to this reason.
+ pub category: ConstraintCategory<'tcx>,
+}
+
+// Make sure this enum doesn't unintentionally grow
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+rustc_data_structures::static_assert_size!(ConstraintCategory<'_>, 16);
+
+/// Outlives-constraints can be categorized to determine whether and why they
+/// are interesting (for error reporting). Order of variants indicates sort
+/// order of the category, thereby influencing diagnostic output.
+///
+/// See also `rustc_const_eval::borrow_check::constraints`.
+#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash)]
+#[derive(TyEncodable, TyDecodable, HashStable)]
+pub enum ConstraintCategory<'tcx> {
+ Return(ReturnConstraint),
+ Yield,
+ UseAsConst,
+ UseAsStatic,
+ TypeAnnotation,
+ Cast,
+
+ /// A constraint that came from checking the body of a closure.
+ ///
+ /// We try to get the category that the closure used when reporting this.
+ ClosureBounds,
+
+ /// Contains the function type if available.
+ CallArgument(Option<Ty<'tcx>>),
+ CopyBound,
+ SizedBound,
+ Assignment,
+ /// A constraint that came from a usage of a variable (e.g. in an ADT expression
+ /// like `Foo { field: my_val }`)
+ Usage,
+ OpaqueType,
+ ClosureUpvar(Field),
+
+ /// A constraint from a user-written predicate
+ /// with the provided span, written on the item
+ /// with the given `DefId`
+ Predicate(Span),
+
+ /// A "boring" constraint (caused by the given location) is one that
+ /// the user probably doesn't want to see described in diagnostics,
+ /// because it is kind of an artifact of the type system setup.
+ Boring,
+ // Boring and applicable everywhere.
+ BoringNoLocation,
+
+ /// A constraint that doesn't correspond to anything the user sees.
+ Internal,
+}
+
+#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash)]
+#[derive(TyEncodable, TyDecodable, HashStable)]
+pub enum ReturnConstraint {
+ Normal,
+ ClosureUpvar(Field),
+}
+
+/// The subject of a `ClosureOutlivesRequirement` -- that is, the thing
+/// that must outlive some region.
+#[derive(Copy, Clone, Debug, TyEncodable, TyDecodable, HashStable)]
+pub enum ClosureOutlivesSubject<'tcx> {
+ /// Subject is a type, typically a type parameter, but could also
+ /// be a projection. Indicates a requirement like `T: 'a` being
+ /// passed to the caller, where the type here is `T`.
+ ///
+ /// The type here is guaranteed not to contain any free regions at
+ /// present.
+ Ty(Ty<'tcx>),
+
+ /// Subject is a free region from the closure. Indicates a requirement
+ /// like `'a: 'b` being passed to the caller; the region here is `'a`.
+ Region(ty::RegionVid),
+}
+
+/// The constituent parts of a type level constant of kind ADT or array.
+#[derive(Copy, Clone, Debug, HashStable)]
+pub struct DestructuredConst<'tcx> {
+ pub variant: Option<VariantIdx>,
+ pub fields: &'tcx [ty::Const<'tcx>],
+}
+
+/// The constituent parts of a MIR constant of kind ADT or array.
+#[derive(Copy, Clone, Debug, HashStable)]
+pub struct DestructuredMirConstant<'tcx> {
+ pub variant: Option<VariantIdx>,
+ pub fields: &'tcx [ConstantKind<'tcx>],
+}
+
+/// Coverage information summarized from a MIR if instrumented for source code coverage (see
+/// compiler option `-Cinstrument-coverage`). This information is generated by the
+/// `InstrumentCoverage` MIR pass and can be retrieved via the `coverageinfo` query.
+#[derive(Clone, TyEncodable, TyDecodable, Debug, HashStable)]
+pub struct CoverageInfo {
+ /// The total number of coverage region counters added to the MIR `Body`.
+ pub num_counters: u32,
+
+ /// The total number of coverage region counter expressions added to the MIR `Body`.
+ pub num_expressions: u32,
+}
+
+/// Shims which make dealing with `WithOptConstParam` easier.
+///
+/// For more information on why this is needed, consider looking
+/// at the docs for `WithOptConstParam` itself.
+impl<'tcx> TyCtxt<'tcx> {
+ #[inline]
+ pub fn mir_const_qualif_opt_const_arg(
+ self,
+ def: ty::WithOptConstParam<LocalDefId>,
+ ) -> ConstQualifs {
+ if let Some(param_did) = def.const_param_did {
+ self.mir_const_qualif_const_arg((def.did, param_did))
+ } else {
+ self.mir_const_qualif(def.did)
+ }
+ }
+
+ #[inline]
+ pub fn promoted_mir_opt_const_arg(
+ self,
+ def: ty::WithOptConstParam<DefId>,
+ ) -> &'tcx IndexVec<Promoted, Body<'tcx>> {
+ if let Some((did, param_did)) = def.as_const_arg() {
+ self.promoted_mir_of_const_arg((did, param_did))
+ } else {
+ self.promoted_mir(def.did)
+ }
+ }
+
+ #[inline]
+ pub fn mir_for_ctfe_opt_const_arg(self, def: ty::WithOptConstParam<DefId>) -> &'tcx Body<'tcx> {
+ if let Some((did, param_did)) = def.as_const_arg() {
+ self.mir_for_ctfe_of_const_arg((did, param_did))
+ } else {
+ self.mir_for_ctfe(def.did)
+ }
+ }
+}
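+
+// Hedged usage sketch (hypothetical caller): these shims let callers write
+//
+//     let qualifs = tcx.mir_const_qualif_opt_const_arg(def);
+//
+// instead of matching on `def.const_param_did` at every call site; the shim
+// dispatches to `mir_const_qualif` or `mir_const_qualif_const_arg` as above.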
diff --git a/compiler/rustc_middle/src/mir/spanview.rs b/compiler/rustc_middle/src/mir/spanview.rs
new file mode 100644
index 000000000..4418b848e
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/spanview.rs
@@ -0,0 +1,691 @@
+use rustc_hir::def_id::DefId;
+use rustc_middle::hir;
+use rustc_middle::mir::*;
+use rustc_middle::ty::TyCtxt;
+use rustc_session::config::MirSpanview;
+use rustc_span::{BytePos, Pos, Span, SyntaxContext};
+
+use std::cmp;
+use std::io::{self, Write};
+
+pub const TOOLTIP_INDENT: &str = " ";
+
+const CARET: char = '\u{2038}'; // Unicode `CARET`
+const ANNOTATION_LEFT_BRACKET: char = '\u{298a}'; // Unicode `Z NOTATION RIGHT BINDING BRACKET`
+const ANNOTATION_RIGHT_BRACKET: char = '\u{2989}'; // Unicode `Z NOTATION LEFT BINDING BRACKET`
+const NEW_LINE_SPAN: &str = "</span>\n<span class=\"line\">";
+const HEADER: &str = r#"<!DOCTYPE html>
+<html>
+<head>"#;
+const START_BODY: &str = r#"</head>
+<body>"#;
+const FOOTER: &str = r#"</body>
+</html>"#;
+
+const STYLE_SECTION: &str = r#"<style>
+ .line {
+ counter-increment: line;
+ }
+ .line:before {
+ content: counter(line) ": ";
+ font-family: Menlo, Monaco, monospace;
+ font-style: italic;
+ width: 3.8em;
+ display: inline-block;
+ text-align: right;
+ filter: opacity(50%);
+ -webkit-user-select: none;
+ }
+ .code {
+ color: #dddddd;
+ background-color: #222222;
+ font-family: Menlo, Monaco, monospace;
+ line-height: 1.4em;
+ border-bottom: 2px solid #222222;
+ white-space: pre;
+ display: inline-block;
+ }
+ .odd {
+ background-color: #55bbff;
+ color: #223311;
+ }
+ .even {
+ background-color: #ee7756;
+ color: #551133;
+ }
+ .code {
+ --index: calc(var(--layer) - 1);
+ padding-top: calc(var(--index) * 0.15em);
+ filter:
+ hue-rotate(calc(var(--index) * 25deg))
+ saturate(calc(100% - (var(--index) * 2%)))
+ brightness(calc(100% - (var(--index) * 1.5%)));
+ }
+ .annotation {
+ color: #4444ff;
+ font-family: monospace;
+ font-style: italic;
+ display: none;
+ -webkit-user-select: none;
+ }
+ body:active .annotation {
+ /* requires holding mouse down anywhere on the page */
+ display: inline-block;
+ }
+ span:hover .annotation {
+ /* requires hover over a span ONLY on its first line */
+ display: inline-block;
+ }
+</style>"#;
+
+/// Metadata to highlight the span of a MIR BasicBlock, Statement, or Terminator.
+#[derive(Clone, Debug)]
+pub struct SpanViewable {
+ pub bb: BasicBlock,
+ pub span: Span,
+ pub id: String,
+ pub tooltip: String,
+}
+
+/// Write a spanview HTML+CSS file to analyze MIR element spans.
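+///
+/// A minimal usage sketch (hypothetical caller; assumes a `tcx`, a borrowed
+/// `body`, and an open file are in scope):
+///
+/// ```ignore (illustrative)
+/// let mut file = std::fs::File::create("my_fn.html")?;
+/// write_mir_fn_spanview(tcx, body, MirSpanview::Statement, "my_fn", &mut file)?;
+/// ```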
+pub fn write_mir_fn_spanview<'tcx, W>(
+ tcx: TyCtxt<'tcx>,
+ body: &Body<'tcx>,
+ spanview: MirSpanview,
+ title: &str,
+ w: &mut W,
+) -> io::Result<()>
+where
+ W: Write,
+{
+ let def_id = body.source.def_id();
+ let hir_body = match hir_body(tcx, def_id) {
+ Some(hir_body) => hir_body,
+ None => return Ok(()),
+ };
+ let body_span = hir_body.value.span;
+ let mut span_viewables = Vec::new();
+ for (bb, data) in body.basic_blocks().iter_enumerated() {
+ match spanview {
+ MirSpanview::Statement => {
+ for (i, statement) in data.statements.iter().enumerate() {
+ if let Some(span_viewable) =
+ statement_span_viewable(tcx, body_span, bb, i, statement)
+ {
+ span_viewables.push(span_viewable);
+ }
+ }
+ if let Some(span_viewable) = terminator_span_viewable(tcx, body_span, bb, data) {
+ span_viewables.push(span_viewable);
+ }
+ }
+ MirSpanview::Terminator => {
+ if let Some(span_viewable) = terminator_span_viewable(tcx, body_span, bb, data) {
+ span_viewables.push(span_viewable);
+ }
+ }
+ MirSpanview::Block => {
+ if let Some(span_viewable) = block_span_viewable(tcx, body_span, bb, data) {
+ span_viewables.push(span_viewable);
+ }
+ }
+ }
+ }
+ write_document(tcx, fn_span(tcx, def_id), span_viewables, title, w)?;
+ Ok(())
+}
+
+/// Generate a spanview HTML+CSS document for the given span (typically a function's span), and a
+/// pre-generated list of `SpanViewable`s.
+pub fn write_document<'tcx, W>(
+ tcx: TyCtxt<'tcx>,
+ spanview_span: Span,
+ mut span_viewables: Vec<SpanViewable>,
+ title: &str,
+ w: &mut W,
+) -> io::Result<()>
+where
+ W: Write,
+{
+ let mut from_pos = spanview_span.lo();
+ let end_pos = spanview_span.hi();
+ let source_map = tcx.sess.source_map();
+ let start = source_map.lookup_char_pos(from_pos);
+ let indent_to_initial_start_col = " ".repeat(start.col.to_usize());
+ debug!(
+ "spanview_span={:?}; source is:\n{}{}",
+ spanview_span,
+ indent_to_initial_start_col,
+ source_map.span_to_snippet(spanview_span).expect("function should have printable source")
+ );
+ writeln!(w, "{}", HEADER)?;
+ writeln!(w, "<title>{}</title>", title)?;
+ writeln!(w, "{}", STYLE_SECTION)?;
+ writeln!(w, "{}", START_BODY)?;
+ write!(
+ w,
+ r#"<div class="code" style="counter-reset: line {}"><span class="line">{}"#,
+ start.line - 1,
+ indent_to_initial_start_col,
+ )?;
+ span_viewables.sort_unstable_by(|a, b| {
+ let a = a.span;
+ let b = b.span;
+ if a.lo() == b.lo() {
+ // Sort hi() in reverse order so shorter spans are attempted after longer spans.
+ // This should give shorter spans a higher "layer", so they are not covered by
+ // the longer spans.
+ b.hi().partial_cmp(&a.hi())
+ } else {
+ a.lo().partial_cmp(&b.lo())
+ }
+ .unwrap()
+ });
+ let mut ordered_viewables = &span_viewables[..];
+ const LOWEST_VIEWABLE_LAYER: usize = 1;
+ let mut alt = false;
+ while !ordered_viewables.is_empty() {
+ debug!(
+ "calling write_next_viewable with from_pos={}, end_pos={}, and viewables len={}",
+ from_pos.to_usize(),
+ end_pos.to_usize(),
+ ordered_viewables.len()
+ );
+ let curr_id = &ordered_viewables[0].id;
+ let (next_from_pos, next_ordered_viewables) = write_next_viewable_with_overlaps(
+ tcx,
+ from_pos,
+ end_pos,
+ ordered_viewables,
+ alt,
+ LOWEST_VIEWABLE_LAYER,
+ w,
+ )?;
+ debug!(
+ "DONE calling write_next_viewable, with new from_pos={}, \
+ and remaining viewables len={}",
+ next_from_pos.to_usize(),
+ next_ordered_viewables.len()
+ );
+ assert!(
+ from_pos != next_from_pos || ordered_viewables.len() != next_ordered_viewables.len(),
+ "write_next_viewable_with_overlaps() must make a state change"
+ );
+ from_pos = next_from_pos;
+ if next_ordered_viewables.len() != ordered_viewables.len() {
+ ordered_viewables = next_ordered_viewables;
+ if let Some(next_ordered_viewable) = ordered_viewables.first() {
+ if &next_ordered_viewable.id != curr_id {
+ alt = !alt;
+ }
+ }
+ }
+ }
+ if from_pos < end_pos {
+ write_coverage_gap(tcx, from_pos, end_pos, w)?;
+ }
+ writeln!(w, r#"</span></div>"#)?;
+ writeln!(w, "{}", FOOTER)?;
+ Ok(())
+}
+
+/// Format a string showing the start line and column, and end line and column within a file.
+pub fn source_range_no_file<'tcx>(tcx: TyCtxt<'tcx>, span: Span) -> String {
+ let source_map = tcx.sess.source_map();
+ let start = source_map.lookup_char_pos(span.lo());
+ let end = source_map.lookup_char_pos(span.hi());
+ format!("{}:{}-{}:{}", start.line, start.col.to_usize() + 1, end.line, end.col.to_usize() + 1)
+}
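+
+// Illustrative: for a span starting at line 3, column 5 and ending at line 3,
+// column 17 (columns are reported 1-based here), this returns "3:5-3:17".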
+
+pub fn statement_kind_name(statement: &Statement<'_>) -> &'static str {
+ use StatementKind::*;
+ match statement.kind {
+ Assign(..) => "Assign",
+ FakeRead(..) => "FakeRead",
+ SetDiscriminant { .. } => "SetDiscriminant",
+ Deinit(..) => "Deinit",
+ StorageLive(..) => "StorageLive",
+ StorageDead(..) => "StorageDead",
+ Retag(..) => "Retag",
+ AscribeUserType(..) => "AscribeUserType",
+ Coverage(..) => "Coverage",
+ CopyNonOverlapping(..) => "CopyNonOverlapping",
+ Nop => "Nop",
+ }
+}
+
+pub fn terminator_kind_name(term: &Terminator<'_>) -> &'static str {
+ use TerminatorKind::*;
+ match term.kind {
+ Goto { .. } => "Goto",
+ SwitchInt { .. } => "SwitchInt",
+ Resume => "Resume",
+ Abort => "Abort",
+ Return => "Return",
+ Unreachable => "Unreachable",
+ Drop { .. } => "Drop",
+ DropAndReplace { .. } => "DropAndReplace",
+ Call { .. } => "Call",
+ Assert { .. } => "Assert",
+ Yield { .. } => "Yield",
+ GeneratorDrop => "GeneratorDrop",
+ FalseEdge { .. } => "FalseEdge",
+ FalseUnwind { .. } => "FalseUnwind",
+ InlineAsm { .. } => "InlineAsm",
+ }
+}
+
+fn statement_span_viewable<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ body_span: Span,
+ bb: BasicBlock,
+ i: usize,
+ statement: &Statement<'tcx>,
+) -> Option<SpanViewable> {
+ let span = statement.source_info.span;
+ if !body_span.contains(span) {
+ return None;
+ }
+ let id = format!("{}[{}]", bb.index(), i);
+ let tooltip = tooltip(tcx, &id, span, vec![statement.clone()], &None);
+ Some(SpanViewable { bb, span, id, tooltip })
+}
+
+fn terminator_span_viewable<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ body_span: Span,
+ bb: BasicBlock,
+ data: &BasicBlockData<'tcx>,
+) -> Option<SpanViewable> {
+ let term = data.terminator();
+ let span = term.source_info.span;
+ if !body_span.contains(span) {
+ return None;
+ }
+ let id = format!("{}:{}", bb.index(), terminator_kind_name(term));
+ let tooltip = tooltip(tcx, &id, span, vec![], &data.terminator);
+ Some(SpanViewable { bb, span, id, tooltip })
+}
+
+fn block_span_viewable<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ body_span: Span,
+ bb: BasicBlock,
+ data: &BasicBlockData<'tcx>,
+) -> Option<SpanViewable> {
+ let span = compute_block_span(data, body_span);
+ if !body_span.contains(span) {
+ return None;
+ }
+ let id = format!("{}", bb.index());
+ let tooltip = tooltip(tcx, &id, span, data.statements.clone(), &data.terminator);
+ Some(SpanViewable { bb, span, id, tooltip })
+}
+
+fn compute_block_span<'tcx>(data: &BasicBlockData<'tcx>, body_span: Span) -> Span {
+ let mut span = data.terminator().source_info.span;
+ for statement_span in data.statements.iter().map(|statement| statement.source_info.span) {
+ // Only combine Spans from the root context, and within the function's body_span.
+ if statement_span.ctxt() == SyntaxContext::root() && body_span.contains(statement_span) {
+ span = span.to(statement_span);
+ }
+ }
+ span
+}
+
+/// Recursively process each ordered span. Spans that overlap will have progressively varying
+/// styles, such as increased padding for each overlap. Non-overlapping adjacent spans will
+/// have alternating style choices, to help distinguish between them when visually adjacent.
+/// The `layer` is incremented for each overlap, and the `alt` bool alternates between true
+/// and false, for each adjacent non-overlapping span. Source code between the spans (code
+/// that is not in any coverage region) has neutral styling.
+fn write_next_viewable_with_overlaps<'tcx, 'b, W>(
+ tcx: TyCtxt<'tcx>,
+ mut from_pos: BytePos,
+ mut to_pos: BytePos,
+ ordered_viewables: &'b [SpanViewable],
+ alt: bool,
+ layer: usize,
+ w: &mut W,
+) -> io::Result<(BytePos, &'b [SpanViewable])>
+where
+ W: Write,
+{
+ let debug_indent = " ".repeat(layer);
+ let (viewable, mut remaining_viewables) =
+ ordered_viewables.split_first().expect("ordered_viewables should have some");
+
+ if from_pos < viewable.span.lo() {
+ debug!(
+ "{}advance from_pos to next SpanViewable (from from_pos={} to viewable.span.lo()={} \
+ of {:?}), with to_pos={}",
+ debug_indent,
+ from_pos.to_usize(),
+ viewable.span.lo().to_usize(),
+ viewable.span,
+ to_pos.to_usize()
+ );
+ let hi = cmp::min(viewable.span.lo(), to_pos);
+ write_coverage_gap(tcx, from_pos, hi, w)?;
+ from_pos = hi;
+ if from_pos < viewable.span.lo() {
+ debug!(
+ "{}EARLY RETURN: stopped before getting to next SpanViewable, at {}",
+ debug_indent,
+ from_pos.to_usize()
+ );
+ return Ok((from_pos, ordered_viewables));
+ }
+ }
+
+ if from_pos < viewable.span.hi() {
+ // Set to_pos to the end of this `viewable` to ensure the recursive calls stop writing
+ // with room to print the tail.
+ to_pos = cmp::min(viewable.span.hi(), to_pos);
+ debug!(
+ "{}update to_pos (if not closer) to viewable.span.hi()={}; to_pos is now {}",
+ debug_indent,
+ viewable.span.hi().to_usize(),
+ to_pos.to_usize()
+ );
+ }
+
+ let mut subalt = false;
+ while !remaining_viewables.is_empty() && remaining_viewables[0].span.overlaps(viewable.span) {
+ let overlapping_viewable = &remaining_viewables[0];
+ debug!("{}overlapping_viewable.span={:?}", debug_indent, overlapping_viewable.span);
+
+ let span =
+ trim_span(viewable.span, from_pos, cmp::min(overlapping_viewable.span.lo(), to_pos));
+ let mut some_html_snippet = if from_pos <= viewable.span.hi() || viewable.span.is_empty() {
+ // `viewable` is not yet fully rendered, so start writing the span, up to either the
+ // `to_pos` or the next `overlapping_viewable`, whichever comes first.
+ debug!(
+ "{}make html_snippet (may not write it if early exit) for partial span {:?} \
+ of viewable.span {:?}",
+ debug_indent, span, viewable.span
+ );
+ from_pos = span.hi();
+ make_html_snippet(tcx, span, Some(&viewable))
+ } else {
+ None
+ };
+
+ // Defer writing the HTML snippet (until after early return checks) ONLY for empty spans.
+ // An empty Span with Some(html_snippet) is probably a tail marker. If there is an early
+ // exit, there should be another opportunity to write the tail marker.
+ if !span.is_empty() {
+ if let Some(ref html_snippet) = some_html_snippet {
+ debug!(
+ "{}write html_snippet for that partial span of viewable.span {:?}",
+ debug_indent, viewable.span
+ );
+ write_span(html_snippet, &viewable.tooltip, alt, layer, w)?;
+ }
+ some_html_snippet = None;
+ }
+
+ if from_pos < overlapping_viewable.span.lo() {
+ debug!(
+ "{}EARLY RETURN: from_pos={} has not yet reached the \
+ overlapping_viewable.span {:?}",
+ debug_indent,
+ from_pos.to_usize(),
+ overlapping_viewable.span
+ );
+ // must have reached `to_pos` before reaching the start of the
+ // `overlapping_viewable.span`
+ return Ok((from_pos, ordered_viewables));
+ }
+
+ if from_pos == to_pos
+ && !(from_pos == overlapping_viewable.span.lo() && overlapping_viewable.span.is_empty())
+ {
+ debug!(
+ "{}EARLY RETURN: from_pos=to_pos={} and overlapping_viewable.span {:?} is not \
+ empty, or not from_pos",
+ debug_indent,
+ to_pos.to_usize(),
+ overlapping_viewable.span
+ );
+ // `to_pos` must have occurred before the overlapping viewable. Return
+ // `ordered_viewables` so we can continue rendering the `viewable`, from after the
+ // `to_pos`.
+ return Ok((from_pos, ordered_viewables));
+ }
+
+ if let Some(ref html_snippet) = some_html_snippet {
+ debug!(
+ "{}write html_snippet for that partial span of viewable.span {:?}",
+ debug_indent, viewable.span
+ );
+ write_span(html_snippet, &viewable.tooltip, alt, layer, w)?;
+ }
+
+ debug!(
+ "{}recursively calling write_next_viewable with from_pos={}, to_pos={}, \
+ and viewables len={}",
+ debug_indent,
+ from_pos.to_usize(),
+ to_pos.to_usize(),
+ remaining_viewables.len()
+ );
+ // Write the overlaps (and the overlaps' overlaps, if any) up to `to_pos`.
+ let curr_id = &remaining_viewables[0].id;
+ let (next_from_pos, next_remaining_viewables) = write_next_viewable_with_overlaps(
+ tcx,
+ from_pos,
+ to_pos,
+ &remaining_viewables,
+ subalt,
+ layer + 1,
+ w,
+ )?;
+ debug!(
+ "{}DONE recursively calling write_next_viewable, with new from_pos={}, and remaining \
+ viewables len={}",
+ debug_indent,
+ next_from_pos.to_usize(),
+ next_remaining_viewables.len()
+ );
+ assert!(
+ from_pos != next_from_pos
+ || remaining_viewables.len() != next_remaining_viewables.len(),
+ "write_next_viewable_with_overlaps() must make a state change"
+ );
+ from_pos = next_from_pos;
+ if next_remaining_viewables.len() != remaining_viewables.len() {
+ remaining_viewables = next_remaining_viewables;
+ if let Some(next_ordered_viewable) = remaining_viewables.first() {
+ if &next_ordered_viewable.id != curr_id {
+ subalt = !subalt;
+ }
+ }
+ }
+ }
+ if from_pos <= viewable.span.hi() {
+ let span = trim_span(viewable.span, from_pos, to_pos);
+ debug!(
+ "{}After overlaps, writing (end span?) {:?} of viewable.span {:?}",
+ debug_indent, span, viewable.span
+ );
+ if let Some(ref html_snippet) = make_html_snippet(tcx, span, Some(&viewable)) {
+ from_pos = span.hi();
+ write_span(html_snippet, &viewable.tooltip, alt, layer, w)?;
+ }
+ }
+ debug!("{}RETURN: No more overlap", debug_indent);
+ Ok((
+ from_pos,
+ if from_pos < viewable.span.hi() { ordered_viewables } else { remaining_viewables },
+ ))
+}
+
+#[inline(always)]
+fn write_coverage_gap<'tcx, W>(
+ tcx: TyCtxt<'tcx>,
+ lo: BytePos,
+ hi: BytePos,
+ w: &mut W,
+) -> io::Result<()>
+where
+ W: Write,
+{
+ let span = Span::with_root_ctxt(lo, hi);
+ if let Some(ref html_snippet) = make_html_snippet(tcx, span, None) {
+ write_span(html_snippet, "", false, 0, w)
+ } else {
+ Ok(())
+ }
+}
+
+fn write_span<W>(
+ html_snippet: &str,
+ tooltip: &str,
+ alt: bool,
+ layer: usize,
+ w: &mut W,
+) -> io::Result<()>
+where
+ W: Write,
+{
+ let maybe_alt_class = if layer > 0 {
+ if alt { " odd" } else { " even" }
+ } else {
+ ""
+ };
+ let maybe_title_attr = if !tooltip.is_empty() {
+ format!(" title=\"{}\"", escape_attr(tooltip))
+ } else {
+ "".to_owned()
+ };
+ if layer == 1 {
+ write!(w, "<span>")?;
+ }
+ for (i, line) in html_snippet.lines().enumerate() {
+ if i > 0 {
+ write!(w, "{}", NEW_LINE_SPAN)?;
+ }
+ write!(
+ w,
+ r#"<span class="code{}" style="--layer: {}"{}>{}</span>"#,
+ maybe_alt_class, layer, maybe_title_attr, line
+ )?;
+ }
+ // Check for and translate trailing newlines, because `str::lines()` ignores them
+ if html_snippet.ends_with('\n') {
+ write!(w, "{}", NEW_LINE_SPAN)?;
+ }
+ if layer == 1 {
+ write!(w, "</span>")?;
+ }
+ Ok(())
+}
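+
+// Illustrative output (editor's example): for `layer == 2`, `alt == true`,
+// tooltip `bb0[0]`, and the one-line snippet `let x = 1;`, this writes roughly:
+//
+//     <span class="code odd" style="--layer: 2" title="bb0[0]">let x = 1;</span>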
+
+fn make_html_snippet<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ span: Span,
+ some_viewable: Option<&SpanViewable>,
+) -> Option<String> {
+ let source_map = tcx.sess.source_map();
+ let snippet = source_map
+ .span_to_snippet(span)
+ .unwrap_or_else(|err| bug!("span_to_snippet error for span {:?}: {:?}", span, err));
+ let html_snippet = if let Some(viewable) = some_viewable {
+ let is_head = span.lo() == viewable.span.lo();
+ let is_tail = span.hi() == viewable.span.hi();
+ let mut labeled_snippet = if is_head {
+ format!(r#"<span class="annotation">{}{}</span>"#, viewable.id, ANNOTATION_LEFT_BRACKET)
+ } else {
+ "".to_owned()
+ };
+ if span.is_empty() {
+ if is_head && is_tail {
+ labeled_snippet.push(CARET);
+ }
+ } else {
+ labeled_snippet.push_str(&escape_html(&snippet));
+ };
+ if is_tail {
+ labeled_snippet.push_str(&format!(
+ r#"<span class="annotation">{}{}</span>"#,
+ ANNOTATION_RIGHT_BRACKET, viewable.id
+ ));
+ }
+ labeled_snippet
+ } else {
+ escape_html(&snippet)
+ };
+ if html_snippet.is_empty() { None } else { Some(html_snippet) }
+}
+
+fn tooltip<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ spanview_id: &str,
+ span: Span,
+ statements: Vec<Statement<'tcx>>,
+ terminator: &Option<Terminator<'tcx>>,
+) -> String {
+ let source_map = tcx.sess.source_map();
+ let mut text = Vec::new();
+ text.push(format!("{}: {}:", spanview_id, &source_map.span_to_embeddable_string(span)));
+ for statement in statements {
+ let source_range = source_range_no_file(tcx, statement.source_info.span);
+ text.push(format!(
+ "\n{}{}: {}: {:?}",
+ TOOLTIP_INDENT,
+ source_range,
+ statement_kind_name(&statement),
+ statement
+ ));
+ }
+ if let Some(term) = terminator {
+ let source_range = source_range_no_file(tcx, term.source_info.span);
+ text.push(format!(
+ "\n{}{}: {}: {:?}",
+ TOOLTIP_INDENT,
+ source_range,
+ terminator_kind_name(term),
+ term.kind
+ ));
+ }
+ text.join("")
+}
+
+fn trim_span(span: Span, from_pos: BytePos, to_pos: BytePos) -> Span {
+ trim_span_hi(trim_span_lo(span, from_pos), to_pos)
+}
+
+fn trim_span_lo(span: Span, from_pos: BytePos) -> Span {
+ if from_pos <= span.lo() { span } else { span.with_lo(cmp::min(span.hi(), from_pos)) }
+}
+
+fn trim_span_hi(span: Span, to_pos: BytePos) -> Span {
+ if to_pos >= span.hi() { span } else { span.with_hi(cmp::max(span.lo(), to_pos)) }
+}
+
+fn fn_span<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> Span {
+ let fn_decl_span = tcx.def_span(def_id);
+ if let Some(body_span) = hir_body(tcx, def_id).map(|hir_body| hir_body.value.span) {
+ if fn_decl_span.eq_ctxt(body_span) { fn_decl_span.to(body_span) } else { body_span }
+ } else {
+ fn_decl_span
+ }
+}
+
+fn hir_body<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> Option<&'tcx rustc_hir::Body<'tcx>> {
+ let hir_node = tcx.hir().get_if_local(def_id).expect("expected DefId is local");
+ hir::map::associated_body(hir_node).map(|fn_body_id| tcx.hir().body(fn_body_id))
+}
+
+fn escape_html(s: &str) -> String {
+ s.replace('&', "&amp;").replace('<', "&lt;").replace('>', "&gt;")
+}
+
+fn escape_attr(s: &str) -> String {
+ s.replace('&', "&amp;")
+ .replace('\"', "&quot;")
+ .replace('\'', "&#39;")
+ .replace('<', "&lt;")
+ .replace('>', "&gt;")
+}
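+
+// Illustrative: escape_html("a < b") == "a &lt; b", and
+// escape_attr(r#"say "hi" & <go>"#) == "say &quot;hi&quot; &amp; &lt;go&gt;".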
diff --git a/compiler/rustc_middle/src/mir/switch_sources.rs b/compiler/rustc_middle/src/mir/switch_sources.rs
new file mode 100644
index 000000000..b91c0c257
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/switch_sources.rs
@@ -0,0 +1,78 @@
+//! Lazily compute the inverse of each `SwitchInt`'s switch targets. Modeled after
+//! `Predecessors`/`PredecessorCache`.
+
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_data_structures::sync::OnceCell;
+use rustc_index::vec::IndexVec;
+use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
+use smallvec::SmallVec;
+
+use crate::mir::{BasicBlock, BasicBlockData, Terminator, TerminatorKind};
+
+pub type SwitchSources = FxHashMap<(BasicBlock, BasicBlock), SmallVec<[Option<u128>; 1]>>;
+
+#[derive(Clone, Debug)]
+pub(super) struct SwitchSourceCache {
+ cache: OnceCell<SwitchSources>,
+}
+
+impl SwitchSourceCache {
+ #[inline]
+ pub(super) fn new() -> Self {
+ SwitchSourceCache { cache: OnceCell::new() }
+ }
+
+ /// Invalidates the switch source cache.
+ #[inline]
+ pub(super) fn invalidate(&mut self) {
+ self.cache = OnceCell::new();
+ }
+
+ /// Returns the switch sources for this MIR.
+ #[inline]
+ pub(super) fn compute(
+ &self,
+ basic_blocks: &IndexVec<BasicBlock, BasicBlockData<'_>>,
+ ) -> &SwitchSources {
+ self.cache.get_or_init(|| {
+ let mut switch_sources: SwitchSources = FxHashMap::default();
+ for (bb, data) in basic_blocks.iter_enumerated() {
+ if let Some(Terminator {
+ kind: TerminatorKind::SwitchInt { targets, .. }, ..
+ }) = &data.terminator
+ {
+ for (value, target) in targets.iter() {
+ switch_sources.entry((target, bb)).or_default().push(Some(value));
+ }
+ switch_sources.entry((targets.otherwise(), bb)).or_default().push(None);
+ }
+ }
+
+ switch_sources
+ })
+ }
+}
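+
+// Hedged usage sketch (hypothetical): for a `bb0` ending in
+// `switchInt(_1) -> [0: bb1, otherwise: bb2]`, the computed map is roughly
+// `{(bb1, bb0): [Some(0)], (bb2, bb0): [None]}`, answering "with which
+// switch values does bb0 branch to this block?".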
+
+impl<S: Encoder> Encodable<S> for SwitchSourceCache {
+ #[inline]
+ fn encode(&self, _s: &mut S) {}
+}
+
+impl<D: Decoder> Decodable<D> for SwitchSourceCache {
+ #[inline]
+ fn decode(_: &mut D) -> Self {
+ Self::new()
+ }
+}
+
+impl<CTX> HashStable<CTX> for SwitchSourceCache {
+ #[inline]
+ fn hash_stable(&self, _: &mut CTX, _: &mut StableHasher) {
+ // do nothing
+ }
+}
+
+TrivialTypeTraversalAndLiftImpls! {
+ SwitchSourceCache,
+}
diff --git a/compiler/rustc_middle/src/mir/syntax.rs b/compiler/rustc_middle/src/mir/syntax.rs
new file mode 100644
index 000000000..eb90169d0
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/syntax.rs
@@ -0,0 +1,1168 @@
+//! This defines the syntax of MIR, i.e., the set of available MIR operations, and other definitions
+//! closely related to MIR semantics.
+//! This is in a dedicated file so that changes to this file can be reviewed more carefully.
+//! The intention is that this file only contains datatype declarations, no code.
+
+use super::{BasicBlock, Constant, Field, Local, SwitchTargets, UserTypeProjection};
+
+use crate::mir::coverage::{CodeRegion, CoverageKind};
+use crate::ty::adjustment::PointerCast;
+use crate::ty::subst::SubstsRef;
+use crate::ty::{self, List, Ty};
+use crate::ty::{Region, UserTypeAnnotationIndex};
+
+use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
+use rustc_hir::def_id::DefId;
+use rustc_hir::{self as hir, GeneratorKind};
+use rustc_target::abi::VariantIdx;
+
+use rustc_ast::Mutability;
+use rustc_span::def_id::LocalDefId;
+use rustc_span::symbol::Symbol;
+use rustc_span::Span;
+use rustc_target::asm::InlineAsmRegOrRegClass;
+
+/// The various "big phases" that MIR goes through.
+///
+/// These phases all describe dialects of MIR. Since all MIR uses the same datastructures, the
+/// dialects forbid certain variants or values in certain phases. The sections below summarize the
+/// changes, but do not document them thoroughly. The full documentation lives alongside the
+/// documentation of whatever each change affects.
+///
+/// Warning: ordering of variants is significant.
+#[derive(Copy, Clone, TyEncodable, TyDecodable, Debug, PartialEq, Eq, PartialOrd, Ord)]
+#[derive(HashStable)]
+pub enum MirPhase {
+ /// The dialect of MIR used during all phases before `DropsLowered` is the same. This is also
+ /// the MIR that analyses such as borrowck use.
+ ///
+ /// One important thing to remember about the behavior of this section of MIR is that drop terminators
+ /// (including drop and replace) are *conditional*. The elaborate drops pass will then replace each
+ /// instance of a drop terminator with a nop, an unconditional drop, or a drop conditioned on a drop
+ /// flag. Of course, this means that it is important that drop elaboration can accurately recognize
+ /// when things are initialized and when things are de-initialized. That means any code running on this
+ /// version of MIR must be sure to produce output that drop elaboration can reason about. See the
+ /// section on the drop terminators for more details.
+ Built = 0,
+ // FIXME(oli-obk): it's unclear whether we still need this phase (and its corresponding query).
+ // We used to have this for pre-miri MIR based const eval.
+ Const = 1,
+ /// This phase checks the MIR for promotable elements and takes them out of the main MIR body
+ /// by creating a new MIR body per promoted element. After this phase (and thus the termination
+ /// of the `mir_promoted` query), these promoted elements are available in the `promoted_mir`
+ /// query.
+ ConstsPromoted = 2,
+ /// After this phase, a place's projections may only contain `Deref` as the first element.
+ Derefered = 3,
+ /// Beginning with this phase, the following variants are disallowed:
+ /// * [`TerminatorKind::DropAndReplace`]
+ /// * [`TerminatorKind::FalseUnwind`]
+ /// * [`TerminatorKind::FalseEdge`]
+ /// * [`StatementKind::FakeRead`]
+ /// * [`StatementKind::AscribeUserType`]
+ /// * [`Rvalue::Ref`] with `BorrowKind::Shallow`
+ ///
+ /// And the following variant is allowed:
+ /// * [`StatementKind::Retag`]
+ ///
+ /// Furthermore, `Drop` now uses explicit drop flags visible in the MIR and reaching a `Drop`
+ /// terminator means that the auto-generated drop glue will be invoked. Also, `Copy` operands
+ /// are allowed for non-`Copy` types.
+ DropsLowered = 4,
+ /// Beginning with this phase, the following variant is disallowed:
+ /// * [`Rvalue::Aggregate`] for any `AggregateKind` except `Array`
+ ///
+ /// And the following variant is allowed:
+ /// * [`StatementKind::SetDiscriminant`]
+ Deaggregated = 5,
+ /// Before this phase, generators are in the "source code" form, featuring `yield` statements
+ /// and such. With this phase change, they are transformed into a proper state machine. Running
+ /// optimizations before this change can be potentially dangerous because the source code is to
+ /// some extent a "lie." In particular, `yield` terminators effectively make the value of all
+ /// locals visible to the caller. This means that dead store elimination before them, or code
+ /// motion across them, is not correct in general. This is also exacerbated by type checking
+ /// having pre-computed a list of the types that it thinks are ok to be live across a yield
+ /// point - this is necessary to decide, e.g., whether autotraits are implemented. Introducing new
+ /// types across a yield point will lead to ICEs because of this.
+ ///
+ /// Beginning with this phase, the following variants are disallowed:
+ /// * [`TerminatorKind::Yield`]
+ /// * [`TerminatorKind::GeneratorDrop`]
+ /// * [`ProjectionElem::Deref`] of `Box`
+ GeneratorsLowered = 6,
+ Optimized = 7,
+}
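+
+// Hedged sketch (hypothetical helper): `MirPhase` derives `Ord`, and the
+// variant ordering is significant, so passes can gate checks on phases:
+//
+// fn drops_are_elaborated(phase: MirPhase) -> bool {
+//     phase >= MirPhase::DropsLowered
+// }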
+
+///////////////////////////////////////////////////////////////////////////
+// Borrow kinds
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, TyEncodable, TyDecodable)]
+#[derive(Hash, HashStable)]
+pub enum BorrowKind {
+ /// Data must be immutable and is aliasable.
+ Shared,
+
+ /// The immediately borrowed place must be immutable, but projections from
+ /// it don't need to be. For example, a shallow borrow of `a.b` doesn't
+ /// conflict with a mutable borrow of `a.b.c`.
+ ///
+ /// This is used when lowering matches: when matching on a place we want to
+ /// ensure that the place has the same value from the start of the match until
+ /// an arm is selected. This prevents this code from compiling:
+ /// ```compile_fail,E0510
+ /// let mut x = &Some(0);
+ /// match *x {
+ /// None => (),
+ /// Some(_) if { x = &None; false } => (),
+ /// Some(_) => (),
+ /// }
+ /// ```
+ /// This can't be a shared borrow because mutably borrowing `(*x as Some).0`
+ /// should not prevent `if let None = x { ... }`, for example, because the
+ /// mutating `(*x as Some).0` can't affect the discriminant of `x`.
+ /// We can also report errors with this kind of borrow differently.
+ Shallow,
+
+ /// Data must be immutable but not aliasable. This kind of borrow
+ /// cannot currently be expressed by the user and is used only in
+ /// implicit closure bindings. It is needed when the closure is
+ /// borrowing or mutating a mutable referent, e.g.:
+ /// ```
+ /// let mut z = 3;
+ /// let x: &mut isize = &mut z;
+ /// let y = || *x += 5;
+ /// ```
+ /// If we were to try to translate this closure into a more explicit
+ /// form, we'd encounter an error with the code as written:
+ /// ```compile_fail,E0594
+ /// struct Env<'a> { x: &'a &'a mut isize }
+ /// let mut z = 3;
+ /// let x: &mut isize = &mut z;
+ /// let y = (&mut Env { x: &x }, fn_ptr); // Closure is pair of env and fn
+ /// fn fn_ptr(env: &mut Env) { **env.x += 5; }
+ /// ```
+ /// This is then illegal because you cannot mutate an `&mut` found
+ /// in an aliasable location. To solve, you'd have to translate with
+ /// an `&mut` borrow:
+ /// ```compile_fail,E0596
+ /// struct Env<'a> { x: &'a mut &'a mut isize }
+ /// let mut z = 3;
+ /// let x: &mut isize = &mut z;
+ /// let y = (&mut Env { x: &mut x }, fn_ptr); // changed from &x to &mut x
+ /// fn fn_ptr(env: &mut Env) { **env.x += 5; }
+ /// ```
+ /// Now the assignment to `**env.x` is legal, but creating a
+ /// mutable pointer to `x` is not because `x` is not mutable. We
+ /// could fix this by declaring `x` as `let mut x`. This is ok in
+ /// user code, if awkward, but extra weird for closures, since the
+ /// borrow is hidden.
+ ///
+ /// So we introduce a "unique imm" borrow -- the referent is
+ /// immutable, but not aliasable. This solves the problem. For
+ /// simplicity, we don't give users a way to express this
+ /// borrow; it's just used when translating closures.
+ Unique,
+
+ /// Data is mutable and not aliasable.
+ Mut {
+ /// `true` if this borrow arose from method-call auto-ref
+ /// (i.e., `adjustment::Adjust::Borrow`).
+ allow_two_phase_borrow: bool,
+ },
+}
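+
+// Illustrative (editor's example): two-phase borrows are what allow the
+// method-call auto-ref below to compile. The mutable borrow of `v` starts out
+// "reserved", so the shared borrow of `v` in the argument is still permitted:
+//
+// fn push_len(v: &mut Vec<usize>) {
+//     v.push(v.len()); // `Mut { allow_two_phase_borrow: true }` on `v`
+// }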
+
+///////////////////////////////////////////////////////////////////////////
+// Statements
+
+/// The various kinds of statements that can appear in MIR.
+///
+/// Not all of these are allowed at every [`MirPhase`]. Check the documentation there to see which
+/// ones you do not have to worry about. The MIR validator will generally enforce such restrictions,
+/// causing an ICE if they are violated.
+#[derive(Clone, Debug, PartialEq, TyEncodable, TyDecodable, Hash, HashStable)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub enum StatementKind<'tcx> {
+ /// Assign statements roughly correspond to an assignment in Rust proper (`x = ...`) except
+ /// without the possibility of dropping the previous value (that must be done separately, if at
+ /// all). The *exact* way this works is undecided. It probably does something like evaluating
+ /// the LHS to a place and the RHS to a value, and then storing the value to the place. Various
+ /// parts of this may do type specific things that are more complicated than simply copying
+ /// bytes.
+ ///
+ /// **Needs clarification**: The implication of the above idea would be that assignment implies
+ /// that the resulting value is initialized. I believe we could commit to this separately from
+ /// committing to whatever part of the memory model we would need to decide on to make the above
+ /// paragraph precise. Do we want to?
+ ///
+ /// Assignments in which the types of the place and rvalue differ are not well-formed.
+ ///
+ /// **Needs clarification**: Do we ever want to worry about non-free (in the body) lifetimes for
+ /// the typing requirement in post drop-elaboration MIR? I think probably not - I'm not sure we
+ /// could meaningfully require this anyway. How about free lifetimes? Is ignoring this
+ /// interesting for optimizations? Do we want to allow such optimizations?
+ ///
+ /// **Needs clarification**: We currently require that the LHS place not overlap with any place
+ /// read as part of computation of the RHS for some rvalues (generally those not producing
+ /// primitives). This requirement is under discussion in [#68364]. As a part of this discussion,
+ /// it is also unclear in what order the components are evaluated.
+ ///
+ /// [#68364]: https://github.com/rust-lang/rust/issues/68364
+ ///
+ /// See [`Rvalue`] documentation for details on each of those.
+ Assign(Box<(Place<'tcx>, Rvalue<'tcx>)>),
+
+ /// This represents all the reading that a pattern match may do (e.g., inspecting constants and
+ /// discriminant values), and the kind of pattern it comes from. This is in order to adapt
+ /// potential error messages to these specific patterns.
+ ///
+ /// Note that this also is emitted for regular `let` bindings to ensure that locals that are
+ /// never accessed still get some sanity checks for, e.g., `let x: ! = ..;`
+ ///
+ /// When executed at runtime this is a nop.
+ ///
+ /// Disallowed after drop elaboration.
+ FakeRead(Box<(FakeReadCause, Place<'tcx>)>),
+
+ /// Write the discriminant for a variant to the enum Place.
+ ///
+ /// This is permitted for both generators and ADTs. This does not necessarily write to the
+ /// entire place; instead, it writes to the minimum set of bytes as required by the layout for
+ /// the type.
+ SetDiscriminant { place: Box<Place<'tcx>>, variant_index: VariantIdx },
+
+ /// Deinitializes the place.
+ ///
+ /// This writes `uninit` bytes to the entire place.
+ Deinit(Box<Place<'tcx>>),
+
+ /// `StorageLive` and `StorageDead` statements mark the live range of a local.
+ ///
+ /// At any point during the execution of a function, each local is either allocated or
+ /// unallocated. Except as noted below, all locals except function parameters are initially
+ /// unallocated. `StorageLive` statements cause memory to be allocated for the local while
+ /// `StorageDead` statements cause the memory to be freed. Using a local in any way (not only
+ /// reading/writing from it) while it is unallocated is UB.
+ ///
+ /// Some locals have no `StorageLive` or `StorageDead` statements within the entire MIR body.
+ /// These locals are implicitly allocated for the full duration of the function. There is a
+ /// convenience method at `rustc_mir_dataflow::storage::always_storage_live_locals` for
+ /// computing these locals.
+ ///
+ /// If the local is already allocated, calling `StorageLive` again is UB. However, for an
+ /// unallocated local an additional `StorageDead` call is simply a nop.
+ StorageLive(Local),
+
+ /// See `StorageLive` above.
+ StorageDead(Local),
+
+ /// Retag references in the given place, ensuring they got fresh tags.
+ ///
+ /// This is part of the Stacked Borrows model. These statements are currently only interpreted
+ /// by miri and only generated when `-Z mir-emit-retag` is passed. See
+ /// <https://internals.rust-lang.org/t/stacked-borrows-an-aliasing-model-for-rust/8153/> for
+ /// more details.
+ ///
+ /// For code that is not specific to stacked borrows, you should consider retags to read
+ /// and modify the place in an opaque way.
+ Retag(RetagKind, Box<Place<'tcx>>),
+
+ /// Encodes a user's type ascription. These need to be preserved
+ /// intact so that NLL can respect them. For example:
+ /// ```ignore (illustrative)
+ /// let a: T = y;
+ /// ```
+ /// The effect of this annotation is to relate the type `T_y` of the place `y`
+ /// to the user-given type `T`. The effect depends on the specified variance:
+ ///
+ /// - `Covariant` -- requires that `T_y <: T`
+ /// - `Contravariant` -- requires that `T_y :> T`
+ /// - `Invariant` -- requires that `T_y == T`
+ /// - `Bivariant` -- no effect
+ ///
+ /// When executed at runtime this is a nop.
+ ///
+ /// Disallowed after drop elaboration.
+ AscribeUserType(Box<(Place<'tcx>, UserTypeProjection)>, ty::Variance),
+
+ /// Marks the start of a "coverage region", injected with `-Cinstrument-coverage`. A
+ /// `Coverage` statement carries metadata about the coverage region, used to inject a coverage
+ /// map into the binary. If `Coverage::kind` is a `Counter`, the statement also generates
+ /// executable code, to increment a counter variable at runtime, each time the code region is
+ /// executed.
+ Coverage(Box<Coverage>),
+
+ /// Denotes a call to the intrinsic function `copy_nonoverlapping`.
+ ///
+ /// First, all three operands are evaluated. `src` and `dest` must each be a reference, pointer,
+ /// or `Box` pointing to the same type `T`. `count` must evaluate to a `usize`. Then, `src` and
+ /// `dest` are dereferenced, and `count * size_of::<T>()` bytes beginning with the first byte of
+ /// the `src` place are copied to the contiguous range of bytes beginning with the first byte
+ /// of `dest`.
+ ///
+ /// **Needs clarification**: In what order are operands computed and dereferenced? It should
+ /// probably match the order for assignment, but that is also undecided.
+ ///
+ /// **Needs clarification**: Is this typed or not, i.e., is there a typed load and store involved?
+ /// I vaguely remember Ralf saying somewhere that he thought it should not be.
+ CopyNonOverlapping(Box<CopyNonOverlapping<'tcx>>),
+
+ /// No-op. Useful for deleting instructions without affecting statement indices.
+ Nop,
+}
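+
+// Hedged sketch of how some of the variants above render in MIR text output
+// (`--emit=mir`); the exact formatting is illustrative:
+//
+//     StorageLive(_1);            // StatementKind::StorageLive
+//     _1 = const 5_i32;           // StatementKind::Assign
+//     FakeRead(ForLet(None), _1); // StatementKind::FakeRead
+//     StorageDead(_1);            // StatementKind::StorageDead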
+
+/// Describes what kind of retag is to be performed.
+#[derive(Copy, Clone, TyEncodable, TyDecodable, Debug, PartialEq, Eq, Hash, HashStable)]
+#[rustc_pass_by_value]
+pub enum RetagKind {
+ /// The initial retag when entering a function.
+ FnEntry,
+ /// Retag preparing for a two-phase borrow.
+ TwoPhase,
+ /// Retagging raw pointers.
+ Raw,
+ /// A "normal" retag.
+ Default,
+}
+
+/// The `FakeReadCause` describes the kind of pattern for which a `FakeRead` statement exists.
+#[derive(Copy, Clone, TyEncodable, TyDecodable, Debug, Hash, HashStable, PartialEq)]
+pub enum FakeReadCause {
+ /// Inject a fake read of the borrowed input at the end of each guard's
+ /// code.
+ ///
+ /// This should ensure that you cannot change the variant for an enum while
+ /// you are in the midst of matching on it.
+ ForMatchGuard,
+
+ /// `let x: !; match x {}` doesn't generate any read of x so we need to
+ /// generate a read of x to check that it is initialized and safe.
+ ///
+ /// If a closure pattern matches a Place starting with an Upvar, then we introduce a
+ /// FakeRead for that Place outside the closure, in such a case this option would be
+ /// Some(closure_def_id).
+ /// Otherwise, the value of the optional LocalDefId will be None.
+ //
+ // We can use LocalDefId here since fake read statements are removed
+ // before codegen in the `CleanupNonCodegenStatements` pass.
+ ForMatchedPlace(Option<LocalDefId>),
+
+ /// A fake read of the RefWithinGuard version of a bind-by-value variable
+ /// in a match guard to ensure that its value hasn't changed by the time
+ /// we create the OutsideGuard version.
+ ForGuardBinding,
+
+ /// Officially, the semantics of
+ ///
+ /// `let pattern = <expr>;`
+ ///
+ /// is that `<expr>` is evaluated into a temporary and then this temporary is
+ /// matched against the pattern.
+ ///
+ /// However, if we see the simple pattern `let var = <expr>`, we optimize this to
+ /// evaluate `<expr>` directly into the variable `var`. This is mostly unobservable,
+ /// but in some cases it can affect the borrow checker, as in #53695.
+ /// Therefore, we insert a "fake read" here to ensure that we get
+ /// appropriate errors.
+ ///
+ /// If a closure pattern matches a Place starting with an Upvar, then we introduce a
+ /// FakeRead for that Place outside the closure, in such a case this option would be
+ /// Some(closure_def_id).
+ /// Otherwise, the value of the optional DefId will be None.
+ ForLet(Option<LocalDefId>),
+
+ /// If we have an index expression like
+ ///
+ /// `(*x)[1][{ x = y; 4}]`
+ ///
+ /// then the first bounds check is invalidated when we evaluate the second
+ /// index expression. Thus we create a fake borrow of `x` across the second
+ /// indexer, which will cause a borrow check error.
+ ForIndex,
+}
+
+#[derive(Clone, Debug, PartialEq, TyEncodable, TyDecodable, Hash, HashStable)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub struct Coverage {
+ pub kind: CoverageKind,
+ pub code_region: Option<CodeRegion>,
+}
+
+#[derive(Clone, Debug, PartialEq, TyEncodable, TyDecodable, Hash, HashStable)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub struct CopyNonOverlapping<'tcx> {
+ pub src: Operand<'tcx>,
+ pub dst: Operand<'tcx>,
+ /// Number of elements to copy from src to dest, not bytes.
+ pub count: Operand<'tcx>,
+}
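+
+// Illustrative: this statement corresponds to the standard library's
+// `ptr::copy_nonoverlapping` intrinsic, roughly:
+//
+// unsafe fn copy3(src: *const u32, dst: *mut u32) {
+//     std::ptr::copy_nonoverlapping(src, dst, 3); // count is in elements, not bytes
+// }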
+
+///////////////////////////////////////////////////////////////////////////
+// Terminators
+
+/// The various kinds of terminators, representing ways of exiting from a basic block.
+///
+/// A note on unwinding: Panics may occur during the execution of some terminators. Depending on the
+/// `-C panic` flag, this may either cause the program to abort or the call stack to unwind. Such
+/// terminators have a `cleanup: Option<BasicBlock>` field on them. If stack unwinding occurs, then
+/// once the current function is reached, execution continues at the given basic block, if any. If
+/// `cleanup` is `None` then no cleanup is performed, and the stack continues unwinding. This is
+/// equivalent to the execution of a `Resume` terminator.
+///
+/// The basic block pointed to by a `cleanup` field must have its `cleanup` flag set. `cleanup`
+/// basic blocks have a couple restrictions:
+/// 1. All `cleanup` fields in them must be `None`.
+/// 2. `Return` terminators are not allowed in them. `Abort` and `Resume` terminators are.
+/// 3. All other basic blocks (in the current body) that are reachable from `cleanup` basic blocks
+/// must also be `cleanup`. This is a part of the type system and checked statically, so it is
+/// still an error to have such an edge in the CFG even if it's known that it won't be taken at
+/// runtime.
+#[derive(Clone, TyEncodable, TyDecodable, Hash, HashStable, PartialEq)]
+pub enum TerminatorKind<'tcx> {
+ /// Block has one successor; we continue execution there.
+ Goto { target: BasicBlock },
+
+ /// Switches based on the computed value.
+ ///
+ /// First, evaluates the `discr` operand. The type of the operand must be a signed or unsigned
+ /// integer, char, or bool, and must match the given type. Then, if the list of switch targets
+ /// contains the computed value, continues execution at the associated basic block. Otherwise,
+ /// continues execution at the "otherwise" basic block.
+ ///
+ /// Target values may not appear more than once.
+ SwitchInt {
+ /// The discriminant value being tested.
+ discr: Operand<'tcx>,
+
+ /// The type of value being tested.
+ /// This is always the same as the type of `discr`.
+ /// FIXME: remove this redundant information. Currently, it is relied on by pretty-printing.
+ switch_ty: Ty<'tcx>,
+
+ targets: SwitchTargets,
+ },
+
+ /// Indicates that the landing pad is finished and that the process should continue unwinding.
+ ///
+ /// Like a return, this marks the end of this invocation of the function.
+ ///
+ /// Only permitted in cleanup blocks. `Resume` is not permitted with `-C unwind=abort` after
+ /// deaggregation runs.
+ Resume,
+
+ /// Indicates that the landing pad is finished and that the process should abort.
+ ///
+ /// Used to prevent unwinding for foreign items or with `-C unwind=abort`. Only permitted in
+ /// cleanup blocks.
+ Abort,
+
+ /// Returns from the function.
+ ///
+ /// Like function calls, the exact semantics of returns in Rust are unclear. Returning very
+ /// likely at least assigns the value currently in the return place (`_0`) to the place
+ /// specified in the associated `Call` terminator in the calling function, as if assigned via
+ /// `dest = move _0`. It might additionally do other things, like have side-effects in the
+ /// aliasing model.
+ ///
+ /// If the body is a generator body, this has slightly different semantics; it instead causes a
+ /// `GeneratorState::Returned(_0)` to be created (as if by an `Aggregate` rvalue) and assigned
+ /// to the return place.
+ Return,
+
+ /// Indicates a terminator that can never be reached.
+ ///
+ /// Executing this terminator is UB.
+ Unreachable,
+
+ /// The behavior of this statement differs significantly before and after drop elaboration.
+ /// After drop elaboration, `Drop` executes the drop glue for the specified place, after which
+ /// it continues execution/unwinds at the given basic blocks. It is possible that executing drop
+/// glue is special - this would be part of Rust's memory model. (**FIXME**: do we have an
+ /// issue tracking if drop glue has any interesting semantics in addition to those of a function
+ /// call?)
+ ///
+ /// `Drop` before drop elaboration is a *conditional* execution of the drop glue. Specifically, the
+ /// `Drop` will be executed if...
+ ///
+ /// **Needs clarification**: End of that sentence. This in effect should document the exact
+ /// behavior of drop elaboration. The following sounds vaguely right, but I'm not quite sure:
+ ///
+ /// > The drop glue is executed if, among all statements executed within this `Body`, an assignment to
+ /// > the place or one of its "parents" occurred more recently than a move out of it. This does not
+ /// > consider indirect assignments.
+ Drop { place: Place<'tcx>, target: BasicBlock, unwind: Option<BasicBlock> },
+
+ /// Drops the place and assigns a new value to it.
+ ///
+ /// This first performs the exact same operation as the pre drop-elaboration `Drop` terminator;
+ /// it then additionally assigns the `value` to the `place` as if by an assignment statement.
+ /// This assignment occurs both in the unwind and the regular code paths. The semantics are best
+ /// explained by the elaboration:
+ ///
+ /// ```ignore (MIR)
+ /// BB0 {
+ /// DropAndReplace(P <- V, goto BB1, unwind BB2)
+ /// }
+ /// ```
+ ///
+ /// becomes
+ ///
+ /// ```ignore (MIR)
+ /// BB0 {
+ /// Drop(P, goto BB1, unwind BB2)
+ /// }
+ /// BB1 {
+ /// // P is now uninitialized
+ /// P <- V
+ /// }
+ /// BB2 {
+ /// // P is now uninitialized -- its dtor panicked
+ /// P <- V
+ /// }
+ /// ```
+ ///
+ /// Disallowed after drop elaboration.
+ DropAndReplace {
+ place: Place<'tcx>,
+ value: Operand<'tcx>,
+ target: BasicBlock,
+ unwind: Option<BasicBlock>,
+ },
+
+ /// Roughly speaking, evaluates the `func` operand and the arguments, and starts execution of
+ /// the referred to function. The operand types must match the argument types of the function.
+ /// The return place type must match the return type. The type of the `func` operand must be
+ /// callable, meaning either a function pointer, a function type, or a closure type.
+ ///
+ /// **Needs clarification**: The exact semantics of this. Current backends rely on `move`
+ /// operands not aliasing the return place. It is unclear how this is justified in MIR, see
+ /// [#71117].
+ ///
+ /// [#71117]: https://github.com/rust-lang/rust/issues/71117
+ Call {
+ /// The function that’s being called.
+ func: Operand<'tcx>,
+ /// Arguments the function is called with.
+ /// These are owned by the callee, which is free to modify them.
+ /// This allows the memory occupied by "by-value" arguments to be
+ /// reused across function calls without duplicating the contents.
+ args: Vec<Operand<'tcx>>,
+ /// Where the returned value will be written
+ destination: Place<'tcx>,
+ /// Where to go after this call returns. If none, the call necessarily diverges.
+ target: Option<BasicBlock>,
+ /// Cleanups to be done if the call unwinds.
+ cleanup: Option<BasicBlock>,
+ /// `true` if this is from a call in HIR rather than from an overloaded
+ /// operator. It remains `true` for an overloaded function call (a call
+ /// dispatched through the `Fn` family of traits).
+ from_hir_call: bool,
+ /// This `Span` is the span of the function, without the dot and receiver
+ /// (e.g. `foo(a, b)` in `x.foo(a, b)`).
+ fn_span: Span,
+ },
+
+ /// Evaluates the operand, which must have type `bool`. If it is not equal to `expected`,
+ /// initiates a panic. Initiating a panic corresponds to a `Call` terminator with some
+ /// unspecified constant as the function to call, all the operands stored in the `AssertMessage`
+ /// as parameters, and `None` for the destination. Keep in mind that the `cleanup` path is not
+ /// necessarily executed even in the case of a panic, for example in `-C panic=abort`. If the
+ /// assertion does not fail, execution continues at the specified basic block.
+ Assert {
+ cond: Operand<'tcx>,
+ expected: bool,
+ msg: AssertMessage<'tcx>,
+ target: BasicBlock,
+ cleanup: Option<BasicBlock>,
+ },
+
+ /// Marks a suspend point.
+ ///
+ /// Like `Return` terminators in generator bodies, this computes `value` and then a
+ /// `GeneratorState::Yielded(value)` as if by `Aggregate` rvalue. That value is then assigned to
+ /// the return place of the function calling this one, and execution continues in the calling
+ /// function. When next invoked with the same first argument, execution of this function
+ /// continues at the `resume` basic block, with the second argument written to the `resume_arg`
+ /// place. If the generator is dropped before then, the `drop` basic block is invoked.
+ ///
+ /// Not permitted in bodies that are not generator bodies, or after generator lowering.
+ ///
+ /// **Needs clarification**: What about the evaluation order of the `resume_arg` and `value`?
+ Yield {
+ /// The value to return.
+ value: Operand<'tcx>,
+ /// Where to resume to.
+ resume: BasicBlock,
+ /// The place to store the resume argument in.
+ resume_arg: Place<'tcx>,
+ /// Cleanup to be done if the generator is dropped at this suspend point.
+ drop: Option<BasicBlock>,
+ },
+
+ /// Indicates the end of dropping a generator.
+ ///
+ /// Semantically just a `return` (from the generator's drop glue). Only permitted in the same situations
+ /// as `yield`.
+ ///
+ /// **Needs clarification**: Is that even correct? The generator drop code is always confusing
+ /// to me, because it's not even really in the current body.
+ ///
+ /// **Needs clarification**: Are there type system constraints on these terminators? Should
+ /// there be a "block type" like `cleanup` blocks for them?
+ GeneratorDrop,
+
+ /// A block where control flow only ever takes one real path, but borrowck needs to be more
+ /// conservative.
+ ///
+ /// At runtime this is semantically just a goto.
+ ///
+ /// Disallowed after drop elaboration.
+ FalseEdge {
+ /// The target normal control flow will take.
+ real_target: BasicBlock,
+ /// A block control flow could conceptually jump to, but won't in
+ /// practice.
+ imaginary_target: BasicBlock,
+ },
+
+ /// A terminator for blocks that only take one path in reality, but where we reserve the right
+ /// to unwind in borrowck, even if it won't happen in practice. This can arise in infinite loops
+ /// with no function calls for example.
+ ///
+ /// At runtime this is semantically just a goto.
+ ///
+ /// Disallowed after drop elaboration.
+ FalseUnwind {
+ /// The target normal control flow will take.
+ real_target: BasicBlock,
+ /// The imaginary cleanup block link. This particular path will never be taken
+ /// in practice, but in order to avoid fragility we want to always
+ /// consider it in borrowck. We don't want to accept programs which
+ /// pass borrowck only when `panic=abort` or some assertions are disabled
+ /// due to release vs. debug mode builds. This needs to be an `Option` because
+ /// of the `remove_noop_landing_pads` and `abort_unwinding_calls` passes.
+ unwind: Option<BasicBlock>,
+ },
+
+ /// Block ends with an inline assembly block. This is a terminator since
+ /// inline assembly is allowed to diverge.
+ InlineAsm {
+ /// The template for the inline assembly, with placeholders.
+ template: &'tcx [InlineAsmTemplatePiece],
+
+ /// The operands for the inline assembly, as `Operand`s or `Place`s.
+ operands: Vec<InlineAsmOperand<'tcx>>,
+
+ /// Miscellaneous options for the inline assembly.
+ options: InlineAsmOptions,
+
+ /// Source spans for each line of the inline assembly code. These are
+ /// used to map assembler errors back to the line in the source code.
+ line_spans: &'tcx [Span],
+
+ /// Destination block after the inline assembly returns, unless it is
+ /// diverging (`InlineAsmOptions::NORETURN`).
+ destination: Option<BasicBlock>,
+
+ /// Cleanup to be done if the inline assembly unwinds. This is present
+ /// if and only if `InlineAsmOptions::MAY_UNWIND` is set.
+ cleanup: Option<BasicBlock>,
+ },
+}
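+
+// Hedged sketch: a `match` on an integer lowers to a `SwitchInt` terminator;
+// in MIR text output it renders roughly as (illustrative):
+//
+//     switchInt(move _1) -> [0_i32: bb1, 1_i32: bb2, otherwise: bb3];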
+
+/// Information about an assertion failure.
+#[derive(Clone, TyEncodable, TyDecodable, Hash, HashStable, PartialEq, PartialOrd)]
+pub enum AssertKind<O> {
+ BoundsCheck { len: O, index: O },
+ Overflow(BinOp, O, O),
+ OverflowNeg(O),
+ DivisionByZero(O),
+ RemainderByZero(O),
+ ResumedAfterReturn(GeneratorKind),
+ ResumedAfterPanic(GeneratorKind),
+}
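+
+// Illustrative: an overflow-checked `a + b` produces an `Assert` terminator
+// carrying `AssertKind::Overflow(BinOp::Add, a, b)`; in MIR text, roughly:
+//
+//     _3 = CheckedAdd(_1, _2);
+//     assert(!move (_3.1: bool), "attempt to compute `{} + {}`, which would overflow", _1, _2) -> bb1;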
+
+#[derive(Clone, Debug, PartialEq, TyEncodable, TyDecodable, Hash, HashStable)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub enum InlineAsmOperand<'tcx> {
+ In {
+ reg: InlineAsmRegOrRegClass,
+ value: Operand<'tcx>,
+ },
+ Out {
+ reg: InlineAsmRegOrRegClass,
+ late: bool,
+ place: Option<Place<'tcx>>,
+ },
+ InOut {
+ reg: InlineAsmRegOrRegClass,
+ late: bool,
+ in_value: Operand<'tcx>,
+ out_place: Option<Place<'tcx>>,
+ },
+ Const {
+ value: Box<Constant<'tcx>>,
+ },
+ SymFn {
+ value: Box<Constant<'tcx>>,
+ },
+ SymStatic {
+ def_id: DefId,
+ },
+}
+
+/// Type for MIR `Assert` terminator error messages.
+pub type AssertMessage<'tcx> = AssertKind<Operand<'tcx>>;
+
+///////////////////////////////////////////////////////////////////////////
+// Places
+
+/// Places roughly correspond to a "location in memory." Places in MIR are the same mathematical
+/// object as places in Rust. This of course means that what exactly they are is undecided and part
+/// of the Rust memory model. However, they will likely contain at least the following pieces of
+/// information in some form:
+///
+/// 1. The address in memory that the place refers to.
+/// 2. The provenance with which the place is being accessed.
+/// 3. The type of the place and an optional variant index. See [`PlaceTy`][super::tcx::PlaceTy].
+/// 4. Optionally, some metadata. This exists if and only if the type of the place is not `Sized`.
+///
+/// We'll give a description below of how all pieces of the place except for the provenance are
+/// calculated. We cannot give a description of the provenance, because that is part of the
+/// undecided aliasing model - we only include it here at all to acknowledge its existence.
+///
+/// Each local naturally corresponds to the place `Place { local, projection: [] }`. This place has
+/// the address of the local's allocation and the type of the local.
+///
+/// **Needs clarification:** Unsized locals seem to present a bit of an issue. Their allocation
+/// can't actually be created on `StorageLive`, because it's unclear how big to make the allocation.
+/// Furthermore, MIR produces assignments to unsized locals, although that is not permitted under
+/// `#![feature(unsized_locals)]` in Rust. Besides just putting "unsized locals are special and
+/// different" in a bunch of places, I (JakobDegen) don't know how to incorporate this behavior into
+/// the current MIR semantics in a clean way - possibly this needs some design work first.
+///
+/// For places that are not locals, i.e., those with a non-empty list of projections, we define the
+/// values as a function of the parent place, that is the place with its last [`ProjectionElem`]
+/// stripped. The way this is computed of course depends on the kind of that last projection
+/// element:
+///
+/// - [`Downcast`](ProjectionElem::Downcast): This projection sets the place's variant index to the
+/// given one, and makes no other changes. A `Downcast` projection on a place with its variant
+/// index already set is not well-formed.
+/// - [`Field`](ProjectionElem::Field): `Field` projections take their parent place and create a
+/// place referring to one of the fields of the type. The resulting address is the parent
+/// address, plus the offset of the field. The type becomes the type of the field. If the parent
+/// was unsized and so had metadata associated with it, then the metadata is retained if the
+/// field is unsized and thrown out if it is sized.
+///
+/// These projections are only legal for tuples, ADTs, closures, and generators. If the ADT or
+/// generator has more than one variant, the parent place's variant index must be set, indicating
+/// which variant is being used. If it has just one variant, the variant index may or may not be
+/// included - the single possible variant is inferred if it is not included.
+/// - [`ConstantIndex`](ProjectionElem::ConstantIndex): Computes an offset in units of `T` into the
+/// place as described in the documentation for the `ProjectionElem`. The resulting address is
+/// the parent's address plus that offset, and the type is `T`. This is only legal if the parent
+/// place has type `[T; N]` or `[T]` (*not* `&[T]`). Since such a `T` is always sized, any
+/// resulting metadata is thrown out.
+/// - [`Subslice`](ProjectionElem::Subslice): This projection calculates an offset and a new
+/// address in a similar manner as `ConstantIndex`. It is also only legal on `[T; N]` and `[T]`.
+/// However, this yields a `Place` of type `[T]`, and additionally sets the metadata to be the
+/// length of the subslice.
+/// - [`Index`](ProjectionElem::Index): Like `ConstantIndex`, only legal on `[T; N]` or `[T]`.
+/// However, `Index` additionally takes a local from which the value of the index is computed at
+/// runtime. Computing the value of the index involves interpreting the `Local` as a
+/// `Place { local, projection: [] }`, and then computing its value as if done via
+/// [`Operand::Copy`]. The array/slice is then indexed with the resulting value. The local must
+/// have type `usize`.
+/// - [`Deref`](ProjectionElem::Deref): Derefs are the last type of projection, and the most
+/// complicated. They are only legal on parent places that are references, pointers, or `Box`. A
+/// `Deref` projection begins by loading a value from the parent place, as if by
+/// [`Operand::Copy`]. It then dereferences the resulting pointer, creating a place of the
+/// pointee's type. The resulting address is the address that was stored in the pointer. If the
+///   pointee type is unsized, the pointer additionally stores the value of the metadata.
+///
+/// Computing a place may cause UB. One possibility is that the pointer used for a `Deref` may not
+/// be suitably aligned. Another possibility is that the place is not in bounds, meaning it does not
+/// point to an actual allocation.
+///
+/// However, whether this is actually UB, and when exactly the UB kicks in, is undecided. This is
+/// being discussed in [UCG#319]. The options include that every place must obey those rules, that
+/// only some places must obey them, or that places impose no rules of their own.
+///
+/// [UCG#319]: https://github.com/rust-lang/unsafe-code-guidelines/issues/319
+///
+/// Rust currently requires that every place obey those two rules. This is checked by Miri and taken
+/// advantage of by codegen (via `gep inbounds`). That is possibly subject to change.
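+///
+/// As an illustrative sketch (the local number and field names here are made up), a place such as
+/// `(*a.b).c`, with `a` in local `_1`, might be represented roughly as:
+///
+/// ```ignore (illustrative)
+/// Place {
+///     local: _1,
+///     // Projections apply left to right: first `.b`, then the deref, then `.c`.
+///     projection: [Field(b), Deref, Field(c)],
+/// }
+/// ```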
+#[derive(Copy, Clone, PartialEq, Eq, Hash, TyEncodable, HashStable)]
+pub struct Place<'tcx> {
+ pub local: Local,
+
+ /// projection out of a place (access a field, deref a pointer, etc)
+ pub projection: &'tcx List<PlaceElem<'tcx>>,
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
+#[derive(TyEncodable, TyDecodable, HashStable)]
+pub enum ProjectionElem<V, T> {
+ Deref,
+ Field(Field, T),
+ /// Index into a slice/array.
+ ///
+ /// Note that this does not also dereference, and so it does not exactly correspond to slice
+ /// indexing in Rust. In other words, in the below Rust code:
+ ///
+ /// ```rust
+ /// let x = &[1, 2, 3, 4];
+ /// let i = 2;
+ /// x[i];
+ /// ```
+ ///
+ /// The `x[i]` is turned into a `Deref` followed by an `Index`, not just an `Index`. The same
+ /// thing is true of the `ConstantIndex` and `Subslice` projections below.
+ Index(V),
+
+ /// These indices are generated by slice patterns. Easiest to explain
+ /// by example:
+ ///
+ /// ```ignore (illustrative)
+ /// [X, _, .._, _, _] => { offset: 0, min_length: 4, from_end: false },
+ /// [_, X, .._, _, _] => { offset: 1, min_length: 4, from_end: false },
+ /// [_, _, .._, X, _] => { offset: 2, min_length: 4, from_end: true },
+ /// [_, _, .._, _, X] => { offset: 1, min_length: 4, from_end: true },
+ /// ```
+ ConstantIndex {
+ /// index or -index (in Python terms), depending on from_end
+ offset: u64,
+ /// The thing being indexed must be at least this long. For arrays this
+ /// is always the exact length.
+ min_length: u64,
+ /// Counting backwards from end? This is always false when indexing an
+ /// array.
+ from_end: bool,
+ },
+
+ /// These indices are generated by slice patterns.
+ ///
+    /// If `from_end` is true, this is `slice[from..slice.len() - to]`.
+    /// Otherwise it is `array[from..to]`.
+ Subslice {
+ from: u64,
+ to: u64,
+ /// Whether `to` counts from the start or end of the array/slice.
+ /// For `PlaceElem`s this is `true` if and only if the base is a slice.
+ /// For `ProjectionKind`, this can also be `true` for arrays.
+ from_end: bool,
+ },
+
+ /// "Downcast" to a variant of an enum or a generator.
+ ///
+ /// The included Symbol is the name of the variant, used for printing MIR.
+ Downcast(Option<Symbol>, VariantIdx),
+}
+
+/// Alias for projections as they appear in places, where the base is a place
+/// and the index is a local.
+pub type PlaceElem<'tcx> = ProjectionElem<Local, Ty<'tcx>>;
+
+///////////////////////////////////////////////////////////////////////////
+// Operands
+
+/// An operand in MIR represents a "value" in Rust, the definition of which is undecided and part of
+/// the memory model. One proposal for a definition of values can be found [on UCG][value-def].
+///
+/// [value-def]: https://github.com/rust-lang/unsafe-code-guidelines/blob/master/wip/value-domain.md
+///
+/// The most common way to create values is via loading a place. Loading a place is an operation
+/// which reads the memory of the place and converts it to a value. This is a fundamentally *typed*
+/// operation. The nature of the value produced depends on the type of the conversion. Furthermore,
+/// there may be other effects: if the type has a validity constraint, loading the place might be UB
+/// if the validity constraint is not met.
+///
+/// **Needs clarification:** Ralf proposes that loading a place not have side-effects.
+/// This is what is implemented in miri today. Are these the semantics we want for MIR? Is this
+/// something we can even decide without knowing more about Rust's memory model?
+///
+/// **Needs clarification:** Is loading a place that has its variant index set well-formed? Miri
+/// currently implements it, but it seems like this may be something to check against in the
+/// validator.
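+///
+/// As a rough sketch (locals and values made up), the three kinds appear in pretty-printed MIR
+/// like this, each right-hand side being an `Rvalue::Use` of the corresponding operand:
+///
+/// ```ignore (illustrative)
+/// _1 = _2;           // Operand::Copy(_2)
+/// _3 = move _4;      // Operand::Move(_4)
+/// _5 = const 5_i32;  // Operand::Constant(5_i32)
+/// ```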
+#[derive(Clone, PartialEq, TyEncodable, TyDecodable, Hash, HashStable)]
+pub enum Operand<'tcx> {
+ /// Creates a value by loading the given place.
+ ///
+ /// Before drop elaboration, the type of the place must be `Copy`. After drop elaboration there
+ /// is no such requirement.
+ Copy(Place<'tcx>),
+
+    /// Creates a value by loading the place, just like the `Copy` operand.
+ ///
+ /// This *may* additionally overwrite the place with `uninit` bytes, depending on how we decide
+ /// in [UCG#188]. You should not emit MIR that may attempt a subsequent second load of this
+ /// place without first re-initializing it.
+ ///
+ /// [UCG#188]: https://github.com/rust-lang/unsafe-code-guidelines/issues/188
+ Move(Place<'tcx>),
+
+ /// Constants are already semantically values, and remain unchanged.
+ Constant(Box<Constant<'tcx>>),
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Rvalues
+
+/// The various kinds of rvalues that can appear in MIR.
+///
+/// Not all of these are allowed at every [`MirPhase`] - when this is the case, it's stated below.
+///
+/// Computing any rvalue begins by evaluating the places and operands in some order (**Needs
+/// clarification**: Which order?). These are then used to produce a "value" - the same kind of
+/// value that an [`Operand`] produces.
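+///
+/// A few illustrative examples of rvalues as they appear in pretty-printed MIR (locals made up):
+///
+/// ```ignore (illustrative)
+/// _1 = &_2;                          // Rvalue::Ref
+/// _3 = Len(_4);                      // Rvalue::Len
+/// _5 = CheckedAdd(_6, const 1_i32);  // Rvalue::CheckedBinaryOp
+/// ```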
+#[derive(Clone, TyEncodable, TyDecodable, Hash, HashStable, PartialEq)]
+pub enum Rvalue<'tcx> {
+ /// Yields the operand unchanged
+ Use(Operand<'tcx>),
+
+ /// Creates an array where each element is the value of the operand.
+ ///
+ /// This is the cause of a bug in the case where the repetition count is zero because the value
+ /// is not dropped, see [#74836].
+ ///
+ /// Corresponds to source code like `[x; 32]`.
+ ///
+ /// [#74836]: https://github.com/rust-lang/rust/issues/74836
+ Repeat(Operand<'tcx>, ty::Const<'tcx>),
+
+ /// Creates a reference of the indicated kind to the place.
+ ///
+ /// There is not much to document here, because besides the obvious parts the semantics of this
+ /// are essentially entirely a part of the aliasing model. There are many UCG issues discussing
+ /// exactly what the behavior of this operation should be.
+ ///
+ /// `Shallow` borrows are disallowed after drop lowering.
+ Ref(Region<'tcx>, BorrowKind, Place<'tcx>),
+
+ /// Creates a pointer/reference to the given thread local.
+ ///
+    /// The yielded type is a `*mut T` if the static is mutable; otherwise, if the static is
+    /// extern, a `*const T`; and if neither of those apply, a `&T`.
+ ///
+ /// **Note:** This is a runtime operation that actually executes code and is in this sense more
+ /// like a function call. Also, eliminating dead stores of this rvalue causes `fn main() {}` to
+ /// SIGILL for some reason that I (JakobDegen) never got a chance to look into.
+ ///
+ /// **Needs clarification**: Are there weird additional semantics here related to the runtime
+ /// nature of this operation?
+ ThreadLocalRef(DefId),
+
+ /// Creates a pointer with the indicated mutability to the place.
+ ///
+ /// This is generated by pointer casts like `&v as *const _` or raw address of expressions like
+ /// `&raw v` or `addr_of!(v)`.
+ ///
+ /// Like with references, the semantics of this operation are heavily dependent on the aliasing
+ /// model.
+ AddressOf(Mutability, Place<'tcx>),
+
+ /// Yields the length of the place, as a `usize`.
+ ///
+ /// If the type of the place is an array, this is the array length. For slices (`[T]`, not
+ /// `&[T]`) this accesses the place's metadata to determine the length. This rvalue is
+ /// ill-formed for places of other types.
+ Len(Place<'tcx>),
+
+ /// Performs essentially all of the casts that can be performed via `as`.
+ ///
+ /// This allows for casts from/to a variety of types.
+ ///
+ /// **FIXME**: Document exactly which `CastKind`s allow which types of casts. Figure out why
+ /// `ArrayToPointer` and `MutToConstPointer` are special.
+ Cast(CastKind, Operand<'tcx>, Ty<'tcx>),
+
+ /// * `Offset` has the same semantics as [`offset`](pointer::offset), except that the second
+ /// parameter may be a `usize` as well.
+ /// * The comparison operations accept `bool`s, `char`s, signed or unsigned integers, floats,
+ /// raw pointers, or function pointers and return a `bool`. The types of the operands must be
+ /// matching, up to the usual caveat of the lifetimes in function pointers.
+ /// * Left and right shift operations accept signed or unsigned integers not necessarily of the
+ /// same type and return a value of the same type as their LHS. Like in Rust, the RHS is
+ /// truncated as needed.
+ /// * The `Bit*` operations accept signed integers, unsigned integers, or bools with matching
+ /// types and return a value of that type.
+ /// * The remaining operations accept signed integers, unsigned integers, or floats with
+ /// matching types and return a value of that type.
+ BinaryOp(BinOp, Box<(Operand<'tcx>, Operand<'tcx>)>),
+
+ /// Same as `BinaryOp`, but yields `(T, bool)` with a `bool` indicating an error condition.
+ ///
+ /// When overflow checking is disabled and we are generating run-time code, the error condition
+ /// is false. Otherwise, and always during CTFE, the error condition is determined as described
+ /// below.
+ ///
+ /// For addition, subtraction, and multiplication on integers the error condition is set when
+ /// the infinite precision result would be unequal to the actual result.
+ ///
+ /// For shift operations on integers the error condition is set when the value of right-hand
+ /// side is greater than or equal to the number of bits in the type of the left-hand side, or
+ /// when the value of right-hand side is negative.
+ ///
+ /// Other combinations of types and operators are unsupported.
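+    ///
+    /// A worked example (illustrative), with `u8` operands:
+    ///
+    /// ```ignore (illustrative)
+    /// CheckedAdd(255_u8, 1_u8) == (0_u8, true)    // 256 wraps to 0; error condition set
+    /// CheckedAdd(254_u8, 1_u8) == (255_u8, false) // exact result; error condition clear
+    /// ```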
+ CheckedBinaryOp(BinOp, Box<(Operand<'tcx>, Operand<'tcx>)>),
+
+ /// Computes a value as described by the operation.
+ NullaryOp(NullOp, Ty<'tcx>),
+
+    /// Exactly like `BinaryOp`, but with fewer operands.
+ ///
+ /// Also does two's-complement arithmetic. Negation requires a signed integer or a float;
+ /// bitwise not requires a signed integer, unsigned integer, or bool. Both operation kinds
+ /// return a value with the same type as their operand.
+ UnaryOp(UnOp, Operand<'tcx>),
+
+ /// Computes the discriminant of the place, returning it as an integer of type
+ /// [`discriminant_ty`]. Returns zero for types without discriminant.
+ ///
+ /// The validity requirements for the underlying value are undecided for this rvalue, see
+ /// [#91095]. Note too that the value of the discriminant is not the same thing as the
+ /// variant index; use [`discriminant_for_variant`] to convert.
+ ///
+ /// [`discriminant_ty`]: crate::ty::Ty::discriminant_ty
+ /// [#91095]: https://github.com/rust-lang/rust/issues/91095
+ /// [`discriminant_for_variant`]: crate::ty::Ty::discriminant_for_variant
+ Discriminant(Place<'tcx>),
+
+ /// Creates an aggregate value, like a tuple or struct.
+ ///
+ /// This is needed because dataflow analysis needs to distinguish
+ /// `dest = Foo { x: ..., y: ... }` from `dest.x = ...; dest.y = ...;` in the case that `Foo`
+ /// has a destructor.
+ ///
+ /// Disallowed after deaggregation for all aggregate kinds except `Array` and `Generator`. After
+ /// generator lowering, `Generator` aggregate kinds are disallowed too.
+ Aggregate(Box<AggregateKind<'tcx>>, Vec<Operand<'tcx>>),
+
+    /// Transmutes a `*mut u8` into a shallow-initialized `Box<T>`.
+ ///
+ /// This is different from a normal transmute because dataflow analysis will treat the box as
+ /// initialized but its content as uninitialized. Like other pointer casts, this in general
+ /// affects alias analysis.
+ ShallowInitBox(Operand<'tcx>, Ty<'tcx>),
+
+ /// A CopyForDeref is equivalent to a read from a place at the
+    /// codegen level, but is treated specially by drop elaboration. When such a read happens, it
+    /// is guaranteed (by the `Derefer` MIR pass in `rustc_mir_transform/src/deref_separator`)
+ /// that the only use of the returned value is a deref operation, immediately
+ /// followed by one or more projections. Drop elaboration treats this rvalue as if the
+ /// read never happened and just projects further. This allows simplifying various MIR
+ /// optimizations and codegen backends that previously had to handle deref operations anywhere
+ /// in a place.
+ CopyForDeref(Place<'tcx>),
+}
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq, TyEncodable, TyDecodable, Hash, HashStable)]
+pub enum CastKind {
+    /// An exposing pointer-to-address cast: a cast between a pointer and an integer type, or
+ /// between a function pointer and an integer type.
+ /// See the docs on `expose_addr` for more details.
+ PointerExposeAddress,
+ /// An address-to-pointer cast that picks up an exposed provenance.
+ /// See the docs on `from_exposed_addr` for more details.
+ PointerFromExposedAddress,
+ /// All sorts of pointer-to-pointer casts. Note that reference-to-raw-ptr casts are
+ /// translated into `&raw mut/const *r`, i.e., they are not actually casts.
+ Pointer(PointerCast),
+ /// Remaining unclassified casts.
+ Misc,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, Hash, HashStable)]
+pub enum AggregateKind<'tcx> {
+    /// The type is that of the array's elements.
+ Array(Ty<'tcx>),
+ Tuple,
+
+    /// The second field is the variant index. It's equal to 0 for struct
+    /// and union expressions. The last field is the
+    /// active field number and is present only for union expressions
+    /// -- e.g., for a union expression `SomeUnion { c: .. }`, the
+    /// active field index would identify the field `c`.
+ Adt(DefId, VariantIdx, SubstsRef<'tcx>, Option<UserTypeAnnotationIndex>, Option<usize>),
+
+    // Note: We can use LocalDefId since closures and generators are deaggregated
+ // before codegen.
+ Closure(LocalDefId, SubstsRef<'tcx>),
+ Generator(LocalDefId, SubstsRef<'tcx>, hir::Movability),
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, Hash, HashStable)]
+pub enum NullOp {
+ /// Returns the size of a value of that type
+ SizeOf,
+ /// Returns the minimum alignment of a type
+ AlignOf,
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, Hash, HashStable)]
+pub enum UnOp {
+ /// The `!` operator for logical inversion
+ Not,
+ /// The `-` operator for negation
+ Neg,
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, PartialOrd, Eq, TyEncodable, TyDecodable, Hash, HashStable)]
+pub enum BinOp {
+ /// The `+` operator (addition)
+ Add,
+ /// The `-` operator (subtraction)
+ Sub,
+ /// The `*` operator (multiplication)
+ Mul,
+ /// The `/` operator (division)
+ ///
+ /// Division by zero is UB, because the compiler should have inserted checks
+ /// prior to this.
+ Div,
+ /// The `%` operator (modulus)
+ ///
+ /// Using zero as the modulus (second operand) is UB, because the compiler
+ /// should have inserted checks prior to this.
+ Rem,
+ /// The `^` operator (bitwise xor)
+ BitXor,
+ /// The `&` operator (bitwise and)
+ BitAnd,
+ /// The `|` operator (bitwise or)
+ BitOr,
+ /// The `<<` operator (shift left)
+ ///
+    /// The shift amount is truncated to the size of the first operand before shifting.
+ Shl,
+ /// The `>>` operator (shift right)
+ ///
+    /// The shift amount is truncated to the size of the first operand before shifting.
+ Shr,
+ /// The `==` operator (equality)
+ Eq,
+ /// The `<` operator (less than)
+ Lt,
+ /// The `<=` operator (less than or equal to)
+ Le,
+ /// The `!=` operator (not equal to)
+ Ne,
+ /// The `>=` operator (greater than or equal to)
+ Ge,
+ /// The `>` operator (greater than)
+ Gt,
+ /// The `ptr.offset` operator
+ Offset,
+}
+
+// Some nodes are used a lot. Make sure they don't unintentionally get bigger.
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+mod size_asserts {
+ use super::*;
+ // These are in alphabetical order, which is easy to maintain.
+ static_assert_size!(AggregateKind<'_>, 48);
+ static_assert_size!(Operand<'_>, 24);
+ static_assert_size!(Place<'_>, 16);
+ static_assert_size!(PlaceElem<'_>, 24);
+ static_assert_size!(Rvalue<'_>, 40);
+}
diff --git a/compiler/rustc_middle/src/mir/tcx.rs b/compiler/rustc_middle/src/mir/tcx.rs
new file mode 100644
index 000000000..405003156
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/tcx.rs
@@ -0,0 +1,307 @@
+/*!
+ * Methods for the various MIR types. These are intended for use after
+ * building is complete.
+ */
+
+use crate::mir::*;
+use crate::ty::subst::Subst;
+use crate::ty::{self, Ty, TyCtxt};
+use rustc_hir as hir;
+use rustc_target::abi::VariantIdx;
+
+#[derive(Copy, Clone, Debug, TypeFoldable, TypeVisitable)]
+pub struct PlaceTy<'tcx> {
+ pub ty: Ty<'tcx>,
+ /// Downcast to a particular variant of an enum or a generator, if included.
+ pub variant_index: Option<VariantIdx>,
+}
+
+// At least on 64 bit systems, `PlaceTy` should not be larger than two pointers.
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+static_assert_size!(PlaceTy<'_>, 16);
+
+impl<'tcx> PlaceTy<'tcx> {
+ #[inline]
+ pub fn from_ty(ty: Ty<'tcx>) -> PlaceTy<'tcx> {
+ PlaceTy { ty, variant_index: None }
+ }
+
+ /// `place_ty.field_ty(tcx, f)` computes the type at a given field
+ /// of a record or enum-variant. (Most clients of `PlaceTy` can
+ /// instead just extract the relevant type directly from their
+ /// `PlaceElem`, but some instances of `ProjectionElem<V, T>` do
+ /// not carry a `Ty` for `T`.)
+ ///
+ /// Note that the resulting type has not been normalized.
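+    ///
+    /// For example (illustrative): if `self.ty` is `(u32, String)` and `f` is the field with
+    /// index `1`, the result is `String`.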
+ pub fn field_ty(self, tcx: TyCtxt<'tcx>, f: Field) -> Ty<'tcx> {
+ let answer = match self.ty.kind() {
+ ty::Adt(adt_def, substs) => {
+ let variant_def = match self.variant_index {
+ None => adt_def.non_enum_variant(),
+ Some(variant_index) => {
+ assert!(adt_def.is_enum());
+ &adt_def.variant(variant_index)
+ }
+ };
+ let field_def = &variant_def.fields[f.index()];
+ field_def.ty(tcx, substs)
+ }
+ ty::Tuple(tys) => tys[f.index()],
+ _ => bug!("extracting field of non-tuple non-adt: {:?}", self),
+ };
+ debug!("field_ty self: {:?} f: {:?} yields: {:?}", self, f, answer);
+ answer
+ }
+
+ /// Convenience wrapper around `projection_ty_core` for
+ /// `PlaceElem`, where we can just use the `Ty` that is already
+ /// stored inline on field projection elems.
+ pub fn projection_ty(self, tcx: TyCtxt<'tcx>, elem: PlaceElem<'tcx>) -> PlaceTy<'tcx> {
+ self.projection_ty_core(tcx, ty::ParamEnv::empty(), &elem, |_, _, ty| ty)
+ }
+
+ /// `place_ty.projection_ty_core(tcx, elem, |...| { ... })`
+ /// projects `place_ty` onto `elem`, returning the appropriate
+ /// `Ty` or downcast variant corresponding to that projection.
+    /// The `handle_field` callback must map a `Field` to its `Ty`
+    /// (which should be trivial when `T` = `Ty`).
+ pub fn projection_ty_core<V, T>(
+ self,
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ elem: &ProjectionElem<V, T>,
+ mut handle_field: impl FnMut(&Self, Field, T) -> Ty<'tcx>,
+ ) -> PlaceTy<'tcx>
+ where
+ V: ::std::fmt::Debug,
+ T: ::std::fmt::Debug + Copy,
+ {
+ if self.variant_index.is_some() && !matches!(elem, ProjectionElem::Field(..)) {
+ bug!("cannot use non field projection on downcasted place")
+ }
+ let answer = match *elem {
+ ProjectionElem::Deref => {
+ let ty = self
+ .ty
+ .builtin_deref(true)
+ .unwrap_or_else(|| {
+ bug!("deref projection of non-dereferenceable ty {:?}", self)
+ })
+ .ty;
+ PlaceTy::from_ty(ty)
+ }
+ ProjectionElem::Index(_) | ProjectionElem::ConstantIndex { .. } => {
+ PlaceTy::from_ty(self.ty.builtin_index().unwrap())
+ }
+ ProjectionElem::Subslice { from, to, from_end } => {
+ PlaceTy::from_ty(match self.ty.kind() {
+ ty::Slice(..) => self.ty,
+ ty::Array(inner, _) if !from_end => tcx.mk_array(*inner, (to - from) as u64),
+ ty::Array(inner, size) if from_end => {
+ let size = size.eval_usize(tcx, param_env);
+ let len = size - (from as u64) - (to as u64);
+ tcx.mk_array(*inner, len)
+ }
+ _ => bug!("cannot subslice non-array type: `{:?}`", self),
+ })
+ }
+ ProjectionElem::Downcast(_name, index) => {
+ PlaceTy { ty: self.ty, variant_index: Some(index) }
+ }
+ ProjectionElem::Field(f, fty) => PlaceTy::from_ty(handle_field(&self, f, fty)),
+ };
+ debug!("projection_ty self: {:?} elem: {:?} yields: {:?}", self, elem, answer);
+ answer
+ }
+}
+
+impl<'tcx> Place<'tcx> {
+ pub fn ty_from<D>(
+ local: Local,
+ projection: &[PlaceElem<'tcx>],
+ local_decls: &D,
+ tcx: TyCtxt<'tcx>,
+ ) -> PlaceTy<'tcx>
+ where
+ D: HasLocalDecls<'tcx>,
+ {
+ projection
+ .iter()
+ .fold(PlaceTy::from_ty(local_decls.local_decls()[local].ty), |place_ty, &elem| {
+ place_ty.projection_ty(tcx, elem)
+ })
+ }
+
+ pub fn ty<D>(&self, local_decls: &D, tcx: TyCtxt<'tcx>) -> PlaceTy<'tcx>
+ where
+ D: HasLocalDecls<'tcx>,
+ {
+ Place::ty_from(self.local, &self.projection, local_decls, tcx)
+ }
+}
+
+impl<'tcx> PlaceRef<'tcx> {
+ pub fn ty<D>(&self, local_decls: &D, tcx: TyCtxt<'tcx>) -> PlaceTy<'tcx>
+ where
+ D: HasLocalDecls<'tcx>,
+ {
+ Place::ty_from(self.local, &self.projection, local_decls, tcx)
+ }
+}
+
+pub enum RvalueInitializationState {
+ Shallow,
+ Deep,
+}
+
+impl<'tcx> Rvalue<'tcx> {
+ pub fn ty<D>(&self, local_decls: &D, tcx: TyCtxt<'tcx>) -> Ty<'tcx>
+ where
+ D: HasLocalDecls<'tcx>,
+ {
+ match *self {
+ Rvalue::Use(ref operand) => operand.ty(local_decls, tcx),
+ Rvalue::Repeat(ref operand, count) => {
+ tcx.mk_ty(ty::Array(operand.ty(local_decls, tcx), count))
+ }
+ Rvalue::ThreadLocalRef(did) => {
+ let static_ty = tcx.type_of(did);
+ if tcx.is_mutable_static(did) {
+ tcx.mk_mut_ptr(static_ty)
+ } else if tcx.is_foreign_item(did) {
+ tcx.mk_imm_ptr(static_ty)
+ } else {
+ // FIXME: These things don't *really* have 'static lifetime.
+ tcx.mk_imm_ref(tcx.lifetimes.re_static, static_ty)
+ }
+ }
+ Rvalue::Ref(reg, bk, ref place) => {
+ let place_ty = place.ty(local_decls, tcx).ty;
+ tcx.mk_ref(reg, ty::TypeAndMut { ty: place_ty, mutbl: bk.to_mutbl_lossy() })
+ }
+ Rvalue::AddressOf(mutability, ref place) => {
+ let place_ty = place.ty(local_decls, tcx).ty;
+ tcx.mk_ptr(ty::TypeAndMut { ty: place_ty, mutbl: mutability })
+ }
+ Rvalue::Len(..) => tcx.types.usize,
+ Rvalue::Cast(.., ty) => ty,
+ Rvalue::BinaryOp(op, box (ref lhs, ref rhs)) => {
+ let lhs_ty = lhs.ty(local_decls, tcx);
+ let rhs_ty = rhs.ty(local_decls, tcx);
+ op.ty(tcx, lhs_ty, rhs_ty)
+ }
+ Rvalue::CheckedBinaryOp(op, box (ref lhs, ref rhs)) => {
+ let lhs_ty = lhs.ty(local_decls, tcx);
+ let rhs_ty = rhs.ty(local_decls, tcx);
+ let ty = op.ty(tcx, lhs_ty, rhs_ty);
+ tcx.intern_tup(&[ty, tcx.types.bool])
+ }
+ Rvalue::UnaryOp(UnOp::Not | UnOp::Neg, ref operand) => operand.ty(local_decls, tcx),
+ Rvalue::Discriminant(ref place) => place.ty(local_decls, tcx).ty.discriminant_ty(tcx),
+ Rvalue::NullaryOp(NullOp::SizeOf | NullOp::AlignOf, _) => tcx.types.usize,
+ Rvalue::Aggregate(ref ak, ref ops) => match **ak {
+ AggregateKind::Array(ty) => tcx.mk_array(ty, ops.len() as u64),
+ AggregateKind::Tuple => tcx.mk_tup(ops.iter().map(|op| op.ty(local_decls, tcx))),
+ AggregateKind::Adt(did, _, substs, _, _) => {
+ tcx.bound_type_of(did).subst(tcx, substs)
+ }
+ AggregateKind::Closure(did, substs) => tcx.mk_closure(did.to_def_id(), substs),
+ AggregateKind::Generator(did, substs, movability) => {
+ tcx.mk_generator(did.to_def_id(), substs, movability)
+ }
+ },
+ Rvalue::ShallowInitBox(_, ty) => tcx.mk_box(ty),
+ Rvalue::CopyForDeref(ref place) => place.ty(local_decls, tcx).ty,
+ }
+ }
+
+    /// Returns whether this rvalue is deeply initialized (most rvalues) or
+    /// only shallowly initialized (`Rvalue::ShallowInitBox`).
+    #[inline]
+ pub fn initialization_state(&self) -> RvalueInitializationState {
+ match *self {
+ Rvalue::ShallowInitBox(_, _) => RvalueInitializationState::Shallow,
+ _ => RvalueInitializationState::Deep,
+ }
+ }
+}
+
+impl<'tcx> Operand<'tcx> {
+ pub fn ty<D>(&self, local_decls: &D, tcx: TyCtxt<'tcx>) -> Ty<'tcx>
+ where
+ D: HasLocalDecls<'tcx>,
+ {
+ match self {
+ &Operand::Copy(ref l) | &Operand::Move(ref l) => l.ty(local_decls, tcx).ty,
+ &Operand::Constant(ref c) => c.literal.ty(),
+ }
+ }
+}
+
+impl<'tcx> BinOp {
+ pub fn ty(&self, tcx: TyCtxt<'tcx>, lhs_ty: Ty<'tcx>, rhs_ty: Ty<'tcx>) -> Ty<'tcx> {
+ // FIXME: handle SIMD correctly
+ match self {
+ &BinOp::Add
+ | &BinOp::Sub
+ | &BinOp::Mul
+ | &BinOp::Div
+ | &BinOp::Rem
+ | &BinOp::BitXor
+ | &BinOp::BitAnd
+ | &BinOp::BitOr => {
+ // these should be integers or floats of the same size.
+ assert_eq!(lhs_ty, rhs_ty);
+ lhs_ty
+ }
+ &BinOp::Shl | &BinOp::Shr | &BinOp::Offset => {
+ lhs_ty // lhs_ty can be != rhs_ty
+ }
+ &BinOp::Eq | &BinOp::Lt | &BinOp::Le | &BinOp::Ne | &BinOp::Ge | &BinOp::Gt => {
+ tcx.types.bool
+ }
+ }
+ }
+}
+
+impl BorrowKind {
+ pub fn to_mutbl_lossy(self) -> hir::Mutability {
+ match self {
+ BorrowKind::Mut { .. } => hir::Mutability::Mut,
+ BorrowKind::Shared => hir::Mutability::Not,
+
+ // We have no type corresponding to a unique imm borrow, so
+ // use `&mut`. It gives all the capabilities of a `&uniq`
+ // and hence is a safe "over approximation".
+ BorrowKind::Unique => hir::Mutability::Mut,
+
+ // We have no type corresponding to a shallow borrow, so use
+ // `&` as an approximation.
+ BorrowKind::Shallow => hir::Mutability::Not,
+ }
+ }
+}
+
+impl BinOp {
+ pub fn to_hir_binop(self) -> hir::BinOpKind {
+ match self {
+ BinOp::Add => hir::BinOpKind::Add,
+ BinOp::Sub => hir::BinOpKind::Sub,
+ BinOp::Mul => hir::BinOpKind::Mul,
+ BinOp::Div => hir::BinOpKind::Div,
+ BinOp::Rem => hir::BinOpKind::Rem,
+ BinOp::BitXor => hir::BinOpKind::BitXor,
+ BinOp::BitAnd => hir::BinOpKind::BitAnd,
+ BinOp::BitOr => hir::BinOpKind::BitOr,
+ BinOp::Shl => hir::BinOpKind::Shl,
+ BinOp::Shr => hir::BinOpKind::Shr,
+ BinOp::Eq => hir::BinOpKind::Eq,
+ BinOp::Ne => hir::BinOpKind::Ne,
+ BinOp::Lt => hir::BinOpKind::Lt,
+ BinOp::Gt => hir::BinOpKind::Gt,
+ BinOp::Le => hir::BinOpKind::Le,
+ BinOp::Ge => hir::BinOpKind::Ge,
+ BinOp::Offset => unreachable!(),
+ }
+ }
+}
diff --git a/compiler/rustc_middle/src/mir/terminator.rs b/compiler/rustc_middle/src/mir/terminator.rs
new file mode 100644
index 000000000..9ccf5aea6
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/terminator.rs
@@ -0,0 +1,448 @@
+use crate::mir;
+use crate::mir::interpret::Scalar;
+use crate::ty::{self, Ty, TyCtxt};
+use smallvec::{smallvec, SmallVec};
+
+use super::{BasicBlock, InlineAsmOperand, Operand, SourceInfo, TerminatorKind};
+use rustc_ast::InlineAsmTemplatePiece;
+pub use rustc_ast::Mutability;
+use rustc_macros::HashStable;
+use std::borrow::Cow;
+use std::fmt::{self, Debug, Formatter, Write};
+use std::iter;
+use std::slice;
+
+pub use super::query::*;
+
+#[derive(Debug, Clone, TyEncodable, TyDecodable, Hash, HashStable, PartialEq, PartialOrd)]
+pub struct SwitchTargets {
+ /// Possible values. The locations to branch to in each case
+ /// are found in the corresponding indices from the `targets` vector.
+ values: SmallVec<[u128; 1]>,
+
+    /// Possible branch sites. The last element of this vector is used
+    /// for the otherwise branch, so `targets.len() == values.len() + 1`
+    /// should hold.
+ //
+ // This invariant is quite non-obvious and also could be improved.
+ // One way to make this invariant is to have something like this instead:
+ //
+ // branches: Vec<(ConstInt, BasicBlock)>,
+ // otherwise: Option<BasicBlock> // exhaustive if None
+ //
+    // However we’ve decided to keep this as-is until we find a case
+    // where some other approach seems to be strictly better than this one.
+ targets: SmallVec<[BasicBlock; 2]>,
+}
+
+impl SwitchTargets {
+ /// Creates switch targets from an iterator of values and target blocks.
+ ///
+ /// The iterator may be empty, in which case the `SwitchInt` instruction is equivalent to
+ /// `goto otherwise;`.
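+    ///
+    /// A sketch of typical usage (`bb1`..`bb3` are made-up block ids):
+    ///
+    /// ```ignore (illustrative)
+    /// let targets = SwitchTargets::new([(0, bb1), (1, bb2)].into_iter(), bb3);
+    /// assert_eq!(targets.target_for_value(1), bb2);
+    /// assert_eq!(targets.target_for_value(7), bb3); // falls back to `otherwise`
+    /// ```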
+ pub fn new(targets: impl Iterator<Item = (u128, BasicBlock)>, otherwise: BasicBlock) -> Self {
+ let (values, mut targets): (SmallVec<_>, SmallVec<_>) = targets.unzip();
+ targets.push(otherwise);
+ Self { values, targets }
+ }
+
+ /// Builds a switch targets definition that jumps to `then` if the tested value equals `value`,
+ /// and to `else_` if not.
+ pub fn static_if(value: u128, then: BasicBlock, else_: BasicBlock) -> Self {
+ Self { values: smallvec![value], targets: smallvec![then, else_] }
+ }
+
+ /// Returns the fallback target that is jumped to when none of the values match the operand.
+ pub fn otherwise(&self) -> BasicBlock {
+ *self.targets.last().unwrap()
+ }
+
+ /// Returns an iterator over the switch targets.
+ ///
+ /// The iterator will yield tuples containing the value and corresponding target to jump to, not
+ /// including the `otherwise` fallback target.
+ ///
+ /// Note that this may yield 0 elements. Only the `otherwise` branch is mandatory.
+ pub fn iter(&self) -> SwitchTargetsIter<'_> {
+ SwitchTargetsIter { inner: iter::zip(&self.values, &self.targets) }
+ }
+
+ /// Returns a slice with all possible jump targets (including the fallback target).
+ pub fn all_targets(&self) -> &[BasicBlock] {
+ &self.targets
+ }
+
+ pub fn all_targets_mut(&mut self) -> &mut [BasicBlock] {
+ &mut self.targets
+ }
+
+ /// Finds the `BasicBlock` to which this `SwitchInt` will branch given the
+ /// specific value. This cannot fail, as it'll return the `otherwise`
+ /// branch if there's not a specific match for the value.
+ pub fn target_for_value(&self, value: u128) -> BasicBlock {
+ self.iter().find_map(|(v, t)| (v == value).then_some(t)).unwrap_or_else(|| self.otherwise())
+ }
+}
+
+pub struct SwitchTargetsIter<'a> {
+ inner: iter::Zip<slice::Iter<'a, u128>, slice::Iter<'a, BasicBlock>>,
+}
+
+impl<'a> Iterator for SwitchTargetsIter<'a> {
+ type Item = (u128, BasicBlock);
+
+ fn next(&mut self) -> Option<Self::Item> {
+ self.inner.next().map(|(val, bb)| (*val, *bb))
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+}
+
+impl<'a> ExactSizeIterator for SwitchTargetsIter<'a> {}
+
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable)]
+pub struct Terminator<'tcx> {
+ pub source_info: SourceInfo,
+ pub kind: TerminatorKind<'tcx>,
+}
+
+pub type Successors<'a> = impl Iterator<Item = BasicBlock> + 'a;
+pub type SuccessorsMut<'a> =
+ iter::Chain<std::option::IntoIter<&'a mut BasicBlock>, slice::IterMut<'a, BasicBlock>>;
+
+impl<'tcx> Terminator<'tcx> {
+ pub fn successors(&self) -> Successors<'_> {
+ self.kind.successors()
+ }
+
+ pub fn successors_mut(&mut self) -> SuccessorsMut<'_> {
+ self.kind.successors_mut()
+ }
+
+ pub fn unwind(&self) -> Option<&Option<BasicBlock>> {
+ self.kind.unwind()
+ }
+
+ pub fn unwind_mut(&mut self) -> Option<&mut Option<BasicBlock>> {
+ self.kind.unwind_mut()
+ }
+}
+
+impl<'tcx> TerminatorKind<'tcx> {
+ pub fn if_(
+ tcx: TyCtxt<'tcx>,
+ cond: Operand<'tcx>,
+ t: BasicBlock,
+ f: BasicBlock,
+ ) -> TerminatorKind<'tcx> {
+ TerminatorKind::SwitchInt {
+ discr: cond,
+ switch_ty: tcx.types.bool,
+ targets: SwitchTargets::static_if(0, f, t),
+ }
+ }
+
+ pub fn successors(&self) -> Successors<'_> {
+ use self::TerminatorKind::*;
+ match *self {
+ Resume
+ | Abort
+ | GeneratorDrop
+ | Return
+ | Unreachable
+ | Call { target: None, cleanup: None, .. }
+ | InlineAsm { destination: None, cleanup: None, .. } => {
+ None.into_iter().chain((&[]).into_iter().copied())
+ }
+ Goto { target: t }
+ | Call { target: None, cleanup: Some(t), .. }
+ | Call { target: Some(t), cleanup: None, .. }
+ | Yield { resume: t, drop: None, .. }
+ | DropAndReplace { target: t, unwind: None, .. }
+ | Drop { target: t, unwind: None, .. }
+ | Assert { target: t, cleanup: None, .. }
+ | FalseUnwind { real_target: t, unwind: None }
+ | InlineAsm { destination: Some(t), cleanup: None, .. }
+ | InlineAsm { destination: None, cleanup: Some(t), .. } => {
+ Some(t).into_iter().chain((&[]).into_iter().copied())
+ }
+ Call { target: Some(t), cleanup: Some(ref u), .. }
+ | Yield { resume: t, drop: Some(ref u), .. }
+ | DropAndReplace { target: t, unwind: Some(ref u), .. }
+ | Drop { target: t, unwind: Some(ref u), .. }
+ | Assert { target: t, cleanup: Some(ref u), .. }
+ | FalseUnwind { real_target: t, unwind: Some(ref u) }
+ | InlineAsm { destination: Some(t), cleanup: Some(ref u), .. } => {
+ Some(t).into_iter().chain(slice::from_ref(u).into_iter().copied())
+ }
+ SwitchInt { ref targets, .. } => {
+ None.into_iter().chain(targets.targets.iter().copied())
+ }
+ FalseEdge { real_target, ref imaginary_target } => Some(real_target)
+ .into_iter()
+ .chain(slice::from_ref(imaginary_target).into_iter().copied()),
+ }
+ }
+
+ pub fn successors_mut(&mut self) -> SuccessorsMut<'_> {
+ use self::TerminatorKind::*;
+ match *self {
+ Resume
+ | Abort
+ | GeneratorDrop
+ | Return
+ | Unreachable
+ | Call { target: None, cleanup: None, .. }
+ | InlineAsm { destination: None, cleanup: None, .. } => None.into_iter().chain(&mut []),
+ Goto { target: ref mut t }
+ | Call { target: None, cleanup: Some(ref mut t), .. }
+ | Call { target: Some(ref mut t), cleanup: None, .. }
+ | Yield { resume: ref mut t, drop: None, .. }
+ | DropAndReplace { target: ref mut t, unwind: None, .. }
+ | Drop { target: ref mut t, unwind: None, .. }
+ | Assert { target: ref mut t, cleanup: None, .. }
+ | FalseUnwind { real_target: ref mut t, unwind: None }
+ | InlineAsm { destination: Some(ref mut t), cleanup: None, .. }
+ | InlineAsm { destination: None, cleanup: Some(ref mut t), .. } => {
+ Some(t).into_iter().chain(&mut [])
+ }
+ Call { target: Some(ref mut t), cleanup: Some(ref mut u), .. }
+ | Yield { resume: ref mut t, drop: Some(ref mut u), .. }
+ | DropAndReplace { target: ref mut t, unwind: Some(ref mut u), .. }
+ | Drop { target: ref mut t, unwind: Some(ref mut u), .. }
+ | Assert { target: ref mut t, cleanup: Some(ref mut u), .. }
+ | FalseUnwind { real_target: ref mut t, unwind: Some(ref mut u) }
+ | InlineAsm { destination: Some(ref mut t), cleanup: Some(ref mut u), .. } => {
+ Some(t).into_iter().chain(slice::from_mut(u))
+ }
+ SwitchInt { ref mut targets, .. } => None.into_iter().chain(&mut targets.targets),
+ FalseEdge { ref mut real_target, ref mut imaginary_target } => {
+ Some(real_target).into_iter().chain(slice::from_mut(imaginary_target))
+ }
+ }
+ }
+
+ pub fn unwind(&self) -> Option<&Option<BasicBlock>> {
+ match *self {
+ TerminatorKind::Goto { .. }
+ | TerminatorKind::Resume
+ | TerminatorKind::Abort
+ | TerminatorKind::Return
+ | TerminatorKind::Unreachable
+ | TerminatorKind::GeneratorDrop
+ | TerminatorKind::Yield { .. }
+ | TerminatorKind::SwitchInt { .. }
+ | TerminatorKind::FalseEdge { .. } => None,
+ TerminatorKind::Call { cleanup: ref unwind, .. }
+ | TerminatorKind::Assert { cleanup: ref unwind, .. }
+ | TerminatorKind::DropAndReplace { ref unwind, .. }
+ | TerminatorKind::Drop { ref unwind, .. }
+ | TerminatorKind::FalseUnwind { ref unwind, .. }
+ | TerminatorKind::InlineAsm { cleanup: ref unwind, .. } => Some(unwind),
+ }
+ }
+
+ pub fn unwind_mut(&mut self) -> Option<&mut Option<BasicBlock>> {
+ match *self {
+ TerminatorKind::Goto { .. }
+ | TerminatorKind::Resume
+ | TerminatorKind::Abort
+ | TerminatorKind::Return
+ | TerminatorKind::Unreachable
+ | TerminatorKind::GeneratorDrop
+ | TerminatorKind::Yield { .. }
+ | TerminatorKind::SwitchInt { .. }
+ | TerminatorKind::FalseEdge { .. } => None,
+ TerminatorKind::Call { cleanup: ref mut unwind, .. }
+ | TerminatorKind::Assert { cleanup: ref mut unwind, .. }
+ | TerminatorKind::DropAndReplace { ref mut unwind, .. }
+ | TerminatorKind::Drop { ref mut unwind, .. }
+ | TerminatorKind::FalseUnwind { ref mut unwind, .. }
+ | TerminatorKind::InlineAsm { cleanup: ref mut unwind, .. } => Some(unwind),
+ }
+ }
+
+ pub fn as_switch(&self) -> Option<(&Operand<'tcx>, Ty<'tcx>, &SwitchTargets)> {
+ match self {
+ TerminatorKind::SwitchInt { discr, switch_ty, targets } => {
+ Some((discr, *switch_ty, targets))
+ }
+ _ => None,
+ }
+ }
+
+ pub fn as_goto(&self) -> Option<BasicBlock> {
+ match self {
+ TerminatorKind::Goto { target } => Some(*target),
+ _ => None,
+ }
+ }
+}
+
+impl<'tcx> Debug for TerminatorKind<'tcx> {
+ fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
+ self.fmt_head(fmt)?;
+ let successor_count = self.successors().count();
+ let labels = self.fmt_successor_labels();
+ assert_eq!(successor_count, labels.len());
+
+ match successor_count {
+ 0 => Ok(()),
+
+ 1 => write!(fmt, " -> {:?}", self.successors().next().unwrap()),
+
+ _ => {
+ write!(fmt, " -> [")?;
+ for (i, target) in self.successors().enumerate() {
+ if i > 0 {
+ write!(fmt, ", ")?;
+ }
+ write!(fmt, "{}: {:?}", labels[i], target)?;
+ }
+ write!(fmt, "]")
+ }
+ }
+ }
+}
+
+impl<'tcx> TerminatorKind<'tcx> {
+ /// Writes the "head" part of the terminator; that is, its name and the data it uses to pick the
+ /// successor basic block, if any. The only information not included is the list of possible
+ /// successors, which may be rendered differently between the text and the graphviz format.
+ pub fn fmt_head<W: Write>(&self, fmt: &mut W) -> fmt::Result {
+ use self::TerminatorKind::*;
+ match self {
+ Goto { .. } => write!(fmt, "goto"),
+ SwitchInt { discr, .. } => write!(fmt, "switchInt({:?})", discr),
+ Return => write!(fmt, "return"),
+ GeneratorDrop => write!(fmt, "generator_drop"),
+ Resume => write!(fmt, "resume"),
+ Abort => write!(fmt, "abort"),
+ Yield { value, resume_arg, .. } => write!(fmt, "{:?} = yield({:?})", resume_arg, value),
+ Unreachable => write!(fmt, "unreachable"),
+ Drop { place, .. } => write!(fmt, "drop({:?})", place),
+ DropAndReplace { place, value, .. } => {
+ write!(fmt, "replace({:?} <- {:?})", place, value)
+ }
+ Call { func, args, destination, .. } => {
+ write!(fmt, "{:?} = ", destination)?;
+ write!(fmt, "{:?}(", func)?;
+ for (index, arg) in args.iter().enumerate() {
+ if index > 0 {
+ write!(fmt, ", ")?;
+ }
+ write!(fmt, "{:?}", arg)?;
+ }
+ write!(fmt, ")")
+ }
+ Assert { cond, expected, msg, .. } => {
+ write!(fmt, "assert(")?;
+ if !expected {
+ write!(fmt, "!")?;
+ }
+ write!(fmt, "{:?}, ", cond)?;
+ msg.fmt_assert_args(fmt)?;
+ write!(fmt, ")")
+ }
+ FalseEdge { .. } => write!(fmt, "falseEdge"),
+ FalseUnwind { .. } => write!(fmt, "falseUnwind"),
+ InlineAsm { template, ref operands, options, .. } => {
+ write!(fmt, "asm!(\"{}\"", InlineAsmTemplatePiece::to_string(template))?;
+ for op in operands {
+ write!(fmt, ", ")?;
+ let print_late = |&late| if late { "late" } else { "" };
+ match op {
+ InlineAsmOperand::In { reg, value } => {
+ write!(fmt, "in({}) {:?}", reg, value)?;
+ }
+ InlineAsmOperand::Out { reg, late, place: Some(place) } => {
+ write!(fmt, "{}out({}) {:?}", print_late(late), reg, place)?;
+ }
+ InlineAsmOperand::Out { reg, late, place: None } => {
+ write!(fmt, "{}out({}) _", print_late(late), reg)?;
+ }
+ InlineAsmOperand::InOut {
+ reg,
+ late,
+ in_value,
+ out_place: Some(out_place),
+ } => {
+ write!(
+ fmt,
+ "in{}out({}) {:?} => {:?}",
+ print_late(late),
+ reg,
+ in_value,
+ out_place
+ )?;
+ }
+ InlineAsmOperand::InOut { reg, late, in_value, out_place: None } => {
+ write!(fmt, "in{}out({}) {:?} => _", print_late(late), reg, in_value)?;
+ }
+ InlineAsmOperand::Const { value } => {
+ write!(fmt, "const {:?}", value)?;
+ }
+ InlineAsmOperand::SymFn { value } => {
+ write!(fmt, "sym_fn {:?}", value)?;
+ }
+ InlineAsmOperand::SymStatic { def_id } => {
+ write!(fmt, "sym_static {:?}", def_id)?;
+ }
+ }
+ }
+ write!(fmt, ", options({:?}))", options)
+ }
+ }
+ }
+
+ /// Returns the list of labels for the edges to the successor basic blocks.
+ pub fn fmt_successor_labels(&self) -> Vec<Cow<'static, str>> {
+ use self::TerminatorKind::*;
+ match *self {
+ Return | Resume | Abort | Unreachable | GeneratorDrop => vec![],
+ Goto { .. } => vec!["".into()],
+ SwitchInt { ref targets, switch_ty, .. } => ty::tls::with(|tcx| {
+ let param_env = ty::ParamEnv::empty();
+ let switch_ty = tcx.lift(switch_ty).unwrap();
+ let size = tcx.layout_of(param_env.and(switch_ty)).unwrap().size;
+ targets
+ .values
+ .iter()
+ .map(|&u| {
+ mir::ConstantKind::from_scalar(tcx, Scalar::from_uint(u, size), switch_ty)
+ .to_string()
+ .into()
+ })
+ .chain(iter::once("otherwise".into()))
+ .collect()
+ }),
+ Call { target: Some(_), cleanup: Some(_), .. } => {
+ vec!["return".into(), "unwind".into()]
+ }
+ Call { target: Some(_), cleanup: None, .. } => vec!["return".into()],
+ Call { target: None, cleanup: Some(_), .. } => vec!["unwind".into()],
+ Call { target: None, cleanup: None, .. } => vec![],
+ Yield { drop: Some(_), .. } => vec!["resume".into(), "drop".into()],
+ Yield { drop: None, .. } => vec!["resume".into()],
+ DropAndReplace { unwind: None, .. } | Drop { unwind: None, .. } => {
+ vec!["return".into()]
+ }
+ DropAndReplace { unwind: Some(_), .. } | Drop { unwind: Some(_), .. } => {
+ vec!["return".into(), "unwind".into()]
+ }
+ Assert { cleanup: None, .. } => vec!["".into()],
+ Assert { .. } => vec!["success".into(), "unwind".into()],
+ FalseEdge { .. } => vec!["real".into(), "imaginary".into()],
+ FalseUnwind { unwind: Some(_), .. } => vec!["real".into(), "cleanup".into()],
+ FalseUnwind { unwind: None, .. } => vec!["real".into()],
+ InlineAsm { destination: Some(_), cleanup: Some(_), .. } => {
+ vec!["return".into(), "unwind".into()]
+ }
+ InlineAsm { destination: Some(_), cleanup: None, .. } => vec!["return".into()],
+ InlineAsm { destination: None, cleanup: Some(_), .. } => vec!["unwind".into()],
+ InlineAsm { destination: None, cleanup: None, .. } => vec![],
+ }
+ }
+}
diff --git a/compiler/rustc_middle/src/mir/traversal.rs b/compiler/rustc_middle/src/mir/traversal.rs
new file mode 100644
index 000000000..627dc32f3
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/traversal.rs
@@ -0,0 +1,388 @@
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_data_structures::sync::OnceCell;
+use rustc_index::bit_set::BitSet;
+use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
+
+use super::*;
+
+/// Preorder traversal of a graph.
+///
+/// Preorder traversal is when each node is visited after at least one of its predecessors. If you
+/// are familiar with some basic graph theory, then this performs a depth first search and returns
+/// nodes in order of discovery time.
+///
+/// ```text
+///
+/// A
+/// / \
+/// / \
+/// B C
+/// \ /
+/// \ /
+/// D
+/// ```
+///
+/// A preorder traversal of this graph is either `A B D C` or `A C D B`
+#[derive(Clone)]
+pub struct Preorder<'a, 'tcx> {
+ body: &'a Body<'tcx>,
+ visited: BitSet<BasicBlock>,
+ worklist: Vec<BasicBlock>,
+ root_is_start_block: bool,
+}
+
+impl<'a, 'tcx> Preorder<'a, 'tcx> {
+ pub fn new(body: &'a Body<'tcx>, root: BasicBlock) -> Preorder<'a, 'tcx> {
+ let worklist = vec![root];
+
+ Preorder {
+ body,
+ visited: BitSet::new_empty(body.basic_blocks().len()),
+ worklist,
+ root_is_start_block: root == START_BLOCK,
+ }
+ }
+}
+
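+/// Convenience function for a [`Preorder`] traversal rooted at `START_BLOCK`.
+///
+/// A sketch of typical usage (illustrative):
+///
+/// ```ignore (illustrative)
+/// for (bb, _data) in traversal::preorder(body) {
+///     // Each block reachable from `START_BLOCK` is yielded exactly once,
+///     // in order of discovery.
+/// }
+/// ```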
+pub fn preorder<'a, 'tcx>(body: &'a Body<'tcx>) -> Preorder<'a, 'tcx> {
+ Preorder::new(body, START_BLOCK)
+}
+
+impl<'a, 'tcx> Iterator for Preorder<'a, 'tcx> {
+ type Item = (BasicBlock, &'a BasicBlockData<'tcx>);
+
+ fn next(&mut self) -> Option<(BasicBlock, &'a BasicBlockData<'tcx>)> {
+ while let Some(idx) = self.worklist.pop() {
+ if !self.visited.insert(idx) {
+ continue;
+ }
+
+ let data = &self.body[idx];
+
+ if let Some(ref term) = data.terminator {
+ self.worklist.extend(term.successors());
+ }
+
+ return Some((idx, data));
+ }
+
+ None
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ // All the blocks, minus the number of blocks we've visited.
+ let upper = self.body.basic_blocks().len() - self.visited.count();
+
+ let lower = if self.root_is_start_block {
+ // We will visit all remaining blocks exactly once.
+ upper
+ } else {
+ self.worklist.len()
+ };
+
+ (lower, Some(upper))
+ }
+}
+
+/// Postorder traversal of a graph.
+///
+/// Postorder traversal is when each node is visited after all of its successors, except when the
+/// successor is only reachable by a back-edge. If you are familiar with some basic graph theory,
+/// then this performs a depth first search and returns nodes in order of completion time.
+///
+///
+/// ```text
+///
+/// A
+/// / \
+/// / \
+/// B C
+/// \ /
+/// \ /
+/// D
+/// ```
+///
+/// A Postorder traversal of this graph is `D B C A` or `D C B A`
+pub struct Postorder<'a, 'tcx> {
+ basic_blocks: &'a IndexVec<BasicBlock, BasicBlockData<'tcx>>,
+ visited: BitSet<BasicBlock>,
+ visit_stack: Vec<(BasicBlock, Successors<'a>)>,
+ root_is_start_block: bool,
+}
+
+impl<'a, 'tcx> Postorder<'a, 'tcx> {
+ pub fn new(
+ basic_blocks: &'a IndexVec<BasicBlock, BasicBlockData<'tcx>>,
+ root: BasicBlock,
+ ) -> Postorder<'a, 'tcx> {
+ let mut po = Postorder {
+ basic_blocks,
+ visited: BitSet::new_empty(basic_blocks.len()),
+ visit_stack: Vec::new(),
+ root_is_start_block: root == START_BLOCK,
+ };
+
+ let data = &po.basic_blocks[root];
+
+ if let Some(ref term) = data.terminator {
+ po.visited.insert(root);
+ po.visit_stack.push((root, term.successors()));
+ po.traverse_successor();
+ }
+
+ po
+ }
+
+ fn traverse_successor(&mut self) {
+        // This is quite a complex loop, both because the borrow checker doesn't like the
+        // obvious formulation and because what exactly is going on is not immediately clear.
+ //
+ // It does the actual traversal of the graph, while the `next` method on the iterator
+ // just pops off of the stack. `visit_stack` is a stack containing pairs of nodes and
+ // iterators over the successors of those nodes. Each iteration attempts to get the next
+ // node from the top of the stack, then pushes that node and an iterator over the
+ // successors to the top of the stack. This loop only grows `visit_stack`, stopping when
+ // we reach a child that has no children that we haven't already visited.
+ //
+ // For a graph that looks like this:
+ //
+ // A
+ // / \
+ // / \
+ // B C
+ // | |
+ // | |
+ // D |
+ // \ /
+ // \ /
+ // E
+ //
+ // The state of the stack starts out with just the root node (`A` in this case);
+ // [(A, [B, C])]
+ //
+ // When the first call to `traverse_successor` happens, the following happens:
+ //
+ // [(B, [D]), // `B` taken from the successors of `A`, pushed to the
+ // // top of the stack along with the successors of `B`
+ // (A, [C])]
+ //
+ // [(D, [E]), // `D` taken from successors of `B`, pushed to stack
+ // (B, []),
+ // (A, [C])]
+ //
+ // [(E, []), // `E` taken from successors of `D`, pushed to stack
+ // (D, []),
+ // (B, []),
+ // (A, [C])]
+ //
+ // Now that the top of the stack has no successors we can traverse, each item will
+ // be popped off during iteration until we get back to `A`. This yields [E, D, B].
+ //
+ // When we yield `B` and call `traverse_successor`, we push `C` to the stack, but
+ // since we've already visited `E`, that child isn't added to the stack. The last
+ // two iterations yield `C` and finally `A` for a final traversal of [E, D, B, C, A]
+ loop {
+ let bb = if let Some(&mut (_, ref mut iter)) = self.visit_stack.last_mut() {
+ if let Some(bb) = iter.next() {
+ bb
+ } else {
+ break;
+ }
+ } else {
+ break;
+ };
+
+ if self.visited.insert(bb) {
+ if let Some(term) = &self.basic_blocks[bb].terminator {
+ self.visit_stack.push((bb, term.successors()));
+ }
+ }
+ }
+ }
+}
+
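+/// Convenience function for a [`Postorder`] traversal rooted at `START_BLOCK`.
+///
+/// A sketch of typical usage (illustrative); since each block is yielded only after its
+/// non-back-edge successors, this order suits backward analyses:
+///
+/// ```ignore (illustrative)
+/// for (bb, _data) in traversal::postorder(body) {
+///     // Successors of `bb` (ignoring back edges) have already been yielded.
+/// }
+/// ```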
+pub fn postorder<'a, 'tcx>(body: &'a Body<'tcx>) -> Postorder<'a, 'tcx> {
+ Postorder::new(&body.basic_blocks, START_BLOCK)
+}
+
+impl<'a, 'tcx> Iterator for Postorder<'a, 'tcx> {
+ type Item = (BasicBlock, &'a BasicBlockData<'tcx>);
+
+ fn next(&mut self) -> Option<(BasicBlock, &'a BasicBlockData<'tcx>)> {
+ let next = self.visit_stack.pop();
+ if next.is_some() {
+ self.traverse_successor();
+ }
+
+ next.map(|(bb, _)| (bb, &self.basic_blocks[bb]))
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ // All the blocks, minus the number of blocks we've visited.
+ let upper = self.basic_blocks.len() - self.visited.count();
+
+ let lower = if self.root_is_start_block {
+ // We will visit all remaining blocks exactly once.
+ upper
+ } else {
+ self.visit_stack.len()
+ };
+
+ (lower, Some(upper))
+ }
+}
+
+/// Reverse postorder traversal of a graph
+///
+/// Reverse postorder is the reverse order of a postorder traversal.
+/// This is different from a preorder traversal and represents a natural
+/// linearization of control flow.
+///
+/// ```text
+///
+/// A
+/// / \
+/// / \
+/// B C
+/// \ /
+/// \ /
+/// D
+/// ```
+///
+/// A reverse postorder traversal of this graph is either `A B C D` or `A C B D`.
+/// Note that for a graph containing no loops (i.e., a DAG), this is equivalent to
+/// a topological sort.
+///
+/// Construction of a `ReversePostorder` traversal requires doing a full
+/// postorder traversal of the graph, so this traversal should be
+/// constructed as few times as possible.
+#[derive(Clone)]
+pub struct ReversePostorder<'a, 'tcx> {
+ body: &'a Body<'tcx>,
+ blocks: Vec<BasicBlock>,
+ idx: usize,
+}
+
+impl<'a, 'tcx> ReversePostorder<'a, 'tcx> {
+ pub fn new(body: &'a Body<'tcx>, root: BasicBlock) -> ReversePostorder<'a, 'tcx> {
+ let blocks: Vec<_> = Postorder::new(&body.basic_blocks, root).map(|(bb, _)| bb).collect();
+ let len = blocks.len();
+ ReversePostorder { body, blocks, idx: len }
+ }
+}
+
+impl<'a, 'tcx> Iterator for ReversePostorder<'a, 'tcx> {
+ type Item = (BasicBlock, &'a BasicBlockData<'tcx>);
+
+ fn next(&mut self) -> Option<(BasicBlock, &'a BasicBlockData<'tcx>)> {
+ if self.idx == 0 {
+ return None;
+ }
+ self.idx -= 1;
+
+ self.blocks.get(self.idx).map(|&bb| (bb, &self.body[bb]))
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (self.idx, Some(self.idx))
+ }
+}
+
+impl<'a, 'tcx> ExactSizeIterator for ReversePostorder<'a, 'tcx> {}
+
+/// Returns an iterator over all basic blocks reachable from the `START_BLOCK` in no particular
+/// order.
+///
+/// This is clearer than writing `preorder` in cases where the order doesn't matter.
+pub fn reachable<'a, 'tcx>(
+ body: &'a Body<'tcx>,
+) -> impl 'a + Iterator<Item = (BasicBlock, &'a BasicBlockData<'tcx>)> {
+ preorder(body)
+}
+
+/// Returns a `BitSet` containing all basic blocks reachable from the `START_BLOCK`.
+pub fn reachable_as_bitset<'tcx>(body: &Body<'tcx>) -> BitSet<BasicBlock> {
+ let mut iter = preorder(body);
+ (&mut iter).for_each(drop);
+ iter.visited
+}
+
+#[derive(Clone)]
+pub struct ReversePostorderIter<'a, 'tcx> {
+ body: &'a Body<'tcx>,
+ blocks: &'a [BasicBlock],
+ idx: usize,
+}
+
+impl<'a, 'tcx> Iterator for ReversePostorderIter<'a, 'tcx> {
+ type Item = (BasicBlock, &'a BasicBlockData<'tcx>);
+
+ fn next(&mut self) -> Option<(BasicBlock, &'a BasicBlockData<'tcx>)> {
+ if self.idx == 0 {
+ return None;
+ }
+ self.idx -= 1;
+
+ self.blocks.get(self.idx).map(|&bb| (bb, &self.body[bb]))
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (self.idx, Some(self.idx))
+ }
+}
+
+impl<'a, 'tcx> ExactSizeIterator for ReversePostorderIter<'a, 'tcx> {}
+
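+/// Returns an iterator over basic blocks in reverse postorder, built by walking the body's
+/// cached postorder backwards.
+///
+/// A sketch of typical usage (illustrative); reverse postorder yields each block before its
+/// non-back-edge successors, which suits forward analyses:
+///
+/// ```ignore (illustrative)
+/// for (bb, _data) in traversal::reverse_postorder(body) {
+///     // Predecessors of `bb` (ignoring back edges) have already been yielded.
+/// }
+/// ```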
+pub fn reverse_postorder<'a, 'tcx>(body: &'a Body<'tcx>) -> ReversePostorderIter<'a, 'tcx> {
+ let blocks = body.basic_blocks.postorder();
+ let len = blocks.len();
+ ReversePostorderIter { body, blocks, idx: len }
+}
+
+#[derive(Clone, Debug)]
+pub(super) struct PostorderCache {
+ cache: OnceCell<Vec<BasicBlock>>,
+}
+
+impl PostorderCache {
+ #[inline]
+ pub(super) fn new() -> Self {
+ PostorderCache { cache: OnceCell::new() }
+ }
+
+ /// Invalidates the postorder cache.
+ #[inline]
+ pub(super) fn invalidate(&mut self) {
+ self.cache = OnceCell::new();
+ }
+
+    /// Returns the `&[BasicBlock]` representing the postorder traversal of this MIR.
+ #[inline]
+ pub(super) fn compute(&self, body: &IndexVec<BasicBlock, BasicBlockData<'_>>) -> &[BasicBlock] {
+ self.cache.get_or_init(|| Postorder::new(body, START_BLOCK).map(|(bb, _)| bb).collect())
+ }
+}
+
+impl<S: Encoder> Encodable<S> for PostorderCache {
+ #[inline]
+ fn encode(&self, _s: &mut S) {}
+}
+
+impl<D: Decoder> Decodable<D> for PostorderCache {
+ #[inline]
+ fn decode(_: &mut D) -> Self {
+ Self::new()
+ }
+}
+
+impl<CTX> HashStable<CTX> for PostorderCache {
+ #[inline]
+ fn hash_stable(&self, _: &mut CTX, _: &mut StableHasher) {
+ // do nothing
+ }
+}
+
+TrivialTypeTraversalAndLiftImpls! {
+ PostorderCache,
+}
diff --git a/compiler/rustc_middle/src/mir/type_foldable.rs b/compiler/rustc_middle/src/mir/type_foldable.rs
new file mode 100644
index 000000000..82a6b0c50
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/type_foldable.rs
@@ -0,0 +1,240 @@
+//! `TypeFoldable` implementations for MIR types
+
+use super::*;
+use crate::ty;
+use rustc_data_structures::functor::IdFunctor;
+
+TrivialTypeTraversalAndLiftImpls! {
+ BlockTailInfo,
+ MirPhase,
+ SourceInfo,
+ FakeReadCause,
+ RetagKind,
+ SourceScope,
+ SourceScopeLocalData,
+ UserTypeAnnotationIndex,
+}
+
+impl<'tcx> TypeFoldable<'tcx> for Terminator<'tcx> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ use crate::mir::TerminatorKind::*;
+
+ let kind = match self.kind {
+ Goto { target } => Goto { target },
+ SwitchInt { discr, switch_ty, targets } => SwitchInt {
+ discr: discr.try_fold_with(folder)?,
+ switch_ty: switch_ty.try_fold_with(folder)?,
+ targets,
+ },
+ Drop { place, target, unwind } => {
+ Drop { place: place.try_fold_with(folder)?, target, unwind }
+ }
+ DropAndReplace { place, value, target, unwind } => DropAndReplace {
+ place: place.try_fold_with(folder)?,
+ value: value.try_fold_with(folder)?,
+ target,
+ unwind,
+ },
+ Yield { value, resume, resume_arg, drop } => Yield {
+ value: value.try_fold_with(folder)?,
+ resume,
+ resume_arg: resume_arg.try_fold_with(folder)?,
+ drop,
+ },
+ Call { func, args, destination, target, cleanup, from_hir_call, fn_span } => Call {
+ func: func.try_fold_with(folder)?,
+ args: args.try_fold_with(folder)?,
+ destination: destination.try_fold_with(folder)?,
+ target,
+ cleanup,
+ from_hir_call,
+ fn_span,
+ },
+ Assert { cond, expected, msg, target, cleanup } => {
+ use AssertKind::*;
+ let msg = match msg {
+ BoundsCheck { len, index } => BoundsCheck {
+ len: len.try_fold_with(folder)?,
+ index: index.try_fold_with(folder)?,
+ },
+ Overflow(op, l, r) => {
+ Overflow(op, l.try_fold_with(folder)?, r.try_fold_with(folder)?)
+ }
+ OverflowNeg(op) => OverflowNeg(op.try_fold_with(folder)?),
+ DivisionByZero(op) => DivisionByZero(op.try_fold_with(folder)?),
+ RemainderByZero(op) => RemainderByZero(op.try_fold_with(folder)?),
+ ResumedAfterReturn(_) | ResumedAfterPanic(_) => msg,
+ };
+ Assert { cond: cond.try_fold_with(folder)?, expected, msg, target, cleanup }
+ }
+ GeneratorDrop => GeneratorDrop,
+ Resume => Resume,
+ Abort => Abort,
+ Return => Return,
+ Unreachable => Unreachable,
+ FalseEdge { real_target, imaginary_target } => {
+ FalseEdge { real_target, imaginary_target }
+ }
+ FalseUnwind { real_target, unwind } => FalseUnwind { real_target, unwind },
+ InlineAsm { template, operands, options, line_spans, destination, cleanup } => {
+ InlineAsm {
+ template,
+ operands: operands.try_fold_with(folder)?,
+ options,
+ line_spans,
+ destination,
+ cleanup,
+ }
+ }
+ };
+ Ok(Terminator { source_info: self.source_info, kind })
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for GeneratorKind {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, _: &mut F) -> Result<Self, F::Error> {
+ Ok(self)
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for Place<'tcx> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ Ok(Place {
+ local: self.local.try_fold_with(folder)?,
+ projection: self.projection.try_fold_with(folder)?,
+ })
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List<PlaceElem<'tcx>> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ ty::util::fold_list(self, folder, |tcx, v| tcx.intern_place_elems(v))
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for Rvalue<'tcx> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ use crate::mir::Rvalue::*;
+ Ok(match self {
+ Use(op) => Use(op.try_fold_with(folder)?),
+ Repeat(op, len) => Repeat(op.try_fold_with(folder)?, len.try_fold_with(folder)?),
+ ThreadLocalRef(did) => ThreadLocalRef(did.try_fold_with(folder)?),
+ Ref(region, bk, place) => {
+ Ref(region.try_fold_with(folder)?, bk, place.try_fold_with(folder)?)
+ }
+ CopyForDeref(place) => CopyForDeref(place.try_fold_with(folder)?),
+ AddressOf(mutability, place) => AddressOf(mutability, place.try_fold_with(folder)?),
+ Len(place) => Len(place.try_fold_with(folder)?),
+ Cast(kind, op, ty) => Cast(kind, op.try_fold_with(folder)?, ty.try_fold_with(folder)?),
+ BinaryOp(op, box (rhs, lhs)) => {
+ BinaryOp(op, Box::new((rhs.try_fold_with(folder)?, lhs.try_fold_with(folder)?)))
+ }
+ CheckedBinaryOp(op, box (rhs, lhs)) => CheckedBinaryOp(
+ op,
+ Box::new((rhs.try_fold_with(folder)?, lhs.try_fold_with(folder)?)),
+ ),
+ UnaryOp(op, val) => UnaryOp(op, val.try_fold_with(folder)?),
+ Discriminant(place) => Discriminant(place.try_fold_with(folder)?),
+ NullaryOp(op, ty) => NullaryOp(op, ty.try_fold_with(folder)?),
+ Aggregate(kind, fields) => {
+ let kind = kind.try_map_id(|kind| {
+ Ok(match kind {
+ AggregateKind::Array(ty) => AggregateKind::Array(ty.try_fold_with(folder)?),
+ AggregateKind::Tuple => AggregateKind::Tuple,
+ AggregateKind::Adt(def, v, substs, user_ty, n) => AggregateKind::Adt(
+ def,
+ v,
+ substs.try_fold_with(folder)?,
+ user_ty.try_fold_with(folder)?,
+ n,
+ ),
+ AggregateKind::Closure(id, substs) => {
+ AggregateKind::Closure(id, substs.try_fold_with(folder)?)
+ }
+ AggregateKind::Generator(id, substs, movability) => {
+ AggregateKind::Generator(id, substs.try_fold_with(folder)?, movability)
+ }
+ })
+ })?;
+ Aggregate(kind, fields.try_fold_with(folder)?)
+ }
+ ShallowInitBox(op, ty) => {
+ ShallowInitBox(op.try_fold_with(folder)?, ty.try_fold_with(folder)?)
+ }
+ })
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for Operand<'tcx> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ Ok(match self {
+ Operand::Copy(place) => Operand::Copy(place.try_fold_with(folder)?),
+ Operand::Move(place) => Operand::Move(place.try_fold_with(folder)?),
+ Operand::Constant(c) => Operand::Constant(c.try_fold_with(folder)?),
+ })
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for PlaceElem<'tcx> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ use crate::mir::ProjectionElem::*;
+
+ Ok(match self {
+ Deref => Deref,
+ Field(f, ty) => Field(f, ty.try_fold_with(folder)?),
+ Index(v) => Index(v.try_fold_with(folder)?),
+ Downcast(symbol, variantidx) => Downcast(symbol, variantidx),
+ ConstantIndex { offset, min_length, from_end } => {
+ ConstantIndex { offset, min_length, from_end }
+ }
+ Subslice { from, to, from_end } => Subslice { from, to, from_end },
+ })
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for Field {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, _: &mut F) -> Result<Self, F::Error> {
+ Ok(self)
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for GeneratorSavedLocal {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, _: &mut F) -> Result<Self, F::Error> {
+ Ok(self)
+ }
+}
+
+impl<'tcx, R: Idx, C: Idx> TypeFoldable<'tcx> for BitMatrix<R, C> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, _: &mut F) -> Result<Self, F::Error> {
+ Ok(self)
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for Constant<'tcx> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ Ok(Constant {
+ span: self.span,
+ user_ty: self.user_ty.try_fold_with(folder)?,
+ literal: self.literal.try_fold_with(folder)?,
+ })
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for ConstantKind<'tcx> {
+ #[inline(always)]
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ folder.try_fold_mir_const(self)
+ }
+}
+
+impl<'tcx> TypeSuperFoldable<'tcx> for ConstantKind<'tcx> {
+ fn try_super_fold_with<F: FallibleTypeFolder<'tcx>>(
+ self,
+ folder: &mut F,
+ ) -> Result<Self, F::Error> {
+ match self {
+ ConstantKind::Ty(c) => Ok(ConstantKind::Ty(c.try_fold_with(folder)?)),
+ ConstantKind::Val(v, t) => Ok(ConstantKind::Val(v, t.try_fold_with(folder)?)),
+ }
+ }
+}
diff --git a/compiler/rustc_middle/src/mir/type_visitable.rs b/compiler/rustc_middle/src/mir/type_visitable.rs
new file mode 100644
index 000000000..6a0801cb0
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/type_visitable.rs
@@ -0,0 +1,190 @@
+//! `TypeVisitable` implementations for MIR types
+
+use super::*;
+use crate::ty;
+
+impl<'tcx> TypeVisitable<'tcx> for Terminator<'tcx> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ use crate::mir::TerminatorKind::*;
+
+ match self.kind {
+ SwitchInt { ref discr, switch_ty, .. } => {
+ discr.visit_with(visitor)?;
+ switch_ty.visit_with(visitor)
+ }
+ Drop { ref place, .. } => place.visit_with(visitor),
+ DropAndReplace { ref place, ref value, .. } => {
+ place.visit_with(visitor)?;
+ value.visit_with(visitor)
+ }
+ Yield { ref value, .. } => value.visit_with(visitor),
+ Call { ref func, ref args, ref destination, .. } => {
+ destination.visit_with(visitor)?;
+ func.visit_with(visitor)?;
+ args.visit_with(visitor)
+ }
+ Assert { ref cond, ref msg, .. } => {
+ cond.visit_with(visitor)?;
+ use AssertKind::*;
+ match msg {
+ BoundsCheck { ref len, ref index } => {
+ len.visit_with(visitor)?;
+ index.visit_with(visitor)
+ }
+ Overflow(_, l, r) => {
+ l.visit_with(visitor)?;
+ r.visit_with(visitor)
+ }
+ OverflowNeg(op) | DivisionByZero(op) | RemainderByZero(op) => {
+ op.visit_with(visitor)
+ }
+ ResumedAfterReturn(_) | ResumedAfterPanic(_) => ControlFlow::CONTINUE,
+ }
+ }
+ InlineAsm { ref operands, .. } => operands.visit_with(visitor),
+ Goto { .. }
+ | Resume
+ | Abort
+ | Return
+ | GeneratorDrop
+ | Unreachable
+ | FalseEdge { .. }
+ | FalseUnwind { .. } => ControlFlow::CONTINUE,
+ }
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for GeneratorKind {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, _: &mut V) -> ControlFlow<V::BreakTy> {
+ ControlFlow::CONTINUE
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for Place<'tcx> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ self.local.visit_with(visitor)?;
+ self.projection.visit_with(visitor)
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for &'tcx ty::List<PlaceElem<'tcx>> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ self.iter().try_for_each(|t| t.visit_with(visitor))
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for Rvalue<'tcx> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ use crate::mir::Rvalue::*;
+ match *self {
+ Use(ref op) => op.visit_with(visitor),
+ CopyForDeref(ref place) => {
+ let op = &Operand::Copy(*place);
+ op.visit_with(visitor)
+ }
+ Repeat(ref op, _) => op.visit_with(visitor),
+ ThreadLocalRef(did) => did.visit_with(visitor),
+ Ref(region, _, ref place) => {
+ region.visit_with(visitor)?;
+ place.visit_with(visitor)
+ }
+ AddressOf(_, ref place) => place.visit_with(visitor),
+ Len(ref place) => place.visit_with(visitor),
+ Cast(_, ref op, ty) => {
+ op.visit_with(visitor)?;
+ ty.visit_with(visitor)
+ }
+ BinaryOp(_, box (ref rhs, ref lhs)) | CheckedBinaryOp(_, box (ref rhs, ref lhs)) => {
+ rhs.visit_with(visitor)?;
+ lhs.visit_with(visitor)
+ }
+ UnaryOp(_, ref val) => val.visit_with(visitor),
+ Discriminant(ref place) => place.visit_with(visitor),
+ NullaryOp(_, ty) => ty.visit_with(visitor),
+ Aggregate(ref kind, ref fields) => {
+ match **kind {
+ AggregateKind::Array(ty) => {
+ ty.visit_with(visitor)?;
+ }
+ AggregateKind::Tuple => {}
+ AggregateKind::Adt(_, _, substs, user_ty, _) => {
+ substs.visit_with(visitor)?;
+ user_ty.visit_with(visitor)?;
+ }
+ AggregateKind::Closure(_, substs) => {
+ substs.visit_with(visitor)?;
+ }
+ AggregateKind::Generator(_, substs, _) => {
+ substs.visit_with(visitor)?;
+ }
+ }
+ fields.visit_with(visitor)
+ }
+ ShallowInitBox(ref op, ty) => {
+ op.visit_with(visitor)?;
+ ty.visit_with(visitor)
+ }
+ }
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for Operand<'tcx> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ match *self {
+ Operand::Copy(ref place) | Operand::Move(ref place) => place.visit_with(visitor),
+ Operand::Constant(ref c) => c.visit_with(visitor),
+ }
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for PlaceElem<'tcx> {
+ fn visit_with<Vs: TypeVisitor<'tcx>>(&self, visitor: &mut Vs) -> ControlFlow<Vs::BreakTy> {
+ use crate::mir::ProjectionElem::*;
+
+ match self {
+ Field(_, ty) => ty.visit_with(visitor),
+ Index(v) => v.visit_with(visitor),
+ _ => ControlFlow::CONTINUE,
+ }
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for Field {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, _: &mut V) -> ControlFlow<V::BreakTy> {
+ ControlFlow::CONTINUE
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for GeneratorSavedLocal {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, _: &mut V) -> ControlFlow<V::BreakTy> {
+ ControlFlow::CONTINUE
+ }
+}
+
+impl<'tcx, R: Idx, C: Idx> TypeVisitable<'tcx> for BitMatrix<R, C> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, _: &mut V) -> ControlFlow<V::BreakTy> {
+ ControlFlow::CONTINUE
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for Constant<'tcx> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ self.literal.visit_with(visitor)?;
+ self.user_ty.visit_with(visitor)
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for ConstantKind<'tcx> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ visitor.visit_mir_const(*self)
+ }
+}
+
+impl<'tcx> TypeSuperVisitable<'tcx> for ConstantKind<'tcx> {
+ fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ match *self {
+ ConstantKind::Ty(c) => c.visit_with(visitor),
+ ConstantKind::Val(_, t) => t.visit_with(visitor),
+ }
+ }
+}
diff --git a/compiler/rustc_middle/src/mir/visit.rs b/compiler/rustc_middle/src/mir/visit.rs
new file mode 100644
index 000000000..891608764
--- /dev/null
+++ b/compiler/rustc_middle/src/mir/visit.rs
@@ -0,0 +1,1330 @@
+//! # The MIR Visitor
+//!
+//! ## Overview
+//!
+//! There are two visitors, one for immutable and one for mutable references,
+//! but both are generated by the following macro. The code is written according
+//! to the following conventions:
+//!
+//! - introduce a `visit_foo` and a `super_foo` method for every MIR type
+//! - `visit_foo`, by default, calls `super_foo`
+//! - `super_foo`, by default, destructures the `foo` and calls `visit_foo`
+//!
+//! This allows you as a user to override `visit_foo` for the types you are
+//! interested in, and invoke (within that method) `self.super_foo` to get
+//! the default behavior. Just as in an OO language, you should never call
+//! `super` methods ordinarily except in that circumstance.
+//!
+//! For the most part, we do not destructure things external to the
+//! MIR, e.g., types, spans, etc., but simply visit them and stop. This
+//! avoids duplication with other visitors like `TypeFoldable`.
+//!
+//! ## Updating
+//!
+//! The code is written in a very deliberate style intended to minimize
+//! the chance of things being overlooked. You'll notice that we always
+//! use pattern matching to reference fields and we ensure that all
+//! matches are exhaustive.
+//!
+//! For example, the `super_basic_block_data` method begins like this:
+//!
+//! ```ignore (pseudo-rust)
+//! fn super_basic_block_data(
+//! &mut self,
+//! block: BasicBlock,
+//! data: & $($mutability)? BasicBlockData<'tcx>
+//! ) {
+//! let BasicBlockData {
+//! statements,
+//! terminator,
+//! is_cleanup: _
+//! } = *data;
+//!
+//! for statement in statements {
+//! self.visit_statement(block, statement);
+//! }
+//!
+//! ...
+//! }
+//! ```
+//!
+//! Here we used `let BasicBlockData { <fields> } = *data` deliberately,
+//! rather than writing `data.statements` in the body. This is because if one
+//! adds a new field to `BasicBlockData`, one will be forced to revise this code,
+//! and hence one will (hopefully) invoke the correct visit methods (if any).
+//!
+//! For this to work, ALL MATCHES MUST BE EXHAUSTIVE IN FIELDS AND VARIANTS.
+//! That means you never write `..` to skip over fields, nor do you write `_`
+//! to skip over variants in a `match`.
+//!
+//! The only place that `_` is acceptable is to match a field (or
+//! variant argument) that does not require visiting, as in
+//! `is_cleanup` above.
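+//!
+//! ## Example
+//!
+//! As an illustrative sketch (not taken from this crate's tests; the
+//! `ConstantCounter` name is made up), a read-only visitor that counts the
+//! constants in a body might look like:
+//!
+//! ```ignore (illustrative)
+//! struct ConstantCounter {
+//!     count: usize,
+//! }
+//!
+//! impl<'tcx> Visitor<'tcx> for ConstantCounter {
+//!     fn visit_constant(&mut self, constant: &Constant<'tcx>, location: Location) {
+//!         self.count += 1;
+//!         // Recurse with the default behavior so nested contents still get visited.
+//!         self.super_constant(constant, location);
+//!     }
+//! }
+//!
+//! // Given some `body: &Body<'tcx>`:
+//! // let mut counter = ConstantCounter { count: 0 };
+//! // counter.visit_body(body);
+//! ```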
+
+use crate::mir::*;
+use crate::ty::subst::SubstsRef;
+use crate::ty::{CanonicalUserTypeAnnotation, Ty};
+use rustc_span::Span;
+
+macro_rules! make_mir_visitor {
+ ($visitor_trait_name:ident, $($mutability:ident)?) => {
+ pub trait $visitor_trait_name<'tcx> {
+ // Override these, and call `self.super_xxx` to revert back to the
+ // default behavior.
+
+ fn visit_body(
+ &mut self,
+ body: &$($mutability)? Body<'tcx>,
+ ) {
+ self.super_body(body);
+ }
+
+ fn visit_basic_block_data(
+ &mut self,
+ block: BasicBlock,
+ data: & $($mutability)? BasicBlockData<'tcx>,
+ ) {
+ self.super_basic_block_data(block, data);
+ }
+
+ fn visit_source_scope_data(
+ &mut self,
+ scope_data: & $($mutability)? SourceScopeData<'tcx>,
+ ) {
+ self.super_source_scope_data(scope_data);
+ }
+
+ fn visit_statement(
+ &mut self,
+ statement: & $($mutability)? Statement<'tcx>,
+ location: Location,
+ ) {
+ self.super_statement(statement, location);
+ }
+
+ fn visit_assign(
+ &mut self,
+ place: & $($mutability)? Place<'tcx>,
+ rvalue: & $($mutability)? Rvalue<'tcx>,
+ location: Location,
+ ) {
+ self.super_assign(place, rvalue, location);
+ }
+
+ fn visit_terminator(
+ &mut self,
+ terminator: & $($mutability)? Terminator<'tcx>,
+ location: Location,
+ ) {
+ self.super_terminator(terminator, location);
+ }
+
+ fn visit_assert_message(
+ &mut self,
+ msg: & $($mutability)? AssertMessage<'tcx>,
+ location: Location,
+ ) {
+ self.super_assert_message(msg, location);
+ }
+
+ fn visit_rvalue(
+ &mut self,
+ rvalue: & $($mutability)? Rvalue<'tcx>,
+ location: Location,
+ ) {
+ self.super_rvalue(rvalue, location);
+ }
+
+ fn visit_operand(
+ &mut self,
+ operand: & $($mutability)? Operand<'tcx>,
+ location: Location,
+ ) {
+ self.super_operand(operand, location);
+ }
+
+ fn visit_ascribe_user_ty(
+ &mut self,
+ place: & $($mutability)? Place<'tcx>,
+ variance: $(& $mutability)? ty::Variance,
+ user_ty: & $($mutability)? UserTypeProjection,
+ location: Location,
+ ) {
+ self.super_ascribe_user_ty(place, variance, user_ty, location);
+ }
+
+ fn visit_coverage(
+ &mut self,
+ coverage: & $($mutability)? Coverage,
+ location: Location,
+ ) {
+ self.super_coverage(coverage, location);
+ }
+
+ fn visit_retag(
+ &mut self,
+ kind: $(& $mutability)? RetagKind,
+ place: & $($mutability)? Place<'tcx>,
+ location: Location,
+ ) {
+ self.super_retag(kind, place, location);
+ }
+
+ fn visit_place(
+ &mut self,
+ place: & $($mutability)? Place<'tcx>,
+ context: PlaceContext,
+ location: Location,
+ ) {
+ self.super_place(place, context, location);
+ }
+
+ visit_place_fns!($($mutability)?);
+
+ fn visit_constant(
+ &mut self,
+ constant: & $($mutability)? Constant<'tcx>,
+ location: Location,
+ ) {
+ self.super_constant(constant, location);
+ }
+
+ fn visit_span(
+ &mut self,
+ span: $(& $mutability)? Span,
+ ) {
+ self.super_span(span);
+ }
+
+ fn visit_source_info(
+ &mut self,
+ source_info: & $($mutability)? SourceInfo,
+ ) {
+ self.super_source_info(source_info);
+ }
+
+ fn visit_ty(
+ &mut self,
+ ty: $(& $mutability)? Ty<'tcx>,
+ _: TyContext,
+ ) {
+ self.super_ty(ty);
+ }
+
+ fn visit_user_type_projection(
+ &mut self,
+ ty: & $($mutability)? UserTypeProjection,
+ ) {
+ self.super_user_type_projection(ty);
+ }
+
+ fn visit_user_type_annotation(
+ &mut self,
+ index: UserTypeAnnotationIndex,
+ ty: & $($mutability)? CanonicalUserTypeAnnotation<'tcx>,
+ ) {
+ self.super_user_type_annotation(index, ty);
+ }
+
+ fn visit_region(
+ &mut self,
+ region: $(& $mutability)? ty::Region<'tcx>,
+ _: Location,
+ ) {
+ self.super_region(region);
+ }
+
+ fn visit_const(
+ &mut self,
+ constant: $(& $mutability)? ty::Const<'tcx>,
+ _: Location,
+ ) {
+ self.super_const(constant);
+ }
+
+ fn visit_substs(
+ &mut self,
+ substs: & $($mutability)? SubstsRef<'tcx>,
+ _: Location,
+ ) {
+ self.super_substs(substs);
+ }
+
+ fn visit_local_decl(
+ &mut self,
+ local: Local,
+ local_decl: & $($mutability)? LocalDecl<'tcx>,
+ ) {
+ self.super_local_decl(local, local_decl);
+ }
+
+ fn visit_var_debug_info(
+ &mut self,
+ var_debug_info: & $($mutability)? VarDebugInfo<'tcx>,
+ ) {
+ self.super_var_debug_info(var_debug_info);
+ }
+
+ fn visit_local(
+ &mut self,
+ _local: $(& $mutability)? Local,
+ _context: PlaceContext,
+ _location: Location,
+ ) {}
+
+ fn visit_source_scope(
+ &mut self,
+ scope: $(& $mutability)? SourceScope,
+ ) {
+ self.super_source_scope(scope);
+ }
+
+ // The `super_xxx` methods comprise the default behavior and are
+ // not meant to be overridden.
+
+ fn super_body(
+ &mut self,
+ body: &$($mutability)? Body<'tcx>,
+ ) {
+ let span = body.span;
+ if let Some(gen) = &$($mutability)? body.generator {
+ if let Some(yield_ty) = $(& $mutability)? gen.yield_ty {
+ self.visit_ty(
+ yield_ty,
+ TyContext::YieldTy(SourceInfo::outermost(span))
+ );
+ }
+ }
+
+ // for best performance, we want to use an iterator rather
+ // than a for-loop, to avoid calling `body::Body::invalidate` for
+ // each basic block.
+ #[allow(unused_macro_rules)]
+ macro_rules! basic_blocks {
+ (mut) => (body.basic_blocks_mut().iter_enumerated_mut());
+ () => (body.basic_blocks().iter_enumerated());
+ }
+ for (bb, data) in basic_blocks!($($mutability)?) {
+ self.visit_basic_block_data(bb, data);
+ }
+
+ for scope in &$($mutability)? body.source_scopes {
+ self.visit_source_scope_data(scope);
+ }
+
+ self.visit_ty(
+ $(& $mutability)? body.return_ty(),
+ TyContext::ReturnTy(SourceInfo::outermost(body.span))
+ );
+
+ for local in body.local_decls.indices() {
+ self.visit_local_decl(local, & $($mutability)? body.local_decls[local]);
+ }
+
+ #[allow(unused_macro_rules)]
+ macro_rules! type_annotations {
+ (mut) => (body.user_type_annotations.iter_enumerated_mut());
+ () => (body.user_type_annotations.iter_enumerated());
+ }
+
+ for (index, annotation) in type_annotations!($($mutability)?) {
+ self.visit_user_type_annotation(
+ index, annotation
+ );
+ }
+
+ for var_debug_info in &$($mutability)? body.var_debug_info {
+ self.visit_var_debug_info(var_debug_info);
+ }
+
+ self.visit_span($(& $mutability)? body.span);
+
+ for const_ in &$($mutability)? body.required_consts {
+ let location = START_BLOCK.start_location();
+ self.visit_constant(const_, location);
+ }
+ }
+
+ fn super_basic_block_data(&mut self,
+ block: BasicBlock,
+ data: & $($mutability)? BasicBlockData<'tcx>) {
+ let BasicBlockData {
+ statements,
+ terminator,
+ is_cleanup: _
+ } = data;
+
+ let mut index = 0;
+ for statement in statements {
+ let location = Location { block, statement_index: index };
+ self.visit_statement(statement, location);
+ index += 1;
+ }
+
+ if let Some(terminator) = terminator {
+ let location = Location { block, statement_index: index };
+ self.visit_terminator(terminator, location);
+ }
+ }
+
+ fn super_source_scope_data(
+ &mut self,
+ scope_data: & $($mutability)? SourceScopeData<'tcx>,
+ ) {
+ let SourceScopeData {
+ span,
+ parent_scope,
+ inlined,
+ inlined_parent_scope,
+ local_data: _,
+ } = scope_data;
+
+ self.visit_span($(& $mutability)? *span);
+ if let Some(parent_scope) = parent_scope {
+ self.visit_source_scope($(& $mutability)? *parent_scope);
+ }
+ if let Some((callee, callsite_span)) = inlined {
+ let location = START_BLOCK.start_location();
+
+ self.visit_span($(& $mutability)? *callsite_span);
+
+ let ty::Instance { def: callee_def, substs: callee_substs } = callee;
+ match callee_def {
+ ty::InstanceDef::Item(_def_id) => {}
+
+ ty::InstanceDef::Intrinsic(_def_id) |
+ ty::InstanceDef::VTableShim(_def_id) |
+ ty::InstanceDef::ReifyShim(_def_id) |
+ ty::InstanceDef::Virtual(_def_id, _) |
+ ty::InstanceDef::ClosureOnceShim { call_once: _def_id, track_caller: _ } |
+ ty::InstanceDef::DropGlue(_def_id, None) => {}
+
+ ty::InstanceDef::FnPtrShim(_def_id, ty) |
+ ty::InstanceDef::DropGlue(_def_id, Some(ty)) |
+ ty::InstanceDef::CloneShim(_def_id, ty) => {
+ // FIXME(eddyb) use a better `TyContext` here.
+ self.visit_ty($(& $mutability)? *ty, TyContext::Location(location));
+ }
+ }
+ self.visit_substs(callee_substs, location);
+ }
+ if let Some(inlined_parent_scope) = inlined_parent_scope {
+ self.visit_source_scope($(& $mutability)? *inlined_parent_scope);
+ }
+ }
+
+ fn super_statement(&mut self,
+ statement: & $($mutability)? Statement<'tcx>,
+ location: Location) {
+ let Statement {
+ source_info,
+ kind,
+ } = statement;
+
+ self.visit_source_info(source_info);
+ match kind {
+ StatementKind::Assign(
+ box (place, rvalue)
+ ) => {
+ self.visit_assign(place, rvalue, location);
+ }
+ StatementKind::FakeRead(box (_, place)) => {
+ self.visit_place(
+ place,
+ PlaceContext::NonMutatingUse(NonMutatingUseContext::Inspect),
+ location
+ );
+ }
+ StatementKind::SetDiscriminant { place, .. } => {
+ self.visit_place(
+ place,
+ PlaceContext::MutatingUse(MutatingUseContext::SetDiscriminant),
+ location
+ );
+ }
+ StatementKind::Deinit(place) => {
+ self.visit_place(
+ place,
+ PlaceContext::MutatingUse(MutatingUseContext::Deinit),
+ location
+ )
+ }
+ StatementKind::StorageLive(local) => {
+ self.visit_local(
+ $(& $mutability)? *local,
+ PlaceContext::NonUse(NonUseContext::StorageLive),
+ location
+ );
+ }
+ StatementKind::StorageDead(local) => {
+ self.visit_local(
+ $(& $mutability)? *local,
+ PlaceContext::NonUse(NonUseContext::StorageDead),
+ location
+ );
+ }
+ StatementKind::Retag(kind, place) => {
+ self.visit_retag($(& $mutability)? *kind, place, location);
+ }
+ StatementKind::AscribeUserType(
+ box (place, user_ty),
+ variance
+ ) => {
+ self.visit_ascribe_user_ty(place, $(& $mutability)? *variance, user_ty, location);
+ }
+ StatementKind::Coverage(coverage) => {
+ self.visit_coverage(
+ coverage,
+ location
+ )
+ }
+ StatementKind::CopyNonOverlapping(box crate::mir::CopyNonOverlapping{
+ src,
+ dst,
+ count,
+ }) => {
+ self.visit_operand(src, location);
+ self.visit_operand(dst, location);
+ self.visit_operand(count, location)
+ }
+ StatementKind::Nop => {}
+ }
+ }
+
+ fn super_assign(&mut self,
+ place: &$($mutability)? Place<'tcx>,
+ rvalue: &$($mutability)? Rvalue<'tcx>,
+ location: Location) {
+ self.visit_place(
+ place,
+ PlaceContext::MutatingUse(MutatingUseContext::Store),
+ location
+ );
+ self.visit_rvalue(rvalue, location);
+ }
+
+ fn super_terminator(&mut self,
+ terminator: &$($mutability)? Terminator<'tcx>,
+ location: Location) {
+ let Terminator { source_info, kind } = terminator;
+
+ self.visit_source_info(source_info);
+ match kind {
+ TerminatorKind::Goto { .. } |
+ TerminatorKind::Resume |
+ TerminatorKind::Abort |
+ TerminatorKind::GeneratorDrop |
+ TerminatorKind::Unreachable |
+ TerminatorKind::FalseEdge { .. } |
+ TerminatorKind::FalseUnwind { .. } => {}
+
+ TerminatorKind::Return => {
+ // `return` logically moves from the return place `_0`. Note that the place
+ // cannot be changed by any visitor, though.
+ let $($mutability)? local = RETURN_PLACE;
+ self.visit_local(
+ $(& $mutability)? local,
+ PlaceContext::NonMutatingUse(NonMutatingUseContext::Move),
+ location,
+ );
+
+ assert_eq!(
+ local,
+ RETURN_PLACE,
+ "`MutVisitor` tried to mutate return place of `return` terminator"
+ );
+ }
+
+ TerminatorKind::SwitchInt {
+ discr,
+ switch_ty,
+ targets: _
+ } => {
+ self.visit_operand(discr, location);
+ self.visit_ty($(& $mutability)? *switch_ty, TyContext::Location(location));
+ }
+
+ TerminatorKind::Drop {
+ place,
+ target: _,
+ unwind: _,
+ } => {
+ self.visit_place(
+ place,
+ PlaceContext::MutatingUse(MutatingUseContext::Drop),
+ location
+ );
+ }
+
+ TerminatorKind::DropAndReplace {
+ place,
+ value,
+ target: _,
+ unwind: _,
+ } => {
+ self.visit_place(
+ place,
+ PlaceContext::MutatingUse(MutatingUseContext::Drop),
+ location
+ );
+ self.visit_operand(value, location);
+ }
+
+ TerminatorKind::Call {
+ func,
+ args,
+ destination,
+ target: _,
+ cleanup: _,
+ from_hir_call: _,
+ fn_span: _
+ } => {
+ self.visit_operand(func, location);
+ for arg in args {
+ self.visit_operand(arg, location);
+ }
+ self.visit_place(
+ destination,
+ PlaceContext::MutatingUse(MutatingUseContext::Call),
+ location
+ );
+ }
+
+ TerminatorKind::Assert {
+ cond,
+ expected: _,
+ msg,
+ target: _,
+ cleanup: _,
+ } => {
+ self.visit_operand(cond, location);
+ self.visit_assert_message(msg, location);
+ }
+
+ TerminatorKind::Yield {
+ value,
+ resume: _,
+ resume_arg,
+ drop: _,
+ } => {
+ self.visit_operand(value, location);
+ self.visit_place(
+ resume_arg,
+ PlaceContext::MutatingUse(MutatingUseContext::Yield),
+ location,
+ );
+ }
+
+ TerminatorKind::InlineAsm {
+ template: _,
+ operands,
+ options: _,
+ line_spans: _,
+ destination: _,
+ cleanup: _,
+ } => {
+ for op in operands {
+ match op {
+ InlineAsmOperand::In { value, .. } => {
+ self.visit_operand(value, location);
+ }
+ InlineAsmOperand::Out { place: Some(place), .. } => {
+ self.visit_place(
+ place,
+ PlaceContext::MutatingUse(MutatingUseContext::AsmOutput),
+ location,
+ );
+ }
+ InlineAsmOperand::InOut { in_value, out_place, .. } => {
+ self.visit_operand(in_value, location);
+ if let Some(out_place) = out_place {
+ self.visit_place(
+ out_place,
+ PlaceContext::MutatingUse(MutatingUseContext::AsmOutput),
+ location,
+ );
+ }
+ }
+ InlineAsmOperand::Const { value }
+ | InlineAsmOperand::SymFn { value } => {
+ self.visit_constant(value, location);
+ }
+ InlineAsmOperand::Out { place: None, .. }
+ | InlineAsmOperand::SymStatic { def_id: _ } => {}
+ }
+ }
+ }
+ }
+ }
+
+ fn super_assert_message(&mut self,
+ msg: & $($mutability)? AssertMessage<'tcx>,
+ location: Location) {
+ use crate::mir::AssertKind::*;
+ match msg {
+ BoundsCheck { len, index } => {
+ self.visit_operand(len, location);
+ self.visit_operand(index, location);
+ }
+ Overflow(_, l, r) => {
+ self.visit_operand(l, location);
+ self.visit_operand(r, location);
+ }
+ OverflowNeg(op) | DivisionByZero(op) | RemainderByZero(op) => {
+ self.visit_operand(op, location);
+ }
+ ResumedAfterReturn(_) | ResumedAfterPanic(_) => {
+ // Nothing to visit
+ }
+ }
+ }
+
+ fn super_rvalue(&mut self,
+ rvalue: & $($mutability)? Rvalue<'tcx>,
+ location: Location) {
+ match rvalue {
+ Rvalue::Use(operand) => {
+ self.visit_operand(operand, location);
+ }
+
+ Rvalue::Repeat(value, _) => {
+ self.visit_operand(value, location);
+ }
+
+ Rvalue::ThreadLocalRef(_) => {}
+
+ Rvalue::Ref(r, bk, path) => {
+ self.visit_region($(& $mutability)? *r, location);
+ let ctx = match bk {
+ BorrowKind::Shared => PlaceContext::NonMutatingUse(
+ NonMutatingUseContext::SharedBorrow
+ ),
+ BorrowKind::Shallow => PlaceContext::NonMutatingUse(
+ NonMutatingUseContext::ShallowBorrow
+ ),
+ BorrowKind::Unique => PlaceContext::NonMutatingUse(
+ NonMutatingUseContext::UniqueBorrow
+ ),
+ BorrowKind::Mut { .. } =>
+ PlaceContext::MutatingUse(MutatingUseContext::Borrow),
+ };
+ self.visit_place(path, ctx, location);
+ }
+ Rvalue::CopyForDeref(place) => {
+ self.visit_place(
+ place,
+ PlaceContext::NonMutatingUse(NonMutatingUseContext::Inspect),
+ location
+ );
+ }
+
+ Rvalue::AddressOf(m, path) => {
+ let ctx = match m {
+ Mutability::Mut => PlaceContext::MutatingUse(
+ MutatingUseContext::AddressOf
+ ),
+ Mutability::Not => PlaceContext::NonMutatingUse(
+ NonMutatingUseContext::AddressOf
+ ),
+ };
+ self.visit_place(path, ctx, location);
+ }
+
+ Rvalue::Len(path) => {
+ self.visit_place(
+ path,
+ PlaceContext::NonMutatingUse(NonMutatingUseContext::Inspect),
+ location
+ );
+ }
+
+ Rvalue::Cast(_cast_kind, operand, ty) => {
+ self.visit_operand(operand, location);
+ self.visit_ty($(& $mutability)? *ty, TyContext::Location(location));
+ }
+
+ Rvalue::BinaryOp(_bin_op, box(lhs, rhs))
+ | Rvalue::CheckedBinaryOp(_bin_op, box(lhs, rhs)) => {
+ self.visit_operand(lhs, location);
+ self.visit_operand(rhs, location);
+ }
+
+ Rvalue::UnaryOp(_un_op, op) => {
+ self.visit_operand(op, location);
+ }
+
+ Rvalue::Discriminant(place) => {
+ self.visit_place(
+ place,
+ PlaceContext::NonMutatingUse(NonMutatingUseContext::Inspect),
+ location
+ );
+ }
+
+ Rvalue::NullaryOp(_op, ty) => {
+ self.visit_ty($(& $mutability)? *ty, TyContext::Location(location));
+ }
+
+ Rvalue::Aggregate(kind, operands) => {
+ let kind = &$($mutability)? **kind;
+ match kind {
+ AggregateKind::Array(ty) => {
+ self.visit_ty($(& $mutability)? *ty, TyContext::Location(location));
+ }
+ AggregateKind::Tuple => {
+ }
+ AggregateKind::Adt(
+ _adt_def,
+ _variant_index,
+ substs,
+ _user_substs,
+ _active_field_index
+ ) => {
+ self.visit_substs(substs, location);
+ }
+ AggregateKind::Closure(
+ _,
+ closure_substs
+ ) => {
+ self.visit_substs(closure_substs, location);
+ }
+ AggregateKind::Generator(
+ _,
+ generator_substs,
+ _movability,
+ ) => {
+ self.visit_substs(generator_substs, location);
+ }
+ }
+
+ for operand in operands {
+ self.visit_operand(operand, location);
+ }
+ }
+
+ Rvalue::ShallowInitBox(operand, ty) => {
+ self.visit_operand(operand, location);
+ self.visit_ty($(& $mutability)? *ty, TyContext::Location(location));
+ }
+ }
+ }
+
+ fn super_operand(&mut self,
+ operand: & $($mutability)? Operand<'tcx>,
+ location: Location) {
+ match operand {
+ Operand::Copy(place) => {
+ self.visit_place(
+ place,
+ PlaceContext::NonMutatingUse(NonMutatingUseContext::Copy),
+ location
+ );
+ }
+ Operand::Move(place) => {
+ self.visit_place(
+ place,
+ PlaceContext::NonMutatingUse(NonMutatingUseContext::Move),
+ location
+ );
+ }
+ Operand::Constant(constant) => {
+ self.visit_constant(constant, location);
+ }
+ }
+ }
+
+ fn super_ascribe_user_ty(&mut self,
+ place: & $($mutability)? Place<'tcx>,
+ _variance: $(& $mutability)? ty::Variance,
+ user_ty: & $($mutability)? UserTypeProjection,
+ location: Location) {
+ self.visit_place(
+ place,
+ PlaceContext::NonUse(NonUseContext::AscribeUserTy),
+ location
+ );
+ self.visit_user_type_projection(user_ty);
+ }
+
+ fn super_coverage(&mut self,
+ _coverage: & $($mutability)? Coverage,
+ _location: Location) {
+ }
+
+ fn super_retag(&mut self,
+ _kind: $(& $mutability)? RetagKind,
+ place: & $($mutability)? Place<'tcx>,
+ location: Location) {
+ self.visit_place(
+ place,
+ PlaceContext::MutatingUse(MutatingUseContext::Retag),
+ location,
+ );
+ }
+
+ fn super_local_decl(&mut self,
+ local: Local,
+ local_decl: & $($mutability)? LocalDecl<'tcx>) {
+ let LocalDecl {
+ mutability: _,
+ ty,
+ user_ty,
+ source_info,
+ internal: _,
+ local_info: _,
+ is_block_tail: _,
+ } = local_decl;
+
+ self.visit_ty($(& $mutability)? *ty, TyContext::LocalDecl {
+ local,
+ source_info: *source_info,
+ });
+ if let Some(user_ty) = user_ty {
+ for (user_ty, _) in & $($mutability)? user_ty.contents {
+ self.visit_user_type_projection(user_ty);
+ }
+ }
+ self.visit_source_info(source_info);
+ }
+
+ fn super_var_debug_info(
+ &mut self,
+ var_debug_info: & $($mutability)? VarDebugInfo<'tcx>
+ ) {
+ let VarDebugInfo {
+ name: _,
+ source_info,
+ value,
+ } = var_debug_info;
+
+ self.visit_source_info(source_info);
+ let location = START_BLOCK.start_location();
+ match value {
+ VarDebugInfoContents::Const(c) => self.visit_constant(c, location),
+ VarDebugInfoContents::Place(place) =>
+ self.visit_place(
+ place,
+ PlaceContext::NonUse(NonUseContext::VarDebugInfo),
+ location
+ ),
+ }
+ }
+
+ fn super_source_scope(
+ &mut self,
+ _scope: $(& $mutability)? SourceScope
+ ) {}
+
+ fn super_constant(
+ &mut self,
+ constant: & $($mutability)? Constant<'tcx>,
+ location: Location
+ ) {
+ let Constant {
+ span,
+ user_ty,
+ literal,
+ } = constant;
+
+ self.visit_span($(& $mutability)? *span);
+ drop(user_ty); // no visit method for this
+ match literal {
+ ConstantKind::Ty(ct) => self.visit_const($(& $mutability)? *ct, location),
+ ConstantKind::Val(_, ty) => self.visit_ty($(& $mutability)? *ty, TyContext::Location(location)),
+ }
+ }
+
+ fn super_span(&mut self, _span: $(& $mutability)? Span) {
+ }
+
+ fn super_source_info(&mut self, source_info: & $($mutability)? SourceInfo) {
+ let SourceInfo {
+ span,
+ scope,
+ } = source_info;
+
+ self.visit_span($(& $mutability)? *span);
+ self.visit_source_scope($(& $mutability)? *scope);
+ }
+
+ fn super_user_type_projection(
+ &mut self,
+ _ty: & $($mutability)? UserTypeProjection,
+ ) {
+ }
+
+ fn super_user_type_annotation(
+ &mut self,
+ _index: UserTypeAnnotationIndex,
+ ty: & $($mutability)? CanonicalUserTypeAnnotation<'tcx>,
+ ) {
+ self.visit_span($(& $mutability)? ty.span);
+ self.visit_ty($(& $mutability)? ty.inferred_ty, TyContext::UserTy(ty.span));
+ }
+
+ fn super_ty(&mut self, _ty: $(& $mutability)? Ty<'tcx>) {
+ }
+
+ fn super_region(&mut self, _region: $(& $mutability)? ty::Region<'tcx>) {
+ }
+
+ fn super_const(&mut self, _const: $(& $mutability)? ty::Const<'tcx>) {
+ }
+
+ fn super_substs(&mut self, _substs: & $($mutability)? SubstsRef<'tcx>) {
+ }
+
+ // Convenience methods
+
+ fn visit_location(
+ &mut self,
+ body: &$($mutability)? Body<'tcx>,
+ location: Location
+ ) {
+ #[allow(unused_macro_rules)]
+ macro_rules! basic_blocks {
+ (mut) => (body.basic_blocks_mut());
+ () => (body.basic_blocks());
+ }
+ let basic_block = & $($mutability)? basic_blocks!($($mutability)?)[location.block];
+ if basic_block.statements.len() == location.statement_index {
+ if let Some(ref $($mutability)? terminator) = basic_block.terminator {
+ self.visit_terminator(terminator, location)
+ }
+ } else {
+ let statement = & $($mutability)?
+ basic_block.statements[location.statement_index];
+ self.visit_statement(statement, location)
+ }
+ }
+ }
+ }
+}
+
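+// Generates the place-visiting methods for the two visitor traits. The `mut`
+// arm rewrites projections (and so needs access to `tcx` to re-intern the
+// projection list), while the immutable arm walks projection elements in place.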
+macro_rules! visit_place_fns {
+ (mut) => {
+ fn tcx<'a>(&'a self) -> TyCtxt<'tcx>;
+
+ fn super_place(
+ &mut self,
+ place: &mut Place<'tcx>,
+ context: PlaceContext,
+ location: Location,
+ ) {
+ self.visit_local(&mut place.local, context, location);
+
+ if let Some(new_projection) = self.process_projection(&place.projection, location) {
+ place.projection = self.tcx().intern_place_elems(&new_projection);
+ }
+ }
+
+ fn process_projection<'a>(
+ &mut self,
+ projection: &'a [PlaceElem<'tcx>],
+ location: Location,
+ ) -> Option<Vec<PlaceElem<'tcx>>> {
+ let mut projection = Cow::Borrowed(projection);
+
+ for i in 0..projection.len() {
+ if let Some(&elem) = projection.get(i) {
+ if let Some(elem) = self.process_projection_elem(elem, location) {
+ // This converts the borrowed projection into `Cow::Owned(_)` and returns a
+ // clone of the projection so we can mutate and reintern later.
+ let vec = projection.to_mut();
+ vec[i] = elem;
+ }
+ }
+ }
+
+ match projection {
+ Cow::Borrowed(_) => None,
+ Cow::Owned(vec) => Some(vec),
+ }
+ }
+
+ fn process_projection_elem(
+ &mut self,
+ elem: PlaceElem<'tcx>,
+ location: Location,
+ ) -> Option<PlaceElem<'tcx>> {
+ match elem {
+ PlaceElem::Index(local) => {
+ let mut new_local = local;
+ self.visit_local(
+ &mut new_local,
+ PlaceContext::NonMutatingUse(NonMutatingUseContext::Copy),
+ location,
+ );
+
+ if new_local == local { None } else { Some(PlaceElem::Index(new_local)) }
+ }
+ PlaceElem::Field(field, ty) => {
+ let mut new_ty = ty;
+ self.visit_ty(&mut new_ty, TyContext::Location(location));
+ if ty != new_ty { Some(PlaceElem::Field(field, new_ty)) } else { None }
+ }
+ PlaceElem::Deref
+ | PlaceElem::ConstantIndex { .. }
+ | PlaceElem::Subslice { .. }
+ | PlaceElem::Downcast(..) => None,
+ }
+ }
+ };
+
+ () => {
+ fn visit_projection(
+ &mut self,
+ place_ref: PlaceRef<'tcx>,
+ context: PlaceContext,
+ location: Location,
+ ) {
+ self.super_projection(place_ref, context, location);
+ }
+
+ fn visit_projection_elem(
+ &mut self,
+ local: Local,
+ proj_base: &[PlaceElem<'tcx>],
+ elem: PlaceElem<'tcx>,
+ context: PlaceContext,
+ location: Location,
+ ) {
+ self.super_projection_elem(local, proj_base, elem, context, location);
+ }
+
+ fn super_place(&mut self, place: &Place<'tcx>, context: PlaceContext, location: Location) {
+ let mut context = context;
+
+ if !place.projection.is_empty() {
+ if context.is_use() {
+ // ^ Only change the context if it is a real use, not a "use" in debuginfo.
+ context = if context.is_mutating_use() {
+ PlaceContext::MutatingUse(MutatingUseContext::Projection)
+ } else {
+ PlaceContext::NonMutatingUse(NonMutatingUseContext::Projection)
+ };
+ }
+ }
+
+ self.visit_local(place.local, context, location);
+
+ self.visit_projection(place.as_ref(), context, location);
+ }
+
+ fn super_projection(
+ &mut self,
+ place_ref: PlaceRef<'tcx>,
+ context: PlaceContext,
+ location: Location,
+ ) {
+ for (base, elem) in place_ref.iter_projections().rev() {
+ let base_proj = base.projection;
+ self.visit_projection_elem(place_ref.local, base_proj, elem, context, location);
+ }
+ }
+
+ fn super_projection_elem(
+ &mut self,
+ _local: Local,
+ _proj_base: &[PlaceElem<'tcx>],
+ elem: PlaceElem<'tcx>,
+ _context: PlaceContext,
+ location: Location,
+ ) {
+ match elem {
+ ProjectionElem::Field(_field, ty) => {
+ self.visit_ty(ty, TyContext::Location(location));
+ }
+ ProjectionElem::Index(local) => {
+ self.visit_local(
+ local,
+ PlaceContext::NonMutatingUse(NonMutatingUseContext::Copy),
+ location,
+ );
+ }
+ ProjectionElem::Deref
+ | ProjectionElem::Subslice { from: _, to: _, from_end: _ }
+ | ProjectionElem::ConstantIndex { offset: _, min_length: _, from_end: _ }
+ | ProjectionElem::Downcast(_, _) => {}
+ }
+ }
+ };
+}
+
+make_mir_visitor!(Visitor,);
+make_mir_visitor!(MutVisitor, mut);
+
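+/// Helper trait for uniformly visiting whatever lives at a given `Location`:
+/// a statement, a terminator, or an optional terminator.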
+pub trait MirVisitable<'tcx> {
+ fn apply(&self, location: Location, visitor: &mut dyn Visitor<'tcx>);
+}
+
+impl<'tcx> MirVisitable<'tcx> for Statement<'tcx> {
+ fn apply(&self, location: Location, visitor: &mut dyn Visitor<'tcx>) {
+ visitor.visit_statement(self, location)
+ }
+}
+
+impl<'tcx> MirVisitable<'tcx> for Terminator<'tcx> {
+ fn apply(&self, location: Location, visitor: &mut dyn Visitor<'tcx>) {
+ visitor.visit_terminator(self, location)
+ }
+}
+
+impl<'tcx> MirVisitable<'tcx> for Option<Terminator<'tcx>> {
+ fn apply(&self, location: Location, visitor: &mut dyn Visitor<'tcx>) {
+ visitor.visit_terminator(self.as_ref().unwrap(), location)
+ }
+}
+
+/// Extra information passed to `visit_ty` and friends to give context
+/// about where the type etc appears.
+#[derive(Debug)]
+pub enum TyContext {
+ LocalDecl {
+ /// The index of the local variable we are visiting.
+ local: Local,
+
+ /// The source location where this local variable was declared.
+ source_info: SourceInfo,
+ },
+
+ /// The inferred type of a user type annotation.
+ UserTy(Span),
+
+ /// The return type of the function.
+ ReturnTy(SourceInfo),
+
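+ /// The yield type of a generator body.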
+ YieldTy(SourceInfo),
+
+ /// A type found at some location.
+ Location(Location),
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum NonMutatingUseContext {
+ /// Being inspected in some way, like loading a len.
+ Inspect,
+ /// Consumed as part of an operand.
+ Copy,
+ /// Consumed as part of an operand.
+ Move,
+ /// Shared borrow.
+ SharedBorrow,
+ /// Shallow borrow.
+ ShallowBorrow,
+ /// Unique borrow.
+ UniqueBorrow,
+ /// AddressOf for *const pointer.
+ AddressOf,
+ /// Used as base for another place, e.g., `x` in `x.y`. Will not mutate the place.
+ /// For example, the projection `x.y` is not marked as a mutation in these cases:
+ /// ```ignore (illustrative)
+ /// z = x.y;
+ /// f(&x.y);
+ /// ```
+ Projection,
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum MutatingUseContext {
+ /// Appears as LHS of an assignment.
+ Store,
+ /// Appears on `SetDiscriminant`.
+ SetDiscriminant,
+ /// Appears on `Deinit`.
+ Deinit,
+ /// Output operand of an inline assembly block.
+ AsmOutput,
+ /// Destination of a call.
+ Call,
+ /// Destination of a yield.
+ Yield,
+ /// Being dropped.
+ Drop,
+ /// Mutable borrow.
+ Borrow,
+ /// AddressOf for *mut pointer.
+ AddressOf,
+ /// Used as base for another place, e.g., `x` in `x.y`. Could potentially mutate the place.
+ /// For example, the projection `x.y` is marked as a mutation in these cases:
+ /// ```ignore (illustrative)
+ /// x.y = ...;
+ /// f(&mut x.y);
+ /// ```
+ Projection,
+ /// Retagging, a "Stacked Borrows" shadow state operation.
+ Retag,
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum NonUseContext {
+ /// Starting a storage live range.
+ StorageLive,
+ /// Ending a storage live range.
+ StorageDead,
+ /// User type annotation assertions for NLL.
+ AscribeUserTy,
+ /// The data of a user variable, for debug info.
+ VarDebugInfo,
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum PlaceContext {
+ NonMutatingUse(NonMutatingUseContext),
+ MutatingUse(MutatingUseContext),
+ NonUse(NonUseContext),
+}
+
+impl PlaceContext {
+ /// Returns `true` if this place context represents a drop.
+ #[inline]
+ pub fn is_drop(&self) -> bool {
+ matches!(self, PlaceContext::MutatingUse(MutatingUseContext::Drop))
+ }
+
+ /// Returns `true` if this place context represents a borrow.
+ pub fn is_borrow(&self) -> bool {
+ matches!(
+ self,
+ PlaceContext::NonMutatingUse(
+ NonMutatingUseContext::SharedBorrow
+ | NonMutatingUseContext::ShallowBorrow
+ | NonMutatingUseContext::UniqueBorrow
+ ) | PlaceContext::MutatingUse(MutatingUseContext::Borrow)
+ )
+ }
+
+ /// Returns `true` if this place context represents a storage live or storage dead marker.
+ #[inline]
+ pub fn is_storage_marker(&self) -> bool {
+ matches!(
+ self,
+ PlaceContext::NonUse(NonUseContext::StorageLive | NonUseContext::StorageDead)
+ )
+ }
+
+ /// Returns `true` if this place context represents a use that potentially changes the value.
+ #[inline]
+ pub fn is_mutating_use(&self) -> bool {
+ matches!(self, PlaceContext::MutatingUse(..))
+ }
+
+ /// Returns `true` if this place context represents a use.
+ #[inline]
+ pub fn is_use(&self) -> bool {
+ !matches!(self, PlaceContext::NonUse(..))
+ }
+
+ /// Returns `true` if this place context represents an assignment statement.
+ pub fn is_place_assignment(&self) -> bool {
+ matches!(
+ self,
+ PlaceContext::MutatingUse(
+ MutatingUseContext::Store
+ | MutatingUseContext::Call
+ | MutatingUseContext::AsmOutput,
+ )
+ )
+ }
+}
diff --git a/compiler/rustc_middle/src/query/mod.rs b/compiler/rustc_middle/src/query/mod.rs
new file mode 100644
index 000000000..d8483e7e4
--- /dev/null
+++ b/compiler/rustc_middle/src/query/mod.rs
@@ -0,0 +1,2060 @@
+//! Defines the various compiler queries.
+//!
+//! For more information on the query system, see
+//! ["Queries: demand-driven compilation"](https://rustc-dev-guide.rust-lang.org/query.html).
+//! This chapter includes instructions for adding new queries.
+
+// Each of these queries corresponds to a function pointer field in the
+// `Providers` struct for requesting a value of that type, and a method
+// on `tcx: TyCtxt` (and `tcx.at(span)`) for doing that request in a way
+// which memoizes and does dep-graph tracking, wrapping around the actual
+// `Providers` that the driver creates (using several `rustc_*` crates).
+//
+// The result type of each query must implement `Clone`, and additionally
+// `ty::query::values::Value`, which produces an appropriate placeholder
+// (error) value if the query resulted in a query cycle.
+// Queries marked with `fatal_cycle` do not need the latter implementation,
+// as they will raise a fatal error on query cycles instead.
+rustc_queries! {
+ query trigger_delay_span_bug(key: DefId) -> () {
+ desc { "trigger a delay span bug" }
+ }
+
+ query resolutions(_: ()) -> &'tcx ty::ResolverOutputs {
+ eval_always
+ no_hash
+ desc { "get the resolver outputs" }
+ }
+
+ query resolver_for_lowering(_: ()) -> &'tcx Steal<ty::ResolverAstLowering> {
+ eval_always
+ no_hash
+ desc { "get the resolver for lowering" }
+ }
+
+ /// Return the span for a definition.
+ /// Contrary to `def_span` below, this query returns the full absolute span of the definition.
+ /// This span is meant for dep-tracking rather than diagnostics. It should not be used outside
+ /// of rustc_middle::hir::source_map.
+ query source_span(key: LocalDefId) -> Span {
+ desc { "get the source span" }
+ }
+
+ /// Represents the crate as a whole (as distinct from the top-level crate module).
+ /// If you call `hir_crate` (e.g., indirectly by calling `tcx.hir().krate()`),
+ /// we will have to assume that any change means that you need to be recompiled.
+ /// This is because the `hir_crate` query gives you access to all other items.
+ /// To avoid this fate, do not call `tcx.hir().krate()`; instead,
+ /// prefer wrappers like `tcx.visit_all_items_in_krate()`.
+ query hir_crate(key: ()) -> Crate<'tcx> {
+ storage(ArenaCacheSelector<'tcx>)
+ eval_always
+ desc { "get the crate HIR" }
+ }
+
+ /// All items in the crate.
+ query hir_crate_items(_: ()) -> rustc_middle::hir::ModuleItems {
+ storage(ArenaCacheSelector<'tcx>)
+ eval_always
+ desc { "get HIR crate items" }
+ }
+
+ /// The items in a module.
+ ///
+ /// This can be conveniently accessed by `tcx.hir().visit_item_likes_in_module`.
+ /// Avoid calling this query directly.
+ query hir_module_items(key: LocalDefId) -> rustc_middle::hir::ModuleItems {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { |tcx| "HIR module items in `{}`", tcx.def_path_str(key.to_def_id()) }
+ cache_on_disk_if { true }
+ }
+
+ /// Gives access to the HIR node for the HIR owner `key`.
+ ///
+ /// This can be conveniently accessed by methods on `tcx.hir()`.
+ /// Avoid calling this query directly.
+ query hir_owner(key: LocalDefId) -> Option<crate::hir::Owner<'tcx>> {
+ desc { |tcx| "HIR owner of `{}`", tcx.def_path_str(key.to_def_id()) }
+ }
+
+ /// Gives access to the HIR ID for the given `LocalDefId` owner `key`.
+ ///
+ /// This can be conveniently accessed by methods on `tcx.hir()`.
+ /// Avoid calling this query directly.
+ query local_def_id_to_hir_id(key: LocalDefId) -> hir::HirId {
+ desc { |tcx| "HIR ID of `{}`", tcx.def_path_str(key.to_def_id()) }
+ }
+
+ /// Gives access to the HIR node's parent for the HIR owner `key`.
+ ///
+ /// This can be conveniently accessed by methods on `tcx.hir()`.
+ /// Avoid calling this query directly.
+ query hir_owner_parent(key: LocalDefId) -> hir::HirId {
+ desc { |tcx| "HIR parent of `{}`", tcx.def_path_str(key.to_def_id()) }
+ }
+
+ /// Gives access to the HIR nodes and bodies inside the HIR owner `key`.
+ ///
+ /// This can be conveniently accessed by methods on `tcx.hir()`.
+ /// Avoid calling this query directly.
+ query hir_owner_nodes(key: LocalDefId) -> hir::MaybeOwner<&'tcx hir::OwnerNodes<'tcx>> {
+ desc { |tcx| "HIR owner items in `{}`", tcx.def_path_str(key.to_def_id()) }
+ }
+
+ /// Gives access to the HIR attributes inside the HIR owner `key`.
+ ///
+ /// This can be conveniently accessed by methods on `tcx.hir()`.
+ /// Avoid calling this query directly.
+ query hir_attrs(key: LocalDefId) -> &'tcx hir::AttributeMap<'tcx> {
+ desc { |tcx| "HIR owner attributes in `{}`", tcx.def_path_str(key.to_def_id()) }
+ }
+
+ /// Computes the `DefId` of the corresponding const parameter in case the `key` is a
+ /// const argument and returns `None` otherwise.
+ ///
+ /// ```ignore (incomplete)
+ /// let a = foo::<7>();
+ /// // ^ Calling `opt_const_param_of` for this argument,
+ ///
+ /// fn foo<const N: usize>()
+ /// // ^ returns this `DefId`.
+ ///
+ /// fn bar() {
+ /// // ^ While calling `opt_const_param_of` for other bodies returns `None`.
+ /// }
+ /// ```
+ // It looks like caching this query on disk actually slightly
+ // worsened performance in #74376.
+ //
+ // Once const generics are more prevalently used, we might want to
+ // consider only caching calls returning `Some`.
+ query opt_const_param_of(key: LocalDefId) -> Option<DefId> {
+ desc { |tcx| "computing the optional const parameter of `{}`", tcx.def_path_str(key.to_def_id()) }
+ }
+
+ /// Given the def_id of a const-generic parameter, computes the associated default const
+ /// parameter, e.g. `fn example<const N: usize = 3>` called on `N` would return `3`.
+ query const_param_default(param: DefId) -> ty::Const<'tcx> {
+ desc { |tcx| "compute const default for a given parameter `{}`", tcx.def_path_str(param) }
+ cache_on_disk_if { param.is_local() }
+ separate_provide_extern
+ }
+
+ /// Returns the [`Ty`][rustc_middle::ty::Ty] of the given [`DefId`]. If the [`DefId`] points
+ /// to an alias, it will "skip" this alias to return the aliased type.
+ ///
+ /// [`DefId`]: rustc_hir::def_id::DefId
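+ ///
+ /// An illustrative sketch of the alias case:
+ ///
+ /// ```ignore (illustrative)
+ /// type Meters = u32;
+ /// // `type_of` on the `DefId` of `Meters` yields `u32`, not the alias itself.
+ /// ```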
+ query type_of(key: DefId) -> Ty<'tcx> {
+ desc { |tcx|
+ "{action} `{path}`",
+ action = {
+ use rustc_hir::def::DefKind;
+ match tcx.def_kind(key) {
+ DefKind::TyAlias => "expanding type alias",
+ DefKind::TraitAlias => "expanding trait alias",
+ _ => "computing type of",
+ }
+ },
+ path = tcx.def_path_str(key),
+ }
+ cache_on_disk_if { key.is_local() }
+ separate_provide_extern
+ }
+
+ query analysis(key: ()) -> Result<(), ErrorGuaranteed> {
+ eval_always
+ desc { "running analysis passes on this crate" }
+ }
+
+ /// This query checks the fulfillment of collected lint expectations.
+ /// All lint emitting queries have to be done before this is executed
+ /// to ensure that all expectations can be fulfilled.
+ ///
+ /// This is an extra query to enable other drivers (like rustdoc) to
+ /// only execute a small subset of the `analysis` query, while allowing
+ /// lints to be expected. In rustc, this query will be executed as part of
+ /// the `analysis` query and doesn't have to be called a second time.
+ ///
+ /// Tools can additionally pass in a tool filter. That will restrict the
+ /// expectations to only trigger for lints starting with the listed tool
+ /// name. This is useful for cases where not all linting code from rustc
+ /// was called. With the default `None`, all registered lints will also
+ /// be checked for expectation fulfillment.
+ query check_expectations(key: Option<Symbol>) -> () {
+ eval_always
+ desc { "checking lint expectations (RFC 2383)" }
+ }
+
+ /// Maps from the `DefId` of an item (trait/struct/enum/fn) to its
+ /// associated generics.
+ query generics_of(key: DefId) -> ty::Generics {
+ desc { |tcx| "computing generics of `{}`", tcx.def_path_str(key) }
+ storage(ArenaCacheSelector<'tcx>)
+ cache_on_disk_if { key.is_local() }
+ separate_provide_extern
+ }
+
+ /// Maps from the `DefId` of an item (trait/struct/enum/fn) to the
+ /// predicates (where-clauses) that must be proven true in order
+ /// to reference it. This is almost always the "predicates query"
+ /// that you want.
+ ///
+ /// `predicates_of` builds on `predicates_defined_on` -- in fact,
+ /// it is almost always the same as that query, except for the
+ /// case of traits. For traits, `predicates_of` contains
+ /// an additional `Self: Trait<...>` predicate that users don't
+ /// actually write. This reflects the fact that to invoke the
+ /// trait (e.g., via `Default::default`) you must supply types
+ /// that actually implement the trait. (However, this extra
+ /// predicate gets in the way of some checks, which are intended
+ /// to operate over only the actual where-clauses written by the
+ /// user.)
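+ ///
+ /// An illustrative sketch of the trait case:
+ ///
+ /// ```ignore (illustrative)
+ /// trait Foo where Self: Clone {}
+ /// // predicates_defined_on(Foo): [Self: Clone]
+ /// // predicates_of(Foo):         [Self: Clone, Self: Foo]
+ /// ```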
+ query predicates_of(key: DefId) -> ty::GenericPredicates<'tcx> {
+ desc { |tcx| "computing predicates of `{}`", tcx.def_path_str(key) }
+ cache_on_disk_if { key.is_local() }
+ }
+
+ /// Returns the list of bounds that can be used for
+ /// `SelectionCandidate::ProjectionCandidate(_)` and
+ /// `ProjectionTyCandidate::TraitDef`.
+ /// Specifically this is the bounds written on the trait's type
+ /// definition, or those after the `impl` keyword
+ ///
+ /// ```ignore (incomplete)
+ /// type X: Bound + 'lt
+ /// // ^^^^^^^^^^^
+ /// impl Debug + Display
+ /// // ^^^^^^^^^^^^^^^
+ /// ```
+ ///
+ /// `key` is the `DefId` of the associated type or opaque type.
+ ///
+ /// Bounds from the parent (e.g. with nested impl trait) are not included.
+ query explicit_item_bounds(key: DefId) -> &'tcx [(ty::Predicate<'tcx>, Span)] {
+ desc { |tcx| "finding item bounds for `{}`", tcx.def_path_str(key) }
+ cache_on_disk_if { key.is_local() }
+ separate_provide_extern
+ }
+
+ /// Elaborated version of the predicates from `explicit_item_bounds`.
+ ///
+ /// For example:
+ ///
+ /// ```
+ /// trait MyTrait {
+ /// type MyAType: Eq + ?Sized;
+ /// }
+ /// ```
+ ///
+ /// `explicit_item_bounds` returns `[<Self as MyTrait>::MyAType: Eq]`,
+ /// and `item_bounds` returns
+ /// ```text
+ /// [
+ /// <Self as Trait>::MyAType: Eq,
+ /// <Self as Trait>::MyAType: PartialEq<<Self as Trait>::MyAType>
+ /// ]
+ /// ```
+ ///
+ /// Bounds from the parent (e.g. with nested impl trait) are not included.
+ query item_bounds(key: DefId) -> &'tcx ty::List<ty::Predicate<'tcx>> {
+ desc { |tcx| "elaborating item bounds for `{}`", tcx.def_path_str(key) }
+ }
+
+ query native_libraries(_: CrateNum) -> Vec<NativeLib> {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { "looking up the native libraries of a linked crate" }
+ separate_provide_extern
+ }
+
+ query lint_levels(_: ()) -> LintLevelMap {
+ storage(ArenaCacheSelector<'tcx>)
+ eval_always
+ desc { "computing the lint levels for items in this crate" }
+ }
+
+ query parent_module_from_def_id(key: LocalDefId) -> LocalDefId {
+ eval_always
+ desc { |tcx| "parent module of `{}`", tcx.def_path_str(key.to_def_id()) }
+ }
+
+ query expn_that_defined(key: DefId) -> rustc_span::ExpnId {
+ desc { |tcx| "expansion that defined `{}`", tcx.def_path_str(key) }
+ separate_provide_extern
+ }
+
+ query is_panic_runtime(_: CrateNum) -> bool {
+ fatal_cycle
+ desc { "checking if the crate is_panic_runtime" }
+ separate_provide_extern
+ }
+
+ /// Fetch the THIR for a given body. If typeck for that body failed, returns an empty `Thir`.
+ query thir_body(key: ty::WithOptConstParam<LocalDefId>)
+ -> Result<(&'tcx Steal<thir::Thir<'tcx>>, thir::ExprId), ErrorGuaranteed>
+ {
+ // Perf tests revealed that hashing THIR is inefficient (see #85729).
+ no_hash
+ desc { |tcx| "building THIR for `{}`", tcx.def_path_str(key.did.to_def_id()) }
+ }
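+
+ // Sketch of a caller (assumptions: `did` is a `LocalDefId` whose typeck
+ // succeeded, and the enclosing function returns a compatible `Result`):
+ //
+ // let (thir, expr) = tcx.thir_body(ty::WithOptConstParam::unknown(did))?;
+ // let thir = thir.steal(); // `Steal` contents can only be taken once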
+
+ /// Create a THIR tree for debugging.
+ query thir_tree(key: ty::WithOptConstParam<LocalDefId>) -> String {
+ no_hash
+ storage(ArenaCacheSelector<'tcx>)
+ desc { |tcx| "constructing THIR tree for `{}`", tcx.def_path_str(key.did.to_def_id()) }
+ }
+
+ /// Set of all the `DefId`s in this crate that have MIR associated with
+ /// them. This includes all the body owners, but also things like struct
+ /// constructors.
+ query mir_keys(_: ()) -> rustc_data_structures::fx::FxIndexSet<LocalDefId> {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { "getting a list of all mir_keys" }
+ }
+
+ /// Maps `DefId`s that have an associated `mir::Body` to the result
+ /// of the MIR const-checking pass. This is the set of qualifs in
+ /// the final value of a `const`.
+ query mir_const_qualif(key: DefId) -> mir::ConstQualifs {
+ desc { |tcx| "const checking `{}`", tcx.def_path_str(key) }
+ cache_on_disk_if { key.is_local() }
+ separate_provide_extern
+ }
+ query mir_const_qualif_const_arg(
+ key: (LocalDefId, DefId)
+ ) -> mir::ConstQualifs {
+ desc {
+ |tcx| "const checking the const argument `{}`",
+ tcx.def_path_str(key.0.to_def_id())
+ }
+ }
+
+ /// Fetch the MIR for a given `DefId` right after it's built - this includes
+ /// unreachable code.
+ query mir_built(key: ty::WithOptConstParam<LocalDefId>) -> &'tcx Steal<mir::Body<'tcx>> {
+ desc { |tcx| "building MIR for `{}`", tcx.def_path_str(key.did.to_def_id()) }
+ }
+
+ /// Fetch the MIR for a given `DefId` up to the point where it is
+ /// ready for const qualification.
+ ///
+ /// See the README for the `mir` module for details.
+ query mir_const(key: ty::WithOptConstParam<LocalDefId>) -> &'tcx Steal<mir::Body<'tcx>> {
+ desc {
+ |tcx| "processing MIR for {}`{}`",
+ if key.const_param_did.is_some() { "the const argument " } else { "" },
+ tcx.def_path_str(key.did.to_def_id()),
+ }
+ no_hash
+ }
+
+ /// Try to build an abstract representation of the given constant.
+ query thir_abstract_const(
+ key: DefId
+ ) -> Result<Option<&'tcx [ty::abstract_const::Node<'tcx>]>, ErrorGuaranteed> {
+ desc {
+ |tcx| "building an abstract representation for {}", tcx.def_path_str(key),
+ }
+ separate_provide_extern
+ }
+ /// Try to build an abstract representation of the given constant.
+ query thir_abstract_const_of_const_arg(
+ key: (LocalDefId, DefId)
+ ) -> Result<Option<&'tcx [ty::abstract_const::Node<'tcx>]>, ErrorGuaranteed> {
+ desc {
+ |tcx|
+ "building an abstract representation for the const argument {}",
+ tcx.def_path_str(key.0.to_def_id()),
+ }
+ }
+
+ query try_unify_abstract_consts(key:
+ ty::ParamEnvAnd<'tcx, (ty::Unevaluated<'tcx, ()>, ty::Unevaluated<'tcx, ()>
+ )>) -> bool {
+ desc {
+ |tcx| "trying to unify the generic constants {} and {}",
+ tcx.def_path_str(key.value.0.def.did), tcx.def_path_str(key.value.1.def.did)
+ }
+ }
+
+ query mir_drops_elaborated_and_const_checked(
+ key: ty::WithOptConstParam<LocalDefId>
+ ) -> &'tcx Steal<mir::Body<'tcx>> {
+ no_hash
+ desc { |tcx| "elaborating drops for `{}`", tcx.def_path_str(key.did.to_def_id()) }
+ }
+
+ query mir_for_ctfe(
+ key: DefId
+ ) -> &'tcx mir::Body<'tcx> {
+ desc { |tcx| "caching mir of `{}` for CTFE", tcx.def_path_str(key) }
+ cache_on_disk_if { key.is_local() }
+ separate_provide_extern
+ }
+
+ query mir_for_ctfe_of_const_arg(key: (LocalDefId, DefId)) -> &'tcx mir::Body<'tcx> {
+ desc {
+ |tcx| "MIR for CTFE of the const argument `{}`",
+ tcx.def_path_str(key.0.to_def_id())
+ }
+ }
+
+ query mir_promoted(key: ty::WithOptConstParam<LocalDefId>) ->
+ (
+ &'tcx Steal<mir::Body<'tcx>>,
+ &'tcx Steal<IndexVec<mir::Promoted, mir::Body<'tcx>>>
+ ) {
+ no_hash
+ desc {
+ |tcx| "processing {}`{}`",
+ if key.const_param_did.is_some() { "the const argument " } else { "" },
+ tcx.def_path_str(key.did.to_def_id()),
+ }
+ }
+
+ query symbols_for_closure_captures(
+ key: (LocalDefId, LocalDefId)
+ ) -> Vec<rustc_span::Symbol> {
+ storage(ArenaCacheSelector<'tcx>)
+ desc {
+ |tcx| "symbols for captures of closure `{}` in `{}`",
+ tcx.def_path_str(key.1.to_def_id()),
+ tcx.def_path_str(key.0.to_def_id())
+ }
+ }
+
+ /// MIR after our optimization passes have run. This is MIR that is ready
+ /// for codegen. This is also the only query that can fetch non-local MIR, at present.
+ query optimized_mir(key: DefId) -> &'tcx mir::Body<'tcx> {
+ desc { |tcx| "optimizing MIR for `{}`", tcx.def_path_str(key) }
+ cache_on_disk_if { key.is_local() }
+ separate_provide_extern
+ }
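+
+ // Illustrative only (hypothetical helper, not part of this change):
+ // fetching post-optimization MIR for any `def_id` that has a body.
+ //
+ // fn mir_local_count<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> usize {
+ //     // `local_decls` covers the return place, arguments and temporaries.
+ //     tcx.optimized_mir(def_id).local_decls.len()
+ // }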
+
+ /// Returns coverage summary info for a function, after executing the `InstrumentCoverage`
+ /// MIR pass (assuming the -Cinstrument-coverage option is enabled).
+ query coverageinfo(key: ty::InstanceDef<'tcx>) -> mir::CoverageInfo {
+ desc { |tcx| "retrieving coverage info from MIR for `{}`", tcx.def_path_str(key.def_id()) }
+ storage(ArenaCacheSelector<'tcx>)
+ }
+
+ /// Returns the `CodeRegions` for a function that has instrumented coverage, in case the
+ /// function was optimized out before codegen, and before being added to the Coverage Map.
+ query covered_code_regions(key: DefId) -> Vec<&'tcx mir::coverage::CodeRegion> {
+ desc {
+ |tcx| "retrieving the covered `CodeRegion`s, if instrumented, for `{}`",
+ tcx.def_path_str(key)
+ }
+ storage(ArenaCacheSelector<'tcx>)
+ cache_on_disk_if { key.is_local() }
+ }
+
+ /// The `DefId` is the `DefId` of the containing MIR body. Promoteds do not have their own
+ /// `DefId`. This function returns all promoteds in the specified body. The body references
+ /// promoteds by the `DefId` and the `mir::Promoted` index. This is necessary
+ /// because, after inlining, a body may refer to promoteds from other bodies. In that
+ /// case you still need to use the `DefId` of the original body.
+ query promoted_mir(key: DefId) -> &'tcx IndexVec<mir::Promoted, mir::Body<'tcx>> {
+ desc { |tcx| "optimizing promoted MIR for `{}`", tcx.def_path_str(key) }
+ cache_on_disk_if { key.is_local() }
+ separate_provide_extern
+ }
+ query promoted_mir_of_const_arg(
+ key: (LocalDefId, DefId)
+ ) -> &'tcx IndexVec<mir::Promoted, mir::Body<'tcx>> {
+ desc {
+ |tcx| "optimizing promoted MIR for the const argument `{}`",
+ tcx.def_path_str(key.0.to_def_id()),
+ }
+ }
+
+ /// Erases regions from `ty` to yield a new type.
+ /// Normally you would just use `tcx.erase_regions(value)`,
+ /// which uses this query as a kind of cache.
+ query erase_regions_ty(ty: Ty<'tcx>) -> Ty<'tcx> {
+ // This query is not expected to have input -- as a result, it
+ // is not a good candidate for "replay" because it is essentially a
+ // pure function of its input (and hence the expectation is that
+ // no caller would be green **apart** from just these
+ // queries). Making it anonymous avoids hashing the result, which
+ // may save a bit of time.
+ anon
+ desc { "erasing regions from `{:?}`", ty }
+ }
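+
+ // As noted above, callers reach this query through the `TyCtxt` wrapper;
+ // a minimal sketch (not part of this change):
+ //
+ // let erased = tcx.erase_regions(ty); // e.g. `&'a T` becomes `&'erased T`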
+
+ query wasm_import_module_map(_: CrateNum) -> FxHashMap<DefId, String> {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { "wasm import module map" }
+ }
+
+ /// Maps from the `DefId` of an item (trait/struct/enum/fn) to the
+ /// predicates (where-clauses) directly defined on it. This is
+ /// equal to the `explicit_predicates_of` predicates plus the
+ /// `inferred_outlives_of` predicates.
+ query predicates_defined_on(key: DefId) -> ty::GenericPredicates<'tcx> {
+ desc { |tcx| "computing predicates of `{}`", tcx.def_path_str(key) }
+ }
+
+ /// Returns everything that looks like a predicate written explicitly
+ /// by the user on a trait item.
+ ///
+ /// Traits are unusual, because predicates on associated types are
+ /// converted into bounds on that type for backwards compatibility:
+ ///
+ ///     trait X where Self::U: Copy { type U; }
+ ///
+ /// becomes
+ ///
+ ///     trait X { type U: Copy; }
+ ///
+ /// `explicit_predicates_of` and `explicit_item_bounds` will then take
+ /// the appropriate subsets of the predicates here.
+ query trait_explicit_predicates_and_bounds(key: LocalDefId) -> ty::GenericPredicates<'tcx> {
+ desc { |tcx| "computing explicit predicates of trait `{}`", tcx.def_path_str(key.to_def_id()) }
+ }
+
+ /// Returns the predicates written explicitly by the user.
+ query explicit_predicates_of(key: DefId) -> ty::GenericPredicates<'tcx> {
+ desc { |tcx| "computing explicit predicates of `{}`", tcx.def_path_str(key) }
+ cache_on_disk_if { key.is_local() }
+ separate_provide_extern
+ }
+
+ /// Returns the inferred outlives predicates (e.g., for `struct
+ /// Foo<'a, T> { x: &'a T }`, this would return `T: 'a`).
+ query inferred_outlives_of(key: DefId) -> &'tcx [(ty::Predicate<'tcx>, Span)] {
+ desc { |tcx| "computing inferred outlives predicates of `{}`", tcx.def_path_str(key) }
+ cache_on_disk_if { key.is_local() }
+ separate_provide_extern
+ }
+
+ /// Maps from the `DefId` of a trait to the list of
+ /// super-predicates. This is a subset of the full list of
+ /// predicates. We store these in a separate map because we must
+ /// evaluate them even during type conversion, often before the
+ /// full predicates are available (note that supertraits have
+ /// additional acyclicity requirements).
+ query super_predicates_of(key: DefId) -> ty::GenericPredicates<'tcx> {
+ desc { |tcx| "computing the super predicates of `{}`", tcx.def_path_str(key) }
+ cache_on_disk_if { key.is_local() }
+ separate_provide_extern
+ }
+
+ /// The `Option<Ident>` is the name of an associated type. If it is `None`, then this query
+ /// returns the full set of predicates. If `Some(Ident)`, then the query returns only the
+ /// subset of super-predicates that reference traits that define the given associated type.
+ /// This is used to avoid cycles in resolving types like `T::Item`.
+ query super_predicates_that_define_assoc_type(key: (DefId, Option<rustc_span::symbol::Ident>)) -> ty::GenericPredicates<'tcx> {
+ desc { |tcx| "computing the super traits of `{}`{}",
+ tcx.def_path_str(key.0),
+ if let Some(assoc_name) = key.1 { format!(" with associated type name `{}`", assoc_name) } else { "".to_string() },
+ }
+ }
+
+ /// To avoid cycles within the predicates of a single item we compute
+ /// per-type-parameter predicates for resolving `T::AssocTy`.
+ query type_param_predicates(key: (DefId, LocalDefId, rustc_span::symbol::Ident)) -> ty::GenericPredicates<'tcx> {
+ desc { |tcx| "computing the bounds for type parameter `{}`", tcx.hir().ty_param_name(key.1) }
+ }
+
+ query trait_def(key: DefId) -> ty::TraitDef {
+ desc { |tcx| "computing trait definition for `{}`", tcx.def_path_str(key) }
+ storage(ArenaCacheSelector<'tcx>)
+ cache_on_disk_if { key.is_local() }
+ separate_provide_extern
+ }
+ query adt_def(key: DefId) -> ty::AdtDef<'tcx> {
+ desc { |tcx| "computing ADT definition for `{}`", tcx.def_path_str(key) }
+ cache_on_disk_if { key.is_local() }
+ separate_provide_extern
+ }
+ query adt_destructor(key: DefId) -> Option<ty::Destructor> {
+ desc { |tcx| "computing `Drop` impl for `{}`", tcx.def_path_str(key) }
+ cache_on_disk_if { key.is_local() }
+ separate_provide_extern
+ }
+
+ // The cycle error here should be reported as an error by `check_representable`.
+ // We consider the type as `Sized` in the meantime to avoid
+ // further errors (done in `impl Value for AdtSizedConstraint`).
+ // Use `cycle_delay_bug` to delay the cycle error here to be emitted later
+ // in case we accidentally otherwise don't emit an error.
+ query adt_sized_constraint(
+ key: DefId
+ ) -> AdtSizedConstraint<'tcx> {
+ desc { |tcx| "computing `Sized` constraints for `{}`", tcx.def_path_str(key) }
+ cycle_delay_bug
+ }
+
+ query adt_dtorck_constraint(
+ key: DefId
+ ) -> Result<&'tcx DropckConstraint<'tcx>, NoSolution> {
+ desc { |tcx| "computing drop-check constraints for `{}`", tcx.def_path_str(key) }
+ }
+
+ /// Returns `true` if this is a const fn. Use `is_const_fn` to know whether your crate
+ /// actually sees it as a const fn (e.g., the const-fn-ness might be unstable and you might
+ /// not have the feature gate active).
+ ///
+ /// **Do not call this function manually.** It is only meant to cache the base data for the
+ /// `is_const_fn` function. Consider using `is_const_fn` or `is_const_fn_raw` instead.
+ query constness(key: DefId) -> hir::Constness {
+ desc { |tcx| "checking if item is const: `{}`", tcx.def_path_str(key) }
+ cache_on_disk_if { key.is_local() }
+ separate_provide_extern
+ }
+
+ query asyncness(key: DefId) -> hir::IsAsync {
+ desc { |tcx| "checking if the function is async: `{}`", tcx.def_path_str(key) }
+ cache_on_disk_if { key.is_local() }
+ separate_provide_extern
+ }
+
+ /// Returns `true` if calls to the function may be promoted.
+ ///
+ /// This is either because the function is e.g., a tuple-struct or tuple-variant
+ /// constructor, or because it has the `#[rustc_promotable]` attribute. The attribute should
+ /// be removed in the future in favour of some form of check which figures out whether the
+ /// function does not inspect the bits of any of its arguments (so is essentially just a
+ /// constructor function).
+ query is_promotable_const_fn(key: DefId) -> bool {
+ desc { |tcx| "checking if item is promotable: `{}`", tcx.def_path_str(key) }
+ }
+
+ /// Returns `true` if this is a foreign item (i.e., linked via `extern { ... }`).
+ query is_foreign_item(key: DefId) -> bool {
+ desc { |tcx| "checking if `{}` is a foreign item", tcx.def_path_str(key) }
+ cache_on_disk_if { key.is_local() }
+ separate_provide_extern
+ }
+
+ /// Returns `Some(generator_kind)` if the node pointed to by `def_id` is a generator.
+ query generator_kind(def_id: DefId) -> Option<hir::GeneratorKind> {
+ desc { |tcx| "looking up generator kind of `{}`", tcx.def_path_str(def_id) }
+ cache_on_disk_if { def_id.is_local() }
+ separate_provide_extern
+ }
+
+ /// Gets a map with the variance of every item; use `variances_of` instead.
+ query crate_variances(_: ()) -> ty::CrateVariancesMap<'tcx> {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { "computing the variances for items in this crate" }
+ }
+
+ /// Maps from the `DefId` of a type or region parameter to its (inferred) variance.
+ query variances_of(def_id: DefId) -> &'tcx [ty::Variance] {
+ desc { |tcx| "computing the variances of `{}`", tcx.def_path_str(def_id) }
+ cache_on_disk_if { def_id.is_local() }
+ separate_provide_extern
+ }
+
+ /// Maps from the `DefId` of a type to its (inferred) outlives predicates.
+ query inferred_outlives_crate(_: ()) -> ty::CratePredicatesMap<'tcx> {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { "computing the inferred outlives predicates for items in this crate" }
+ }
+
+ /// Maps from an impl/trait `DefId` to a list of the `DefId`s of its items.
+ query associated_item_def_ids(key: DefId) -> &'tcx [DefId] {
+ desc { |tcx| "collecting associated items of `{}`", tcx.def_path_str(key) }
+ cache_on_disk_if { key.is_local() }
+ separate_provide_extern
+ }
+
+ /// Maps from a trait item to the trait item "descriptor".
+ query associated_item(key: DefId) -> ty::AssocItem {
+ desc { |tcx| "computing associated item data for `{}`", tcx.def_path_str(key) }
+ storage(ArenaCacheSelector<'tcx>)
+ cache_on_disk_if { key.is_local() }
+ separate_provide_extern
+ }
+
+ /// Collects the associated items defined on a trait or impl.
+ query associated_items(key: DefId) -> ty::AssocItems<'tcx> {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { |tcx| "collecting associated items of {}", tcx.def_path_str(key) }
+ }
+
+ /// Maps from associated items on a trait to the corresponding associated
+ /// item on the impl specified by `impl_id`.
+ ///
+ /// For example, with the following code
+ ///
+ /// ```
+ /// struct Type {}
+ /// // DefId
+ /// trait Trait {  // trait_id
+ ///     fn f();    // trait_f
+ ///     fn g() {}  // trait_g
+ /// }
+ ///
+ /// impl Trait for Type {  // impl_id
+ ///     fn f() {}          // impl_f
+ ///     fn g() {}          // impl_g
+ /// }
+ /// ```
+ ///
+ /// The map returned for `tcx.impl_item_implementor_ids(impl_id)` would be
+ /// `{ trait_f: impl_f, trait_g: impl_g }`
+ query impl_item_implementor_ids(impl_id: DefId) -> FxHashMap<DefId, DefId> {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { |tcx| "comparing impl items against trait for {}", tcx.def_path_str(impl_id) }
+ }
+
+ /// Given an `impl_id`, return the trait it implements.
+ /// Return `None` if this is an inherent impl.
+ query impl_trait_ref(impl_id: DefId) -> Option<ty::TraitRef<'tcx>> {
+ desc { |tcx| "computing trait implemented by `{}`", tcx.def_path_str(impl_id) }
+ cache_on_disk_if { impl_id.is_local() }
+ separate_provide_extern
+ }
+ query impl_polarity(impl_id: DefId) -> ty::ImplPolarity {
+ desc { |tcx| "computing implementation polarity of `{}`", tcx.def_path_str(impl_id) }
+ cache_on_disk_if { impl_id.is_local() }
+ separate_provide_extern
+ }
+
+ query issue33140_self_ty(key: DefId) -> Option<ty::Ty<'tcx>> {
+ desc { |tcx| "computing Self type wrt issue #33140 `{}`", tcx.def_path_str(key) }
+ }
+
+ /// Maps a `DefId` of a type to a list of its inherent impls.
+ /// Contains implementations of methods that are inherent to a type.
+ /// Methods in these implementations don't need to be exported.
+ query inherent_impls(key: DefId) -> &'tcx [DefId] {
+ desc { |tcx| "collecting inherent impls for `{}`", tcx.def_path_str(key) }
+ cache_on_disk_if { key.is_local() }
+ separate_provide_extern
+ }
+
+ query incoherent_impls(key: SimplifiedType) -> &'tcx [DefId] {
+ desc { |tcx| "collecting all inherent impls for `{:?}`", key }
+ }
+
+ /// The result of unsafety-checking this `LocalDefId`.
+ query unsafety_check_result(key: LocalDefId) -> &'tcx mir::UnsafetyCheckResult {
+ desc { |tcx| "unsafety-checking `{}`", tcx.def_path_str(key.to_def_id()) }
+ cache_on_disk_if { true }
+ }
+ query unsafety_check_result_for_const_arg(key: (LocalDefId, DefId)) -> &'tcx mir::UnsafetyCheckResult {
+ desc {
+ |tcx| "unsafety-checking the const argument `{}`",
+ tcx.def_path_str(key.0.to_def_id())
+ }
+ }
+
+ /// Unsafety-check this `LocalDefId` with THIR unsafeck. This should be
+ /// used with `-Zthir-unsafeck`.
+ query thir_check_unsafety(key: LocalDefId) {
+ desc { |tcx| "unsafety-checking `{}`", tcx.def_path_str(key.to_def_id()) }
+ cache_on_disk_if { true }
+ }
+ query thir_check_unsafety_for_const_arg(key: (LocalDefId, DefId)) {
+ desc {
+ |tcx| "unsafety-checking the const argument `{}`",
+ tcx.def_path_str(key.0.to_def_id())
+ }
+ }
+
+ /// HACK: when evaluated, this reports an "unsafe derive on repr(packed)" error.
+ ///
+ /// Unsafety checking is executed for each method separately, but we only want
+ /// to emit this error once per derive. As there are some impls with multiple
+ /// methods, we use a query for deduplication.
+ query unsafe_derive_on_repr_packed(key: LocalDefId) -> () {
+ desc { |tcx| "processing `{}`", tcx.def_path_str(key.to_def_id()) }
+ }
+
+ /// Computes the signature of the function.
+ query fn_sig(key: DefId) -> ty::PolyFnSig<'tcx> {
+ desc { |tcx| "computing function signature of `{}`", tcx.def_path_str(key) }
+ cache_on_disk_if { key.is_local() }
+ separate_provide_extern
+ }
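+
+ // Hypothetical usage (not part of this change): counting the parameters
+ // of a signature; `skip_binder` peels off the late-bound regions.
+ //
+ // fn arg_count<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> usize {
+ //     tcx.fn_sig(def_id).skip_binder().inputs().len()
+ // }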
+
+ /// Performs lint checking for the module.
+ query lint_mod(key: LocalDefId) -> () {
+ desc { |tcx| "linting {}", describe_as_module(key, tcx) }
+ }
+
+ /// Checks the attributes in the module.
+ query check_mod_attrs(key: LocalDefId) -> () {
+ desc { |tcx| "checking attributes in {}", describe_as_module(key, tcx) }
+ }
+
+ /// Checks for uses of unstable APIs in the module.
+ query check_mod_unstable_api_usage(key: LocalDefId) -> () {
+ desc { |tcx| "checking for unstable API usage in {}", describe_as_module(key, tcx) }
+ }
+
+ /// Checks the const bodies in the module for illegal operations (e.g. `if` or `loop`).
+ query check_mod_const_bodies(key: LocalDefId) -> () {
+ desc { |tcx| "checking consts in {}", describe_as_module(key, tcx) }
+ }
+
+ /// Checks the loops in the module.
+ query check_mod_loops(key: LocalDefId) -> () {
+ desc { |tcx| "checking loops in {}", describe_as_module(key, tcx) }
+ }
+
+ query check_mod_naked_functions(key: LocalDefId) -> () {
+ desc { |tcx| "checking naked functions in {}", describe_as_module(key, tcx) }
+ }
+
+ query check_mod_item_types(key: LocalDefId) -> () {
+ desc { |tcx| "checking item types in {}", describe_as_module(key, tcx) }
+ }
+
+ query check_mod_privacy(key: LocalDefId) -> () {
+ desc { |tcx| "checking privacy in {}", describe_as_module(key, tcx) }
+ }
+
+ query check_mod_liveness(key: LocalDefId) -> () {
+ desc { |tcx| "checking liveness of variables in {}", describe_as_module(key, tcx) }
+ }
+
+ /// Returns the live symbols in the crate for the dead code check.
+ ///
+ /// The second return value maps from ADTs to ignored derived traits (e.g. Debug and Clone) and
+ /// their respective impl (i.e., part of the derive macro).
+ query live_symbols_and_ignored_derived_traits(_: ()) -> (
+ FxHashSet<LocalDefId>,
+ FxHashMap<LocalDefId, Vec<(DefId, DefId)>>
+ ) {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { "find live symbols in crate" }
+ }
+
+ query check_mod_deathness(key: LocalDefId) -> () {
+ desc { |tcx| "checking deathness of variables in {}", describe_as_module(key, tcx) }
+ }
+
+ query check_mod_impl_wf(key: LocalDefId) -> () {
+ desc { |tcx| "checking that impls are well-formed in {}", describe_as_module(key, tcx) }
+ }
+
+ query check_mod_type_wf(key: LocalDefId) -> () {
+ desc { |tcx| "checking that types are well-formed in {}", describe_as_module(key, tcx) }
+ }
+
+ query collect_mod_item_types(key: LocalDefId) -> () {
+ desc { |tcx| "collecting item types in {}", describe_as_module(key, tcx) }
+ }
+
+ /// Caches `CoerceUnsized` kinds for impls on custom types.
+ query coerce_unsized_info(key: DefId) -> ty::adjustment::CoerceUnsizedInfo {
+ desc { |tcx| "computing CoerceUnsized info for `{}`", tcx.def_path_str(key) }
+ cache_on_disk_if { key.is_local() }
+ separate_provide_extern
+ }
+
+ query typeck_item_bodies(_: ()) -> () {
+ desc { "type-checking all item bodies" }
+ }
+
+ query typeck(key: LocalDefId) -> &'tcx ty::TypeckResults<'tcx> {
+ desc { |tcx| "type-checking `{}`", tcx.def_path_str(key.to_def_id()) }
+ cache_on_disk_if { true }
+ }
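+
+ // Sketch (not part of this change), assuming `hir_id` points into the
+ // body owned by `def_id`: looking up the inferred type of a node.
+ //
+ // let results = tcx.typeck(def_id);
+ // let ty = results.node_type(hir_id);
+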
+ query typeck_const_arg(
+ key: (LocalDefId, DefId)
+ ) -> &'tcx ty::TypeckResults<'tcx> {
+ desc {
+ |tcx| "type-checking the const argument `{}`",
+ tcx.def_path_str(key.0.to_def_id()),
+ }
+ }
+ query diagnostic_only_typeck(key: LocalDefId) -> &'tcx ty::TypeckResults<'tcx> {
+ desc { |tcx| "type-checking `{}`", tcx.def_path_str(key.to_def_id()) }
+ cache_on_disk_if { true }
+ load_cached(tcx, id) {
+ let typeck_results: Option<ty::TypeckResults<'tcx>> = tcx
+ .on_disk_cache().as_ref()
+ .and_then(|c| c.try_load_query_result(*tcx, id));
+
+ typeck_results.map(|x| &*tcx.arena.alloc(x))
+ }
+ }
+
+ query used_trait_imports(key: LocalDefId) -> &'tcx FxHashSet<LocalDefId> {
+ desc { |tcx| "used_trait_imports `{}`", tcx.def_path_str(key.to_def_id()) }
+ cache_on_disk_if { true }
+ }
+
+ query has_typeck_results(def_id: DefId) -> bool {
+ desc { |tcx| "checking whether `{}` has a body", tcx.def_path_str(def_id) }
+ }
+
+ query coherent_trait(def_id: DefId) -> () {
+ desc { |tcx| "coherence checking all impls of trait `{}`", tcx.def_path_str(def_id) }
+ }
+
+ /// Borrow-checks the function body. If this is a closure, returns
+ /// additional requirements that the closure's creator must verify.
+ query mir_borrowck(key: LocalDefId) -> &'tcx mir::BorrowCheckResult<'tcx> {
+ desc { |tcx| "borrow-checking `{}`", tcx.def_path_str(key.to_def_id()) }
+ cache_on_disk_if(tcx) { tcx.is_typeck_child(key.to_def_id()) }
+ }
+ query mir_borrowck_const_arg(key: (LocalDefId, DefId)) -> &'tcx mir::BorrowCheckResult<'tcx> {
+ desc {
+ |tcx| "borrow-checking the const argument`{}`",
+ tcx.def_path_str(key.0.to_def_id())
+ }
+ }
+
+ /// Gets a complete map from all types to their inherent impls.
+ /// Not meant to be used directly outside of coherence.
+ query crate_inherent_impls(k: ()) -> CrateInherentImpls {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { "all inherent impls defined in crate" }
+ }
+
+ /// Checks all types in the crate for overlap in their inherent impls. Reports errors.
+ /// Not meant to be used directly outside of coherence.
+ query crate_inherent_impls_overlap_check(_: ()) -> () {
+ desc { "check for overlap between inherent impls defined in this crate" }
+ }
+
+ /// Checks whether the impl of the given `LocalDefId` follows the orphan rules.
+ /// Reports an error and returns `Err(ErrorGuaranteed)` if it does not.
+ query orphan_check_impl(key: LocalDefId) -> Result<(), ErrorGuaranteed> {
+ desc { |tcx|
+ "checking whether impl `{}` follows the orphan rules",
+ tcx.def_path_str(key.to_def_id()),
+ }
+ }
+
+ /// Check whether the function has any recursion that could cause the inliner to trigger
+ /// a cycle, i.e., whether the instance in `key.0` (transitively) calls the function in
+ /// `key.1`. The traversed call stack does not contain the current function, just all
+ /// intermediate functions.
+ query mir_callgraph_reachable(key: (ty::Instance<'tcx>, LocalDefId)) -> bool {
+ fatal_cycle
+ desc { |tcx|
+ "computing if `{}` (transitively) calls `{}`",
+ key.0,
+ tcx.def_path_str(key.1.to_def_id()),
+ }
+ }
+
+ /// Obtain all the calls into other local functions
+ query mir_inliner_callees(key: ty::InstanceDef<'tcx>) -> &'tcx [(DefId, SubstsRef<'tcx>)] {
+ fatal_cycle
+ desc { |tcx|
+ "computing all local function calls in `{}`",
+ tcx.def_path_str(key.def_id()),
+ }
+ }
+
+ /// Evaluates a constant and returns the computed allocation.
+ ///
+ /// **Do not use this** directly, use the `tcx.eval_static_initializer` wrapper.
+ query eval_to_allocation_raw(key: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>)
+ -> EvalToAllocationRawResult<'tcx> {
+ desc { |tcx|
+ "const-evaluating + checking `{}`",
+ key.value.display(tcx)
+ }
+ cache_on_disk_if { true }
+ }
+
+ /// Evaluates const items or anonymous constants
+ /// (such as enum variant explicit discriminants or array lengths)
+ /// into a representation suitable for the type system and const generics.
+ ///
+ /// **Do not use this** directly, use one of the following wrappers: `tcx.const_eval_poly`,
+ /// `tcx.const_eval_resolve`, `tcx.const_eval_instance`, or `tcx.const_eval_global_id`.
+ query eval_to_const_value_raw(key: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>)
+ -> EvalToConstValueResult<'tcx> {
+ desc { |tcx|
+ "simplifying constant for the type system `{}`",
+ key.value.display(tcx)
+ }
+ cache_on_disk_if { true }
+ }
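+
+ // The wrappers named above are the intended entry points; a sketch for a
+ // monomorphic `const` item (not part of this change):
+ //
+ // match tcx.const_eval_poly(def_id) {
+ //     Ok(val) => { /* a `ConstValue` usable by the type system */ }
+ //     Err(err) => { /* evaluation failed or was tainted by errors */ }
+ // }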
+
+ /// Evaluate a constant and convert it to a type level constant or
+ /// return `None` if that is not possible.
+ query eval_to_valtree(
+ key: ty::ParamEnvAnd<'tcx, GlobalId<'tcx>>
+ ) -> EvalToValTreeResult<'tcx> {
+ desc { "evaluating type-level constant" }
+ }
+
+ /// Converts a type level constant value into `ConstValue`
+ query valtree_to_const_val(key: (Ty<'tcx>, ty::ValTree<'tcx>)) -> ConstValue<'tcx> {
+ desc { "converting type-level constant value to mir constant value"}
+ }
+
+ /// Destructures array, ADT or tuple constants into the constants
+ /// of their fields.
+ query destructure_const(key: ty::Const<'tcx>) -> ty::DestructuredConst<'tcx> {
+ desc { "destructuring type level constant"}
+ }
+
+ /// Tries to destructure an `mir::ConstantKind` ADT or array into its variant index
+ /// and its field values.
+ query try_destructure_mir_constant(key: ty::ParamEnvAnd<'tcx, mir::ConstantKind<'tcx>>) -> Option<mir::DestructuredMirConstant<'tcx>> {
+ desc { "destructuring mir constant"}
+ remap_env_constness
+ }
+
+ /// Dereference a constant reference or raw pointer and turn the result into a constant
+ /// again.
+ query deref_mir_constant(
+ key: ty::ParamEnvAnd<'tcx, mir::ConstantKind<'tcx>>
+ ) -> mir::ConstantKind<'tcx> {
+ desc { "dereferencing mir constant" }
+ remap_env_constness
+ }
+
+ query const_caller_location(key: (rustc_span::Symbol, u32, u32)) -> ConstValue<'tcx> {
+ desc { "get a &core::panic::Location referring to a span" }
+ }
+
+ // FIXME get rid of this with valtrees
+ query lit_to_const(
+ key: LitToConstInput<'tcx>
+ ) -> Result<ty::Const<'tcx>, LitToConstError> {
+ desc { "converting literal to const" }
+ }
+
+ query lit_to_mir_constant(key: LitToConstInput<'tcx>) -> Result<mir::ConstantKind<'tcx>, LitToConstError> {
+ desc { "converting literal to mir constant" }
+ }
+
+ query check_match(key: DefId) {
+ desc { |tcx| "match-checking `{}`", tcx.def_path_str(key) }
+ cache_on_disk_if { key.is_local() }
+ }
+
+ /// Performs part of the privacy check and computes "access levels".
+ query privacy_access_levels(_: ()) -> &'tcx AccessLevels {
+ eval_always
+ desc { "privacy access levels" }
+ }
+ query check_private_in_public(_: ()) -> () {
+ eval_always
+ desc { "checking for private elements in public interfaces" }
+ }
+
+ query reachable_set(_: ()) -> FxHashSet<LocalDefId> {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { "reachability" }
+ }
+
+ /// Per-body `region::ScopeTree`. The `DefId` should be the owner `DefId` for the body;
+ /// in the case of closures, this will be redirected to the enclosing function.
+ query region_scope_tree(def_id: DefId) -> &'tcx crate::middle::region::ScopeTree {
+ desc { |tcx| "computing drop scopes for `{}`", tcx.def_path_str(def_id) }
+ }
+
+ /// Generates a MIR body for the shim.
+ query mir_shims(key: ty::InstanceDef<'tcx>) -> mir::Body<'tcx> {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { |tcx| "generating MIR shim for `{}`", tcx.def_path_str(key.def_id()) }
+ }
+
+ /// The `symbol_name` query provides the symbol name for calling a
+ /// given instance from the local crate. In particular, it will also
+ /// look up the correct symbol name of instances from upstream crates.
+ query symbol_name(key: ty::Instance<'tcx>) -> ty::SymbolName<'tcx> {
+ desc { "computing the symbol for `{}`", key }
+ cache_on_disk_if { true }
+ }
+
+ query opt_def_kind(def_id: DefId) -> Option<DefKind> {
+ desc { |tcx| "looking up definition kind of `{}`", tcx.def_path_str(def_id) }
+ cache_on_disk_if { def_id.is_local() }
+ separate_provide_extern
+ }
+
+ /// Gets the span for the definition.
+ query def_span(def_id: DefId) -> Span {
+ desc { |tcx| "looking up span for `{}`", tcx.def_path_str(def_id) }
+ cache_on_disk_if { def_id.is_local() }
+ separate_provide_extern
+ }
+
+ /// Gets the span for the identifier of the definition.
+ query def_ident_span(def_id: DefId) -> Option<Span> {
+ desc { |tcx| "looking up span for `{}`'s identifier", tcx.def_path_str(def_id) }
+ cache_on_disk_if { def_id.is_local() }
+ separate_provide_extern
+ }
+
+ query lookup_stability(def_id: DefId) -> Option<attr::Stability> {
+ desc { |tcx| "looking up stability of `{}`", tcx.def_path_str(def_id) }
+ cache_on_disk_if { def_id.is_local() }
+ separate_provide_extern
+ }
+
+ query lookup_const_stability(def_id: DefId) -> Option<attr::ConstStability> {
+ desc { |tcx| "looking up const stability of `{}`", tcx.def_path_str(def_id) }
+ cache_on_disk_if { def_id.is_local() }
+ separate_provide_extern
+ }
+
+ query should_inherit_track_caller(def_id: DefId) -> bool {
+ desc { |tcx| "computing should_inherit_track_caller of `{}`", tcx.def_path_str(def_id) }
+ }
+
+ query lookup_deprecation_entry(def_id: DefId) -> Option<DeprecationEntry> {
+ desc { |tcx| "checking whether `{}` is deprecated", tcx.def_path_str(def_id) }
+ cache_on_disk_if { def_id.is_local() }
+ separate_provide_extern
+ }
+
+ /// Determines whether an item is annotated with `doc(hidden)`.
+ query is_doc_hidden(def_id: DefId) -> bool {
+ desc { |tcx| "checking whether `{}` is `doc(hidden)`", tcx.def_path_str(def_id) }
+ }
+
+ /// Returns the attributes on the item at `def_id`.
+ ///
+ /// Do not use this directly, use `tcx.get_attrs` instead.
+ query item_attrs(def_id: DefId) -> &'tcx [ast::Attribute] {
+ desc { |tcx| "collecting attributes of `{}`", tcx.def_path_str(def_id) }
+ separate_provide_extern
+ }
+
+ query codegen_fn_attrs(def_id: DefId) -> CodegenFnAttrs {
+ desc { |tcx| "computing codegen attributes of `{}`", tcx.def_path_str(def_id) }
+ storage(ArenaCacheSelector<'tcx>)
+ cache_on_disk_if { def_id.is_local() }
+ separate_provide_extern
+ }
+
+ query asm_target_features(def_id: DefId) -> &'tcx FxHashSet<Symbol> {
+ desc { |tcx| "computing target features for inline asm of `{}`", tcx.def_path_str(def_id) }
+ }
+
+ query fn_arg_names(def_id: DefId) -> &'tcx [rustc_span::symbol::Ident] {
+ desc { |tcx| "looking up function parameter names for `{}`", tcx.def_path_str(def_id) }
+ cache_on_disk_if { def_id.is_local() }
+ separate_provide_extern
+ }
+ /// Gets the rendered value of the specified constant or associated constant.
+ /// Used by rustdoc.
+ query rendered_const(def_id: DefId) -> String {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { |tcx| "rendering constant intializer of `{}`", tcx.def_path_str(def_id) }
+ cache_on_disk_if { def_id.is_local() }
+ separate_provide_extern
+ }
+ query impl_parent(def_id: DefId) -> Option<DefId> {
+ desc { |tcx| "computing specialization parent impl of `{}`", tcx.def_path_str(def_id) }
+ cache_on_disk_if { def_id.is_local() }
+ separate_provide_extern
+ }
+
+ query is_ctfe_mir_available(key: DefId) -> bool {
+ desc { |tcx| "checking if item has ctfe mir available: `{}`", tcx.def_path_str(key) }
+ cache_on_disk_if { key.is_local() }
+ separate_provide_extern
+ }
+ query is_mir_available(key: DefId) -> bool {
+ desc { |tcx| "checking if item has mir available: `{}`", tcx.def_path_str(key) }
+ cache_on_disk_if { key.is_local() }
+ separate_provide_extern
+ }
+
+ query own_existential_vtable_entries(
+ key: ty::PolyExistentialTraitRef<'tcx>
+ ) -> &'tcx [DefId] {
+ desc { |tcx| "finding all existential vtable entries for trait {}", tcx.def_path_str(key.def_id()) }
+ }
+
+ query vtable_entries(key: ty::PolyTraitRef<'tcx>)
+ -> &'tcx [ty::VtblEntry<'tcx>] {
+ desc { |tcx| "finding all vtable entries for trait {}", tcx.def_path_str(key.def_id()) }
+ }
+
+ query vtable_trait_upcasting_coercion_new_vptr_slot(key: (ty::Ty<'tcx>, ty::Ty<'tcx>)) -> Option<usize> {
+ desc { |tcx| "finding the slot within vtable for trait object {} vtable ptr during trait upcasting coercion from {} vtable",
+ key.1, key.0 }
+ }
+
+ query vtable_allocation(key: (Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>)) -> mir::interpret::AllocId {
+ desc { |tcx| "vtable const allocation for <{} as {}>",
+ key.0,
+ key.1.map(|trait_ref| format!("{}", trait_ref)).unwrap_or("_".to_owned())
+ }
+ }
+
+ query codegen_fulfill_obligation(
+ key: (ty::ParamEnv<'tcx>, ty::PolyTraitRef<'tcx>)
+ ) -> Result<&'tcx ImplSource<'tcx, ()>, traits::CodegenObligationError> {
+ cache_on_disk_if { true }
+ desc { |tcx|
+ "checking if `{}` fulfills its obligations",
+ tcx.def_path_str(key.1.def_id())
+ }
+ }
+
+ /// Returns all `impl` blocks in the current crate.
+ query all_local_trait_impls(_: ()) -> &'tcx rustc_data_structures::fx::FxIndexMap<DefId, Vec<LocalDefId>> {
+ desc { "local trait impls" }
+ }
+
+ /// Given a trait `trait_id`, return all known `impl` blocks.
+ query trait_impls_of(trait_id: DefId) -> ty::trait_def::TraitImpls {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { |tcx| "trait impls of `{}`", tcx.def_path_str(trait_id) }
+ }
+
+ query specialization_graph_of(trait_id: DefId) -> specialization_graph::Graph {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { |tcx| "building specialization graph of trait `{}`", tcx.def_path_str(trait_id) }
+ cache_on_disk_if { true }
+ }
+ query object_safety_violations(trait_id: DefId) -> &'tcx [traits::ObjectSafetyViolation] {
+ desc { |tcx| "determine object safety of trait `{}`", tcx.def_path_str(trait_id) }
+ }
+
+ /// Gets the ParameterEnvironment for a given item; this environment
+ /// will be in "user-facing" mode, meaning that it is suitable for
+ /// type-checking etc, and it does not normalize specializable
+ /// associated types. This is almost always what you want,
+ /// unless you are doing MIR optimizations, in which case you
+ /// might want to use the `reveal_all()` method to change modes.
+ query param_env(def_id: DefId) -> ty::ParamEnv<'tcx> {
+ desc { |tcx| "computing normalized predicates of `{}`", tcx.def_path_str(def_id) }
+ }
+
+ /// Like `param_env`, but returns the `ParamEnv` in `Reveal::All` mode.
+ /// Prefer this over `tcx.param_env(def_id).with_reveal_all_normalized(tcx)`,
+ /// as this method is more efficient.
+ query param_env_reveal_all_normalized(def_id: DefId) -> ty::ParamEnv<'tcx> {
+ desc { |tcx| "computing revealed normalized predicates of `{}`", tcx.def_path_str(def_id) }
+ }
+
+ /// Trait selection queries. These are best used by invoking `ty.is_copy_modulo_regions()`,
+ /// `ty.is_copy()`, etc, since that will prune the environment where possible.
+ query is_copy_raw(env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool {
+ desc { "computing whether `{}` is `Copy`", env.value }
+ remap_env_constness
+ }
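+
+ // Sketch of the preferred entry point (assumes a `span`, a `ty` and a
+ // `def_id` are at hand; not part of this change); the wrapper prunes the
+ // environment before reaching this query:
+ //
+ // let param_env = tcx.param_env(def_id);
+ // let is_copy = ty.is_copy_modulo_regions(tcx.at(span), param_env);
+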
+ /// Query backing `Ty::is_sized`.
+ query is_sized_raw(env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool {
+ desc { "computing whether `{}` is `Sized`", env.value }
+ remap_env_constness
+ }
+ /// Query backing `Ty::is_freeze`.
+ query is_freeze_raw(env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool {
+ desc { "computing whether `{}` is freeze", env.value }
+ remap_env_constness
+ }
+ /// Query backing `Ty::is_unpin`.
+ query is_unpin_raw(env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool {
+ desc { "computing whether `{}` is `Unpin`", env.value }
+ remap_env_constness
+ }
+ /// Query backing `Ty::needs_drop`.
+ query needs_drop_raw(env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool {
+ desc { "computing whether `{}` needs drop", env.value }
+ remap_env_constness
+ }
+ /// Query backing `Ty::has_significant_drop`.
+ query has_significant_drop_raw(env: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool {
+ desc { "computing whether `{}` has a significant drop", env.value }
+ remap_env_constness
+ }
+
+ /// Query backing `Ty::is_structural_eq_shallow`.
+ ///
+ /// This is only correct for ADTs. Call `is_structural_eq_shallow` to handle all types
+ /// correctly.
+ query has_structural_eq_impls(ty: Ty<'tcx>) -> bool {
+ desc {
+ "computing whether `{:?}` implements `PartialStructuralEq` and `StructuralEq`",
+ ty
+ }
+ }
+
+ /// A list of types where the ADT requires drop if and only if any of
+ /// those types require drop. If the ADT is known to always need drop
+ /// then `Err(AlwaysRequiresDrop)` is returned.
+ query adt_drop_tys(def_id: DefId) -> Result<&'tcx ty::List<Ty<'tcx>>, AlwaysRequiresDrop> {
+ desc { |tcx| "computing when `{}` needs drop", tcx.def_path_str(def_id) }
+ cache_on_disk_if { true }
+ }
+
+ /// A list of types where the ADT requires drop if and only if any of those types
+ /// has significant drop. A type marked with the attribute `rustc_insignificant_dtor`
+ /// is considered to not be significant. A drop is significant if it is implemented
+ /// by the user or does anything that will have any observable behavior (other than
+ /// freeing up memory). If the ADT is known to have a significant destructor then
+ /// `Err(AlwaysRequiresDrop)` is returned.
+ query adt_significant_drop_tys(def_id: DefId) -> Result<&'tcx ty::List<Ty<'tcx>>, AlwaysRequiresDrop> {
+ desc { |tcx| "computing when `{}` has a significant destructor", tcx.def_path_str(def_id) }
+ cache_on_disk_if { false }
+ }
+
+ /// Computes the layout of a type. Note that this implicitly
+ /// executes in "reveal all" mode, and will normalize the input type.
+ query layout_of(
+ key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>
+ ) -> Result<ty::layout::TyAndLayout<'tcx>, ty::layout::LayoutError<'tcx>> {
+ desc { "computing layout of `{}`", key.value }
+ remap_env_constness
+ }
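+
+ // Illustrative call (not part of this change, and assumes a context that
+ // can propagate `LayoutError`): the key pairs a `ParamEnv` with the type,
+ // and the resulting layout carries size and alignment.
+ //
+ // let layout = tcx.layout_of(param_env.and(ty))?;
+ // let (size, align) = (layout.size, layout.align.abi);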
+
+ /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
+ ///
+ /// NB: this doesn't handle virtual calls - those should use `fn_abi_of_instance`
+ /// instead, where the instance is an `InstanceDef::Virtual`.
+ query fn_abi_of_fn_ptr(
+ key: ty::ParamEnvAnd<'tcx, (ty::PolyFnSig<'tcx>, &'tcx ty::List<Ty<'tcx>>)>
+ ) -> Result<&'tcx abi::call::FnAbi<'tcx, Ty<'tcx>>, ty::layout::FnAbiError<'tcx>> {
+ desc { "computing call ABI of `{}` function pointers", key.value.0 }
+ remap_env_constness
+ }
+
+ /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
+ /// direct calls to an `fn`.
+ ///
+ /// NB: that includes virtual calls, which are represented by "direct calls"
+ /// to an `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
+ query fn_abi_of_instance(
+ key: ty::ParamEnvAnd<'tcx, (ty::Instance<'tcx>, &'tcx ty::List<Ty<'tcx>>)>
+ ) -> Result<&'tcx abi::call::FnAbi<'tcx, Ty<'tcx>>, ty::layout::FnAbiError<'tcx>> {
+ desc { "computing call ABI of `{}`", key.value.0 }
+ remap_env_constness
+ }
+
+ query dylib_dependency_formats(_: CrateNum)
+ -> &'tcx [(CrateNum, LinkagePreference)] {
+ desc { "dylib dependency formats of crate" }
+ separate_provide_extern
+ }
+
+ query dependency_formats(_: ()) -> Lrc<crate::middle::dependency_format::Dependencies> {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { "get the linkage format of all dependencies" }
+ }
+
+ query is_compiler_builtins(_: CrateNum) -> bool {
+ fatal_cycle
+ desc { "checking if the crate is_compiler_builtins" }
+ separate_provide_extern
+ }
+ query has_global_allocator(_: CrateNum) -> bool {
+ // This query depends on untracked global state in CStore
+ eval_always
+ fatal_cycle
+ desc { "checking if the crate has_global_allocator" }
+ separate_provide_extern
+ }
+ query has_panic_handler(_: CrateNum) -> bool {
+ fatal_cycle
+ desc { "checking if the crate has_panic_handler" }
+ separate_provide_extern
+ }
+ query is_profiler_runtime(_: CrateNum) -> bool {
+ fatal_cycle
+ desc { "query a crate is `#![profiler_runtime]`" }
+ separate_provide_extern
+ }
+ query has_ffi_unwind_calls(key: LocalDefId) -> bool {
+ desc { |tcx| "check if `{}` contains FFI-unwind calls", tcx.def_path_str(key.to_def_id()) }
+ cache_on_disk_if { true }
+ }
+ query required_panic_strategy(_: CrateNum) -> Option<PanicStrategy> {
+ fatal_cycle
+ desc { "query a crate's required panic strategy" }
+ separate_provide_extern
+ }
+ query panic_in_drop_strategy(_: CrateNum) -> PanicStrategy {
+ fatal_cycle
+ desc { "query a crate's configured panic-in-drop strategy" }
+ separate_provide_extern
+ }
+ query is_no_builtins(_: CrateNum) -> bool {
+ fatal_cycle
+ desc { "test whether a crate has `#![no_builtins]`" }
+ separate_provide_extern
+ }
+ query symbol_mangling_version(_: CrateNum) -> SymbolManglingVersion {
+ fatal_cycle
+ desc { "query a crate's symbol mangling version" }
+ separate_provide_extern
+ }
+
+ query extern_crate(def_id: DefId) -> Option<&'tcx ExternCrate> {
+ eval_always
+ desc { "getting crate's ExternCrateData" }
+ separate_provide_extern
+ }
+
+ query specializes(_: (DefId, DefId)) -> bool {
+ desc { "computing whether impls specialize one another" }
+ }
+ query in_scope_traits_map(_: LocalDefId)
+ -> Option<&'tcx FxHashMap<ItemLocalId, Box<[TraitCandidate]>>> {
+ desc { "traits in scope at a block" }
+ }
+
+ query module_reexports(def_id: LocalDefId) -> Option<&'tcx [ModChild]> {
+ desc { |tcx| "looking up reexports of module `{}`", tcx.def_path_str(def_id.to_def_id()) }
+ }
+
+ query impl_defaultness(def_id: DefId) -> hir::Defaultness {
+ desc { |tcx| "looking up whether `{}` is a default impl", tcx.def_path_str(def_id) }
+ cache_on_disk_if { def_id.is_local() }
+ separate_provide_extern
+ }
+
+ query check_well_formed(key: LocalDefId) -> () {
+ desc { |tcx| "checking that `{}` is well-formed", tcx.def_path_str(key.to_def_id()) }
+ }
+
+ // The `DefId`s of all non-generic functions and statics in the given crate
+ // that can be reached from outside the crate.
+ //
+ // We expect these items to be available for linking against.
+ //
+ // This query can also be called for `LOCAL_CRATE`. In this case it will
+ // compute which items will be reachable to other crates, taking into account
+ // the kind of crate that is currently compiled. Crates with only a
+ // C interface have fewer reachable things.
+ //
+ // Does not include external symbols that don't have a corresponding DefId,
+ // like the compiler-generated `main` function and so on.
+ query reachable_non_generics(_: CrateNum)
+ -> DefIdMap<SymbolExportInfo> {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { "looking up the exported symbols of a crate" }
+ separate_provide_extern
+ }
+ query is_reachable_non_generic(def_id: DefId) -> bool {
+ desc { |tcx| "checking whether `{}` is an exported symbol", tcx.def_path_str(def_id) }
+ cache_on_disk_if { def_id.is_local() }
+ separate_provide_extern
+ }
+ query is_unreachable_local_definition(def_id: LocalDefId) -> bool {
+ desc { |tcx|
+ "checking whether `{}` is reachable from outside the crate",
+ tcx.def_path_str(def_id.to_def_id()),
+ }
+ }
+
+ /// The entire set of monomorphizations the local crate can safely link
+ /// to because they are exported from upstream crates. Do not depend on
+ /// this directly, as its value changes anytime a monomorphization gets
+ /// added or removed in any upstream crate. Instead use the narrower
+ /// `upstream_monomorphizations_for`, `upstream_drop_glue_for`, or, even
+ /// better, `Instance::upstream_monomorphization()`.
+ query upstream_monomorphizations(_: ()) -> DefIdMap<FxHashMap<SubstsRef<'tcx>, CrateNum>> {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { "collecting available upstream monomorphizations" }
+ }
+
+ /// Returns the set of upstream monomorphizations available for the
+ /// generic function identified by the given `def_id`. The query makes
+ /// sure to make a stable selection if the same monomorphization is
+ /// available in multiple upstream crates.
+ ///
+ /// You likely want to call `Instance::upstream_monomorphization()`
+ /// instead of invoking this query directly.
+ query upstream_monomorphizations_for(def_id: DefId)
+ -> Option<&'tcx FxHashMap<SubstsRef<'tcx>, CrateNum>>
+ {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { |tcx|
+ "collecting available upstream monomorphizations for `{}`",
+ tcx.def_path_str(def_id),
+ }
+ separate_provide_extern
+ }
+
+ /// Returns the upstream crate that exports drop-glue for the given
+ /// type (`substs` is expected to be a single-item list containing the
+ /// type one wants drop-glue for).
+ ///
+ /// This is a subset of `upstream_monomorphizations_for` in order to
+ /// increase dep-tracking granularity. Otherwise adding or removing any
+ /// type with drop-glue in any upstream crate would invalidate all
+ /// functions calling drop-glue of an upstream type.
+ ///
+ /// You likely want to call `Instance::upstream_monomorphization()`
+ /// instead of invoking this query directly.
+ ///
+ /// NOTE: This query could easily be extended to also support other
+ /// common functions that have a large set of monomorphizations
+ /// (like `Clone::clone` for example).
+ query upstream_drop_glue_for(substs: SubstsRef<'tcx>) -> Option<CrateNum> {
+ desc { "available upstream drop-glue for `{:?}`", substs }
+ }
+
+ query foreign_modules(_: CrateNum) -> FxHashMap<DefId, ForeignModule> {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { "looking up the foreign modules of a linked crate" }
+ separate_provide_extern
+ }
+
+ /// Identifies the entry-point (e.g., the `main` function) for a given
+ /// crate, returning `None` if there is no entry point (such as for library crates).
+ query entry_fn(_: ()) -> Option<(DefId, EntryFnType)> {
+ desc { "looking up the entry function of a crate" }
+ }
+ query proc_macro_decls_static(_: ()) -> Option<LocalDefId> {
+ desc { "looking up the derive registrar for a crate" }
+ }
+ // The macro which defines `rustc_metadata::provide_extern` depends on this query's name.
+ // Changing the name should cause a compiler error, but in case that changes, be aware.
+ query crate_hash(_: CrateNum) -> Svh {
+ eval_always
+ desc { "looking up the hash a crate" }
+ separate_provide_extern
+ }
+ query crate_host_hash(_: CrateNum) -> Option<Svh> {
+ eval_always
+ desc { "looking up the hash of a host version of a crate" }
+ separate_provide_extern
+ }
+ query extra_filename(_: CrateNum) -> String {
+ storage(ArenaCacheSelector<'tcx>)
+ eval_always
+ desc { "looking up the extra filename for a crate" }
+ separate_provide_extern
+ }
+ query crate_extern_paths(_: CrateNum) -> Vec<PathBuf> {
+ storage(ArenaCacheSelector<'tcx>)
+ eval_always
+ desc { "looking up the paths for extern crates" }
+ separate_provide_extern
+ }
+
+ /// Given a crate and a trait, look up all impls of that trait in the crate.
+ /// Return `(impl_id, self_ty)`.
+ query implementations_of_trait(_: (CrateNum, DefId)) -> &'tcx [(DefId, Option<SimplifiedType>)] {
+ desc { "looking up implementations of a trait in a crate" }
+ separate_provide_extern
+ }
+
+ /// Collects all incoherent impls for the given crate and type.
+ ///
+ /// Do not call this directly, but instead use the `incoherent_impls` query.
+ /// This query is only used to get the data necessary for that query.
+ query crate_incoherent_impls(key: (CrateNum, SimplifiedType)) -> &'tcx [DefId] {
+ desc { |tcx| "collecting all impls for a type in a crate" }
+ separate_provide_extern
+ }
+
+ query is_dllimport_foreign_item(def_id: DefId) -> bool {
+ desc { |tcx| "is_dllimport_foreign_item({})", tcx.def_path_str(def_id) }
+ }
+ query is_statically_included_foreign_item(def_id: DefId) -> bool {
+ desc { |tcx| "is_statically_included_foreign_item({})", tcx.def_path_str(def_id) }
+ }
+ query native_library_kind(def_id: DefId)
+ -> Option<NativeLibKind> {
+ desc { |tcx| "native_library_kind({})", tcx.def_path_str(def_id) }
+ }
+
+ /// Does lifetime resolution, but does not descend into trait items. This
+ /// should only be used for resolving lifetimes on trait definitions,
+ /// and is used to avoid cycles. Importantly, `resolve_lifetimes` still visits
+ /// the same lifetimes and is responsible for diagnostics.
+ /// See `rustc_resolve::late::lifetimes` for details.
+ query resolve_lifetimes_trait_definition(_: LocalDefId) -> ResolveLifetimes {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { "resolving lifetimes for a trait definition" }
+ }
+ /// Does lifetime resolution on items. Importantly, we can't resolve
+ /// lifetimes directly on things like trait methods, because of trait params.
+ /// See `rustc_resolve::late::lifetimes` for details.
+ query resolve_lifetimes(_: LocalDefId) -> ResolveLifetimes {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { "resolving lifetimes" }
+ }
+ query named_region_map(_: LocalDefId) ->
+ Option<&'tcx FxHashMap<ItemLocalId, Region>> {
+ desc { "looking up a named region" }
+ }
+ query is_late_bound_map(_: LocalDefId) -> Option<&'tcx FxIndexSet<LocalDefId>> {
+ desc { "testing if a region is late bound" }
+ }
+ /// For a given item (like a struct), gets the default lifetimes to be used
+ /// for each parameter if a trait object were to be passed for that parameter.
+ /// For example, for `struct Foo<'a, T, U>`, this would be `['static, 'static]`.
+ /// For `struct Foo<'a, T: 'a, U>`, this would instead be `['a, 'static]`.
+ query object_lifetime_defaults(_: LocalDefId) -> Option<&'tcx [ObjectLifetimeDefault]> {
+ desc { "looking up lifetime defaults for a region on an item" }
+ }
+ query late_bound_vars_map(_: LocalDefId)
+ -> Option<&'tcx FxHashMap<ItemLocalId, Vec<ty::BoundVariableKind>>> {
+ desc { "looking up late bound vars" }
+ }
+
+ query visibility(def_id: DefId) -> ty::Visibility {
+ desc { |tcx| "computing visibility of `{}`", tcx.def_path_str(def_id) }
+ separate_provide_extern
+ }
+
+ /// Computes the set of modules from which this type is visibly uninhabited.
+ /// To check whether a type is uninhabited at all (not just from a given module), you could
+ /// check whether the forest is empty.
+ query type_uninhabited_from(
+ key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>
+ ) -> ty::inhabitedness::DefIdForest<'tcx> {
+ desc { "computing the inhabitedness of `{:?}`", key }
+ remap_env_constness
+ }
+
+ query dep_kind(_: CrateNum) -> CrateDepKind {
+ eval_always
+ desc { "fetching what a dependency looks like" }
+ separate_provide_extern
+ }
+
+ /// Gets the name of the crate.
+ query crate_name(_: CrateNum) -> Symbol {
+ eval_always
+ desc { "fetching what a crate is named" }
+ separate_provide_extern
+ }
+ query module_children(def_id: DefId) -> &'tcx [ModChild] {
+ desc { |tcx| "collecting child items of module `{}`", tcx.def_path_str(def_id) }
+ separate_provide_extern
+ }
+ query extern_mod_stmt_cnum(def_id: LocalDefId) -> Option<CrateNum> {
+ desc { |tcx| "computing crate imported by `{}`", tcx.def_path_str(def_id.to_def_id()) }
+ }
+
+ query lib_features(_: ()) -> LibFeatures {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { "calculating the lib features map" }
+ }
+ query defined_lib_features(_: CrateNum) -> &'tcx [(Symbol, Option<Symbol>)] {
+ desc { "calculating the lib features defined in a crate" }
+ separate_provide_extern
+ }
+ query stability_implications(_: CrateNum) -> FxHashMap<Symbol, Symbol> {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { "calculating the implications between `#[unstable]` features defined in a crate" }
+ separate_provide_extern
+ }
+ /// Whether the function is an intrinsic
+ query is_intrinsic(def_id: DefId) -> bool {
+ desc { |tcx| "is_intrinsic({})", tcx.def_path_str(def_id) }
+ separate_provide_extern
+ }
+ /// Calculates the full lang items map, combining items defined locally and in dependencies.
+ query get_lang_items(_: ()) -> LanguageItems {
+ storage(ArenaCacheSelector<'tcx>)
+ eval_always
+ desc { "calculating the lang items map" }
+ }
+
+ /// Returns all diagnostic items defined in all crates.
+ query all_diagnostic_items(_: ()) -> rustc_hir::diagnostic_items::DiagnosticItems {
+ storage(ArenaCacheSelector<'tcx>)
+ eval_always
+ desc { "calculating the diagnostic items map" }
+ }
+
+ /// Returns the lang items defined in another crate by loading it from metadata.
+ query defined_lang_items(_: CrateNum) -> &'tcx [(DefId, usize)] {
+ desc { "calculating the lang items defined in a crate" }
+ separate_provide_extern
+ }
+
+ /// Returns the diagnostic items defined in a crate.
+ query diagnostic_items(_: CrateNum) -> rustc_hir::diagnostic_items::DiagnosticItems {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { "calculating the diagnostic items map in a crate" }
+ separate_provide_extern
+ }
+
+ query missing_lang_items(_: CrateNum) -> &'tcx [LangItem] {
+ desc { "calculating the missing lang items in a crate" }
+ separate_provide_extern
+ }
+ query visible_parent_map(_: ()) -> DefIdMap<DefId> {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { "calculating the visible parent map" }
+ }
+ query trimmed_def_paths(_: ()) -> FxHashMap<DefId, Symbol> {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { "calculating trimmed def paths" }
+ }
+ query missing_extern_crate_item(_: CrateNum) -> bool {
+ eval_always
+ desc { "seeing if we're missing an `extern crate` item for this crate" }
+ separate_provide_extern
+ }
+ query used_crate_source(_: CrateNum) -> Lrc<CrateSource> {
+ storage(ArenaCacheSelector<'tcx>)
+ eval_always
+ desc { "looking at the source for a crate" }
+ separate_provide_extern
+ }
+ /// Returns the debugger visualizers defined for this crate.
+ query debugger_visualizers(_: CrateNum) -> Vec<rustc_span::DebuggerVisualizerFile> {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { "looking up the debugger visualizers for this crate" }
+ separate_provide_extern
+ }
+ query postorder_cnums(_: ()) -> &'tcx [CrateNum] {
+ eval_always
+ desc { "generating a postorder list of CrateNums" }
+ }
+ /// Returns whether or not the crate with CrateNum `c`
+ /// is marked as a private dependency.
+ query is_private_dep(c: CrateNum) -> bool {
+ eval_always
+ desc { "check whether crate {} is a private dependency", c }
+ separate_provide_extern
+ }
+ query allocator_kind(_: ()) -> Option<AllocatorKind> {
+ eval_always
+ desc { "allocator kind for the current crate" }
+ }
+
+ query upvars_mentioned(def_id: DefId) -> Option<&'tcx FxIndexMap<hir::HirId, hir::Upvar>> {
+ desc { |tcx| "collecting upvars mentioned in `{}`", tcx.def_path_str(def_id) }
+ }
+ query maybe_unused_trait_imports(_: ()) -> &'tcx FxIndexSet<LocalDefId> {
+ desc { "fetching potentially unused trait imports" }
+ }
+ query maybe_unused_extern_crates(_: ()) -> &'tcx [(LocalDefId, Span)] {
+ desc { "looking up all possibly unused extern crates" }
+ }
+ query names_imported_by_glob_use(def_id: LocalDefId) -> &'tcx FxHashSet<Symbol> {
+ desc { |tcx| "names_imported_by_glob_use for `{}`", tcx.def_path_str(def_id.to_def_id()) }
+ }
+
+ query stability_index(_: ()) -> stability::Index {
+ storage(ArenaCacheSelector<'tcx>)
+ eval_always
+ desc { "calculating the stability index for the local crate" }
+ }
+ query crates(_: ()) -> &'tcx [CrateNum] {
+ eval_always
+ desc { "fetching all foreign CrateNum instances" }
+ }
+
+ /// A list of all traits in a crate, used by rustdoc and error reporting.
+ /// NOTE: Not named just `traits` due to a naming conflict.
+ query traits_in_crate(_: CrateNum) -> &'tcx [DefId] {
+ desc { "fetching all traits in a crate" }
+ separate_provide_extern
+ }
+
+ /// The list of symbols exported from the given crate.
+ ///
+ /// - All names contained in `exported_symbols(cnum)` are guaranteed to
+ /// correspond to a publicly visible symbol in `cnum` machine code.
+ /// - The `exported_symbols` sets of different crates do not intersect.
+ query exported_symbols(cnum: CrateNum) -> &'tcx [(ExportedSymbol<'tcx>, SymbolExportInfo)] {
+ desc { "exported_symbols" }
+ cache_on_disk_if { *cnum == LOCAL_CRATE }
+ separate_provide_extern
+ }
+
+ query collect_and_partition_mono_items(_: ()) -> (&'tcx DefIdSet, &'tcx [CodegenUnit<'tcx>]) {
+ eval_always
+ desc { "collect_and_partition_mono_items" }
+ }
+ query is_codegened_item(def_id: DefId) -> bool {
+ desc { |tcx| "determining whether `{}` needs codegen", tcx.def_path_str(def_id) }
+ }
+
+ /// All items participating in code generation together with items inlined into them.
+ query codegened_and_inlined_items(_: ()) -> &'tcx DefIdSet {
+ eval_always
+ desc { "codegened_and_inlined_items" }
+ }
+
+ query codegen_unit(_: Symbol) -> &'tcx CodegenUnit<'tcx> {
+ desc { "codegen_unit" }
+ }
+ query unused_generic_params(key: ty::InstanceDef<'tcx>) -> FiniteBitSet<u32> {
+ cache_on_disk_if { key.def_id().is_local() }
+ desc {
+ |tcx| "determining which generic parameters are unused by `{}`",
+ tcx.def_path_str(key.def_id())
+ }
+ separate_provide_extern
+ }
+ query backend_optimization_level(_: ()) -> OptLevel {
+ desc { "optimization level used by backend" }
+ }
+
+    /// Returns the filenames where output artifacts shall be stored.
+ ///
+ /// This query returns an `&Arc` because codegen backends need the value even after the `TyCtxt`
+ /// has been destroyed.
+ query output_filenames(_: ()) -> &'tcx Arc<OutputFilenames> {
+ eval_always
+ desc { "output_filenames" }
+ }
+
+ /// Do not call this query directly: invoke `normalize` instead.
+ query normalize_projection_ty(
+ goal: CanonicalProjectionGoal<'tcx>
+ ) -> Result<
+ &'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, NormalizationResult<'tcx>>>,
+ NoSolution,
+ > {
+ desc { "normalizing `{:?}`", goal }
+ remap_env_constness
+ }
+
+ /// Do not call this query directly: invoke `try_normalize_erasing_regions` instead.
+ query try_normalize_generic_arg_after_erasing_regions(
+ goal: ParamEnvAnd<'tcx, GenericArg<'tcx>>
+ ) -> Result<GenericArg<'tcx>, NoSolution> {
+ desc { "normalizing `{}`", goal.value }
+ remap_env_constness
+ }
+
+ /// Do not call this query directly: invoke `try_normalize_erasing_regions` instead.
+ query try_normalize_mir_const_after_erasing_regions(
+ goal: ParamEnvAnd<'tcx, mir::ConstantKind<'tcx>>
+ ) -> Result<mir::ConstantKind<'tcx>, NoSolution> {
+ desc { "normalizing `{}`", goal.value }
+ remap_env_constness
+ }
+
+ query implied_outlives_bounds(
+ goal: CanonicalTyGoal<'tcx>
+ ) -> Result<
+ &'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, Vec<OutlivesBound<'tcx>>>>,
+ NoSolution,
+ > {
+ desc { "computing implied outlives bounds for `{:?}`", goal }
+ remap_env_constness
+ }
+
+ /// Do not call this query directly:
+ /// invoke `DropckOutlives::new(dropped_ty)).fully_perform(typeck.infcx)` instead.
+ query dropck_outlives(
+ goal: CanonicalTyGoal<'tcx>
+ ) -> Result<
+ &'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, DropckOutlivesResult<'tcx>>>,
+ NoSolution,
+ > {
+ desc { "computing dropck types for `{:?}`", goal }
+ remap_env_constness
+ }
+
+ /// Do not call this query directly: invoke `infcx.predicate_may_hold()` or
+ /// `infcx.predicate_must_hold()` instead.
+ query evaluate_obligation(
+ goal: CanonicalPredicateGoal<'tcx>
+ ) -> Result<traits::EvaluationResult, traits::OverflowError> {
+ desc { "evaluating trait selection obligation `{}`", goal.value.value }
+ }
+
+ query evaluate_goal(
+ goal: traits::CanonicalChalkEnvironmentAndGoal<'tcx>
+ ) -> Result<
+ &'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, ()>>,
+ NoSolution
+ > {
+ desc { "evaluating trait selection obligation `{}`", goal.value }
+ }
+
+ /// Do not call this query directly: part of the `Eq` type-op
+ query type_op_ascribe_user_type(
+ goal: CanonicalTypeOpAscribeUserTypeGoal<'tcx>
+ ) -> Result<
+ &'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, ()>>,
+ NoSolution,
+ > {
+ desc { "evaluating `type_op_ascribe_user_type` `{:?}`", goal }
+ remap_env_constness
+ }
+
+ /// Do not call this query directly: part of the `Eq` type-op
+ query type_op_eq(
+ goal: CanonicalTypeOpEqGoal<'tcx>
+ ) -> Result<
+ &'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, ()>>,
+ NoSolution,
+ > {
+ desc { "evaluating `type_op_eq` `{:?}`", goal }
+ remap_env_constness
+ }
+
+ /// Do not call this query directly: part of the `Subtype` type-op
+ query type_op_subtype(
+ goal: CanonicalTypeOpSubtypeGoal<'tcx>
+ ) -> Result<
+ &'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, ()>>,
+ NoSolution,
+ > {
+ desc { "evaluating `type_op_subtype` `{:?}`", goal }
+ remap_env_constness
+ }
+
+ /// Do not call this query directly: part of the `ProvePredicate` type-op
+ query type_op_prove_predicate(
+ goal: CanonicalTypeOpProvePredicateGoal<'tcx>
+ ) -> Result<
+ &'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, ()>>,
+ NoSolution,
+ > {
+ desc { "evaluating `type_op_prove_predicate` `{:?}`", goal }
+ }
+
+ /// Do not call this query directly: part of the `Normalize` type-op
+ query type_op_normalize_ty(
+ goal: CanonicalTypeOpNormalizeGoal<'tcx, Ty<'tcx>>
+ ) -> Result<
+ &'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, Ty<'tcx>>>,
+ NoSolution,
+ > {
+ desc { "normalizing `{:?}`", goal }
+ remap_env_constness
+ }
+
+ /// Do not call this query directly: part of the `Normalize` type-op
+ query type_op_normalize_predicate(
+ goal: CanonicalTypeOpNormalizeGoal<'tcx, ty::Predicate<'tcx>>
+ ) -> Result<
+ &'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, ty::Predicate<'tcx>>>,
+ NoSolution,
+ > {
+ desc { "normalizing `{:?}`", goal }
+ remap_env_constness
+ }
+
+ /// Do not call this query directly: part of the `Normalize` type-op
+ query type_op_normalize_poly_fn_sig(
+ goal: CanonicalTypeOpNormalizeGoal<'tcx, ty::PolyFnSig<'tcx>>
+ ) -> Result<
+ &'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, ty::PolyFnSig<'tcx>>>,
+ NoSolution,
+ > {
+ desc { "normalizing `{:?}`", goal }
+ remap_env_constness
+ }
+
+ /// Do not call this query directly: part of the `Normalize` type-op
+ query type_op_normalize_fn_sig(
+ goal: CanonicalTypeOpNormalizeGoal<'tcx, ty::FnSig<'tcx>>
+ ) -> Result<
+ &'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, ty::FnSig<'tcx>>>,
+ NoSolution,
+ > {
+ desc { "normalizing `{:?}`", goal }
+ remap_env_constness
+ }
+
+ query subst_and_check_impossible_predicates(key: (DefId, SubstsRef<'tcx>)) -> bool {
+ desc { |tcx|
+ "impossible substituted predicates:`{}`",
+ tcx.def_path_str(key.0)
+ }
+ }
+
+ query method_autoderef_steps(
+ goal: CanonicalTyGoal<'tcx>
+ ) -> MethodAutoderefStepsResult<'tcx> {
+ desc { "computing autoderef types for `{:?}`", goal }
+ remap_env_constness
+ }
+
+ query supported_target_features(_: CrateNum) -> FxHashMap<String, Option<Symbol>> {
+ storage(ArenaCacheSelector<'tcx>)
+ eval_always
+ desc { "looking up supported target features" }
+ }
+
+ /// Get an estimate of the size of an InstanceDef based on its MIR for CGU partitioning.
+ query instance_def_size_estimate(def: ty::InstanceDef<'tcx>)
+ -> usize {
+ desc { |tcx| "estimating size for `{}`", tcx.def_path_str(def.def_id()) }
+ }
+
+ query features_query(_: ()) -> &'tcx rustc_feature::Features {
+ eval_always
+ desc { "looking up enabled feature gates" }
+ }
+
+ /// Attempt to resolve the given `DefId` to an `Instance`, for the
+ /// given generics args (`SubstsRef`), returning one of:
+ /// * `Ok(Some(instance))` on success
+ /// * `Ok(None)` when the `SubstsRef` are still too generic,
+ /// and therefore don't allow finding the final `Instance`
+ /// * `Err(ErrorGuaranteed)` when the `Instance` resolution process
+ /// couldn't complete due to errors elsewhere - this is distinct
+ /// from `Ok(None)` to avoid misleading diagnostics when an error
+    ///   has already been/will be emitted, for the original cause
+    ///   (a usage sketch follows this query definition)
+ query resolve_instance(
+ key: ty::ParamEnvAnd<'tcx, (DefId, SubstsRef<'tcx>)>
+ ) -> Result<Option<ty::Instance<'tcx>>, ErrorGuaranteed> {
+ desc { "resolving instance `{}`", ty::Instance::new(key.value.0, key.value.1) }
+ remap_env_constness
+ }
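+
+    // Illustrative sketch (not part of the query definitions): callers of
+    // `resolve_instance` typically match on all three documented outcomes;
+    // `param_env`, `def_id` and `substs` below are hypothetical values in scope.
+    //
+    //     match tcx.resolve_instance(param_env.and((def_id, substs))) {
+    //         Ok(Some(instance)) => { /* fully resolved; ready for codegen */ }
+    //         Ok(None) => { /* still too generic; retry after monomorphization */ }
+    //         Err(_guar) => { /* an error was already (or will be) emitted */ }
+    //     }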
+
+ query resolve_instance_of_const_arg(
+ key: ty::ParamEnvAnd<'tcx, (LocalDefId, DefId, SubstsRef<'tcx>)>
+ ) -> Result<Option<ty::Instance<'tcx>>, ErrorGuaranteed> {
+ desc {
+ "resolving instance of the const argument `{}`",
+ ty::Instance::new(key.value.0.to_def_id(), key.value.2),
+ }
+ remap_env_constness
+ }
+
+ query normalize_opaque_types(key: &'tcx ty::List<ty::Predicate<'tcx>>) -> &'tcx ty::List<ty::Predicate<'tcx>> {
+ desc { "normalizing opaque types in {:?}", key }
+ }
+
+ /// Checks whether a type is definitely uninhabited. This is
+ /// conservative: for some types that are uninhabited we return `false`,
+ /// but we only return `true` for types that are definitely uninhabited.
+    /// `ty.conservative_is_privately_uninhabited` implies that any value of type `ty`
+    /// will have ABI `Abi::Uninhabited`. (Note that uninhabited types may have nonzero
+    /// size, to account for partial initialization. See #49298 for details.)
+ query conservative_is_privately_uninhabited(key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool {
+ desc { "conservatively checking if {:?} is privately uninhabited", key }
+ remap_env_constness
+ }
+
+ query limits(key: ()) -> Limits {
+ desc { "looking up limits" }
+ }
+
+ /// Performs an HIR-based well-formed check on the item with the given `HirId`. If
+ /// we get an `Unimplemented` error that matches the provided `Predicate`, return
+ /// the cause of the newly created obligation.
+ ///
+ /// This is only used by error-reporting code to get a better cause (in particular, a better
+ /// span) for an *existing* error. Therefore, it is best-effort, and may never handle
+ /// all of the cases that the normal `ty::Ty`-based wfcheck does. This is fine,
+ /// because the `ty::Ty`-based wfcheck is always run.
+ query diagnostic_hir_wf_check(key: (ty::Predicate<'tcx>, traits::WellFormedLoc)) -> Option<traits::ObligationCause<'tcx>> {
+ storage(ArenaCacheSelector<'tcx>)
+ eval_always
+ no_hash
+ desc { "performing HIR wf-checking for predicate {:?} at item {:?}", key.0, key.1 }
+ }
+
+ /// The list of backend features computed from CLI flags (`-Ctarget-cpu`, `-Ctarget-feature`,
+ /// `--target` and similar).
+ query global_backend_features(_: ()) -> Vec<String> {
+ storage(ArenaCacheSelector<'tcx>)
+ eval_always
+ desc { "computing the backend features for CLI flags" }
+ }
+
+ query generator_diagnostic_data(key: DefId) -> Option<GeneratorDiagnosticData<'tcx>> {
+ storage(ArenaCacheSelector<'tcx>)
+ desc { |tcx| "looking up generator diagnostic data of `{}`", tcx.def_path_str(key) }
+ separate_provide_extern
+ }
+
+ query permits_uninit_init(key: TyAndLayout<'tcx>) -> bool {
+ desc { "checking to see if {:?} permits being left uninit", key.ty }
+ }
+
+ query permits_zero_init(key: TyAndLayout<'tcx>) -> bool {
+ desc { "checking to see if {:?} permits being left zeroed", key.ty }
+ }
+}
diff --git a/compiler/rustc_middle/src/tests.rs b/compiler/rustc_middle/src/tests.rs
new file mode 100644
index 000000000..757e0bd3b
--- /dev/null
+++ b/compiler/rustc_middle/src/tests.rs
@@ -0,0 +1,13 @@
+use super::*;
+
+// FIXME(#27438): right now the unit tests of librustc_middle don't refer to any actual
+// functions generated in librustc_data_structures (all
+// references are through generic functions), but statics are
+// referenced from time to time. Due to this bug we won't
+// actually correctly link in the statics unless we also
+// reference a function, so be sure to reference a dummy
+// function.
+#[test]
+fn noop() {
+ rustc_data_structures::__noop_fix_for_27438();
+}
diff --git a/compiler/rustc_middle/src/thir.rs b/compiler/rustc_middle/src/thir.rs
new file mode 100644
index 000000000..b856af1d8
--- /dev/null
+++ b/compiler/rustc_middle/src/thir.rs
@@ -0,0 +1,821 @@
+//! THIR datatypes and definitions. See the [rustc dev guide] for more info.
+//!
+//! If you compare the THIR [`ExprKind`] to [`hir::ExprKind`], you will see it is
+//! a good bit simpler. In fact, a number of the more straightforward
+//! MIR simplifications are already done in the lowering to THIR. For
+//! example, method calls and overloaded operators are absent: they are
+//! expected to be converted into [`ExprKind::Call`] instances.
+//!
+//! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/thir.html
+
+use rustc_ast::{InlineAsmOptions, InlineAsmTemplatePiece};
+use rustc_hir as hir;
+use rustc_hir::def::CtorKind;
+use rustc_hir::def_id::DefId;
+use rustc_hir::RangeEnd;
+use rustc_index::newtype_index;
+use rustc_index::vec::IndexVec;
+use rustc_middle::infer::canonical::Canonical;
+use rustc_middle::middle::region;
+use rustc_middle::mir::interpret::AllocId;
+use rustc_middle::mir::{self, BinOp, BorrowKind, FakeReadCause, Field, Mutability, UnOp};
+use rustc_middle::ty::adjustment::PointerCast;
+use rustc_middle::ty::subst::SubstsRef;
+use rustc_middle::ty::CanonicalUserTypeAnnotation;
+use rustc_middle::ty::{self, AdtDef, Ty, UpvarSubsts, UserType};
+use rustc_span::{Span, Symbol, DUMMY_SP};
+use rustc_target::abi::VariantIdx;
+use rustc_target::asm::InlineAsmRegOrRegClass;
+
+use rustc_span::def_id::LocalDefId;
+use std::fmt;
+use std::ops::Index;
+
+pub mod visit;
+
+newtype_index! {
+ /// An index to an [`Arm`] stored in [`Thir::arms`]
+ #[derive(HashStable)]
+ pub struct ArmId {
+ DEBUG_FORMAT = "a{}"
+ }
+}
+
+newtype_index! {
+ /// An index to an [`Expr`] stored in [`Thir::exprs`]
+ #[derive(HashStable)]
+ pub struct ExprId {
+ DEBUG_FORMAT = "e{}"
+ }
+}
+
+newtype_index! {
+ #[derive(HashStable)]
+ /// An index to a [`Stmt`] stored in [`Thir::stmts`]
+ pub struct StmtId {
+ DEBUG_FORMAT = "s{}"
+ }
+}
+
+macro_rules! thir_with_elements {
+ ($($name:ident: $id:ty => $value:ty,)*) => {
+ /// A container for a THIR body.
+ ///
+ /// This can be indexed directly by any THIR index (e.g. [`ExprId`]).
+ #[derive(Debug, HashStable, Clone)]
+ pub struct Thir<'tcx> {
+ $(
+ pub $name: IndexVec<$id, $value>,
+ )*
+ }
+
+ impl<'tcx> Thir<'tcx> {
+ pub fn new() -> Thir<'tcx> {
+ Thir {
+ $(
+ $name: IndexVec::new(),
+ )*
+ }
+ }
+ }
+
+ $(
+ impl<'tcx> Index<$id> for Thir<'tcx> {
+ type Output = $value;
+ fn index(&self, index: $id) -> &Self::Output {
+ &self.$name[index]
+ }
+ }
+ )*
+ }
+}
+
+thir_with_elements! {
+ arms: ArmId => Arm<'tcx>,
+ exprs: ExprId => Expr<'tcx>,
+ stmts: StmtId => Stmt<'tcx>,
+}
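+
+// Illustrative sketch (not part of the compiler): the invocation above
+// generates `Index` impls, so a `Thir` body can be indexed directly by each
+// id type. `some_expr_id` and `some_stmt_id` below are hypothetical ids
+// produced during THIR lowering.
+//
+//     let thir: Thir<'_> = Thir::new();
+//     // After lowering has pushed nodes into `thir.exprs`/`thir.stmts`:
+//     let expr: &Expr<'_> = &thir[some_expr_id]; // via `Index<ExprId>`
+//     let stmt: &Stmt<'_> = &thir[some_stmt_id]; // via `Index<StmtId>`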
+
+#[derive(Copy, Clone, Debug, HashStable)]
+pub enum LintLevel {
+ Inherited,
+ Explicit(hir::HirId),
+}
+
+#[derive(Clone, Debug, HashStable)]
+pub struct Block {
+ /// Whether the block itself has a label. Used by `label: {}`
+ /// and `try` blocks.
+ ///
+ /// This does *not* include labels on loops, e.g. `'label: loop {}`.
+ pub targeted_by_break: bool,
+ pub region_scope: region::Scope,
+ pub opt_destruction_scope: Option<region::Scope>,
+ /// The span of the block, including the opening braces,
+ /// the label, and the `unsafe` keyword, if present.
+ pub span: Span,
+    /// The statements in the block.
+ pub stmts: Box<[StmtId]>,
+ /// The trailing expression of the block, if any.
+ pub expr: Option<ExprId>,
+ pub safety_mode: BlockSafety,
+}
+
+#[derive(Clone, Debug, HashStable)]
+pub struct Adt<'tcx> {
+ /// The ADT we're constructing.
+ pub adt_def: AdtDef<'tcx>,
+ /// The variant of the ADT.
+ pub variant_index: VariantIdx,
+ pub substs: SubstsRef<'tcx>,
+
+ /// Optional user-given substs: for something like `let x =
+ /// Bar::<T> { ... }`.
+ pub user_ty: Option<Canonical<'tcx, UserType<'tcx>>>,
+
+ pub fields: Box<[FieldExpr]>,
+ /// The base, e.g. `Foo {x: 1, .. base}`.
+ pub base: Option<FruInfo<'tcx>>,
+}
+
+#[derive(Copy, Clone, Debug, HashStable)]
+pub enum BlockSafety {
+ Safe,
+ /// A compiler-generated unsafe block
+ BuiltinUnsafe,
+ /// An `unsafe` block. The `HirId` is the ID of the block.
+ ExplicitUnsafe(hir::HirId),
+}
+
+#[derive(Clone, Debug, HashStable)]
+pub struct Stmt<'tcx> {
+ pub kind: StmtKind<'tcx>,
+ pub opt_destruction_scope: Option<region::Scope>,
+}
+
+#[derive(Clone, Debug, HashStable)]
+pub enum StmtKind<'tcx> {
+ /// An expression with a trailing semicolon.
+ Expr {
+ /// The scope for this statement; may be used as lifetime of temporaries.
+ scope: region::Scope,
+
+ /// The expression being evaluated in this statement.
+ expr: ExprId,
+ },
+
+ /// A `let` binding.
+ Let {
+ /// The scope for variables bound in this `let`; it covers this and
+ /// all the remaining statements in the block.
+ remainder_scope: region::Scope,
+
+ /// The scope for the initialization itself; might be used as
+ /// lifetime of temporaries.
+ init_scope: region::Scope,
+
+ /// `let <PAT> = ...`
+ ///
+ /// If a type annotation is included, it is added as an ascription pattern.
+ pattern: Pat<'tcx>,
+
+ /// `let pat: ty = <INIT>`
+ initializer: Option<ExprId>,
+
+        /// `let pat: ty = <INIT> else { <ELSE> }`
+ else_block: Option<Block>,
+
+ /// The lint level for this `let` statement.
+ lint_level: LintLevel,
+ },
+}
+
+#[derive(Clone, Debug, Copy, PartialEq, Eq, Hash, HashStable, TyEncodable, TyDecodable)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub struct LocalVarId(pub hir::HirId);
+
+/// A THIR expression.
+#[derive(Clone, Debug, HashStable)]
+pub struct Expr<'tcx> {
+ /// The type of this expression
+ pub ty: Ty<'tcx>,
+
+ /// The lifetime of this expression if it should be spilled into a
+ /// temporary; should be `None` only if in a constant context
+ pub temp_lifetime: Option<region::Scope>,
+
+    /// The span of the expression in the source.
+    pub span: Span,
+
+    /// The kind of expression.
+ pub kind: ExprKind<'tcx>,
+}
+
+#[derive(Clone, Debug, HashStable)]
+pub enum ExprKind<'tcx> {
+ /// `Scope`s are used to explicitly mark destruction scopes,
+ /// and to track the `HirId` of the expressions within the scope.
+ Scope {
+ region_scope: region::Scope,
+ lint_level: LintLevel,
+ value: ExprId,
+ },
+ /// A `box <value>` expression.
+ Box {
+ value: ExprId,
+ },
+ /// An `if` expression.
+ If {
+ if_then_scope: region::Scope,
+ cond: ExprId,
+ then: ExprId,
+ else_opt: Option<ExprId>,
+ },
+ /// A function call. Method calls and overloaded operators are converted to plain function calls.
+ Call {
+ /// The type of the function. This is often a [`FnDef`] or a [`FnPtr`].
+ ///
+ /// [`FnDef`]: ty::TyKind::FnDef
+ /// [`FnPtr`]: ty::TyKind::FnPtr
+ ty: Ty<'tcx>,
+ /// The function itself.
+ fun: ExprId,
+ /// The arguments passed to the function.
+ ///
+ /// Note: in some cases (like calling a closure), the function call `f(...args)` gets
+ /// rewritten as a call to a function trait method (e.g. `FnOnce::call_once(f, (...args))`).
+ args: Box<[ExprId]>,
+        /// Whether this call originates from a call expression in HIR, rather
+        /// than having been synthesized from an overloaded operator; still
+        /// `true` for overloaded *function* calls (e.g. calling a closure),
+        /// since those come from a call expression in HIR.
+ from_hir_call: bool,
+ /// The span of the function, without the dot and receiver
+ /// (e.g. `foo(a, b)` in `x.foo(a, b)`).
+ fn_span: Span,
+ },
+ /// A *non-overloaded* dereference.
+ Deref {
+ arg: ExprId,
+ },
+ /// A *non-overloaded* binary operation.
+ Binary {
+ op: BinOp,
+ lhs: ExprId,
+ rhs: ExprId,
+ },
+ /// A logical operation. This is distinct from `BinaryOp` because
+ /// the operands need to be lazily evaluated.
+ LogicalOp {
+ op: LogicalOp,
+ lhs: ExprId,
+ rhs: ExprId,
+ },
+ /// A *non-overloaded* unary operation. Note that here the deref (`*`)
+ /// operator is represented by `ExprKind::Deref`.
+ Unary {
+ op: UnOp,
+ arg: ExprId,
+ },
+ /// A cast: `<source> as <type>`. The type we cast to is the type of
+ /// the parent expression.
+ Cast {
+ source: ExprId,
+ },
+    /// A use expression: converts a place expression (lexpr) into a value
+    /// expression (vexpr).
+    Use {
+        source: ExprId,
+    },
+ /// A coercion from `!` to any type.
+ NeverToAny {
+ source: ExprId,
+ },
+ /// A pointer cast. More information can be found in [`PointerCast`].
+ Pointer {
+ cast: PointerCast,
+ source: ExprId,
+ },
+ /// A `loop` expression.
+ Loop {
+ body: ExprId,
+ },
+ Let {
+ expr: ExprId,
+ pat: Pat<'tcx>,
+ },
+ /// A `match` expression.
+ Match {
+ scrutinee: ExprId,
+ arms: Box<[ArmId]>,
+ },
+ /// A block.
+ Block {
+ body: Block,
+ },
+ /// An assignment: `lhs = rhs`.
+ Assign {
+ lhs: ExprId,
+ rhs: ExprId,
+ },
+ /// A *non-overloaded* operation assignment, e.g. `lhs += rhs`.
+ AssignOp {
+ op: BinOp,
+ lhs: ExprId,
+ rhs: ExprId,
+ },
+    /// Access to a field of a struct, a tuple, a union, or an enum.
+ Field {
+ lhs: ExprId,
+ /// Variant containing the field.
+ variant_index: VariantIdx,
+ /// This can be a named (`.foo`) or unnamed (`.0`) field.
+ name: Field,
+ },
+ /// A *non-overloaded* indexing operation.
+ Index {
+ lhs: ExprId,
+ index: ExprId,
+ },
+ /// A local variable.
+ VarRef {
+ id: LocalVarId,
+ },
+ /// Used to represent upvars mentioned in a closure/generator
+ UpvarRef {
+ /// DefId of the closure/generator
+ closure_def_id: DefId,
+
+ /// HirId of the root variable
+ var_hir_id: LocalVarId,
+ },
+ /// A borrow, e.g. `&arg`.
+ Borrow {
+ borrow_kind: BorrowKind,
+ arg: ExprId,
+ },
+ /// A `&raw [const|mut] $place_expr` raw borrow resulting in type `*[const|mut] T`.
+ AddressOf {
+ mutability: hir::Mutability,
+ arg: ExprId,
+ },
+ /// A `break` expression.
+ Break {
+ label: region::Scope,
+ value: Option<ExprId>,
+ },
+ /// A `continue` expression.
+ Continue {
+ label: region::Scope,
+ },
+ /// A `return` expression.
+ Return {
+ value: Option<ExprId>,
+ },
+ /// An inline `const` block, e.g. `const {}`.
+ ConstBlock {
+ did: DefId,
+ substs: SubstsRef<'tcx>,
+ },
+ /// An array literal constructed from one repeated element, e.g. `[1; 5]`.
+ Repeat {
+ value: ExprId,
+ count: ty::Const<'tcx>,
+ },
+ /// An array, e.g. `[a, b, c, d]`.
+ Array {
+ fields: Box<[ExprId]>,
+ },
+ /// A tuple, e.g. `(a, b, c, d)`.
+ Tuple {
+ fields: Box<[ExprId]>,
+ },
+ /// An ADT constructor, e.g. `Foo {x: 1, y: 2}`.
+ Adt(Box<Adt<'tcx>>),
+ /// A type ascription on a place.
+ PlaceTypeAscription {
+ source: ExprId,
+ /// Type that the user gave to this expression
+ user_ty: Option<Canonical<'tcx, UserType<'tcx>>>,
+ },
+ /// A type ascription on a value, e.g. `42: i32`.
+ ValueTypeAscription {
+ source: ExprId,
+ /// Type that the user gave to this expression
+ user_ty: Option<Canonical<'tcx, UserType<'tcx>>>,
+ },
+ /// A closure definition.
+ Closure {
+ closure_id: LocalDefId,
+ substs: UpvarSubsts<'tcx>,
+ upvars: Box<[ExprId]>,
+ movability: Option<hir::Movability>,
+ fake_reads: Vec<(ExprId, FakeReadCause, hir::HirId)>,
+ },
+ /// A literal.
+ Literal {
+ lit: &'tcx hir::Lit,
+ neg: bool,
+ },
+ /// For literals that don't correspond to anything in the HIR
+ NonHirLiteral {
+ lit: ty::ScalarInt,
+ user_ty: Option<Canonical<'tcx, UserType<'tcx>>>,
+ },
+ /// A literal of a ZST type.
+ ZstLiteral {
+ user_ty: Option<Canonical<'tcx, UserType<'tcx>>>,
+ },
+ /// Associated constants and named constants
+ NamedConst {
+ def_id: DefId,
+ substs: SubstsRef<'tcx>,
+ user_ty: Option<Canonical<'tcx, UserType<'tcx>>>,
+ },
+ ConstParam {
+ param: ty::ParamConst,
+ def_id: DefId,
+ },
+ // FIXME improve docs for `StaticRef` by distinguishing it from `NamedConst`
+ /// A literal containing the address of a `static`.
+ ///
+ /// This is only distinguished from `Literal` so that we can register some
+ /// info for diagnostics.
+ StaticRef {
+ alloc_id: AllocId,
+ ty: Ty<'tcx>,
+ def_id: DefId,
+ },
+ /// Inline assembly, i.e. `asm!()`.
+ InlineAsm {
+ template: &'tcx [InlineAsmTemplatePiece],
+ operands: Box<[InlineAsmOperand<'tcx>]>,
+ options: InlineAsmOptions,
+ line_spans: &'tcx [Span],
+ },
+ /// An expression taking a reference to a thread local.
+ ThreadLocalRef(DefId),
+ /// A `yield` expression.
+ Yield {
+ value: ExprId,
+ },
+}
+
+/// Represents the association of a field identifier and an expression.
+///
+/// This is used in struct constructors.
+#[derive(Clone, Debug, HashStable)]
+pub struct FieldExpr {
+ pub name: Field,
+ pub expr: ExprId,
+}
+
+#[derive(Clone, Debug, HashStable)]
+pub struct FruInfo<'tcx> {
+ pub base: ExprId,
+ pub field_types: Box<[Ty<'tcx>]>,
+}
+
+/// A `match` arm.
+#[derive(Clone, Debug, HashStable)]
+pub struct Arm<'tcx> {
+ pub pattern: Pat<'tcx>,
+ pub guard: Option<Guard<'tcx>>,
+ pub body: ExprId,
+ pub lint_level: LintLevel,
+ pub scope: region::Scope,
+ pub span: Span,
+}
+
+/// A `match` guard.
+#[derive(Clone, Debug, HashStable)]
+pub enum Guard<'tcx> {
+ If(ExprId),
+ IfLet(Pat<'tcx>, ExprId),
+}
+
+#[derive(Copy, Clone, Debug, HashStable)]
+pub enum LogicalOp {
+ /// The `&&` operator.
+ And,
+ /// The `||` operator.
+ Or,
+}
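+
+// Illustrative note (not part of the compiler): `LogicalOp` is kept separate
+// from `ExprKind::Binary` because `&&` and `||` short-circuit, so the
+// right-hand side must be evaluated lazily, behind a branch. Roughly, `a && b`
+// behaves like the sketch below, where `eval` is a hypothetical stand-in for
+// evaluating an operand:
+//
+//     let lhs = eval(a);
+//     if lhs { eval(b) } else { false }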
+
+#[derive(Clone, Debug, HashStable)]
+pub enum InlineAsmOperand<'tcx> {
+ In {
+ reg: InlineAsmRegOrRegClass,
+ expr: ExprId,
+ },
+ Out {
+ reg: InlineAsmRegOrRegClass,
+ late: bool,
+ expr: Option<ExprId>,
+ },
+ InOut {
+ reg: InlineAsmRegOrRegClass,
+ late: bool,
+ expr: ExprId,
+ },
+ SplitInOut {
+ reg: InlineAsmRegOrRegClass,
+ late: bool,
+ in_expr: ExprId,
+ out_expr: Option<ExprId>,
+ },
+ Const {
+ value: mir::ConstantKind<'tcx>,
+ span: Span,
+ },
+ SymFn {
+ value: mir::ConstantKind<'tcx>,
+ span: Span,
+ },
+ SymStatic {
+ def_id: DefId,
+ },
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, HashStable)]
+pub enum BindingMode {
+ ByValue,
+ ByRef(BorrowKind),
+}
+
+#[derive(Clone, Debug, HashStable)]
+pub struct FieldPat<'tcx> {
+ pub field: Field,
+ pub pattern: Pat<'tcx>,
+}
+
+#[derive(Clone, Debug, HashStable)]
+pub struct Pat<'tcx> {
+ pub ty: Ty<'tcx>,
+ pub span: Span,
+ pub kind: Box<PatKind<'tcx>>,
+}
+
+impl<'tcx> Pat<'tcx> {
+ pub fn wildcard_from_ty(ty: Ty<'tcx>) -> Self {
+ Pat { ty, span: DUMMY_SP, kind: Box::new(PatKind::Wild) }
+ }
+}
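+
+// Illustrative sketch (not part of the compiler): pattern analysis can use
+// `wildcard_from_ty` to synthesize a `_` pattern for a known type, e.g.:
+//
+//     let wild: Pat<'_> = Pat::wildcard_from_ty(tcx.types.bool);
+//
+// where `tcx` is a hypothetical `TyCtxt` in scope.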
+
+#[derive(Clone, Debug, HashStable)]
+pub struct Ascription<'tcx> {
+ pub annotation: CanonicalUserTypeAnnotation<'tcx>,
+ /// Variance to use when relating the `user_ty` to the **type of the value being
+ /// matched**. Typically, this is `Variance::Covariant`, since the value being matched must
+ /// have a type that is some subtype of the ascribed type.
+ ///
+ /// Note that this variance does not apply for any bindings within subpatterns. The type
+ /// assigned to those bindings must be exactly equal to the `user_ty` given here.
+ ///
+ /// The only place where this field is not `Covariant` is when matching constants, where
+ /// we currently use `Contravariant` -- this is because the constant type just needs to
+ /// be "comparable" to the type of the input value. So, for example:
+ ///
+ /// ```text
+ /// match x { "foo" => .. }
+ /// ```
+ ///
+ /// requires that `&'static str <: T_x`, where `T_x` is the type of `x`. Really, we should
+ /// probably be checking for a `PartialEq` impl instead, but this preserves the behavior
+ /// of the old type-check for now. See #57280 for details.
+ pub variance: ty::Variance,
+}
+
+#[derive(Clone, Debug, HashStable)]
+pub enum PatKind<'tcx> {
+ /// A wildcard pattern: `_`.
+ Wild,
+
+ AscribeUserType {
+ ascription: Ascription<'tcx>,
+ subpattern: Pat<'tcx>,
+ },
+
+ /// `x`, `ref x`, `x @ P`, etc.
+ Binding {
+ mutability: Mutability,
+ name: Symbol,
+ mode: BindingMode,
+ var: LocalVarId,
+ ty: Ty<'tcx>,
+ subpattern: Option<Pat<'tcx>>,
+ /// Is this the leftmost occurrence of the binding, i.e., is `var` the
+ /// `HirId` of this pattern?
+ is_primary: bool,
+ },
+
+ /// `Foo(...)` or `Foo{...}` or `Foo`, where `Foo` is a variant name from an ADT with
+ /// multiple variants.
+ Variant {
+ adt_def: AdtDef<'tcx>,
+ substs: SubstsRef<'tcx>,
+ variant_index: VariantIdx,
+ subpatterns: Vec<FieldPat<'tcx>>,
+ },
+
+ /// `(...)`, `Foo(...)`, `Foo{...}`, or `Foo`, where `Foo` is a variant name from an ADT with
+ /// a single variant.
+ Leaf {
+ subpatterns: Vec<FieldPat<'tcx>>,
+ },
+
+ /// `box P`, `&P`, `&mut P`, etc.
+ Deref {
+ subpattern: Pat<'tcx>,
+ },
+
+    /// One of the following:
+    /// * `&str`, which is handled as a string pattern, so that exhaustiveness
+    ///   checking can detect if the same string is used twice in different patterns.
+    /// * An integer, `bool`, `char` or float, which is handled by exhaustiveness to cover
+    ///   exactly its own value, similar to `&str`, but these values are much simpler.
+    /// * An opaque constant that must not be matched structurally, i.e. anything
+    ///   that does not derive `PartialEq` and `Eq`.
+ Constant {
+ value: mir::ConstantKind<'tcx>,
+ },
+
+ Range(PatRange<'tcx>),
+
+ /// Matches against a slice, checking the length and extracting elements.
+    /// Irrefutable when there is a slice pattern and both `prefix` and `suffix` are empty,
+    /// e.g. `&[ref xs @ ..]`.
+ Slice {
+ prefix: Vec<Pat<'tcx>>,
+ slice: Option<Pat<'tcx>>,
+ suffix: Vec<Pat<'tcx>>,
+ },
+
+ /// Fixed match against an array; irrefutable.
+ Array {
+ prefix: Vec<Pat<'tcx>>,
+ slice: Option<Pat<'tcx>>,
+ suffix: Vec<Pat<'tcx>>,
+ },
+
+ /// An or-pattern, e.g. `p | q`.
+ /// Invariant: `pats.len() >= 2`.
+ Or {
+ pats: Vec<Pat<'tcx>>,
+ },
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, HashStable)]
+pub struct PatRange<'tcx> {
+ pub lo: mir::ConstantKind<'tcx>,
+ pub hi: mir::ConstantKind<'tcx>,
+ pub end: RangeEnd,
+}
+
+impl<'tcx> fmt::Display for Pat<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // Printing lists is a chore.
+ let mut first = true;
+ let mut start_or_continue = |s| {
+ if first {
+ first = false;
+ ""
+ } else {
+ s
+ }
+ };
+ let mut start_or_comma = || start_or_continue(", ");
+
+ match *self.kind {
+ PatKind::Wild => write!(f, "_"),
+ PatKind::AscribeUserType { ref subpattern, .. } => write!(f, "{}: _", subpattern),
+ PatKind::Binding { mutability, name, mode, ref subpattern, .. } => {
+ let is_mut = match mode {
+ BindingMode::ByValue => mutability == Mutability::Mut,
+ BindingMode::ByRef(bk) => {
+ write!(f, "ref ")?;
+ matches!(bk, BorrowKind::Mut { .. })
+ }
+ };
+ if is_mut {
+ write!(f, "mut ")?;
+ }
+ write!(f, "{}", name)?;
+ if let Some(ref subpattern) = *subpattern {
+ write!(f, " @ {}", subpattern)?;
+ }
+ Ok(())
+ }
+ PatKind::Variant { ref subpatterns, .. } | PatKind::Leaf { ref subpatterns } => {
+ let variant = match *self.kind {
+ PatKind::Variant { adt_def, variant_index, .. } => {
+ Some(adt_def.variant(variant_index))
+ }
+ _ => self.ty.ty_adt_def().and_then(|adt| {
+ if !adt.is_enum() { Some(adt.non_enum_variant()) } else { None }
+ }),
+ };
+
+ if let Some(variant) = variant {
+ write!(f, "{}", variant.name)?;
+
+                    // Only for ADTs can we have `S {...}`,
+                    // which we handle separately here.
+ if variant.ctor_kind == CtorKind::Fictive {
+ write!(f, " {{ ")?;
+
+ let mut printed = 0;
+ for p in subpatterns {
+ if let PatKind::Wild = *p.pattern.kind {
+ continue;
+ }
+ let name = variant.fields[p.field.index()].name;
+ write!(f, "{}{}: {}", start_or_comma(), name, p.pattern)?;
+ printed += 1;
+ }
+
+ if printed < variant.fields.len() {
+ write!(f, "{}..", start_or_comma())?;
+ }
+
+ return write!(f, " }}");
+ }
+ }
+
+ let num_fields = variant.map_or(subpatterns.len(), |v| v.fields.len());
+ if num_fields != 0 || variant.is_none() {
+ write!(f, "(")?;
+ for i in 0..num_fields {
+ write!(f, "{}", start_or_comma())?;
+
+ // Common case: the field is where we expect it.
+ if let Some(p) = subpatterns.get(i) {
+ if p.field.index() == i {
+ write!(f, "{}", p.pattern)?;
+ continue;
+ }
+ }
+
+ // Otherwise, we have to go looking for it.
+ if let Some(p) = subpatterns.iter().find(|p| p.field.index() == i) {
+ write!(f, "{}", p.pattern)?;
+ } else {
+ write!(f, "_")?;
+ }
+ }
+ write!(f, ")")?;
+ }
+
+ Ok(())
+ }
+ PatKind::Deref { ref subpattern } => {
+ match self.ty.kind() {
+ ty::Adt(def, _) if def.is_box() => write!(f, "box ")?,
+ ty::Ref(_, _, mutbl) => {
+ write!(f, "&{}", mutbl.prefix_str())?;
+ }
+ _ => bug!("{} is a bad Deref pattern type", self.ty),
+ }
+ write!(f, "{}", subpattern)
+ }
+ PatKind::Constant { value } => write!(f, "{}", value),
+ PatKind::Range(PatRange { lo, hi, end }) => {
+ write!(f, "{}", lo)?;
+ write!(f, "{}", end)?;
+ write!(f, "{}", hi)
+ }
+ PatKind::Slice { ref prefix, ref slice, ref suffix }
+ | PatKind::Array { ref prefix, ref slice, ref suffix } => {
+ write!(f, "[")?;
+ for p in prefix {
+ write!(f, "{}{}", start_or_comma(), p)?;
+ }
+ if let Some(ref slice) = *slice {
+ write!(f, "{}", start_or_comma())?;
+ match *slice.kind {
+ PatKind::Wild => {}
+ _ => write!(f, "{}", slice)?,
+ }
+ write!(f, "..")?;
+ }
+ for p in suffix {
+ write!(f, "{}{}", start_or_comma(), p)?;
+ }
+ write!(f, "]")
+ }
+ PatKind::Or { ref pats } => {
+ for pat in pats {
+ write!(f, "{}{}", start_or_continue(" | "), pat)?;
+ }
+ Ok(())
+ }
+ }
+ }
+}
+
+// Some nodes are used a lot. Make sure they don't unintentionally get bigger.
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+mod size_asserts {
+ use super::*;
+ // These are in alphabetical order, which is easy to maintain.
+ rustc_data_structures::static_assert_size!(Block, 56);
+ rustc_data_structures::static_assert_size!(Expr<'_>, 104);
+ rustc_data_structures::static_assert_size!(Pat<'_>, 24);
+ rustc_data_structures::static_assert_size!(Stmt<'_>, 120);
+}
diff --git a/compiler/rustc_middle/src/thir/visit.rs b/compiler/rustc_middle/src/thir/visit.rs
new file mode 100644
index 000000000..97249fdd1
--- /dev/null
+++ b/compiler/rustc_middle/src/thir/visit.rs
@@ -0,0 +1,244 @@
+use super::{
+ Arm, Block, Expr, ExprKind, Guard, InlineAsmOperand, Pat, PatKind, Stmt, StmtKind, Thir,
+};
+
+pub trait Visitor<'a, 'tcx: 'a>: Sized {
+ fn thir(&self) -> &'a Thir<'tcx>;
+
+ fn visit_expr(&mut self, expr: &Expr<'tcx>) {
+ walk_expr(self, expr);
+ }
+
+ fn visit_stmt(&mut self, stmt: &Stmt<'tcx>) {
+ walk_stmt(self, stmt);
+ }
+
+ fn visit_block(&mut self, block: &Block) {
+ walk_block(self, block);
+ }
+
+ fn visit_arm(&mut self, arm: &Arm<'tcx>) {
+ walk_arm(self, arm);
+ }
+
+ fn visit_pat(&mut self, pat: &Pat<'tcx>) {
+ walk_pat(self, pat);
+ }
+
+ // Note: We don't have visitors for `ty::Const` and `mir::ConstantKind`
+ // (even though these types occur in THIR) for consistency and to reduce confusion,
+    // since the lazy creation of constants during THIR construction causes most
+ // 'constants' to not be of type `ty::Const` or `mir::ConstantKind` at that
+ // stage (they are mostly still identified by `DefId` or `hir::Lit`, see
+ // the variants `Literal`, `NonHirLiteral` and `NamedConst` in `thir::ExprKind`).
+ // You have to manually visit `ty::Const` and `mir::ConstantKind` through the
+ // other `visit*` functions.
+}
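+
+// Illustrative sketch (not part of the compiler): a minimal visitor that
+// counts expressions by overriding `visit_expr` and delegating back to
+// `walk_expr` so traversal continues. `ExprCounter` is a hypothetical name
+// used only for this example.
+//
+//     struct ExprCounter<'a, 'tcx> {
+//         thir: &'a Thir<'tcx>,
+//         count: usize,
+//     }
+//
+//     impl<'a, 'tcx> Visitor<'a, 'tcx> for ExprCounter<'a, 'tcx> {
+//         fn thir(&self) -> &'a Thir<'tcx> {
+//             self.thir
+//         }
+//
+//         fn visit_expr(&mut self, expr: &Expr<'tcx>) {
+//             self.count += 1;
+//             walk_expr(self, expr); // recurse into subexpressions
+//         }
+//     }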
+
+pub fn walk_expr<'a, 'tcx: 'a, V: Visitor<'a, 'tcx>>(visitor: &mut V, expr: &Expr<'tcx>) {
+ use ExprKind::*;
+ match expr.kind {
+ Scope { value, region_scope: _, lint_level: _ } => {
+ visitor.visit_expr(&visitor.thir()[value])
+ }
+ Box { value } => visitor.visit_expr(&visitor.thir()[value]),
+ If { cond, then, else_opt, if_then_scope: _ } => {
+ visitor.visit_expr(&visitor.thir()[cond]);
+ visitor.visit_expr(&visitor.thir()[then]);
+ if let Some(else_expr) = else_opt {
+ visitor.visit_expr(&visitor.thir()[else_expr]);
+ }
+ }
+ Call { fun, ref args, ty: _, from_hir_call: _, fn_span: _ } => {
+ visitor.visit_expr(&visitor.thir()[fun]);
+ for &arg in &**args {
+ visitor.visit_expr(&visitor.thir()[arg]);
+ }
+ }
+ Deref { arg } => visitor.visit_expr(&visitor.thir()[arg]),
+ Binary { lhs, rhs, op: _ } | LogicalOp { lhs, rhs, op: _ } => {
+ visitor.visit_expr(&visitor.thir()[lhs]);
+ visitor.visit_expr(&visitor.thir()[rhs]);
+ }
+ Unary { arg, op: _ } => visitor.visit_expr(&visitor.thir()[arg]),
+ Cast { source } => visitor.visit_expr(&visitor.thir()[source]),
+ Use { source } => visitor.visit_expr(&visitor.thir()[source]),
+ NeverToAny { source } => visitor.visit_expr(&visitor.thir()[source]),
+ Pointer { source, cast: _ } => visitor.visit_expr(&visitor.thir()[source]),
+ Let { expr, .. } => {
+ visitor.visit_expr(&visitor.thir()[expr]);
+ }
+ Loop { body } => visitor.visit_expr(&visitor.thir()[body]),
+ Match { scrutinee, ref arms } => {
+ visitor.visit_expr(&visitor.thir()[scrutinee]);
+ for &arm in &**arms {
+ visitor.visit_arm(&visitor.thir()[arm]);
+ }
+ }
+ Block { ref body } => visitor.visit_block(body),
+ Assign { lhs, rhs } | AssignOp { lhs, rhs, op: _ } => {
+ visitor.visit_expr(&visitor.thir()[lhs]);
+ visitor.visit_expr(&visitor.thir()[rhs]);
+ }
+ Field { lhs, variant_index: _, name: _ } => visitor.visit_expr(&visitor.thir()[lhs]),
+ Index { lhs, index } => {
+ visitor.visit_expr(&visitor.thir()[lhs]);
+ visitor.visit_expr(&visitor.thir()[index]);
+ }
+ VarRef { id: _ } | UpvarRef { closure_def_id: _, var_hir_id: _ } => {}
+ Borrow { arg, borrow_kind: _ } => visitor.visit_expr(&visitor.thir()[arg]),
+ AddressOf { arg, mutability: _ } => visitor.visit_expr(&visitor.thir()[arg]),
+ Break { value, label: _ } => {
+ if let Some(value) = value {
+ visitor.visit_expr(&visitor.thir()[value])
+ }
+ }
+ Continue { label: _ } => {}
+ Return { value } => {
+ if let Some(value) = value {
+ visitor.visit_expr(&visitor.thir()[value])
+ }
+ }
+ ConstBlock { did: _, substs: _ } => {}
+ Repeat { value, count: _ } => {
+ visitor.visit_expr(&visitor.thir()[value]);
+ }
+ Array { ref fields } | Tuple { ref fields } => {
+ for &field in &**fields {
+ visitor.visit_expr(&visitor.thir()[field]);
+ }
+ }
+ Adt(box crate::thir::Adt {
+ ref fields,
+ ref base,
+ adt_def: _,
+ variant_index: _,
+ substs: _,
+ user_ty: _,
+ }) => {
+ for field in &**fields {
+ visitor.visit_expr(&visitor.thir()[field.expr]);
+ }
+ if let Some(base) = base {
+ visitor.visit_expr(&visitor.thir()[base.base]);
+ }
+ }
+ PlaceTypeAscription { source, user_ty: _ } | ValueTypeAscription { source, user_ty: _ } => {
+ visitor.visit_expr(&visitor.thir()[source])
+ }
+ Closure { closure_id: _, substs: _, upvars: _, movability: _, fake_reads: _ } => {}
+ Literal { lit: _, neg: _ } => {}
+ NonHirLiteral { lit: _, user_ty: _ } => {}
+ ZstLiteral { user_ty: _ } => {}
+ NamedConst { def_id: _, substs: _, user_ty: _ } => {}
+ ConstParam { param: _, def_id: _ } => {}
+ StaticRef { alloc_id: _, ty: _, def_id: _ } => {}
+ InlineAsm { ref operands, template: _, options: _, line_spans: _ } => {
+ for op in &**operands {
+ use InlineAsmOperand::*;
+ match op {
+ In { expr, reg: _ }
+ | Out { expr: Some(expr), reg: _, late: _ }
+ | InOut { expr, reg: _, late: _ } => visitor.visit_expr(&visitor.thir()[*expr]),
+ SplitInOut { in_expr, out_expr, reg: _, late: _ } => {
+ visitor.visit_expr(&visitor.thir()[*in_expr]);
+ if let Some(out_expr) = out_expr {
+ visitor.visit_expr(&visitor.thir()[*out_expr]);
+ }
+ }
+ Out { expr: None, reg: _, late: _ }
+ | Const { value: _, span: _ }
+ | SymFn { value: _, span: _ }
+ | SymStatic { def_id: _ } => {}
+ }
+ }
+ }
+ ThreadLocalRef(_) => {}
+ Yield { value } => visitor.visit_expr(&visitor.thir()[value]),
+ }
+}
+
+pub fn walk_stmt<'a, 'tcx: 'a, V: Visitor<'a, 'tcx>>(visitor: &mut V, stmt: &Stmt<'tcx>) {
+ match &stmt.kind {
+ StmtKind::Expr { expr, scope: _ } => visitor.visit_expr(&visitor.thir()[*expr]),
+ StmtKind::Let {
+ initializer,
+ remainder_scope: _,
+ init_scope: _,
+ ref pattern,
+ lint_level: _,
+ else_block,
+ } => {
+ if let Some(init) = initializer {
+ visitor.visit_expr(&visitor.thir()[*init]);
+ }
+ visitor.visit_pat(pattern);
+ if let Some(block) = else_block {
+ visitor.visit_block(block)
+ }
+ }
+ }
+}
+
+pub fn walk_block<'a, 'tcx: 'a, V: Visitor<'a, 'tcx>>(visitor: &mut V, block: &Block) {
+ for &stmt in &*block.stmts {
+ visitor.visit_stmt(&visitor.thir()[stmt]);
+ }
+ if let Some(expr) = block.expr {
+ visitor.visit_expr(&visitor.thir()[expr]);
+ }
+}
+
+pub fn walk_arm<'a, 'tcx: 'a, V: Visitor<'a, 'tcx>>(visitor: &mut V, arm: &Arm<'tcx>) {
+ match arm.guard {
+ Some(Guard::If(expr)) => visitor.visit_expr(&visitor.thir()[expr]),
+ Some(Guard::IfLet(ref pat, expr)) => {
+ visitor.visit_pat(pat);
+ visitor.visit_expr(&visitor.thir()[expr]);
+ }
+ None => {}
+ }
+ visitor.visit_pat(&arm.pattern);
+ visitor.visit_expr(&visitor.thir()[arm.body]);
+}
+
+pub fn walk_pat<'a, 'tcx: 'a, V: Visitor<'a, 'tcx>>(visitor: &mut V, pat: &Pat<'tcx>) {
+ use PatKind::*;
+ match pat.kind.as_ref() {
+ AscribeUserType { subpattern, ascription: _ }
+ | Deref { subpattern }
+ | Binding {
+ subpattern: Some(subpattern),
+ mutability: _,
+ mode: _,
+ var: _,
+ ty: _,
+ is_primary: _,
+ name: _,
+ } => visitor.visit_pat(&subpattern),
+ Binding { .. } | Wild => {}
+ Variant { subpatterns, adt_def: _, substs: _, variant_index: _ } | Leaf { subpatterns } => {
+ for subpattern in subpatterns {
+ visitor.visit_pat(&subpattern.pattern);
+ }
+ }
+ Constant { value: _ } => {}
+ Range(_) => {}
+ Slice { prefix, slice, suffix } | Array { prefix, slice, suffix } => {
+ for subpattern in prefix {
+ visitor.visit_pat(&subpattern);
+ }
+ if let Some(pat) = slice {
+ visitor.visit_pat(pat);
+ }
+ for subpattern in suffix {
+ visitor.visit_pat(&subpattern);
+ }
+ }
+ Or { pats } => {
+ for pat in pats {
+ visitor.visit_pat(&pat);
+ }
+ }
+ };
+}
diff --git a/compiler/rustc_middle/src/traits/chalk.rs b/compiler/rustc_middle/src/traits/chalk.rs
new file mode 100644
index 000000000..6d4af8bea
--- /dev/null
+++ b/compiler/rustc_middle/src/traits/chalk.rs
@@ -0,0 +1,403 @@
+//! Types required for Chalk-related queries
+//!
+//! The primary purpose of this file is to define an implementation of the
+//! `chalk_ir::interner::Interner` trait, which, as its name suggests, provides
+//! an abstraction boundary for creating interned Chalk types.
+
+use rustc_middle::ty::{self, AdtDef, TyCtxt};
+
+use rustc_hir::def_id::DefId;
+use rustc_target::spec::abi::Abi;
+
+use std::cmp::Ordering;
+use std::fmt;
+use std::hash::{Hash, Hasher};
+
+#[derive(Copy, Clone)]
+pub struct RustInterner<'tcx> {
+ pub tcx: TyCtxt<'tcx>,
+}
+
+/// We don't ever actually need this. It's only required for derives.
+impl<'tcx> Hash for RustInterner<'tcx> {
+ fn hash<H: Hasher>(&self, _state: &mut H) {}
+}
+
+/// We don't ever actually need this. It's only required for derives.
+impl<'tcx> Ord for RustInterner<'tcx> {
+ fn cmp(&self, _other: &Self) -> Ordering {
+ Ordering::Equal
+ }
+}
+
+/// We don't ever actually need this. It's only required for derives.
+impl<'tcx> PartialOrd for RustInterner<'tcx> {
+ fn partial_cmp(&self, _other: &Self) -> Option<Ordering> {
+ None
+ }
+}
+
+/// We don't ever actually need this. It's only required for derives.
+impl<'tcx> PartialEq for RustInterner<'tcx> {
+ fn eq(&self, _other: &Self) -> bool {
+ false
+ }
+}
+
+/// We don't ever actually need this. It's only required for derives.
+impl<'tcx> Eq for RustInterner<'tcx> {}
+
+impl fmt::Debug for RustInterner<'_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "RustInterner")
+ }
+}
+
+// Right now, there is no interning at all. I was running into problems with
+// adding interning in `ty/context.rs` for Chalk types with
+// `parallel-compiler = true`. -jackh726
+impl<'tcx> chalk_ir::interner::Interner for RustInterner<'tcx> {
+ type InternedType = Box<chalk_ir::TyData<Self>>;
+ type InternedLifetime = Box<chalk_ir::LifetimeData<Self>>;
+ type InternedConst = Box<chalk_ir::ConstData<Self>>;
+ type InternedConcreteConst = ty::ValTree<'tcx>;
+ type InternedGenericArg = Box<chalk_ir::GenericArgData<Self>>;
+ type InternedGoal = Box<chalk_ir::GoalData<Self>>;
+ type InternedGoals = Vec<chalk_ir::Goal<Self>>;
+ type InternedSubstitution = Vec<chalk_ir::GenericArg<Self>>;
+ type InternedProgramClause = Box<chalk_ir::ProgramClauseData<Self>>;
+ type InternedProgramClauses = Vec<chalk_ir::ProgramClause<Self>>;
+ type InternedQuantifiedWhereClauses = Vec<chalk_ir::QuantifiedWhereClause<Self>>;
+ type InternedVariableKinds = Vec<chalk_ir::VariableKind<Self>>;
+ type InternedCanonicalVarKinds = Vec<chalk_ir::CanonicalVarKind<Self>>;
+ type InternedVariances = Vec<chalk_ir::Variance>;
+ type InternedConstraints = Vec<chalk_ir::InEnvironment<chalk_ir::Constraint<Self>>>;
+ type DefId = DefId;
+ type InternedAdtId = AdtDef<'tcx>;
+ type Identifier = ();
+ type FnAbi = Abi;
+
+ fn debug_program_clause_implication(
+ pci: &chalk_ir::ProgramClauseImplication<Self>,
+ fmt: &mut fmt::Formatter<'_>,
+ ) -> Option<fmt::Result> {
+ let mut write = || {
+ write!(fmt, "{:?}", pci.consequence)?;
+
+ let conditions = pci.conditions.interned();
+ let constraints = pci.constraints.interned();
+
+ let conds = conditions.len();
+ let consts = constraints.len();
+ if conds == 0 && consts == 0 {
+ return Ok(());
+ }
+
+ write!(fmt, " :- ")?;
+
+ if conds != 0 {
+ for cond in &conditions[..conds - 1] {
+ write!(fmt, "{:?}, ", cond)?;
+ }
+ write!(fmt, "{:?}", conditions[conds - 1])?;
+ }
+
+ if conds != 0 && consts != 0 {
+ write!(fmt, " ; ")?;
+ }
+
+ if consts != 0 {
+ for constraint in &constraints[..consts - 1] {
+ write!(fmt, "{:?}, ", constraint)?;
+ }
+ write!(fmt, "{:?}", constraints[consts - 1])?;
+ }
+
+ Ok(())
+ };
+ Some(write())
+ }
+
+ fn debug_substitution(
+ substitution: &chalk_ir::Substitution<Self>,
+ fmt: &mut fmt::Formatter<'_>,
+ ) -> Option<fmt::Result> {
+ Some(write!(fmt, "{:?}", substitution.interned()))
+ }
+
+ fn debug_separator_trait_ref(
+ separator_trait_ref: &chalk_ir::SeparatorTraitRef<'_, Self>,
+ fmt: &mut fmt::Formatter<'_>,
+ ) -> Option<fmt::Result> {
+ let substitution = &separator_trait_ref.trait_ref.substitution;
+ let parameters = substitution.interned();
+ Some(write!(
+ fmt,
+ "{:?}{}{:?}{:?}",
+ parameters[0],
+ separator_trait_ref.separator,
+ separator_trait_ref.trait_ref.trait_id,
+ chalk_ir::debug::Angle(&parameters[1..])
+ ))
+ }
+
+ fn debug_quantified_where_clauses(
+ clauses: &chalk_ir::QuantifiedWhereClauses<Self>,
+ fmt: &mut fmt::Formatter<'_>,
+ ) -> Option<fmt::Result> {
+ Some(write!(fmt, "{:?}", clauses.interned()))
+ }
+
+ fn debug_ty(ty: &chalk_ir::Ty<Self>, fmt: &mut fmt::Formatter<'_>) -> Option<fmt::Result> {
+ match &ty.interned().kind {
+ chalk_ir::TyKind::Ref(chalk_ir::Mutability::Not, lifetime, ty) => {
+ Some(write!(fmt, "(&{:?} {:?})", lifetime, ty))
+ }
+ chalk_ir::TyKind::Ref(chalk_ir::Mutability::Mut, lifetime, ty) => {
+ Some(write!(fmt, "(&{:?} mut {:?})", lifetime, ty))
+ }
+ chalk_ir::TyKind::Array(ty, len) => Some(write!(fmt, "[{:?}; {:?}]", ty, len)),
+ chalk_ir::TyKind::Slice(ty) => Some(write!(fmt, "[{:?}]", ty)),
+ chalk_ir::TyKind::Tuple(len, substs) => Some((|| {
+ write!(fmt, "(")?;
+ for (idx, substitution) in substs.interned().iter().enumerate() {
+                    if idx == *len - 1 && *len != 1 {
+ // Don't add a trailing comma if the tuple has more than one element
+ write!(fmt, "{:?}", substitution)?;
+ } else {
+ write!(fmt, "{:?},", substitution)?;
+ }
+ }
+ write!(fmt, ")")
+ })()),
+ _ => None,
+ }
+ }
+
+ fn debug_alias(
+ alias_ty: &chalk_ir::AliasTy<Self>,
+ fmt: &mut fmt::Formatter<'_>,
+ ) -> Option<fmt::Result> {
+ match alias_ty {
+ chalk_ir::AliasTy::Projection(projection_ty) => {
+ Self::debug_projection_ty(projection_ty, fmt)
+ }
+ chalk_ir::AliasTy::Opaque(opaque_ty) => Self::debug_opaque_ty(opaque_ty, fmt),
+ }
+ }
+
+ fn debug_projection_ty(
+ projection_ty: &chalk_ir::ProjectionTy<Self>,
+ fmt: &mut fmt::Formatter<'_>,
+ ) -> Option<fmt::Result> {
+ Some(write!(
+ fmt,
+ "projection: {:?} {:?}",
+ projection_ty.associated_ty_id, projection_ty.substitution,
+ ))
+ }
+
+ fn debug_opaque_ty(
+ opaque_ty: &chalk_ir::OpaqueTy<Self>,
+ fmt: &mut fmt::Formatter<'_>,
+ ) -> Option<fmt::Result> {
+ Some(write!(fmt, "{:?}", opaque_ty.opaque_ty_id))
+ }
+
+ fn intern_ty(self, ty: chalk_ir::TyKind<Self>) -> Self::InternedType {
+ let flags = ty.compute_flags(self);
+        Box::new(chalk_ir::TyData { kind: ty, flags })
+ }
+
+ fn ty_data<'a>(self, ty: &'a Self::InternedType) -> &'a chalk_ir::TyData<Self> {
+ ty
+ }
+
+ fn intern_lifetime(self, lifetime: chalk_ir::LifetimeData<Self>) -> Self::InternedLifetime {
+ Box::new(lifetime)
+ }
+
+ fn lifetime_data<'a>(
+ self,
+ lifetime: &'a Self::InternedLifetime,
+ ) -> &'a chalk_ir::LifetimeData<Self> {
+ &lifetime
+ }
+
+ fn intern_const(self, constant: chalk_ir::ConstData<Self>) -> Self::InternedConst {
+ Box::new(constant)
+ }
+
+ fn const_data<'a>(self, constant: &'a Self::InternedConst) -> &'a chalk_ir::ConstData<Self> {
+ &constant
+ }
+
+ fn const_eq(
+ self,
+ _ty: &Self::InternedType,
+ c1: &Self::InternedConcreteConst,
+ c2: &Self::InternedConcreteConst,
+ ) -> bool {
+ c1 == c2
+ }
+
+ fn intern_generic_arg(self, data: chalk_ir::GenericArgData<Self>) -> Self::InternedGenericArg {
+ Box::new(data)
+ }
+
+ fn generic_arg_data<'a>(
+ self,
+ data: &'a Self::InternedGenericArg,
+ ) -> &'a chalk_ir::GenericArgData<Self> {
+ &data
+ }
+
+ fn intern_goal(self, goal: chalk_ir::GoalData<Self>) -> Self::InternedGoal {
+ Box::new(goal)
+ }
+
+ fn goal_data<'a>(self, goal: &'a Self::InternedGoal) -> &'a chalk_ir::GoalData<Self> {
+ &goal
+ }
+
+ fn intern_goals<E>(
+ self,
+ data: impl IntoIterator<Item = Result<chalk_ir::Goal<Self>, E>>,
+ ) -> Result<Self::InternedGoals, E> {
+ data.into_iter().collect::<Result<Vec<_>, _>>()
+ }
+
+ fn goals_data<'a>(self, goals: &'a Self::InternedGoals) -> &'a [chalk_ir::Goal<Self>] {
+ goals
+ }
+
+ fn intern_substitution<E>(
+ self,
+ data: impl IntoIterator<Item = Result<chalk_ir::GenericArg<Self>, E>>,
+ ) -> Result<Self::InternedSubstitution, E> {
+ data.into_iter().collect::<Result<Vec<_>, _>>()
+ }
+
+ fn substitution_data<'a>(
+ self,
+ substitution: &'a Self::InternedSubstitution,
+ ) -> &'a [chalk_ir::GenericArg<Self>] {
+ substitution
+ }
+
+ fn intern_program_clause(
+ self,
+ data: chalk_ir::ProgramClauseData<Self>,
+ ) -> Self::InternedProgramClause {
+ Box::new(data)
+ }
+
+ fn program_clause_data<'a>(
+ self,
+ clause: &'a Self::InternedProgramClause,
+ ) -> &'a chalk_ir::ProgramClauseData<Self> {
+ &clause
+ }
+
+ fn intern_program_clauses<E>(
+ self,
+ data: impl IntoIterator<Item = Result<chalk_ir::ProgramClause<Self>, E>>,
+ ) -> Result<Self::InternedProgramClauses, E> {
+ data.into_iter().collect::<Result<Vec<_>, _>>()
+ }
+
+ fn program_clauses_data<'a>(
+ self,
+ clauses: &'a Self::InternedProgramClauses,
+ ) -> &'a [chalk_ir::ProgramClause<Self>] {
+ clauses
+ }
+
+ fn intern_quantified_where_clauses<E>(
+ self,
+ data: impl IntoIterator<Item = Result<chalk_ir::QuantifiedWhereClause<Self>, E>>,
+ ) -> Result<Self::InternedQuantifiedWhereClauses, E> {
+ data.into_iter().collect::<Result<Vec<_>, _>>()
+ }
+
+ fn quantified_where_clauses_data<'a>(
+ self,
+ clauses: &'a Self::InternedQuantifiedWhereClauses,
+ ) -> &'a [chalk_ir::QuantifiedWhereClause<Self>] {
+ clauses
+ }
+
+ fn intern_generic_arg_kinds<E>(
+ self,
+ data: impl IntoIterator<Item = Result<chalk_ir::VariableKind<Self>, E>>,
+ ) -> Result<Self::InternedVariableKinds, E> {
+ data.into_iter().collect::<Result<Vec<_>, _>>()
+ }
+
+ fn variable_kinds_data<'a>(
+ self,
+ parameter_kinds: &'a Self::InternedVariableKinds,
+ ) -> &'a [chalk_ir::VariableKind<Self>] {
+ parameter_kinds
+ }
+
+ fn intern_canonical_var_kinds<E>(
+ self,
+ data: impl IntoIterator<Item = Result<chalk_ir::CanonicalVarKind<Self>, E>>,
+ ) -> Result<Self::InternedCanonicalVarKinds, E> {
+ data.into_iter().collect::<Result<Vec<_>, _>>()
+ }
+
+ fn canonical_var_kinds_data<'a>(
+ self,
+ canonical_var_kinds: &'a Self::InternedCanonicalVarKinds,
+ ) -> &'a [chalk_ir::CanonicalVarKind<Self>] {
+ canonical_var_kinds
+ }
+
+ fn intern_constraints<E>(
+ self,
+ data: impl IntoIterator<Item = Result<chalk_ir::InEnvironment<chalk_ir::Constraint<Self>>, E>>,
+ ) -> Result<Self::InternedConstraints, E> {
+ data.into_iter().collect::<Result<Vec<_>, _>>()
+ }
+
+ fn constraints_data<'a>(
+ self,
+ constraints: &'a Self::InternedConstraints,
+ ) -> &'a [chalk_ir::InEnvironment<chalk_ir::Constraint<Self>>] {
+ constraints
+ }
+
+ fn intern_variances<E>(
+ self,
+ data: impl IntoIterator<Item = Result<chalk_ir::Variance, E>>,
+ ) -> Result<Self::InternedVariances, E> {
+ data.into_iter().collect::<Result<Vec<_>, _>>()
+ }
+
+ fn variances_data<'a>(
+ self,
+ variances: &'a Self::InternedVariances,
+ ) -> &'a [chalk_ir::Variance] {
+ variances
+ }
+}
+
+impl<'tcx> chalk_ir::interner::HasInterner for RustInterner<'tcx> {
+ type Interner = Self;
+}
+
+/// A chalk environment and goal.
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, HashStable, TypeFoldable, TypeVisitable)]
+pub struct ChalkEnvironmentAndGoal<'tcx> {
+ pub environment: &'tcx ty::List<ty::Predicate<'tcx>>,
+ pub goal: ty::Predicate<'tcx>,
+}
+
+impl<'tcx> fmt::Display for ChalkEnvironmentAndGoal<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "environment: {:?}, goal: {}", self.environment, self.goal)
+ }
+}
diff --git a/compiler/rustc_middle/src/traits/mod.rs b/compiler/rustc_middle/src/traits/mod.rs
new file mode 100644
index 000000000..72b848c3e
--- /dev/null
+++ b/compiler/rustc_middle/src/traits/mod.rs
@@ -0,0 +1,1026 @@
+//! Trait Resolution. See the [rustc dev guide] for more information on how this works.
+//!
+//! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/traits/resolution.html
+
+mod chalk;
+pub mod query;
+pub mod select;
+pub mod specialization_graph;
+mod structural_impls;
+pub mod util;
+
+use crate::infer::canonical::Canonical;
+use crate::ty::abstract_const::NotConstEvaluatable;
+use crate::ty::subst::SubstsRef;
+use crate::ty::{self, AdtKind, Predicate, Ty, TyCtxt};
+
+use rustc_data_structures::sync::Lrc;
+use rustc_errors::{Applicability, Diagnostic};
+use rustc_hir as hir;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_span::symbol::Symbol;
+use rustc_span::{Span, DUMMY_SP};
+use smallvec::SmallVec;
+
+use std::borrow::Cow;
+use std::hash::{Hash, Hasher};
+
+pub use self::select::{EvaluationCache, EvaluationResult, OverflowError, SelectionCache};
+
+pub type CanonicalChalkEnvironmentAndGoal<'tcx> = Canonical<'tcx, ChalkEnvironmentAndGoal<'tcx>>;
+
+pub use self::ObligationCauseCode::*;
+
+pub use self::chalk::{ChalkEnvironmentAndGoal, RustInterner as ChalkRustInterner};
+
+/// Depending on the stage of compilation, we want projection to be
+/// more or less conservative.
+#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, HashStable)]
+pub enum Reveal {
+ /// At type-checking time, we refuse to project any associated
+ /// type that is marked `default`. Non-`default` ("final") types
+ /// are always projected. This is necessary in general for
+ /// soundness of specialization. However, we *could* allow
+ /// projections in fully-monomorphic cases. We choose not to,
+ /// because we prefer for `default type` to force the type
+ /// definition to be treated abstractly by any consumers of the
+ /// impl. Concretely, that means that the following example will
+ /// fail to compile:
+ ///
+ /// ```compile_fail,E0308
+ /// #![feature(specialization)]
+ /// trait Assoc {
+ /// type Output;
+ /// }
+ ///
+ /// impl<T> Assoc for T {
+ /// default type Output = bool;
+ /// }
+ ///
+ /// fn main() {
+ /// let x: <() as Assoc>::Output = true;
+ /// }
+ /// ```
+ ///
+ /// We also do not reveal the hidden type of opaque types during
+ /// type-checking.
+ UserFacing,
+
+ /// At codegen time, all monomorphic projections will succeed.
+ /// Also, `impl Trait` is normalized to the concrete type,
+ /// which has to be already collected by type-checking.
+ ///
+ /// NOTE: as `impl Trait`'s concrete type should *never*
+ /// be observable directly by the user, `Reveal::All`
+ /// should not be used by checks which may expose
+ /// type equality or type contents to the user.
+ /// There are some exceptions, e.g., around auto traits and
+ /// transmute-checking, which expose some details, but
+ /// not the whole concrete type of the `impl Trait`.
+ All,
+}
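+
+// Illustrative sketch (not part of this module): passes that run after
+// type-checking typically switch a `ParamEnv` from `Reveal::UserFacing` to
+// `Reveal::All` before normalizing; `def_id` and `ty` below are hypothetical
+// values in scope.
+//
+//     let param_env = tcx.param_env(def_id).with_reveal_all_normalized(tcx);
+//     let ty = tcx.normalize_erasing_regions(param_env, ty);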
+
+/// The reason why we incurred this obligation; used for error reporting.
+///
+/// Non-misc `ObligationCauseCode`s are stored on the heap. This gives the
+/// best trade-off between keeping the type small (which makes copies cheaper)
+/// and not doing too many heap allocations.
+///
+/// We do not want to intern this as there are a lot of obligation causes which
+/// only live for a short period of time.
+#[derive(Clone, Debug, PartialEq, Eq, Lift)]
+pub struct ObligationCause<'tcx> {
+ pub span: Span,
+
+ /// The ID of the fn body that triggered this obligation. This is
+ /// used for region obligations to determine the precise
+ /// environment in which the region obligation should be evaluated
+ /// (in particular, closures can add new assumptions). See the
+ /// field `region_obligations` of the `FulfillmentContext` for more
+ /// information.
+ pub body_id: hir::HirId,
+
+ code: InternedObligationCauseCode<'tcx>,
+}
+
+// This custom hash function speeds up hashing for `Obligation` deduplication
+// greatly by skipping the `code` field, which can be large and complex. That
+// shouldn't affect hash quality much since there are several other fields in
+// `Obligation` which should be unique enough, especially the predicate itself
+// which is hashed as an interned pointer. See #90996.
+impl Hash for ObligationCause<'_> {
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ self.body_id.hash(state);
+ self.span.hash(state);
+ }
+}
+
+impl<'tcx> ObligationCause<'tcx> {
+ #[inline]
+ pub fn new(
+ span: Span,
+ body_id: hir::HirId,
+ code: ObligationCauseCode<'tcx>,
+ ) -> ObligationCause<'tcx> {
+ ObligationCause { span, body_id, code: code.into() }
+ }
+
+ pub fn misc(span: Span, body_id: hir::HirId) -> ObligationCause<'tcx> {
+ ObligationCause::new(span, body_id, MiscObligation)
+ }
+
+ #[inline(always)]
+ pub fn dummy() -> ObligationCause<'tcx> {
+ ObligationCause::dummy_with_span(DUMMY_SP)
+ }
+
+ #[inline(always)]
+ pub fn dummy_with_span(span: Span) -> ObligationCause<'tcx> {
+ ObligationCause { span, body_id: hir::CRATE_HIR_ID, code: Default::default() }
+ }
+
+ pub fn span(&self) -> Span {
+ match *self.code() {
+ ObligationCauseCode::MatchExpressionArm(box MatchExpressionArmCause {
+ arm_span,
+ ..
+ }) => arm_span,
+ _ => self.span,
+ }
+ }
+
+ #[inline]
+ pub fn code(&self) -> &ObligationCauseCode<'tcx> {
+ &self.code
+ }
+
+ pub fn map_code(
+ &mut self,
+ f: impl FnOnce(InternedObligationCauseCode<'tcx>) -> ObligationCauseCode<'tcx>,
+ ) {
+ self.code = f(std::mem::take(&mut self.code)).into();
+ }
+
+ pub fn derived_cause(
+ mut self,
+ parent_trait_pred: ty::PolyTraitPredicate<'tcx>,
+ variant: impl FnOnce(DerivedObligationCause<'tcx>) -> ObligationCauseCode<'tcx>,
+ ) -> ObligationCause<'tcx> {
+ /*!
+ * Creates a cause for obligations that are derived from
+ * `obligation` by a recursive search (e.g., for a builtin
+ * bound, or eventually an `auto trait Foo`). If `obligation`
+ * is itself a derived obligation, this is just a clone, but
+ * otherwise we create a "derived obligation" cause so as to
+ * keep track of the original root obligation for error
+ * reporting.
+ */
+
+ // NOTE(flaper87): As of now, it keeps track of the whole error
+ // chain. Ideally, we should have a way to configure this either
+ // by using -Z verbose or just a CLI argument.
+ self.code =
+ variant(DerivedObligationCause { parent_trait_pred, parent_code: self.code }).into();
+ self
+ }
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash, Lift)]
+pub struct UnifyReceiverContext<'tcx> {
+ pub assoc_item: ty::AssocItem,
+ pub param_env: ty::ParamEnv<'tcx>,
+ pub substs: SubstsRef<'tcx>,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash, Lift, Default)]
+pub struct InternedObligationCauseCode<'tcx> {
+ /// `None` for `ObligationCauseCode::MiscObligation` (a common case, occurs ~60% of
+ /// the time). `Some` otherwise.
+ code: Option<Lrc<ObligationCauseCode<'tcx>>>,
+}
+
+impl<'tcx> ObligationCauseCode<'tcx> {
+ #[inline(always)]
+ fn into(self) -> InternedObligationCauseCode<'tcx> {
+ InternedObligationCauseCode {
+ code: if let ObligationCauseCode::MiscObligation = self {
+ None
+ } else {
+ Some(Lrc::new(self))
+ },
+ }
+ }
+}
+
+impl<'tcx> std::ops::Deref for InternedObligationCauseCode<'tcx> {
+ type Target = ObligationCauseCode<'tcx>;
+
+ fn deref(&self) -> &Self::Target {
+ self.code.as_deref().unwrap_or(&ObligationCauseCode::MiscObligation)
+ }
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash, Lift)]
+pub enum ObligationCauseCode<'tcx> {
+ /// Not well classified or should be obvious from the span.
+ MiscObligation,
+
+ /// A slice or array is WF only if `T: Sized`.
+ SliceOrArrayElem,
+
+ /// A tuple is WF only if its middle elements are `Sized`.
+ TupleElem,
+
+ /// This is the trait reference from the given projection.
+ ProjectionWf(ty::ProjectionTy<'tcx>),
+
+ /// In an impl of trait `X` for type `Y`, type `Y` must
+ /// also implement all supertraits of `X`.
+ ItemObligation(DefId),
+
+ /// Like `ItemObligation`, but with extra detail on the source of the obligation.
+ BindingObligation(DefId, Span),
+
+ /// A type like `&'a T` is WF only if `T: 'a`.
+ ReferenceOutlivesReferent(Ty<'tcx>),
+
+ /// A type like `Box<Foo<'a> + 'b>` is WF only if `'b: 'a`.
+ ObjectTypeBound(Ty<'tcx>, ty::Region<'tcx>),
+
+ /// Obligation incurred due to an object cast.
+ ObjectCastObligation(/* Concrete type */ Ty<'tcx>, /* Object type */ Ty<'tcx>),
+
+ /// Obligation incurred due to a coercion.
+ Coercion {
+ source: Ty<'tcx>,
+ target: Ty<'tcx>,
+ },
+
+ /// Various cases where expressions must be `Sized` / `Copy` / etc.
+ /// `L = X` implies that `L` is `Sized`.
+ AssignmentLhsSized,
+ /// `(x1, .., xn)` must be `Sized`.
+ TupleInitializerSized,
+ /// `S { ... }` must be `Sized`.
+ StructInitializerSized,
+ /// Type of each variable must be `Sized`.
+ VariableType(hir::HirId),
+ /// Argument type must be `Sized`.
+ SizedArgumentType(Option<Span>),
+ /// Return type must be `Sized`.
+ SizedReturnType,
+ /// Yield type must be `Sized`.
+ SizedYieldType,
+ /// Box expression result type must be `Sized`.
+ SizedBoxType,
+ /// Inline asm operand type must be `Sized`.
+ InlineAsmSized,
+ /// `[expr; N]` requires `type_of(expr): Copy`.
+ RepeatElementCopy {
+ /// If the element is a `const fn`, we display a help message suggesting to move the
+ /// function call to a new `const` item while saying that `T` doesn't implement `Copy`.
+ is_const_fn: bool,
+ },
+
+ /// Types of fields (other than the last, except for packed structs) in a struct must be sized.
+ FieldSized {
+ adt_kind: AdtKind,
+ span: Span,
+ last: bool,
+ },
+
+ /// Constant expressions must be sized.
+ ConstSized,
+
+ /// `static` items must have `Sync` type.
+ SharedStatic,
+
+ BuiltinDerivedObligation(DerivedObligationCause<'tcx>),
+
+ ImplDerivedObligation(Box<ImplDerivedObligationCause<'tcx>>),
+
+ DerivedObligation(DerivedObligationCause<'tcx>),
+
+ FunctionArgumentObligation {
+ /// The node of the relevant argument in the function call.
+ arg_hir_id: hir::HirId,
+ /// The node of the function call.
+ call_hir_id: hir::HirId,
+ /// The obligation introduced by this argument.
+ parent_code: InternedObligationCauseCode<'tcx>,
+ },
+
+ /// Error derived when matching traits/impls; see ObligationCause for more details
+ CompareImplItemObligation {
+ impl_item_def_id: LocalDefId,
+ trait_item_def_id: DefId,
+ kind: ty::AssocKind,
+ },
+
+ /// Checking that the bounds of a trait's associated type hold for a given impl
+ CheckAssociatedTypeBounds {
+ impl_item_def_id: LocalDefId,
+ trait_item_def_id: DefId,
+ },
+
+ /// Checking that this expression can be assigned to its target.
+ ExprAssignable,
+
+ /// Computing common supertype in the arms of a match expression
+ MatchExpressionArm(Box<MatchExpressionArmCause<'tcx>>),
+
+ /// Type error arising from type checking a pattern against an expected type.
+ Pattern {
+ /// The span of the scrutinee or type expression which caused the `root_ty` type.
+ span: Option<Span>,
+ /// The root expected type induced by a scrutinee or type expression.
+ root_ty: Ty<'tcx>,
+ /// Whether the `Span` came from an expression or a type expression.
+ origin_expr: bool,
+ },
+
+ /// Constants in patterns must have `Structural` type.
+ ConstPatternStructural,
+
+ /// Computing common supertype in an if expression
+ IfExpression(Box<IfExpressionCause<'tcx>>),
+
+ /// Computing common supertype of an if expression with no else counter-part
+ IfExpressionWithNoElse,
+
+ /// `main` has wrong type
+ MainFunctionType,
+
+ /// `start` has wrong type
+ StartFunctionType,
+
+ /// Intrinsic has wrong type
+ IntrinsicType,
+
+ /// A let else block does not diverge
+ LetElse,
+
+ /// Method receiver
+ MethodReceiver,
+
+ UnifyReceiver(Box<UnifyReceiverContext<'tcx>>),
+
+ /// `return` with no expression
+ ReturnNoExpression,
+
+ /// `return` with an expression
+ ReturnValue(hir::HirId),
+
+ /// Return type of this function
+ ReturnType,
+
+ /// Opaque return type of this function
+ OpaqueReturnType(Option<(Ty<'tcx>, Span)>),
+
+ /// Block implicit return
+ BlockTailExpression(hir::HirId),
+
+ /// `#![feature(trivial_bounds)]` is not enabled
+ TrivialBound,
+
+ /// If `X` is the concrete type of an opaque type `impl Y`, then `X` must implement `Y`
+ OpaqueType,
+
+ AwaitableExpr(Option<hir::HirId>),
+
+ ForLoopIterator,
+
+ QuestionMark,
+
+ /// Well-formed checking. If a `WellFormedLoc` is provided,
+ /// then it will be used to perform HIR-based wf checking
+ /// after an error occurs, in order to generate a more precise error span.
+ /// This is purely for diagnostic purposes - it is always
+ /// correct to use `MiscObligation` instead, or to specify
+ /// `WellFormed(None)`.
+ WellFormed(Option<WellFormedLoc>),
+
+ /// From `match_impl`. The cause for us having to match an impl, and the DefId we are matching against.
+ MatchImpl(ObligationCause<'tcx>, DefId),
+
+ BinOp {
+ rhs_span: Option<Span>,
+ is_lit: bool,
+ output_pred: Option<Predicate<'tcx>>,
+ },
+}
+
+/// The 'location' at which we try to perform HIR-based wf checking.
+/// This information is used to obtain an `hir::Ty`, which
+/// we can walk in order to obtain precise spans for any
+/// 'nested' types (e.g. `Foo` in `Option<Foo>`).
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, HashStable)]
+pub enum WellFormedLoc {
+ /// Use the type of the provided definition.
+ Ty(LocalDefId),
+ /// Use the type of the parameter of the provided function.
+ /// We cannot use `hir::Param`, since the function may
+ /// not have a body (e.g. a trait method definition)
+ Param {
+ /// The function to lookup the parameter in
+ function: LocalDefId,
+ /// The index of the parameter to use.
+ /// Parameters are indexed from 0, with the return type
+ /// being the last 'parameter'
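+ /// For example (illustrative): in `fn foo(a: u8, b: u16) -> bool`,
+ /// index 0 is `a`, index 1 is `b`, and index 2 is the `bool` return type.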
+ param_idx: u16,
+ },
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash, Lift)]
+pub struct ImplDerivedObligationCause<'tcx> {
+ pub derived: DerivedObligationCause<'tcx>,
+ pub impl_def_id: DefId,
+ pub span: Span,
+}
+
+impl<'tcx> ObligationCauseCode<'tcx> {
+ /// Returns the base obligation, ignoring derived obligations.
+ pub fn peel_derives(&self) -> &Self {
+ let mut base_cause = self;
+ while let Some((parent_code, _)) = base_cause.parent() {
+ base_cause = parent_code;
+ }
+ base_cause
+ }
+
+ pub fn parent(&self) -> Option<(&Self, Option<ty::PolyTraitPredicate<'tcx>>)> {
+ match self {
+ FunctionArgumentObligation { parent_code, .. } => Some((parent_code, None)),
+ BuiltinDerivedObligation(derived)
+ | DerivedObligation(derived)
+ | ImplDerivedObligation(box ImplDerivedObligationCause { derived, .. }) => {
+ Some((&derived.parent_code, Some(derived.parent_trait_pred)))
+ }
+ _ => None,
+ }
+ }
+}
+
+// `ObligationCauseCode` is used a lot. Make sure it doesn't unintentionally get bigger.
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+static_assert_size!(ObligationCauseCode<'_>, 48);
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
+pub enum StatementAsExpression {
+ CorrectType,
+ NeedsBoxing,
+}
+
+impl<'tcx> ty::Lift<'tcx> for StatementAsExpression {
+ type Lifted = StatementAsExpression;
+ fn lift_to_tcx(self, _tcx: TyCtxt<'tcx>) -> Option<StatementAsExpression> {
+ Some(self)
+ }
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash, Lift)]
+pub struct MatchExpressionArmCause<'tcx> {
+ pub arm_block_id: Option<hir::HirId>,
+ pub arm_ty: Ty<'tcx>,
+ pub arm_span: Span,
+ pub prior_arm_block_id: Option<hir::HirId>,
+ pub prior_arm_ty: Ty<'tcx>,
+ pub prior_arm_span: Span,
+ pub scrut_span: Span,
+ pub source: hir::MatchSource,
+ pub prior_arms: Vec<Span>,
+ pub scrut_hir_id: hir::HirId,
+ pub opt_suggest_box_span: Option<Span>,
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
+#[derive(Lift, TypeFoldable, TypeVisitable)]
+pub struct IfExpressionCause<'tcx> {
+ pub then_id: hir::HirId,
+ pub else_id: hir::HirId,
+ pub then_ty: Ty<'tcx>,
+ pub else_ty: Ty<'tcx>,
+ pub outer_span: Option<Span>,
+ pub opt_suggest_box_span: Option<Span>,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash, Lift)]
+pub struct DerivedObligationCause<'tcx> {
+ /// The trait predicate of the parent obligation that led to the
+ /// current obligation. Note that only trait obligations lead to
+ /// derived obligations, so we just store the trait predicate here
+ /// directly.
+ pub parent_trait_pred: ty::PolyTraitPredicate<'tcx>,
+
+ /// The parent trait had this cause.
+ pub parent_code: InternedObligationCauseCode<'tcx>,
+}
+
+#[derive(Clone, Debug, TypeFoldable, TypeVisitable, Lift)]
+pub enum SelectionError<'tcx> {
+ /// The trait is not implemented.
+ Unimplemented,
+ /// After a closure impl has selected, its "outputs" were evaluated
+ /// (which for closures includes the "input" type params) and they
+ /// didn't resolve. See `confirm_poly_trait_refs` for more.
+ OutputTypeParameterMismatch(
+ ty::PolyTraitRef<'tcx>,
+ ty::PolyTraitRef<'tcx>,
+ ty::error::TypeError<'tcx>,
+ ),
+ /// The trait pointed by `DefId` is not object safe.
+ TraitNotObjectSafe(DefId),
+ /// A given constant couldn't be evaluated.
+ NotConstEvaluatable(NotConstEvaluatable),
+ /// Exceeded the recursion depth during type projection.
+ Overflow(OverflowError),
+ /// Signaling that an error has already been emitted, to avoid
+ /// multiple errors being shown.
+ ErrorReporting,
+ /// Multiple applicable `impl`s were found. The `DefId`s correspond to
+ /// all the `impl`s' Items.
+ Ambiguous(Vec<DefId>),
+}
+
+/// When performing resolution, it is typically the case that there
+/// can be one of three outcomes:
+///
+/// - `Ok(Some(r))`: success occurred with result `r`
+/// - `Ok(None)`: could not definitely determine anything, usually due
+/// to inconclusive type inference.
+/// - `Err(e)`: error `e` occurred
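+///
+/// A minimal sketch (not code from this crate) of how a caller might
+/// branch on the three outcomes:
+///
+/// ```ignore (illustrative)
+/// match result {
+///     Ok(Some(selection)) => { /* success: use `selection` */ }
+///     Ok(None) => { /* ambiguous: retry once inference learns more */ }
+///     Err(e) => { /* report the selection error `e` */ }
+/// }
+/// ```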
+pub type SelectionResult<'tcx, T> = Result<Option<T>, SelectionError<'tcx>>;
+
+/// Given the successful resolution of an obligation, the `ImplSource`
+/// indicates where the impl comes from.
+///
+/// For example, the obligation may be satisfied by a specific impl (case A),
+/// or it may be relative to some bound that is in scope (case B).
+///
+/// ```ignore (illustrative)
+/// impl<T:Clone> Clone<T> for Option<T> { ... } // Impl_1
+/// impl<T:Clone> Clone<T> for Box<T> { ... } // Impl_2
+/// impl Clone for i32 { ... } // Impl_3
+///
+/// fn foo<T: Clone>(concrete: Option<Box<i32>>, param: T, mixed: Option<T>) {
+/// // Case A: ImplSource points at a specific impl. Only possible when
+/// // type is concretely known. If the impl itself has bounded
+/// // type parameters, ImplSource will carry resolutions for those as well:
+/// concrete.clone(); // ImplSource(Impl_1, [ImplSource(Impl_2, [ImplSource(Impl_3)])])
+///
+/// // Case B: ImplSource must be provided by caller. This applies when
+/// // type is a type parameter.
+/// param.clone(); // ImplSource::Param
+///
+/// // Case C: A mix of cases A and B.
+/// mixed.clone(); // ImplSource(Impl_1, [ImplSource::Param])
+/// }
+/// ```
+///
+/// ### The type parameter `N`
+///
+/// See explanation on `ImplSourceUserDefinedData`.
+#[derive(Clone, PartialEq, Eq, TyEncodable, TyDecodable, HashStable, Lift)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub enum ImplSource<'tcx, N> {
+ /// ImplSource identifying a particular impl.
+ UserDefined(ImplSourceUserDefinedData<'tcx, N>),
+
+ /// ImplSource for auto trait implementations.
+ /// This carries the information and nested obligations with regards
+ /// to an auto implementation for a trait `Trait`. The nested obligations
+ /// ensure the trait implementation holds for all the constituent types.
+ AutoImpl(ImplSourceAutoImplData<N>),
+
+ /// Successful resolution to an obligation provided by the caller
+ /// for some type parameter. The `Vec<N>` represents the
+ /// obligations incurred from normalizing the where-clause (if
+ /// any).
+ Param(Vec<N>, ty::BoundConstness),
+
+ /// Virtual calls through an object.
+ Object(ImplSourceObjectData<'tcx, N>),
+
+ /// Successful resolution for a builtin trait.
+ Builtin(ImplSourceBuiltinData<N>),
+
+ /// ImplSource for trait upcasting coercion
+ TraitUpcasting(ImplSourceTraitUpcastingData<'tcx, N>),
+
+ /// ImplSource automatically generated for a closure. The `DefId` is the ID
+ /// of the closure expression. This is an `ImplSource::UserDefined` in spirit, but the
+ /// impl is generated by the compiler and does not appear in the source.
+ Closure(ImplSourceClosureData<'tcx, N>),
+
+ /// Same as above, but for a function pointer type with the given signature.
+ FnPointer(ImplSourceFnPointerData<'tcx, N>),
+
+ /// ImplSource for a builtin `DiscriminantKind` trait implementation.
+ DiscriminantKind(ImplSourceDiscriminantKindData),
+
+ /// ImplSource for a builtin `Pointee` trait implementation.
+ Pointee(ImplSourcePointeeData),
+
+ /// ImplSource automatically generated for a generator.
+ Generator(ImplSourceGeneratorData<'tcx, N>),
+
+ /// ImplSource for a trait alias.
+ TraitAlias(ImplSourceTraitAliasData<'tcx, N>),
+
+ /// ImplSource for a `const Drop` implementation.
+ ConstDestruct(ImplSourceConstDestructData<N>),
+}
+
+impl<'tcx, N> ImplSource<'tcx, N> {
+ pub fn nested_obligations(self) -> Vec<N> {
+ match self {
+ ImplSource::UserDefined(i) => i.nested,
+ ImplSource::Param(n, _) => n,
+ ImplSource::Builtin(i) => i.nested,
+ ImplSource::AutoImpl(d) => d.nested,
+ ImplSource::Closure(c) => c.nested,
+ ImplSource::Generator(c) => c.nested,
+ ImplSource::Object(d) => d.nested,
+ ImplSource::FnPointer(d) => d.nested,
+ ImplSource::DiscriminantKind(ImplSourceDiscriminantKindData)
+ | ImplSource::Pointee(ImplSourcePointeeData) => Vec::new(),
+ ImplSource::TraitAlias(d) => d.nested,
+ ImplSource::TraitUpcasting(d) => d.nested,
+ ImplSource::ConstDestruct(i) => i.nested,
+ }
+ }
+
+ pub fn borrow_nested_obligations(&self) -> &[N] {
+ match &self {
+ ImplSource::UserDefined(i) => &i.nested[..],
+ ImplSource::Param(n, _) => &n,
+ ImplSource::Builtin(i) => &i.nested,
+ ImplSource::AutoImpl(d) => &d.nested,
+ ImplSource::Closure(c) => &c.nested,
+ ImplSource::Generator(c) => &c.nested,
+ ImplSource::Object(d) => &d.nested,
+ ImplSource::FnPointer(d) => &d.nested,
+ ImplSource::DiscriminantKind(ImplSourceDiscriminantKindData)
+ | ImplSource::Pointee(ImplSourcePointeeData) => &[],
+ ImplSource::TraitAlias(d) => &d.nested,
+ ImplSource::TraitUpcasting(d) => &d.nested,
+ ImplSource::ConstDestruct(i) => &i.nested,
+ }
+ }
+
+ pub fn map<M, F>(self, f: F) -> ImplSource<'tcx, M>
+ where
+ F: FnMut(N) -> M,
+ {
+ match self {
+ ImplSource::UserDefined(i) => ImplSource::UserDefined(ImplSourceUserDefinedData {
+ impl_def_id: i.impl_def_id,
+ substs: i.substs,
+ nested: i.nested.into_iter().map(f).collect(),
+ }),
+ ImplSource::Param(n, ct) => ImplSource::Param(n.into_iter().map(f).collect(), ct),
+ ImplSource::Builtin(i) => ImplSource::Builtin(ImplSourceBuiltinData {
+ nested: i.nested.into_iter().map(f).collect(),
+ }),
+ ImplSource::Object(o) => ImplSource::Object(ImplSourceObjectData {
+ upcast_trait_ref: o.upcast_trait_ref,
+ vtable_base: o.vtable_base,
+ nested: o.nested.into_iter().map(f).collect(),
+ }),
+ ImplSource::AutoImpl(d) => ImplSource::AutoImpl(ImplSourceAutoImplData {
+ trait_def_id: d.trait_def_id,
+ nested: d.nested.into_iter().map(f).collect(),
+ }),
+ ImplSource::Closure(c) => ImplSource::Closure(ImplSourceClosureData {
+ closure_def_id: c.closure_def_id,
+ substs: c.substs,
+ nested: c.nested.into_iter().map(f).collect(),
+ }),
+ ImplSource::Generator(c) => ImplSource::Generator(ImplSourceGeneratorData {
+ generator_def_id: c.generator_def_id,
+ substs: c.substs,
+ nested: c.nested.into_iter().map(f).collect(),
+ }),
+ ImplSource::FnPointer(p) => ImplSource::FnPointer(ImplSourceFnPointerData {
+ fn_ty: p.fn_ty,
+ nested: p.nested.into_iter().map(f).collect(),
+ }),
+ ImplSource::DiscriminantKind(ImplSourceDiscriminantKindData) => {
+ ImplSource::DiscriminantKind(ImplSourceDiscriminantKindData)
+ }
+ ImplSource::Pointee(ImplSourcePointeeData) => {
+ ImplSource::Pointee(ImplSourcePointeeData)
+ }
+ ImplSource::TraitAlias(d) => ImplSource::TraitAlias(ImplSourceTraitAliasData {
+ alias_def_id: d.alias_def_id,
+ substs: d.substs,
+ nested: d.nested.into_iter().map(f).collect(),
+ }),
+ ImplSource::TraitUpcasting(d) => {
+ ImplSource::TraitUpcasting(ImplSourceTraitUpcastingData {
+ upcast_trait_ref: d.upcast_trait_ref,
+ vtable_vptr_slot: d.vtable_vptr_slot,
+ nested: d.nested.into_iter().map(f).collect(),
+ })
+ }
+ ImplSource::ConstDestruct(i) => {
+ ImplSource::ConstDestruct(ImplSourceConstDestructData {
+ nested: i.nested.into_iter().map(f).collect(),
+ })
+ }
+ }
+ }
+}
+
+/// Identifies a particular impl in the source, along with a set of
+/// substitutions from the impl's type/lifetime parameters. The
+/// `nested` vector corresponds to the nested obligations attached to
+/// the impl's type parameters.
+///
+/// The type parameter `N` indicates the type used for "nested
+/// obligations" that are required by the impl. During type-check, this
+/// is `Obligation`, as one might expect. During codegen, however, this
+/// is `()`, because codegen only requires a shallow resolution of an
+/// impl, and nested obligations are satisfied later.
+#[derive(Clone, PartialEq, Eq, TyEncodable, TyDecodable, HashStable, Lift)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub struct ImplSourceUserDefinedData<'tcx, N> {
+ pub impl_def_id: DefId,
+ pub substs: SubstsRef<'tcx>,
+ pub nested: Vec<N>,
+}
+
+#[derive(Clone, PartialEq, Eq, TyEncodable, TyDecodable, HashStable, Lift)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub struct ImplSourceGeneratorData<'tcx, N> {
+ pub generator_def_id: DefId,
+ pub substs: SubstsRef<'tcx>,
+ /// Nested obligations. This can be non-empty if the generator
+ /// signature contains associated types.
+ pub nested: Vec<N>,
+}
+
+#[derive(Clone, PartialEq, Eq, TyEncodable, TyDecodable, HashStable, Lift)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub struct ImplSourceClosureData<'tcx, N> {
+ pub closure_def_id: DefId,
+ pub substs: SubstsRef<'tcx>,
+ /// Nested obligations. This can be non-empty if the closure
+ /// signature contains associated types.
+ pub nested: Vec<N>,
+}
+
+#[derive(Clone, PartialEq, Eq, TyEncodable, TyDecodable, HashStable, Lift)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub struct ImplSourceAutoImplData<N> {
+ pub trait_def_id: DefId,
+ pub nested: Vec<N>,
+}
+
+#[derive(Clone, PartialEq, Eq, TyEncodable, TyDecodable, HashStable, Lift)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub struct ImplSourceTraitUpcastingData<'tcx, N> {
+ /// `Foo` upcast to the obligation trait. This will be some supertrait of `Foo`.
+ pub upcast_trait_ref: ty::PolyTraitRef<'tcx>,
+
+ /// The vtable is formed by concatenating together the method lists of
+ /// the base object trait and all supertraits; pointers to supertrait vtables
+ /// are provided when necessary. This is the position of `upcast_trait_ref`'s
+ /// vtable within that vtable.
+ pub vtable_vptr_slot: Option<usize>,
+
+ pub nested: Vec<N>,
+}
+
+#[derive(Clone, PartialEq, Eq, TyEncodable, TyDecodable, HashStable, Lift)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub struct ImplSourceBuiltinData<N> {
+ pub nested: Vec<N>,
+}
+
+#[derive(PartialEq, Eq, Clone, TyEncodable, TyDecodable, HashStable, Lift)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub struct ImplSourceObjectData<'tcx, N> {
+ /// `Foo` upcast to the obligation trait. This will be some supertrait of `Foo`.
+ pub upcast_trait_ref: ty::PolyTraitRef<'tcx>,
+
+ /// The vtable is formed by concatenating together the method lists of
+ /// the base object trait and all supertraits; pointers to supertrait vtables
+ /// are provided when necessary. This is the start of `upcast_trait_ref`'s
+ /// methods in that vtable.
+ pub vtable_base: usize,
+
+ pub nested: Vec<N>,
+}
+
+#[derive(Clone, PartialEq, Eq, TyEncodable, TyDecodable, HashStable, Lift)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub struct ImplSourceFnPointerData<'tcx, N> {
+ pub fn_ty: Ty<'tcx>,
+ pub nested: Vec<N>,
+}
+
+// FIXME(@lcnr): This should be refactored and merged with other builtin vtables.
+#[derive(Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, HashStable)]
+pub struct ImplSourceDiscriminantKindData;
+
+#[derive(Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, HashStable)]
+pub struct ImplSourcePointeeData;
+
+#[derive(Clone, PartialEq, Eq, TyEncodable, TyDecodable, HashStable, Lift)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub struct ImplSourceConstDestructData<N> {
+ pub nested: Vec<N>,
+}
+
+#[derive(Clone, PartialEq, Eq, TyEncodable, TyDecodable, HashStable, Lift)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub struct ImplSourceTraitAliasData<'tcx, N> {
+ pub alias_def_id: DefId,
+ pub substs: SubstsRef<'tcx>,
+ pub nested: Vec<N>,
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Hash, HashStable, PartialOrd, Ord)]
+pub enum ObjectSafetyViolation {
+ /// `Self: Sized` declared on the trait.
+ SizedSelf(SmallVec<[Span; 1]>),
+
+ /// Supertrait reference references `Self` in an illegal location
+ /// (e.g., `trait Foo : Bar<Self>`).
+ SupertraitSelf(SmallVec<[Span; 1]>),
+
+ /// Method has something illegal.
+ Method(Symbol, MethodViolationCode, Span),
+
+ /// Associated const.
+ AssocConst(Symbol, Span),
+
+ /// GAT
+ GAT(Symbol, Span),
+}
+
+impl ObjectSafetyViolation {
+ pub fn error_msg(&self) -> Cow<'static, str> {
+ match self {
+ ObjectSafetyViolation::SizedSelf(_) => "it requires `Self: Sized`".into(),
+ ObjectSafetyViolation::SupertraitSelf(ref spans) => {
+ if spans.iter().any(|sp| *sp != DUMMY_SP) {
+ "it uses `Self` as a type parameter".into()
+ } else {
+ "it cannot use `Self` as a type parameter in a supertrait or `where`-clause"
+ .into()
+ }
+ }
+ ObjectSafetyViolation::Method(name, MethodViolationCode::StaticMethod(_), _) => {
+ format!("associated function `{}` has no `self` parameter", name).into()
+ }
+ ObjectSafetyViolation::Method(
+ name,
+ MethodViolationCode::ReferencesSelfInput(_),
+ DUMMY_SP,
+ ) => format!("method `{}` references the `Self` type in its parameters", name).into(),
+ ObjectSafetyViolation::Method(name, MethodViolationCode::ReferencesSelfInput(_), _) => {
+ format!("method `{}` references the `Self` type in this parameter", name).into()
+ }
+ ObjectSafetyViolation::Method(name, MethodViolationCode::ReferencesSelfOutput, _) => {
+ format!("method `{}` references the `Self` type in its return type", name).into()
+ }
+ ObjectSafetyViolation::Method(
+ name,
+ MethodViolationCode::WhereClauseReferencesSelf,
+ _,
+ ) => {
+ format!("method `{}` references the `Self` type in its `where` clause", name).into()
+ }
+ ObjectSafetyViolation::Method(name, MethodViolationCode::Generic, _) => {
+ format!("method `{}` has generic type parameters", name).into()
+ }
+ ObjectSafetyViolation::Method(
+ name,
+ MethodViolationCode::UndispatchableReceiver(_),
+ _,
+ ) => format!("method `{}`'s `self` parameter cannot be dispatched on", name).into(),
+ ObjectSafetyViolation::AssocConst(name, DUMMY_SP) => {
+ format!("it contains associated `const` `{}`", name).into()
+ }
+ ObjectSafetyViolation::AssocConst(..) => "it contains this associated `const`".into(),
+ ObjectSafetyViolation::GAT(name, _) => {
+ format!("it contains the generic associated type `{}`", name).into()
+ }
+ }
+ }
+
+ pub fn solution(&self, err: &mut Diagnostic) {
+ match self {
+ ObjectSafetyViolation::SizedSelf(_) | ObjectSafetyViolation::SupertraitSelf(_) => {}
+ ObjectSafetyViolation::Method(
+ name,
+ MethodViolationCode::StaticMethod(Some((add_self_sugg, make_sized_sugg))),
+ _,
+ ) => {
+ err.span_suggestion(
+ add_self_sugg.1,
+ format!(
+ "consider turning `{}` into a method by giving it a `&self` argument",
+ name
+ ),
+ add_self_sugg.0.to_string(),
+ Applicability::MaybeIncorrect,
+ );
+ err.span_suggestion(
+ make_sized_sugg.1,
+ format!(
+ "alternatively, consider constraining `{}` so it does not apply to \
+ trait objects",
+ name
+ ),
+ make_sized_sugg.0.to_string(),
+ Applicability::MaybeIncorrect,
+ );
+ }
+ ObjectSafetyViolation::Method(
+ name,
+ MethodViolationCode::UndispatchableReceiver(Some(span)),
+ _,
+ ) => {
+ err.span_suggestion(
+ *span,
+ &format!(
+ "consider changing method `{}`'s `self` parameter to be `&self`",
+ name
+ ),
+ "&Self",
+ Applicability::MachineApplicable,
+ );
+ }
+ ObjectSafetyViolation::AssocConst(name, _)
+ | ObjectSafetyViolation::GAT(name, _)
+ | ObjectSafetyViolation::Method(name, ..) => {
+ err.help(&format!("consider moving `{}` to another trait", name));
+ }
+ }
+ }
+
+ pub fn spans(&self) -> SmallVec<[Span; 1]> {
+ // When `span` comes from a separate crate, it'll be `DUMMY_SP`. Treat it as `None` so
+ // diagnostics use a `note` instead of a `span_label`.
+ match self {
+ ObjectSafetyViolation::SupertraitSelf(spans)
+ | ObjectSafetyViolation::SizedSelf(spans) => spans.clone(),
+ ObjectSafetyViolation::AssocConst(_, span)
+ | ObjectSafetyViolation::GAT(_, span)
+ | ObjectSafetyViolation::Method(_, _, span)
+ if *span != DUMMY_SP =>
+ {
+ smallvec![*span]
+ }
+ _ => smallvec![],
+ }
+ }
+}
+
+/// Reasons a method might not be object-safe.
+#[derive(Clone, Debug, PartialEq, Eq, Hash, HashStable, PartialOrd, Ord)]
+pub enum MethodViolationCode {
+ /// e.g., `fn foo()`
+ StaticMethod(Option<(/* add &self */ (String, Span), /* add Self: Sized */ (String, Span))>),
+
+ /// e.g., `fn foo(&self, x: Self)`
+ ReferencesSelfInput(Option<Span>),
+
+ /// e.g., `fn foo(&self) -> Self`
+ ReferencesSelfOutput,
+
+ /// e.g., `fn foo(&self) where Self: Clone`
+ WhereClauseReferencesSelf,
+
+ /// e.g., `fn foo<A>()`
+ Generic,
+
+ /// the method's receiver (`self` argument) can't be dispatched on
+ UndispatchableReceiver(Option<Span>),
+}
+
+/// These are the error cases for `codegen_fulfill_obligation`.
+#[derive(Copy, Clone, Debug, Hash, HashStable, Encodable, Decodable)]
+pub enum CodegenObligationError {
+ /// Ambiguity can happen when monomorphizing during trans
+ /// expands to some humongous type that never occurred
+ /// statically -- this humongous type can then overflow,
+ /// leading to an ambiguous result. So report this as an
+ /// overflow bug, since I believe this is the only case
+ /// where ambiguity can result.
+ Ambiguity,
+ /// This can trigger when we probe for the source of a `'static` lifetime requirement
+ /// on a trait object: `impl Foo for dyn Trait {}` has an implicit `'static` bound.
+ /// This can also trigger when we have a global bound that is not actually satisfied,
+ /// but was included during typeck due to the trivial_bounds feature.
+ Unimplemented,
+ FulfillmentError,
+}
diff --git a/compiler/rustc_middle/src/traits/query.rs b/compiler/rustc_middle/src/traits/query.rs
new file mode 100644
index 000000000..1f9b474ad
--- /dev/null
+++ b/compiler/rustc_middle/src/traits/query.rs
@@ -0,0 +1,230 @@
+//! Experimental types for the trait query interface. The methods
+//! defined in this module are all based on **canonicalization**,
+//! which makes a canonical query by replacing unbound inference
+//! variables and regions, so that results can be reused more broadly.
+//! The providers for the queries defined here can be found in
+//! `rustc_traits`.
+
+use crate::infer::canonical::{Canonical, QueryResponse};
+use crate::ty::error::TypeError;
+use crate::ty::subst::GenericArg;
+use crate::ty::{self, Ty, TyCtxt};
+use rustc_errors::struct_span_err;
+use rustc_span::source_map::Span;
+use std::iter::FromIterator;
+
+pub mod type_op {
+ use crate::ty::fold::TypeFoldable;
+ use crate::ty::subst::UserSubsts;
+ use crate::ty::{Predicate, Ty};
+ use rustc_hir::def_id::DefId;
+ use std::fmt;
+
+ #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable, Lift)]
+ #[derive(TypeFoldable, TypeVisitable)]
+ pub struct AscribeUserType<'tcx> {
+ pub mir_ty: Ty<'tcx>,
+ pub def_id: DefId,
+ pub user_substs: UserSubsts<'tcx>,
+ }
+
+ impl<'tcx> AscribeUserType<'tcx> {
+ pub fn new(mir_ty: Ty<'tcx>, def_id: DefId, user_substs: UserSubsts<'tcx>) -> Self {
+ Self { mir_ty, def_id, user_substs }
+ }
+ }
+
+ #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable, Lift)]
+ #[derive(TypeFoldable, TypeVisitable)]
+ pub struct Eq<'tcx> {
+ pub a: Ty<'tcx>,
+ pub b: Ty<'tcx>,
+ }
+
+ #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable, Lift)]
+ #[derive(TypeFoldable, TypeVisitable)]
+ pub struct Subtype<'tcx> {
+ pub sub: Ty<'tcx>,
+ pub sup: Ty<'tcx>,
+ }
+
+ #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable, Lift)]
+ #[derive(TypeFoldable, TypeVisitable)]
+ pub struct ProvePredicate<'tcx> {
+ pub predicate: Predicate<'tcx>,
+ }
+
+ impl<'tcx> ProvePredicate<'tcx> {
+ pub fn new(predicate: Predicate<'tcx>) -> Self {
+ ProvePredicate { predicate }
+ }
+ }
+
+ #[derive(Copy, Clone, Debug, Hash, PartialEq, Eq, HashStable, Lift)]
+ #[derive(TypeFoldable, TypeVisitable)]
+ pub struct Normalize<T> {
+ pub value: T,
+ }
+
+ impl<'tcx, T> Normalize<T>
+ where
+ T: fmt::Debug + TypeFoldable<'tcx>,
+ {
+ pub fn new(value: T) -> Self {
+ Self { value }
+ }
+ }
+}
+
+pub type CanonicalProjectionGoal<'tcx> =
+ Canonical<'tcx, ty::ParamEnvAnd<'tcx, ty::ProjectionTy<'tcx>>>;
+
+pub type CanonicalTyGoal<'tcx> = Canonical<'tcx, ty::ParamEnvAnd<'tcx, Ty<'tcx>>>;
+
+pub type CanonicalPredicateGoal<'tcx> = Canonical<'tcx, ty::ParamEnvAnd<'tcx, ty::Predicate<'tcx>>>;
+
+pub type CanonicalTypeOpAscribeUserTypeGoal<'tcx> =
+ Canonical<'tcx, ty::ParamEnvAnd<'tcx, type_op::AscribeUserType<'tcx>>>;
+
+pub type CanonicalTypeOpEqGoal<'tcx> = Canonical<'tcx, ty::ParamEnvAnd<'tcx, type_op::Eq<'tcx>>>;
+
+pub type CanonicalTypeOpSubtypeGoal<'tcx> =
+ Canonical<'tcx, ty::ParamEnvAnd<'tcx, type_op::Subtype<'tcx>>>;
+
+pub type CanonicalTypeOpProvePredicateGoal<'tcx> =
+ Canonical<'tcx, ty::ParamEnvAnd<'tcx, type_op::ProvePredicate<'tcx>>>;
+
+pub type CanonicalTypeOpNormalizeGoal<'tcx, T> =
+ Canonical<'tcx, ty::ParamEnvAnd<'tcx, type_op::Normalize<T>>>;
+
+#[derive(Copy, Clone, Debug, HashStable)]
+pub struct NoSolution;
+
+pub type Fallible<T> = Result<T, NoSolution>;
+
+impl<'tcx> From<TypeError<'tcx>> for NoSolution {
+ fn from(_: TypeError<'tcx>) -> NoSolution {
+ NoSolution
+ }
+}
+
+#[derive(Clone, Debug, Default, HashStable, TypeFoldable, TypeVisitable, Lift)]
+pub struct DropckOutlivesResult<'tcx> {
+ pub kinds: Vec<GenericArg<'tcx>>,
+ pub overflows: Vec<Ty<'tcx>>,
+}
+
+impl<'tcx> DropckOutlivesResult<'tcx> {
+ pub fn report_overflows(&self, tcx: TyCtxt<'tcx>, span: Span, ty: Ty<'tcx>) {
+ if let Some(overflow_ty) = self.overflows.get(0) {
+ let mut err = struct_span_err!(
+ tcx.sess,
+ span,
+ E0320,
+ "overflow while adding drop-check rules for {}",
+ ty,
+ );
+ err.note(&format!("overflowed on {}", overflow_ty));
+ err.emit();
+ }
+ }
+
+ pub fn into_kinds_reporting_overflows(
+ self,
+ tcx: TyCtxt<'tcx>,
+ span: Span,
+ ty: Ty<'tcx>,
+ ) -> Vec<GenericArg<'tcx>> {
+ self.report_overflows(tcx, span, ty);
+ let DropckOutlivesResult { kinds, overflows: _ } = self;
+ kinds
+ }
+}
+
+/// A set of constraints that need to be satisfied in order for
+/// a type to be valid for destruction.
+#[derive(Clone, Debug, HashStable)]
+pub struct DropckConstraint<'tcx> {
+ /// Types that are required to be alive in order for this
+ /// type to be valid for destruction.
+ pub outlives: Vec<ty::subst::GenericArg<'tcx>>,
+
+ /// Types that could not be resolved: projections and params.
+ pub dtorck_types: Vec<Ty<'tcx>>,
+
+ /// If, during the computation of the dtorck constraint, we
+ /// overflow, that gets recorded here. The caller is expected to
+ /// report an error.
+ pub overflows: Vec<Ty<'tcx>>,
+}
+
+impl<'tcx> DropckConstraint<'tcx> {
+ pub fn empty() -> DropckConstraint<'tcx> {
+ DropckConstraint { outlives: vec![], dtorck_types: vec![], overflows: vec![] }
+ }
+}
+
+impl<'tcx> FromIterator<DropckConstraint<'tcx>> for DropckConstraint<'tcx> {
+ fn from_iter<I: IntoIterator<Item = DropckConstraint<'tcx>>>(iter: I) -> Self {
+ let mut result = Self::empty();
+
+ for DropckConstraint { outlives, dtorck_types, overflows } in iter {
+ result.outlives.extend(outlives);
+ result.dtorck_types.extend(dtorck_types);
+ result.overflows.extend(overflows);
+ }
+
+ result
+ }
+}
+
+#[derive(Debug, HashStable)]
+pub struct CandidateStep<'tcx> {
+ pub self_ty: Canonical<'tcx, QueryResponse<'tcx, Ty<'tcx>>>,
+ pub autoderefs: usize,
+ /// `true` if the type results from a dereference of a raw pointer.
+ /// when assembling candidates, we include these steps, but not when
+ /// picking methods. This so that if we have `foo: *const Foo` and `Foo` has methods
+ /// `fn by_raw_ptr(self: *const Self)` and `fn by_ref(&self)`, then
+ /// `foo.by_raw_ptr()` will work and `foo.by_ref()` won't.
+ pub from_unsafe_deref: bool,
+ pub unsize: bool,
+}
+
+#[derive(Copy, Clone, Debug, HashStable)]
+pub struct MethodAutoderefStepsResult<'tcx> {
+ /// The valid autoderef steps that could be found.
+ pub steps: &'tcx [CandidateStep<'tcx>],
+ /// If `Some(T)`, a type on which autoderef reported an error.
+ pub opt_bad_ty: Option<&'tcx MethodAutoderefBadTy<'tcx>>,
+ /// If `true`, `steps` has been truncated due to reaching the
+ /// recursion limit.
+ pub reached_recursion_limit: bool,
+}
+
+#[derive(Debug, HashStable)]
+pub struct MethodAutoderefBadTy<'tcx> {
+ pub reached_raw_pointer: bool,
+ pub ty: Canonical<'tcx, QueryResponse<'tcx, Ty<'tcx>>>,
+}
+
+/// Result from the `normalize_projection_ty` query.
+#[derive(Clone, Debug, HashStable, TypeFoldable, TypeVisitable, Lift)]
+pub struct NormalizationResult<'tcx> {
+ /// Result of normalization.
+ pub normalized_ty: Ty<'tcx>,
+}
+
+/// Outlives bounds are relationships between generic parameters,
+/// whether they both be regions (`'a: 'b`) or whether types are
+/// involved (`T: 'a`). These relationships can be extracted from the
+/// full set of predicates we understand or also from types (in which
+/// case they are called implied bounds). They are fed to the
+/// `OutlivesEnv` which in turn is supplied to the region checker and
+/// other parts of the inference system.
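+///
+/// For example (an illustrative reading of the variant names, not
+/// normative): a bound `T: 'a` would be recorded as
+/// `OutlivesBound::RegionSubParam('a, T)`, and `'a: 'b` as
+/// `OutlivesBound::RegionSubRegion('b, 'a)`.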
+#[derive(Clone, Debug, TypeFoldable, TypeVisitable, Lift, HashStable)]
+pub enum OutlivesBound<'tcx> {
+ RegionSubRegion(ty::Region<'tcx>, ty::Region<'tcx>),
+ RegionSubParam(ty::Region<'tcx>, ty::ParamTy),
+ RegionSubProjection(ty::Region<'tcx>, ty::ProjectionTy<'tcx>),
+}
diff --git a/compiler/rustc_middle/src/traits/select.rs b/compiler/rustc_middle/src/traits/select.rs
new file mode 100644
index 000000000..e836ba47e
--- /dev/null
+++ b/compiler/rustc_middle/src/traits/select.rs
@@ -0,0 +1,312 @@
+//! Candidate selection. See the [rustc dev guide] for more information on how this works.
+//!
+//! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/traits/resolution.html#selection
+
+use self::EvaluationResult::*;
+
+use super::{SelectionError, SelectionResult};
+use rustc_errors::ErrorGuaranteed;
+
+use crate::ty;
+
+use rustc_hir::def_id::DefId;
+use rustc_query_system::cache::Cache;
+
+pub type SelectionCache<'tcx> = Cache<
+ // This cache does not use `ParamEnvAnd` in its keys because `ParamEnv::and` can replace
+ // caller bounds with an empty list if the `TraitPredicate` looks global, which may happen
+ // after erasing lifetimes from the predicate.
+ (ty::ParamEnv<'tcx>, ty::TraitPredicate<'tcx>),
+ SelectionResult<'tcx, SelectionCandidate<'tcx>>,
+>;
+
+pub type EvaluationCache<'tcx> = Cache<
+ // See above: this cache does not use `ParamEnvAnd` in its keys due to sometimes incorrectly
+ // caching with the wrong `ParamEnv`.
+ (ty::ParamEnv<'tcx>, ty::PolyTraitPredicate<'tcx>),
+ EvaluationResult,
+>;
+
+/// The selection process begins by considering all impls, where
+/// clauses, and so forth that might resolve an obligation. Sometimes
+/// we'll be able to say definitively that (e.g.) an impl does not
+/// apply to the obligation: perhaps it is defined for `usize` but the
+/// obligation is for `i32`. In that case, we drop the impl out of the
+/// list. But the other cases are considered *candidates*.
+///
+/// For selection to succeed, there must be exactly one matching
+/// candidate. If the obligation is fully known, this is guaranteed
+/// by coherence. However, if the obligation contains type parameters
+/// or variables, there may be multiple such impls.
+///
+/// It is not a real problem if multiple matching impls exist because
+/// of type variables - it just means the obligation isn't sufficiently
+/// elaborated. In that case we report an ambiguity, and the caller can
+/// try again after more type information has been gathered or report a
+/// "type annotations needed" error.
+///
+/// However, with type parameters, this can be a real problem - type
+/// parameters don't unify with regular types, but they *can* unify
+/// with variables from blanket impls, and (unless we know its bounds
+/// will always be satisfied) picking the blanket impl will be wrong
+/// for at least *some* substitutions. To make this concrete, if we have
+///
+/// ```rust, ignore
+/// trait AsDebug { type Out: fmt::Debug; fn debug(self) -> Self::Out; }
+/// impl<T: fmt::Debug> AsDebug for T {
+/// type Out = T;
+/// fn debug(self) -> fmt::Debug { self }
+/// }
+/// fn foo<T: AsDebug>(t: T) { println!("{:?}", <T as AsDebug>::debug(t)); }
+/// ```
+///
+/// we can't just use the impl to resolve the `<T as AsDebug>` obligation
+/// -- a type from another crate (that doesn't implement `fmt::Debug`) could
+/// implement `AsDebug`.
+///
+/// Because where-clauses match the type exactly, multiple clauses can
+/// only match if there are unresolved variables, and we can mostly just
+/// report this ambiguity in that case. This is still a problem - we can't
+/// *do anything* with ambiguities that involve only regions. This is issue
+/// #21974.
+///
+/// If a single where-clause matches and there are no inference
+/// variables left, then it definitely matches and we can just select
+/// it.
+///
+/// In fact, we even select the where-clause when the obligation contains
+/// inference variables. This can lead to inference making "leaps of logic",
+/// for example in this situation:
+///
+/// ```rust, ignore
+/// pub trait Foo<T> { fn foo(&self) -> T; }
+/// impl<T> Foo<()> for T { fn foo(&self) { } }
+/// impl Foo<bool> for bool { fn foo(&self) -> bool { *self } }
+///
+/// pub fn foo<T>(t: T) where T: Foo<bool> {
+/// println!("{:?}", <T as Foo<_>>::foo(&t));
+/// }
+/// fn main() { foo(false); }
+/// ```
+///
+/// Here the obligation `<T as Foo<$0>>` can be matched by both the blanket
+/// impl and the where-clause. We select the where-clause and unify `$0=bool`,
+/// so the program prints "false". However, if the where-clause is omitted,
+/// the blanket impl is selected, we unify `$0=()`, and the program prints
+/// "()".
+///
+/// Exactly the same issues apply to projection and object candidates, except
+/// that we can have both a projection candidate and a where-clause candidate
+/// for the same obligation. In that case either would do (except that
+/// different "leaps of logic" would occur if inference variables are
+/// present), and we just pick the where-clause. This is, for example,
+/// required for associated types to work in default impls, as the bounds
+/// are visible both as projection bounds and as where-clauses from the
+/// parameter environment.
+#[derive(PartialEq, Eq, Debug, Clone, TypeFoldable, TypeVisitable)]
+pub enum SelectionCandidate<'tcx> {
+ BuiltinCandidate {
+ /// `false` if there are no *further* obligations.
+ has_nested: bool,
+ },
+
+ /// Implementation of transmutability trait.
+ TransmutabilityCandidate,
+
+ ParamCandidate(ty::PolyTraitPredicate<'tcx>),
+ ImplCandidate(DefId),
+ AutoImplCandidate(DefId),
+
+ /// This is a trait matching with a projected type as `Self`, and we found
+ /// an applicable bound in the trait definition. The `usize` is an index
+ /// into the list returned by `tcx.item_bounds`.
+ ProjectionCandidate(usize),
+
+ /// Implementation of a `Fn`-family trait by one of the anonymous types
+ /// generated for an `||` expression.
+ ClosureCandidate,
+
+ /// Implementation of a `Generator` trait by one of the anonymous types
+ /// generated for a generator.
+ GeneratorCandidate,
+
+ /// Implementation of a `Fn`-family trait by one of the anonymous
+ /// types generated for a fn pointer type (e.g., `fn(int) -> int`)
+ FnPointerCandidate {
+ is_const: bool,
+ },
+
+ /// Builtin implementation of `DiscriminantKind`.
+ DiscriminantKindCandidate,
+
+ /// Builtin implementation of `Pointee`.
+ PointeeCandidate,
+
+ TraitAliasCandidate(DefId),
+
+ /// Matching `dyn Trait` with a supertrait of `Trait`. The index is the
+ /// position in the iterator returned by
+ /// `rustc_infer::traits::util::supertraits`.
+ ObjectCandidate(usize),
+
+ /// Perform trait upcasting coercion of `dyn Trait` to a supertrait of `Trait`.
+ /// The index is the position in the iterator returned by
+ /// `rustc_infer::traits::util::supertraits`.
+ TraitUpcastingUnsizeCandidate(usize),
+
+ BuiltinObjectCandidate,
+
+ BuiltinUnsizeCandidate,
+
+ /// Implementation of `const Destruct`, optionally from a custom `impl const Drop`.
+ ConstDestructCandidate(Option<DefId>),
+}
+
+/// The result of trait evaluation. The order is important
+/// here as the evaluation of a list is the maximum of the
+/// evaluations.
+///
+/// The evaluation results are ordered:
+/// - `EvaluatedToOk` implies `EvaluatedToOkModuloRegions`
+/// implies `EvaluatedToAmbig` implies `EvaluatedToUnknown`
+/// - `EvaluatedToErr` implies `EvaluatedToRecur`
+/// - the "union" of evaluation results is equal to their maximum -
+/// all the "potential success" candidates can potentially succeed,
+/// so they are noops when unioned with a definite error, and within
+/// the categories it's easy to see that the unions are correct.
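+///
+/// A minimal sketch (not crate code) of the "union is the maximum" rule,
+/// relying on the derived `Ord`:
+///
+/// ```ignore (illustrative)
+/// fn union_of(results: impl Iterator<Item = EvaluationResult>) -> EvaluationResult {
+///     // An empty list is vacuously successful.
+///     results.max().unwrap_or(EvaluationResult::EvaluatedToOk)
+/// }
+/// ```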
+#[derive(Copy, Clone, Debug, PartialOrd, Ord, PartialEq, Eq, HashStable)]
+pub enum EvaluationResult {
+ /// Evaluation successful.
+ EvaluatedToOk,
+ /// Evaluation successful, but there were unevaluated region obligations.
+ EvaluatedToOkModuloRegions,
+ /// Evaluation successful, but we need to rerun because opaque types got
+ /// hidden types assigned without it being known whether the opaque types
+ /// are within their defining scope.
+ EvaluatedToOkModuloOpaqueTypes,
+ /// Evaluation is known to be ambiguous -- it *might* hold for some
+ /// assignment of inference variables, but it might not.
+ ///
+ /// While this has the same meaning as `EvaluatedToUnknown` -- we can't
+ /// know whether this obligation holds or not -- it is the result we
+ /// would get with an empty stack, and therefore is cacheable.
+ EvaluatedToAmbig,
+ /// Evaluation failed because of recursion involving inference
+ /// variables. We are somewhat imprecise there, so we don't actually
+ /// know the real result.
+ ///
+ /// This can't be trivially cached for the same reason as `EvaluatedToRecur`.
+ EvaluatedToUnknown,
+ /// Evaluation failed because we encountered an obligation we are already
+ /// trying to prove on this branch.
+ ///
+ /// We know this branch can't be a part of a minimal proof-tree for
+ /// the "root" of our cycle, because then we could cut out the recursion
+ /// and maintain a valid proof tree. However, this does not mean
+ /// that all the obligations on this branch do not hold -- it's possible
+ /// that we entered this branch "speculatively", and that there
+ /// might be some other way to prove this obligation that does not
+ /// go through this cycle -- so we can't cache this as a failure.
+ ///
+ /// For example, suppose we have this:
+ ///
+ /// ```rust,ignore (pseudo-Rust)
+ /// pub trait Trait { fn xyz(); }
+ /// // This impl is "useless", but we can still have
+ /// // an `impl Trait for SomeUnsizedType` somewhere.
+ /// impl<T: Trait + Sized> Trait for T { fn xyz() {} }
+ ///
+ /// pub fn foo<T: Trait + ?Sized>() {
+ /// <T as Trait>::xyz();
+ /// }
+ /// ```
+ ///
+ /// When checking `foo`, we have to prove `T: Trait`. This basically
+ /// translates into this:
+ ///
+ /// ```plain,ignore
+ /// (T: Trait + Sized →_\impl T: Trait), T: Trait ⊢ T: Trait
+ /// ```
+ ///
+ /// When we try to prove it, we first go the first option, which
+ /// recurses. This shows us that the impl is "useless" -- it won't
+ /// tell us that `T: Trait` unless it already implemented `Trait`
+ /// by some other means. However, that does not mean that `T: Trait`
+ /// cannot hold: the obligation can still be satisfied some other way
+ /// (e.g., by an `impl Trait for SomeUnsizedType` in another crate).
+ //
+ // FIXME: when an `EvaluatedToRecur` goes past its parent root, we
+ // ought to convert it to an `EvaluatedToErr`, because we know
+ // there definitely isn't a proof tree for that obligation. Not
+ // doing so is still sound -- there isn't any proof tree, so the
+ // branch still can't be a part of a minimal one -- but does not re-enable caching.
+ EvaluatedToRecur,
+ /// Evaluation failed.
+ EvaluatedToErr,
+}
+
+impl EvaluationResult {
+ /// Returns `true` if this evaluation result is known to apply, even
+ /// considering outlives constraints.
+ pub fn must_apply_considering_regions(self) -> bool {
+ self == EvaluatedToOk
+ }
+
+ /// Returns `true` if this evaluation result is known to apply, ignoring
+ /// outlives constraints.
+ pub fn must_apply_modulo_regions(self) -> bool {
+ self <= EvaluatedToOkModuloRegions
+ }
+
+ pub fn may_apply(self) -> bool {
+ match self {
+ EvaluatedToOkModuloOpaqueTypes
+ | EvaluatedToOk
+ | EvaluatedToOkModuloRegions
+ | EvaluatedToAmbig
+ | EvaluatedToUnknown => true,
+
+ EvaluatedToErr | EvaluatedToRecur => false,
+ }
+ }
+
+ pub fn is_stack_dependent(self) -> bool {
+ match self {
+ EvaluatedToUnknown | EvaluatedToRecur => true,
+
+ EvaluatedToOkModuloOpaqueTypes
+ | EvaluatedToOk
+ | EvaluatedToOkModuloRegions
+ | EvaluatedToAmbig
+ | EvaluatedToErr => false,
+ }
+ }
+}
+
+/// Indicates that trait evaluation caused overflow and in which pass.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, HashStable)]
+pub enum OverflowError {
+ Error(ErrorGuaranteed),
+ Canonical,
+ ErrorReporting,
+}
+
+impl From<ErrorGuaranteed> for OverflowError {
+ fn from(e: ErrorGuaranteed) -> OverflowError {
+ OverflowError::Error(e)
+ }
+}
+
+TrivialTypeTraversalAndLiftImpls! {
+ OverflowError,
+}
+
+impl<'tcx> From<OverflowError> for SelectionError<'tcx> {
+ fn from(overflow_error: OverflowError) -> SelectionError<'tcx> {
+ match overflow_error {
+ OverflowError::Error(e) => SelectionError::Overflow(OverflowError::Error(e)),
+ OverflowError::Canonical => SelectionError::Overflow(OverflowError::Canonical),
+ OverflowError::ErrorReporting => SelectionError::ErrorReporting,
+ }
+ }
+}
diff --git a/compiler/rustc_middle/src/traits/specialization_graph.rs b/compiler/rustc_middle/src/traits/specialization_graph.rs
new file mode 100644
index 000000000..2465f8e25
--- /dev/null
+++ b/compiler/rustc_middle/src/traits/specialization_graph.rs
@@ -0,0 +1,261 @@
+use crate::ty::fast_reject::SimplifiedType;
+use crate::ty::visit::TypeVisitable;
+use crate::ty::{self, TyCtxt};
+use rustc_data_structures::fx::FxIndexMap;
+use rustc_errors::ErrorGuaranteed;
+use rustc_hir::def_id::{DefId, DefIdMap};
+use rustc_span::symbol::sym;
+
+/// A per-trait graph of impls in specialization order. At the moment, this
+/// graph forms a tree rooted with the trait itself, with all other nodes
+/// representing impls, and parent-child relationships representing
+/// specializations.
+///
+/// The graph provides two key services:
+///
+/// - Construction. This implicitly checks for overlapping impls (i.e., impls
+/// that overlap but where neither specializes the other -- an artifact of the
+/// simple "chain" rule.
+///
+/// - Parent extraction. In particular, the graph can give you the *immediate*
+/// parents of a given specializing impl, which is needed for extracting
+/// default items amongst other things. In the simple "chain" rule, every impl
+/// has at most one parent.
+#[derive(TyEncodable, TyDecodable, HashStable, Debug)]
+pub struct Graph {
+ /// All impls have a parent; the "root" impls have as their parent the `def_id`
+ /// of the trait.
+ pub parent: DefIdMap<DefId>,
+
+ /// The "root" impls are found by looking up the trait's def_id.
+ pub children: DefIdMap<Children>,
+
+ /// Whether an error was emitted while constructing the graph.
+ pub has_errored: Option<ErrorGuaranteed>,
+}
+
+impl Graph {
+ pub fn new() -> Graph {
+ Graph { parent: Default::default(), children: Default::default(), has_errored: None }
+ }
+
+ /// The parent of a given impl, which is the `DefId` of the trait when the
+ /// impl is a "specialization root".
+ pub fn parent(&self, child: DefId) -> DefId {
+ *self.parent.get(&child).unwrap_or_else(|| panic!("Failed to get parent for {:?}", child))
+ }
+}
+
+/// What kind of overlap check are we doing -- this exists just for testing and feature-gating
+/// purposes.
+#[derive(Copy, Clone, PartialEq, Eq, Hash, HashStable, Debug, TyEncodable, TyDecodable)]
+pub enum OverlapMode {
+ /// The 1.0 rules (either types fail to unify, or where clauses are not implemented for crate-local types)
+ Stable,
+ /// Feature-gated test: Stable, *or* there is an explicit negative impl that rules out one of the where-clauses.
+ WithNegative,
+ /// Just check for negative impls, not for "where clause not implemented": used for testing.
+ Strict,
+}
+
+impl OverlapMode {
+ pub fn get<'tcx>(tcx: TyCtxt<'tcx>, trait_id: DefId) -> OverlapMode {
+ let with_negative_coherence = tcx.features().with_negative_coherence;
+ let strict_coherence = tcx.has_attr(trait_id, sym::rustc_strict_coherence);
+
+ if with_negative_coherence {
+ if strict_coherence { OverlapMode::Strict } else { OverlapMode::WithNegative }
+ } else if strict_coherence {
+ bug!("To use strict_coherence you need to set with_negative_coherence feature flag");
+ } else {
+ OverlapMode::Stable
+ }
+ }
+
+ pub fn use_negative_impl(&self) -> bool {
+ *self == OverlapMode::Strict || *self == OverlapMode::WithNegative
+ }
+
+ pub fn use_implicit_negative(&self) -> bool {
+ *self == OverlapMode::Stable || *self == OverlapMode::WithNegative
+ }
+}
+
+/// Children of a given impl, grouped into blanket/non-blanket varieties as is
+/// done in `TraitDef`.
+#[derive(Default, TyEncodable, TyDecodable, Debug, HashStable)]
+pub struct Children {
+ // Impls of a trait (or specializations of a given impl). To allow for
+ // quicker lookup, the impls are indexed by a simplified version of their
+ // `Self` type: impls with a simplifiable `Self` are stored in
+ // `non_blanket_impls` keyed by it, while all other impls are stored in
+ // `blanket_impls`.
+ //
+ // A similar division is used within `TraitDef`, but the lists there collect
+ // together *all* the impls for a trait, and are populated prior to building
+ // the specialization graph.
+ /// Impls of the trait.
+ pub non_blanket_impls: FxIndexMap<SimplifiedType, Vec<DefId>>,
+
+ /// Blanket impls associated with the trait.
+ pub blanket_impls: Vec<DefId>,
+}
+
+/// A node in the specialization graph is either an impl or a trait
+/// definition; either can serve as a source of item definitions.
+/// There is always exactly one trait definition node: the root.
+#[derive(Debug, Copy, Clone)]
+pub enum Node {
+ Impl(DefId),
+ Trait(DefId),
+}
+
+impl Node {
+ pub fn is_from_trait(&self) -> bool {
+ matches!(self, Node::Trait(..))
+ }
+
+ /// Tries to find the associated item that implements `trait_item_def_id`
+ /// defined in this node.
+ ///
+ /// If this returns `None`, the item can potentially still be found in
+ /// parents of this node.
+ pub fn item<'tcx>(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ trait_item_def_id: DefId,
+ ) -> Option<&'tcx ty::AssocItem> {
+ match *self {
+ Node::Trait(_) => Some(tcx.associated_item(trait_item_def_id)),
+ Node::Impl(impl_def_id) => {
+ let id = tcx.impl_item_implementor_ids(impl_def_id).get(&trait_item_def_id)?;
+ Some(tcx.associated_item(*id))
+ }
+ }
+ }
+
+ pub fn def_id(&self) -> DefId {
+ match *self {
+ Node::Impl(did) => did,
+ Node::Trait(did) => did,
+ }
+ }
+}
+
+#[derive(Copy, Clone)]
+pub struct Ancestors<'tcx> {
+ trait_def_id: DefId,
+ specialization_graph: &'tcx Graph,
+ current_source: Option<Node>,
+}
+
+impl Iterator for Ancestors<'_> {
+ type Item = Node;
+ fn next(&mut self) -> Option<Node> {
+ let cur = self.current_source.take();
+ if let Some(Node::Impl(cur_impl)) = cur {
+ let parent = self.specialization_graph.parent(cur_impl);
+
+ self.current_source = if parent == self.trait_def_id {
+ Some(Node::Trait(parent))
+ } else {
+ Some(Node::Impl(parent))
+ };
+ }
+ cur
+ }
+}
+
+/// Information about the most specialized definition of an associated item.
+pub struct LeafDef {
+ /// The associated item described by this `LeafDef`.
+ pub item: ty::AssocItem,
+
+ /// The node in the specialization graph containing the definition of `item`.
+ pub defining_node: Node,
+
+ /// The "top-most" (ie. least specialized) specialization graph node that finalized the
+ /// definition of `item`.
+ ///
+ /// Example:
+ ///
+ /// ```
+ /// #![feature(specialization)]
+ /// trait Tr {
+ /// fn assoc(&self);
+ /// }
+ ///
+ /// impl<T> Tr for T {
+ /// default fn assoc(&self) {}
+ /// }
+ ///
+ /// impl Tr for u8 {}
+ /// ```
+ ///
+ /// If we start the leaf definition search at `impl Tr for u8`, that impl will be the
+ /// `finalizing_node`, while `defining_node` will be the generic impl.
+ ///
+ /// If the leaf definition search is started at the generic impl, `finalizing_node` will be
+ /// `None`, since the most specialized impl we found still allows overriding the method
+ /// (doesn't finalize it).
+ pub finalizing_node: Option<Node>,
+}
+
+impl LeafDef {
+ /// Returns whether this definition is known to not be further specializable.
+ pub fn is_final(&self) -> bool {
+ self.finalizing_node.is_some()
+ }
+}
+
+impl<'tcx> Ancestors<'tcx> {
+ /// Finds the bottom-most (i.e., most specialized) definition of an associated
+ /// item.
+ pub fn leaf_def(mut self, tcx: TyCtxt<'tcx>, trait_item_def_id: DefId) -> Option<LeafDef> {
+ let mut finalizing_node = None;
+
+ self.find_map(|node| {
+ if let Some(item) = node.item(tcx, trait_item_def_id) {
+ if finalizing_node.is_none() {
+ let is_specializable = item.defaultness(tcx).is_default()
+ || tcx.impl_defaultness(node.def_id()).is_default();
+
+ if !is_specializable {
+ finalizing_node = Some(node);
+ }
+ }
+
+ Some(LeafDef { item: *item, defining_node: node, finalizing_node })
+ } else {
+ // Item not mentioned. This "finalizes" any defaulted item provided by an ancestor.
+ finalizing_node = Some(node);
+ None
+ }
+ })
+ }
+}
+
+/// Walk up the specialization ancestors of a given impl, starting with that
+/// impl itself.
+///
+/// Returns `Err` if an error was reported while building the specialization
+/// graph.
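+///
+/// A caller might combine this with `leaf_def` to resolve the most
+/// specialized definition of an associated item (an illustrative sketch):
+///
+/// ```ignore (illustrative)
+/// let leaf = ancestors(tcx, trait_def_id, impl_def_id)?
+///     .leaf_def(tcx, trait_item_def_id)
+///     .expect("associated item not found in any ancestor");
+/// ```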
+pub fn ancestors<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ trait_def_id: DefId,
+ start_from_impl: DefId,
+) -> Result<Ancestors<'tcx>, ErrorGuaranteed> {
+ let specialization_graph = tcx.specialization_graph_of(trait_def_id);
+
+ if let Some(reported) = specialization_graph.has_errored {
+ Err(reported)
+ } else if let Some(reported) = tcx.type_of(start_from_impl).error_reported() {
+ Err(reported)
+ } else {
+ Ok(Ancestors {
+ trait_def_id,
+ specialization_graph,
+ current_source: Some(Node::Impl(start_from_impl)),
+ })
+ }
+}
diff --git a/compiler/rustc_middle/src/traits/structural_impls.rs b/compiler/rustc_middle/src/traits/structural_impls.rs
new file mode 100644
index 000000000..7fbd57ac7
--- /dev/null
+++ b/compiler/rustc_middle/src/traits/structural_impls.rs
@@ -0,0 +1,135 @@
+use crate::traits;
+
+use std::fmt;
+
+// Structural impls for the structs in `traits`.
+
+impl<'tcx, N: fmt::Debug> fmt::Debug for traits::ImplSource<'tcx, N> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ super::ImplSource::UserDefined(ref v) => write!(f, "{:?}", v),
+
+ super::ImplSource::AutoImpl(ref t) => write!(f, "{:?}", t),
+
+ super::ImplSource::Closure(ref d) => write!(f, "{:?}", d),
+
+ super::ImplSource::Generator(ref d) => write!(f, "{:?}", d),
+
+ super::ImplSource::FnPointer(ref d) => write!(f, "({:?})", d),
+
+ super::ImplSource::DiscriminantKind(ref d) => write!(f, "{:?}", d),
+
+ super::ImplSource::Pointee(ref d) => write!(f, "{:?}", d),
+
+ super::ImplSource::Object(ref d) => write!(f, "{:?}", d),
+
+ super::ImplSource::Param(ref n, ct) => {
+ write!(f, "ImplSourceParamData({:?}, {:?})", n, ct)
+ }
+
+ super::ImplSource::Builtin(ref d) => write!(f, "{:?}", d),
+
+ super::ImplSource::TraitAlias(ref d) => write!(f, "{:?}", d),
+
+ super::ImplSource::TraitUpcasting(ref d) => write!(f, "{:?}", d),
+
+ super::ImplSource::ConstDestruct(ref d) => write!(f, "{:?}", d),
+ }
+ }
+}
+
+impl<'tcx, N: fmt::Debug> fmt::Debug for traits::ImplSourceUserDefinedData<'tcx, N> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(
+ f,
+ "ImplSourceUserDefinedData(impl_def_id={:?}, substs={:?}, nested={:?})",
+ self.impl_def_id, self.substs, self.nested
+ )
+ }
+}
+
+impl<'tcx, N: fmt::Debug> fmt::Debug for traits::ImplSourceGeneratorData<'tcx, N> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(
+ f,
+ "ImplSourceGeneratorData(generator_def_id={:?}, substs={:?}, nested={:?})",
+ self.generator_def_id, self.substs, self.nested
+ )
+ }
+}
+
+impl<'tcx, N: fmt::Debug> fmt::Debug for traits::ImplSourceClosureData<'tcx, N> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(
+ f,
+ "ImplSourceClosureData(closure_def_id={:?}, substs={:?}, nested={:?})",
+ self.closure_def_id, self.substs, self.nested
+ )
+ }
+}
+
+impl<N: fmt::Debug> fmt::Debug for traits::ImplSourceBuiltinData<N> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "ImplSourceBuiltinData(nested={:?})", self.nested)
+ }
+}
+
+impl<'tcx, N: fmt::Debug> fmt::Debug for traits::ImplSourceTraitUpcastingData<'tcx, N> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(
+ f,
+ "ImplSourceTraitUpcastingData(upcast={:?}, vtable_vptr_slot={:?}, nested={:?})",
+ self.upcast_trait_ref, self.vtable_vptr_slot, self.nested
+ )
+ }
+}
+
+impl<N: fmt::Debug> fmt::Debug for traits::ImplSourceAutoImplData<N> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(
+ f,
+ "ImplSourceAutoImplData(trait_def_id={:?}, nested={:?})",
+ self.trait_def_id, self.nested
+ )
+ }
+}
+
+impl<'tcx, N: fmt::Debug> fmt::Debug for traits::ImplSourceObjectData<'tcx, N> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(
+ f,
+ "ImplSourceObjectData(upcast={:?}, vtable_base={}, nested={:?})",
+ self.upcast_trait_ref, self.vtable_base, self.nested
+ )
+ }
+}
+
+impl<'tcx, N: fmt::Debug> fmt::Debug for traits::ImplSourceFnPointerData<'tcx, N> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "ImplSourceFnPointerData(fn_ty={:?}, nested={:?})", self.fn_ty, self.nested)
+ }
+}
+
+impl<'tcx, N: fmt::Debug> fmt::Debug for traits::ImplSourceTraitAliasData<'tcx, N> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(
+ f,
+ "ImplSourceTraitAliasData(alias_def_id={:?}, substs={:?}, nested={:?})",
+ self.alias_def_id, self.substs, self.nested
+ )
+ }
+}
+
+impl<N: fmt::Debug> fmt::Debug for traits::ImplSourceConstDestructData<N> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "ImplSourceConstDestructData(nested={:?})", self.nested)
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Lift implementations
+
+TrivialTypeTraversalAndLiftImpls! {
+ super::ImplSourceDiscriminantKindData,
+ super::ImplSourcePointeeData,
+}
diff --git a/compiler/rustc_middle/src/traits/util.rs b/compiler/rustc_middle/src/traits/util.rs
new file mode 100644
index 000000000..d54b8c599
--- /dev/null
+++ b/compiler/rustc_middle/src/traits/util.rs
@@ -0,0 +1,49 @@
+use rustc_data_structures::fx::FxHashSet;
+
+use crate::ty::{PolyTraitRef, TyCtxt};
+
+/// Given a `PolyTraitRef`, get the `PolyTraitRef`s of the trait's (transitive) supertraits.
+///
+/// A simplified version of the same function at `rustc_infer::traits::util::supertraits`.
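+///
+/// For example (illustrative), given `trait A {}` and `trait B: A {}`, iterating
+/// from a `PolyTraitRef` for `B` yields `B` first and then `A`:
+///
+/// ```ignore (illustrative)
+/// for trait_ref in supertraits(tcx, b_trait_ref) {
+///     // first `B`, then `A`; duplicates are skipped via the `visited` set
+/// }
+/// ```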
+pub fn supertraits<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ trait_ref: PolyTraitRef<'tcx>,
+) -> impl Iterator<Item = PolyTraitRef<'tcx>> {
+ Elaborator { tcx, visited: FxHashSet::from_iter([trait_ref]), stack: vec![trait_ref] }
+}
+
+struct Elaborator<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ visited: FxHashSet<PolyTraitRef<'tcx>>,
+ stack: Vec<PolyTraitRef<'tcx>>,
+}
+
+impl<'tcx> Elaborator<'tcx> {
+ fn elaborate(&mut self, trait_ref: PolyTraitRef<'tcx>) {
+ let supertrait_refs = self
+ .tcx
+ .super_predicates_of(trait_ref.def_id())
+ .predicates
+ .into_iter()
+ .flat_map(|(pred, _)| {
+ pred.subst_supertrait(self.tcx, &trait_ref).to_opt_poly_trait_pred()
+ })
+ .map(|t| t.map_bound(|pred| pred.trait_ref))
+ .filter(|supertrait_ref| self.visited.insert(*supertrait_ref));
+
+ self.stack.extend(supertrait_refs);
+ }
+}
+
+impl<'tcx> Iterator for Elaborator<'tcx> {
+ type Item = PolyTraitRef<'tcx>;
+
+ fn next(&mut self) -> Option<PolyTraitRef<'tcx>> {
+ if let Some(trait_ref) = self.stack.pop() {
+ self.elaborate(trait_ref);
+ Some(trait_ref)
+ } else {
+ None
+ }
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/_match.rs b/compiler/rustc_middle/src/ty/_match.rs
new file mode 100644
index 000000000..e6aab30a1
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/_match.rs
@@ -0,0 +1,124 @@
+use crate::ty::error::TypeError;
+use crate::ty::relate::{self, Relate, RelateResult, TypeRelation};
+use crate::ty::{self, InferConst, Ty, TyCtxt};
+
+/// A type "A" *matches* "B" if the fresh types in B could be
+/// substituted with values so as to make it equal to A. Matching is
+/// intended to be used only on freshened types, and it basically
+/// indicates if the non-freshened versions of A and B could have been
+/// unified.
+///
+/// It is only an approximation. If it yields false, unification would
+/// definitely fail, but a true result doesn't mean unification would
+/// succeed. This is because we don't track the "side-constraints" on
+/// type variables, nor do we track if the same freshened type appears
+/// more than once. To some extent these approximations could be
+/// fixed, given effort.
+///
+/// Like subtyping, matching is really a binary relation, so the only
+/// important thing about the result is Ok/Err. Also, matching never
+/// affects any type variables or unification state.
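+///
+/// For example (illustrative), `Vec<u32>` matches the freshened type
+/// `Vec<FreshTy(0)>`, since the fresh type could be instantiated to `u32`.
+/// The reverse does not hold: fresh types may only appear on the `B` side,
+/// so relating them the other way around yields `TypeError::Sorts`.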
+pub struct Match<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+}
+
+impl<'tcx> Match<'tcx> {
+ pub fn new(tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> Match<'tcx> {
+ Match { tcx, param_env }
+ }
+}
+
+impl<'tcx> TypeRelation<'tcx> for Match<'tcx> {
+ fn tag(&self) -> &'static str {
+ "Match"
+ }
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+ fn param_env(&self) -> ty::ParamEnv<'tcx> {
+ self.param_env
+ }
+ fn a_is_expected(&self) -> bool {
+ true
+ } // irrelevant
+
+ fn relate_with_variance<T: Relate<'tcx>>(
+ &mut self,
+ _: ty::Variance,
+ _: ty::VarianceDiagInfo<'tcx>,
+ a: T,
+ b: T,
+ ) -> RelateResult<'tcx, T> {
+ self.relate(a, b)
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ fn regions(
+ &mut self,
+ a: ty::Region<'tcx>,
+ b: ty::Region<'tcx>,
+ ) -> RelateResult<'tcx, ty::Region<'tcx>> {
+ Ok(a)
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>> {
+ if a == b {
+ return Ok(a);
+ }
+
+ match (a.kind(), b.kind()) {
+ (
+ _,
+ &ty::Infer(ty::FreshTy(_))
+ | &ty::Infer(ty::FreshIntTy(_))
+ | &ty::Infer(ty::FreshFloatTy(_)),
+ ) => Ok(a),
+
+ (&ty::Infer(_), _) | (_, &ty::Infer(_)) => {
+ Err(TypeError::Sorts(relate::expected_found(self, a, b)))
+ }
+
+ (&ty::Error(_), _) | (_, &ty::Error(_)) => Ok(self.tcx().ty_error()),
+
+ _ => relate::super_relate_tys(self, a, b),
+ }
+ }
+
+ fn consts(
+ &mut self,
+ a: ty::Const<'tcx>,
+ b: ty::Const<'tcx>,
+ ) -> RelateResult<'tcx, ty::Const<'tcx>> {
+ debug!("{}.consts({:?}, {:?})", self.tag(), a, b);
+ if a == b {
+ return Ok(a);
+ }
+
+ match (a.kind(), b.kind()) {
+ (_, ty::ConstKind::Infer(InferConst::Fresh(_))) => {
+ return Ok(a);
+ }
+
+ (ty::ConstKind::Infer(_), _) | (_, ty::ConstKind::Infer(_)) => {
+ return Err(TypeError::ConstMismatch(relate::expected_found(self, a, b)));
+ }
+
+ _ => {}
+ }
+
+ relate::super_relate_consts(self, a, b)
+ }
+
+ fn binders<T>(
+ &mut self,
+ a: ty::Binder<'tcx, T>,
+ b: ty::Binder<'tcx, T>,
+ ) -> RelateResult<'tcx, ty::Binder<'tcx, T>>
+ where
+ T: Relate<'tcx>,
+ {
+ Ok(a.rebind(self.relate(a.skip_binder(), b.skip_binder())?))
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/abstract_const.rs b/compiler/rustc_middle/src/ty/abstract_const.rs
new file mode 100644
index 000000000..bed809930
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/abstract_const.rs
@@ -0,0 +1,194 @@
+//! A subset of a mir body used for const evaluatability checking.
+use crate::mir;
+use crate::ty::visit::TypeVisitable;
+use crate::ty::{self, subst::Subst, DelaySpanBugEmitted, EarlyBinder, SubstsRef, Ty, TyCtxt};
+use rustc_errors::ErrorGuaranteed;
+use rustc_hir::def_id::DefId;
+use std::cmp;
+use std::ops::ControlFlow;
+
+rustc_index::newtype_index! {
+ /// An index into an `AbstractConst`.
+ pub struct NodeId {
+ derive [HashStable]
+ DEBUG_FORMAT = "n{}",
+ }
+}
+
+/// A tree representing an anonymous constant.
+///
+/// This is only able to represent a subset of `MIR`,
+/// and should not leak any information about desugarings.
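+///
+/// For example (illustrative), the anonymous constant `N + 1` could be
+/// represented by the post-order node list
+///
+/// ```ignore (illustrative)
+/// [Leaf(N), Leaf(1), Binop(Add, n0, n1)] // the root is the last node
+/// ```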
+#[derive(Debug, Clone, Copy)]
+pub struct AbstractConst<'tcx> {
+ // FIXME: Consider adding something like `IndexSlice`
+ // and use this here.
+ inner: &'tcx [Node<'tcx>],
+ substs: SubstsRef<'tcx>,
+}
+
+impl<'tcx> AbstractConst<'tcx> {
+ pub fn new(
+ tcx: TyCtxt<'tcx>,
+ uv: ty::Unevaluated<'tcx, ()>,
+ ) -> Result<Option<AbstractConst<'tcx>>, ErrorGuaranteed> {
+ let inner = tcx.thir_abstract_const_opt_const_arg(uv.def)?;
+ debug!("AbstractConst::new({:?}) = {:?}", uv, inner);
+ Ok(inner.map(|inner| AbstractConst { inner, substs: tcx.erase_regions(uv.substs) }))
+ }
+
+ pub fn from_const(
+ tcx: TyCtxt<'tcx>,
+ ct: ty::Const<'tcx>,
+ ) -> Result<Option<AbstractConst<'tcx>>, ErrorGuaranteed> {
+ match ct.kind() {
+ ty::ConstKind::Unevaluated(uv) => AbstractConst::new(tcx, uv.shrink()),
+ ty::ConstKind::Error(DelaySpanBugEmitted { reported, .. }) => Err(reported),
+ _ => Ok(None),
+ }
+ }
+
+ #[inline]
+ pub fn subtree(self, node: NodeId) -> AbstractConst<'tcx> {
+ AbstractConst { inner: &self.inner[..=node.index()], substs: self.substs }
+ }
+
+ #[inline]
+ pub fn root(self, tcx: TyCtxt<'tcx>) -> Node<'tcx> {
+ let node = self.inner.last().copied().unwrap();
+ match node {
+ Node::Leaf(leaf) => Node::Leaf(EarlyBinder(leaf).subst(tcx, self.substs)),
+ Node::Cast(kind, operand, ty) => {
+ Node::Cast(kind, operand, EarlyBinder(ty).subst(tcx, self.substs))
+ }
+ // Don't perform substitution on the following as they can't directly contain generic params
+ Node::Binop(_, _, _) | Node::UnaryOp(_, _) | Node::FunctionCall(_, _) => node,
+ }
+ }
+
+ pub fn unify_failure_kind(self, tcx: TyCtxt<'tcx>) -> FailureKind {
+ let mut failure_kind = FailureKind::Concrete;
+ walk_abstract_const::<!, _>(tcx, self, |node| {
+ match node.root(tcx) {
+ Node::Leaf(leaf) => {
+ if leaf.has_infer_types_or_consts() {
+ failure_kind = FailureKind::MentionsInfer;
+ } else if leaf.has_param_types_or_consts() {
+ failure_kind = cmp::min(failure_kind, FailureKind::MentionsParam);
+ }
+ }
+ Node::Cast(_, _, ty) => {
+ if ty.has_infer_types_or_consts() {
+ failure_kind = FailureKind::MentionsInfer;
+ } else if ty.has_param_types_or_consts() {
+ failure_kind = cmp::min(failure_kind, FailureKind::MentionsParam);
+ }
+ }
+ Node::Binop(_, _, _) | Node::UnaryOp(_, _) | Node::FunctionCall(_, _) => {}
+ }
+ ControlFlow::CONTINUE
+ });
+ failure_kind
+ }
+}
+
+#[derive(Debug, Clone, Copy, PartialEq, Eq, HashStable, TyEncodable, TyDecodable)]
+pub enum CastKind {
+ /// `thir::ExprKind::As`
+ As,
+ /// `thir::ExprKind::Use`
+ Use,
+}
+
+/// A node of an `AbstractConst`.
+#[derive(Debug, Clone, Copy, PartialEq, Eq, HashStable, TyEncodable, TyDecodable)]
+pub enum Node<'tcx> {
+ Leaf(ty::Const<'tcx>),
+ Binop(mir::BinOp, NodeId, NodeId),
+ UnaryOp(mir::UnOp, NodeId),
+ FunctionCall(NodeId, &'tcx [NodeId]),
+ Cast(CastKind, NodeId, Ty<'tcx>),
+}
+
+#[derive(Debug, Copy, Clone, PartialEq, Eq, HashStable, TyEncodable, TyDecodable)]
+pub enum NotConstEvaluatable {
+ Error(ErrorGuaranteed),
+ MentionsInfer,
+ MentionsParam,
+}
+
+impl From<ErrorGuaranteed> for NotConstEvaluatable {
+ fn from(e: ErrorGuaranteed) -> NotConstEvaluatable {
+ NotConstEvaluatable::Error(e)
+ }
+}
+
+TrivialTypeTraversalAndLiftImpls! {
+ NotConstEvaluatable,
+}
+
+impl<'tcx> TyCtxt<'tcx> {
+ #[inline]
+ pub fn thir_abstract_const_opt_const_arg(
+ self,
+ def: ty::WithOptConstParam<DefId>,
+ ) -> Result<Option<&'tcx [Node<'tcx>]>, ErrorGuaranteed> {
+ if let Some((did, param_did)) = def.as_const_arg() {
+ self.thir_abstract_const_of_const_arg((did, param_did))
+ } else {
+ self.thir_abstract_const(def.did)
+ }
+ }
+}
+
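+/// Walks `ct` and all of its subtrees in pre-order, short-circuiting through
+/// the `ControlFlow` returned by `f`. A sketch of a typical call, mirroring
+/// `unify_failure_kind` above:
+///
+/// ```ignore (illustrative)
+/// walk_abstract_const::<!, _>(tcx, ct, |node| {
+///     debug!(?node);
+///     ControlFlow::CONTINUE
+/// });
+/// ```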
+#[instrument(skip(tcx, f), level = "debug")]
+pub fn walk_abstract_const<'tcx, R, F>(
+ tcx: TyCtxt<'tcx>,
+ ct: AbstractConst<'tcx>,
+ mut f: F,
+) -> ControlFlow<R>
+where
+ F: FnMut(AbstractConst<'tcx>) -> ControlFlow<R>,
+{
+ #[instrument(skip(tcx, f), level = "debug")]
+ fn recurse<'tcx, R>(
+ tcx: TyCtxt<'tcx>,
+ ct: AbstractConst<'tcx>,
+ f: &mut dyn FnMut(AbstractConst<'tcx>) -> ControlFlow<R>,
+ ) -> ControlFlow<R> {
+ f(ct)?;
+ let root = ct.root(tcx);
+ debug!(?root);
+ match root {
+ Node::Leaf(_) => ControlFlow::CONTINUE,
+ Node::Binop(_, l, r) => {
+ recurse(tcx, ct.subtree(l), f)?;
+ recurse(tcx, ct.subtree(r), f)
+ }
+ Node::UnaryOp(_, v) => recurse(tcx, ct.subtree(v), f),
+ Node::FunctionCall(func, args) => {
+ recurse(tcx, ct.subtree(func), f)?;
+ args.iter().try_for_each(|&arg| recurse(tcx, ct.subtree(arg), f))
+ }
+ Node::Cast(_, operand, _) => recurse(tcx, ct.subtree(operand), f),
+ }
+ }
+
+ recurse(tcx, ct, &mut f)
+}
+
+// We were unable to unify the abstract constant with
+// a constant found in the caller bounds; there are
+// three possible cases here.
+#[derive(Debug, Copy, Clone, PartialEq, Eq, PartialOrd, Ord)]
+pub enum FailureKind {
+ /// The abstract const still references an inference
+ /// variable, in this case we return `TooGeneric`.
+ MentionsInfer,
+ /// The abstract const references a generic parameter,
+ /// this means that we emit an error here.
+ MentionsParam,
+ /// The substs are concrete enough that we can simply
+ /// try and evaluate the given constant.
+ Concrete,
+}
diff --git a/compiler/rustc_middle/src/ty/adjustment.rs b/compiler/rustc_middle/src/ty/adjustment.rs
new file mode 100644
index 000000000..d36cf2fe3
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/adjustment.rs
@@ -0,0 +1,198 @@
+use crate::ty::subst::SubstsRef;
+use crate::ty::{self, Ty, TyCtxt};
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_hir::lang_items::LangItem;
+use rustc_macros::HashStable;
+use rustc_span::Span;
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq, TyEncodable, TyDecodable, Hash, HashStable)]
+pub enum PointerCast {
+ /// Go from a fn-item type to a fn-pointer type.
+ ReifyFnPointer,
+
+ /// Go from a safe fn pointer to an unsafe fn pointer.
+ UnsafeFnPointer,
+
+ /// Go from a non-capturing closure to an fn pointer or an unsafe fn pointer.
+ /// It cannot convert a closure that requires unsafe.
+ ClosureFnPointer(hir::Unsafety),
+
+ /// Go from a mut raw pointer to a const raw pointer.
+ MutToConstPointer,
+
+ /// Go from `*const [T; N]` to `*const T`
+ ArrayToPointer,
+
+ /// Unsize a pointer/reference value, e.g., `&[T; n]` to
+ /// `&[T]`. Note that the source could be a thin or fat pointer.
+ /// This will do things like convert thin pointers to fat
+ /// pointers, or convert structs containing thin pointers to
+ /// structs containing fat pointers, or convert between fat
+ /// pointers. We don't store the details of how the transform is
+ /// done (in fact, we don't know that, because it might depend on
+ /// the precise type parameters). We just store the target
+ /// type. Codegen backends and miri figure out what has to be done
+ /// based on the precise source/target type at hand.
+ Unsize,
+}
+
+/// Represents coercing a value to a different type of value.
+///
+/// We transform values by following a number of `Adjust` steps in order.
+/// See the documentation on variants of `Adjust` for more details.
+///
+/// Here are some common scenarios:
+///
+/// 1. The simplest cases are where a pointer is not adjusted between fat and
+/// thin. Here the pointer will be dereferenced N times (where a dereference
+/// can happen to raw or borrowed pointers or any smart pointer which
+/// implements `Deref`, including `Box<_>`). The types of the dereferences are
+/// given by
+/// `autoderefs`. It can then be auto-referenced zero or one times, indicated
+/// by `autoref`, to either a raw or borrowed pointer. In these cases unsize is
+/// `false`.
+///
+/// 2. A thin-to-fat coercion involves unsizing the underlying data. We start
+/// with a thin pointer, deref a number of times, unsize the underlying data,
+/// then autoref. The 'unsize' phase may change a fixed length array to a
+/// dynamically sized one, a concrete object to a trait object, or statically
+/// sized struct to a dynamically sized one. E.g., `&[i32; 4]` -> `&[i32]` is
+/// represented by:
+///
+/// ```ignore (illustrative)
+/// Deref(None) -> [i32; 4],
+/// Borrow(AutoBorrow::Ref) -> &[i32; 4],
+/// Unsize -> &[i32],
+/// ```
+///
+/// Note that for a struct, the 'deep' unsizing of the struct is not recorded.
+/// E.g., given `struct Foo<T> { x: T }`, we can coerce `&Foo<[i32; 4]>` to `&Foo<[i32]>`.
+/// The autoderef and -ref are the same as in the above example, but the type
+/// stored in `unsize` is `Foo<[i32]>`; we don't store any further detail about
+/// the underlying conversions from `[i32; 4]` to `[i32]`.
+///
+/// 3. Coercing a `Box<T>` to `Box<dyn Trait>` is an interesting special case. In
+/// that case, we have the pointer we need coming in, so there are no
+/// autoderefs, and no autoref. Instead we just do the `Unsize` transformation.
+/// At some point, of course, `Box` should move out of the compiler, in which
+/// case this is analogous to transforming a struct. E.g., `Box<[i32; 4]>` ->
+/// `Box<[i32]>` is an `Adjust::Unsize` with the target `Box<[i32]>`.
+#[derive(Clone, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
+pub struct Adjustment<'tcx> {
+ pub kind: Adjust<'tcx>,
+ pub target: Ty<'tcx>,
+}
+
+impl<'tcx> Adjustment<'tcx> {
+ pub fn is_region_borrow(&self) -> bool {
+ matches!(self.kind, Adjust::Borrow(AutoBorrow::Ref(..)))
+ }
+}
+
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
+pub enum Adjust<'tcx> {
+ /// Go from ! to any type.
+ NeverToAny,
+
+ /// Dereference once, producing a place.
+ Deref(Option<OverloadedDeref<'tcx>>),
+
+ /// Take the address and produce either a `&` or `*` pointer.
+ Borrow(AutoBorrow<'tcx>),
+
+ Pointer(PointerCast),
+}
+
+/// An overloaded autoderef step, representing a `Deref(Mut)::deref(_mut)`
+/// call, with the signature `&'a T -> &'a U` or `&'a mut T -> &'a mut U`.
+/// The target type is `U` in both cases, with the region and mutability
+/// being those shared by both the receiver and the returned reference.
+#[derive(Copy, Clone, PartialEq, Debug, TyEncodable, TyDecodable, HashStable)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub struct OverloadedDeref<'tcx> {
+ pub region: ty::Region<'tcx>,
+ pub mutbl: hir::Mutability,
+ /// The `Span` associated with the field access or method call
+ /// that triggered this overloaded deref.
+ pub span: Span,
+}
+
+impl<'tcx> OverloadedDeref<'tcx> {
+ pub fn method_call(&self, tcx: TyCtxt<'tcx>, source: Ty<'tcx>) -> (DefId, SubstsRef<'tcx>) {
+ let trait_def_id = match self.mutbl {
+ hir::Mutability::Not => tcx.require_lang_item(LangItem::Deref, None),
+ hir::Mutability::Mut => tcx.require_lang_item(LangItem::DerefMut, None),
+ };
+ let method_def_id = tcx
+ .associated_items(trait_def_id)
+ .in_definition_order()
+ .find(|m| m.kind == ty::AssocKind::Fn)
+ .unwrap()
+ .def_id;
+ (method_def_id, tcx.mk_substs_trait(source, &[]))
+ }
+}
+
+/// At least for initial deployment, we want to limit two-phase borrows to
+/// only a few specific cases. Right now, those are mostly "things that desugar"
+/// into method calls:
+/// - using `x.some_method()` syntax, where `some_method` takes `&mut self`,
+/// - using `Foo::some_method(&mut x, ...)` syntax,
+/// - binary assignment operators (`+=`, `-=`, `*=`, etc.).
+/// Anything else should be rejected until generalized two-phase borrow support
+/// is implemented. Right now, dataflow can't handle the general case where there
+/// is more than one use of a mutable borrow, and we don't want to accept too much
+/// new code via two-phase borrows, so we try to limit where we create two-phase
+/// capable mutable borrows.
+/// See #49434 for tracking.
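+///
+/// The classic motivating case (illustrative) is a nested method call that
+/// reads the receiver between the reservation and activation of the borrow:
+///
+/// ```ignore (illustrative)
+/// let mut v = vec![0];
+/// v.push(v.len()); // `&mut v` is reserved, `v.len()` reads `v`, then the borrow activates
+/// ```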
+#[derive(Copy, Clone, PartialEq, Debug, TyEncodable, TyDecodable, HashStable)]
+pub enum AllowTwoPhase {
+ Yes,
+ No,
+}
+
+#[derive(Copy, Clone, PartialEq, Debug, TyEncodable, TyDecodable, HashStable)]
+pub enum AutoBorrowMutability {
+ Mut { allow_two_phase_borrow: AllowTwoPhase },
+ Not,
+}
+
+impl From<AutoBorrowMutability> for hir::Mutability {
+ fn from(m: AutoBorrowMutability) -> Self {
+ match m {
+ AutoBorrowMutability::Mut { .. } => hir::Mutability::Mut,
+ AutoBorrowMutability::Not => hir::Mutability::Not,
+ }
+ }
+}
+
+#[derive(Copy, Clone, PartialEq, Debug, TyEncodable, TyDecodable, HashStable)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub enum AutoBorrow<'tcx> {
+ /// Converts from T to &T.
+ Ref(ty::Region<'tcx>, AutoBorrowMutability),
+
+ /// Converts from T to *T.
+ RawPtr(hir::Mutability),
+}
+
+/// Information for `CoerceUnsized` impls, storing information we
+/// have computed about the coercion.
+///
+/// This struct can be obtained via the `coerce_impl_info` query.
+/// Demanding this struct also has the side-effect of reporting errors
+/// for inappropriate impls.
+#[derive(Clone, Copy, TyEncodable, TyDecodable, Debug, HashStable)]
+pub struct CoerceUnsizedInfo {
+ /// If this is a "custom coerce" impl, then what kind of custom
+ /// coercion is it? This applies to impls of `CoerceUnsized` for
+ /// structs, primarily, where we store a bit of info about which
+ /// fields need to be coerced.
+ pub custom_kind: Option<CustomCoerceUnsized>,
+}
+
+#[derive(Clone, Copy, TyEncodable, TyDecodable, Debug, HashStable)]
+pub enum CustomCoerceUnsized {
+ /// Records the index of the field being coerced.
+ Struct(usize),
+}
diff --git a/compiler/rustc_middle/src/ty/adt.rs b/compiler/rustc_middle/src/ty/adt.rs
new file mode 100644
index 000000000..2e596b275
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/adt.rs
@@ -0,0 +1,569 @@
+use crate::mir::interpret::ErrorHandled;
+use crate::ty;
+use crate::ty::util::{Discr, IntTypeExt};
+use rustc_data_structures::captures::Captures;
+use rustc_data_structures::fingerprint::Fingerprint;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::intern::Interned;
+use rustc_data_structures::stable_hasher::HashingControls;
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_hir as hir;
+use rustc_hir::def::{CtorKind, DefKind, Res};
+use rustc_hir::def_id::DefId;
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_query_system::ich::StableHashingContext;
+use rustc_session::DataTypeKind;
+use rustc_span::symbol::sym;
+use rustc_target::abi::VariantIdx;
+
+use std::cell::RefCell;
+use std::cmp::Ordering;
+use std::hash::{Hash, Hasher};
+use std::ops::Range;
+use std::str;
+
+use super::{
+ Destructor, FieldDef, GenericPredicates, ReprOptions, Ty, TyCtxt, VariantDef, VariantDiscr,
+};
+
+#[derive(Copy, Clone, HashStable, Debug)]
+pub struct AdtSizedConstraint<'tcx>(pub &'tcx [Ty<'tcx>]);
+
+bitflags! {
+ #[derive(HashStable, TyEncodable, TyDecodable)]
+ pub struct AdtFlags: u32 {
+ const NO_ADT_FLAGS = 0;
+ /// Indicates whether the ADT is an enum.
+ const IS_ENUM = 1 << 0;
+ /// Indicates whether the ADT is a union.
+ const IS_UNION = 1 << 1;
+ /// Indicates whether the ADT is a struct.
+ const IS_STRUCT = 1 << 2;
+ /// Indicates whether the ADT is a struct and has a constructor.
+ const HAS_CTOR = 1 << 3;
+ /// Indicates whether the type is `PhantomData`.
+ const IS_PHANTOM_DATA = 1 << 4;
+ /// Indicates whether the type has a `#[fundamental]` attribute.
+ const IS_FUNDAMENTAL = 1 << 5;
+ /// Indicates whether the type is `Box`.
+ const IS_BOX = 1 << 6;
+ /// Indicates whether the type is `ManuallyDrop`.
+ const IS_MANUALLY_DROP = 1 << 7;
+ /// Indicates whether the variant list of this ADT is `#[non_exhaustive]`.
+ /// (i.e., this flag is never set unless this ADT is an enum).
+ const IS_VARIANT_LIST_NON_EXHAUSTIVE = 1 << 8;
+ /// Indicates whether the type is `UnsafeCell`.
+ const IS_UNSAFE_CELL = 1 << 9;
+ }
+}
+
+/// The definition of a user-defined type, e.g., a `struct`, `enum`, or `union`.
+///
+/// These are all interned (by `alloc_adt_def`) into the global arena.
+///
+/// The initialism *ADT* stands for an [*algebraic data type (ADT)*][adt].
+/// This is slightly wrong because `union`s are not ADTs.
+/// Moreover, Rust only allows recursive data types through indirection.
+///
+/// [adt]: https://en.wikipedia.org/wiki/Algebraic_data_type
+///
+/// # Recursive types
+///
+/// It may seem impossible to represent recursive types using [`Ty`],
+/// since [`TyKind::Adt`] includes [`AdtDef`], which includes its fields,
+/// creating a cycle. However, `AdtDef` does not actually include the *types*
+/// of its fields; it includes just their [`DefId`]s.
+///
+/// [`TyKind::Adt`]: ty::TyKind::Adt
+///
+/// For example, the following type:
+///
+/// ```
+/// struct S { x: Box<S> }
+/// ```
+///
+/// is essentially represented with [`Ty`] as the following pseudocode:
+///
+/// ```ignore (illustrative)
+/// struct S { x }
+/// ```
+///
+/// where `x` here represents the `DefId` of `S.x`. Then, the `DefId`
+/// can be used with [`TyCtxt::type_of()`] to get the type of the field.
+#[derive(TyEncodable, TyDecodable)]
+pub struct AdtDefData {
+ /// The `DefId` of the struct, enum or union item.
+ pub did: DefId,
+ /// Variants of the ADT. If this is a struct or union, then there will be a single variant.
+ variants: IndexVec<VariantIdx, VariantDef>,
+ /// Flags of the ADT (e.g., is this a struct? is this non-exhaustive?).
+ flags: AdtFlags,
+ /// Repr options provided by the user.
+ repr: ReprOptions,
+}
+
+impl PartialOrd for AdtDefData {
+ fn partial_cmp(&self, other: &AdtDefData) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+}
+
+/// There should be only one `AdtDef` for each `did`; therefore
+/// it is fine to implement `Ord` only based on `did`.
+impl Ord for AdtDefData {
+ fn cmp(&self, other: &AdtDefData) -> Ordering {
+ self.did.cmp(&other.did)
+ }
+}
+
+/// There should be only one `AdtDef` for each `did`; therefore
+/// it is fine to implement `PartialEq` only based on `did`.
+impl PartialEq for AdtDefData {
+ #[inline]
+ fn eq(&self, other: &Self) -> bool {
+ self.did == other.did
+ }
+}
+
+impl Eq for AdtDefData {}
+
+/// There should be only one `AdtDef` for each `did`; therefore
+/// it is fine to implement `Hash` only based on `did`.
+impl Hash for AdtDefData {
+ #[inline]
+ fn hash<H: Hasher>(&self, s: &mut H) {
+ self.did.hash(s)
+ }
+}
+
+impl<'a> HashStable<StableHashingContext<'a>> for AdtDefData {
+ fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+ thread_local! {
+ static CACHE: RefCell<FxHashMap<(usize, HashingControls), Fingerprint>> = Default::default();
+ }
+
+ let hash: Fingerprint = CACHE.with(|cache| {
+ let addr = self as *const AdtDefData as usize;
+ let hashing_controls = hcx.hashing_controls();
+ *cache.borrow_mut().entry((addr, hashing_controls)).or_insert_with(|| {
+ let ty::AdtDefData { did, ref variants, ref flags, ref repr } = *self;
+
+ let mut hasher = StableHasher::new();
+ did.hash_stable(hcx, &mut hasher);
+ variants.hash_stable(hcx, &mut hasher);
+ flags.hash_stable(hcx, &mut hasher);
+ repr.hash_stable(hcx, &mut hasher);
+
+ hasher.finish()
+ })
+ });
+
+ hash.hash_stable(hcx, hasher);
+ }
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Ord, PartialOrd, HashStable)]
+#[rustc_pass_by_value]
+pub struct AdtDef<'tcx>(pub Interned<'tcx, AdtDefData>);
+
+impl<'tcx> AdtDef<'tcx> {
+ #[inline]
+ pub fn did(self) -> DefId {
+ self.0.0.did
+ }
+
+ #[inline]
+ pub fn variants(self) -> &'tcx IndexVec<VariantIdx, VariantDef> {
+ &self.0.0.variants
+ }
+
+ #[inline]
+ pub fn variant(self, idx: VariantIdx) -> &'tcx VariantDef {
+ &self.0.0.variants[idx]
+ }
+
+ #[inline]
+ pub fn flags(self) -> AdtFlags {
+ self.0.0.flags
+ }
+
+ #[inline]
+ pub fn repr(self) -> ReprOptions {
+ self.0.0.repr
+ }
+}
+
+#[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, TyEncodable, TyDecodable)]
+pub enum AdtKind {
+ Struct,
+ Union,
+ Enum,
+}
+
+impl Into<DataTypeKind> for AdtKind {
+ fn into(self) -> DataTypeKind {
+ match self {
+ AdtKind::Struct => DataTypeKind::Struct,
+ AdtKind::Union => DataTypeKind::Union,
+ AdtKind::Enum => DataTypeKind::Enum,
+ }
+ }
+}
+
+impl AdtDefData {
+ /// Creates a new `AdtDefData`.
+ pub(super) fn new(
+ tcx: TyCtxt<'_>,
+ did: DefId,
+ kind: AdtKind,
+ variants: IndexVec<VariantIdx, VariantDef>,
+ repr: ReprOptions,
+ ) -> Self {
+ debug!("AdtDef::new({:?}, {:?}, {:?}, {:?})", did, kind, variants, repr);
+ let mut flags = AdtFlags::NO_ADT_FLAGS;
+
+ if kind == AdtKind::Enum && tcx.has_attr(did, sym::non_exhaustive) {
+ debug!("found non-exhaustive variant list for {:?}", did);
+ flags = flags | AdtFlags::IS_VARIANT_LIST_NON_EXHAUSTIVE;
+ }
+
+ flags |= match kind {
+ AdtKind::Enum => AdtFlags::IS_ENUM,
+ AdtKind::Union => AdtFlags::IS_UNION,
+ AdtKind::Struct => AdtFlags::IS_STRUCT,
+ };
+
+ if kind == AdtKind::Struct && variants[VariantIdx::new(0)].ctor_def_id.is_some() {
+ flags |= AdtFlags::HAS_CTOR;
+ }
+
+ if tcx.has_attr(did, sym::fundamental) {
+ flags |= AdtFlags::IS_FUNDAMENTAL;
+ }
+ if Some(did) == tcx.lang_items().phantom_data() {
+ flags |= AdtFlags::IS_PHANTOM_DATA;
+ }
+ if Some(did) == tcx.lang_items().owned_box() {
+ flags |= AdtFlags::IS_BOX;
+ }
+ if Some(did) == tcx.lang_items().manually_drop() {
+ flags |= AdtFlags::IS_MANUALLY_DROP;
+ }
+ if Some(did) == tcx.lang_items().unsafe_cell_type() {
+ flags |= AdtFlags::IS_UNSAFE_CELL;
+ }
+
+ AdtDefData { did, variants, flags, repr }
+ }
+}
+
+impl<'tcx> AdtDef<'tcx> {
+ /// Returns `true` if this is a struct.
+ #[inline]
+ pub fn is_struct(self) -> bool {
+ self.flags().contains(AdtFlags::IS_STRUCT)
+ }
+
+ /// Returns `true` if this is a union.
+ #[inline]
+ pub fn is_union(self) -> bool {
+ self.flags().contains(AdtFlags::IS_UNION)
+ }
+
+ /// Returns `true` if this is an enum.
+ #[inline]
+ pub fn is_enum(self) -> bool {
+ self.flags().contains(AdtFlags::IS_ENUM)
+ }
+
+ /// Returns `true` if the variant list of this ADT is `#[non_exhaustive]`.
+ #[inline]
+ pub fn is_variant_list_non_exhaustive(self) -> bool {
+ self.flags().contains(AdtFlags::IS_VARIANT_LIST_NON_EXHAUSTIVE)
+ }
+
+ /// Returns the kind of the ADT.
+ #[inline]
+ pub fn adt_kind(self) -> AdtKind {
+ if self.is_enum() {
+ AdtKind::Enum
+ } else if self.is_union() {
+ AdtKind::Union
+ } else {
+ AdtKind::Struct
+ }
+ }
+
+ /// Returns a description of this algebraic data type.
+ pub fn descr(self) -> &'static str {
+ match self.adt_kind() {
+ AdtKind::Struct => "struct",
+ AdtKind::Union => "union",
+ AdtKind::Enum => "enum",
+ }
+ }
+
+ /// Returns a description of a variant of this algebraic data type.
+ #[inline]
+ pub fn variant_descr(self) -> &'static str {
+ match self.adt_kind() {
+ AdtKind::Struct => "struct",
+ AdtKind::Union => "union",
+ AdtKind::Enum => "variant",
+ }
+ }
+
+ /// If this function returns `true`, it implies that `is_struct` must return `true`.
+ #[inline]
+ pub fn has_ctor(self) -> bool {
+ self.flags().contains(AdtFlags::HAS_CTOR)
+ }
+
+ /// Returns `true` if this type is `#[fundamental]` for the purposes
+ /// of coherence checking.
+ #[inline]
+ pub fn is_fundamental(self) -> bool {
+ self.flags().contains(AdtFlags::IS_FUNDAMENTAL)
+ }
+
+ /// Returns `true` if this is `PhantomData<T>`.
+ #[inline]
+ pub fn is_phantom_data(self) -> bool {
+ self.flags().contains(AdtFlags::IS_PHANTOM_DATA)
+ }
+
+ /// Returns `true` if this is `Box<T>`.
+ #[inline]
+ pub fn is_box(self) -> bool {
+ self.flags().contains(AdtFlags::IS_BOX)
+ }
+
+ /// Returns `true` if this is `UnsafeCell<T>`.
+ #[inline]
+ pub fn is_unsafe_cell(self) -> bool {
+ self.flags().contains(AdtFlags::IS_UNSAFE_CELL)
+ }
+
+ /// Returns `true` if this is `ManuallyDrop<T>`.
+ #[inline]
+ pub fn is_manually_drop(self) -> bool {
+ self.flags().contains(AdtFlags::IS_MANUALLY_DROP)
+ }
+
+ /// Returns `true` if this type has a destructor.
+ pub fn has_dtor(self, tcx: TyCtxt<'tcx>) -> bool {
+ self.destructor(tcx).is_some()
+ }
+
+ pub fn has_non_const_dtor(self, tcx: TyCtxt<'tcx>) -> bool {
+ matches!(self.destructor(tcx), Some(Destructor { constness: hir::Constness::NotConst, .. }))
+ }
+
+ /// Asserts this is a struct or union and returns its unique variant.
+ pub fn non_enum_variant(self) -> &'tcx VariantDef {
+ assert!(self.is_struct() || self.is_union());
+ &self.variant(VariantIdx::new(0))
+ }
+
+ #[inline]
+ pub fn predicates(self, tcx: TyCtxt<'tcx>) -> GenericPredicates<'tcx> {
+ tcx.predicates_of(self.did())
+ }
+
+ /// Returns an iterator over all fields contained
+ /// by this ADT.
+ #[inline]
+ pub fn all_fields(self) -> impl Iterator<Item = &'tcx FieldDef> + Clone {
+ self.variants().iter().flat_map(|v| v.fields.iter())
+ }
+
+ /// Whether the ADT lacks fields. Note that this includes uninhabited enums,
+ /// e.g., `enum Void {}` is considered payload-free as well.
+ pub fn is_payloadfree(self) -> bool {
+ // Treat the ADT as not payload-free if `arbitrary_enum_discriminant` is used (#88621).
+ // This would disallow the following kind of enum from being cast into an integer.
+ // ```
+ // enum Enum {
+ // Foo() = 1,
+ // Bar{} = 2,
+ // Baz = 3,
+ // }
+ // ```
+ if self
+ .variants()
+ .iter()
+ .any(|v| matches!(v.discr, VariantDiscr::Explicit(_)) && v.ctor_kind != CtorKind::Const)
+ {
+ return false;
+ }
+ self.variants().iter().all(|v| v.fields.is_empty())
+ }
+
+ /// Return a `VariantDef` given a variant id.
+ pub fn variant_with_id(self, vid: DefId) -> &'tcx VariantDef {
+ self.variants().iter().find(|v| v.def_id == vid).expect("variant_with_id: unknown variant")
+ }
+
+ /// Return a `VariantDef` given a constructor id.
+ pub fn variant_with_ctor_id(self, cid: DefId) -> &'tcx VariantDef {
+ self.variants()
+ .iter()
+ .find(|v| v.ctor_def_id == Some(cid))
+ .expect("variant_with_ctor_id: unknown variant")
+ }
+
+ /// Return the index of `VariantDef` given a variant id.
+ pub fn variant_index_with_id(self, vid: DefId) -> VariantIdx {
+ self.variants()
+ .iter_enumerated()
+ .find(|(_, v)| v.def_id == vid)
+ .expect("variant_index_with_id: unknown variant")
+ .0
+ }
+
+ /// Return the index of `VariantDef` given a constructor id.
+ pub fn variant_index_with_ctor_id(self, cid: DefId) -> VariantIdx {
+ self.variants()
+ .iter_enumerated()
+ .find(|(_, v)| v.ctor_def_id == Some(cid))
+ .expect("variant_index_with_ctor_id: unknown variant")
+ .0
+ }
+
+ pub fn variant_of_res(self, res: Res) -> &'tcx VariantDef {
+ match res {
+ Res::Def(DefKind::Variant, vid) => self.variant_with_id(vid),
+ Res::Def(DefKind::Ctor(..), cid) => self.variant_with_ctor_id(cid),
+ Res::Def(DefKind::Struct, _)
+ | Res::Def(DefKind::Union, _)
+ | Res::Def(DefKind::TyAlias, _)
+ | Res::Def(DefKind::AssocTy, _)
+ | Res::SelfTy { .. }
+ | Res::SelfCtor(..) => self.non_enum_variant(),
+ _ => bug!("unexpected res {:?} in variant_of_res", res),
+ }
+ }
+
+ #[inline]
+ pub fn eval_explicit_discr(self, tcx: TyCtxt<'tcx>, expr_did: DefId) -> Option<Discr<'tcx>> {
+ assert!(self.is_enum());
+ let param_env = tcx.param_env(expr_did);
+ let repr_type = self.repr().discr_type();
+ match tcx.const_eval_poly(expr_did) {
+ Ok(val) => {
+ let ty = repr_type.to_ty(tcx);
+ if let Some(b) = val.try_to_bits_for_ty(tcx, param_env, ty) {
+ trace!("discriminants: {} ({:?})", b, repr_type);
+ Some(Discr { val: b, ty })
+ } else {
+ info!("invalid enum discriminant: {:#?}", val);
+ crate::mir::interpret::struct_error(
+ tcx.at(tcx.def_span(expr_did)),
+ "constant evaluation of enum discriminant resulted in non-integer",
+ )
+ .emit();
+ None
+ }
+ }
+ Err(err) => {
+ let msg = match err {
+ ErrorHandled::Reported(_) | ErrorHandled::Linted => {
+ "enum discriminant evaluation failed"
+ }
+ ErrorHandled::TooGeneric => "enum discriminant depends on generics",
+ };
+ tcx.sess.delay_span_bug(tcx.def_span(expr_did), msg);
+ None
+ }
+ }
+ }
+
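+ /// Returns an iterator over `(variant index, discriminant)` pairs, evaluating
+ /// explicit discriminants and incrementing from the previous value otherwise
+ /// (an illustrative example):
+ ///
+ /// ```ignore (illustrative)
+ /// enum E { A, B = 10, C }
+ /// // yields (0, 0), (1, 10), (2, 11)
+ /// ```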
+ #[inline]
+ pub fn discriminants(
+ self,
+ tcx: TyCtxt<'tcx>,
+ ) -> impl Iterator<Item = (VariantIdx, Discr<'tcx>)> + Captures<'tcx> {
+ assert!(self.is_enum());
+ let repr_type = self.repr().discr_type();
+ let initial = repr_type.initial_discriminant(tcx);
+ let mut prev_discr = None::<Discr<'tcx>>;
+ self.variants().iter_enumerated().map(move |(i, v)| {
+ let mut discr = prev_discr.map_or(initial, |d| d.wrap_incr(tcx));
+ if let VariantDiscr::Explicit(expr_did) = v.discr {
+ if let Some(new_discr) = self.eval_explicit_discr(tcx, expr_did) {
+ discr = new_discr;
+ }
+ }
+ prev_discr = Some(discr);
+
+ (i, discr)
+ })
+ }
+
+ #[inline]
+ pub fn variant_range(self) -> Range<VariantIdx> {
+ VariantIdx::new(0)..VariantIdx::new(self.variants().len())
+ }
+
+ /// Computes the discriminant value used by a specific variant.
+ /// Unlike `discriminants`, this is (amortized) constant-time,
+ /// only doing at most one query for evaluating an explicit
+ /// discriminant (the last one before the requested variant),
+ /// assuming there are no constant-evaluation errors there.
+ #[inline]
+ pub fn discriminant_for_variant(
+ self,
+ tcx: TyCtxt<'tcx>,
+ variant_index: VariantIdx,
+ ) -> Discr<'tcx> {
+ assert!(self.is_enum());
+ let (val, offset) = self.discriminant_def_for_variant(variant_index);
+ let explicit_value = val
+ .and_then(|expr_did| self.eval_explicit_discr(tcx, expr_did))
+ .unwrap_or_else(|| self.repr().discr_type().initial_discriminant(tcx));
+ explicit_value.checked_add(tcx, offset as u128).0
+ }
+
+ /// Yields a `DefId` for the explicit discriminant expression, if any, and an
+ /// offset to add to it. If there is no explicit discriminant, the `DefId` is
+ /// `None` and the offset is relative to the type's initial discriminant.
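+ ///
+ /// For example (illustrative), given `enum E { A, B = 10, C }`, the variant
+ /// `C` yields `(Some(def_id_of_the_expr_10), 1)`, while `A` yields `(None, 0)`.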
+ pub fn discriminant_def_for_variant(self, variant_index: VariantIdx) -> (Option<DefId>, u32) {
+ assert!(!self.variants().is_empty());
+ let mut explicit_index = variant_index.as_u32();
+ let expr_did;
+ loop {
+ match self.variant(VariantIdx::from_u32(explicit_index)).discr {
+ ty::VariantDiscr::Relative(0) => {
+ expr_did = None;
+ break;
+ }
+ ty::VariantDiscr::Relative(distance) => {
+ explicit_index -= distance;
+ }
+ ty::VariantDiscr::Explicit(did) => {
+ expr_did = Some(did);
+ break;
+ }
+ }
+ }
+ (expr_did, variant_index.as_u32() - explicit_index)
+ }
+
+ pub fn destructor(self, tcx: TyCtxt<'tcx>) -> Option<Destructor> {
+ tcx.adt_destructor(self.did())
+ }
+
+ /// Returns a list of types such that `Self: Sized` if and only
+ /// if each of those types is `Sized`, or a `TyErr` if this type is recursive.
+ ///
+ /// Oddly enough, checking that the sized-constraint is `Sized` is
+ /// actually more expressive than checking all members:
+ /// the `Sized` trait is inductive, so an associated type that references
+ /// `Self` would prevent its containing ADT from being `Sized`.
+ ///
+ /// Due to normalization being eager, this applies even if
+ /// the associated type is behind a pointer (e.g., issue #31299).
+ pub fn sized_constraint(self, tcx: TyCtxt<'tcx>) -> ty::EarlyBinder<&'tcx [Ty<'tcx>]> {
+ ty::EarlyBinder(tcx.adt_sized_constraint(self.did()).0)
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/assoc.rs b/compiler/rustc_middle/src/ty/assoc.rs
new file mode 100644
index 000000000..c97156ac1
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/assoc.rs
@@ -0,0 +1,195 @@
+pub use self::AssocItemContainer::*;
+
+use crate::ty::{self, DefIdTree};
+use rustc_data_structures::sorted_map::SortedIndexMultiMap;
+use rustc_hir as hir;
+use rustc_hir::def::{DefKind, Namespace};
+use rustc_hir::def_id::DefId;
+use rustc_span::symbol::{Ident, Symbol};
+
+use super::{TyCtxt, Visibility};
+
+#[derive(Clone, Copy, PartialEq, Eq, Debug, HashStable, Hash, Encodable, Decodable)]
+pub enum AssocItemContainer {
+ TraitContainer,
+ ImplContainer,
+}
+
+/// Information about an associated item.
+#[derive(Copy, Clone, Debug, PartialEq, HashStable, Eq, Hash, Encodable, Decodable)]
+pub struct AssocItem {
+ pub def_id: DefId,
+ pub name: Symbol,
+ pub kind: AssocKind,
+ pub container: AssocItemContainer,
+
+ /// If this is an item in an impl of a trait then this is the `DefId` of
+ /// the associated item on the trait that this implements.
+ pub trait_item_def_id: Option<DefId>,
+
+ /// Whether this is a method with an explicit self
+ /// as its first parameter, allowing method calls.
+ pub fn_has_self_parameter: bool,
+}
+
+impl AssocItem {
+ pub fn ident(&self, tcx: TyCtxt<'_>) -> Ident {
+ Ident::new(self.name, tcx.def_ident_span(self.def_id).unwrap())
+ }
+
+ pub fn defaultness(&self, tcx: TyCtxt<'_>) -> hir::Defaultness {
+ tcx.impl_defaultness(self.def_id)
+ }
+
+ #[inline]
+ pub fn visibility(&self, tcx: TyCtxt<'_>) -> Visibility {
+ tcx.visibility(self.def_id)
+ }
+
+ #[inline]
+ pub fn container_id(&self, tcx: TyCtxt<'_>) -> DefId {
+ tcx.parent(self.def_id)
+ }
+
+ #[inline]
+ pub fn trait_container(&self, tcx: TyCtxt<'_>) -> Option<DefId> {
+ match self.container {
+ AssocItemContainer::ImplContainer => None,
+ AssocItemContainer::TraitContainer => Some(tcx.parent(self.def_id)),
+ }
+ }
+
+ #[inline]
+ pub fn impl_container(&self, tcx: TyCtxt<'_>) -> Option<DefId> {
+ match self.container {
+ AssocItemContainer::ImplContainer => Some(tcx.parent(self.def_id)),
+ AssocItemContainer::TraitContainer => None,
+ }
+ }
+
+ pub fn signature(&self, tcx: TyCtxt<'_>) -> String {
+ match self.kind {
+ ty::AssocKind::Fn => {
+ // We skip the binder here because the binder would deanonymize all
+ // late-bound regions, and we don't want method signatures to show up
+ // `as for<'r> fn(&'r MyType)`. Pretty-printing handles late-bound
+ // regions just fine, showing `fn(&MyType)`.
+ tcx.fn_sig(self.def_id).skip_binder().to_string()
+ }
+ ty::AssocKind::Type => format!("type {};", self.name),
+ ty::AssocKind::Const => {
+ format!("const {}: {:?};", self.name, tcx.type_of(self.def_id))
+ }
+ }
+ }
+}
+
+#[derive(Copy, Clone, PartialEq, Debug, HashStable, Eq, Hash, Encodable, Decodable)]
+pub enum AssocKind {
+ Const,
+ Fn,
+ Type,
+}
+
+impl AssocKind {
+ pub fn namespace(&self) -> Namespace {
+ match *self {
+ ty::AssocKind::Type => Namespace::TypeNS,
+ ty::AssocKind::Const | ty::AssocKind::Fn => Namespace::ValueNS,
+ }
+ }
+
+ pub fn as_def_kind(&self) -> DefKind {
+ match self {
+ AssocKind::Const => DefKind::AssocConst,
+ AssocKind::Fn => DefKind::AssocFn,
+ AssocKind::Type => DefKind::AssocTy,
+ }
+ }
+}
+
+impl std::fmt::Display for AssocKind {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ match self {
+ AssocKind::Fn => write!(f, "method"),
+ AssocKind::Const => write!(f, "associated const"),
+ AssocKind::Type => write!(f, "associated type"),
+ }
+ }
+}
+
+/// A list of `ty::AssocItem`s in definition order that allows for efficient lookup by name.
+///
+/// When doing lookup by name, we try to postpone hygienic comparison for as long as possible since
+/// it is relatively expensive. Instead, items are indexed by `Symbol` and hygienic comparison is
+/// done only on items with the same name.
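+///
+/// A typical lookup (illustrative sketch, assuming a trait's `DefId` is at hand):
+///
+/// ```ignore (illustrative)
+/// let items = tcx.associated_items(trait_def_id);
+/// for item in items.in_definition_order() {
+///     // inspect each `ty::AssocItem`
+/// }
+/// ```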
+#[derive(Debug, Clone, PartialEq, HashStable)]
+pub struct AssocItems<'tcx> {
+ pub(super) items: SortedIndexMultiMap<u32, Symbol, &'tcx ty::AssocItem>,
+}
+
+impl<'tcx> AssocItems<'tcx> {
+ /// Constructs an `AssociatedItems` map from a series of `ty::AssocItem`s in definition order.
+ pub fn new(items_in_def_order: impl IntoIterator<Item = &'tcx ty::AssocItem>) -> Self {
+ let items = items_in_def_order.into_iter().map(|item| (item.name, item)).collect();
+ AssocItems { items }
+ }
+
+ /// Returns a slice of associated items in the order they were defined.
+ ///
+ /// New code should avoid relying on definition order. If you need a particular associated item
+ /// for a known trait, make that trait a lang item instead of indexing this array.
+ pub fn in_definition_order(&self) -> impl '_ + Iterator<Item = &ty::AssocItem> {
+ self.items.iter().map(|(_, v)| *v)
+ }
+
+ pub fn len(&self) -> usize {
+ self.items.len()
+ }
+
+ /// Returns an iterator over all associated items with the given name, ignoring hygiene.
+ pub fn filter_by_name_unhygienic(
+ &self,
+ name: Symbol,
+ ) -> impl '_ + Iterator<Item = &ty::AssocItem> {
+ self.items.get_by_key(name).copied()
+ }
+
+ /// Returns the associated item with the given name and `AssocKind`, if one exists.
+ pub fn find_by_name_and_kind(
+ &self,
+ tcx: TyCtxt<'_>,
+ ident: Ident,
+ kind: AssocKind,
+ parent_def_id: DefId,
+ ) -> Option<&ty::AssocItem> {
+ self.filter_by_name_unhygienic(ident.name)
+ .filter(|item| item.kind == kind)
+ .find(|item| tcx.hygienic_eq(ident, item.ident(tcx), parent_def_id))
+ }
+
+ /// Returns the associated item with the given name and any of `AssocKind`, if one exists.
+ pub fn find_by_name_and_kinds(
+ &self,
+ tcx: TyCtxt<'_>,
+ ident: Ident,
+ // Sorted in order of what kinds to look at
+ kinds: &[AssocKind],
+ parent_def_id: DefId,
+ ) -> Option<&ty::AssocItem> {
+ kinds.iter().find_map(|kind| self.find_by_name_and_kind(tcx, ident, *kind, parent_def_id))
+ }
+
+ /// Returns the associated item with the given name in the given `Namespace`, if one exists.
+ pub fn find_by_name_and_namespace(
+ &self,
+ tcx: TyCtxt<'_>,
+ ident: Ident,
+ ns: Namespace,
+ parent_def_id: DefId,
+ ) -> Option<&ty::AssocItem> {
+ self.filter_by_name_unhygienic(ident.name)
+ .filter(|item| item.kind.namespace() == ns)
+ .find(|item| tcx.hygienic_eq(ident, item.ident(tcx), parent_def_id))
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/binding.rs b/compiler/rustc_middle/src/ty/binding.rs
new file mode 100644
index 000000000..3d65429f2
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/binding.rs
@@ -0,0 +1,22 @@
+use rustc_hir::BindingAnnotation;
+use rustc_hir::BindingAnnotation::*;
+use rustc_hir::Mutability;
+
+#[derive(Clone, PartialEq, TyEncodable, TyDecodable, Debug, Copy, HashStable)]
+pub enum BindingMode {
+ BindByReference(Mutability),
+ BindByValue(Mutability),
+}
+
+TrivialTypeTraversalAndLiftImpls! { BindingMode, }
+
+impl BindingMode {
+ pub fn convert(ba: BindingAnnotation) -> BindingMode {
+ match ba {
+ Unannotated => BindingMode::BindByValue(Mutability::Not),
+ Mutable => BindingMode::BindByValue(Mutability::Mut),
+ Ref => BindingMode::BindByReference(Mutability::Not),
+ RefMut => BindingMode::BindByReference(Mutability::Mut),
+ }
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/cast.rs b/compiler/rustc_middle/src/ty/cast.rs
new file mode 100644
index 000000000..c4b743dd4
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/cast.rs
@@ -0,0 +1,73 @@
+// Helpers for handling cast expressions, used in both
+// typeck and codegen.
+
+use crate::ty::{self, Ty};
+
+use rustc_macros::HashStable;
+
+/// Types that are represented as ints.
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum IntTy {
+ U(ty::UintTy),
+ I,
+ CEnum,
+ Bool,
+ Char,
+}
+
+impl IntTy {
+ pub fn is_signed(self) -> bool {
+ matches!(self, Self::I)
+ }
+}
+
+// Valid types for the result of a non-coercion cast
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum CastTy<'tcx> {
+ /// Various types that are represented as ints and handled mostly
+ /// in the same way, merged for easier matching.
+ Int(IntTy),
+ /// Floating-point types.
+ Float,
+ /// Function pointers.
+ FnPtr,
+ /// Raw pointers.
+ Ptr(ty::TypeAndMut<'tcx>),
+}
+
+/// Cast Kind. See [RFC 401](https://rust-lang.github.io/rfcs/0401-coercions.html)
+/// (or librustc_typeck/check/cast.rs).
+#[derive(Copy, Clone, Debug, TyEncodable, TyDecodable, HashStable)]
+pub enum CastKind {
+ CoercionCast,
+ PtrPtrCast,
+ PtrAddrCast,
+ AddrPtrCast,
+ NumericCast,
+ EnumCast,
+ PrimIntCast,
+ U8CharCast,
+ ArrayPtrCast,
+ FnPtrPtrCast,
+ FnPtrAddrCast,
+}
+
+impl<'tcx> CastTy<'tcx> {
+ /// Returns `Some` for integral/pointer casts.
+ /// Casts like unsizing casts will return `None`.
+ pub fn from_ty(t: Ty<'tcx>) -> Option<CastTy<'tcx>> {
+ match *t.kind() {
+ ty::Bool => Some(CastTy::Int(IntTy::Bool)),
+ ty::Char => Some(CastTy::Int(IntTy::Char)),
+ ty::Int(_) => Some(CastTy::Int(IntTy::I)),
+ ty::Infer(ty::InferTy::IntVar(_)) => Some(CastTy::Int(IntTy::I)),
+ ty::Infer(ty::InferTy::FloatVar(_)) => Some(CastTy::Float),
+ ty::Uint(u) => Some(CastTy::Int(IntTy::U(u))),
+ ty::Float(_) => Some(CastTy::Float),
+ ty::Adt(d, _) if d.is_enum() && d.is_payloadfree() => Some(CastTy::Int(IntTy::CEnum)),
+ ty::RawPtr(mt) => Some(CastTy::Ptr(mt)),
+ ty::FnPtr(..) => Some(CastTy::FnPtr),
+ _ => None,
+ }
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/closure.rs b/compiler/rustc_middle/src/ty/closure.rs
new file mode 100644
index 000000000..0d6c26a58
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/closure.rs
@@ -0,0 +1,454 @@
+use crate::hir::place::{
+ Place as HirPlace, PlaceBase as HirPlaceBase, ProjectionKind as HirProjectionKind,
+};
+use crate::{mir, ty};
+
+use std::fmt::Write;
+
+use rustc_data_structures::fx::{FxHashMap, FxIndexMap};
+use rustc_hir as hir;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_span::{Span, Symbol};
+
+use super::{Ty, TyCtxt};
+
+use self::BorrowKind::*;
+
+// Captures are represented using fields inside a structure.
+// This represents accessing self in the closure structure
+pub const CAPTURE_STRUCT_LOCAL: mir::Local = mir::Local::from_u32(1);
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, TyEncodable, TyDecodable, HashStable)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub struct UpvarPath {
+ pub hir_id: hir::HirId,
+}
+
+/// Upvars do not get their own `NodeId`. Instead, we use the pair of
+/// the original var ID (that is, the root variable that is referenced
+/// by the upvar) and the ID of the closure expression.
+#[derive(Clone, Copy, PartialEq, Eq, Hash, TyEncodable, TyDecodable, HashStable)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub struct UpvarId {
+ pub var_path: UpvarPath,
+ pub closure_expr_id: LocalDefId,
+}
+
+impl UpvarId {
+ pub fn new(var_hir_id: hir::HirId, closure_def_id: LocalDefId) -> UpvarId {
+ UpvarId { var_path: UpvarPath { hir_id: var_hir_id }, closure_expr_id: closure_def_id }
+ }
+}
+
+/// Information describing the capture of an upvar. This is computed
+/// during `typeck`, specifically by `regionck`.
+#[derive(PartialEq, Clone, Debug, Copy, TyEncodable, TyDecodable, HashStable)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub enum UpvarCapture {
+ /// Upvar is captured by value. This is always true when the
+ /// closure is labeled `move`, but can also be true in other cases
+ /// depending on inference.
+ ByValue,
+
+ /// Upvar is captured by reference.
+ ByRef(BorrowKind),
+}
+
+pub type UpvarListMap = FxHashMap<DefId, FxIndexMap<hir::HirId, UpvarId>>;
+pub type UpvarCaptureMap = FxHashMap<UpvarId, UpvarCapture>;
+
+/// Given the closure `DefId`, this map provides a map of root variables to the minimum
+/// set of `CapturedPlace`s that need to be tracked to support all captures of that closure.
+pub type MinCaptureInformationMap<'tcx> = FxHashMap<LocalDefId, RootVariableMinCaptureList<'tcx>>;
+
+/// Part of `MinCaptureInformationMap`; maps a root variable to the list of `CapturedPlace`s.
+/// Used to track the minimum set of `Place`s that need to be captured to support all
+/// `Place`s captured by the closure starting at a given root variable.
+///
+/// This provides a convenient and quick way of checking if a variable being used within
+/// a closure is a capture of a local variable.
+pub type RootVariableMinCaptureList<'tcx> = FxIndexMap<hir::HirId, MinCaptureList<'tcx>>;
+
+/// Part of `MinCaptureInformationMap`; List of `CapturePlace`s.
+pub type MinCaptureList<'tcx> = Vec<CapturedPlace<'tcx>>;
+
+/// Represents the various closure traits in the language. This
+/// will determine the type of the environment (`self`, in the
+/// desugaring) argument that the closure expects.
+///
+/// You can get the environment type of a closure using
+/// `tcx.closure_env_ty()`.
+#[derive(Clone, Copy, PartialOrd, Ord, PartialEq, Eq, Hash, Debug, TyEncodable, TyDecodable)]
+#[derive(HashStable)]
+pub enum ClosureKind {
+ // Warning: Ordering is significant here! The ordering is chosen
+ // because `Fn` is a subtrait of `FnMut`, which in turn is a subtrait of
+ // `FnOnce`; hence we order the variants so that Fn < FnMut < FnOnce.
+ Fn,
+ FnMut,
+ FnOnce,
+}
+
+impl<'tcx> ClosureKind {
+ /// This is the initial value used when doing upvar inference.
+ pub const LATTICE_BOTTOM: ClosureKind = ClosureKind::Fn;
+
+ /// Returns `true` if a type that impls this closure kind
+ /// must also implement `other`.
+ pub fn extends(self, other: ty::ClosureKind) -> bool {
+ matches!(
+ (self, other),
+ (ClosureKind::Fn, ClosureKind::Fn)
+ | (ClosureKind::Fn, ClosureKind::FnMut)
+ | (ClosureKind::Fn, ClosureKind::FnOnce)
+ | (ClosureKind::FnMut, ClosureKind::FnMut)
+ | (ClosureKind::FnMut, ClosureKind::FnOnce)
+ | (ClosureKind::FnOnce, ClosureKind::FnOnce)
+ )
+ }
+
+ /// Returns the representative scalar type for this closure kind.
+ /// See `Ty::to_opt_closure_kind` for more details.
+ pub fn to_ty(self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
+ match self {
+ ClosureKind::Fn => tcx.types.i8,
+ ClosureKind::FnMut => tcx.types.i16,
+ ClosureKind::FnOnce => tcx.types.i32,
+ }
+ }
+
+ pub fn from_def_id(tcx: TyCtxt<'_>, def_id: DefId) -> Option<ClosureKind> {
+ if Some(def_id) == tcx.lang_items().fn_once_trait() {
+ Some(ClosureKind::FnOnce)
+ } else if Some(def_id) == tcx.lang_items().fn_mut_trait() {
+ Some(ClosureKind::FnMut)
+ } else if Some(def_id) == tcx.lang_items().fn_trait() {
+ Some(ClosureKind::Fn)
+ } else {
+ None
+ }
+ }
+
+ pub fn to_def_id(&self, tcx: TyCtxt<'_>) -> DefId {
+ match self {
+ ClosureKind::Fn => tcx.lang_items().fn_trait().unwrap(),
+ ClosureKind::FnMut => tcx.lang_items().fn_mut_trait().unwrap(),
+ ClosureKind::FnOnce => tcx.lang_items().fn_once_trait().unwrap(),
+ }
+ }
+}
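
The derived `PartialOrd`/`Ord` above depend on declaration order, which is what the warning comment refers to: under that order, `extends` coincides with `<=`. A minimal standalone sketch of that equivalence (illustrative names, not the compiler's types):

#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
enum Kind { Fn, FnMut, FnOnce } // declaration order gives Fn < FnMut < FnOnce

fn extends(a: Kind, b: Kind) -> bool {
    a <= b // holds for exactly the pairs listed in the `matches!` above
}

fn main() {
    assert!(extends(Kind::Fn, Kind::FnOnce));
    assert!(extends(Kind::FnMut, Kind::FnOnce));
    assert!(!extends(Kind::FnOnce, Kind::FnMut));
}
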
+
+/// A composite describing a `Place` that is captured by a closure.
+#[derive(PartialEq, Clone, Debug, TyEncodable, TyDecodable, HashStable)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub struct CapturedPlace<'tcx> {
+ /// The `Place` that is captured.
+ pub place: HirPlace<'tcx>,
+
+ /// `CaptureKind` and the expression(s) that resulted in the capture of `place`.
+ pub info: CaptureInfo,
+
+ /// Represents if `place` can be mutated or not.
+ pub mutability: hir::Mutability,
+
+ /// Region of the resulting reference if the upvar is captured by ref.
+ pub region: Option<ty::Region<'tcx>>,
+}
+
+impl<'tcx> CapturedPlace<'tcx> {
+ pub fn to_string(&self, tcx: TyCtxt<'tcx>) -> String {
+ place_to_string_for_capture(tcx, &self.place)
+ }
+
+ /// Returns a symbol of the captured upvar, which looks like `name__field1__field2`.
+ fn to_symbol(&self, tcx: TyCtxt<'tcx>) -> Symbol {
+ let hir_id = match self.place.base {
+ HirPlaceBase::Upvar(upvar_id) => upvar_id.var_path.hir_id,
+ base => bug!("Expected an upvar, found {:?}", base),
+ };
+ let mut symbol = tcx.hir().name(hir_id).as_str().to_string();
+
+ let mut ty = self.place.base_ty;
+ for proj in self.place.projections.iter() {
+ match proj.kind {
+ HirProjectionKind::Field(idx, variant) => match ty.kind() {
+ ty::Tuple(_) => write!(&mut symbol, "__{}", idx).unwrap(),
+ ty::Adt(def, ..) => {
+ write!(
+ &mut symbol,
+ "__{}",
+ def.variant(variant).fields[idx as usize].name.as_str(),
+ )
+ .unwrap();
+ }
+ ty => {
+ span_bug!(
+ self.get_capture_kind_span(tcx),
+ "Unexpected type {:?} for `Field` projection",
+ ty
+ )
+ }
+ },
+
+ // Ignore derefs for now, as they are likely caused by
+ // autoderefs that don't appear in the original code.
+ HirProjectionKind::Deref => {}
+ proj => bug!("Unexpected projection {:?} in captured place", proj),
+ }
+ ty = proj.ty;
+ }
+
+ Symbol::intern(&symbol)
+ }
+
+ /// Returns the hir-id of the root variable for the captured place.
+ /// e.g., if `a.b.c` was captured, this returns the hir-id for `a`.
+ pub fn get_root_variable(&self) -> hir::HirId {
+ match self.place.base {
+ HirPlaceBase::Upvar(upvar_id) => upvar_id.var_path.hir_id,
+ base => bug!("Expected upvar, found={:?}", base),
+ }
+ }
+
+ /// Returns the `LocalDefId` of the closure that captured this Place
+ pub fn get_closure_local_def_id(&self) -> LocalDefId {
+ match self.place.base {
+ HirPlaceBase::Upvar(upvar_id) => upvar_id.closure_expr_id,
+ base => bug!("expected upvar, found={:?}", base),
+ }
+ }
+
+ /// Returns the span pointing to the use that resulted in selecting the captured path.
+ pub fn get_path_span(&self, tcx: TyCtxt<'tcx>) -> Span {
+ if let Some(path_expr_id) = self.info.path_expr_id {
+ tcx.hir().span(path_expr_id)
+ } else if let Some(capture_kind_expr_id) = self.info.capture_kind_expr_id {
+ tcx.hir().span(capture_kind_expr_id)
+ } else {
+ // Fall back on the upvars mentioned if neither a path nor a capture-kind expr id was recorded.
+
+ // Safe to unwrap since we know this place is captured by the closure, so the closure must have upvars.
+ tcx.upvars_mentioned(self.get_closure_local_def_id()).unwrap()
+ [&self.get_root_variable()]
+ .span
+ }
+ }
+
+ /// Returns the span pointing to the use that resulted in selecting the current capture kind.
+ pub fn get_capture_kind_span(&self, tcx: TyCtxt<'tcx>) -> Span {
+ if let Some(capture_kind_expr_id) = self.info.capture_kind_expr_id {
+ tcx.hir().span(capture_kind_expr_id)
+ } else if let Some(path_expr_id) = self.info.path_expr_id {
+ tcx.hir().span(path_expr_id)
+ } else {
+ // Fall back on the upvars mentioned if neither a path nor a capture-kind expr id was recorded.
+
+ // Safe to unwrap since we know this place is captured by the closure, so the closure must have upvars.
+ tcx.upvars_mentioned(self.get_closure_local_def_id()).unwrap()
+ [&self.get_root_variable()]
+ .span
+ }
+ }
+}
+
+fn symbols_for_closure_captures<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ def_id: (LocalDefId, LocalDefId),
+) -> Vec<Symbol> {
+ let typeck_results = tcx.typeck(def_id.0);
+ let captures = typeck_results.closure_min_captures_flattened(def_id.1);
+ captures.into_iter().map(|captured_place| captured_place.to_symbol(tcx)).collect()
+}
+
+/// Returns true if `proj_possible_ancestor` represents an ancestor path
+/// of `proj_capture`, or if `proj_possible_ancestor` is the same as `proj_capture`,
+/// assuming they both start off of the same root variable.
+///
+/// **Note:** It's the caller's responsibility to ensure that both lists of projections
+/// start off of the same root variable.
+///
+/// Eg: 1. `foo.x`, which is represented using `projections=[Field(x)]`, is an ancestor of
+/// `foo.x.y`, which is represented using `projections=[Field(x), Field(y)]`.
+/// Note both `foo.x` and `foo.x.y` start off of the same root variable `foo`.
+/// 2. Since we only look at the projections, this function will report `bar.x` as a valid
+/// ancestor of `foo.x.y`. It's the caller's responsibility to ensure that both projection
+/// lists are being applied to the same root variable.
+pub fn is_ancestor_or_same_capture(
+ proj_possible_ancestor: &[HirProjectionKind],
+ proj_capture: &[HirProjectionKind],
+) -> bool {
+ // We want to make sure `is_ancestor_or_same_capture("x.0.0", "x.0")` returns false.
+ // Therefore we can't just check whether all projections are the same in the zipped iterator below.
+ if proj_possible_ancestor.len() > proj_capture.len() {
+ return false;
+ }
+
+ proj_possible_ancestor.iter().zip(proj_capture).all(|(a, b)| a == b)
+}
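
In other words, the check is a plain prefix test over the projection lists. A standalone sketch with strings standing in for `HirProjectionKind` values (illustrative only):

fn is_ancestor_or_same(ancestor: &[&str], capture: &[&str]) -> bool {
    // An ancestor must be no longer than the capture and agree on every step.
    ancestor.len() <= capture.len()
        && ancestor.iter().zip(capture).all(|(a, b)| a == b)
}

fn main() {
    // `foo.x` is an ancestor of `foo.x.y` ...
    assert!(is_ancestor_or_same(&["x"], &["x", "y"]));
    // ... but `x.0.0` is not an ancestor of `x.0`.
    assert!(!is_ancestor_or_same(&["0", "0"], &["0"]));
}
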
+
+/// Part of `MinCaptureInformationMap`; describes the capture kind (&, &mut, move)
+/// for a particular capture as well as identifying the part of the source code
+/// that triggered this capture to occur.
+#[derive(PartialEq, Clone, Debug, Copy, TyEncodable, TyDecodable, HashStable)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub struct CaptureInfo {
+ /// Expr Id pointing to the use that resulted in selecting the current capture kind
+ ///
+ /// Eg:
+ /// ```rust,no_run
+ /// let mut t = (0,1);
+ ///
+ /// let c = || {
+ /// println!("{t:?}"); // L1
+ /// t.1 = 4; // L2
+ /// };
+ /// ```
+ /// `capture_kind_expr_id` will point to the use on L2 and `path_expr_id` will point to the
+ /// use on L1.
+ ///
+ /// If the user doesn't enable the feature `capture_disjoint_fields` (RFC 2229), it is
+ /// possible that we don't see the use of a particular place, resulting in `capture_kind_expr_id`
+ /// being `None`. In such a case we fall back on `upvars_mentioned` for the span.
+ ///
+ /// Eg:
+ /// ```rust,no_run
+ /// let x = 5;
+ ///
+ /// let c = || {
+ /// let _ = x;
+ /// };
+ /// ```
+ ///
+ /// In this example, if `capture_disjoint_fields` is **not** set, then x will be captured,
+ /// but we won't see it being used during capture analysis, since it's essentially a discard.
+ pub capture_kind_expr_id: Option<hir::HirId>,
+ /// Expr Id pointing to the use that resulted in the corresponding place being captured
+ ///
+ /// See `capture_kind_expr_id` for an example.
+ pub path_expr_id: Option<hir::HirId>,
+
+ /// Capture mode that was selected
+ pub capture_kind: UpvarCapture,
+}
+
+pub fn place_to_string_for_capture<'tcx>(tcx: TyCtxt<'tcx>, place: &HirPlace<'tcx>) -> String {
+ let mut curr_string: String = match place.base {
+ HirPlaceBase::Upvar(upvar_id) => tcx.hir().name(upvar_id.var_path.hir_id).to_string(),
+ _ => bug!("Capture_information should only contain upvars"),
+ };
+
+ for (i, proj) in place.projections.iter().enumerate() {
+ match proj.kind {
+ HirProjectionKind::Deref => {
+ curr_string = format!("*{}", curr_string);
+ }
+ HirProjectionKind::Field(idx, variant) => match place.ty_before_projection(i).kind() {
+ ty::Adt(def, ..) => {
+ curr_string = format!(
+ "{}.{}",
+ curr_string,
+ def.variant(variant).fields[idx as usize].name.as_str()
+ );
+ }
+ ty::Tuple(_) => {
+ curr_string = format!("{}.{}", curr_string, idx);
+ }
+ _ => {
+ bug!(
+ "Field projection applied to a type other than Adt or Tuple: {:?}.",
+ place.ty_before_projection(i).kind()
+ )
+ }
+ },
+ proj => bug!("{:?} unexpected because it isn't captured", proj),
+ }
+ }
+
+ curr_string
+}
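
The loop above renders a captured place back into source-like text: a deref wraps the current string in `*...` and a field projection appends `.name`. A standalone model of that string building (our own types, for illustration):

enum Proj<'a> { Deref, Field(&'a str) }

fn render(base: &str, projs: &[Proj<'_>]) -> String {
    // Fold left to right, exactly as the projection loop above does.
    projs.iter().fold(base.to_string(), |s, p| match p {
        Proj::Deref => format!("*{}", s),
        Proj::Field(name) => format!("{}.{}", s, name),
    })
}

fn main() {
    // Capturing the place `*(p.x)` renders as "*p.x".
    assert_eq!(render("p", &[Proj::Field("x"), Proj::Deref]), "*p.x");
}
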
+
+#[derive(Clone, PartialEq, Debug, TyEncodable, TyDecodable, Copy, HashStable)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub enum BorrowKind {
+ /// Data must be immutable and is aliasable.
+ ImmBorrow,
+
+ /// Data must be immutable but not aliasable. This kind of borrow
+ /// cannot currently be expressed by the user and is used only in
+ /// implicit closure bindings. It is needed when the closure
+ /// is borrowing or mutating a mutable referent, e.g.:
+ ///
+ /// ```
+ /// let mut z = 3;
+ /// let x: &mut isize = &mut z;
+ /// let y = || *x += 5;
+ /// ```
+ ///
+ /// If we were to try to translate this closure into a more explicit
+ /// form, we'd encounter an error with the code as written:
+ ///
+ /// ```compile_fail,E0594
+ /// struct Env<'a> { x: &'a &'a mut isize }
+ /// let mut z = 3;
+ /// let x: &mut isize = &mut z;
+ /// let y = (&mut Env { x: &x }, fn_ptr); // Closure is pair of env and fn
+ /// fn fn_ptr(env: &mut Env) { **env.x += 5; }
+ /// ```
+ ///
+ /// This is then illegal because you cannot mutate a `&mut` found
+ /// in an aliasable location. To solve, you'd have to translate with
+ /// an `&mut` borrow:
+ ///
+ /// ```compile_fail,E0596
+ /// struct Env<'a> { x: &'a mut &'a mut isize }
+ /// let mut z = 3;
+ /// let x: &mut isize = &mut z;
+ /// let y = (&mut Env { x: &mut x }, fn_ptr); // changed from &x to &mut x
+ /// fn fn_ptr(env: &mut Env) { **env.x += 5; }
+ /// ```
+ ///
+ /// Now the assignment to `**env.x` is legal, but creating a
+ /// mutable pointer to `x` is not because `x` is not mutable. We
+ /// could fix this by declaring `x` as `let mut x`. This is ok in
+ /// user code, if awkward, but extra weird for closures, since the
+ /// borrow is hidden.
+ ///
+ /// So we introduce a "unique imm" borrow -- the referent is
+ /// immutable, but not aliasable. This solves the problem. For
+ /// simplicity, we don't give users a way to express this
+ /// borrow; it's just used when translating closures.
+ UniqueImmBorrow,
+
+ /// Data is mutable and not aliasable.
+ MutBorrow,
+}
+
+impl BorrowKind {
+ pub fn from_mutbl(m: hir::Mutability) -> BorrowKind {
+ match m {
+ hir::Mutability::Mut => MutBorrow,
+ hir::Mutability::Not => ImmBorrow,
+ }
+ }
+
+ /// Returns a mutability `m` such that an `&m T` pointer could be used to obtain this borrow
+ /// kind. Because borrow kinds are richer than mutabilities, we sometimes have to pick a
+ /// mutability that is stronger than necessary so that it at least *would permit* the borrow in
+ /// question.
+ pub fn to_mutbl_lossy(self) -> hir::Mutability {
+ match self {
+ MutBorrow => hir::Mutability::Mut,
+ ImmBorrow => hir::Mutability::Not,
+
+ // We have no type corresponding to a unique imm borrow, so
+ // use `&mut`. It gives all the capabilities of a `&uniq`
+ // and hence is a safe "over approximation".
+ UniqueImmBorrow => hir::Mutability::Mut,
+ }
+ }
+}
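
Because `UniqueImmBorrow` has no surface-syntax equivalent, `to_mutbl_lossy` maps it to `Mut`, so a round trip through `from_mutbl` strengthens it to `MutBorrow`. A small standalone model of that lossy round trip (illustrative enums, not the compiler's):

#[derive(Debug, PartialEq)]
enum Mutbl { Not, Mut }
#[derive(Debug, PartialEq)]
enum Bk { Imm, UniqueImm, Mut }

fn to_mutbl_lossy(b: Bk) -> Mutbl {
    match b { Bk::Imm => Mutbl::Not, Bk::UniqueImm | Bk::Mut => Mutbl::Mut }
}
fn from_mutbl(m: Mutbl) -> Bk {
    match m { Mutbl::Not => Bk::Imm, Mutbl::Mut => Bk::Mut }
}

fn main() {
    // The round trip over-approximates: UniqueImm comes back as Mut.
    assert_eq!(from_mutbl(to_mutbl_lossy(Bk::UniqueImm)), Bk::Mut);
}
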
+
+pub fn provide(providers: &mut ty::query::Providers) {
+ *providers = ty::query::Providers { symbols_for_closure_captures, ..*providers }
+}
diff --git a/compiler/rustc_middle/src/ty/codec.rs b/compiler/rustc_middle/src/ty/codec.rs
new file mode 100644
index 000000000..51137c526
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/codec.rs
@@ -0,0 +1,527 @@
+//! This module contains some shared code for encoding and decoding various
+//! things from the `ty` module, and in particular implements support for
+//! "shorthands" which allow to have pointers back into the already encoded
+//! stream instead of re-encoding the same thing twice.
+//!
+//! The functionality in here is shared between persisting to crate metadata and
+//! persisting to incr. comp. caches.
+
+use crate::arena::ArenaAllocatable;
+use crate::infer::canonical::{CanonicalVarInfo, CanonicalVarInfos};
+use crate::mir::{
+ self,
+ interpret::{AllocId, ConstAllocation},
+};
+use crate::traits;
+use crate::ty::subst::SubstsRef;
+use crate::ty::{self, AdtDef, Ty};
+use rustc_data_structures::fx::FxHashMap;
+use rustc_middle::ty::TyCtxt;
+use rustc_serialize::{Decodable, Encodable};
+use rustc_span::Span;
+pub use rustc_type_ir::{TyDecoder, TyEncoder};
+use std::hash::Hash;
+use std::intrinsics;
+use std::marker::DiscriminantKind;
+
+/// The shorthand encoding uses an enum's variant index (a `usize`)
+/// offset by this value, so it never matches a real variant index.
+/// This offset is also chosen so that the first byte is never < 0x80.
+pub const SHORTHAND_OFFSET: usize = 0x80;
+
+pub trait EncodableWithShorthand<E: TyEncoder>: Copy + Eq + Hash {
+ type Variant: Encodable<E>;
+ fn variant(&self) -> &Self::Variant;
+}
+
+#[allow(rustc::usage_of_ty_tykind)]
+impl<'tcx, E: TyEncoder<I = TyCtxt<'tcx>>> EncodableWithShorthand<E> for Ty<'tcx> {
+ type Variant = ty::TyKind<'tcx>;
+
+ #[inline]
+ fn variant(&self) -> &Self::Variant {
+ self.kind()
+ }
+}
+
+impl<'tcx, E: TyEncoder<I = TyCtxt<'tcx>>> EncodableWithShorthand<E> for ty::PredicateKind<'tcx> {
+ type Variant = ty::PredicateKind<'tcx>;
+
+ #[inline]
+ fn variant(&self) -> &Self::Variant {
+ self
+ }
+}
+
+/// Trait for decoding to a reference.
+///
+/// This is a separate trait from `Decodable` so that we can implement it for
+/// upstream types, such as `FxHashSet`.
+///
+/// The `TyDecodable` derive macro will use this trait for fields that are
+/// references (and don't use a type alias to hide that).
+///
+/// `Decodable` can still be implemented in cases where `Decodable` is required
+/// by a trait bound.
+pub trait RefDecodable<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> {
+ fn decode(d: &mut D) -> &'tcx Self;
+}
+
+/// Encode the given value or a previously cached shorthand.
+pub fn encode_with_shorthand<'tcx, E, T, M>(encoder: &mut E, value: &T, cache: M)
+where
+ E: TyEncoder<I = TyCtxt<'tcx>>,
+ M: for<'b> Fn(&'b mut E) -> &'b mut FxHashMap<T, usize>,
+ T: EncodableWithShorthand<E>,
+ // The discriminant and shorthand must have the same size.
+ T::Variant: DiscriminantKind<Discriminant = isize>,
+{
+ let existing_shorthand = cache(encoder).get(value).copied();
+ if let Some(shorthand) = existing_shorthand {
+ encoder.emit_usize(shorthand);
+ return;
+ }
+
+ let variant = value.variant();
+
+ let start = encoder.position();
+ variant.encode(encoder);
+ let len = encoder.position() - start;
+
+ // The shorthand encoding uses the same usize as the
+ // discriminant, with an offset so they can't conflict.
+ let discriminant = intrinsics::discriminant_value(variant);
+ assert!(SHORTHAND_OFFSET > discriminant as usize);
+
+ let shorthand = start + SHORTHAND_OFFSET;
+
+ // Get the number of bits that leb128 could fit
+ // in the same space as the fully encoded type.
+ let leb128_bits = len * 7;
+
+ // Check that the shorthand is not longer than the
+ // full encoding itself, i.e., it's an obvious win.
+ if leb128_bits >= 64 || (shorthand as u64) < (1 << leb128_bits) {
+ cache(encoder).insert(*value, shorthand);
+ }
+}
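
The final condition above asks whether the leb128-encoded shorthand could ever be longer than the `len` bytes of the full encoding (leb128 carries 7 payload bits per byte). A standalone sketch of just that arithmetic, mirroring the constants used here:

const SHORTHAND_OFFSET: usize = 0x80;

// A shorthand is cached only if its leb128 encoding cannot exceed the
// `len` bytes that the full encoding occupied.
fn worth_caching(start: usize, len: usize) -> bool {
    let shorthand = start + SHORTHAND_OFFSET;
    let leb128_bits = len * 7;
    leb128_bits >= 64 || (shorthand as u64) < (1u64 << leb128_bits)
}

fn main() {
    // A 10-byte full encoding leaves room for any u64 shorthand ...
    assert!(worth_caching(1000, 10));
    // ... but a 1-byte encoding never wins: the smallest shorthand (0x80)
    // already needs two leb128 bytes.
    assert!(!worth_caching(0, 1));
}
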
+
+impl<'tcx, E: TyEncoder<I = TyCtxt<'tcx>>> Encodable<E> for Ty<'tcx> {
+ fn encode(&self, e: &mut E) {
+ encode_with_shorthand(e, self, TyEncoder::type_shorthands);
+ }
+}
+
+impl<'tcx, E: TyEncoder<I = TyCtxt<'tcx>>> Encodable<E>
+ for ty::Binder<'tcx, ty::PredicateKind<'tcx>>
+{
+ fn encode(&self, e: &mut E) {
+ self.bound_vars().encode(e);
+ encode_with_shorthand(e, &self.skip_binder(), TyEncoder::predicate_shorthands);
+ }
+}
+
+impl<'tcx, E: TyEncoder<I = TyCtxt<'tcx>>> Encodable<E> for ty::Predicate<'tcx> {
+ fn encode(&self, e: &mut E) {
+ self.kind().encode(e);
+ }
+}
+
+impl<'tcx, E: TyEncoder<I = TyCtxt<'tcx>>> Encodable<E> for ty::Region<'tcx> {
+ fn encode(&self, e: &mut E) {
+ self.kind().encode(e);
+ }
+}
+
+impl<'tcx, E: TyEncoder<I = TyCtxt<'tcx>>> Encodable<E> for ty::Const<'tcx> {
+ fn encode(&self, e: &mut E) {
+ self.0.0.encode(e);
+ }
+}
+
+impl<'tcx, E: TyEncoder<I = TyCtxt<'tcx>>> Encodable<E> for ConstAllocation<'tcx> {
+ fn encode(&self, e: &mut E) {
+ self.inner().encode(e)
+ }
+}
+
+impl<'tcx, E: TyEncoder<I = TyCtxt<'tcx>>> Encodable<E> for AdtDef<'tcx> {
+ fn encode(&self, e: &mut E) {
+ self.0.0.encode(e)
+ }
+}
+
+impl<'tcx, E: TyEncoder<I = TyCtxt<'tcx>>> Encodable<E> for AllocId {
+ fn encode(&self, e: &mut E) {
+ e.encode_alloc_id(self)
+ }
+}
+
+#[inline]
+fn decode_arena_allocable<
+ 'tcx,
+ D: TyDecoder<I = TyCtxt<'tcx>>,
+ T: ArenaAllocatable<'tcx> + Decodable<D>,
+>(
+ decoder: &mut D,
+) -> &'tcx T
+where
+ D: TyDecoder,
+{
+ decoder.interner().arena.alloc(Decodable::decode(decoder))
+}
+
+#[inline]
+fn decode_arena_allocable_slice<
+ 'tcx,
+ D: TyDecoder<I = TyCtxt<'tcx>>,
+ T: ArenaAllocatable<'tcx> + Decodable<D>,
+>(
+ decoder: &mut D,
+) -> &'tcx [T]
+where
+ D: TyDecoder,
+{
+ decoder.interner().arena.alloc_from_iter(<Vec<T> as Decodable<D>>::decode(decoder))
+}
+
+impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> Decodable<D> for Ty<'tcx> {
+ #[allow(rustc::usage_of_ty_tykind)]
+ fn decode(decoder: &mut D) -> Ty<'tcx> {
+ // Handle shorthands first, if we have a usize > 0x80.
+ if decoder.positioned_at_shorthand() {
+ let pos = decoder.read_usize();
+ assert!(pos >= SHORTHAND_OFFSET);
+ let shorthand = pos - SHORTHAND_OFFSET;
+
+ decoder.cached_ty_for_shorthand(shorthand, |decoder| {
+ decoder.with_position(shorthand, Ty::decode)
+ })
+ } else {
+ let tcx = decoder.interner();
+ tcx.mk_ty(rustc_type_ir::TyKind::decode(decoder))
+ }
+ }
+}
+
+impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> Decodable<D>
+ for ty::Binder<'tcx, ty::PredicateKind<'tcx>>
+{
+ fn decode(decoder: &mut D) -> ty::Binder<'tcx, ty::PredicateKind<'tcx>> {
+ let bound_vars = Decodable::decode(decoder);
+ // Handle shorthands first, if we have a usize > 0x80.
+ ty::Binder::bind_with_vars(
+ if decoder.positioned_at_shorthand() {
+ let pos = decoder.read_usize();
+ assert!(pos >= SHORTHAND_OFFSET);
+ let shorthand = pos - SHORTHAND_OFFSET;
+
+ decoder.with_position(shorthand, ty::PredicateKind::decode)
+ } else {
+ ty::PredicateKind::decode(decoder)
+ },
+ bound_vars,
+ )
+ }
+}
+
+impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> Decodable<D> for ty::Predicate<'tcx> {
+ fn decode(decoder: &mut D) -> ty::Predicate<'tcx> {
+ let predicate_kind = Decodable::decode(decoder);
+ decoder.interner().mk_predicate(predicate_kind)
+ }
+}
+
+impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> Decodable<D> for SubstsRef<'tcx> {
+ fn decode(decoder: &mut D) -> Self {
+ let len = decoder.read_usize();
+ let tcx = decoder.interner();
+ tcx.mk_substs(
+ (0..len).map::<ty::subst::GenericArg<'tcx>, _>(|_| Decodable::decode(decoder)),
+ )
+ }
+}
+
+impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> Decodable<D> for mir::Place<'tcx> {
+ fn decode(decoder: &mut D) -> Self {
+ let local: mir::Local = Decodable::decode(decoder);
+ let len = decoder.read_usize();
+ let projection = decoder.interner().mk_place_elems(
+ (0..len).map::<mir::PlaceElem<'tcx>, _>(|_| Decodable::decode(decoder)),
+ );
+ mir::Place { local, projection }
+ }
+}
+
+impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> Decodable<D> for ty::Region<'tcx> {
+ fn decode(decoder: &mut D) -> Self {
+ decoder.interner().mk_region(Decodable::decode(decoder))
+ }
+}
+
+impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> Decodable<D> for CanonicalVarInfos<'tcx> {
+ fn decode(decoder: &mut D) -> Self {
+ let len = decoder.read_usize();
+ let interned: Vec<CanonicalVarInfo<'tcx>> =
+ (0..len).map(|_| Decodable::decode(decoder)).collect();
+ decoder.interner().intern_canonical_var_infos(interned.as_slice())
+ }
+}
+
+impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> Decodable<D> for AllocId {
+ fn decode(decoder: &mut D) -> Self {
+ decoder.decode_alloc_id()
+ }
+}
+
+impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> Decodable<D> for ty::SymbolName<'tcx> {
+ fn decode(decoder: &mut D) -> Self {
+ ty::SymbolName::new(decoder.interner(), &decoder.read_str())
+ }
+}
+
+macro_rules! impl_decodable_via_ref {
+ ($($t:ty),+) => {
+ $(impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> Decodable<D> for $t {
+ fn decode(decoder: &mut D) -> Self {
+ RefDecodable::decode(decoder)
+ }
+ })*
+ }
+}
+
+impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> RefDecodable<'tcx, D> for ty::List<Ty<'tcx>> {
+ fn decode(decoder: &mut D) -> &'tcx Self {
+ let len = decoder.read_usize();
+ decoder.interner().mk_type_list((0..len).map::<Ty<'tcx>, _>(|_| Decodable::decode(decoder)))
+ }
+}
+
+impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> RefDecodable<'tcx, D>
+ for ty::List<ty::Binder<'tcx, ty::ExistentialPredicate<'tcx>>>
+{
+ fn decode(decoder: &mut D) -> &'tcx Self {
+ let len = decoder.read_usize();
+ decoder.interner().mk_poly_existential_predicates(
+ (0..len).map::<ty::Binder<'tcx, _>, _>(|_| Decodable::decode(decoder)),
+ )
+ }
+}
+
+impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> Decodable<D> for ty::Const<'tcx> {
+ fn decode(decoder: &mut D) -> Self {
+ decoder.interner().mk_const(Decodable::decode(decoder))
+ }
+}
+
+impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> RefDecodable<'tcx, D> for [ty::ValTree<'tcx>] {
+ fn decode(decoder: &mut D) -> &'tcx Self {
+ decoder.interner().arena.alloc_from_iter(
+ (0..decoder.read_usize()).map(|_| Decodable::decode(decoder)).collect::<Vec<_>>(),
+ )
+ }
+}
+
+impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> Decodable<D> for ConstAllocation<'tcx> {
+ fn decode(decoder: &mut D) -> Self {
+ decoder.interner().intern_const_alloc(Decodable::decode(decoder))
+ }
+}
+
+impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> Decodable<D> for AdtDef<'tcx> {
+ fn decode(decoder: &mut D) -> Self {
+ decoder.interner().intern_adt_def(Decodable::decode(decoder))
+ }
+}
+
+impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> RefDecodable<'tcx, D>
+ for [(ty::Predicate<'tcx>, Span)]
+{
+ fn decode(decoder: &mut D) -> &'tcx Self {
+ decoder.interner().arena.alloc_from_iter(
+ (0..decoder.read_usize()).map(|_| Decodable::decode(decoder)).collect::<Vec<_>>(),
+ )
+ }
+}
+
+impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> RefDecodable<'tcx, D>
+ for [ty::abstract_const::Node<'tcx>]
+{
+ fn decode(decoder: &mut D) -> &'tcx Self {
+ decoder.interner().arena.alloc_from_iter(
+ (0..decoder.read_usize()).map(|_| Decodable::decode(decoder)).collect::<Vec<_>>(),
+ )
+ }
+}
+
+impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> RefDecodable<'tcx, D>
+ for [ty::abstract_const::NodeId]
+{
+ fn decode(decoder: &mut D) -> &'tcx Self {
+ decoder.interner().arena.alloc_from_iter(
+ (0..decoder.read_usize()).map(|_| Decodable::decode(decoder)).collect::<Vec<_>>(),
+ )
+ }
+}
+
+impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> RefDecodable<'tcx, D>
+ for ty::List<ty::BoundVariableKind>
+{
+ fn decode(decoder: &mut D) -> &'tcx Self {
+ let len = decoder.read_usize();
+ decoder.interner().mk_bound_variable_kinds(
+ (0..len).map::<ty::BoundVariableKind, _>(|_| Decodable::decode(decoder)),
+ )
+ }
+}
+
+impl_decodable_via_ref! {
+ &'tcx ty::TypeckResults<'tcx>,
+ &'tcx ty::List<Ty<'tcx>>,
+ &'tcx ty::List<ty::Binder<'tcx, ty::ExistentialPredicate<'tcx>>>,
+ &'tcx traits::ImplSource<'tcx, ()>,
+ &'tcx mir::Body<'tcx>,
+ &'tcx mir::UnsafetyCheckResult,
+ &'tcx mir::BorrowCheckResult<'tcx>,
+ &'tcx mir::coverage::CodeRegion,
+ &'tcx ty::List<ty::BoundVariableKind>
+}
+
+#[macro_export]
+macro_rules! __impl_decoder_methods {
+ ($($name:ident -> $ty:ty;)*) => {
+ $(
+ #[inline]
+ fn $name(&mut self) -> $ty {
+ self.opaque.$name()
+ }
+ )*
+ }
+}
+
+macro_rules! impl_arena_allocatable_decoder {
+ ([]$args:tt) => {};
+ ([decode $(, $attrs:ident)*]
+ [$name:ident: $ty:ty]) => {
+ impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> RefDecodable<'tcx, D> for $ty {
+ #[inline]
+ fn decode(decoder: &mut D) -> &'tcx Self {
+ decode_arena_allocable(decoder)
+ }
+ }
+
+ impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> RefDecodable<'tcx, D> for [$ty] {
+ #[inline]
+ fn decode(decoder: &mut D) -> &'tcx Self {
+ decode_arena_allocable_slice(decoder)
+ }
+ }
+ };
+}
+
+macro_rules! impl_arena_allocatable_decoders {
+ ([$($a:tt $name:ident: $ty:ty,)*]) => {
+ $(
+ impl_arena_allocatable_decoder!($a [$name: $ty]);
+ )*
+ }
+}
+
+rustc_hir::arena_types!(impl_arena_allocatable_decoders);
+arena_types!(impl_arena_allocatable_decoders);
+
+macro_rules! impl_arena_copy_decoder {
+ (<$tcx:tt> $($ty:ty,)*) => {
+ $(impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> RefDecodable<'tcx, D> for $ty {
+ #[inline]
+ fn decode(decoder: &mut D) -> &'tcx Self {
+ decoder.interner().arena.alloc(Decodable::decode(decoder))
+ }
+ }
+
+ impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> RefDecodable<'tcx, D> for [$ty] {
+ #[inline]
+ fn decode(decoder: &mut D) -> &'tcx Self {
+ decoder.interner().arena.alloc_from_iter(<Vec<_> as Decodable<D>>::decode(decoder))
+ }
+ })*
+ };
+}
+
+impl_arena_copy_decoder! {<'tcx>
+ Span,
+ rustc_span::symbol::Ident,
+ ty::Variance,
+ rustc_span::def_id::DefId,
+ rustc_span::def_id::LocalDefId,
+ (rustc_middle::middle::exported_symbols::ExportedSymbol<'tcx>, rustc_middle::middle::exported_symbols::SymbolExportInfo),
+}
+
+#[macro_export]
+macro_rules! implement_ty_decoder {
+ ($DecoderName:ident <$($typaram:tt),*>) => {
+ mod __ty_decoder_impl {
+ use std::borrow::Cow;
+ use rustc_serialize::Decoder;
+
+ use super::$DecoderName;
+
+ impl<$($typaram ),*> Decoder for $DecoderName<$($typaram),*> {
+ $crate::__impl_decoder_methods! {
+ read_u128 -> u128;
+ read_u64 -> u64;
+ read_u32 -> u32;
+ read_u16 -> u16;
+ read_u8 -> u8;
+ read_usize -> usize;
+
+ read_i128 -> i128;
+ read_i64 -> i64;
+ read_i32 -> i32;
+ read_i16 -> i16;
+ read_i8 -> i8;
+ read_isize -> isize;
+
+ read_bool -> bool;
+ read_f64 -> f64;
+ read_f32 -> f32;
+ read_char -> char;
+ read_str -> &str;
+ }
+
+ #[inline]
+ fn read_raw_bytes(&mut self, len: usize) -> &[u8] {
+ self.opaque.read_raw_bytes(len)
+ }
+ }
+ }
+ }
+}
+
+macro_rules! impl_binder_encode_decode {
+ ($($t:ty),+ $(,)?) => {
+ $(
+ impl<'tcx, E: TyEncoder<I = TyCtxt<'tcx>>> Encodable<E> for ty::Binder<'tcx, $t> {
+ fn encode(&self, e: &mut E) {
+ self.bound_vars().encode(e);
+ self.as_ref().skip_binder().encode(e);
+ }
+ }
+ impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> Decodable<D> for ty::Binder<'tcx, $t> {
+ fn decode(decoder: &mut D) -> Self {
+ let bound_vars = Decodable::decode(decoder);
+ ty::Binder::bind_with_vars(Decodable::decode(decoder), bound_vars)
+ }
+ }
+ )*
+ }
+}
+
+impl_binder_encode_decode! {
+ &'tcx ty::List<Ty<'tcx>>,
+ ty::FnSig<'tcx>,
+ ty::ExistentialPredicate<'tcx>,
+ ty::TraitRef<'tcx>,
+ Vec<ty::GeneratorInteriorTypeCause<'tcx>>,
+ ty::ExistentialTraitRef<'tcx>,
+}
diff --git a/compiler/rustc_middle/src/ty/consts.rs b/compiler/rustc_middle/src/ty/consts.rs
new file mode 100644
index 000000000..f8792edc0
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/consts.rs
@@ -0,0 +1,326 @@
+use crate::mir::interpret::LitToConstInput;
+use crate::mir::ConstantKind;
+use crate::ty::{
+ self, InlineConstSubsts, InlineConstSubstsParts, InternalSubsts, ParamEnv, ParamEnvAnd, Ty,
+ TyCtxt, TypeVisitable,
+};
+use rustc_data_structures::intern::Interned;
+use rustc_errors::ErrorGuaranteed;
+use rustc_hir as hir;
+use rustc_hir::def_id::{DefId, LocalDefId};
+use rustc_macros::HashStable;
+use std::fmt;
+
+mod int;
+mod kind;
+mod valtree;
+
+pub use int::*;
+pub use kind::*;
+pub use valtree::*;
+
+/// Use this rather than `ConstS` whenever possible.
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, HashStable)]
+#[rustc_pass_by_value]
+pub struct Const<'tcx>(pub Interned<'tcx, ConstS<'tcx>>);
+
+impl<'tcx> fmt::Debug for Const<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // This reflects what `Const` looked like before `Interned` was
+ // introduced. We print it like this to avoid having to update expected
+ // output in a lot of tests.
+ write!(f, "Const {{ ty: {:?}, kind: {:?} }}", self.ty(), self.kind())
+ }
+}
+
+/// Typed constant value.
+#[derive(PartialEq, Eq, PartialOrd, Ord, Hash, HashStable, TyEncodable, TyDecodable)]
+pub struct ConstS<'tcx> {
+ pub ty: Ty<'tcx>,
+ pub kind: ConstKind<'tcx>,
+}
+
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+static_assert_size!(ConstS<'_>, 48);
+
+impl<'tcx> Const<'tcx> {
+ #[inline]
+ pub fn ty(self) -> Ty<'tcx> {
+ self.0.ty
+ }
+
+ #[inline]
+ pub fn kind(self) -> ConstKind<'tcx> {
+ self.0.kind
+ }
+
+ /// Literals and const generic parameters are eagerly converted to a constant; everything else
+ /// becomes `Unevaluated`.
+ pub fn from_anon_const(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> Self {
+ Self::from_opt_const_arg_anon_const(tcx, ty::WithOptConstParam::unknown(def_id))
+ }
+
+ #[instrument(skip(tcx), level = "debug")]
+ pub fn from_opt_const_arg_anon_const(
+ tcx: TyCtxt<'tcx>,
+ def: ty::WithOptConstParam<LocalDefId>,
+ ) -> Self {
+ debug!("Const::from_anon_const(def={:?})", def);
+
+ let body_id = match tcx.hir().get_by_def_id(def.did) {
+ hir::Node::AnonConst(ac) => ac.body,
+ _ => span_bug!(
+ tcx.def_span(def.did.to_def_id()),
+ "from_anon_const can only process anonymous constants"
+ ),
+ };
+
+ let expr = &tcx.hir().body(body_id).value;
+ debug!(?expr);
+
+ let ty = tcx.type_of(def.def_id_for_type_of());
+
+ match Self::try_eval_lit_or_param(tcx, ty, expr) {
+ Some(v) => v,
+ None => tcx.mk_const(ty::ConstS {
+ kind: ty::ConstKind::Unevaluated(ty::Unevaluated {
+ def: def.to_global(),
+ substs: InternalSubsts::identity_for_item(tcx, def.did.to_def_id()),
+ promoted: None,
+ }),
+ ty,
+ }),
+ }
+ }
+
+ #[instrument(skip(tcx), level = "debug")]
+ fn try_eval_lit_or_param(
+ tcx: TyCtxt<'tcx>,
+ ty: Ty<'tcx>,
+ expr: &'tcx hir::Expr<'tcx>,
+ ) -> Option<Self> {
+ // Unwrap a block, so that e.g. `{ P }` is recognised as a parameter. Const arguments
+ // currently have to be wrapped in curly brackets, so it's necessary to special-case.
+ let expr = match &expr.kind {
+ hir::ExprKind::Block(block, _) if block.stmts.is_empty() && block.expr.is_some() => {
+ block.expr.as_ref().unwrap()
+ }
+ _ => expr,
+ };
+
+ let lit_input = match expr.kind {
+ hir::ExprKind::Lit(ref lit) => Some(LitToConstInput { lit: &lit.node, ty, neg: false }),
+ hir::ExprKind::Unary(hir::UnOp::Neg, ref expr) => match expr.kind {
+ hir::ExprKind::Lit(ref lit) => {
+ Some(LitToConstInput { lit: &lit.node, ty, neg: true })
+ }
+ _ => None,
+ },
+ _ => None,
+ };
+
+ if let Some(lit_input) = lit_input {
+ // If an error occurred, ignore that it's a literal and leave reporting the error up to
+ // mir.
+ match tcx.at(expr.span).lit_to_const(lit_input) {
+ Ok(c) => return Some(c),
+ Err(e) => {
+ tcx.sess.delay_span_bug(
+ expr.span,
+ &format!("Const::from_anon_const: couldn't lit_to_const {:?}", e),
+ );
+ }
+ }
+ }
+
+ use hir::{def::DefKind::ConstParam, def::Res, ExprKind, Path, QPath};
+ match expr.kind {
+ ExprKind::Path(QPath::Resolved(_, &Path { res: Res::Def(ConstParam, def_id), .. })) => {
+ // Find the name and index of the const parameter by indexing the generics of
+ // the parent item and construct a `ParamConst`.
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id.expect_local());
+ let item_id = tcx.hir().get_parent_node(hir_id);
+ let item_def_id = tcx.hir().local_def_id(item_id);
+ let generics = tcx.generics_of(item_def_id.to_def_id());
+ let index = generics.param_def_id_to_index[&def_id];
+ let name = tcx.hir().name(hir_id);
+ Some(tcx.mk_const(ty::ConstS {
+ kind: ty::ConstKind::Param(ty::ParamConst::new(index, name)),
+ ty,
+ }))
+ }
+ _ => None,
+ }
+ }
+
+ pub fn from_inline_const(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> Self {
+ debug!("Const::from_inline_const(def_id={:?})", def_id);
+
+ let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
+
+ let body_id = match tcx.hir().get(hir_id) {
+ hir::Node::AnonConst(ac) => ac.body,
+ _ => span_bug!(
+ tcx.def_span(def_id.to_def_id()),
+ "from_inline_const can only process anonymous constants"
+ ),
+ };
+
+ let expr = &tcx.hir().body(body_id).value;
+
+ let ty = tcx.typeck(def_id).node_type(hir_id);
+
+ let ret = match Self::try_eval_lit_or_param(tcx, ty, expr) {
+ Some(v) => v,
+ None => {
+ let typeck_root_def_id = tcx.typeck_root_def_id(def_id.to_def_id());
+ let parent_substs =
+ tcx.erase_regions(InternalSubsts::identity_for_item(tcx, typeck_root_def_id));
+ let substs =
+ InlineConstSubsts::new(tcx, InlineConstSubstsParts { parent_substs, ty })
+ .substs;
+ tcx.mk_const(ty::ConstS {
+ kind: ty::ConstKind::Unevaluated(ty::Unevaluated {
+ def: ty::WithOptConstParam::unknown(def_id).to_global(),
+ substs,
+ promoted: None,
+ }),
+ ty,
+ })
+ }
+ };
+ debug_assert!(!ret.has_free_regions());
+ ret
+ }
+
+ /// Interns the given value as a constant.
+ #[inline]
+ pub fn from_value(tcx: TyCtxt<'tcx>, val: ty::ValTree<'tcx>, ty: Ty<'tcx>) -> Self {
+ tcx.mk_const(ConstS { kind: ConstKind::Value(val), ty })
+ }
+
+ /// Panics if `self.kind()` is not `ty::ConstKind::Value`.
+ pub fn to_valtree(self) -> ty::ValTree<'tcx> {
+ match self.kind() {
+ ty::ConstKind::Value(valtree) => valtree,
+ _ => bug!("expected ConstKind::Value, got {:?}", self.kind()),
+ }
+ }
+
+ pub fn from_scalar_int(tcx: TyCtxt<'tcx>, i: ScalarInt, ty: Ty<'tcx>) -> Self {
+ let valtree = ty::ValTree::from_scalar_int(i);
+ Self::from_value(tcx, valtree, ty)
+ }
+
+ #[inline]
+ /// Creates a constant with the given integer value and interns it.
+ pub fn from_bits(tcx: TyCtxt<'tcx>, bits: u128, ty: ParamEnvAnd<'tcx, Ty<'tcx>>) -> Self {
+ let size = tcx
+ .layout_of(ty)
+ .unwrap_or_else(|e| panic!("could not compute layout for {:?}: {:?}", ty, e))
+ .size;
+ Self::from_scalar_int(tcx, ScalarInt::try_from_uint(bits, size).unwrap(), ty.value)
+ }
+
+ #[inline]
+ /// Creates an interned zst constant.
+ pub fn zero_sized(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Self {
+ let valtree = ty::ValTree::zst();
+ Self::from_value(tcx, valtree, ty)
+ }
+
+ #[inline]
+ /// Creates an interned bool constant.
+ pub fn from_bool(tcx: TyCtxt<'tcx>, v: bool) -> Self {
+ Self::from_bits(tcx, v as u128, ParamEnv::empty().and(tcx.types.bool))
+ }
+
+ #[inline]
+ /// Creates an interned usize constant.
+ pub fn from_usize(tcx: TyCtxt<'tcx>, n: u64) -> Self {
+ Self::from_bits(tcx, n as u128, ParamEnv::empty().and(tcx.types.usize))
+ }
+
+ #[inline]
+ /// Attempts to evaluate the given constant to bits. Can fail to evaluate in the presence of
+ /// generics (or erroneous code) or if the value can't be represented as bits (e.g. because it
+ /// contains const generic parameters or pointers).
+ pub fn try_eval_bits(
+ self,
+ tcx: TyCtxt<'tcx>,
+ param_env: ParamEnv<'tcx>,
+ ty: Ty<'tcx>,
+ ) -> Option<u128> {
+ assert_eq!(self.ty(), ty);
+ let size = tcx.layout_of(param_env.with_reveal_all_normalized(tcx).and(ty)).ok()?.size;
+ // if `ty` does not depend on generic parameters, use an empty param_env
+ self.kind().eval(tcx, param_env).try_to_bits(size)
+ }
+
+ #[inline]
+ pub fn try_eval_bool(self, tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>) -> Option<bool> {
+ self.kind().eval(tcx, param_env).try_to_bool()
+ }
+
+ #[inline]
+ pub fn try_eval_usize(self, tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>) -> Option<u64> {
+ self.kind().eval(tcx, param_env).try_to_machine_usize(tcx)
+ }
+
+ #[inline]
+ /// Tries to evaluate the constant if it is `Unevaluated`. If that doesn't succeed, returns the
+ /// unevaluated constant.
+ pub fn eval(self, tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>) -> Const<'tcx> {
+ if let Some(val) = self.kind().try_eval_for_typeck(tcx, param_env) {
+ match val {
+ Ok(val) => Const::from_value(tcx, val, self.ty()),
+ Err(ErrorGuaranteed { .. }) => tcx.const_error(self.ty()),
+ }
+ } else {
+ // Either the constant isn't evaluatable or ValTree creation failed.
+ self
+ }
+ }
+
+ #[inline]
+ /// Tries to evaluate the constant if it is `Unevaluated` and creates a `ConstValue` if the
+ /// evaluation succeeds. If it doesn't succeed, returns the unevaluated constant.
+ pub fn eval_for_mir(self, tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>) -> ConstantKind<'tcx> {
+ if let Some(val) = self.kind().try_eval_for_mir(tcx, param_env) {
+ match val {
+ Ok(const_val) => ConstantKind::from_value(const_val, self.ty()),
+ Err(ErrorGuaranteed { .. }) => ConstantKind::Ty(tcx.const_error(self.ty())),
+ }
+ } else {
+ ConstantKind::Ty(self)
+ }
+ }
+
+ #[inline]
+ /// Panics if the value cannot be evaluated or doesn't contain a valid integer of the given type.
+ pub fn eval_bits(self, tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>, ty: Ty<'tcx>) -> u128 {
+ self.try_eval_bits(tcx, param_env, ty)
+ .unwrap_or_else(|| bug!("expected bits of {:#?}, got {:#?}", ty, self))
+ }
+
+ #[inline]
+ /// Panics if the value cannot be evaluated or doesn't contain a valid `usize`.
+ pub fn eval_usize(self, tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>) -> u64 {
+ self.try_eval_usize(tcx, param_env)
+ .unwrap_or_else(|| bug!("expected usize, got {:#?}", self))
+ }
+}
+
+pub fn const_param_default<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> Const<'tcx> {
+ let default_def_id = match tcx.hir().get_by_def_id(def_id.expect_local()) {
+ hir::Node::GenericParam(hir::GenericParam {
+ kind: hir::GenericParamKind::Const { ty: _, default: Some(ac) },
+ ..
+ }) => tcx.hir().local_def_id(ac.hir_id),
+ _ => span_bug!(
+ tcx.def_span(def_id),
+ "`const_param_default` expected a generic parameter with a constant"
+ ),
+ };
+ Const::from_anon_const(tcx, default_def_id)
+}
diff --git a/compiler/rustc_middle/src/ty/consts/int.rs b/compiler/rustc_middle/src/ty/consts/int.rs
new file mode 100644
index 000000000..7436f0f6f
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/consts/int.rs
@@ -0,0 +1,483 @@
+use rustc_apfloat::ieee::{Double, Single};
+use rustc_apfloat::Float;
+use rustc_serialize::{Decodable, Decoder, Encodable, Encoder};
+use rustc_target::abi::Size;
+use std::convert::{TryFrom, TryInto};
+use std::fmt;
+use std::num::NonZeroU8;
+
+use crate::ty::TyCtxt;
+
+#[derive(Copy, Clone)]
+/// A type for representing any integer. Only used for printing.
+pub struct ConstInt {
+ /// The "untyped" variant of `ConstInt`.
+ int: ScalarInt,
+ /// Whether the value is of a signed integer type.
+ signed: bool,
+ /// Whether the value is a `usize` or `isize` type.
+ is_ptr_sized_integral: bool,
+}
+
+impl ConstInt {
+ pub fn new(int: ScalarInt, signed: bool, is_ptr_sized_integral: bool) -> Self {
+ Self { int, signed, is_ptr_sized_integral }
+ }
+}
+
+impl std::fmt::Debug for ConstInt {
+ fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ let Self { int, signed, is_ptr_sized_integral } = *self;
+ let size = int.size().bytes();
+ let raw = int.data;
+ if signed {
+ let bit_size = size * 8;
+ let min = 1u128 << (bit_size - 1);
+ let max = min - 1;
+ if raw == min {
+ match (size, is_ptr_sized_integral) {
+ (_, true) => write!(fmt, "isize::MIN"),
+ (1, _) => write!(fmt, "i8::MIN"),
+ (2, _) => write!(fmt, "i16::MIN"),
+ (4, _) => write!(fmt, "i32::MIN"),
+ (8, _) => write!(fmt, "i64::MIN"),
+ (16, _) => write!(fmt, "i128::MIN"),
+ _ => bug!("ConstInt 0x{:x} with size = {} and signed = {}", raw, size, signed),
+ }
+ } else if raw == max {
+ match (size, is_ptr_sized_integral) {
+ (_, true) => write!(fmt, "isize::MAX"),
+ (1, _) => write!(fmt, "i8::MAX"),
+ (2, _) => write!(fmt, "i16::MAX"),
+ (4, _) => write!(fmt, "i32::MAX"),
+ (8, _) => write!(fmt, "i64::MAX"),
+ (16, _) => write!(fmt, "i128::MAX"),
+ _ => bug!("ConstInt 0x{:x} with size = {} and signed = {}", raw, size, signed),
+ }
+ } else {
+ match size {
+ 1 => write!(fmt, "{}", raw as i8)?,
+ 2 => write!(fmt, "{}", raw as i16)?,
+ 4 => write!(fmt, "{}", raw as i32)?,
+ 8 => write!(fmt, "{}", raw as i64)?,
+ 16 => write!(fmt, "{}", raw as i128)?,
+ _ => bug!("ConstInt 0x{:x} with size = {} and signed = {}", raw, size, signed),
+ }
+ if fmt.alternate() {
+ match (size, is_ptr_sized_integral) {
+ (_, true) => write!(fmt, "_isize")?,
+ (1, _) => write!(fmt, "_i8")?,
+ (2, _) => write!(fmt, "_i16")?,
+ (4, _) => write!(fmt, "_i32")?,
+ (8, _) => write!(fmt, "_i64")?,
+ (16, _) => write!(fmt, "_i128")?,
+ _ => bug!(),
+ }
+ }
+ Ok(())
+ }
+ } else {
+ let max = Size::from_bytes(size).truncate(u128::MAX);
+ if raw == max {
+ match (size, is_ptr_sized_integral) {
+ (_, true) => write!(fmt, "usize::MAX"),
+ (1, _) => write!(fmt, "u8::MAX"),
+ (2, _) => write!(fmt, "u16::MAX"),
+ (4, _) => write!(fmt, "u32::MAX"),
+ (8, _) => write!(fmt, "u64::MAX"),
+ (16, _) => write!(fmt, "u128::MAX"),
+ _ => bug!("ConstInt 0x{:x} with size = {} and signed = {}", raw, size, signed),
+ }
+ } else {
+ match size {
+ 1 => write!(fmt, "{}", raw as u8)?,
+ 2 => write!(fmt, "{}", raw as u16)?,
+ 4 => write!(fmt, "{}", raw as u32)?,
+ 8 => write!(fmt, "{}", raw as u64)?,
+ 16 => write!(fmt, "{}", raw as u128)?,
+ _ => bug!("ConstInt 0x{:x} with size = {} and signed = {}", raw, size, signed),
+ }
+ if fmt.alternate() {
+ match (size, is_ptr_sized_integral) {
+ (_, true) => write!(fmt, "_usize")?,
+ (1, _) => write!(fmt, "_u8")?,
+ (2, _) => write!(fmt, "_u16")?,
+ (4, _) => write!(fmt, "_u32")?,
+ (8, _) => write!(fmt, "_u64")?,
+ (16, _) => write!(fmt, "_u128")?,
+ _ => bug!(),
+ }
+ }
+ Ok(())
+ }
+ }
+ }
+}
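
The `Debug` impl recognizes `MIN`/`MAX` straight from the raw bits: for a signed type of `bit_size` bits, `MIN` is `1 << (bit_size - 1)` viewed as unsigned, and `MAX` is one less. A quick standalone check of those bit patterns for `i8`:

fn main() {
    let bit_size = 8u32;
    // MIN has only the sign bit set; MAX is every bit below the sign bit.
    let min = 1u128 << (bit_size - 1);
    let max = min - 1;
    assert_eq!(min as u8 as i8, i8::MIN);
    assert_eq!(max as u8 as i8, i8::MAX);
}
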
+
+/// The raw bytes of a simple value.
+///
+/// This is a packed struct in order to allow this type to be optimally embedded in enums
+/// (like Scalar).
+#[derive(Clone, Copy, Eq, PartialEq, Ord, PartialOrd, Hash)]
+#[repr(packed)]
+pub struct ScalarInt {
+ /// The first `size` bytes of `data` are the value.
+ /// Do not try to read less or more bytes than that. The remaining bytes must be 0.
+ data: u128,
+ size: NonZeroU8,
+}
+
+// Cannot derive these, as the derives take references to the fields, and we
+// can't take references to fields of packed structs.
+impl<CTX> crate::ty::HashStable<CTX> for ScalarInt {
+ fn hash_stable(&self, hcx: &mut CTX, hasher: &mut crate::ty::StableHasher) {
+ // Using a block `{self.data}` here to force a copy instead of using `self.data`
+ // directly, because `hash_stable` takes `&self` and would thus borrow `self.data`.
+ // Since `Self` is a packed struct, that would create a possibly unaligned reference,
+ // which is UB.
+ { self.data }.hash_stable(hcx, hasher);
+ self.size.get().hash_stable(hcx, hasher);
+ }
+}
+
+impl<S: Encoder> Encodable<S> for ScalarInt {
+ fn encode(&self, s: &mut S) {
+ s.emit_u128(self.data);
+ s.emit_u8(self.size.get());
+ }
+}
+
+impl<D: Decoder> Decodable<D> for ScalarInt {
+ fn decode(d: &mut D) -> ScalarInt {
+ ScalarInt { data: d.read_u128(), size: NonZeroU8::new(d.read_u8()).unwrap() }
+ }
+}
+
+impl ScalarInt {
+ pub const TRUE: ScalarInt = ScalarInt { data: 1_u128, size: NonZeroU8::new(1).unwrap() };
+
+ pub const FALSE: ScalarInt = ScalarInt { data: 0_u128, size: NonZeroU8::new(1).unwrap() };
+
+ #[inline]
+ pub fn size(self) -> Size {
+ Size::from_bytes(self.size.get())
+ }
+
+ /// Make sure the `data` fits in `size`.
+ /// This is guaranteed by all constructors here, but this check has saved us from
+ /// bugs many times in the past, so keeping it around is definitely worth it.
+ #[inline(always)]
+ fn check_data(self) {
+ // Using a block `{self.data}` here to force a copy instead of using `self.data`
+ // directly, because `debug_assert_eq` takes references to its arguments and formatting
+ // arguments and would thus borrow `self.data`. Since `Self`
+ // is a packed struct, that would create a possibly unaligned reference, which
+ // is UB.
+ debug_assert_eq!(
+ self.size().truncate(self.data),
+ { self.data },
+ "Scalar value {:#x} exceeds size of {} bytes",
+ { self.data },
+ self.size
+ );
+ }
+
+ #[inline]
+ pub fn null(size: Size) -> Self {
+ Self { data: 0, size: NonZeroU8::new(size.bytes() as u8).unwrap() }
+ }
+
+ #[inline]
+ pub fn is_null(self) -> bool {
+ self.data == 0
+ }
+
+ #[inline]
+ pub fn try_from_uint(i: impl Into<u128>, size: Size) -> Option<Self> {
+ let data = i.into();
+ if size.truncate(data) == data {
+ Some(Self { data, size: NonZeroU8::new(size.bytes() as u8).unwrap() })
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ pub fn try_from_int(i: impl Into<i128>, size: Size) -> Option<Self> {
+ let i = i.into();
+ // `into` performed sign extension, we have to truncate
+ let truncated = size.truncate(i as u128);
+ if size.sign_extend(truncated) as i128 == i {
+ Some(Self { data: truncated, size: NonZeroU8::new(size.bytes() as u8).unwrap() })
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ pub fn assert_bits(self, target_size: Size) -> u128 {
+ self.to_bits(target_size).unwrap_or_else(|size| {
+ bug!("expected int of size {}, but got size {}", target_size.bytes(), size.bytes())
+ })
+ }
+
+ #[inline]
+ pub fn to_bits(self, target_size: Size) -> Result<u128, Size> {
+ assert_ne!(target_size.bytes(), 0, "you should never look at the bits of a ZST");
+ if target_size.bytes() == u64::from(self.size.get()) {
+ self.check_data();
+ Ok(self.data)
+ } else {
+ Err(self.size())
+ }
+ }
+
+ #[inline]
+ pub fn try_to_machine_usize<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Result<u64, Size> {
+ Ok(self.to_bits(tcx.data_layout.pointer_size)? as u64)
+ }
+
+ /// Tries to convert the `ScalarInt` to an unsigned integer of the given size.
+ /// Fails if the size of the `ScalarInt` is unequal to `size` and returns the
+ /// `ScalarInt`'s size in that case.
+ #[inline]
+ pub fn try_to_uint(self, size: Size) -> Result<u128, Size> {
+ self.to_bits(size)
+ }
+
+ /// Tries to convert the `ScalarInt` to `u8`. Fails if the `size` of the `ScalarInt`
+ /// is not equal to `Size { raw: 1 }` and returns the `size` value of the `ScalarInt` in
+ /// that case.
+ #[inline]
+ pub fn try_to_u8(self) -> Result<u8, Size> {
+ self.to_bits(Size::from_bits(8)).map(|v| u8::try_from(v).unwrap())
+ }
+
+ /// Tries to convert the `ScalarInt` to `u16`. Fails if the size of the `ScalarInt`
+ /// is not equal to `Size { raw: 2 }` and returns the `size` value of the `ScalarInt` in
+ /// that case.
+ #[inline]
+ pub fn try_to_u16(self) -> Result<u16, Size> {
+ self.to_bits(Size::from_bits(16)).map(|v| u16::try_from(v).unwrap())
+ }
+
+ /// Tries to convert the `ScalarInt` to `u32`. Fails if the `size` of the `ScalarInt`
+ /// is not equal to `Size { raw: 4 }` and returns the `size` value of the `ScalarInt` in
+ /// that case.
+ #[inline]
+ pub fn try_to_u32(self) -> Result<u32, Size> {
+ self.to_bits(Size::from_bits(32)).map(|v| u32::try_from(v).unwrap())
+ }
+
+ /// Tries to convert the `ScalarInt` to `u64`. Fails if the `size` of the `ScalarInt`
+ /// is not equal to `Size { raw: 8 }` and returns the `size` value of the `ScalarInt` in
+ /// that case.
+ #[inline]
+ pub fn try_to_u64(self) -> Result<u64, Size> {
+ self.to_bits(Size::from_bits(64)).map(|v| u64::try_from(v).unwrap())
+ }
+
+ /// Tries to convert the `ScalarInt` to `u128`. Fails if the `size` of the `ScalarInt`
+ /// is not equal to `Size { raw: 16 }` and returns the `size` value of the `ScalarInt` in
+ /// that case.
+ #[inline]
+ pub fn try_to_u128(self) -> Result<u128, Size> {
+ self.to_bits(Size::from_bits(128))
+ }
+
+ /// Tries to convert the `ScalarInt` to a signed integer of the given size.
+ /// Fails if the size of the `ScalarInt` is unequal to `size` and returns the
+ /// `ScalarInt`'s size in that case.
+ #[inline]
+ pub fn try_to_int(self, size: Size) -> Result<i128, Size> {
+ let b = self.to_bits(size)?;
+ Ok(size.sign_extend(b) as i128)
+ }
+
+ /// Tries to convert the `ScalarInt` to i8.
+ /// Fails if the size of the `ScalarInt` is unequal to `Size { raw: 1 }`
+ /// and returns the `ScalarInt`'s size in that case.
+ pub fn try_to_i8(self) -> Result<i8, Size> {
+ self.try_to_int(Size::from_bits(8)).map(|v| i8::try_from(v).unwrap())
+ }
+
+ /// Tries to convert the `ScalarInt` to i16.
+ /// Fails if the size of the `ScalarInt` is unequal to `Size { raw: 2 }`
+ /// and returns the `ScalarInt`'s size in that case.
+ pub fn try_to_i16(self) -> Result<i16, Size> {
+ self.try_to_int(Size::from_bits(16)).map(|v| i16::try_from(v).unwrap())
+ }
+
+ /// Tries to convert the `ScalarInt` to i32.
+ /// Fails if the size of the `ScalarInt` is unequal to `Size { raw: 4 }`
+ /// and returns the `ScalarInt`'s size in that case.
+ pub fn try_to_i32(self) -> Result<i32, Size> {
+ self.try_to_int(Size::from_bits(32)).map(|v| i32::try_from(v).unwrap())
+ }
+
+ /// Tries to convert the `ScalarInt` to i64.
+ /// Fails if the size of the `ScalarInt` is unequal to `Size { raw: 8 }`
+ /// and returns the `ScalarInt`'s size in that case.
+ pub fn try_to_i64(self) -> Result<i64, Size> {
+ self.try_to_int(Size::from_bits(64)).map(|v| i64::try_from(v).unwrap())
+ }
+
+ /// Tries to convert the `ScalarInt` to i128.
+ /// Fails if the size of the `ScalarInt` is unequal to `Size { raw: 16 }`
+ /// and returns the `ScalarInt`'s size in that case.
+ pub fn try_to_i128(self) -> Result<i128, Size> {
+ self.try_to_int(Size::from_bits(128)).map(|v| i128::try_from(v).unwrap())
+ }
+}
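
`try_from_uint` and `try_from_int` both verify a lossless fit by truncating to the target size and, for the signed case, sign-extending back. A standalone model of that round trip, with `truncate` and `sign_extend` re-derived here for illustration (the real ones live on `Size`):

// Keep only the low `bits` bits of `value`.
fn truncate(value: u128, bits: u32) -> u128 {
    if bits == 128 { value } else { value & ((1u128 << bits) - 1) }
}

// Reinterpret the low `bits` bits of `value` as a signed number.
fn sign_extend(value: u128, bits: u32) -> i128 {
    let shift = 128 - bits;
    ((value << shift) as i128) >> shift
}

fn main() {
    // -1 fits in one byte: truncating and sign-extending gives it back.
    let i: i128 = -1;
    assert_eq!(sign_extend(truncate(i as u128, 8), 8), i);
    // 300 does not fit in one byte, so the round trip changes the value
    // and `try_from_int` would return `Err` for it.
    let j: i128 = 300;
    assert_ne!(sign_extend(truncate(j as u128, 8), 8), j);
}
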
+
+macro_rules! from {
+ ($($ty:ty),*) => {
+ $(
+ impl From<$ty> for ScalarInt {
+ #[inline]
+ fn from(u: $ty) -> Self {
+ Self {
+ data: u128::from(u),
+ size: NonZeroU8::new(std::mem::size_of::<$ty>() as u8).unwrap(),
+ }
+ }
+ }
+ )*
+ }
+}
+
+macro_rules! try_from {
+ ($($ty:ty),*) => {
+ $(
+ impl TryFrom<ScalarInt> for $ty {
+ type Error = Size;
+ #[inline]
+ fn try_from(int: ScalarInt) -> Result<Self, Size> {
+ // The `unwrap` cannot fail because to_bits (if it succeeds)
+ // is guaranteed to return a value that fits into the size.
+ int.to_bits(Size::from_bytes(std::mem::size_of::<$ty>()))
+ .map(|u| u.try_into().unwrap())
+ }
+ }
+ )*
+ }
+}
+
+from!(u8, u16, u32, u64, u128, bool);
+try_from!(u8, u16, u32, u64, u128);
+
+impl TryFrom<ScalarInt> for bool {
+ type Error = Size;
+ #[inline]
+ fn try_from(int: ScalarInt) -> Result<Self, Size> {
+ int.to_bits(Size::from_bytes(1)).and_then(|u| match u {
+ 0 => Ok(false),
+ 1 => Ok(true),
+ _ => Err(Size::from_bytes(1)),
+ })
+ }
+}
+
+impl From<char> for ScalarInt {
+ #[inline]
+ fn from(c: char) -> Self {
+ Self { data: c as u128, size: NonZeroU8::new(std::mem::size_of::<char>() as u8).unwrap() }
+ }
+}
+
+/// Error returned when a conversion from ScalarInt to char fails.
+#[derive(Debug)]
+pub struct CharTryFromScalarInt;
+
+impl TryFrom<ScalarInt> for char {
+ type Error = CharTryFromScalarInt;
+
+ #[inline]
+ fn try_from(int: ScalarInt) -> Result<Self, Self::Error> {
+ let Ok(bits) = int.to_bits(Size::from_bytes(std::mem::size_of::<char>())) else {
+ return Err(CharTryFromScalarInt);
+ };
+ match char::from_u32(bits.try_into().unwrap()) {
+ Some(c) => Ok(c),
+ None => Err(CharTryFromScalarInt),
+ }
+ }
+}
+
+impl From<Single> for ScalarInt {
+ #[inline]
+ fn from(f: Single) -> Self {
+ // We trust apfloat to give us properly truncated data.
+ Self { data: f.to_bits(), size: NonZeroU8::new((Single::BITS / 8) as u8).unwrap() }
+ }
+}
+
+impl TryFrom<ScalarInt> for Single {
+ type Error = Size;
+ #[inline]
+ fn try_from(int: ScalarInt) -> Result<Self, Size> {
+ int.to_bits(Size::from_bytes(4)).map(Self::from_bits)
+ }
+}
+
+impl From<Double> for ScalarInt {
+ #[inline]
+ fn from(f: Double) -> Self {
+ // We trust apfloat to give us properly truncated data.
+ Self { data: f.to_bits(), size: NonZeroU8::new((Double::BITS / 8) as u8).unwrap() }
+ }
+}
+
+impl TryFrom<ScalarInt> for Double {
+ type Error = Size;
+ #[inline]
+ fn try_from(int: ScalarInt) -> Result<Self, Size> {
+ int.to_bits(Size::from_bytes(8)).map(Self::from_bits)
+ }
+}
+
+impl fmt::Debug for ScalarInt {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // Dispatch to LowerHex below.
+ write!(f, "0x{:x}", self)
+ }
+}
+
+impl fmt::LowerHex for ScalarInt {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.check_data();
+ if f.alternate() {
+ // Like regular ints, alternate flag adds leading `0x`.
+ write!(f, "0x")?;
+ }
+ // Format as hex number wide enough to fit any value of the given `size`.
+ // So data=20, size=1 will be "0x14", but with size=4 it'll be "0x00000014".
+ // Using a block `{self.data}` here to force a copy instead of using `self.data`
+ // directly, because `write!` takes references to its formatting arguments and
+ // would thus borrow `self.data`. Since `Self`
+ // is a packed struct, that would create a possibly unaligned reference, which
+ // is UB.
+ write!(f, "{:01$x}", { self.data }, self.size.get() as usize * 2)
+ }
+}
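
The `{:01$x}` format pads the hex digits to `size * 2` characters, so the printed width always matches the value's byte size, as the comment's `data=20, size=4` example says. A one-line standalone check of that formatting (plain `std` formatting, nothing compiler-specific):

fn main() {
    let (data, size_bytes) = (20u128, 4usize);
    // Width is `size * 2` hex digits, zero-padded: 4 bytes -> 8 digits.
    assert_eq!(format!("{:01$x}", data, size_bytes * 2), "00000014");
}
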
+
+impl fmt::UpperHex for ScalarInt {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.check_data();
+ // Format as hex number wide enough to fit any value of the given `size`.
+ // So data=20, size=1 will be "0x14", but with size=4 it'll be "0x00000014".
+ // Using a block `{self.data}` here to force a copy instead of using `self.data`
+ // directly, because `write!` takes references to its formatting arguments and
+ // would thus borrow `self.data`. Since `Self`
+ // is a packed struct, that would create a possibly unaligned reference, which
+ // is UB.
+ write!(f, "{:01$X}", { self.data }, self.size.get() as usize * 2)
+ }
+}
+
+impl fmt::Display for ScalarInt {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.check_data();
+ write!(f, "{}", { self.data })
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/consts/kind.rs b/compiler/rustc_middle/src/ty/consts/kind.rs
new file mode 100644
index 000000000..cb0137d2e
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/consts/kind.rs
@@ -0,0 +1,239 @@
+use std::convert::TryInto;
+
+use crate::mir::interpret::{AllocId, ConstValue, Scalar};
+use crate::mir::Promoted;
+use crate::ty::subst::{InternalSubsts, SubstsRef};
+use crate::ty::ParamEnv;
+use crate::ty::{self, TyCtxt, TypeVisitable};
+use rustc_errors::ErrorGuaranteed;
+use rustc_hir::def_id::DefId;
+use rustc_macros::HashStable;
+use rustc_target::abi::Size;
+
+use super::ScalarInt;
+/// An unevaluated, potentially generic, constant.
+#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable, Lift)]
+#[derive(Hash, HashStable)]
+pub struct Unevaluated<'tcx, P = Option<Promoted>> {
+ pub def: ty::WithOptConstParam<DefId>,
+ pub substs: SubstsRef<'tcx>,
+ pub promoted: P,
+}
+
+impl<'tcx> Unevaluated<'tcx> {
+ #[inline]
+ pub fn shrink(self) -> Unevaluated<'tcx, ()> {
+ debug_assert_eq!(self.promoted, None);
+ Unevaluated { def: self.def, substs: self.substs, promoted: () }
+ }
+}
+
+impl<'tcx> Unevaluated<'tcx, ()> {
+ #[inline]
+ pub fn expand(self) -> Unevaluated<'tcx> {
+ Unevaluated { def: self.def, substs: self.substs, promoted: None }
+ }
+}
+
+impl<'tcx, P: Default> Unevaluated<'tcx, P> {
+ #[inline]
+ pub fn new(def: ty::WithOptConstParam<DefId>, substs: SubstsRef<'tcx>) -> Unevaluated<'tcx, P> {
+ Unevaluated { def, substs, promoted: Default::default() }
+ }
+}
+
+/// Represents a constant in Rust.
+#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable)]
+#[derive(Hash, HashStable)]
+pub enum ConstKind<'tcx> {
+ /// A const generic parameter.
+ Param(ty::ParamConst),
+
+ /// Infer the value of the const.
+ Infer(InferConst<'tcx>),
+
+ /// Bound const variable, used only when preparing a trait query.
+ Bound(ty::DebruijnIndex, ty::BoundVar),
+
+ /// A placeholder const - universally quantified higher-ranked const.
+ Placeholder(ty::PlaceholderConst<'tcx>),
+
+ /// Used in the HIR for all constants; these are later normalized to one of the other
+ /// variants once the code is monomorphic enough for that.
+ Unevaluated(Unevaluated<'tcx>),
+
+ /// Used to hold a computed value.
+ Value(ty::ValTree<'tcx>),
+
+ /// A placeholder for a const which could not be computed; this is
+ /// propagated to avoid useless error messages.
+ Error(ty::DelaySpanBugEmitted),
+}
+
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+static_assert_size!(ConstKind<'_>, 40);
+
+impl<'tcx> ConstKind<'tcx> {
+ #[inline]
+ pub fn try_to_value(self) -> Option<ty::ValTree<'tcx>> {
+ if let ConstKind::Value(val) = self { Some(val) } else { None }
+ }
+
+ #[inline]
+ pub fn try_to_scalar(self) -> Option<Scalar<AllocId>> {
+ self.try_to_value()?.try_to_scalar()
+ }
+
+ #[inline]
+ pub fn try_to_scalar_int(self) -> Option<ScalarInt> {
+ self.try_to_value()?.try_to_scalar_int()
+ }
+
+ #[inline]
+ pub fn try_to_bits(self, size: Size) -> Option<u128> {
+ self.try_to_scalar_int()?.to_bits(size).ok()
+ }
+
+ #[inline]
+ pub fn try_to_bool(self) -> Option<bool> {
+ self.try_to_scalar_int()?.try_into().ok()
+ }
+
+ #[inline]
+ pub fn try_to_machine_usize(self, tcx: TyCtxt<'tcx>) -> Option<u64> {
+ self.try_to_value()?.try_to_machine_usize(tcx)
+ }
+}
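+
+// Illustrative use of the accessors above (with a hypothetical `kind` value):
+//
+//     if let Some(bits) = kind.try_to_bits(Size::from_bytes(4)) {
+//         // `kind` was a fully evaluated 32-bit scalar constant.
+//     }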
+
+/// An inference variable for a const, for use in const generics.
+#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable, Hash)]
+#[derive(HashStable)]
+pub enum InferConst<'tcx> {
+ /// Infer the value of the const.
+ Var(ty::ConstVid<'tcx>),
+ /// A fresh const variable. See `infer::freshen` for more details.
+ Fresh(u32),
+}
+
+enum EvalMode {
+ Typeck,
+ Mir,
+}
+
+enum EvalResult<'tcx> {
+ ValTree(ty::ValTree<'tcx>),
+ ConstVal(ConstValue<'tcx>),
+}
+
+impl<'tcx> ConstKind<'tcx> {
+ #[inline]
+ /// Tries to evaluate the constant if it is `Unevaluated`. If that doesn't succeed, return the
+ /// unevaluated constant.
+ pub fn eval(self, tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>) -> Self {
+ self.try_eval_for_typeck(tcx, param_env).and_then(Result::ok).map_or(self, ConstKind::Value)
+ }
+
+ #[inline]
+ /// Tries to evaluate the constant if it is `Unevaluated`. If that isn't possible or necessary,
+ /// return `None`.
+ // FIXME(@lcnr): Completely rework the evaluation/normalization system for `ty::Const` once valtrees are merged.
+ pub fn try_eval_for_mir(
+ self,
+ tcx: TyCtxt<'tcx>,
+ param_env: ParamEnv<'tcx>,
+ ) -> Option<Result<ConstValue<'tcx>, ErrorGuaranteed>> {
+ match self.try_eval_inner(tcx, param_env, EvalMode::Mir) {
+ Some(Ok(EvalResult::ValTree(_))) => unreachable!(),
+ Some(Ok(EvalResult::ConstVal(v))) => Some(Ok(v)),
+ Some(Err(e)) => Some(Err(e)),
+ None => None,
+ }
+ }
+
+ #[inline]
+ /// Tries to evaluate the constant if it is `Unevaluated`. If that isn't possible or necessary,
+ /// return `None`.
+ // FIXME(@lcnr): Completely rework the evaluation/normalization system for `ty::Const` once valtrees are merged.
+ pub fn try_eval_for_typeck(
+ self,
+ tcx: TyCtxt<'tcx>,
+ param_env: ParamEnv<'tcx>,
+ ) -> Option<Result<ty::ValTree<'tcx>, ErrorGuaranteed>> {
+ match self.try_eval_inner(tcx, param_env, EvalMode::Typeck) {
+ Some(Ok(EvalResult::ValTree(v))) => Some(Ok(v)),
+ Some(Ok(EvalResult::ConstVal(_))) => unreachable!(),
+ Some(Err(e)) => Some(Err(e)),
+ None => None,
+ }
+ }
+
+ #[inline]
+ fn try_eval_inner(
+ self,
+ tcx: TyCtxt<'tcx>,
+ param_env: ParamEnv<'tcx>,
+ eval_mode: EvalMode,
+ ) -> Option<Result<EvalResult<'tcx>, ErrorGuaranteed>> {
+ if let ConstKind::Unevaluated(unevaluated) = self {
+ use crate::mir::interpret::ErrorHandled;
+
+ // HACK(eddyb) this erases lifetimes even though `const_eval_resolve`
+ // also does later, but we want to do it before checking for
+ // inference variables.
+ // Note that we erase regions *before* calling `with_reveal_all_normalized`,
+ // so that we don't try to invoke this query with
+ // any region variables.
+ let param_env_and = tcx
+ .erase_regions(param_env)
+ .with_reveal_all_normalized(tcx)
+ .and(tcx.erase_regions(unevaluated));
+
+ // HACK(eddyb) when the query key would contain inference variables,
+ // attempt using identity substs and `ParamEnv` instead, that will succeed
+ // when the expression doesn't depend on any parameters.
+ // FIXME(eddyb, skinny121) pass `InferCtxt` into here when it's available, so that
+ // we can call `infcx.const_eval_resolve` which handles inference variables.
+ let param_env_and = if param_env_and.needs_infer() {
+ tcx.param_env(unevaluated.def.did).and(ty::Unevaluated {
+ def: unevaluated.def,
+ substs: InternalSubsts::identity_for_item(tcx, unevaluated.def.did),
+ promoted: unevaluated.promoted,
+ })
+ } else {
+ param_env_and
+ };
+
+ // FIXME(eddyb) maybe the `const_eval_*` methods should take
+ // `ty::ParamEnvAnd` instead of having them separate.
+ let (param_env, unevaluated) = param_env_and.into_parts();
+ // try to resolve e.g. associated constants to their definition on an impl, and then
+ // evaluate the const.
+ match eval_mode {
+ EvalMode::Typeck => {
+ match tcx.const_eval_resolve_for_typeck(param_env, unevaluated, None) {
+ // NOTE(eddyb) `val` contains no lifetimes/types/consts,
+ // and we use the original type, so nothing from `substs`
+ // (which may be identity substs, see above),
+ // can leak through `val` into the const we return.
+ Ok(val) => Some(Ok(EvalResult::ValTree(val?))),
+ Err(ErrorHandled::TooGeneric | ErrorHandled::Linted) => None,
+ Err(ErrorHandled::Reported(e)) => Some(Err(e)),
+ }
+ }
+ EvalMode::Mir => {
+ match tcx.const_eval_resolve(param_env, unevaluated, None) {
+ // NOTE(eddyb) `val` contains no lifetimes/types/consts,
+ // and we use the original type, so nothing from `substs`
+ // (which may be identity substs, see above),
+ // can leak through `val` into the const we return.
+ Ok(val) => Some(Ok(EvalResult::ConstVal(val))),
+ Err(ErrorHandled::TooGeneric | ErrorHandled::Linted) => None,
+ Err(ErrorHandled::Reported(e)) => Some(Err(e)),
+ }
+ }
+ }
+ } else {
+ None
+ }
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/consts/valtree.rs b/compiler/rustc_middle/src/ty/consts/valtree.rs
new file mode 100644
index 000000000..93707bb18
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/consts/valtree.rs
@@ -0,0 +1,104 @@
+use super::ScalarInt;
+use crate::mir::interpret::{AllocId, Scalar};
+use crate::ty::{self, Ty, TyCtxt};
+use rustc_macros::{HashStable, TyDecodable, TyEncodable};
+
+#[derive(Copy, Clone, Debug, Hash, TyEncodable, TyDecodable, Eq, PartialEq, Ord, PartialOrd)]
+#[derive(HashStable)]
+/// This data structure is used to represent the value of constants used in the type system.
+///
+/// We explicitly choose a different data structure from the way values are processed within
+/// CTFE, as in the type system equal values (according to their `PartialEq`) must also have
+/// equal representation (`==` on the rustc data structure, e.g. `ValTree`) and vice versa.
+/// Since CTFE uses `AllocId` to represent pointers, it often happens that two different
+/// `AllocId`s point to equal values. So we may end up with different representations for
+/// two constants whose value is `&42`. Furthermore any kind of struct that has padding will
+/// have arbitrary values within that padding, even if the values of the struct are the same.
+///
+/// `ValTree` does not have this problem with representation, as it only contains integers or
+/// lists of (nested) `ValTree`.
+pub enum ValTree<'tcx> {
+ /// ZSTs, integers, `bool`, `char` are represented as scalars.
+ /// See the `ScalarInt` documentation for how `ScalarInt` guarantees that equal values
+ /// of these types have the same representation.
+ Leaf(ScalarInt),
+
+ // SliceOrStr(ValSlice<'tcx>),
+ // don't use SliceOrStr for now
+ /// The fields of any kind of aggregate. Structs, tuples and arrays are represented by
+ /// listing their fields' values in order.
+ /// Enums are represented by storing their discriminant as a field, followed by all
+ /// the fields of the variant.
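+ /// Illustratively, `(1u8, 2u8)` would be `Branch([Leaf(1), Leaf(2)])`, and a
+ /// one-field struct wrapping `3u8` would be `Branch([Leaf(3)])`.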
+ Branch(&'tcx [ValTree<'tcx>]),
+}
+
+impl<'tcx> ValTree<'tcx> {
+ pub fn zst() -> Self {
+ Self::Branch(&[])
+ }
+
+ #[inline]
+ pub fn unwrap_leaf(self) -> ScalarInt {
+ match self {
+ Self::Leaf(s) => s,
+ _ => bug!("expected leaf, got {:?}", self),
+ }
+ }
+
+ #[inline]
+ pub fn unwrap_branch(self) -> &'tcx [Self] {
+ match self {
+ Self::Branch(branch) => branch,
+ _ => bug!("expected branch, got {:?}", self),
+ }
+ }
+
+ pub fn from_raw_bytes<'a>(tcx: TyCtxt<'tcx>, bytes: &'a [u8]) -> Self {
+ let branches = bytes.iter().map(|b| Self::Leaf(ScalarInt::from(*b)));
+ let interned = tcx.arena.alloc_from_iter(branches);
+
+ Self::Branch(interned)
+ }
+
+ pub fn from_scalar_int(i: ScalarInt) -> Self {
+ Self::Leaf(i)
+ }
+
+ pub fn try_to_scalar(self) -> Option<Scalar<AllocId>> {
+ self.try_to_scalar_int().map(Scalar::Int)
+ }
+
+ pub fn try_to_scalar_int(self) -> Option<ScalarInt> {
+ match self {
+ Self::Leaf(s) => Some(s),
+ Self::Branch(_) => None,
+ }
+ }
+
+ pub fn try_to_machine_usize(self, tcx: TyCtxt<'tcx>) -> Option<u64> {
+ self.try_to_scalar_int().and_then(|s| s.try_to_machine_usize(tcx).ok())
+ }
+
+ /// Gets the values inside the `ValTree` as a slice of bytes. This only works for
+ /// constants with types `&str`, `&[u8]`, or `[u8; _]`.
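+ /// For example (illustrative), a `[u8; 2]` constant with value `[104, 105]`
+ /// yields `Some(&[104, 105])`, while a constant of type `&[u16]` yields `None`.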
+ pub fn try_to_raw_bytes(self, tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Option<&'tcx [u8]> {
+ match ty.kind() {
+ ty::Ref(_, inner_ty, _) => match inner_ty.kind() {
+ // `&str` can be interpreted as raw bytes
+ ty::Str => {}
+ // `&[u8]` can be interpreted as raw bytes
+ ty::Slice(slice_ty) if *slice_ty == tcx.types.u8 => {}
+ // other `&_` can't be interpreted as raw bytes
+ _ => return None,
+ },
+ // `[u8; N]` can be interpreted as raw bytes
+ ty::Array(array_ty, _) if *array_ty == tcx.types.u8 => {}
+ // Otherwise, type cannot be interpreted as raw bytes
+ _ => return None,
+ }
+
+ Some(tcx.arena.alloc_from_iter(
+ self.unwrap_branch().iter().map(|v| v.unwrap_leaf().try_to_u8().unwrap()),
+ ))
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/context.rs b/compiler/rustc_middle/src/ty/context.rs
new file mode 100644
index 000000000..0a0f45ce1
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/context.rs
@@ -0,0 +1,3018 @@
+//! Type context book-keeping.
+
+use crate::arena::Arena;
+use crate::dep_graph::{DepGraph, DepKind, DepKindStruct};
+use crate::hir::place::Place as HirPlace;
+use crate::infer::canonical::{Canonical, CanonicalVarInfo, CanonicalVarInfos};
+use crate::lint::{struct_lint_level, LintLevelSource};
+use crate::middle::codegen_fn_attrs::CodegenFnAttrs;
+use crate::middle::resolve_lifetime;
+use crate::middle::stability;
+use crate::mir::interpret::{self, Allocation, ConstAllocation};
+use crate::mir::{
+ Body, BorrowCheckResult, Field, Local, Place, PlaceElem, ProjectionKind, Promoted,
+};
+use crate::thir::Thir;
+use crate::traits;
+use crate::ty::query::{self, TyCtxtAt};
+use crate::ty::subst::{GenericArg, GenericArgKind, InternalSubsts, Subst, SubstsRef, UserSubsts};
+use crate::ty::{
+ self, AdtDef, AdtDefData, AdtKind, Binder, BindingMode, BoundVar, CanonicalPolyFnSig,
+ ClosureSizeProfileData, Const, ConstS, ConstVid, DefIdTree, ExistentialPredicate, FloatTy,
+ FloatVar, FloatVid, GenericParamDefKind, InferConst, InferTy, IntTy, IntVar, IntVid, List,
+ ParamConst, ParamTy, PolyFnSig, Predicate, PredicateKind, PredicateS, ProjectionTy, Region,
+ RegionKind, ReprOptions, TraitObjectVisitor, Ty, TyKind, TyS, TyVar, TyVid, TypeAndMut, UintTy,
+};
+use rustc_ast as ast;
+use rustc_data_structures::fingerprint::Fingerprint;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::intern::{Interned, WithStableHash};
+use rustc_data_structures::memmap::Mmap;
+use rustc_data_structures::profiling::SelfProfilerRef;
+use rustc_data_structures::sharded::{IntoPointer, ShardedHashMap};
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_data_structures::steal::Steal;
+use rustc_data_structures::sync::{self, Lock, Lrc, ReadGuard, RwLock, WorkerLocal};
+use rustc_data_structures::vec_map::VecMap;
+use rustc_errors::{DecorateLint, ErrorGuaranteed, LintDiagnosticBuilder, MultiSpan};
+use rustc_hir as hir;
+use rustc_hir::def::{DefKind, Res};
+use rustc_hir::def_id::{CrateNum, DefId, DefIdMap, LocalDefId, LOCAL_CRATE};
+use rustc_hir::definitions::Definitions;
+use rustc_hir::intravisit::Visitor;
+use rustc_hir::lang_items::LangItem;
+use rustc_hir::{
+ Constness, ExprKind, HirId, ImplItemKind, ItemKind, ItemLocalId, ItemLocalMap, ItemLocalSet,
+ Node, TraitCandidate, TraitItemKind,
+};
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_macros::HashStable;
+use rustc_middle::mir::FakeReadCause;
+use rustc_query_system::ich::StableHashingContext;
+use rustc_serialize::opaque::{FileEncodeResult, FileEncoder};
+use rustc_session::config::{CrateType, OutputFilenames};
+use rustc_session::cstore::CrateStoreDyn;
+use rustc_session::lint::{Level, Lint};
+use rustc_session::Limit;
+use rustc_session::Session;
+use rustc_span::def_id::{DefPathHash, StableCrateId};
+use rustc_span::source_map::SourceMap;
+use rustc_span::symbol::{kw, sym, Ident, Symbol};
+use rustc_span::{Span, DUMMY_SP};
+use rustc_target::abi::{Layout, LayoutS, TargetDataLayout, VariantIdx};
+use rustc_target::spec::abi;
+use rustc_type_ir::sty::TyKind::*;
+use rustc_type_ir::{InternAs, InternIteratorElement, Interner, TypeFlags};
+
+use std::any::Any;
+use std::borrow::Borrow;
+use std::cmp::Ordering;
+use std::collections::hash_map::{self, Entry};
+use std::fmt;
+use std::hash::{Hash, Hasher};
+use std::iter;
+use std::mem;
+use std::ops::{Bound, Deref};
+use std::sync::Arc;
+
+use super::{ImplPolarity, RvalueScopes};
+
+pub trait OnDiskCache<'tcx>: rustc_data_structures::sync::Sync {
+ /// Creates a new `OnDiskCache` instance from the serialized data in `data`.
+ fn new(sess: &'tcx Session, data: Mmap, start_pos: usize) -> Self
+ where
+ Self: Sized;
+
+ fn new_empty(source_map: &'tcx SourceMap) -> Self
+ where
+ Self: Sized;
+
+ fn drop_serialized_data(&self, tcx: TyCtxt<'tcx>);
+
+ fn serialize(&self, tcx: TyCtxt<'tcx>, encoder: FileEncoder) -> FileEncodeResult;
+}
+
+#[allow(rustc::usage_of_ty_tykind)]
+impl<'tcx> Interner for TyCtxt<'tcx> {
+ type AdtDef = ty::AdtDef<'tcx>;
+ type SubstsRef = ty::SubstsRef<'tcx>;
+ type DefId = DefId;
+ type Ty = Ty<'tcx>;
+ type Const = ty::Const<'tcx>;
+ type Region = Region<'tcx>;
+ type TypeAndMut = TypeAndMut<'tcx>;
+ type Mutability = hir::Mutability;
+ type Movability = hir::Movability;
+ type PolyFnSig = PolyFnSig<'tcx>;
+ type ListBinderExistentialPredicate = &'tcx List<Binder<'tcx, ExistentialPredicate<'tcx>>>;
+ type BinderListTy = Binder<'tcx, &'tcx List<Ty<'tcx>>>;
+ type ListTy = &'tcx List<Ty<'tcx>>;
+ type ProjectionTy = ty::ProjectionTy<'tcx>;
+ type ParamTy = ParamTy;
+ type BoundTy = ty::BoundTy;
+ type PlaceholderType = ty::PlaceholderType;
+ type InferTy = InferTy;
+ type DelaySpanBugEmitted = DelaySpanBugEmitted;
+ type PredicateKind = ty::PredicateKind<'tcx>;
+ type AllocId = crate::mir::interpret::AllocId;
+
+ type EarlyBoundRegion = ty::EarlyBoundRegion;
+ type BoundRegion = ty::BoundRegion;
+ type FreeRegion = ty::FreeRegion;
+ type RegionVid = ty::RegionVid;
+ type PlaceholderRegion = ty::PlaceholderRegion;
+}
+
+/// A type that is not publicly constructable. This prevents people from making [`TyKind::Error`]s
+/// except through the error-reporting functions on a [`tcx`][TyCtxt].
+#[derive(Copy, Clone, Debug, Eq, Hash, PartialEq, PartialOrd, Ord)]
+#[derive(TyEncodable, TyDecodable, HashStable)]
+pub struct DelaySpanBugEmitted {
+ pub reported: ErrorGuaranteed,
+ _priv: (),
+}
+
+type InternedSet<'tcx, T> = ShardedHashMap<InternedInSet<'tcx, T>, ()>;
+
+pub struct CtxtInterners<'tcx> {
+ /// The arena that types, regions, etc. are allocated from.
+ arena: &'tcx WorkerLocal<Arena<'tcx>>,
+
+ // Specifically use a speedy hash algorithm for these hash sets, since
+ // they're accessed quite often.
+ type_: InternedSet<'tcx, WithStableHash<TyS<'tcx>>>,
+ substs: InternedSet<'tcx, InternalSubsts<'tcx>>,
+ canonical_var_infos: InternedSet<'tcx, List<CanonicalVarInfo<'tcx>>>,
+ region: InternedSet<'tcx, RegionKind<'tcx>>,
+ poly_existential_predicates:
+ InternedSet<'tcx, List<ty::Binder<'tcx, ExistentialPredicate<'tcx>>>>,
+ predicate: InternedSet<'tcx, PredicateS<'tcx>>,
+ predicates: InternedSet<'tcx, List<Predicate<'tcx>>>,
+ projs: InternedSet<'tcx, List<ProjectionKind>>,
+ place_elems: InternedSet<'tcx, List<PlaceElem<'tcx>>>,
+ const_: InternedSet<'tcx, ConstS<'tcx>>,
+ const_allocation: InternedSet<'tcx, Allocation>,
+ bound_variable_kinds: InternedSet<'tcx, List<ty::BoundVariableKind>>,
+ layout: InternedSet<'tcx, LayoutS<'tcx>>,
+ adt_def: InternedSet<'tcx, AdtDefData>,
+}
+
+impl<'tcx> CtxtInterners<'tcx> {
+ fn new(arena: &'tcx WorkerLocal<Arena<'tcx>>) -> CtxtInterners<'tcx> {
+ CtxtInterners {
+ arena,
+ type_: Default::default(),
+ substs: Default::default(),
+ region: Default::default(),
+ poly_existential_predicates: Default::default(),
+ canonical_var_infos: Default::default(),
+ predicate: Default::default(),
+ predicates: Default::default(),
+ projs: Default::default(),
+ place_elems: Default::default(),
+ const_: Default::default(),
+ const_allocation: Default::default(),
+ bound_variable_kinds: Default::default(),
+ layout: Default::default(),
+ adt_def: Default::default(),
+ }
+ }
+
+ /// Interns a type.
+ #[allow(rustc::usage_of_ty_tykind)]
+ #[inline(never)]
+ fn intern_ty(
+ &self,
+ kind: TyKind<'tcx>,
+ sess: &Session,
+ definitions: &rustc_hir::definitions::Definitions,
+ cstore: &CrateStoreDyn,
+ source_span: &IndexVec<LocalDefId, Span>,
+ ) -> Ty<'tcx> {
+ Ty(Interned::new_unchecked(
+ self.type_
+ .intern(kind, |kind| {
+ let flags = super::flags::FlagComputation::for_kind(&kind);
+
+ // It's impossible to hash inference regions (and will ICE), so we don't need to try to cache them.
+ // Without incremental, we rarely stable-hash types, so let's not do it proactively.
+ let stable_hash = if flags.flags.intersects(TypeFlags::HAS_RE_INFER)
+ || sess.opts.incremental.is_none()
+ {
+ Fingerprint::ZERO
+ } else {
+ let mut hasher = StableHasher::new();
+ let mut hcx = StableHashingContext::ignore_spans(
+ sess,
+ definitions,
+ cstore,
+ source_span,
+ );
+ kind.hash_stable(&mut hcx, &mut hasher);
+ hasher.finish()
+ };
+
+ let ty_struct = TyS {
+ kind,
+ flags: flags.flags,
+ outer_exclusive_binder: flags.outer_exclusive_binder,
+ };
+
+ InternedInSet(
+ self.arena.alloc(WithStableHash { internee: ty_struct, stable_hash }),
+ )
+ })
+ .0,
+ ))
+ }
+
+ #[inline(never)]
+ fn intern_predicate(&self, kind: Binder<'tcx, PredicateKind<'tcx>>) -> Predicate<'tcx> {
+ Predicate(Interned::new_unchecked(
+ self.predicate
+ .intern(kind, |kind| {
+ let flags = super::flags::FlagComputation::for_predicate(kind);
+
+ let predicate_struct = PredicateS {
+ kind,
+ flags: flags.flags,
+ outer_exclusive_binder: flags.outer_exclusive_binder,
+ };
+
+ InternedInSet(self.arena.alloc(predicate_struct))
+ })
+ .0,
+ ))
+ }
+}
+
+pub struct CommonTypes<'tcx> {
+ pub unit: Ty<'tcx>,
+ pub bool: Ty<'tcx>,
+ pub char: Ty<'tcx>,
+ pub isize: Ty<'tcx>,
+ pub i8: Ty<'tcx>,
+ pub i16: Ty<'tcx>,
+ pub i32: Ty<'tcx>,
+ pub i64: Ty<'tcx>,
+ pub i128: Ty<'tcx>,
+ pub usize: Ty<'tcx>,
+ pub u8: Ty<'tcx>,
+ pub u16: Ty<'tcx>,
+ pub u32: Ty<'tcx>,
+ pub u64: Ty<'tcx>,
+ pub u128: Ty<'tcx>,
+ pub f32: Ty<'tcx>,
+ pub f64: Ty<'tcx>,
+ pub str_: Ty<'tcx>,
+ pub never: Ty<'tcx>,
+ pub self_param: Ty<'tcx>,
+
+ /// Dummy type used for the `Self` of a `TraitRef` created for converting
+ /// a trait object, and which gets removed in `ExistentialTraitRef`.
+ /// This type must not appear anywhere in other converted types.
+ pub trait_object_dummy_self: Ty<'tcx>,
+}
+
+pub struct CommonLifetimes<'tcx> {
+ /// `ReEmpty` in the root universe.
+ pub re_root_empty: Region<'tcx>,
+
+ /// `ReStatic`
+ pub re_static: Region<'tcx>,
+
+ /// Erased region, used outside of type inference.
+ pub re_erased: Region<'tcx>,
+}
+
+pub struct CommonConsts<'tcx> {
+ pub unit: Const<'tcx>,
+}
+
+pub struct LocalTableInContext<'a, V> {
+ hir_owner: LocalDefId,
+ data: &'a ItemLocalMap<V>,
+}
+
+/// Validate that the given HirId (respectively its `local_id` part) can be
+/// safely used as a key in the maps of a TypeckResults. For that to be
+/// the case, the HirId must have the same `owner` as all the other IDs in
+/// this table (signified by `hir_owner`). Otherwise the HirId
+/// would be in a different frame of reference and using its `local_id`
+/// would result in lookup errors, or worse, in silently wrong data being
+/// stored/returned.
+#[inline]
+fn validate_hir_id_for_typeck_results(hir_owner: LocalDefId, hir_id: hir::HirId) {
+ if hir_id.owner != hir_owner {
+ invalid_hir_id_for_typeck_results(hir_owner, hir_id);
+ }
+}
+
+#[cold]
+#[inline(never)]
+fn invalid_hir_id_for_typeck_results(hir_owner: LocalDefId, hir_id: hir::HirId) {
+ ty::tls::with(|tcx| {
+ bug!(
+ "node {} with HirId::owner {:?} cannot be placed in TypeckResults with hir_owner {:?}",
+ tcx.hir().node_to_string(hir_id),
+ hir_id.owner,
+ hir_owner
+ )
+ });
+}
+
+impl<'a, V> LocalTableInContext<'a, V> {
+ pub fn contains_key(&self, id: hir::HirId) -> bool {
+ validate_hir_id_for_typeck_results(self.hir_owner, id);
+ self.data.contains_key(&id.local_id)
+ }
+
+ pub fn get(&self, id: hir::HirId) -> Option<&V> {
+ validate_hir_id_for_typeck_results(self.hir_owner, id);
+ self.data.get(&id.local_id)
+ }
+
+ pub fn iter(&self) -> hash_map::Iter<'_, hir::ItemLocalId, V> {
+ self.data.iter()
+ }
+}
+
+impl<'a, V> ::std::ops::Index<hir::HirId> for LocalTableInContext<'a, V> {
+ type Output = V;
+
+ fn index(&self, key: hir::HirId) -> &V {
+ self.get(key).expect("LocalTableInContext: key not found")
+ }
+}
+
+pub struct LocalTableInContextMut<'a, V> {
+ hir_owner: LocalDefId,
+ data: &'a mut ItemLocalMap<V>,
+}
+
+impl<'a, V> LocalTableInContextMut<'a, V> {
+ pub fn get_mut(&mut self, id: hir::HirId) -> Option<&mut V> {
+ validate_hir_id_for_typeck_results(self.hir_owner, id);
+ self.data.get_mut(&id.local_id)
+ }
+
+ pub fn entry(&mut self, id: hir::HirId) -> Entry<'_, hir::ItemLocalId, V> {
+ validate_hir_id_for_typeck_results(self.hir_owner, id);
+ self.data.entry(id.local_id)
+ }
+
+ pub fn insert(&mut self, id: hir::HirId, val: V) -> Option<V> {
+ validate_hir_id_for_typeck_results(self.hir_owner, id);
+ self.data.insert(id.local_id, val)
+ }
+
+ pub fn remove(&mut self, id: hir::HirId) -> Option<V> {
+ validate_hir_id_for_typeck_results(self.hir_owner, id);
+ self.data.remove(&id.local_id)
+ }
+}
+
+/// Whenever a value may be live across a generator yield, the type of that value winds up in the
+/// `GeneratorInteriorTypeCause` struct. This struct adds additional information about such
+/// captured types that can be useful for diagnostics. In particular, it stores the span that
+/// caused a given type to be recorded, along with the scope that enclosed the value (which can
+/// be used to find the await that the value is live across).
+///
+/// For example:
+///
+/// ```ignore (pseudo-Rust)
+/// async move {
+/// let x: T = expr;
+/// foo.await
+/// ...
+/// }
+/// ```
+///
+/// Here, we would store the type `T`, the span of the value `x`, the "scope-span" for
+/// the scope that contains `x`, the expression that `T` was evaluated from, and the
+/// span of `foo.await`.
+#[derive(TyEncodable, TyDecodable, Clone, Debug, Eq, Hash, PartialEq, HashStable)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub struct GeneratorInteriorTypeCause<'tcx> {
+ /// Type of the captured binding.
+ pub ty: Ty<'tcx>,
+ /// Span of the binding that was captured.
+ pub span: Span,
+ /// Span of the scope of the captured binding.
+ pub scope_span: Option<Span>,
+ /// Span of `.await` or `yield` expression.
+ pub yield_span: Span,
+ /// Expr which the type was evaluated from.
+ pub expr: Option<hir::HirId>,
+}
+
+// This type holds diagnostic information on generators and async functions across crate
+// boundaries and is used to provide better error messages.
+#[derive(TyEncodable, TyDecodable, Clone, Debug, HashStable)]
+pub struct GeneratorDiagnosticData<'tcx> {
+ pub generator_interior_types: ty::Binder<'tcx, Vec<GeneratorInteriorTypeCause<'tcx>>>,
+ pub hir_owner: DefId,
+ pub nodes_types: ItemLocalMap<Ty<'tcx>>,
+ pub adjustments: ItemLocalMap<Vec<ty::adjustment::Adjustment<'tcx>>>,
+}
+
+#[derive(TyEncodable, TyDecodable, Debug, HashStable)]
+pub struct TypeckResults<'tcx> {
+ /// The `HirId::owner` all `ItemLocalId`s in this table are relative to.
+ pub hir_owner: LocalDefId,
+
+ /// Resolved definitions for `<T>::X` associated paths and
+ /// method calls, including those of overloaded operators.
+ type_dependent_defs: ItemLocalMap<Result<(DefKind, DefId), ErrorGuaranteed>>,
+
+ /// Resolved field indices for field accesses in expressions (`S { field }`, `obj.field`)
+ /// or patterns (`S { field }`). The index is often useful by itself, but to learn more
+ /// about the field you also need the definition of the variant to which the field
+ /// belongs; this may not exist if it's a tuple field (`tuple.0`).
+ field_indices: ItemLocalMap<usize>,
+
+ /// Stores the types for various nodes in the AST. Note that this table
+ /// is not guaranteed to be populated outside inference. See
+ /// typeck::check::fn_ctxt for details.
+ node_types: ItemLocalMap<Ty<'tcx>>,
+
+ /// Stores the type parameters which were substituted to obtain the type
+ /// of this node. This only applies to nodes that refer to entities
+ /// parameterized by type parameters, such as generic fns, types, or
+ /// other items.
+ node_substs: ItemLocalMap<SubstsRef<'tcx>>,
+
+ /// This will either store the canonicalized types provided by the user
+ /// or the substitutions that the user explicitly gave (if any) attached
+ /// to `id`. These will not include any inferred values. The canonical form
+ /// is used to capture things like `_` or other unspecified values.
+ ///
+ /// For example, if the user wrote `foo.collect::<Vec<_>>()`, then the
+ /// canonical substitutions would include only `for<X> { Vec<X> }`.
+ ///
+ /// See also `AscribeUserType` statement in MIR.
+ user_provided_types: ItemLocalMap<CanonicalUserType<'tcx>>,
+
+ /// Stores the canonicalized types provided by the user. See also
+ /// `AscribeUserType` statement in MIR.
+ pub user_provided_sigs: DefIdMap<CanonicalPolyFnSig<'tcx>>,
+
+ adjustments: ItemLocalMap<Vec<ty::adjustment::Adjustment<'tcx>>>,
+
+ /// Stores the actual binding mode for all instances of hir::BindingAnnotation.
+ pat_binding_modes: ItemLocalMap<BindingMode>,
+
+ /// Stores the types which were implicitly dereferenced in pattern binding modes
+ /// for later usage in THIR lowering. For example,
+ ///
+ /// ```
+ /// match &&Some(5i32) {
+ /// Some(n) => {},
+ /// _ => {},
+ /// }
+ /// ```
+ /// leads to a `vec![&&Option<i32>, &Option<i32>]`. Empty vectors are not stored.
+ ///
+ /// See:
+ /// <https://github.com/rust-lang/rfcs/blob/master/text/2005-match-ergonomics.md#definitions>
+ pat_adjustments: ItemLocalMap<Vec<Ty<'tcx>>>,
+
+ /// Records the reasons that we picked the kind of each closure;
+ /// not all closures are present in the map.
+ closure_kind_origins: ItemLocalMap<(Span, HirPlace<'tcx>)>,
+
+ /// For each fn, records the "liberated" types of its arguments
+ /// and return type. Liberated means that all bound regions
+ /// (including late-bound regions) are replaced with free
+ /// equivalents. This table is not used in codegen (since regions
+ /// are erased there) and hence is not serialized to metadata.
+ ///
+ /// This table also contains the "revealed" values for any `impl Trait`
+ /// that appear in the signature and whose values are being inferred
+ /// by this function.
+ ///
+ /// # Example
+ ///
+ /// ```rust
+ /// # use std::fmt::Debug;
+ /// fn foo(x: &u32) -> impl Debug { *x }
+ /// ```
+ ///
+ /// The function signature here would be:
+ ///
+ /// ```ignore (illustrative)
+ /// for<'a> fn(&'a u32) -> Foo
+ /// ```
+ ///
+ /// where `Foo` is an opaque type created for this function.
+ ///
+ ///
+ /// The *liberated* form of this would be
+ ///
+ /// ```ignore (illustrative)
+ /// fn(&'a u32) -> u32
+ /// ```
+ ///
+ /// Note that `'a` is not bound (it would be an `ReFree`) and
+ /// that the `Foo` opaque type is replaced by its hidden type.
+ liberated_fn_sigs: ItemLocalMap<ty::FnSig<'tcx>>,
+
+ /// For each FRU expression, record the normalized types of the fields
+ /// of the struct - this is needed because it is non-trivial to
+ /// normalize while preserving regions. This table is used only in
+ /// MIR construction and hence is not serialized to metadata.
+ fru_field_types: ItemLocalMap<Vec<Ty<'tcx>>>,
+
+ /// For every coercion cast we add the HIR node ID of the cast
+ /// expression to this set.
+ coercion_casts: ItemLocalSet,
+
+ /// Set of trait imports actually used in the method resolution.
+ /// This is used for warning about unused imports. During type
+ /// checking, this `Lrc` should not be cloned: it must have a ref-count
+ /// of 1 so that we can insert things into the set mutably.
+ pub used_trait_imports: Lrc<FxHashSet<LocalDefId>>,
+
+ /// If any errors occurred while type-checking this body,
+ /// this field will be set to `Some(ErrorGuaranteed)`.
+ pub tainted_by_errors: Option<ErrorGuaranteed>,
+
+ /// All the opaque types that have hidden types set
+ /// by this function. For return-position-impl-trait we also store the
+ /// type here, so that mir-borrowck can figure out hidden types,
+ /// even if they are only set in dead code (which doesn't show up in MIR).
+ /// For type-alias-impl-trait, this map is only used to prevent query cycles,
+ /// so the hidden types are all `None`.
+ pub concrete_opaque_types: VecMap<LocalDefId, Option<Ty<'tcx>>>,
+
+ /// Tracks the minimum captures required for a closure;
+ /// see `MinCaptureInformationMap` for more details.
+ pub closure_min_captures: ty::MinCaptureInformationMap<'tcx>,
+
+ /// Tracks the fake reads required for a closure and the reason for the fake read.
+ /// When performing pattern matching for closures, there are times we don't end up
+ /// reading places that are mentioned in a closure (because of _ patterns). However,
+ /// to ensure the places are initialized, we introduce fake reads.
+ /// Consider these two examples:
+ /// ```ignore (discriminant matching with only wildcard arm)
+ /// let x: u8;
+ /// let c = || match x { _ => () };
+ /// ```
+ /// In this example, we don't need to actually read/borrow `x` in `c`, and so we don't
+ /// want to capture it. However, we do still want an error here, because `x` must be
+ /// initialized at the point where `c` is created. Therefore, we add a "fake read"
+ /// instead.
+ /// ```ignore (destructured assignments)
+ /// let c = || {
+ /// let (t1, t2) = t;
+ /// };
+ /// ```
+ /// In the second example, we capture the disjoint fields of `t` (`t.0` & `t.1`), but
+ /// we never capture `t`. This becomes an issue when we build MIR as we require
+ /// information on `t` in order to create place `t.0` and `t.1`. We can solve this
+ /// issue by fake reading `t`.
+ pub closure_fake_reads: FxHashMap<LocalDefId, Vec<(HirPlace<'tcx>, FakeReadCause, hir::HirId)>>,
+
+ /// Tracks the rvalue scoping rules which define finer scoping for rvalue expressions
+ /// by applying extended parameter rules.
+ /// Details may be found in `rustc_typeck::check::rvalue_scopes`.
+ pub rvalue_scopes: RvalueScopes,
+
+ /// Stores the type, expression, span and optional scope span of all types
+ /// that are live across the yield of this generator (if a generator).
+ pub generator_interior_types: ty::Binder<'tcx, Vec<GeneratorInteriorTypeCause<'tcx>>>,
+
+ /// We sometimes treat byte string literals (which are of type `&[u8; N]`)
+ /// as `&[u8]`, depending on the pattern in which they are used.
+ /// This hashset records all instances where we behave
+ /// like this to allow `const_to_pat` to reliably handle this situation.
+ pub treat_byte_string_as_slice: ItemLocalSet,
+
+ /// Contains the data for evaluating the effect of feature `capture_disjoint_fields`
+ /// on closure size.
+ pub closure_size_eval: FxHashMap<LocalDefId, ClosureSizeProfileData<'tcx>>,
+}
+
+impl<'tcx> TypeckResults<'tcx> {
+ pub fn new(hir_owner: LocalDefId) -> TypeckResults<'tcx> {
+ TypeckResults {
+ hir_owner,
+ type_dependent_defs: Default::default(),
+ field_indices: Default::default(),
+ user_provided_types: Default::default(),
+ user_provided_sigs: Default::default(),
+ node_types: Default::default(),
+ node_substs: Default::default(),
+ adjustments: Default::default(),
+ pat_binding_modes: Default::default(),
+ pat_adjustments: Default::default(),
+ closure_kind_origins: Default::default(),
+ liberated_fn_sigs: Default::default(),
+ fru_field_types: Default::default(),
+ coercion_casts: Default::default(),
+ used_trait_imports: Lrc::new(Default::default()),
+ tainted_by_errors: None,
+ concrete_opaque_types: Default::default(),
+ closure_min_captures: Default::default(),
+ closure_fake_reads: Default::default(),
+ rvalue_scopes: Default::default(),
+ generator_interior_types: ty::Binder::dummy(Default::default()),
+ treat_byte_string_as_slice: Default::default(),
+ closure_size_eval: Default::default(),
+ }
+ }
+
+ /// Returns the final resolution of a `QPath` in an `Expr` or `Pat` node.
+ pub fn qpath_res(&self, qpath: &hir::QPath<'_>, id: hir::HirId) -> Res {
+ match *qpath {
+ hir::QPath::Resolved(_, ref path) => path.res,
+ hir::QPath::TypeRelative(..) | hir::QPath::LangItem(..) => self
+ .type_dependent_def(id)
+ .map_or(Res::Err, |(kind, def_id)| Res::Def(kind, def_id)),
+ }
+ }
+
+ pub fn type_dependent_defs(
+ &self,
+ ) -> LocalTableInContext<'_, Result<(DefKind, DefId), ErrorGuaranteed>> {
+ LocalTableInContext { hir_owner: self.hir_owner, data: &self.type_dependent_defs }
+ }
+
+ pub fn type_dependent_def(&self, id: HirId) -> Option<(DefKind, DefId)> {
+ validate_hir_id_for_typeck_results(self.hir_owner, id);
+ self.type_dependent_defs.get(&id.local_id).cloned().and_then(|r| r.ok())
+ }
+
+ pub fn type_dependent_def_id(&self, id: HirId) -> Option<DefId> {
+ self.type_dependent_def(id).map(|(_, def_id)| def_id)
+ }
+
+ pub fn type_dependent_defs_mut(
+ &mut self,
+ ) -> LocalTableInContextMut<'_, Result<(DefKind, DefId), ErrorGuaranteed>> {
+ LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.type_dependent_defs }
+ }
+
+ pub fn field_indices(&self) -> LocalTableInContext<'_, usize> {
+ LocalTableInContext { hir_owner: self.hir_owner, data: &self.field_indices }
+ }
+
+ pub fn field_indices_mut(&mut self) -> LocalTableInContextMut<'_, usize> {
+ LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.field_indices }
+ }
+
+ pub fn user_provided_types(&self) -> LocalTableInContext<'_, CanonicalUserType<'tcx>> {
+ LocalTableInContext { hir_owner: self.hir_owner, data: &self.user_provided_types }
+ }
+
+ pub fn user_provided_types_mut(
+ &mut self,
+ ) -> LocalTableInContextMut<'_, CanonicalUserType<'tcx>> {
+ LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.user_provided_types }
+ }
+
+ pub fn node_types(&self) -> LocalTableInContext<'_, Ty<'tcx>> {
+ LocalTableInContext { hir_owner: self.hir_owner, data: &self.node_types }
+ }
+
+ pub fn node_types_mut(&mut self) -> LocalTableInContextMut<'_, Ty<'tcx>> {
+ LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.node_types }
+ }
+
+ pub fn get_generator_diagnostic_data(&self) -> GeneratorDiagnosticData<'tcx> {
+ let generator_interior_type = self.generator_interior_types.map_bound_ref(|vec| {
+ vec.iter()
+ .map(|item| {
+ GeneratorInteriorTypeCause {
+ ty: item.ty,
+ span: item.span,
+ scope_span: item.scope_span,
+ yield_span: item.yield_span,
+ expr: None, //FIXME: Passing expression over crate boundaries is impossible at the moment
+ }
+ })
+ .collect::<Vec<_>>()
+ });
+ GeneratorDiagnosticData {
+ generator_interior_types: generator_interior_type,
+ hir_owner: self.hir_owner.to_def_id(),
+ nodes_types: self.node_types.clone(),
+ adjustments: self.adjustments.clone(),
+ }
+ }
+
+ pub fn node_type(&self, id: hir::HirId) -> Ty<'tcx> {
+ self.node_type_opt(id).unwrap_or_else(|| {
+ bug!("node_type: no type for node `{}`", tls::with(|tcx| tcx.hir().node_to_string(id)))
+ })
+ }
+
+ pub fn node_type_opt(&self, id: hir::HirId) -> Option<Ty<'tcx>> {
+ validate_hir_id_for_typeck_results(self.hir_owner, id);
+ self.node_types.get(&id.local_id).cloned()
+ }
+
+ pub fn node_substs_mut(&mut self) -> LocalTableInContextMut<'_, SubstsRef<'tcx>> {
+ LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.node_substs }
+ }
+
+ pub fn node_substs(&self, id: hir::HirId) -> SubstsRef<'tcx> {
+ validate_hir_id_for_typeck_results(self.hir_owner, id);
+ self.node_substs.get(&id.local_id).cloned().unwrap_or_else(|| InternalSubsts::empty())
+ }
+
+ pub fn node_substs_opt(&self, id: hir::HirId) -> Option<SubstsRef<'tcx>> {
+ validate_hir_id_for_typeck_results(self.hir_owner, id);
+ self.node_substs.get(&id.local_id).cloned()
+ }
+
+ // Returns the type of a pattern as a monotype. Like `expr_ty`, this function
+ // doesn't provide type parameter substitutions.
+ pub fn pat_ty(&self, pat: &hir::Pat<'_>) -> Ty<'tcx> {
+ self.node_type(pat.hir_id)
+ }
+
+ // Returns the type of an expression as a monotype.
+ //
+ // NB (1): This is the PRE-ADJUSTMENT TYPE for the expression. That is, in
+ // some cases, we insert `Adjustment` annotations such as auto-deref or
+ // auto-ref. The type returned by this function does not consider such
+ // adjustments. See `expr_ty_adjusted()` instead.
+ //
+ // NB (2): This type doesn't provide type parameter substitutions; e.g., if you
+ // ask for the type of `id` in `id(3)`, it will return `fn(&isize) -> isize`
+ // instead of `fn(ty) -> T` with `T = isize`.
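+ //
+ // Illustrative: for the receiver `s` in `s.len()` with `s: String`, `expr_ty`
+ // reports `String`, while `expr_ty_adjusted` reflects the recorded
+ // auto-deref/auto-ref adjustments (roughly `String` -> `str` -> `&str`).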
+ pub fn expr_ty(&self, expr: &hir::Expr<'_>) -> Ty<'tcx> {
+ self.node_type(expr.hir_id)
+ }
+
+ pub fn expr_ty_opt(&self, expr: &hir::Expr<'_>) -> Option<Ty<'tcx>> {
+ self.node_type_opt(expr.hir_id)
+ }
+
+ pub fn adjustments(&self) -> LocalTableInContext<'_, Vec<ty::adjustment::Adjustment<'tcx>>> {
+ LocalTableInContext { hir_owner: self.hir_owner, data: &self.adjustments }
+ }
+
+ pub fn adjustments_mut(
+ &mut self,
+ ) -> LocalTableInContextMut<'_, Vec<ty::adjustment::Adjustment<'tcx>>> {
+ LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.adjustments }
+ }
+
+ pub fn expr_adjustments(&self, expr: &hir::Expr<'_>) -> &[ty::adjustment::Adjustment<'tcx>] {
+ validate_hir_id_for_typeck_results(self.hir_owner, expr.hir_id);
+ self.adjustments.get(&expr.hir_id.local_id).map_or(&[], |a| &a[..])
+ }
+
+ /// Returns the type of `expr`, considering any `Adjustment`
+ /// entry recorded for that expression.
+ pub fn expr_ty_adjusted(&self, expr: &hir::Expr<'_>) -> Ty<'tcx> {
+ self.expr_adjustments(expr).last().map_or_else(|| self.expr_ty(expr), |adj| adj.target)
+ }
+
+ pub fn expr_ty_adjusted_opt(&self, expr: &hir::Expr<'_>) -> Option<Ty<'tcx>> {
+ self.expr_adjustments(expr).last().map(|adj| adj.target).or_else(|| self.expr_ty_opt(expr))
+ }
+
+ pub fn is_method_call(&self, expr: &hir::Expr<'_>) -> bool {
+ // Only paths and method calls/overloaded operators have
+ // entries in type_dependent_defs, ignore the former here.
+ if let hir::ExprKind::Path(_) = expr.kind {
+ return false;
+ }
+
+ matches!(self.type_dependent_defs().get(expr.hir_id), Some(Ok((DefKind::AssocFn, _))))
+ }
+
+ pub fn extract_binding_mode(&self, s: &Session, id: HirId, sp: Span) -> Option<BindingMode> {
+ self.pat_binding_modes().get(id).copied().or_else(|| {
+ s.delay_span_bug(sp, "missing binding mode");
+ None
+ })
+ }
+
+ pub fn pat_binding_modes(&self) -> LocalTableInContext<'_, BindingMode> {
+ LocalTableInContext { hir_owner: self.hir_owner, data: &self.pat_binding_modes }
+ }
+
+ pub fn pat_binding_modes_mut(&mut self) -> LocalTableInContextMut<'_, BindingMode> {
+ LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.pat_binding_modes }
+ }
+
+ pub fn pat_adjustments(&self) -> LocalTableInContext<'_, Vec<Ty<'tcx>>> {
+ LocalTableInContext { hir_owner: self.hir_owner, data: &self.pat_adjustments }
+ }
+
+ pub fn pat_adjustments_mut(&mut self) -> LocalTableInContextMut<'_, Vec<Ty<'tcx>>> {
+ LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.pat_adjustments }
+ }
+
+ /// For a given closure, returns the iterator of `ty::CapturedPlace`s that are captured
+ /// by the closure.
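+ /// For example (illustrative), a closure that captures only `t.0` and `t.1`
+ /// yields the two corresponding `CapturedPlace`s as one flat iterator.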
+ pub fn closure_min_captures_flattened(
+ &self,
+ closure_def_id: LocalDefId,
+ ) -> impl Iterator<Item = &ty::CapturedPlace<'tcx>> {
+ self.closure_min_captures
+ .get(&closure_def_id)
+ .map(|closure_min_captures| closure_min_captures.values().flat_map(|v| v.iter()))
+ .into_iter()
+ .flatten()
+ }
+
+ pub fn closure_kind_origins(&self) -> LocalTableInContext<'_, (Span, HirPlace<'tcx>)> {
+ LocalTableInContext { hir_owner: self.hir_owner, data: &self.closure_kind_origins }
+ }
+
+ pub fn closure_kind_origins_mut(
+ &mut self,
+ ) -> LocalTableInContextMut<'_, (Span, HirPlace<'tcx>)> {
+ LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.closure_kind_origins }
+ }
+
+ pub fn liberated_fn_sigs(&self) -> LocalTableInContext<'_, ty::FnSig<'tcx>> {
+ LocalTableInContext { hir_owner: self.hir_owner, data: &self.liberated_fn_sigs }
+ }
+
+ pub fn liberated_fn_sigs_mut(&mut self) -> LocalTableInContextMut<'_, ty::FnSig<'tcx>> {
+ LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.liberated_fn_sigs }
+ }
+
+ pub fn fru_field_types(&self) -> LocalTableInContext<'_, Vec<Ty<'tcx>>> {
+ LocalTableInContext { hir_owner: self.hir_owner, data: &self.fru_field_types }
+ }
+
+ pub fn fru_field_types_mut(&mut self) -> LocalTableInContextMut<'_, Vec<Ty<'tcx>>> {
+ LocalTableInContextMut { hir_owner: self.hir_owner, data: &mut self.fru_field_types }
+ }
+
+ pub fn is_coercion_cast(&self, hir_id: hir::HirId) -> bool {
+ validate_hir_id_for_typeck_results(self.hir_owner, hir_id);
+ self.coercion_casts.contains(&hir_id.local_id)
+ }
+
+ pub fn set_coercion_cast(&mut self, id: ItemLocalId) {
+ self.coercion_casts.insert(id);
+ }
+
+ pub fn coercion_casts(&self) -> &ItemLocalSet {
+ &self.coercion_casts
+ }
+}
+
+rustc_index::newtype_index! {
+ pub struct UserTypeAnnotationIndex {
+ derive [HashStable]
+ DEBUG_FORMAT = "UserType({})",
+ const START_INDEX = 0,
+ }
+}
+
+/// Mapping of type annotation indices to canonical user type annotations.
+pub type CanonicalUserTypeAnnotations<'tcx> =
+ IndexVec<UserTypeAnnotationIndex, CanonicalUserTypeAnnotation<'tcx>>;
+
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable, Lift)]
+pub struct CanonicalUserTypeAnnotation<'tcx> {
+ pub user_ty: CanonicalUserType<'tcx>,
+ pub span: Span,
+ pub inferred_ty: Ty<'tcx>,
+}
+
+/// Canonicalized user type annotation.
+pub type CanonicalUserType<'tcx> = Canonical<'tcx, UserType<'tcx>>;
+
+impl<'tcx> CanonicalUserType<'tcx> {
+ /// Returns `true` if this represents a substitution of the form `[?0, ?1, ?2]`,
+ /// i.e., each thing is mapped to a canonical variable with the same index.
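+ ///
+ /// Illustratively, substitutions of the form `[?0, ?1]` satisfy this, while
+ /// `[?1, ?0]` or `[u32, ?0]` do not.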
+ pub fn is_identity(&self) -> bool {
+ match self.value {
+ UserType::Ty(_) => false,
+ UserType::TypeOf(_, user_substs) => {
+ if user_substs.user_self_ty.is_some() {
+ return false;
+ }
+
+ iter::zip(user_substs.substs, BoundVar::new(0)..).all(|(kind, cvar)| {
+ match kind.unpack() {
+ GenericArgKind::Type(ty) => match ty.kind() {
+ ty::Bound(debruijn, b) => {
+ // We only allow a `ty::INNERMOST` index in substitutions.
+ assert_eq!(*debruijn, ty::INNERMOST);
+ cvar == b.var
+ }
+ _ => false,
+ },
+
+ GenericArgKind::Lifetime(r) => match *r {
+ ty::ReLateBound(debruijn, br) => {
+ // We only allow a `ty::INNERMOST` index in substitutions.
+ assert_eq!(debruijn, ty::INNERMOST);
+ cvar == br.var
+ }
+ _ => false,
+ },
+
+ GenericArgKind::Const(ct) => match ct.kind() {
+ ty::ConstKind::Bound(debruijn, b) => {
+ // We only allow a `ty::INNERMOST` index in substitutions.
+ assert_eq!(debruijn, ty::INNERMOST);
+ cvar == b
+ }
+ _ => false,
+ },
+ }
+ })
+ }
+ }
+ }
+}
+
+/// A user-given type annotation attached to a constant. These arise
+/// from constants that are named via paths, like `Foo::<A>::new` and
+/// so forth.
+#[derive(Copy, Clone, Debug, PartialEq, TyEncodable, TyDecodable)]
+#[derive(HashStable, TypeFoldable, TypeVisitable, Lift)]
+pub enum UserType<'tcx> {
+ Ty(Ty<'tcx>),
+
+ /// The canonical type is the result of `type_of(def_id)` with the
+ /// given substitutions applied.
+ TypeOf(DefId, UserSubsts<'tcx>),
+}
+
+impl<'tcx> CommonTypes<'tcx> {
+ fn new(
+ interners: &CtxtInterners<'tcx>,
+ sess: &Session,
+ definitions: &rustc_hir::definitions::Definitions,
+ cstore: &CrateStoreDyn,
+ source_span: &IndexVec<LocalDefId, Span>,
+ ) -> CommonTypes<'tcx> {
+ let mk = |ty| interners.intern_ty(ty, sess, definitions, cstore, source_span);
+
+ CommonTypes {
+ unit: mk(Tuple(List::empty())),
+ bool: mk(Bool),
+ char: mk(Char),
+ never: mk(Never),
+ isize: mk(Int(ty::IntTy::Isize)),
+ i8: mk(Int(ty::IntTy::I8)),
+ i16: mk(Int(ty::IntTy::I16)),
+ i32: mk(Int(ty::IntTy::I32)),
+ i64: mk(Int(ty::IntTy::I64)),
+ i128: mk(Int(ty::IntTy::I128)),
+ usize: mk(Uint(ty::UintTy::Usize)),
+ u8: mk(Uint(ty::UintTy::U8)),
+ u16: mk(Uint(ty::UintTy::U16)),
+ u32: mk(Uint(ty::UintTy::U32)),
+ u64: mk(Uint(ty::UintTy::U64)),
+ u128: mk(Uint(ty::UintTy::U128)),
+ f32: mk(Float(ty::FloatTy::F32)),
+ f64: mk(Float(ty::FloatTy::F64)),
+ str_: mk(Str),
+ self_param: mk(ty::Param(ty::ParamTy { index: 0, name: kw::SelfUpper })),
+
+ trait_object_dummy_self: mk(Infer(ty::FreshTy(0))),
+ }
+ }
+}
+
+impl<'tcx> CommonLifetimes<'tcx> {
+ fn new(interners: &CtxtInterners<'tcx>) -> CommonLifetimes<'tcx> {
+ let mk = |r| {
+ Region(Interned::new_unchecked(
+ interners.region.intern(r, |r| InternedInSet(interners.arena.alloc(r))).0,
+ ))
+ };
+
+ CommonLifetimes {
+ re_root_empty: mk(ty::ReEmpty(ty::UniverseIndex::ROOT)),
+ re_static: mk(ty::ReStatic),
+ re_erased: mk(ty::ReErased),
+ }
+ }
+}
+
+impl<'tcx> CommonConsts<'tcx> {
+ fn new(interners: &CtxtInterners<'tcx>, types: &CommonTypes<'tcx>) -> CommonConsts<'tcx> {
+ let mk_const = |c| {
+ Const(Interned::new_unchecked(
+ interners.const_.intern(c, |c| InternedInSet(interners.arena.alloc(c))).0,
+ ))
+ };
+
+ CommonConsts {
+ unit: mk_const(ty::ConstS {
+ kind: ty::ConstKind::Value(ty::ValTree::zst()),
+ ty: types.unit,
+ }),
+ }
+ }
+}
+
+// This struct contains information regarding the `ReFree(FreeRegion)` corresponding to a lifetime
+// conflict.
+#[derive(Debug)]
+pub struct FreeRegionInfo {
+ // `LocalDefId` corresponding to the free region.
+ pub def_id: LocalDefId,
+ // The bound region corresponding to the free region.
+ pub boundregion: ty::BoundRegionKind,
+ // Whether the bound region is in an impl item.
+ pub is_impl_item: bool,
+}
+
+/// The central data structure of the compiler. It stores references
+/// to the various **arenas** and also houses the results of the
+/// various **compiler queries** that have been performed. See the
+/// [rustc dev guide] for more details.
+///
+/// [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/ty.html
+#[derive(Copy, Clone)]
+#[rustc_diagnostic_item = "TyCtxt"]
+#[rustc_pass_by_value]
+pub struct TyCtxt<'tcx> {
+ gcx: &'tcx GlobalCtxt<'tcx>,
+}
+
+impl<'tcx> Deref for TyCtxt<'tcx> {
+ type Target = &'tcx GlobalCtxt<'tcx>;
+ #[inline(always)]
+ fn deref(&self) -> &Self::Target {
+ &self.gcx
+ }
+}
+
+pub struct GlobalCtxt<'tcx> {
+ pub arena: &'tcx WorkerLocal<Arena<'tcx>>,
+ pub hir_arena: &'tcx WorkerLocal<hir::Arena<'tcx>>,
+
+ interners: CtxtInterners<'tcx>,
+
+ pub sess: &'tcx Session,
+
+ /// This only ever stores a `LintStore` but we don't want a dependency on that type here.
+ ///
+ /// FIXME(Centril): consider `dyn LintStoreMarker` once
+ /// we can upcast to `Any` for some additional type safety.
+ pub lint_store: Lrc<dyn Any + sync::Sync + sync::Send>,
+
+ pub dep_graph: DepGraph,
+
+ pub prof: SelfProfilerRef,
+
+ /// Common types, pre-interned for your convenience.
+ pub types: CommonTypes<'tcx>,
+
+ /// Common lifetimes, pre-interned for your convenience.
+ pub lifetimes: CommonLifetimes<'tcx>,
+
+ /// Common consts, pre-interned for your convenience.
+ pub consts: CommonConsts<'tcx>,
+
+ definitions: RwLock<Definitions>,
+ cstore: Box<CrateStoreDyn>,
+
+ /// Output of the resolver.
+ pub(crate) untracked_resolutions: ty::ResolverOutputs,
+ untracked_resolver_for_lowering: Steal<ty::ResolverAstLowering>,
+ /// The entire crate as AST. This field serves as the input for the hir_crate query,
+ /// which lowers it from AST to HIR. It must not be read or used by anything else.
+ pub untracked_crate: Steal<Lrc<ast::Crate>>,
+
+ /// This provides access to the incremental compilation on-disk cache for query results.
+ /// Do not access this directly. It is only meant to be used by
+ /// `DepGraph::try_mark_green()` and the query infrastructure.
+ /// This is `None` if we are not in incremental compilation mode.
+ pub on_disk_cache: Option<&'tcx dyn OnDiskCache<'tcx>>,
+
+ pub queries: &'tcx dyn query::QueryEngine<'tcx>,
+ pub query_caches: query::QueryCaches<'tcx>,
+ query_kinds: &'tcx [DepKindStruct],
+
+ // Internal caches for metadata decoding. No need to track deps on this.
+ pub ty_rcache: Lock<FxHashMap<ty::CReaderCacheKey, Ty<'tcx>>>,
+ pub pred_rcache: Lock<FxHashMap<ty::CReaderCacheKey, Predicate<'tcx>>>,
+
+ /// Caches the results of trait selection. This cache is used
+ /// for things that do not have to do with the parameters in scope.
+ pub selection_cache: traits::SelectionCache<'tcx>,
+
+ /// Caches the results of trait evaluation. This cache is used
+ /// for things that do not have to do with the parameters in scope.
+ /// FIXME: Merge this with `selection_cache`?
+ pub evaluation_cache: traits::EvaluationCache<'tcx>,
+
+ /// The definite name of the current crate after taking into account
+ /// attributes, command-line parameters, etc.
+ crate_name: Symbol,
+
+ /// Data layout specification for the current target.
+ pub data_layout: TargetDataLayout,
+
+ /// Stores memory for globals (statics/consts).
+ pub(crate) alloc_map: Lock<interpret::AllocMap<'tcx>>,
+
+ output_filenames: Arc<OutputFilenames>,
+}
+
+impl<'tcx> TyCtxt<'tcx> {
+ /// Expects a body and returns its codegen attributes.
+ ///
+ /// Unlike `codegen_fn_attrs`, this returns `CodegenFnAttrs::EMPTY` for
+ /// constants.
+ pub fn body_codegen_attrs(self, def_id: DefId) -> &'tcx CodegenFnAttrs {
+ let def_kind = self.def_kind(def_id);
+ if def_kind.has_codegen_attrs() {
+ self.codegen_fn_attrs(def_id)
+ } else if matches!(
+ def_kind,
+ DefKind::AnonConst | DefKind::AssocConst | DefKind::Const | DefKind::InlineConst
+ ) {
+ CodegenFnAttrs::EMPTY
+ } else {
+ bug!(
+ "body_codegen_fn_attrs called on unexpected definition: {:?} {:?}",
+ def_id,
+ def_kind
+ )
+ }
+ }
+
+ pub fn typeck_opt_const_arg(
+ self,
+ def: ty::WithOptConstParam<LocalDefId>,
+ ) -> &'tcx TypeckResults<'tcx> {
+ if let Some(param_did) = def.const_param_did {
+ self.typeck_const_arg((def.did, param_did))
+ } else {
+ self.typeck(def.did)
+ }
+ }
+
+ pub fn mir_borrowck_opt_const_arg(
+ self,
+ def: ty::WithOptConstParam<LocalDefId>,
+ ) -> &'tcx BorrowCheckResult<'tcx> {
+ if let Some(param_did) = def.const_param_did {
+ self.mir_borrowck_const_arg((def.did, param_did))
+ } else {
+ self.mir_borrowck(def.did)
+ }
+ }
+
+ pub fn alloc_steal_thir(self, thir: Thir<'tcx>) -> &'tcx Steal<Thir<'tcx>> {
+ self.arena.alloc(Steal::new(thir))
+ }
+
+ pub fn alloc_steal_mir(self, mir: Body<'tcx>) -> &'tcx Steal<Body<'tcx>> {
+ self.arena.alloc(Steal::new(mir))
+ }
+
+ pub fn alloc_steal_promoted(
+ self,
+ promoted: IndexVec<Promoted, Body<'tcx>>,
+ ) -> &'tcx Steal<IndexVec<Promoted, Body<'tcx>>> {
+ self.arena.alloc(Steal::new(promoted))
+ }
+
+ pub fn alloc_adt_def(
+ self,
+ did: DefId,
+ kind: AdtKind,
+ variants: IndexVec<VariantIdx, ty::VariantDef>,
+ repr: ReprOptions,
+ ) -> ty::AdtDef<'tcx> {
+ self.intern_adt_def(ty::AdtDefData::new(self, did, kind, variants, repr))
+ }
+
+ /// Allocates a read-only byte or string literal for `mir::interpret`.
+ pub fn allocate_bytes(self, bytes: &[u8]) -> interpret::AllocId {
+ // Create an allocation that just contains these bytes.
+ let alloc = interpret::Allocation::from_bytes_byte_aligned_immutable(bytes);
+ let alloc = self.intern_const_alloc(alloc);
+ self.create_memory_alloc(alloc)
+ }
+
+ /// Returns a range of the start/end indices specified with the
+ /// `rustc_layout_scalar_valid_range` attribute.
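+ ///
+ /// For example (illustrative), `#[rustc_layout_scalar_valid_range_start(1)]`
+ /// on a struct makes this return `(Bound::Included(1), Bound::Unbounded)`.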
+ // FIXME(eddyb) this is an awkward spot for this method, maybe move it?
+ pub fn layout_scalar_valid_range(self, def_id: DefId) -> (Bound<u128>, Bound<u128>) {
+ let get = |name| {
+ let Some(attr) = self.get_attr(def_id, name) else {
+ return Bound::Unbounded;
+ };
+ debug!("layout_scalar_valid_range: attr={:?}", attr);
+ if let Some(
+ &[
+ ast::NestedMetaItem::Literal(ast::Lit {
+ kind: ast::LitKind::Int(a, _), ..
+ }),
+ ],
+ ) = attr.meta_item_list().as_deref()
+ {
+ Bound::Included(a)
+ } else {
+ self.sess
+ .delay_span_bug(attr.span, "invalid rustc_layout_scalar_valid_range attribute");
+ Bound::Unbounded
+ }
+ };
+ (
+ get(sym::rustc_layout_scalar_valid_range_start),
+ get(sym::rustc_layout_scalar_valid_range_end),
+ )
+ }
+
+ pub fn lift<T: Lift<'tcx>>(self, value: T) -> Option<T::Lifted> {
+ value.lift_to_tcx(self)
+ }
+
+ /// Creates a type context and calls the closure with a `TyCtxt` reference
+ /// to the context. The closure enforces that the type context and any interned
+ /// value (types, substs, etc.) can only be used while `ty::tls` has a valid
+ /// reference to the context, to allow formatting values that need it.
+ pub fn create_global_ctxt(
+ s: &'tcx Session,
+ lint_store: Lrc<dyn Any + sync::Send + sync::Sync>,
+ arena: &'tcx WorkerLocal<Arena<'tcx>>,
+ hir_arena: &'tcx WorkerLocal<hir::Arena<'tcx>>,
+ definitions: Definitions,
+ cstore: Box<CrateStoreDyn>,
+ untracked_resolutions: ty::ResolverOutputs,
+ untracked_resolver_for_lowering: ty::ResolverAstLowering,
+ krate: Lrc<ast::Crate>,
+ dep_graph: DepGraph,
+ on_disk_cache: Option<&'tcx dyn OnDiskCache<'tcx>>,
+ queries: &'tcx dyn query::QueryEngine<'tcx>,
+ query_kinds: &'tcx [DepKindStruct],
+ crate_name: &str,
+ output_filenames: OutputFilenames,
+ ) -> GlobalCtxt<'tcx> {
+ let data_layout = TargetDataLayout::parse(&s.target).unwrap_or_else(|err| {
+ s.fatal(&err);
+ });
+ let interners = CtxtInterners::new(arena);
+ let common_types = CommonTypes::new(
+ &interners,
+ s,
+ &definitions,
+ &*cstore,
+ // This is only used to create a stable hashing context.
+ &untracked_resolutions.source_span,
+ );
+ let common_lifetimes = CommonLifetimes::new(&interners);
+ let common_consts = CommonConsts::new(&interners, &common_types);
+
+ GlobalCtxt {
+ sess: s,
+ lint_store,
+ arena,
+ hir_arena,
+ interners,
+ dep_graph,
+ definitions: RwLock::new(definitions),
+ cstore,
+ prof: s.prof.clone(),
+ types: common_types,
+ lifetimes: common_lifetimes,
+ consts: common_consts,
+ untracked_resolutions,
+ untracked_resolver_for_lowering: Steal::new(untracked_resolver_for_lowering),
+ untracked_crate: Steal::new(krate),
+ on_disk_cache,
+ queries,
+ query_caches: query::QueryCaches::default(),
+ query_kinds,
+ ty_rcache: Default::default(),
+ pred_rcache: Default::default(),
+ selection_cache: Default::default(),
+ evaluation_cache: Default::default(),
+ crate_name: Symbol::intern(crate_name),
+ data_layout,
+ alloc_map: Lock::new(interpret::AllocMap::new()),
+ output_filenames: Arc::new(output_filenames),
+ }
+ }
+
+ pub(crate) fn query_kind(self, k: DepKind) -> &'tcx DepKindStruct {
+ &self.query_kinds[k as usize]
+ }
+
+ /// Constructs a `TyKind::Error` type and registers a `delay_span_bug` to ensure it gets used.
+ #[track_caller]
+ pub fn ty_error(self) -> Ty<'tcx> {
+ self.ty_error_with_message(DUMMY_SP, "TyKind::Error constructed but no error reported")
+ }
+
+ /// Constructs a `TyKind::Error` type and registers a `delay_span_bug` with the given `msg` to
+ /// ensure it gets used.
+ #[track_caller]
+ pub fn ty_error_with_message<S: Into<MultiSpan>>(self, span: S, msg: &str) -> Ty<'tcx> {
+ let reported = self.sess.delay_span_bug(span, msg);
+ self.mk_ty(Error(DelaySpanBugEmitted { reported, _priv: () }))
+ }
+
+ /// Like [TyCtxt::ty_error] but for constants.
+ #[track_caller]
+ pub fn const_error(self, ty: Ty<'tcx>) -> Const<'tcx> {
+ self.const_error_with_message(
+ ty,
+ DUMMY_SP,
+ "ty::ConstKind::Error constructed but no error reported",
+ )
+ }
+
+ /// Like [TyCtxt::ty_error_with_message] but for constants.
+ #[track_caller]
+ pub fn const_error_with_message<S: Into<MultiSpan>>(
+ self,
+ ty: Ty<'tcx>,
+ span: S,
+ msg: &str,
+ ) -> Const<'tcx> {
+ let reported = self.sess.delay_span_bug(span, msg);
+ self.mk_const(ty::ConstS {
+ kind: ty::ConstKind::Error(DelaySpanBugEmitted { reported, _priv: () }),
+ ty,
+ })
+ }
+
+ pub fn consider_optimizing<T: Fn() -> String>(self, msg: T) -> bool {
+ let cname = self.crate_name(LOCAL_CRATE);
+ self.sess.consider_optimizing(cname.as_str(), msg)
+ }
+
+ /// Obtain all lang items of this crate and all dependencies (recursively).
+ pub fn lang_items(self) -> &'tcx rustc_hir::lang_items::LanguageItems {
+ self.get_lang_items(())
+ }
+
+ /// Obtain the given diagnostic item's `DefId`. Use `is_diagnostic_item` if you just want to
+ /// compare against another `DefId`, since `is_diagnostic_item` is cheaper.
+ pub fn get_diagnostic_item(self, name: Symbol) -> Option<DefId> {
+ self.all_diagnostic_items(()).name_to_id.get(&name).copied()
+ }
+
+ /// Obtain the diagnostic item's name, if `id` refers to one.
+ pub fn get_diagnostic_name(self, id: DefId) -> Option<Symbol> {
+ self.diagnostic_items(id.krate).id_to_name.get(&id).copied()
+ }
+
+ /// Check whether the diagnostic item with the given `name` has the given `DefId`.
+ pub fn is_diagnostic_item(self, name: Symbol, did: DefId) -> bool {
+ self.diagnostic_items(did.krate).name_to_id.get(&name) == Some(&did)
+ }
+
+ pub fn stability(self) -> &'tcx stability::Index {
+ self.stability_index(())
+ }
+
+ pub fn features(self) -> &'tcx rustc_feature::Features {
+ self.features_query(())
+ }
+
+ pub fn def_key(self, id: DefId) -> rustc_hir::definitions::DefKey {
+ // Accessing the DefKey is ok, since it is part of DefPathHash.
+ if let Some(id) = id.as_local() {
+ self.definitions_untracked().def_key(id)
+ } else {
+ self.cstore.def_key(id)
+ }
+ }
+
+ /// Converts a `DefId` into its fully expanded `DefPath` (every
+ /// `DefId` is really just an interned `DefPath`).
+ ///
+ /// Note that if `id` is not local to this crate, the result will
+ /// be a non-local `DefPath`.
+ pub fn def_path(self, id: DefId) -> rustc_hir::definitions::DefPath {
+ // Accessing the DefPath is ok, since it is part of DefPathHash.
+ if let Some(id) = id.as_local() {
+ self.definitions_untracked().def_path(id)
+ } else {
+ self.cstore.def_path(id)
+ }
+ }
+
+ #[inline]
+ pub fn def_path_hash(self, def_id: DefId) -> rustc_hir::definitions::DefPathHash {
+ // Accessing the DefPathHash is ok, since it is incr. comp. stable.
+ if let Some(def_id) = def_id.as_local() {
+ self.definitions_untracked().def_path_hash(def_id)
+ } else {
+ self.cstore.def_path_hash(def_id)
+ }
+ }
+
+ #[inline]
+ pub fn stable_crate_id(self, crate_num: CrateNum) -> StableCrateId {
+ if crate_num == LOCAL_CRATE {
+ self.sess.local_stable_crate_id()
+ } else {
+ self.cstore.stable_crate_id(crate_num)
+ }
+ }
+
+ /// Maps a StableCrateId to the corresponding CrateNum. This method assumes
+ /// that the crate in question has already been loaded by the CrateStore.
+ #[inline]
+ pub fn stable_crate_id_to_crate_num(self, stable_crate_id: StableCrateId) -> CrateNum {
+ if stable_crate_id == self.sess.local_stable_crate_id() {
+ LOCAL_CRATE
+ } else {
+ self.cstore.stable_crate_id_to_crate_num(stable_crate_id)
+ }
+ }
+
+ /// Converts a `DefPathHash` to its corresponding `DefId` in the current compilation
+ /// session, if it still exists. This is used during incremental compilation to
+ /// turn a deserialized `DefPathHash` into its current `DefId`.
+ pub fn def_path_hash_to_def_id(self, hash: DefPathHash, err: &mut dyn FnMut() -> !) -> DefId {
+ debug!("def_path_hash_to_def_id({:?})", hash);
+
+ let stable_crate_id = hash.stable_crate_id();
+
+ // If this is a DefPathHash from the local crate, we can look up the
+ // DefId in the tcx's `Definitions`.
+ if stable_crate_id == self.sess.local_stable_crate_id() {
+ self.definitions.read().local_def_path_hash_to_def_id(hash, err).to_def_id()
+ } else {
+ // If this is a DefPathHash from an upstream crate, let the CrateStore map
+ // it to a DefId.
+ let cnum = self.cstore.stable_crate_id_to_crate_num(stable_crate_id);
+ self.cstore.def_path_hash_to_def_id(cnum, hash)
+ }
+ }
+
+ pub fn def_path_debug_str(self, def_id: DefId) -> String {
+ // We are explicitly not going through queries here in order to get
+ // crate name and stable crate id since this code is called from debug!()
+ // statements within the query system and we'd run into endless
+ // recursion otherwise.
+ let (crate_name, stable_crate_id) = if def_id.is_local() {
+ (self.crate_name, self.sess.local_stable_crate_id())
+ } else {
+ let cstore = &self.cstore;
+ (cstore.crate_name(def_id.krate), cstore.stable_crate_id(def_id.krate))
+ };
+
+ format!(
+ "{}[{:04x}]{}",
+ crate_name,
+ // Don't print the whole stable crate id; the top 16 bits
+ // (four hex digits) are enough in debug output.
+ stable_crate_id.to_u64() >> (8 * 6),
+ self.def_path(def_id).to_string_no_crate_verbose()
+ )
+ }
+
+ /// Create a new definition within the incr. comp. engine.
+ pub fn create_def(self, parent: LocalDefId, data: hir::definitions::DefPathData) -> LocalDefId {
+ // This function modifies `self.definitions` using a side-effect.
+ // We need to ensure that these side effects are re-run by the incr. comp. engine.
+ // Depending on the forever-red node will tell the graph that the calling query
+ // needs to be re-evaluated.
+ use rustc_query_system::dep_graph::DepNodeIndex;
+ self.dep_graph.read_index(DepNodeIndex::FOREVER_RED_NODE);
+
+ // The following call has the side effect of modifying the tables inside `definitions`.
+ // These very tables are relied on by the incr. comp. engine to decode DepNodes and to
+ // decode the on-disk cache.
+ //
+ // Any LocalDefId which is used within queries, either as key or result, either:
+ // - has been created before the construction of the TyCtxt;
+ // - has been created by this call to `create_def`.
+ // As a consequence, this LocalDefId is always re-created before it is needed by the incr.
+ // comp. engine itself.
+ //
+ // This call also writes to the value of `source_span` and `expn_that_defined` queries.
+ // This is fine because:
+ // - those queries are `eval_always` so we won't miss their result changing;
+ // - this write will have happened before these queries are called.
+ self.definitions.write().create_def(parent, data)
+ }
+
+ pub fn iter_local_def_id(self) -> impl Iterator<Item = LocalDefId> + 'tcx {
+ // Create a dependency on the crate to be sure we re-execute this when the number of
+ // definitions changes.
+ self.ensure().hir_crate(());
+ // Leak a read lock once we start iterating over definitions, to prevent adding new ones
+ // while iterating. If some query needs to add definitions, it should be `ensure`d above.
+ let definitions = self.definitions.leak();
+ definitions.iter_local_def_id()
+ }
+
+ pub fn def_path_table(self) -> &'tcx rustc_hir::definitions::DefPathTable {
+ // Create a dependency on the crate to be sure we re-execute this when the number of
+ // definitions changes.
+ self.ensure().hir_crate(());
+ // Leak a read lock once we start iterating over definitions, to prevent adding new ones
+ // while iterating. If some query needs to add definitions, it should be `ensure`d above.
+ let definitions = self.definitions.leak();
+ definitions.def_path_table()
+ }
+
+ pub fn def_path_hash_to_def_index_map(
+ self,
+ ) -> &'tcx rustc_hir::def_path_hash_map::DefPathHashMap {
+ // Create a dependency on the crate to be sure we re-execute this when the number of
+ // definitions changes.
+ self.ensure().hir_crate(());
+ // Leak a read lock once we start iterating over definitions, to prevent adding new ones
+ // while iterating. If some query needs to add definitions, it should be `ensure`d above.
+ let definitions = self.definitions.leak();
+ definitions.def_path_hash_to_def_index_map()
+ }
+
+ /// Note that this is *untracked* and should only be used within the query
+ /// system if the result is otherwise tracked through queries.
+ pub fn cstore_untracked(self) -> &'tcx CrateStoreDyn {
+ &*self.cstore
+ }
+
+ /// Note that this is *untracked* and should only be used within the query
+ /// system if the result is otherwise tracked through queries.
+ #[inline]
+ pub fn definitions_untracked(self) -> ReadGuard<'tcx, Definitions> {
+ self.definitions.read()
+ }
+
+ /// Note that this is *untracked* and should only be used within the query
+ /// system if the result is otherwise tracked through queries.
+ #[inline]
+ pub fn source_span_untracked(self, def_id: LocalDefId) -> Span {
+ self.untracked_resolutions.source_span.get(def_id).copied().unwrap_or(DUMMY_SP)
+ }
+
+ #[inline(always)]
+ pub fn with_stable_hashing_context<R>(
+ self,
+ f: impl FnOnce(StableHashingContext<'_>) -> R,
+ ) -> R {
+ let definitions = self.definitions_untracked();
+ let hcx = StableHashingContext::new(
+ self.sess,
+ &*definitions,
+ &*self.cstore,
+ &self.untracked_resolutions.source_span,
+ );
+ f(hcx)
+ }
+
+ pub fn serialize_query_result_cache(self, encoder: FileEncoder) -> FileEncodeResult {
+ self.on_disk_cache.as_ref().map_or(Ok(0), |c| c.serialize(self, encoder))
+ }
+
+ /// If `true`, we should use lazy normalization for constants, otherwise
+ /// we still evaluate them eagerly.
+ #[inline]
+ pub fn lazy_normalization(self) -> bool {
+ let features = self.features();
+ // Note: We only use lazy normalization for generic const expressions.
+ features.generic_const_exprs
+ }
+
+ #[inline]
+ pub fn local_crate_exports_generics(self) -> bool {
+ debug_assert!(self.sess.opts.share_generics());
+
+ self.sess.crate_types().iter().any(|crate_type| {
+ match crate_type {
+ CrateType::Executable
+ | CrateType::Staticlib
+ | CrateType::ProcMacro
+ | CrateType::Cdylib => false,
+
+ // FIXME rust-lang/rust#64319, rust-lang/rust#64872:
+ // We want to block export of generics from dylibs,
+ // but we must fix rust-lang/rust#65890 before we can
+ // do that robustly.
+ CrateType::Dylib => true,
+
+ CrateType::Rlib => true,
+ }
+ })
+ }
+
+ /// Returns the binding scope `DefId` and the `BoundRegionKind` corresponding to the
+ /// given region, if it is a suitable free region.
+ pub fn is_suitable_region(self, region: Region<'tcx>) -> Option<FreeRegionInfo> {
+ let (suitable_region_binding_scope, bound_region) = match *region {
+ ty::ReFree(ref free_region) => {
+ (free_region.scope.expect_local(), free_region.bound_region)
+ }
+ ty::ReEarlyBound(ref ebr) => (
+ self.local_parent(ebr.def_id.expect_local()),
+ ty::BoundRegionKind::BrNamed(ebr.def_id, ebr.name),
+ ),
+ _ => return None, // not a free region
+ };
+
+ let is_impl_item = match self.hir().find_by_def_id(suitable_region_binding_scope) {
+ Some(Node::Item(..) | Node::TraitItem(..)) => false,
+ Some(Node::ImplItem(..)) => {
+ self.is_bound_region_in_impl_item(suitable_region_binding_scope)
+ }
+ _ => return None,
+ };
+
+ Some(FreeRegionInfo {
+ def_id: suitable_region_binding_scope,
+ boundregion: bound_region,
+ is_impl_item,
+ })
+ }
+
+ /// Given a `DefId` for an `fn`, return all the `dyn` and `impl` traits in its return type.
+ pub fn return_type_impl_or_dyn_traits(
+ self,
+ scope_def_id: LocalDefId,
+ ) -> Vec<&'tcx hir::Ty<'tcx>> {
+ let hir_id = self.hir().local_def_id_to_hir_id(scope_def_id);
+ let Some(hir::FnDecl { output: hir::FnRetTy::Return(hir_output), .. }) = self.hir().fn_decl_by_hir_id(hir_id) else {
+ return vec![];
+ };
+
+ let mut v = TraitObjectVisitor(vec![], self.hir());
+ v.visit_ty(hir_output);
+ v.0
+ }
+
+ pub fn return_type_impl_trait(self, scope_def_id: LocalDefId) -> Option<(Ty<'tcx>, Span)> {
+ // `type_of()` will fail on these (#55796, #86483), so only allow `fn`s or closures.
+ match self.hir().get_by_def_id(scope_def_id) {
+ Node::Item(&hir::Item { kind: ItemKind::Fn(..), .. }) => {}
+ Node::TraitItem(&hir::TraitItem { kind: TraitItemKind::Fn(..), .. }) => {}
+ Node::ImplItem(&hir::ImplItem { kind: ImplItemKind::Fn(..), .. }) => {}
+ Node::Expr(&hir::Expr { kind: ExprKind::Closure { .. }, .. }) => {}
+ _ => return None,
+ }
+
+ let ret_ty = self.type_of(scope_def_id);
+ match ret_ty.kind() {
+ ty::FnDef(_, _) => {
+ let sig = ret_ty.fn_sig(self);
+ let output = self.erase_late_bound_regions(sig.output());
+ if output.is_impl_trait() {
+ let hir_id = self.hir().local_def_id_to_hir_id(scope_def_id);
+ let fn_decl = self.hir().fn_decl_by_hir_id(hir_id).unwrap();
+ Some((output, fn_decl.output.span()))
+ } else {
+ None
+ }
+ }
+ _ => None,
+ }
+ }
+
+ /// Checks whether the bound region is in an impl item.
+ pub fn is_bound_region_in_impl_item(self, suitable_region_binding_scope: LocalDefId) -> bool {
+ let container_id = self.parent(suitable_region_binding_scope.to_def_id());
+ if self.impl_trait_ref(container_id).is_some() {
+ // For now, we do not try to target impls of traits. This is
+ // because this message is going to suggest that the user
+ // change the fn signature, but they may not be free to do so,
+ // since the signature must match the trait.
+ //
+ // FIXME(#42706) -- in some cases, we could do better here.
+ return true;
+ }
+ false
+ }
+
+ /// Determines whether identifiers in the assembly have strict naming rules.
+ /// Currently, only NVPTX* targets need it.
+ pub fn has_strict_asm_symbol_naming(self) -> bool {
+ self.sess.target.arch.contains("nvptx")
+ }
+
+ /// Returns `&'static core::panic::Location<'static>`.
+ pub fn caller_location_ty(self) -> Ty<'tcx> {
+ self.mk_imm_ref(
+ self.lifetimes.re_static,
+ self.bound_type_of(self.require_lang_item(LangItem::PanicLocation, None))
+ .subst(self, self.mk_substs([self.lifetimes.re_static.into()].iter())),
+ )
+ }
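+
+ // For reference (runnable with plain `std`, outside the compiler), the type
+ // constructed above is the one `#[track_caller]` functions observe:
+ //
+ //     #[track_caller]
+ //     fn whereami() -> &'static core::panic::Location<'static> {
+ //         core::panic::Location::caller()
+ //     }
+ //
+ // `whereami` is a hypothetical name used for illustration.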
+
+ /// Returns a displayable description and article for the given `def_id` (e.g. `("a", "struct")`).
+ pub fn article_and_description(self, def_id: DefId) -> (&'static str, &'static str) {
+ match self.def_kind(def_id) {
+ DefKind::Generator => match self.generator_kind(def_id).unwrap() {
+ rustc_hir::GeneratorKind::Async(..) => ("an", "async closure"),
+ rustc_hir::GeneratorKind::Gen => ("a", "generator"),
+ },
+ def_kind => (def_kind.article(), def_kind.descr(def_id)),
+ }
+ }
+
+ pub fn type_length_limit(self) -> Limit {
+ self.limits(()).type_length_limit
+ }
+
+ pub fn recursion_limit(self) -> Limit {
+ self.limits(()).recursion_limit
+ }
+
+ pub fn move_size_limit(self) -> Limit {
+ self.limits(()).move_size_limit
+ }
+
+ pub fn const_eval_limit(self) -> Limit {
+ self.limits(()).const_eval_limit
+ }
+
+ pub fn all_traits(self) -> impl Iterator<Item = DefId> + 'tcx {
+ iter::once(LOCAL_CRATE)
+ .chain(self.crates(()).iter().copied())
+ .flat_map(move |cnum| self.traits_in_crate(cnum).iter().copied())
+ }
+}
+
+/// A trait implemented for all `X<'a>` types that can be safely and
+/// efficiently converted to `X<'tcx>` as long as they are part of the
+/// provided `TyCtxt<'tcx>`.
+/// This can be done, for example, for `Ty<'tcx>` or `SubstsRef<'tcx>`
+/// by looking them up in their respective interners.
+///
+/// However, this is still not the best implementation as it does
+/// need to compare the components, even for interned values.
+/// It would be more efficient if `TypedArena` provided a way to
+/// determine whether the address is in the allocated range.
+///
+/// `None` is returned if the value or one of the components is not part
+/// of the provided context.
+/// For `Ty`, `None` can be returned if either the type interner doesn't
+/// contain the `TyKind` key or if the address of the interned
+/// pointer differs. The latter case is possible if a primitive type,
+/// e.g., `()` or `u8`, was interned in a different context.
+pub trait Lift<'tcx>: fmt::Debug {
+ type Lifted: fmt::Debug + 'tcx;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted>;
+}
+
+macro_rules! nop_lift {
+ ($set:ident; $ty:ty => $lifted:ty) => {
+ impl<'a, 'tcx> Lift<'tcx> for $ty {
+ type Lifted = $lifted;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ if tcx.interners.$set.contains_pointer_to(&InternedInSet(&*self.0.0)) {
+ // SAFETY: `self` is interned and therefore valid
+ // for the entire lifetime of the `TyCtxt`.
+ Some(unsafe { mem::transmute(self) })
+ } else {
+ None
+ }
+ }
+ }
+ };
+}
+
+ // Can't use the macros, as we have to reuse the `substs` interner here.
+//
+// See `intern_type_list` for more info.
+impl<'a, 'tcx> Lift<'tcx> for &'a List<Ty<'a>> {
+ type Lifted = &'tcx List<Ty<'tcx>>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ if self.is_empty() {
+ return Some(List::empty());
+ }
+ if tcx.interners.substs.contains_pointer_to(&InternedInSet(self.as_substs())) {
+ // SAFETY: `self` is interned and therefore valid
+ // for the entire lifetime of the `TyCtxt`.
+ Some(unsafe { mem::transmute::<&'a List<Ty<'a>>, &'tcx List<Ty<'tcx>>>(self) })
+ } else {
+ None
+ }
+ }
+}
+
+macro_rules! nop_list_lift {
+ ($set:ident; $ty:ty => $lifted:ty) => {
+ impl<'a, 'tcx> Lift<'tcx> for &'a List<$ty> {
+ type Lifted = &'tcx List<$lifted>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ if self.is_empty() {
+ return Some(List::empty());
+ }
+ if tcx.interners.$set.contains_pointer_to(&InternedInSet(self)) {
+ Some(unsafe { mem::transmute(self) })
+ } else {
+ None
+ }
+ }
+ }
+ };
+}
+
+nop_lift! {type_; Ty<'a> => Ty<'tcx>}
+nop_lift! {region; Region<'a> => Region<'tcx>}
+nop_lift! {const_; Const<'a> => Const<'tcx>}
+nop_lift! {const_allocation; ConstAllocation<'a> => ConstAllocation<'tcx>}
+nop_lift! {predicate; Predicate<'a> => Predicate<'tcx>}
+
+nop_list_lift! {poly_existential_predicates; ty::Binder<'a, ExistentialPredicate<'a>> => ty::Binder<'tcx, ExistentialPredicate<'tcx>>}
+nop_list_lift! {predicates; Predicate<'a> => Predicate<'tcx>}
+nop_list_lift! {canonical_var_infos; CanonicalVarInfo<'a> => CanonicalVarInfo<'tcx>}
+nop_list_lift! {projs; ProjectionKind => ProjectionKind}
+nop_list_lift! {bound_variable_kinds; ty::BoundVariableKind => ty::BoundVariableKind}
+
+// This is the impl for `&'a InternalSubsts<'a>`.
+nop_list_lift! {substs; GenericArg<'a> => GenericArg<'tcx>}
+
+CloneLiftImpls! { for<'tcx> { Constness, traits::WellFormedLoc, } }
+
+pub mod tls {
+ use super::{ptr_eq, GlobalCtxt, TyCtxt};
+
+ use crate::dep_graph::TaskDepsRef;
+ use crate::ty::query;
+ use rustc_data_structures::sync::{self, Lock};
+ use rustc_data_structures::thin_vec::ThinVec;
+ use rustc_errors::Diagnostic;
+ use std::mem;
+
+ #[cfg(not(parallel_compiler))]
+ use std::cell::Cell;
+
+ #[cfg(parallel_compiler)]
+ use rustc_rayon_core as rayon_core;
+
+ /// This is the implicit state of rustc. It contains the current
+ /// `TyCtxt` and query. It is updated when creating a local interner or
+ /// executing a new query. Whenever there's a `TyCtxt` value available
+ /// you should also have access to an `ImplicitCtxt` through the functions
+ /// in this module.
+ #[derive(Clone)]
+ pub struct ImplicitCtxt<'a, 'tcx> {
+ /// The current `TyCtxt`.
+ pub tcx: TyCtxt<'tcx>,
+
+ /// The current query job, if any. This is updated by `JobOwner::start` in
+ /// `ty::query::plumbing` when executing a query.
+ pub query: Option<query::QueryJobId>,
+
+ /// Where to store diagnostics for the current query job, if any.
+ /// This is updated by `JobOwner::start` in `ty::query::plumbing` when executing a query.
+ pub diagnostics: Option<&'a Lock<ThinVec<Diagnostic>>>,
+
+ /// Used to prevent layout from recursing too deeply.
+ pub layout_depth: usize,
+
+ /// The current dep graph task. This is used to add dependencies to queries
+ /// when executing them.
+ pub task_deps: TaskDepsRef<'a>,
+ }
+
+ impl<'a, 'tcx> ImplicitCtxt<'a, 'tcx> {
+ pub fn new(gcx: &'tcx GlobalCtxt<'tcx>) -> Self {
+ let tcx = TyCtxt { gcx };
+ ImplicitCtxt {
+ tcx,
+ query: None,
+ diagnostics: None,
+ layout_depth: 0,
+ task_deps: TaskDepsRef::Ignore,
+ }
+ }
+ }
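+
+ // Illustrative sketch (not from the original source) of how the pieces in
+ // this module fit together, assuming a `gcx: &'tcx GlobalCtxt<'tcx>` from
+ // compiler setup:
+ //
+ //     let icx = ImplicitCtxt::new(gcx);
+ //     enter_context(&icx, |_| {
+ //         with(|tcx| {
+ //             // `tcx` (and anything interned in it) is usable here.
+ //         })
+ //     });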
+
+ /// Sets Rayon's thread-local variable, which is preserved for Rayon jobs,
+ /// to `value` during the call to `f`. It is restored to its previous value afterwards.
+ /// This is used to set the pointer to the new `ImplicitCtxt`.
+ #[cfg(parallel_compiler)]
+ #[inline]
+ fn set_tlv<F: FnOnce() -> R, R>(value: usize, f: F) -> R {
+ rayon_core::tlv::with(value, f)
+ }
+
+ /// Gets Rayon's thread-local variable, which is preserved for Rayon jobs.
+ /// This is used to get the pointer to the current `ImplicitCtxt`.
+ #[cfg(parallel_compiler)]
+ #[inline]
+ pub fn get_tlv() -> usize {
+ rayon_core::tlv::get()
+ }
+
+ #[cfg(not(parallel_compiler))]
+ thread_local! {
+ /// A thread local variable that stores a pointer to the current `ImplicitCtxt`.
+ static TLV: Cell<usize> = const { Cell::new(0) };
+ }
+
+ /// Sets TLV to `value` during the call to `f`.
+ /// It is restored to its previous value afterwards.
+ /// This is used to set the pointer to the new `ImplicitCtxt`.
+ #[cfg(not(parallel_compiler))]
+ #[inline]
+ fn set_tlv<F: FnOnce() -> R, R>(value: usize, f: F) -> R {
+ let old = get_tlv();
+ let _reset = rustc_data_structures::OnDrop(move || TLV.with(|tlv| tlv.set(old)));
+ TLV.with(|tlv| tlv.set(value));
+ f()
+ }
+
+ /// Gets the pointer to the current `ImplicitCtxt`.
+ #[cfg(not(parallel_compiler))]
+ #[inline]
+ fn get_tlv() -> usize {
+ TLV.with(|tlv| tlv.get())
+ }
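+
+ // The set/restore pattern above, shown standalone (runnable with plain `std`;
+ // the names are illustrative, not part of this module):
+ //
+ //     use std::cell::Cell;
+ //     thread_local!(static DEMO: Cell<usize> = Cell::new(0));
+ //
+ //     fn with_value<R>(value: usize, f: impl FnOnce() -> R) -> R {
+ //         let old = DEMO.with(|tlv| tlv.replace(value));
+ //         // Restore on scope exit, even if `f` panics.
+ //         struct Reset(usize);
+ //         impl Drop for Reset {
+ //             fn drop(&mut self) { DEMO.with(|tlv| tlv.set(self.0)); }
+ //         }
+ //         let _reset = Reset(old);
+ //         f()
+ //     }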
+
+ /// Sets `context` as the new current `ImplicitCtxt` for the duration of the function `f`.
+ #[inline]
+ pub fn enter_context<'a, 'tcx, F, R>(context: &ImplicitCtxt<'a, 'tcx>, f: F) -> R
+ where
+ F: FnOnce(&ImplicitCtxt<'a, 'tcx>) -> R,
+ {
+ set_tlv(context as *const _ as usize, || f(&context))
+ }
+
+ /// Allows access to the current `ImplicitCtxt` in a closure if one is available.
+ #[inline]
+ pub fn with_context_opt<F, R>(f: F) -> R
+ where
+ F: for<'a, 'tcx> FnOnce(Option<&ImplicitCtxt<'a, 'tcx>>) -> R,
+ {
+ let context = get_tlv();
+ if context == 0 {
+ f(None)
+ } else {
+ // We could get an `ImplicitCtxt` pointer from another thread.
+ // Ensure that `ImplicitCtxt` is `Sync`.
+ sync::assert_sync::<ImplicitCtxt<'_, '_>>();
+
+ unsafe { f(Some(&*(context as *const ImplicitCtxt<'_, '_>))) }
+ }
+ }
+
+ /// Allows access to the current `ImplicitCtxt`.
+ /// Panics if there is no `ImplicitCtxt` available.
+ #[inline]
+ pub fn with_context<F, R>(f: F) -> R
+ where
+ F: for<'a, 'tcx> FnOnce(&ImplicitCtxt<'a, 'tcx>) -> R,
+ {
+ with_context_opt(|opt_context| f(opt_context.expect("no ImplicitCtxt stored in tls")))
+ }
+
+ /// Allows access to the current `ImplicitCtxt` whose tcx field is the same as the tcx argument
+ /// passed in. This means the closure is given an `ImplicitCtxt` with the same `'tcx` lifetime
+ /// as the `TyCtxt` passed in.
+ /// This will panic if you pass it a `TyCtxt` which is different from the current
+ /// `ImplicitCtxt`'s `tcx` field.
+ #[inline]
+ pub fn with_related_context<'tcx, F, R>(tcx: TyCtxt<'tcx>, f: F) -> R
+ where
+ F: FnOnce(&ImplicitCtxt<'_, 'tcx>) -> R,
+ {
+ with_context(|context| unsafe {
+ assert!(ptr_eq(context.tcx.gcx, tcx.gcx));
+ let context: &ImplicitCtxt<'_, '_> = mem::transmute(context);
+ f(context)
+ })
+ }
+
+ /// Allows access to the `TyCtxt` in the current `ImplicitCtxt`.
+ /// Panics if there is no `ImplicitCtxt` available.
+ #[inline]
+ pub fn with<F, R>(f: F) -> R
+ where
+ F: for<'tcx> FnOnce(TyCtxt<'tcx>) -> R,
+ {
+ with_context(|context| f(context.tcx))
+ }
+
+ /// Allows access to the `TyCtxt` in the current `ImplicitCtxt`.
+ /// The closure is passed `None` if there is no `ImplicitCtxt` available.
+ #[inline]
+ pub fn with_opt<F, R>(f: F) -> R
+ where
+ F: for<'tcx> FnOnce(Option<TyCtxt<'tcx>>) -> R,
+ {
+ with_context_opt(|opt_context| f(opt_context.map(|context| context.tcx)))
+ }
+}
+
+macro_rules! sty_debug_print {
+ ($fmt: expr, $ctxt: expr, $($variant: ident),*) => {{
+ // Curious inner module to allow variant names to be used as
+ // variable names.
+ #[allow(non_snake_case)]
+ mod inner {
+ use crate::ty::{self, TyCtxt};
+ use crate::ty::context::InternedInSet;
+
+ #[derive(Copy, Clone)]
+ struct DebugStat {
+ total: usize,
+ lt_infer: usize,
+ ty_infer: usize,
+ ct_infer: usize,
+ all_infer: usize,
+ }
+
+ pub fn go(fmt: &mut std::fmt::Formatter<'_>, tcx: TyCtxt<'_>) -> std::fmt::Result {
+ let mut total = DebugStat {
+ total: 0,
+ lt_infer: 0,
+ ty_infer: 0,
+ ct_infer: 0,
+ all_infer: 0,
+ };
+ $(let mut $variant = total;)*
+
+ let shards = tcx.interners.type_.lock_shards();
+ let types = shards.iter().flat_map(|shard| shard.keys());
+ for &InternedInSet(t) in types {
+ let variant = match t.kind {
+ ty::Bool | ty::Char | ty::Int(..) | ty::Uint(..) |
+ ty::Float(..) | ty::Str | ty::Never => continue,
+ ty::Error(_) => /* unimportant */ continue,
+ $(ty::$variant(..) => &mut $variant,)*
+ };
+ let lt = t.flags.intersects(ty::TypeFlags::HAS_RE_INFER);
+ let ty = t.flags.intersects(ty::TypeFlags::HAS_TY_INFER);
+ let ct = t.flags.intersects(ty::TypeFlags::HAS_CT_INFER);
+
+ variant.total += 1;
+ total.total += 1;
+ if lt { total.lt_infer += 1; variant.lt_infer += 1 }
+ if ty { total.ty_infer += 1; variant.ty_infer += 1 }
+ if ct { total.ct_infer += 1; variant.ct_infer += 1 }
+ if lt && ty && ct { total.all_infer += 1; variant.all_infer += 1 }
+ }
+ writeln!(fmt, "Ty interner total ty lt ct all")?;
+ $(writeln!(fmt, " {:18}: {uses:6} {usespc:4.1}%, \
+ {ty:4.1}% {lt:5.1}% {ct:4.1}% {all:4.1}%",
+ stringify!($variant),
+ uses = $variant.total,
+ usespc = $variant.total as f64 * 100.0 / total.total as f64,
+ ty = $variant.ty_infer as f64 * 100.0 / total.total as f64,
+ lt = $variant.lt_infer as f64 * 100.0 / total.total as f64,
+ ct = $variant.ct_infer as f64 * 100.0 / total.total as f64,
+ all = $variant.all_infer as f64 * 100.0 / total.total as f64)?;
+ )*
+ writeln!(fmt, " total {uses:6} \
+ {ty:4.1}% {lt:5.1}% {ct:4.1}% {all:4.1}%",
+ uses = total.total,
+ ty = total.ty_infer as f64 * 100.0 / total.total as f64,
+ lt = total.lt_infer as f64 * 100.0 / total.total as f64,
+ ct = total.ct_infer as f64 * 100.0 / total.total as f64,
+ all = total.all_infer as f64 * 100.0 / total.total as f64)
+ }
+ }
+
+ inner::go($fmt, $ctxt)
+ }}
+}
+
+impl<'tcx> TyCtxt<'tcx> {
+ pub fn debug_stats(self) -> impl std::fmt::Debug + 'tcx {
+ struct DebugStats<'tcx>(TyCtxt<'tcx>);
+
+ impl<'tcx> std::fmt::Debug for DebugStats<'tcx> {
+ fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ sty_debug_print!(
+ fmt,
+ self.0,
+ Adt,
+ Array,
+ Slice,
+ RawPtr,
+ Ref,
+ FnDef,
+ FnPtr,
+ Placeholder,
+ Generator,
+ GeneratorWitness,
+ Dynamic,
+ Closure,
+ Tuple,
+ Bound,
+ Param,
+ Infer,
+ Projection,
+ Opaque,
+ Foreign
+ )?;
+
+ writeln!(fmt, "InternalSubsts interner: #{}", self.0.interners.substs.len())?;
+ writeln!(fmt, "Region interner: #{}", self.0.interners.region.len())?;
+ writeln!(
+ fmt,
+ "Const Allocation interner: #{}",
+ self.0.interners.const_allocation.len()
+ )?;
+ writeln!(fmt, "Layout interner: #{}", self.0.interners.layout.len())?;
+
+ Ok(())
+ }
+ }
+
+ DebugStats(self)
+ }
+}
+
+// This type holds a `T` in the interner. The `T` is stored in the arena and
+// this type just holds a pointer to it, but it still effectively owns it. It
+// impls `Borrow` so that it can be looked up using the original
+// (non-arena-memory-owning) types.
+struct InternedInSet<'tcx, T: ?Sized>(&'tcx T);
+
+impl<'tcx, T: 'tcx + ?Sized> Clone for InternedInSet<'tcx, T> {
+ fn clone(&self) -> Self {
+ InternedInSet(self.0)
+ }
+}
+
+impl<'tcx, T: 'tcx + ?Sized> Copy for InternedInSet<'tcx, T> {}
+
+impl<'tcx, T: 'tcx + ?Sized> IntoPointer for InternedInSet<'tcx, T> {
+ fn into_pointer(&self) -> *const () {
+ self.0 as *const _ as *const ()
+ }
+}
+
+#[allow(rustc::usage_of_ty_tykind)]
+impl<'tcx> Borrow<TyKind<'tcx>> for InternedInSet<'tcx, WithStableHash<TyS<'tcx>>> {
+ fn borrow<'a>(&'a self) -> &'a TyKind<'tcx> {
+ &self.0.kind
+ }
+}
+
+impl<'tcx> PartialEq for InternedInSet<'tcx, WithStableHash<TyS<'tcx>>> {
+ fn eq(&self, other: &InternedInSet<'tcx, WithStableHash<TyS<'tcx>>>) -> bool {
+ // The `Borrow` trait requires that `x.borrow() == y.borrow()` equals
+ // `x == y`.
+ self.0.kind == other.0.kind
+ }
+}
+
+impl<'tcx> Eq for InternedInSet<'tcx, WithStableHash<TyS<'tcx>>> {}
+
+impl<'tcx> Hash for InternedInSet<'tcx, WithStableHash<TyS<'tcx>>> {
+ fn hash<H: Hasher>(&self, s: &mut H) {
+ // The `Borrow` trait requires that `x.borrow().hash(s) == x.hash(s)`.
+ self.0.kind.hash(s)
+ }
+}
+
+impl<'tcx> Borrow<Binder<'tcx, PredicateKind<'tcx>>> for InternedInSet<'tcx, PredicateS<'tcx>> {
+ fn borrow<'a>(&'a self) -> &'a Binder<'tcx, PredicateKind<'tcx>> {
+ &self.0.kind
+ }
+}
+
+impl<'tcx> PartialEq for InternedInSet<'tcx, PredicateS<'tcx>> {
+ fn eq(&self, other: &InternedInSet<'tcx, PredicateS<'tcx>>) -> bool {
+ // The `Borrow` trait requires that `x.borrow() == y.borrow()` equals
+ // `x == y`.
+ self.0.kind == other.0.kind
+ }
+}
+
+impl<'tcx> Eq for InternedInSet<'tcx, PredicateS<'tcx>> {}
+
+impl<'tcx> Hash for InternedInSet<'tcx, PredicateS<'tcx>> {
+ fn hash<H: Hasher>(&self, s: &mut H) {
+ // The `Borrow` trait requires that `x.borrow().hash(s) == x.hash(s)`.
+ self.0.kind.hash(s)
+ }
+}
+
+impl<'tcx, T> Borrow<[T]> for InternedInSet<'tcx, List<T>> {
+ fn borrow<'a>(&'a self) -> &'a [T] {
+ &self.0[..]
+ }
+}
+
+impl<'tcx, T: PartialEq> PartialEq for InternedInSet<'tcx, List<T>> {
+ fn eq(&self, other: &InternedInSet<'tcx, List<T>>) -> bool {
+ // The `Borrow` trait requires that `x.borrow() == y.borrow()` equals
+ // `x == y`.
+ self.0[..] == other.0[..]
+ }
+}
+
+impl<'tcx, T: Eq> Eq for InternedInSet<'tcx, List<T>> {}
+
+impl<'tcx, T: Hash> Hash for InternedInSet<'tcx, List<T>> {
+ fn hash<H: Hasher>(&self, s: &mut H) {
+ // The `Borrow` trait requires that `x.borrow().hash(s) == x.hash(s)`.
+ self.0[..].hash(s)
+ }
+}
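+
+// The `Borrow`/`Eq`/`Hash` agreement relied on above is the same contract
+// that std collections use for borrowed-key lookups. A runnable std-only
+// analogue:
+//
+//     use std::collections::HashSet;
+//
+//     let mut set: HashSet<String> = HashSet::new();
+//     set.insert("unit".to_owned());
+//     // Lookup by the borrowed form works because `String: Borrow<str>`
+//     // and the `Eq`/`Hash` of `String` and `str` agree.
+//     assert!(set.contains("unit"));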
+
+macro_rules! direct_interners {
+ ($($name:ident: $method:ident($ty:ty): $ret_ctor:ident -> $ret_ty:ty,)+) => {
+ $(impl<'tcx> Borrow<$ty> for InternedInSet<'tcx, $ty> {
+ fn borrow<'a>(&'a self) -> &'a $ty {
+ &self.0
+ }
+ }
+
+ impl<'tcx> PartialEq for InternedInSet<'tcx, $ty> {
+ fn eq(&self, other: &Self) -> bool {
+ // The `Borrow` trait requires that `x.borrow() == y.borrow()`
+ // equals `x == y`.
+ self.0 == other.0
+ }
+ }
+
+ impl<'tcx> Eq for InternedInSet<'tcx, $ty> {}
+
+ impl<'tcx> Hash for InternedInSet<'tcx, $ty> {
+ fn hash<H: Hasher>(&self, s: &mut H) {
+ // The `Borrow` trait requires that `x.borrow().hash(s) ==
+ // x.hash(s)`.
+ self.0.hash(s)
+ }
+ }
+
+ impl<'tcx> TyCtxt<'tcx> {
+ pub fn $method(self, v: $ty) -> $ret_ty {
+ $ret_ctor(Interned::new_unchecked(self.interners.$name.intern(v, |v| {
+ InternedInSet(self.interners.arena.alloc(v))
+ }).0))
+ }
+ })+
+ }
+}
+
+direct_interners! {
+ region: mk_region(RegionKind<'tcx>): Region -> Region<'tcx>,
+ const_: mk_const(ConstS<'tcx>): Const -> Const<'tcx>,
+ const_allocation: intern_const_alloc(Allocation): ConstAllocation -> ConstAllocation<'tcx>,
+ layout: intern_layout(LayoutS<'tcx>): Layout -> Layout<'tcx>,
+ adt_def: intern_adt_def(AdtDefData): AdtDef -> AdtDef<'tcx>,
+}
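+
+// For example (illustrative), the generated `mk_region` interns a
+// `RegionKind` and returns the shared handle:
+//
+//     let re: Region<'tcx> = tcx.mk_region(ty::ReStatic);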
+
+macro_rules! slice_interners {
+ ($($field:ident: $method:ident($ty:ty)),+ $(,)?) => (
+ impl<'tcx> TyCtxt<'tcx> {
+ $(pub fn $method(self, v: &[$ty]) -> &'tcx List<$ty> {
+ self.interners.$field.intern_ref(v, || {
+ InternedInSet(List::from_arena(&*self.arena, v))
+ }).0
+ })+
+ }
+ );
+}
+
+slice_interners!(
+ substs: _intern_substs(GenericArg<'tcx>),
+ canonical_var_infos: _intern_canonical_var_infos(CanonicalVarInfo<'tcx>),
+ poly_existential_predicates:
+ _intern_poly_existential_predicates(ty::Binder<'tcx, ExistentialPredicate<'tcx>>),
+ predicates: _intern_predicates(Predicate<'tcx>),
+ projs: _intern_projs(ProjectionKind),
+ place_elems: _intern_place_elems(PlaceElem<'tcx>),
+ bound_variable_kinds: _intern_bound_variable_kinds(ty::BoundVariableKind),
+);
+
+impl<'tcx> TyCtxt<'tcx> {
+ /// Given a `fn` type, returns an equivalent `unsafe fn` type;
+ /// that is, a `fn` type that is equivalent in every way except
+ /// that it is `unsafe`.
+ pub fn safe_to_unsafe_fn_ty(self, sig: PolyFnSig<'tcx>) -> Ty<'tcx> {
+ assert_eq!(sig.unsafety(), hir::Unsafety::Normal);
+ self.mk_fn_ptr(sig.map_bound(|sig| ty::FnSig { unsafety: hir::Unsafety::Unsafe, ..sig }))
+ }
+
+ /// Given the `DefId` of a trait (`trait_def_id`) and the name of an associated item
+ /// (`assoc_name`), returns `true` if the trait or any of its supertraits define an
+ /// associated type with that name.
+ pub fn trait_may_define_assoc_type(self, trait_def_id: DefId, assoc_name: Ident) -> bool {
+ self.super_traits_of(trait_def_id).any(|trait_did| {
+ self.associated_items(trait_did)
+ .find_by_name_and_kind(self, assoc_name, ty::AssocKind::Type, trait_did)
+ .is_some()
+ })
+ }
+
+ /// Given a `ty`, return whether it's an `impl Future<...>`.
+ pub fn ty_is_opaque_future(self, ty: Ty<'_>) -> bool {
+ let ty::Opaque(def_id, _) = ty.kind() else { return false };
+ let future_trait = self.lang_items().future_trait().unwrap();
+
+ self.explicit_item_bounds(def_id).iter().any(|(predicate, _)| {
+ let ty::PredicateKind::Trait(trait_predicate) = predicate.kind().skip_binder() else {
+ return false;
+ };
+ trait_predicate.trait_ref.def_id == future_trait
+ && trait_predicate.polarity == ImplPolarity::Positive
+ })
+ }
+
+ /// Computes the def-ids of the transitive supertraits of `trait_def_id`. This (intentionally)
+ /// does not compute the full elaborated super-predicates but just the set of def-ids. It is used
+ /// to identify which traits may define a given associated type to help avoid cycle errors.
+ /// Returns a `DefId` iterator.
+ fn super_traits_of(self, trait_def_id: DefId) -> impl Iterator<Item = DefId> + 'tcx {
+ let mut set = FxHashSet::default();
+ let mut stack = vec![trait_def_id];
+
+ set.insert(trait_def_id);
+
+ iter::from_fn(move || -> Option<DefId> {
+ let trait_did = stack.pop()?;
+ let generic_predicates = self.super_predicates_of(trait_did);
+
+ for (predicate, _) in generic_predicates.predicates {
+ if let ty::PredicateKind::Trait(data) = predicate.kind().skip_binder() {
+ if set.insert(data.def_id()) {
+ stack.push(data.def_id());
+ }
+ }
+ }
+
+ Some(trait_did)
+ })
+ }
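+
+ // The shape above (a seeded set plus a stack, drained lazily through
+ // `iter::from_fn`) is a generic reachability walk. A runnable std-only
+ // sketch over a plain edge function (names are illustrative):
+ //
+ //     use std::collections::HashSet;
+ //
+ //     fn reachable(start: u32, edges: fn(u32) -> Vec<u32>) -> impl Iterator<Item = u32> {
+ //         let mut set = HashSet::new();
+ //         let mut stack = vec![start];
+ //         set.insert(start);
+ //         std::iter::from_fn(move || {
+ //             let node = stack.pop()?;
+ //             for next in edges(node) {
+ //                 if set.insert(next) {
+ //                     stack.push(next);
+ //                 }
+ //             }
+ //             Some(node)
+ //         })
+ //     }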
+
+ /// Given a closure signature, returns an equivalent fn signature. Detuples
+ /// and so forth -- so e.g., if we have a sig with `Fn<(u32, i32)>` then
+ /// you would get a `fn(u32, i32)`.
+ /// `unsafety` determines the unsafety of the fn signature. If you pass
+ /// `hir::Unsafety::Unsafe` in the previous example, then you would get
+ /// an `unsafe fn (u32, i32)`.
+ /// It cannot convert a closure that requires unsafe.
+ pub fn signature_unclosure(
+ self,
+ sig: PolyFnSig<'tcx>,
+ unsafety: hir::Unsafety,
+ ) -> PolyFnSig<'tcx> {
+ sig.map_bound(|s| {
+ let params_iter = match s.inputs()[0].kind() {
+ ty::Tuple(params) => params.into_iter(),
+ _ => bug!(),
+ };
+ self.mk_fn_sig(params_iter, s.output(), s.c_variadic, unsafety, abi::Abi::Rust)
+ })
+ }
+
+ /// Same as `self.mk_region(kind)`, but avoids accessing the interners if
+ /// `*r == kind`.
+ #[inline]
+ pub fn reuse_or_mk_region(self, r: Region<'tcx>, kind: RegionKind<'tcx>) -> Region<'tcx> {
+ if *r == kind { r } else { self.mk_region(kind) }
+ }
+
+ #[allow(rustc::usage_of_ty_tykind)]
+ #[inline]
+ pub fn mk_ty(self, st: TyKind<'tcx>) -> Ty<'tcx> {
+ self.interners.intern_ty(
+ st,
+ self.sess,
+ &self.definitions.read(),
+ &*self.cstore,
+ // This is only used to create a stable hashing context.
+ &self.untracked_resolutions.source_span,
+ )
+ }
+
+ #[inline]
+ pub fn mk_predicate(self, binder: Binder<'tcx, PredicateKind<'tcx>>) -> Predicate<'tcx> {
+ self.interners.intern_predicate(binder)
+ }
+
+ #[inline]
+ pub fn reuse_or_mk_predicate(
+ self,
+ pred: Predicate<'tcx>,
+ binder: Binder<'tcx, PredicateKind<'tcx>>,
+ ) -> Predicate<'tcx> {
+ if pred.kind() != binder { self.mk_predicate(binder) } else { pred }
+ }
+
+ pub fn mk_mach_int(self, tm: IntTy) -> Ty<'tcx> {
+ match tm {
+ IntTy::Isize => self.types.isize,
+ IntTy::I8 => self.types.i8,
+ IntTy::I16 => self.types.i16,
+ IntTy::I32 => self.types.i32,
+ IntTy::I64 => self.types.i64,
+ IntTy::I128 => self.types.i128,
+ }
+ }
+
+ pub fn mk_mach_uint(self, tm: UintTy) -> Ty<'tcx> {
+ match tm {
+ UintTy::Usize => self.types.usize,
+ UintTy::U8 => self.types.u8,
+ UintTy::U16 => self.types.u16,
+ UintTy::U32 => self.types.u32,
+ UintTy::U64 => self.types.u64,
+ UintTy::U128 => self.types.u128,
+ }
+ }
+
+ pub fn mk_mach_float(self, tm: FloatTy) -> Ty<'tcx> {
+ match tm {
+ FloatTy::F32 => self.types.f32,
+ FloatTy::F64 => self.types.f64,
+ }
+ }
+
+ #[inline]
+ pub fn mk_static_str(self) -> Ty<'tcx> {
+ self.mk_imm_ref(self.lifetimes.re_static, self.types.str_)
+ }
+
+ #[inline]
+ pub fn mk_adt(self, def: AdtDef<'tcx>, substs: SubstsRef<'tcx>) -> Ty<'tcx> {
+ // `substs` is already interned, so this just wraps it in an `Adt` type.
+ self.mk_ty(Adt(def, substs))
+ }
+
+ #[inline]
+ pub fn mk_foreign(self, def_id: DefId) -> Ty<'tcx> {
+ self.mk_ty(Foreign(def_id))
+ }
+
+ fn mk_generic_adt(self, wrapper_def_id: DefId, ty_param: Ty<'tcx>) -> Ty<'tcx> {
+ let adt_def = self.adt_def(wrapper_def_id);
+ let substs =
+ InternalSubsts::for_item(self, wrapper_def_id, |param, substs| match param.kind {
+ GenericParamDefKind::Lifetime | GenericParamDefKind::Const { .. } => bug!(),
+ GenericParamDefKind::Type { has_default, .. } => {
+ if param.index == 0 {
+ ty_param.into()
+ } else {
+ assert!(has_default);
+ self.bound_type_of(param.def_id).subst(self, substs).into()
+ }
+ }
+ });
+ self.mk_ty(Adt(adt_def, substs))
+ }
+
+ #[inline]
+ pub fn mk_box(self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ let def_id = self.require_lang_item(LangItem::OwnedBox, None);
+ self.mk_generic_adt(def_id, ty)
+ }
+
+ #[inline]
+ pub fn mk_lang_item(self, ty: Ty<'tcx>, item: LangItem) -> Option<Ty<'tcx>> {
+ let def_id = self.lang_items().require(item).ok()?;
+ Some(self.mk_generic_adt(def_id, ty))
+ }
+
+ #[inline]
+ pub fn mk_diagnostic_item(self, ty: Ty<'tcx>, name: Symbol) -> Option<Ty<'tcx>> {
+ let def_id = self.get_diagnostic_item(name)?;
+ Some(self.mk_generic_adt(def_id, ty))
+ }
+
+ #[inline]
+ pub fn mk_maybe_uninit(self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ let def_id = self.require_lang_item(LangItem::MaybeUninit, None);
+ self.mk_generic_adt(def_id, ty)
+ }
+
+ #[inline]
+ pub fn mk_ptr(self, tm: TypeAndMut<'tcx>) -> Ty<'tcx> {
+ self.mk_ty(RawPtr(tm))
+ }
+
+ #[inline]
+ pub fn mk_ref(self, r: Region<'tcx>, tm: TypeAndMut<'tcx>) -> Ty<'tcx> {
+ self.mk_ty(Ref(r, tm.ty, tm.mutbl))
+ }
+
+ #[inline]
+ pub fn mk_mut_ref(self, r: Region<'tcx>, ty: Ty<'tcx>) -> Ty<'tcx> {
+ self.mk_ref(r, TypeAndMut { ty, mutbl: hir::Mutability::Mut })
+ }
+
+ #[inline]
+ pub fn mk_imm_ref(self, r: Region<'tcx>, ty: Ty<'tcx>) -> Ty<'tcx> {
+ self.mk_ref(r, TypeAndMut { ty, mutbl: hir::Mutability::Not })
+ }
+
+ #[inline]
+ pub fn mk_mut_ptr(self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ self.mk_ptr(TypeAndMut { ty, mutbl: hir::Mutability::Mut })
+ }
+
+ #[inline]
+ pub fn mk_imm_ptr(self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ self.mk_ptr(TypeAndMut { ty, mutbl: hir::Mutability::Not })
+ }
+
+ #[inline]
+ pub fn mk_array(self, ty: Ty<'tcx>, n: u64) -> Ty<'tcx> {
+ self.mk_ty(Array(ty, ty::Const::from_usize(self, n)))
+ }
+
+ #[inline]
+ pub fn mk_slice(self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ self.mk_ty(Slice(ty))
+ }
+
+ #[inline]
+ pub fn intern_tup(self, ts: &[Ty<'tcx>]) -> Ty<'tcx> {
+ self.mk_ty(Tuple(self.intern_type_list(&ts)))
+ }
+
+ pub fn mk_tup<I: InternAs<[Ty<'tcx>], Ty<'tcx>>>(self, iter: I) -> I::Output {
+ iter.intern_with(|ts| self.mk_ty(Tuple(self.intern_type_list(&ts))))
+ }
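+
+ // Illustrative: `mk_tup` accepts any iterator of types, e.g.
+ //
+ //     let pair = tcx.mk_tup([tcx.types.u32, tcx.types.bool].into_iter());
+ //
+ // which interns the element list and yields the tuple type `(u32, bool)`.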
+
+ #[inline]
+ pub fn mk_unit(self) -> Ty<'tcx> {
+ self.types.unit
+ }
+
+ #[inline]
+ pub fn mk_diverging_default(self) -> Ty<'tcx> {
+ if self.features().never_type_fallback { self.types.never } else { self.types.unit }
+ }
+
+ #[inline]
+ pub fn mk_fn_def(self, def_id: DefId, substs: SubstsRef<'tcx>) -> Ty<'tcx> {
+ self.mk_ty(FnDef(def_id, substs))
+ }
+
+ #[inline]
+ pub fn mk_fn_ptr(self, fty: PolyFnSig<'tcx>) -> Ty<'tcx> {
+ self.mk_ty(FnPtr(fty))
+ }
+
+ #[inline]
+ pub fn mk_dynamic(
+ self,
+ obj: &'tcx List<ty::Binder<'tcx, ExistentialPredicate<'tcx>>>,
+ reg: ty::Region<'tcx>,
+ ) -> Ty<'tcx> {
+ self.mk_ty(Dynamic(obj, reg))
+ }
+
+ #[inline]
+ pub fn mk_projection(self, item_def_id: DefId, substs: SubstsRef<'tcx>) -> Ty<'tcx> {
+ self.mk_ty(Projection(ProjectionTy { item_def_id, substs }))
+ }
+
+ #[inline]
+ pub fn mk_closure(self, closure_id: DefId, closure_substs: SubstsRef<'tcx>) -> Ty<'tcx> {
+ self.mk_ty(Closure(closure_id, closure_substs))
+ }
+
+ #[inline]
+ pub fn mk_generator(
+ self,
+ id: DefId,
+ generator_substs: SubstsRef<'tcx>,
+ movability: hir::Movability,
+ ) -> Ty<'tcx> {
+ self.mk_ty(Generator(id, generator_substs, movability))
+ }
+
+ #[inline]
+ pub fn mk_generator_witness(self, types: ty::Binder<'tcx, &'tcx List<Ty<'tcx>>>) -> Ty<'tcx> {
+ self.mk_ty(GeneratorWitness(types))
+ }
+
+ #[inline]
+ pub fn mk_ty_var(self, v: TyVid) -> Ty<'tcx> {
+ self.mk_ty_infer(TyVar(v))
+ }
+
+ #[inline]
+ pub fn mk_const_var(self, v: ConstVid<'tcx>, ty: Ty<'tcx>) -> Const<'tcx> {
+ self.mk_const(ty::ConstS { kind: ty::ConstKind::Infer(InferConst::Var(v)), ty })
+ }
+
+ #[inline]
+ pub fn mk_int_var(self, v: IntVid) -> Ty<'tcx> {
+ self.mk_ty_infer(IntVar(v))
+ }
+
+ #[inline]
+ pub fn mk_float_var(self, v: FloatVid) -> Ty<'tcx> {
+ self.mk_ty_infer(FloatVar(v))
+ }
+
+ #[inline]
+ pub fn mk_ty_infer(self, it: InferTy) -> Ty<'tcx> {
+ self.mk_ty(Infer(it))
+ }
+
+ #[inline]
+ pub fn mk_const_infer(self, ic: InferConst<'tcx>, ty: Ty<'tcx>) -> ty::Const<'tcx> {
+ self.mk_const(ty::ConstS { kind: ty::ConstKind::Infer(ic), ty })
+ }
+
+ #[inline]
+ pub fn mk_ty_param(self, index: u32, name: Symbol) -> Ty<'tcx> {
+ self.mk_ty(Param(ParamTy { index, name }))
+ }
+
+ #[inline]
+ pub fn mk_const_param(self, index: u32, name: Symbol, ty: Ty<'tcx>) -> Const<'tcx> {
+ self.mk_const(ty::ConstS { kind: ty::ConstKind::Param(ParamConst { index, name }), ty })
+ }
+
+ pub fn mk_param_from_def(self, param: &ty::GenericParamDef) -> GenericArg<'tcx> {
+ match param.kind {
+ GenericParamDefKind::Lifetime => {
+ self.mk_region(ty::ReEarlyBound(param.to_early_bound_region_data())).into()
+ }
+ GenericParamDefKind::Type { .. } => self.mk_ty_param(param.index, param.name).into(),
+ GenericParamDefKind::Const { .. } => {
+ self.mk_const_param(param.index, param.name, self.type_of(param.def_id)).into()
+ }
+ }
+ }
+
+ #[inline]
+ pub fn mk_opaque(self, def_id: DefId, substs: SubstsRef<'tcx>) -> Ty<'tcx> {
+ self.mk_ty(Opaque(def_id, substs))
+ }
+
+ pub fn mk_place_field(self, place: Place<'tcx>, f: Field, ty: Ty<'tcx>) -> Place<'tcx> {
+ self.mk_place_elem(place, PlaceElem::Field(f, ty))
+ }
+
+ pub fn mk_place_deref(self, place: Place<'tcx>) -> Place<'tcx> {
+ self.mk_place_elem(place, PlaceElem::Deref)
+ }
+
+ pub fn mk_place_downcast(
+ self,
+ place: Place<'tcx>,
+ adt_def: AdtDef<'tcx>,
+ variant_index: VariantIdx,
+ ) -> Place<'tcx> {
+ self.mk_place_elem(
+ place,
+ PlaceElem::Downcast(Some(adt_def.variant(variant_index).name), variant_index),
+ )
+ }
+
+ pub fn mk_place_downcast_unnamed(
+ self,
+ place: Place<'tcx>,
+ variant_index: VariantIdx,
+ ) -> Place<'tcx> {
+ self.mk_place_elem(place, PlaceElem::Downcast(None, variant_index))
+ }
+
+ pub fn mk_place_index(self, place: Place<'tcx>, index: Local) -> Place<'tcx> {
+ self.mk_place_elem(place, PlaceElem::Index(index))
+ }
+
+ /// This method copies the `Place`'s projection, adds an element, and re-interns it. It
+ /// should not be used to build a full `Place`; it's just a convenient way to grab a
+ /// projection and modify it in flight.
+ pub fn mk_place_elem(self, place: Place<'tcx>, elem: PlaceElem<'tcx>) -> Place<'tcx> {
+ let mut projection = place.projection.to_vec();
+ projection.push(elem);
+
+ Place { local: place.local, projection: self.intern_place_elems(&projection) }
+ }
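+
+ // Illustrative (not from the original source): chaining the helpers above
+ // to build `(*base).field`, assuming `base: Place<'tcx>`, a field index
+ // `f: Field`, and the field's type `ty`:
+ //
+ //     let deref = tcx.mk_place_deref(base);
+ //     let projected = tcx.mk_place_field(deref, f, ty);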
+
+ pub fn intern_poly_existential_predicates(
+ self,
+ eps: &[ty::Binder<'tcx, ExistentialPredicate<'tcx>>],
+ ) -> &'tcx List<ty::Binder<'tcx, ExistentialPredicate<'tcx>>> {
+ assert!(!eps.is_empty());
+ assert!(
+ eps.array_windows()
+ .all(|[a, b]| a.skip_binder().stable_cmp(self, &b.skip_binder())
+ != Ordering::Greater)
+ );
+ self._intern_poly_existential_predicates(eps)
+ }
+
+ pub fn intern_predicates(self, preds: &[Predicate<'tcx>]) -> &'tcx List<Predicate<'tcx>> {
+ // FIXME consider asking the input slice to be sorted to avoid
+ // re-interning permutations, in which case that would be asserted
+ // here.
+ if preds.is_empty() {
+ // The macro-generated method below asserts we don't intern an empty slice.
+ List::empty()
+ } else {
+ self._intern_predicates(preds)
+ }
+ }
+
+ pub fn intern_type_list(self, ts: &[Ty<'tcx>]) -> &'tcx List<Ty<'tcx>> {
+ if ts.is_empty() {
+ List::empty()
+ } else {
+ // Actually intern type lists as lists of `GenericArg`s.
+ //
+ // Transmuting from `Ty<'tcx>` to `GenericArg<'tcx>` is sound
+ // as explained in `ty_slice_as_generic_args`. With this,
+ // we guarantee that even when transmuting between `List<Ty<'tcx>>`
+ // and `List<GenericArg<'tcx>>`, the uniqueness requirement for
+ // lists is upheld.
+ let substs = self._intern_substs(ty::subst::ty_slice_as_generic_args(ts));
+ substs.try_as_type_list().unwrap()
+ }
+ }
+
+ pub fn intern_substs(self, ts: &[GenericArg<'tcx>]) -> &'tcx List<GenericArg<'tcx>> {
+ if ts.is_empty() { List::empty() } else { self._intern_substs(ts) }
+ }
+
+ pub fn intern_projs(self, ps: &[ProjectionKind]) -> &'tcx List<ProjectionKind> {
+ if ps.is_empty() { List::empty() } else { self._intern_projs(ps) }
+ }
+
+ pub fn intern_place_elems(self, ts: &[PlaceElem<'tcx>]) -> &'tcx List<PlaceElem<'tcx>> {
+ if ts.is_empty() { List::empty() } else { self._intern_place_elems(ts) }
+ }
+
+ pub fn intern_canonical_var_infos(
+ self,
+ ts: &[CanonicalVarInfo<'tcx>],
+ ) -> CanonicalVarInfos<'tcx> {
+ if ts.is_empty() { List::empty() } else { self._intern_canonical_var_infos(ts) }
+ }
+
+ pub fn intern_bound_variable_kinds(
+ self,
+ ts: &[ty::BoundVariableKind],
+ ) -> &'tcx List<ty::BoundVariableKind> {
+ if ts.is_empty() { List::empty() } else { self._intern_bound_variable_kinds(ts) }
+ }
+
+ pub fn mk_fn_sig<I>(
+ self,
+ inputs: I,
+ output: I::Item,
+ c_variadic: bool,
+ unsafety: hir::Unsafety,
+ abi: abi::Abi,
+ ) -> <I::Item as InternIteratorElement<Ty<'tcx>, ty::FnSig<'tcx>>>::Output
+ where
+ I: Iterator<Item: InternIteratorElement<Ty<'tcx>, ty::FnSig<'tcx>>>,
+ {
+ inputs.chain(iter::once(output)).intern_with(|xs| ty::FnSig {
+ inputs_and_output: self.intern_type_list(xs),
+ c_variadic,
+ unsafety,
+ abi,
+ })
+ }
+
+ pub fn mk_poly_existential_predicates<
+ I: InternAs<
+ [ty::Binder<'tcx, ExistentialPredicate<'tcx>>],
+ &'tcx List<ty::Binder<'tcx, ExistentialPredicate<'tcx>>>,
+ >,
+ >(
+ self,
+ iter: I,
+ ) -> I::Output {
+ iter.intern_with(|xs| self.intern_poly_existential_predicates(xs))
+ }
+
+ pub fn mk_predicates<I: InternAs<[Predicate<'tcx>], &'tcx List<Predicate<'tcx>>>>(
+ self,
+ iter: I,
+ ) -> I::Output {
+ iter.intern_with(|xs| self.intern_predicates(xs))
+ }
+
+ pub fn mk_type_list<I: InternAs<[Ty<'tcx>], &'tcx List<Ty<'tcx>>>>(self, iter: I) -> I::Output {
+ iter.intern_with(|xs| self.intern_type_list(xs))
+ }
+
+ pub fn mk_substs<I: InternAs<[GenericArg<'tcx>], &'tcx List<GenericArg<'tcx>>>>(
+ self,
+ iter: I,
+ ) -> I::Output {
+ iter.intern_with(|xs| self.intern_substs(xs))
+ }
+
+ pub fn mk_place_elems<I: InternAs<[PlaceElem<'tcx>], &'tcx List<PlaceElem<'tcx>>>>(
+ self,
+ iter: I,
+ ) -> I::Output {
+ iter.intern_with(|xs| self.intern_place_elems(xs))
+ }
+
+ pub fn mk_substs_trait(self, self_ty: Ty<'tcx>, rest: &[GenericArg<'tcx>]) -> SubstsRef<'tcx> {
+ self.mk_substs(iter::once(self_ty.into()).chain(rest.iter().cloned()))
+ }
+
+ pub fn mk_bound_variable_kinds<
+ I: InternAs<[ty::BoundVariableKind], &'tcx List<ty::BoundVariableKind>>,
+ >(
+ self,
+ iter: I,
+ ) -> I::Output {
+ iter.intern_with(|xs| self.intern_bound_variable_kinds(xs))
+ }
+
+ /// Walks upwards from `id` to find a node which might change lint levels with attributes.
+ /// It stops at `bound` and just returns it if reached.
+ pub fn maybe_lint_level_root_bounded(self, mut id: HirId, bound: HirId) -> HirId {
+ let hir = self.hir();
+ loop {
+ if id == bound {
+ return bound;
+ }
+
+ if hir.attrs(id).iter().any(|attr| Level::from_attr(attr).is_some()) {
+ return id;
+ }
+ let next = hir.get_parent_node(id);
+ if next == id {
+ bug!("lint traversal reached the root of the crate");
+ }
+ id = next;
+ }
+ }
+
+ pub fn lint_level_at_node(
+ self,
+ lint: &'static Lint,
+ mut id: hir::HirId,
+ ) -> (Level, LintLevelSource) {
+ let sets = self.lint_levels(());
+ loop {
+ if let Some(pair) = sets.level_and_source(lint, id, self.sess) {
+ return pair;
+ }
+ let next = self.hir().get_parent_node(id);
+ if next == id {
+ bug!("lint traversal reached the root of the crate");
+ }
+ id = next;
+ }
+ }
+
+ /// Emit a lint at `span` from a lint struct (some type that implements `DecorateLint`,
+ /// typically generated by `#[derive(LintDiagnostic)]`).
+ pub fn emit_spanned_lint(
+ self,
+ lint: &'static Lint,
+ hir_id: HirId,
+ span: impl Into<MultiSpan>,
+ decorator: impl for<'a> DecorateLint<'a, ()>,
+ ) {
+ self.struct_span_lint_hir(lint, hir_id, span, |diag| decorator.decorate_lint(diag))
+ }
+
+ pub fn struct_span_lint_hir(
+ self,
+ lint: &'static Lint,
+ hir_id: HirId,
+ span: impl Into<MultiSpan>,
+ decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a, ()>),
+ ) {
+ let (level, src) = self.lint_level_at_node(lint, hir_id);
+ struct_lint_level(self.sess, lint, level, src, Some(span.into()), decorate);
+ }
+
+ /// Emit a lint from a lint struct (some type that implements `DecorateLint`, typically
+ /// generated by `#[derive(LintDiagnostic)]`).
+ pub fn emit_lint(
+ self,
+ lint: &'static Lint,
+ id: HirId,
+ decorator: impl for<'a> DecorateLint<'a, ()>,
+ ) {
+ self.struct_lint_node(lint, id, |diag| decorator.decorate_lint(diag))
+ }
+
+ pub fn struct_lint_node(
+ self,
+ lint: &'static Lint,
+ id: HirId,
+ decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a, ()>),
+ ) {
+ let (level, src) = self.lint_level_at_node(lint, id);
+ struct_lint_level(self.sess, lint, level, src, None, decorate);
+ }
+
+ pub fn in_scope_traits(self, id: HirId) -> Option<&'tcx [TraitCandidate]> {
+ let map = self.in_scope_traits_map(id.owner)?;
+ let candidates = map.get(&id.local_id)?;
+ Some(&*candidates)
+ }
+
+ pub fn named_region(self, id: HirId) -> Option<resolve_lifetime::Region> {
+ debug!(?id, "named_region");
+ self.named_region_map(id.owner).and_then(|map| map.get(&id.local_id).cloned())
+ }
+
+ pub fn is_late_bound(self, id: HirId) -> bool {
+ self.is_late_bound_map(id.owner).map_or(false, |set| {
+ let def_id = self.hir().local_def_id(id);
+ set.contains(&def_id)
+ })
+ }
+
+ pub fn late_bound_vars(self, id: HirId) -> &'tcx List<ty::BoundVariableKind> {
+ self.mk_bound_variable_kinds(
+ self.late_bound_vars_map(id.owner)
+ .and_then(|map| map.get(&id.local_id).cloned())
+ .unwrap_or_else(|| {
+ bug!("No bound vars found for {:?} ({:?})", self.hir().node_to_string(id), id)
+ })
+ .iter(),
+ )
+ }
+
+ /// Whether the `def_id` counts as a const fn in the current crate, considering all active
+ /// feature gates.
+ pub fn is_const_fn(self, def_id: DefId) -> bool {
+ if self.is_const_fn_raw(def_id) {
+ match self.lookup_const_stability(def_id) {
+ Some(stability) if stability.is_const_unstable() => {
+ // Has a `rustc_const_unstable` attribute; check whether the user enabled the
+ // corresponding feature gate.
+ self.features()
+ .declared_lib_features
+ .iter()
+ .any(|&(sym, _)| sym == stability.feature)
+ }
+ // Functions without const stability are either stable, user-written
+ // const fns, or the user is already using feature gates; either way,
+ // there is nothing further to check here.
+ _ => true,
+ }
+ } else {
+ false
+ }
+ }
+
+ /// Whether the trait impl is marked const. This does not consider stability or feature gates.
+ pub fn is_const_trait_impl_raw(self, def_id: DefId) -> bool {
+ let Some(local_def_id) = def_id.as_local() else { return false };
+ let hir_id = self.local_def_id_to_hir_id(local_def_id);
+ let node = self.hir().get(hir_id);
+
+ matches!(
+ node,
+ hir::Node::Item(hir::Item {
+ kind: hir::ItemKind::Impl(hir::Impl { constness: hir::Constness::Const, .. }),
+ ..
+ })
+ )
+ }
+}
+
+impl<'tcx> TyCtxtAt<'tcx> {
+ /// Constructs a `TyKind::Error` type and registers a `delay_span_bug` to ensure it gets used.
+ #[track_caller]
+ pub fn ty_error(self) -> Ty<'tcx> {
+ self.tcx.ty_error_with_message(self.span, "TyKind::Error constructed but no error reported")
+ }
+
+ /// Constructs a `TyKind::Error` type and registers a `delay_span_bug` with the given `msg` to
+ /// ensure it gets used.
+ #[track_caller]
+ pub fn ty_error_with_message(self, msg: &str) -> Ty<'tcx> {
+ self.tcx.ty_error_with_message(self.span, msg)
+ }
+}
+
+// We are comparing types with different invariant lifetimes, so `ptr::eq`
+// won't work for us.
+fn ptr_eq<T, U>(t: *const T, u: *const U) -> bool {
+ t as *const () == u as *const ()
+}
+
+pub fn provide(providers: &mut ty::query::Providers) {
+ providers.resolutions = |tcx, ()| &tcx.untracked_resolutions;
+ providers.resolver_for_lowering = |tcx, ()| &tcx.untracked_resolver_for_lowering;
+ providers.module_reexports =
+ |tcx, id| tcx.resolutions(()).reexport_map.get(&id).map(|v| &v[..]);
+ providers.crate_name = |tcx, id| {
+ assert_eq!(id, LOCAL_CRATE);
+ tcx.crate_name
+ };
+ providers.maybe_unused_trait_imports =
+ |tcx, ()| &tcx.resolutions(()).maybe_unused_trait_imports;
+ providers.maybe_unused_extern_crates =
+ |tcx, ()| &tcx.resolutions(()).maybe_unused_extern_crates[..];
+ providers.names_imported_by_glob_use = |tcx, id| {
+ tcx.arena.alloc(tcx.resolutions(()).glob_map.get(&id).cloned().unwrap_or_default())
+ };
+
+ providers.extern_mod_stmt_cnum =
+ |tcx, id| tcx.resolutions(()).extern_crate_map.get(&id).cloned();
+ providers.output_filenames = |tcx, ()| &tcx.output_filenames;
+ providers.features_query = |tcx, ()| tcx.sess.features_untracked();
+ providers.is_panic_runtime = |tcx, cnum| {
+ assert_eq!(cnum, LOCAL_CRATE);
+ tcx.sess.contains_name(tcx.hir().krate_attrs(), sym::panic_runtime)
+ };
+ providers.is_compiler_builtins = |tcx, cnum| {
+ assert_eq!(cnum, LOCAL_CRATE);
+ tcx.sess.contains_name(tcx.hir().krate_attrs(), sym::compiler_builtins)
+ };
+ providers.has_panic_handler = |tcx, cnum| {
+ assert_eq!(cnum, LOCAL_CRATE);
+ // We want to check whether the panic handler was defined in this crate.
+ tcx.lang_items().panic_impl().map_or(false, |did| did.is_local())
+ };
+}
diff --git a/compiler/rustc_middle/src/ty/diagnostics.rs b/compiler/rustc_middle/src/ty/diagnostics.rs
new file mode 100644
index 000000000..dd2f43210
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/diagnostics.rs
@@ -0,0 +1,501 @@
+//! Diagnostics related methods for `Ty`.
+
+use std::ops::ControlFlow;
+
+use crate::ty::{
+ visit::TypeVisitable, Const, ConstKind, DefIdTree, ExistentialPredicate, InferConst, InferTy,
+ PolyTraitPredicate, Ty, TyCtxt, TypeSuperVisitable, TypeVisitor,
+};
+
+use rustc_data_structures::fx::FxHashMap;
+use rustc_errors::{Applicability, Diagnostic, DiagnosticArgValue, IntoDiagnosticArg};
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_hir::WherePredicate;
+use rustc_span::Span;
+use rustc_type_ir::sty::TyKind::*;
+
+impl<'tcx> IntoDiagnosticArg for Ty<'tcx> {
+ fn into_diagnostic_arg(self) -> DiagnosticArgValue<'static> {
+ format!("{}", self).into_diagnostic_arg()
+ }
+}
+
+impl<'tcx> Ty<'tcx> {
+ /// Similar to `Ty::is_primitive`, but also considers inferred numeric values to be primitive.
+ pub fn is_primitive_ty(self) -> bool {
+ matches!(
+ self.kind(),
+ Bool | Char
+ | Str
+ | Int(_)
+ | Uint(_)
+ | Float(_)
+ | Infer(
+ InferTy::IntVar(_)
+ | InferTy::FloatVar(_)
+ | InferTy::FreshIntTy(_)
+ | InferTy::FreshFloatTy(_)
+ )
+ )
+ }
+
+ /// Whether the type can be represented succinctly as the type itself, rather than being
+ /// referred to by a description in error messages. This is used in the main error message.
+ pub fn is_simple_ty(self) -> bool {
+ match self.kind() {
+ Bool
+ | Char
+ | Str
+ | Int(_)
+ | Uint(_)
+ | Float(_)
+ | Infer(
+ InferTy::IntVar(_)
+ | InferTy::FloatVar(_)
+ | InferTy::FreshIntTy(_)
+ | InferTy::FreshFloatTy(_),
+ ) => true,
+ Ref(_, x, _) | Array(x, _) | Slice(x) => x.peel_refs().is_simple_ty(),
+ Tuple(tys) if tys.is_empty() => true,
+ _ => false,
+ }
+ }
+
+ /// Whether the type can be represented succinctly as the type itself, rather than being
+ /// referred to by a description in error messages. This is used in the primary span label.
+ /// Beyond what `is_simple_ty` includes, it also accepts ADTs with no type arguments and
+ /// references to ADTs with no type arguments (e.g. `String`, but not `Vec<u8>`).
+ pub fn is_simple_text(self) -> bool {
+ match self.kind() {
+ Adt(_, substs) => substs.non_erasable_generics().next().is_none(),
+ Ref(_, ty, _) => ty.is_simple_text(),
+ _ => self.is_simple_ty(),
+ }
+ }
+}
+
+pub trait IsSuggestable<'tcx> {
+ /// Whether this makes sense to suggest in a diagnostic.
+ ///
+ /// We filter out certain types and constants since they don't provide
+ /// meaningful rendered suggestions when pretty-printed. We leave some
+ /// nonsense, such as region vars, since those render as `'_` and are
+ /// usually okay to reinterpret as elided lifetimes.
+ ///
+ /// Type and const inference variables are considered suggestable only
+ /// if `infer_suggestable` is true.
+ fn is_suggestable(self, tcx: TyCtxt<'tcx>, infer_suggestable: bool) -> bool;
+}
+
+impl<'tcx, T> IsSuggestable<'tcx> for T
+where
+ T: TypeVisitable<'tcx>,
+{
+ fn is_suggestable(self, tcx: TyCtxt<'tcx>, infer_suggestable: bool) -> bool {
+ self.visit_with(&mut IsSuggestableVisitor { tcx, infer_suggestable }).is_continue()
+ }
+}
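
An illustrative sketch of what this filter screens out (not part of the diff): a capturing closure's type has no writable surface syntax, so suggesting it verbatim would be useless. The snippet compiles as written; the commented-out call would produce an error whose "found" type is exactly the kind `is_suggestable` rejects.

```
fn expects_fn_ptr(f: fn(u32) -> u32) -> u32 {
    f(1)
}

fn main() {
    let delta = 1;
    // A capturing closure does not coerce to `fn`; the error for the call
    // below reports a found type like `[closure@src/main.rs:9:24]`, which
    // has no nameable syntax and so is not a suggestion candidate:
    // expects_fn_ptr(move |n| n + delta); // error[E0308]: mismatched types
    let non_capturing = |n: u32| n + 1; // no captures, so this one coerces
    assert_eq!(expects_fn_ptr(non_capturing), 2);
    let _ = delta; // silence the unused-variable warning
}
```
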
+
+pub fn suggest_arbitrary_trait_bound<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ generics: &hir::Generics<'_>,
+ err: &mut Diagnostic,
+ trait_pred: PolyTraitPredicate<'tcx>,
+) -> bool {
+ if !trait_pred.is_suggestable(tcx, false) {
+ return false;
+ }
+
+ let param_name = trait_pred.skip_binder().self_ty().to_string();
+ let constraint = trait_pred.print_modifiers_and_trait_path().to_string();
+ let param = generics.params.iter().find(|p| p.name.ident().as_str() == param_name);
+
+ // Skip, there is a param named Self
+ if param.is_some() && param_name == "Self" {
+ return false;
+ }
+
+ // Suggest a where clause bound for a non-type parameter.
+ err.span_suggestion_verbose(
+ generics.tail_span_for_predicate_suggestion(),
+ &format!(
+ "consider {} `where` clause, but there might be an alternative better way to express \
+ this requirement",
+ if generics.where_clause_span.is_empty() { "introducing a" } else { "extending the" },
+ ),
+ format!("{} {}: {}", generics.add_where_or_trailing_comma(), param_name, constraint),
+ Applicability::MaybeIncorrect,
+ );
+ true
+}
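
A sketch of code that exercises this path (not part of the diff): when the failing predicate's self type is not a bare type parameter, the only structured fix is a full `where` clause, and the compiler may respond with the message built above.

```
use std::fmt::Debug;

#[derive(Debug)]
struct Wrapper<T>(T);

// If the `where` clause (and the derive) were removed, the `println!` would
// fail with "`Wrapper<T>` doesn't implement `Debug`", and among the help
// messages the compiler may emit is the one assembled above: "consider
// introducing a `where` clause, ...: `where Wrapper<T>: Debug`".
fn show<T>(w: Wrapper<T>)
where
    Wrapper<T>: Debug,
{
    println!("{:?}", w);
}

fn main() {
    show(Wrapper(1u8));
}
```
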
+
+#[derive(Debug)]
+enum SuggestChangingConstraintsMessage<'a> {
+ RestrictBoundFurther,
+ RestrictType { ty: &'a str },
+ RestrictTypeFurther { ty: &'a str },
+ RemovingQSized,
+}
+
+fn suggest_removing_unsized_bound(
+ tcx: TyCtxt<'_>,
+ generics: &hir::Generics<'_>,
+ suggestions: &mut Vec<(Span, String, SuggestChangingConstraintsMessage<'_>)>,
+ param: &hir::GenericParam<'_>,
+ def_id: Option<DefId>,
+) {
+ // See if there's a `?Sized` bound that can be removed to suggest that.
+ // First look at the `where` clause because we can have `where T: ?Sized`,
+ // then look at params.
+ let param_def_id = tcx.hir().local_def_id(param.hir_id);
+ for (where_pos, predicate) in generics.predicates.iter().enumerate() {
+ let WherePredicate::BoundPredicate(predicate) = predicate else {
+ continue;
+ };
+ if !predicate.is_param_bound(param_def_id.to_def_id()) {
+ continue;
+ };
+
+ for (pos, bound) in predicate.bounds.iter().enumerate() {
+ let hir::GenericBound::Trait(poly, hir::TraitBoundModifier::Maybe) = bound else {
+ continue;
+ };
+ if poly.trait_ref.trait_def_id() != def_id {
+ continue;
+ }
+ let sp = generics.span_for_bound_removal(where_pos, pos);
+ suggestions.push((
+ sp,
+ String::new(),
+ SuggestChangingConstraintsMessage::RemovingQSized,
+ ));
+ }
+ }
+}
+
+/// Suggest restricting a type param with a new bound.
+pub fn suggest_constraining_type_param(
+ tcx: TyCtxt<'_>,
+ generics: &hir::Generics<'_>,
+ err: &mut Diagnostic,
+ param_name: &str,
+ constraint: &str,
+ def_id: Option<DefId>,
+) -> bool {
+ suggest_constraining_type_params(
+ tcx,
+ generics,
+ err,
+ [(param_name, constraint, def_id)].into_iter(),
+ )
+}
+
+/// Suggest restricting a type param with a new bound.
+pub fn suggest_constraining_type_params<'a>(
+ tcx: TyCtxt<'_>,
+ generics: &hir::Generics<'_>,
+ err: &mut Diagnostic,
+ param_names_and_constraints: impl Iterator<Item = (&'a str, &'a str, Option<DefId>)>,
+) -> bool {
+ let mut grouped = FxHashMap::default();
+ param_names_and_constraints.for_each(|(param_name, constraint, def_id)| {
+ grouped.entry(param_name).or_insert(Vec::new()).push((constraint, def_id))
+ });
+
+ let mut applicability = Applicability::MachineApplicable;
+ let mut suggestions = Vec::new();
+
+ for (param_name, mut constraints) in grouped {
+ let param = generics.params.iter().find(|p| p.name.ident().as_str() == param_name);
+ let Some(param) = param else { return false };
+
+ {
+ let mut sized_constraints =
+ constraints.drain_filter(|(_, def_id)| *def_id == tcx.lang_items().sized_trait());
+ if let Some((constraint, def_id)) = sized_constraints.next() {
+ applicability = Applicability::MaybeIncorrect;
+
+ err.span_label(
+ param.span,
+ &format!("this type parameter needs to be `{}`", constraint),
+ );
+ suggest_removing_unsized_bound(tcx, generics, &mut suggestions, param, def_id);
+ }
+ }
+
+ if constraints.is_empty() {
+ continue;
+ }
+
+ let mut constraint = constraints.iter().map(|&(c, _)| c).collect::<Vec<_>>();
+ constraint.sort();
+ constraint.dedup();
+ let constraint = constraint.join(" + ");
+ let mut suggest_restrict = |span, bound_list_non_empty| {
+ suggestions.push((
+ span,
+ if bound_list_non_empty {
+ format!(" + {}", constraint)
+ } else {
+ format!(" {}", constraint)
+ },
+ SuggestChangingConstraintsMessage::RestrictBoundFurther,
+ ))
+ };
+
+ // When the type parameter has been provided bounds
+ //
+ // Message:
+ // fn foo<T>(t: T) where T: Foo { ... }
+ // ^^^^^^
+ // |
+ // help: consider further restricting this bound with `+ Bar`
+ //
+ // Suggestion:
+ // fn foo<T>(t: T) where T: Foo { ... }
+ // ^
+ // |
+ // replace with: ` + Bar`
+ //
+ // Or, if user has provided some bounds, suggest restricting them:
+ //
+ // fn foo<T: Foo>(t: T) { ... }
+ // ---
+ // |
+ // help: consider further restricting this bound with `+ Bar`
+ //
+ // Suggestion for tools in this case is:
+ //
+ // fn foo<T: Foo>(t: T) { ... }
+ // --
+ // |
+ // replace with: `T: Bar +`
+ let param_def_id = tcx.hir().local_def_id(param.hir_id);
+ if let Some(span) = generics.bounds_span_for_suggestions(param_def_id) {
+ suggest_restrict(span, true);
+ continue;
+ }
+
+ if generics.has_where_clause_predicates {
+ // This part is a bit tricky, because using the `where` clause the user
+ // can provide zero, one, or many bounds for the same type parameter, so
+ // we have the following cases to consider:
+ //
+ // When the type parameter has been provided zero bounds
+ //
+ // Message:
+ // fn foo<X, Y>(x: X, y: Y) where Y: Foo { ... }
+ // - help: consider restricting this type parameter with `where X: Bar`
+ //
+ // Suggestion:
+ // fn foo<X, Y>(x: X, y: Y) where Y: Foo { ... }
+ // - insert: `, X: Bar`
+ suggestions.push((
+ generics.tail_span_for_predicate_suggestion(),
+ constraints
+ .iter()
+ .map(|&(constraint, _)| format!(", {}: {}", param_name, constraint))
+ .collect::<String>(),
+ SuggestChangingConstraintsMessage::RestrictTypeFurther { ty: param_name },
+ ));
+ continue;
+ }
+
+ // Additionally, there may be no `where` clause but the generic parameter has a default:
+ //
+ // Message:
+ // trait Foo<T=()> {... }
+ // - help: consider further restricting this type parameter with `where T: Zar`
+ //
+ // Suggestion:
+ // trait Foo<T=()> {... }
+ // - insert: `where T: Zar`
+ if matches!(param.kind, hir::GenericParamKind::Type { default: Some(_), .. }) {
+ // Suggest a bound, but there is no existing `where` clause *and* the type param has a
+ // default (`<T=Foo>`), so we suggest adding `where T: Bar`.
+ suggestions.push((
+ generics.tail_span_for_predicate_suggestion(),
+ format!(" where {}: {}", param_name, constraint),
+ SuggestChangingConstraintsMessage::RestrictTypeFurther { ty: param_name },
+ ));
+ continue;
+ }
+
+ // If user has provided a colon, don't suggest adding another:
+ //
+ // fn foo<T:>(t: T) { ... }
+ // - insert: consider restricting this type parameter with `T: Foo`
+ if let Some(colon_span) = param.colon_span {
+ suggestions.push((
+ colon_span.shrink_to_hi(),
+ format!(" {}", constraint),
+ SuggestChangingConstraintsMessage::RestrictType { ty: param_name },
+ ));
+ continue;
+ }
+
+ // If user hasn't provided any bounds, suggest adding a new one:
+ //
+ // fn foo<T>(t: T) { ... }
+ // - help: consider restricting this type parameter with `T: Foo`
+ suggestions.push((
+ param.span.shrink_to_hi(),
+ format!(": {}", constraint),
+ SuggestChangingConstraintsMessage::RestrictType { ty: param_name },
+ ));
+ }
+
+ if suggestions.len() == 1 {
+ let (span, suggestion, msg) = suggestions.pop().unwrap();
+
+ let s;
+ let msg = match msg {
+ SuggestChangingConstraintsMessage::RestrictBoundFurther => {
+ "consider further restricting this bound"
+ }
+ SuggestChangingConstraintsMessage::RestrictType { ty } => {
+ s = format!("consider restricting type parameter `{}`", ty);
+ &s
+ }
+ SuggestChangingConstraintsMessage::RestrictTypeFurther { ty } => {
+ s = format!("consider further restricting type parameter `{}`", ty);
+ &s
+ }
+ SuggestChangingConstraintsMessage::RemovingQSized => {
+ "consider removing the `?Sized` bound to make the type parameter `Sized`"
+ }
+ };
+
+ err.span_suggestion_verbose(span, msg, suggestion, applicability);
+ } else if suggestions.len() > 1 {
+ err.multipart_suggestion_verbose(
+ "consider restricting type parameters",
+ suggestions.into_iter().map(|(span, suggestion, _)| (span, suggestion)).collect(),
+ applicability,
+ );
+ }
+
+ true
+}
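
For orientation, a sketch of the user code behind the most common form of this suggestion (not part of the diff): removing the `PartialOrd` bound yields "binary operation `>` cannot be applied to type `&T`" together with the structured help "consider restricting type parameter `T`".

```
// Compiles as written; delete `: PartialOrd` to see the suggestion fire.
fn largest<T: PartialOrd>(list: &[T]) -> &T {
    let mut max = &list[0];
    for item in list {
        if item > max {
            max = item;
        }
    }
    max
}

fn main() {
    assert_eq!(*largest(&[1, 5, 3]), 5);
}
```
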
+
+/// Collect all types that have an implicit `'static` obligation that we could suggest `'_` for.
+pub struct TraitObjectVisitor<'tcx>(pub Vec<&'tcx hir::Ty<'tcx>>, pub crate::hir::map::Map<'tcx>);
+
+impl<'v> hir::intravisit::Visitor<'v> for TraitObjectVisitor<'v> {
+ fn visit_ty(&mut self, ty: &'v hir::Ty<'v>) {
+ match ty.kind {
+ hir::TyKind::TraitObject(
+ _,
+ hir::Lifetime {
+ name:
+ hir::LifetimeName::ImplicitObjectLifetimeDefault | hir::LifetimeName::Static,
+ ..
+ },
+ _,
+ ) => {
+ self.0.push(ty);
+ }
+ hir::TyKind::OpaqueDef(item_id, _) => {
+ self.0.push(ty);
+ let item = self.1.item(item_id);
+ hir::intravisit::walk_item(self, item);
+ }
+ _ => {}
+ }
+ hir::intravisit::walk_ty(self, ty);
+ }
+}
+
+/// Collect the spans of all `'static` lifetimes (including implicit object lifetime defaults) that we could suggest `'_` for.
+pub struct StaticLifetimeVisitor<'tcx>(pub Vec<Span>, pub crate::hir::map::Map<'tcx>);
+
+impl<'v> hir::intravisit::Visitor<'v> for StaticLifetimeVisitor<'v> {
+ fn visit_lifetime(&mut self, lt: &'v hir::Lifetime) {
+ if let hir::LifetimeName::ImplicitObjectLifetimeDefault | hir::LifetimeName::Static =
+ lt.name
+ {
+ self.0.push(lt.span);
+ }
+ }
+}
+
+pub struct IsSuggestableVisitor<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ infer_suggestable: bool,
+}
+
+impl<'tcx> TypeVisitor<'tcx> for IsSuggestableVisitor<'tcx> {
+ type BreakTy = ();
+
+ fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ match t.kind() {
+ Infer(InferTy::TyVar(_)) if self.infer_suggestable => {}
+
+ FnDef(..)
+ | Closure(..)
+ | Infer(..)
+ | Generator(..)
+ | GeneratorWitness(..)
+ | Bound(_, _)
+ | Placeholder(_)
+ | Error(_) => {
+ return ControlFlow::Break(());
+ }
+
+ Opaque(did, _) => {
+ let parent = self.tcx.parent(*did);
+ if let hir::def::DefKind::TyAlias | hir::def::DefKind::AssocTy = self.tcx.def_kind(parent)
+ && let Opaque(parent_did, _) = self.tcx.type_of(parent).kind()
+ && parent_did == did
+ {
+ // Okay
+ } else {
+ return ControlFlow::Break(());
+ }
+ }
+
+ Dynamic(dty, _) => {
+ for pred in *dty {
+ match pred.skip_binder() {
+ ExistentialPredicate::Trait(_) | ExistentialPredicate::Projection(_) => {
+ // Okay
+ }
+ _ => return ControlFlow::Break(()),
+ }
+ }
+ }
+
+ Param(param) => {
+ // FIXME: It would be nice to make this not use string manipulation,
+ // but it's pretty hard to do this, since `ty::ParamTy` is missing
+ // sufficient info to determine if it is synthetic, and we don't
+ // always have a convenient way of getting `ty::Generics` at the call
+ // sites we invoke `IsSuggestable::is_suggestable`.
+ if param.name.as_str().starts_with("impl ") {
+ return ControlFlow::Break(());
+ }
+ }
+
+ _ => {}
+ }
+
+ t.super_visit_with(self)
+ }
+
+ fn visit_const(&mut self, c: Const<'tcx>) -> ControlFlow<Self::BreakTy> {
+ match c.kind() {
+ ConstKind::Infer(InferConst::Var(_)) if self.infer_suggestable => {}
+
+ ConstKind::Infer(..)
+ | ConstKind::Bound(..)
+ | ConstKind::Placeholder(..)
+ | ConstKind::Error(..) => {
+ return ControlFlow::Break(());
+ }
+ _ => {}
+ }
+
+ c.super_visit_with(self)
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/erase_regions.rs b/compiler/rustc_middle/src/ty/erase_regions.rs
new file mode 100644
index 000000000..3226950e7
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/erase_regions.rs
@@ -0,0 +1,74 @@
+use crate::mir;
+use crate::ty::fold::{TypeFoldable, TypeFolder, TypeSuperFoldable};
+use crate::ty::visit::TypeVisitable;
+use crate::ty::{self, Ty, TyCtxt, TypeFlags};
+
+pub(super) fn provide(providers: &mut ty::query::Providers) {
+ *providers = ty::query::Providers { erase_regions_ty, ..*providers };
+}
+
+fn erase_regions_ty<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> Ty<'tcx> {
+ // N.B., use `super_fold_with` here. If we used `fold_with`, it
+ // could invoke the `erase_regions_ty` query recursively.
+ ty.super_fold_with(&mut RegionEraserVisitor { tcx })
+}
+
+impl<'tcx> TyCtxt<'tcx> {
+ /// Returns an equivalent value with all free regions removed (note
+ /// that late-bound regions remain, because they are important for
+ /// subtyping, but they are anonymized and normalized as well).
+ pub fn erase_regions<T>(self, value: T) -> T
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ // If there's nothing to erase avoid performing the query at all
+ if !value.has_type_flags(TypeFlags::HAS_RE_LATE_BOUND | TypeFlags::HAS_FREE_REGIONS) {
+ return value;
+ }
+ debug!("erase_regions({:?})", value);
+ let value1 = value.fold_with(&mut RegionEraserVisitor { tcx: self });
+ debug!("erase_regions = {:?}", value1);
+ value1
+ }
+}
+
+struct RegionEraserVisitor<'tcx> {
+ tcx: TyCtxt<'tcx>,
+}
+
+impl<'tcx> TypeFolder<'tcx> for RegionEraserVisitor<'tcx> {
+ fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ if ty.needs_infer() { ty.super_fold_with(self) } else { self.tcx.erase_regions_ty(ty) }
+ }
+
+ fn fold_binder<T>(&mut self, t: ty::Binder<'tcx, T>) -> ty::Binder<'tcx, T>
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ let u = self.tcx.anonymize_bound_vars(t);
+ u.super_fold_with(self)
+ }
+
+ fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
+ // because late-bound regions affect subtyping, we can't
+ // erase the bound/free distinction, but we can replace
+ // all free regions with 'erased.
+ //
+ // Note that we *CAN* replace early-bound regions -- the
+ // type system never "sees" those, they get substituted
+ // away. In codegen, they will always be erased to 'erased
+ // whenever a substitution occurs.
+ match *r {
+ ty::ReLateBound(..) => r,
+ _ => self.tcx.lifetimes.re_erased,
+ }
+ }
+
+ fn fold_mir_const(&mut self, c: mir::ConstantKind<'tcx>) -> mir::ConstantKind<'tcx> {
+ c.super_fold_with(self)
+ }
+}
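
A surface-level illustration of what erasure means (not part of the diff): lifetimes never affect code generation, so once free regions are replaced with `'erased` the two calls below dispatch to the same monomorphized function.

```
fn len_of<'a>(s: &'a str) -> usize {
    s.len()
}

fn main() {
    let owned = String::from("hi");
    let short: &str = &owned;      // some local region 'a
    let long: &'static str = "hi"; // 'static
    // After `erase_regions`, both argument types are `&'erased str`.
    assert_eq!(len_of(short), len_of(long));
}
```
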
diff --git a/compiler/rustc_middle/src/ty/error.rs b/compiler/rustc_middle/src/ty/error.rs
new file mode 100644
index 000000000..4b0bc3c11
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/error.rs
@@ -0,0 +1,965 @@
+use crate::traits::{ObligationCause, ObligationCauseCode};
+use crate::ty::diagnostics::suggest_constraining_type_param;
+use crate::ty::print::{FmtPrinter, Printer};
+use crate::ty::{self, BoundRegionKind, Region, Ty, TyCtxt};
+use rustc_errors::Applicability::{MachineApplicable, MaybeIncorrect};
+use rustc_errors::{pluralize, Diagnostic, MultiSpan};
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_span::symbol::{sym, Symbol};
+use rustc_span::{BytePos, Span};
+use rustc_target::spec::abi;
+
+use std::borrow::Cow;
+use std::fmt;
+
+#[derive(Clone, Copy, Debug, PartialEq, Eq, TypeFoldable, TypeVisitable)]
+pub struct ExpectedFound<T> {
+ pub expected: T,
+ pub found: T,
+}
+
+impl<T> ExpectedFound<T> {
+ pub fn new(a_is_expected: bool, a: T, b: T) -> Self {
+ if a_is_expected {
+ ExpectedFound { expected: a, found: b }
+ } else {
+ ExpectedFound { expected: b, found: a }
+ }
+ }
+}
+
+// Data structures used in type unification
+#[derive(Clone, Debug, TypeFoldable, TypeVisitable)]
+pub enum TypeError<'tcx> {
+ Mismatch,
+ ConstnessMismatch(ExpectedFound<ty::BoundConstness>),
+ PolarityMismatch(ExpectedFound<ty::ImplPolarity>),
+ UnsafetyMismatch(ExpectedFound<hir::Unsafety>),
+ AbiMismatch(ExpectedFound<abi::Abi>),
+ Mutability,
+ ArgumentMutability(usize),
+ TupleSize(ExpectedFound<usize>),
+ FixedArraySize(ExpectedFound<u64>),
+ ArgCount,
+ FieldMisMatch(Symbol, Symbol),
+
+ RegionsDoesNotOutlive(Region<'tcx>, Region<'tcx>),
+ RegionsInsufficientlyPolymorphic(BoundRegionKind, Region<'tcx>),
+ RegionsOverlyPolymorphic(BoundRegionKind, Region<'tcx>),
+ RegionsPlaceholderMismatch,
+
+ Sorts(ExpectedFound<Ty<'tcx>>),
+ ArgumentSorts(ExpectedFound<Ty<'tcx>>, usize),
+ IntMismatch(ExpectedFound<ty::IntVarValue>),
+ FloatMismatch(ExpectedFound<ty::FloatTy>),
+ Traits(ExpectedFound<DefId>),
+ VariadicMismatch(ExpectedFound<bool>),
+
+ /// Instantiating a type variable with the given type would have
+ /// created a cycle (because it appears somewhere within that
+ /// type).
+ CyclicTy(Ty<'tcx>),
+ CyclicConst(ty::Const<'tcx>),
+ ProjectionMismatched(ExpectedFound<DefId>),
+ ExistentialMismatch(
+ ExpectedFound<&'tcx ty::List<ty::Binder<'tcx, ty::ExistentialPredicate<'tcx>>>>,
+ ),
+ ObjectUnsafeCoercion(DefId),
+ ConstMismatch(ExpectedFound<ty::Const<'tcx>>),
+
+ IntrinsicCast,
+ /// Safe `#[target_feature]` functions are not assignable to safe function pointers.
+ TargetFeatureCast(DefId),
+}
+
+/// Explains the source of a type error in a short, human-readable way. This is meant to be placed
+/// in parentheses after some larger message. You should also invoke `note_and_explain_type_err()`
+/// afterwards to present additional details, particularly when it comes to lifetime-related
+/// errors.
+impl<'tcx> fmt::Display for TypeError<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ use self::TypeError::*;
+ fn report_maybe_different(
+ f: &mut fmt::Formatter<'_>,
+ expected: &str,
+ found: &str,
+ ) -> fmt::Result {
+ // A naive approach to making sure that we're not reporting silly errors such as:
+ // (expected closure, found closure).
+ if expected == found {
+ write!(f, "expected {}, found a different {}", expected, found)
+ } else {
+ write!(f, "expected {}, found {}", expected, found)
+ }
+ }
+
+ let br_string = |br: ty::BoundRegionKind| match br {
+ ty::BrNamed(_, name) => format!(" {}", name),
+ _ => String::new(),
+ };
+
+ match *self {
+ CyclicTy(_) => write!(f, "cyclic type of infinite size"),
+ CyclicConst(_) => write!(f, "encountered a self-referencing constant"),
+ Mismatch => write!(f, "types differ"),
+ ConstnessMismatch(values) => {
+ write!(f, "expected {} bound, found {} bound", values.expected, values.found)
+ }
+ PolarityMismatch(values) => {
+ write!(f, "expected {} polarity, found {} polarity", values.expected, values.found)
+ }
+ UnsafetyMismatch(values) => {
+ write!(f, "expected {} fn, found {} fn", values.expected, values.found)
+ }
+ AbiMismatch(values) => {
+ write!(f, "expected {} fn, found {} fn", values.expected, values.found)
+ }
+ ArgumentMutability(_) | Mutability => write!(f, "types differ in mutability"),
+ TupleSize(values) => write!(
+ f,
+ "expected a tuple with {} element{}, found one with {} element{}",
+ values.expected,
+ pluralize!(values.expected),
+ values.found,
+ pluralize!(values.found)
+ ),
+ FixedArraySize(values) => write!(
+ f,
+ "expected an array with a fixed size of {} element{}, found one with {} element{}",
+ values.expected,
+ pluralize!(values.expected),
+ values.found,
+ pluralize!(values.found)
+ ),
+ ArgCount => write!(f, "incorrect number of function parameters"),
+ FieldMisMatch(adt, field) => write!(f, "field type mismatch: {}.{}", adt, field),
+ RegionsDoesNotOutlive(..) => write!(f, "lifetime mismatch"),
+ // Actually naming the region here is a bit confusing because context is lacking
+ RegionsInsufficientlyPolymorphic(..) => {
+ write!(f, "one type is more general than the other")
+ }
+ RegionsOverlyPolymorphic(br, _) => write!(
+ f,
+ "expected concrete lifetime, found bound lifetime parameter{}",
+ br_string(br)
+ ),
+ RegionsPlaceholderMismatch => write!(f, "one type is more general than the other"),
+ ArgumentSorts(values, _) | Sorts(values) => ty::tls::with(|tcx| {
+ report_maybe_different(
+ f,
+ &values.expected.sort_string(tcx),
+ &values.found.sort_string(tcx),
+ )
+ }),
+ Traits(values) => ty::tls::with(|tcx| {
+ report_maybe_different(
+ f,
+ &format!("trait `{}`", tcx.def_path_str(values.expected)),
+ &format!("trait `{}`", tcx.def_path_str(values.found)),
+ )
+ }),
+ IntMismatch(ref values) => {
+ let expected = match values.expected {
+ ty::IntVarValue::IntType(ty) => ty.name_str(),
+ ty::IntVarValue::UintType(ty) => ty.name_str(),
+ };
+ let found = match values.found {
+ ty::IntVarValue::IntType(ty) => ty.name_str(),
+ ty::IntVarValue::UintType(ty) => ty.name_str(),
+ };
+ write!(f, "expected `{}`, found `{}`", expected, found)
+ }
+ FloatMismatch(ref values) => {
+ write!(
+ f,
+ "expected `{}`, found `{}`",
+ values.expected.name_str(),
+ values.found.name_str()
+ )
+ }
+ VariadicMismatch(ref values) => write!(
+ f,
+ "expected {} fn, found {} function",
+ if values.expected { "variadic" } else { "non-variadic" },
+ if values.found { "variadic" } else { "non-variadic" }
+ ),
+ ProjectionMismatched(ref values) => ty::tls::with(|tcx| {
+ write!(
+ f,
+ "expected {}, found {}",
+ tcx.def_path_str(values.expected),
+ tcx.def_path_str(values.found)
+ )
+ }),
+ ExistentialMismatch(ref values) => report_maybe_different(
+ f,
+ &format!("trait `{}`", values.expected),
+ &format!("trait `{}`", values.found),
+ ),
+ ConstMismatch(ref values) => {
+ write!(f, "expected `{}`, found `{}`", values.expected, values.found)
+ }
+ IntrinsicCast => write!(f, "cannot coerce intrinsics to function pointers"),
+ TargetFeatureCast(_) => write!(
+ f,
+ "cannot coerce functions with `#[target_feature]` to safe function pointers"
+ ),
+ ObjectUnsafeCoercion(_) => write!(f, "coercion to object-unsafe trait object"),
+ }
+ }
+}
+
+impl<'tcx> TypeError<'tcx> {
+ pub fn must_include_note(&self) -> bool {
+ use self::TypeError::*;
+ match self {
+ CyclicTy(_) | CyclicConst(_) | UnsafetyMismatch(_) | ConstnessMismatch(_)
+ | PolarityMismatch(_) | Mismatch | AbiMismatch(_) | FixedArraySize(_)
+ | ArgumentSorts(..) | Sorts(_) | IntMismatch(_) | FloatMismatch(_)
+ | VariadicMismatch(_) | TargetFeatureCast(_) => false,
+
+ Mutability
+ | ArgumentMutability(_)
+ | TupleSize(_)
+ | ArgCount
+ | FieldMisMatch(..)
+ | RegionsDoesNotOutlive(..)
+ | RegionsInsufficientlyPolymorphic(..)
+ | RegionsOverlyPolymorphic(..)
+ | RegionsPlaceholderMismatch
+ | Traits(_)
+ | ProjectionMismatched(_)
+ | ExistentialMismatch(_)
+ | ConstMismatch(_)
+ | IntrinsicCast
+ | ObjectUnsafeCoercion(_) => true,
+ }
+ }
+}
+
+impl<'tcx> Ty<'tcx> {
+ pub fn sort_string(self, tcx: TyCtxt<'_>) -> Cow<'static, str> {
+ match *self.kind() {
+ ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) | ty::Float(_) | ty::Str | ty::Never => {
+ format!("`{}`", self).into()
+ }
+ ty::Tuple(ref tys) if tys.is_empty() => format!("`{}`", self).into(),
+
+ ty::Adt(def, _) => format!("{} `{}`", def.descr(), tcx.def_path_str(def.did())).into(),
+ ty::Foreign(def_id) => format!("extern type `{}`", tcx.def_path_str(def_id)).into(),
+ ty::Array(t, n) => {
+ if t.is_simple_ty() {
+ return format!("array `{}`", self).into();
+ }
+
+ let n = tcx.lift(n).unwrap();
+ if let ty::ConstKind::Value(v) = n.kind() {
+ if let Some(n) = v.try_to_machine_usize(tcx) {
+ return format!("array of {} element{}", n, pluralize!(n)).into();
+ }
+ }
+ "array".into()
+ }
+ ty::Slice(ty) if ty.is_simple_ty() => format!("slice `{}`", self).into(),
+ ty::Slice(_) => "slice".into(),
+ ty::RawPtr(_) => "*-ptr".into(),
+ ty::Ref(_, ty, mutbl) => {
+ let tymut = ty::TypeAndMut { ty, mutbl };
+ let tymut_string = tymut.to_string();
+ if tymut_string != "_"
+ && (ty.is_simple_text() || tymut_string.len() < "mutable reference".len())
+ {
+ format!("`&{}`", tymut_string).into()
+ } else {
+ // Unknown type name, it's long or has type arguments
+ match mutbl {
+ hir::Mutability::Mut => "mutable reference",
+ _ => "reference",
+ }
+ .into()
+ }
+ }
+ ty::FnDef(..) => "fn item".into(),
+ ty::FnPtr(_) => "fn pointer".into(),
+ ty::Dynamic(ref inner, ..) if let Some(principal) = inner.principal() => {
+ format!("trait object `dyn {}`", tcx.def_path_str(principal.def_id())).into()
+ }
+ ty::Dynamic(..) => "trait object".into(),
+ ty::Closure(..) => "closure".into(),
+ ty::Generator(def_id, ..) => tcx.generator_kind(def_id).unwrap().descr().into(),
+ ty::GeneratorWitness(..) => "generator witness".into(),
+ ty::Tuple(..) => "tuple".into(),
+ ty::Infer(ty::TyVar(_)) => "inferred type".into(),
+ ty::Infer(ty::IntVar(_)) => "integer".into(),
+ ty::Infer(ty::FloatVar(_)) => "floating-point number".into(),
+ ty::Placeholder(..) => "placeholder type".into(),
+ ty::Bound(..) => "bound type".into(),
+ ty::Infer(ty::FreshTy(_)) => "fresh type".into(),
+ ty::Infer(ty::FreshIntTy(_)) => "fresh integral type".into(),
+ ty::Infer(ty::FreshFloatTy(_)) => "fresh floating-point type".into(),
+ ty::Projection(_) => "associated type".into(),
+ ty::Param(p) => format!("type parameter `{}`", p).into(),
+ ty::Opaque(..) => "opaque type".into(),
+ ty::Error(_) => "type error".into(),
+ }
+ }
+
+ pub fn prefix_string(self, tcx: TyCtxt<'_>) -> Cow<'static, str> {
+ match *self.kind() {
+ ty::Infer(_)
+ | ty::Error(_)
+ | ty::Bool
+ | ty::Char
+ | ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_)
+ | ty::Str
+ | ty::Never => "type".into(),
+ ty::Tuple(ref tys) if tys.is_empty() => "unit type".into(),
+ ty::Adt(def, _) => def.descr().into(),
+ ty::Foreign(_) => "extern type".into(),
+ ty::Array(..) => "array".into(),
+ ty::Slice(_) => "slice".into(),
+ ty::RawPtr(_) => "raw pointer".into(),
+ ty::Ref(.., mutbl) => match mutbl {
+ hir::Mutability::Mut => "mutable reference",
+ _ => "reference",
+ }
+ .into(),
+ ty::FnDef(..) => "fn item".into(),
+ ty::FnPtr(_) => "fn pointer".into(),
+ ty::Dynamic(..) => "trait object".into(),
+ ty::Closure(..) => "closure".into(),
+ ty::Generator(def_id, ..) => tcx.generator_kind(def_id).unwrap().descr().into(),
+ ty::GeneratorWitness(..) => "generator witness".into(),
+ ty::Tuple(..) => "tuple".into(),
+ ty::Placeholder(..) => "higher-ranked type".into(),
+ ty::Bound(..) => "bound type variable".into(),
+ ty::Projection(_) => "associated type".into(),
+ ty::Param(_) => "type parameter".into(),
+ ty::Opaque(..) => "opaque type".into(),
+ }
+ }
+}
+
+impl<'tcx> TyCtxt<'tcx> {
+ pub fn note_and_explain_type_err(
+ self,
+ diag: &mut Diagnostic,
+ err: &TypeError<'tcx>,
+ cause: &ObligationCause<'tcx>,
+ sp: Span,
+ body_owner_def_id: DefId,
+ ) {
+ use self::TypeError::*;
+ debug!("note_and_explain_type_err err={:?} cause={:?}", err, cause);
+ match err {
+ ArgumentSorts(values, _) | Sorts(values) => {
+ match (values.expected.kind(), values.found.kind()) {
+ (ty::Closure(..), ty::Closure(..)) => {
+ diag.note("no two closures, even if identical, have the same type");
+ diag.help("consider boxing your closure and/or using it as a trait object");
+ }
+ (ty::Opaque(..), ty::Opaque(..)) => {
+ // Issue #63167
+ diag.note("distinct uses of `impl Trait` result in different opaque types");
+ }
+ (ty::Float(_), ty::Infer(ty::IntVar(_)))
+ if let Ok(
+ // Issue #53280
+ snippet,
+ ) = self.sess.source_map().span_to_snippet(sp) =>
+ {
+ if snippet.chars().all(|c| c.is_digit(10) || c == '-' || c == '_') {
+ diag.span_suggestion(
+ sp,
+ "use a float literal",
+ format!("{}.0", snippet),
+ MachineApplicable,
+ );
+ }
+ }
+ (ty::Param(expected), ty::Param(found)) => {
+ let generics = self.generics_of(body_owner_def_id);
+ let e_span = self.def_span(generics.type_param(expected, self).def_id);
+ if !sp.contains(e_span) {
+ diag.span_label(e_span, "expected type parameter");
+ }
+ let f_span = self.def_span(generics.type_param(found, self).def_id);
+ if !sp.contains(f_span) {
+ diag.span_label(f_span, "found type parameter");
+ }
+ diag.note(
+ "a type parameter was expected, but a different one was found; \
+ you might be missing a type parameter or trait bound",
+ );
+ diag.note(
+ "for more information, visit \
+ https://doc.rust-lang.org/book/ch10-02-traits.html\
+ #traits-as-parameters",
+ );
+ }
+ (ty::Projection(_), ty::Projection(_)) => {
+ diag.note("an associated type was expected, but a different one was found");
+ }
+ (ty::Param(p), ty::Projection(proj)) | (ty::Projection(proj), ty::Param(p)) => {
+ let generics = self.generics_of(body_owner_def_id);
+ let p_span = self.def_span(generics.type_param(p, self).def_id);
+ if !sp.contains(p_span) {
+ diag.span_label(p_span, "this type parameter");
+ }
+ let hir = self.hir();
+ let mut note = true;
+ if let Some(generics) = generics
+ .type_param(p, self)
+ .def_id
+ .as_local()
+ .map(|id| hir.local_def_id_to_hir_id(id))
+ .and_then(|id| self.hir().find(self.hir().get_parent_node(id)))
+ .as_ref()
+ .and_then(|node| node.generics())
+ {
+ // Synthesize the associated type restriction `Add<Output = Expected>`.
+ // FIXME: extract this logic for use in other diagnostics.
+ let (trait_ref, assoc_substs) = proj.trait_ref_and_own_substs(self);
+ let path =
+ self.def_path_str_with_substs(trait_ref.def_id, trait_ref.substs);
+ let item_name = self.item_name(proj.item_def_id);
+ let item_args = self.format_generic_args(assoc_substs);
+
+ let path = if path.ends_with('>') {
+ format!(
+ "{}, {}{} = {}>",
+ &path[..path.len() - 1],
+ item_name,
+ item_args,
+ p
+ )
+ } else {
+ format!("{}<{}{} = {}>", path, item_name, item_args, p)
+ };
+ note = !suggest_constraining_type_param(
+ self,
+ generics,
+ diag,
+ &format!("{}", proj.self_ty()),
+ &path,
+ None,
+ );
+ }
+ if note {
+ diag.note("you might be missing a type parameter or trait bound");
+ }
+ }
+ (ty::Param(p), ty::Dynamic(..) | ty::Opaque(..))
+ | (ty::Dynamic(..) | ty::Opaque(..), ty::Param(p)) => {
+ let generics = self.generics_of(body_owner_def_id);
+ let p_span = self.def_span(generics.type_param(p, self).def_id);
+ if !sp.contains(p_span) {
+ diag.span_label(p_span, "this type parameter");
+ }
+ diag.help("type parameters must be constrained to match other types");
+ if self.sess.teach(&diag.get_code().unwrap()) {
+ diag.help(
+ "given a type parameter `T` and a method `foo`:
+```
+trait Trait<T> { fn foo(&self) -> T; }
+```
+the only ways to implement method `foo` are:
+- constrain `T` with an explicit type:
+```
+impl Trait<String> for X {
+ fn foo(&self) -> String { String::new() }
+}
+```
+- add a trait bound to `T` and call a method on that trait that returns `Self`:
+```
+impl<T: std::default::Default> Trait<T> for X {
+ fn foo(&self) -> T { <T as std::default::Default>::default() }
+}
+```
+- change `foo` to return an argument of type `T`:
+```
+impl<T> Trait<T> for X {
+ fn foo(&self, x: T) -> T { x }
+}
+```",
+ );
+ }
+ diag.note(
+ "for more information, visit \
+ https://doc.rust-lang.org/book/ch10-02-traits.html\
+ #traits-as-parameters",
+ );
+ }
+ (ty::Param(p), ty::Closure(..) | ty::Generator(..)) => {
+ let generics = self.generics_of(body_owner_def_id);
+ let p_span = self.def_span(generics.type_param(p, self).def_id);
+ if !sp.contains(p_span) {
+ diag.span_label(p_span, "this type parameter");
+ }
+ diag.help(&format!(
+ "every closure has a distinct type and so could not always match the \
+ caller-chosen type of parameter `{}`",
+ p
+ ));
+ }
+ (ty::Param(p), _) | (_, ty::Param(p)) => {
+ let generics = self.generics_of(body_owner_def_id);
+ let p_span = self.def_span(generics.type_param(p, self).def_id);
+ if !sp.contains(p_span) {
+ diag.span_label(p_span, "this type parameter");
+ }
+ }
+ (ty::Projection(proj_ty), _) => {
+ self.expected_projection(
+ diag,
+ proj_ty,
+ values,
+ body_owner_def_id,
+ cause.code(),
+ );
+ }
+ (_, ty::Projection(proj_ty)) => {
+ let msg = format!(
+ "consider constraining the associated type `{}` to `{}`",
+ values.found, values.expected,
+ );
+ if !(self.suggest_constraining_opaque_associated_type(
+ diag,
+ &msg,
+ proj_ty,
+ values.expected,
+ ) || self.suggest_constraint(
+ diag,
+ &msg,
+ body_owner_def_id,
+ proj_ty,
+ values.expected,
+ )) {
+ diag.help(&msg);
+ diag.note(
+ "for more information, visit \
+ https://doc.rust-lang.org/book/ch19-03-advanced-traits.html",
+ );
+ }
+ }
+ _ => {}
+ }
+ debug!(
+ "note_and_explain_type_err expected={:?} ({:?}) found={:?} ({:?})",
+ values.expected,
+ values.expected.kind(),
+ values.found,
+ values.found.kind(),
+ );
+ }
+ CyclicTy(ty) => {
+ // Watch out for various cases of cyclic types and try to explain.
+ if ty.is_closure() || ty.is_generator() {
+ diag.note(
+ "closures cannot capture themselves or take themselves as argument;\n\
+ this error may be the result of a recent compiler bug-fix,\n\
+ see issue #46062 <https://github.com/rust-lang/rust/issues/46062>\n\
+ for more information",
+ );
+ }
+ }
+ TargetFeatureCast(def_id) => {
+ let target_spans =
+ self.get_attrs(*def_id, sym::target_feature).map(|attr| attr.span);
+ diag.note(
+ "functions with `#[target_feature]` can only be coerced to `unsafe` function pointers"
+ );
+ diag.span_labels(target_spans, "`#[target_feature]` added here");
+ }
+ _ => {}
+ }
+ }
+
+ fn suggest_constraint(
+ self,
+ diag: &mut Diagnostic,
+ msg: &str,
+ body_owner_def_id: DefId,
+ proj_ty: &ty::ProjectionTy<'tcx>,
+ ty: Ty<'tcx>,
+ ) -> bool {
+ let assoc = self.associated_item(proj_ty.item_def_id);
+ let (trait_ref, assoc_substs) = proj_ty.trait_ref_and_own_substs(self);
+ if let Some(item) = self.hir().get_if_local(body_owner_def_id) {
+ if let Some(hir_generics) = item.generics() {
+ // Get the `DefId` for the type parameter corresponding to `A` in `<A as T>::Foo`.
+ // This will also work for `impl Trait`.
+ let def_id = if let ty::Param(param_ty) = proj_ty.self_ty().kind() {
+ let generics = self.generics_of(body_owner_def_id);
+ generics.type_param(param_ty, self).def_id
+ } else {
+ return false;
+ };
+ let Some(def_id) = def_id.as_local() else {
+ return false;
+ };
+
+ // First look in the `where` clause, as this might be
+ // `fn foo<T>(x: T) where T: Trait`.
+ for pred in hir_generics.bounds_for_param(def_id) {
+ if self.constrain_generic_bound_associated_type_structured_suggestion(
+ diag,
+ &trait_ref,
+ pred.bounds,
+ &assoc,
+ assoc_substs,
+ ty,
+ msg,
+ false,
+ ) {
+ return true;
+ }
+ }
+ }
+ }
+ false
+ }
+
+ /// An associated type was expected and a different type was found.
+ ///
+ /// We perform a few different checks to see what we can suggest:
+ ///
+ /// - In the current item, look for associated functions that return the expected type and
+ /// suggest calling them. (Not a structured suggestion.)
+ /// - If any of the item's generic bounds can be constrained, we suggest constraining the
+ /// associated type to the found type.
+ /// - If the associated type has a default type and was expected inside of a `trait`, we
+ /// mention that this is disallowed.
+ /// - If all other things fail, and the error is not because of a mismatch between the `trait`
+ /// and the `impl`, we provide a generic `help` to constrain the assoc type or call an assoc
+ /// fn that returns the type.
+ fn expected_projection(
+ self,
+ diag: &mut Diagnostic,
+ proj_ty: &ty::ProjectionTy<'tcx>,
+ values: &ExpectedFound<Ty<'tcx>>,
+ body_owner_def_id: DefId,
+ cause_code: &ObligationCauseCode<'_>,
+ ) {
+ let msg = format!(
+ "consider constraining the associated type `{}` to `{}`",
+ values.expected, values.found
+ );
+ let body_owner = self.hir().get_if_local(body_owner_def_id);
+ let current_method_ident = body_owner.and_then(|n| n.ident()).map(|i| i.name);
+
+ // We don't want to suggest calling an assoc fn in a scope where that isn't feasible.
+ let callable_scope = matches!(
+ body_owner,
+ Some(
+ hir::Node::Item(hir::Item { kind: hir::ItemKind::Fn(..), .. })
+ | hir::Node::TraitItem(hir::TraitItem { kind: hir::TraitItemKind::Fn(..), .. })
+ | hir::Node::ImplItem(hir::ImplItem { kind: hir::ImplItemKind::Fn(..), .. }),
+ )
+ );
+ let impl_comparison =
+ matches!(cause_code, ObligationCauseCode::CompareImplItemObligation { .. });
+ let assoc = self.associated_item(proj_ty.item_def_id);
+ if !callable_scope || impl_comparison {
+ // We do not want to suggest calling functions when the reason for the
+ // type error is a comparison of an `impl` with its `trait` or when the
+ // scope is outside of a `Body`.
+ } else {
+ // If we find a suitable associated function that returns the expected type, we don't
+ // want the more general suggestion later in this method about "consider constraining
+ // the associated type or calling a method that returns the associated type".
+ let point_at_assoc_fn = self.point_at_methods_that_satisfy_associated_type(
+ diag,
+ assoc.container_id(self),
+ current_method_ident,
+ proj_ty.item_def_id,
+ values.expected,
+ );
+ // Possibly suggest constraining the associated type to conform to the
+ // found type.
+ if self.suggest_constraint(diag, &msg, body_owner_def_id, proj_ty, values.found)
+ || point_at_assoc_fn
+ {
+ return;
+ }
+ }
+
+ self.suggest_constraining_opaque_associated_type(diag, &msg, proj_ty, values.found);
+
+ if self.point_at_associated_type(diag, body_owner_def_id, values.found) {
+ return;
+ }
+
+ if !impl_comparison {
+ // Generic suggestion when we can't be more specific.
+ if callable_scope {
+ diag.help(&format!(
+ "{} or calling a method that returns `{}`",
+ msg, values.expected
+ ));
+ } else {
+ diag.help(&msg);
+ }
+ diag.note(
+ "for more information, visit \
+ https://doc.rust-lang.org/book/ch19-03-advanced-traits.html",
+ );
+ }
+ if self.sess.teach(&diag.get_code().unwrap()) {
+ diag.help(
+ "given an associated type `T` and a method `foo`:
+```
+trait Trait {
+type T;
+fn foo(&self) -> Self::T;
+}
+```
+the only way of implementing method `foo` is to constrain `T` with an explicit associated type:
+```
+impl Trait for X {
+type T = String;
+fn foo(&self) -> Self::T { String::new() }
+}
+```",
+ );
+ }
+ }
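
A sketch of the kind of mismatch this method handles (not part of the diff): with the `Item = u32` constraint removed from `first`, the body reports "expected `u32`, found associated type `<I as Iter>::Item`", and this code suggests constraining the associated type, i.e. restoring the bound shown here.

```
trait Iter {
    type Item;
    fn next_item(&mut self) -> Self::Item;
}

// Remove `<Item = u32>` to trigger "consider constraining the associated
// type `<I as Iter>::Item` to `u32`".
fn first<I: Iter<Item = u32>>(mut it: I) -> u32 {
    it.next_item()
}

struct Ones;

impl Iter for Ones {
    type Item = u32;
    fn next_item(&mut self) -> u32 {
        1
    }
}

fn main() {
    assert_eq!(first(Ones), 1);
}
```
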
+
+ /// When the expected `impl Trait` is not defined in the current item, it will come from
+ /// a return type. This can occur when dealing with `TryStream` (#71035).
+ fn suggest_constraining_opaque_associated_type(
+ self,
+ diag: &mut Diagnostic,
+ msg: &str,
+ proj_ty: &ty::ProjectionTy<'tcx>,
+ ty: Ty<'tcx>,
+ ) -> bool {
+ let assoc = self.associated_item(proj_ty.item_def_id);
+ if let ty::Opaque(def_id, _) = *proj_ty.self_ty().kind() {
+ let opaque_local_def_id = def_id.as_local();
+ let opaque_hir_ty = if let Some(opaque_local_def_id) = opaque_local_def_id {
+ match &self.hir().expect_item(opaque_local_def_id).kind {
+ hir::ItemKind::OpaqueTy(opaque_hir_ty) => opaque_hir_ty,
+ _ => bug!("The HirId comes from a `ty::Opaque`"),
+ }
+ } else {
+ return false;
+ };
+
+ let (trait_ref, assoc_substs) = proj_ty.trait_ref_and_own_substs(self);
+
+ self.constrain_generic_bound_associated_type_structured_suggestion(
+ diag,
+ &trait_ref,
+ opaque_hir_ty.bounds,
+ assoc,
+ assoc_substs,
+ ty,
+ msg,
+ true,
+ )
+ } else {
+ false
+ }
+ }
+
+ fn point_at_methods_that_satisfy_associated_type(
+ self,
+ diag: &mut Diagnostic,
+ assoc_container_id: DefId,
+ current_method_ident: Option<Symbol>,
+ proj_ty_item_def_id: DefId,
+ expected: Ty<'tcx>,
+ ) -> bool {
+ let items = self.associated_items(assoc_container_id);
+ // Find all the methods in the trait that could be called to construct the
+ // expected associated type.
+ // FIXME: consider suggesting the use of associated `const`s.
+ let methods: Vec<(Span, String)> = items
+ .items
+ .iter()
+ .filter(|(name, item)| {
+ ty::AssocKind::Fn == item.kind && Some(**name) != current_method_ident
+ })
+ .filter_map(|(_, item)| {
+ let method = self.fn_sig(item.def_id);
+ match *method.output().skip_binder().kind() {
+ ty::Projection(ty::ProjectionTy { item_def_id, .. })
+ if item_def_id == proj_ty_item_def_id =>
+ {
+ Some((
+ self.def_span(item.def_id),
+ format!("consider calling `{}`", self.def_path_str(item.def_id)),
+ ))
+ }
+ _ => None,
+ }
+ })
+ .collect();
+ if !methods.is_empty() {
+ // Use a single `help:` to show all the methods in the trait that can
+ // be used to construct the expected associated type.
+ let mut span: MultiSpan =
+ methods.iter().map(|(sp, _)| *sp).collect::<Vec<Span>>().into();
+ let msg = format!(
+ "{some} method{s} {are} available that return{r} `{ty}`",
+ some = if methods.len() == 1 { "a" } else { "some" },
+ s = pluralize!(methods.len()),
+ are = pluralize!("is", methods.len()),
+ r = if methods.len() == 1 { "s" } else { "" },
+ ty = expected
+ );
+ for (sp, label) in methods.into_iter() {
+ span.push_span_label(sp, label);
+ }
+ diag.span_help(span, &msg);
+ return true;
+ }
+ false
+ }
+
+ fn point_at_associated_type(
+ self,
+ diag: &mut Diagnostic,
+ body_owner_def_id: DefId,
+ found: Ty<'tcx>,
+ ) -> bool {
+ let Some(hir_id) = body_owner_def_id.as_local() else {
+ return false;
+ };
+ let hir_id = self.hir().local_def_id_to_hir_id(hir_id);
+ // When `body_owner` is an `impl` or `trait` item, look in its associated types for
+ // `expected` and point at it.
+ let parent_id = self.hir().get_parent_item(hir_id);
+ let item = self.hir().find_by_def_id(parent_id);
+ debug!("expected_projection parent item {:?}", item);
+ match item {
+ Some(hir::Node::Item(hir::Item { kind: hir::ItemKind::Trait(.., items), .. })) => {
+ // FIXME: account for `#![feature(specialization)]`
+ for item in &items[..] {
+ match item.kind {
+ hir::AssocItemKind::Type => {
+ // FIXME: account for returning some type in a trait fn impl that has
+ // an assoc type as a return type (#72076).
+ if let hir::Defaultness::Default { has_value: true } =
+ self.impl_defaultness(item.id.def_id)
+ {
+ if self.type_of(item.id.def_id) == found {
+ diag.span_label(
+ item.span,
+ "associated type defaults can't be assumed inside the \
+ trait defining them",
+ );
+ return true;
+ }
+ }
+ }
+ _ => {}
+ }
+ }
+ }
+ Some(hir::Node::Item(hir::Item {
+ kind: hir::ItemKind::Impl(hir::Impl { items, .. }),
+ ..
+ })) => {
+ for item in &items[..] {
+ if let hir::AssocItemKind::Type = item.kind {
+ if self.type_of(item.id.def_id) == found {
+ diag.span_label(item.span, "expected this associated type");
+ return true;
+ }
+ }
+ }
+ }
+ _ => {}
+ }
+ false
+ }
+
+ /// Given a slice of `hir::GenericBound`s, if any of them corresponds to the `trait_ref`
+ /// requirement, provide a structured suggestion to constrain it to a given type `ty`.
+ ///
+ /// `is_bound_surely_present` indicates whether we know the bound we're looking for is
+ /// inside `bounds`. If that's the case then we can consider `bounds` containing only one
+ /// trait bound as the one we're looking for. This can help in cases where the associated
+ /// type is defined on a supertrait of the one present in the bounds.
+ fn constrain_generic_bound_associated_type_structured_suggestion(
+ self,
+ diag: &mut Diagnostic,
+ trait_ref: &ty::TraitRef<'tcx>,
+ bounds: hir::GenericBounds<'_>,
+ assoc: &ty::AssocItem,
+ assoc_substs: &[ty::GenericArg<'tcx>],
+ ty: Ty<'tcx>,
+ msg: &str,
+ is_bound_surely_present: bool,
+ ) -> bool {
+ // FIXME: we would want to call `resolve_vars_if_possible` on `ty` before suggesting.
+
+ let trait_bounds = bounds.iter().filter_map(|bound| match bound {
+ hir::GenericBound::Trait(ptr, hir::TraitBoundModifier::None) => Some(ptr),
+ _ => None,
+ });
+
+ let matching_trait_bounds = trait_bounds
+ .clone()
+ .filter(|ptr| ptr.trait_ref.trait_def_id() == Some(trait_ref.def_id))
+ .collect::<Vec<_>>();
+
+ let span = match &matching_trait_bounds[..] {
+ &[ptr] => ptr.span,
+ &[] if is_bound_surely_present => match &trait_bounds.collect::<Vec<_>>()[..] {
+ &[ptr] => ptr.span,
+ _ => return false,
+ },
+ _ => return false,
+ };
+
+ self.constrain_associated_type_structured_suggestion(
+ diag,
+ span,
+ assoc,
+ assoc_substs,
+ ty,
+ msg,
+ )
+ }
+
+ /// Given a span corresponding to a bound, provide a structured suggestion to set an
+ /// associated type to a given type `ty`.
+ fn constrain_associated_type_structured_suggestion(
+ self,
+ diag: &mut Diagnostic,
+ span: Span,
+ assoc: &ty::AssocItem,
+ assoc_substs: &[ty::GenericArg<'tcx>],
+ ty: Ty<'tcx>,
+ msg: &str,
+ ) -> bool {
+ if let Ok(has_params) =
+ self.sess.source_map().span_to_snippet(span).map(|snippet| snippet.ends_with('>'))
+ {
+ let (span, sugg) = if has_params {
+ let pos = span.hi() - BytePos(1);
+ let span = Span::new(pos, pos, span.ctxt(), span.parent());
+ (span, format!(", {} = {}", assoc.ident(self), ty))
+ } else {
+ let item_args = self.format_generic_args(assoc_substs);
+ (span.shrink_to_hi(), format!("<{}{} = {}>", assoc.ident(self), item_args, ty))
+ };
+ diag.span_suggestion_verbose(span, msg, sugg, MaybeIncorrect);
+ return true;
+ }
+ false
+ }
+
+ fn format_generic_args(self, args: &[ty::GenericArg<'tcx>]) -> String {
+ FmtPrinter::new(self, hir::def::Namespace::TypeNS)
+ .path_generic_args(Ok, args)
+ .expect("could not write to `String`.")
+ .into_buffer()
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/fast_reject.rs b/compiler/rustc_middle/src/ty/fast_reject.rs
new file mode 100644
index 000000000..8d019a3ba
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/fast_reject.rs
@@ -0,0 +1,405 @@
+use crate::mir::Mutability;
+use crate::ty::subst::GenericArgKind;
+use crate::ty::{self, Ty, TyCtxt, TypeVisitable};
+use rustc_hir::def_id::DefId;
+use std::fmt::Debug;
+use std::hash::Hash;
+use std::iter;
+
+use self::SimplifiedTypeGen::*;
+
+pub type SimplifiedType = SimplifiedTypeGen<DefId>;
+
+/// See `simplify_type`
+///
+/// Note that we keep this type generic over the type of identifier it uses
+/// because we sometimes need to use `SimplifiedTypeGen` values as stable sorting
+/// keys (in which case we use a `DefPathHash` as the id type), but in the general
+/// case the non-stable but fast-to-construct `DefId` version is the better choice.
+#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, TyEncodable, TyDecodable, HashStable)]
+pub enum SimplifiedTypeGen<D>
+where
+ D: Copy + Debug + Eq,
+{
+ BoolSimplifiedType,
+ CharSimplifiedType,
+ IntSimplifiedType(ty::IntTy),
+ UintSimplifiedType(ty::UintTy),
+ FloatSimplifiedType(ty::FloatTy),
+ AdtSimplifiedType(D),
+ ForeignSimplifiedType(D),
+ StrSimplifiedType,
+ ArraySimplifiedType,
+ SliceSimplifiedType,
+ RefSimplifiedType(Mutability),
+ PtrSimplifiedType(Mutability),
+ NeverSimplifiedType,
+ TupleSimplifiedType(usize),
+ /// A trait object, all of whose components are markers
+ /// (e.g., `dyn Send + Sync`).
+ MarkerTraitObjectSimplifiedType,
+ TraitSimplifiedType(D),
+ ClosureSimplifiedType(D),
+ GeneratorSimplifiedType(D),
+ GeneratorWitnessSimplifiedType(usize),
+ OpaqueSimplifiedType(D),
+ FunctionSimplifiedType(usize),
+ PlaceholderSimplifiedType,
+}
+
+/// Generic parameters are pretty much just bound variables, e.g.
+/// the type of `fn foo<'a, T>(x: &'a T) -> u32 { ... }` can be thought of as
+/// `for<'a, T> fn(&'a T) -> u32`.
+///
+/// Typecheck of `foo` has to succeed for all possible generic arguments, so
+/// during typeck, we have to treat its generic parameters as if they
+/// were placeholders.
+///
+/// But when calling `foo` we only have to provide a specific generic argument.
+/// In that case the generic parameters are instantiated with inference variables.
+/// As we use `simplify_type` before that instantiation happens, we just treat
+/// generic parameters as if they were inference variables in that case.
+#[derive(PartialEq, Eq, Debug, Clone, Copy)]
+pub enum TreatParams {
+ /// Treat parameters as placeholders in the given environment.
+ ///
+ /// Note that this also causes us to treat projections as if they were
+ /// placeholders. This is only correct if the given projection cannot
+ /// be normalized in the current context. Even if normalization fails,
+ /// it may still succeed later if the projection contains any inference
+ /// variables.
+ AsPlaceholder,
+ AsInfer,
+}
+
+/// Tries to simplify a type by only returning the outermost injective¹ layer, if one exists.
+///
+/// **This function should only be used if you need to store or retrieve the type from some
+/// hashmap. If you want to quickly decide whether two types may unify, use the [DeepRejectCtxt]
+/// instead.**
+///
+/// The idea is to get something simple that we can use to quickly decide if two types could unify,
+/// for example during method lookup. If this function returns `Some(x)` it can only unify with
+/// types for which this method returns either `Some(x)` as well or `None`.
+///
+/// A special case here are parameters and projections, which are only injective
+/// if they are treated as placeholders.
+///
+/// For example when storing impls based on their simplified self type, we treat
+/// generic parameters as if they were inference variables. We must not simplify them here,
+/// as they can unify with any other type.
+///
+/// With projections we have to be even more careful, as treating them as placeholders
+/// is only correct if they are fully normalized.
+///
+/// ¹ meaning that if the outermost layers are different, then the whole types are also different.
+pub fn simplify_type<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ ty: Ty<'tcx>,
+ treat_params: TreatParams,
+) -> Option<SimplifiedType> {
+ match *ty.kind() {
+ ty::Bool => Some(BoolSimplifiedType),
+ ty::Char => Some(CharSimplifiedType),
+ ty::Int(int_type) => Some(IntSimplifiedType(int_type)),
+ ty::Uint(uint_type) => Some(UintSimplifiedType(uint_type)),
+ ty::Float(float_type) => Some(FloatSimplifiedType(float_type)),
+ ty::Adt(def, _) => Some(AdtSimplifiedType(def.did())),
+ ty::Str => Some(StrSimplifiedType),
+ ty::Array(..) => Some(ArraySimplifiedType),
+ ty::Slice(..) => Some(SliceSimplifiedType),
+ ty::RawPtr(ptr) => Some(PtrSimplifiedType(ptr.mutbl)),
+ ty::Dynamic(trait_info, ..) => match trait_info.principal_def_id() {
+ Some(principal_def_id) if !tcx.trait_is_auto(principal_def_id) => {
+ Some(TraitSimplifiedType(principal_def_id))
+ }
+ _ => Some(MarkerTraitObjectSimplifiedType),
+ },
+ ty::Ref(_, _, mutbl) => Some(RefSimplifiedType(mutbl)),
+ ty::FnDef(def_id, _) | ty::Closure(def_id, _) => Some(ClosureSimplifiedType(def_id)),
+ ty::Generator(def_id, _, _) => Some(GeneratorSimplifiedType(def_id)),
+ ty::GeneratorWitness(tys) => Some(GeneratorWitnessSimplifiedType(tys.skip_binder().len())),
+ ty::Never => Some(NeverSimplifiedType),
+ ty::Tuple(tys) => Some(TupleSimplifiedType(tys.len())),
+ ty::FnPtr(f) => Some(FunctionSimplifiedType(f.skip_binder().inputs().len())),
+ ty::Placeholder(..) => Some(PlaceholderSimplifiedType),
+ ty::Param(_) => match treat_params {
+ TreatParams::AsPlaceholder => Some(PlaceholderSimplifiedType),
+ TreatParams::AsInfer => None,
+ },
+ ty::Projection(_) => match treat_params {
+ // When treating `ty::Param` as a placeholder, projections also
+ // don't unify with anything else as long as they are fully normalized.
+ //
+ // We will have to be careful with lazy normalization here.
+ TreatParams::AsPlaceholder if !ty.has_infer_types_or_consts() => {
+ debug!("treating `{}` as a placeholder", ty);
+ Some(PlaceholderSimplifiedType)
+ }
+ TreatParams::AsPlaceholder | TreatParams::AsInfer => None,
+ },
+ ty::Opaque(def_id, _) => Some(OpaqueSimplifiedType(def_id)),
+ ty::Foreign(def_id) => Some(ForeignSimplifiedType(def_id)),
+ ty::Bound(..) | ty::Infer(_) | ty::Error(_) => None,
+ }
+}
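
To make the key mapping concrete, a sketch of some inputs and their simplified keys (compiler-internal, so this only runs inside rustc; `vec_of_t` and `t_param` are assumed to be handles to the types `Vec<T>` and the parameter `T`):

```
// Sketch only -- assumes a `tcx: TyCtxt<'tcx>` and pre-built `Ty` values.
assert_eq!(simplify_type(tcx, tcx.types.bool, TreatParams::AsInfer), Some(BoolSimplifiedType));
// `Vec<T>` keys only on the ADT's `DefId`; its generic arguments are ignored:
assert!(matches!(simplify_type(tcx, vec_of_t, TreatParams::AsInfer), Some(AdtSimplifiedType(_))));
// A bare parameter depends on the mode: treated as an inference variable it
// could unify with anything, so no key is produced...
assert_eq!(simplify_type(tcx, t_param, TreatParams::AsInfer), None);
// ...while as a placeholder it only unifies with itself:
assert_eq!(simplify_type(tcx, t_param, TreatParams::AsPlaceholder), Some(PlaceholderSimplifiedType));
```
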
+
+impl<D: Copy + Debug + Eq> SimplifiedTypeGen<D> {
+ pub fn def(self) -> Option<D> {
+ match self {
+ AdtSimplifiedType(d)
+ | ForeignSimplifiedType(d)
+ | TraitSimplifiedType(d)
+ | ClosureSimplifiedType(d)
+ | GeneratorSimplifiedType(d)
+ | OpaqueSimplifiedType(d) => Some(d),
+ _ => None,
+ }
+ }
+
+ pub fn map_def<U, F>(self, map: F) -> SimplifiedTypeGen<U>
+ where
+ F: Fn(D) -> U,
+ U: Copy + Debug + Eq,
+ {
+ match self {
+ BoolSimplifiedType => BoolSimplifiedType,
+ CharSimplifiedType => CharSimplifiedType,
+ IntSimplifiedType(t) => IntSimplifiedType(t),
+ UintSimplifiedType(t) => UintSimplifiedType(t),
+ FloatSimplifiedType(t) => FloatSimplifiedType(t),
+ AdtSimplifiedType(d) => AdtSimplifiedType(map(d)),
+ ForeignSimplifiedType(d) => ForeignSimplifiedType(map(d)),
+ StrSimplifiedType => StrSimplifiedType,
+ ArraySimplifiedType => ArraySimplifiedType,
+ SliceSimplifiedType => SliceSimplifiedType,
+ RefSimplifiedType(m) => RefSimplifiedType(m),
+ PtrSimplifiedType(m) => PtrSimplifiedType(m),
+ NeverSimplifiedType => NeverSimplifiedType,
+ MarkerTraitObjectSimplifiedType => MarkerTraitObjectSimplifiedType,
+ TupleSimplifiedType(n) => TupleSimplifiedType(n),
+ TraitSimplifiedType(d) => TraitSimplifiedType(map(d)),
+ ClosureSimplifiedType(d) => ClosureSimplifiedType(map(d)),
+ GeneratorSimplifiedType(d) => GeneratorSimplifiedType(map(d)),
+ GeneratorWitnessSimplifiedType(n) => GeneratorWitnessSimplifiedType(n),
+ OpaqueSimplifiedType(d) => OpaqueSimplifiedType(map(d)),
+ FunctionSimplifiedType(n) => FunctionSimplifiedType(n),
+ PlaceholderSimplifiedType => PlaceholderSimplifiedType,
+ }
+ }
+}
+
+/// Given generic arguments from an obligation and an impl,
+/// could these two be unified after replacing parameters in
+/// the impl with inference variables?
+///
+/// For obligations, parameters won't be replaced by inference
+/// variables and only unify with themselves. We treat them
+/// the same way we treat placeholders.
+///
+/// We also use this function during coherence. For coherence the
+/// impls only have to overlap for some value, so we treat parameters
+/// on both sides like inference variables. This behavior is toggled
+/// using the `treat_obligation_params` field.
+#[derive(Debug, Clone, Copy)]
+pub struct DeepRejectCtxt {
+ pub treat_obligation_params: TreatParams,
+}
+
+impl DeepRejectCtxt {
+ pub fn generic_args_may_unify<'tcx>(
+ self,
+ obligation_arg: ty::GenericArg<'tcx>,
+ impl_arg: ty::GenericArg<'tcx>,
+ ) -> bool {
+ match (obligation_arg.unpack(), impl_arg.unpack()) {
+ // We don't fast reject based on regions for now.
+ (GenericArgKind::Lifetime(_), GenericArgKind::Lifetime(_)) => true,
+ (GenericArgKind::Type(obl), GenericArgKind::Type(imp)) => {
+ self.types_may_unify(obl, imp)
+ }
+ (GenericArgKind::Const(obl), GenericArgKind::Const(imp)) => {
+ self.consts_may_unify(obl, imp)
+ }
+ _ => bug!("kind mismatch: {obligation_arg} {impl_arg}"),
+ }
+ }
+
+ pub fn types_may_unify<'tcx>(self, obligation_ty: Ty<'tcx>, impl_ty: Ty<'tcx>) -> bool {
+ match impl_ty.kind() {
+ // Start by checking whether the type in the impl may unify with
+ // pretty much everything. Just return `true` in that case.
+ ty::Param(_) | ty::Projection(_) | ty::Error(_) => return true,
+ // These types only unify with inference variables or their own
+ // variant.
+ ty::Bool
+ | ty::Char
+ | ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_)
+ | ty::Adt(..)
+ | ty::Str
+ | ty::Array(..)
+ | ty::Slice(..)
+ | ty::RawPtr(..)
+ | ty::Dynamic(..)
+ | ty::Ref(..)
+ | ty::Never
+ | ty::Tuple(..)
+ | ty::FnPtr(..)
+ | ty::Foreign(..)
+ | ty::Opaque(..) => {}
+ ty::FnDef(..)
+ | ty::Closure(..)
+ | ty::Generator(..)
+ | ty::GeneratorWitness(..)
+ | ty::Placeholder(..)
+ | ty::Bound(..)
+ | ty::Infer(_) => bug!("unexpected impl_ty: {impl_ty}"),
+ }
+
+ let k = impl_ty.kind();
+ match *obligation_ty.kind() {
+ // Purely rigid types, use structural equivalence.
+ ty::Bool
+ | ty::Char
+ | ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_)
+ | ty::Str
+ | ty::Never
+ | ty::Foreign(_) => obligation_ty == impl_ty,
+ ty::Ref(_, obl_ty, obl_mutbl) => match k {
+ &ty::Ref(_, impl_ty, impl_mutbl) => {
+ obl_mutbl == impl_mutbl && self.types_may_unify(obl_ty, impl_ty)
+ }
+ _ => false,
+ },
+ ty::Adt(obl_def, obl_substs) => match k {
+ &ty::Adt(impl_def, impl_substs) => {
+ obl_def == impl_def
+ && iter::zip(obl_substs, impl_substs)
+ .all(|(obl, imp)| self.generic_args_may_unify(obl, imp))
+ }
+ _ => false,
+ },
+ ty::Slice(obl_ty) => {
+ matches!(k, &ty::Slice(impl_ty) if self.types_may_unify(obl_ty, impl_ty))
+ }
+ ty::Array(obl_ty, obl_len) => match k {
+ &ty::Array(impl_ty, impl_len) => {
+ self.types_may_unify(obl_ty, impl_ty)
+ && self.consts_may_unify(obl_len, impl_len)
+ }
+ _ => false,
+ },
+ ty::Tuple(obl) => match k {
+ &ty::Tuple(imp) => {
+ obl.len() == imp.len()
+ && iter::zip(obl, imp).all(|(obl, imp)| self.types_may_unify(obl, imp))
+ }
+ _ => false,
+ },
+ ty::RawPtr(obl) => match k {
+ ty::RawPtr(imp) => obl.mutbl == imp.mutbl && self.types_may_unify(obl.ty, imp.ty),
+ _ => false,
+ },
+ ty::Dynamic(obl_preds, ..) => {
+ // Ideally we would walk the existential predicates here or at least
+ // compare their length. But considering that the relevant `Relate` impl
+ // actually sorts and deduplicates these, that doesn't work.
+ matches!(k, ty::Dynamic(impl_preds, ..) if
+ obl_preds.principal_def_id() == impl_preds.principal_def_id()
+ )
+ }
+ ty::FnPtr(obl_sig) => match k {
+ ty::FnPtr(impl_sig) => {
+ let ty::FnSig { inputs_and_output, c_variadic, unsafety, abi } =
+ obl_sig.skip_binder();
+ let impl_sig = impl_sig.skip_binder();
+
+ abi == impl_sig.abi
+ && c_variadic == impl_sig.c_variadic
+ && unsafety == impl_sig.unsafety
+ && inputs_and_output.len() == impl_sig.inputs_and_output.len()
+ && iter::zip(inputs_and_output, impl_sig.inputs_and_output)
+ .all(|(obl, imp)| self.types_may_unify(obl, imp))
+ }
+ _ => false,
+ },
+
+ // Opaque types in impls should be forbidden, but that doesn't
+ // stop compilation. So this match arm should never return true
+ // if compilation succeeds.
+ ty::Opaque(..) => matches!(k, ty::Opaque(..)),
+
+ // Impls cannot contain these types as these cannot be named directly.
+ ty::FnDef(..) | ty::Closure(..) | ty::Generator(..) => false,
+
+ ty::Placeholder(..) => false,
+
+ // Depending on the value of `treat_obligation_params`, we either
+ // treat generic parameters like placeholders or like inference variables.
+ ty::Param(_) => match self.treat_obligation_params {
+ TreatParams::AsPlaceholder => false,
+ TreatParams::AsInfer => true,
+ },
+
+ ty::Infer(_) => true,
+
+ // As we're walking the whole type, we may encounter projections
+ // inside of binders and whatnot, so we're just going to assume that
+ // projections can unify with other stuff.
+ //
+ // Looking forward to lazy normalization, this is the safer strategy anyway.
+ ty::Projection(_) => true,
+
+ ty::Error(_) => true,
+
+ ty::GeneratorWitness(..) | ty::Bound(..) => {
+ bug!("unexpected obligation type: {:?}", obligation_ty)
+ }
+ }
+ }
+
+ pub fn consts_may_unify(self, obligation_ct: ty::Const<'_>, impl_ct: ty::Const<'_>) -> bool {
+ match impl_ct.kind() {
+ ty::ConstKind::Param(_) | ty::ConstKind::Unevaluated(_) | ty::ConstKind::Error(_) => {
+ return true;
+ }
+ ty::ConstKind::Value(_) => {}
+ ty::ConstKind::Infer(_) | ty::ConstKind::Bound(..) | ty::ConstKind::Placeholder(_) => {
+ bug!("unexpected impl arg: {:?}", impl_ct)
+ }
+ }
+
+ let k = impl_ct.kind();
+ match obligation_ct.kind() {
+ ty::ConstKind::Param(_) => match self.treat_obligation_params {
+ TreatParams::AsPlaceholder => false,
+ TreatParams::AsInfer => true,
+ },
+
+ // As we don't necessarily eagerly evaluate constants,
+ // they might unify with any value.
+ ty::ConstKind::Unevaluated(_) | ty::ConstKind::Error(_) => true,
+ ty::ConstKind::Value(obl) => match k {
+ ty::ConstKind::Value(imp) => {
+ // FIXME(valtrees): Once we have valtrees, we can just
+ // compare them directly here.
+ match (obl.try_to_scalar_int(), imp.try_to_scalar_int()) {
+ (Some(obl), Some(imp)) => obl == imp,
+ _ => true,
+ }
+ }
+ _ => true,
+ },
+
+ ty::ConstKind::Infer(_) => true,
+
+ ty::ConstKind::Bound(..) | ty::ConstKind::Placeholder(_) => {
+ bug!("unexpected obl const: {:?}", obligation_ct)
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/flags.rs b/compiler/rustc_middle/src/ty/flags.rs
new file mode 100644
index 000000000..ea6bb8a7a
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/flags.rs
@@ -0,0 +1,342 @@
+use crate::ty::subst::{GenericArg, GenericArgKind};
+use crate::ty::{self, InferConst, Term, Ty, TypeFlags};
+use std::slice;
+
+#[derive(Debug)]
+pub struct FlagComputation {
+ pub flags: TypeFlags,
+
+ // see `Ty::outer_exclusive_binder` for details
+ pub outer_exclusive_binder: ty::DebruijnIndex,
+}
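+
+// A hedged usage sketch (illustrative only; the `ty` value is hypothetical):
+// `for_kind` computes the flags and outer exclusive binder for a `TyKind`
+// in a single pass.
+//
+// let computation = FlagComputation::for_kind(ty.kind());
+// let has_params = computation.flags.contains(TypeFlags::HAS_TY_PARAM);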
+
+impl FlagComputation {
+ fn new() -> FlagComputation {
+ FlagComputation { flags: TypeFlags::empty(), outer_exclusive_binder: ty::INNERMOST }
+ }
+
+ #[allow(rustc::usage_of_ty_tykind)]
+ pub fn for_kind(kind: &ty::TyKind<'_>) -> FlagComputation {
+ let mut result = FlagComputation::new();
+ result.add_kind(kind);
+ result
+ }
+
+ pub fn for_predicate<'tcx>(binder: ty::Binder<'tcx, ty::PredicateKind<'_>>) -> FlagComputation {
+ let mut result = FlagComputation::new();
+ result.add_predicate(binder);
+ result
+ }
+
+ pub fn for_const(c: ty::Const<'_>) -> TypeFlags {
+ let mut result = FlagComputation::new();
+ result.add_const(c);
+ result.flags
+ }
+
+ pub fn for_unevaluated_const(uv: ty::Unevaluated<'_>) -> TypeFlags {
+ let mut result = FlagComputation::new();
+ result.add_unevaluated_const(uv);
+ result.flags
+ }
+
+ fn add_flags(&mut self, flags: TypeFlags) {
+ self.flags = self.flags | flags;
+ }
+
+ /// indicates that `self` refers to something at binding level `binder`
+ fn add_bound_var(&mut self, binder: ty::DebruijnIndex) {
+ let exclusive_binder = binder.shifted_in(1);
+ self.add_exclusive_binder(exclusive_binder);
+ }
+
+ /// indicates that `self` refers to something *inside* binding
+ /// level `binder` -- not bound by `binder`, but bound by the next
+ /// binder internal to it
+ fn add_exclusive_binder(&mut self, exclusive_binder: ty::DebruijnIndex) {
+ self.outer_exclusive_binder = self.outer_exclusive_binder.max(exclusive_binder);
+ }
+
+ /// Adds the flags/depth from a set of types that appear within the current type, but within a
+ /// region binder.
+ fn bound_computation<T, F>(&mut self, value: ty::Binder<'_, T>, f: F)
+ where
+ F: FnOnce(&mut Self, T),
+ {
+ let mut computation = FlagComputation::new();
+
+ if !value.bound_vars().is_empty() {
+ computation.flags = computation.flags | TypeFlags::HAS_RE_LATE_BOUND;
+ }
+
+ f(&mut computation, value.skip_binder());
+
+ self.add_flags(computation.flags);
+
+ // The types that contributed to `computation` occurred within
+ // a region binder, so subtract one from the region depth
+ // within when adding the depth to `self`.
+ let outer_exclusive_binder = computation.outer_exclusive_binder;
+ if outer_exclusive_binder > ty::INNERMOST {
+ self.add_exclusive_binder(outer_exclusive_binder.shifted_out(1));
+ } // otherwise, this binder captures nothing
+ }
+
+ #[allow(rustc::usage_of_ty_tykind)]
+ fn add_kind(&mut self, kind: &ty::TyKind<'_>) {
+ match kind {
+ &ty::Bool
+ | &ty::Char
+ | &ty::Int(_)
+ | &ty::Float(_)
+ | &ty::Uint(_)
+ | &ty::Never
+ | &ty::Str
+ | &ty::Foreign(..) => {}
+
+ &ty::Error(_) => self.add_flags(TypeFlags::HAS_ERROR),
+
+ &ty::Param(_) => {
+ self.add_flags(TypeFlags::HAS_TY_PARAM);
+ self.add_flags(TypeFlags::STILL_FURTHER_SPECIALIZABLE);
+ }
+
+ &ty::Generator(_, ref substs, _) => {
+ let substs = substs.as_generator();
+ let should_remove_further_specializable =
+ !self.flags.contains(TypeFlags::STILL_FURTHER_SPECIALIZABLE);
+ self.add_substs(substs.parent_substs());
+ if should_remove_further_specializable {
+ self.flags -= TypeFlags::STILL_FURTHER_SPECIALIZABLE;
+ }
+
+ self.add_ty(substs.resume_ty());
+ self.add_ty(substs.return_ty());
+ self.add_ty(substs.witness());
+ self.add_ty(substs.yield_ty());
+ self.add_ty(substs.tupled_upvars_ty());
+ }
+
+ &ty::GeneratorWitness(ts) => {
+ self.bound_computation(ts, |flags, ts| flags.add_tys(ts));
+ }
+
+ &ty::Closure(_, substs) => {
+ let substs = substs.as_closure();
+ let should_remove_further_specializable =
+ !self.flags.contains(TypeFlags::STILL_FURTHER_SPECIALIZABLE);
+ self.add_substs(substs.parent_substs());
+ if should_remove_further_specializable {
+ self.flags -= TypeFlags::STILL_FURTHER_SPECIALIZABLE;
+ }
+
+ self.add_ty(substs.sig_as_fn_ptr_ty());
+ self.add_ty(substs.kind_ty());
+ self.add_ty(substs.tupled_upvars_ty());
+ }
+
+ &ty::Bound(debruijn, _) => {
+ self.add_bound_var(debruijn);
+ }
+
+ &ty::Placeholder(..) => {
+ self.add_flags(TypeFlags::HAS_TY_PLACEHOLDER);
+ self.add_flags(TypeFlags::STILL_FURTHER_SPECIALIZABLE);
+ }
+
+ &ty::Infer(infer) => {
+ self.add_flags(TypeFlags::STILL_FURTHER_SPECIALIZABLE);
+ match infer {
+ ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_) => {
+ self.add_flags(TypeFlags::HAS_TY_FRESH)
+ }
+
+ ty::TyVar(_) | ty::IntVar(_) | ty::FloatVar(_) => {
+ self.add_flags(TypeFlags::HAS_TY_INFER)
+ }
+ }
+ }
+
+ &ty::Adt(_, substs) => {
+ self.add_substs(substs);
+ }
+
+ &ty::Projection(data) => {
+ self.add_flags(TypeFlags::HAS_TY_PROJECTION);
+ self.add_projection_ty(data);
+ }
+
+ &ty::Opaque(_, substs) => {
+ self.add_flags(TypeFlags::HAS_TY_OPAQUE);
+ self.add_substs(substs);
+ }
+
+ &ty::Dynamic(obj, r) => {
+ for predicate in obj.iter() {
+ self.bound_computation(predicate, |computation, predicate| match predicate {
+ ty::ExistentialPredicate::Trait(tr) => computation.add_substs(tr.substs),
+ ty::ExistentialPredicate::Projection(p) => {
+ computation.add_existential_projection(&p);
+ }
+ ty::ExistentialPredicate::AutoTrait(_) => {}
+ });
+ }
+
+ self.add_region(r);
+ }
+
+ &ty::Array(tt, len) => {
+ self.add_ty(tt);
+ self.add_const(len);
+ }
+
+ &ty::Slice(tt) => self.add_ty(tt),
+
+ &ty::RawPtr(ref m) => {
+ self.add_ty(m.ty);
+ }
+
+ &ty::Ref(r, ty, _) => {
+ self.add_region(r);
+ self.add_ty(ty);
+ }
+
+ &ty::Tuple(types) => {
+ self.add_tys(types);
+ }
+
+ &ty::FnDef(_, substs) => {
+ self.add_substs(substs);
+ }
+
+ &ty::FnPtr(fn_sig) => self.bound_computation(fn_sig, |computation, fn_sig| {
+ computation.add_tys(fn_sig.inputs());
+ computation.add_ty(fn_sig.output());
+ }),
+ }
+ }
+
+ fn add_predicate(&mut self, binder: ty::Binder<'_, ty::PredicateKind<'_>>) {
+ self.bound_computation(binder, |computation, atom| computation.add_predicate_atom(atom));
+ }
+
+ fn add_predicate_atom(&mut self, atom: ty::PredicateKind<'_>) {
+ match atom {
+ ty::PredicateKind::Trait(trait_pred) => {
+ self.add_substs(trait_pred.trait_ref.substs);
+ }
+ ty::PredicateKind::RegionOutlives(ty::OutlivesPredicate(a, b)) => {
+ self.add_region(a);
+ self.add_region(b);
+ }
+ ty::PredicateKind::TypeOutlives(ty::OutlivesPredicate(ty, region)) => {
+ self.add_ty(ty);
+ self.add_region(region);
+ }
+ ty::PredicateKind::Subtype(ty::SubtypePredicate { a_is_expected: _, a, b }) => {
+ self.add_ty(a);
+ self.add_ty(b);
+ }
+ ty::PredicateKind::Coerce(ty::CoercePredicate { a, b }) => {
+ self.add_ty(a);
+ self.add_ty(b);
+ }
+ ty::PredicateKind::Projection(ty::ProjectionPredicate { projection_ty, term }) => {
+ self.add_projection_ty(projection_ty);
+ match term {
+ Term::Ty(ty) => self.add_ty(ty),
+ Term::Const(c) => self.add_const(c),
+ }
+ }
+ ty::PredicateKind::WellFormed(arg) => {
+ self.add_substs(slice::from_ref(&arg));
+ }
+ ty::PredicateKind::ObjectSafe(_def_id) => {}
+ ty::PredicateKind::ClosureKind(_def_id, substs, _kind) => {
+ self.add_substs(substs);
+ }
+ ty::PredicateKind::ConstEvaluatable(uv) => {
+ self.add_unevaluated_const(uv);
+ }
+ ty::PredicateKind::ConstEquate(expected, found) => {
+ self.add_const(expected);
+ self.add_const(found);
+ }
+ ty::PredicateKind::TypeWellFormedFromEnv(ty) => {
+ self.add_ty(ty);
+ }
+ }
+ }
+
+ fn add_ty(&mut self, ty: Ty<'_>) {
+ self.add_flags(ty.flags());
+ self.add_exclusive_binder(ty.outer_exclusive_binder());
+ }
+
+ fn add_tys(&mut self, tys: &[Ty<'_>]) {
+ for &ty in tys {
+ self.add_ty(ty);
+ }
+ }
+
+ fn add_region(&mut self, r: ty::Region<'_>) {
+ self.add_flags(r.type_flags());
+ if let ty::ReLateBound(debruijn, _) = *r {
+ self.add_bound_var(debruijn);
+ }
+ }
+
+ fn add_const(&mut self, c: ty::Const<'_>) {
+ self.add_ty(c.ty());
+ match c.kind() {
+ ty::ConstKind::Unevaluated(unevaluated) => self.add_unevaluated_const(unevaluated),
+ ty::ConstKind::Infer(infer) => {
+ self.add_flags(TypeFlags::STILL_FURTHER_SPECIALIZABLE);
+ match infer {
+ InferConst::Fresh(_) => self.add_flags(TypeFlags::HAS_CT_FRESH),
+ InferConst::Var(_) => self.add_flags(TypeFlags::HAS_CT_INFER),
+ }
+ }
+ ty::ConstKind::Bound(debruijn, _) => {
+ self.add_bound_var(debruijn);
+ }
+ ty::ConstKind::Param(_) => {
+ self.add_flags(TypeFlags::HAS_CT_PARAM);
+ self.add_flags(TypeFlags::STILL_FURTHER_SPECIALIZABLE);
+ }
+ ty::ConstKind::Placeholder(_) => {
+ self.add_flags(TypeFlags::HAS_CT_PLACEHOLDER);
+ self.add_flags(TypeFlags::STILL_FURTHER_SPECIALIZABLE);
+ }
+ ty::ConstKind::Value(_) => {}
+ ty::ConstKind::Error(_) => self.add_flags(TypeFlags::HAS_ERROR),
+ }
+ }
+
+ fn add_unevaluated_const<P>(&mut self, ct: ty::Unevaluated<'_, P>) {
+ self.add_substs(ct.substs);
+ self.add_flags(TypeFlags::HAS_CT_PROJECTION);
+ }
+
+ fn add_existential_projection(&mut self, projection: &ty::ExistentialProjection<'_>) {
+ self.add_substs(projection.substs);
+ match projection.term {
+ ty::Term::Ty(ty) => self.add_ty(ty),
+ ty::Term::Const(ct) => self.add_const(ct),
+ }
+ }
+
+ fn add_projection_ty(&mut self, projection_ty: ty::ProjectionTy<'_>) {
+ self.add_substs(projection_ty.substs);
+ }
+
+ fn add_substs(&mut self, substs: &[GenericArg<'_>]) {
+ for kind in substs {
+ match kind.unpack() {
+ GenericArgKind::Type(ty) => self.add_ty(ty),
+ GenericArgKind::Lifetime(lt) => self.add_region(lt),
+ GenericArgKind::Const(ct) => self.add_const(ct),
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/fold.rs b/compiler/rustc_middle/src/ty/fold.rs
new file mode 100644
index 000000000..5e96e278b
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/fold.rs
@@ -0,0 +1,797 @@
+//! A folding traversal mechanism for complex data structures that contain type
+//! information.
+//!
+//! This is a modifying traversal. It consumes the data structure, producing a
+//! (possibly) modified version of it. Both fallible and infallible versions are
+//! available. The name is potentially confusing, because this traversal is more
+//! like `Iterator::map` than `Iterator::fold`.
+//!
+//! This traversal has limited flexibility. Only a small number of "types of
+//! interest" within the complex data structures can receive custom
+//! modification. These are the ones containing the most important type-related
+//! information, such as `Ty`, `Predicate`, `Region`, and `Const`.
+//!
+//! There are three groups of traits involved in each traversal.
+//! - `TypeFoldable`. This is implemented once for many types, including:
+//! - Types of interest, for which the methods delegate to the
+//! folder.
+//! - All other types, including generic containers like `Vec` and `Option`.
+//! It defines a "skeleton" of how they should be folded.
+//! - `TypeSuperFoldable`. This is implemented only for each type of interest,
+//! and defines the folding "skeleton" for these types.
+//! - `TypeFolder`/`FallibleTypeFolder`. One of these is implemented for each
+//! folder. This defines how types of interest are folded.
+//!
+//! This means each fold is a mixture of (a) generic folding operations, and (b)
+//! custom fold operations that are specific to the folder.
+//! - The `TypeFoldable` impls handle most of the traversal, and call into
+//! `TypeFolder`/`FallibleTypeFolder` when they encounter a type of interest.
+//! - A `TypeFolder`/`FallibleTypeFolder` may call into another `TypeFoldable`
+//! impl, because some of the types of interest are recursive and can contain
+//! other types of interest.
+//! - A `TypeFolder`/`FallibleTypeFolder` may also call into a `TypeSuperFoldable`
+//! impl, because each folder might provide custom handling only for some types
+//! of interest, or only for some variants of each type of interest, and then
+//! use default traversal for the remaining cases.
+//!
+//! For example, if you have `struct S(Ty, U)` where `S: TypeFoldable` and `U:
+//! TypeFoldable`, and an instance `s = S(ty, u)`, it would be folded like so:
+//! ```text
+//! s.fold_with(folder) calls
+//! - ty.fold_with(folder) calls
+//! - folder.fold_ty(ty) may call
+//! - ty.super_fold_with(folder)
+//! - u.fold_with(folder)
+//! ```
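+//!
+//! As an additional, purely illustrative sketch (hedged; `RegionEraser` is
+//! a hypothetical folder, not one defined in this crate), a folder that
+//! erases every region it encounters can override just `fold_region` and
+//! inherit the default traversal for everything else:
+//! ```ignore (illustrative)
+//! struct RegionEraser<'tcx> {
+//!     tcx: TyCtxt<'tcx>,
+//! }
+//!
+//! impl<'tcx> TypeFolder<'tcx> for RegionEraser<'tcx> {
+//!     fn tcx(&self) -> TyCtxt<'tcx> {
+//!         self.tcx
+//!     }
+//!
+//!     // Every region encountered, bound or free, is replaced by `'erased`.
+//!     fn fold_region(&mut self, _: ty::Region<'tcx>) -> ty::Region<'tcx> {
+//!         self.tcx.lifetimes.re_erased
+//!     }
+//! }
+//! ```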
+use crate::mir;
+use crate::ty::{self, Binder, BoundTy, Ty, TyCtxt, TypeVisitable};
+use rustc_data_structures::fx::FxIndexMap;
+use rustc_hir::def_id::DefId;
+
+use std::collections::BTreeMap;
+
+/// This trait is implemented for every type that can be folded,
+/// providing the skeleton of the traversal.
+///
+/// To implement this conveniently, use the derive macro located in
+/// `rustc_macros`.
+pub trait TypeFoldable<'tcx>: TypeVisitable<'tcx> {
+ /// The entry point for folding. To fold a value `t` with a folder `f`
+ /// call: `t.try_fold_with(f)`.
+ ///
+ /// For most types, this just traverses the value, calling `try_fold_with`
+ /// on each field/element.
+ ///
+ /// For types of interest (such as `Ty`), the implementation of this
+ /// method calls a folder method specifically for that type (such as
+ /// `F::try_fold_ty`). This is where control transfers from `TypeFoldable`
+ /// to `TypeFolder`.
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error>;
+
+ /// A convenient alternative to `try_fold_with` for use with infallible
+ /// folders. Do not override this method, to ensure coherence with
+ /// `try_fold_with`.
+ fn fold_with<F: TypeFolder<'tcx>>(self, folder: &mut F) -> Self {
+ self.try_fold_with(folder).into_ok()
+ }
+}
+
+// This trait is implemented for types of interest.
+pub trait TypeSuperFoldable<'tcx>: TypeFoldable<'tcx> {
+ /// Provides a default fold for a type of interest. This should only be
+ /// called within `TypeFolder` methods, when a non-custom traversal is
+ /// desired for the value of the type of interest passed to that method.
+ /// For example, in `MyFolder::try_fold_ty(ty)`, it is valid to call
+ /// `ty.try_super_fold_with(self)`, but any other folding should be done
+ /// with `xyz.try_fold_with(self)`.
+ fn try_super_fold_with<F: FallibleTypeFolder<'tcx>>(
+ self,
+ folder: &mut F,
+ ) -> Result<Self, F::Error>;
+
+ /// A convenient alternative to `try_super_fold_with` for use with
+ /// infallible folders. Do not override this method, to ensure coherence
+ /// with `try_super_fold_with`.
+ fn super_fold_with<F: TypeFolder<'tcx>>(self, folder: &mut F) -> Self {
+ self.try_super_fold_with(folder).into_ok()
+ }
+}
+
+/// This trait is implemented for every infallible folding traversal. There is
+/// a fold method defined for every type of interest. Each such method has a
+/// default that does an "identity" fold. Implementations of these methods
+/// often fall back to a `super_fold_with` method if the primary argument
+/// doesn't satisfy a particular condition.
+///
+/// A blanket implementation of [`FallibleTypeFolder`] will defer to
+/// the infallible methods of this trait to ensure that the two APIs
+/// are coherent.
+pub trait TypeFolder<'tcx>: FallibleTypeFolder<'tcx, Error = !> {
+ fn tcx<'a>(&'a self) -> TyCtxt<'tcx>;
+
+ fn fold_binder<T>(&mut self, t: Binder<'tcx, T>) -> Binder<'tcx, T>
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ t.super_fold_with(self)
+ }
+
+ fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
+ t.super_fold_with(self)
+ }
+
+ fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
+ r.super_fold_with(self)
+ }
+
+ fn fold_const(&mut self, c: ty::Const<'tcx>) -> ty::Const<'tcx> {
+ c.super_fold_with(self)
+ }
+
+ fn fold_unevaluated(&mut self, uv: ty::Unevaluated<'tcx>) -> ty::Unevaluated<'tcx> {
+ uv.super_fold_with(self)
+ }
+
+ fn fold_predicate(&mut self, p: ty::Predicate<'tcx>) -> ty::Predicate<'tcx> {
+ p.super_fold_with(self)
+ }
+
+ fn fold_mir_const(&mut self, c: mir::ConstantKind<'tcx>) -> mir::ConstantKind<'tcx> {
+ bug!("most type folders should not be folding MIR datastructures: {:?}", c)
+ }
+}
+
+/// This trait is implemented for every folding traversal. There is a fold
+/// method defined for every type of interest. Each such method has a default
+/// that does an "identity" fold.
+///
+/// A blanket implementation of this trait (that defers to the relevant
+/// method of [`TypeFolder`]) is provided for all infallible folders in
+/// order to ensure the two APIs are coherent.
+pub trait FallibleTypeFolder<'tcx>: Sized {
+ type Error;
+
+ fn tcx<'a>(&'a self) -> TyCtxt<'tcx>;
+
+ fn try_fold_binder<T>(&mut self, t: Binder<'tcx, T>) -> Result<Binder<'tcx, T>, Self::Error>
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ t.try_super_fold_with(self)
+ }
+
+ fn try_fold_ty(&mut self, t: Ty<'tcx>) -> Result<Ty<'tcx>, Self::Error> {
+ t.try_super_fold_with(self)
+ }
+
+ fn try_fold_region(&mut self, r: ty::Region<'tcx>) -> Result<ty::Region<'tcx>, Self::Error> {
+ r.try_super_fold_with(self)
+ }
+
+ fn try_fold_const(&mut self, c: ty::Const<'tcx>) -> Result<ty::Const<'tcx>, Self::Error> {
+ c.try_super_fold_with(self)
+ }
+
+ fn try_fold_unevaluated(
+ &mut self,
+ c: ty::Unevaluated<'tcx>,
+ ) -> Result<ty::Unevaluated<'tcx>, Self::Error> {
+ c.try_super_fold_with(self)
+ }
+
+ fn try_fold_predicate(
+ &mut self,
+ p: ty::Predicate<'tcx>,
+ ) -> Result<ty::Predicate<'tcx>, Self::Error> {
+ p.try_super_fold_with(self)
+ }
+
+ fn try_fold_mir_const(
+ &mut self,
+ c: mir::ConstantKind<'tcx>,
+ ) -> Result<mir::ConstantKind<'tcx>, Self::Error> {
+ bug!("most type folders should not be folding MIR datastructures: {:?}", c)
+ }
+}
+
+// This blanket implementation of the fallible trait for infallible folders
+// delegates to infallible methods to ensure coherence.
+impl<'tcx, F> FallibleTypeFolder<'tcx> for F
+where
+ F: TypeFolder<'tcx>,
+{
+ type Error = !;
+
+ fn tcx<'a>(&'a self) -> TyCtxt<'tcx> {
+ TypeFolder::tcx(self)
+ }
+
+ fn try_fold_binder<T>(&mut self, t: Binder<'tcx, T>) -> Result<Binder<'tcx, T>, !>
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ Ok(self.fold_binder(t))
+ }
+
+ fn try_fold_ty(&mut self, t: Ty<'tcx>) -> Result<Ty<'tcx>, !> {
+ Ok(self.fold_ty(t))
+ }
+
+ fn try_fold_region(&mut self, r: ty::Region<'tcx>) -> Result<ty::Region<'tcx>, !> {
+ Ok(self.fold_region(r))
+ }
+
+ fn try_fold_const(&mut self, c: ty::Const<'tcx>) -> Result<ty::Const<'tcx>, !> {
+ Ok(self.fold_const(c))
+ }
+
+ fn try_fold_unevaluated(
+ &mut self,
+ c: ty::Unevaluated<'tcx>,
+ ) -> Result<ty::Unevaluated<'tcx>, !> {
+ Ok(self.fold_unevaluated(c))
+ }
+
+ fn try_fold_predicate(&mut self, p: ty::Predicate<'tcx>) -> Result<ty::Predicate<'tcx>, !> {
+ Ok(self.fold_predicate(p))
+ }
+
+ fn try_fold_mir_const(
+ &mut self,
+ c: mir::ConstantKind<'tcx>,
+ ) -> Result<mir::ConstantKind<'tcx>, !> {
+ Ok(self.fold_mir_const(c))
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Some sample folders
+
+pub struct BottomUpFolder<'tcx, F, G, H>
+where
+ F: FnMut(Ty<'tcx>) -> Ty<'tcx>,
+ G: FnMut(ty::Region<'tcx>) -> ty::Region<'tcx>,
+ H: FnMut(ty::Const<'tcx>) -> ty::Const<'tcx>,
+{
+ pub tcx: TyCtxt<'tcx>,
+ pub ty_op: F,
+ pub lt_op: G,
+ pub ct_op: H,
+}
+
+impl<'tcx, F, G, H> TypeFolder<'tcx> for BottomUpFolder<'tcx, F, G, H>
+where
+ F: FnMut(Ty<'tcx>) -> Ty<'tcx>,
+ G: FnMut(ty::Region<'tcx>) -> ty::Region<'tcx>,
+ H: FnMut(ty::Const<'tcx>) -> ty::Const<'tcx>,
+{
+ fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ let t = ty.super_fold_with(self);
+ (self.ty_op)(t)
+ }
+
+ fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
+ let r = r.super_fold_with(self);
+ (self.lt_op)(r)
+ }
+
+ fn fold_const(&mut self, ct: ty::Const<'tcx>) -> ty::Const<'tcx> {
+ let ct = ct.super_fold_with(self);
+ (self.ct_op)(ct)
+ }
+}
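+
+// A hedged usage sketch (illustrative only; `tcx`, `value`, `needle`, and
+// `replacement` are hypothetical): replace one specific type everywhere in
+// `value` while passing regions and consts through unchanged.
+//
+// let folded = value.fold_with(&mut BottomUpFolder {
+//     tcx,
+//     ty_op: |ty| if ty == needle { replacement } else { ty },
+//     lt_op: |lt| lt,
+//     ct_op: |ct| ct,
+// });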
+
+///////////////////////////////////////////////////////////////////////////
+// Region folder
+
+impl<'tcx> TyCtxt<'tcx> {
+ /// Folds the escaping and free regions in `value` using `f`; regions
+ /// bound within `value` itself are skipped by the traversal rather
+ /// than passed to `f`.
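+ ///
+ /// A hedged, illustrative example (the `ty` value is hypothetical):
+ /// ```ignore (illustrative)
+ /// // Erase every free region in `ty`:
+ /// let erased = tcx.fold_regions(ty, |_, _| tcx.lifetimes.re_erased);
+ /// ```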
+ pub fn fold_regions<T>(
+ self,
+ value: T,
+ mut f: impl FnMut(ty::Region<'tcx>, ty::DebruijnIndex) -> ty::Region<'tcx>,
+ ) -> T
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ value.fold_with(&mut RegionFolder::new(self, &mut f))
+ }
+}
+
+/// Folds over the substructure of a type, visiting its component
+/// types and all regions that occur *free* within it.
+///
+/// That is, `Ty` can contain function or method types that bind
+/// regions at the call site (`ReLateBound`); regions bound within a
+/// type are not visited by this folder, and only regions that occur
+/// free are passed to `fold_region_fn`.
+pub struct RegionFolder<'a, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+
+ /// Stores the index of a binder *just outside* the stuff we have
+ /// visited. So this begins as INNERMOST; when we pass through a
+ /// binder, it is incremented (via `shift_in`).
+ current_index: ty::DebruijnIndex,
+
+ /// Callback invoked for each free region. The `DebruijnIndex`
+ /// points to the binder *just outside* the ones we have passed
+ /// through.
+ fold_region_fn:
+ &'a mut (dyn FnMut(ty::Region<'tcx>, ty::DebruijnIndex) -> ty::Region<'tcx> + 'a),
+}
+
+impl<'a, 'tcx> RegionFolder<'a, 'tcx> {
+ #[inline]
+ pub fn new(
+ tcx: TyCtxt<'tcx>,
+ fold_region_fn: &'a mut dyn FnMut(ty::Region<'tcx>, ty::DebruijnIndex) -> ty::Region<'tcx>,
+ ) -> RegionFolder<'a, 'tcx> {
+ RegionFolder { tcx, current_index: ty::INNERMOST, fold_region_fn }
+ }
+}
+
+impl<'a, 'tcx> TypeFolder<'tcx> for RegionFolder<'a, 'tcx> {
+ fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn fold_binder<T: TypeFoldable<'tcx>>(
+ &mut self,
+ t: ty::Binder<'tcx, T>,
+ ) -> ty::Binder<'tcx, T> {
+ self.current_index.shift_in(1);
+ let t = t.super_fold_with(self);
+ self.current_index.shift_out(1);
+ t
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
+ match *r {
+ ty::ReLateBound(debruijn, _) if debruijn < self.current_index => {
+ debug!(?self.current_index, "skipped bound region");
+ r
+ }
+ _ => {
+ debug!(?self.current_index, "folding free region");
+ (self.fold_region_fn)(r, self.current_index)
+ }
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Bound vars replacer
+
+pub trait BoundVarReplacerDelegate<'tcx> {
+ fn replace_region(&mut self, br: ty::BoundRegion) -> ty::Region<'tcx>;
+ fn replace_ty(&mut self, bt: ty::BoundTy) -> Ty<'tcx>;
+ fn replace_const(&mut self, bv: ty::BoundVar, ty: Ty<'tcx>) -> ty::Const<'tcx>;
+}
+
+pub struct FnMutDelegate<R, T, C> {
+ pub regions: R,
+ pub types: T,
+ pub consts: C,
+}
+impl<'tcx, R, T, C> BoundVarReplacerDelegate<'tcx> for FnMutDelegate<R, T, C>
+where
+ R: FnMut(ty::BoundRegion) -> ty::Region<'tcx>,
+ T: FnMut(ty::BoundTy) -> Ty<'tcx>,
+ C: FnMut(ty::BoundVar, Ty<'tcx>) -> ty::Const<'tcx>,
+{
+ fn replace_region(&mut self, br: ty::BoundRegion) -> ty::Region<'tcx> {
+ (self.regions)(br)
+ }
+ fn replace_ty(&mut self, bt: ty::BoundTy) -> Ty<'tcx> {
+ (self.types)(bt)
+ }
+ fn replace_const(&mut self, bv: ty::BoundVar, ty: Ty<'tcx>) -> ty::Const<'tcx> {
+ (self.consts)(bv, ty)
+ }
+}
+
+/// Replaces the escaping bound vars (late bound regions or bound types) in a type.
+struct BoundVarReplacer<'tcx, D> {
+ tcx: TyCtxt<'tcx>,
+
+ /// As with `RegionFolder`, represents the index of a binder *just outside*
+ /// the ones we have visited.
+ current_index: ty::DebruijnIndex,
+
+ delegate: D,
+}
+
+impl<'tcx, D: BoundVarReplacerDelegate<'tcx>> BoundVarReplacer<'tcx, D> {
+ fn new(tcx: TyCtxt<'tcx>, delegate: D) -> Self {
+ BoundVarReplacer { tcx, current_index: ty::INNERMOST, delegate }
+ }
+}
+
+impl<'tcx, D> TypeFolder<'tcx> for BoundVarReplacer<'tcx, D>
+where
+ D: BoundVarReplacerDelegate<'tcx>,
+{
+ fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn fold_binder<T: TypeFoldable<'tcx>>(
+ &mut self,
+ t: ty::Binder<'tcx, T>,
+ ) -> ty::Binder<'tcx, T> {
+ self.current_index.shift_in(1);
+ let t = t.super_fold_with(self);
+ self.current_index.shift_out(1);
+ t
+ }
+
+ fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
+ match *t.kind() {
+ ty::Bound(debruijn, bound_ty) if debruijn == self.current_index => {
+ let ty = self.delegate.replace_ty(bound_ty);
+ ty::fold::shift_vars(self.tcx, ty, self.current_index.as_u32())
+ }
+ _ if t.has_vars_bound_at_or_above(self.current_index) => t.super_fold_with(self),
+ _ => t,
+ }
+ }
+
+ fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
+ match *r {
+ ty::ReLateBound(debruijn, br) if debruijn == self.current_index => {
+ let region = self.delegate.replace_region(br);
+ if let ty::ReLateBound(debruijn1, br) = *region {
+ // If the callback returns a late-bound region,
+ // that region should always use the INNERMOST
+ // debruijn index. Then we adjust it to the
+ // correct depth.
+ assert_eq!(debruijn1, ty::INNERMOST);
+ self.tcx.reuse_or_mk_region(region, ty::ReLateBound(debruijn, br))
+ } else {
+ region
+ }
+ }
+ _ => r,
+ }
+ }
+
+ fn fold_const(&mut self, ct: ty::Const<'tcx>) -> ty::Const<'tcx> {
+ match ct.kind() {
+ ty::ConstKind::Bound(debruijn, bound_const) if debruijn == self.current_index => {
+ let ct = self.delegate.replace_const(bound_const, ct.ty());
+ ty::fold::shift_vars(self.tcx, ct, self.current_index.as_u32())
+ }
+ _ => ct.super_fold_with(self),
+ }
+ }
+
+ fn fold_predicate(&mut self, p: ty::Predicate<'tcx>) -> ty::Predicate<'tcx> {
+ if p.has_vars_bound_at_or_above(self.current_index) { p.super_fold_with(self) } else { p }
+ }
+}
+
+impl<'tcx> TyCtxt<'tcx> {
+ /// Replaces all regions bound by the given `Binder` with the
+ /// results returned by the closure; the closure is expected to
+ /// return a free region (relative to this binder), and hence the
+ /// binder is removed in the return type. The closure is invoked
+ /// once for each unique `BoundRegion`; multiple references to the
+ /// same `BoundRegion` will reuse the previous result. A map is
+ /// returned at the end with each bound region and the free region
+ /// that replaced it.
+ ///
+ /// # Panics
+ ///
+ /// This method only replaces late-bound regions. Any types or
+ /// constants bound by `value` will cause an ICE.
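+ ///
+ /// A hedged, illustrative example (the `binder` value is hypothetical):
+ /// ```ignore (illustrative)
+ /// // Strip the binder, mapping each late-bound region to `'erased`:
+ /// let (value, map) =
+ ///     tcx.replace_late_bound_regions(binder, |_| tcx.lifetimes.re_erased);
+ /// ```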
+ pub fn replace_late_bound_regions<T, F>(
+ self,
+ value: Binder<'tcx, T>,
+ mut fld_r: F,
+ ) -> (T, BTreeMap<ty::BoundRegion, ty::Region<'tcx>>)
+ where
+ F: FnMut(ty::BoundRegion) -> ty::Region<'tcx>,
+ T: TypeFoldable<'tcx>,
+ {
+ let mut region_map = BTreeMap::new();
+ let real_fld_r = |br: ty::BoundRegion| *region_map.entry(br).or_insert_with(|| fld_r(br));
+ let value = self.replace_late_bound_regions_uncached(value, real_fld_r);
+ (value, region_map)
+ }
+
+ pub fn replace_late_bound_regions_uncached<T, F>(
+ self,
+ value: Binder<'tcx, T>,
+ replace_regions: F,
+ ) -> T
+ where
+ F: FnMut(ty::BoundRegion) -> ty::Region<'tcx>,
+ T: TypeFoldable<'tcx>,
+ {
+ let value = value.skip_binder();
+ if !value.has_escaping_bound_vars() {
+ value
+ } else {
+ let delegate = FnMutDelegate {
+ regions: replace_regions,
+ types: |b| bug!("unexpected bound ty in binder: {b:?}"),
+ consts: |b, ty| bug!("unexpected bound ct in binder: {b:?} {ty}"),
+ };
+ let mut replacer = BoundVarReplacer::new(self, delegate);
+ value.fold_with(&mut replacer)
+ }
+ }
+
+ /// Replaces all escaping bound vars. The `delegate` decides how escaping
+ /// bound regions, types, and consts are each replaced.
+ pub fn replace_escaping_bound_vars_uncached<T: TypeFoldable<'tcx>>(
+ self,
+ value: T,
+ delegate: impl BoundVarReplacerDelegate<'tcx>,
+ ) -> T {
+ if !value.has_escaping_bound_vars() {
+ value
+ } else {
+ let mut replacer = BoundVarReplacer::new(self, delegate);
+ value.fold_with(&mut replacer)
+ }
+ }
+
+ /// Replaces all types, regions, and consts bound by the given `Binder`;
+ /// the `delegate` decides how each kind of bound variable is replaced.
+ pub fn replace_bound_vars_uncached<T: TypeFoldable<'tcx>>(
+ self,
+ value: Binder<'tcx, T>,
+ delegate: impl BoundVarReplacerDelegate<'tcx>,
+ ) -> T {
+ self.replace_escaping_bound_vars_uncached(value.skip_binder(), delegate)
+ }
+
+ /// Replaces any late-bound regions bound in `value` with
+ /// free variants attached to `all_outlive_scope`.
+ pub fn liberate_late_bound_regions<T>(
+ self,
+ all_outlive_scope: DefId,
+ value: ty::Binder<'tcx, T>,
+ ) -> T
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ self.replace_late_bound_regions_uncached(value, |br| {
+ self.mk_region(ty::ReFree(ty::FreeRegion {
+ scope: all_outlive_scope,
+ bound_region: br.kind,
+ }))
+ })
+ }
+
+ pub fn shift_bound_var_indices<T>(self, bound_vars: usize, value: T) -> T
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ let shift_bv = |bv: ty::BoundVar| ty::BoundVar::from_usize(bv.as_usize() + bound_vars);
+ self.replace_escaping_bound_vars_uncached(
+ value,
+ FnMutDelegate {
+ regions: |r: ty::BoundRegion| {
+ self.mk_region(ty::ReLateBound(
+ ty::INNERMOST,
+ ty::BoundRegion { var: shift_bv(r.var), kind: r.kind },
+ ))
+ },
+ types: |t: ty::BoundTy| {
+ self.mk_ty(ty::Bound(
+ ty::INNERMOST,
+ ty::BoundTy { var: shift_bv(t.var), kind: t.kind },
+ ))
+ },
+ consts: |c, ty: Ty<'tcx>| {
+ self.mk_const(ty::ConstS {
+ kind: ty::ConstKind::Bound(ty::INNERMOST, shift_bv(c)),
+ ty,
+ })
+ },
+ },
+ )
+ }
+
+ /// Replaces any late-bound regions bound in `value` with `'erased`. Useful in codegen but also
+ /// method lookup and a few other places where precise region relationships are not required.
+ pub fn erase_late_bound_regions<T>(self, value: Binder<'tcx, T>) -> T
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ self.replace_late_bound_regions(value, |_| self.lifetimes.re_erased).0
+ }
+
+ /// Rewrites any late-bound regions so that they are anonymous. Region numbers are
+ /// assigned starting at 0 and increasing monotonically in the order traversed
+ /// by the fold operation.
+ ///
+ /// The chief purpose of this function is to canonicalize regions so that two
+ /// `FnSig`s or `TraitRef`s which are equivalent up to region naming will become
+ /// structurally identical. For example, `for<'a, 'b> fn(&'a isize, &'b isize)` and
+ /// `for<'a, 'b> fn(&'b isize, &'a isize)` will become identical after anonymization.
+ pub fn anonymize_late_bound_regions<T>(self, sig: Binder<'tcx, T>) -> Binder<'tcx, T>
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ let mut counter = 0;
+ let inner = self
+ .replace_late_bound_regions(sig, |_| {
+ let br = ty::BoundRegion {
+ var: ty::BoundVar::from_u32(counter),
+ kind: ty::BrAnon(counter),
+ };
+ let r = self.mk_region(ty::ReLateBound(ty::INNERMOST, br));
+ counter += 1;
+ r
+ })
+ .0;
+ let bound_vars = self.mk_bound_variable_kinds(
+ (0..counter).map(|i| ty::BoundVariableKind::Region(ty::BrAnon(i))),
+ );
+ Binder::bind_with_vars(inner, bound_vars)
+ }
+
+ /// Anonymizes all bound variables in `value`; this is mostly used to improve caching.
+ pub fn anonymize_bound_vars<T>(self, value: Binder<'tcx, T>) -> Binder<'tcx, T>
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ struct Anonymize<'a, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ map: &'a mut FxIndexMap<ty::BoundVar, ty::BoundVariableKind>,
+ }
+ impl<'tcx> BoundVarReplacerDelegate<'tcx> for Anonymize<'_, 'tcx> {
+ fn replace_region(&mut self, br: ty::BoundRegion) -> ty::Region<'tcx> {
+ let entry = self.map.entry(br.var);
+ let index = entry.index();
+ let var = ty::BoundVar::from_usize(index);
+ let kind = entry
+ .or_insert_with(|| ty::BoundVariableKind::Region(ty::BrAnon(index as u32)))
+ .expect_region();
+ let br = ty::BoundRegion { var, kind };
+ self.tcx.mk_region(ty::ReLateBound(ty::INNERMOST, br))
+ }
+ fn replace_ty(&mut self, bt: ty::BoundTy) -> Ty<'tcx> {
+ let entry = self.map.entry(bt.var);
+ let index = entry.index();
+ let var = ty::BoundVar::from_usize(index);
+ let kind = entry
+ .or_insert_with(|| ty::BoundVariableKind::Ty(ty::BoundTyKind::Anon))
+ .expect_ty();
+ self.tcx.mk_ty(ty::Bound(ty::INNERMOST, BoundTy { var, kind }))
+ }
+ fn replace_const(&mut self, bv: ty::BoundVar, ty: Ty<'tcx>) -> ty::Const<'tcx> {
+ let entry = self.map.entry(bv);
+ let index = entry.index();
+ let var = ty::BoundVar::from_usize(index);
+ let () = entry.or_insert_with(|| ty::BoundVariableKind::Const).expect_const();
+ self.tcx.mk_const(ty::ConstS { ty, kind: ty::ConstKind::Bound(ty::INNERMOST, var) })
+ }
+ }
+
+ let mut map = Default::default();
+ let delegate = Anonymize { tcx: self, map: &mut map };
+ let inner = self.replace_escaping_bound_vars_uncached(value.skip_binder(), delegate);
+ let bound_vars = self.mk_bound_variable_kinds(map.into_values());
+ Binder::bind_with_vars(inner, bound_vars)
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Shifter
+//
+// Shifts the De Bruijn indices on all escaping bound vars by a
+// fixed amount. Useful in substitution or when otherwise introducing
+// a binding level that is not intended to capture the existing bound
+// vars. See comment on `shift_vars_through_binders` method in
+// `subst.rs` for more details.
+
+struct Shifter<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ current_index: ty::DebruijnIndex,
+ amount: u32,
+}
+
+impl<'tcx> Shifter<'tcx> {
+ pub fn new(tcx: TyCtxt<'tcx>, amount: u32) -> Self {
+ Shifter { tcx, current_index: ty::INNERMOST, amount }
+ }
+}
+
+impl<'tcx> TypeFolder<'tcx> for Shifter<'tcx> {
+ fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn fold_binder<T: TypeFoldable<'tcx>>(
+ &mut self,
+ t: ty::Binder<'tcx, T>,
+ ) -> ty::Binder<'tcx, T> {
+ self.current_index.shift_in(1);
+ let t = t.super_fold_with(self);
+ self.current_index.shift_out(1);
+ t
+ }
+
+ fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
+ match *r {
+ ty::ReLateBound(debruijn, br) => {
+ if self.amount == 0 || debruijn < self.current_index {
+ r
+ } else {
+ let debruijn = debruijn.shifted_in(self.amount);
+ let shifted = ty::ReLateBound(debruijn, br);
+ self.tcx.mk_region(shifted)
+ }
+ }
+ _ => r,
+ }
+ }
+
+ fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ match *ty.kind() {
+ ty::Bound(debruijn, bound_ty) => {
+ if self.amount == 0 || debruijn < self.current_index {
+ ty
+ } else {
+ let debruijn = debruijn.shifted_in(self.amount);
+ self.tcx.mk_ty(ty::Bound(debruijn, bound_ty))
+ }
+ }
+
+ _ => ty.super_fold_with(self),
+ }
+ }
+
+ fn fold_const(&mut self, ct: ty::Const<'tcx>) -> ty::Const<'tcx> {
+ if let ty::ConstKind::Bound(debruijn, bound_ct) = ct.kind() {
+ if self.amount == 0 || debruijn < self.current_index {
+ ct
+ } else {
+ let debruijn = debruijn.shifted_in(self.amount);
+ self.tcx.mk_const(ty::ConstS {
+ kind: ty::ConstKind::Bound(debruijn, bound_ct),
+ ty: ct.ty(),
+ })
+ }
+ } else {
+ ct.super_fold_with(self)
+ }
+ }
+}
+
+pub fn shift_region<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ region: ty::Region<'tcx>,
+ amount: u32,
+) -> ty::Region<'tcx> {
+ match *region {
+ ty::ReLateBound(debruijn, br) if amount > 0 => {
+ tcx.mk_region(ty::ReLateBound(debruijn.shifted_in(amount), br))
+ }
+ _ => region,
+ }
+}
+
+pub fn shift_vars<'tcx, T>(tcx: TyCtxt<'tcx>, value: T, amount: u32) -> T
+where
+ T: TypeFoldable<'tcx>,
+{
+ debug!("shift_vars(value={:?}, amount={})", value, amount);
+
+ value.fold_with(&mut Shifter::new(tcx, amount))
+}
diff --git a/compiler/rustc_middle/src/ty/generics.rs b/compiler/rustc_middle/src/ty/generics.rs
new file mode 100644
index 000000000..add2df258
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/generics.rs
@@ -0,0 +1,349 @@
+use crate::middle::resolve_lifetime::ObjectLifetimeDefault;
+use crate::ty;
+use crate::ty::subst::{Subst, SubstsRef};
+use crate::ty::EarlyBinder;
+use rustc_ast as ast;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_hir::def_id::DefId;
+use rustc_span::symbol::Symbol;
+use rustc_span::Span;
+
+use super::{EarlyBoundRegion, InstantiatedPredicates, ParamConst, ParamTy, Predicate, TyCtxt};
+
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable)]
+pub enum GenericParamDefKind {
+ Lifetime,
+ Type { has_default: bool, object_lifetime_default: ObjectLifetimeDefault, synthetic: bool },
+ Const { has_default: bool },
+}
+
+impl GenericParamDefKind {
+ pub fn descr(&self) -> &'static str {
+ match self {
+ GenericParamDefKind::Lifetime => "lifetime",
+ GenericParamDefKind::Type { .. } => "type",
+ GenericParamDefKind::Const { .. } => "constant",
+ }
+ }
+ pub fn to_ord(&self) -> ast::ParamKindOrd {
+ match self {
+ GenericParamDefKind::Lifetime => ast::ParamKindOrd::Lifetime,
+ GenericParamDefKind::Type { .. } => ast::ParamKindOrd::Type,
+ GenericParamDefKind::Const { .. } => ast::ParamKindOrd::Const,
+ }
+ }
+
+ pub fn is_ty_or_const(&self) -> bool {
+ match self {
+ GenericParamDefKind::Lifetime => false,
+ GenericParamDefKind::Type { .. } | GenericParamDefKind::Const { .. } => true,
+ }
+ }
+
+ pub fn is_synthetic(&self) -> bool {
+ match self {
+ GenericParamDefKind::Type { synthetic, .. } => *synthetic,
+ _ => false,
+ }
+ }
+}
+
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable)]
+pub struct GenericParamDef {
+ pub name: Symbol,
+ pub def_id: DefId,
+ pub index: u32,
+
+ /// `pure_wrt_drop`, set by the (unsafe) `#[may_dangle]` attribute
+ /// on generic parameter `'a`/`T`, asserts data behind the parameter
+ /// `'a`/`T` won't be accessed during the parent type's `Drop` impl.
+ pub pure_wrt_drop: bool,
+
+ pub kind: GenericParamDefKind,
+}
+
+impl GenericParamDef {
+ pub fn to_early_bound_region_data(&self) -> ty::EarlyBoundRegion {
+ if let GenericParamDefKind::Lifetime = self.kind {
+ ty::EarlyBoundRegion { def_id: self.def_id, index: self.index, name: self.name }
+ } else {
+ bug!("cannot convert a non-lifetime parameter def to an early bound region")
+ }
+ }
+
+ pub fn has_default(&self) -> bool {
+ match self.kind {
+ GenericParamDefKind::Type { has_default, .. }
+ | GenericParamDefKind::Const { has_default } => has_default,
+ GenericParamDefKind::Lifetime => false,
+ }
+ }
+
+ pub fn default_value<'tcx>(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ ) -> Option<EarlyBinder<ty::GenericArg<'tcx>>> {
+ match self.kind {
+ GenericParamDefKind::Type { has_default, .. } if has_default => {
+ Some(tcx.bound_type_of(self.def_id).map_bound(|t| t.into()))
+ }
+ GenericParamDefKind::Const { has_default } if has_default => {
+ Some(tcx.bound_const_param_default(self.def_id).map_bound(|c| c.into()))
+ }
+ _ => None,
+ }
+ }
+}
+
+#[derive(Default)]
+pub struct GenericParamCount {
+ pub lifetimes: usize,
+ pub types: usize,
+ pub consts: usize,
+}
+
+/// Information about the formal type/lifetime parameters associated
+/// with an item or method. Analogous to `hir::Generics`.
+///
+/// The ordering of parameters is the same as in `Subst` (excluding child generics):
+/// `Self` (optionally), `Lifetime` params..., `Type` params...
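+///
+/// For example (illustrative): for `trait Trait<'a, T>`, the parameters in
+/// order are `Self`, `'a`, `T`; for `impl<'a, T> Foo<'a, T>` they are just
+/// `'a`, `T`, as impls have no `Self` parameter.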
+#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable)]
+pub struct Generics {
+ pub parent: Option<DefId>,
+ pub parent_count: usize,
+ pub params: Vec<GenericParamDef>,
+
+ /// Reverse map to the `index` field of each `GenericParamDef`.
+ #[stable_hasher(ignore)]
+ pub param_def_id_to_index: FxHashMap<DefId, u32>,
+
+ pub has_self: bool,
+ pub has_late_bound_regions: Option<Span>,
+}
+
+impl<'tcx> Generics {
+ #[inline]
+ pub fn count(&self) -> usize {
+ self.parent_count + self.params.len()
+ }
+
+ pub fn own_counts(&self) -> GenericParamCount {
+ // We could cache this as a property of `GenericParamCount`, but
+ // the aim is to refactor this away entirely eventually and the
+ // presence of this method will be a constant reminder.
+ let mut own_counts = GenericParamCount::default();
+
+ for param in &self.params {
+ match param.kind {
+ GenericParamDefKind::Lifetime => own_counts.lifetimes += 1,
+ GenericParamDefKind::Type { .. } => own_counts.types += 1,
+ GenericParamDefKind::Const { .. } => own_counts.consts += 1,
+ }
+ }
+
+ own_counts
+ }
+
+ pub fn own_defaults(&self) -> GenericParamCount {
+ let mut own_defaults = GenericParamCount::default();
+
+ for param in &self.params {
+ match param.kind {
+ GenericParamDefKind::Lifetime => (),
+ GenericParamDefKind::Type { has_default, .. } => {
+ own_defaults.types += has_default as usize;
+ }
+ GenericParamDefKind::Const { has_default } => {
+ own_defaults.consts += has_default as usize;
+ }
+ }
+ }
+
+ own_defaults
+ }
+
+ pub fn requires_monomorphization(&self, tcx: TyCtxt<'tcx>) -> bool {
+ if self.own_requires_monomorphization() {
+ return true;
+ }
+
+ if let Some(parent_def_id) = self.parent {
+ let parent = tcx.generics_of(parent_def_id);
+ parent.requires_monomorphization(tcx)
+ } else {
+ false
+ }
+ }
+
+ pub fn own_requires_monomorphization(&self) -> bool {
+ for param in &self.params {
+ match param.kind {
+ GenericParamDefKind::Type { .. } | GenericParamDefKind::Const { .. } => {
+ return true;
+ }
+ GenericParamDefKind::Lifetime => {}
+ }
+ }
+ false
+ }
+
+ /// Returns the `GenericParamDef` with the given index.
+ pub fn param_at(&'tcx self, param_index: usize, tcx: TyCtxt<'tcx>) -> &'tcx GenericParamDef {
+ if let Some(index) = param_index.checked_sub(self.parent_count) {
+ &self.params[index]
+ } else {
+ tcx.generics_of(self.parent.expect("parent_count > 0 but no parent?"))
+ .param_at(param_index, tcx)
+ }
+ }
+
+ /// Returns the `GenericParamDef` associated with this `EarlyBoundRegion`.
+ pub fn region_param(
+ &'tcx self,
+ param: &EarlyBoundRegion,
+ tcx: TyCtxt<'tcx>,
+ ) -> &'tcx GenericParamDef {
+ let param = self.param_at(param.index as usize, tcx);
+ match param.kind {
+ GenericParamDefKind::Lifetime => param,
+ _ => bug!("expected lifetime parameter, but found another generic parameter"),
+ }
+ }
+
+ /// Returns the `GenericParamDef` associated with this `ParamTy`.
+ pub fn type_param(&'tcx self, param: &ParamTy, tcx: TyCtxt<'tcx>) -> &'tcx GenericParamDef {
+ let param = self.param_at(param.index as usize, tcx);
+ match param.kind {
+ GenericParamDefKind::Type { .. } => param,
+ _ => bug!("expected type parameter, but found another generic parameter"),
+ }
+ }
+
+ /// Returns the `GenericParamDef` associated with this `ParamConst`.
+ pub fn const_param(&'tcx self, param: &ParamConst, tcx: TyCtxt<'tcx>) -> &GenericParamDef {
+ let param = self.param_at(param.index as usize, tcx);
+ match param.kind {
+ GenericParamDefKind::Const { .. } => param,
+ _ => bug!("expected const parameter, but found another generic parameter"),
+ }
+ }
+
+ /// Returns `true` if the generic parameters contain `impl Trait` (i.e., a synthetic type parameter).
+ pub fn has_impl_trait(&'tcx self) -> bool {
+ self.params.iter().any(|param| {
+ matches!(param.kind, ty::GenericParamDefKind::Type { synthetic: true, .. })
+ })
+ }
+
+ /// Returns the substs corresponding to the generic parameters
+ /// of this item, excluding `Self`.
+ ///
+ /// **This should only be used for diagnostic purposes.**
+ pub fn own_substs_no_defaults(
+ &'tcx self,
+ tcx: TyCtxt<'tcx>,
+ substs: &'tcx [ty::GenericArg<'tcx>],
+ ) -> &'tcx [ty::GenericArg<'tcx>] {
+ let mut own_params = self.parent_count..self.count();
+ if self.has_self && self.parent.is_none() {
+ own_params.start = 1;
+ }
+
+ // Filter the default arguments.
+ //
+ // This currently uses structural equality instead
+ // of semantic equivalence. While not ideal, that's
+ // good enough for now as this should only be used
+ // for diagnostics anyway.
+ own_params.end -= self
+ .params
+ .iter()
+ .rev()
+ .take_while(|param| {
+ param.default_value(tcx).map_or(false, |default| {
+ default.subst(tcx, substs) == substs[param.index as usize]
+ })
+ })
+ .count();
+
+ &substs[own_params]
+ }
+
+ /// Returns the substs corresponding to the generic parameters of this item, excluding `Self`.
+ ///
+ /// **This should only be used for diagnostic purposes.**
+ pub fn own_substs(
+ &'tcx self,
+ substs: &'tcx [ty::GenericArg<'tcx>],
+ ) -> &'tcx [ty::GenericArg<'tcx>] {
+ let own = &substs[self.parent_count..][..self.params.len()];
+ if self.has_self && self.parent.is_none() { &own[1..] } else { &own }
+ }
+}
+
+/// Bounds on generics.
+#[derive(Copy, Clone, Default, Debug, TyEncodable, TyDecodable, HashStable)]
+pub struct GenericPredicates<'tcx> {
+ pub parent: Option<DefId>,
+ pub predicates: &'tcx [(Predicate<'tcx>, Span)],
+}
+
+impl<'tcx> GenericPredicates<'tcx> {
+ pub fn instantiate(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ substs: SubstsRef<'tcx>,
+ ) -> InstantiatedPredicates<'tcx> {
+ let mut instantiated = InstantiatedPredicates::empty();
+ self.instantiate_into(tcx, &mut instantiated, substs);
+ instantiated
+ }
+
+ pub fn instantiate_own(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ substs: SubstsRef<'tcx>,
+ ) -> InstantiatedPredicates<'tcx> {
+ InstantiatedPredicates {
+ predicates: self
+ .predicates
+ .iter()
+ .map(|(p, _)| EarlyBinder(*p).subst(tcx, substs))
+ .collect(),
+ spans: self.predicates.iter().map(|(_, sp)| *sp).collect(),
+ }
+ }
+
+ fn instantiate_into(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ instantiated: &mut InstantiatedPredicates<'tcx>,
+ substs: SubstsRef<'tcx>,
+ ) {
+ if let Some(def_id) = self.parent {
+ tcx.predicates_of(def_id).instantiate_into(tcx, instantiated, substs);
+ }
+ instantiated
+ .predicates
+ .extend(self.predicates.iter().map(|(p, _)| EarlyBinder(*p).subst(tcx, substs)));
+ instantiated.spans.extend(self.predicates.iter().map(|(_, sp)| *sp));
+ }
+
+ pub fn instantiate_identity(&self, tcx: TyCtxt<'tcx>) -> InstantiatedPredicates<'tcx> {
+ let mut instantiated = InstantiatedPredicates::empty();
+ self.instantiate_identity_into(tcx, &mut instantiated);
+ instantiated
+ }
+
+ fn instantiate_identity_into(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ instantiated: &mut InstantiatedPredicates<'tcx>,
+ ) {
+ if let Some(def_id) = self.parent {
+ tcx.predicates_of(def_id).instantiate_identity_into(tcx, instantiated);
+ }
+ instantiated.predicates.extend(self.predicates.iter().map(|(p, _)| p));
+ instantiated.spans.extend(self.predicates.iter().map(|(_, s)| s));
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/impls_ty.rs b/compiler/rustc_middle/src/ty/impls_ty.rs
new file mode 100644
index 000000000..cd00b26b8
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/impls_ty.rs
@@ -0,0 +1,135 @@
+//! This module contains `HashStable` implementations for various data types
+//! from `rustc_middle::ty` in no particular order.
+
+use crate::middle::region;
+use crate::mir;
+use crate::ty;
+use crate::ty::fast_reject::SimplifiedType;
+use rustc_data_structures::fingerprint::Fingerprint;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::stable_hasher::HashingControls;
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher, ToStableHashKey};
+use rustc_query_system::ich::StableHashingContext;
+use std::cell::RefCell;
+
+impl<'a, 'tcx, T> HashStable<StableHashingContext<'a>> for &'tcx ty::List<T>
+where
+ T: HashStable<StableHashingContext<'a>>,
+{
+ fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+ thread_local! {
+ static CACHE: RefCell<FxHashMap<(usize, usize, HashingControls), Fingerprint>> =
+ RefCell::new(Default::default());
+ }
+
+ let hash = CACHE.with(|cache| {
+ let key = (self.as_ptr() as usize, self.len(), hcx.hashing_controls());
+ if let Some(&hash) = cache.borrow().get(&key) {
+ return hash;
+ }
+
+ let mut hasher = StableHasher::new();
+ (&self[..]).hash_stable(hcx, &mut hasher);
+
+ let hash: Fingerprint = hasher.finish();
+ cache.borrow_mut().insert(key, hash);
+ hash
+ });
+
+ hash.hash_stable(hcx, hasher);
+ }
+}
+
+impl<'a, 'tcx, T> ToStableHashKey<StableHashingContext<'a>> for &'tcx ty::List<T>
+where
+ T: HashStable<StableHashingContext<'a>>,
+{
+ type KeyType = Fingerprint;
+
+ #[inline]
+ fn to_stable_hash_key(&self, hcx: &StableHashingContext<'a>) -> Fingerprint {
+ let mut hasher = StableHasher::new();
+ let mut hcx: StableHashingContext<'a> = hcx.clone();
+ self.hash_stable(&mut hcx, &mut hasher);
+ hasher.finish()
+ }
+}
+
+impl<'a> ToStableHashKey<StableHashingContext<'a>> for SimplifiedType {
+ type KeyType = Fingerprint;
+
+ #[inline]
+ fn to_stable_hash_key(&self, hcx: &StableHashingContext<'a>) -> Fingerprint {
+ let mut hasher = StableHasher::new();
+ let mut hcx: StableHashingContext<'a> = hcx.clone();
+ self.hash_stable(&mut hcx, &mut hasher);
+ hasher.finish()
+ }
+}
+
+impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for ty::subst::GenericArg<'tcx> {
+ fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+ self.unpack().hash_stable(hcx, hasher);
+ }
+}
+
+impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for ty::subst::GenericArgKind<'tcx> {
+ fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+ match self {
+ // WARNING: We dedup cache the `HashStable` results for `List`
+ // while ignoring types and freely transmute
+ // between `List<Ty<'tcx>>` and `List<GenericArg<'tcx>>`.
+ // See `fn intern_type_list` for more details.
+ //
+ // We therefore hash types without adding a hash for their discriminant.
+ //
+ // In order to make it very unlikely for the sequence of bytes being hashed for
+ // a `GenericArgKind::Type` to be the same as the sequence of bytes being
+ // hashed for one of the other variants, we hash some very high number instead
+ // of their actual discriminant since `TyKind` should never start with anything
+ // that high.
+ ty::subst::GenericArgKind::Type(ty) => ty.hash_stable(hcx, hasher),
+ ty::subst::GenericArgKind::Const(ct) => {
+ 0xF3u8.hash_stable(hcx, hasher);
+ ct.hash_stable(hcx, hasher);
+ }
+ ty::subst::GenericArgKind::Lifetime(lt) => {
+ 0xF5u8.hash_stable(hcx, hasher);
+ lt.hash_stable(hcx, hasher);
+ }
+ }
+ }
+}
+
+// AllocIds get resolved to whatever they point to (to be stable)
+impl<'a> HashStable<StableHashingContext<'a>> for mir::interpret::AllocId {
+ fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+ ty::tls::with_opt(|tcx| {
+ trace!("hashing {:?}", *self);
+ let tcx = tcx.expect("can't hash AllocIds during hir lowering");
+ tcx.try_get_global_alloc(*self).hash_stable(hcx, hasher);
+ });
+ }
+}
+
+// `Relocations` with default type parameters is a sorted map.
+impl<'a, Prov> HashStable<StableHashingContext<'a>> for mir::interpret::Relocations<Prov>
+where
+ Prov: HashStable<StableHashingContext<'a>>,
+{
+ fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+ self.len().hash_stable(hcx, hasher);
+ for reloc in self.iter() {
+ reloc.hash_stable(hcx, hasher);
+ }
+ }
+}
+
+impl<'a> ToStableHashKey<StableHashingContext<'a>> for region::Scope {
+ type KeyType = region::Scope;
+
+ #[inline]
+ fn to_stable_hash_key(&self, _: &StableHashingContext<'a>) -> region::Scope {
+ *self
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/inhabitedness/def_id_forest.rs b/compiler/rustc_middle/src/ty/inhabitedness/def_id_forest.rs
new file mode 100644
index 000000000..c4ad698ba
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/inhabitedness/def_id_forest.rs
@@ -0,0 +1,145 @@
+use crate::ty::context::TyCtxt;
+use crate::ty::{DefId, DefIdTree};
+use rustc_span::def_id::CRATE_DEF_ID;
+use smallvec::SmallVec;
+use std::mem;
+
+use DefIdForest::*;
+
+/// Represents a forest of `DefId`s closed under the ancestor relation. That is,
+/// if a `DefId` representing a module is contained in the forest then all
+/// `DefId`s defined in that module or submodules are also implicitly contained
+/// in the forest.
+///
+/// This is used to represent a set of modules in which a type is visibly
+/// uninhabited.
+///
+/// We store the minimal set of `DefId`s required to represent the whole set. If A and B are
+/// `DefId`s in the `DefIdForest`, and A is a parent of B, then only A will be stored. When this is
+/// used with `type_uninhabited_from`, there will very rarely be more than one `DefId` stored.
+#[derive(Copy, Clone, HashStable, Debug)]
+pub enum DefIdForest<'a> {
+ Empty,
+ Single(DefId),
+ /// This variant is very rare.
+ /// Invariant: >1 elements
+ Multiple(&'a [DefId]),
+}
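+
+// A hedged usage sketch (illustrative only; `tcx`, `mod_a`, and `mod_a_b`
+// are hypothetical `DefId`s, with `mod_a_b` defined inside `mod_a`): since
+// a forest is closed under the ancestor relation, a forest built from
+// `mod_a` also contains everything defined below it.
+//
+// let forest = DefIdForest::from_id(mod_a);
+// assert!(forest.contains(tcx, mod_a_b));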
+
+/// Tests whether a slice of roots contains a given DefId.
+#[inline]
+fn slice_contains<'tcx>(tcx: TyCtxt<'tcx>, slice: &[DefId], id: DefId) -> bool {
+ slice.iter().any(|root_id| tcx.is_descendant_of(id, *root_id))
+}
+
+impl<'tcx> DefIdForest<'tcx> {
+ /// Creates an empty forest.
+ pub fn empty() -> DefIdForest<'tcx> {
+ DefIdForest::Empty
+ }
+
+ /// Creates a forest consisting of a single tree representing the entire
+ /// crate.
+ #[inline]
+ pub fn full() -> DefIdForest<'tcx> {
+ DefIdForest::from_id(CRATE_DEF_ID.to_def_id())
+ }
+
+ /// Creates a forest containing a `DefId` and all its descendants.
+ pub fn from_id(id: DefId) -> DefIdForest<'tcx> {
+ DefIdForest::Single(id)
+ }
+
+ fn as_slice(&self) -> &[DefId] {
+ match self {
+ Empty => &[],
+ Single(id) => std::slice::from_ref(id),
+ Multiple(root_ids) => root_ids,
+ }
+ }
+
+ // Only allocates in the rare `Multiple` case.
+ fn from_vec(tcx: TyCtxt<'tcx>, root_ids: SmallVec<[DefId; 1]>) -> DefIdForest<'tcx> {
+ match &root_ids[..] {
+ [] => Empty,
+ [id] => Single(*id),
+ _ => DefIdForest::Multiple(tcx.arena.alloc_from_iter(root_ids)),
+ }
+ }
+
+ /// Tests whether the forest is empty.
+ pub fn is_empty(&self) -> bool {
+ match self {
+ Empty => true,
+ Single(..) | Multiple(..) => false,
+ }
+ }
+
+ /// Iterate over the set of roots.
+ fn iter(&self) -> impl Iterator<Item = DefId> + '_ {
+ self.as_slice().iter().copied()
+ }
+
+ /// Tests whether the forest contains a given DefId.
+ pub fn contains(&self, tcx: TyCtxt<'tcx>, id: DefId) -> bool {
+ slice_contains(tcx, self.as_slice(), id)
+ }
+
+ /// Calculate the intersection of a collection of forests.
+ pub fn intersection<I>(tcx: TyCtxt<'tcx>, iter: I) -> DefIdForest<'tcx>
+ where
+ I: IntoIterator<Item = DefIdForest<'tcx>>,
+ {
+ let mut iter = iter.into_iter();
+ let mut ret: SmallVec<[_; 1]> = if let Some(first) = iter.next() {
+ SmallVec::from_slice(first.as_slice())
+ } else {
+ return DefIdForest::full();
+ };
+
+ let mut next_ret: SmallVec<[_; 1]> = SmallVec::new();
+ for next_forest in iter {
+ // No need to continue if the intersection is already empty.
+ if ret.is_empty() || next_forest.is_empty() {
+ return DefIdForest::empty();
+ }
+
+ // We keep the elements in `ret` that are also in `next_forest`.
+ next_ret.extend(ret.iter().copied().filter(|&id| next_forest.contains(tcx, id)));
+ // We keep the elements in `next_forest` that are also in `ret`.
+ next_ret.extend(next_forest.iter().filter(|&id| slice_contains(tcx, &ret, id)));
+
+ mem::swap(&mut next_ret, &mut ret);
+ next_ret.clear();
+ }
+ DefIdForest::from_vec(tcx, ret)
+ }
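+
+ // Editor's illustrative sketch (not part of the original patch): with `A`
+ // an ancestor of `B`, intersecting `Single(A)` with `Single(B)` keeps only
+ // `B`, since every module under `B` is also under `A`:
+ //
+ //     intersection([Single(A), Single(B)]) == Single(B)
+ //
+ // An empty input iterator yields `full()`, the identity of intersection.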
+
+ /// Calculates the union of a collection of forests.
+ pub fn union<I>(tcx: TyCtxt<'tcx>, iter: I) -> DefIdForest<'tcx>
+ where
+ I: IntoIterator<Item = DefIdForest<'tcx>>,
+ {
+ let mut ret: SmallVec<[_; 1]> = SmallVec::new();
+ let mut next_ret: SmallVec<[_; 1]> = SmallVec::new();
+ for next_forest in iter {
+ // Union with the empty set is a no-op.
+ if next_forest.is_empty() {
+ continue;
+ }
+
+ // We add everything in `ret` that is not in `next_forest`.
+ next_ret.extend(ret.iter().copied().filter(|&id| !next_forest.contains(tcx, id)));
+ // We add everything in `next_forest` that we haven't added yet.
+ for id in next_forest.iter() {
+ if !slice_contains(tcx, &next_ret, id) {
+ next_ret.push(id);
+ }
+ }
+
+ mem::swap(&mut next_ret, &mut ret);
+ next_ret.clear();
+ }
+ DefIdForest::from_vec(tcx, ret)
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/inhabitedness/mod.rs b/compiler/rustc_middle/src/ty/inhabitedness/mod.rs
new file mode 100644
index 000000000..3d22f5a04
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/inhabitedness/mod.rs
@@ -0,0 +1,234 @@
+pub use self::def_id_forest::DefIdForest;
+
+use crate::ty;
+use crate::ty::context::TyCtxt;
+use crate::ty::{AdtDef, FieldDef, Ty, VariantDef};
+use crate::ty::{AdtKind, Visibility};
+use crate::ty::{DefId, SubstsRef};
+
+use rustc_type_ir::sty::TyKind::*;
+
+mod def_id_forest;
+
+// The methods in this module calculate `DefIdForest`s of modules in which an
+// `AdtDef`/`VariantDef`/`FieldDef` is visibly uninhabited.
+//
+// # Example
+// ```rust
+// enum Void {}
+// mod a {
+// pub mod b {
+// pub struct SecretlyUninhabited {
+// _priv: !,
+// }
+// }
+// }
+//
+// mod c {
+// use super::Void;
+// pub struct AlsoSecretlyUninhabited {
+// _priv: Void,
+// }
+// mod d {
+// }
+// }
+//
+// struct Foo {
+// x: a::b::SecretlyUninhabited,
+// y: c::AlsoSecretlyUninhabited,
+// }
+// ```
+// In this code, the type `Foo` will only be visibly uninhabited inside the
+// modules `b`, `c` and `d`. Calling `uninhabited_from` on `Foo` or its `AdtDef` will
+// return the forest of modules {`b`, `c`->`d`} (represented in a `DefIdForest` by the
+// set {`b`, `c`}).
+//
+// We need this information for pattern-matching on `Foo` or types that contain
+// `Foo`.
+//
+// # Example
+// ```rust
+// let foo_result: Result<T, Foo> = ... ;
+// let Ok(t) = foo_result;
+// ```
+// This code should only compile in modules where the uninhabitedness of `Foo` is
+// visible.
+
+impl<'tcx> TyCtxt<'tcx> {
+ /// Checks whether a type is visibly uninhabited from a particular module.
+ ///
+ /// # Example
+ /// ```
+ /// #![feature(never_type)]
+ /// # fn main() {}
+ /// enum Void {}
+ /// mod a {
+ /// pub mod b {
+ /// pub struct SecretlyUninhabited {
+ /// _priv: !,
+ /// }
+ /// }
+ /// }
+ ///
+ /// mod c {
+ /// use super::Void;
+ /// pub struct AlsoSecretlyUninhabited {
+ /// _priv: Void,
+ /// }
+ /// mod d {
+ /// }
+ /// }
+ ///
+ /// struct Foo {
+ /// x: a::b::SecretlyUninhabited,
+ /// y: c::AlsoSecretlyUninhabited,
+ /// }
+ /// ```
+ /// In this code, the type `Foo` will only be visibly uninhabited inside the
+ /// modules `b`, `c` and `d`. This affects pattern-matching on `Foo` or types that
+ /// contain `Foo`.
+ ///
+ /// # Example
+ /// ```ignore (illustrative)
+ /// let foo_result: Result<T, Foo> = ... ;
+ /// let Ok(t) = foo_result;
+ /// ```
+ /// This code should only compile in modules where the uninhabitedness of `Foo` is
+ /// visible.
+ pub fn is_ty_uninhabited_from(
+ self,
+ module: DefId,
+ ty: Ty<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ ) -> bool {
+ // To check whether this type is uninhabited at all (not just from the
+ // given module), you could check whether the returned forest is empty.
+ // ```
+ // forest.is_empty()
+ // ```
+ ty.uninhabited_from(self, param_env).contains(self, module)
+ }
+}
+
+impl<'tcx> AdtDef<'tcx> {
+ /// Calculates the forest of `DefId`s from which this ADT is visibly uninhabited.
+ fn uninhabited_from(
+ self,
+ tcx: TyCtxt<'tcx>,
+ substs: SubstsRef<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ ) -> DefIdForest<'tcx> {
+ // Non-exhaustive ADTs from other crates are always considered inhabited.
+ if self.is_variant_list_non_exhaustive() && !self.did().is_local() {
+ DefIdForest::empty()
+ } else {
+ DefIdForest::intersection(
+ tcx,
+ self.variants()
+ .iter()
+ .map(|v| v.uninhabited_from(tcx, substs, self.adt_kind(), param_env)),
+ )
+ }
+ }
+}
+
+impl<'tcx> VariantDef {
+ /// Calculates the forest of `DefId`s from which this variant is visibly uninhabited.
+ pub fn uninhabited_from(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ substs: SubstsRef<'tcx>,
+ adt_kind: AdtKind,
+ param_env: ty::ParamEnv<'tcx>,
+ ) -> DefIdForest<'tcx> {
+ let is_enum = match adt_kind {
+ // For now, `union`s are never considered uninhabited.
+ // The precise semantics of inhabitedness with respect to unions is currently undecided.
+ AdtKind::Union => return DefIdForest::empty(),
+ AdtKind::Enum => true,
+ AdtKind::Struct => false,
+ };
+ // Non-exhaustive variants from other crates are always considered inhabited.
+ if self.is_field_list_non_exhaustive() && !self.def_id.is_local() {
+ DefIdForest::empty()
+ } else {
+ DefIdForest::union(
+ tcx,
+ self.fields.iter().map(|f| f.uninhabited_from(tcx, substs, is_enum, param_env)),
+ )
+ }
+ }
+}
+
+impl<'tcx> FieldDef {
+ /// Calculates the forest of `DefId`s from which this field is visibly uninhabited.
+ fn uninhabited_from(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ substs: SubstsRef<'tcx>,
+ is_enum: bool,
+ param_env: ty::ParamEnv<'tcx>,
+ ) -> DefIdForest<'tcx> {
+ let data_uninhabitedness = move || self.ty(tcx, substs).uninhabited_from(tcx, param_env);
+ // FIXME(canndrew): Currently enum fields are (incorrectly) stored with
+ // `Visibility::Invisible` so we need to override `self.vis` if we're
+ // dealing with an enum.
+ if is_enum {
+ data_uninhabitedness()
+ } else {
+ match self.vis {
+ Visibility::Invisible => DefIdForest::empty(),
+ Visibility::Restricted(from) => {
+ let forest = DefIdForest::from_id(from);
+ let iter = Some(forest).into_iter().chain(Some(data_uninhabitedness()));
+ DefIdForest::intersection(tcx, iter)
+ }
+ Visibility::Public => data_uninhabitedness(),
+ }
+ }
+ }
+}
+
+impl<'tcx> Ty<'tcx> {
+ /// Calculates the forest of `DefId`s from which this type is visibly uninhabited.
+ fn uninhabited_from(
+ self,
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ ) -> DefIdForest<'tcx> {
+ tcx.type_uninhabited_from(param_env.and(self))
+ }
+}
+
+// Query provider for `type_uninhabited_from`.
+pub(crate) fn type_uninhabited_from<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
+) -> DefIdForest<'tcx> {
+ let ty = key.value;
+ let param_env = key.param_env;
+ match *ty.kind() {
+ Adt(def, substs) => def.uninhabited_from(tcx, substs, param_env),
+
+ Never => DefIdForest::full(),
+
+ Tuple(ref tys) => {
+ DefIdForest::union(tcx, tys.iter().map(|ty| ty.uninhabited_from(tcx, param_env)))
+ }
+
+ Array(ty, len) => match len.try_eval_usize(tcx, param_env) {
+ Some(0) | None => DefIdForest::empty(),
+ // If the array is definitely non-empty, it's uninhabited if
+ // the type of its elements is uninhabited.
+ Some(1..) => ty.uninhabited_from(tcx, param_env),
+ },
+
+ // References to uninitialised memory are valid for any type, including
+ // uninhabited types, in unsafe code, so we treat all references as
+ // inhabited.
+ // The precise semantics of inhabitedness with respect to references is currently
+ // undecided.
+ Ref(..) => DefIdForest::empty(),
+
+ _ => DefIdForest::empty(),
+ }
+}
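+
+// Worked example (editor's sketch, not part of the original patch): for
+// `enum Void {}` the intersection over zero variants is `full()`, so `Void`
+// is uninhabited everywhere; for the tuple `(u32, Void)` the union of
+// `empty()` (for `u32`) and `full()` (for `Void`) is again `full()`.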
diff --git a/compiler/rustc_middle/src/ty/instance.rs b/compiler/rustc_middle/src/ty/instance.rs
new file mode 100644
index 000000000..53218225d
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/instance.rs
@@ -0,0 +1,746 @@
+use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use crate::ty::print::{FmtPrinter, Printer};
+use crate::ty::subst::{InternalSubsts, Subst};
+use crate::ty::{
+ self, EarlyBinder, SubstsRef, Ty, TyCtxt, TypeFoldable, TypeSuperFoldable, TypeVisitable,
+};
+use rustc_errors::ErrorGuaranteed;
+use rustc_hir::def::Namespace;
+use rustc_hir::def_id::{CrateNum, DefId};
+use rustc_hir::lang_items::LangItem;
+use rustc_macros::HashStable;
+use rustc_middle::ty::normalize_erasing_regions::NormalizationError;
+use rustc_span::Symbol;
+
+use std::fmt;
+
+/// A monomorphized `InstanceDef`.
+///
+/// Monomorphization happens on-the-fly and no monomorphized MIR is ever created. Instead, this type
+/// simply couples a potentially generic `InstanceDef` with some substs, and codegen and const eval
+/// will do all required substitution as they run.
+#[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, TyEncodable, TyDecodable)]
+#[derive(HashStable, Lift)]
+pub struct Instance<'tcx> {
+ pub def: InstanceDef<'tcx>,
+ pub substs: SubstsRef<'tcx>,
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
+#[derive(TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)]
+pub enum InstanceDef<'tcx> {
+ /// A user-defined callable item.
+ ///
+ /// This includes:
+ /// - `fn` items
+ /// - closures
+ /// - generators
+ Item(ty::WithOptConstParam<DefId>),
+
+ /// An intrinsic `fn` item (with `"rust-intrinsic"` or `"platform-intrinsic"` ABI).
+ ///
+ /// Alongside `Virtual`, this is one of only two `InstanceDef`s that do not have their own
+ /// callable MIR.
+ /// Instead, codegen and const eval "magically" evaluate calls to intrinsics purely in the
+ /// caller.
+ Intrinsic(DefId),
+
+ /// `<T as Trait>::method` where `method` receives unsizeable `self: Self` (part of the
+ /// `unsized_locals` feature).
+ ///
+ /// The generated shim will take `Self` via `*mut Self` - conceptually this is `&owned Self` -
+ /// and dereference the argument to call the original function.
+ VTableShim(DefId),
+
+ /// `fn()` pointer where the function itself cannot be turned into a pointer.
+ ///
+ /// One example is `<dyn Trait as Trait>::fn`, where the shim contains
+ /// a virtual call, which codegen supports only via a direct call to the
+ /// `<dyn Trait as Trait>::fn` instance (an `InstanceDef::Virtual`).
+ ///
+ /// Another example is functions annotated with `#[track_caller]`, which
+ /// must have their implicit caller location argument populated for a call.
+ /// Because this is a required part of the function's ABI but can't be tracked
+ /// as a property of the function pointer, we use a single "caller location"
+ /// (the definition of the function itself).
+ ReifyShim(DefId),
+
+ /// `<fn() as FnTrait>::call_*` (generated `FnTrait` implementation for `fn()` pointers).
+ ///
+ /// `DefId` is `FnTrait::call_*`.
+ FnPtrShim(DefId, Ty<'tcx>),
+
+ /// Dynamic dispatch to `<dyn Trait as Trait>::fn`.
+ ///
+ /// This `InstanceDef` does not have callable MIR. Calls to `Virtual` instances must be
+ /// codegen'd as virtual calls through the vtable.
+ ///
+ /// If this is reified to a `fn` pointer, a `ReifyShim` is used (see `ReifyShim` above for more
+ /// details on that).
+ Virtual(DefId, usize),
+
+ /// `<[FnMut closure] as FnOnce>::call_once`.
+ ///
+ /// The `DefId` is the ID of the `call_once` method in `FnOnce`.
+ ClosureOnceShim { call_once: DefId, track_caller: bool },
+
+ /// `core::ptr::drop_in_place::<T>`.
+ ///
+ /// The `DefId` is for `core::ptr::drop_in_place`.
+ /// The `Option<Ty<'tcx>>` is either `Some(T)`, or `None` for empty drop
+ /// glue.
+ DropGlue(DefId, Option<Ty<'tcx>>),
+
+ /// Compiler-generated `<T as Clone>::clone` implementation.
+ ///
+ /// For all types that automatically implement `Copy`, a trivial `Clone` impl is provided too.
+ /// Additionally, arrays, tuples, and closures get a `Clone` shim even if they aren't `Copy`.
+ ///
+ /// The `DefId` is for `Clone::clone`, the `Ty` is the type `T` with the builtin `Clone` impl.
+ CloneShim(DefId, Ty<'tcx>),
+}
+
+impl<'tcx> Instance<'tcx> {
+ /// Returns the `Ty` corresponding to this `Instance`, with generic substitutions applied and
+ /// lifetimes erased, allowing a `ParamEnv` to be specified for use during normalization.
+ pub fn ty(&self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> Ty<'tcx> {
+ let ty = tcx.type_of(self.def.def_id());
+ tcx.subst_and_normalize_erasing_regions(self.substs, param_env, ty)
+ }
+
+ /// Finds a crate that contains a monomorphization of this instance that
+ /// can be linked to from the local crate. A return value of `None` means
+ /// no upstream crate provides such an exported monomorphization.
+ ///
+ /// This method already takes into account the global `-Zshare-generics`
+ /// setting, always returning `None` if `share-generics` is off.
+ pub fn upstream_monomorphization(&self, tcx: TyCtxt<'tcx>) -> Option<CrateNum> {
+ // If we are not in share generics mode, we don't link to upstream
+ // monomorphizations but always instantiate our own internal versions
+ // instead.
+ if !tcx.sess.opts.share_generics() {
+ return None;
+ }
+
+ // If this is an item that is defined in the local crate, no upstream
+ // crate can know about it/provide a monomorphization.
+ if self.def_id().is_local() {
+ return None;
+ }
+
+ // If this is a non-generic instance, it cannot be a shared monomorphization.
+ self.substs.non_erasable_generics().next()?;
+
+ match self.def {
+ InstanceDef::Item(def) => tcx
+ .upstream_monomorphizations_for(def.did)
+ .and_then(|monos| monos.get(&self.substs).cloned()),
+ InstanceDef::DropGlue(_, Some(_)) => tcx.upstream_drop_glue_for(self.substs),
+ _ => None,
+ }
+ }
+}
+
+impl<'tcx> InstanceDef<'tcx> {
+ #[inline]
+ pub fn def_id(self) -> DefId {
+ match self {
+ InstanceDef::Item(def) => def.did,
+ InstanceDef::VTableShim(def_id)
+ | InstanceDef::ReifyShim(def_id)
+ | InstanceDef::FnPtrShim(def_id, _)
+ | InstanceDef::Virtual(def_id, _)
+ | InstanceDef::Intrinsic(def_id)
+ | InstanceDef::ClosureOnceShim { call_once: def_id, track_caller: _ }
+ | InstanceDef::DropGlue(def_id, _)
+ | InstanceDef::CloneShim(def_id, _) => def_id,
+ }
+ }
+
+ /// Returns the `DefId` of instances which might not require codegen locally.
+ pub fn def_id_if_not_guaranteed_local_codegen(self) -> Option<DefId> {
+ match self {
+ ty::InstanceDef::Item(def) => Some(def.did),
+ ty::InstanceDef::DropGlue(def_id, Some(_)) => Some(def_id),
+ InstanceDef::VTableShim(..)
+ | InstanceDef::ReifyShim(..)
+ | InstanceDef::FnPtrShim(..)
+ | InstanceDef::Virtual(..)
+ | InstanceDef::Intrinsic(..)
+ | InstanceDef::ClosureOnceShim { .. }
+ | InstanceDef::DropGlue(..)
+ | InstanceDef::CloneShim(..) => None,
+ }
+ }
+
+ #[inline]
+ pub fn with_opt_param(self) -> ty::WithOptConstParam<DefId> {
+ match self {
+ InstanceDef::Item(def) => def,
+ InstanceDef::VTableShim(def_id)
+ | InstanceDef::ReifyShim(def_id)
+ | InstanceDef::FnPtrShim(def_id, _)
+ | InstanceDef::Virtual(def_id, _)
+ | InstanceDef::Intrinsic(def_id)
+ | InstanceDef::ClosureOnceShim { call_once: def_id, track_caller: _ }
+ | InstanceDef::DropGlue(def_id, _)
+ | InstanceDef::CloneShim(def_id, _) => ty::WithOptConstParam::unknown(def_id),
+ }
+ }
+
+ #[inline]
+ pub fn get_attrs(&self, tcx: TyCtxt<'tcx>, attr: Symbol) -> ty::Attributes<'tcx> {
+ tcx.get_attrs(self.def_id(), attr)
+ }
+
+ /// Returns `true` if the LLVM version of this instance is unconditionally
+ /// marked with `inline`. This implies that a copy of this instance is
+ /// generated in every codegen unit.
+ /// Note that this is only a hint. See the documentation for
+ /// `generates_cgu_internal_copy` for more information.
+ pub fn requires_inline(&self, tcx: TyCtxt<'tcx>) -> bool {
+ use rustc_hir::definitions::DefPathData;
+ let def_id = match *self {
+ ty::InstanceDef::Item(def) => def.did,
+ ty::InstanceDef::DropGlue(_, Some(_)) => return false,
+ _ => return true,
+ };
+ matches!(
+ tcx.def_key(def_id).disambiguated_data.data,
+ DefPathData::Ctor | DefPathData::ClosureExpr
+ )
+ }
+
+ /// Returns `true` if the machine code for this instance is instantiated in
+ /// each codegen unit that references it.
+ /// Note that this is only a hint! The compiler can globally decide to *not*
+ /// do this in order to speed up compilation. CGU-internal copies exist
+ /// only to enable inlining. If inlining is not performed (e.g. at
+ /// `-Copt-level=0`) then the time for generating them is wasted and it's
+ /// better to create a single copy with external linkage.
+ pub fn generates_cgu_internal_copy(&self, tcx: TyCtxt<'tcx>) -> bool {
+ if self.requires_inline(tcx) {
+ return true;
+ }
+ if let ty::InstanceDef::DropGlue(.., Some(ty)) = *self {
+ // Drop glue generally wants to be instantiated at every codegen
+ // unit, but without an #[inline] hint. We should make this
+ // available to normal end-users.
+ if tcx.sess.opts.incremental.is_none() {
+ return true;
+ }
+ // When compiling with incremental, we can generate a *lot* of
+ // codegen units. Including drop glue into all of them has a
+ // considerable compile time cost.
+ //
+ // We include enums without destructors to allow, say, optimizing
+ // drops of `Option::None` before LTO. We also respect the intent of
+ // `#[inline]` on `Drop::drop` implementations.
+ return ty.ty_adt_def().map_or(true, |adt_def| {
+ adt_def.destructor(tcx).map_or_else(
+ || adt_def.is_enum(),
+ |dtor| tcx.codegen_fn_attrs(dtor.did).requests_inline(),
+ )
+ });
+ }
+ tcx.codegen_fn_attrs(self.def_id()).requests_inline()
+ }
+
+ pub fn requires_caller_location(&self, tcx: TyCtxt<'_>) -> bool {
+ match *self {
+ InstanceDef::Item(ty::WithOptConstParam { did: def_id, .. })
+ | InstanceDef::Virtual(def_id, _) => {
+ tcx.body_codegen_attrs(def_id).flags.contains(CodegenFnAttrFlags::TRACK_CALLER)
+ }
+ InstanceDef::ClosureOnceShim { call_once: _, track_caller } => track_caller,
+ _ => false,
+ }
+ }
+
+ /// Returns `true` when the MIR body associated with this instance should be monomorphized
+ /// by its users (e.g. codegen or miri) by substituting the `substs` from `Instance` (see
+ /// `Instance::substs_for_mir_body`).
+ ///
+ /// Otherwise, returns `false` only for some kinds of shims where the construction of the MIR
+ /// body should perform necessary substitutions.
+ pub fn has_polymorphic_mir_body(&self) -> bool {
+ match *self {
+ InstanceDef::CloneShim(..)
+ | InstanceDef::FnPtrShim(..)
+ | InstanceDef::DropGlue(_, Some(_)) => false,
+ InstanceDef::ClosureOnceShim { .. }
+ | InstanceDef::DropGlue(..)
+ | InstanceDef::Item(_)
+ | InstanceDef::Intrinsic(..)
+ | InstanceDef::ReifyShim(..)
+ | InstanceDef::Virtual(..)
+ | InstanceDef::VTableShim(..) => true,
+ }
+ }
+}
+
+impl<'tcx> fmt::Display for Instance<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ ty::tls::with(|tcx| {
+ let substs = tcx.lift(self.substs).expect("could not lift for printing");
+ let s = FmtPrinter::new(tcx, Namespace::ValueNS)
+ .print_def_path(self.def_id(), substs)?
+ .into_buffer();
+ f.write_str(&s)
+ })?;
+
+ match self.def {
+ InstanceDef::Item(_) => Ok(()),
+ InstanceDef::VTableShim(_) => write!(f, " - shim(vtable)"),
+ InstanceDef::ReifyShim(_) => write!(f, " - shim(reify)"),
+ InstanceDef::Intrinsic(_) => write!(f, " - intrinsic"),
+ InstanceDef::Virtual(_, num) => write!(f, " - virtual#{}", num),
+ InstanceDef::FnPtrShim(_, ty) => write!(f, " - shim({})", ty),
+ InstanceDef::ClosureOnceShim { .. } => write!(f, " - shim"),
+ InstanceDef::DropGlue(_, None) => write!(f, " - shim(None)"),
+ InstanceDef::DropGlue(_, Some(ty)) => write!(f, " - shim(Some({}))", ty),
+ InstanceDef::CloneShim(_, ty) => write!(f, " - shim({})", ty),
+ }
+ }
+}
+
+impl<'tcx> Instance<'tcx> {
+ pub fn new(def_id: DefId, substs: SubstsRef<'tcx>) -> Instance<'tcx> {
+ assert!(
+ !substs.has_escaping_bound_vars(),
+ "substs of instance {:?} not normalized for codegen: {:?}",
+ def_id,
+ substs
+ );
+ Instance { def: InstanceDef::Item(ty::WithOptConstParam::unknown(def_id)), substs }
+ }
+
+ pub fn mono(tcx: TyCtxt<'tcx>, def_id: DefId) -> Instance<'tcx> {
+ let substs = InternalSubsts::for_item(tcx, def_id, |param, _| match param.kind {
+ ty::GenericParamDefKind::Lifetime => tcx.lifetimes.re_erased.into(),
+ ty::GenericParamDefKind::Type { .. } => {
+ bug!("Instance::mono: {:?} has type parameters", def_id)
+ }
+ ty::GenericParamDefKind::Const { .. } => {
+ bug!("Instance::mono: {:?} has const parameters", def_id)
+ }
+ });
+
+ Instance::new(def_id, substs)
+ }
+
+ #[inline]
+ pub fn def_id(&self) -> DefId {
+ self.def.def_id()
+ }
+
+ /// Resolves a `(def_id, substs)` pair to an (optional) instance -- most commonly,
+ /// this is used to find the precise code that will run for a trait method invocation,
+ /// if known.
+ ///
+ /// Returns `Ok(None)` if we cannot resolve `Instance` to a specific instance.
+ /// For example, in a context like this,
+ ///
+ /// ```ignore (illustrative)
+ /// fn foo<T: Debug>(t: T) { ... }
+ /// ```
+ ///
+ /// trying to resolve `Debug::fmt` applied to `T` will yield `Ok(None)`, because we do not
+ /// know what code ought to run. (Note that this setting is also affected by the
+ /// `RevealMode` in the parameter environment.)
+ ///
+ /// Presuming that coherence and type-check have succeeded, if this method is invoked
+ /// in a monomorphic context (i.e., like during codegen), then it is guaranteed to return
+ /// `Ok(Some(instance))`.
+ ///
+ /// Returns `Err(ErrorGuaranteed)` when the `Instance` resolution process
+ /// couldn't complete due to errors elsewhere - this is distinct
+ /// from `Ok(None)` to avoid misleading diagnostics when an error
+ /// has already been (or will be) emitted for the original cause.
+ pub fn resolve(
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ def_id: DefId,
+ substs: SubstsRef<'tcx>,
+ ) -> Result<Option<Instance<'tcx>>, ErrorGuaranteed> {
+ Instance::resolve_opt_const_arg(
+ tcx,
+ param_env,
+ ty::WithOptConstParam::unknown(def_id),
+ substs,
+ )
+ }
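+
+ // Usage sketch (editor's illustration, not part of the original patch;
+ // `clone_def_id` is a hypothetical `DefId` naming `Clone::clone`):
+ //
+ //     let instance = Instance::resolve(tcx, param_env, clone_def_id, substs)?;
+ //
+ // In a monomorphic context this is `Ok(Some(..))` pointing at the concrete
+ // impl (or a `CloneShim`), while the generic `T: Debug` case documented
+ // above yields `Ok(None)`.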
+
+ // This should be kept up to date with `resolve`.
+ pub fn resolve_opt_const_arg(
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ def: ty::WithOptConstParam<DefId>,
+ substs: SubstsRef<'tcx>,
+ ) -> Result<Option<Instance<'tcx>>, ErrorGuaranteed> {
+ // All regions in the result of this query are erased, so it's
+ // fine to erase all of the input regions.
+
+ // HACK(eddyb) erase regions in `substs` first, so that `param_env.and(...)`
+ // below is more likely to ignore the bounds in scope (e.g. if the only
+ // generic parameters mentioned by `substs` were lifetime ones).
+ let substs = tcx.erase_regions(substs);
+
+ // FIXME(eddyb) should this always use `param_env.with_reveal_all()`?
+ if let Some((did, param_did)) = def.as_const_arg() {
+ tcx.resolve_instance_of_const_arg(
+ tcx.erase_regions(param_env.and((did, param_did, substs))),
+ )
+ } else {
+ tcx.resolve_instance(tcx.erase_regions(param_env.and((def.did, substs))))
+ }
+ }
+
+ pub fn resolve_for_fn_ptr(
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ def_id: DefId,
+ substs: SubstsRef<'tcx>,
+ ) -> Option<Instance<'tcx>> {
+ debug!("resolve(def_id={:?}, substs={:?})", def_id, substs);
+ // For closures, use `resolve_closure` or `resolve_for_vtable` instead.
+ assert!(!tcx.is_closure(def_id), "Called `resolve_for_fn_ptr` on closure: {:?}", def_id);
+ Instance::resolve(tcx, param_env, def_id, substs).ok().flatten().map(|mut resolved| {
+ match resolved.def {
+ InstanceDef::Item(def) if resolved.def.requires_caller_location(tcx) => {
+ debug!(" => fn pointer created for function with #[track_caller]");
+ resolved.def = InstanceDef::ReifyShim(def.did);
+ }
+ InstanceDef::Virtual(def_id, _) => {
+ debug!(" => fn pointer created for virtual call");
+ resolved.def = InstanceDef::ReifyShim(def_id);
+ }
+ _ => {}
+ }
+
+ resolved
+ })
+ }
+
+ pub fn resolve_for_vtable(
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ def_id: DefId,
+ substs: SubstsRef<'tcx>,
+ ) -> Option<Instance<'tcx>> {
+ debug!("resolve_for_vtable(def_id={:?}, substs={:?})", def_id, substs);
+ let fn_sig = tcx.fn_sig(def_id);
+ let is_vtable_shim = !fn_sig.inputs().skip_binder().is_empty()
+ && fn_sig.input(0).skip_binder().is_param(0)
+ && tcx.generics_of(def_id).has_self;
+ if is_vtable_shim {
+ debug!(" => associated item with unsizeable self: Self");
+ Some(Instance { def: InstanceDef::VTableShim(def_id), substs })
+ } else {
+ Instance::resolve(tcx, param_env, def_id, substs).ok().flatten().map(|mut resolved| {
+ match resolved.def {
+ InstanceDef::Item(def) => {
+ // We need to generate a shim when we cannot guarantee that
+ // the caller of a trait object method will be aware of
+ // `#[track_caller]` - this ensures that the caller
+ // and callee ABI will always match.
+ //
+ // The shim is generated when all of these conditions are met:
+ //
+ // 1) The underlying method expects a caller location parameter
+ // in the ABI
+ if resolved.def.requires_caller_location(tcx)
+ // 2) The caller location parameter comes from having `#[track_caller]`
+ // on the implementation, and *not* on the trait method.
+ && !tcx.should_inherit_track_caller(def.did)
+ // If the method implementation comes from the trait definition itself
+ // (e.g. `trait Foo { #[track_caller] my_fn() { /* impl */ } }`),
+ // then we don't need to generate a shim. This check is needed because
+ // `should_inherit_track_caller` returns `false` if our method
+ // implementation comes from the trait block, and not an impl block
+ && !matches!(
+ tcx.opt_associated_item(def.did),
+ Some(ty::AssocItem {
+ container: ty::AssocItemContainer::TraitContainer,
+ ..
+ })
+ )
+ {
+ if tcx.is_closure(def.did) {
+ debug!(" => vtable fn pointer created for closure with #[track_caller]: {:?} for method {:?} {:?}",
+ def.did, def_id, substs);
+
+ // Create a shim for the `FnOnce/FnMut/Fn` method we are calling
+ // - unlike functions, invoking a closure always goes through a
+ // trait.
+ resolved = Instance { def: InstanceDef::ReifyShim(def_id), substs };
+ } else {
+ debug!(
+ " => vtable fn pointer created for function with #[track_caller]: {:?}", def.did
+ );
+ resolved.def = InstanceDef::ReifyShim(def.did);
+ }
+ }
+ }
+ InstanceDef::Virtual(def_id, _) => {
+ debug!(" => vtable fn pointer created for virtual call");
+ resolved.def = InstanceDef::ReifyShim(def_id);
+ }
+ _ => {}
+ }
+
+ resolved
+ })
+ }
+ }
+
+ pub fn resolve_closure(
+ tcx: TyCtxt<'tcx>,
+ def_id: DefId,
+ substs: ty::SubstsRef<'tcx>,
+ requested_kind: ty::ClosureKind,
+ ) -> Option<Instance<'tcx>> {
+ let actual_kind = substs.as_closure().kind();
+
+ match needs_fn_once_adapter_shim(actual_kind, requested_kind) {
+ Ok(true) => Instance::fn_once_adapter_instance(tcx, def_id, substs),
+ _ => Some(Instance::new(def_id, substs)),
+ }
+ }
+
+ pub fn resolve_drop_in_place(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> ty::Instance<'tcx> {
+ let def_id = tcx.require_lang_item(LangItem::DropInPlace, None);
+ let substs = tcx.intern_substs(&[ty.into()]);
+ Instance::resolve(tcx, ty::ParamEnv::reveal_all(), def_id, substs).unwrap().unwrap()
+ }
+
+ pub fn fn_once_adapter_instance(
+ tcx: TyCtxt<'tcx>,
+ closure_did: DefId,
+ substs: ty::SubstsRef<'tcx>,
+ ) -> Option<Instance<'tcx>> {
+ debug!("fn_once_adapter_shim({:?}, {:?})", closure_did, substs);
+ let fn_once = tcx.require_lang_item(LangItem::FnOnce, None);
+ let call_once = tcx
+ .associated_items(fn_once)
+ .in_definition_order()
+ .find(|it| it.kind == ty::AssocKind::Fn)
+ .unwrap()
+ .def_id;
+ let track_caller =
+ tcx.codegen_fn_attrs(closure_did).flags.contains(CodegenFnAttrFlags::TRACK_CALLER);
+ let def = ty::InstanceDef::ClosureOnceShim { call_once, track_caller };
+
+ let self_ty = tcx.mk_closure(closure_did, substs);
+
+ let sig = substs.as_closure().sig();
+ let sig =
+ tcx.try_normalize_erasing_late_bound_regions(ty::ParamEnv::reveal_all(), sig).ok()?;
+ assert_eq!(sig.inputs().len(), 1);
+ let substs = tcx.mk_substs_trait(self_ty, &[sig.inputs()[0].into()]);
+
+ debug!("fn_once_adapter_shim: self_ty={:?} sig={:?}", self_ty, sig);
+ Some(Instance { def, substs })
+ }
+
+ /// Depending on the kind of `InstanceDef`, the MIR body associated with an
+ /// instance is expressed either in terms of the generic parameters of `self.def_id()` or in
+ /// terms of the types found in the substitution array.
+ /// In the former case, we want to substitute those generic types and replace them with the
+ /// values from the substs when monomorphizing the function body. But in the latter case, we
+ /// don't want to do that substitution, since it has already been done effectively.
+ ///
+ /// This function returns `Some(substs)` in the former case and `None` otherwise -- i.e., if
+ /// this function returns `None`, then the MIR body does not require substitution during
+ /// codegen.
+ fn substs_for_mir_body(&self) -> Option<SubstsRef<'tcx>> {
+ if self.def.has_polymorphic_mir_body() { Some(self.substs) } else { None }
+ }
+
+ pub fn subst_mir<T>(&self, tcx: TyCtxt<'tcx>, v: &T) -> T
+ where
+ T: TypeFoldable<'tcx> + Copy,
+ {
+ if let Some(substs) = self.substs_for_mir_body() {
+ EarlyBinder(*v).subst(tcx, substs)
+ } else {
+ *v
+ }
+ }
+
+ #[inline(always)]
+ pub fn subst_mir_and_normalize_erasing_regions<T>(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ v: T,
+ ) -> T
+ where
+ T: TypeFoldable<'tcx> + Clone,
+ {
+ if let Some(substs) = self.substs_for_mir_body() {
+ tcx.subst_and_normalize_erasing_regions(substs, param_env, v)
+ } else {
+ tcx.normalize_erasing_regions(param_env, v)
+ }
+ }
+
+ #[inline(always)]
+ pub fn try_subst_mir_and_normalize_erasing_regions<T>(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ v: T,
+ ) -> Result<T, NormalizationError<'tcx>>
+ where
+ T: TypeFoldable<'tcx> + Clone,
+ {
+ if let Some(substs) = self.substs_for_mir_body() {
+ tcx.try_subst_and_normalize_erasing_regions(substs, param_env, v)
+ } else {
+ tcx.try_normalize_erasing_regions(param_env, v)
+ }
+ }
+
+ /// Returns a new `Instance` where generic parameters in `instance.substs` are replaced by
+ /// identity parameters if they are determined to be unused in `instance.def`.
+ pub fn polymorphize(self, tcx: TyCtxt<'tcx>) -> Self {
+ debug!("polymorphize: running polymorphization analysis");
+ if !tcx.sess.opts.unstable_opts.polymorphize {
+ return self;
+ }
+
+ let polymorphized_substs = polymorphize(tcx, self.def, self.substs);
+ debug!("polymorphize: self={:?} polymorphized_substs={:?}", self, polymorphized_substs);
+ Self { def: self.def, substs: polymorphized_substs }
+ }
+}
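+
+// Editor's illustrative example (not part of the original patch): given
+//
+//     fn foo<A, B: std::fmt::Debug>(b: B) { println!("{:?}", b) } // `A` unused
+//
+// polymorphization replaces the unused `A` with its identity parameter, so
+// `foo::<u8, u16>` and `foo::<u32, u16>` collapse into one mono item.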
+
+fn polymorphize<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ instance: ty::InstanceDef<'tcx>,
+ substs: SubstsRef<'tcx>,
+) -> SubstsRef<'tcx> {
+ debug!("polymorphize({:?}, {:?})", instance, substs);
+ let unused = tcx.unused_generic_params(instance);
+ debug!("polymorphize: unused={:?}", unused);
+
+ // If this is a closure or generator then we need to handle the case where another closure
+ // from the function is captured as an upvar and hasn't been polymorphized. In this case,
+ // the unpolymorphized upvar closure would result in a polymorphized closure producing
+ // multiple mono items (and eventually symbol clashes).
+ let def_id = instance.def_id();
+ let upvars_ty = if tcx.is_closure(def_id) {
+ Some(substs.as_closure().tupled_upvars_ty())
+ } else if tcx.type_of(def_id).is_generator() {
+ Some(substs.as_generator().tupled_upvars_ty())
+ } else {
+ None
+ };
+ let has_upvars = upvars_ty.map_or(false, |ty| !ty.tuple_fields().is_empty());
+ debug!("polymorphize: upvars_ty={:?} has_upvars={:?}", upvars_ty, has_upvars);
+
+ struct PolymorphizationFolder<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ }
+
+ impl<'tcx> ty::TypeFolder<'tcx> for PolymorphizationFolder<'tcx> {
+ fn tcx<'a>(&'a self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ debug!("fold_ty: ty={:?}", ty);
+ match *ty.kind() {
+ ty::Closure(def_id, substs) => {
+ let polymorphized_substs = polymorphize(
+ self.tcx,
+ ty::InstanceDef::Item(ty::WithOptConstParam::unknown(def_id)),
+ substs,
+ );
+ if substs == polymorphized_substs {
+ ty
+ } else {
+ self.tcx.mk_closure(def_id, polymorphized_substs)
+ }
+ }
+ ty::Generator(def_id, substs, movability) => {
+ let polymorphized_substs = polymorphize(
+ self.tcx,
+ ty::InstanceDef::Item(ty::WithOptConstParam::unknown(def_id)),
+ substs,
+ );
+ if substs == polymorphized_substs {
+ ty
+ } else {
+ self.tcx.mk_generator(def_id, polymorphized_substs, movability)
+ }
+ }
+ _ => ty.super_fold_with(self),
+ }
+ }
+ }
+
+ InternalSubsts::for_item(tcx, def_id, |param, _| {
+ let is_unused = unused.contains(param.index).unwrap_or(false);
+ debug!("polymorphize: param={:?} is_unused={:?}", param, is_unused);
+ match param.kind {
+ // Upvar case: If parameter is a type parameter..
+ ty::GenericParamDefKind::Type { .. } if
+ // ..and has upvars..
+ has_upvars &&
+ // ..and this param has the same type as the tupled upvars..
+ upvars_ty == Some(substs[param.index as usize].expect_ty()) => {
+ // ..then double-check that polymorphization marked it used..
+ debug_assert!(!is_unused);
+ // ..and polymorphize any closures/generators captured as upvars.
+ let upvars_ty = upvars_ty.unwrap();
+ let polymorphized_upvars_ty = upvars_ty.fold_with(
+ &mut PolymorphizationFolder { tcx });
+ debug!("polymorphize: polymorphized_upvars_ty={:?}", polymorphized_upvars_ty);
+ ty::GenericArg::from(polymorphized_upvars_ty)
+ },
+
+ // Simple case: If parameter is a const or type parameter..
+ ty::GenericParamDefKind::Const { .. } | ty::GenericParamDefKind::Type { .. } if
+ // ..and is within range and unused..
+ unused.contains(param.index).unwrap_or(false) =>
+ // ..then use the identity for this parameter.
+ tcx.mk_param_from_def(param),
+
+ // Otherwise, use the parameter as before.
+ _ => substs[param.index as usize],
+ }
+ })
+}
+
+fn needs_fn_once_adapter_shim(
+ actual_closure_kind: ty::ClosureKind,
+ trait_closure_kind: ty::ClosureKind,
+) -> Result<bool, ()> {
+ match (actual_closure_kind, trait_closure_kind) {
+ (ty::ClosureKind::Fn, ty::ClosureKind::Fn)
+ | (ty::ClosureKind::FnMut, ty::ClosureKind::FnMut)
+ | (ty::ClosureKind::FnOnce, ty::ClosureKind::FnOnce) => {
+ // No adapter needed.
+ Ok(false)
+ }
+ (ty::ClosureKind::Fn, ty::ClosureKind::FnMut) => {
+ // The closure fn is a `fn(&self, ...)`. We want a
+ // `fn(&mut self, ...)`. In fact, at codegen time, these are
+ // basically the same thing, so we can use the closure fn unchanged.
+ Ok(false)
+ }
+ (ty::ClosureKind::Fn | ty::ClosureKind::FnMut, ty::ClosureKind::FnOnce) => {
+ // The closure fn is a `fn(&self, ...)` or `fn(&mut
+ // self, ...)`. We want a `fn(self, ...)`. We can produce
+ // this by doing something like:
+ //
+ // fn call_once(self, ...) { call_mut(&self, ...) }
+ // fn call_once(mut self, ...) { call_mut(&mut self, ...) }
+ //
+ // These are both the same at codegen time.
+ Ok(true)
+ }
+ (ty::ClosureKind::FnMut | ty::ClosureKind::FnOnce, _) => Err(()),
+ }
+}
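+
+// Worked example (editor's sketch, not part of the original patch): a closure
+// that only reads its captures implements `Fn`, yet it may be invoked through
+// `FnOnce::call_once`. Then `needs_fn_once_adapter_shim(Fn, FnOnce)` returns
+// `Ok(true)` and `fn_once_adapter_instance` builds the `ClosureOnceShim`
+// whose `call_once(self, args)` forwards to the closure's by-reference call,
+// as sketched in the comments above.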
diff --git a/compiler/rustc_middle/src/ty/layout.rs b/compiler/rustc_middle/src/ty/layout.rs
new file mode 100644
index 000000000..ad78d24e9
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/layout.rs
@@ -0,0 +1,3504 @@
+use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
+use crate::ty::normalize_erasing_regions::NormalizationError;
+use crate::ty::subst::Subst;
+use crate::ty::{self, subst::SubstsRef, EarlyBinder, ReprOptions, Ty, TyCtxt, TypeVisitable};
+use rustc_ast as ast;
+use rustc_attr as attr;
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_hir::lang_items::LangItem;
+use rustc_index::bit_set::BitSet;
+use rustc_index::vec::{Idx, IndexVec};
+use rustc_session::{config::OptLevel, DataTypeKind, FieldInfo, SizeKind, VariantInfo};
+use rustc_span::symbol::Symbol;
+use rustc_span::{Span, DUMMY_SP};
+use rustc_target::abi::call::{
+ ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, Reg, RegKind,
+};
+use rustc_target::abi::*;
+use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy, Target};
+
+use std::cmp;
+use std::fmt;
+use std::iter;
+use std::num::NonZeroUsize;
+use std::ops::Bound;
+
+use rand::{seq::SliceRandom, SeedableRng};
+use rand_xoshiro::Xoshiro128StarStar;
+
+pub fn provide(providers: &mut ty::query::Providers) {
+ *providers =
+ ty::query::Providers { layout_of, fn_abi_of_fn_ptr, fn_abi_of_instance, ..*providers };
+}
+
+pub trait IntegerExt {
+ fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
+ fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
+ fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer;
+ fn from_uint_ty<C: HasDataLayout>(cx: &C, uty: ty::UintTy) -> Integer;
+ fn repr_discr<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ ty: Ty<'tcx>,
+ repr: &ReprOptions,
+ min: i128,
+ max: i128,
+ ) -> (Integer, bool);
+}
+
+impl IntegerExt for Integer {
+ #[inline]
+ fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx> {
+ match (*self, signed) {
+ (I8, false) => tcx.types.u8,
+ (I16, false) => tcx.types.u16,
+ (I32, false) => tcx.types.u32,
+ (I64, false) => tcx.types.u64,
+ (I128, false) => tcx.types.u128,
+ (I8, true) => tcx.types.i8,
+ (I16, true) => tcx.types.i16,
+ (I32, true) => tcx.types.i32,
+ (I64, true) => tcx.types.i64,
+ (I128, true) => tcx.types.i128,
+ }
+ }
+
+ /// Gets the `Integer` type from an `attr::IntType`.
+ fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer {
+ let dl = cx.data_layout();
+
+ match ity {
+ attr::SignedInt(ast::IntTy::I8) | attr::UnsignedInt(ast::UintTy::U8) => I8,
+ attr::SignedInt(ast::IntTy::I16) | attr::UnsignedInt(ast::UintTy::U16) => I16,
+ attr::SignedInt(ast::IntTy::I32) | attr::UnsignedInt(ast::UintTy::U32) => I32,
+ attr::SignedInt(ast::IntTy::I64) | attr::UnsignedInt(ast::UintTy::U64) => I64,
+ attr::SignedInt(ast::IntTy::I128) | attr::UnsignedInt(ast::UintTy::U128) => I128,
+ attr::SignedInt(ast::IntTy::Isize) | attr::UnsignedInt(ast::UintTy::Usize) => {
+ dl.ptr_sized_integer()
+ }
+ }
+ }
+
+ fn from_int_ty<C: HasDataLayout>(cx: &C, ity: ty::IntTy) -> Integer {
+ match ity {
+ ty::IntTy::I8 => I8,
+ ty::IntTy::I16 => I16,
+ ty::IntTy::I32 => I32,
+ ty::IntTy::I64 => I64,
+ ty::IntTy::I128 => I128,
+ ty::IntTy::Isize => cx.data_layout().ptr_sized_integer(),
+ }
+ }
+ fn from_uint_ty<C: HasDataLayout>(cx: &C, ity: ty::UintTy) -> Integer {
+ match ity {
+ ty::UintTy::U8 => I8,
+ ty::UintTy::U16 => I16,
+ ty::UintTy::U32 => I32,
+ ty::UintTy::U64 => I64,
+ ty::UintTy::U128 => I128,
+ ty::UintTy::Usize => cx.data_layout().ptr_sized_integer(),
+ }
+ }
+
+ /// Finds the appropriate Integer type and signedness for the given
+ /// signed discriminant range and `#[repr]` attribute.
+ /// N.B.: `u128` values above `i128::MAX` will be treated as signed, but
+ /// that shouldn't affect anything, other than maybe debuginfo.
+ fn repr_discr<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ ty: Ty<'tcx>,
+ repr: &ReprOptions,
+ min: i128,
+ max: i128,
+ ) -> (Integer, bool) {
+ // Theoretically, negative values could be larger in unsigned representation
+ // than the unsigned representation of the signed minimum. However, if there
+ // are any negative values, the only valid unsigned representation is u128
+ // which can fit all i128 values, so the result remains unaffected.
+ let unsigned_fit = Integer::fit_unsigned(cmp::max(min as u128, max as u128));
+ let signed_fit = cmp::max(Integer::fit_signed(min), Integer::fit_signed(max));
+
+ if let Some(ity) = repr.int {
+ let discr = Integer::from_attr(&tcx, ity);
+ let fit = if ity.is_signed() { signed_fit } else { unsigned_fit };
+ if discr < fit {
+ bug!(
+ "Integer::repr_discr: `#[repr]` hint too small for \
+ discriminant range of enum `{}`",
+ ty
+ )
+ }
+ return (discr, ity.is_signed());
+ }
+
+ let at_least = if repr.c() {
+ // This is usually `I32`; however, it can be different on some platforms,
+ // notably hexagon and arm-none/thumb-none
+ tcx.data_layout().c_enum_min_size
+ } else {
+ // repr(Rust) enums try to be as small as possible
+ I8
+ };
+
+ // If there are no negative values, we can use the unsigned fit.
+ if min >= 0 {
+ (cmp::max(unsigned_fit, at_least), false)
+ } else {
+ (cmp::max(signed_fit, at_least), true)
+ }
+ }
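+
+ // Worked example (editor's sketch, not part of the original patch): for
+ // `enum E { A = -1, B = 100 }` with no `#[repr]` hint, `signed_fit` is `I8`
+ // (both -1 and 100 fit in an `i8`) and `at_least` is `I8`, giving
+ // `(I8, true)`. Adding `#[repr(C)]` raises `at_least` to the target's
+ // `c_enum_min_size` (typically `I32`), giving `(I32, true)`.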
+}
+
+pub trait PrimitiveExt {
+ fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
+ fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
+}
+
+impl PrimitiveExt for Primitive {
+ #[inline]
+ fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
+ match *self {
+ Int(i, signed) => i.to_ty(tcx, signed),
+ F32 => tcx.types.f32,
+ F64 => tcx.types.f64,
+ Pointer => tcx.mk_mut_ptr(tcx.mk_unit()),
+ }
+ }
+
+ /// Returns an *integer* type matching this primitive.
+ /// Useful in particular when dealing with enum discriminants.
+ #[inline]
+ fn to_int_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
+ match *self {
+ Int(i, signed) => i.to_ty(tcx, signed),
+ Pointer => tcx.types.usize,
+ F32 | F64 => bug!("floats do not have an int type"),
+ }
+ }
+}
+
+/// The first half of a fat pointer.
+///
+/// - For a trait object, this is the address of the box.
+/// - For a slice, this is the base address.
+pub const FAT_PTR_ADDR: usize = 0;
+
+/// The second half of a fat pointer.
+///
+/// - For a trait object, this is the address of the vtable.
+/// - For a slice, this is the length.
+pub const FAT_PTR_EXTRA: usize = 1;
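+
+// Editor's illustrative note (not part of the original patch): for a `&[u8]`,
+// word `FAT_PTR_ADDR` holds the data pointer and word `FAT_PTR_EXTRA` holds
+// the length; for a `&dyn Trait`, they hold the data and vtable pointers.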
+
+/// The maximum supported number of lanes in a SIMD vector.
+///
+/// This value is selected based on backend support:
+/// * LLVM does not appear to have a vector width limit.
+/// * Cranelift stores the base-2 log of the lane count in a 4 bit integer.
+pub const MAX_SIMD_LANES: u64 = 1 << 0xF;
+
+#[derive(Copy, Clone, Debug, HashStable, TyEncodable, TyDecodable)]
+pub enum LayoutError<'tcx> {
+ Unknown(Ty<'tcx>),
+ SizeOverflow(Ty<'tcx>),
+ NormalizationFailure(Ty<'tcx>, NormalizationError<'tcx>),
+}
+
+impl<'tcx> fmt::Display for LayoutError<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ LayoutError::Unknown(ty) => write!(f, "the type `{}` has an unknown layout", ty),
+ LayoutError::SizeOverflow(ty) => {
+ write!(f, "values of the type `{}` are too big for the current architecture", ty)
+ }
+ LayoutError::NormalizationFailure(t, e) => write!(
+ f,
+ "unable to determine layout for `{}` because `{}` cannot be normalized",
+ t,
+ e.get_type_for_failure()
+ ),
+ }
+ }
+}
+
+/// Enforce some basic invariants on layouts.
+fn sanity_check_layout<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ layout: &TyAndLayout<'tcx>,
+) {
+ // Type-level uninhabitedness should always imply ABI uninhabitedness.
+ if tcx.conservative_is_privately_uninhabited(param_env.and(layout.ty)) {
+ assert!(layout.abi.is_uninhabited());
+ }
+
+ if layout.size.bytes() % layout.align.abi.bytes() != 0 {
+ bug!("size is not a multiple of align, in the following layout:\n{layout:#?}");
+ }
+
+ if cfg!(debug_assertions) {
+ fn check_layout_abi<'tcx>(tcx: TyCtxt<'tcx>, layout: Layout<'tcx>) {
+ match layout.abi() {
+ Abi::Scalar(scalar) => {
+ // No padding in scalars.
+ assert_eq!(
+ layout.align().abi,
+ scalar.align(&tcx).abi,
+ "alignment mismatch between ABI and layout in {layout:#?}"
+ );
+ assert_eq!(
+ layout.size(),
+ scalar.size(&tcx),
+ "size mismatch between ABI and layout in {layout:#?}"
+ );
+ }
+ Abi::Vector { count, element } => {
+ // No padding in vectors. Alignment can be strengthened, though.
+ assert!(
+ layout.align().abi >= element.align(&tcx).abi,
+ "alignment mismatch between ABI and layout in {layout:#?}"
+ );
+ let size = element.size(&tcx) * count;
+ assert_eq!(
+ layout.size(),
+ size.align_to(tcx.data_layout().vector_align(size).abi),
+ "size mismatch between ABI and layout in {layout:#?}"
+ );
+ }
+ Abi::ScalarPair(scalar1, scalar2) => {
+ // Sanity-check scalar pairs. These are a bit more flexible and support
+ // padding, but we can at least ensure both fields actually fit into the layout
+ // and the alignment requirement has not been weakened.
+ let align1 = scalar1.align(&tcx).abi;
+ let align2 = scalar2.align(&tcx).abi;
+ assert!(
+ layout.align().abi >= cmp::max(align1, align2),
+ "alignment mismatch between ABI and layout in {layout:#?}",
+ );
+ let field2_offset = scalar1.size(&tcx).align_to(align2);
+ assert!(
+ layout.size() >= field2_offset + scalar2.size(&tcx),
+ "size mismatch between ABI and layout in {layout:#?}"
+ );
+ }
+ Abi::Uninhabited | Abi::Aggregate { .. } => {} // Nothing to check.
+ }
+ }
+
+ check_layout_abi(tcx, layout.layout);
+
+ if let Variants::Multiple { variants, .. } = &layout.variants {
+ for variant in variants {
+ check_layout_abi(tcx, *variant);
+ // No nested "multiple".
+ assert!(matches!(variant.variants(), Variants::Single { .. }));
+ // Skip empty variants.
+ if variant.size() == Size::ZERO
+ || variant.fields().count() == 0
+ || variant.abi().is_uninhabited()
+ {
+ // These are never actually accessed anyway, so we can skip them. (Note that
+ // sometimes, variants with fields have size 0, and sometimes, variants without
+ // fields have non-0 size.)
+ continue;
+ }
+ // Variants should be at most as large as the full layout.
+ if variant.size() > layout.size {
+ bug!(
+ "Type with size {} bytes has variant with size {} bytes: {layout:#?}",
+ layout.size.bytes(),
+ variant.size().bytes(),
+ )
+ }
+ // The top-level ABI and the ABI of the variants should be coherent.
+ let abi_coherent = match (layout.abi, variant.abi()) {
+ (Abi::Scalar(..), Abi::Scalar(..)) => true,
+ (Abi::ScalarPair(..), Abi::ScalarPair(..)) => true,
+ (Abi::Uninhabited, _) => true,
+ (Abi::Aggregate { .. }, _) => true,
+ _ => false,
+ };
+ if !abi_coherent {
+ bug!(
+ "Variant ABI is incompatible with top-level ABI:\nvariant={:#?}\nTop-level: {layout:#?}",
+ variant
+ );
+ }
+ }
+ }
+ }
+}
+
+#[instrument(skip(tcx, query), level = "debug")]
+fn layout_of<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
+) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
+ ty::tls::with_related_context(tcx, move |icx| {
+ let (param_env, ty) = query.into_parts();
+ debug!(?ty);
+
+ if !tcx.recursion_limit().value_within_limit(icx.layout_depth) {
+ tcx.sess.fatal(&format!("overflow representing the type `{}`", ty));
+ }
+
+ // Update the ImplicitCtxt to increase the layout_depth
+ let icx = ty::tls::ImplicitCtxt { layout_depth: icx.layout_depth + 1, ..icx.clone() };
+
+ ty::tls::enter_context(&icx, |_| {
+ let param_env = param_env.with_reveal_all_normalized(tcx);
+ let unnormalized_ty = ty;
+
+ // FIXME: We might want to have two different versions of `layout_of`:
+ // One that can be called after typecheck has completed and can use
+ // `normalize_erasing_regions` here and another one that can be called
+ // before typecheck has completed and uses `try_normalize_erasing_regions`.
+ let ty = match tcx.try_normalize_erasing_regions(param_env, ty) {
+ Ok(t) => t,
+ Err(normalization_error) => {
+ return Err(LayoutError::NormalizationFailure(ty, normalization_error));
+ }
+ };
+
+ if ty != unnormalized_ty {
+ // Ensure this layout is also cached for the normalized type.
+ return tcx.layout_of(param_env.and(ty));
+ }
+
+ let cx = LayoutCx { tcx, param_env };
+
+ let layout = cx.layout_of_uncached(ty)?;
+ let layout = TyAndLayout { ty, layout };
+
+ cx.record_layout_for_printing(layout);
+
+ sanity_check_layout(tcx, param_env, &layout);
+
+ Ok(layout)
+ })
+ })
+}
+
+pub struct LayoutCx<'tcx, C> {
+ pub tcx: C,
+ pub param_env: ty::ParamEnv<'tcx>,
+}
+
+#[derive(Copy, Clone, Debug)]
+enum StructKind {
+ /// A tuple, closure, or univariant which cannot be coerced to unsized.
+ AlwaysSized,
+ /// A univariant, the last field of which may be coerced to unsized.
+ MaybeUnsized,
+ /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
+ Prefixed(Size, Align),
+}
+
+// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
+// This is used to go between `memory_index` (source field order to memory order)
+// and `inverse_memory_index` (memory order to source field order).
+// See also `FieldsShape::Arbitrary::memory_index` for more details.
+// FIXME(eddyb) build a better abstraction for permutations, if possible.
+fn invert_mapping(map: &[u32]) -> Vec<u32> {
+ let mut inverse = vec![0; map.len()];
+ for i in 0..map.len() {
+ inverse[map[i] as usize] = i as u32;
+ }
+ inverse
+}
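+
+// Example (editor's sketch, not part of the original patch):
+// `invert_mapping(&[2, 0, 1]) == vec![1, 2, 0]`, since `map[0] == 2` forces
+// `inverse[2] == 0`, `map[1] == 0` forces `inverse[0] == 1`, and so on.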
+
+impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
+ fn scalar_pair(&self, a: Scalar, b: Scalar) -> LayoutS<'tcx> {
+ let dl = self.data_layout();
+ let b_align = b.align(dl);
+ let align = a.align(dl).max(b_align).max(dl.aggregate_align);
+ let b_offset = a.size(dl).align_to(b_align.abi);
+ let size = (b_offset + b.size(dl)).align_to(align.abi);
+
+ // HACK(nox): We iter on `b` and then `a` because `max_by_key`
+ // returns the last maximum.
+ let largest_niche = Niche::from_scalar(dl, b_offset, b)
+ .into_iter()
+ .chain(Niche::from_scalar(dl, Size::ZERO, a))
+ .max_by_key(|niche| niche.available(dl));
+
+ LayoutS {
+ variants: Variants::Single { index: VariantIdx::new(0) },
+ fields: FieldsShape::Arbitrary {
+ offsets: vec![Size::ZERO, b_offset],
+ memory_index: vec![0, 1],
+ },
+ abi: Abi::ScalarPair(a, b),
+ largest_niche,
+ align,
+ size,
+ }
+ }
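+
+ // Worked example (editor's sketch, not part of the original patch): for
+ // `a = u8` (size 1, align 1) and `b = u32` (size 4, align 4),
+ // `b_offset = Size::from_bytes(1).align_to(4) = 4` and
+ // `size = (4 + 4).align_to(4) = 8`, leaving three bytes of padding
+ // between the two scalars.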
+
+ fn univariant_uninterned(
+ &self,
+ ty: Ty<'tcx>,
+ fields: &[TyAndLayout<'_>],
+ repr: &ReprOptions,
+ kind: StructKind,
+ ) -> Result<LayoutS<'tcx>, LayoutError<'tcx>> {
+ let dl = self.data_layout();
+ let pack = repr.pack;
+ if pack.is_some() && repr.align.is_some() {
+ self.tcx.sess.delay_span_bug(DUMMY_SP, "struct cannot be packed and aligned");
+ return Err(LayoutError::Unknown(ty));
+ }
+
+ let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
+
+ let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
+
+ let optimize = !repr.inhibit_struct_field_reordering_opt();
+ if optimize {
+ let end =
+ if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
+ let optimizing = &mut inverse_memory_index[..end];
+ let field_align = |f: &TyAndLayout<'_>| {
+ if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
+ };
+
+ // If `-Z randomize-layout` was enabled for the type definition we can shuffle
+ // the field ordering to try and catch some code making assumptions about layouts
+ // we don't guarantee
+ if repr.can_randomize_type_layout() {
+ // `ReprOptions.layout_seed` is a deterministic seed that we can use to
+ // randomize field ordering with
+ let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed);
+
+ // Shuffle the ordering of the fields
+ optimizing.shuffle(&mut rng);
+
+ // Otherwise we just leave things alone and actually optimize the type's fields
+ } else {
+ match kind {
+ StructKind::AlwaysSized | StructKind::MaybeUnsized => {
+ optimizing.sort_by_key(|&x| {
+ // Place ZSTs first to avoid "interesting offsets",
+ // especially with only one or two non-ZST fields.
+ let f = &fields[x as usize];
+ (!f.is_zst(), cmp::Reverse(field_align(f)))
+ });
+ }
+
+ StructKind::Prefixed(..) => {
+ // Sort in ascending alignment so that the layout stays optimal
+ // regardless of the prefix
+ optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
+ }
+ }
+
+ // FIXME(Kixiron): We can always shuffle fields within a given alignment class
+ // regardless of the status of `-Z randomize-layout`
+ }
+ }
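+
+ // Editor's illustrative example (not part of the original patch): for
+ // `struct S { a: u8, b: u32, c: u16 }` under the default `repr(Rust)`,
+ // sorting by decreasing alignment yields the memory order `b, c, a`
+ // (offsets 0, 4 and 6), avoiding any interior padding.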
+
+ // inverse_memory_index holds field indices by increasing memory offset.
+ // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
+ // We now write field offsets to the corresponding offset slot;
+ // field 5 with offset 0 puts 0 in offsets[5].
+ // At the bottom of this function, we invert `inverse_memory_index` to
+ // produce `memory_index` (see `invert_mapping`).
+
+ let mut sized = true;
+ let mut offsets = vec![Size::ZERO; fields.len()];
+ let mut offset = Size::ZERO;
+ let mut largest_niche = None;
+ let mut largest_niche_available = 0;
+
+ if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
+ let prefix_align =
+ if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
+ align = align.max(AbiAndPrefAlign::new(prefix_align));
+ offset = prefix_size.align_to(prefix_align);
+ }
+
+ for &i in &inverse_memory_index {
+ let field = fields[i as usize];
+ if !sized {
+ self.tcx.sess.delay_span_bug(
+ DUMMY_SP,
+ &format!(
+ "univariant: field #{} of `{}` comes after unsized field",
+ offsets.len(),
+ ty
+ ),
+ );
+ }
+
+ if field.is_unsized() {
+ sized = false;
+ }
+
+ // Invariant: offset < dl.obj_size_bound() <= 1<<61
+ let field_align = if let Some(pack) = pack {
+ field.align.min(AbiAndPrefAlign::new(pack))
+ } else {
+ field.align
+ };
+ offset = offset.align_to(field_align.abi);
+ align = align.max(field_align);
+
+ debug!("univariant offset: {:?} field: {:#?}", offset, field);
+ offsets[i as usize] = offset;
+
+ if let Some(mut niche) = field.largest_niche {
+ let available = niche.available(dl);
+ if available > largest_niche_available {
+ largest_niche_available = available;
+ niche.offset += offset;
+ largest_niche = Some(niche);
+ }
+ }
+
+ offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
+ }
+
+ if let Some(repr_align) = repr.align {
+ align = align.max(AbiAndPrefAlign::new(repr_align));
+ }
+
+ debug!("univariant min_size: {:?}", offset);
+ let min_size = offset;
+
+ // As stated above, inverse_memory_index holds field indices by increasing offset.
+ // This makes it an already-sorted view of the offsets vec.
+ // To invert it, consider:
+ // If field 5 has offset 0, inverse_memory_index[0] is 5, and memory_index[5]
+ // should therefore be 0.
+ // Note: if we didn't optimize, inverse_memory_index is the identity permutation,
+ // so it is already its own inverse.
+
+ let memory_index =
+ if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };
+
+ let size = min_size.align_to(align.abi);
+ let mut abi = Abi::Aggregate { sized };
+
+ // Unpack newtype ABIs and find scalar pairs.
+ if sized && size.bytes() > 0 {
+ // All other fields must be ZSTs.
+ let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
+
+ match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
+ // We have exactly one non-ZST field.
+ (Some((i, field)), None, None) => {
+ // Field fills the struct and it has a scalar or scalar pair ABI.
+ if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
+ {
+ match field.abi {
+ // For plain scalars, or vectors of them, we can't unpack
+ // newtypes for `#[repr(C)]`, as that affects C ABIs.
+ Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
+ abi = field.abi;
+ }
+ // But scalar pairs are Rust-specific and get
+ // treated as aggregates by C ABIs anyway.
+ Abi::ScalarPair(..) => {
+ abi = field.abi;
+ }
+ _ => {}
+ }
+ }
+ }
+
+ // Two non-ZST fields, and they're both scalars.
+ (Some((i, a)), Some((j, b)), None) => {
+ match (a.abi, b.abi) {
+ (Abi::Scalar(a), Abi::Scalar(b)) => {
+ // Order by the memory placement, not source order.
+ let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
+ ((i, a), (j, b))
+ } else {
+ ((j, b), (i, a))
+ };
+ let pair = self.scalar_pair(a, b);
+ let pair_offsets = match pair.fields {
+ FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
+ assert_eq!(memory_index, &[0, 1]);
+ offsets
+ }
+ _ => bug!(),
+ };
+ if offsets[i] == pair_offsets[0]
+ && offsets[j] == pair_offsets[1]
+ && align == pair.align
+ && size == pair.size
+ {
+ // We can use `ScalarPair` only when it matches our
+ // already computed layout (including `#[repr(C)]`).
+ abi = pair.abi;
+ }
+ }
+ _ => {}
+ }
+ }
+
+ _ => {}
+ }
+ }
+
+ if fields.iter().any(|f| f.abi.is_uninhabited()) {
+ abi = Abi::Uninhabited;
+ }
+
+ Ok(LayoutS {
+ variants: Variants::Single { index: VariantIdx::new(0) },
+ fields: FieldsShape::Arbitrary { offsets, memory_index },
+ abi,
+ largest_niche,
+ align,
+ size,
+ })
+ }
+
+ fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
+ let tcx = self.tcx;
+ let param_env = self.param_env;
+ let dl = self.data_layout();
+ let scalar_unit = |value: Primitive| {
+ let size = value.size(dl);
+ assert!(size.bits() <= 128);
+ Scalar::Initialized { value, valid_range: WrappingRange::full(size) }
+ };
+ let scalar =
+ |value: Primitive| tcx.intern_layout(LayoutS::scalar(self, scalar_unit(value)));
+
+ let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
+ Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
+ };
+ debug_assert!(!ty.has_infer_types_or_consts());
+
+ Ok(match *ty.kind() {
+ // Basic scalars.
+ ty::Bool => tcx.intern_layout(LayoutS::scalar(
+ self,
+ Scalar::Initialized {
+ value: Int(I8, false),
+ valid_range: WrappingRange { start: 0, end: 1 },
+ },
+ )),
+ ty::Char => tcx.intern_layout(LayoutS::scalar(
+ self,
+ Scalar::Initialized {
+ value: Int(I32, false),
+ valid_range: WrappingRange { start: 0, end: 0x10FFFF },
+ },
+ )),
+ ty::Int(ity) => scalar(Int(Integer::from_int_ty(dl, ity), true)),
+ ty::Uint(ity) => scalar(Int(Integer::from_uint_ty(dl, ity), false)),
+ ty::Float(fty) => scalar(match fty {
+ ty::FloatTy::F32 => F32,
+ ty::FloatTy::F64 => F64,
+ }),
+ ty::FnPtr(_) => {
+ let mut ptr = scalar_unit(Pointer);
+ ptr.valid_range_mut().start = 1;
+ tcx.intern_layout(LayoutS::scalar(self, ptr))
+ }
+
+ // The never type.
+ ty::Never => tcx.intern_layout(LayoutS {
+ variants: Variants::Single { index: VariantIdx::new(0) },
+ fields: FieldsShape::Primitive,
+ abi: Abi::Uninhabited,
+ largest_niche: None,
+ align: dl.i8_align,
+ size: Size::ZERO,
+ }),
+
+ // Potentially-wide pointers.
+ ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
+ let mut data_ptr = scalar_unit(Pointer);
+ if !ty.is_unsafe_ptr() {
+ data_ptr.valid_range_mut().start = 1;
+ }
+
+ let pointee = tcx.normalize_erasing_regions(param_env, pointee);
+ if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
+ return Ok(tcx.intern_layout(LayoutS::scalar(self, data_ptr)));
+ }
+
+ let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
+ let metadata = match unsized_part.kind() {
+ ty::Foreign(..) => {
+ return Ok(tcx.intern_layout(LayoutS::scalar(self, data_ptr)));
+ }
+ ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
+ ty::Dynamic(..) => {
+ let mut vtable = scalar_unit(Pointer);
+ vtable.valid_range_mut().start = 1;
+ vtable
+ }
+ _ => return Err(LayoutError::Unknown(unsized_part)),
+ };
+
+ // Effectively a (ptr, meta) tuple.
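+                // Illustrative: `&[u8]` is a ScalarPair of (non-null data pointer,
+                // `usize` length), and `&dyn Trait` is a ScalarPair of (non-null
+                // data pointer, non-null vtable pointer).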
+ tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
+ }
+
+ // Arrays and slices.
+ ty::Array(element, mut count) => {
+ if count.has_projections() {
+ count = tcx.normalize_erasing_regions(param_env, count);
+ if count.has_projections() {
+ return Err(LayoutError::Unknown(ty));
+ }
+ }
+
+ let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
+ let element = self.layout_of(element)?;
+ let size =
+ element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;
+
+ let abi =
+ if count != 0 && tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
+ Abi::Uninhabited
+ } else {
+ Abi::Aggregate { sized: true }
+ };
+
+ let largest_niche = if count != 0 { element.largest_niche } else { None };
+
+ tcx.intern_layout(LayoutS {
+ variants: Variants::Single { index: VariantIdx::new(0) },
+ fields: FieldsShape::Array { stride: element.size, count },
+ abi,
+ largest_niche,
+ align: element.align,
+ size,
+ })
+ }
+ ty::Slice(element) => {
+ let element = self.layout_of(element)?;
+ tcx.intern_layout(LayoutS {
+ variants: Variants::Single { index: VariantIdx::new(0) },
+ fields: FieldsShape::Array { stride: element.size, count: 0 },
+ abi: Abi::Aggregate { sized: false },
+ largest_niche: None,
+ align: element.align,
+ size: Size::ZERO,
+ })
+ }
+ ty::Str => tcx.intern_layout(LayoutS {
+ variants: Variants::Single { index: VariantIdx::new(0) },
+ fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
+ abi: Abi::Aggregate { sized: false },
+ largest_niche: None,
+ align: dl.i8_align,
+ size: Size::ZERO,
+ }),
+
+ // Odd unit types.
+ ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
+ ty::Dynamic(..) | ty::Foreign(..) => {
+ let mut unit = self.univariant_uninterned(
+ ty,
+ &[],
+ &ReprOptions::default(),
+ StructKind::AlwaysSized,
+ )?;
+ match unit.abi {
+ Abi::Aggregate { ref mut sized } => *sized = false,
+ _ => bug!(),
+ }
+ tcx.intern_layout(unit)
+ }
+
+ ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,
+
+ ty::Closure(_, ref substs) => {
+ let tys = substs.as_closure().upvar_tys();
+ univariant(
+ &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
+ &ReprOptions::default(),
+ StructKind::AlwaysSized,
+ )?
+ }
+
+ ty::Tuple(tys) => {
+ let kind =
+ if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };
+
+ univariant(
+ &tys.iter().map(|k| self.layout_of(k)).collect::<Result<Vec<_>, _>>()?,
+ &ReprOptions::default(),
+ kind,
+ )?
+ }
+
+ // SIMD vector types.
+ ty::Adt(def, substs) if def.repr().simd() => {
+ if !def.is_struct() {
+ // Should have yielded E0517 by now.
+ tcx.sess.delay_span_bug(
+ DUMMY_SP,
+ "#[repr(simd)] was applied to an ADT that is not a struct",
+ );
+ return Err(LayoutError::Unknown(ty));
+ }
+
+ // Supported SIMD vectors are homogeneous ADTs with at least one field:
+ //
+ // * #[repr(simd)] struct S(T, T, T, T);
+ // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
+ // * #[repr(simd)] struct S([T; 4])
+ //
+ // where T is a primitive scalar (integer/float/pointer).
+
+ // SIMD vectors with zero fields are not supported.
+ // (should be caught by typeck)
+ if def.non_enum_variant().fields.is_empty() {
+ tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
+ }
+
+ // Type of the first ADT field:
+ let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);
+
+ // Heterogeneous SIMD vectors are not supported:
+ // (should be caught by typeck)
+ for fi in &def.non_enum_variant().fields {
+ if fi.ty(tcx, substs) != f0_ty {
+ tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
+ }
+ }
+
+ // The element type and number of elements of the SIMD vector
+ // are obtained from:
+ //
+ // * the element type and length of the single array field, if
+ // the first field is of array type, or
+ //
+                // * the homogeneous field type and the number of fields.
+ let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
+ // First ADT field is an array:
+
+ // SIMD vectors with multiple array fields are not supported:
+ // (should be caught by typeck)
+ if def.non_enum_variant().fields.len() != 1 {
+ tcx.sess.fatal(&format!(
+ "monomorphising SIMD type `{}` with more than one array field",
+ ty
+ ));
+ }
+
+ // Extract the number of elements from the layout of the array field:
+ let FieldsShape::Array { count, .. } = self.layout_of(f0_ty)?.layout.fields() else {
+ return Err(LayoutError::Unknown(ty));
+ };
+
+ (*e_ty, *count, true)
+ } else {
+ // First ADT field is not an array:
+ (f0_ty, def.non_enum_variant().fields.len() as _, false)
+ };
+
+ // SIMD vectors of zero length are not supported.
+ // Additionally, lengths are capped at 2^16 as a fixed maximum backends must
+ // support.
+ //
+ // Can't be caught in typeck if the array length is generic.
+ if e_len == 0 {
+ tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
+ } else if e_len > MAX_SIMD_LANES {
+ tcx.sess.fatal(&format!(
+ "monomorphising SIMD type `{}` of length greater than {}",
+ ty, MAX_SIMD_LANES,
+ ));
+ }
+
+ // Compute the ABI of the element type:
+ let e_ly = self.layout_of(e_ty)?;
+ let Abi::Scalar(e_abi) = e_ly.abi else {
+ // This error isn't caught in typeck, e.g., if
+ // the element type of the vector is generic.
+ tcx.sess.fatal(&format!(
+ "monomorphising SIMD type `{}` with a non-primitive-scalar \
+ (integer/float/pointer) element type `{}`",
+ ty, e_ty
+ ))
+ };
+
+ // Compute the size and alignment of the vector:
+ let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
+ let align = dl.vector_align(size);
+ let size = size.align_to(align.abi);
+
+ // Compute the placement of the vector fields:
+ let fields = if is_array {
+ FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
+ } else {
+ FieldsShape::Array { stride: e_ly.size, count: e_len }
+ };
+
+ tcx.intern_layout(LayoutS {
+ variants: Variants::Single { index: VariantIdx::new(0) },
+ fields,
+ abi: Abi::Vector { element: e_abi, count: e_len },
+ largest_niche: e_ly.largest_niche,
+ size,
+ align,
+ })
+ }
+
+ // ADTs.
+ ty::Adt(def, substs) => {
+ // Cache the field layouts.
+ let variants = def
+ .variants()
+ .iter()
+ .map(|v| {
+ v.fields
+ .iter()
+ .map(|field| self.layout_of(field.ty(tcx, substs)))
+ .collect::<Result<Vec<_>, _>>()
+ })
+ .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
+
+ if def.is_union() {
+ if def.repr().pack.is_some() && def.repr().align.is_some() {
+ self.tcx.sess.delay_span_bug(
+ tcx.def_span(def.did()),
+ "union cannot be packed and aligned",
+ );
+ return Err(LayoutError::Unknown(ty));
+ }
+
+ let mut align =
+ if def.repr().pack.is_some() { dl.i8_align } else { dl.aggregate_align };
+
+ if let Some(repr_align) = def.repr().align {
+ align = align.max(AbiAndPrefAlign::new(repr_align));
+ }
+
+ let optimize = !def.repr().inhibit_union_abi_opt();
+ let mut size = Size::ZERO;
+ let mut abi = Abi::Aggregate { sized: true };
+ let index = VariantIdx::new(0);
+ for field in &variants[index] {
+ assert!(!field.is_unsized());
+ align = align.max(field.align);
+
+ // If all non-ZST fields have the same ABI, forward this ABI
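+                        // Sketch of the effect (illustrative): for `union U { a: bool, b: u8 }`,
+                        // both fields are `Int(I8)` scalars once `to_union()` discards their
+                        // valid ranges, so the union keeps a scalar ABI instead of falling
+                        // back to `Abi::Aggregate`.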
+ if optimize && !field.is_zst() {
+ // Discard valid range information and allow undef
+ let field_abi = match field.abi {
+ Abi::Scalar(x) => Abi::Scalar(x.to_union()),
+ Abi::ScalarPair(x, y) => {
+ Abi::ScalarPair(x.to_union(), y.to_union())
+ }
+ Abi::Vector { element: x, count } => {
+ Abi::Vector { element: x.to_union(), count }
+ }
+ Abi::Uninhabited | Abi::Aggregate { .. } => {
+ Abi::Aggregate { sized: true }
+ }
+ };
+
+ if size == Size::ZERO {
+                            // First non-ZST field: initialize `abi`.
+ abi = field_abi;
+ } else if abi != field_abi {
+                            // Different fields have different ABIs: reset to Aggregate.
+ abi = Abi::Aggregate { sized: true };
+ }
+ }
+
+ size = cmp::max(size, field.size);
+ }
+
+ if let Some(pack) = def.repr().pack {
+ align = align.min(AbiAndPrefAlign::new(pack));
+ }
+
+ return Ok(tcx.intern_layout(LayoutS {
+ variants: Variants::Single { index },
+ fields: FieldsShape::Union(
+ NonZeroUsize::new(variants[index].len())
+ .ok_or(LayoutError::Unknown(ty))?,
+ ),
+ abi,
+ largest_niche: None,
+ align,
+ size: size.align_to(align.abi),
+ }));
+ }
+
+ // A variant is absent if it's uninhabited and only has ZST fields.
+ // Present uninhabited variants only require space for their fields,
+ // but *not* an encoding of the discriminant (e.g., a tag value).
+ // See issue #49298 for more details on the need to leave space
+ // for non-ZST uninhabited data (mostly partial initialization).
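+                // Illustrative: in `enum E { A(u32, !), B }`, variant `A` is uninhabited
+                // but its `u32` field is not a ZST, so `A` is present and keeps space
+                // for its fields, whereas an uninhabited variant with only ZST fields
+                // (e.g. `A(!)`) is absent.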
+ let absent = |fields: &[TyAndLayout<'_>]| {
+ let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
+ let is_zst = fields.iter().all(|f| f.is_zst());
+ uninhabited && is_zst
+ };
+ let (present_first, present_second) = {
+ let mut present_variants = variants
+ .iter_enumerated()
+ .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
+ (present_variants.next(), present_variants.next())
+ };
+ let present_first = match present_first {
+ Some(present_first) => present_first,
+ // Uninhabited because it has no variants, or only absent ones.
+ None if def.is_enum() => {
+ return Ok(tcx.layout_of(param_env.and(tcx.types.never))?.layout);
+ }
+                    // If it's a struct, still compute a layout so that we can compute the
+                    // field offsets.
+ None => VariantIdx::new(0),
+ };
+
+ let is_struct = !def.is_enum() ||
+ // Only one variant is present.
+ (present_second.is_none() &&
+ // Representation optimizations are allowed.
+ !def.repr().inhibit_enum_layout_opt());
+ if is_struct {
+ // Struct, or univariant enum equivalent to a struct.
+ // (Typechecking will reject discriminant-sizing attrs.)
+
+ let v = present_first;
+ let kind = if def.is_enum() || variants[v].is_empty() {
+ StructKind::AlwaysSized
+ } else {
+ let param_env = tcx.param_env(def.did());
+ let last_field = def.variant(v).fields.last().unwrap();
+ let always_sized =
+ tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
+ if !always_sized {
+ StructKind::MaybeUnsized
+ } else {
+ StructKind::AlwaysSized
+ }
+ };
+
+ let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr(), kind)?;
+ st.variants = Variants::Single { index: v };
+
+ if def.is_unsafe_cell() {
+ let hide_niches = |scalar: &mut _| match scalar {
+ Scalar::Initialized { value, valid_range } => {
+ *valid_range = WrappingRange::full(value.size(dl))
+ }
+ // Already doesn't have any niches
+ Scalar::Union { .. } => {}
+ };
+ match &mut st.abi {
+ Abi::Uninhabited => {}
+ Abi::Scalar(scalar) => hide_niches(scalar),
+ Abi::ScalarPair(a, b) => {
+ hide_niches(a);
+ hide_niches(b);
+ }
+ Abi::Vector { element, count: _ } => hide_niches(element),
+ Abi::Aggregate { sized: _ } => {}
+ }
+ st.largest_niche = None;
+ return Ok(tcx.intern_layout(st));
+ }
+
+ let (start, end) = self.tcx.layout_scalar_valid_range(def.did());
+ match st.abi {
+ Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
+                            // The asserts ensure that we are not using the
+                            // `#[rustc_layout_scalar_valid_range(n)]`
+                            // attribute to widen the range of anything, as that would
+                            // probably result in UB somewhere.
+ // FIXME(eddyb) the asserts are probably not needed,
+ // as larger validity ranges would result in missed
+ // optimizations, *not* wrongly assuming the inner
+ // value is valid. e.g. unions enlarge validity ranges,
+ // because the values may be uninitialized.
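+                            // Illustrative: `core::num::NonZeroU8` is declared with
+                            // `#[rustc_layout_scalar_valid_range_start(1)]`, so here `start`
+                            // is `Bound::Included(1)`, narrowing the scalar's valid range to
+                            // 1..=255 and creating a niche at 0.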
+ if let Bound::Included(start) = start {
+ // FIXME(eddyb) this might be incorrect - it doesn't
+ // account for wrap-around (end < start) ranges.
+ let valid_range = scalar.valid_range_mut();
+ assert!(valid_range.start <= start);
+ valid_range.start = start;
+ }
+ if let Bound::Included(end) = end {
+ // FIXME(eddyb) this might be incorrect - it doesn't
+ // account for wrap-around (end < start) ranges.
+ let valid_range = scalar.valid_range_mut();
+ assert!(valid_range.end >= end);
+ valid_range.end = end;
+ }
+
+ // Update `largest_niche` if we have introduced a larger niche.
+ let niche = Niche::from_scalar(dl, Size::ZERO, *scalar);
+ if let Some(niche) = niche {
+ match st.largest_niche {
+ Some(largest_niche) => {
+ // Replace the existing niche even if they're equal,
+ // because this one is at a lower offset.
+ if largest_niche.available(dl) <= niche.available(dl) {
+ st.largest_niche = Some(niche);
+ }
+ }
+ None => st.largest_niche = Some(niche),
+ }
+ }
+ }
+ _ => assert!(
+ start == Bound::Unbounded && end == Bound::Unbounded,
+ "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
+ def,
+ st,
+ ),
+ }
+
+ return Ok(tcx.intern_layout(st));
+ }
+
+ // At this point, we have handled all unions and
+ // structs. (We have also handled univariant enums
+ // that allow representation optimization.)
+ assert!(def.is_enum());
+
+ // The current code for niche-filling relies on variant indices
+ // instead of actual discriminants, so dataful enums with
+ // explicit discriminants (RFC #2363) would misbehave.
+ let no_explicit_discriminants = def
+ .variants()
+ .iter_enumerated()
+ .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32()));
+
+ let mut niche_filling_layout = None;
+
+ // Niche-filling enum optimization.
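+                // Illustrative: in `Option<&T>`, `Some` is the dataful variant; the
+                // reference's non-null niche encodes `None` as the all-zeros bit
+                // pattern, so the enum stays pointer-sized with no separate tag field.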
+ if !def.repr().inhibit_enum_layout_opt() && no_explicit_discriminants {
+ let mut dataful_variant = None;
+ let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0);
+
+ // Find one non-ZST variant.
+ 'variants: for (v, fields) in variants.iter_enumerated() {
+ if absent(fields) {
+ continue 'variants;
+ }
+ for f in fields {
+ if !f.is_zst() {
+ if dataful_variant.is_none() {
+ dataful_variant = Some(v);
+ continue 'variants;
+ } else {
+ dataful_variant = None;
+ break 'variants;
+ }
+ }
+ }
+ niche_variants = *niche_variants.start().min(&v)..=v;
+ }
+
+ if niche_variants.start() > niche_variants.end() {
+ dataful_variant = None;
+ }
+
+ if let Some(i) = dataful_variant {
+ let count = (niche_variants.end().as_u32()
+ - niche_variants.start().as_u32()
+ + 1) as u128;
+
+ // Find the field with the largest niche
+ let niche_candidate = variants[i]
+ .iter()
+ .enumerate()
+ .filter_map(|(j, field)| Some((j, field.largest_niche?)))
+ .max_by_key(|(_, niche)| niche.available(dl));
+
+ if let Some((field_index, niche, (niche_start, niche_scalar))) =
+ niche_candidate.and_then(|(field_index, niche)| {
+ Some((field_index, niche, niche.reserve(self, count)?))
+ })
+ {
+ let mut align = dl.aggregate_align;
+ let st = variants
+ .iter_enumerated()
+ .map(|(j, v)| {
+ let mut st = self.univariant_uninterned(
+ ty,
+ v,
+ &def.repr(),
+ StructKind::AlwaysSized,
+ )?;
+ st.variants = Variants::Single { index: j };
+
+ align = align.max(st.align);
+
+ Ok(tcx.intern_layout(st))
+ })
+ .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
+
+ let offset = st[i].fields().offset(field_index) + niche.offset;
+
+ // Align the total size to the largest alignment.
+ let size = st[i].size().align_to(align.abi);
+
+ let abi = if st.iter().all(|v| v.abi().is_uninhabited()) {
+ Abi::Uninhabited
+ } else if align == st[i].align() && size == st[i].size() {
+ // When the total alignment and size match, we can use the
+ // same ABI as the scalar variant with the reserved niche.
+ match st[i].abi() {
+ Abi::Scalar(_) => Abi::Scalar(niche_scalar),
+ Abi::ScalarPair(first, second) => {
+ // Only the niche is guaranteed to be initialised,
+ // so use union layout for the other primitive.
+ if offset.bytes() == 0 {
+ Abi::ScalarPair(niche_scalar, second.to_union())
+ } else {
+ Abi::ScalarPair(first.to_union(), niche_scalar)
+ }
+ }
+ _ => Abi::Aggregate { sized: true },
+ }
+ } else {
+ Abi::Aggregate { sized: true }
+ };
+
+ let largest_niche = Niche::from_scalar(dl, offset, niche_scalar);
+
+ niche_filling_layout = Some(LayoutS {
+ variants: Variants::Multiple {
+ tag: niche_scalar,
+ tag_encoding: TagEncoding::Niche {
+ dataful_variant: i,
+ niche_variants,
+ niche_start,
+ },
+ tag_field: 0,
+ variants: st,
+ },
+ fields: FieldsShape::Arbitrary {
+ offsets: vec![offset],
+ memory_index: vec![0],
+ },
+ abi,
+ largest_niche,
+ size,
+ align,
+ });
+ }
+ }
+ }
+
+ let (mut min, mut max) = (i128::MAX, i128::MIN);
+ let discr_type = def.repr().discr_type();
+ let bits = Integer::from_attr(self, discr_type).size().bits();
+ for (i, discr) in def.discriminants(tcx) {
+ if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
+ continue;
+ }
+ let mut x = discr.val as i128;
+ if discr_type.is_signed() {
+ // sign extend the raw representation to be an i128
+ x = (x << (128 - bits)) >> (128 - bits);
+ }
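+                    // Worked example (illustrative): for a `repr(i8)` discriminant of -1,
+                    // `discr.val` is 0xFF, and `(0xFF << 120) >> 120` on an i128
+                    // (arithmetic shift) sign-extends it back to -1.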
+ if x < min {
+ min = x;
+ }
+ if x > max {
+ max = x;
+ }
+ }
+ // We might have no inhabited variants, so pretend there's at least one.
+ if (min, max) == (i128::MAX, i128::MIN) {
+ min = 0;
+ max = 0;
+ }
+ assert!(min <= max, "discriminant range is {}...{}", min, max);
+ let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr(), min, max);
+
+ let mut align = dl.aggregate_align;
+ let mut size = Size::ZERO;
+
+ // We're interested in the smallest alignment, so start large.
+ let mut start_align = Align::from_bytes(256).unwrap();
+ assert_eq!(Integer::for_align(dl, start_align), None);
+
+ // repr(C) on an enum tells us to make a (tag, union) layout,
+ // so we need to grow the prefix alignment to be at least
+ // the alignment of the union. (This value is used both for
+                // determining the alignment of the overall enum, and for
+                // determining the alignment of the payload after the tag.)
+ let mut prefix_align = min_ity.align(dl).abi;
+ if def.repr().c() {
+ for fields in &variants {
+ for field in fields {
+ prefix_align = prefix_align.max(field.align.abi);
+ }
+ }
+ }
+
+ // Create the set of structs that represent each variant.
+ let mut layout_variants = variants
+ .iter_enumerated()
+ .map(|(i, field_layouts)| {
+ let mut st = self.univariant_uninterned(
+ ty,
+ &field_layouts,
+ &def.repr(),
+ StructKind::Prefixed(min_ity.size(), prefix_align),
+ )?;
+ st.variants = Variants::Single { index: i };
+ // Find the first field we can't move later
+ // to make room for a larger discriminant.
+ for field in
+ st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
+ {
+ if !field.is_zst() || field.align.abi.bytes() != 1 {
+ start_align = start_align.min(field.align.abi);
+ break;
+ }
+ }
+ size = cmp::max(size, st.size);
+ align = align.max(st.align);
+ Ok(st)
+ })
+ .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
+
+ // Align the maximum variant size to the largest alignment.
+ size = size.align_to(align.abi);
+
+ if size.bytes() >= dl.obj_size_bound() {
+ return Err(LayoutError::SizeOverflow(ty));
+ }
+
+ let typeck_ity = Integer::from_attr(dl, def.repr().discr_type());
+ if typeck_ity < min_ity {
+                    // It is a bug if layout decided on a greater discriminant size than
+                    // typeck did at this point (based on the values the discriminant can
+                    // take on), mostly because this discriminant will be loaded and then
+                    // stored into a variable of the type computed by typeck. Consider such
+                    // a case (a bug): typeck decided on a byte-sized discriminant, but
+                    // layout thinks we need 16 bits to store all discriminant values.
+                    // Then, in codegen, storing this 16-bit discriminant into an 8-bit
+                    // temporary would discard some of the bits needed to represent it
+                    // (or layout is wrong in thinking it needs 16 bits).
+ bug!(
+ "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
+ min_ity,
+ typeck_ity
+ );
+                    // However, it is fine to make the discriminant type larger than
+                    // strictly necessary (as an optimisation) after this point; we'll just
+                    // truncate the value we load in codegen.
+ }
+
+ // Check to see if we should use a different type for the
+ // discriminant. We can safely use a type with the same size
+ // as the alignment of the first field of each variant.
+ // We increase the size of the discriminant to avoid LLVM copying
+ // padding when it doesn't need to. This normally causes unaligned
+ // load/stores and excessive memcpy/memset operations. By using a
+ // bigger integer size, LLVM can be sure about its contents and
+ // won't be so conservative.
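+                // Illustrative: in `enum E { A(u64), B }` with the default repr, the tag
+                // fits in one byte, but the first field of `A` is 8-byte aligned, so the
+                // tag may be widened to a 64-bit integer and the field offsets patched
+                // up accordingly below.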
+
+ // Use the initial field alignment
+ let mut ity = if def.repr().c() || def.repr().int.is_some() {
+ min_ity
+ } else {
+ Integer::for_align(dl, start_align).unwrap_or(min_ity)
+ };
+
+ // If the alignment is not larger than the chosen discriminant size,
+ // don't use the alignment as the final size.
+ if ity <= min_ity {
+ ity = min_ity;
+ } else {
+ // Patch up the variants' first few fields.
+ let old_ity_size = min_ity.size();
+ let new_ity_size = ity.size();
+ for variant in &mut layout_variants {
+ match variant.fields {
+ FieldsShape::Arbitrary { ref mut offsets, .. } => {
+ for i in offsets {
+ if *i <= old_ity_size {
+ assert_eq!(*i, old_ity_size);
+ *i = new_ity_size;
+ }
+ }
+ // We might be making the struct larger.
+ if variant.size <= old_ity_size {
+ variant.size = new_ity_size;
+ }
+ }
+ _ => bug!(),
+ }
+ }
+ }
+
+ let tag_mask = ity.size().unsigned_int_max();
+ let tag = Scalar::Initialized {
+ value: Int(ity, signed),
+ valid_range: WrappingRange {
+ start: (min as u128 & tag_mask),
+ end: (max as u128 & tag_mask),
+ },
+ };
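+                // Worked example (illustrative): with `repr(i8)` discriminants -1..=1,
+                // `ity` is `I8`, `tag_mask` is 0xFF, and the valid range becomes the
+                // wrapping range 0xFF..=0x01.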
+ let mut abi = Abi::Aggregate { sized: true };
+
+ if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
+ abi = Abi::Uninhabited;
+ } else if tag.size(dl) == size {
+ // Make sure we only use scalar layout when the enum is entirely its
+ // own tag (i.e. it has no padding nor any non-ZST variant fields).
+ abi = Abi::Scalar(tag);
+ } else {
+ // Try to use a ScalarPair for all tagged enums.
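+                    // Illustrative: `enum E { A(u32), B(u32) }` has the same `u32`
+                    // primitive at the same offset in every inhabited variant, so it can
+                    // be represented as `ScalarPair(tag, u32)` rather than an opaque
+                    // aggregate.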
+ let mut common_prim = None;
+ let mut common_prim_initialized_in_all_variants = true;
+ for (field_layouts, layout_variant) in iter::zip(&variants, &layout_variants) {
+ let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
+ bug!();
+ };
+ let mut fields =
+ iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
+ let (field, offset) = match (fields.next(), fields.next()) {
+ (None, None) => {
+ common_prim_initialized_in_all_variants = false;
+ continue;
+ }
+ (Some(pair), None) => pair,
+ _ => {
+ common_prim = None;
+ break;
+ }
+ };
+ let prim = match field.abi {
+ Abi::Scalar(scalar) => {
+ common_prim_initialized_in_all_variants &=
+ matches!(scalar, Scalar::Initialized { .. });
+ scalar.primitive()
+ }
+ _ => {
+ common_prim = None;
+ break;
+ }
+ };
+ if let Some(pair) = common_prim {
+ // This is pretty conservative. We could go fancier
+ // by conflating things like i32 and u32, or even
+ // realising that (u8, u8) could just cohabit with
+ // u16 or even u32.
+ if pair != (prim, offset) {
+ common_prim = None;
+ break;
+ }
+ } else {
+ common_prim = Some((prim, offset));
+ }
+ }
+ if let Some((prim, offset)) = common_prim {
+ let prim_scalar = if common_prim_initialized_in_all_variants {
+ scalar_unit(prim)
+ } else {
+ // Common prim might be uninit.
+ Scalar::Union { value: prim }
+ };
+ let pair = self.scalar_pair(tag, prim_scalar);
+ let pair_offsets = match pair.fields {
+ FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
+ assert_eq!(memory_index, &[0, 1]);
+ offsets
+ }
+ _ => bug!(),
+ };
+ if pair_offsets[0] == Size::ZERO
+ && pair_offsets[1] == *offset
+ && align == pair.align
+ && size == pair.size
+ {
+ // We can use `ScalarPair` only when it matches our
+ // already computed layout (including `#[repr(C)]`).
+ abi = pair.abi;
+ }
+ }
+ }
+
+ // If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the
+ // variants to ensure they are consistent. This is because a downcast is
+ // semantically a NOP, and thus should not affect layout.
+ if matches!(abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
+ for variant in &mut layout_variants {
+ // We only do this for variants with fields; the others are not accessed anyway.
+ // Also do not overwrite any already existing "clever" ABIs.
+ if variant.fields.count() > 0
+ && matches!(variant.abi, Abi::Aggregate { .. })
+ {
+ variant.abi = abi;
+ // Also need to bump up the size and alignment, so that the entire value fits in here.
+ variant.size = cmp::max(variant.size, size);
+ variant.align.abi = cmp::max(variant.align.abi, align.abi);
+ }
+ }
+ }
+
+ let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);
+
+ let layout_variants =
+ layout_variants.into_iter().map(|v| tcx.intern_layout(v)).collect();
+
+ let tagged_layout = LayoutS {
+ variants: Variants::Multiple {
+ tag,
+ tag_encoding: TagEncoding::Direct,
+ tag_field: 0,
+ variants: layout_variants,
+ },
+ fields: FieldsShape::Arbitrary {
+ offsets: vec![Size::ZERO],
+ memory_index: vec![0],
+ },
+ largest_niche,
+ abi,
+ align,
+ size,
+ };
+
+ let best_layout = match (tagged_layout, niche_filling_layout) {
+ (tagged_layout, Some(niche_filling_layout)) => {
+ // Pick the smaller layout; otherwise,
+ // pick the layout with the larger niche; otherwise,
+ // pick tagged as it has simpler codegen.
+ cmp::min_by_key(tagged_layout, niche_filling_layout, |layout| {
+ let niche_size = layout.largest_niche.map_or(0, |n| n.available(dl));
+ (layout.size, cmp::Reverse(niche_size))
+ })
+ }
+ (tagged_layout, None) => tagged_layout,
+ };
+
+ tcx.intern_layout(best_layout)
+ }
+
+ // Types with no meaningful known layout.
+ ty::Projection(_) | ty::Opaque(..) => {
+ // NOTE(eddyb) `layout_of` query should've normalized these away,
+ // if that was possible, so there's no reason to try again here.
+ return Err(LayoutError::Unknown(ty));
+ }
+
+ ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
+ bug!("Layout::compute: unexpected type `{}`", ty)
+ }
+
+ ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
+ return Err(LayoutError::Unknown(ty));
+ }
+ })
+ }
+}
+
+/// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
+#[derive(Clone, Debug, PartialEq)]
+enum SavedLocalEligibility {
+ Unassigned,
+ Assigned(VariantIdx),
+ // FIXME: Use newtype_index so we aren't wasting bytes
+ Ineligible(Option<u32>),
+}
+
+// When laying out generators, we divide our saved local fields into two
+// categories: overlap-eligible and overlap-ineligible.
+//
+// Those fields which are ineligible for overlap go in a "prefix" at the
+// beginning of the layout, and always have space reserved for them.
+//
+// Overlap-eligible fields are only assigned to one variant, so we lay
+// those fields out for each variant and put them right after the
+// prefix.
+//
+// Finally, in the layout details, we point to the fields from the
+// variants they are assigned to. It is possible for some fields to be
+// included in multiple variants. No field ever "moves around" in the
+// layout; its offset is always the same.
+//
+// Also included in the layout are the upvars and the discriminant.
+// These are included as fields on the "outer" layout; they are not part
+// of any variant.
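+//
+// Illustrative (not from the original source): if local `a` is saved only
+// across the first suspension point and local `b` only across the second,
+// each is assigned to a single variant, and if their storage does not
+// conflict they may share the same offset; a local saved across both
+// suspension points becomes ineligible and is promoted to the prefix.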
+impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
+ /// Compute the eligibility and assignment of each local.
+ fn generator_saved_local_eligibility(
+ &self,
+ info: &GeneratorLayout<'tcx>,
+ ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
+ use SavedLocalEligibility::*;
+
+ let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
+ IndexVec::from_elem_n(Unassigned, info.field_tys.len());
+
+ // The saved locals not eligible for overlap. These will get
+ // "promoted" to the prefix of our generator.
+ let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
+
+ // Figure out which of our saved locals are fields in only
+ // one variant. The rest are deemed ineligible for overlap.
+ for (variant_index, fields) in info.variant_fields.iter_enumerated() {
+ for local in fields {
+ match assignments[*local] {
+ Unassigned => {
+ assignments[*local] = Assigned(variant_index);
+ }
+ Assigned(idx) => {
+ // We've already seen this local at another suspension
+ // point, so it is no longer a candidate.
+ trace!(
+ "removing local {:?} in >1 variant ({:?}, {:?})",
+ local,
+ variant_index,
+ idx
+ );
+ ineligible_locals.insert(*local);
+ assignments[*local] = Ineligible(None);
+ }
+ Ineligible(_) => {}
+ }
+ }
+ }
+
+ // Next, check every pair of eligible locals to see if they
+ // conflict.
+ for local_a in info.storage_conflicts.rows() {
+ let conflicts_a = info.storage_conflicts.count(local_a);
+ if ineligible_locals.contains(local_a) {
+ continue;
+ }
+
+ for local_b in info.storage_conflicts.iter(local_a) {
+ // local_a and local_b are storage live at the same time, therefore they
+ // cannot overlap in the generator layout. The only way to guarantee
+ // this is if they are in the same variant, or one is ineligible
+ // (which means it is stored in every variant).
+ if ineligible_locals.contains(local_b)
+ || assignments[local_a] == assignments[local_b]
+ {
+ continue;
+ }
+
+ // If they conflict, we will choose one to make ineligible.
+ // This is not always optimal; it's just a greedy heuristic that
+ // seems to produce good results most of the time.
+ let conflicts_b = info.storage_conflicts.count(local_b);
+ let (remove, other) =
+ if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
+ ineligible_locals.insert(remove);
+ assignments[remove] = Ineligible(None);
+ trace!("removing local {:?} due to conflict with {:?}", remove, other);
+ }
+ }
+
+        // Count the number of variants in use. If only one variant is used, it is
+        // impossible to overlap any locals in our layout. In this case it's
+ // always better to make the remaining locals ineligible, so we can
+ // lay them out with the other locals in the prefix and eliminate
+ // unnecessary padding bytes.
+ {
+ let mut used_variants = BitSet::new_empty(info.variant_fields.len());
+ for assignment in &assignments {
+ if let Assigned(idx) = assignment {
+ used_variants.insert(*idx);
+ }
+ }
+ if used_variants.count() < 2 {
+ for assignment in assignments.iter_mut() {
+ *assignment = Ineligible(None);
+ }
+ ineligible_locals.insert_all();
+ }
+ }
+
+ // Write down the order of our locals that will be promoted to the prefix.
+ {
+ for (idx, local) in ineligible_locals.iter().enumerate() {
+ assignments[local] = Ineligible(Some(idx as u32));
+ }
+ }
+ debug!("generator saved local assignments: {:?}", assignments);
+
+ (ineligible_locals, assignments)
+ }
+
+ /// Compute the full generator layout.
+ fn generator_layout(
+ &self,
+ ty: Ty<'tcx>,
+ def_id: hir::def_id::DefId,
+ substs: SubstsRef<'tcx>,
+ ) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
+ use SavedLocalEligibility::*;
+ let tcx = self.tcx;
+ let subst_field = |ty: Ty<'tcx>| EarlyBinder(ty).subst(tcx, substs);
+
+ let Some(info) = tcx.generator_layout(def_id) else {
+ return Err(LayoutError::Unknown(ty));
+ };
+ let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
+
+ // Build a prefix layout, including "promoting" all ineligible
+ // locals as part of the prefix. We compute the layout of all of
+ // these fields at once to get optimal packing.
+ let tag_index = substs.as_generator().prefix_tys().count();
+
+ // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
+ let max_discr = (info.variant_fields.len() - 1) as u128;
+ let discr_int = Integer::fit_unsigned(max_discr);
+ let discr_int_ty = discr_int.to_ty(tcx, false);
+ let tag = Scalar::Initialized {
+ value: Primitive::Int(discr_int, false),
+ valid_range: WrappingRange { start: 0, end: max_discr },
+ };
+ let tag_layout = self.tcx.intern_layout(LayoutS::scalar(self, tag));
+ let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
+
+ let promoted_layouts = ineligible_locals
+ .iter()
+ .map(|local| subst_field(info.field_tys[local]))
+ .map(|ty| tcx.mk_maybe_uninit(ty))
+ .map(|ty| self.layout_of(ty));
+ let prefix_layouts = substs
+ .as_generator()
+ .prefix_tys()
+ .map(|ty| self.layout_of(ty))
+ .chain(iter::once(Ok(tag_layout)))
+ .chain(promoted_layouts)
+ .collect::<Result<Vec<_>, _>>()?;
+ let prefix = self.univariant_uninterned(
+ ty,
+ &prefix_layouts,
+ &ReprOptions::default(),
+ StructKind::AlwaysSized,
+ )?;
+
+ let (prefix_size, prefix_align) = (prefix.size, prefix.align);
+
+ // Split the prefix layout into the "outer" fields (upvars and
+ // discriminant) and the "promoted" fields. Promoted fields will
+ // get included in each variant that requested them in
+ // GeneratorLayout.
+ debug!("prefix = {:#?}", prefix);
+ let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
+ FieldsShape::Arbitrary { mut offsets, memory_index } => {
+ let mut inverse_memory_index = invert_mapping(&memory_index);
+
+ // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
+ // "outer" and "promoted" fields respectively.
+ let b_start = (tag_index + 1) as u32;
+ let offsets_b = offsets.split_off(b_start as usize);
+ let offsets_a = offsets;
+
+ // Disentangle the "a" and "b" components of `inverse_memory_index`
+ // by preserving the order but keeping only one disjoint "half" each.
+ // FIXME(eddyb) build a better abstraction for permutations, if possible.
+ let inverse_memory_index_b: Vec<_> =
+ inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
+ inverse_memory_index.retain(|&i| i < b_start);
+ let inverse_memory_index_a = inverse_memory_index;
+
+ // Since `inverse_memory_index_{a,b}` each only refer to their
+ // respective fields, they can be safely inverted
+ let memory_index_a = invert_mapping(&inverse_memory_index_a);
+ let memory_index_b = invert_mapping(&inverse_memory_index_b);
+
+ let outer_fields =
+ FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
+ (outer_fields, offsets_b, memory_index_b)
+ }
+ _ => bug!(),
+ };
+
+ let mut size = prefix.size;
+ let mut align = prefix.align;
+ let variants = info
+ .variant_fields
+ .iter_enumerated()
+ .map(|(index, variant_fields)| {
+ // Only include overlap-eligible fields when we compute our variant layout.
+ let variant_only_tys = variant_fields
+ .iter()
+ .filter(|local| match assignments[**local] {
+ Unassigned => bug!(),
+ Assigned(v) if v == index => true,
+ Assigned(_) => bug!("assignment does not match variant"),
+ Ineligible(_) => false,
+ })
+ .map(|local| subst_field(info.field_tys[*local]));
+
+ let mut variant = self.univariant_uninterned(
+ ty,
+ &variant_only_tys
+ .map(|ty| self.layout_of(ty))
+ .collect::<Result<Vec<_>, _>>()?,
+ &ReprOptions::default(),
+ StructKind::Prefixed(prefix_size, prefix_align.abi),
+ )?;
+ variant.variants = Variants::Single { index };
+
+ let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else {
+ bug!();
+ };
+
+ // Now, stitch the promoted and variant-only fields back together in
+ // the order they are mentioned by our GeneratorLayout.
+ // Because we only use some subset (that can differ between variants)
+ // of the promoted fields, we can't just pick those elements of the
+ // `promoted_memory_index` (as we'd end up with gaps).
+ // So instead, we build an "inverse memory_index", as if all of the
+ // promoted fields were being used, but leave the elements not in the
+ // subset as `INVALID_FIELD_IDX`, which we can filter out later to
+ // obtain a valid (bijective) mapping.
+ const INVALID_FIELD_IDX: u32 = !0;
+ let mut combined_inverse_memory_index =
+ vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
+ let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
+ let combined_offsets = variant_fields
+ .iter()
+ .enumerate()
+ .map(|(i, local)| {
+ let (offset, memory_index) = match assignments[*local] {
+ Unassigned => bug!(),
+ Assigned(_) => {
+ let (offset, memory_index) =
+ offsets_and_memory_index.next().unwrap();
+ (offset, promoted_memory_index.len() as u32 + memory_index)
+ }
+ Ineligible(field_idx) => {
+ let field_idx = field_idx.unwrap() as usize;
+ (promoted_offsets[field_idx], promoted_memory_index[field_idx])
+ }
+ };
+ combined_inverse_memory_index[memory_index as usize] = i as u32;
+ offset
+ })
+ .collect();
+
+ // Remove the unused slots and invert the mapping to obtain the
+ // combined `memory_index` (also see previous comment).
+ combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
+ let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
+
+ variant.fields = FieldsShape::Arbitrary {
+ offsets: combined_offsets,
+ memory_index: combined_memory_index,
+ };
+
+ size = size.max(variant.size);
+ align = align.max(variant.align);
+ Ok(tcx.intern_layout(variant))
+ })
+ .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
+
+ size = size.align_to(align.abi);
+
+ let abi =
+ if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi().is_uninhabited()) {
+ Abi::Uninhabited
+ } else {
+ Abi::Aggregate { sized: true }
+ };
+
+ let layout = tcx.intern_layout(LayoutS {
+ variants: Variants::Multiple {
+ tag,
+ tag_encoding: TagEncoding::Direct,
+ tag_field: tag_index,
+ variants,
+ },
+ fields: outer_fields,
+ abi,
+ largest_niche: prefix.largest_niche,
+ size,
+ align,
+ });
+ debug!("generator layout ({:?}): {:#?}", ty, layout);
+ Ok(layout)
+ }
+
+ /// This is invoked by the `layout_of` query to record the final
+ /// layout of each type.
+ #[inline(always)]
+ fn record_layout_for_printing(&self, layout: TyAndLayout<'tcx>) {
+ // If we are running with `-Zprint-type-sizes`, maybe record layouts
+ // for dumping later.
+ if self.tcx.sess.opts.unstable_opts.print_type_sizes {
+ self.record_layout_for_printing_outlined(layout)
+ }
+ }
+
+ fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
+        // Ignore layouts computed under non-empty parameter environments, as well
+        // as non-monomorphic layouts, as the user only wants to see the layouts
+        // resulting from the final codegen session.
+ if layout.ty.has_param_types_or_consts() || !self.param_env.caller_bounds().is_empty() {
+ return;
+ }
+
+ // (delay format until we actually need it)
+ let record = |kind, packed, opt_discr_size, variants| {
+ let type_desc = format!("{:?}", layout.ty);
+ self.tcx.sess.code_stats.record_type_size(
+ kind,
+ type_desc,
+ layout.align.abi,
+ layout.size,
+ packed,
+ opt_discr_size,
+ variants,
+ );
+ };
+
+ let adt_def = match *layout.ty.kind() {
+ ty::Adt(ref adt_def, _) => {
+ debug!("print-type-size t: `{:?}` process adt", layout.ty);
+ adt_def
+ }
+
+ ty::Closure(..) => {
+ debug!("print-type-size t: `{:?}` record closure", layout.ty);
+ record(DataTypeKind::Closure, false, None, vec![]);
+ return;
+ }
+
+ _ => {
+ debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
+ return;
+ }
+ };
+
+ let adt_kind = adt_def.adt_kind();
+ let adt_packed = adt_def.repr().pack.is_some();
+
+ let build_variant_info = |n: Option<Symbol>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
+ let mut min_size = Size::ZERO;
+ let field_info: Vec<_> = flds
+ .iter()
+ .enumerate()
+ .map(|(i, &name)| {
+ let field_layout = layout.field(self, i);
+ let offset = layout.fields.offset(i);
+ let field_end = offset + field_layout.size;
+ if min_size < field_end {
+ min_size = field_end;
+ }
+ FieldInfo {
+ name,
+ offset: offset.bytes(),
+ size: field_layout.size.bytes(),
+ align: field_layout.align.abi.bytes(),
+ }
+ })
+ .collect();
+
+ VariantInfo {
+ name: n,
+ kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
+ align: layout.align.abi.bytes(),
+ size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
+ fields: field_info,
+ }
+ };
+
+ match layout.variants {
+ Variants::Single { index } => {
+ if !adt_def.variants().is_empty() && layout.fields != FieldsShape::Primitive {
+ debug!(
+ "print-type-size `{:#?}` variant {}",
+ layout,
+ adt_def.variant(index).name
+ );
+ let variant_def = &adt_def.variant(index);
+ let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
+ record(
+ adt_kind.into(),
+ adt_packed,
+ None,
+ vec![build_variant_info(Some(variant_def.name), &fields, layout)],
+ );
+ } else {
+ // (This case arises for *empty* enums; so give it
+ // zero variants.)
+ record(adt_kind.into(), adt_packed, None, vec![]);
+ }
+ }
+
+ Variants::Multiple { tag, ref tag_encoding, .. } => {
+ debug!(
+ "print-type-size `{:#?}` adt general variants def {}",
+ layout.ty,
+ adt_def.variants().len()
+ );
+ let variant_infos: Vec<_> = adt_def
+ .variants()
+ .iter_enumerated()
+ .map(|(i, variant_def)| {
+ let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
+ build_variant_info(
+ Some(variant_def.name),
+ &fields,
+ layout.for_variant(self, i),
+ )
+ })
+ .collect();
+ record(
+ adt_kind.into(),
+ adt_packed,
+ match tag_encoding {
+ TagEncoding::Direct => Some(tag.size(self)),
+ _ => None,
+ },
+ variant_infos,
+ );
+ }
+ }
+ }
+}
+
+/// Type size "skeleton", i.e., the only information determining a type's size.
+/// While this is conservative (aside from constant sizes, only pointers,
+/// newtypes thereof, and null-pointer-optimized enums are allowed), it is
+/// enough to statically check common use cases of `transmute`.
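+///
+/// For example (illustrative), `&T` and `Option<&T>` where `T: ?Sized` is a
+/// type parameter both reduce to `SizeSkeleton::Pointer` with the same `tail`,
+/// so `same_size` can validate a `transmute` between them even though neither
+/// has a statically known layout.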
+#[derive(Copy, Clone, Debug)]
+pub enum SizeSkeleton<'tcx> {
+ /// Any statically computable Layout.
+ Known(Size),
+
+ /// A potentially-fat pointer.
+ Pointer {
+ /// If true, this pointer is never null.
+ non_zero: bool,
+ /// The type which determines the unsized metadata, if any,
+ /// of this pointer. Either a type parameter or a projection
+ /// depending on one, with regions erased.
+ tail: Ty<'tcx>,
+ },
+}
+
+impl<'tcx> SizeSkeleton<'tcx> {
+ pub fn compute(
+ ty: Ty<'tcx>,
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
+ debug_assert!(!ty.has_infer_types_or_consts());
+
+ // First try computing a static layout.
+ let err = match tcx.layout_of(param_env.and(ty)) {
+ Ok(layout) => {
+ return Ok(SizeSkeleton::Known(layout.size));
+ }
+ Err(err) => err,
+ };
+
+ match *ty.kind() {
+ ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
+ let non_zero = !ty.is_unsafe_ptr();
+ let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
+ match tail.kind() {
+ ty::Param(_) | ty::Projection(_) => {
+ debug_assert!(tail.has_param_types_or_consts());
+ Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(tail) })
+ }
+ _ => bug!(
+ "SizeSkeleton::compute({}): layout errored ({}), yet \
+ tail `{}` is not a type parameter or a projection",
+ ty,
+ err,
+ tail
+ ),
+ }
+ }
+
+ ty::Adt(def, substs) => {
+                // Only newtypes and enums with the nullable pointer optimization.
+ if def.is_union() || def.variants().is_empty() || def.variants().len() > 2 {
+ return Err(err);
+ }
+
+ // Get a zero-sized variant or a pointer newtype.
+ let zero_or_ptr_variant = |i| {
+ let i = VariantIdx::new(i);
+ let fields =
+ def.variant(i).fields.iter().map(|field| {
+ SizeSkeleton::compute(field.ty(tcx, substs), tcx, param_env)
+ });
+ let mut ptr = None;
+ for field in fields {
+ let field = field?;
+ match field {
+ SizeSkeleton::Known(size) => {
+ if size.bytes() > 0 {
+ return Err(err);
+ }
+ }
+ SizeSkeleton::Pointer { .. } => {
+ if ptr.is_some() {
+ return Err(err);
+ }
+ ptr = Some(field);
+ }
+ }
+ }
+ Ok(ptr)
+ };
+
+ let v0 = zero_or_ptr_variant(0)?;
+ // Newtype.
+ if def.variants().len() == 1 {
+ if let Some(SizeSkeleton::Pointer { non_zero, tail }) = v0 {
+ return Ok(SizeSkeleton::Pointer {
+ non_zero: non_zero
+ || match tcx.layout_scalar_valid_range(def.did()) {
+ (Bound::Included(start), Bound::Unbounded) => start > 0,
+ (Bound::Included(start), Bound::Included(end)) => {
+ 0 < start && start < end
+ }
+ _ => false,
+ },
+ tail,
+ });
+ } else {
+ return Err(err);
+ }
+ }
+
+ let v1 = zero_or_ptr_variant(1)?;
+ // Nullable pointer enum optimization.
+ match (v0, v1) {
+ (Some(SizeSkeleton::Pointer { non_zero: true, tail }), None)
+ | (None, Some(SizeSkeleton::Pointer { non_zero: true, tail })) => {
+ Ok(SizeSkeleton::Pointer { non_zero: false, tail })
+ }
+ _ => Err(err),
+ }
+ }
+
+ ty::Projection(_) | ty::Opaque(..) => {
+ let normalized = tcx.normalize_erasing_regions(param_env, ty);
+ if ty == normalized {
+ Err(err)
+ } else {
+ SizeSkeleton::compute(normalized, tcx, param_env)
+ }
+ }
+
+ _ => Err(err),
+ }
+ }
+
+ pub fn same_size(self, other: SizeSkeleton<'tcx>) -> bool {
+ match (self, other) {
+ (SizeSkeleton::Known(a), SizeSkeleton::Known(b)) => a == b,
+ (SizeSkeleton::Pointer { tail: a, .. }, SizeSkeleton::Pointer { tail: b, .. }) => {
+ a == b
+ }
+ _ => false,
+ }
+ }
+}
+
+pub trait HasTyCtxt<'tcx>: HasDataLayout {
+ fn tcx(&self) -> TyCtxt<'tcx>;
+}
+
+pub trait HasParamEnv<'tcx> {
+ fn param_env(&self) -> ty::ParamEnv<'tcx>;
+}
+
+impl<'tcx> HasDataLayout for TyCtxt<'tcx> {
+ #[inline]
+ fn data_layout(&self) -> &TargetDataLayout {
+ &self.data_layout
+ }
+}
+
+impl<'tcx> HasTargetSpec for TyCtxt<'tcx> {
+ fn target_spec(&self) -> &Target {
+ &self.sess.target
+ }
+}
+
+impl<'tcx> HasTyCtxt<'tcx> for TyCtxt<'tcx> {
+ #[inline]
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ *self
+ }
+}
+
+impl<'tcx> HasDataLayout for ty::query::TyCtxtAt<'tcx> {
+ #[inline]
+ fn data_layout(&self) -> &TargetDataLayout {
+ &self.data_layout
+ }
+}
+
+impl<'tcx> HasTargetSpec for ty::query::TyCtxtAt<'tcx> {
+ fn target_spec(&self) -> &Target {
+ &self.sess.target
+ }
+}
+
+impl<'tcx> HasTyCtxt<'tcx> for ty::query::TyCtxtAt<'tcx> {
+ #[inline]
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ **self
+ }
+}
+
+impl<'tcx, C> HasParamEnv<'tcx> for LayoutCx<'tcx, C> {
+ fn param_env(&self) -> ty::ParamEnv<'tcx> {
+ self.param_env
+ }
+}
+
+impl<'tcx, T: HasDataLayout> HasDataLayout for LayoutCx<'tcx, T> {
+ fn data_layout(&self) -> &TargetDataLayout {
+ self.tcx.data_layout()
+ }
+}
+
+impl<'tcx, T: HasTargetSpec> HasTargetSpec for LayoutCx<'tcx, T> {
+ fn target_spec(&self) -> &Target {
+ self.tcx.target_spec()
+ }
+}
+
+impl<'tcx, T: HasTyCtxt<'tcx>> HasTyCtxt<'tcx> for LayoutCx<'tcx, T> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx.tcx()
+ }
+}
+
+pub trait MaybeResult<T> {
+ type Error;
+
+ fn from(x: Result<T, Self::Error>) -> Self;
+ fn to_result(self) -> Result<T, Self::Error>;
+}
+
+impl<T> MaybeResult<T> for T {
+ type Error = !;
+
+ fn from(Ok(x): Result<T, Self::Error>) -> Self {
+ x
+ }
+ fn to_result(self) -> Result<T, Self::Error> {
+ Ok(self)
+ }
+}
+
+impl<T, E> MaybeResult<T> for Result<T, E> {
+ type Error = E;
+
+ fn from(x: Result<T, Self::Error>) -> Self {
+ x
+ }
+ fn to_result(self) -> Result<T, Self::Error> {
+ self
+ }
+}
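+
+// Illustrative note (not from the original source): infallible contexts (e.g.
+// codegen, where layout errors become fatal) use `TyAndLayout<'tcx>` itself as
+// the `MaybeResult` with `Error = !`, while `LayoutCx` below uses
+// `Result<TyAndLayout<'tcx>, LayoutError<'tcx>>` and propagates errors.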
+
+pub type TyAndLayout<'tcx> = rustc_target::abi::TyAndLayout<'tcx, Ty<'tcx>>;
+
+/// Trait for contexts that want to be able to compute layouts of types.
+/// This automatically gives access to `LayoutOf`, through a blanket `impl`.
+pub trait LayoutOfHelpers<'tcx>: HasDataLayout + HasTyCtxt<'tcx> + HasParamEnv<'tcx> {
+ /// The `TyAndLayout`-wrapping type (or `TyAndLayout` itself), which will be
+ /// returned from `layout_of` (see also `handle_layout_err`).
+ type LayoutOfResult: MaybeResult<TyAndLayout<'tcx>>;
+
+ /// `Span` to use for `tcx.at(span)`, from `layout_of`.
+ // FIXME(eddyb) perhaps make this mandatory to get contexts to track it better?
+ #[inline]
+ fn layout_tcx_at_span(&self) -> Span {
+ DUMMY_SP
+ }
+
+ /// Helper used for `layout_of`, to adapt `tcx.layout_of(...)` into a
+ /// `Self::LayoutOfResult` (which does not need to be a `Result<...>`).
+ ///
+ /// Most `impl`s, which propagate `LayoutError`s, should simply return `err`,
+ /// but this hook allows e.g. codegen to return only `TyAndLayout` from its
+ /// `cx.layout_of(...)`, without any `Result<...>` around it to deal with
+ /// (and any `LayoutError`s are turned into fatal errors or ICEs).
+ fn handle_layout_err(
+ &self,
+ err: LayoutError<'tcx>,
+ span: Span,
+ ty: Ty<'tcx>,
+ ) -> <Self::LayoutOfResult as MaybeResult<TyAndLayout<'tcx>>>::Error;
+}
+
+/// Blanket extension trait for contexts that can compute layouts of types.
+pub trait LayoutOf<'tcx>: LayoutOfHelpers<'tcx> {
+ /// Computes the layout of a type. Note that this implicitly
+ /// executes in "reveal all" mode, and will normalize the input type.
+ #[inline]
+ fn layout_of(&self, ty: Ty<'tcx>) -> Self::LayoutOfResult {
+ self.spanned_layout_of(ty, DUMMY_SP)
+ }
+
+ /// Computes the layout of a type, at `span`. Note that this implicitly
+ /// executes in "reveal all" mode, and will normalize the input type.
+ // FIXME(eddyb) avoid passing information like this, and instead add more
+ // `TyCtxt::at`-like APIs to be able to do e.g. `cx.at(span).layout_of(ty)`.
+ #[inline]
+ fn spanned_layout_of(&self, ty: Ty<'tcx>, span: Span) -> Self::LayoutOfResult {
+ let span = if !span.is_dummy() { span } else { self.layout_tcx_at_span() };
+ let tcx = self.tcx().at(span);
+
+ MaybeResult::from(
+ tcx.layout_of(self.param_env().and(ty))
+ .map_err(|err| self.handle_layout_err(err, span, ty)),
+ )
+ }
+}
+
+impl<'tcx, C: LayoutOfHelpers<'tcx>> LayoutOf<'tcx> for C {}
+
+impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx, TyCtxt<'tcx>> {
+ type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
+
+ #[inline]
+ fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
+ err
+ }
+}
+
+impl<'tcx> LayoutOfHelpers<'tcx> for LayoutCx<'tcx, ty::query::TyCtxtAt<'tcx>> {
+ type LayoutOfResult = Result<TyAndLayout<'tcx>, LayoutError<'tcx>>;
+
+ #[inline]
+ fn layout_tcx_at_span(&self) -> Span {
+ self.tcx.span
+ }
+
+ #[inline]
+ fn handle_layout_err(&self, err: LayoutError<'tcx>, _: Span, _: Ty<'tcx>) -> LayoutError<'tcx> {
+ err
+ }
+}
+
+impl<'tcx, C> TyAbiInterface<'tcx, C> for Ty<'tcx>
+where
+ C: HasTyCtxt<'tcx> + HasParamEnv<'tcx>,
+{
+ fn ty_and_layout_for_variant(
+ this: TyAndLayout<'tcx>,
+ cx: &C,
+ variant_index: VariantIdx,
+ ) -> TyAndLayout<'tcx> {
+ let layout = match this.variants {
+ Variants::Single { index }
+ // If all variants but one are uninhabited, the variant layout is the enum layout.
+ if index == variant_index &&
+ // Don't confuse variants of uninhabited enums with the enum itself.
+ // For more details see https://github.com/rust-lang/rust/issues/69763.
+ this.fields != FieldsShape::Primitive =>
+ {
+ this.layout
+ }
+
+ Variants::Single { index } => {
+ let tcx = cx.tcx();
+ let param_env = cx.param_env();
+
+ // Deny calling for_variant more than once for non-Single enums.
+ if let Ok(original_layout) = tcx.layout_of(param_env.and(this.ty)) {
+ assert_eq!(original_layout.variants, Variants::Single { index });
+ }
+
+ let fields = match this.ty.kind() {
+ ty::Adt(def, _) if def.variants().is_empty() =>
+ bug!("for_variant called on zero-variant enum"),
+ ty::Adt(def, _) => def.variant(variant_index).fields.len(),
+ _ => bug!(),
+ };
+ tcx.intern_layout(LayoutS {
+ variants: Variants::Single { index: variant_index },
+ fields: match NonZeroUsize::new(fields) {
+ Some(fields) => FieldsShape::Union(fields),
+ None => FieldsShape::Arbitrary { offsets: vec![], memory_index: vec![] },
+ },
+ abi: Abi::Uninhabited,
+ largest_niche: None,
+ align: tcx.data_layout.i8_align,
+ size: Size::ZERO,
+ })
+ }
+
+ Variants::Multiple { ref variants, .. } => variants[variant_index],
+ };
+
+ assert_eq!(*layout.variants(), Variants::Single { index: variant_index });
+
+ TyAndLayout { ty: this.ty, layout }
+ }
+
+ fn ty_and_layout_field(this: TyAndLayout<'tcx>, cx: &C, i: usize) -> TyAndLayout<'tcx> {
+ enum TyMaybeWithLayout<'tcx> {
+ Ty(Ty<'tcx>),
+ TyAndLayout(TyAndLayout<'tcx>),
+ }
+
+ fn field_ty_or_layout<'tcx>(
+ this: TyAndLayout<'tcx>,
+ cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
+ i: usize,
+ ) -> TyMaybeWithLayout<'tcx> {
+ let tcx = cx.tcx();
+ let tag_layout = |tag: Scalar| -> TyAndLayout<'tcx> {
+ TyAndLayout {
+ layout: tcx.intern_layout(LayoutS::scalar(cx, tag)),
+ ty: tag.primitive().to_ty(tcx),
+ }
+ };
+
+ match *this.ty.kind() {
+ ty::Bool
+ | ty::Char
+ | ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_)
+ | ty::FnPtr(_)
+ | ty::Never
+ | ty::FnDef(..)
+ | ty::GeneratorWitness(..)
+ | ty::Foreign(..)
+ | ty::Dynamic(..) => bug!("TyAndLayout::field({:?}): not applicable", this),
+
+ // Potentially-fat pointers.
+ ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
+ assert!(i < this.fields.count());
+
+ // Reuse the fat `*T` type as its own thin pointer data field.
+ // This provides information about, e.g., DST struct pointees
+ // (which may have no non-DST form), and will work as long
+ // as the `Abi` or `FieldsShape` is checked by users.
+ if i == 0 {
+ let nil = tcx.mk_unit();
+ let unit_ptr_ty = if this.ty.is_unsafe_ptr() {
+ tcx.mk_mut_ptr(nil)
+ } else {
+ tcx.mk_mut_ref(tcx.lifetimes.re_static, nil)
+ };
+
+ // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing
+ // the `Result` should always work because the type is
+ // always either `*mut ()` or `&'static mut ()`.
+ return TyMaybeWithLayout::TyAndLayout(TyAndLayout {
+ ty: this.ty,
+ ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()
+ });
+ }
+
+ match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind() {
+ ty::Slice(_) | ty::Str => TyMaybeWithLayout::Ty(tcx.types.usize),
+ ty::Dynamic(_, _) => {
+ TyMaybeWithLayout::Ty(tcx.mk_imm_ref(
+ tcx.lifetimes.re_static,
+ tcx.mk_array(tcx.types.usize, 3),
+ ))
+ /* FIXME: use actual fn pointers
+ Warning: naively computing the number of entries in the
+ vtable by counting the methods on the trait + methods on
+ all parent traits does not work, because some methods can
+ be not object safe and thus excluded from the vtable.
+ Increase this counter if you tried to implement this but
+ failed to do it without duplicating a lot of code from
+ other places in the compiler: 2
+ tcx.mk_tup(&[
+ tcx.mk_array(tcx.types.usize, 3),
+ tcx.mk_array(Option<fn()>),
+ ])
+ */
+ }
+ _ => bug!("TyAndLayout::field({:?}): not applicable", this),
+ }
+ }
+
+ // Arrays and slices.
+ ty::Array(element, _) | ty::Slice(element) => TyMaybeWithLayout::Ty(element),
+ ty::Str => TyMaybeWithLayout::Ty(tcx.types.u8),
+
+ // Tuples, generators and closures.
+ ty::Closure(_, ref substs) => field_ty_or_layout(
+ TyAndLayout { ty: substs.as_closure().tupled_upvars_ty(), ..this },
+ cx,
+ i,
+ ),
+
+ ty::Generator(def_id, ref substs, _) => match this.variants {
+ Variants::Single { index } => TyMaybeWithLayout::Ty(
+ substs
+ .as_generator()
+ .state_tys(def_id, tcx)
+ .nth(index.as_usize())
+ .unwrap()
+ .nth(i)
+ .unwrap(),
+ ),
+ Variants::Multiple { tag, tag_field, .. } => {
+ if i == tag_field {
+ return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
+ }
+ TyMaybeWithLayout::Ty(substs.as_generator().prefix_tys().nth(i).unwrap())
+ }
+ },
+
+ ty::Tuple(tys) => TyMaybeWithLayout::Ty(tys[i]),
+
+ // ADTs.
+ ty::Adt(def, substs) => {
+ match this.variants {
+ Variants::Single { index } => {
+ TyMaybeWithLayout::Ty(def.variant(index).fields[i].ty(tcx, substs))
+ }
+
+ // Discriminant field for enums (where applicable).
+ Variants::Multiple { tag, .. } => {
+ assert_eq!(i, 0);
+ return TyMaybeWithLayout::TyAndLayout(tag_layout(tag));
+ }
+ }
+ }
+
+ ty::Projection(_)
+ | ty::Bound(..)
+ | ty::Placeholder(..)
+ | ty::Opaque(..)
+ | ty::Param(_)
+ | ty::Infer(_)
+ | ty::Error(_) => bug!("TyAndLayout::field: unexpected type `{}`", this.ty),
+ }
+ }
+
+ match field_ty_or_layout(this, cx, i) {
+ TyMaybeWithLayout::Ty(field_ty) => {
+ cx.tcx().layout_of(cx.param_env().and(field_ty)).unwrap_or_else(|e| {
+ bug!(
+ "failed to get layout for `{}`: {},\n\
+ despite it being a field (#{}) of an existing layout: {:#?}",
+ field_ty,
+ e,
+ i,
+ this
+ )
+ })
+ }
+ TyMaybeWithLayout::TyAndLayout(field_layout) => field_layout,
+ }
+ }
+
+ fn ty_and_layout_pointee_info_at(
+ this: TyAndLayout<'tcx>,
+ cx: &C,
+ offset: Size,
+ ) -> Option<PointeeInfo> {
+ let tcx = cx.tcx();
+ let param_env = cx.param_env();
+
+ let addr_space_of_ty = |ty: Ty<'tcx>| {
+ if ty.is_fn() { cx.data_layout().instruction_address_space } else { AddressSpace::DATA }
+ };
+
+ let pointee_info = match *this.ty.kind() {
+ ty::RawPtr(mt) if offset.bytes() == 0 => {
+ tcx.layout_of(param_env.and(mt.ty)).ok().map(|layout| PointeeInfo {
+ size: layout.size,
+ align: layout.align.abi,
+ safe: None,
+ address_space: addr_space_of_ty(mt.ty),
+ })
+ }
+ ty::FnPtr(fn_sig) if offset.bytes() == 0 => {
+ tcx.layout_of(param_env.and(tcx.mk_fn_ptr(fn_sig))).ok().map(|layout| PointeeInfo {
+ size: layout.size,
+ align: layout.align.abi,
+ safe: None,
+ address_space: cx.data_layout().instruction_address_space,
+ })
+ }
+ ty::Ref(_, ty, mt) if offset.bytes() == 0 => {
+ let address_space = addr_space_of_ty(ty);
+ let kind = if tcx.sess.opts.optimize == OptLevel::No {
+ // Use conservative pointer kind if not optimizing. This saves us the
+ // Freeze/Unpin queries, and can save time in the codegen backend (noalias
+ // attributes in LLVM have compile-time cost even in unoptimized builds).
+ PointerKind::SharedMutable
+ } else {
+ match mt {
+ hir::Mutability::Not => {
+ if ty.is_freeze(tcx.at(DUMMY_SP), cx.param_env()) {
+ PointerKind::Frozen
+ } else {
+ PointerKind::SharedMutable
+ }
+ }
+ hir::Mutability::Mut => {
+ // References to self-referential structures should not be considered
+                        // noalias, as another pointer to the structure can be obtained that
+                        // is not based on the original reference. We consider all !Unpin
+ // types to be potentially self-referential here.
+ if ty.is_unpin(tcx.at(DUMMY_SP), cx.param_env()) {
+ PointerKind::UniqueBorrowed
+ } else {
+ PointerKind::UniqueBorrowedPinned
+ }
+ }
+ }
+ };
+
+ tcx.layout_of(param_env.and(ty)).ok().map(|layout| PointeeInfo {
+ size: layout.size,
+ align: layout.align.abi,
+ safe: Some(kind),
+ address_space,
+ })
+ }
+
+ _ => {
+ let mut data_variant = match this.variants {
+ // Within the discriminant field, only the niche itself is
+ // always initialized, so we only check for a pointer at its
+ // offset.
+ //
+ // If the niche is a pointer, it's either valid (according
+ // to its type), or null (which the niche field's scalar
+ // validity range encodes). This allows using
+ // `dereferenceable_or_null` for e.g., `Option<&T>`, and
+ // this will continue to work as long as we don't start
+ // using more niches than just null (e.g., the first page of
+ // the address space, or unaligned pointers).
+ Variants::Multiple {
+ tag_encoding: TagEncoding::Niche { dataful_variant, .. },
+ tag_field,
+ ..
+ } if this.fields.offset(tag_field) == offset => {
+ Some(this.for_variant(cx, dataful_variant))
+ }
+ _ => Some(this),
+ };
+
+ if let Some(variant) = data_variant {
+ // We're not interested in any unions.
+ if let FieldsShape::Union(_) = variant.fields {
+ data_variant = None;
+ }
+ }
+
+ let mut result = None;
+
+ if let Some(variant) = data_variant {
+ let ptr_end = offset + Pointer.size(cx);
+ for i in 0..variant.fields.count() {
+ let field_start = variant.fields.offset(i);
+ if field_start <= offset {
+ let field = variant.field(cx, i);
+ result = field.to_result().ok().and_then(|field| {
+ if ptr_end <= field_start + field.size {
+ // We found the right field, look inside it.
+ let field_info =
+ field.pointee_info_at(cx, offset - field_start);
+ field_info
+ } else {
+ None
+ }
+ });
+ if result.is_some() {
+ break;
+ }
+ }
+ }
+ }
+
+ // FIXME(eddyb) This should be for `ptr::Unique<T>`, not `Box<T>`.
+ if let Some(ref mut pointee) = result {
+ if let ty::Adt(def, _) = this.ty.kind() {
+ if def.is_box() && offset.bytes() == 0 {
+ pointee.safe = Some(PointerKind::UniqueOwned);
+ }
+ }
+ }
+
+ result
+ }
+ };
+
+ debug!(
+ "pointee_info_at (offset={:?}, type kind: {:?}) => {:?}",
+ offset,
+ this.ty.kind(),
+ pointee_info
+ );
+
+ pointee_info
+ }
+
+ fn is_adt(this: TyAndLayout<'tcx>) -> bool {
+ matches!(this.ty.kind(), ty::Adt(..))
+ }
+
+ fn is_never(this: TyAndLayout<'tcx>) -> bool {
+ this.ty.kind() == &ty::Never
+ }
+
+ fn is_tuple(this: TyAndLayout<'tcx>) -> bool {
+ matches!(this.ty.kind(), ty::Tuple(..))
+ }
+
+ fn is_unit(this: TyAndLayout<'tcx>) -> bool {
+ matches!(this.ty.kind(), ty::Tuple(list) if list.len() == 0)
+ }
+}
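+
+// Illustrative note (not part of the compiler): for a fat pointer such as
+// `&[u8]`, `ty_and_layout_field` yields field 0 as the thin data pointer and
+// field 1 as the `usize` length; for `&dyn Trait`, field 1 is the vtable
+// pointer (currently modeled as `&'static [usize; 3]`, see the FIXME above).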
+
+impl<'tcx> ty::Instance<'tcx> {
+ // NOTE(eddyb) this is private to avoid using it from outside of
+ // `fn_abi_of_instance` - any other uses are either too high-level
+ // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
+ // or should go through `FnAbi` instead, to avoid losing any
+ // adjustments `fn_abi_of_instance` might be performing.
+ fn fn_sig_for_fn_abi(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ ) -> ty::PolyFnSig<'tcx> {
+ let ty = self.ty(tcx, param_env);
+ match *ty.kind() {
+ ty::FnDef(..) => {
+ // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
+ // parameters unused if they show up in the signature, but not in the `mir::Body`
+ // (i.e. due to being inside a projection that got normalized, see
+ // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
+ // track of a polymorphization `ParamEnv` to allow normalizing later.
+ let mut sig = match *ty.kind() {
+ ty::FnDef(def_id, substs) => tcx
+ .normalize_erasing_regions(tcx.param_env(def_id), tcx.bound_fn_sig(def_id))
+ .subst(tcx, substs),
+ _ => unreachable!(),
+ };
+
+ if let ty::InstanceDef::VTableShim(..) = self.def {
+ // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
+ sig = sig.map_bound(|mut sig| {
+ let mut inputs_and_output = sig.inputs_and_output.to_vec();
+ inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
+ sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
+ sig
+ });
+ }
+ sig
+ }
+ ty::Closure(def_id, substs) => {
+ let sig = substs.as_closure().sig();
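+                // E.g. (illustrative): for an `FnMut` closure `|x: u32| -> bool`, the
+                // lowered signature is shaped like `fn(&'env mut Env, u32) -> bool`,
+                // where `'env` is the fresh `BrEnv` late-bound region introduced below.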
+
+ let bound_vars = tcx.mk_bound_variable_kinds(
+ sig.bound_vars()
+ .iter()
+ .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
+ );
+ let br = ty::BoundRegion {
+ var: ty::BoundVar::from_usize(bound_vars.len() - 1),
+ kind: ty::BoundRegionKind::BrEnv,
+ };
+ let env_region = ty::ReLateBound(ty::INNERMOST, br);
+ let env_ty = tcx.closure_env_ty(def_id, substs, env_region).unwrap();
+
+ let sig = sig.skip_binder();
+ ty::Binder::bind_with_vars(
+ tcx.mk_fn_sig(
+ iter::once(env_ty).chain(sig.inputs().iter().cloned()),
+ sig.output(),
+ sig.c_variadic,
+ sig.unsafety,
+ sig.abi,
+ ),
+ bound_vars,
+ )
+ }
+ ty::Generator(_, substs, _) => {
+ let sig = substs.as_generator().poly_sig();
+
+ let bound_vars = tcx.mk_bound_variable_kinds(
+ sig.bound_vars()
+ .iter()
+ .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
+ );
+ let br = ty::BoundRegion {
+ var: ty::BoundVar::from_usize(bound_vars.len() - 1),
+ kind: ty::BoundRegionKind::BrEnv,
+ };
+ let env_region = ty::ReLateBound(ty::INNERMOST, br);
+ let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);
+
+ let pin_did = tcx.require_lang_item(LangItem::Pin, None);
+ let pin_adt_ref = tcx.adt_def(pin_did);
+ let pin_substs = tcx.intern_substs(&[env_ty.into()]);
+ let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);
+
+ let sig = sig.skip_binder();
+ let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
+ let state_adt_ref = tcx.adt_def(state_did);
+ let state_substs = tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
+ let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
+ ty::Binder::bind_with_vars(
+ tcx.mk_fn_sig(
+ [env_ty, sig.resume_ty].iter(),
+ &ret_ty,
+ false,
+ hir::Unsafety::Normal,
+ rustc_target::spec::abi::Abi::Rust,
+ ),
+ bound_vars,
+ )
+ }
+ _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
+ }
+ }
+}
+
+/// Calculates whether a function's ABI can unwind or not.
+///
+/// This takes two primary parameters:
+///
+/// * `codegen_fn_attr_flags` - these are flags calculated as part of the
+/// codegen attrs for a defined function. For function pointers this set of
+/// flags is the empty set. This is only applicable for Rust-defined
+/// functions, and generally isn't needed except for small optimizations where
+/// we try to say a function which otherwise might look like it could unwind
+/// doesn't actually unwind (such as for intrinsics and such).
+///
+/// * `abi` - this is the ABI that the function is defined with. This is the
+/// primary factor for determining whether a function can unwind or not.
+///
+/// Note that in this case unwinding is not necessarily panicking in Rust. Rust
+/// panics are implemented with unwinds on most platforms (when
+/// `-Cpanic=unwind`), but this also accounts for `-Cpanic=abort` build modes.
+/// Notably, unwinding is disallowed for most non-Rust ABIs unless it's
+/// specifically in the name (e.g. `"C-unwind"`). Unwinding within each ABI is
+/// defined for each ABI individually, but it always corresponds to some form of
+/// stack-based unwinding (the exact mechanism of which varies
+/// platform-by-platform).
+///
+/// Rust functions are classified as able to unwind or not based on the
+/// active "panic strategy". In other words, Rust functions are considered to
+/// unwind in `-Cpanic=unwind` mode and cannot unwind in `-Cpanic=abort` mode.
+/// Note that Rust supports intermingling panic=abort and panic=unwind code, but
+/// only if the final panic mode is panic=abort. In this scenario any code
+/// previously compiled assuming that a function can unwind is still correct, it
+/// just never happens to actually unwind at runtime.
+///
+/// This function's answer to whether or not a function can unwind is quite
+/// impactful throughout the compiler. This affects things like:
+///
+/// * Calling a function which can't unwind means codegen simply ignores any
+/// associated unwinding cleanup.
+/// * Calling a function which can unwind from a function which can't unwind
+/// causes the `abort_unwinding_calls` MIR pass to insert a landing pad that
+/// aborts the process.
+/// * This affects whether functions have the LLVM `nounwind` attribute, which
+/// affects various optimizations and codegen.
+///
+/// FIXME: this is actually buggy with respect to Rust functions. Rust functions
+/// compiled with `-Cpanic=unwind` and referenced from another crate compiled
+/// with `-Cpanic=abort` will look like they can't unwind when in fact they
+/// might (from a foreign exception or similar).
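+///
+/// # Example
+///
+/// An illustrative sketch (not a real doctest; requires a `TyCtxt`), assuming
+/// `-Cpanic=unwind` and the `c_unwind` feature gate disabled:
+///
+/// ```ignore (illustrative)
+/// assert!(fn_can_unwind(tcx, None, SpecAbi::Rust)); // follows the panic strategy
+/// assert!(fn_can_unwind(tcx, None, SpecAbi::C { unwind: false })); // historical behavior
+/// assert!(!fn_can_unwind(tcx, None, SpecAbi::Msp430Interrupt)); // interrupt ABIs never unwind
+/// ```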
+#[inline]
+pub fn fn_can_unwind<'tcx>(tcx: TyCtxt<'tcx>, fn_def_id: Option<DefId>, abi: SpecAbi) -> bool {
+ if let Some(did) = fn_def_id {
+ // Special attribute for functions which can't unwind.
+ if tcx.codegen_fn_attrs(did).flags.contains(CodegenFnAttrFlags::NEVER_UNWIND) {
+ return false;
+ }
+
+ // With `-C panic=abort`, all non-FFI functions are required to not unwind.
+ //
+        // Note that this is true regardless of the ABI specified on the function -- an
+        // `extern "C-unwind"` function defined in Rust is also required to abort.
+ if tcx.sess.panic_strategy() == PanicStrategy::Abort && !tcx.is_foreign_item(did) {
+ return false;
+ }
+
+ // With -Z panic-in-drop=abort, drop_in_place never unwinds.
+ //
+ // This is not part of `codegen_fn_attrs` as it can differ between crates
+ // and therefore cannot be computed in core.
+ if tcx.sess.opts.unstable_opts.panic_in_drop == PanicStrategy::Abort {
+ if Some(did) == tcx.lang_items().drop_in_place_fn() {
+ return false;
+ }
+ }
+ }
+
+    // Otherwise, if this isn't special, then unwinding is generally determined by
+    // the ABI of the function itself. ABIs like `C` have variants which also
+ // specifically allow unwinding (`C-unwind`), but not all platform-specific
+ // ABIs have such an option. Otherwise the only other thing here is Rust
+ // itself, and those ABIs are determined by the panic strategy configured
+ // for this compilation.
+ //
+ // Unfortunately at this time there's also another caveat. Rust [RFC
+ // 2945][rfc] has been accepted and is in the process of being implemented
+ // and stabilized. In this interim state we need to deal with historical
+ // rustc behavior as well as plan for future rustc behavior.
+ //
+ // Historically functions declared with `extern "C"` were marked at the
+ // codegen layer as `nounwind`. This happened regardless of `panic=unwind`
+ // or not. This is UB for functions in `panic=unwind` mode that then
+    // actually panic and unwind. Note that this behavior holds both for
+    // externally declared functions and for Rust-defined functions.
+ //
+ // To fix this UB rustc would like to change in the future to catch unwinds
+ // from function calls that may unwind within a Rust-defined `extern "C"`
+ // function and forcibly abort the process, thereby respecting the
+ // `nounwind` attribute emitted for `extern "C"`. This behavior change isn't
+ // ready to roll out, so determining whether or not the `C` family of ABIs
+ // unwinds is conditional not only on their definition but also whether the
+ // `#![feature(c_unwind)]` feature gate is active.
+ //
+    // Note that this means that, unlike historical compilers, rustc now by
+    // default unconditionally thinks that the `C` ABI may unwind. This will
+ // prevent some optimization opportunities, however, so we try to scope this
+ // change and only assume that `C` unwinds with `panic=unwind` (as opposed
+ // to `panic=abort`).
+ //
+ // Eventually the check against `c_unwind` here will ideally get removed and
+ // this'll be a little cleaner as it'll be a straightforward check of the
+ // ABI.
+ //
+ // [rfc]: https://github.com/rust-lang/rfcs/blob/master/text/2945-c-unwind-abi.md
+ use SpecAbi::*;
+ match abi {
+ C { unwind }
+ | System { unwind }
+ | Cdecl { unwind }
+ | Stdcall { unwind }
+ | Fastcall { unwind }
+ | Vectorcall { unwind }
+ | Thiscall { unwind }
+ | Aapcs { unwind }
+ | Win64 { unwind }
+ | SysV64 { unwind } => {
+ unwind
+ || (!tcx.features().c_unwind && tcx.sess.panic_strategy() == PanicStrategy::Unwind)
+ }
+ PtxKernel
+ | Msp430Interrupt
+ | X86Interrupt
+ | AmdGpuKernel
+ | EfiApi
+ | AvrInterrupt
+ | AvrNonBlockingInterrupt
+ | CCmseNonSecureCall
+ | Wasm
+ | RustIntrinsic
+ | PlatformIntrinsic
+ | Unadjusted => false,
+ Rust | RustCall | RustCold => tcx.sess.panic_strategy() == PanicStrategy::Unwind,
+ }
+}
+
+#[inline]
+pub fn conv_from_spec_abi(tcx: TyCtxt<'_>, abi: SpecAbi) -> Conv {
+ use rustc_target::spec::abi::Abi::*;
+ match tcx.sess.target.adjust_abi(abi) {
+ RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
+ RustCold => Conv::RustCold,
+
+ // It's the ABI's job to select this, not ours.
+ System { .. } => bug!("system abi should be selected elsewhere"),
+ EfiApi => bug!("eficall abi should be selected elsewhere"),
+
+ Stdcall { .. } => Conv::X86Stdcall,
+ Fastcall { .. } => Conv::X86Fastcall,
+ Vectorcall { .. } => Conv::X86VectorCall,
+ Thiscall { .. } => Conv::X86ThisCall,
+ C { .. } => Conv::C,
+ Unadjusted => Conv::C,
+ Win64 { .. } => Conv::X86_64Win64,
+ SysV64 { .. } => Conv::X86_64SysV,
+ Aapcs { .. } => Conv::ArmAapcs,
+ CCmseNonSecureCall => Conv::CCmseNonSecureCall,
+ PtxKernel => Conv::PtxKernel,
+ Msp430Interrupt => Conv::Msp430Intr,
+ X86Interrupt => Conv::X86Intr,
+ AmdGpuKernel => Conv::AmdGpuKernel,
+ AvrInterrupt => Conv::AvrInterrupt,
+ AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,
+ Wasm => Conv::C,
+
+        // These ABI constants ought to be more specific...
+ Cdecl { .. } => Conv::C,
+ }
+}
+
+/// Error produced by attempting to compute or adjust a `FnAbi`.
+#[derive(Copy, Clone, Debug, HashStable)]
+pub enum FnAbiError<'tcx> {
+ /// Error produced by a `layout_of` call, while computing `FnAbi` initially.
+ Layout(LayoutError<'tcx>),
+
+ /// Error produced by attempting to adjust a `FnAbi`, for a "foreign" ABI.
+ AdjustForForeignAbi(call::AdjustForForeignAbiError),
+}
+
+impl<'tcx> From<LayoutError<'tcx>> for FnAbiError<'tcx> {
+ fn from(err: LayoutError<'tcx>) -> Self {
+ Self::Layout(err)
+ }
+}
+
+impl From<call::AdjustForForeignAbiError> for FnAbiError<'_> {
+ fn from(err: call::AdjustForForeignAbiError) -> Self {
+ Self::AdjustForForeignAbi(err)
+ }
+}
+
+impl<'tcx> fmt::Display for FnAbiError<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ Self::Layout(err) => err.fmt(f),
+ Self::AdjustForForeignAbi(err) => err.fmt(f),
+ }
+ }
+}
+
+// FIXME(eddyb) maybe use something like this for an unified `fn_abi_of`, not
+// just for error handling.
+#[derive(Debug)]
+pub enum FnAbiRequest<'tcx> {
+ OfFnPtr { sig: ty::PolyFnSig<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>> },
+ OfInstance { instance: ty::Instance<'tcx>, extra_args: &'tcx ty::List<Ty<'tcx>> },
+}
+
+/// Trait for contexts that want to be able to compute `FnAbi`s.
+/// This automatically gives access to `FnAbiOf`, through a blanket `impl`.
+pub trait FnAbiOfHelpers<'tcx>: LayoutOfHelpers<'tcx> {
+ /// The `&FnAbi`-wrapping type (or `&FnAbi` itself), which will be
+ /// returned from `fn_abi_of_*` (see also `handle_fn_abi_err`).
+ type FnAbiOfResult: MaybeResult<&'tcx FnAbi<'tcx, Ty<'tcx>>>;
+
+ /// Helper used for `fn_abi_of_*`, to adapt `tcx.fn_abi_of_*(...)` into a
+ /// `Self::FnAbiOfResult` (which does not need to be a `Result<...>`).
+ ///
+ /// Most `impl`s, which propagate `FnAbiError`s, should simply return `err`,
+ /// but this hook allows e.g. codegen to return only `&FnAbi` from its
+ /// `cx.fn_abi_of_*(...)`, without any `Result<...>` around it to deal with
+ /// (and any `FnAbiError`s are turned into fatal errors or ICEs).
+ fn handle_fn_abi_err(
+ &self,
+ err: FnAbiError<'tcx>,
+ span: Span,
+ fn_abi_request: FnAbiRequest<'tcx>,
+ ) -> <Self::FnAbiOfResult as MaybeResult<&'tcx FnAbi<'tcx, Ty<'tcx>>>>::Error;
+}
+
+/// Blanket extension trait for contexts that can compute `FnAbi`s.
+pub trait FnAbiOf<'tcx>: FnAbiOfHelpers<'tcx> {
+ /// Compute a `FnAbi` suitable for indirect calls, i.e. to `fn` pointers.
+ ///
+ /// NB: this doesn't handle virtual calls - those should use `fn_abi_of_instance`
+ /// instead, where the instance is an `InstanceDef::Virtual`.
+ #[inline]
+ fn fn_abi_of_fn_ptr(
+ &self,
+ sig: ty::PolyFnSig<'tcx>,
+ extra_args: &'tcx ty::List<Ty<'tcx>>,
+ ) -> Self::FnAbiOfResult {
+ // FIXME(eddyb) get a better `span` here.
+ let span = self.layout_tcx_at_span();
+ let tcx = self.tcx().at(span);
+
+ MaybeResult::from(tcx.fn_abi_of_fn_ptr(self.param_env().and((sig, extra_args))).map_err(
+ |err| self.handle_fn_abi_err(err, span, FnAbiRequest::OfFnPtr { sig, extra_args }),
+ ))
+ }
+
+ /// Compute a `FnAbi` suitable for declaring/defining an `fn` instance, and for
+ /// direct calls to an `fn`.
+ ///
+ /// NB: that includes virtual calls, which are represented by "direct calls"
+ /// to an `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`).
+ #[inline]
+ fn fn_abi_of_instance(
+ &self,
+ instance: ty::Instance<'tcx>,
+ extra_args: &'tcx ty::List<Ty<'tcx>>,
+ ) -> Self::FnAbiOfResult {
+ // FIXME(eddyb) get a better `span` here.
+ let span = self.layout_tcx_at_span();
+ let tcx = self.tcx().at(span);
+
+ MaybeResult::from(
+ tcx.fn_abi_of_instance(self.param_env().and((instance, extra_args))).map_err(|err| {
+ // HACK(eddyb) at least for definitions of/calls to `Instance`s,
+ // we can get some kind of span even if one wasn't provided.
+ // However, we don't do this early in order to avoid calling
+ // `def_span` unconditionally (which may have a perf penalty).
+ let span = if !span.is_dummy() { span } else { tcx.def_span(instance.def_id()) };
+ self.handle_fn_abi_err(err, span, FnAbiRequest::OfInstance { instance, extra_args })
+ }),
+ )
+ }
+}
+
+impl<'tcx, C: FnAbiOfHelpers<'tcx>> FnAbiOf<'tcx> for C {}
+
+fn fn_abi_of_fn_ptr<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ query: ty::ParamEnvAnd<'tcx, (ty::PolyFnSig<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
+) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
+ let (param_env, (sig, extra_args)) = query.into_parts();
+
+ LayoutCx { tcx, param_env }.fn_abi_new_uncached(sig, extra_args, None, None, false)
+}
+
+fn fn_abi_of_instance<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ query: ty::ParamEnvAnd<'tcx, (ty::Instance<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
+) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
+ let (param_env, (instance, extra_args)) = query.into_parts();
+
+ let sig = instance.fn_sig_for_fn_abi(tcx, param_env);
+
+ let caller_location = if instance.def.requires_caller_location(tcx) {
+ Some(tcx.caller_location_ty())
+ } else {
+ None
+ };
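+    // Note (illustrative restatement): for `#[track_caller]` functions this appends
+    // a hidden `&'static core::panic::Location` argument after the normal inputs;
+    // see how `fn_abi_new_uncached` chains `caller_location` last when building `args`.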
+
+ LayoutCx { tcx, param_env }.fn_abi_new_uncached(
+ sig,
+ extra_args,
+ caller_location,
+ Some(instance.def_id()),
+ matches!(instance.def, ty::InstanceDef::Virtual(..)),
+ )
+}
+
+impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
+ // FIXME(eddyb) perhaps group the signature/type-containing (or all of them?)
+ // arguments of this method, into a separate `struct`.
+ fn fn_abi_new_uncached(
+ &self,
+ sig: ty::PolyFnSig<'tcx>,
+ extra_args: &[Ty<'tcx>],
+ caller_location: Option<Ty<'tcx>>,
+ fn_def_id: Option<DefId>,
+ // FIXME(eddyb) replace this with something typed, like an `enum`.
+ force_thin_self_ptr: bool,
+ ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
+ debug!("fn_abi_new_uncached({:?}, {:?})", sig, extra_args);
+
+ let sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, sig);
+
+ let conv = conv_from_spec_abi(self.tcx(), sig.abi);
+
+ let mut inputs = sig.inputs();
+ let extra_args = if sig.abi == RustCall {
+ assert!(!sig.c_variadic && extra_args.is_empty());
+
+ if let Some(input) = sig.inputs().last() {
+ if let ty::Tuple(tupled_arguments) = input.kind() {
+ inputs = &sig.inputs()[0..sig.inputs().len() - 1];
+ tupled_arguments
+ } else {
+ bug!(
+ "argument to function with \"rust-call\" ABI \
+ is not a tuple"
+ );
+ }
+ } else {
+ bug!(
+ "argument to function with \"rust-call\" ABI \
+ is not a tuple"
+ );
+ }
+ } else {
+ assert!(sig.c_variadic || extra_args.is_empty());
+ extra_args
+ };
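+        // E.g. (illustrative): for `extern "rust-call" fn(self, args: (A, B))`, the
+        // trailing tuple is untupled above, so the ABI sees the logical argument
+        // list `[self, A, B]` rather than `[self, (A, B)]`.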
+
+ let target = &self.tcx.sess.target;
+ let target_env_gnu_like = matches!(&target.env[..], "gnu" | "musl" | "uclibc");
+ let win_x64_gnu = target.os == "windows" && target.arch == "x86_64" && target.env == "gnu";
+ let linux_s390x_gnu_like =
+ target.os == "linux" && target.arch == "s390x" && target_env_gnu_like;
+ let linux_sparc64_gnu_like =
+ target.os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
+ let linux_powerpc_gnu_like =
+ target.os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
+ use SpecAbi::*;
+ let rust_abi = matches!(sig.abi, RustIntrinsic | PlatformIntrinsic | Rust | RustCall);
+
+ // Handle safe Rust thin and fat pointers.
+ let adjust_for_rust_scalar = |attrs: &mut ArgAttributes,
+ scalar: Scalar,
+ layout: TyAndLayout<'tcx>,
+ offset: Size,
+ is_return: bool| {
+ // Booleans are always a noundef i1 that needs to be zero-extended.
+ if scalar.is_bool() {
+ attrs.ext(ArgExtension::Zext);
+ attrs.set(ArgAttribute::NoUndef);
+ return;
+ }
+
+ // Scalars which have invalid values cannot be undef.
+ if !scalar.is_always_valid(self) {
+ attrs.set(ArgAttribute::NoUndef);
+ }
+
+ // Only pointer types handled below.
+ let Scalar::Initialized { value: Pointer, valid_range} = scalar else { return };
+
+ if !valid_range.contains(0) {
+ attrs.set(ArgAttribute::NonNull);
+ }
+
+ if let Some(pointee) = layout.pointee_info_at(self, offset) {
+ if let Some(kind) = pointee.safe {
+ attrs.pointee_align = Some(pointee.align);
+
+                    // `Box` (`UniqueOwned`) is not necessarily dereferenceable
+                    // for the entire duration of the function as it can be deallocated
+ // at any time. Same for shared mutable references. If LLVM had a
+ // way to say "dereferenceable on entry" we could use it here.
+ attrs.pointee_size = match kind {
+ PointerKind::UniqueBorrowed
+ | PointerKind::UniqueBorrowedPinned
+ | PointerKind::Frozen => pointee.size,
+ PointerKind::SharedMutable | PointerKind::UniqueOwned => Size::ZERO,
+ };
+
+ // `Box`, `&T`, and `&mut T` cannot be undef.
+ // Note that this only applies to the value of the pointer itself;
+ // this attribute doesn't make it UB for the pointed-to data to be undef.
+ attrs.set(ArgAttribute::NoUndef);
+
+ // The aliasing rules for `Box<T>` are still not decided, but currently we emit
+ // `noalias` for it. This can be turned off using an unstable flag.
+ // See https://github.com/rust-lang/unsafe-code-guidelines/issues/326
+ let noalias_for_box =
+ self.tcx().sess.opts.unstable_opts.box_noalias.unwrap_or(true);
+
+ // `&mut` pointer parameters never alias other parameters,
+ // or mutable global data
+ //
+ // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
+ // and can be marked as both `readonly` and `noalias`, as
+ // LLVM's definition of `noalias` is based solely on memory
+ // dependencies rather than pointer equality
+ //
+ // Due to past miscompiles in LLVM, we apply a separate NoAliasMutRef attribute
+ // for UniqueBorrowed arguments, so that the codegen backend can decide whether
+ // or not to actually emit the attribute. It can also be controlled with the
+ // `-Zmutable-noalias` debugging option.
+ let no_alias = match kind {
+ PointerKind::SharedMutable
+ | PointerKind::UniqueBorrowed
+ | PointerKind::UniqueBorrowedPinned => false,
+ PointerKind::UniqueOwned => noalias_for_box,
+ PointerKind::Frozen => !is_return,
+ };
+ if no_alias {
+ attrs.set(ArgAttribute::NoAlias);
+ }
+
+ if kind == PointerKind::Frozen && !is_return {
+ attrs.set(ArgAttribute::ReadOnly);
+ }
+
+ if kind == PointerKind::UniqueBorrowed && !is_return {
+ attrs.set(ArgAttribute::NoAliasMutRef);
+ }
+ }
+ }
+ };
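+        // Summary of the choices above (illustrative, non-return positions):
+        //
+        //   `&T` (Frozen)                    => noalias + readonly, dereferenceable(size)
+        //   `&mut T` (UniqueBorrowed)        => NoAliasMutRef, dereferenceable(size)
+        //   `&mut T: !Unpin` (UniqueBorrowedPinned) => no aliasing attrs, dereferenceable(size)
+        //   `Box<T>` (UniqueOwned)           => noalias (unless -Zbox-noalias=off), dereferenceable(0)
+        //   `&UnsafeCell<T>` (SharedMutable) => no aliasing attributes, dereferenceable(0)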
+
+ let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| -> Result<_, FnAbiError<'tcx>> {
+ let is_return = arg_idx.is_none();
+
+ let layout = self.layout_of(ty)?;
+ let layout = if force_thin_self_ptr && arg_idx == Some(0) {
+ // Don't pass the vtable, it's not an argument of the virtual fn.
+ // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
+ // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
+ make_thin_self_ptr(self, layout)
+ } else {
+ layout
+ };
+
+ let mut arg = ArgAbi::new(self, layout, |layout, scalar, offset| {
+ let mut attrs = ArgAttributes::new();
+ adjust_for_rust_scalar(&mut attrs, scalar, *layout, offset, is_return);
+ attrs
+ });
+
+ if arg.layout.is_zst() {
+ // For some forsaken reason, x86_64-pc-windows-gnu
+ // doesn't ignore zero-sized struct arguments.
+ // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl,uclibc}.
+ if is_return
+ || rust_abi
+ || (!win_x64_gnu
+ && !linux_s390x_gnu_like
+ && !linux_sparc64_gnu_like
+ && !linux_powerpc_gnu_like)
+ {
+ arg.mode = PassMode::Ignore;
+ }
+ }
+
+ Ok(arg)
+ };
+
+ let mut fn_abi = FnAbi {
+ ret: arg_of(sig.output(), None)?,
+ args: inputs
+ .iter()
+ .copied()
+ .chain(extra_args.iter().copied())
+ .chain(caller_location)
+ .enumerate()
+ .map(|(i, ty)| arg_of(ty, Some(i)))
+ .collect::<Result<_, _>>()?,
+ c_variadic: sig.c_variadic,
+ fixed_count: inputs.len(),
+ conv,
+ can_unwind: fn_can_unwind(self.tcx(), fn_def_id, sig.abi),
+ };
+ self.fn_abi_adjust_for_abi(&mut fn_abi, sig.abi)?;
+ debug!("fn_abi_new_uncached = {:?}", fn_abi);
+ Ok(self.tcx.arena.alloc(fn_abi))
+ }
+
+ fn fn_abi_adjust_for_abi(
+ &self,
+ fn_abi: &mut FnAbi<'tcx, Ty<'tcx>>,
+ abi: SpecAbi,
+ ) -> Result<(), FnAbiError<'tcx>> {
+ if abi == SpecAbi::Unadjusted {
+ return Ok(());
+ }
+
+ if abi == SpecAbi::Rust
+ || abi == SpecAbi::RustCall
+ || abi == SpecAbi::RustIntrinsic
+ || abi == SpecAbi::PlatformIntrinsic
+ {
+ let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
+ if arg.is_ignore() {
+ return;
+ }
+
+ match arg.layout.abi {
+ Abi::Aggregate { .. } => {}
+
+ // This is a fun case! The gist of what this is doing is
+ // that we want callers and callees to always agree on the
+ // ABI of how they pass SIMD arguments. If we were to *not*
+ // make these arguments indirect then they'd be immediates
+                    // in LLVM, which means that they'd use whatever the
+ // appropriate ABI is for the callee and the caller. That
+ // means, for example, if the caller doesn't have AVX
+ // enabled but the callee does, then passing an AVX argument
+ // across this boundary would cause corrupt data to show up.
+ //
+ // This problem is fixed by unconditionally passing SIMD
+ // arguments through memory between callers and callees
+ // which should get them all to agree on ABI regardless of
+ // target feature sets. Some more information about this
+ // issue can be found in #44367.
+ //
+ // Note that the platform intrinsic ABI is exempt here as
+ // that's how we connect up to LLVM and it's unstable
+                    // anyway; we control all calls to it in libstd.
+ Abi::Vector { .. }
+ if abi != SpecAbi::PlatformIntrinsic
+ && self.tcx.sess.target.simd_types_indirect =>
+ {
+ arg.make_indirect();
+ return;
+ }
+
+ _ => return,
+ }
+
+ let size = arg.layout.size;
+ if arg.layout.is_unsized() || size > Pointer.size(self) {
+ arg.make_indirect();
+ } else {
+ // We want to pass small aggregates as immediates, but using
+                // an LLVM aggregate type for this leads to bad optimizations,
+ // so we pick an appropriately sized integer type instead.
+ arg.cast_to(Reg { kind: RegKind::Integer, size });
+ }
+ };
+ fixup(&mut fn_abi.ret);
+ for arg in &mut fn_abi.args {
+ fixup(arg);
+ }
+ } else {
+ fn_abi.adjust_for_foreign_abi(self, abi)?;
+ }
+
+ Ok(())
+ }
+}
+
+fn make_thin_self_ptr<'tcx>(
+ cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
+ layout: TyAndLayout<'tcx>,
+) -> TyAndLayout<'tcx> {
+ let tcx = cx.tcx();
+ let fat_pointer_ty = if layout.is_unsized() {
+ // unsized `self` is passed as a pointer to `self`
+ // FIXME (mikeyhew) change this to use &own if it is ever added to the language
+ tcx.mk_mut_ptr(layout.ty)
+ } else {
+ match layout.abi {
+ Abi::ScalarPair(..) => (),
+ _ => bug!("receiver type has unsupported layout: {:?}", layout),
+ }
+
+ // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
+ // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
+ // elsewhere in the compiler as a method on a `dyn Trait`.
+ // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
+ // get a built-in pointer type
+ let mut fat_pointer_layout = layout;
+ 'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
+ && !fat_pointer_layout.ty.is_region_ptr()
+ {
+ for i in 0..fat_pointer_layout.fields.count() {
+ let field_layout = fat_pointer_layout.field(cx, i);
+
+ if !field_layout.is_zst() {
+ fat_pointer_layout = field_layout;
+ continue 'descend_newtypes;
+ }
+ }
+
+ bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
+ }
+
+ fat_pointer_layout.ty
+ };
+
+ // we now have a type like `*mut RcBox<dyn Trait>`
+ // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
+ // this is understood as a special case elsewhere in the compiler
+ let unit_ptr_ty = tcx.mk_mut_ptr(tcx.mk_unit());
+
+ TyAndLayout {
+ ty: fat_pointer_ty,
+
+ // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing the `Result`
+ // should always work because the type is always `*mut ()`.
+ ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/list.rs b/compiler/rustc_middle/src/ty/list.rs
new file mode 100644
index 000000000..db3b5cfd1
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/list.rs
@@ -0,0 +1,215 @@
+use crate::arena::Arena;
+use rustc_serialize::{Encodable, Encoder};
+use std::alloc::Layout;
+use std::cmp::Ordering;
+use std::fmt;
+use std::hash::{Hash, Hasher};
+use std::iter;
+use std::mem;
+use std::ops::Deref;
+use std::ptr;
+use std::slice;
+
+/// `List<T>` is a bit like `&[T]`, but with some critical differences.
+/// - IMPORTANT: Every `List<T>` is *required* to have unique contents. The
+/// type's correctness relies on this, *but it does not enforce it*.
+/// Therefore, any code that creates a `List<T>` must ensure uniqueness
+/// itself. In practice this is achieved by interning.
+/// - The length is stored within the `List<T>`, so `&List<Ty>` is a thin
+/// pointer.
+/// - Because of this, you cannot get a `List<T>` that is a sub-list of another
+/// `List<T>`. You can get a sub-slice `&[T]`, however.
+/// - `List<T>` can be used with `CopyTaggedPtr`, which is useful within
+/// structs whose size must be minimized.
+/// - Because of the uniqueness assumption, we can use the address of a
+/// `List<T>` for faster equality comparisons and hashing.
+/// - `T` must be `Copy`. This lets `List<T>` be stored in a dropless arena and
+/// iterators return a `T` rather than a `&T`.
+/// - `T` must not be zero-sized.
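+///
+/// Illustrative example (not a real doctest; `List`s are created by interning
+/// through `TyCtxt`, e.g. `tcx.intern_type_list`):
+///
+/// ```ignore (illustrative)
+/// let a = tcx.intern_type_list(&[ty]);
+/// let b = tcx.intern_type_list(&[ty]);
+/// // Unique contents mean equality and hashing reduce to pointer operations:
+/// assert!(std::ptr::eq(a, b));
+/// assert!(a == b);
+/// ```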
+#[repr(C)]
+pub struct List<T> {
+ len: usize,
+
+ /// Although this claims to be a zero-length array, in practice `len`
+ /// elements are actually present.
+ data: [T; 0],
+
+ opaque: OpaqueListContents,
+}
+
+extern "C" {
+ /// A dummy type used to force `List` to be unsized while not requiring
+ /// references to it be wide pointers.
+ type OpaqueListContents;
+}
+
+impl<T> List<T> {
+ /// Returns a reference to the (unique, static) empty list.
+ #[inline(always)]
+ pub fn empty<'a>() -> &'a List<T> {
+ #[repr(align(64))]
+ struct MaxAlign;
+
+ assert!(mem::align_of::<T>() <= mem::align_of::<MaxAlign>());
+
+ #[repr(C)]
+ struct InOrder<T, U>(T, U);
+
+ // The empty slice is static and contains a single `0` usize (for the
+ // length) that is 64-byte aligned, thus featuring the necessary
+ // trailing padding for elements with up to 64-byte alignment.
+ static EMPTY_SLICE: InOrder<usize, MaxAlign> = InOrder(0, MaxAlign);
+ unsafe { &*(&EMPTY_SLICE as *const _ as *const List<T>) }
+ }
+
+ pub fn len(&self) -> usize {
+ self.len
+ }
+}
+
+impl<T: Copy> List<T> {
+ /// Allocates a list from `arena` and copies the contents of `slice` into it.
+ ///
+ /// WARNING: the contents *must be unique*, such that no list with these
+ /// contents has been previously created. If not, operations such as `eq`
+ /// and `hash` might give incorrect results.
+ ///
+ /// Panics if `T` is `Drop`, or `T` is zero-sized, or the slice is empty
+ /// (because the empty list exists statically, and is available via
+ /// `empty()`).
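+    ///
+    /// The resulting allocation is laid out as a `usize` length followed
+    /// (modulo padding) by the `len` copied elements, matching the
+    /// `#[repr(C)]` declaration of `List` above.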
+ #[inline]
+ pub(super) fn from_arena<'tcx>(arena: &'tcx Arena<'tcx>, slice: &[T]) -> &'tcx List<T> {
+ assert!(!mem::needs_drop::<T>());
+ assert!(mem::size_of::<T>() != 0);
+ assert!(!slice.is_empty());
+
+ let (layout, _offset) =
+ Layout::new::<usize>().extend(Layout::for_value::<[T]>(slice)).unwrap();
+ let mem = arena.dropless.alloc_raw(layout) as *mut List<T>;
+ unsafe {
+ // Write the length
+ ptr::addr_of_mut!((*mem).len).write(slice.len());
+
+ // Write the elements
+ ptr::addr_of_mut!((*mem).data)
+ .cast::<T>()
+ .copy_from_nonoverlapping(slice.as_ptr(), slice.len());
+
+ &*mem
+ }
+ }
+
+ // If this method didn't exist, we would use `slice.iter` due to
+ // deref coercion.
+ //
+ // This would be weird, as `self.into_iter` iterates over `T` directly.
+ #[inline(always)]
+ pub fn iter(&self) -> <&'_ List<T> as IntoIterator>::IntoIter {
+ self.into_iter()
+ }
+}
+
+impl<T: fmt::Debug> fmt::Debug for List<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ (**self).fmt(f)
+ }
+}
+
+impl<S: Encoder, T: Encodable<S>> Encodable<S> for List<T> {
+ #[inline]
+ fn encode(&self, s: &mut S) {
+ (**self).encode(s);
+ }
+}
+
+impl<T: PartialEq> PartialEq for List<T> {
+ #[inline]
+ fn eq(&self, other: &List<T>) -> bool {
+ // Pointer equality implies list equality (due to the unique contents
+ // assumption).
+ ptr::eq(self, other)
+ }
+}
+
+impl<T: Eq> Eq for List<T> {}
+
+impl<T> Ord for List<T>
+where
+ T: Ord,
+{
+ fn cmp(&self, other: &List<T>) -> Ordering {
+ // Pointer equality implies list equality (due to the unique contents
+ // assumption), but the contents must be compared otherwise.
+ if self == other { Ordering::Equal } else { <[T] as Ord>::cmp(&**self, &**other) }
+ }
+}
+
+impl<T> PartialOrd for List<T>
+where
+ T: PartialOrd,
+{
+ fn partial_cmp(&self, other: &List<T>) -> Option<Ordering> {
+ // Pointer equality implies list equality (due to the unique contents
+ // assumption), but the contents must be compared otherwise.
+ if self == other {
+ Some(Ordering::Equal)
+ } else {
+ <[T] as PartialOrd>::partial_cmp(&**self, &**other)
+ }
+ }
+}
+
+impl<T> Hash for List<T> {
+ #[inline]
+ fn hash<H: Hasher>(&self, s: &mut H) {
+ // Pointer hashing is sufficient (due to the unique contents
+ // assumption).
+ (self as *const List<T>).hash(s)
+ }
+}
+
+impl<T> Deref for List<T> {
+ type Target = [T];
+ #[inline(always)]
+ fn deref(&self) -> &[T] {
+ self.as_ref()
+ }
+}
+
+impl<T> AsRef<[T]> for List<T> {
+ #[inline(always)]
+ fn as_ref(&self) -> &[T] {
+ unsafe { slice::from_raw_parts(self.data.as_ptr(), self.len) }
+ }
+}
+
+impl<'a, T: Copy> IntoIterator for &'a List<T> {
+ type Item = T;
+ type IntoIter = iter::Copied<<&'a [T] as IntoIterator>::IntoIter>;
+ #[inline(always)]
+ fn into_iter(self) -> Self::IntoIter {
+ self[..].iter().copied()
+ }
+}
+
+unsafe impl<T: Sync> Sync for List<T> {}
+
+unsafe impl<'a, T: 'a> rustc_data_structures::tagged_ptr::Pointer for &'a List<T> {
+ const BITS: usize = std::mem::align_of::<usize>().trailing_zeros() as usize;
+
+ #[inline]
+ fn into_usize(self) -> usize {
+ self as *const List<T> as usize
+ }
+
+ #[inline]
+ unsafe fn from_usize(ptr: usize) -> &'a List<T> {
+ &*(ptr as *const List<T>)
+ }
+
+ unsafe fn with_ref<R, F: FnOnce(&Self) -> R>(ptr: usize, f: F) -> R {
+ // `Self` is `&'a List<T>` which impls `Copy`, so this is fine.
+ let ptr = Self::from_usize(ptr);
+ f(&ptr)
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/mod.rs b/compiler/rustc_middle/src/ty/mod.rs
new file mode 100644
index 000000000..02da02568
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/mod.rs
@@ -0,0 +1,2518 @@
+//! Defines how the compiler represents types internally.
+//!
+//! Two important entities in this module are:
+//!
+//! - [`rustc_middle::ty::Ty`], used to represent the semantics of a type.
+//! - [`rustc_middle::ty::TyCtxt`], the central data structure in the compiler.
+//!
+//! For more information, see ["The `ty` module: representing types"] in the rustc-dev-guide.
+//!
+//! ["The `ty` module: representing types"]: https://rustc-dev-guide.rust-lang.org/ty.html
+
+pub use self::fold::{FallibleTypeFolder, TypeFoldable, TypeFolder, TypeSuperFoldable};
+pub use self::visit::{TypeSuperVisitable, TypeVisitable, TypeVisitor};
+pub use self::AssocItemContainer::*;
+pub use self::BorrowKind::*;
+pub use self::IntVarValue::*;
+pub use self::Variance::*;
+use crate::metadata::ModChild;
+use crate::middle::privacy::AccessLevels;
+use crate::mir::{Body, GeneratorLayout};
+use crate::traits::{self, Reveal};
+use crate::ty;
+use crate::ty::fast_reject::SimplifiedType;
+use crate::ty::util::Discr;
+pub use adt::*;
+pub use assoc::*;
+pub use generics::*;
+use rustc_ast as ast;
+use rustc_ast::node_id::NodeMap;
+use rustc_attr as attr;
+use rustc_data_structures::fingerprint::Fingerprint;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap, FxIndexSet};
+use rustc_data_structures::intern::{Interned, WithStableHash};
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_data_structures::tagged_ptr::CopyTaggedPtr;
+use rustc_hir as hir;
+use rustc_hir::def::{CtorKind, CtorOf, DefKind, LifetimeRes, Res};
+use rustc_hir::def_id::{CrateNum, DefId, LocalDefId, LocalDefIdMap};
+use rustc_hir::Node;
+use rustc_index::vec::IndexVec;
+use rustc_macros::HashStable;
+use rustc_query_system::ich::StableHashingContext;
+use rustc_span::hygiene::MacroKind;
+use rustc_span::symbol::{kw, sym, Ident, Symbol};
+use rustc_span::{ExpnId, Span};
+use rustc_target::abi::{Align, VariantIdx};
+pub use subst::*;
+pub use vtable::*;
+
+use std::fmt::Debug;
+use std::hash::{Hash, Hasher};
+use std::ops::ControlFlow;
+use std::{fmt, str};
+
+pub use crate::ty::diagnostics::*;
+pub use rustc_type_ir::InferTy::*;
+pub use rustc_type_ir::RegionKind::*;
+pub use rustc_type_ir::TyKind::*;
+pub use rustc_type_ir::*;
+
+pub use self::binding::BindingMode;
+pub use self::binding::BindingMode::*;
+pub use self::closure::{
+ is_ancestor_or_same_capture, place_to_string_for_capture, BorrowKind, CaptureInfo,
+ CapturedPlace, ClosureKind, MinCaptureInformationMap, MinCaptureList,
+ RootVariableMinCaptureList, UpvarCapture, UpvarCaptureMap, UpvarId, UpvarListMap, UpvarPath,
+ CAPTURE_STRUCT_LOCAL,
+};
+pub use self::consts::{
+ Const, ConstInt, ConstKind, ConstS, InferConst, ScalarInt, Unevaluated, ValTree,
+};
+pub use self::context::{
+ tls, CanonicalUserType, CanonicalUserTypeAnnotation, CanonicalUserTypeAnnotations,
+ CtxtInterners, DelaySpanBugEmitted, FreeRegionInfo, GeneratorDiagnosticData,
+ GeneratorInteriorTypeCause, GlobalCtxt, Lift, OnDiskCache, TyCtxt, TypeckResults, UserType,
+ UserTypeAnnotationIndex,
+};
+pub use self::instance::{Instance, InstanceDef};
+pub use self::list::List;
+pub use self::parameterized::ParameterizedOverTcx;
+pub use self::rvalue_scopes::RvalueScopes;
+pub use self::sty::BoundRegionKind::*;
+pub use self::sty::{
+ Article, Binder, BoundRegion, BoundRegionKind, BoundTy, BoundTyKind, BoundVar,
+ BoundVariableKind, CanonicalPolyFnSig, ClosureSubsts, ClosureSubstsParts, ConstVid,
+ EarlyBinder, EarlyBoundRegion, ExistentialPredicate, ExistentialProjection,
+ ExistentialTraitRef, FnSig, FreeRegion, GenSig, GeneratorSubsts, GeneratorSubstsParts,
+ InlineConstSubsts, InlineConstSubstsParts, ParamConst, ParamTy, PolyExistentialProjection,
+ PolyExistentialTraitRef, PolyFnSig, PolyGenSig, PolyTraitRef, ProjectionTy, Region, RegionKind,
+ RegionVid, TraitRef, TyKind, TypeAndMut, UpvarSubsts, VarianceDiagInfo,
+};
+pub use self::trait_def::TraitDef;
+
+pub mod _match;
+pub mod abstract_const;
+pub mod adjustment;
+pub mod binding;
+pub mod cast;
+pub mod codec;
+pub mod error;
+pub mod fast_reject;
+pub mod flags;
+pub mod fold;
+pub mod inhabitedness;
+pub mod layout;
+pub mod normalize_erasing_regions;
+pub mod print;
+pub mod query;
+pub mod relate;
+pub mod subst;
+pub mod trait_def;
+pub mod util;
+pub mod visit;
+pub mod vtable;
+pub mod walk;
+
+mod adt;
+mod assoc;
+mod closure;
+mod consts;
+mod context;
+mod diagnostics;
+mod erase_regions;
+mod generics;
+mod impls_ty;
+mod instance;
+mod list;
+mod parameterized;
+mod rvalue_scopes;
+mod structural_impls;
+mod sty;
+
+// Data types
+
+pub type RegisteredTools = FxHashSet<Ident>;
+
+#[derive(Debug)]
+pub struct ResolverOutputs {
+ pub visibilities: FxHashMap<LocalDefId, Visibility>,
+ /// This field is used to decide whether we should make `PRIVATE_IN_PUBLIC` a hard error.
+ pub has_pub_restricted: bool,
+ /// Item with a given `LocalDefId` was defined during macro expansion with ID `ExpnId`.
+ pub expn_that_defined: FxHashMap<LocalDefId, ExpnId>,
+ /// Reference span for definitions.
+ pub source_span: IndexVec<LocalDefId, Span>,
+ pub access_levels: AccessLevels,
+ pub extern_crate_map: FxHashMap<LocalDefId, CrateNum>,
+ pub maybe_unused_trait_imports: FxIndexSet<LocalDefId>,
+ pub maybe_unused_extern_crates: Vec<(LocalDefId, Span)>,
+ pub reexport_map: FxHashMap<LocalDefId, Vec<ModChild>>,
+ pub glob_map: FxHashMap<LocalDefId, FxHashSet<Symbol>>,
+ /// Extern prelude entries. The value is `true` if the entry was introduced
+    /// via an `extern crate` item rather than the `--extern` option or a compiler built-in.
+ pub extern_prelude: FxHashMap<Symbol, bool>,
+ pub main_def: Option<MainDefinition>,
+ pub trait_impls: FxIndexMap<DefId, Vec<LocalDefId>>,
+    /// A list of proc macro `LocalDefId`s, written out in the order in which
+    /// they are declared in the static array generated by `proc_macro_harness`.
+ pub proc_macros: Vec<LocalDefId>,
+ /// Mapping from ident span to path span for paths that don't exist as written, but that
+    /// exist under `std`. For example, the user wrote `str::from_utf8` instead of `std::str::from_utf8`.
+ pub confused_type_with_std_module: FxHashMap<Span, Span>,
+ pub registered_tools: RegisteredTools,
+}
+
+/// Resolutions that should only be used for lowering.
+/// This struct is meant to be consumed by lowering.
+#[derive(Debug)]
+pub struct ResolverAstLowering {
+ pub legacy_const_generic_args: FxHashMap<DefId, Option<Vec<usize>>>,
+
+ /// Resolutions for nodes that have a single resolution.
+ pub partial_res_map: NodeMap<hir::def::PartialRes>,
+ /// Resolutions for import nodes, which have multiple resolutions in different namespaces.
+ pub import_res_map: NodeMap<hir::def::PerNS<Option<Res<ast::NodeId>>>>,
+ /// Resolutions for labels (node IDs of their corresponding blocks or loops).
+ pub label_res_map: NodeMap<ast::NodeId>,
+ /// Resolutions for lifetimes.
+ pub lifetimes_res_map: NodeMap<LifetimeRes>,
+ /// Mapping from generics `def_id`s to TAIT generics `def_id`s.
+    /// For each captured lifetime (e.g., `'a`), we create a new lifetime parameter that is a
+    /// generic defined on the TAIT, so we have `type Foo<'a1> = ...`, and we establish a mapping
+    /// in this field from the original parameter `'a` to the new parameter `'a1`.
+ pub generics_def_id_map: Vec<FxHashMap<LocalDefId, LocalDefId>>,
+ /// Lifetime parameters that lowering will have to introduce.
+ pub extra_lifetime_params_map: NodeMap<Vec<(Ident, ast::NodeId, LifetimeRes)>>,
+
+ pub next_node_id: ast::NodeId,
+
+ pub node_id_to_def_id: FxHashMap<ast::NodeId, LocalDefId>,
+ pub def_id_to_node_id: IndexVec<LocalDefId, ast::NodeId>,
+
+ pub trait_map: NodeMap<Vec<hir::TraitCandidate>>,
+    /// A small map keeping the true kinds of built-in macros that appear to be fn-like on
+ /// the surface (`macro` items in libcore), but are actually attributes or derives.
+ pub builtin_macro_kinds: FxHashMap<LocalDefId, MacroKind>,
+}
+
+#[derive(Clone, Copy, Debug)]
+pub struct MainDefinition {
+ pub res: Res<ast::NodeId>,
+ pub is_import: bool,
+ pub span: Span,
+}
+
+impl MainDefinition {
+ pub fn opt_fn_def_id(self) -> Option<DefId> {
+ if let Res::Def(DefKind::Fn, def_id) = self.res { Some(def_id) } else { None }
+ }
+}
+
+/// The "header" of an impl is everything outside the body: a Self type, a trait
+/// ref (in the case of a trait impl), and a set of predicates (from the
+/// bounds / where-clauses).
+#[derive(Clone, Debug, TypeFoldable, TypeVisitable)]
+pub struct ImplHeader<'tcx> {
+ pub impl_def_id: DefId,
+ pub self_ty: Ty<'tcx>,
+ pub trait_ref: Option<TraitRef<'tcx>>,
+ pub predicates: Vec<Predicate<'tcx>>,
+}
+
+#[derive(Copy, Clone, Debug, TypeFoldable, TypeVisitable)]
+pub enum ImplSubject<'tcx> {
+ Trait(TraitRef<'tcx>),
+ Inherent(Ty<'tcx>),
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Hash, TyEncodable, TyDecodable, HashStable, Debug)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub enum ImplPolarity {
+ /// `impl Trait for Type`
+ Positive,
+ /// `impl !Trait for Type`
+ Negative,
+ /// `#[rustc_reservation_impl] impl Trait for Type`
+ ///
+ /// This is a "stability hack", not a real Rust feature.
+ /// See #64631 for details.
+ Reservation,
+}
+
+impl ImplPolarity {
+ /// Flips polarity by turning `Positive` into `Negative` and `Negative` into `Positive`.
+ pub fn flip(&self) -> Option<ImplPolarity> {
+ match self {
+ ImplPolarity::Positive => Some(ImplPolarity::Negative),
+ ImplPolarity::Negative => Some(ImplPolarity::Positive),
+ ImplPolarity::Reservation => None,
+ }
+ }
+}
+
+impl fmt::Display for ImplPolarity {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ Self::Positive => f.write_str("positive"),
+ Self::Negative => f.write_str("negative"),
+ Self::Reservation => f.write_str("reservation"),
+ }
+ }
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Copy, Hash, Encodable, Decodable, HashStable)]
+pub enum Visibility {
+ /// Visible everywhere (including in other crates).
+ Public,
+ /// Visible only in the given crate-local module.
+ Restricted(DefId),
+ /// Not visible anywhere in the local crate. This is the visibility of private external items.
+ Invisible,
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, HashStable, TyEncodable, TyDecodable)]
+pub enum BoundConstness {
+ /// `T: Trait`
+ NotConst,
+ /// `T: ~const Trait`
+ ///
+ /// Requires resolving to const only when we are in a const context.
+ ConstIfConst,
+}
+
+impl BoundConstness {
+ /// Reduce `self` and `constness` to two possible combined states instead of four.
+ pub fn and(&mut self, constness: hir::Constness) -> hir::Constness {
+ match (constness, self) {
+ (hir::Constness::Const, BoundConstness::ConstIfConst) => hir::Constness::Const,
+ (_, this) => {
+ *this = BoundConstness::NotConst;
+ hir::Constness::NotConst
+ }
+ }
+ }
+}
+
+impl fmt::Display for BoundConstness {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ Self::NotConst => f.write_str("normal"),
+ Self::ConstIfConst => f.write_str("`~const`"),
+ }
+ }
+}
+
+#[derive(Clone, Debug, PartialEq, Eq, Copy, Hash, TyEncodable, TyDecodable, HashStable)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub struct ClosureSizeProfileData<'tcx> {
+ /// Tuple containing the types of closure captures before the feature `capture_disjoint_fields`
+ pub before_feature_tys: Ty<'tcx>,
+ /// Tuple containing the types of closure captures after the feature `capture_disjoint_fields`
+ pub after_feature_tys: Ty<'tcx>,
+}
+
+pub trait DefIdTree: Copy {
+ fn opt_parent(self, id: DefId) -> Option<DefId>;
+
+ #[inline]
+ #[track_caller]
+ fn parent(self, id: DefId) -> DefId {
+ match self.opt_parent(id) {
+ Some(id) => id,
+ // not `unwrap_or_else` to avoid breaking caller tracking
+ None => bug!("{id:?} doesn't have a parent"),
+ }
+ }
+
+ #[inline]
+ #[track_caller]
+ fn opt_local_parent(self, id: LocalDefId) -> Option<LocalDefId> {
+ self.opt_parent(id.to_def_id()).map(DefId::expect_local)
+ }
+
+ #[inline]
+ #[track_caller]
+ fn local_parent(self, id: LocalDefId) -> LocalDefId {
+ self.parent(id.to_def_id()).expect_local()
+ }
+
+ fn is_descendant_of(self, mut descendant: DefId, ancestor: DefId) -> bool {
+ if descendant.krate != ancestor.krate {
+ return false;
+ }
+
+ while descendant != ancestor {
+ match self.opt_parent(descendant) {
+ Some(parent) => descendant = parent,
+ None => return false,
+ }
+ }
+ true
+ }
+}
+
+impl<'tcx> DefIdTree for TyCtxt<'tcx> {
+ #[inline]
+ fn opt_parent(self, id: DefId) -> Option<DefId> {
+ self.def_key(id).parent.map(|index| DefId { index, ..id })
+ }
+}
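+
+// Illustrative sketch (hypothetical names): for a nested module `crate::a::b`,
+// `is_descendant_of` walks the parent chain, so both
+// `tcx.is_descendant_of(b_def_id, a_def_id)` and
+// `tcx.is_descendant_of(b_def_id, crate_root_def_id)` hold.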
+
+impl Visibility {
+ /// Returns `true` if an item with this visibility is accessible from the given block.
+ pub fn is_accessible_from<T: DefIdTree>(self, module: DefId, tree: T) -> bool {
+ let restriction = match self {
+ // Public items are visible everywhere.
+ Visibility::Public => return true,
+ // Private items from other crates are visible nowhere.
+ Visibility::Invisible => return false,
+ // Restricted items are visible in an arbitrary local module.
+ Visibility::Restricted(other) if other.krate != module.krate => return false,
+ Visibility::Restricted(module) => module,
+ };
+
+ tree.is_descendant_of(module, restriction)
+ }
+
+    /// Returns `true` if this visibility is at least as accessible as the given visibility.
+ pub fn is_at_least<T: DefIdTree>(self, vis: Visibility, tree: T) -> bool {
+ let vis_restriction = match vis {
+ Visibility::Public => return self == Visibility::Public,
+ Visibility::Invisible => return true,
+ Visibility::Restricted(module) => module,
+ };
+
+ self.is_accessible_from(vis_restriction, tree)
+ }
+
+    /// Returns `true` if this item is visible anywhere in the local crate.
+ pub fn is_visible_locally(self) -> bool {
+ match self {
+ Visibility::Public => true,
+ Visibility::Restricted(def_id) => def_id.is_local(),
+ Visibility::Invisible => false,
+ }
+ }
+
+ pub fn is_public(self) -> bool {
+ matches!(self, Visibility::Public)
+ }
+}
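+
+// Illustrative (hypothetical `DefId` names): an item declared `pub(in crate::foo)`
+// has `Visibility::Restricted(foo_def_id)`; it `is_accessible_from` exactly `foo`
+// and its descendant modules, and `is_visible_locally` returns `true` for it.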
+
+/// The crate variances map is computed during typeck and contains the
+/// variance of every item in the local crate. You should not use it
+/// directly, because doing so will make your pass dependent on the
+/// HIR of every item in the local crate. Instead, use
+/// `tcx.variances_of()` to get the variance for a *particular*
+/// item.
+#[derive(HashStable, Debug)]
+pub struct CrateVariancesMap<'tcx> {
+ /// For each item with generics, maps to a vector of the variance
+ /// of its generics. If an item has no generics, it will have no
+ /// entry.
+ pub variances: FxHashMap<DefId, &'tcx [ty::Variance]>,
+}
+
+/// Contains information needed to resolve types and (in the future) look up
+/// the types of AST nodes.
+#[derive(Copy, Clone, PartialEq, Eq, Hash)]
+pub struct CReaderCacheKey {
+ pub cnum: Option<CrateNum>,
+ pub pos: usize,
+}
+
+/// Represents a type.
+///
+/// IMPORTANT:
+/// - This is a very "dumb" struct (with no derives and no `impls`).
+/// - Values of this type are always interned and thus unique, and are stored
+/// as an `Interned<TyS>`.
+/// - `Ty` (which contains a reference to a `Interned<TyS>`) or `Interned<TyS>`
+/// should be used everywhere instead of `TyS`. In particular, `Ty` has most
+/// of the relevant methods.
+#[derive(PartialEq, Eq, PartialOrd, Ord)]
+#[allow(rustc::usage_of_ty_tykind)]
+pub(crate) struct TyS<'tcx> {
+ /// This field shouldn't be used directly and may be removed in the future.
+ /// Use `Ty::kind()` instead.
+ kind: TyKind<'tcx>,
+
+ /// This field provides fast access to information that is also contained
+ /// in `kind`.
+ ///
+ /// This field shouldn't be used directly and may be removed in the future.
+ /// Use `Ty::flags()` instead.
+ flags: TypeFlags,
+
+ /// This field provides fast access to information that is also contained
+ /// in `kind`.
+ ///
+ /// This is a kind of confusing thing: it stores the smallest
+ /// binder such that
+ ///
+ /// (a) the binder itself captures nothing but
+ /// (b) all the late-bound things within the type are captured
+ /// by some sub-binder.
+ ///
+ /// So, for a type without any late-bound things, like `u32`, this
+ /// will be *innermost*, because that is the innermost binder that
+ /// captures nothing. But for a type `&'D u32`, where `'D` is a
+ /// late-bound region with De Bruijn index `D`, this would be `D + 1`
+ /// -- the binder itself does not capture `D`, but `D` is captured
+ /// by an inner binder.
+ ///
+ /// We call this concept an "exclusive" binder `D` because all
+ /// De Bruijn indices within the type are contained within `0..D`
+ /// (exclusive).
+ outer_exclusive_binder: ty::DebruijnIndex,
+}
+
+// `TyS` is used a lot. Make sure it doesn't unintentionally get bigger.
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+static_assert_size!(TyS<'_>, 40);
+
+// We are actually storing a stable hash cache next to the type, so let's
+// also check the full size
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+static_assert_size!(WithStableHash<TyS<'_>>, 56);
+
+/// Use this rather than `TyS`, whenever possible.
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, HashStable)]
+#[rustc_diagnostic_item = "Ty"]
+#[rustc_pass_by_value]
+pub struct Ty<'tcx>(Interned<'tcx, WithStableHash<TyS<'tcx>>>);
+
+impl<'tcx> TyCtxt<'tcx> {
+ /// A "bool" type used in rustc_mir_transform unit tests when we
+ /// have not spun up a TyCtxt.
+ pub const BOOL_TY_FOR_UNIT_TESTING: Ty<'tcx> = Ty(Interned::new_unchecked(&WithStableHash {
+ internee: TyS {
+ kind: ty::Bool,
+ flags: TypeFlags::empty(),
+ outer_exclusive_binder: DebruijnIndex::from_usize(0),
+ },
+ stable_hash: Fingerprint::ZERO,
+ }));
+}
+
+impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for TyS<'tcx> {
+ #[inline]
+ fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+ let TyS {
+ kind,
+
+ // The other fields just provide fast access to information that is
+ // also contained in `kind`, so no need to hash them.
+ flags: _,
+
+ outer_exclusive_binder: _,
+ } = self;
+
+ kind.hash_stable(hcx, hasher)
+ }
+}
+
+impl ty::EarlyBoundRegion {
+    /// Does this early-bound region have a name? Early-bound regions
+    /// always have names except when using anonymous lifetimes (`'_`).
+ pub fn has_name(&self) -> bool {
+ self.name != kw::UnderscoreLifetime
+ }
+}
+
+/// Represents a predicate.
+///
+/// See comments on `TyS`, which apply here too (albeit for
+/// `PredicateS`/`Predicate` rather than `TyS`/`Ty`).
+#[derive(Debug)]
+pub(crate) struct PredicateS<'tcx> {
+ kind: Binder<'tcx, PredicateKind<'tcx>>,
+ flags: TypeFlags,
+ /// See the comment for the corresponding field of [TyS].
+ outer_exclusive_binder: ty::DebruijnIndex,
+}
+
+// This type is used a lot. Make sure it doesn't unintentionally get bigger.
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+static_assert_size!(PredicateS<'_>, 56);
+
+/// Use this rather than `PredicateS`, whenever possible.
+#[derive(Clone, Copy, PartialEq, Eq, Hash)]
+#[rustc_pass_by_value]
+pub struct Predicate<'tcx>(Interned<'tcx, PredicateS<'tcx>>);
+
+impl<'tcx> Predicate<'tcx> {
+ /// Gets the inner `Binder<'tcx, PredicateKind<'tcx>>`.
+ #[inline]
+ pub fn kind(self) -> Binder<'tcx, PredicateKind<'tcx>> {
+ self.0.kind
+ }
+
+ #[inline(always)]
+ pub fn flags(self) -> TypeFlags {
+ self.0.flags
+ }
+
+ #[inline(always)]
+ pub fn outer_exclusive_binder(self) -> DebruijnIndex {
+ self.0.outer_exclusive_binder
+ }
+
+ /// Flips the polarity of a Predicate.
+ ///
+    /// Given a `T: Trait` predicate, it returns `T: !Trait`; given `T: !Trait`, it returns `T: Trait`.
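+    ///
+    /// A minimal sketch of the intended use (illustrative; `pred: Predicate<'_>`
+    /// and `tcx` assumed in scope):
+    /// ```ignore (illustrative)
+    /// // If `pred` encodes `T: Send`, the result encodes `T: !Send`; for a
+    /// // `Reservation` polarity there is no flip, so `None` is returned.
+    /// let flipped = pred.flip_polarity(tcx);
+    /// ```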
+ pub fn flip_polarity(self, tcx: TyCtxt<'tcx>) -> Option<Predicate<'tcx>> {
+ let kind = self
+ .kind()
+ .map_bound(|kind| match kind {
+ PredicateKind::Trait(TraitPredicate { trait_ref, constness, polarity }) => {
+ Some(PredicateKind::Trait(TraitPredicate {
+ trait_ref,
+ constness,
+ polarity: polarity.flip()?,
+ }))
+ }
+
+ _ => None,
+ })
+ .transpose()?;
+
+ Some(tcx.mk_predicate(kind))
+ }
+
+ pub fn without_const(mut self, tcx: TyCtxt<'tcx>) -> Self {
+ if let PredicateKind::Trait(TraitPredicate { trait_ref, constness, polarity }) = self.kind().skip_binder()
+ && constness != BoundConstness::NotConst
+ {
+ self = tcx.mk_predicate(self.kind().rebind(PredicateKind::Trait(TraitPredicate {
+ trait_ref,
+ constness: BoundConstness::NotConst,
+ polarity,
+ })));
+ }
+ self
+ }
+}
+
+impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for Predicate<'tcx> {
+ fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+ let PredicateS {
+ ref kind,
+
+ // The other fields just provide fast access to information that is
+ // also contained in `kind`, so no need to hash them.
+ flags: _,
+ outer_exclusive_binder: _,
+ } = self.0.0;
+
+ kind.hash_stable(hcx, hasher);
+ }
+}
+
+impl rustc_errors::IntoDiagnosticArg for Predicate<'_> {
+ fn into_diagnostic_arg(self) -> rustc_errors::DiagnosticArgValue<'static> {
+ rustc_errors::DiagnosticArgValue::Str(std::borrow::Cow::Owned(self.to_string()))
+ }
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, Hash, TyEncodable, TyDecodable)]
+#[derive(HashStable, TypeFoldable, TypeVisitable)]
+pub enum PredicateKind<'tcx> {
+ /// Corresponds to `where Foo: Bar<A, B, C>`. `Foo` here would be
+ /// the `Self` type of the trait reference and `A`, `B`, and `C`
+ /// would be the type parameters.
+ Trait(TraitPredicate<'tcx>),
+
+ /// `where 'a: 'b`
+ RegionOutlives(RegionOutlivesPredicate<'tcx>),
+
+ /// `where T: 'a`
+ TypeOutlives(TypeOutlivesPredicate<'tcx>),
+
+ /// `where <T as TraitRef>::Name == X`, approximately.
+ /// See the `ProjectionPredicate` struct for details.
+ Projection(ProjectionPredicate<'tcx>),
+
+ /// No syntax: `T` well-formed.
+ WellFormed(GenericArg<'tcx>),
+
+ /// Trait must be object-safe.
+ ObjectSafe(DefId),
+
+ /// No direct syntax. May be thought of as `where T: FnFoo<...>`
+ /// for some substitutions `...` and `T` being a closure type.
+ /// Satisfied (or refuted) once we know the closure's kind.
+ ClosureKind(DefId, SubstsRef<'tcx>, ClosureKind),
+
+ /// `T1 <: T2`
+ ///
+ /// This obligation is created most often when we have two
+ /// unresolved type variables and hence don't have enough
+ /// information to process the subtyping obligation yet.
+ Subtype(SubtypePredicate<'tcx>),
+
+ /// `T1` coerced to `T2`
+ ///
+ /// Like a subtyping obligation, this is created most often
+ /// when we have two unresolved type variables and hence
+ /// don't have enough information to process the coercion
+ /// obligation yet. At the moment, we actually process coercions
+ /// very much like subtyping and don't handle the full coercion
+ /// logic.
+ Coerce(CoercePredicate<'tcx>),
+
+ /// Constant initializer must evaluate successfully.
+ ConstEvaluatable(ty::Unevaluated<'tcx, ()>),
+
+ /// Constants must be equal. The first component is the const that is expected.
+ ConstEquate(Const<'tcx>, Const<'tcx>),
+
+ /// Represents a type found in the environment that we can use for implied bounds.
+ ///
+ /// Only used for Chalk.
+ TypeWellFormedFromEnv(Ty<'tcx>),
+}
+
+/// The crate outlives map is computed during typeck and contains the
+/// outlives of every item in the local crate. You should not use it
+/// directly, because to do so will make your pass dependent on the
+/// HIR of every item in the local crate. Instead, use
+/// `tcx.inferred_outlives_of()` to get the outlives for a *particular*
+/// item.
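+///
+/// For example (illustrative), given
+/// ```ignore (illustrative)
+/// struct Foo<'a, T>(&'a T);
+/// ```
+/// the map records the inferred bound `T: 'a` for `Foo`.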
+#[derive(HashStable, Debug)]
+pub struct CratePredicatesMap<'tcx> {
+    /// For each item with outlives bounds, maps to a vector of its
+    /// outlives predicates. If an item has no outlives bounds, it has
+    /// no entry.
+ pub predicates: FxHashMap<DefId, &'tcx [(Predicate<'tcx>, Span)]>,
+}
+
+impl<'tcx> Predicate<'tcx> {
+ /// Performs a substitution suitable for going from a
+ /// poly-trait-ref to supertraits that must hold if that
+ /// poly-trait-ref holds. This is slightly different from a normal
+ /// substitution in terms of what happens with bound regions. See
+ /// lengthy comment below for details.
+ pub fn subst_supertrait(
+ self,
+ tcx: TyCtxt<'tcx>,
+ trait_ref: &ty::PolyTraitRef<'tcx>,
+ ) -> Predicate<'tcx> {
+ // The interaction between HRTB and supertraits is not entirely
+ // obvious. Let me walk you (and myself) through an example.
+ //
+ // Let's start with an easy case. Consider two traits:
+ //
+ // trait Foo<'a>: Bar<'a,'a> { }
+ // trait Bar<'b,'c> { }
+ //
+ // Now, if we have a trait reference `for<'x> T: Foo<'x>`, then
+ // we can deduce that `for<'x> T: Bar<'x,'x>`. Basically, if we
+ // knew that `Foo<'x>` (for any 'x) then we also know that
+ // `Bar<'x,'x>` (for any 'x). This more-or-less falls out from
+ // normal substitution.
+ //
+ // In terms of why this is sound, the idea is that whenever there
+ // is an impl of `T:Foo<'a>`, it must show that `T:Bar<'a,'a>`
+ // holds. So if there is an impl of `T:Foo<'a>` that applies to
+ // all `'a`, then we must know that `T:Bar<'a,'a>` holds for all
+ // `'a`.
+ //
+ // Another example to be careful of is this:
+ //
+ // trait Foo1<'a>: for<'b> Bar1<'a,'b> { }
+ // trait Bar1<'b,'c> { }
+ //
+ // Here, if we have `for<'x> T: Foo1<'x>`, then what do we know?
+ // The answer is that we know `for<'x,'b> T: Bar1<'x,'b>`. The
+ // reason is similar to the previous example: any impl of
+ // `T:Foo1<'x>` must show that `for<'b> T: Bar1<'x, 'b>`. So
+ // basically we would want to collapse the bound lifetimes from
+ // the input (`trait_ref`) and the supertraits.
+ //
+ // To achieve this in practice is fairly straightforward. Let's
+ // consider the more complicated scenario:
+ //
+ // - We start out with `for<'x> T: Foo1<'x>`. In this case, `'x`
+ // has a De Bruijn index of 1. We want to produce `for<'x,'b> T: Bar1<'x,'b>`,
+ // where both `'x` and `'b` would have a DB index of 1.
+ // The substitution from the input trait-ref is therefore going to be
+ // `'a => 'x` (where `'x` has a DB index of 1).
+ // - The supertrait-ref is `for<'b> Bar1<'a,'b>`, where `'a` is an
+    //   early-bound parameter and `'b` is a late-bound parameter with a
+ // DB index of 1.
+ // - If we replace `'a` with `'x` from the input, it too will have
+ // a DB index of 1, and thus we'll have `for<'x,'b> Bar1<'x,'b>`
+ // just as we wanted.
+ //
+ // There is only one catch. If we just apply the substitution `'a
+ // => 'x` to `for<'b> Bar1<'a,'b>`, the substitution code will
+    // adjust the DB index because we are substituting into a binder (it
+ // tries to be so smart...) resulting in `for<'x> for<'b>
+ // Bar1<'x,'b>` (we have no syntax for this, so use your
+ // imagination). Basically the 'x will have DB index of 2 and 'b
+ // will have DB index of 1. Not quite what we want. So we apply
+ // the substitution to the *contents* of the trait reference,
+ // rather than the trait reference itself (put another way, the
+ // substitution code expects equal binding levels in the values
+ // from the substitution and the value being substituted into, and
+ // this trick achieves that).
+
+ // Working through the second example:
+ // trait_ref: for<'x> T: Foo1<'^0.0>; substs: [T, '^0.0]
+ // predicate: for<'b> Self: Bar1<'a, '^0.0>; substs: [Self, 'a, '^0.0]
+ // We want to end up with:
+ // for<'x, 'b> T: Bar1<'^0.0, '^0.1>
+ // To do this:
+ // 1) We must shift all bound vars in predicate by the length
+ // of trait ref's bound vars. So, we would end up with predicate like
+ // Self: Bar1<'a, '^0.1>
+ // 2) We can then apply the trait substs to this, ending up with
+ // T: Bar1<'^0.0, '^0.1>
+ // 3) Finally, to create the final bound vars, we concatenate the bound
+ // vars of the trait ref with those of the predicate:
+ // ['x, 'b]
+ let bound_pred = self.kind();
+ let pred_bound_vars = bound_pred.bound_vars();
+ let trait_bound_vars = trait_ref.bound_vars();
+ // 1) Self: Bar1<'a, '^0.0> -> Self: Bar1<'a, '^0.1>
+ let shifted_pred =
+ tcx.shift_bound_var_indices(trait_bound_vars.len(), bound_pred.skip_binder());
+ // 2) Self: Bar1<'a, '^0.1> -> T: Bar1<'^0.0, '^0.1>
+ let new = EarlyBinder(shifted_pred).subst(tcx, trait_ref.skip_binder().substs);
+ // 3) ['x] + ['b] -> ['x, 'b]
+ let bound_vars =
+ tcx.mk_bound_variable_kinds(trait_bound_vars.iter().chain(pred_bound_vars));
+ tcx.reuse_or_mk_predicate(self, ty::Binder::bind_with_vars(new, bound_vars))
+ }
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, Hash, TyEncodable, TyDecodable)]
+#[derive(HashStable, TypeFoldable, TypeVisitable)]
+pub struct TraitPredicate<'tcx> {
+ pub trait_ref: TraitRef<'tcx>,
+
+ pub constness: BoundConstness,
+
+ /// If polarity is Positive: we are proving that the trait is implemented.
+ ///
+ /// If polarity is Negative: we are proving that a negative impl of this trait
+ /// exists. (Note that coherence also checks whether negative impls of supertraits
+ /// exist via a series of predicates.)
+ ///
+    /// If polarity is Reservation: that's a bug.
+ pub polarity: ImplPolarity,
+}
+
+pub type PolyTraitPredicate<'tcx> = ty::Binder<'tcx, TraitPredicate<'tcx>>;
+
+impl<'tcx> TraitPredicate<'tcx> {
+ pub fn remap_constness(&mut self, param_env: &mut ParamEnv<'tcx>) {
+ *param_env = param_env.with_constness(self.constness.and(param_env.constness()))
+ }
+
+ /// Remap the constness of this predicate before emitting it for diagnostics.
+ pub fn remap_constness_diag(&mut self, param_env: ParamEnv<'tcx>) {
+        // This differs from `remap_constness` in that callees want to print this
+        // predicate in case of selection errors. `T: ~const Drop` bounds cannot end up
+        // here when the param_env is not const, because they are always satisfied in
+        // non-const contexts.
+ if let hir::Constness::NotConst = param_env.constness() {
+ self.constness = ty::BoundConstness::NotConst;
+ }
+ }
+
+ pub fn def_id(self) -> DefId {
+ self.trait_ref.def_id
+ }
+
+ pub fn self_ty(self) -> Ty<'tcx> {
+ self.trait_ref.self_ty()
+ }
+
+ #[inline]
+ pub fn is_const_if_const(self) -> bool {
+ self.constness == BoundConstness::ConstIfConst
+ }
+
+ pub fn is_constness_satisfied_by(self, constness: hir::Constness) -> bool {
+ match (self.constness, constness) {
+ (BoundConstness::NotConst, _)
+ | (BoundConstness::ConstIfConst, hir::Constness::Const) => true,
+ (BoundConstness::ConstIfConst, hir::Constness::NotConst) => false,
+ }
+ }
+}
+
+impl<'tcx> PolyTraitPredicate<'tcx> {
+ pub fn def_id(self) -> DefId {
+ // Ok to skip binder since trait `DefId` does not care about regions.
+ self.skip_binder().def_id()
+ }
+
+ pub fn self_ty(self) -> ty::Binder<'tcx, Ty<'tcx>> {
+ self.map_bound(|trait_ref| trait_ref.self_ty())
+ }
+
+ /// Remap the constness of this predicate before emitting it for diagnostics.
+ pub fn remap_constness_diag(&mut self, param_env: ParamEnv<'tcx>) {
+ *self = self.map_bound(|mut p| {
+ p.remap_constness_diag(param_env);
+ p
+ });
+ }
+
+ #[inline]
+ pub fn is_const_if_const(self) -> bool {
+ self.skip_binder().is_const_if_const()
+ }
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)]
+#[derive(HashStable, TypeFoldable, TypeVisitable)]
+pub struct OutlivesPredicate<A, B>(pub A, pub B); // `A: B`
+pub type RegionOutlivesPredicate<'tcx> = OutlivesPredicate<ty::Region<'tcx>, ty::Region<'tcx>>;
+pub type TypeOutlivesPredicate<'tcx> = OutlivesPredicate<Ty<'tcx>, ty::Region<'tcx>>;
+pub type PolyRegionOutlivesPredicate<'tcx> = ty::Binder<'tcx, RegionOutlivesPredicate<'tcx>>;
+pub type PolyTypeOutlivesPredicate<'tcx> = ty::Binder<'tcx, TypeOutlivesPredicate<'tcx>>;
+
+/// Encodes that `a` must be a subtype of `b`. The `a_is_expected` flag indicates
+/// whether the `a` type is the type that we should label as "expected" when
+/// presenting user diagnostics.
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, TyEncodable, TyDecodable)]
+#[derive(HashStable, TypeFoldable, TypeVisitable)]
+pub struct SubtypePredicate<'tcx> {
+ pub a_is_expected: bool,
+ pub a: Ty<'tcx>,
+ pub b: Ty<'tcx>,
+}
+pub type PolySubtypePredicate<'tcx> = ty::Binder<'tcx, SubtypePredicate<'tcx>>;
+
+/// Encodes that we have to coerce *from* the `a` type to the `b` type.
+#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, TyEncodable, TyDecodable)]
+#[derive(HashStable, TypeFoldable, TypeVisitable)]
+pub struct CoercePredicate<'tcx> {
+ pub a: Ty<'tcx>,
+ pub b: Ty<'tcx>,
+}
+pub type PolyCoercePredicate<'tcx> = ty::Binder<'tcx, CoercePredicate<'tcx>>;
+
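+/// The right-hand side of a projection predicate: either a type (for an
+/// associated type binding like `Item = Ty`) or, with associated consts,
+/// a const.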
+#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, TyEncodable, TyDecodable)]
+#[derive(HashStable, TypeFoldable, TypeVisitable)]
+pub enum Term<'tcx> {
+ Ty(Ty<'tcx>),
+ Const(Const<'tcx>),
+}
+
+impl<'tcx> From<Ty<'tcx>> for Term<'tcx> {
+ fn from(ty: Ty<'tcx>) -> Self {
+ Term::Ty(ty)
+ }
+}
+
+impl<'tcx> From<Const<'tcx>> for Term<'tcx> {
+ fn from(c: Const<'tcx>) -> Self {
+ Term::Const(c)
+ }
+}
+
+impl<'tcx> Term<'tcx> {
+ pub fn ty(&self) -> Option<Ty<'tcx>> {
+ if let Term::Ty(ty) = self { Some(*ty) } else { None }
+ }
+
+ pub fn ct(&self) -> Option<Const<'tcx>> {
+ if let Term::Const(c) = self { Some(*c) } else { None }
+ }
+
+ pub fn into_arg(self) -> GenericArg<'tcx> {
+ match self {
+ Term::Ty(ty) => ty.into(),
+ Term::Const(c) => c.into(),
+ }
+ }
+}
+
+/// This kind of predicate has no *direct* correspondent in the
+/// syntax, but it roughly corresponds to the syntactic forms:
+///
+/// 1. `T: TraitRef<..., Item = Type>`
+/// 2. `<T as TraitRef<...>>::Item == Type` (NYI)
+///
+/// In particular, form #1 is "desugared" to the combination of a
+/// normal trait predicate (`T: TraitRef<...>`) and one of these
+/// predicates. Form #2 is a broader form in that it also permits
+/// equality between arbitrary types. Processing an instance of
+/// Form #2 eventually yields one of these `ProjectionPredicate`
+/// instances to normalize the LHS.
+#[derive(Copy, Clone, PartialEq, Eq, Hash, TyEncodable, TyDecodable)]
+#[derive(HashStable, TypeFoldable, TypeVisitable)]
+pub struct ProjectionPredicate<'tcx> {
+ pub projection_ty: ProjectionTy<'tcx>,
+ pub term: Term<'tcx>,
+}
+
+pub type PolyProjectionPredicate<'tcx> = Binder<'tcx, ProjectionPredicate<'tcx>>;
+
+impl<'tcx> PolyProjectionPredicate<'tcx> {
+ /// Returns the `DefId` of the trait of the associated item being projected.
+ #[inline]
+ pub fn trait_def_id(&self, tcx: TyCtxt<'tcx>) -> DefId {
+ self.skip_binder().projection_ty.trait_def_id(tcx)
+ }
+
+ /// Get the [PolyTraitRef] required for this projection to be well formed.
+ /// Note that for generic associated types the predicates of the associated
+ /// type also need to be checked.
+ #[inline]
+ pub fn required_poly_trait_ref(&self, tcx: TyCtxt<'tcx>) -> PolyTraitRef<'tcx> {
+ // Note: unlike with `TraitRef::to_poly_trait_ref()`,
+ // `self.0.trait_ref` is permitted to have escaping regions.
+ // This is because here `self` has a `Binder` and so does our
+ // return value, so we are preserving the number of binding
+ // levels.
+ self.map_bound(|predicate| predicate.projection_ty.trait_ref(tcx))
+ }
+
+ pub fn term(&self) -> Binder<'tcx, Term<'tcx>> {
+ self.map_bound(|predicate| predicate.term)
+ }
+
+ /// The `DefId` of the `TraitItem` for the associated type.
+ ///
+ /// Note that this is not the `DefId` of the `TraitRef` containing this
+ /// associated type, which is in `tcx.associated_item(projection_def_id()).container`.
+ pub fn projection_def_id(&self) -> DefId {
+ // Ok to skip binder since trait `DefId` does not care about regions.
+ self.skip_binder().projection_ty.item_def_id
+ }
+}
+
+pub trait ToPolyTraitRef<'tcx> {
+ fn to_poly_trait_ref(&self) -> PolyTraitRef<'tcx>;
+}
+
+impl<'tcx> ToPolyTraitRef<'tcx> for PolyTraitPredicate<'tcx> {
+ fn to_poly_trait_ref(&self) -> PolyTraitRef<'tcx> {
+ self.map_bound_ref(|trait_pred| trait_pred.trait_ref)
+ }
+}
+
+pub trait ToPredicate<'tcx> {
+ fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Predicate<'tcx>;
+}
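+
+// A hypothetical usage sketch (illustrative): each of the `Poly*Predicate`
+// impls below lifts a more specific predicate into a `Predicate`, e.g.
+// `let p: ty::Predicate<'_> = poly_trait_pred.to_predicate(tcx);`.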
+
+impl<'tcx> ToPredicate<'tcx> for Binder<'tcx, PredicateKind<'tcx>> {
+ #[inline(always)]
+ fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Predicate<'tcx> {
+ tcx.mk_predicate(self)
+ }
+}
+
+impl<'tcx> ToPredicate<'tcx> for PolyTraitPredicate<'tcx> {
+ fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Predicate<'tcx> {
+ self.map_bound(PredicateKind::Trait).to_predicate(tcx)
+ }
+}
+
+impl<'tcx> ToPredicate<'tcx> for PolyRegionOutlivesPredicate<'tcx> {
+ fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Predicate<'tcx> {
+ self.map_bound(PredicateKind::RegionOutlives).to_predicate(tcx)
+ }
+}
+
+impl<'tcx> ToPredicate<'tcx> for PolyTypeOutlivesPredicate<'tcx> {
+ fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Predicate<'tcx> {
+ self.map_bound(PredicateKind::TypeOutlives).to_predicate(tcx)
+ }
+}
+
+impl<'tcx> ToPredicate<'tcx> for PolyProjectionPredicate<'tcx> {
+ fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Predicate<'tcx> {
+ self.map_bound(PredicateKind::Projection).to_predicate(tcx)
+ }
+}
+
+impl<'tcx> Predicate<'tcx> {
+ pub fn to_opt_poly_trait_pred(self) -> Option<PolyTraitPredicate<'tcx>> {
+ let predicate = self.kind();
+ match predicate.skip_binder() {
+ PredicateKind::Trait(t) => Some(predicate.rebind(t)),
+ PredicateKind::Projection(..)
+ | PredicateKind::Subtype(..)
+ | PredicateKind::Coerce(..)
+ | PredicateKind::RegionOutlives(..)
+ | PredicateKind::WellFormed(..)
+ | PredicateKind::ObjectSafe(..)
+ | PredicateKind::ClosureKind(..)
+ | PredicateKind::TypeOutlives(..)
+ | PredicateKind::ConstEvaluatable(..)
+ | PredicateKind::ConstEquate(..)
+ | PredicateKind::TypeWellFormedFromEnv(..) => None,
+ }
+ }
+
+ pub fn to_opt_poly_projection_pred(self) -> Option<PolyProjectionPredicate<'tcx>> {
+ let predicate = self.kind();
+ match predicate.skip_binder() {
+ PredicateKind::Projection(t) => Some(predicate.rebind(t)),
+ PredicateKind::Trait(..)
+ | PredicateKind::Subtype(..)
+ | PredicateKind::Coerce(..)
+ | PredicateKind::RegionOutlives(..)
+ | PredicateKind::WellFormed(..)
+ | PredicateKind::ObjectSafe(..)
+ | PredicateKind::ClosureKind(..)
+ | PredicateKind::TypeOutlives(..)
+ | PredicateKind::ConstEvaluatable(..)
+ | PredicateKind::ConstEquate(..)
+ | PredicateKind::TypeWellFormedFromEnv(..) => None,
+ }
+ }
+
+ pub fn to_opt_type_outlives(self) -> Option<PolyTypeOutlivesPredicate<'tcx>> {
+ let predicate = self.kind();
+ match predicate.skip_binder() {
+ PredicateKind::TypeOutlives(data) => Some(predicate.rebind(data)),
+ PredicateKind::Trait(..)
+ | PredicateKind::Projection(..)
+ | PredicateKind::Subtype(..)
+ | PredicateKind::Coerce(..)
+ | PredicateKind::RegionOutlives(..)
+ | PredicateKind::WellFormed(..)
+ | PredicateKind::ObjectSafe(..)
+ | PredicateKind::ClosureKind(..)
+ | PredicateKind::ConstEvaluatable(..)
+ | PredicateKind::ConstEquate(..)
+ | PredicateKind::TypeWellFormedFromEnv(..) => None,
+ }
+ }
+}
+
+/// Represents the bounds declared on a particular set of type
+/// parameters. Should eventually be generalized into a flat list of
+/// where-clauses. You can obtain an `InstantiatedPredicates` list from a
+/// `GenericPredicates` by using the `instantiate` method. Note that this method
+/// reflects an important semantic invariant of `InstantiatedPredicates`: while
+/// the `GenericPredicates` are expressed in terms of the bound type
+/// parameters of the impl/trait/whatever, an `InstantiatedPredicates` instance
+/// represents a set of bounds for some particular instantiation,
+/// meaning that the generic parameters have been substituted with
+/// their values.
+///
+/// Example:
+/// ```ignore (illustrative)
+/// struct Foo<T, U: Bar<T>> { ... }
+/// ```
+/// Here, the `GenericPredicates` for `Foo` would contain a list of bounds like
+/// `[[], [U:Bar<T>]]`. Now if there were some particular reference
+/// like `Foo<isize,usize>`, then the `InstantiatedPredicates` would be `[[],
+/// [usize:Bar<isize>]]`.
+#[derive(Clone, Debug, TypeFoldable, TypeVisitable)]
+pub struct InstantiatedPredicates<'tcx> {
+ pub predicates: Vec<Predicate<'tcx>>,
+ pub spans: Vec<Span>,
+}
+
+impl<'tcx> InstantiatedPredicates<'tcx> {
+ pub fn empty() -> InstantiatedPredicates<'tcx> {
+ InstantiatedPredicates { predicates: vec![], spans: vec![] }
+ }
+
+ pub fn is_empty(&self) -> bool {
+ self.predicates.is_empty()
+ }
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq, HashStable, TyEncodable, TyDecodable, Lift)]
+#[derive(TypeFoldable, TypeVisitable)]
+pub struct OpaqueTypeKey<'tcx> {
+ pub def_id: LocalDefId,
+ pub substs: SubstsRef<'tcx>,
+}
+
+#[derive(Copy, Clone, Debug, TypeFoldable, TypeVisitable, HashStable, TyEncodable, TyDecodable)]
+pub struct OpaqueHiddenType<'tcx> {
+ /// The span of this particular definition of the opaque type. So
+ /// for example:
+ ///
+ /// ```ignore (incomplete snippet)
+ /// type Foo = impl Baz;
+ /// fn bar() -> Foo {
+ /// // ^^^ This is the span we are looking for!
+ /// }
+ /// ```
+ ///
+ /// In cases where the fn returns `(impl Trait, impl Trait)` or
+ /// other such combinations, the result is currently
+ /// over-approximated, but better than nothing.
+ pub span: Span,
+
+ /// The type variable that represents the value of the opaque type
+ /// that we require. In other words, after we compile this function,
+    /// we will create a constraint like:
+ /// ```ignore (pseudo-rust)
+ /// Foo<'a, T> = ?C
+ /// ```
+ /// where `?C` is the value of this type variable. =) It may
+ /// naturally refer to the type and lifetime parameters in scope
+ /// in this function, though ultimately it should only reference
+ /// those that are arguments to `Foo` in the constraint above. (In
+ /// other words, `?C` should not include `'b`, even though it's a
+ /// lifetime parameter on `foo`.)
+ pub ty: Ty<'tcx>,
+}
+
+impl<'tcx> OpaqueHiddenType<'tcx> {
+ pub fn report_mismatch(&self, other: &Self, tcx: TyCtxt<'tcx>) {
+ // Found different concrete types for the opaque type.
+ let mut err = tcx.sess.struct_span_err(
+ other.span,
+ "concrete type differs from previous defining opaque type use",
+ );
+ err.span_label(other.span, format!("expected `{}`, got `{}`", self.ty, other.ty));
+ if self.span == other.span {
+ err.span_label(
+ self.span,
+ "this expression supplies two conflicting concrete types for the same opaque type",
+ );
+ } else {
+ err.span_note(self.span, "previous use here");
+ }
+ err.emit();
+ }
+}
+
+/// The "placeholder index" fully defines a placeholder region, type, or const. Placeholders are
+/// identified by both a universe, as well as a name residing within that universe. Distinct bound
+/// regions/types/consts within the same universe simply have an unknown relationship to one
+/// another.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord)]
+#[derive(HashStable, TyEncodable, TyDecodable)]
+pub struct Placeholder<T> {
+ pub universe: UniverseIndex,
+ pub name: T,
+}
+
+pub type PlaceholderRegion = Placeholder<BoundRegionKind>;
+
+pub type PlaceholderType = Placeholder<BoundVar>;
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, HashStable)]
+#[derive(TyEncodable, TyDecodable, PartialOrd, Ord)]
+pub struct BoundConst<'tcx> {
+ pub var: BoundVar,
+ pub ty: Ty<'tcx>,
+}
+
+pub type PlaceholderConst<'tcx> = Placeholder<BoundVar>;
+
+/// A `DefId` which, in case it is a const argument, is potentially bundled with
+/// the `DefId` of the generic parameter it instantiates.
+///
+/// This is used to avoid calls to `type_of` for const arguments during typeck,
+/// which would cause cycle errors.
+///
+/// ```rust
+/// struct A;
+/// impl A {
+/// fn foo<const N: usize>(&self) -> [u8; N] { [0; N] }
+/// // ^ const parameter
+/// }
+/// struct B;
+/// impl B {
+/// fn foo<const M: u8>(&self) -> usize { 42 }
+/// // ^ const parameter
+/// }
+///
+/// fn main() {
+/// let a = A;
+/// let _b = a.foo::<{ 3 + 7 }>();
+/// // ^^^^^^^^^ const argument
+/// }
+/// ```
+///
+/// Let's look at the call `a.foo::<{ 3 + 7 }>()` here. We do not know
+/// which `foo` is used until we know the type of `a`.
+///
+/// We only know the type of `a` once we are inside of `typeck(main)`.
+/// We also end up normalizing the type of `_b` during `typeck(main)` which
+/// requires us to evaluate the const argument.
+///
+/// To evaluate that const argument we need to know its type,
+/// which we would get using `type_of(const_arg)`. This requires us to
+/// resolve `foo` as it can be either `usize` or `u8` in this example.
+/// However, resolving `foo` once again requires `typeck(main)` to get the type of `a`,
+/// which results in a cycle.
+///
+/// In short we must not call `type_of(const_arg)` during `typeck(main)`.
+///
+/// When first creating the `ty::Const` of the const argument inside of `typeck` we have
+/// already resolved `foo` so we know which const parameter this argument instantiates.
+/// This means that we also know the expected result of `type_of(const_arg)` even if we
+/// aren't allowed to call that query: it is equal to `type_of(const_param)` which is
+/// trivial to compute.
+///
+/// If we now want to use that constant in a place which potentially needs its type
+/// we also pass the type of its `const_param`. This is the point of `WithOptConstParam`,
+/// except that instead of a `Ty` we bundle the `DefId` of the const parameter.
+/// Meaning that we need to use `type_of(const_param_did)` if `const_param_did` is `Some`
+/// to get the type of `did`.
+#[derive(Copy, Clone, Debug, TypeFoldable, TypeVisitable, Lift, TyEncodable, TyDecodable)]
+#[derive(PartialEq, Eq, PartialOrd, Ord)]
+#[derive(Hash, HashStable)]
+pub struct WithOptConstParam<T> {
+ pub did: T,
+ /// The `DefId` of the corresponding generic parameter in case `did` is
+ /// a const argument.
+ ///
+ /// Note that even if `did` is a const argument, this may still be `None`.
+ /// All queries taking `WithOptConstParam` start by calling `tcx.opt_const_param_of(def.did)`
+ /// to potentially update `param_did` in the case it is `None`.
+ pub const_param_did: Option<DefId>,
+}
+
+impl<T> WithOptConstParam<T> {
+ /// Creates a new `WithOptConstParam` setting `const_param_did` to `None`.
+ #[inline(always)]
+ pub fn unknown(did: T) -> WithOptConstParam<T> {
+ WithOptConstParam { did, const_param_did: None }
+ }
+}
+
+impl WithOptConstParam<LocalDefId> {
+ /// Returns `Some((did, param_did))` if `def_id` is a const argument,
+ /// `None` otherwise.
+ #[inline(always)]
+ pub fn try_lookup(did: LocalDefId, tcx: TyCtxt<'_>) -> Option<(LocalDefId, DefId)> {
+ tcx.opt_const_param_of(did).map(|param_did| (did, param_did))
+ }
+
+ /// In case `self` is unknown but `self.did` is a const argument, this returns
+ /// a `WithOptConstParam` with the correct `const_param_did`.
+ #[inline(always)]
+ pub fn try_upgrade(self, tcx: TyCtxt<'_>) -> Option<WithOptConstParam<LocalDefId>> {
+ if self.const_param_did.is_none() {
+ if let const_param_did @ Some(_) = tcx.opt_const_param_of(self.did) {
+ return Some(WithOptConstParam { did: self.did, const_param_did });
+ }
+ }
+
+ None
+ }
+
+ pub fn to_global(self) -> WithOptConstParam<DefId> {
+ WithOptConstParam { did: self.did.to_def_id(), const_param_did: self.const_param_did }
+ }
+
+ pub fn def_id_for_type_of(self) -> DefId {
+ if let Some(did) = self.const_param_did { did } else { self.did.to_def_id() }
+ }
+}
+
+impl WithOptConstParam<DefId> {
+ pub fn as_local(self) -> Option<WithOptConstParam<LocalDefId>> {
+ self.did
+ .as_local()
+ .map(|did| WithOptConstParam { did, const_param_did: self.const_param_did })
+ }
+
+ pub fn as_const_arg(self) -> Option<(LocalDefId, DefId)> {
+ if let Some(param_did) = self.const_param_did {
+ if let Some(did) = self.did.as_local() {
+ return Some((did, param_did));
+ }
+ }
+
+ None
+ }
+
+ pub fn is_local(self) -> bool {
+ self.did.is_local()
+ }
+
+ pub fn def_id_for_type_of(self) -> DefId {
+ self.const_param_did.unwrap_or(self.did)
+ }
+}
+
+/// When type checking, we use the `ParamEnv` to track
+/// details about the set of where-clauses that are in scope at this
+/// particular point.
+#[derive(Copy, Clone, Hash, PartialEq, Eq)]
+pub struct ParamEnv<'tcx> {
+ /// This packs both caller bounds and the reveal enum into one pointer.
+ ///
+ /// Caller bounds are `Obligation`s that the caller must satisfy. This is
+ /// basically the set of bounds on the in-scope type parameters, translated
+ /// into `Obligation`s, and elaborated and normalized.
+ ///
+ /// Use the `caller_bounds()` method to access.
+ ///
+ /// Typically, this is `Reveal::UserFacing`, but during codegen we
+ /// want `Reveal::All`.
+ ///
+ /// Note: This is packed, use the reveal() method to access it.
+ packed: CopyTaggedPtr<&'tcx List<Predicate<'tcx>>, ParamTag, true>,
+}
+
+#[derive(Copy, Clone)]
+struct ParamTag {
+ reveal: traits::Reveal,
+ constness: hir::Constness,
+}
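+
+// The tag is packed into the two low bits made available by the alignment of
+// the `List` pointer: bit 0 encodes `reveal` (0 = `UserFacing`, 1 = `All`) and
+// bit 1 encodes `constness` (0 = `NotConst`, 1 = `Const`), matching the
+// `into_usize`/`from_usize` tables below.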
+
+unsafe impl rustc_data_structures::tagged_ptr::Tag for ParamTag {
+ const BITS: usize = 2;
+ #[inline]
+ fn into_usize(self) -> usize {
+ match self {
+ Self { reveal: traits::Reveal::UserFacing, constness: hir::Constness::NotConst } => 0,
+ Self { reveal: traits::Reveal::All, constness: hir::Constness::NotConst } => 1,
+ Self { reveal: traits::Reveal::UserFacing, constness: hir::Constness::Const } => 2,
+ Self { reveal: traits::Reveal::All, constness: hir::Constness::Const } => 3,
+ }
+ }
+ #[inline]
+ unsafe fn from_usize(ptr: usize) -> Self {
+ match ptr {
+ 0 => Self { reveal: traits::Reveal::UserFacing, constness: hir::Constness::NotConst },
+ 1 => Self { reveal: traits::Reveal::All, constness: hir::Constness::NotConst },
+ 2 => Self { reveal: traits::Reveal::UserFacing, constness: hir::Constness::Const },
+ 3 => Self { reveal: traits::Reveal::All, constness: hir::Constness::Const },
+ _ => std::hint::unreachable_unchecked(),
+ }
+ }
+}
+
+impl<'tcx> fmt::Debug for ParamEnv<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("ParamEnv")
+ .field("caller_bounds", &self.caller_bounds())
+ .field("reveal", &self.reveal())
+ .field("constness", &self.constness())
+ .finish()
+ }
+}
+
+impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for ParamEnv<'tcx> {
+ fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
+ self.caller_bounds().hash_stable(hcx, hasher);
+ self.reveal().hash_stable(hcx, hasher);
+ self.constness().hash_stable(hcx, hasher);
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for ParamEnv<'tcx> {
+ fn try_fold_with<F: ty::fold::FallibleTypeFolder<'tcx>>(
+ self,
+ folder: &mut F,
+ ) -> Result<Self, F::Error> {
+ Ok(ParamEnv::new(
+ self.caller_bounds().try_fold_with(folder)?,
+ self.reveal().try_fold_with(folder)?,
+ self.constness().try_fold_with(folder)?,
+ ))
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for ParamEnv<'tcx> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ self.caller_bounds().visit_with(visitor)?;
+ self.reveal().visit_with(visitor)?;
+ self.constness().visit_with(visitor)
+ }
+}
+
+impl<'tcx> ParamEnv<'tcx> {
+ /// Construct a trait environment suitable for contexts where
+ /// there are no where-clauses in scope. Hidden types (like `impl
+ /// Trait`) are left hidden, so this is suitable for ordinary
+ /// type-checking.
+ #[inline]
+ pub fn empty() -> Self {
+ Self::new(List::empty(), Reveal::UserFacing, hir::Constness::NotConst)
+ }
+
+ #[inline]
+ pub fn caller_bounds(self) -> &'tcx List<Predicate<'tcx>> {
+ self.packed.pointer()
+ }
+
+ #[inline]
+ pub fn reveal(self) -> traits::Reveal {
+ self.packed.tag().reveal
+ }
+
+ #[inline]
+ pub fn constness(self) -> hir::Constness {
+ self.packed.tag().constness
+ }
+
+ #[inline]
+ pub fn is_const(self) -> bool {
+ self.packed.tag().constness == hir::Constness::Const
+ }
+
+ /// Construct a trait environment with no where-clauses in scope
+ /// where the values of all `impl Trait` and other hidden types
+ /// are revealed. This is suitable for monomorphized, post-typeck
+ /// environments like codegen or doing optimizations.
+ ///
+ /// N.B., if you want to have predicates in scope, use `ParamEnv::new`,
+ /// or invoke `param_env.with_reveal_all()`.
+ #[inline]
+ pub fn reveal_all() -> Self {
+ Self::new(List::empty(), Reveal::All, hir::Constness::NotConst)
+ }
+
+ /// Construct a trait environment with the given set of predicates.
+ #[inline]
+ pub fn new(
+ caller_bounds: &'tcx List<Predicate<'tcx>>,
+ reveal: Reveal,
+ constness: hir::Constness,
+ ) -> Self {
+ ty::ParamEnv { packed: CopyTaggedPtr::new(caller_bounds, ParamTag { reveal, constness }) }
+ }
+
+ pub fn with_user_facing(mut self) -> Self {
+ self.packed.set_tag(ParamTag { reveal: Reveal::UserFacing, ..self.packed.tag() });
+ self
+ }
+
+ #[inline]
+ pub fn with_constness(mut self, constness: hir::Constness) -> Self {
+ self.packed.set_tag(ParamTag { constness, ..self.packed.tag() });
+ self
+ }
+
+ #[inline]
+ pub fn with_const(mut self) -> Self {
+ self.packed.set_tag(ParamTag { constness: hir::Constness::Const, ..self.packed.tag() });
+ self
+ }
+
+ #[inline]
+ pub fn without_const(mut self) -> Self {
+ self.packed.set_tag(ParamTag { constness: hir::Constness::NotConst, ..self.packed.tag() });
+ self
+ }
+
+ #[inline]
+ pub fn remap_constness_with(&mut self, mut constness: ty::BoundConstness) {
+ *self = self.with_constness(constness.and(self.constness()))
+ }
+
+ /// Returns a new parameter environment with the same clauses, but
+ /// which "reveals" the true results of projections in all cases
+ /// (even for associated types that are specializable). This is
+ /// the desired behavior during codegen and certain other special
+ /// contexts; normally though we want to use `Reveal::UserFacing`,
+    /// which is the default.
+    ///
+    /// All opaque types in the caller_bounds of the `ParamEnv`
+    /// will be normalized to their underlying types.
+    /// See PR #65989 and issue #65918 for more details.
+ pub fn with_reveal_all_normalized(self, tcx: TyCtxt<'tcx>) -> Self {
+ if self.packed.tag().reveal == traits::Reveal::All {
+ return self;
+ }
+
+ ParamEnv::new(
+ tcx.normalize_opaque_types(self.caller_bounds()),
+ Reveal::All,
+ self.constness(),
+ )
+ }
+
+ /// Returns this same environment but with no caller bounds.
+ #[inline]
+ pub fn without_caller_bounds(self) -> Self {
+ Self::new(List::empty(), self.reveal(), self.constness())
+ }
+
+ /// Creates a suitable environment in which to perform trait
+ /// queries on the given value. When type-checking, this is simply
+ /// the pair of the environment plus value. But when reveal is set to
+ /// All, then if `value` does not reference any type parameters, we will
+ /// pair it with the empty environment. This improves caching and is generally
+ /// invisible.
+ ///
+ /// N.B., we preserve the environment when type-checking because it
+ /// is possible for the user to have wacky where-clauses like
+ /// `where Box<u32>: Copy`, which are clearly never
+ /// satisfiable. We generally want to behave as if they were true,
+ /// although the surrounding function is never reachable.
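+    ///
+    /// A minimal sketch (illustrative; `param_env`, `tcx`, and `ty` assumed
+    /// in scope):
+    /// ```ignore (illustrative)
+    /// // With `Reveal::All` and a fully-monomorphic `ty`, the caller bounds
+    /// // are dropped, which improves caching:
+    /// let query_input = param_env.with_reveal_all_normalized(tcx).and(ty);
+    /// ```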
+ pub fn and<T: TypeVisitable<'tcx>>(self, value: T) -> ParamEnvAnd<'tcx, T> {
+ match self.reveal() {
+ Reveal::UserFacing => ParamEnvAnd { param_env: self, value },
+
+ Reveal::All => {
+ if value.is_global() {
+ ParamEnvAnd { param_env: self.without_caller_bounds(), value }
+ } else {
+ ParamEnvAnd { param_env: self, value }
+ }
+ }
+ }
+ }
+}
+
+// FIXME(ecstaticmorse): Audit all occurrences of `without_const().to_predicate(tcx)` to ensure that
+// the constness of trait bounds is being propagated correctly.
+impl<'tcx> PolyTraitRef<'tcx> {
+ #[inline]
+ pub fn with_constness(self, constness: BoundConstness) -> PolyTraitPredicate<'tcx> {
+ self.map_bound(|trait_ref| ty::TraitPredicate {
+ trait_ref,
+ constness,
+ polarity: ty::ImplPolarity::Positive,
+ })
+ }
+
+ #[inline]
+ pub fn without_const(self) -> PolyTraitPredicate<'tcx> {
+ self.with_constness(BoundConstness::NotConst)
+ }
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, TypeFoldable, TypeVisitable)]
+#[derive(HashStable)]
+pub struct ParamEnvAnd<'tcx, T> {
+ pub param_env: ParamEnv<'tcx>,
+ pub value: T,
+}
+
+impl<'tcx, T> ParamEnvAnd<'tcx, T> {
+ pub fn into_parts(self) -> (ParamEnv<'tcx>, T) {
+ (self.param_env, self.value)
+ }
+
+ #[inline]
+ pub fn without_const(mut self) -> Self {
+ self.param_env = self.param_env.without_const();
+ self
+ }
+}
+
+#[derive(Copy, Clone, Debug, HashStable, Encodable, Decodable)]
+pub struct Destructor {
+ /// The `DefId` of the destructor method
+ pub did: DefId,
+ /// The constness of the destructor method
+ pub constness: hir::Constness,
+}
+
+bitflags! {
+ #[derive(HashStable, TyEncodable, TyDecodable)]
+ pub struct VariantFlags: u32 {
+ const NO_VARIANT_FLAGS = 0;
+ /// Indicates whether the field list of this variant is `#[non_exhaustive]`.
+ const IS_FIELD_LIST_NON_EXHAUSTIVE = 1 << 0;
+ /// Indicates whether this variant was obtained as part of recovering from
+ /// a syntactic error. May be incomplete or bogus.
+ const IS_RECOVERED = 1 << 1;
+ }
+}
+
+/// Definition of a variant -- a struct's fields or an enum variant.
+#[derive(Debug, HashStable, TyEncodable, TyDecodable)]
+pub struct VariantDef {
+ /// `DefId` that identifies the variant itself.
+ /// If this variant belongs to a struct or union, then this is a copy of its `DefId`.
+ pub def_id: DefId,
+ /// `DefId` that identifies the variant's constructor.
+ /// If this variant is a struct variant, then this is `None`.
+ pub ctor_def_id: Option<DefId>,
+ /// Variant or struct name.
+ pub name: Symbol,
+ /// Discriminant of this variant.
+ pub discr: VariantDiscr,
+ /// Fields of this variant.
+ pub fields: Vec<FieldDef>,
+ /// Type of constructor of variant.
+ pub ctor_kind: CtorKind,
+    /// Flags of the variant (e.g., whether the field list is non-exhaustive).
+ flags: VariantFlags,
+}
+
+impl VariantDef {
+ /// Creates a new `VariantDef`.
+ ///
+ /// `variant_did` is the `DefId` that identifies the enum variant (if this `VariantDef`
+ /// represents an enum variant).
+ ///
+ /// `ctor_did` is the `DefId` that identifies the constructor of unit or
+ /// tuple-variants/structs. If this is a `struct`-variant then this should be `None`.
+ ///
+ /// `parent_did` is the `DefId` of the `AdtDef` representing the enum or struct that
+ /// owns this variant. It is used for checking if a struct has `#[non_exhaustive]` w/out having
+ /// to go through the redirect of checking the ctor's attributes - but compiling a small crate
+ /// requires loading the `AdtDef`s for all the structs in the universe (e.g., coherence for any
+ /// built-in trait), and we do not want to load attributes twice.
+ ///
+ /// If someone speeds up attribute loading to not be a performance concern, they can
+ /// remove this hack and use the constructor `DefId` everywhere.
+ pub fn new(
+ name: Symbol,
+ variant_did: Option<DefId>,
+ ctor_def_id: Option<DefId>,
+ discr: VariantDiscr,
+ fields: Vec<FieldDef>,
+ ctor_kind: CtorKind,
+ adt_kind: AdtKind,
+ parent_did: DefId,
+ recovered: bool,
+ is_field_list_non_exhaustive: bool,
+ ) -> Self {
+ debug!(
+ "VariantDef::new(name = {:?}, variant_did = {:?}, ctor_def_id = {:?}, discr = {:?},
+ fields = {:?}, ctor_kind = {:?}, adt_kind = {:?}, parent_did = {:?})",
+ name, variant_did, ctor_def_id, discr, fields, ctor_kind, adt_kind, parent_did,
+ );
+
+ let mut flags = VariantFlags::NO_VARIANT_FLAGS;
+ if is_field_list_non_exhaustive {
+ flags |= VariantFlags::IS_FIELD_LIST_NON_EXHAUSTIVE;
+ }
+
+ if recovered {
+ flags |= VariantFlags::IS_RECOVERED;
+ }
+
+ VariantDef {
+ def_id: variant_did.unwrap_or(parent_did),
+ ctor_def_id,
+ name,
+ discr,
+ fields,
+ ctor_kind,
+ flags,
+ }
+ }
+
+ /// Is this field list non-exhaustive?
+ #[inline]
+ pub fn is_field_list_non_exhaustive(&self) -> bool {
+ self.flags.intersects(VariantFlags::IS_FIELD_LIST_NON_EXHAUSTIVE)
+ }
+
+ /// Was this variant obtained as part of recovering from a syntactic error?
+ #[inline]
+ pub fn is_recovered(&self) -> bool {
+ self.flags.intersects(VariantFlags::IS_RECOVERED)
+ }
+
+ /// Computes the `Ident` of this variant by looking up the `Span`
+ pub fn ident(&self, tcx: TyCtxt<'_>) -> Ident {
+ Ident::new(self.name, tcx.def_ident_span(self.def_id).unwrap())
+ }
+}
+
+impl PartialEq for VariantDef {
+ #[inline]
+ fn eq(&self, other: &Self) -> bool {
+ // There should be only one `VariantDef` for each `def_id`, therefore
+ // it is fine to implement `PartialEq` only based on `def_id`.
+ //
+ // Below, we exhaustively destructure `self` and `other` so that if the
+ // definition of `VariantDef` changes, a compile-error will be produced,
+ // reminding us to revisit this assumption.
+
+ let Self {
+ def_id: lhs_def_id,
+ ctor_def_id: _,
+ name: _,
+ discr: _,
+ fields: _,
+ ctor_kind: _,
+ flags: _,
+ } = &self;
+
+ let Self {
+ def_id: rhs_def_id,
+ ctor_def_id: _,
+ name: _,
+ discr: _,
+ fields: _,
+ ctor_kind: _,
+ flags: _,
+ } = other;
+
+ lhs_def_id == rhs_def_id
+ }
+}
+
+impl Eq for VariantDef {}
+
+impl Hash for VariantDef {
+ #[inline]
+ fn hash<H: Hasher>(&self, s: &mut H) {
+ // There should be only one `VariantDef` for each `def_id`, therefore
+ // it is fine to implement `Hash` only based on `def_id`.
+ //
+ // Below, we exhaustively destructure `self` so that if the definition
+ // of `VariantDef` changes, a compile-error will be produced, reminding
+ // us to revisit this assumption.
+
+ let Self { def_id, ctor_def_id: _, name: _, discr: _, fields: _, ctor_kind: _, flags: _ } =
+ &self;
+
+ def_id.hash(s)
+ }
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, HashStable)]
+pub enum VariantDiscr {
+ /// Explicit value for this variant, i.e., `X = 123`.
+ /// The `DefId` corresponds to the embedded constant.
+ Explicit(DefId),
+
+ /// The previous variant's discriminant plus one.
+ /// For efficiency reasons, the distance from the
+ /// last `Explicit` discriminant is being stored,
+ /// or `0` for the first variant, if it has none.
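+    ///
+    /// For example (illustrative): in `enum E { A, B = 10, C }`, `A` is
+    /// `Relative(0)`, `B` is `Explicit(..)`, and `C` is `Relative(1)`.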
+ Relative(u32),
+}
+
+#[derive(Debug, HashStable, TyEncodable, TyDecodable)]
+pub struct FieldDef {
+ pub did: DefId,
+ pub name: Symbol,
+ pub vis: Visibility,
+}
+
+impl PartialEq for FieldDef {
+ #[inline]
+ fn eq(&self, other: &Self) -> bool {
+ // There should be only one `FieldDef` for each `did`, therefore it is
+ // fine to implement `PartialEq` only based on `did`.
+ //
+ // Below, we exhaustively destructure `self` so that if the definition
+ // of `FieldDef` changes, a compile-error will be produced, reminding
+ // us to revisit this assumption.
+
+ let Self { did: lhs_did, name: _, vis: _ } = &self;
+
+ let Self { did: rhs_did, name: _, vis: _ } = other;
+
+ lhs_did == rhs_did
+ }
+}
+
+impl Eq for FieldDef {}
+
+impl Hash for FieldDef {
+ #[inline]
+ fn hash<H: Hasher>(&self, s: &mut H) {
+ // There should be only one `FieldDef` for each `did`, therefore it is
+ // fine to implement `Hash` only based on `did`.
+ //
+ // Below, we exhaustively destructure `self` so that if the definition
+ // of `FieldDef` changes, a compile-error will be produced, reminding
+ // us to revisit this assumption.
+
+ let Self { did, name: _, vis: _ } = &self;
+
+ did.hash(s)
+ }
+}
+
+bitflags! {
+ #[derive(TyEncodable, TyDecodable, Default, HashStable)]
+ pub struct ReprFlags: u8 {
+ const IS_C = 1 << 0;
+ const IS_SIMD = 1 << 1;
+ const IS_TRANSPARENT = 1 << 2;
+ // Internal only for now. If true, don't reorder fields.
+ const IS_LINEAR = 1 << 3;
+ // If true, the type's layout can be randomized using
+ // the seed stored in `ReprOptions.layout_seed`
+ const RANDOMIZE_LAYOUT = 1 << 4;
+ // Any of these flags being set prevent field reordering optimisation.
+ const IS_UNOPTIMISABLE = ReprFlags::IS_C.bits
+ | ReprFlags::IS_SIMD.bits
+ | ReprFlags::IS_LINEAR.bits;
+ }
+}
+
+/// Represents the repr options provided by the user.
+#[derive(Copy, Clone, Debug, Eq, PartialEq, TyEncodable, TyDecodable, Default, HashStable)]
+pub struct ReprOptions {
+ pub int: Option<attr::IntType>,
+ pub align: Option<Align>,
+ pub pack: Option<Align>,
+ pub flags: ReprFlags,
+ /// The seed to be used for randomizing a type's layout
+ ///
+ /// Note: This could technically be a `[u8; 16]` (a `u128`) which would
+ /// be the "most accurate" hash as it'd encompass the item and crate
+ /// hash without loss, but it does pay the price of being larger.
+ /// Everything's a tradeoff, a `u64` seed should be sufficient for our
+ /// purposes (primarily `-Z randomize-layout`)
+ pub field_shuffle_seed: u64,
+}
+
+impl ReprOptions {
+ pub fn new(tcx: TyCtxt<'_>, did: DefId) -> ReprOptions {
+ let mut flags = ReprFlags::empty();
+ let mut size = None;
+ let mut max_align: Option<Align> = None;
+ let mut min_pack: Option<Align> = None;
+
+ // Generate a deterministically-derived seed from the item's path hash
+ // to allow for cross-crate compilation to actually work
+ let mut field_shuffle_seed = tcx.def_path_hash(did).0.to_smaller_hash();
+
+        // If the user defined a custom seed for layout randomization, xor the item's
+        // path hash with the user-defined seed. This preserves determinism while still
+        // allowing users to further randomize layout generation, e.g., for fuzzing.
+ if let Some(user_seed) = tcx.sess.opts.unstable_opts.layout_seed {
+ field_shuffle_seed ^= user_seed;
+ }
+
+ for attr in tcx.get_attrs(did, sym::repr) {
+ for r in attr::parse_repr_attr(&tcx.sess, attr) {
+ flags.insert(match r {
+ attr::ReprC => ReprFlags::IS_C,
+ attr::ReprPacked(pack) => {
+ let pack = Align::from_bytes(pack as u64).unwrap();
+ min_pack = Some(if let Some(min_pack) = min_pack {
+ min_pack.min(pack)
+ } else {
+ pack
+ });
+ ReprFlags::empty()
+ }
+ attr::ReprTransparent => ReprFlags::IS_TRANSPARENT,
+ attr::ReprSimd => ReprFlags::IS_SIMD,
+ attr::ReprInt(i) => {
+ size = Some(i);
+ ReprFlags::empty()
+ }
+ attr::ReprAlign(align) => {
+ max_align = max_align.max(Some(Align::from_bytes(align as u64).unwrap()));
+ ReprFlags::empty()
+ }
+ });
+ }
+ }
+
+ // If `-Z randomize-layout` was enabled for the type definition then we can
+ // consider performing layout randomization
+ if tcx.sess.opts.unstable_opts.randomize_layout {
+ flags.insert(ReprFlags::RANDOMIZE_LAYOUT);
+ }
+
+ // This is here instead of layout because the choice must make it into metadata.
+ if !tcx.consider_optimizing(|| format!("Reorder fields of {:?}", tcx.def_path_str(did))) {
+ flags.insert(ReprFlags::IS_LINEAR);
+ }
+
+ Self { int: size, align: max_align, pack: min_pack, flags, field_shuffle_seed }
+ }
+
+ #[inline]
+ pub fn simd(&self) -> bool {
+ self.flags.contains(ReprFlags::IS_SIMD)
+ }
+
+ #[inline]
+ pub fn c(&self) -> bool {
+ self.flags.contains(ReprFlags::IS_C)
+ }
+
+ #[inline]
+ pub fn packed(&self) -> bool {
+ self.pack.is_some()
+ }
+
+ #[inline]
+ pub fn transparent(&self) -> bool {
+ self.flags.contains(ReprFlags::IS_TRANSPARENT)
+ }
+
+ #[inline]
+ pub fn linear(&self) -> bool {
+ self.flags.contains(ReprFlags::IS_LINEAR)
+ }
+
+ /// Returns the discriminant type, given these `repr` options.
+ /// This must only be called on enums!
+ pub fn discr_type(&self) -> attr::IntType {
+ self.int.unwrap_or(attr::SignedInt(ast::IntTy::Isize))
+ }
+
+    /// Returns `true` if this `#[repr()]` should inhibit "smart enum
+ /// layout" optimizations, such as representing `Foo<&T>` as a
+ /// single pointer.
+ pub fn inhibit_enum_layout_opt(&self) -> bool {
+ self.c() || self.int.is_some()
+ }
+
+ /// Returns `true` if this `#[repr()]` should inhibit struct field reordering
+ /// optimizations, such as with `repr(C)`, `repr(packed(1))`, or `repr(<int>)`.
+ pub fn inhibit_struct_field_reordering_opt(&self) -> bool {
+ if let Some(pack) = self.pack {
+ if pack.bytes() == 1 {
+ return true;
+ }
+ }
+
+ self.flags.intersects(ReprFlags::IS_UNOPTIMISABLE) || self.int.is_some()
+ }
+
+ /// Returns `true` if this type is valid for reordering and `-Z randomize-layout`
+ /// was enabled for its declaration crate
+ pub fn can_randomize_type_layout(&self) -> bool {
+ !self.inhibit_struct_field_reordering_opt()
+ && self.flags.contains(ReprFlags::RANDOMIZE_LAYOUT)
+ }
+
+ /// Returns `true` if this `#[repr()]` should inhibit union ABI optimisations.
+ pub fn inhibit_union_abi_opt(&self) -> bool {
+ self.c()
+ }
+}
+
+impl<'tcx> FieldDef {
+ /// Returns the type of this field. The resulting type is not normalized. The `subst` is
+ /// typically obtained via the second field of [`TyKind::Adt`].
+ pub fn ty(&self, tcx: TyCtxt<'tcx>, subst: SubstsRef<'tcx>) -> Ty<'tcx> {
+ tcx.bound_type_of(self.did).subst(tcx, subst)
+ }
+
+    /// Computes the `Ident` of this field by looking up the `Span`
+ pub fn ident(&self, tcx: TyCtxt<'_>) -> Ident {
+ Ident::new(self.name, tcx.def_ident_span(self.did).unwrap())
+ }
+}
+
+pub type Attributes<'tcx> = impl Iterator<Item = &'tcx ast::Attribute>;
+
+#[derive(Debug, PartialEq, Eq)]
+pub enum ImplOverlapKind {
+ /// These impls are always allowed to overlap.
+ Permitted {
+ /// Whether or not the impl is permitted due to the trait being a `#[marker]` trait
+ marker: bool,
+ },
+ /// These impls are allowed to overlap, but that raises
+ /// an issue #33140 future-compatibility warning.
+ ///
+ /// Some background: in Rust 1.0, the trait-object types `Send + Sync` (today's
+ /// `dyn Send + Sync`) and `Sync + Send` (now `dyn Sync + Send`) were different.
+ ///
+    /// The widely-used version 0.1.0 of the crate `traitobject` had accidentally
+    /// relied on that difference, making what reduces to the following set of impls:
+ ///
+ /// ```compile_fail,(E0119)
+ /// trait Trait {}
+ /// impl Trait for dyn Send + Sync {}
+ /// impl Trait for dyn Sync + Send {}
+ /// ```
+ ///
+ /// Obviously, once we made these types be identical, that code causes a coherence
+ /// error and a fairly big headache for us. However, luckily for us, the trait
+ /// `Trait` used in this case is basically a marker trait, and therefore having
+ /// overlapping impls for it is sound.
+ ///
+ /// To handle this, we basically regard the trait as a marker trait, with an additional
+ /// future-compatibility warning. To avoid accidentally "stabilizing" this feature,
+ /// it has the following restrictions:
+ ///
+    /// 1. The trait must indeed be a marker-like trait (i.e., no items), and both
+    ///    impls must be positive.
+ /// 2. The trait-ref of both impls must be equal.
+ /// 3. The trait-ref of both impls must be a trait object type consisting only of
+ /// marker traits.
+ /// 4. Neither of the impls can have any where-clauses.
+ ///
+ /// Once `traitobject` 0.1.0 is no longer an active concern, this hack can be removed.
+ Issue33140,
+}
+
+impl<'tcx> TyCtxt<'tcx> {
+ pub fn typeck_body(self, body: hir::BodyId) -> &'tcx TypeckResults<'tcx> {
+ self.typeck(self.hir().body_owner_def_id(body))
+ }
+
+ pub fn provided_trait_methods(self, id: DefId) -> impl 'tcx + Iterator<Item = &'tcx AssocItem> {
+ self.associated_items(id)
+ .in_definition_order()
+ .filter(move |item| item.kind == AssocKind::Fn && item.defaultness(self).has_value())
+ }
+
+ /// Look up the name of a definition across crates. This does not look at HIR.
+ pub fn opt_item_name(self, def_id: DefId) -> Option<Symbol> {
+ if let Some(cnum) = def_id.as_crate_root() {
+ Some(self.crate_name(cnum))
+ } else {
+ let def_key = self.def_key(def_id);
+ match def_key.disambiguated_data.data {
+ // The name of a constructor is that of its parent.
+ rustc_hir::definitions::DefPathData::Ctor => self
+ .opt_item_name(DefId { krate: def_id.krate, index: def_key.parent.unwrap() }),
+                // The name of an opaque type only exists in HIR.
+ rustc_hir::definitions::DefPathData::ImplTrait
+ if let Some(def_id) = def_id.as_local() =>
+ self.hir().opt_name(self.hir().local_def_id_to_hir_id(def_id)),
+ _ => def_key.get_opt_name(),
+ }
+ }
+ }
+
+ /// Look up the name of a definition across crates. This does not look at HIR.
+ ///
+ /// This method will ICE if the corresponding item does not have a name. In these cases, use
+ /// [`opt_item_name`] instead.
+ ///
+ /// [`opt_item_name`]: Self::opt_item_name
+ pub fn item_name(self, id: DefId) -> Symbol {
+ self.opt_item_name(id).unwrap_or_else(|| {
+ bug!("item_name: no name for {:?}", self.def_path(id));
+ })
+ }
+
+ /// Look up the name and span of a definition.
+ ///
+ /// See [`item_name`][Self::item_name] for more information.
+ pub fn opt_item_ident(self, def_id: DefId) -> Option<Ident> {
+ let def = self.opt_item_name(def_id)?;
+ let span = def_id
+ .as_local()
+ .and_then(|id| self.def_ident_span(id))
+ .unwrap_or(rustc_span::DUMMY_SP);
+ Some(Ident::new(def, span))
+ }
+
+ pub fn opt_associated_item(self, def_id: DefId) -> Option<&'tcx AssocItem> {
+ if let DefKind::AssocConst | DefKind::AssocFn | DefKind::AssocTy = self.def_kind(def_id) {
+ Some(self.associated_item(def_id))
+ } else {
+ None
+ }
+ }
+
+ pub fn field_index(self, hir_id: hir::HirId, typeck_results: &TypeckResults<'_>) -> usize {
+ typeck_results.field_indices().get(hir_id).cloned().expect("no index for a field")
+ }
+
+ pub fn find_field_index(self, ident: Ident, variant: &VariantDef) -> Option<usize> {
+ variant
+ .fields
+ .iter()
+ .position(|field| self.hygienic_eq(ident, field.ident(self), variant.def_id))
+ }
+
+    /// Returns `Some` if the impls are the same polarity and the trait either
+    /// has no items or is annotated `#[marker]` and prevents item overrides.
+ pub fn impls_are_allowed_to_overlap(
+ self,
+ def_id1: DefId,
+ def_id2: DefId,
+ ) -> Option<ImplOverlapKind> {
+ // If either trait impl references an error, they're allowed to overlap,
+ // as one of them essentially doesn't exist.
+ if self.impl_trait_ref(def_id1).map_or(false, |tr| tr.references_error())
+ || self.impl_trait_ref(def_id2).map_or(false, |tr| tr.references_error())
+ {
+ return Some(ImplOverlapKind::Permitted { marker: false });
+ }
+
+ match (self.impl_polarity(def_id1), self.impl_polarity(def_id2)) {
+ (ImplPolarity::Reservation, _) | (_, ImplPolarity::Reservation) => {
+ // `#[rustc_reservation_impl]` impls don't overlap with anything
+ debug!(
+ "impls_are_allowed_to_overlap({:?}, {:?}) = Some(Permitted) (reservations)",
+ def_id1, def_id2
+ );
+ return Some(ImplOverlapKind::Permitted { marker: false });
+ }
+ (ImplPolarity::Positive, ImplPolarity::Negative)
+ | (ImplPolarity::Negative, ImplPolarity::Positive) => {
+ // `impl AutoTrait for Type` + `impl !AutoTrait for Type`
+ debug!(
+ "impls_are_allowed_to_overlap({:?}, {:?}) - None (differing polarities)",
+ def_id1, def_id2
+ );
+ return None;
+ }
+ (ImplPolarity::Positive, ImplPolarity::Positive)
+ | (ImplPolarity::Negative, ImplPolarity::Negative) => {}
+ };
+
+ let is_marker_overlap = {
+ let is_marker_impl = |def_id: DefId| -> bool {
+ let trait_ref = self.impl_trait_ref(def_id);
+ trait_ref.map_or(false, |tr| self.trait_def(tr.def_id).is_marker)
+ };
+ is_marker_impl(def_id1) && is_marker_impl(def_id2)
+ };
+
+ if is_marker_overlap {
+ debug!(
+ "impls_are_allowed_to_overlap({:?}, {:?}) = Some(Permitted) (marker overlap)",
+ def_id1, def_id2
+ );
+ Some(ImplOverlapKind::Permitted { marker: true })
+ } else {
+ if let Some(self_ty1) = self.issue33140_self_ty(def_id1) {
+ if let Some(self_ty2) = self.issue33140_self_ty(def_id2) {
+ if self_ty1 == self_ty2 {
+ debug!(
+ "impls_are_allowed_to_overlap({:?}, {:?}) - issue #33140 HACK",
+ def_id1, def_id2
+ );
+ return Some(ImplOverlapKind::Issue33140);
+ } else {
+ debug!(
+ "impls_are_allowed_to_overlap({:?}, {:?}) - found {:?} != {:?}",
+ def_id1, def_id2, self_ty1, self_ty2
+ );
+ }
+ }
+ }
+
+ debug!("impls_are_allowed_to_overlap({:?}, {:?}) = None", def_id1, def_id2);
+ None
+ }
+ }
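+
+    // For example (illustrative): given `#[marker] trait Marker {}`, two
+    // overlapping positive impls of `Marker` are permitted and this returns
+    // `Some(ImplOverlapKind::Permitted { marker: true })`; two overlapping
+    // positive impls of a non-marker trait with items return `None`.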
+
+    /// Returns the `ty::VariantDef` if `res` refers to a struct, a variant,
+    /// or one of their constructors; panics otherwise.
+ pub fn expect_variant_res(self, res: Res) -> &'tcx VariantDef {
+ match res {
+ Res::Def(DefKind::Variant, did) => {
+ let enum_did = self.parent(did);
+ self.adt_def(enum_did).variant_with_id(did)
+ }
+ Res::Def(DefKind::Struct | DefKind::Union, did) => self.adt_def(did).non_enum_variant(),
+ Res::Def(DefKind::Ctor(CtorOf::Variant, ..), variant_ctor_did) => {
+ let variant_did = self.parent(variant_ctor_did);
+ let enum_did = self.parent(variant_did);
+ self.adt_def(enum_did).variant_with_ctor_id(variant_ctor_did)
+ }
+ Res::Def(DefKind::Ctor(CtorOf::Struct, ..), ctor_did) => {
+ let struct_did = self.parent(ctor_did);
+ self.adt_def(struct_did).non_enum_variant()
+ }
+ _ => bug!("expect_variant_res used with unexpected res {:?}", res),
+ }
+ }
+
+ /// Returns the possibly-auto-generated MIR of a `(DefId, Subst)` pair.
+ #[instrument(skip(self), level = "debug")]
+ pub fn instance_mir(self, instance: ty::InstanceDef<'tcx>) -> &'tcx Body<'tcx> {
+ match instance {
+ ty::InstanceDef::Item(def) => {
+ debug!("calling def_kind on def: {:?}", def);
+ let def_kind = self.def_kind(def.did);
+ debug!("returned from def_kind: {:?}", def_kind);
+ match def_kind {
+ DefKind::Const
+ | DefKind::Static(..)
+ | DefKind::AssocConst
+ | DefKind::Ctor(..)
+ | DefKind::AnonConst
+ | DefKind::InlineConst => self.mir_for_ctfe_opt_const_arg(def),
+ // If the caller wants `mir_for_ctfe` of a function they should not be using
+ // `instance_mir`, so we'll assume const fn also wants the optimized version.
+ _ => {
+ assert_eq!(def.const_param_did, None);
+ self.optimized_mir(def.did)
+ }
+ }
+ }
+ ty::InstanceDef::VTableShim(..)
+ | ty::InstanceDef::ReifyShim(..)
+ | ty::InstanceDef::Intrinsic(..)
+ | ty::InstanceDef::FnPtrShim(..)
+ | ty::InstanceDef::Virtual(..)
+ | ty::InstanceDef::ClosureOnceShim { .. }
+ | ty::InstanceDef::DropGlue(..)
+ | ty::InstanceDef::CloneShim(..) => self.mir_shims(instance),
+ }
+ }
+
+ // FIXME(@lcnr): Remove this function.
+ pub fn get_attrs_unchecked(self, did: DefId) -> &'tcx [ast::Attribute] {
+ if let Some(did) = did.as_local() {
+ self.hir().attrs(self.hir().local_def_id_to_hir_id(did))
+ } else {
+ self.item_attrs(did)
+ }
+ }
+
+ /// Gets all attributes with the given name.
+ pub fn get_attrs(self, did: DefId, attr: Symbol) -> ty::Attributes<'tcx> {
+ let filter_fn = move |a: &&ast::Attribute| a.has_name(attr);
+ if let Some(did) = did.as_local() {
+ self.hir().attrs(self.hir().local_def_id_to_hir_id(did)).iter().filter(filter_fn)
+ } else if cfg!(debug_assertions) && rustc_feature::is_builtin_only_local(attr) {
+ bug!("tried to access the `only_local` attribute `{}` from an extern crate", attr);
+ } else {
+ self.item_attrs(did).iter().filter(filter_fn)
+ }
+ }
+
+ pub fn get_attr(self, did: DefId, attr: Symbol) -> Option<&'tcx ast::Attribute> {
+ self.get_attrs(did, attr).next()
+ }
+
+ /// Determines whether an item is annotated with an attribute.
+ pub fn has_attr(self, did: DefId, attr: Symbol) -> bool {
+ if cfg!(debug_assertions) && !did.is_local() && rustc_feature::is_builtin_only_local(attr) {
+ bug!("tried to access the `only_local` attribute `{}` from an extern crate", attr);
+ } else {
+ self.get_attrs(did, attr).next().is_some()
+ }
+ }
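+
+    // Hypothetical use (illustrative; `tcx` and `def_id` assumed in scope):
+    //
+    //     if tcx.has_attr(def_id, sym::inline) { /* ... */ }
+    //     let doc_attrs = tcx.get_attrs(def_id, sym::doc);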
+
+ /// Returns `true` if this is an `auto trait`.
+ pub fn trait_is_auto(self, trait_def_id: DefId) -> bool {
+ self.trait_def(trait_def_id).has_auto_impl
+ }
+
+    /// Returns the layout of a generator. The layout might be unavailable if
+    /// the generator is tainted by errors.
+ pub fn generator_layout(self, def_id: DefId) -> Option<&'tcx GeneratorLayout<'tcx>> {
+ self.optimized_mir(def_id).generator_layout()
+ }
+
+ /// Given the `DefId` of an impl, returns the `DefId` of the trait it implements.
+ /// If it implements no trait, returns `None`.
+ pub fn trait_id_of_impl(self, def_id: DefId) -> Option<DefId> {
+ self.impl_trait_ref(def_id).map(|tr| tr.def_id)
+ }
+
+ /// If the given `DefId` describes an item belonging to a trait,
+ /// returns the `DefId` of the trait that the trait item belongs to;
+ /// otherwise, returns `None`.
+ pub fn trait_of_item(self, def_id: DefId) -> Option<DefId> {
+ if let DefKind::AssocConst | DefKind::AssocFn | DefKind::AssocTy = self.def_kind(def_id) {
+ let parent = self.parent(def_id);
+ if let DefKind::Trait | DefKind::TraitAlias = self.def_kind(parent) {
+ return Some(parent);
+ }
+ }
+ None
+ }
+
+ /// If the given `DefId` describes a method belonging to an impl, returns the
+ /// `DefId` of the impl that the method belongs to; otherwise, returns `None`.
+ pub fn impl_of_method(self, def_id: DefId) -> Option<DefId> {
+ if let DefKind::AssocConst | DefKind::AssocFn | DefKind::AssocTy = self.def_kind(def_id) {
+ let parent = self.parent(def_id);
+ if let DefKind::Impl = self.def_kind(parent) {
+ return Some(parent);
+ }
+ }
+ None
+ }
+
+    /// Returns `true` if the given `DefId` describes an item annotated with
+    /// `#[automatically_derived]`, e.g. an impl produced by a built-in derive.
+ pub fn is_builtin_derive(self, def_id: DefId) -> bool {
+ self.has_attr(def_id, sym::automatically_derived)
+ }
+
+ /// Looks up the span of `impl_did` if the impl is local; otherwise returns `Err`
+ /// with the name of the crate containing the impl.
+ pub fn span_of_impl(self, impl_did: DefId) -> Result<Span, Symbol> {
+ if let Some(impl_did) = impl_did.as_local() {
+ Ok(self.def_span(impl_did))
+ } else {
+ Err(self.crate_name(impl_did.krate))
+ }
+ }
+
+    /// Hygienically compares a use-site name (`use_name`) for a field or an associated item
+    /// with its supposed definition name (`def_name`). The method also needs the `DefId` of
+    /// the supposed definition's parent/scope to perform the comparison.
+ pub fn hygienic_eq(self, use_name: Ident, def_name: Ident, def_parent_def_id: DefId) -> bool {
+ // We could use `Ident::eq` here, but we deliberately don't. The name
+ // comparison fails frequently, and we want to avoid the expensive
+ // `normalize_to_macros_2_0()` calls required for the span comparison whenever possible.
+ use_name.name == def_name.name
+ && use_name
+ .span
+ .ctxt()
+ .hygienic_eq(def_name.span.ctxt(), self.expn_that_defined(def_parent_def_id))
+ }
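+
+    // For example (illustrative): a field named `x` defined by a macros-2.0
+    // macro and a use-site `x` written by the user have equal `Symbol`s but
+    // different hygiene contexts, so `hygienic_eq` returns `false` for them
+    // even though `use_name.name == def_name.name` holds.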
+
+ pub fn adjust_ident(self, mut ident: Ident, scope: DefId) -> Ident {
+ ident.span.normalize_to_macros_2_0_and_adjust(self.expn_that_defined(scope));
+ ident
+ }
+
+ pub fn adjust_ident_and_get_scope(
+ self,
+ mut ident: Ident,
+ scope: DefId,
+ block: hir::HirId,
+ ) -> (Ident, DefId) {
+ let scope = ident
+ .span
+ .normalize_to_macros_2_0_and_adjust(self.expn_that_defined(scope))
+ .and_then(|actual_expansion| actual_expansion.expn_data().parent_module)
+ .unwrap_or_else(|| self.parent_module(block).to_def_id());
+ (ident, scope)
+ }
+
+ pub fn is_object_safe(self, key: DefId) -> bool {
+ self.object_safety_violations(key).is_empty()
+ }
+
+ #[inline]
+ pub fn is_const_fn_raw(self, def_id: DefId) -> bool {
+ matches!(self.def_kind(def_id), DefKind::Fn | DefKind::AssocFn | DefKind::Ctor(..))
+ && self.constness(def_id) == hir::Constness::Const
+ }
+
+ #[inline]
+ pub fn is_const_default_method(self, def_id: DefId) -> bool {
+ matches!(self.trait_of_item(def_id), Some(trait_id) if self.has_attr(trait_id, sym::const_trait))
+ }
+}
+
+/// Yields the parent function's `LocalDefId` if `def_id` is an `impl Trait` definition.
+pub fn is_impl_trait_defn(tcx: TyCtxt<'_>, def_id: DefId) -> Option<LocalDefId> {
+ let def_id = def_id.as_local()?;
+ if let Node::Item(item) = tcx.hir().get_by_def_id(def_id) {
+ if let hir::ItemKind::OpaqueTy(ref opaque_ty) = item.kind {
+ return match opaque_ty.origin {
+ hir::OpaqueTyOrigin::FnReturn(parent) | hir::OpaqueTyOrigin::AsyncFn(parent) => {
+ Some(parent)
+ }
+ hir::OpaqueTyOrigin::TyAlias => None,
+ };
+ }
+ }
+ None
+}
+
+pub fn int_ty(ity: ast::IntTy) -> IntTy {
+ match ity {
+ ast::IntTy::Isize => IntTy::Isize,
+ ast::IntTy::I8 => IntTy::I8,
+ ast::IntTy::I16 => IntTy::I16,
+ ast::IntTy::I32 => IntTy::I32,
+ ast::IntTy::I64 => IntTy::I64,
+ ast::IntTy::I128 => IntTy::I128,
+ }
+}
+
+pub fn uint_ty(uty: ast::UintTy) -> UintTy {
+ match uty {
+ ast::UintTy::Usize => UintTy::Usize,
+ ast::UintTy::U8 => UintTy::U8,
+ ast::UintTy::U16 => UintTy::U16,
+ ast::UintTy::U32 => UintTy::U32,
+ ast::UintTy::U64 => UintTy::U64,
+ ast::UintTy::U128 => UintTy::U128,
+ }
+}
+
+pub fn float_ty(fty: ast::FloatTy) -> FloatTy {
+ match fty {
+ ast::FloatTy::F32 => FloatTy::F32,
+ ast::FloatTy::F64 => FloatTy::F64,
+ }
+}
+
+pub fn ast_int_ty(ity: IntTy) -> ast::IntTy {
+ match ity {
+ IntTy::Isize => ast::IntTy::Isize,
+ IntTy::I8 => ast::IntTy::I8,
+ IntTy::I16 => ast::IntTy::I16,
+ IntTy::I32 => ast::IntTy::I32,
+ IntTy::I64 => ast::IntTy::I64,
+ IntTy::I128 => ast::IntTy::I128,
+ }
+}
+
+pub fn ast_uint_ty(uty: UintTy) -> ast::UintTy {
+ match uty {
+ UintTy::Usize => ast::UintTy::Usize,
+ UintTy::U8 => ast::UintTy::U8,
+ UintTy::U16 => ast::UintTy::U16,
+ UintTy::U32 => ast::UintTy::U32,
+ UintTy::U64 => ast::UintTy::U64,
+ UintTy::U128 => ast::UintTy::U128,
+ }
+}
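+
+// These conversions are mutually inverse; e.g. (illustrative)
+// `ast_int_ty(int_ty(ity)) == ity` and `ast_uint_ty(uint_ty(uty)) == uty`
+// hold for every variant, since both enums enumerate the same widths.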
+
+pub fn provide(providers: &mut ty::query::Providers) {
+ closure::provide(providers);
+ context::provide(providers);
+ erase_regions::provide(providers);
+ layout::provide(providers);
+ util::provide(providers);
+ print::provide(providers);
+ super::util::bug::provide(providers);
+ super::middle::provide(providers);
+ *providers = ty::query::Providers {
+ trait_impls_of: trait_def::trait_impls_of_provider,
+ incoherent_impls: trait_def::incoherent_impls_provider,
+ type_uninhabited_from: inhabitedness::type_uninhabited_from,
+ const_param_default: consts::const_param_default,
+ vtable_allocation: vtable::vtable_allocation_provider,
+ ..*providers
+ };
+}
+
+/// A map for the local crate mapping each type to a vector of its
+/// inherent impls. This is not meant to be used outside of coherence;
+/// rather, you should request the vector for a specific type via
+/// `tcx.inherent_impls(def_id)` so as to minimize your dependencies
+/// (constructing this map requires touching the entire crate).
+#[derive(Clone, Debug, Default, HashStable)]
+pub struct CrateInherentImpls {
+ pub inherent_impls: LocalDefIdMap<Vec<DefId>>,
+ pub incoherent_impls: FxHashMap<SimplifiedType, Vec<LocalDefId>>,
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, TyEncodable, HashStable)]
+pub struct SymbolName<'tcx> {
+ /// `&str` gives a consistent ordering, which ensures reproducible builds.
+ pub name: &'tcx str,
+}
+
+impl<'tcx> SymbolName<'tcx> {
+ pub fn new(tcx: TyCtxt<'tcx>, name: &str) -> SymbolName<'tcx> {
+ SymbolName {
+ name: unsafe { str::from_utf8_unchecked(tcx.arena.alloc_slice(name.as_bytes())) },
+ }
+ }
+}
+
+impl<'tcx> fmt::Display for SymbolName<'tcx> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(&self.name, fmt)
+ }
+}
+
+impl<'tcx> fmt::Debug for SymbolName<'tcx> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(&self.name, fmt)
+ }
+}
+
+#[derive(Debug, Default, Copy, Clone)]
+pub struct FoundRelationships {
+ /// This is true if we identified that this Ty (`?T`) is found in a `?T: Foo`
+ /// obligation, where:
+ ///
+ /// * `Foo` is not `Sized`
+ /// * `(): Foo` may be satisfied
+ pub self_in_trait: bool,
+ /// This is true if we identified that this Ty (`?T`) is found in a `<_ as
+ /// _>::AssocType = ?T`
+ pub output: bool,
+}
+
+/// The constituent parts of a type-level constant of kind ADT or array.
+#[derive(Copy, Clone, Debug, HashStable)]
+pub struct DestructuredConst<'tcx> {
+ pub variant: Option<VariantIdx>,
+ pub fields: &'tcx [ty::Const<'tcx>],
+}
diff --git a/compiler/rustc_middle/src/ty/normalize_erasing_regions.rs b/compiler/rustc_middle/src/ty/normalize_erasing_regions.rs
new file mode 100644
index 000000000..9d8a81165
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/normalize_erasing_regions.rs
@@ -0,0 +1,283 @@
+//! Methods for normalizing when you don't care about regions (and
+//! aren't doing type inference). If either of those conditions doesn't
+//! hold for you, use `infcx.normalize(...)` instead.
+//!
+//! The methods in this file use a `TypeFolder` to recursively process
+//! contents, invoking the underlying
+//! `normalize_generic_arg_after_erasing_regions` query for each type
+//! or constant found within. (This underlying query is what is cached.)
+
+use crate::mir;
+use crate::traits::query::NoSolution;
+use crate::ty::fold::{FallibleTypeFolder, TypeFoldable, TypeFolder};
+use crate::ty::subst::{Subst, SubstsRef};
+use crate::ty::{self, EarlyBinder, Ty, TyCtxt};
+
+#[derive(Debug, Copy, Clone, HashStable, TyEncodable, TyDecodable)]
+pub enum NormalizationError<'tcx> {
+ Type(Ty<'tcx>),
+ Const(ty::Const<'tcx>),
+ ConstantKind(mir::ConstantKind<'tcx>),
+}
+
+impl<'tcx> NormalizationError<'tcx> {
+ pub fn get_type_for_failure(&self) -> String {
+ match self {
+ NormalizationError::Type(t) => format!("{}", t),
+ NormalizationError::Const(c) => format!("{}", c),
+ NormalizationError::ConstantKind(ck) => format!("{}", ck),
+ }
+ }
+}
+
+impl<'tcx> TyCtxt<'tcx> {
+ /// Erase the regions in `value` and then fully normalize all the
+ /// types found within. The result will also have regions erased.
+ ///
+ /// This should only be used outside of type inference. For example,
+ /// it assumes that normalization will succeed.
+ pub fn normalize_erasing_regions<T>(self, param_env: ty::ParamEnv<'tcx>, value: T) -> T
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ debug!(
+ "normalize_erasing_regions::<{}>(value={:?}, param_env={:?})",
+ std::any::type_name::<T>(),
+ value,
+ param_env,
+ );
+
+ // Erase first before we do the real query -- this keeps the
+ // cache from being too polluted.
+ let value = self.erase_regions(value);
+ debug!(?value);
+
+ if !value.has_projections() {
+ value
+ } else {
+ value.fold_with(&mut NormalizeAfterErasingRegionsFolder { tcx: self, param_env })
+ }
+ }
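+
+    // Hypothetical use: normalizing a monomorphic type during codegen,
+    // assuming `tcx`, `param_env` and `field_ty` are in scope:
+    //
+    //     let ty = tcx.normalize_erasing_regions(param_env, field_ty);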
+
+ /// Tries to erase the regions in `value` and then fully normalize all the
+ /// types found within. The result will also have regions erased.
+ ///
+ /// Contrary to `normalize_erasing_regions` this function does not assume that normalization
+ /// succeeds.
+ pub fn try_normalize_erasing_regions<T>(
+ self,
+ param_env: ty::ParamEnv<'tcx>,
+ value: T,
+ ) -> Result<T, NormalizationError<'tcx>>
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ debug!(
+ "try_normalize_erasing_regions::<{}>(value={:?}, param_env={:?})",
+ std::any::type_name::<T>(),
+ value,
+ param_env,
+ );
+
+ // Erase first before we do the real query -- this keeps the
+ // cache from being too polluted.
+ let value = self.erase_regions(value);
+ debug!(?value);
+
+ if !value.has_projections() {
+ Ok(value)
+ } else {
+ let mut folder = TryNormalizeAfterErasingRegionsFolder::new(self, param_env);
+ value.try_fold_with(&mut folder)
+ }
+ }
+
+ /// If you have a `Binder<'tcx, T>`, you can do this to strip out the
+ /// late-bound regions and then normalize the result, yielding up
+ /// a `T` (with regions erased). This is appropriate when the
+ /// binder is being instantiated at the call site.
+ ///
+ /// N.B., currently, higher-ranked type bounds inhibit
+ /// normalization. Therefore, each time we erase them in
+ /// codegen, we need to normalize the contents.
+ pub fn normalize_erasing_late_bound_regions<T>(
+ self,
+ param_env: ty::ParamEnv<'tcx>,
+ value: ty::Binder<'tcx, T>,
+ ) -> T
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ let value = self.erase_late_bound_regions(value);
+ self.normalize_erasing_regions(param_env, value)
+ }
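+
+    // Hypothetical use (assuming `param_env` and `def_id` in scope): getting
+    // a region-erased, normalized signature out of the `ty::Binder` that
+    // `fn_sig` returns:
+    //
+    //     let sig = tcx.normalize_erasing_late_bound_regions(
+    //         param_env,
+    //         tcx.fn_sig(def_id),
+    //     );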
+
+ /// If you have a `Binder<'tcx, T>`, you can do this to strip out the
+ /// late-bound regions and then normalize the result, yielding up
+ /// a `T` (with regions erased). This is appropriate when the
+ /// binder is being instantiated at the call site.
+ ///
+ /// N.B., currently, higher-ranked type bounds inhibit
+ /// normalization. Therefore, each time we erase them in
+ /// codegen, we need to normalize the contents.
+ pub fn try_normalize_erasing_late_bound_regions<T>(
+ self,
+ param_env: ty::ParamEnv<'tcx>,
+ value: ty::Binder<'tcx, T>,
+ ) -> Result<T, NormalizationError<'tcx>>
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ let value = self.erase_late_bound_regions(value);
+ self.try_normalize_erasing_regions(param_env, value)
+ }
+
+    /// Monomorphizes a type from the AST by first applying the
+    /// in-scope substitutions and then normalizing any associated
+    /// types.
+    ///
+    /// Panics if normalization fails. If normalization might fail,
+    /// use `try_subst_and_normalize_erasing_regions` instead.
+ pub fn subst_and_normalize_erasing_regions<T>(
+ self,
+ param_substs: SubstsRef<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ value: T,
+ ) -> T
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ debug!(
+ "subst_and_normalize_erasing_regions(\
+ param_substs={:?}, \
+ value={:?}, \
+ param_env={:?})",
+ param_substs, value, param_env,
+ );
+ let substituted = EarlyBinder(value).subst(self, param_substs);
+ self.normalize_erasing_regions(param_env, substituted)
+ }
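+
+    // Hypothetical use: monomorphizing a type with an instance's substs,
+    // assuming `instance`, `param_env` and `ty` are in scope:
+    //
+    //     let mono_ty =
+    //         tcx.subst_and_normalize_erasing_regions(instance.substs, param_env, ty);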
+
+ /// Monomorphizes a type from the AST by first applying the
+ /// in-scope substitutions and then trying to normalize any associated
+ /// types. Contrary to `subst_and_normalize_erasing_regions` this does
+ /// not assume that normalization succeeds.
+ pub fn try_subst_and_normalize_erasing_regions<T>(
+ self,
+ param_substs: SubstsRef<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ value: T,
+ ) -> Result<T, NormalizationError<'tcx>>
+ where
+ T: TypeFoldable<'tcx>,
+ {
+ debug!(
+ "subst_and_normalize_erasing_regions(\
+ param_substs={:?}, \
+ value={:?}, \
+ param_env={:?})",
+ param_substs, value, param_env,
+ );
+ let substituted = EarlyBinder(value).subst(self, param_substs);
+ self.try_normalize_erasing_regions(param_env, substituted)
+ }
+}
+
+struct NormalizeAfterErasingRegionsFolder<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+}
+
+impl<'tcx> NormalizeAfterErasingRegionsFolder<'tcx> {
+ #[instrument(skip(self), level = "debug")]
+ fn normalize_generic_arg_after_erasing_regions(
+ &self,
+ arg: ty::GenericArg<'tcx>,
+ ) -> ty::GenericArg<'tcx> {
+ let arg = self.param_env.and(arg);
+ debug!(?arg);
+
+ self.tcx.try_normalize_generic_arg_after_erasing_regions(arg).unwrap_or_else(|_| bug!(
+ "Failed to normalize {:?}, maybe try to call `try_normalize_erasing_regions` instead",
+ arg.value
+ ))
+ }
+}
+
+impl<'tcx> TypeFolder<'tcx> for NormalizeAfterErasingRegionsFolder<'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ self.normalize_generic_arg_after_erasing_regions(ty.into()).expect_ty()
+ }
+
+ fn fold_const(&mut self, c: ty::Const<'tcx>) -> ty::Const<'tcx> {
+ self.normalize_generic_arg_after_erasing_regions(c.into()).expect_const()
+ }
+
+ #[inline]
+ fn fold_mir_const(&mut self, c: mir::ConstantKind<'tcx>) -> mir::ConstantKind<'tcx> {
+ // FIXME: This *probably* needs canonicalization too!
+ let arg = self.param_env.and(c);
+ self.tcx
+ .try_normalize_mir_const_after_erasing_regions(arg)
+ .unwrap_or_else(|_| bug!("failed to normalize {:?}", c))
+ }
+}
+
+struct TryNormalizeAfterErasingRegionsFolder<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+}
+
+impl<'tcx> TryNormalizeAfterErasingRegionsFolder<'tcx> {
+ fn new(tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> Self {
+ TryNormalizeAfterErasingRegionsFolder { tcx, param_env }
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ fn try_normalize_generic_arg_after_erasing_regions(
+ &self,
+ arg: ty::GenericArg<'tcx>,
+ ) -> Result<ty::GenericArg<'tcx>, NoSolution> {
+ let arg = self.param_env.and(arg);
+ debug!(?arg);
+
+ self.tcx.try_normalize_generic_arg_after_erasing_regions(arg)
+ }
+}
+
+impl<'tcx> FallibleTypeFolder<'tcx> for TryNormalizeAfterErasingRegionsFolder<'tcx> {
+ type Error = NormalizationError<'tcx>;
+
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn try_fold_ty(&mut self, ty: Ty<'tcx>) -> Result<Ty<'tcx>, Self::Error> {
+ match self.try_normalize_generic_arg_after_erasing_regions(ty.into()) {
+ Ok(t) => Ok(t.expect_ty()),
+ Err(_) => Err(NormalizationError::Type(ty)),
+ }
+ }
+
+ fn try_fold_const(&mut self, c: ty::Const<'tcx>) -> Result<ty::Const<'tcx>, Self::Error> {
+ match self.try_normalize_generic_arg_after_erasing_regions(c.into()) {
+ Ok(t) => Ok(t.expect_const()),
+ Err(_) => Err(NormalizationError::Const(c)),
+ }
+ }
+
+ fn try_fold_mir_const(
+ &mut self,
+ c: mir::ConstantKind<'tcx>,
+ ) -> Result<mir::ConstantKind<'tcx>, Self::Error> {
+ // FIXME: This *probably* needs canonicalization too!
+ let arg = self.param_env.and(c);
+ match self.tcx.try_normalize_mir_const_after_erasing_regions(arg) {
+ Ok(c) => Ok(c),
+ Err(_) => Err(NormalizationError::ConstantKind(c)),
+ }
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/parameterized.rs b/compiler/rustc_middle/src/ty/parameterized.rs
new file mode 100644
index 000000000..e189ee2fc
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/parameterized.rs
@@ -0,0 +1,119 @@
+use rustc_hir::def_id::DefId;
+use rustc_index::vec::{Idx, IndexVec};
+
+use crate::middle::exported_symbols::ExportedSymbol;
+use crate::mir::Body;
+use crate::ty::abstract_const::Node;
+use crate::ty::{
+ self, Const, FnSig, GeneratorDiagnosticData, GenericPredicates, Predicate, TraitRef, Ty,
+};
+
+pub trait ParameterizedOverTcx: 'static {
+ #[allow(unused_lifetimes)]
+ type Value<'tcx>;
+}
+
+impl<T: ParameterizedOverTcx> ParameterizedOverTcx for &'static [T] {
+ type Value<'tcx> = &'tcx [T::Value<'tcx>];
+}
+
+impl<T: ParameterizedOverTcx> ParameterizedOverTcx for Option<T> {
+ type Value<'tcx> = Option<T::Value<'tcx>>;
+}
+
+impl<A: ParameterizedOverTcx, B: ParameterizedOverTcx> ParameterizedOverTcx for (A, B) {
+ type Value<'tcx> = (A::Value<'tcx>, B::Value<'tcx>);
+}
+
+impl<I: Idx + 'static, T: ParameterizedOverTcx> ParameterizedOverTcx for IndexVec<I, T> {
+ type Value<'tcx> = IndexVec<I, T::Value<'tcx>>;
+}
+
+impl<T: ParameterizedOverTcx> ParameterizedOverTcx for ty::Binder<'static, T> {
+ type Value<'tcx> = ty::Binder<'tcx, T::Value<'tcx>>;
+}
+
+#[macro_export]
+macro_rules! trivially_parameterized_over_tcx {
+ ($($ty:ty),+ $(,)?) => {
+ $(
+ impl $crate::ty::ParameterizedOverTcx for $ty {
+ #[allow(unused_lifetimes)]
+ type Value<'tcx> = $ty;
+ }
+ )*
+ }
+}
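+
+// For instance (illustrative), `trivially_parameterized_over_tcx! { u32 }`
+// expands to:
+//
+//     impl crate::ty::ParameterizedOverTcx for u32 {
+//         type Value<'tcx> = u32;
+//     }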
+
+trivially_parameterized_over_tcx! {
+ usize,
+ (),
+ u32,
+ std::string::String,
+ crate::metadata::ModChild,
+ crate::middle::codegen_fn_attrs::CodegenFnAttrs,
+ crate::middle::exported_symbols::SymbolExportInfo,
+ crate::mir::ConstQualifs,
+ ty::Generics,
+ ty::ImplPolarity,
+ ty::ReprOptions,
+ ty::TraitDef,
+ ty::Visibility,
+ ty::adjustment::CoerceUnsizedInfo,
+ ty::fast_reject::SimplifiedTypeGen<DefId>,
+ rustc_ast::Attribute,
+ rustc_ast::MacArgs,
+ rustc_attr::ConstStability,
+ rustc_attr::Deprecation,
+ rustc_attr::Stability,
+ rustc_hir::Constness,
+ rustc_hir::Defaultness,
+ rustc_hir::GeneratorKind,
+ rustc_hir::IsAsync,
+ rustc_hir::LangItem,
+ rustc_hir::def::DefKind,
+ rustc_hir::def_id::DefIndex,
+ rustc_hir::definitions::DefKey,
+ rustc_index::bit_set::FiniteBitSet<u32>,
+ rustc_session::cstore::ForeignModule,
+ rustc_session::cstore::LinkagePreference,
+ rustc_session::cstore::NativeLib,
+ rustc_span::DebuggerVisualizerFile,
+ rustc_span::ExpnData,
+ rustc_span::ExpnHash,
+ rustc_span::ExpnId,
+ rustc_span::SourceFile,
+ rustc_span::Span,
+ rustc_span::Symbol,
+ rustc_span::def_id::DefPathHash,
+ rustc_span::hygiene::SyntaxContextData,
+ rustc_span::symbol::Ident,
+ rustc_type_ir::Variance,
+}
+
+// HACK(compiler-errors): This macro rule can only take an ident,
+// not a path, due to parsing ambiguity. That means we have to
+// import all of these types above.
+#[macro_export]
+macro_rules! parameterized_over_tcx {
+ ($($ident:ident),+ $(,)?) => {
+ $(
+ impl $crate::ty::ParameterizedOverTcx for $ident<'static> {
+ type Value<'tcx> = $ident<'tcx>;
+ }
+ )*
+ }
+}
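+
+// For instance (illustrative), `parameterized_over_tcx! { Ty }` expands to
+//
+//     impl crate::ty::ParameterizedOverTcx for Ty<'static> {
+//         type Value<'tcx> = Ty<'tcx>;
+//     }
+//
+// which lets metadata store the `'static` form and project it to any `'tcx`.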
+
+parameterized_over_tcx! {
+ Ty,
+ FnSig,
+ GenericPredicates,
+ TraitRef,
+ Const,
+ Predicate,
+ GeneratorDiagnosticData,
+ Body,
+ Node,
+ ExportedSymbol,
+}
diff --git a/compiler/rustc_middle/src/ty/print/mod.rs b/compiler/rustc_middle/src/ty/print/mod.rs
new file mode 100644
index 000000000..d57cf8f01
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/print/mod.rs
@@ -0,0 +1,327 @@
+use crate::ty::subst::{GenericArg, Subst};
+use crate::ty::{self, DefIdTree, Ty, TyCtxt};
+
+use rustc_data_structures::fx::FxHashSet;
+use rustc_data_structures::sso::SsoHashSet;
+use rustc_hir::def_id::{CrateNum, DefId};
+use rustc_hir::definitions::{DefPathData, DisambiguatedDefPathData};
+
+// `pretty` is a separate module only for organization.
+mod pretty;
+pub use self::pretty::*;
+
+// FIXME(eddyb) false positive, the lifetime parameters are used with `P: Printer<...>`.
+#[allow(unused_lifetimes)]
+pub trait Print<'tcx, P> {
+ type Output;
+ type Error;
+
+ fn print(&self, cx: P) -> Result<Self::Output, Self::Error>;
+}
+
+/// Interface for outputting user-facing "type-system entities"
+/// (paths, types, lifetimes, constants, etc.) as a side-effect
+/// (e.g. formatting, like `PrettyPrinter` implementors do) or by
+/// constructing some alternative representation (e.g. an AST),
+/// which the associated types allow passing through the methods.
+///
+/// For pretty-printing/formatting in particular, see `PrettyPrinter`.
+//
+// FIXME(eddyb) find a better name; this is more general than "printing".
+pub trait Printer<'tcx>: Sized {
+ type Error;
+
+ type Path;
+ type Region;
+ type Type;
+ type DynExistential;
+ type Const;
+
+ fn tcx<'a>(&'a self) -> TyCtxt<'tcx>;
+
+ fn print_def_path(
+ self,
+ def_id: DefId,
+ substs: &'tcx [GenericArg<'tcx>],
+ ) -> Result<Self::Path, Self::Error> {
+ self.default_print_def_path(def_id, substs)
+ }
+
+ fn print_impl_path(
+ self,
+ impl_def_id: DefId,
+ substs: &'tcx [GenericArg<'tcx>],
+ self_ty: Ty<'tcx>,
+ trait_ref: Option<ty::TraitRef<'tcx>>,
+ ) -> Result<Self::Path, Self::Error> {
+ self.default_print_impl_path(impl_def_id, substs, self_ty, trait_ref)
+ }
+
+ fn print_region(self, region: ty::Region<'tcx>) -> Result<Self::Region, Self::Error>;
+
+ fn print_type(self, ty: Ty<'tcx>) -> Result<Self::Type, Self::Error>;
+
+ fn print_dyn_existential(
+ self,
+ predicates: &'tcx ty::List<ty::Binder<'tcx, ty::ExistentialPredicate<'tcx>>>,
+ ) -> Result<Self::DynExistential, Self::Error>;
+
+ fn print_const(self, ct: ty::Const<'tcx>) -> Result<Self::Const, Self::Error>;
+
+ fn path_crate(self, cnum: CrateNum) -> Result<Self::Path, Self::Error>;
+
+ fn path_qualified(
+ self,
+ self_ty: Ty<'tcx>,
+ trait_ref: Option<ty::TraitRef<'tcx>>,
+ ) -> Result<Self::Path, Self::Error>;
+
+ fn path_append_impl(
+ self,
+ print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
+ disambiguated_data: &DisambiguatedDefPathData,
+ self_ty: Ty<'tcx>,
+ trait_ref: Option<ty::TraitRef<'tcx>>,
+ ) -> Result<Self::Path, Self::Error>;
+
+ fn path_append(
+ self,
+ print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
+ disambiguated_data: &DisambiguatedDefPathData,
+ ) -> Result<Self::Path, Self::Error>;
+
+ fn path_generic_args(
+ self,
+ print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
+ args: &[GenericArg<'tcx>],
+ ) -> Result<Self::Path, Self::Error>;
+
+ // Defaults (should not be overridden):
+
+ #[instrument(skip(self), level = "debug")]
+ fn default_print_def_path(
+ self,
+ def_id: DefId,
+ substs: &'tcx [GenericArg<'tcx>],
+ ) -> Result<Self::Path, Self::Error> {
+ let key = self.tcx().def_key(def_id);
+ debug!(?key);
+
+ match key.disambiguated_data.data {
+ DefPathData::CrateRoot => {
+ assert!(key.parent.is_none());
+ self.path_crate(def_id.krate)
+ }
+
+ DefPathData::Impl => {
+ let generics = self.tcx().generics_of(def_id);
+ let self_ty = self.tcx().bound_type_of(def_id);
+ let impl_trait_ref = self.tcx().bound_impl_trait_ref(def_id);
+ let (self_ty, impl_trait_ref) = if substs.len() >= generics.count() {
+ (
+ self_ty.subst(self.tcx(), substs),
+ impl_trait_ref.map(|i| i.subst(self.tcx(), substs)),
+ )
+ } else {
+ (self_ty.0, impl_trait_ref.map(|i| i.0))
+ };
+ self.print_impl_path(def_id, substs, self_ty, impl_trait_ref)
+ }
+
+ _ => {
+ let parent_def_id = DefId { index: key.parent.unwrap(), ..def_id };
+
+ let mut parent_substs = substs;
+ let mut trait_qualify_parent = false;
+ if !substs.is_empty() {
+ let generics = self.tcx().generics_of(def_id);
+ parent_substs = &substs[..generics.parent_count.min(substs.len())];
+
+ match key.disambiguated_data.data {
+                        // Closures' own generics are only captures; don't print them.
+                        DefPathData::ClosureExpr => {}
+                        // This covers both `DefKind::AnonConst` and `DefKind::InlineConst`.
+                        // Anon consts don't have their own generics, and inline consts' own
+                        // generics are their inferred types, so don't print them.
+ DefPathData::AnonConst => {}
+
+ // If we have any generic arguments to print, we do that
+ // on top of the same path, but without its own generics.
+ _ => {
+ if !generics.params.is_empty() && substs.len() >= generics.count() {
+ let args = generics.own_substs_no_defaults(self.tcx(), substs);
+ return self.path_generic_args(
+ |cx| cx.print_def_path(def_id, parent_substs),
+ args,
+ );
+ }
+ }
+ }
+
+ // FIXME(eddyb) try to move this into the parent's printing
+ // logic, instead of doing it when printing the child.
+ trait_qualify_parent = generics.has_self
+ && generics.parent == Some(parent_def_id)
+ && parent_substs.len() == generics.parent_count
+ && self.tcx().generics_of(parent_def_id).parent_count == 0;
+ }
+
+ self.path_append(
+ |cx: Self| {
+ if trait_qualify_parent {
+ let trait_ref = ty::TraitRef::new(
+ parent_def_id,
+ cx.tcx().intern_substs(parent_substs),
+ );
+ cx.path_qualified(trait_ref.self_ty(), Some(trait_ref))
+ } else {
+ cx.print_def_path(parent_def_id, parent_substs)
+ }
+ },
+ &key.disambiguated_data,
+ )
+ }
+ }
+ }
+
+ fn default_print_impl_path(
+ self,
+ impl_def_id: DefId,
+ _substs: &'tcx [GenericArg<'tcx>],
+ self_ty: Ty<'tcx>,
+ impl_trait_ref: Option<ty::TraitRef<'tcx>>,
+ ) -> Result<Self::Path, Self::Error> {
+ debug!(
+ "default_print_impl_path: impl_def_id={:?}, self_ty={}, impl_trait_ref={:?}",
+ impl_def_id, self_ty, impl_trait_ref
+ );
+
+ let key = self.tcx().def_key(impl_def_id);
+ let parent_def_id = DefId { index: key.parent.unwrap(), ..impl_def_id };
+
+ // Decide whether to print the parent path for the impl.
+ // Logically, since impls are global, it's never needed, but
+ // users may find it useful. Currently, we omit the parent if
+ // the impl is either in the same module as the self-type or
+ // as the trait.
+ let in_self_mod = match characteristic_def_id_of_type(self_ty) {
+ None => false,
+ Some(ty_def_id) => self.tcx().parent(ty_def_id) == parent_def_id,
+ };
+ let in_trait_mod = match impl_trait_ref {
+ None => false,
+ Some(trait_ref) => self.tcx().parent(trait_ref.def_id) == parent_def_id,
+ };
+
+ if !in_self_mod && !in_trait_mod {
+            // If the impl is not co-located with either the self type or
+            // the trait type, then fall back to a format that identifies
+            // the module more clearly.
+ self.path_append_impl(
+ |cx| cx.print_def_path(parent_def_id, &[]),
+ &key.disambiguated_data,
+ self_ty,
+ impl_trait_ref,
+ )
+ } else {
+ // Otherwise, try to give a good form that would be valid language
+ // syntax. Preferably using associated item notation.
+ self.path_qualified(self_ty, impl_trait_ref)
+ }
+ }
+}
+
+/// As a heuristic, when we see an impl whose 'self type' is defined in
+/// the same module as the impl, we can omit the path to the impl itself.
+/// This function tries to find a "characteristic `DefId`" for a type.
+/// It's just a heuristic, so it makes some questionable decisions, and
+/// we may want to adjust it later.
+///
+/// The visited set is needed to avoid full iteration over
+/// deeply nested tuples that have no `DefId`.
+fn characteristic_def_id_of_type_cached<'a>(
+ ty: Ty<'a>,
+ visited: &mut SsoHashSet<Ty<'a>>,
+) -> Option<DefId> {
+ match *ty.kind() {
+ ty::Adt(adt_def, _) => Some(adt_def.did()),
+
+ ty::Dynamic(data, ..) => data.principal_def_id(),
+
+ ty::Array(subty, _) | ty::Slice(subty) => {
+ characteristic_def_id_of_type_cached(subty, visited)
+ }
+
+ ty::RawPtr(mt) => characteristic_def_id_of_type_cached(mt.ty, visited),
+
+ ty::Ref(_, ty, _) => characteristic_def_id_of_type_cached(ty, visited),
+
+ ty::Tuple(ref tys) => tys.iter().find_map(|ty| {
+ if visited.insert(ty) {
+ return characteristic_def_id_of_type_cached(ty, visited);
+ }
+            None
+ }),
+
+ ty::FnDef(def_id, _)
+ | ty::Closure(def_id, _)
+ | ty::Generator(def_id, _, _)
+ | ty::Foreign(def_id) => Some(def_id),
+
+ ty::Bool
+ | ty::Char
+ | ty::Int(_)
+ | ty::Uint(_)
+ | ty::Str
+ | ty::FnPtr(_)
+ | ty::Projection(_)
+ | ty::Placeholder(..)
+ | ty::Param(_)
+ | ty::Opaque(..)
+ | ty::Infer(_)
+ | ty::Bound(..)
+ | ty::Error(_)
+ | ty::GeneratorWitness(..)
+ | ty::Never
+ | ty::Float(_) => None,
+ }
+}
+
+pub fn characteristic_def_id_of_type(ty: Ty<'_>) -> Option<DefId> {
+ characteristic_def_id_of_type_cached(ty, &mut SsoHashSet::new())
+}
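+
+// For example (illustrative): for `&[Vec<u8>]` this walks through the
+// reference and the slice to `Vec<u8>` and returns the `DefId` of `Vec`;
+// for a tuple like `(i32, String)` it returns the `DefId` of `String`,
+// the first constituent that has one.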
+
+impl<'tcx, P: Printer<'tcx>> Print<'tcx, P> for ty::Region<'tcx> {
+ type Output = P::Region;
+ type Error = P::Error;
+ fn print(&self, cx: P) -> Result<Self::Output, Self::Error> {
+ cx.print_region(*self)
+ }
+}
+
+impl<'tcx, P: Printer<'tcx>> Print<'tcx, P> for Ty<'tcx> {
+ type Output = P::Type;
+ type Error = P::Error;
+
+ fn print(&self, cx: P) -> Result<Self::Output, Self::Error> {
+ cx.print_type(*self)
+ }
+}
+
+impl<'tcx, P: Printer<'tcx>> Print<'tcx, P>
+ for &'tcx ty::List<ty::Binder<'tcx, ty::ExistentialPredicate<'tcx>>>
+{
+ type Output = P::DynExistential;
+ type Error = P::Error;
+ fn print(&self, cx: P) -> Result<Self::Output, Self::Error> {
+ cx.print_dyn_existential(self)
+ }
+}
+
+impl<'tcx, P: Printer<'tcx>> Print<'tcx, P> for ty::Const<'tcx> {
+ type Output = P::Const;
+ type Error = P::Error;
+ fn print(&self, cx: P) -> Result<Self::Output, Self::Error> {
+ cx.print_const(*self)
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/print/pretty.rs b/compiler/rustc_middle/src/ty/print/pretty.rs
new file mode 100644
index 000000000..7f2e81a71
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/print/pretty.rs
@@ -0,0 +1,2789 @@
+use crate::mir::interpret::{AllocRange, GlobalAlloc, Pointer, Provenance, Scalar};
+use crate::ty::subst::{GenericArg, GenericArgKind, Subst};
+use crate::ty::{
+ self, ConstInt, DefIdTree, ParamConst, ScalarInt, Term, Ty, TyCtxt, TypeFoldable,
+ TypeSuperFoldable, TypeSuperVisitable, TypeVisitable,
+};
+use rustc_apfloat::ieee::{Double, Single};
+use rustc_data_structures::fx::{FxHashMap, FxIndexMap};
+use rustc_data_structures::sso::SsoHashSet;
+use rustc_hir as hir;
+use rustc_hir::def::{self, CtorKind, DefKind, Namespace};
+use rustc_hir::def_id::{DefId, DefIdSet, CRATE_DEF_ID, LOCAL_CRATE};
+use rustc_hir::definitions::{DefPathData, DefPathDataName, DisambiguatedDefPathData};
+use rustc_session::config::TrimmedDefPaths;
+use rustc_session::cstore::{ExternCrate, ExternCrateSource};
+use rustc_span::symbol::{kw, Ident, Symbol};
+use rustc_target::abi::Size;
+use rustc_target::spec::abi::Abi;
+
+use std::cell::Cell;
+use std::char;
+use std::collections::BTreeMap;
+use std::convert::TryFrom;
+use std::fmt::{self, Write as _};
+use std::iter;
+use std::ops::{ControlFlow, Deref, DerefMut};
+
+// `pretty` is a separate module only for organization.
+use super::*;
+
+macro_rules! p {
+ (@$lit:literal) => {
+ write!(scoped_cx!(), $lit)?
+ };
+ (@write($($data:expr),+)) => {
+ write!(scoped_cx!(), $($data),+)?
+ };
+ (@print($x:expr)) => {
+ scoped_cx!() = $x.print(scoped_cx!())?
+ };
+ (@$method:ident($($arg:expr),*)) => {
+ scoped_cx!() = scoped_cx!().$method($($arg),*)?
+ };
+ ($($elem:tt $(($($args:tt)*))?),+) => {{
+ $(p!(@ $elem $(($($args)*))?);)+
+ }};
+}
+macro_rules! define_scoped_cx {
+ ($cx:ident) => {
+ #[allow(unused_macros)]
+ macro_rules! scoped_cx {
+ () => {
+ $cx
+ };
+ }
+ };
+}
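+
+// For example (illustrative): inside a printer method, after
+// `define_scoped_cx!(self)`, the invocation `p!("&", print(ty))` expands
+// to a `write!` of `&` followed by `self = ty.print(self)?`, threading
+// the printer value through each step.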
+
+thread_local! {
+ static FORCE_IMPL_FILENAME_LINE: Cell<bool> = const { Cell::new(false) };
+ static SHOULD_PREFIX_WITH_CRATE: Cell<bool> = const { Cell::new(false) };
+ static NO_TRIMMED_PATH: Cell<bool> = const { Cell::new(false) };
+ static NO_QUERIES: Cell<bool> = const { Cell::new(false) };
+ static NO_VISIBLE_PATH: Cell<bool> = const { Cell::new(false) };
+}
+
+macro_rules! define_helper {
+ ($($(#[$a:meta])* fn $name:ident($helper:ident, $tl:ident);)+) => {
+ $(
+ #[must_use]
+ pub struct $helper(bool);
+
+ impl $helper {
+ pub fn new() -> $helper {
+ $helper($tl.with(|c| c.replace(true)))
+ }
+ }
+
+ $(#[$a])*
+ pub macro $name($e:expr) {
+ {
+ let _guard = $helper::new();
+ $e
+ }
+ }
+
+ impl Drop for $helper {
+ fn drop(&mut self) {
+ $tl.with(|c| c.set(self.0))
+ }
+ }
+ )+
+ }
+}
+
+define_helper!(
+ /// Avoids running any queries during any prints that occur
+ /// during the closure. This may alter the appearance of some
+ /// types (e.g. forcing verbose printing for opaque types).
+ /// This method is used during some queries (e.g. `explicit_item_bounds`
+ /// for opaque types), to ensure that any debug printing that
+ /// occurs during the query computation does not end up recursively
+ /// calling the same query.
+ fn with_no_queries(NoQueriesGuard, NO_QUERIES);
+ /// Force us to name impls with just the filename/line number. We
+ /// normally try to use types. But at some points, notably while printing
+ /// cycle errors, this can result in extra or suboptimal error output,
+ /// so this variable disables that check.
+ fn with_forced_impl_filename_line(ForcedImplGuard, FORCE_IMPL_FILENAME_LINE);
+ /// Adds the `crate::` prefix to paths where appropriate.
+ fn with_crate_prefix(CratePrefixGuard, SHOULD_PREFIX_WITH_CRATE);
+    /// Prevent path trimming if it is turned on. Path trimming affects the
+    /// `Display` impl of various rustc types; for example, `std::vec::Vec`
+    /// would be trimmed to `Vec` if no other `Vec` is found.
+ fn with_no_trimmed_paths(NoTrimmedGuard, NO_TRIMMED_PATH);
+    /// Prevent selection of visible paths. The `Display` impl of `DefId`
+    /// will prefer visible (public) reexports of types as paths.
+ fn with_no_visible_paths(NoVisibleGuard, NO_VISIBLE_PATH);
+);
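+
+// Hypothetical use (`ty` assumed in scope): keeping full paths in a
+// diagnostic message even when path trimming is enabled:
+//
+//     let msg = with_no_trimmed_paths!(format!("{}", ty));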
+
+/// The "region highlights" are used to control region printing during
+/// specific error messages. When a "region highlight" is enabled, it
+/// gives an alternate way to print specific regions. For now, we
+/// always print those regions using a number, so something like "`'0`".
+///
+/// Regions not selected by the region highlight mode are presently
+/// unaffected.
+#[derive(Copy, Clone)]
+pub struct RegionHighlightMode<'tcx> {
+ tcx: TyCtxt<'tcx>,
+
+ /// If enabled, when we see the selected region, use "`'N`"
+ /// instead of the ordinary behavior.
+ highlight_regions: [Option<(ty::Region<'tcx>, usize)>; 3],
+
+ /// If enabled, when printing a "free region" that originated from
+ /// the given `ty::BoundRegionKind`, print it as "`'1`". Free regions that would ordinarily
+ /// have names print as normal.
+ ///
+ /// This is used when you have a signature like `fn foo(x: &u32,
+ /// y: &'a u32)` and we want to give a name to the region of the
+ /// reference `x`.
+ highlight_bound_region: Option<(ty::BoundRegionKind, usize)>,
+}
+
+impl<'tcx> RegionHighlightMode<'tcx> {
+ pub fn new(tcx: TyCtxt<'tcx>) -> Self {
+ Self {
+ tcx,
+ highlight_regions: Default::default(),
+ highlight_bound_region: Default::default(),
+ }
+ }
+
+ /// If `region` and `number` are both `Some`, invokes
+ /// `highlighting_region`.
+ pub fn maybe_highlighting_region(
+ &mut self,
+ region: Option<ty::Region<'tcx>>,
+ number: Option<usize>,
+ ) {
+ if let Some(k) = region {
+ if let Some(n) = number {
+ self.highlighting_region(k, n);
+ }
+ }
+ }
+
+    /// Highlights the given `region` as `'N`.
+ pub fn highlighting_region(&mut self, region: ty::Region<'tcx>, number: usize) {
+ let num_slots = self.highlight_regions.len();
+ let first_avail_slot =
+ self.highlight_regions.iter_mut().find(|s| s.is_none()).unwrap_or_else(|| {
+ bug!("can only highlight {} placeholders at a time", num_slots,)
+ });
+ *first_avail_slot = Some((region, number));
+ }
+
+ /// Convenience wrapper for `highlighting_region`.
+ pub fn highlighting_region_vid(&mut self, vid: ty::RegionVid, number: usize) {
+ self.highlighting_region(self.tcx.mk_region(ty::ReVar(vid)), number)
+ }
+
+ /// Returns `Some(n)` with the number to use for the given region, if any.
+ fn region_highlighted(&self, region: ty::Region<'tcx>) -> Option<usize> {
+ self.highlight_regions.iter().find_map(|h| match h {
+ Some((r, n)) if *r == region => Some(*n),
+ _ => None,
+ })
+ }
+
+ /// Highlight the given bound region.
+ /// We can only highlight one bound region at a time. See
+ /// the field `highlight_bound_region` for more detailed notes.
+ pub fn highlighting_bound_region(&mut self, br: ty::BoundRegionKind, number: usize) {
+ assert!(self.highlight_bound_region.is_none());
+ self.highlight_bound_region = Some((br, number));
+ }
+}
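+
+// Hypothetical use (`tcx` and `vid` assumed in scope): printing a region
+// inference variable as `'2` in an error message:
+//
+//     let mut highlight = RegionHighlightMode::new(tcx);
+//     highlight.highlighting_region_vid(vid, 2);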
+
+/// Trait for printers that pretty-print using `fmt::Write` to the printer.
+pub trait PrettyPrinter<'tcx>:
+ Printer<
+ 'tcx,
+ Error = fmt::Error,
+ Path = Self,
+ Region = Self,
+ Type = Self,
+ DynExistential = Self,
+ Const = Self,
+ > + fmt::Write
+{
+ /// Like `print_def_path` but for value paths.
+ fn print_value_path(
+ self,
+ def_id: DefId,
+ substs: &'tcx [GenericArg<'tcx>],
+ ) -> Result<Self::Path, Self::Error> {
+ self.print_def_path(def_id, substs)
+ }
+
+ fn in_binder<T>(self, value: &ty::Binder<'tcx, T>) -> Result<Self, Self::Error>
+ where
+ T: Print<'tcx, Self, Output = Self, Error = Self::Error> + TypeFoldable<'tcx>,
+ {
+ value.as_ref().skip_binder().print(self)
+ }
+
+ fn wrap_binder<T, F: FnOnce(&T, Self) -> Result<Self, fmt::Error>>(
+ self,
+ value: &ty::Binder<'tcx, T>,
+ f: F,
+ ) -> Result<Self, Self::Error>
+ where
+ T: Print<'tcx, Self, Output = Self, Error = Self::Error> + TypeFoldable<'tcx>,
+ {
+ f(value.as_ref().skip_binder(), self)
+ }
+
+ /// Prints comma-separated elements.
+ fn comma_sep<T>(mut self, mut elems: impl Iterator<Item = T>) -> Result<Self, Self::Error>
+ where
+ T: Print<'tcx, Self, Output = Self, Error = Self::Error>,
+ {
+ if let Some(first) = elems.next() {
+ self = first.print(self)?;
+ for elem in elems {
+ self.write_str(", ")?;
+ self = elem.print(self)?;
+ }
+ }
+ Ok(self)
+ }
+
+    /// Prints `{f: t}` or `{f as t}` depending on the `conversion` argument.
+ fn typed_value(
+ mut self,
+ f: impl FnOnce(Self) -> Result<Self, Self::Error>,
+ t: impl FnOnce(Self) -> Result<Self, Self::Error>,
+ conversion: &str,
+ ) -> Result<Self::Const, Self::Error> {
+ self.write_str("{")?;
+ self = f(self)?;
+ self.write_str(conversion)?;
+ self = t(self)?;
+ self.write_str("}")?;
+ Ok(self)
+ }
+
+ /// Prints `<...>` around what `f` prints.
+ fn generic_delimiters(
+ self,
+ f: impl FnOnce(Self) -> Result<Self, Self::Error>,
+ ) -> Result<Self, Self::Error>;
+
+ /// Returns `true` if the region should be printed in
+ /// optional positions, e.g., `&'a T` or `dyn Tr + 'b`.
+ /// This is typically the case for all non-`'_` regions.
+ fn should_print_region(&self, region: ty::Region<'tcx>) -> bool;
+
+ // Defaults (should not be overridden):
+
+ /// If possible, this returns a global path resolving to `def_id` that is visible
+ /// from at least one local module, and returns `true`. If the crate defining `def_id` is
+ /// declared with an `extern crate`, the path is guaranteed to use the `extern crate`.
+ fn try_print_visible_def_path(self, def_id: DefId) -> Result<(Self, bool), Self::Error> {
+ if NO_VISIBLE_PATH.with(|flag| flag.get()) {
+ return Ok((self, false));
+ }
+
+ let mut callers = Vec::new();
+ self.try_print_visible_def_path_recur(def_id, &mut callers)
+ }
+
+ /// Try to see if this path can be trimmed to a unique symbol name.
+ fn try_print_trimmed_def_path(
+ mut self,
+ def_id: DefId,
+ ) -> Result<(Self::Path, bool), Self::Error> {
+ if !self.tcx().sess.opts.unstable_opts.trim_diagnostic_paths
+ || matches!(self.tcx().sess.opts.trimmed_def_paths, TrimmedDefPaths::Never)
+ || NO_TRIMMED_PATH.with(|flag| flag.get())
+ || SHOULD_PREFIX_WITH_CRATE.with(|flag| flag.get())
+ {
+ return Ok((self, false));
+ }
+
+ match self.tcx().trimmed_def_paths(()).get(&def_id) {
+ None => Ok((self, false)),
+ Some(symbol) => {
+ self.write_str(symbol.as_str())?;
+ Ok((self, true))
+ }
+ }
+ }
+
+ /// Does the work of `try_print_visible_def_path`, building the
+ /// full definition path recursively before attempting to
+ /// post-process it into the valid and visible version that
+ /// accounts for re-exports.
+ ///
+ /// This method should only be called by itself or
+ /// `try_print_visible_def_path`.
+ ///
+ /// `callers` is a chain of visible_parent's leading to `def_id`,
+ /// to support cycle detection during recursion.
+ ///
+ /// This method returns false if we can't print the visible path, so
+ /// `print_def_path` can fall back on the item's real definition path.
+ fn try_print_visible_def_path_recur(
+ mut self,
+ def_id: DefId,
+ callers: &mut Vec<DefId>,
+ ) -> Result<(Self, bool), Self::Error> {
+ define_scoped_cx!(self);
+
+ debug!("try_print_visible_def_path: def_id={:?}", def_id);
+
+ // If `def_id` is a direct or injected extern crate, return the
+ // path to the crate followed by the path to the item within the crate.
+ if let Some(cnum) = def_id.as_crate_root() {
+ if cnum == LOCAL_CRATE {
+ return Ok((self.path_crate(cnum)?, true));
+ }
+
+ // In local mode, when we encounter a crate other than
+ // LOCAL_CRATE, execution proceeds in one of two ways:
+ //
+            // 1. For a direct dependency, where the user added an
+ // `extern crate` manually, we put the `extern
+ // crate` as the parent. So you wind up with
+ // something relative to the current crate.
+ // 2. For an extern inferred from a path or an indirect crate,
+ // where there is no explicit `extern crate`, we just prepend
+ // the crate name.
+ match self.tcx().extern_crate(def_id) {
+ Some(&ExternCrate { src, dependency_of, span, .. }) => match (src, dependency_of) {
+ (ExternCrateSource::Extern(def_id), LOCAL_CRATE) => {
+ // NOTE(eddyb) the only reason `span` might be dummy,
+ // that we're aware of, is that it's the `std`/`core`
+ // `extern crate` injected by default.
+ // FIXME(eddyb) find something better to key this on,
+ // or avoid ending up with `ExternCrateSource::Extern`,
+ // for the injected `std`/`core`.
+ if span.is_dummy() {
+ return Ok((self.path_crate(cnum)?, true));
+ }
+
+ // Disable `try_print_trimmed_def_path` behavior within
+ // the `print_def_path` call, to avoid infinite recursion
+ // in cases where the `extern crate foo` has non-trivial
+ // parents, e.g. it's nested in `impl foo::Trait for Bar`
+ // (see also issues #55779 and #87932).
+ self = with_no_visible_paths!(self.print_def_path(def_id, &[])?);
+
+ return Ok((self, true));
+ }
+ (ExternCrateSource::Path, LOCAL_CRATE) => {
+ return Ok((self.path_crate(cnum)?, true));
+ }
+ _ => {}
+ },
+ None => {
+ return Ok((self.path_crate(cnum)?, true));
+ }
+ }
+ }
+
+ if def_id.is_local() {
+ return Ok((self, false));
+ }
+
+ let visible_parent_map = self.tcx().visible_parent_map(());
+
+ let mut cur_def_key = self.tcx().def_key(def_id);
+ debug!("try_print_visible_def_path: cur_def_key={:?}", cur_def_key);
+
+ // For a constructor, we want the name of its parent rather than <unnamed>.
+ if let DefPathData::Ctor = cur_def_key.disambiguated_data.data {
+ let parent = DefId {
+ krate: def_id.krate,
+ index: cur_def_key
+ .parent
+ .expect("`DefPathData::Ctor` / `VariantData` missing a parent"),
+ };
+
+ cur_def_key = self.tcx().def_key(parent);
+ }
+
+ let Some(visible_parent) = visible_parent_map.get(&def_id).cloned() else {
+ return Ok((self, false));
+ };
+
+ let actual_parent = self.tcx().opt_parent(def_id);
+ debug!(
+ "try_print_visible_def_path: visible_parent={:?} actual_parent={:?}",
+ visible_parent, actual_parent,
+ );
+
+ let mut data = cur_def_key.disambiguated_data.data;
+ debug!(
+ "try_print_visible_def_path: data={:?} visible_parent={:?} actual_parent={:?}",
+ data, visible_parent, actual_parent,
+ );
+
+ match data {
+ // In order to output a path that could actually be imported (valid and visible),
+ // we need to handle re-exports correctly.
+ //
+ // For example, take `std::os::unix::process::CommandExt`, this trait is actually
+ // defined at `std::sys::unix::ext::process::CommandExt` (at time of writing).
+ //
+ // `std::os::unix` reexports the contents of `std::sys::unix::ext`. `std::sys` is
+ // private so the "true" path to `CommandExt` isn't accessible.
+ //
+ // In this case, the `visible_parent_map` will look something like this:
+ //
+ // (child) -> (parent)
+ // `std::sys::unix::ext::process::CommandExt` -> `std::sys::unix::ext::process`
+ // `std::sys::unix::ext::process` -> `std::sys::unix::ext`
+ // `std::sys::unix::ext` -> `std::os`
+ //
+ // This is correct, as the visible parent of `std::sys::unix::ext` is in fact
+ // `std::os`.
+ //
+ // When printing the path to `CommandExt` and looking at the `cur_def_key` that
+ // corresponds to `std::sys::unix::ext`, we would normally print `ext` and then go
+ // to the parent - resulting in a mangled path like
+ // `std::os::ext::process::CommandExt`.
+ //
+ // Instead, we must detect that there was a re-export and instead print `unix`
+ // (which is the name `std::sys::unix::ext` was re-exported as in `std::os`). To
+ // do this, we compare the parent of `std::sys::unix::ext` (`std::sys::unix`) with
+ // the visible parent (`std::os`). If these do not match, then we iterate over
+ // the children of the visible parent (as was done when computing
+ // `visible_parent_map`), looking for the specific child we currently have and then
+ // have access to the re-exported name.
+ DefPathData::TypeNs(ref mut name) if Some(visible_parent) != actual_parent => {
+ // Item might be re-exported several times, but filter for the one
+ // that's public and whose identifier isn't `_`.
+ let reexport = self
+ .tcx()
+ .module_children(visible_parent)
+ .iter()
+ .filter(|child| child.res.opt_def_id() == Some(def_id))
+ .find(|child| child.vis.is_public() && child.ident.name != kw::Underscore)
+ .map(|child| child.ident.name);
+
+ if let Some(new_name) = reexport {
+ *name = new_name;
+ } else {
+ // There is no name that is public and isn't `_`, so bail.
+ return Ok((self, false));
+ }
+ }
+ // Re-exported `extern crate` (#43189).
+ DefPathData::CrateRoot => {
+ data = DefPathData::TypeNs(self.tcx().crate_name(def_id.krate));
+ }
+ _ => {}
+ }
+ debug!("try_print_visible_def_path: data={:?}", data);
+
+ if callers.contains(&visible_parent) {
+ return Ok((self, false));
+ }
+ callers.push(visible_parent);
+ // HACK(eddyb) this bypasses `path_append`'s prefix printing to avoid
+ // knowing ahead of time whether the entire path will succeed or not.
+ // To support printers that do not implement `PrettyPrinter`, a `Vec` or
+ // linked list on the stack would need to be built, before any printing.
+ match self.try_print_visible_def_path_recur(visible_parent, callers)? {
+ (cx, false) => return Ok((cx, false)),
+ (cx, true) => self = cx,
+ }
+ callers.pop();
+
+ Ok((self.path_append(Ok, &DisambiguatedDefPathData { data, disambiguator: 0 })?, true))
+ }
+
+ fn pretty_path_qualified(
+ self,
+ self_ty: Ty<'tcx>,
+ trait_ref: Option<ty::TraitRef<'tcx>>,
+ ) -> Result<Self::Path, Self::Error> {
+ if trait_ref.is_none() {
+            // Inherent impls. Try to print `Foo::bar` for an inherent
+            // impl on `Foo`, but fall back to `<Foo>::bar` if the self
+            // type is anything other than a simple path.
+ match self_ty.kind() {
+ ty::Adt(..)
+ | ty::Foreign(_)
+ | ty::Bool
+ | ty::Char
+ | ty::Str
+ | ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_) => {
+ return self_ty.print(self);
+ }
+
+ _ => {}
+ }
+ }
+
+ self.generic_delimiters(|mut cx| {
+ define_scoped_cx!(cx);
+
+ p!(print(self_ty));
+ if let Some(trait_ref) = trait_ref {
+ p!(" as ", print(trait_ref.print_only_trait_path()));
+ }
+ Ok(cx)
+ })
+ }
+
+ fn pretty_path_append_impl(
+ mut self,
+ print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
+ self_ty: Ty<'tcx>,
+ trait_ref: Option<ty::TraitRef<'tcx>>,
+ ) -> Result<Self::Path, Self::Error> {
+ self = print_prefix(self)?;
+
+ self.generic_delimiters(|mut cx| {
+ define_scoped_cx!(cx);
+
+ p!("impl ");
+ if let Some(trait_ref) = trait_ref {
+ p!(print(trait_ref.print_only_trait_path()), " for ");
+ }
+ p!(print(self_ty));
+
+ Ok(cx)
+ })
+ }
+
+ fn pretty_print_type(mut self, ty: Ty<'tcx>) -> Result<Self::Type, Self::Error> {
+ define_scoped_cx!(self);
+
+ match *ty.kind() {
+ ty::Bool => p!("bool"),
+ ty::Char => p!("char"),
+ ty::Int(t) => p!(write("{}", t.name_str())),
+ ty::Uint(t) => p!(write("{}", t.name_str())),
+ ty::Float(t) => p!(write("{}", t.name_str())),
+ ty::RawPtr(ref tm) => {
+ p!(write(
+ "*{} ",
+ match tm.mutbl {
+ hir::Mutability::Mut => "mut",
+ hir::Mutability::Not => "const",
+ }
+ ));
+ p!(print(tm.ty))
+ }
+ ty::Ref(r, ty, mutbl) => {
+ p!("&");
+ if self.should_print_region(r) {
+ p!(print(r), " ");
+ }
+ p!(print(ty::TypeAndMut { ty, mutbl }))
+ }
+ ty::Never => p!("!"),
+ ty::Tuple(ref tys) => {
+ p!("(", comma_sep(tys.iter()));
+ if tys.len() == 1 {
+ p!(",");
+ }
+ p!(")")
+ }
+ ty::FnDef(def_id, substs) => {
+ let sig = self.tcx().bound_fn_sig(def_id).subst(self.tcx(), substs);
+ p!(print(sig), " {{", print_value_path(def_id, substs), "}}");
+ }
+ ty::FnPtr(ref bare_fn) => p!(print(bare_fn)),
+ ty::Infer(infer_ty) => {
+ let verbose = self.tcx().sess.verbose();
+ if let ty::TyVar(ty_vid) = infer_ty {
+ if let Some(name) = self.ty_infer_name(ty_vid) {
+ p!(write("{}", name))
+ } else {
+ if verbose {
+ p!(write("{:?}", infer_ty))
+ } else {
+ p!(write("{}", infer_ty))
+ }
+ }
+ } else {
+ if verbose { p!(write("{:?}", infer_ty)) } else { p!(write("{}", infer_ty)) }
+ }
+ }
+ ty::Error(_) => p!("[type error]"),
+ ty::Param(ref param_ty) => p!(print(param_ty)),
+ ty::Bound(debruijn, bound_ty) => match bound_ty.kind {
+ ty::BoundTyKind::Anon => self.pretty_print_bound_var(debruijn, bound_ty.var)?,
+ ty::BoundTyKind::Param(p) => p!(write("{}", p)),
+ },
+ ty::Adt(def, substs) => {
+ p!(print_def_path(def.did(), substs));
+ }
+ ty::Dynamic(data, r) => {
+ let print_r = self.should_print_region(r);
+ if print_r {
+ p!("(");
+ }
+ p!("dyn ", print(data));
+ if print_r {
+ p!(" + ", print(r), ")");
+ }
+ }
+ ty::Foreign(def_id) => {
+ p!(print_def_path(def_id, &[]));
+ }
+ ty::Projection(ref data) => p!(print(data)),
+ ty::Placeholder(placeholder) => p!(write("Placeholder({:?})", placeholder)),
+ ty::Opaque(def_id, substs) => {
+ // FIXME(eddyb) print this with `print_def_path`.
+ // We use verbose printing in 'NO_QUERIES' mode, to
+ // avoid needing to call `predicates_of`. This should
+ // only affect certain debug messages (e.g. messages printed
+ // from `rustc_middle::ty` during the computation of `tcx.predicates_of`),
+ // and should have no effect on any compiler output.
+ if self.tcx().sess.verbose() || NO_QUERIES.with(|q| q.get()) {
+ p!(write("Opaque({:?}, {:?})", def_id, substs));
+ return Ok(self);
+ }
+
+ let parent = self.tcx().parent(def_id);
+ match self.tcx().def_kind(parent) {
+ DefKind::TyAlias | DefKind::AssocTy => {
+ if let ty::Opaque(d, _) = *self.tcx().type_of(parent).kind() {
+ if d == def_id {
+ // If the type alias directly starts with the `impl` of the
+ // opaque type we're printing, then skip the `::{opaque#1}`.
+ p!(print_def_path(parent, substs));
+ return Ok(self);
+ }
+ }
+ // Complex opaque type, e.g. `type Foo = (i32, impl Debug);`
+ p!(print_def_path(def_id, substs));
+ return Ok(self);
+ }
+ _ => return self.pretty_print_opaque_impl_type(def_id, substs),
+ }
+ }
+ ty::Str => p!("str"),
+ ty::Generator(did, substs, movability) => {
+ p!(write("["));
+ match movability {
+ hir::Movability::Movable => {}
+ hir::Movability::Static => p!("static "),
+ }
+
+ if !self.tcx().sess.verbose() {
+ p!("generator");
+ // FIXME(eddyb) should use `def_span`.
+ if let Some(did) = did.as_local() {
+ let span = self.tcx().def_span(did);
+ p!(write(
+ "@{}",
+ // This may end up in stderr diagnostics but it may also be emitted
+ // into MIR. Hence we use the remapped path if available
+ self.tcx().sess.source_map().span_to_embeddable_string(span)
+ ));
+ } else {
+ p!(write("@"), print_def_path(did, substs));
+ }
+ } else {
+ p!(print_def_path(did, substs));
+ p!(" upvar_tys=(");
+ if !substs.as_generator().is_valid() {
+ p!("unavailable");
+ } else {
+ self = self.comma_sep(substs.as_generator().upvar_tys())?;
+ }
+ p!(")");
+
+ if substs.as_generator().is_valid() {
+ p!(" ", print(substs.as_generator().witness()));
+ }
+ }
+
+ p!("]")
+ }
+ ty::GeneratorWitness(types) => {
+ p!(in_binder(&types));
+ }
+ ty::Closure(did, substs) => {
+ p!(write("["));
+ if !self.tcx().sess.verbose() {
+ p!(write("closure"));
+ // FIXME(eddyb) should use `def_span`.
+ if let Some(did) = did.as_local() {
+ if self.tcx().sess.opts.unstable_opts.span_free_formats {
+ p!("@", print_def_path(did.to_def_id(), substs));
+ } else {
+ let span = self.tcx().def_span(did);
+ p!(write(
+ "@{}",
+ // This may end up in stderr diagnostics but it may also be emitted
+ // into MIR. Hence we use the remapped path if available
+ self.tcx().sess.source_map().span_to_embeddable_string(span)
+ ));
+ }
+ } else {
+ p!(write("@"), print_def_path(did, substs));
+ }
+ } else {
+ p!(print_def_path(did, substs));
+ if !substs.as_closure().is_valid() {
+ p!(" closure_substs=(unavailable)");
+ p!(write(" substs={:?}", substs));
+ } else {
+ p!(" closure_kind_ty=", print(substs.as_closure().kind_ty()));
+ p!(
+ " closure_sig_as_fn_ptr_ty=",
+ print(substs.as_closure().sig_as_fn_ptr_ty())
+ );
+ p!(" upvar_tys=(");
+ self = self.comma_sep(substs.as_closure().upvar_tys())?;
+ p!(")");
+ }
+ }
+ p!("]");
+ }
+ ty::Array(ty, sz) => {
+ p!("[", print(ty), "; ");
+ if self.tcx().sess.verbose() {
+ p!(write("{:?}", sz));
+ } else if let ty::ConstKind::Unevaluated(..) = sz.kind() {
+ // Do not try to evaluate unevaluated constants. If we are const-evaluating an
+ // array-length anon const, rustc will (with debug assertions) print the
+ // constant's path, which would end up right back here again.
+ p!("_");
+ } else if let Some(n) = sz.kind().try_to_bits(self.tcx().data_layout.pointer_size) {
+ p!(write("{}", n));
+ } else if let ty::ConstKind::Param(param) = sz.kind() {
+ p!(print(param));
+ } else {
+ p!("_");
+ }
+ p!("]")
+ }
+ ty::Slice(ty) => p!("[", print(ty), "]"),
+ }
+
+ Ok(self)
+ }
+
+ fn pretty_print_opaque_impl_type(
+ mut self,
+ def_id: DefId,
+ substs: &'tcx ty::List<ty::GenericArg<'tcx>>,
+ ) -> Result<Self::Type, Self::Error> {
+ let tcx = self.tcx();
+
+ // Grab the "TraitA + TraitB" from `impl TraitA + TraitB`,
+ // by looking up the projections associated with the def_id.
+ let bounds = tcx.bound_explicit_item_bounds(def_id);
+
+ let mut traits = FxIndexMap::default();
+ let mut fn_traits = FxIndexMap::default();
+ let mut is_sized = false;
+
+ for predicate in bounds.transpose_iter().map(|e| e.map_bound(|(p, _)| *p)) {
+ let predicate = predicate.subst(tcx, substs);
+ let bound_predicate = predicate.kind();
+
+ match bound_predicate.skip_binder() {
+ ty::PredicateKind::Trait(pred) => {
+ let trait_ref = bound_predicate.rebind(pred.trait_ref);
+
+ // Don't print `+ Sized`; instead, print `+ ?Sized` at the end if `Sized` is absent.
+ if Some(trait_ref.def_id()) == tcx.lang_items().sized_trait() {
+ is_sized = true;
+ continue;
+ }
+
+ self.insert_trait_and_projection(trait_ref, None, &mut traits, &mut fn_traits);
+ }
+ ty::PredicateKind::Projection(pred) => {
+ let proj_ref = bound_predicate.rebind(pred);
+ let trait_ref = proj_ref.required_poly_trait_ref(tcx);
+
+ // Projection type entry -- the def-id for naming, and the ty.
+ let proj_ty = (proj_ref.projection_def_id(), proj_ref.term());
+
+ self.insert_trait_and_projection(
+ trait_ref,
+ Some(proj_ty),
+ &mut traits,
+ &mut fn_traits,
+ );
+ }
+ _ => {}
+ }
+ }
+
+ write!(self, "impl ")?;
+
+ let mut first = true;
+ // Insert parentheses around `Fn(A, B) -> C` if the opaque type has any other bound to print.
+ let paren_needed = fn_traits.len() > 1 || traits.len() > 0 || !is_sized;
+
+ for (fn_once_trait_ref, entry) in fn_traits {
+ write!(self, "{}", if first { "" } else { " + " })?;
+ write!(self, "{}", if paren_needed { "(" } else { "" })?;
+
+ self = self.wrap_binder(&fn_once_trait_ref, |trait_ref, mut cx| {
+ define_scoped_cx!(cx);
+ // Get the (single) generic ty (the args) of this FnOnce trait ref.
+ let generics = tcx.generics_of(trait_ref.def_id);
+ let args = generics.own_substs_no_defaults(tcx, trait_ref.substs);
+
+ match (entry.return_ty, args[0].expect_ty()) {
+ // We can only print `impl Fn() -> ()` if we have a tuple of args and we recorded
+ // a return type.
+ (Some(return_ty), arg_tys) if matches!(arg_tys.kind(), ty::Tuple(_)) => {
+ let name = if entry.fn_trait_ref.is_some() {
+ "Fn"
+ } else if entry.fn_mut_trait_ref.is_some() {
+ "FnMut"
+ } else {
+ "FnOnce"
+ };
+
+ p!(write("{}(", name));
+
+ for (idx, ty) in arg_tys.tuple_fields().iter().enumerate() {
+ if idx > 0 {
+ p!(", ");
+ }
+ p!(print(ty));
+ }
+
+ p!(")");
+ if let Term::Ty(ty) = return_ty.skip_binder() {
+ if !ty.is_unit() {
+ p!(" -> ", print(return_ty));
+ }
+ }
+ p!(write("{}", if paren_needed { ")" } else { "" }));
+
+ first = false;
+ }
+ // If we got here, we can't print this as an `impl Fn(A, B) -> C`. Just record the
+ // trait refs we collected in the `OpaqueFnEntry` as normal trait refs.
+ _ => {
+ if entry.has_fn_once {
+ traits.entry(fn_once_trait_ref).or_default().extend(
+ // Group the return ty with its def id, if we had one.
+ entry
+ .return_ty
+ .map(|ty| (tcx.lang_items().fn_once_output().unwrap(), ty)),
+ );
+ }
+ if let Some(trait_ref) = entry.fn_mut_trait_ref {
+ traits.entry(trait_ref).or_default();
+ }
+ if let Some(trait_ref) = entry.fn_trait_ref {
+ traits.entry(trait_ref).or_default();
+ }
+ }
+ }
+
+ Ok(cx)
+ })?;
+ }
+
+ // Print the remaining trait bounds (those not in the `Fn*` family of traits).
+ for (trait_ref, assoc_items) in traits {
+ write!(self, "{}", if first { "" } else { " + " })?;
+
+ self = self.wrap_binder(&trait_ref, |trait_ref, mut cx| {
+ define_scoped_cx!(cx);
+ p!(print(trait_ref.print_only_trait_name()));
+
+ let generics = tcx.generics_of(trait_ref.def_id);
+ let args = generics.own_substs_no_defaults(tcx, trait_ref.substs);
+
+ if !args.is_empty() || !assoc_items.is_empty() {
+ let mut first = true;
+
+ for ty in args {
+ if first {
+ p!("<");
+ first = false;
+ } else {
+ p!(", ");
+ }
+ p!(print(ty));
+ }
+
+ for (assoc_item_def_id, term) in assoc_items {
+ // Skip printing `<[generator@] as Generator<_>>::Return` from async blocks,
+ // unless we can find out what generator return type it comes from.
+ let term = if let Some(ty) = term.skip_binder().ty()
+ && let ty::Projection(ty::ProjectionTy { item_def_id, substs }) = ty.kind()
+ && Some(*item_def_id) == tcx.lang_items().generator_return()
+ {
+ if let ty::Generator(_, substs, _) = substs.type_at(0).kind() {
+ let return_ty = substs.as_generator().return_ty();
+ if !return_ty.is_ty_infer() {
+ return_ty.into()
+ } else {
+ continue;
+ }
+ } else {
+ continue;
+ }
+ } else {
+ term.skip_binder()
+ };
+
+ if first {
+ p!("<");
+ first = false;
+ } else {
+ p!(", ");
+ }
+
+ p!(write("{} = ", tcx.associated_item(assoc_item_def_id).name));
+
+ match term {
+ Term::Ty(ty) => {
+ p!(print(ty))
+ }
+ Term::Const(c) => {
+ p!(print(c));
+ }
+ };
+ }
+
+ if !first {
+ p!(">");
+ }
+ }
+
+ first = false;
+ Ok(cx)
+ })?;
+ }
+
+ if !is_sized {
+ write!(self, "{}?Sized", if first { "" } else { " + " })?;
+ } else if first {
+ write!(self, "Sized")?;
+ }
+
+ Ok(self)
+ }
+
+ /// Inserts the trait ref, and optionally a projection type associated with it, into either
+ /// the `traits` map or the `fn_traits` map, depending on whether the trait is in the `Fn*`
+ /// family of traits.
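+ ///
+ /// For example, an `FnMut(u32) -> String` bound is grouped in `fn_traits` under its
+ /// `FnOnce` super-trait ref, while a bound like `Iterator<Item = u8>` goes into `traits`.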
+ fn insert_trait_and_projection(
+ &mut self,
+ trait_ref: ty::PolyTraitRef<'tcx>,
+ proj_ty: Option<(DefId, ty::Binder<'tcx, Term<'tcx>>)>,
+ traits: &mut FxIndexMap<
+ ty::PolyTraitRef<'tcx>,
+ FxIndexMap<DefId, ty::Binder<'tcx, Term<'tcx>>>,
+ >,
+ fn_traits: &mut FxIndexMap<ty::PolyTraitRef<'tcx>, OpaqueFnEntry<'tcx>>,
+ ) {
+ let trait_def_id = trait_ref.def_id();
+
+ // If our trait_ref is FnOnce or any of its children, project it onto the parent FnOnce
+ // super-trait ref and record it there.
+ if let Some(fn_once_trait) = self.tcx().lang_items().fn_once_trait() {
+ // If we have a `FnOnce` itself, insert it into `fn_traits` directly.
+ if trait_def_id == fn_once_trait {
+ let entry = fn_traits.entry(trait_ref).or_default();
+ // Optionally insert the return_ty as well.
+ if let Some((_, ty)) = proj_ty {
+ entry.return_ty = Some(ty);
+ }
+ entry.has_fn_once = true;
+ return;
+ } else if Some(trait_def_id) == self.tcx().lang_items().fn_mut_trait() {
+ let super_trait_ref = crate::traits::util::supertraits(self.tcx(), trait_ref)
+ .find(|super_trait_ref| super_trait_ref.def_id() == fn_once_trait)
+ .unwrap();
+
+ fn_traits.entry(super_trait_ref).or_default().fn_mut_trait_ref = Some(trait_ref);
+ return;
+ } else if Some(trait_def_id) == self.tcx().lang_items().fn_trait() {
+ let super_trait_ref = crate::traits::util::supertraits(self.tcx(), trait_ref)
+ .find(|super_trait_ref| super_trait_ref.def_id() == fn_once_trait)
+ .unwrap();
+
+ fn_traits.entry(super_trait_ref).or_default().fn_trait_ref = Some(trait_ref);
+ return;
+ }
+ }
+
+ // Otherwise, just group our traits and projection types.
+ traits.entry(trait_ref).or_default().extend(proj_ty);
+ }
+
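+ /// Prints a bound variable as `^N` when it belongs to the innermost binder,
+ /// or as `^D_N` with its De Bruijn depth `D` otherwise (e.g. `^0`, `^1_2`).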
+ fn pretty_print_bound_var(
+ &mut self,
+ debruijn: ty::DebruijnIndex,
+ var: ty::BoundVar,
+ ) -> Result<(), Self::Error> {
+ if debruijn == ty::INNERMOST {
+ write!(self, "^{}", var.index())
+ } else {
+ write!(self, "^{}_{}", debruijn.index(), var.index())
+ }
+ }
+
+ fn ty_infer_name(&self, _: ty::TyVid) -> Option<Symbol> {
+ None
+ }
+
+ fn const_infer_name(&self, _: ty::ConstVid<'tcx>) -> Option<Symbol> {
+ None
+ }
+
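+ /// Prints the contents of a trait object, e.g. the `Trait<Assoc = T> + Send + Sync`
+ /// part of `dyn Trait<Assoc = T> + Send + Sync`; the leading `dyn ` itself is written
+ /// by `pretty_print_type`. Auto traits are sorted alphabetically for stable output.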
+ fn pretty_print_dyn_existential(
+ mut self,
+ predicates: &'tcx ty::List<ty::Binder<'tcx, ty::ExistentialPredicate<'tcx>>>,
+ ) -> Result<Self::DynExistential, Self::Error> {
+ // Generate the main trait ref, including associated types.
+ let mut first = true;
+
+ if let Some(principal) = predicates.principal() {
+ self = self.wrap_binder(&principal, |principal, mut cx| {
+ define_scoped_cx!(cx);
+ p!(print_def_path(principal.def_id, &[]));
+
+ let mut resugared = false;
+
+ // Special-case `Fn(...) -> ...` and re-sugar it.
+ let fn_trait_kind = cx.tcx().fn_trait_kind_from_lang_item(principal.def_id);
+ if !cx.tcx().sess.verbose() && fn_trait_kind.is_some() {
+ if let ty::Tuple(tys) = principal.substs.type_at(0).kind() {
+ let mut projections = predicates.projection_bounds();
+ if let (Some(proj), None) = (projections.next(), projections.next()) {
+ p!(pretty_fn_sig(
+ tys,
+ false,
+ proj.skip_binder().term.ty().expect("Return type was a const")
+ ));
+ resugared = true;
+ }
+ }
+ }
+
+ // HACK(eddyb) this duplicates `FmtPrinter`'s `path_generic_args`,
+ // in order to place the projections inside the `<...>`.
+ if !resugared {
+ // Use a type that can't appear in defaults of type parameters.
+ let dummy_self = cx.tcx().mk_ty_infer(ty::FreshTy(0));
+ let principal = principal.with_self_ty(cx.tcx(), dummy_self);
+
+ let args = cx
+ .tcx()
+ .generics_of(principal.def_id)
+ .own_substs_no_defaults(cx.tcx(), principal.substs);
+
+ // Don't print `'_` if there are no unerased regions.
+ let print_regions = args.iter().any(|arg| match arg.unpack() {
+ GenericArgKind::Lifetime(r) => !r.is_erased(),
+ _ => false,
+ });
+ let mut args = args.iter().cloned().filter(|arg| match arg.unpack() {
+ GenericArgKind::Lifetime(_) => print_regions,
+ _ => true,
+ });
+ let mut projections = predicates.projection_bounds();
+
+ let arg0 = args.next();
+ let projection0 = projections.next();
+ if arg0.is_some() || projection0.is_some() {
+ let args = arg0.into_iter().chain(args);
+ let projections = projection0.into_iter().chain(projections);
+
+ p!(generic_delimiters(|mut cx| {
+ cx = cx.comma_sep(args)?;
+ if arg0.is_some() && projection0.is_some() {
+ write!(cx, ", ")?;
+ }
+ cx.comma_sep(projections)
+ }));
+ }
+ }
+ Ok(cx)
+ })?;
+
+ first = false;
+ }
+
+ define_scoped_cx!(self);
+
+ // Builtin bounds.
+ // FIXME(eddyb) avoid printing twice (needed to ensure
+ // that the auto traits are sorted *and* printed via cx).
+ let mut auto_traits: Vec<_> = predicates.auto_traits().collect();
+
+ // The auto traits come ordered by `DefPathHash`. While
+ // `DefPathHash` is *stable* in the sense that it depends on
+ // neither the host nor the phase of the moon, it depends
+ // "pseudorandomly" on the compiler version and the target.
+ //
+ // To avoid causing instabilities in compiletest
+ // output, sort the auto-traits alphabetically.
+ auto_traits.sort_by_cached_key(|did| self.tcx().def_path_str(*did));
+
+ for def_id in auto_traits {
+ if !first {
+ p!(" + ");
+ }
+ first = false;
+
+ p!(print_def_path(def_id, &[]));
+ }
+
+ Ok(self)
+ }
+
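+ /// Prints the parenthesized part of a function signature, e.g. `(i32, u64) -> bool`,
+ /// appending `...` for C-variadic signatures and omitting the return type when it is `()`.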
+ fn pretty_fn_sig(
+ mut self,
+ inputs: &[Ty<'tcx>],
+ c_variadic: bool,
+ output: Ty<'tcx>,
+ ) -> Result<Self, Self::Error> {
+ define_scoped_cx!(self);
+
+ p!("(", comma_sep(inputs.iter().copied()));
+ if c_variadic {
+ if !inputs.is_empty() {
+ p!(", ");
+ }
+ p!("...");
+ }
+ p!(")");
+ if !output.is_unit() {
+ p!(" -> ", print(output));
+ }
+
+ Ok(self)
+ }
+
+ fn pretty_print_const(
+ mut self,
+ ct: ty::Const<'tcx>,
+ print_ty: bool,
+ ) -> Result<Self::Const, Self::Error> {
+ define_scoped_cx!(self);
+
+ if self.tcx().sess.verbose() {
+ p!(write("Const({:?}: {:?})", ct.kind(), ct.ty()));
+ return Ok(self);
+ }
+
+ macro_rules! print_underscore {
+ () => {{
+ if print_ty {
+ self = self.typed_value(
+ |mut this| {
+ write!(this, "_")?;
+ Ok(this)
+ },
+ |this| this.print_type(ct.ty()),
+ ": ",
+ )?;
+ } else {
+ write!(self, "_")?;
+ }
+ }};
+ }
+
+ match ct.kind() {
+ ty::ConstKind::Unevaluated(ty::Unevaluated {
+ def,
+ substs,
+ promoted: Some(promoted),
+ }) => {
+ p!(print_value_path(def.did, substs));
+ p!(write("::{:?}", promoted));
+ }
+ ty::ConstKind::Unevaluated(ty::Unevaluated { def, substs, promoted: None }) => {
+ match self.tcx().def_kind(def.did) {
+ DefKind::Static(..) | DefKind::Const | DefKind::AssocConst => {
+ p!(print_value_path(def.did, substs))
+ }
+ _ => {
+ if def.is_local() {
+ let span = self.tcx().def_span(def.did);
+ if let Ok(snip) = self.tcx().sess.source_map().span_to_snippet(span) {
+ p!(write("{}", snip))
+ } else {
+ print_underscore!()
+ }
+ } else {
+ print_underscore!()
+ }
+ }
+ }
+ }
+ ty::ConstKind::Infer(infer_ct) => {
+ match infer_ct {
+ ty::InferConst::Var(ct_vid)
+ if let Some(name) = self.const_infer_name(ct_vid) =>
+ p!(write("{}", name)),
+ _ => print_underscore!(),
+ }
+ }
+ ty::ConstKind::Param(ParamConst { name, .. }) => p!(write("{}", name)),
+ ty::ConstKind::Value(value) => {
+ return self.pretty_print_const_valtree(value, ct.ty(), print_ty);
+ }
+
+ ty::ConstKind::Bound(debruijn, bound_var) => {
+ self.pretty_print_bound_var(debruijn, bound_var)?
+ }
+ ty::ConstKind::Placeholder(placeholder) => p!(write("Placeholder({:?})", placeholder)),
+ ty::ConstKind::Error(_) => p!("[const error]"),
+ };
+ Ok(self)
+ }
+
+ fn pretty_print_const_scalar(
+ self,
+ scalar: Scalar,
+ ty: Ty<'tcx>,
+ print_ty: bool,
+ ) -> Result<Self::Const, Self::Error> {
+ match scalar {
+ Scalar::Ptr(ptr, _size) => self.pretty_print_const_scalar_ptr(ptr, ty, print_ty),
+ Scalar::Int(int) => self.pretty_print_const_scalar_int(int, ty, print_ty),
+ }
+ }
+
+ fn pretty_print_const_scalar_ptr(
+ mut self,
+ ptr: Pointer,
+ ty: Ty<'tcx>,
+ print_ty: bool,
+ ) -> Result<Self::Const, Self::Error> {
+ define_scoped_cx!(self);
+
+ let (alloc_id, offset) = ptr.into_parts();
+ match ty.kind() {
+ // Byte strings (&[u8; N])
+ ty::Ref(_, inner, _) => {
+ if let ty::Array(elem, len) = inner.kind() {
+ if let ty::Uint(ty::UintTy::U8) = elem.kind() {
+ if let ty::ConstKind::Value(ty::ValTree::Leaf(int)) = len.kind() {
+ match self.tcx().try_get_global_alloc(alloc_id) {
+ Some(GlobalAlloc::Memory(alloc)) => {
+ let len = int.assert_bits(self.tcx().data_layout.pointer_size);
+ let range =
+ AllocRange { start: offset, size: Size::from_bytes(len) };
+ if let Ok(byte_str) =
+ alloc.inner().get_bytes(&self.tcx(), range)
+ {
+ p!(pretty_print_byte_str(byte_str))
+ } else {
+ p!("<too short allocation>")
+ }
+ }
+ // FIXME: for statics, vtables, and functions, we could in principle print more detail.
+ Some(GlobalAlloc::Static(def_id)) => {
+ p!(write("<static({:?})>", def_id))
+ }
+ Some(GlobalAlloc::Function(_)) => p!("<function>"),
+ Some(GlobalAlloc::VTable(..)) => p!("<vtable>"),
+ None => p!("<dangling pointer>"),
+ }
+ return Ok(self);
+ }
+ }
+ }
+ }
+ ty::FnPtr(_) => {
+ // FIXME: We should probably have a helper method to share code with the "Byte strings"
+ // printing above (which also has to handle pointers to all sorts of things).
+ if let Some(GlobalAlloc::Function(instance)) =
+ self.tcx().try_get_global_alloc(alloc_id)
+ {
+ self = self.typed_value(
+ |this| this.print_value_path(instance.def_id(), instance.substs),
+ |this| this.print_type(ty),
+ " as ",
+ )?;
+ return Ok(self);
+ }
+ }
+ _ => {}
+ }
+ // Any pointer values not covered by a branch above
+ self = self.pretty_print_const_pointer(ptr, ty, print_ty)?;
+ Ok(self)
+ }
+
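+ /// Prints a scalar constant according to its type: e.g. `true`, `1f32`, a char or
+ /// integer literal, `{0x3f as *const u8}` for pointer-typed values, or a
+ /// `transmute(..)` rendering for other types with a scalar representation.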
+ fn pretty_print_const_scalar_int(
+ mut self,
+ int: ScalarInt,
+ ty: Ty<'tcx>,
+ print_ty: bool,
+ ) -> Result<Self::Const, Self::Error> {
+ define_scoped_cx!(self);
+
+ match ty.kind() {
+ // Bool
+ ty::Bool if int == ScalarInt::FALSE => p!("false"),
+ ty::Bool if int == ScalarInt::TRUE => p!("true"),
+ // Float
+ ty::Float(ty::FloatTy::F32) => {
+ p!(write("{}f32", Single::try_from(int).unwrap()))
+ }
+ ty::Float(ty::FloatTy::F64) => {
+ p!(write("{}f64", Double::try_from(int).unwrap()))
+ }
+ // Int
+ ty::Uint(_) | ty::Int(_) => {
+ let int =
+ ConstInt::new(int, matches!(ty.kind(), ty::Int(_)), ty.is_ptr_sized_integral());
+ if print_ty { p!(write("{:#?}", int)) } else { p!(write("{:?}", int)) }
+ }
+ // Char
+ ty::Char if char::try_from(int).is_ok() => {
+ p!(write("{:?}", char::try_from(int).unwrap()))
+ }
+ // Pointer types
+ ty::Ref(..) | ty::RawPtr(_) | ty::FnPtr(_) => {
+ let data = int.assert_bits(self.tcx().data_layout.pointer_size);
+ self = self.typed_value(
+ |mut this| {
+ write!(this, "0x{:x}", data)?;
+ Ok(this)
+ },
+ |this| this.print_type(ty),
+ " as ",
+ )?;
+ }
+ // Nontrivial types with scalar bit representation
+ _ => {
+ let print = |mut this: Self| {
+ if int.size() == Size::ZERO {
+ write!(this, "transmute(())")?;
+ } else {
+ write!(this, "transmute(0x{:x})", int)?;
+ }
+ Ok(this)
+ };
+ self = if print_ty {
+ self.typed_value(print, |this| this.print_type(ty), ": ")?
+ } else {
+ print(self)?
+ };
+ }
+ }
+ Ok(self)
+ }
+
+ /// This is overridden for MIR printing because we only want to hide alloc ids from users, not
+ /// from MIR where it is actually useful.
+ fn pretty_print_const_pointer<Prov: Provenance>(
+ mut self,
+ _: Pointer<Prov>,
+ ty: Ty<'tcx>,
+ print_ty: bool,
+ ) -> Result<Self::Const, Self::Error> {
+ if print_ty {
+ self.typed_value(
+ |mut this| {
+ this.write_str("&_")?;
+ Ok(this)
+ },
+ |this| this.print_type(ty),
+ ": ",
+ )
+ } else {
+ self.write_str("&_")?;
+ Ok(self)
+ }
+ }
+
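+ /// Prints a byte-string literal, escaping non-printable bytes, e.g. `b"\xffabc"`.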
+ fn pretty_print_byte_str(mut self, byte_str: &'tcx [u8]) -> Result<Self::Const, Self::Error> {
+ define_scoped_cx!(self);
+ p!("b\"");
+ for &c in byte_str {
+ for e in std::ascii::escape_default(c) {
+ self.write_char(e as char)?;
+ }
+ }
+ p!("\"");
+ Ok(self)
+ }
+
+ fn pretty_print_const_valtree(
+ mut self,
+ valtree: ty::ValTree<'tcx>,
+ ty: Ty<'tcx>,
+ print_ty: bool,
+ ) -> Result<Self::Const, Self::Error> {
+ define_scoped_cx!(self);
+
+ if self.tcx().sess.verbose() {
+ p!(write("ValTree({:?}: ", valtree), print(ty), ")");
+ return Ok(self);
+ }
+
+ let u8_type = self.tcx().types.u8;
+ match (valtree, ty.kind()) {
+ (ty::ValTree::Branch(_), ty::Ref(_, inner_ty, _)) => match inner_ty.kind() {
+ ty::Slice(t) if *t == u8_type => {
+ let bytes = valtree.try_to_raw_bytes(self.tcx(), ty).unwrap_or_else(|| {
+ bug!(
+ "expected to convert valtree {:?} to raw bytes for type {:?}",
+ valtree,
+ t
+ )
+ });
+ return self.pretty_print_byte_str(bytes);
+ }
+ ty::Str => {
+ let bytes = valtree.try_to_raw_bytes(self.tcx(), ty).unwrap_or_else(|| {
+ bug!("expected to convert valtree to raw bytes for type {:?}", ty)
+ });
+ p!(write("{:?}", String::from_utf8_lossy(bytes)));
+ return Ok(self);
+ }
+ _ => {
+ p!("&");
+ p!(pretty_print_const_valtree(valtree, *inner_ty, print_ty));
+ return Ok(self);
+ }
+ },
+ (ty::ValTree::Branch(_), ty::Array(t, _)) if *t == u8_type => {
+ let bytes = valtree.try_to_raw_bytes(self.tcx(), ty).unwrap_or_else(|| {
+ bug!("expected to convert valtree to raw bytes for type {:?}", t)
+ });
+ p!("*");
+ p!(pretty_print_byte_str(bytes));
+ return Ok(self);
+ }
+ // Aggregates, printed as array/tuple/struct/variant construction syntax.
+ (ty::ValTree::Branch(_), ty::Array(..) | ty::Tuple(..) | ty::Adt(..)) => {
+ let contents =
+ self.tcx().destructure_const(ty::Const::from_value(self.tcx(), valtree, ty));
+ let fields = contents.fields.iter().copied();
+ match *ty.kind() {
+ ty::Array(..) => {
+ p!("[", comma_sep(fields), "]");
+ }
+ ty::Tuple(..) => {
+ p!("(", comma_sep(fields));
+ if contents.fields.len() == 1 {
+ p!(",");
+ }
+ p!(")");
+ }
+ ty::Adt(def, _) if def.variants().is_empty() => {
+ self = self.typed_value(
+ |mut this| {
+ write!(this, "unreachable()")?;
+ Ok(this)
+ },
+ |this| this.print_type(ty),
+ ": ",
+ )?;
+ }
+ ty::Adt(def, substs) => {
+ let variant_idx =
+ contents.variant.expect("destructured const of adt without variant idx");
+ let variant_def = &def.variant(variant_idx);
+ p!(print_value_path(variant_def.def_id, substs));
+ match variant_def.ctor_kind {
+ CtorKind::Const => {}
+ CtorKind::Fn => {
+ p!("(", comma_sep(fields), ")");
+ }
+ CtorKind::Fictive => {
+ p!(" {{ ");
+ let mut first = true;
+ for (field_def, field) in iter::zip(&variant_def.fields, fields) {
+ if !first {
+ p!(", ");
+ }
+ p!(write("{}: ", field_def.name), print(field));
+ first = false;
+ }
+ p!(" }}");
+ }
+ }
+ }
+ _ => unreachable!(),
+ }
+ return Ok(self);
+ }
+ (ty::ValTree::Leaf(leaf), _) => {
+ return self.pretty_print_const_scalar_int(leaf, ty, print_ty);
+ }
+ // FIXME(oli-obk): also pretty print arrays and other aggregate constants by reading
+ // their fields instead of just dumping the memory.
+ _ => {}
+ }
+
+ // fallback
+ if valtree == ty::ValTree::zst() {
+ p!(write("<ZST>"));
+ } else {
+ p!(write("{:?}", valtree));
+ }
+ if print_ty {
+ p!(": ", print(ty));
+ }
+ Ok(self)
+ }
+}
+
+// HACK(eddyb) boxed to avoid moving around a large struct by-value.
+pub struct FmtPrinter<'a, 'tcx>(Box<FmtPrinterData<'a, 'tcx>>);
+
+pub struct FmtPrinterData<'a, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ fmt: String,
+
+ empty_path: bool,
+ in_value: bool,
+ pub print_alloc_ids: bool,
+
+ used_region_names: FxHashSet<Symbol>,
+ region_index: usize,
+ binder_depth: usize,
+ printed_type_count: usize,
+
+ pub region_highlight_mode: RegionHighlightMode<'tcx>,
+
+ pub ty_infer_name_resolver: Option<Box<dyn Fn(ty::TyVid) -> Option<Symbol> + 'a>>,
+ pub const_infer_name_resolver: Option<Box<dyn Fn(ty::ConstVid<'tcx>) -> Option<Symbol> + 'a>>,
+}
+
+impl<'a, 'tcx> Deref for FmtPrinter<'a, 'tcx> {
+ type Target = FmtPrinterData<'a, 'tcx>;
+ fn deref(&self) -> &Self::Target {
+ &self.0
+ }
+}
+
+impl DerefMut for FmtPrinter<'_, '_> {
+ fn deref_mut(&mut self) -> &mut Self::Target {
+ &mut self.0
+ }
+}
+
+impl<'a, 'tcx> FmtPrinter<'a, 'tcx> {
+ pub fn new(tcx: TyCtxt<'tcx>, ns: Namespace) -> Self {
+ FmtPrinter(Box::new(FmtPrinterData {
+ tcx,
+ // Estimated reasonable capacity to allocate upfront based on a few
+ // benchmarks.
+ fmt: String::with_capacity(64),
+ empty_path: false,
+ in_value: ns == Namespace::ValueNS,
+ print_alloc_ids: false,
+ used_region_names: Default::default(),
+ region_index: 0,
+ binder_depth: 0,
+ printed_type_count: 0,
+ region_highlight_mode: RegionHighlightMode::new(tcx),
+ ty_infer_name_resolver: None,
+ const_infer_name_resolver: None,
+ }))
+ }
+
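+ /// Consumes the printer and returns the accumulated output; the usual pattern is
+ /// `FmtPrinter::new(tcx, ns).print_def_path(def_id, &[]).unwrap().into_buffer()`,
+ /// as in `def_path_str_with_substs` below.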
+ pub fn into_buffer(self) -> String {
+ self.0.fmt
+ }
+}
+
+// HACK(eddyb) get rid of `def_path_str` and/or pass `Namespace` explicitly always
+// (but also some things just print a `DefId` generally so maybe we need this?)
+fn guess_def_namespace(tcx: TyCtxt<'_>, def_id: DefId) -> Namespace {
+ match tcx.def_key(def_id).disambiguated_data.data {
+ DefPathData::TypeNs(..) | DefPathData::CrateRoot | DefPathData::ImplTrait => {
+ Namespace::TypeNS
+ }
+
+ DefPathData::ValueNs(..)
+ | DefPathData::AnonConst
+ | DefPathData::ClosureExpr
+ | DefPathData::Ctor => Namespace::ValueNS,
+
+ DefPathData::MacroNs(..) => Namespace::MacroNS,
+
+ _ => Namespace::TypeNS,
+ }
+}
+
+impl<'t> TyCtxt<'t> {
+ /// Returns a string identifying this `DefId`. This string is
+ /// suitable for user output.
+ pub fn def_path_str(self, def_id: DefId) -> String {
+ self.def_path_str_with_substs(def_id, &[])
+ }
+
+ pub fn def_path_str_with_substs(self, def_id: DefId, substs: &'t [GenericArg<'t>]) -> String {
+ let ns = guess_def_namespace(self, def_id);
+ debug!("def_path_str: def_id={:?}, ns={:?}", def_id, ns);
+ FmtPrinter::new(self, ns).print_def_path(def_id, substs).unwrap().into_buffer()
+ }
+}
+
+impl fmt::Write for FmtPrinter<'_, '_> {
+ fn write_str(&mut self, s: &str) -> fmt::Result {
+ self.fmt.push_str(s);
+ Ok(())
+ }
+}
+
+impl<'tcx> Printer<'tcx> for FmtPrinter<'_, 'tcx> {
+ type Error = fmt::Error;
+
+ type Path = Self;
+ type Region = Self;
+ type Type = Self;
+ type DynExistential = Self;
+ type Const = Self;
+
+ fn tcx<'a>(&'a self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn print_def_path(
+ mut self,
+ def_id: DefId,
+ substs: &'tcx [GenericArg<'tcx>],
+ ) -> Result<Self::Path, Self::Error> {
+ define_scoped_cx!(self);
+
+ if substs.is_empty() {
+ match self.try_print_trimmed_def_path(def_id)? {
+ (cx, true) => return Ok(cx),
+ (cx, false) => self = cx,
+ }
+
+ match self.try_print_visible_def_path(def_id)? {
+ (cx, true) => return Ok(cx),
+ (cx, false) => self = cx,
+ }
+ }
+
+ let key = self.tcx.def_key(def_id);
+ if let DefPathData::Impl = key.disambiguated_data.data {
+ // Always use types for non-local impls, where types are always
+ // available, and filename/line-number is mostly uninteresting.
+ let use_types = !def_id.is_local() || {
+ // Otherwise, use filename/line-number if forced.
+ let force_no_types = FORCE_IMPL_FILENAME_LINE.with(|f| f.get());
+ !force_no_types
+ };
+
+ if !use_types {
+ // If no type info is available, fall back to
+ // pretty printing some span information. This should
+ // only occur very early in the compiler pipeline.
+ let parent_def_id = DefId { index: key.parent.unwrap(), ..def_id };
+ let span = self.tcx.def_span(def_id);
+
+ self = self.print_def_path(parent_def_id, &[])?;
+
+ // HACK(eddyb) copy of `path_append` to avoid
+ // constructing a `DisambiguatedDefPathData`.
+ if !self.empty_path {
+ write!(self, "::")?;
+ }
+ write!(
+ self,
+ "<impl at {}>",
+ // This may end up in stderr diagnostics but it may also be emitted
+ // into MIR. Hence we use the remapped path if available
+ self.tcx.sess.source_map().span_to_embeddable_string(span)
+ )?;
+ self.empty_path = false;
+
+ return Ok(self);
+ }
+ }
+
+ self.default_print_def_path(def_id, substs)
+ }
+
+ fn print_region(self, region: ty::Region<'tcx>) -> Result<Self::Region, Self::Error> {
+ self.pretty_print_region(region)
+ }
+
+ fn print_type(mut self, ty: Ty<'tcx>) -> Result<Self::Type, Self::Error> {
+ let type_length_limit = self.tcx.type_length_limit();
+ if type_length_limit.value_within_limit(self.printed_type_count) {
+ self.printed_type_count += 1;
+ self.pretty_print_type(ty)
+ } else {
+ write!(self, "...")?;
+ Ok(self)
+ }
+ }
+
+ fn print_dyn_existential(
+ self,
+ predicates: &'tcx ty::List<ty::Binder<'tcx, ty::ExistentialPredicate<'tcx>>>,
+ ) -> Result<Self::DynExistential, Self::Error> {
+ self.pretty_print_dyn_existential(predicates)
+ }
+
+ fn print_const(self, ct: ty::Const<'tcx>) -> Result<Self::Const, Self::Error> {
+ self.pretty_print_const(ct, false)
+ }
+
+ fn path_crate(mut self, cnum: CrateNum) -> Result<Self::Path, Self::Error> {
+ self.empty_path = true;
+ if cnum == LOCAL_CRATE {
+ if self.tcx.sess.rust_2018() {
+ // On Rust 2018 we add the `crate::` prefix, but only when explicitly
+ // requested (see `SHOULD_PREFIX_WITH_CRATE`).
+ if SHOULD_PREFIX_WITH_CRATE.with(|flag| flag.get()) {
+ write!(self, "{}", kw::Crate)?;
+ self.empty_path = false;
+ }
+ }
+ } else {
+ write!(self, "{}", self.tcx.crate_name(cnum))?;
+ self.empty_path = false;
+ }
+ Ok(self)
+ }
+
+ fn path_qualified(
+ mut self,
+ self_ty: Ty<'tcx>,
+ trait_ref: Option<ty::TraitRef<'tcx>>,
+ ) -> Result<Self::Path, Self::Error> {
+ self = self.pretty_path_qualified(self_ty, trait_ref)?;
+ self.empty_path = false;
+ Ok(self)
+ }
+
+ fn path_append_impl(
+ mut self,
+ print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
+ _disambiguated_data: &DisambiguatedDefPathData,
+ self_ty: Ty<'tcx>,
+ trait_ref: Option<ty::TraitRef<'tcx>>,
+ ) -> Result<Self::Path, Self::Error> {
+ self = self.pretty_path_append_impl(
+ |mut cx| {
+ cx = print_prefix(cx)?;
+ if !cx.empty_path {
+ write!(cx, "::")?;
+ }
+
+ Ok(cx)
+ },
+ self_ty,
+ trait_ref,
+ )?;
+ self.empty_path = false;
+ Ok(self)
+ }
+
+ fn path_append(
+ mut self,
+ print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
+ disambiguated_data: &DisambiguatedDefPathData,
+ ) -> Result<Self::Path, Self::Error> {
+ self = print_prefix(self)?;
+
+ // Skip `::{{extern}}` blocks and `::{{constructor}}` on tuple/unit structs.
+ if let DefPathData::ForeignMod | DefPathData::Ctor = disambiguated_data.data {
+ return Ok(self);
+ }
+
+ let name = disambiguated_data.data.name();
+ if !self.empty_path {
+ write!(self, "::")?;
+ }
+
+ if let DefPathDataName::Named(name) = name {
+ if Ident::with_dummy_span(name).is_raw_guess() {
+ write!(self, "r#")?;
+ }
+ }
+
+ let verbose = self.tcx.sess.verbose();
+ disambiguated_data.fmt_maybe_verbose(&mut self, verbose)?;
+
+ self.empty_path = false;
+
+ Ok(self)
+ }
+
+ fn path_generic_args(
+ mut self,
+ print_prefix: impl FnOnce(Self) -> Result<Self::Path, Self::Error>,
+ args: &[GenericArg<'tcx>],
+ ) -> Result<Self::Path, Self::Error> {
+ self = print_prefix(self)?;
+
+ // Don't print `'_` if there are no unerased regions.
+ let print_regions = self.tcx.sess.verbose()
+ || args.iter().any(|arg| match arg.unpack() {
+ GenericArgKind::Lifetime(r) => !r.is_erased(),
+ _ => false,
+ });
+ let args = args.iter().cloned().filter(|arg| match arg.unpack() {
+ GenericArgKind::Lifetime(_) => print_regions,
+ _ => true,
+ });
+
+ if args.clone().next().is_some() {
+ if self.in_value {
+ write!(self, "::")?;
+ }
+ self.generic_delimiters(|cx| cx.comma_sep(args))
+ } else {
+ Ok(self)
+ }
+ }
+}
+
+impl<'tcx> PrettyPrinter<'tcx> for FmtPrinter<'_, 'tcx> {
+ fn ty_infer_name(&self, id: ty::TyVid) -> Option<Symbol> {
+ self.0.ty_infer_name_resolver.as_ref().and_then(|func| func(id))
+ }
+
+ fn const_infer_name(&self, id: ty::ConstVid<'tcx>) -> Option<Symbol> {
+ self.0.const_infer_name_resolver.as_ref().and_then(|func| func(id))
+ }
+
+ fn print_value_path(
+ mut self,
+ def_id: DefId,
+ substs: &'tcx [GenericArg<'tcx>],
+ ) -> Result<Self::Path, Self::Error> {
+ let was_in_value = std::mem::replace(&mut self.in_value, true);
+ self = self.print_def_path(def_id, substs)?;
+ self.in_value = was_in_value;
+
+ Ok(self)
+ }
+
+ fn in_binder<T>(self, value: &ty::Binder<'tcx, T>) -> Result<Self, Self::Error>
+ where
+ T: Print<'tcx, Self, Output = Self, Error = Self::Error> + TypeFoldable<'tcx>,
+ {
+ self.pretty_in_binder(value)
+ }
+
+ fn wrap_binder<T, C: FnOnce(&T, Self) -> Result<Self, Self::Error>>(
+ self,
+ value: &ty::Binder<'tcx, T>,
+ f: C,
+ ) -> Result<Self, Self::Error>
+ where
+ T: Print<'tcx, Self, Output = Self, Error = Self::Error> + TypeFoldable<'tcx>,
+ {
+ self.pretty_wrap_binder(value, f)
+ }
+
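+ /// Prints a value/type pair wrapped in braces, e.g. `{0x3f as *const u8}` or
+ /// `{_: Ty}`, temporarily leaving "value" mode while the type part is printed.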
+ fn typed_value(
+ mut self,
+ f: impl FnOnce(Self) -> Result<Self, Self::Error>,
+ t: impl FnOnce(Self) -> Result<Self, Self::Error>,
+ conversion: &str,
+ ) -> Result<Self::Const, Self::Error> {
+ self.write_str("{")?;
+ self = f(self)?;
+ self.write_str(conversion)?;
+ let was_in_value = std::mem::replace(&mut self.in_value, false);
+ self = t(self)?;
+ self.in_value = was_in_value;
+ self.write_str("}")?;
+ Ok(self)
+ }
+
+ fn generic_delimiters(
+ mut self,
+ f: impl FnOnce(Self) -> Result<Self, Self::Error>,
+ ) -> Result<Self, Self::Error> {
+ write!(self, "<")?;
+
+ let was_in_value = std::mem::replace(&mut self.in_value, false);
+ let mut inner = f(self)?;
+ inner.in_value = was_in_value;
+
+ write!(inner, ">")?;
+ Ok(inner)
+ }
+
+ fn should_print_region(&self, region: ty::Region<'tcx>) -> bool {
+ let highlight = self.region_highlight_mode;
+ if highlight.region_highlighted(region).is_some() {
+ return true;
+ }
+
+ if self.tcx.sess.verbose() {
+ return true;
+ }
+
+ let identify_regions = self.tcx.sess.opts.unstable_opts.identify_regions;
+
+ match *region {
+ ty::ReEarlyBound(ref data) => {
+ data.name != kw::Empty && data.name != kw::UnderscoreLifetime
+ }
+
+ ty::ReLateBound(_, ty::BoundRegion { kind: br, .. })
+ | ty::ReFree(ty::FreeRegion { bound_region: br, .. })
+ | ty::RePlaceholder(ty::Placeholder { name: br, .. }) => {
+ if let ty::BrNamed(_, name) = br {
+ if name != kw::Empty && name != kw::UnderscoreLifetime {
+ return true;
+ }
+ }
+
+ if let Some((region, _)) = highlight.highlight_bound_region {
+ if br == region {
+ return true;
+ }
+ }
+
+ false
+ }
+
+ ty::ReVar(_) if identify_regions => true,
+
+ ty::ReVar(_) | ty::ReErased => false,
+
+ ty::ReStatic | ty::ReEmpty(_) => true,
+ }
+ }
+
+ fn pretty_print_const_pointer<Prov: Provenance>(
+ self,
+ p: Pointer<Prov>,
+ ty: Ty<'tcx>,
+ print_ty: bool,
+ ) -> Result<Self::Const, Self::Error> {
+ let print = |mut this: Self| {
+ define_scoped_cx!(this);
+ if this.print_alloc_ids {
+ p!(write("{:?}", p));
+ } else {
+ p!("&_");
+ }
+ Ok(this)
+ };
+ if print_ty {
+ self.typed_value(print, |this| this.print_type(ty), ": ")
+ } else {
+ print(self)
+ }
+ }
+}
+
+// HACK(eddyb) limited to `FmtPrinter` because of `region_highlight_mode`.
+impl<'tcx> FmtPrinter<'_, 'tcx> {
+ pub fn pretty_print_region(mut self, region: ty::Region<'tcx>) -> Result<Self, fmt::Error> {
+ define_scoped_cx!(self);
+
+ // Watch out for region highlights.
+ let highlight = self.region_highlight_mode;
+ if let Some(n) = highlight.region_highlighted(region) {
+ p!(write("'{}", n));
+ return Ok(self);
+ }
+
+ if self.tcx.sess.verbose() {
+ p!(write("{:?}", region));
+ return Ok(self);
+ }
+
+ let identify_regions = self.tcx.sess.opts.unstable_opts.identify_regions;
+
+ // These printouts are concise. They do not contain all the information
+ // the user might want to diagnose an error, but there is basically no way
+ // to fit that into a short string. Hence the recommendation to use
+ // `explain_region()` or `note_and_explain_region()`.
+ match *region {
+ ty::ReEarlyBound(ref data) => {
+ if data.name != kw::Empty {
+ p!(write("{}", data.name));
+ return Ok(self);
+ }
+ }
+ ty::ReLateBound(_, ty::BoundRegion { kind: br, .. })
+ | ty::ReFree(ty::FreeRegion { bound_region: br, .. })
+ | ty::RePlaceholder(ty::Placeholder { name: br, .. }) => {
+ if let ty::BrNamed(_, name) = br {
+ if name != kw::Empty && name != kw::UnderscoreLifetime {
+ p!(write("{}", name));
+ return Ok(self);
+ }
+ }
+
+ if let Some((region, counter)) = highlight.highlight_bound_region {
+ if br == region {
+ p!(write("'{}", counter));
+ return Ok(self);
+ }
+ }
+ }
+ ty::ReVar(region_vid) if identify_regions => {
+ p!(write("{:?}", region_vid));
+ return Ok(self);
+ }
+ ty::ReVar(_) => {}
+ ty::ReErased => {}
+ ty::ReStatic => {
+ p!("'static");
+ return Ok(self);
+ }
+ ty::ReEmpty(ty::UniverseIndex::ROOT) => {
+ p!("'<empty>");
+ return Ok(self);
+ }
+ ty::ReEmpty(ui) => {
+ p!(write("'<empty:{:?}>", ui));
+ return Ok(self);
+ }
+ }
+
+ p!("'_");
+
+ Ok(self)
+ }
+}
+
+/// Folds through bound vars and placeholders, replacing them with named regions
+/// produced by the `name` callback.
+struct RegionFolder<'a, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ current_index: ty::DebruijnIndex,
+ region_map: BTreeMap<ty::BoundRegion, ty::Region<'tcx>>,
+ name: &'a mut (dyn FnMut(ty::BoundRegion) -> ty::Region<'tcx> + 'a),
+}
+
+impl<'a, 'tcx> ty::TypeFolder<'tcx> for RegionFolder<'a, 'tcx> {
+ fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn fold_binder<T: TypeFoldable<'tcx>>(
+ &mut self,
+ t: ty::Binder<'tcx, T>,
+ ) -> ty::Binder<'tcx, T> {
+ self.current_index.shift_in(1);
+ let t = t.super_fold_with(self);
+ self.current_index.shift_out(1);
+ t
+ }
+
+ fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
+ match *t.kind() {
+ _ if t.has_vars_bound_at_or_above(self.current_index) || t.has_placeholders() => {
+ return t.super_fold_with(self);
+ }
+ _ => {}
+ }
+ t
+ }
+
+ fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
+ let name = &mut self.name;
+ let region = match *r {
+ ty::ReLateBound(_, br) => *self.region_map.entry(br).or_insert_with(|| name(br)),
+ ty::RePlaceholder(ty::PlaceholderRegion { name: kind, .. }) => {
+ // If this is an anonymous placeholder, don't rename it; otherwise we would
+ // end up printing a spurious `for<'r> Send` bound in some async fns.
+ match kind {
+ ty::BrAnon(_) | ty::BrEnv => r,
+ _ => {
+ // Index doesn't matter, since this is just for naming and these never get bound
+ let br = ty::BoundRegion { var: ty::BoundVar::from_u32(0), kind };
+ *self.region_map.entry(br).or_insert_with(|| name(br))
+ }
+ }
+ }
+ _ => return r,
+ };
+ if let ty::ReLateBound(debruijn1, br) = *region {
+ assert_eq!(debruijn1, ty::INNERMOST);
+ self.tcx.mk_region(ty::ReLateBound(self.current_index, br))
+ } else {
+ region
+ }
+ }
+}
+
+// HACK(eddyb) limited to `FmtPrinter` because of `binder_depth`,
+// `region_index` and `used_region_names`.
+impl<'tcx> FmtPrinter<'_, 'tcx> {
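+ /// Replaces the bound regions of `value` with named regions, emitting a `for<...>`
+ /// prefix as names are assigned, and returns the renamed value along with the
+ /// mapping that was used. Increments `binder_depth`; callers are expected to
+ /// restore `region_index` and decrement `binder_depth` afterwards, as
+ /// `pretty_in_binder` and `pretty_wrap_binder` do.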
+ pub fn name_all_regions<T>(
+ mut self,
+ value: &ty::Binder<'tcx, T>,
+ ) -> Result<(Self, T, BTreeMap<ty::BoundRegion, ty::Region<'tcx>>), fmt::Error>
+ where
+ T: Print<'tcx, Self, Output = Self, Error = fmt::Error> + TypeFoldable<'tcx>,
+ {
+ fn name_by_region_index(index: usize) -> Symbol {
+ match index {
+ 0 => Symbol::intern("'r"),
+ 1 => Symbol::intern("'s"),
+ i => Symbol::intern(&format!("'t{}", i - 2)),
+ }
+ }
+
+ // Replace any anonymous late-bound regions with named
+ // variants, using new unique identifiers, so that we can
+ // clearly differentiate between named and unnamed regions in
+ // the output. We'll probably want to tweak this over time to
+ // decide just how much information to give.
+ if self.binder_depth == 0 {
+ self.prepare_late_bound_region_info(value);
+ }
+
+ let mut empty = true;
+ let mut start_or_continue = |cx: &mut Self, start: &str, cont: &str| {
+ let w = if empty {
+ empty = false;
+ start
+ } else {
+ cont
+ };
+ let _ = write!(cx, "{}", w);
+ };
+ let do_continue = |cx: &mut Self, cont: Symbol| {
+ let _ = write!(cx, "{}", cont);
+ };
+
+ define_scoped_cx!(self);
+
+ let mut region_index = self.region_index;
+ let mut next_name = |this: &Self| loop {
+ let name = name_by_region_index(region_index);
+ region_index += 1;
+ if !this.used_region_names.contains(&name) {
+ break name;
+ }
+ };
+
+ // If we want to print verbosely, then print *all* binders, even if they
+ // aren't named. Eventually, we might just want this as the default, but
+ // this is not *quite* right and changes the ordering of some output
+ // anyway.
+ let (new_value, map) = if self.tcx().sess.verbose() {
+ let regions: Vec<_> = value
+ .bound_vars()
+ .into_iter()
+ .map(|var| {
+ let ty::BoundVariableKind::Region(var) = var else {
+ // The value doesn't really matter here: it is never used,
+ // so any placeholder kind will do.
+ return ty::BrAnon(0);
+ };
+ match var {
+ ty::BrAnon(_) | ty::BrEnv => {
+ start_or_continue(&mut self, "for<", ", ");
+ let name = next_name(&self);
+ do_continue(&mut self, name);
+ ty::BrNamed(CRATE_DEF_ID.to_def_id(), name)
+ }
+ ty::BrNamed(def_id, kw::UnderscoreLifetime) => {
+ start_or_continue(&mut self, "for<", ", ");
+ let name = next_name(&self);
+ do_continue(&mut self, name);
+ ty::BrNamed(def_id, name)
+ }
+ ty::BrNamed(def_id, name) => {
+ start_or_continue(&mut self, "for<", ", ");
+ do_continue(&mut self, name);
+ ty::BrNamed(def_id, name)
+ }
+ }
+ })
+ .collect();
+ start_or_continue(&mut self, "", "> ");
+
+ self.tcx.replace_late_bound_regions(value.clone(), |br| {
+ let kind = regions[br.var.as_usize()];
+ self.tcx.mk_region(ty::ReLateBound(
+ ty::INNERMOST,
+ ty::BoundRegion { var: br.var, kind },
+ ))
+ })
+ } else {
+ let tcx = self.tcx;
+ let mut name = |br: ty::BoundRegion| {
+ start_or_continue(&mut self, "for<", ", ");
+ let kind = match br.kind {
+ ty::BrAnon(_) | ty::BrEnv => {
+ let name = next_name(&self);
+ do_continue(&mut self, name);
+ ty::BrNamed(CRATE_DEF_ID.to_def_id(), name)
+ }
+ ty::BrNamed(def_id, kw::UnderscoreLifetime) => {
+ let name = next_name(&self);
+ do_continue(&mut self, name);
+ ty::BrNamed(def_id, name)
+ }
+ ty::BrNamed(_, name) => {
+ do_continue(&mut self, name);
+ br.kind
+ }
+ };
+ tcx.mk_region(ty::ReLateBound(ty::INNERMOST, ty::BoundRegion { var: br.var, kind }))
+ };
+ let mut folder = RegionFolder {
+ tcx,
+ current_index: ty::INNERMOST,
+ name: &mut name,
+ region_map: BTreeMap::new(),
+ };
+ let new_value = value.clone().skip_binder().fold_with(&mut folder);
+ let region_map = folder.region_map;
+ start_or_continue(&mut self, "", "> ");
+ (new_value, region_map)
+ };
+
+ self.binder_depth += 1;
+ self.region_index = region_index;
+ Ok((self, new_value, map))
+ }
+
+ pub fn pretty_in_binder<T>(self, value: &ty::Binder<'tcx, T>) -> Result<Self, fmt::Error>
+ where
+ T: Print<'tcx, Self, Output = Self, Error = fmt::Error> + TypeFoldable<'tcx>,
+ {
+ let old_region_index = self.region_index;
+ let (new, new_value, _) = self.name_all_regions(value)?;
+ let mut inner = new_value.print(new)?;
+ inner.region_index = old_region_index;
+ inner.binder_depth -= 1;
+ Ok(inner)
+ }
+
+ pub fn pretty_wrap_binder<T, C: FnOnce(&T, Self) -> Result<Self, fmt::Error>>(
+ self,
+ value: &ty::Binder<'tcx, T>,
+ f: C,
+ ) -> Result<Self, fmt::Error>
+ where
+ T: Print<'tcx, Self, Output = Self, Error = fmt::Error> + TypeFoldable<'tcx>,
+ {
+ let old_region_index = self.region_index;
+ let (new, new_value, _) = self.name_all_regions(value)?;
+ let mut inner = f(&new_value, new)?;
+ inner.region_index = old_region_index;
+ inner.binder_depth -= 1;
+ Ok(inner)
+ }
+
+ fn prepare_late_bound_region_info<T>(&mut self, value: &ty::Binder<'tcx, T>)
+ where
+ T: TypeVisitable<'tcx>,
+ {
+ struct LateBoundRegionNameCollector<'a, 'tcx> {
+ used_region_names: &'a mut FxHashSet<Symbol>,
+ type_collector: SsoHashSet<Ty<'tcx>>,
+ }
+
+ impl<'tcx> ty::visit::TypeVisitor<'tcx> for LateBoundRegionNameCollector<'_, 'tcx> {
+ type BreakTy = ();
+
+ fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<Self::BreakTy> {
+ trace!("address: {:p}", r.0.0);
+ if let ty::ReLateBound(_, ty::BoundRegion { kind: ty::BrNamed(_, name), .. }) = *r {
+ self.used_region_names.insert(name);
+ } else if let ty::RePlaceholder(ty::PlaceholderRegion {
+ name: ty::BrNamed(_, name),
+ ..
+ }) = *r
+ {
+ self.used_region_names.insert(name);
+ }
+ r.super_visit_with(self)
+ }
+
+ // We track visited types so that really large types don't take a
+ // really long time to walk. See issue #83150 for why this is necessary.
+ fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ let not_previously_inserted = self.type_collector.insert(ty);
+ if not_previously_inserted {
+ ty.super_visit_with(self)
+ } else {
+ ControlFlow::CONTINUE
+ }
+ }
+ }
+
+ self.used_region_names.clear();
+ let mut collector = LateBoundRegionNameCollector {
+ used_region_names: &mut self.used_region_names,
+ type_collector: SsoHashSet::new(),
+ };
+ value.visit_with(&mut collector);
+ self.region_index = 0;
+ }
+}
+
+impl<'tcx, T, P: PrettyPrinter<'tcx>> Print<'tcx, P> for ty::Binder<'tcx, T>
+where
+ T: Print<'tcx, P, Output = P, Error = P::Error> + TypeFoldable<'tcx>,
+{
+ type Output = P;
+ type Error = P::Error;
+
+ fn print(&self, cx: P) -> Result<Self::Output, Self::Error> {
+ cx.in_binder(self)
+ }
+}
+
+impl<'tcx, T, U, P: PrettyPrinter<'tcx>> Print<'tcx, P> for ty::OutlivesPredicate<T, U>
+where
+ T: Print<'tcx, P, Output = P, Error = P::Error>,
+ U: Print<'tcx, P, Output = P, Error = P::Error>,
+{
+ type Output = P;
+ type Error = P::Error;
+ fn print(&self, mut cx: P) -> Result<Self::Output, Self::Error> {
+ define_scoped_cx!(cx);
+ p!(print(self.0), ": ", print(self.1));
+ Ok(cx)
+ }
+}
+
+macro_rules! forward_display_to_print {
+ ($($ty:ty),+) => {
+ // Some of the $ty arguments may not actually use 'tcx
+ $(#[allow(unused_lifetimes)] impl<'tcx> fmt::Display for $ty {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ ty::tls::with(|tcx| {
+ let cx = tcx.lift(*self)
+ .expect("could not lift for printing")
+ .print(FmtPrinter::new(tcx, Namespace::TypeNS))?;
+ f.write_str(&cx.into_buffer())?;
+ Ok(())
+ })
+ }
+ })+
+ };
+}
+
+macro_rules! define_print_and_forward_display {
+ (($self:ident, $cx:ident): $($ty:ty $print:block)+) => {
+ $(impl<'tcx, P: PrettyPrinter<'tcx>> Print<'tcx, P> for $ty {
+ type Output = P;
+ type Error = fmt::Error;
+ fn print(&$self, $cx: P) -> Result<Self::Output, Self::Error> {
+ #[allow(unused_mut)]
+ let mut $cx = $cx;
+ define_scoped_cx!($cx);
+ let _: () = $print;
+ #[allow(unreachable_code)]
+ Ok($cx)
+ }
+ })+
+
+ forward_display_to_print!($($ty),+);
+ };
+}
+
+/// Wrapper type for `ty::TraitRef` which opts-in to pretty printing only
+/// the trait path. That is, it will print `Trait<U>` instead of
+/// `<T as Trait<U>>`.
+#[derive(Copy, Clone, TypeFoldable, TypeVisitable, Lift)]
+pub struct TraitRefPrintOnlyTraitPath<'tcx>(ty::TraitRef<'tcx>);
+
+impl<'tcx> fmt::Debug for TraitRefPrintOnlyTraitPath<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(self, f)
+ }
+}
+
+/// Wrapper type for `ty::TraitRef` which opts-in to pretty printing only
+/// the trait name. That is, it will print `Trait` instead of
+/// `<T as Trait<U>>`.
+#[derive(Copy, Clone, TypeFoldable, TypeVisitable, Lift)]
+pub struct TraitRefPrintOnlyTraitName<'tcx>(ty::TraitRef<'tcx>);
+
+impl<'tcx> fmt::Debug for TraitRefPrintOnlyTraitName<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(self, f)
+ }
+}
+
+impl<'tcx> ty::TraitRef<'tcx> {
+ pub fn print_only_trait_path(self) -> TraitRefPrintOnlyTraitPath<'tcx> {
+ TraitRefPrintOnlyTraitPath(self)
+ }
+
+ pub fn print_only_trait_name(self) -> TraitRefPrintOnlyTraitName<'tcx> {
+ TraitRefPrintOnlyTraitName(self)
+ }
+}
+
+impl<'tcx> ty::Binder<'tcx, ty::TraitRef<'tcx>> {
+ pub fn print_only_trait_path(self) -> ty::Binder<'tcx, TraitRefPrintOnlyTraitPath<'tcx>> {
+ self.map_bound(|tr| tr.print_only_trait_path())
+ }
+}
+
+#[derive(Copy, Clone, TypeFoldable, TypeVisitable, Lift)]
+pub struct TraitPredPrintModifiersAndPath<'tcx>(ty::TraitPredicate<'tcx>);
+
+impl<'tcx> fmt::Debug for TraitPredPrintModifiersAndPath<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(self, f)
+ }
+}
+
+impl<'tcx> ty::TraitPredicate<'tcx> {
+ pub fn print_modifiers_and_trait_path(self) -> TraitPredPrintModifiersAndPath<'tcx> {
+ TraitPredPrintModifiersAndPath(self)
+ }
+}
+
+impl<'tcx> ty::PolyTraitPredicate<'tcx> {
+ pub fn print_modifiers_and_trait_path(
+ self,
+ ) -> ty::Binder<'tcx, TraitPredPrintModifiersAndPath<'tcx>> {
+ self.map_bound(TraitPredPrintModifiersAndPath)
+ }
+}
+
+forward_display_to_print! {
+ ty::Region<'tcx>,
+ Ty<'tcx>,
+ &'tcx ty::List<ty::Binder<'tcx, ty::ExistentialPredicate<'tcx>>>,
+ ty::Const<'tcx>,
+
+ // HACK(eddyb) these are exhaustive instead of generic,
+ // because `for<'tcx>` isn't possible yet.
+ ty::Binder<'tcx, ty::ExistentialPredicate<'tcx>>,
+ ty::Binder<'tcx, ty::TraitRef<'tcx>>,
+ ty::Binder<'tcx, ty::ExistentialTraitRef<'tcx>>,
+ ty::Binder<'tcx, TraitRefPrintOnlyTraitPath<'tcx>>,
+ ty::Binder<'tcx, TraitRefPrintOnlyTraitName<'tcx>>,
+ ty::Binder<'tcx, ty::FnSig<'tcx>>,
+ ty::Binder<'tcx, ty::TraitPredicate<'tcx>>,
+ ty::Binder<'tcx, TraitPredPrintModifiersAndPath<'tcx>>,
+ ty::Binder<'tcx, ty::SubtypePredicate<'tcx>>,
+ ty::Binder<'tcx, ty::ProjectionPredicate<'tcx>>,
+ ty::Binder<'tcx, ty::OutlivesPredicate<Ty<'tcx>, ty::Region<'tcx>>>,
+ ty::Binder<'tcx, ty::OutlivesPredicate<ty::Region<'tcx>, ty::Region<'tcx>>>,
+
+ ty::OutlivesPredicate<Ty<'tcx>, ty::Region<'tcx>>,
+ ty::OutlivesPredicate<ty::Region<'tcx>, ty::Region<'tcx>>
+}
+
+define_print_and_forward_display! {
+ (self, cx):
+
+ &'tcx ty::List<Ty<'tcx>> {
+ p!("{{", comma_sep(self.iter()), "}}")
+ }
+
+ ty::TypeAndMut<'tcx> {
+ p!(write("{}", self.mutbl.prefix_str()), print(self.ty))
+ }
+
+ ty::ExistentialTraitRef<'tcx> {
+ // Use a type that can't appear in defaults of type parameters.
+ let dummy_self = cx.tcx().mk_ty_infer(ty::FreshTy(0));
+ let trait_ref = self.with_self_ty(cx.tcx(), dummy_self);
+ p!(print(trait_ref.print_only_trait_path()))
+ }
+
+ ty::ExistentialProjection<'tcx> {
+ let name = cx.tcx().associated_item(self.item_def_id).name;
+ p!(write("{} = ", name), print(self.term))
+ }
+
+ ty::ExistentialPredicate<'tcx> {
+ match *self {
+ ty::ExistentialPredicate::Trait(x) => p!(print(x)),
+ ty::ExistentialPredicate::Projection(x) => p!(print(x)),
+ ty::ExistentialPredicate::AutoTrait(def_id) => {
+ p!(print_def_path(def_id, &[]));
+ }
+ }
+ }
+
+ ty::FnSig<'tcx> {
+ p!(write("{}", self.unsafety.prefix_str()));
+
+ if self.abi != Abi::Rust {
+ p!(write("extern {} ", self.abi));
+ }
+
+ p!("fn", pretty_fn_sig(self.inputs(), self.c_variadic, self.output()));
+ }
+
+ ty::TraitRef<'tcx> {
+ p!(write("<{} as {}>", self.self_ty(), self.print_only_trait_path()))
+ }
+
+ TraitRefPrintOnlyTraitPath<'tcx> {
+ p!(print_def_path(self.0.def_id, self.0.substs));
+ }
+
+ TraitRefPrintOnlyTraitName<'tcx> {
+ p!(print_def_path(self.0.def_id, &[]));
+ }
+
+ TraitPredPrintModifiersAndPath<'tcx> {
+ if let ty::BoundConstness::ConstIfConst = self.0.constness {
+ p!("~const ")
+ }
+
+ if let ty::ImplPolarity::Negative = self.0.polarity {
+ p!("!")
+ }
+
+ p!(print(self.0.trait_ref.print_only_trait_path()));
+ }
+
+ ty::ParamTy {
+ p!(write("{}", self.name))
+ }
+
+ ty::ParamConst {
+ p!(write("{}", self.name))
+ }
+
+ ty::SubtypePredicate<'tcx> {
+ p!(print(self.a), " <: ", print(self.b))
+ }
+
+ ty::CoercePredicate<'tcx> {
+ p!(print(self.a), " -> ", print(self.b))
+ }
+
+ ty::TraitPredicate<'tcx> {
+ p!(print(self.trait_ref.self_ty()), ": ");
+ if let ty::BoundConstness::ConstIfConst = self.constness && cx.tcx().features().const_trait_impl {
+ p!("~const ");
+ }
+ p!(print(self.trait_ref.print_only_trait_path()))
+ }
+
+ ty::ProjectionPredicate<'tcx> {
+ p!(print(self.projection_ty), " == ", print(self.term))
+ }
+
+ ty::Term<'tcx> {
+ match self {
+ ty::Term::Ty(ty) => p!(print(ty)),
+ ty::Term::Const(c) => p!(print(c)),
+ }
+ }
+
+ ty::ProjectionTy<'tcx> {
+ p!(print_def_path(self.item_def_id, self.substs));
+ }
+
+ ty::ClosureKind {
+ match *self {
+ ty::ClosureKind::Fn => p!("Fn"),
+ ty::ClosureKind::FnMut => p!("FnMut"),
+ ty::ClosureKind::FnOnce => p!("FnOnce"),
+ }
+ }
+
+ ty::Predicate<'tcx> {
+ let binder = self.kind();
+ p!(print(binder))
+ }
+
+ ty::PredicateKind<'tcx> {
+ match *self {
+ ty::PredicateKind::Trait(ref data) => {
+ p!(print(data))
+ }
+ ty::PredicateKind::Subtype(predicate) => p!(print(predicate)),
+ ty::PredicateKind::Coerce(predicate) => p!(print(predicate)),
+ ty::PredicateKind::RegionOutlives(predicate) => p!(print(predicate)),
+ ty::PredicateKind::TypeOutlives(predicate) => p!(print(predicate)),
+ ty::PredicateKind::Projection(predicate) => p!(print(predicate)),
+ ty::PredicateKind::WellFormed(arg) => p!(print(arg), " well-formed"),
+ ty::PredicateKind::ObjectSafe(trait_def_id) => {
+ p!("the trait `", print_def_path(trait_def_id, &[]), "` is object-safe")
+ }
+ ty::PredicateKind::ClosureKind(closure_def_id, _closure_substs, kind) => {
+ p!("the closure `",
+ print_value_path(closure_def_id, &[]),
+ write("` implements the trait `{}`", kind))
+ }
+ ty::PredicateKind::ConstEvaluatable(uv) => {
+ p!("the constant `", print_value_path(uv.def.did, uv.substs), "` can be evaluated")
+ }
+ ty::PredicateKind::ConstEquate(c1, c2) => {
+ p!("the constant `", print(c1), "` equals `", print(c2), "`")
+ }
+ ty::PredicateKind::TypeWellFormedFromEnv(ty) => {
+ p!("the type `", print(ty), "` is found in the environment")
+ }
+ }
+ }
+
+ GenericArg<'tcx> {
+ match self.unpack() {
+ GenericArgKind::Lifetime(lt) => p!(print(lt)),
+ GenericArgKind::Type(ty) => p!(print(ty)),
+ GenericArgKind::Const(ct) => p!(print(ct)),
+ }
+ }
+}
+
+fn for_each_def(tcx: TyCtxt<'_>, mut collect_fn: impl for<'b> FnMut(&'b Ident, Namespace, DefId)) {
+ // Iterate all local crate items no matter where they are defined.
+ let hir = tcx.hir();
+ for id in hir.items() {
+ if matches!(tcx.def_kind(id.def_id), DefKind::Use) {
+ continue;
+ }
+
+ let item = hir.item(id);
+ if item.ident.name == kw::Empty {
+ continue;
+ }
+
+ let def_id = item.def_id.to_def_id();
+ let ns = tcx.def_kind(def_id).ns().unwrap_or(Namespace::TypeNS);
+ collect_fn(&item.ident, ns, def_id);
+ }
+
+ // Now take care of extern crate items.
+ let queue = &mut Vec::new();
+ let mut seen_defs: DefIdSet = Default::default();
+
+ for &cnum in tcx.crates(()).iter() {
+ let def_id = cnum.as_def_id();
+
+ // Ignore crates that are not direct dependencies.
+ match tcx.extern_crate(def_id) {
+ None => continue,
+ Some(extern_crate) => {
+ if !extern_crate.is_direct() {
+ continue;
+ }
+ }
+ }
+
+ queue.push(def_id);
+ }
+
+ // Iterate external crate defs but be mindful about visibility
+ while let Some(def) = queue.pop() {
+ for child in tcx.module_children(def).iter() {
+ if !child.vis.is_public() {
+ continue;
+ }
+
+ match child.res {
+ def::Res::Def(DefKind::AssocTy, _) => {}
+ def::Res::Def(DefKind::TyAlias, _) => {}
+ def::Res::Def(defkind, def_id) => {
+ if let Some(ns) = defkind.ns() {
+ collect_fn(&child.ident, ns, def_id);
+ }
+
+ if matches!(defkind, DefKind::Mod | DefKind::Enum | DefKind::Trait)
+ && seen_defs.insert(def_id)
+ {
+ queue.push(def_id);
+ }
+ }
+ _ => {}
+ }
+ }
+ }
+}
+
+/// The purpose of this function is to collect public symbol names that are unique across all
+/// crates in the build. Later, when printing types, we can use those names instead of the
+/// full exported path to them.
+///
+/// So essentially, if a symbol name can only be imported from one place for a type, and as
+/// long as it was not glob-imported anywhere in the current crate, we can trim its printed
+/// path and print only the name.
+///
+/// This has wide implications for error messages involving types, for example shortening
+/// `std::vec::Vec` to just `Vec`, as long as there is no other importable `Vec` anywhere.
+///
+/// The implementation uses similar import discovery logic to that of 'use' suggestions.
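+///
+/// Note that any symbol occurring in a glob import is pre-seeded as ambiguous below, so
+/// glob-imported names are never trimmed.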
+fn trimmed_def_paths(tcx: TyCtxt<'_>, (): ()) -> FxHashMap<DefId, Symbol> {
+ let mut map: FxHashMap<DefId, Symbol> = FxHashMap::default();
+
+ if let TrimmedDefPaths::GoodPath = tcx.sess.opts.trimmed_def_paths {
+ // For good paths causing this bug, the `rustc_middle::ty::print::with_no_trimmed_paths`
+ // wrapper can be used to suppress this query, in exchange for full paths being formatted.
+ tcx.sess.delay_good_path_bug("trimmed_def_paths constructed");
+ }
+
+ let unique_symbols_rev: &mut FxHashMap<(Namespace, Symbol), Option<DefId>> =
+ &mut FxHashMap::default();
+
+ for symbol_set in tcx.resolutions(()).glob_map.values() {
+ for symbol in symbol_set {
+ unique_symbols_rev.insert((Namespace::TypeNS, *symbol), None);
+ unique_symbols_rev.insert((Namespace::ValueNS, *symbol), None);
+ unique_symbols_rev.insert((Namespace::MacroNS, *symbol), None);
+ }
+ }
+
+ for_each_def(tcx, |ident, ns, def_id| {
+ use std::collections::hash_map::Entry::{Occupied, Vacant};
+
+ match unique_symbols_rev.entry((ns, ident.name)) {
+ Occupied(mut v) => match v.get() {
+ None => {}
+ Some(existing) => {
+ if *existing != def_id {
+ v.insert(None);
+ }
+ }
+ },
+ Vacant(v) => {
+ v.insert(Some(def_id));
+ }
+ }
+ });
+
+ for ((_, symbol), opt_def_id) in unique_symbols_rev.drain() {
+ use std::collections::hash_map::Entry::{Occupied, Vacant};
+
+ if let Some(def_id) = opt_def_id {
+ match map.entry(def_id) {
+ Occupied(mut v) => {
+ // A single DefId can be known under multiple names (e.g.,
+ // with a `pub use ... as ...;`). We need to ensure that the
+ // name placed in this map is chosen deterministically, so
+ // if we find multiple names (`symbol`) resolving to the
+ // same `def_id`, we prefer the lexicographically smallest
+ // name.
+ //
+ // Any stable ordering would be fine here though.
+ if *v.get() != symbol {
+ if v.get().as_str() > symbol.as_str() {
+ v.insert(symbol);
+ }
+ }
+ }
+ Vacant(v) => {
+ v.insert(symbol);
+ }
+ }
+ }
+ }
+
+ map
+}
+
+pub fn provide(providers: &mut ty::query::Providers) {
+ *providers = ty::query::Providers { trimmed_def_paths, ..*providers };
+}
+
+#[derive(Default)]
+pub struct OpaqueFnEntry<'tcx> {
+ // The trait ref is already stored as a key, so just track if we have it as a real predicate
+ has_fn_once: bool,
+ fn_mut_trait_ref: Option<ty::PolyTraitRef<'tcx>>,
+ fn_trait_ref: Option<ty::PolyTraitRef<'tcx>>,
+ return_ty: Option<ty::Binder<'tcx, Term<'tcx>>>,
+}
diff --git a/compiler/rustc_middle/src/ty/query.rs b/compiler/rustc_middle/src/ty/query.rs
new file mode 100644
index 000000000..2452bcf6a
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/query.rs
@@ -0,0 +1,386 @@
+use crate::dep_graph;
+use crate::infer::canonical::{self, Canonical};
+use crate::lint::LintLevelMap;
+use crate::metadata::ModChild;
+use crate::middle::codegen_fn_attrs::CodegenFnAttrs;
+use crate::middle::exported_symbols::{ExportedSymbol, SymbolExportInfo};
+use crate::middle::lib_features::LibFeatures;
+use crate::middle::privacy::AccessLevels;
+use crate::middle::resolve_lifetime::{ObjectLifetimeDefault, Region, ResolveLifetimes};
+use crate::middle::stability::{self, DeprecationEntry};
+use crate::mir;
+use crate::mir::interpret::GlobalId;
+use crate::mir::interpret::{
+ ConstValue, EvalToAllocationRawResult, EvalToConstValueResult, EvalToValTreeResult,
+};
+use crate::mir::interpret::{LitToConstError, LitToConstInput};
+use crate::mir::mono::CodegenUnit;
+use crate::thir;
+use crate::traits::query::{
+ CanonicalPredicateGoal, CanonicalProjectionGoal, CanonicalTyGoal,
+ CanonicalTypeOpAscribeUserTypeGoal, CanonicalTypeOpEqGoal, CanonicalTypeOpNormalizeGoal,
+ CanonicalTypeOpProvePredicateGoal, CanonicalTypeOpSubtypeGoal, NoSolution,
+};
+use crate::traits::query::{
+ DropckConstraint, DropckOutlivesResult, MethodAutoderefStepsResult, NormalizationResult,
+ OutlivesBound,
+};
+use crate::traits::specialization_graph;
+use crate::traits::{self, ImplSource};
+use crate::ty::fast_reject::SimplifiedType;
+use crate::ty::layout::TyAndLayout;
+use crate::ty::subst::{GenericArg, SubstsRef};
+use crate::ty::util::AlwaysRequiresDrop;
+use crate::ty::GeneratorDiagnosticData;
+use crate::ty::{self, AdtSizedConstraint, CrateInherentImpls, ParamEnvAnd, Ty, TyCtxt};
+use rustc_ast as ast;
+use rustc_ast::expand::allocator::AllocatorKind;
+use rustc_attr as attr;
+use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap, FxIndexSet};
+use rustc_data_structures::steal::Steal;
+use rustc_data_structures::svh::Svh;
+use rustc_data_structures::sync::Lrc;
+use rustc_errors::ErrorGuaranteed;
+use rustc_hir as hir;
+use rustc_hir::def::DefKind;
+use rustc_hir::def_id::{CrateNum, DefId, DefIdMap, DefIdSet, LocalDefId};
+use rustc_hir::lang_items::{LangItem, LanguageItems};
+use rustc_hir::{Crate, ItemLocalId, TraitCandidate};
+use rustc_index::{bit_set::FiniteBitSet, vec::IndexVec};
+use rustc_session::config::{EntryFnType, OptLevel, OutputFilenames, SymbolManglingVersion};
+use rustc_session::cstore::{CrateDepKind, CrateSource};
+use rustc_session::cstore::{ExternCrate, ForeignModule, LinkagePreference, NativeLib};
+use rustc_session::utils::NativeLibKind;
+use rustc_session::Limits;
+use rustc_span::symbol::Symbol;
+use rustc_span::{Span, DUMMY_SP};
+use rustc_target::abi;
+use rustc_target::spec::PanicStrategy;
+use std::ops::Deref;
+use std::path::PathBuf;
+use std::sync::Arc;
+
+pub(crate) use rustc_query_system::query::QueryJobId;
+use rustc_query_system::query::*;
+
+#[derive(Copy, Clone)]
+pub struct TyCtxtAt<'tcx> {
+ pub tcx: TyCtxt<'tcx>,
+ pub span: Span,
+}
+
+impl<'tcx> Deref for TyCtxtAt<'tcx> {
+ type Target = TyCtxt<'tcx>;
+ #[inline(always)]
+ fn deref(&self) -> &Self::Target {
+ &self.tcx
+ }
+}
+
+#[derive(Copy, Clone)]
+pub struct TyCtxtEnsure<'tcx> {
+ pub tcx: TyCtxt<'tcx>,
+}
+
+impl<'tcx> TyCtxt<'tcx> {
+ /// Returns a transparent wrapper for `TyCtxt`, which ensures queries
+ /// are executed instead of just returning their results.
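+ ///
+ /// Illustrative use (a sketch, not part of this file): `tcx.ensure().check_mod_privacy(module)`
+ /// runs the query for its side effects (emitted diagnostics) without copying a
+ /// result out of the query cache.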
+ #[inline(always)]
+ pub fn ensure(self) -> TyCtxtEnsure<'tcx> {
+ TyCtxtEnsure { tcx: self }
+ }
+
+ /// Returns a transparent wrapper for `TyCtxt` which uses
+ /// `span` as the location of queries performed through it.
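+ ///
+ /// Illustrative use (a sketch): `tcx.at(span).type_of(def_id)` behaves like
+ /// `tcx.type_of(def_id)` except that diagnostics are attributed to `span`
+ /// rather than `DUMMY_SP`.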
+ #[inline(always)]
+ pub fn at(self, span: Span) -> TyCtxtAt<'tcx> {
+ TyCtxtAt { tcx: self, span }
+ }
+
+ pub fn try_mark_green(self, dep_node: &dep_graph::DepNode) -> bool {
+ self.queries.try_mark_green(self, dep_node)
+ }
+}
+
+/// Helper for `TyCtxtEnsure` to avoid a closure.
+#[inline(always)]
+fn noop<T>(_: &T) {}
+
+/// Helper to ensure that queries only return `Copy` types.
+#[inline(always)]
+fn copy<T: Copy>(x: &T) -> T {
+ *x
+}
+
+macro_rules! query_helper_param_ty {
+ (DefId) => { impl IntoQueryParam<DefId> };
+ ($K:ty) => { $K };
+}
+
+macro_rules! query_storage {
+ ([][$K:ty, $V:ty]) => {
+ <DefaultCacheSelector as CacheSelector<$K, $V>>::Cache
+ };
+ ([(storage $ty:ty) $($rest:tt)*][$K:ty, $V:ty]) => {
+ <$ty as CacheSelector<$K, $V>>::Cache
+ };
+ ([$other:tt $($modifiers:tt)*][$($args:tt)*]) => {
+ query_storage!([$($modifiers)*][$($args)*])
+ };
+}
+
+macro_rules! separate_provide_extern_decl {
+ ([][$name:ident]) => {
+ ()
+ };
+ ([(separate_provide_extern) $($rest:tt)*][$name:ident]) => {
+ for<'tcx> fn(
+ TyCtxt<'tcx>,
+ query_keys::$name<'tcx>,
+ ) -> query_values::$name<'tcx>
+ };
+ ([$other:tt $($modifiers:tt)*][$($args:tt)*]) => {
+ separate_provide_extern_decl!([$($modifiers)*][$($args)*])
+ };
+}
+
+macro_rules! separate_provide_extern_default {
+ ([][$name:ident]) => {
+ ()
+ };
+ ([(separate_provide_extern) $($rest:tt)*][$name:ident]) => {
+ |_, key| bug!(
+ "`tcx.{}({:?})` unsupported by its crate; \
+ perhaps the `{}` query was never assigned a provider function",
+ stringify!($name),
+ key,
+ stringify!($name),
+ )
+ };
+ ([$other:tt $($modifiers:tt)*][$($args:tt)*]) => {
+ separate_provide_extern_default!([$($modifiers)*][$($args)*])
+ };
+}
+
+macro_rules! opt_remap_env_constness {
+ ([][$name:ident]) => {};
+ ([(remap_env_constness) $($rest:tt)*][$name:ident]) => {
+ let $name = $name.without_const();
+ };
+ ([$other:tt $($modifiers:tt)*][$name:ident]) => {
+ opt_remap_env_constness!([$($modifiers)*][$name])
+ };
+}
+
+macro_rules! define_callbacks {
+ (<$tcx:tt>
+ $($(#[$attr:meta])*
+ [$($modifiers:tt)*] fn $name:ident($($K:tt)*) -> $V:ty,)*) => {
+
+ // HACK(eddyb) this is like the `impl QueryConfig for queries::$name`
+ // below, but using type aliases instead of associated types, to bypass
+ // the limitations around normalizing under HRTB - for example, this:
+ // `for<'tcx> fn(...) -> <queries::$name<'tcx> as QueryConfig<TyCtxt<'tcx>>>::Value`
+ // doesn't currently normalize to `for<'tcx> fn(...) -> query_values::$name<'tcx>`.
+ // This is primarily used by the `provide!` macro in `rustc_metadata`.
+ #[allow(nonstandard_style, unused_lifetimes)]
+ pub mod query_keys {
+ use super::*;
+
+ $(pub type $name<$tcx> = $($K)*;)*
+ }
+ #[allow(nonstandard_style, unused_lifetimes)]
+ pub mod query_values {
+ use super::*;
+
+ $(pub type $name<$tcx> = $V;)*
+ }
+ #[allow(nonstandard_style, unused_lifetimes)]
+ pub mod query_storage {
+ use super::*;
+
+ $(pub type $name<$tcx> = query_storage!([$($modifiers)*][$($K)*, $V]);)*
+ }
+ #[allow(nonstandard_style, unused_lifetimes)]
+ pub mod query_stored {
+ use super::*;
+
+ $(pub type $name<$tcx> = <query_storage::$name<$tcx> as QueryStorage>::Stored;)*
+ }
+
+ #[derive(Default)]
+ pub struct QueryCaches<$tcx> {
+ $($(#[$attr])* pub $name: query_storage::$name<$tcx>,)*
+ }
+
+ impl<$tcx> TyCtxtEnsure<$tcx> {
+ $($(#[$attr])*
+ #[inline(always)]
+ pub fn $name(self, key: query_helper_param_ty!($($K)*)) {
+ let key = key.into_query_param();
+ opt_remap_env_constness!([$($modifiers)*][key]);
+
+ let cached = try_get_cached(self.tcx, &self.tcx.query_caches.$name, &key, noop);
+
+ match cached {
+ Ok(()) => return,
+ Err(()) => (),
+ }
+
+ self.tcx.queries.$name(self.tcx, DUMMY_SP, key, QueryMode::Ensure);
+ })*
+ }
+
+ impl<$tcx> TyCtxt<$tcx> {
+ $($(#[$attr])*
+ #[inline(always)]
+ #[must_use]
+ pub fn $name(self, key: query_helper_param_ty!($($K)*)) -> query_stored::$name<$tcx>
+ {
+ self.at(DUMMY_SP).$name(key)
+ })*
+ }
+
+ impl<$tcx> TyCtxtAt<$tcx> {
+ $($(#[$attr])*
+ #[inline(always)]
+ pub fn $name(self, key: query_helper_param_ty!($($K)*)) -> query_stored::$name<$tcx>
+ {
+ let key = key.into_query_param();
+ opt_remap_env_constness!([$($modifiers)*][key]);
+
+ let cached = try_get_cached(self.tcx, &self.tcx.query_caches.$name, &key, copy);
+
+ match cached {
+ Ok(value) => return value,
+ Err(()) => (),
+ }
+
+ self.tcx.queries.$name(self.tcx, self.span, key, QueryMode::Get).unwrap()
+ })*
+ }
+
+ pub struct Providers {
+ $(pub $name: for<'tcx> fn(
+ TyCtxt<'tcx>,
+ query_keys::$name<'tcx>,
+ ) -> query_values::$name<'tcx>,)*
+ }
+
+ pub struct ExternProviders {
+ $(pub $name: separate_provide_extern_decl!([$($modifiers)*][$name]),)*
+ }
+
+ impl Default for Providers {
+ fn default() -> Self {
+ Providers {
+ $($name: |_, key| bug!(
+ "`tcx.{}({:?})` unsupported by its crate; \
+ perhaps the `{}` query was never assigned a provider function",
+ stringify!($name),
+ key,
+ stringify!($name),
+ ),)*
+ }
+ }
+ }
+
+ impl Default for ExternProviders {
+ fn default() -> Self {
+ ExternProviders {
+ $($name: separate_provide_extern_default!([$($modifiers)*][$name]),)*
+ }
+ }
+ }
+
+ impl Copy for Providers {}
+ impl Clone for Providers {
+ fn clone(&self) -> Self { *self }
+ }
+
+ impl Copy for ExternProviders {}
+ impl Clone for ExternProviders {
+ fn clone(&self) -> Self { *self }
+ }
+
+ pub trait QueryEngine<'tcx>: rustc_data_structures::sync::Sync {
+ fn as_any(&'tcx self) -> &'tcx dyn std::any::Any;
+
+ fn try_mark_green(&'tcx self, tcx: TyCtxt<'tcx>, dep_node: &dep_graph::DepNode) -> bool;
+
+ $($(#[$attr])*
+ fn $name(
+ &'tcx self,
+ tcx: TyCtxt<$tcx>,
+ span: Span,
+ key: query_keys::$name<$tcx>,
+ mode: QueryMode,
+ ) -> Option<query_stored::$name<$tcx>>;)*
+ }
+ };
+}
+
+// Each of these queries corresponds to a function pointer field in the
+// `Providers` struct for requesting a value of that type, and a method
+// on `tcx: TyCtxt` (and `tcx.at(span)`) for doing that request in a way
+// which memoizes and does dep-graph tracking, wrapping around the actual
+// `Providers` that the driver creates (using several `rustc_*` crates).
+//
+// The result type of each query must implement `Clone`, and additionally
+// `ty::query::values::Value`, which produces an appropriate placeholder
+// (error) value if the query resulted in a query cycle.
+// Queries marked with `fatal_cycle` do not need the latter implementation,
+// as they will raise a fatal error on query cycles instead.
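+//
+// As a rough sketch (not the literal macro expansion), a query declared as
+// `query type_of(key: DefId) -> Ty<'tcx>` ends up generating, among other things:
+//
+//     impl<'tcx> TyCtxt<'tcx> {
+//         pub fn type_of(self, key: impl IntoQueryParam<DefId>) -> Ty<'tcx> {
+//             self.at(DUMMY_SP).type_of(key)
+//         }
+//     }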
+
+rustc_query_append! { [define_callbacks!][<'tcx>] }
+
+mod sealed {
+ use super::{DefId, LocalDefId};
+
+ /// An analogue of the `Into` trait that's intended only for query parameters.
+ ///
+ /// This exists to allow queries to accept either `DefId` or `LocalDefId` while requiring that the
+ /// user call `to_def_id` to convert between them everywhere else.
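+ ///
+ /// For example (illustrative): given the `impl IntoQueryParam<DefId> for LocalDefId`
+ /// below, both `tcx.def_kind(def_id)` and `tcx.def_kind(local_def_id)` compile,
+ /// while other code must still write `local_def_id.to_def_id()` explicitly.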
+ pub trait IntoQueryParam<P> {
+ fn into_query_param(self) -> P;
+ }
+
+ impl<P> IntoQueryParam<P> for P {
+ #[inline(always)]
+ fn into_query_param(self) -> P {
+ self
+ }
+ }
+
+ impl<'a, P: Copy> IntoQueryParam<P> for &'a P {
+ #[inline(always)]
+ fn into_query_param(self) -> P {
+ *self
+ }
+ }
+
+ impl IntoQueryParam<DefId> for LocalDefId {
+ #[inline(always)]
+ fn into_query_param(self) -> DefId {
+ self.to_def_id()
+ }
+ }
+}
+
+use sealed::IntoQueryParam;
+
+impl<'tcx> TyCtxt<'tcx> {
+ pub fn def_kind(self, def_id: impl IntoQueryParam<DefId>) -> DefKind {
+ let def_id = def_id.into_query_param();
+ self.opt_def_kind(def_id)
+ .unwrap_or_else(|| bug!("def_kind: unsupported node: {:?}", def_id))
+ }
+}
+
+impl<'tcx> TyCtxtAt<'tcx> {
+ pub fn def_kind(self, def_id: impl IntoQueryParam<DefId>) -> DefKind {
+ let def_id = def_id.into_query_param();
+ self.opt_def_kind(def_id)
+ .unwrap_or_else(|| bug!("def_kind: unsupported node: {:?}", def_id))
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/relate.rs b/compiler/rustc_middle/src/ty/relate.rs
new file mode 100644
index 000000000..818affa71
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/relate.rs
@@ -0,0 +1,841 @@
+//! Generalized type relating mechanism.
+//!
+//! A type relation `R` relates a pair of values `(A, B)`. `A` and `B` are usually
+//! types or regions but can be other things. Examples of type relations are
+//! subtyping, type equality, etc.
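+//!
+//! For example (an illustrative sketch): a subtyping relation asked to relate
+//! `&'static str` and `&'a str` descends through the `ty::Ref` case, relating
+//! the two regions with the appropriate variance and the `str` types structurally.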
+
+use crate::ty::error::{ExpectedFound, TypeError};
+use crate::ty::subst::{GenericArg, GenericArgKind, Subst, SubstsRef};
+use crate::ty::{self, ImplSubject, Term, Ty, TyCtxt, TypeFoldable};
+use rustc_hir as ast;
+use rustc_hir::def_id::DefId;
+use rustc_span::DUMMY_SP;
+use rustc_target::spec::abi;
+use std::iter;
+
+pub type RelateResult<'tcx, T> = Result<T, TypeError<'tcx>>;
+
+#[derive(Clone, Debug)]
+pub enum Cause {
+ ExistentialRegionBound, // relating an existential region bound
+}
+
+pub trait TypeRelation<'tcx>: Sized {
+ fn tcx(&self) -> TyCtxt<'tcx>;
+
+ fn param_env(&self) -> ty::ParamEnv<'tcx>;
+
+ /// Returns a static string we can use for printouts.
+ fn tag(&self) -> &'static str;
+
+ /// Returns `true` if the value `a` is the "expected" type in the
+ /// relation. Just affects error messages.
+ fn a_is_expected(&self) -> bool;
+
+ fn with_cause<F, R>(&mut self, _cause: Cause, f: F) -> R
+ where
+ F: FnOnce(&mut Self) -> R,
+ {
+ f(self)
+ }
+
+ /// Generic relation routine suitable for most anything.
+ fn relate<T: Relate<'tcx>>(&mut self, a: T, b: T) -> RelateResult<'tcx, T> {
+ Relate::relate(self, a, b)
+ }
+
+ /// Relate the two substitutions for the given item. The default
+ /// is to look up the variance for the item and proceed
+ /// accordingly.
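+ ///
+ /// For example (illustrative): relating `Vec<A>` with `Vec<B>` consults
+ /// `variances_of(Vec)` and relates `A` with `B` covariantly, whereas the
+ /// parameter of `Cell` would be related invariantly.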
+ fn relate_item_substs(
+ &mut self,
+ item_def_id: DefId,
+ a_subst: SubstsRef<'tcx>,
+ b_subst: SubstsRef<'tcx>,
+ ) -> RelateResult<'tcx, SubstsRef<'tcx>> {
+ debug!(
+ "relate_item_substs(item_def_id={:?}, a_subst={:?}, b_subst={:?})",
+ item_def_id, a_subst, b_subst
+ );
+
+ let tcx = self.tcx();
+ let opt_variances = tcx.variances_of(item_def_id);
+ relate_substs_with_variances(self, item_def_id, opt_variances, a_subst, b_subst)
+ }
+
+ /// Switch variance for the purpose of relating `a` and `b`.
+ fn relate_with_variance<T: Relate<'tcx>>(
+ &mut self,
+ variance: ty::Variance,
+ info: ty::VarianceDiagInfo<'tcx>,
+ a: T,
+ b: T,
+ ) -> RelateResult<'tcx, T>;
+
+ // Overridable relations. You shouldn't typically call these
+ // directly; instead call `relate()`, which in turn calls
+ // these. This is more uniform, and it also allows us to add
+ // additional hooks for other types in the future if needed
+ // without making older code, which calls `relate`, obsolete.
+
+ fn tys(&mut self, a: Ty<'tcx>, b: Ty<'tcx>) -> RelateResult<'tcx, Ty<'tcx>>;
+
+ fn regions(
+ &mut self,
+ a: ty::Region<'tcx>,
+ b: ty::Region<'tcx>,
+ ) -> RelateResult<'tcx, ty::Region<'tcx>>;
+
+ fn consts(
+ &mut self,
+ a: ty::Const<'tcx>,
+ b: ty::Const<'tcx>,
+ ) -> RelateResult<'tcx, ty::Const<'tcx>>;
+
+ fn binders<T>(
+ &mut self,
+ a: ty::Binder<'tcx, T>,
+ b: ty::Binder<'tcx, T>,
+ ) -> RelateResult<'tcx, ty::Binder<'tcx, T>>
+ where
+ T: Relate<'tcx>;
+}
+
+pub trait Relate<'tcx>: TypeFoldable<'tcx> + Copy {
+ fn relate<R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: Self,
+ b: Self,
+ ) -> RelateResult<'tcx, Self>;
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Relate impls
+
+pub fn relate_type_and_mut<'tcx, R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: ty::TypeAndMut<'tcx>,
+ b: ty::TypeAndMut<'tcx>,
+ base_ty: Ty<'tcx>,
+) -> RelateResult<'tcx, ty::TypeAndMut<'tcx>> {
+ debug!("{}.mts({:?}, {:?})", relation.tag(), a, b);
+ if a.mutbl != b.mutbl {
+ Err(TypeError::Mutability)
+ } else {
+ let mutbl = a.mutbl;
+ let (variance, info) = match mutbl {
+ ast::Mutability::Not => (ty::Covariant, ty::VarianceDiagInfo::None),
+ ast::Mutability::Mut => {
+ (ty::Invariant, ty::VarianceDiagInfo::Invariant { ty: base_ty, param_index: 0 })
+ }
+ };
+ let ty = relation.relate_with_variance(variance, info, a.ty, b.ty)?;
+ Ok(ty::TypeAndMut { ty, mutbl })
+ }
+}
+
+#[inline]
+pub fn relate_substs<'tcx, R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a_subst: SubstsRef<'tcx>,
+ b_subst: SubstsRef<'tcx>,
+) -> RelateResult<'tcx, SubstsRef<'tcx>> {
+ relation.tcx().mk_substs(iter::zip(a_subst, b_subst).map(|(a, b)| {
+ relation.relate_with_variance(ty::Invariant, ty::VarianceDiagInfo::default(), a, b)
+ }))
+}
+
+pub fn relate_substs_with_variances<'tcx, R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ ty_def_id: DefId,
+ variances: &[ty::Variance],
+ a_subst: SubstsRef<'tcx>,
+ b_subst: SubstsRef<'tcx>,
+) -> RelateResult<'tcx, SubstsRef<'tcx>> {
+ let tcx = relation.tcx();
+
+ let mut cached_ty = None;
+ let params = iter::zip(a_subst, b_subst).enumerate().map(|(i, (a, b))| {
+ let variance = variances[i];
+ let variance_info = if variance == ty::Invariant {
+ let ty =
+ *cached_ty.get_or_insert_with(|| tcx.bound_type_of(ty_def_id).subst(tcx, a_subst));
+ ty::VarianceDiagInfo::Invariant { ty, param_index: i.try_into().unwrap() }
+ } else {
+ ty::VarianceDiagInfo::default()
+ };
+ relation.relate_with_variance(variance, variance_info, a, b)
+ });
+
+ tcx.mk_substs(params)
+}
+
+impl<'tcx> Relate<'tcx> for ty::FnSig<'tcx> {
+ fn relate<R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: ty::FnSig<'tcx>,
+ b: ty::FnSig<'tcx>,
+ ) -> RelateResult<'tcx, ty::FnSig<'tcx>> {
+ let tcx = relation.tcx();
+
+ if a.c_variadic != b.c_variadic {
+ return Err(TypeError::VariadicMismatch(expected_found(
+ relation,
+ a.c_variadic,
+ b.c_variadic,
+ )));
+ }
+ let unsafety = relation.relate(a.unsafety, b.unsafety)?;
+ let abi = relation.relate(a.abi, b.abi)?;
+
+ if a.inputs().len() != b.inputs().len() {
+ return Err(TypeError::ArgCount);
+ }
+
+ let inputs_and_output = iter::zip(a.inputs(), b.inputs())
+ .map(|(&a, &b)| ((a, b), false))
+ .chain(iter::once(((a.output(), b.output()), true)))
+ .map(|((a, b), is_output)| {
+ if is_output {
+ relation.relate(a, b)
+ } else {
+ relation.relate_with_variance(
+ ty::Contravariant,
+ ty::VarianceDiagInfo::default(),
+ a,
+ b,
+ )
+ }
+ })
+ .enumerate()
+ .map(|(i, r)| match r {
+ Err(TypeError::Sorts(exp_found) | TypeError::ArgumentSorts(exp_found, _)) => {
+ Err(TypeError::ArgumentSorts(exp_found, i))
+ }
+ Err(TypeError::Mutability | TypeError::ArgumentMutability(_)) => {
+ Err(TypeError::ArgumentMutability(i))
+ }
+ r => r,
+ });
+ Ok(ty::FnSig {
+ inputs_and_output: tcx.mk_type_list(inputs_and_output)?,
+ c_variadic: a.c_variadic,
+ unsafety,
+ abi,
+ })
+ }
+}
+
+impl<'tcx> Relate<'tcx> for ty::BoundConstness {
+ fn relate<R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: ty::BoundConstness,
+ b: ty::BoundConstness,
+ ) -> RelateResult<'tcx, ty::BoundConstness> {
+ if a != b {
+ Err(TypeError::ConstnessMismatch(expected_found(relation, a, b)))
+ } else {
+ Ok(a)
+ }
+ }
+}
+
+impl<'tcx> Relate<'tcx> for ast::Unsafety {
+ fn relate<R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: ast::Unsafety,
+ b: ast::Unsafety,
+ ) -> RelateResult<'tcx, ast::Unsafety> {
+ if a != b {
+ Err(TypeError::UnsafetyMismatch(expected_found(relation, a, b)))
+ } else {
+ Ok(a)
+ }
+ }
+}
+
+impl<'tcx> Relate<'tcx> for abi::Abi {
+ fn relate<R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: abi::Abi,
+ b: abi::Abi,
+ ) -> RelateResult<'tcx, abi::Abi> {
+ if a == b { Ok(a) } else { Err(TypeError::AbiMismatch(expected_found(relation, a, b))) }
+ }
+}
+
+impl<'tcx> Relate<'tcx> for ty::ProjectionTy<'tcx> {
+ fn relate<R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: ty::ProjectionTy<'tcx>,
+ b: ty::ProjectionTy<'tcx>,
+ ) -> RelateResult<'tcx, ty::ProjectionTy<'tcx>> {
+ if a.item_def_id != b.item_def_id {
+ Err(TypeError::ProjectionMismatched(expected_found(
+ relation,
+ a.item_def_id,
+ b.item_def_id,
+ )))
+ } else {
+ let substs = relation.relate(a.substs, b.substs)?;
+ Ok(ty::ProjectionTy { item_def_id: a.item_def_id, substs: &substs })
+ }
+ }
+}
+
+impl<'tcx> Relate<'tcx> for ty::ExistentialProjection<'tcx> {
+ fn relate<R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: ty::ExistentialProjection<'tcx>,
+ b: ty::ExistentialProjection<'tcx>,
+ ) -> RelateResult<'tcx, ty::ExistentialProjection<'tcx>> {
+ if a.item_def_id != b.item_def_id {
+ Err(TypeError::ProjectionMismatched(expected_found(
+ relation,
+ a.item_def_id,
+ b.item_def_id,
+ )))
+ } else {
+ let term = relation.relate_with_variance(
+ ty::Invariant,
+ ty::VarianceDiagInfo::default(),
+ a.term,
+ b.term,
+ )?;
+ let substs = relation.relate_with_variance(
+ ty::Invariant,
+ ty::VarianceDiagInfo::default(),
+ a.substs,
+ b.substs,
+ )?;
+ Ok(ty::ExistentialProjection { item_def_id: a.item_def_id, substs, term })
+ }
+ }
+}
+
+impl<'tcx> Relate<'tcx> for ty::TraitRef<'tcx> {
+ fn relate<R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: ty::TraitRef<'tcx>,
+ b: ty::TraitRef<'tcx>,
+ ) -> RelateResult<'tcx, ty::TraitRef<'tcx>> {
+ // Different traits cannot be related.
+ if a.def_id != b.def_id {
+ Err(TypeError::Traits(expected_found(relation, a.def_id, b.def_id)))
+ } else {
+ let substs = relate_substs(relation, a.substs, b.substs)?;
+ Ok(ty::TraitRef { def_id: a.def_id, substs })
+ }
+ }
+}
+
+impl<'tcx> Relate<'tcx> for ty::ExistentialTraitRef<'tcx> {
+ fn relate<R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: ty::ExistentialTraitRef<'tcx>,
+ b: ty::ExistentialTraitRef<'tcx>,
+ ) -> RelateResult<'tcx, ty::ExistentialTraitRef<'tcx>> {
+ // Different traits cannot be related.
+ if a.def_id != b.def_id {
+ Err(TypeError::Traits(expected_found(relation, a.def_id, b.def_id)))
+ } else {
+ let substs = relate_substs(relation, a.substs, b.substs)?;
+ Ok(ty::ExistentialTraitRef { def_id: a.def_id, substs })
+ }
+ }
+}
+
+#[derive(Copy, Debug, Clone, TypeFoldable, TypeVisitable)]
+struct GeneratorWitness<'tcx>(&'tcx ty::List<Ty<'tcx>>);
+
+impl<'tcx> Relate<'tcx> for GeneratorWitness<'tcx> {
+ fn relate<R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: GeneratorWitness<'tcx>,
+ b: GeneratorWitness<'tcx>,
+ ) -> RelateResult<'tcx, GeneratorWitness<'tcx>> {
+ assert_eq!(a.0.len(), b.0.len());
+ let tcx = relation.tcx();
+ let types = tcx.mk_type_list(iter::zip(a.0, b.0).map(|(a, b)| relation.relate(a, b)))?;
+ Ok(GeneratorWitness(types))
+ }
+}
+
+impl<'tcx> Relate<'tcx> for ImplSubject<'tcx> {
+ #[inline]
+ fn relate<R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: ImplSubject<'tcx>,
+ b: ImplSubject<'tcx>,
+ ) -> RelateResult<'tcx, ImplSubject<'tcx>> {
+ match (a, b) {
+ (ImplSubject::Trait(trait_ref_a), ImplSubject::Trait(trait_ref_b)) => {
+ let trait_ref = ty::TraitRef::relate(relation, trait_ref_a, trait_ref_b)?;
+ Ok(ImplSubject::Trait(trait_ref))
+ }
+ (ImplSubject::Inherent(ty_a), ImplSubject::Inherent(ty_b)) => {
+ let ty = Ty::relate(relation, ty_a, ty_b)?;
+ Ok(ImplSubject::Inherent(ty))
+ }
+ (ImplSubject::Trait(_), ImplSubject::Inherent(_))
+ | (ImplSubject::Inherent(_), ImplSubject::Trait(_)) => {
+ bug!("can not relate TraitRef and Ty");
+ }
+ }
+ }
+}
+
+impl<'tcx> Relate<'tcx> for Ty<'tcx> {
+ #[inline]
+ fn relate<R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: Ty<'tcx>,
+ b: Ty<'tcx>,
+ ) -> RelateResult<'tcx, Ty<'tcx>> {
+ relation.tys(a, b)
+ }
+}
+
+/// The main "type relation" routine. Note that this does not handle
+/// inference artifacts, so you should filter those out before calling
+/// it.
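+///
+/// Illustrative dispatch (a sketch): relating `&'a [u8]` with `&'b [u8]` hits the
+/// `ty::Ref` arm, which relates the regions and then the `ty::Slice` element types,
+/// while mismatched head constructors fall through to the final `TypeError::Sorts` arm.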
+pub fn super_relate_tys<'tcx, R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: Ty<'tcx>,
+ b: Ty<'tcx>,
+) -> RelateResult<'tcx, Ty<'tcx>> {
+ let tcx = relation.tcx();
+ debug!("super_relate_tys: a={:?} b={:?}", a, b);
+ match (a.kind(), b.kind()) {
+ (&ty::Infer(_), _) | (_, &ty::Infer(_)) => {
+ // The caller should handle these cases!
+ bug!("var types encountered in super_relate_tys")
+ }
+
+ (ty::Bound(..), _) | (_, ty::Bound(..)) => {
+ bug!("bound types encountered in super_relate_tys")
+ }
+
+ (&ty::Error(_), _) | (_, &ty::Error(_)) => Ok(tcx.ty_error()),
+
+ (&ty::Never, _)
+ | (&ty::Char, _)
+ | (&ty::Bool, _)
+ | (&ty::Int(_), _)
+ | (&ty::Uint(_), _)
+ | (&ty::Float(_), _)
+ | (&ty::Str, _)
+ if a == b =>
+ {
+ Ok(a)
+ }
+
+ (&ty::Param(ref a_p), &ty::Param(ref b_p)) if a_p.index == b_p.index => Ok(a),
+
+ (ty::Placeholder(p1), ty::Placeholder(p2)) if p1 == p2 => Ok(a),
+
+ (&ty::Adt(a_def, a_substs), &ty::Adt(b_def, b_substs)) if a_def == b_def => {
+ let substs = relation.relate_item_substs(a_def.did(), a_substs, b_substs)?;
+ Ok(tcx.mk_adt(a_def, substs))
+ }
+
+ (&ty::Foreign(a_id), &ty::Foreign(b_id)) if a_id == b_id => Ok(tcx.mk_foreign(a_id)),
+
+ (&ty::Dynamic(a_obj, a_region), &ty::Dynamic(b_obj, b_region)) => {
+ let region_bound = relation.with_cause(Cause::ExistentialRegionBound, |relation| {
+ relation.relate_with_variance(
+ ty::Contravariant,
+ ty::VarianceDiagInfo::default(),
+ a_region,
+ b_region,
+ )
+ })?;
+ Ok(tcx.mk_dynamic(relation.relate(a_obj, b_obj)?, region_bound))
+ }
+
+ (&ty::Generator(a_id, a_substs, movability), &ty::Generator(b_id, b_substs, _))
+ if a_id == b_id =>
+ {
+ // All Generator types with the same id represent
+ // the (anonymous) type of the same generator expression. So
+ // all of their regions should be equated.
+ let substs = relation.relate(a_substs, b_substs)?;
+ Ok(tcx.mk_generator(a_id, substs, movability))
+ }
+
+ (&ty::GeneratorWitness(a_types), &ty::GeneratorWitness(b_types)) => {
+ // Wrap our types with a temporary GeneratorWitness struct
+ // inside the binder so we can relate them
+ let a_types = a_types.map_bound(GeneratorWitness);
+ let b_types = b_types.map_bound(GeneratorWitness);
+ // Then remove the GeneratorWitness for the result
+ let types = relation.relate(a_types, b_types)?.map_bound(|witness| witness.0);
+ Ok(tcx.mk_generator_witness(types))
+ }
+
+ (&ty::Closure(a_id, a_substs), &ty::Closure(b_id, b_substs)) if a_id == b_id => {
+ // All Closure types with the same id represent
+ // the (anonymous) type of the same closure expression. So
+ // all of their regions should be equated.
+ let substs = relation.relate(a_substs, b_substs)?;
+ Ok(tcx.mk_closure(a_id, &substs))
+ }
+
+ (&ty::RawPtr(a_mt), &ty::RawPtr(b_mt)) => {
+ let mt = relate_type_and_mut(relation, a_mt, b_mt, a)?;
+ Ok(tcx.mk_ptr(mt))
+ }
+
+ (&ty::Ref(a_r, a_ty, a_mutbl), &ty::Ref(b_r, b_ty, b_mutbl)) => {
+ let r = relation.relate_with_variance(
+ ty::Contravariant,
+ ty::VarianceDiagInfo::default(),
+ a_r,
+ b_r,
+ )?;
+ let a_mt = ty::TypeAndMut { ty: a_ty, mutbl: a_mutbl };
+ let b_mt = ty::TypeAndMut { ty: b_ty, mutbl: b_mutbl };
+ let mt = relate_type_and_mut(relation, a_mt, b_mt, a)?;
+ Ok(tcx.mk_ref(r, mt))
+ }
+
+ (&ty::Array(a_t, sz_a), &ty::Array(b_t, sz_b)) => {
+ let t = relation.relate(a_t, b_t)?;
+ match relation.relate(sz_a, sz_b) {
+ Ok(sz) => Ok(tcx.mk_ty(ty::Array(t, sz))),
+ Err(err) => {
+ // Check whether the lengths are both concrete/known values,
+ // but are unequal, for better diagnostics.
+ //
+ // It might seem dubious to eagerly evaluate these constants here, but
+ // we cannot end up with errors in `Relate` during both
+ // `type_of` and `predicates_of`. This means that evaluating the
+ // constants should not cause cycle errors here.
+ let sz_a = sz_a.try_eval_usize(tcx, relation.param_env());
+ let sz_b = sz_b.try_eval_usize(tcx, relation.param_env());
+ match (sz_a, sz_b) {
+ (Some(sz_a_val), Some(sz_b_val)) if sz_a_val != sz_b_val => Err(
+ TypeError::FixedArraySize(expected_found(relation, sz_a_val, sz_b_val)),
+ ),
+ _ => Err(err),
+ }
+ }
+ }
+ }
+
+ (&ty::Slice(a_t), &ty::Slice(b_t)) => {
+ let t = relation.relate(a_t, b_t)?;
+ Ok(tcx.mk_slice(t))
+ }
+
+ (&ty::Tuple(as_), &ty::Tuple(bs)) => {
+ if as_.len() == bs.len() {
+ Ok(tcx.mk_tup(iter::zip(as_, bs).map(|(a, b)| relation.relate(a, b)))?)
+ } else if !(as_.is_empty() || bs.is_empty()) {
+ Err(TypeError::TupleSize(expected_found(relation, as_.len(), bs.len())))
+ } else {
+ Err(TypeError::Sorts(expected_found(relation, a, b)))
+ }
+ }
+
+ (&ty::FnDef(a_def_id, a_substs), &ty::FnDef(b_def_id, b_substs))
+ if a_def_id == b_def_id =>
+ {
+ let substs = relation.relate_item_substs(a_def_id, a_substs, b_substs)?;
+ Ok(tcx.mk_fn_def(a_def_id, substs))
+ }
+
+ (&ty::FnPtr(a_fty), &ty::FnPtr(b_fty)) => {
+ let fty = relation.relate(a_fty, b_fty)?;
+ Ok(tcx.mk_fn_ptr(fty))
+ }
+
+ // these two are already handled downstream in case of lazy normalization
+ (&ty::Projection(a_data), &ty::Projection(b_data)) => {
+ let projection_ty = relation.relate(a_data, b_data)?;
+ Ok(tcx.mk_projection(projection_ty.item_def_id, projection_ty.substs))
+ }
+
+ (&ty::Opaque(a_def_id, a_substs), &ty::Opaque(b_def_id, b_substs))
+ if a_def_id == b_def_id =>
+ {
+ let substs = relate_substs(relation, a_substs, b_substs)?;
+ Ok(tcx.mk_opaque(a_def_id, substs))
+ }
+
+ _ => Err(TypeError::Sorts(expected_found(relation, a, b))),
+ }
+}
+
+/// The main "const relation" routine. Note that this does not handle
+/// inference artifacts, so you should filter those out before calling
+/// it.
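+///
+/// For example (illustrative): two `ty::ConstKind::Value` constants unify only if
+/// their values are equal, while two `Unevaluated` constants with the same `def`
+/// and `promoted` are unified by relating their substs invariantly.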
+pub fn super_relate_consts<'tcx, R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: ty::Const<'tcx>,
+ b: ty::Const<'tcx>,
+) -> RelateResult<'tcx, ty::Const<'tcx>> {
+ debug!("{}.super_relate_consts(a = {:?}, b = {:?})", relation.tag(), a, b);
+ let tcx = relation.tcx();
+
+ let a_ty;
+ let b_ty;
+ if relation.tcx().features().adt_const_params {
+ a_ty = tcx.normalize_erasing_regions(relation.param_env(), a.ty());
+ b_ty = tcx.normalize_erasing_regions(relation.param_env(), b.ty());
+ } else {
+ a_ty = tcx.erase_regions(a.ty());
+ b_ty = tcx.erase_regions(b.ty());
+ }
+ if a_ty != b_ty {
+ relation.tcx().sess.delay_span_bug(
+ DUMMY_SP,
+ &format!("cannot relate constants of different types: {} != {}", a_ty, b_ty),
+ );
+ }
+
+ let eagerly_eval = |x: ty::Const<'tcx>| x.eval(tcx, relation.param_env());
+ let a = eagerly_eval(a);
+ let b = eagerly_eval(b);
+
+ // Currently, the values that can be unified are primitive types,
+ // and those that derive both `PartialEq` and `Eq`, corresponding
+ // to structural-match types.
+ let is_match = match (a.kind(), b.kind()) {
+ (ty::ConstKind::Infer(_), _) | (_, ty::ConstKind::Infer(_)) => {
+ // The caller should handle these cases!
+ bug!("var types encountered in super_relate_consts: {:?} {:?}", a, b)
+ }
+
+ (ty::ConstKind::Error(_), _) => return Ok(a),
+ (_, ty::ConstKind::Error(_)) => return Ok(b),
+
+ (ty::ConstKind::Param(a_p), ty::ConstKind::Param(b_p)) => a_p.index == b_p.index,
+ (ty::ConstKind::Placeholder(p1), ty::ConstKind::Placeholder(p2)) => p1 == p2,
+ (ty::ConstKind::Value(a_val), ty::ConstKind::Value(b_val)) => a_val == b_val,
+
+ (ty::ConstKind::Unevaluated(au), ty::ConstKind::Unevaluated(bu))
+ if tcx.features().generic_const_exprs =>
+ {
+ tcx.try_unify_abstract_consts(relation.param_env().and((au.shrink(), bu.shrink())))
+ }
+
+ // While this is slightly incorrect, it shouldn't matter for `min_const_generics`
+ // and is the better alternative to waiting until `generic_const_exprs` can
+ // be stabilized.
+ (ty::ConstKind::Unevaluated(au), ty::ConstKind::Unevaluated(bu))
+ if au.def == bu.def && au.promoted == bu.promoted =>
+ {
+ let substs = relation.relate_with_variance(
+ ty::Variance::Invariant,
+ ty::VarianceDiagInfo::default(),
+ au.substs,
+ bu.substs,
+ )?;
+ return Ok(tcx.mk_const(ty::ConstS {
+ kind: ty::ConstKind::Unevaluated(ty::Unevaluated {
+ def: au.def,
+ substs,
+ promoted: au.promoted,
+ }),
+ ty: a.ty(),
+ }));
+ }
+ _ => false,
+ };
+ if is_match { Ok(a) } else { Err(TypeError::ConstMismatch(expected_found(relation, a, b))) }
+}
+
+impl<'tcx> Relate<'tcx> for &'tcx ty::List<ty::Binder<'tcx, ty::ExistentialPredicate<'tcx>>> {
+ fn relate<R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: Self,
+ b: Self,
+ ) -> RelateResult<'tcx, Self> {
+ let tcx = relation.tcx();
+
+ // FIXME: this is wasteful, but we want to do a perf run to see how slow it is.
+ // We need to perform this deduplication as we sometimes generate duplicate projections
+ // in `a`.
+ let mut a_v: Vec<_> = a.into_iter().collect();
+ let mut b_v: Vec<_> = b.into_iter().collect();
+ // `skip_binder` here is okay because `stable_cmp` doesn't look at binders
+ a_v.sort_by(|a, b| a.skip_binder().stable_cmp(tcx, &b.skip_binder()));
+ a_v.dedup();
+ b_v.sort_by(|a, b| a.skip_binder().stable_cmp(tcx, &b.skip_binder()));
+ b_v.dedup();
+ if a_v.len() != b_v.len() {
+ return Err(TypeError::ExistentialMismatch(expected_found(relation, a, b)));
+ }
+
+ let v = iter::zip(a_v, b_v).map(|(ep_a, ep_b)| {
+ use crate::ty::ExistentialPredicate::*;
+ match (ep_a.skip_binder(), ep_b.skip_binder()) {
+ (Trait(a), Trait(b)) => Ok(ep_a
+ .rebind(Trait(relation.relate(ep_a.rebind(a), ep_b.rebind(b))?.skip_binder()))),
+ (Projection(a), Projection(b)) => Ok(ep_a.rebind(Projection(
+ relation.relate(ep_a.rebind(a), ep_b.rebind(b))?.skip_binder(),
+ ))),
+ (AutoTrait(a), AutoTrait(b)) if a == b => Ok(ep_a.rebind(AutoTrait(a))),
+ _ => Err(TypeError::ExistentialMismatch(expected_found(relation, a, b))),
+ }
+ });
+ tcx.mk_poly_existential_predicates(v)
+ }
+}
+
+impl<'tcx> Relate<'tcx> for ty::ClosureSubsts<'tcx> {
+ fn relate<R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: ty::ClosureSubsts<'tcx>,
+ b: ty::ClosureSubsts<'tcx>,
+ ) -> RelateResult<'tcx, ty::ClosureSubsts<'tcx>> {
+ let substs = relate_substs(relation, a.substs, b.substs)?;
+ Ok(ty::ClosureSubsts { substs })
+ }
+}
+
+impl<'tcx> Relate<'tcx> for ty::GeneratorSubsts<'tcx> {
+ fn relate<R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: ty::GeneratorSubsts<'tcx>,
+ b: ty::GeneratorSubsts<'tcx>,
+ ) -> RelateResult<'tcx, ty::GeneratorSubsts<'tcx>> {
+ let substs = relate_substs(relation, a.substs, b.substs)?;
+ Ok(ty::GeneratorSubsts { substs })
+ }
+}
+
+impl<'tcx> Relate<'tcx> for SubstsRef<'tcx> {
+ fn relate<R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: SubstsRef<'tcx>,
+ b: SubstsRef<'tcx>,
+ ) -> RelateResult<'tcx, SubstsRef<'tcx>> {
+ relate_substs(relation, a, b)
+ }
+}
+
+impl<'tcx> Relate<'tcx> for ty::Region<'tcx> {
+ fn relate<R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: ty::Region<'tcx>,
+ b: ty::Region<'tcx>,
+ ) -> RelateResult<'tcx, ty::Region<'tcx>> {
+ relation.regions(a, b)
+ }
+}
+
+impl<'tcx> Relate<'tcx> for ty::Const<'tcx> {
+ fn relate<R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: ty::Const<'tcx>,
+ b: ty::Const<'tcx>,
+ ) -> RelateResult<'tcx, ty::Const<'tcx>> {
+ relation.consts(a, b)
+ }
+}
+
+impl<'tcx, T: Relate<'tcx>> Relate<'tcx> for ty::Binder<'tcx, T> {
+ fn relate<R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: ty::Binder<'tcx, T>,
+ b: ty::Binder<'tcx, T>,
+ ) -> RelateResult<'tcx, ty::Binder<'tcx, T>> {
+ relation.binders(a, b)
+ }
+}
+
+impl<'tcx> Relate<'tcx> for GenericArg<'tcx> {
+ fn relate<R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: GenericArg<'tcx>,
+ b: GenericArg<'tcx>,
+ ) -> RelateResult<'tcx, GenericArg<'tcx>> {
+ match (a.unpack(), b.unpack()) {
+ (GenericArgKind::Lifetime(a_lt), GenericArgKind::Lifetime(b_lt)) => {
+ Ok(relation.relate(a_lt, b_lt)?.into())
+ }
+ (GenericArgKind::Type(a_ty), GenericArgKind::Type(b_ty)) => {
+ Ok(relation.relate(a_ty, b_ty)?.into())
+ }
+ (GenericArgKind::Const(a_ct), GenericArgKind::Const(b_ct)) => {
+ Ok(relation.relate(a_ct, b_ct)?.into())
+ }
+ (GenericArgKind::Lifetime(unpacked), x) => {
+ bug!("impossible case reached: can't relate: {:?} with {:?}", unpacked, x)
+ }
+ (GenericArgKind::Type(unpacked), x) => {
+ bug!("impossible case reached: can't relate: {:?} with {:?}", unpacked, x)
+ }
+ (GenericArgKind::Const(unpacked), x) => {
+ bug!("impossible case reached: can't relate: {:?} with {:?}", unpacked, x)
+ }
+ }
+ }
+}
+
+impl<'tcx> Relate<'tcx> for ty::ImplPolarity {
+ fn relate<R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: ty::ImplPolarity,
+ b: ty::ImplPolarity,
+ ) -> RelateResult<'tcx, ty::ImplPolarity> {
+ if a != b {
+ Err(TypeError::PolarityMismatch(expected_found(relation, a, b)))
+ } else {
+ Ok(a)
+ }
+ }
+}
+
+impl<'tcx> Relate<'tcx> for ty::TraitPredicate<'tcx> {
+ fn relate<R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: ty::TraitPredicate<'tcx>,
+ b: ty::TraitPredicate<'tcx>,
+ ) -> RelateResult<'tcx, ty::TraitPredicate<'tcx>> {
+ Ok(ty::TraitPredicate {
+ trait_ref: relation.relate(a.trait_ref, b.trait_ref)?,
+ constness: relation.relate(a.constness, b.constness)?,
+ polarity: relation.relate(a.polarity, b.polarity)?,
+ })
+ }
+}
+
+impl<'tcx> Relate<'tcx> for ty::Term<'tcx> {
+ fn relate<R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: Self,
+ b: Self,
+ ) -> RelateResult<'tcx, Self> {
+ Ok(match (a, b) {
+ (Term::Ty(a), Term::Ty(b)) => relation.relate(a, b)?.into(),
+ (Term::Const(a), Term::Const(b)) => relation.relate(a, b)?.into(),
+ _ => return Err(TypeError::Mismatch),
+ })
+ }
+}
+
+impl<'tcx> Relate<'tcx> for ty::ProjectionPredicate<'tcx> {
+ fn relate<R: TypeRelation<'tcx>>(
+ relation: &mut R,
+ a: ty::ProjectionPredicate<'tcx>,
+ b: ty::ProjectionPredicate<'tcx>,
+ ) -> RelateResult<'tcx, ty::ProjectionPredicate<'tcx>> {
+ Ok(ty::ProjectionPredicate {
+ projection_ty: relation.relate(a.projection_ty, b.projection_ty)?,
+ term: relation.relate(a.term, b.term)?,
+ })
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Error handling
+
+pub fn expected_found<'tcx, R, T>(relation: &mut R, a: T, b: T) -> ExpectedFound<T>
+where
+ R: TypeRelation<'tcx>,
+{
+ ExpectedFound::new(relation.a_is_expected(), a, b)
+}
diff --git a/compiler/rustc_middle/src/ty/rvalue_scopes.rs b/compiler/rustc_middle/src/ty/rvalue_scopes.rs
new file mode 100644
index 000000000..e86dafae3
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/rvalue_scopes.rs
@@ -0,0 +1,57 @@
+use crate::middle::region::{Scope, ScopeData, ScopeTree};
+use rustc_data_structures::fx::FxHashMap;
+use rustc_hir as hir;
+
+/// `RvalueScopes` is a mapping from sub-expressions to their _extended_ lifetimes, as
+/// determined by the rules laid out in `rustc_typeck::check::rvalue_scopes`.
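+///
+/// For example (illustrative): in `let x = &temp();`, the temporary returned by
+/// `temp()` has its lifetime extended to the enclosing block, and that extension
+/// is recorded in this map.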
+#[derive(TyEncodable, TyDecodable, Clone, Debug, Default, Eq, PartialEq, HashStable)]
+pub struct RvalueScopes {
+ map: FxHashMap<hir::ItemLocalId, Option<Scope>>,
+}
+
+impl RvalueScopes {
+ pub fn new() -> Self {
+ Self { map: <_>::default() }
+ }
+
+ /// Returns the scope when the temp created by `expr_id` will be cleaned up.
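+ ///
+ /// Illustrative behavior (a sketch): if no custom rvalue scope was recorded for
+ /// `expr_id`, this walks up the region tree to the nearest enclosing destruction
+ /// scope, e.g. the statement containing the expression.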
+ pub fn temporary_scope(
+ &self,
+ region_scope_tree: &ScopeTree,
+ expr_id: hir::ItemLocalId,
+ ) -> Option<Scope> {
+ // Check for a designated rvalue scope.
+ if let Some(&s) = self.map.get(&expr_id) {
+ debug!("temporary_scope({expr_id:?}) = {s:?} [custom]");
+ return s;
+ }
+
+ // Otherwise, locate the innermost terminating scope
+ // if there's one. Static items, for instance, won't
+ // have an enclosing scope, hence no scope will be
+ // returned.
+ let mut id = Scope { id: expr_id, data: ScopeData::Node };
+
+ while let Some(&(p, _)) = region_scope_tree.parent_map.get(&id) {
+ match p.data {
+ ScopeData::Destruction => {
+ debug!("temporary_scope({expr_id:?}) = {id:?} [enclosing]");
+ return Some(id);
+ }
+ _ => id = p,
+ }
+ }
+
+ debug!("temporary_scope({expr_id:?}) = None");
+ None
+ }
+
+ /// Make an association between a sub-expression and an extended lifetime
+ pub fn record_rvalue_scope(&mut self, var: hir::ItemLocalId, lifetime: Option<Scope>) {
+ debug!("record_rvalue_scope(var={var:?}, lifetime={lifetime:?})");
+ if let Some(lifetime) = lifetime {
+ assert!(var != lifetime.item_local_id());
+ }
+ self.map.insert(var, lifetime);
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/structural_impls.rs b/compiler/rustc_middle/src/ty/structural_impls.rs
new file mode 100644
index 000000000..7660a2f3a
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/structural_impls.rs
@@ -0,0 +1,1304 @@
+//! This module contains implementations of the `Lift` and `TypeFoldable`
+//! traits for various types in the Rust compiler. Most are written by
+//! hand, though we've recently added some macros and proc-macros to help with the tedium.
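+//!
+//! As an illustrative summary (not a spec): `tcx.lift(value)` converts a value tied
+//! to some lifetime `'a` into the same value at `'tcx`, returning `None` when the
+//! value does not belong to the given `TyCtxt`.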
+
+use crate::mir::interpret;
+use crate::mir::ProjectionKind;
+use crate::ty::fold::{FallibleTypeFolder, TypeFoldable, TypeSuperFoldable};
+use crate::ty::print::{with_no_trimmed_paths, FmtPrinter, Printer};
+use crate::ty::visit::{TypeSuperVisitable, TypeVisitable, TypeVisitor};
+use crate::ty::{self, InferConst, Lift, Term, Ty, TyCtxt};
+use rustc_data_structures::functor::IdFunctor;
+use rustc_hir as hir;
+use rustc_hir::def::Namespace;
+use rustc_index::vec::{Idx, IndexVec};
+
+use std::fmt;
+use std::mem::ManuallyDrop;
+use std::ops::ControlFlow;
+use std::rc::Rc;
+use std::sync::Arc;
+
+impl fmt::Debug for ty::TraitDef {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ ty::tls::with(|tcx| {
+ with_no_trimmed_paths!({
+ f.write_str(
+ &FmtPrinter::new(tcx, Namespace::TypeNS)
+ .print_def_path(self.def_id, &[])?
+ .into_buffer(),
+ )
+ })
+ })
+ }
+}
+
+impl<'tcx> fmt::Debug for ty::AdtDef<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ ty::tls::with(|tcx| {
+ with_no_trimmed_paths!({
+ f.write_str(
+ &FmtPrinter::new(tcx, Namespace::TypeNS)
+ .print_def_path(self.did(), &[])?
+ .into_buffer(),
+ )
+ })
+ })
+ }
+}
+
+impl fmt::Debug for ty::UpvarId {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let name = ty::tls::with(|tcx| tcx.hir().name(self.var_path.hir_id));
+ write!(f, "UpvarId({:?};`{}`;{:?})", self.var_path.hir_id, name, self.closure_expr_id)
+ }
+}
+
+impl<'tcx> fmt::Debug for ty::ExistentialTraitRef<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ with_no_trimmed_paths!(fmt::Display::fmt(self, f))
+ }
+}
+
+impl<'tcx> fmt::Debug for ty::adjustment::Adjustment<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{:?} -> {}", self.kind, self.target)
+ }
+}
+
+impl fmt::Debug for ty::BoundRegionKind {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ ty::BrAnon(n) => write!(f, "BrAnon({:?})", n),
+ ty::BrNamed(did, name) => {
+ if did.is_crate_root() {
+ write!(f, "BrNamed({})", name)
+ } else {
+ write!(f, "BrNamed({:?}, {})", did, name)
+ }
+ }
+ ty::BrEnv => write!(f, "BrEnv"),
+ }
+ }
+}
+
+impl fmt::Debug for ty::FreeRegion {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "ReFree({:?}, {:?})", self.scope, self.bound_region)
+ }
+}
+
+impl<'tcx> fmt::Debug for ty::FnSig<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "({:?}; c_variadic: {})->{:?}", self.inputs(), self.c_variadic, self.output())
+ }
+}
+
+impl<'tcx> fmt::Debug for ty::ConstVid<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "_#{}c", self.index)
+ }
+}
+
+impl fmt::Debug for ty::RegionVid {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "'_#{}r", self.index())
+ }
+}
+
+impl<'tcx> fmt::Debug for ty::TraitRef<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ with_no_trimmed_paths!(fmt::Display::fmt(self, f))
+ }
+}
+
+impl<'tcx> fmt::Debug for Ty<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ with_no_trimmed_paths!(fmt::Display::fmt(self, f))
+ }
+}
+
+impl fmt::Debug for ty::ParamTy {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{}/#{}", self.name, self.index)
+ }
+}
+
+impl fmt::Debug for ty::ParamConst {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{}/#{}", self.name, self.index)
+ }
+}
+
+impl<'tcx> fmt::Debug for ty::TraitPredicate<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ if let ty::BoundConstness::ConstIfConst = self.constness {
+ write!(f, "~const ")?;
+ }
+ write!(f, "TraitPredicate({:?}, polarity:{:?})", self.trait_ref, self.polarity)
+ }
+}
+
+impl<'tcx> fmt::Debug for ty::ProjectionPredicate<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "ProjectionPredicate({:?}, {:?})", self.projection_ty, self.term)
+ }
+}
+
+impl<'tcx> fmt::Debug for ty::Predicate<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{:?}", self.kind())
+ }
+}
+
+impl<'tcx> fmt::Debug for ty::PredicateKind<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ ty::PredicateKind::Trait(ref a) => a.fmt(f),
+ ty::PredicateKind::Subtype(ref pair) => pair.fmt(f),
+ ty::PredicateKind::Coerce(ref pair) => pair.fmt(f),
+ ty::PredicateKind::RegionOutlives(ref pair) => pair.fmt(f),
+ ty::PredicateKind::TypeOutlives(ref pair) => pair.fmt(f),
+ ty::PredicateKind::Projection(ref pair) => pair.fmt(f),
+ ty::PredicateKind::WellFormed(data) => write!(f, "WellFormed({:?})", data),
+ ty::PredicateKind::ObjectSafe(trait_def_id) => {
+ write!(f, "ObjectSafe({:?})", trait_def_id)
+ }
+ ty::PredicateKind::ClosureKind(closure_def_id, closure_substs, kind) => {
+ write!(f, "ClosureKind({:?}, {:?}, {:?})", closure_def_id, closure_substs, kind)
+ }
+ ty::PredicateKind::ConstEvaluatable(uv) => {
+ write!(f, "ConstEvaluatable({:?}, {:?})", uv.def, uv.substs)
+ }
+ ty::PredicateKind::ConstEquate(c1, c2) => write!(f, "ConstEquate({:?}, {:?})", c1, c2),
+ ty::PredicateKind::TypeWellFormedFromEnv(ty) => {
+ write!(f, "TypeWellFormedFromEnv({:?})", ty)
+ }
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Atomic structs
+//
+// For things that don't carry any arena-allocated data (and are
+// `Copy`...), just add them to this list.
+
+TrivialTypeTraversalAndLiftImpls! {
+ (),
+ bool,
+ usize,
+ ::rustc_target::abi::VariantIdx,
+ u32,
+ u64,
+ String,
+ crate::middle::region::Scope,
+ crate::ty::FloatTy,
+ ::rustc_ast::InlineAsmOptions,
+ ::rustc_ast::InlineAsmTemplatePiece,
+ ::rustc_ast::NodeId,
+ ::rustc_span::symbol::Symbol,
+ ::rustc_hir::def::Res,
+ ::rustc_hir::def_id::DefId,
+ ::rustc_hir::def_id::LocalDefId,
+ ::rustc_hir::HirId,
+ ::rustc_hir::MatchSource,
+ ::rustc_hir::Mutability,
+ ::rustc_hir::Unsafety,
+ ::rustc_target::asm::InlineAsmRegOrRegClass,
+ ::rustc_target::spec::abi::Abi,
+ crate::mir::coverage::ExpressionOperandId,
+ crate::mir::coverage::CounterValueReference,
+ crate::mir::coverage::InjectedExpressionId,
+ crate::mir::coverage::InjectedExpressionIndex,
+ crate::mir::coverage::MappedExpressionIndex,
+ crate::mir::Local,
+ crate::mir::Promoted,
+ crate::traits::Reveal,
+ crate::ty::adjustment::AutoBorrowMutability,
+ crate::ty::AdtKind,
+ crate::ty::BoundConstness,
+ // Including `BoundRegionKind` is a *bit* dubious, but direct
+ // references to bound regions appear in `ty::Error`, and aren't
+ // really meant to be folded. In general, we can only fold a fully
+ // general `Region`.
+ crate::ty::BoundRegionKind,
+ crate::ty::AssocItem,
+ crate::ty::AssocKind,
+ crate::ty::Placeholder<crate::ty::BoundRegionKind>,
+ crate::ty::ClosureKind,
+ crate::ty::FreeRegion,
+ crate::ty::InferTy,
+ crate::ty::IntVarValue,
+ crate::ty::ParamConst,
+ crate::ty::ParamTy,
+ crate::ty::adjustment::PointerCast,
+ crate::ty::RegionVid,
+ crate::ty::UniverseIndex,
+ crate::ty::Variance,
+ ::rustc_span::Span,
+ ::rustc_errors::ErrorGuaranteed,
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Lift implementations
+
+// FIXME(eddyb) replace all the uses of `Option::map` with `?`.
+impl<'tcx, A: Lift<'tcx>, B: Lift<'tcx>> Lift<'tcx> for (A, B) {
+ type Lifted = (A::Lifted, B::Lifted);
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ Some((tcx.lift(self.0)?, tcx.lift(self.1)?))
+ }
+}
+
+impl<'tcx, A: Lift<'tcx>, B: Lift<'tcx>, C: Lift<'tcx>> Lift<'tcx> for (A, B, C) {
+ type Lifted = (A::Lifted, B::Lifted, C::Lifted);
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ Some((tcx.lift(self.0)?, tcx.lift(self.1)?, tcx.lift(self.2)?))
+ }
+}
+
+impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for Option<T> {
+ type Lifted = Option<T::Lifted>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ match self {
+ Some(x) => tcx.lift(x).map(Some),
+ None => Some(None),
+ }
+ }
+}
+
+impl<'tcx, T: Lift<'tcx>, E: Lift<'tcx>> Lift<'tcx> for Result<T, E> {
+ type Lifted = Result<T::Lifted, E::Lifted>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ match self {
+ Ok(x) => tcx.lift(x).map(Ok),
+ Err(e) => tcx.lift(e).map(Err),
+ }
+ }
+}
+
+impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for Box<T> {
+ type Lifted = Box<T::Lifted>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ tcx.lift(*self).map(Box::new)
+ }
+}
+
+impl<'tcx, T: Lift<'tcx> + Clone> Lift<'tcx> for Rc<T> {
+ type Lifted = Rc<T::Lifted>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ tcx.lift(self.as_ref().clone()).map(Rc::new)
+ }
+}
+
+impl<'tcx, T: Lift<'tcx> + Clone> Lift<'tcx> for Arc<T> {
+ type Lifted = Arc<T::Lifted>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ tcx.lift(self.as_ref().clone()).map(Arc::new)
+ }
+}
+impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for Vec<T> {
+ type Lifted = Vec<T::Lifted>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ self.into_iter().map(|v| tcx.lift(v)).collect()
+ }
+}
+
+impl<'tcx, I: Idx, T: Lift<'tcx>> Lift<'tcx> for IndexVec<I, T> {
+ type Lifted = IndexVec<I, T::Lifted>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ self.into_iter().map(|e| tcx.lift(e)).collect()
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::TraitRef<'a> {
+ type Lifted = ty::TraitRef<'tcx>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ tcx.lift(self.substs).map(|substs| ty::TraitRef { def_id: self.def_id, substs })
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::ExistentialTraitRef<'a> {
+ type Lifted = ty::ExistentialTraitRef<'tcx>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ tcx.lift(self.substs).map(|substs| ty::ExistentialTraitRef { def_id: self.def_id, substs })
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::ExistentialPredicate<'a> {
+ type Lifted = ty::ExistentialPredicate<'tcx>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ match self {
+ ty::ExistentialPredicate::Trait(x) => tcx.lift(x).map(ty::ExistentialPredicate::Trait),
+ ty::ExistentialPredicate::Projection(x) => {
+ tcx.lift(x).map(ty::ExistentialPredicate::Projection)
+ }
+ ty::ExistentialPredicate::AutoTrait(def_id) => {
+ Some(ty::ExistentialPredicate::AutoTrait(def_id))
+ }
+ }
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for Term<'a> {
+ type Lifted = ty::Term<'tcx>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ Some(match self {
+ Term::Ty(ty) => Term::Ty(tcx.lift(ty)?),
+ Term::Const(c) => Term::Const(tcx.lift(c)?),
+ })
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::TraitPredicate<'a> {
+ type Lifted = ty::TraitPredicate<'tcx>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<ty::TraitPredicate<'tcx>> {
+ tcx.lift(self.trait_ref).map(|trait_ref| ty::TraitPredicate {
+ trait_ref,
+ constness: self.constness,
+ polarity: self.polarity,
+ })
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::SubtypePredicate<'a> {
+ type Lifted = ty::SubtypePredicate<'tcx>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<ty::SubtypePredicate<'tcx>> {
+ tcx.lift((self.a, self.b)).map(|(a, b)| ty::SubtypePredicate {
+ a_is_expected: self.a_is_expected,
+ a,
+ b,
+ })
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::CoercePredicate<'a> {
+ type Lifted = ty::CoercePredicate<'tcx>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<ty::CoercePredicate<'tcx>> {
+ tcx.lift((self.a, self.b)).map(|(a, b)| ty::CoercePredicate { a, b })
+ }
+}
+
+impl<'tcx, A: Copy + Lift<'tcx>, B: Copy + Lift<'tcx>> Lift<'tcx> for ty::OutlivesPredicate<A, B> {
+ type Lifted = ty::OutlivesPredicate<A::Lifted, B::Lifted>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ tcx.lift((self.0, self.1)).map(|(a, b)| ty::OutlivesPredicate(a, b))
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::ProjectionTy<'a> {
+ type Lifted = ty::ProjectionTy<'tcx>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<ty::ProjectionTy<'tcx>> {
+ tcx.lift(self.substs)
+ .map(|substs| ty::ProjectionTy { item_def_id: self.item_def_id, substs })
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::ProjectionPredicate<'a> {
+ type Lifted = ty::ProjectionPredicate<'tcx>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<ty::ProjectionPredicate<'tcx>> {
+ tcx.lift((self.projection_ty, self.term))
+ .map(|(projection_ty, term)| ty::ProjectionPredicate { projection_ty, term })
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::ExistentialProjection<'a> {
+ type Lifted = ty::ExistentialProjection<'tcx>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ tcx.lift(self.substs).map(|substs| ty::ExistentialProjection {
+ substs,
+ term: tcx.lift(self.term).expect("type must lift when substs do"),
+ item_def_id: self.item_def_id,
+ })
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::PredicateKind<'a> {
+ type Lifted = ty::PredicateKind<'tcx>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ match self {
+ ty::PredicateKind::Trait(data) => tcx.lift(data).map(ty::PredicateKind::Trait),
+ ty::PredicateKind::Subtype(data) => tcx.lift(data).map(ty::PredicateKind::Subtype),
+ ty::PredicateKind::Coerce(data) => tcx.lift(data).map(ty::PredicateKind::Coerce),
+ ty::PredicateKind::RegionOutlives(data) => {
+ tcx.lift(data).map(ty::PredicateKind::RegionOutlives)
+ }
+ ty::PredicateKind::TypeOutlives(data) => {
+ tcx.lift(data).map(ty::PredicateKind::TypeOutlives)
+ }
+ ty::PredicateKind::Projection(data) => {
+ tcx.lift(data).map(ty::PredicateKind::Projection)
+ }
+ ty::PredicateKind::WellFormed(ty) => tcx.lift(ty).map(ty::PredicateKind::WellFormed),
+ ty::PredicateKind::ClosureKind(closure_def_id, closure_substs, kind) => {
+ tcx.lift(closure_substs).map(|closure_substs| {
+ ty::PredicateKind::ClosureKind(closure_def_id, closure_substs, kind)
+ })
+ }
+ ty::PredicateKind::ObjectSafe(trait_def_id) => {
+ Some(ty::PredicateKind::ObjectSafe(trait_def_id))
+ }
+ ty::PredicateKind::ConstEvaluatable(uv) => {
+ tcx.lift(uv).map(|uv| ty::PredicateKind::ConstEvaluatable(uv))
+ }
+ ty::PredicateKind::ConstEquate(c1, c2) => {
+ tcx.lift((c1, c2)).map(|(c1, c2)| ty::PredicateKind::ConstEquate(c1, c2))
+ }
+ ty::PredicateKind::TypeWellFormedFromEnv(ty) => {
+ tcx.lift(ty).map(ty::PredicateKind::TypeWellFormedFromEnv)
+ }
+ }
+ }
+}
+
+impl<'a, 'tcx, T: Lift<'tcx>> Lift<'tcx> for ty::Binder<'a, T>
+where
+ <T as Lift<'tcx>>::Lifted: TypeVisitable<'tcx>,
+{
+ type Lifted = ty::Binder<'tcx, T::Lifted>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ let bound_vars = tcx.lift(self.bound_vars());
+ tcx.lift(self.skip_binder())
+ .zip(bound_vars)
+ .map(|(value, vars)| ty::Binder::bind_with_vars(value, vars))
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::ParamEnv<'a> {
+ type Lifted = ty::ParamEnv<'tcx>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ tcx.lift(self.caller_bounds())
+ .map(|caller_bounds| ty::ParamEnv::new(caller_bounds, self.reveal(), self.constness()))
+ }
+}
+
+impl<'a, 'tcx, T: Lift<'tcx>> Lift<'tcx> for ty::ParamEnvAnd<'a, T> {
+ type Lifted = ty::ParamEnvAnd<'tcx, T::Lifted>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ tcx.lift(self.param_env).and_then(|param_env| {
+ tcx.lift(self.value).map(|value| ty::ParamEnvAnd { param_env, value })
+ })
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::ClosureSubsts<'a> {
+ type Lifted = ty::ClosureSubsts<'tcx>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ tcx.lift(self.substs).map(|substs| ty::ClosureSubsts { substs })
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::GeneratorSubsts<'a> {
+ type Lifted = ty::GeneratorSubsts<'tcx>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ tcx.lift(self.substs).map(|substs| ty::GeneratorSubsts { substs })
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::adjustment::Adjustment<'a> {
+ type Lifted = ty::adjustment::Adjustment<'tcx>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ let ty::adjustment::Adjustment { kind, target } = self;
+ tcx.lift(kind).and_then(|kind| {
+ tcx.lift(target).map(|target| ty::adjustment::Adjustment { kind, target })
+ })
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::adjustment::Adjust<'a> {
+ type Lifted = ty::adjustment::Adjust<'tcx>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ match self {
+ ty::adjustment::Adjust::NeverToAny => Some(ty::adjustment::Adjust::NeverToAny),
+ ty::adjustment::Adjust::Pointer(ptr) => Some(ty::adjustment::Adjust::Pointer(ptr)),
+ ty::adjustment::Adjust::Deref(overloaded) => {
+ tcx.lift(overloaded).map(ty::adjustment::Adjust::Deref)
+ }
+ ty::adjustment::Adjust::Borrow(autoref) => {
+ tcx.lift(autoref).map(ty::adjustment::Adjust::Borrow)
+ }
+ }
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::adjustment::OverloadedDeref<'a> {
+ type Lifted = ty::adjustment::OverloadedDeref<'tcx>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ tcx.lift(self.region).map(|region| ty::adjustment::OverloadedDeref {
+ region,
+ mutbl: self.mutbl,
+ span: self.span,
+ })
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::adjustment::AutoBorrow<'a> {
+ type Lifted = ty::adjustment::AutoBorrow<'tcx>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ match self {
+ ty::adjustment::AutoBorrow::Ref(r, m) => {
+ tcx.lift(r).map(|r| ty::adjustment::AutoBorrow::Ref(r, m))
+ }
+ ty::adjustment::AutoBorrow::RawPtr(m) => Some(ty::adjustment::AutoBorrow::RawPtr(m)),
+ }
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::GenSig<'a> {
+ type Lifted = ty::GenSig<'tcx>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ tcx.lift((self.resume_ty, self.yield_ty, self.return_ty))
+ .map(|(resume_ty, yield_ty, return_ty)| ty::GenSig { resume_ty, yield_ty, return_ty })
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::FnSig<'a> {
+ type Lifted = ty::FnSig<'tcx>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ tcx.lift(self.inputs_and_output).map(|x| ty::FnSig {
+ inputs_and_output: x,
+ c_variadic: self.c_variadic,
+ unsafety: self.unsafety,
+ abi: self.abi,
+ })
+ }
+}
+
+impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for ty::error::ExpectedFound<T> {
+ type Lifted = ty::error::ExpectedFound<T::Lifted>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ let ty::error::ExpectedFound { expected, found } = self;
+ tcx.lift(expected).and_then(|expected| {
+ tcx.lift(found).map(|found| ty::error::ExpectedFound { expected, found })
+ })
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::error::TypeError<'a> {
+ type Lifted = ty::error::TypeError<'tcx>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ use crate::ty::error::TypeError::*;
+
+ Some(match self {
+ Mismatch => Mismatch,
+ ConstnessMismatch(x) => ConstnessMismatch(x),
+ PolarityMismatch(x) => PolarityMismatch(x),
+ UnsafetyMismatch(x) => UnsafetyMismatch(x),
+ AbiMismatch(x) => AbiMismatch(x),
+ Mutability => Mutability,
+ ArgumentMutability(i) => ArgumentMutability(i),
+ TupleSize(x) => TupleSize(x),
+ FixedArraySize(x) => FixedArraySize(x),
+ ArgCount => ArgCount,
+ FieldMisMatch(x, y) => FieldMisMatch(x, y),
+ RegionsDoesNotOutlive(a, b) => {
+ return tcx.lift((a, b)).map(|(a, b)| RegionsDoesNotOutlive(a, b));
+ }
+ RegionsInsufficientlyPolymorphic(a, b) => {
+ return tcx.lift(b).map(|b| RegionsInsufficientlyPolymorphic(a, b));
+ }
+ RegionsOverlyPolymorphic(a, b) => {
+ return tcx.lift(b).map(|b| RegionsOverlyPolymorphic(a, b));
+ }
+ RegionsPlaceholderMismatch => RegionsPlaceholderMismatch,
+ IntMismatch(x) => IntMismatch(x),
+ FloatMismatch(x) => FloatMismatch(x),
+ Traits(x) => Traits(x),
+ VariadicMismatch(x) => VariadicMismatch(x),
+ CyclicTy(t) => return tcx.lift(t).map(|t| CyclicTy(t)),
+ CyclicConst(ct) => return tcx.lift(ct).map(|ct| CyclicConst(ct)),
+ ProjectionMismatched(x) => ProjectionMismatched(x),
+ ArgumentSorts(x, i) => return tcx.lift(x).map(|x| ArgumentSorts(x, i)),
+ Sorts(x) => return tcx.lift(x).map(Sorts),
+ ExistentialMismatch(x) => return tcx.lift(x).map(ExistentialMismatch),
+ ConstMismatch(x) => return tcx.lift(x).map(ConstMismatch),
+ IntrinsicCast => IntrinsicCast,
+ TargetFeatureCast(x) => TargetFeatureCast(x),
+ ObjectUnsafeCoercion(x) => return tcx.lift(x).map(ObjectUnsafeCoercion),
+ })
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for ty::InstanceDef<'a> {
+ type Lifted = ty::InstanceDef<'tcx>;
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ match self {
+ ty::InstanceDef::Item(def_id) => Some(ty::InstanceDef::Item(def_id)),
+ ty::InstanceDef::VTableShim(def_id) => Some(ty::InstanceDef::VTableShim(def_id)),
+ ty::InstanceDef::ReifyShim(def_id) => Some(ty::InstanceDef::ReifyShim(def_id)),
+ ty::InstanceDef::Intrinsic(def_id) => Some(ty::InstanceDef::Intrinsic(def_id)),
+ ty::InstanceDef::FnPtrShim(def_id, ty) => {
+ Some(ty::InstanceDef::FnPtrShim(def_id, tcx.lift(ty)?))
+ }
+ ty::InstanceDef::Virtual(def_id, n) => Some(ty::InstanceDef::Virtual(def_id, n)),
+ ty::InstanceDef::ClosureOnceShim { call_once, track_caller } => {
+ Some(ty::InstanceDef::ClosureOnceShim { call_once, track_caller })
+ }
+ ty::InstanceDef::DropGlue(def_id, ty) => {
+ Some(ty::InstanceDef::DropGlue(def_id, tcx.lift(ty)?))
+ }
+ ty::InstanceDef::CloneShim(def_id, ty) => {
+ Some(ty::InstanceDef::CloneShim(def_id, tcx.lift(ty)?))
+ }
+ }
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// TypeFoldable implementations.
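+//
+// A `TypeFoldable` rebuilds a value through a folder, while a
+// `TypeVisitable` walks it read-only. As an illustrative sketch (not an
+// API defined in this file):
+//
+//     value.try_fold_with(&mut folder)?;  // rebuild, possibly failing
+//     value.visit_with(&mut visitor);     // inspect, short-circuiting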
+
+/// `AdtDef`s are basically the same as a `DefId`, so there is nothing to fold.
+impl<'tcx> TypeFoldable<'tcx> for ty::AdtDef<'tcx> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, _folder: &mut F) -> Result<Self, F::Error> {
+ Ok(self)
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for ty::AdtDef<'tcx> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, _visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ ControlFlow::CONTINUE
+ }
+}
+
+impl<'tcx, T: TypeFoldable<'tcx>, U: TypeFoldable<'tcx>> TypeFoldable<'tcx> for (T, U) {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(
+ self,
+ folder: &mut F,
+ ) -> Result<(T, U), F::Error> {
+ Ok((self.0.try_fold_with(folder)?, self.1.try_fold_with(folder)?))
+ }
+}
+
+impl<'tcx, T: TypeVisitable<'tcx>, U: TypeVisitable<'tcx>> TypeVisitable<'tcx> for (T, U) {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ self.0.visit_with(visitor)?;
+ self.1.visit_with(visitor)
+ }
+}
+
+impl<'tcx, A: TypeFoldable<'tcx>, B: TypeFoldable<'tcx>, C: TypeFoldable<'tcx>> TypeFoldable<'tcx>
+ for (A, B, C)
+{
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(
+ self,
+ folder: &mut F,
+ ) -> Result<(A, B, C), F::Error> {
+ Ok((
+ self.0.try_fold_with(folder)?,
+ self.1.try_fold_with(folder)?,
+ self.2.try_fold_with(folder)?,
+ ))
+ }
+}
+
+impl<'tcx, A: TypeVisitable<'tcx>, B: TypeVisitable<'tcx>, C: TypeVisitable<'tcx>>
+ TypeVisitable<'tcx> for (A, B, C)
+{
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ self.0.visit_with(visitor)?;
+ self.1.visit_with(visitor)?;
+ self.2.visit_with(visitor)
+ }
+}
+
+EnumTypeTraversalImpl! {
+ impl<'tcx, T> TypeFoldable<'tcx> for Option<T> {
+ (Some)(a),
+ (None),
+ } where T: TypeFoldable<'tcx>
+}
+EnumTypeTraversalImpl! {
+ impl<'tcx, T> TypeVisitable<'tcx> for Option<T> {
+ (Some)(a),
+ (None),
+ } where T: TypeVisitable<'tcx>
+}
+
+EnumTypeTraversalImpl! {
+ impl<'tcx, T, E> TypeFoldable<'tcx> for Result<T, E> {
+ (Ok)(a),
+ (Err)(a),
+ } where T: TypeFoldable<'tcx>, E: TypeFoldable<'tcx>,
+}
+EnumTypeTraversalImpl! {
+ impl<'tcx, T, E> TypeVisitable<'tcx> for Result<T, E> {
+ (Ok)(a),
+ (Err)(a),
+ } where T: TypeVisitable<'tcx>, E: TypeVisitable<'tcx>,
+}
+
+impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Rc<T> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(
+ mut self,
+ folder: &mut F,
+ ) -> Result<Self, F::Error> {
+ // We merely want to replace the contained `T`, if at all possible,
+ // so that we don't needlessly allocate a new `Rc` or indeed clone
+ // the contained type.
+ unsafe {
+ // First step is to ensure that we have a unique reference to
+ // the contained type, which `Rc::make_mut` will accomplish (by
+ // allocating a new `Rc` and cloning the `T` only if required).
+ // This is done *before* casting to `Rc<ManuallyDrop<T>>` so that
+ // panicking during `make_mut` does not leak the `T`.
+ Rc::make_mut(&mut self);
+
+ // Casting to `Rc<ManuallyDrop<T>>` is safe because `ManuallyDrop`
+ // is `repr(transparent)`.
+ let ptr = Rc::into_raw(self).cast::<ManuallyDrop<T>>();
+ let mut unique = Rc::from_raw(ptr);
+
+ // Call to `Rc::make_mut` above guarantees that `unique` is the
+ // sole reference to the contained value, so we can avoid doing
+ // a checked `get_mut` here.
+ let slot = Rc::get_mut_unchecked(&mut unique);
+
+ // Semantically move the contained type out from `unique`, fold
+ // it, then move the folded value back into `unique`. Should
+ // folding fail, `ManuallyDrop` ensures that the "moved-out"
+ // value is not re-dropped.
+ let owned = ManuallyDrop::take(slot);
+ let folded = owned.try_fold_with(folder)?;
+ *slot = ManuallyDrop::new(folded);
+
+ // Cast back to `Rc<T>`.
+ Ok(Rc::from_raw(Rc::into_raw(unique).cast()))
+ }
+ }
+}
+
+impl<'tcx, T: TypeVisitable<'tcx>> TypeVisitable<'tcx> for Rc<T> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ (**self).visit_with(visitor)
+ }
+}
+
+impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Arc<T> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(
+ mut self,
+ folder: &mut F,
+ ) -> Result<Self, F::Error> {
+ // We merely want to replace the contained `T`, if at all possible,
+ // so that we don't needlessly allocate a new `Arc` or indeed clone
+ // the contained type.
+ unsafe {
+ // First step is to ensure that we have a unique reference to
+ // the contained type, which `Arc::make_mut` will accomplish (by
+ // allocating a new `Arc` and cloning the `T` only if required).
+ // This is done *before* casting to `Arc<ManuallyDrop<T>>` so that
+ // panicking during `make_mut` does not leak the `T`.
+ Arc::make_mut(&mut self);
+
+ // Casting to `Arc<ManuallyDrop<T>>` is safe because `ManuallyDrop`
+ // is `repr(transparent)`.
+ let ptr = Arc::into_raw(self).cast::<ManuallyDrop<T>>();
+ let mut unique = Arc::from_raw(ptr);
+
+ // Call to `Arc::make_mut` above guarantees that `unique` is the
+ // sole reference to the contained value, so we can avoid doing
+ // a checked `get_mut` here.
+ let slot = Arc::get_mut_unchecked(&mut unique);
+
+ // Semantically move the contained type out from `unique`, fold
+ // it, then move the folded value back into `unique`. Should
+ // folding fail, `ManuallyDrop` ensures that the "moved-out"
+ // value is not re-dropped.
+ let owned = ManuallyDrop::take(slot);
+ let folded = owned.try_fold_with(folder)?;
+ *slot = ManuallyDrop::new(folded);
+
+ // Cast back to `Arc<T>`.
+ Ok(Arc::from_raw(Arc::into_raw(unique).cast()))
+ }
+ }
+}
+
+impl<'tcx, T: TypeVisitable<'tcx>> TypeVisitable<'tcx> for Arc<T> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ (**self).visit_with(visitor)
+ }
+}
+
+impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Box<T> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ self.try_map_id(|value| value.try_fold_with(folder))
+ }
+}
+
+impl<'tcx, T: TypeVisitable<'tcx>> TypeVisitable<'tcx> for Box<T> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ (**self).visit_with(visitor)
+ }
+}
+
+impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Vec<T> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ self.try_map_id(|t| t.try_fold_with(folder))
+ }
+}
+
+impl<'tcx, T: TypeVisitable<'tcx>> TypeVisitable<'tcx> for Vec<T> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ self.iter().try_for_each(|t| t.visit_with(visitor))
+ }
+}
+
+impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Box<[T]> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ self.try_map_id(|t| t.try_fold_with(folder))
+ }
+}
+
+impl<'tcx, T: TypeVisitable<'tcx>> TypeVisitable<'tcx> for Box<[T]> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ self.iter().try_for_each(|t| t.visit_with(visitor))
+ }
+}
+
+impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for ty::EarlyBinder<T> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ self.try_map_bound(|ty| ty.try_fold_with(folder))
+ }
+}
+
+impl<'tcx, T: TypeVisitable<'tcx>> TypeVisitable<'tcx> for ty::EarlyBinder<T> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ self.as_ref().0.visit_with(visitor)
+ }
+}
+
+impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for ty::Binder<'tcx, T> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ folder.try_fold_binder(self)
+ }
+}
+
+impl<'tcx, T: TypeVisitable<'tcx>> TypeVisitable<'tcx> for ty::Binder<'tcx, T> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ visitor.visit_binder(self)
+ }
+}
+
+impl<'tcx, T: TypeFoldable<'tcx>> TypeSuperFoldable<'tcx> for ty::Binder<'tcx, T> {
+ fn try_super_fold_with<F: FallibleTypeFolder<'tcx>>(
+ self,
+ folder: &mut F,
+ ) -> Result<Self, F::Error> {
+ self.try_map_bound(|ty| ty.try_fold_with(folder))
+ }
+}
+
+impl<'tcx, T: TypeVisitable<'tcx>> TypeSuperVisitable<'tcx> for ty::Binder<'tcx, T> {
+ fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ self.as_ref().skip_binder().visit_with(visitor)
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List<ty::Binder<'tcx, ty::ExistentialPredicate<'tcx>>> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ ty::util::fold_list(self, folder, |tcx, v| tcx.intern_poly_existential_predicates(v))
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx>
+ for &'tcx ty::List<ty::Binder<'tcx, ty::ExistentialPredicate<'tcx>>>
+{
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ self.iter().try_for_each(|p| p.visit_with(visitor))
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List<ProjectionKind> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ ty::util::fold_list(self, folder, |tcx, v| tcx.intern_projs(v))
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for &'tcx ty::List<ProjectionKind> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ self.iter().try_for_each(|t| t.visit_with(visitor))
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for ty::instance::Instance<'tcx> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ use crate::ty::InstanceDef::*;
+ Ok(Self {
+ substs: self.substs.try_fold_with(folder)?,
+ def: match self.def {
+ Item(def) => Item(def.try_fold_with(folder)?),
+ VTableShim(did) => VTableShim(did.try_fold_with(folder)?),
+ ReifyShim(did) => ReifyShim(did.try_fold_with(folder)?),
+ Intrinsic(did) => Intrinsic(did.try_fold_with(folder)?),
+ FnPtrShim(did, ty) => {
+ FnPtrShim(did.try_fold_with(folder)?, ty.try_fold_with(folder)?)
+ }
+ Virtual(did, i) => Virtual(did.try_fold_with(folder)?, i),
+ ClosureOnceShim { call_once, track_caller } => {
+ ClosureOnceShim { call_once: call_once.try_fold_with(folder)?, track_caller }
+ }
+ DropGlue(did, ty) => {
+ DropGlue(did.try_fold_with(folder)?, ty.try_fold_with(folder)?)
+ }
+ CloneShim(did, ty) => {
+ CloneShim(did.try_fold_with(folder)?, ty.try_fold_with(folder)?)
+ }
+ },
+ })
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for ty::instance::Instance<'tcx> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ use crate::ty::InstanceDef::*;
+ self.substs.visit_with(visitor)?;
+ match self.def {
+ Item(def) => def.visit_with(visitor),
+ VTableShim(did) | ReifyShim(did) | Intrinsic(did) | Virtual(did, _) => {
+ did.visit_with(visitor)
+ }
+ FnPtrShim(did, ty) | CloneShim(did, ty) => {
+ did.visit_with(visitor)?;
+ ty.visit_with(visitor)
+ }
+ DropGlue(did, ty) => {
+ did.visit_with(visitor)?;
+ ty.visit_with(visitor)
+ }
+ ClosureOnceShim { call_once, track_caller: _ } => call_once.visit_with(visitor),
+ }
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for interpret::GlobalId<'tcx> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ Ok(Self { instance: self.instance.try_fold_with(folder)?, promoted: self.promoted })
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for interpret::GlobalId<'tcx> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ self.instance.visit_with(visitor)
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for Ty<'tcx> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ folder.try_fold_ty(self)
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for Ty<'tcx> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ visitor.visit_ty(*self)
+ }
+}
+
+impl<'tcx> TypeSuperFoldable<'tcx> for Ty<'tcx> {
+ fn try_super_fold_with<F: FallibleTypeFolder<'tcx>>(
+ self,
+ folder: &mut F,
+ ) -> Result<Self, F::Error> {
+ let kind = match *self.kind() {
+ ty::RawPtr(tm) => ty::RawPtr(tm.try_fold_with(folder)?),
+ ty::Array(typ, sz) => ty::Array(typ.try_fold_with(folder)?, sz.try_fold_with(folder)?),
+ ty::Slice(typ) => ty::Slice(typ.try_fold_with(folder)?),
+ ty::Adt(tid, substs) => ty::Adt(tid, substs.try_fold_with(folder)?),
+ ty::Dynamic(trait_ty, region) => {
+ ty::Dynamic(trait_ty.try_fold_with(folder)?, region.try_fold_with(folder)?)
+ }
+ ty::Tuple(ts) => ty::Tuple(ts.try_fold_with(folder)?),
+ ty::FnDef(def_id, substs) => ty::FnDef(def_id, substs.try_fold_with(folder)?),
+ ty::FnPtr(f) => ty::FnPtr(f.try_fold_with(folder)?),
+ ty::Ref(r, ty, mutbl) => {
+ ty::Ref(r.try_fold_with(folder)?, ty.try_fold_with(folder)?, mutbl)
+ }
+ ty::Generator(did, substs, movability) => {
+ ty::Generator(did, substs.try_fold_with(folder)?, movability)
+ }
+ ty::GeneratorWitness(types) => ty::GeneratorWitness(types.try_fold_with(folder)?),
+ ty::Closure(did, substs) => ty::Closure(did, substs.try_fold_with(folder)?),
+ ty::Projection(data) => ty::Projection(data.try_fold_with(folder)?),
+ ty::Opaque(did, substs) => ty::Opaque(did, substs.try_fold_with(folder)?),
+
+ ty::Bool
+ | ty::Char
+ | ty::Str
+ | ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_)
+ | ty::Error(_)
+ | ty::Infer(_)
+ | ty::Param(..)
+ | ty::Bound(..)
+ | ty::Placeholder(..)
+ | ty::Never
+ | ty::Foreign(..) => return Ok(self),
+ };
+
+ Ok(if *self.kind() == kind { self } else { folder.tcx().mk_ty(kind) })
+ }
+}
+
+impl<'tcx> TypeSuperVisitable<'tcx> for Ty<'tcx> {
+ fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ match self.kind() {
+ ty::RawPtr(ref tm) => tm.visit_with(visitor),
+ ty::Array(typ, sz) => {
+ typ.visit_with(visitor)?;
+ sz.visit_with(visitor)
+ }
+ ty::Slice(typ) => typ.visit_with(visitor),
+ ty::Adt(_, substs) => substs.visit_with(visitor),
+ ty::Dynamic(ref trait_ty, ref reg) => {
+ trait_ty.visit_with(visitor)?;
+ reg.visit_with(visitor)
+ }
+ ty::Tuple(ts) => ts.visit_with(visitor),
+ ty::FnDef(_, substs) => substs.visit_with(visitor),
+ ty::FnPtr(ref f) => f.visit_with(visitor),
+ ty::Ref(r, ty, _) => {
+ r.visit_with(visitor)?;
+ ty.visit_with(visitor)
+ }
+ ty::Generator(_did, ref substs, _) => substs.visit_with(visitor),
+ ty::GeneratorWitness(ref types) => types.visit_with(visitor),
+ ty::Closure(_did, ref substs) => substs.visit_with(visitor),
+ ty::Projection(ref data) => data.visit_with(visitor),
+ ty::Opaque(_, ref substs) => substs.visit_with(visitor),
+
+ ty::Bool
+ | ty::Char
+ | ty::Str
+ | ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_)
+ | ty::Error(_)
+ | ty::Infer(_)
+ | ty::Bound(..)
+ | ty::Placeholder(..)
+ | ty::Param(..)
+ | ty::Never
+ | ty::Foreign(..) => ControlFlow::CONTINUE,
+ }
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for ty::Region<'tcx> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ folder.try_fold_region(self)
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for ty::Region<'tcx> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ visitor.visit_region(*self)
+ }
+}
+
+impl<'tcx> TypeSuperFoldable<'tcx> for ty::Region<'tcx> {
+ fn try_super_fold_with<F: FallibleTypeFolder<'tcx>>(
+ self,
+ _folder: &mut F,
+ ) -> Result<Self, F::Error> {
+ Ok(self)
+ }
+}
+
+impl<'tcx> TypeSuperVisitable<'tcx> for ty::Region<'tcx> {
+ fn super_visit_with<V: TypeVisitor<'tcx>>(&self, _visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ ControlFlow::CONTINUE
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for ty::Predicate<'tcx> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ folder.try_fold_predicate(self)
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for ty::Predicate<'tcx> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ visitor.visit_predicate(*self)
+ }
+
+ #[inline]
+ fn has_vars_bound_at_or_above(&self, binder: ty::DebruijnIndex) -> bool {
+ self.outer_exclusive_binder() > binder
+ }
+
+ #[inline]
+ fn has_type_flags(&self, flags: ty::TypeFlags) -> bool {
+ self.flags().intersects(flags)
+ }
+}
+
+impl<'tcx> TypeSuperFoldable<'tcx> for ty::Predicate<'tcx> {
+ fn try_super_fold_with<F: FallibleTypeFolder<'tcx>>(
+ self,
+ folder: &mut F,
+ ) -> Result<Self, F::Error> {
+ let new = self.kind().try_fold_with(folder)?;
+ Ok(folder.tcx().reuse_or_mk_predicate(self, new))
+ }
+}
+
+impl<'tcx> TypeSuperVisitable<'tcx> for ty::Predicate<'tcx> {
+ fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ self.kind().visit_with(visitor)
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List<ty::Predicate<'tcx>> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ ty::util::fold_list(self, folder, |tcx, v| tcx.intern_predicates(v))
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for &'tcx ty::List<ty::Predicate<'tcx>> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ self.iter().try_for_each(|p| p.visit_with(visitor))
+ }
+}
+
+impl<'tcx, T: TypeFoldable<'tcx>, I: Idx> TypeFoldable<'tcx> for IndexVec<I, T> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ self.try_map_id(|x| x.try_fold_with(folder))
+ }
+}
+
+impl<'tcx, T: TypeVisitable<'tcx>, I: Idx> TypeVisitable<'tcx> for IndexVec<I, T> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ self.iter().try_for_each(|t| t.visit_with(visitor))
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for ty::Const<'tcx> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ folder.try_fold_const(self)
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for ty::Const<'tcx> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ visitor.visit_const(*self)
+ }
+}
+
+impl<'tcx> TypeSuperFoldable<'tcx> for ty::Const<'tcx> {
+ fn try_super_fold_with<F: FallibleTypeFolder<'tcx>>(
+ self,
+ folder: &mut F,
+ ) -> Result<Self, F::Error> {
+ let ty = self.ty().try_fold_with(folder)?;
+ let kind = self.kind().try_fold_with(folder)?;
+ if ty != self.ty() || kind != self.kind() {
+ Ok(folder.tcx().mk_const(ty::ConstS { ty, kind }))
+ } else {
+ Ok(self)
+ }
+ }
+}
+
+impl<'tcx> TypeSuperVisitable<'tcx> for ty::Const<'tcx> {
+ fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ self.ty().visit_with(visitor)?;
+ self.kind().visit_with(visitor)
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for ty::ConstKind<'tcx> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ Ok(match self {
+ ty::ConstKind::Infer(ic) => ty::ConstKind::Infer(ic.try_fold_with(folder)?),
+ ty::ConstKind::Param(p) => ty::ConstKind::Param(p.try_fold_with(folder)?),
+ ty::ConstKind::Unevaluated(uv) => ty::ConstKind::Unevaluated(uv.try_fold_with(folder)?),
+ ty::ConstKind::Value(_)
+ | ty::ConstKind::Bound(..)
+ | ty::ConstKind::Placeholder(..)
+ | ty::ConstKind::Error(_) => self,
+ })
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for ty::ConstKind<'tcx> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ match *self {
+ ty::ConstKind::Infer(ic) => ic.visit_with(visitor),
+ ty::ConstKind::Param(p) => p.visit_with(visitor),
+ ty::ConstKind::Unevaluated(uv) => uv.visit_with(visitor),
+ ty::ConstKind::Value(_)
+ | ty::ConstKind::Bound(..)
+ | ty::ConstKind::Placeholder(_)
+ | ty::ConstKind::Error(_) => ControlFlow::CONTINUE,
+ }
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for InferConst<'tcx> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, _folder: &mut F) -> Result<Self, F::Error> {
+ Ok(self)
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for InferConst<'tcx> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, _visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ ControlFlow::CONTINUE
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for ty::Unevaluated<'tcx> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ folder.try_fold_unevaluated(self)
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for ty::Unevaluated<'tcx> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ visitor.visit_unevaluated(*self)
+ }
+}
+
+impl<'tcx> TypeSuperFoldable<'tcx> for ty::Unevaluated<'tcx> {
+ fn try_super_fold_with<F: FallibleTypeFolder<'tcx>>(
+ self,
+ folder: &mut F,
+ ) -> Result<Self, F::Error> {
+ Ok(ty::Unevaluated {
+ def: self.def,
+ substs: self.substs.try_fold_with(folder)?,
+ promoted: self.promoted,
+ })
+ }
+}
+
+impl<'tcx> TypeSuperVisitable<'tcx> for ty::Unevaluated<'tcx> {
+ fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ self.substs.visit_with(visitor)
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for ty::Unevaluated<'tcx, ()> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ Ok(self.expand().try_fold_with(folder)?.shrink())
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for ty::Unevaluated<'tcx, ()> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ self.expand().visit_with(visitor)
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for hir::Constness {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, _: &mut F) -> Result<Self, F::Error> {
+ Ok(self)
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for hir::Constness {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, _: &mut V) -> ControlFlow<V::BreakTy> {
+ ControlFlow::CONTINUE
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/sty.rs b/compiler/rustc_middle/src/ty/sty.rs
new file mode 100644
index 000000000..52c3a3886
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/sty.rs
@@ -0,0 +1,2295 @@
+//! This module contains `TyKind` and its major components.
+
+#![allow(rustc::usage_of_ty_tykind)]
+
+use crate::infer::canonical::Canonical;
+use crate::ty::subst::{GenericArg, InternalSubsts, Subst, SubstsRef};
+use crate::ty::visit::ValidateBoundVars;
+use crate::ty::InferTy::*;
+use crate::ty::{
+ self, AdtDef, DefIdTree, Discr, Term, Ty, TyCtxt, TypeFlags, TypeSuperVisitable, TypeVisitable,
+ TypeVisitor,
+};
+use crate::ty::{List, ParamEnv};
+use polonius_engine::Atom;
+use rustc_data_structures::captures::Captures;
+use rustc_data_structures::intern::Interned;
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use rustc_index::vec::Idx;
+use rustc_macros::HashStable;
+use rustc_span::symbol::{kw, Symbol};
+use rustc_target::abi::VariantIdx;
+use rustc_target::spec::abi;
+use std::borrow::Cow;
+use std::cmp::Ordering;
+use std::fmt;
+use std::marker::PhantomData;
+use std::ops::{ControlFlow, Deref, Range};
+use ty::util::IntTypeExt;
+
+use rustc_type_ir::sty::TyKind::*;
+use rustc_type_ir::RegionKind as IrRegionKind;
+use rustc_type_ir::TyKind as IrTyKind;
+
+// Re-export the `TyKind` from `rustc_type_ir` here for convenience
+#[rustc_diagnostic_item = "TyKind"]
+pub type TyKind<'tcx> = IrTyKind<TyCtxt<'tcx>>;
+pub type RegionKind<'tcx> = IrRegionKind<TyCtxt<'tcx>>;
+
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)]
+#[derive(HashStable, TypeFoldable, TypeVisitable, Lift)]
+pub struct TypeAndMut<'tcx> {
+ pub ty: Ty<'tcx>,
+ pub mutbl: hir::Mutability,
+}
+
+#[derive(Clone, PartialEq, PartialOrd, Eq, Ord, Hash, TyEncodable, TyDecodable, Copy)]
+#[derive(HashStable)]
+/// A "free" region `fr` can be interpreted as "some region
+/// at least as big as the scope `fr.scope`".
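+/// For example (an illustrative sketch only):
+/// ```ignore (illustrative)
+/// fn foo<'a>(x: &'a u32) {} // `'a` is a free region whose scope is `foo`
+/// ```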
+pub struct FreeRegion {
+ pub scope: DefId,
+ pub bound_region: BoundRegionKind,
+}
+
+#[derive(Clone, PartialEq, PartialOrd, Eq, Ord, Hash, TyEncodable, TyDecodable, Copy)]
+#[derive(HashStable)]
+pub enum BoundRegionKind {
+ /// An anonymous region parameter for a given fn (&T)
+ BrAnon(u32),
+
+ /// Named region parameters for functions (`'a` in `&'a T`)
+ ///
+ /// The `DefId` is needed to distinguish free regions in
+ /// the event of shadowing.
+ BrNamed(DefId, Symbol),
+
+ /// Anonymous region for the implicit env pointer parameter
+ /// to a closure
+ BrEnv,
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Hash, TyEncodable, TyDecodable, Debug, PartialOrd, Ord)]
+#[derive(HashStable)]
+pub struct BoundRegion {
+ pub var: BoundVar,
+ pub kind: BoundRegionKind,
+}
+
+impl BoundRegionKind {
+ pub fn is_named(&self) -> bool {
+ match *self {
+ BoundRegionKind::BrNamed(_, name) => name != kw::UnderscoreLifetime,
+ _ => false,
+ }
+ }
+}
+
+pub trait Article {
+ fn article(&self) -> &'static str;
+}
+
+impl<'tcx> Article for TyKind<'tcx> {
+ /// Get the article ("a" or "an") to use with this type.
+ fn article(&self) -> &'static str {
+ match self {
+ Int(_) | Float(_) | Array(_, _) => "an",
+ Adt(def, _) if def.is_enum() => "an",
+ // This should never happen, but ICEing and causing the user's code
+ // to not compile felt too harsh.
+ Error(_) => "a",
+ _ => "a",
+ }
+ }
+}
+
+// `TyKind` is used a lot. Make sure it doesn't unintentionally get bigger.
+#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
+static_assert_size!(TyKind<'_>, 32);
+
+/// A closure can be modeled as a struct that looks like:
+/// ```ignore (illustrative)
+/// struct Closure<'l0...'li, T0...Tj, CK, CS, U>(...U);
+/// ```
+/// where:
+///
+/// - 'l0...'li and T0...Tj are the generic parameters
+/// in scope on the function that defined the closure,
+/// - CK represents the *closure kind* (Fn vs FnMut vs FnOnce). This
+/// is rather hackily encoded via a scalar type. See
+/// `Ty::to_opt_closure_kind` for details.
+/// - CS represents the *closure signature*, represented as a `fn()`
+/// type. For example, `fn(u32, u32) -> u32` would mean that the closure
+/// implements `CK<(u32, u32), Output = u32>`, where `CK` is the trait
+/// specified above.
+/// - U is a type parameter representing the types of its upvars, tupled up
+/// (borrowed, if appropriate; that is, if a U field represents a by-ref upvar,
+/// and the up-var has the type `Foo`, then that field of U will be `&Foo`).
+///
+/// So, for example, given this function:
+/// ```ignore (illustrative)
+/// fn foo<'a, T>(data: &'a mut T) {
+/// do(|| data.count += 1)
+/// }
+/// ```
+/// the type of the closure would be something like:
+/// ```ignore (illustrative)
+/// struct Closure<'a, T, U>(...U);
+/// ```
+/// Note that the type of the upvar is not specified in the struct.
+/// You may wonder how the impl would then be able to use the upvar,
+/// if it doesn't know its type? The answer is that the impl is
+/// (conceptually) not fully generic over Closure but rather tied to
+/// instances with the expected upvar types:
+/// ```ignore (illustrative)
+/// impl<'b, 'a, T> FnMut() for Closure<'a, T, (&'b mut &'a mut T,)> {
+/// ...
+/// }
+/// ```
+/// You can see that the *impl* fully specified the type of the upvar
+/// and thus knows full well that `data` has type `&'b mut &'a mut T`.
+/// (Here, I am assuming that `data` is mut-borrowed.)
+///
+/// Now, the last question you may ask is: Why include the upvar types
+/// in an extra type parameter? The reason for this design is that the
+/// upvar types can reference lifetimes that are internal to the
+/// creating function. In my example above, for example, the lifetime
+/// `'b` represents the scope of the closure itself; this is some
+/// subset of `foo`, probably just the scope of the call to
+/// `do()`. If we just had the lifetime/type parameters from the
+/// enclosing function, we couldn't name this lifetime `'b`. Note that
+/// there can also be lifetimes in the types of the upvars themselves,
+/// if one of them happens to be a reference to something that the
+/// creating fn owns.
+///
+/// OK, you say, so why not create a more minimal set of parameters
+/// that just includes the extra lifetime parameters? The answer is
+/// primarily that it would be hard --- we don't know at the time when
+/// we create the closure type what the full types of the upvars are,
+/// nor do we know which are borrowed and which are not. In this
+/// design, we can just supply a fresh type parameter and figure that
+/// out later.
+///
+/// All right, you say, but why include the type parameters from the
+/// original function then? The answer is that codegen may need them
+/// when monomorphizing, and they may not appear in the upvars. A
+/// closure could capture no variables but still make use of some
+/// in-scope type parameter with a bound (e.g., if our example above
+/// had an extra `U: Default`, and the closure called `U::default()`).
+///
+/// There is another reason. This design (implicitly) prohibits
+/// closures from capturing themselves (except via a trait
+/// object). This simplifies closure inference considerably, since it
+/// means that when we infer the kind of a closure or its upvars, we
+/// don't have to handle cycles where the decisions we make for
+/// closure C wind up influencing the decisions we ought to make for
+/// closure C (which would then require fixed point iteration to
+/// handle). Plus it fixes an ICE. :P
+///
+/// ## Generators
+///
+/// Generators are handled similarly in `GeneratorSubsts`. The set of
+/// type parameters is similar, but `CK` and `CS` are replaced by the
+/// following type parameters:
+///
+/// * `GS`: The generator's "resume type", which is the type of the
+/// argument passed to `resume`, and the type of `yield` expressions
+/// inside the generator.
+/// * `GY`: The "yield type", which is the type of values passed to
+/// `yield` inside the generator.
+/// * `GR`: The "return type", which is the type of value returned upon
+/// completion of the generator.
+/// * `GW`: The "generator witness".
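+///
+/// In both cases the substs are laid out as the parent's generics followed
+/// by the synthetic parameters, mirroring `split()` below (an illustrative
+/// sketch only):
+/// ```ignore (illustrative)
+/// // closure:   [ parent substs..., CK, CS, U ]
+/// // generator: [ parent substs..., GS, GY, GR, GW, U ]
+/// ```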
+#[derive(Copy, Clone, Debug, TypeFoldable, TypeVisitable)]
+pub struct ClosureSubsts<'tcx> {
+ /// Lifetime and type parameters from the enclosing function,
+ /// concatenated with a tuple containing the types of the upvars.
+ ///
+ /// These are separated out because codegen wants to pass them around
+ /// when monomorphizing.
+ pub substs: SubstsRef<'tcx>,
+}
+
+/// Struct returned by `split()`.
+pub struct ClosureSubstsParts<'tcx, T> {
+ pub parent_substs: &'tcx [GenericArg<'tcx>],
+ pub closure_kind_ty: T,
+ pub closure_sig_as_fn_ptr_ty: T,
+ pub tupled_upvars_ty: T,
+}
+
+impl<'tcx> ClosureSubsts<'tcx> {
+ /// Construct `ClosureSubsts` from `ClosureSubstsParts`, containing `Substs`
+ /// for the closure parent, alongside additional closure-specific components.
+ pub fn new(
+ tcx: TyCtxt<'tcx>,
+ parts: ClosureSubstsParts<'tcx, Ty<'tcx>>,
+ ) -> ClosureSubsts<'tcx> {
+ ClosureSubsts {
+ substs: tcx.mk_substs(
+ parts.parent_substs.iter().copied().chain(
+ [parts.closure_kind_ty, parts.closure_sig_as_fn_ptr_ty, parts.tupled_upvars_ty]
+ .iter()
+ .map(|&ty| ty.into()),
+ ),
+ ),
+ }
+ }
+
+ /// Divides the closure substs into their respective components.
+ /// The ordering assumed here must match that used by `ClosureSubsts::new` above.
+ fn split(self) -> ClosureSubstsParts<'tcx, GenericArg<'tcx>> {
+ match self.substs[..] {
+ [
+ ref parent_substs @ ..,
+ closure_kind_ty,
+ closure_sig_as_fn_ptr_ty,
+ tupled_upvars_ty,
+ ] => ClosureSubstsParts {
+ parent_substs,
+ closure_kind_ty,
+ closure_sig_as_fn_ptr_ty,
+ tupled_upvars_ty,
+ },
+ _ => bug!("closure substs missing synthetics"),
+ }
+ }
+
+ /// Returns `true` only if enough of the synthetic types are known to
+ /// allow using all of the methods on `ClosureSubsts` without panicking.
+ ///
+ /// Used primarily by `ty::print::pretty` to be able to handle closure
+ /// types that haven't had their synthetic types substituted in.
+ pub fn is_valid(self) -> bool {
+ self.substs.len() >= 3
+ && matches!(self.split().tupled_upvars_ty.expect_ty().kind(), Tuple(_))
+ }
+
+ /// Returns the substitutions of the closure's parent.
+ pub fn parent_substs(self) -> &'tcx [GenericArg<'tcx>] {
+ self.split().parent_substs
+ }
+
+ /// Returns an iterator over the types of the paths captured by the closure.
+ /// In case there was a type error in figuring out the types of the captured
+ /// paths, an empty iterator is returned.
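+ /// For instance (illustrative only):
+ /// ```ignore (illustrative)
+ /// // let x: u32 = 0;
+ /// // let c = move || x; // upvar_tys() yields just `u32`
+ /// ```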
+ #[inline]
+ pub fn upvar_tys(self) -> impl Iterator<Item = Ty<'tcx>> + 'tcx {
+ match self.tupled_upvars_ty().kind() {
+ TyKind::Error(_) => None,
+ TyKind::Tuple(..) => Some(self.tupled_upvars_ty().tuple_fields()),
+ TyKind::Infer(_) => bug!("upvar_tys called before capture types are inferred"),
+ ty => bug!("Unexpected representation of upvar types tuple {:?}", ty),
+ }
+ .into_iter()
+ .flatten()
+ }
+
+ /// Returns the tuple type representing the upvars for this closure.
+ #[inline]
+ pub fn tupled_upvars_ty(self) -> Ty<'tcx> {
+ self.split().tupled_upvars_ty.expect_ty()
+ }
+
+ /// Returns the closure kind for this closure; may return a type
+ /// variable during inference. To get the closure kind during
+ /// inference, use `infcx.closure_kind(substs)`.
+ pub fn kind_ty(self) -> Ty<'tcx> {
+ self.split().closure_kind_ty.expect_ty()
+ }
+
+ /// Returns the `fn` pointer type representing the closure signature for this
+ /// closure.
+ // FIXME(eddyb) this should be unnecessary, as the shallowly resolved
+ // type is known at the time of the creation of `ClosureSubsts`,
+ // see `rustc_typeck::check::closure`.
+ pub fn sig_as_fn_ptr_ty(self) -> Ty<'tcx> {
+ self.split().closure_sig_as_fn_ptr_ty.expect_ty()
+ }
+
+ /// Returns the closure kind for this closure; only usable outside
+ /// of an inference context, because in that context we know that
+ /// there are no type variables.
+ ///
+ /// If you have an inference context, use `infcx.closure_kind()`.
+ pub fn kind(self) -> ty::ClosureKind {
+ self.kind_ty().to_opt_closure_kind().unwrap()
+ }
+
+ /// Extracts the signature from the closure.
+ pub fn sig(self) -> ty::PolyFnSig<'tcx> {
+ let ty = self.sig_as_fn_ptr_ty();
+ match ty.kind() {
+ ty::FnPtr(sig) => *sig,
+ _ => bug!("closure_sig_as_fn_ptr_ty is not a fn-ptr: {:?}", ty.kind()),
+ }
+ }
+}
+
+/// Similar to `ClosureSubsts`; see the above documentation for more.
+#[derive(Copy, Clone, Debug, TypeFoldable, TypeVisitable)]
+pub struct GeneratorSubsts<'tcx> {
+ pub substs: SubstsRef<'tcx>,
+}
+
+pub struct GeneratorSubstsParts<'tcx, T> {
+ pub parent_substs: &'tcx [GenericArg<'tcx>],
+ pub resume_ty: T,
+ pub yield_ty: T,
+ pub return_ty: T,
+ pub witness: T,
+ pub tupled_upvars_ty: T,
+}
+
+impl<'tcx> GeneratorSubsts<'tcx> {
+ /// Construct `GeneratorSubsts` from `GeneratorSubstsParts`, containing `Substs`
+ /// for the generator parent, alongside additional generator-specific components.
+ pub fn new(
+ tcx: TyCtxt<'tcx>,
+ parts: GeneratorSubstsParts<'tcx, Ty<'tcx>>,
+ ) -> GeneratorSubsts<'tcx> {
+ GeneratorSubsts {
+ substs: tcx.mk_substs(
+ parts.parent_substs.iter().copied().chain(
+ [
+ parts.resume_ty,
+ parts.yield_ty,
+ parts.return_ty,
+ parts.witness,
+ parts.tupled_upvars_ty,
+ ]
+ .iter()
+ .map(|&ty| ty.into()),
+ ),
+ ),
+ }
+ }
+
+ /// Divides the generator substs into their respective components.
+ /// The ordering assumed here must match that used by `GeneratorSubsts::new` above.
+ fn split(self) -> GeneratorSubstsParts<'tcx, GenericArg<'tcx>> {
+ match self.substs[..] {
+ [ref parent_substs @ .., resume_ty, yield_ty, return_ty, witness, tupled_upvars_ty] => {
+ GeneratorSubstsParts {
+ parent_substs,
+ resume_ty,
+ yield_ty,
+ return_ty,
+ witness,
+ tupled_upvars_ty,
+ }
+ }
+ _ => bug!("generator substs missing synthetics"),
+ }
+ }
+
+ /// Returns `true` only if enough of the synthetic types are known to
+ /// allow using all of the methods on `GeneratorSubsts` without panicking.
+ ///
+ /// Used primarily by `ty::print::pretty` to be able to handle generator
+ /// types that haven't had their synthetic types substituted in.
+ pub fn is_valid(self) -> bool {
+ self.substs.len() >= 5
+ && matches!(self.split().tupled_upvars_ty.expect_ty().kind(), Tuple(_))
+ }
+
+ /// Returns the substitutions of the generator's parent.
+ pub fn parent_substs(self) -> &'tcx [GenericArg<'tcx>] {
+ self.split().parent_substs
+ }
+
+ /// This describes the types that can be contained in a generator.
+ /// It will be a type variable initially and unified in the last stages of typeck of a body.
+ /// It contains a tuple of all the types that could end up on a generator frame.
+ /// The state transformation MIR pass may only produce layouts which mention types
+ /// in this tuple. Upvars are not counted here.
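+ ///
+ /// For instance (illustrative only):
+ /// ```ignore (illustrative)
+ /// // let s = String::new();
+ /// // yield;   // `s` is live across this suspension point,
+ /// // drop(s); // so the witness tuple mentions `String`.
+ /// ```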
+ pub fn witness(self) -> Ty<'tcx> {
+ self.split().witness.expect_ty()
+ }
+
+ /// Returns an iterator over the types of the paths captured by the generator.
+ /// In case there was a type error in figuring out the types of the captured
+ /// paths, an empty iterator is returned.
+ #[inline]
+ pub fn upvar_tys(self) -> impl Iterator<Item = Ty<'tcx>> + 'tcx {
+ match self.tupled_upvars_ty().kind() {
+ TyKind::Error(_) => None,
+ TyKind::Tuple(..) => Some(self.tupled_upvars_ty().tuple_fields()),
+ TyKind::Infer(_) => bug!("upvar_tys called before capture types are inferred"),
+ ty => bug!("Unexpected representation of upvar types tuple {:?}", ty),
+ }
+ .into_iter()
+ .flatten()
+ }
+
+ /// Returns the tuple type representing the upvars for this generator.
+ #[inline]
+ pub fn tupled_upvars_ty(self) -> Ty<'tcx> {
+ self.split().tupled_upvars_ty.expect_ty()
+ }
+
+ /// Returns the type representing the resume type of the generator.
+ pub fn resume_ty(self) -> Ty<'tcx> {
+ self.split().resume_ty.expect_ty()
+ }
+
+ /// Returns the type representing the yield type of the generator.
+ pub fn yield_ty(self) -> Ty<'tcx> {
+ self.split().yield_ty.expect_ty()
+ }
+
+ /// Returns the type representing the return type of the generator.
+ pub fn return_ty(self) -> Ty<'tcx> {
+ self.split().return_ty.expect_ty()
+ }
+
+ /// Returns the "generator signature", which consists of its resume,
+ /// yield, and return types.
+ ///
+ /// N.B., some bits of the code prefer to see this wrapped in a
+ /// binder, but it never contains bound regions. Probably this
+ /// function should be removed.
+ pub fn poly_sig(self) -> PolyGenSig<'tcx> {
+ ty::Binder::dummy(self.sig())
+ }
+
+ /// Returns the "generator signature", which consists of its resume, yield
+ /// and return types.
+ pub fn sig(self) -> GenSig<'tcx> {
+ ty::GenSig {
+ resume_ty: self.resume_ty(),
+ yield_ty: self.yield_ty(),
+ return_ty: self.return_ty(),
+ }
+ }
+}
+
+impl<'tcx> GeneratorSubsts<'tcx> {
+ /// Generator has not been resumed yet.
+ pub const UNRESUMED: usize = 0;
+ /// Generator has returned or is completed.
+ pub const RETURNED: usize = 1;
+ /// Generator has been poisoned.
+ pub const POISONED: usize = 2;
+
+ const UNRESUMED_NAME: &'static str = "Unresumed";
+ const RETURNED_NAME: &'static str = "Returned";
+ const POISONED_NAME: &'static str = "Panicked";
+
+ /// The valid variant indices of this generator.
+ #[inline]
+ pub fn variant_range(&self, def_id: DefId, tcx: TyCtxt<'tcx>) -> Range<VariantIdx> {
+ // FIXME requires optimized MIR
+ let num_variants = tcx.generator_layout(def_id).unwrap().variant_fields.len();
+ VariantIdx::new(0)..VariantIdx::new(num_variants)
+ }
+
+ /// The discriminant for the given variant. Panics if the `variant_index` is
+ /// out of range.
+ #[inline]
+ pub fn discriminant_for_variant(
+ &self,
+ def_id: DefId,
+ tcx: TyCtxt<'tcx>,
+ variant_index: VariantIdx,
+ ) -> Discr<'tcx> {
+ // Generators don't support explicit discriminant values, so they are
+ // the same as the variant index.
+ assert!(self.variant_range(def_id, tcx).contains(&variant_index));
+ Discr { val: variant_index.as_usize() as u128, ty: self.discr_ty(tcx) }
+ }
+
+ /// The set of all discriminants for the generator, enumerated with their
+ /// variant indices.
+ #[inline]
+ pub fn discriminants(
+ self,
+ def_id: DefId,
+ tcx: TyCtxt<'tcx>,
+ ) -> impl Iterator<Item = (VariantIdx, Discr<'tcx>)> + Captures<'tcx> {
+ self.variant_range(def_id, tcx).map(move |index| {
+ (index, Discr { val: index.as_usize() as u128, ty: self.discr_ty(tcx) })
+ })
+ }
+
+ /// Returns the display name of the generator state corresponding to the
+ /// given variant index `v`.
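+ /// The mapping is as follows (illustrative):
+ /// ```ignore (illustrative)
+ /// // 0 => "Unresumed", 1 => "Returned", 2 => "Panicked",
+ /// // 3 => "Suspend0", 4 => "Suspend1", ...
+ /// ```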
+ pub fn variant_name(v: VariantIdx) -> Cow<'static, str> {
+ match v.as_usize() {
+ Self::UNRESUMED => Cow::from(Self::UNRESUMED_NAME),
+ Self::RETURNED => Cow::from(Self::RETURNED_NAME),
+ Self::POISONED => Cow::from(Self::POISONED_NAME),
+ _ => Cow::from(format!("Suspend{}", v.as_usize() - 3)),
+ }
+ }
+
+ /// The type of the state discriminant used in the generator type.
+ #[inline]
+ pub fn discr_ty(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
+ tcx.types.u32
+ }
+
+ /// This returns the types of the MIR locals which had to be stored across suspension points.
+ /// It is calculated in rustc_mir_transform::generator::StateTransform.
+ /// All the types here must be in the tuple in GeneratorInterior.
+ ///
+ /// The locals are grouped by their variant number. Note that some locals may
+ /// be repeated in multiple variants.
+ #[inline]
+ pub fn state_tys(
+ self,
+ def_id: DefId,
+ tcx: TyCtxt<'tcx>,
+ ) -> impl Iterator<Item = impl Iterator<Item = Ty<'tcx>> + Captures<'tcx>> {
+ let layout = tcx.generator_layout(def_id).unwrap();
+ layout.variant_fields.iter().map(move |variant| {
+ variant
+ .iter()
+ .map(move |field| EarlyBinder(layout.field_tys[*field]).subst(tcx, self.substs))
+ })
+ }
+
+ /// These are the types of the generator fields that are not stored in a
+ /// variant (i.e., the prefix fields, which are just the upvars).
+ #[inline]
+ pub fn prefix_tys(self) -> impl Iterator<Item = Ty<'tcx>> {
+ self.upvar_tys()
+ }
+}
+
+#[derive(Debug, Copy, Clone, HashStable)]
+pub enum UpvarSubsts<'tcx> {
+ Closure(SubstsRef<'tcx>),
+ Generator(SubstsRef<'tcx>),
+}
+
+impl<'tcx> UpvarSubsts<'tcx> {
+ /// Returns an iterator over the types of the paths captured by the closure/generator.
+ /// In case there was a type error in figuring out the types of the captured
+ /// paths, an empty iterator is returned.
+ #[inline]
+ pub fn upvar_tys(self) -> impl Iterator<Item = Ty<'tcx>> + 'tcx {
+ let tupled_tys = match self {
+ UpvarSubsts::Closure(substs) => substs.as_closure().tupled_upvars_ty(),
+ UpvarSubsts::Generator(substs) => substs.as_generator().tupled_upvars_ty(),
+ };
+
+ match tupled_tys.kind() {
+ TyKind::Error(_) => None,
+ TyKind::Tuple(..) => Some(tupled_tys.tuple_fields()),
+ TyKind::Infer(_) => bug!("upvar_tys called before capture types are inferred"),
+ ty => bug!("Unexpected representation of upvar types tuple {:?}", ty),
+ }
+ .into_iter()
+ .flatten()
+ }
+
+ #[inline]
+ pub fn tupled_upvars_ty(self) -> Ty<'tcx> {
+ match self {
+ UpvarSubsts::Closure(substs) => substs.as_closure().tupled_upvars_ty(),
+ UpvarSubsts::Generator(substs) => substs.as_generator().tupled_upvars_ty(),
+ }
+ }
+}
+
+/// An inline const is modeled like
+/// ```ignore (illustrative)
+/// const InlineConst<'l0...'li, T0...Tj, R>: R;
+/// ```
+/// where:
+///
+/// - 'l0...'li and T0...Tj are the generic parameters
+/// inherited from the item that defined the inline const,
+/// - R represents the type of the constant.
+///
+/// When the inline const is instantiated, `R` is substituted as the actual inferred
+/// type of the constant. The reason that `R` is represented as an extra type parameter
+/// is the same reason that [`ClosureSubsts`] has `CS` and `U` as type parameters:
+/// inline const can reference lifetimes that are internal to the creating function.
+#[derive(Copy, Clone, Debug, TypeFoldable, TypeVisitable)]
+pub struct InlineConstSubsts<'tcx> {
+ /// Generic parameters from the enclosing item,
+ /// concatenated with the inferred type of the constant.
+ pub substs: SubstsRef<'tcx>,
+}
+
+/// Struct returned by `split()`.
+pub struct InlineConstSubstsParts<'tcx, T> {
+ pub parent_substs: &'tcx [GenericArg<'tcx>],
+ pub ty: T,
+}
+
+impl<'tcx> InlineConstSubsts<'tcx> {
+ /// Construct `InlineConstSubsts` from `InlineConstSubstsParts`.
+ pub fn new(
+ tcx: TyCtxt<'tcx>,
+ parts: InlineConstSubstsParts<'tcx, Ty<'tcx>>,
+ ) -> InlineConstSubsts<'tcx> {
+ InlineConstSubsts {
+ substs: tcx.mk_substs(
+ parts.parent_substs.iter().copied().chain(std::iter::once(parts.ty.into())),
+ ),
+ }
+ }
+
+ /// Divides the inline const substs into their respective components.
+ /// The ordering assumed here must match that used by `InlineConstSubsts::new` above.
+ fn split(self) -> InlineConstSubstsParts<'tcx, GenericArg<'tcx>> {
+ match self.substs[..] {
+ [ref parent_substs @ .., ty] => InlineConstSubstsParts { parent_substs, ty },
+ _ => bug!("inline const substs missing synthetics"),
+ }
+ }
+
+ /// Returns the substitutions of the inline const's parent.
+ pub fn parent_substs(self) -> &'tcx [GenericArg<'tcx>] {
+ self.split().parent_substs
+ }
+
+ /// Returns the type of this inline const.
+ pub fn ty(self) -> Ty<'tcx> {
+ self.split().ty.expect_ty()
+ }
+}
+
+#[derive(Debug, Copy, Clone, PartialEq, PartialOrd, Ord, Eq, Hash, TyEncodable, TyDecodable)]
+#[derive(HashStable, TypeFoldable, TypeVisitable)]
+pub enum ExistentialPredicate<'tcx> {
+ /// E.g., `Iterator`.
+ Trait(ExistentialTraitRef<'tcx>),
+ /// E.g., `Iterator::Item = T`.
+ Projection(ExistentialProjection<'tcx>),
+ /// E.g., `Send`.
+ AutoTrait(DefId),
+}
+
+impl<'tcx> ExistentialPredicate<'tcx> {
+ /// Compares via an ordering that will not change if modules are reordered or other changes are
+ /// made to the tree. In particular, this ordering is preserved across incremental compilations.
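+ /// The three kinds are ordered relative to each other as follows
+ /// (illustrative):
+ /// ```ignore (illustrative)
+ /// // Trait(_) < Projection(_) < AutoTrait(_)
+ /// ```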
+ pub fn stable_cmp(&self, tcx: TyCtxt<'tcx>, other: &Self) -> Ordering {
+ use self::ExistentialPredicate::*;
+ match (*self, *other) {
+ (Trait(_), Trait(_)) => Ordering::Equal,
+ (Projection(ref a), Projection(ref b)) => {
+ tcx.def_path_hash(a.item_def_id).cmp(&tcx.def_path_hash(b.item_def_id))
+ }
+ (AutoTrait(ref a), AutoTrait(ref b)) => {
+ tcx.def_path_hash(*a).cmp(&tcx.def_path_hash(*b))
+ }
+ (Trait(_), _) => Ordering::Less,
+ (Projection(_), Trait(_)) => Ordering::Greater,
+ (Projection(_), _) => Ordering::Less,
+ (AutoTrait(_), _) => Ordering::Greater,
+ }
+ }
+}
+
+impl<'tcx> Binder<'tcx, ExistentialPredicate<'tcx>> {
+ pub fn with_self_ty(&self, tcx: TyCtxt<'tcx>, self_ty: Ty<'tcx>) -> ty::Predicate<'tcx> {
+ use crate::ty::ToPredicate;
+ match self.skip_binder() {
+ ExistentialPredicate::Trait(tr) => {
+ self.rebind(tr).with_self_ty(tcx, self_ty).without_const().to_predicate(tcx)
+ }
+ ExistentialPredicate::Projection(p) => {
+ self.rebind(p.with_self_ty(tcx, self_ty)).to_predicate(tcx)
+ }
+ ExistentialPredicate::AutoTrait(did) => {
+ let trait_ref = self.rebind(ty::TraitRef {
+ def_id: did,
+ substs: tcx.mk_substs_trait(self_ty, &[]),
+ });
+ trait_ref.without_const().to_predicate(tcx)
+ }
+ }
+ }
+}
+
+impl<'tcx> List<ty::Binder<'tcx, ExistentialPredicate<'tcx>>> {
+ /// Returns the "principal `DefId`" of this set of existential predicates.
+ ///
+ /// A Rust trait object type consists (in addition to a lifetime bound)
+ /// of a set of trait bounds, which are separated into any number
+ /// of auto-trait bounds, and at most one non-auto-trait bound. The
+ /// non-auto-trait bound is called the "principal" of the trait
+ /// object.
+ ///
+ /// Only the principal can have methods or type parameters (because
+ /// auto traits can have neither of them). This is important, because
+ /// it means the auto traits can be treated as an unordered set (methods
+ /// would force an order for the vtable, while relating traits with
+ /// type parameters without knowing the order to relate them in is
+ /// a rather non-trivial task).
+ ///
+ /// For example, in the trait object `dyn fmt::Debug + Sync`, the
+ /// principal bound is `Some(fmt::Debug)`, while the auto-trait bounds
+ /// are the set `{Sync}`.
+ ///
+ /// It is also possible to have a "trivial" trait object that
+ /// consists only of auto traits, with no principal - for example,
+ /// `dyn Send + Sync`. In that case, the set of auto-trait bounds
+ /// is `{Send, Sync}`, while there is no principal. These trait objects
+ /// have a "trivial" vtable consisting of just the size, alignment,
+ /// and destructor.
+ pub fn principal(&self) -> Option<ty::Binder<'tcx, ExistentialTraitRef<'tcx>>> {
+ self[0]
+ .map_bound(|this| match this {
+ ExistentialPredicate::Trait(tr) => Some(tr),
+ _ => None,
+ })
+ .transpose()
+ }
+
+ pub fn principal_def_id(&self) -> Option<DefId> {
+ self.principal().map(|trait_ref| trait_ref.skip_binder().def_id)
+ }
+
+ #[inline]
+ pub fn projection_bounds<'a>(
+ &'a self,
+ ) -> impl Iterator<Item = ty::Binder<'tcx, ExistentialProjection<'tcx>>> + 'a {
+ self.iter().filter_map(|predicate| {
+ predicate
+ .map_bound(|pred| match pred {
+ ExistentialPredicate::Projection(projection) => Some(projection),
+ _ => None,
+ })
+ .transpose()
+ })
+ }
+
+ #[inline]
+ pub fn auto_traits<'a>(&'a self) -> impl Iterator<Item = DefId> + Captures<'tcx> + 'a {
+ self.iter().filter_map(|predicate| match predicate.skip_binder() {
+ ExistentialPredicate::AutoTrait(did) => Some(did),
+ _ => None,
+ })
+ }
+}
+
+/// A complete reference to a trait. These take numerous guises in syntax,
+/// but perhaps the most recognizable form is in a where-clause:
+/// ```ignore (illustrative)
+/// T: Foo<U>
+/// ```
+/// This would be represented by a trait-reference where the `DefId` is the
+/// `DefId` for the trait `Foo` and the substs define `T` as parameter 0,
+/// and `U` as parameter 1.
+///
+/// Trait references also appear in object types like `Foo<U>`, but in
+/// that case the `Self` parameter is absent from the substitutions.
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, TyEncodable, TyDecodable)]
+#[derive(HashStable, TypeFoldable, TypeVisitable)]
+pub struct TraitRef<'tcx> {
+ pub def_id: DefId,
+ pub substs: SubstsRef<'tcx>,
+}
+
+impl<'tcx> TraitRef<'tcx> {
+ pub fn new(def_id: DefId, substs: SubstsRef<'tcx>) -> TraitRef<'tcx> {
+ TraitRef { def_id, substs }
+ }
+
+ /// Returns a `TraitRef` of the form `P0: Foo<P1..Pn>` where `Pi`
+ /// are the parameters defined on the trait.
+ pub fn identity(tcx: TyCtxt<'tcx>, def_id: DefId) -> Binder<'tcx, TraitRef<'tcx>> {
+ ty::Binder::dummy(TraitRef {
+ def_id,
+ substs: InternalSubsts::identity_for_item(tcx, def_id),
+ })
+ }
+
+ #[inline]
+ pub fn self_ty(&self) -> Ty<'tcx> {
+ self.substs.type_at(0)
+ }
+
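+ /// Builds the `TraitRef` of trait `trait_id` from a method's `substs`,
+ /// keeping only the leading parameters that belong to the trait itself
+ /// and dropping the method's own parameters.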
+ pub fn from_method(
+ tcx: TyCtxt<'tcx>,
+ trait_id: DefId,
+ substs: SubstsRef<'tcx>,
+ ) -> ty::TraitRef<'tcx> {
+ let defs = tcx.generics_of(trait_id);
+ ty::TraitRef { def_id: trait_id, substs: tcx.intern_substs(&substs[..defs.params.len()]) }
+ }
+}
+
+pub type PolyTraitRef<'tcx> = Binder<'tcx, TraitRef<'tcx>>;
+
+impl<'tcx> PolyTraitRef<'tcx> {
+ pub fn self_ty(&self) -> Binder<'tcx, Ty<'tcx>> {
+ self.map_bound_ref(|tr| tr.self_ty())
+ }
+
+ pub fn def_id(&self) -> DefId {
+ self.skip_binder().def_id
+ }
+
+ pub fn to_poly_trait_predicate(&self) -> ty::PolyTraitPredicate<'tcx> {
+ self.map_bound(|trait_ref| ty::TraitPredicate {
+ trait_ref,
+ constness: ty::BoundConstness::NotConst,
+ polarity: ty::ImplPolarity::Positive,
+ })
+ }
+
+ /// Same as [`PolyTraitRef::to_poly_trait_predicate`] but sets a negative polarity instead.
+ pub fn to_poly_trait_predicate_negative_polarity(&self) -> ty::PolyTraitPredicate<'tcx> {
+ self.map_bound(|trait_ref| ty::TraitPredicate {
+ trait_ref,
+ constness: ty::BoundConstness::NotConst,
+ polarity: ty::ImplPolarity::Negative,
+ })
+ }
+}
+
+/// An existential reference to a trait, where `Self` is erased.
+/// For example, the trait object `Trait<'a, 'b, X, Y>` is:
+/// ```ignore (illustrative)
+/// exists T. T: Trait<'a, 'b, X, Y>
+/// ```
+/// The substitutions don't include the erased `Self`, only trait
+/// type and lifetime parameters (`[X, Y]` and `['a, 'b]` above).
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, TyEncodable, TyDecodable)]
+#[derive(HashStable, TypeFoldable, TypeVisitable)]
+pub struct ExistentialTraitRef<'tcx> {
+ pub def_id: DefId,
+ pub substs: SubstsRef<'tcx>,
+}
+
+impl<'tcx> ExistentialTraitRef<'tcx> {
+ pub fn erase_self_ty(
+ tcx: TyCtxt<'tcx>,
+ trait_ref: ty::TraitRef<'tcx>,
+ ) -> ty::ExistentialTraitRef<'tcx> {
+ // Assert there is a Self.
+ trait_ref.substs.type_at(0);
+
+ ty::ExistentialTraitRef {
+ def_id: trait_ref.def_id,
+ substs: tcx.intern_substs(&trait_ref.substs[1..]),
+ }
+ }
+
+ /// Object types don't have a self type specified. Therefore, when
+ /// we convert the principal trait-ref into a normal trait-ref,
+ /// *some* self type must be supplied. A common choice is `mk_err()`
+ /// or some placeholder type.
+ pub fn with_self_ty(&self, tcx: TyCtxt<'tcx>, self_ty: Ty<'tcx>) -> ty::TraitRef<'tcx> {
+ // otherwise the escaping vars would be captured by the binder
+ // debug_assert!(!self_ty.has_escaping_bound_vars());
+
+ ty::TraitRef { def_id: self.def_id, substs: tcx.mk_substs_trait(self_ty, self.substs) }
+ }
+}
+
+pub type PolyExistentialTraitRef<'tcx> = Binder<'tcx, ExistentialTraitRef<'tcx>>;
+
+impl<'tcx> PolyExistentialTraitRef<'tcx> {
+ pub fn def_id(&self) -> DefId {
+ self.skip_binder().def_id
+ }
+
+ /// Object types don't have a self type specified. Therefore, when
+ /// we convert the principal trait-ref into a normal trait-ref,
+ /// you must give *some* self type. A common choice is `mk_err()`
+ /// or some placeholder type.
+ pub fn with_self_ty(&self, tcx: TyCtxt<'tcx>, self_ty: Ty<'tcx>) -> ty::PolyTraitRef<'tcx> {
+ self.map_bound(|trait_ref| trait_ref.with_self_ty(tcx, self_ty))
+ }
+}
+
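+/// A value wrapped in an `EarlyBinder` still mentions the early-bound generic
+/// parameters of the item it came from; calling `subst` (see the `Subst` impl
+/// in `subst.rs`) discharges the binder by substituting those parameters.
+/// Illustrative sketch (hypothetical `def_id` and `substs`):
+/// ```ignore (illustrative)
+/// let sig = tcx.bound_fn_sig(def_id); // EarlyBinder<PolyFnSig<'_>>
+/// let sig = sig.subst(tcx, substs);   // PolyFnSig<'_>
+/// ```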
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
+#[derive(Encodable, Decodable, HashStable)]
+pub struct EarlyBinder<T>(pub T);
+
+impl<T> EarlyBinder<T> {
+ pub fn as_ref(&self) -> EarlyBinder<&T> {
+ EarlyBinder(&self.0)
+ }
+
+ pub fn map_bound_ref<F, U>(&self, f: F) -> EarlyBinder<U>
+ where
+ F: FnOnce(&T) -> U,
+ {
+ self.as_ref().map_bound(f)
+ }
+
+ pub fn map_bound<F, U>(self, f: F) -> EarlyBinder<U>
+ where
+ F: FnOnce(T) -> U,
+ {
+ let value = f(self.0);
+ EarlyBinder(value)
+ }
+
+ pub fn try_map_bound<F, U, E>(self, f: F) -> Result<EarlyBinder<U>, E>
+ where
+ F: FnOnce(T) -> Result<U, E>,
+ {
+ let value = f(self.0)?;
+ Ok(EarlyBinder(value))
+ }
+
+ pub fn rebind<U>(&self, value: U) -> EarlyBinder<U> {
+ EarlyBinder(value)
+ }
+}
+
+impl<T> EarlyBinder<Option<T>> {
+ pub fn transpose(self) -> Option<EarlyBinder<T>> {
+ self.0.map(|v| EarlyBinder(v))
+ }
+}
+
+impl<T, U> EarlyBinder<(T, U)> {
+ pub fn transpose_tuple2(self) -> (EarlyBinder<T>, EarlyBinder<U>) {
+ (EarlyBinder(self.0.0), EarlyBinder(self.0.1))
+ }
+}
+
+pub struct EarlyBinderIter<T> {
+ t: T,
+}
+
+impl<T: IntoIterator> EarlyBinder<T> {
+ pub fn transpose_iter(self) -> EarlyBinderIter<T::IntoIter> {
+ EarlyBinderIter { t: self.0.into_iter() }
+ }
+}
+
+impl<T: Iterator> Iterator for EarlyBinderIter<T> {
+ type Item = EarlyBinder<T::Item>;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ self.t.next().map(|i| EarlyBinder(i))
+ }
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)]
+#[derive(HashStable)]
+pub enum BoundVariableKind {
+ Ty(BoundTyKind),
+ Region(BoundRegionKind),
+ Const,
+}
+
+impl BoundVariableKind {
+ pub fn expect_region(self) -> BoundRegionKind {
+ match self {
+ BoundVariableKind::Region(lt) => lt,
+ _ => bug!("expected a region, but found another kind"),
+ }
+ }
+
+ pub fn expect_ty(self) -> BoundTyKind {
+ match self {
+ BoundVariableKind::Ty(ty) => ty,
+ _ => bug!("expected a type, but found another kind"),
+ }
+ }
+
+ pub fn expect_const(self) {
+ match self {
+ BoundVariableKind::Const => (),
+ _ => bug!("expected a const, but found another kind"),
+ }
+ }
+}
+
+/// `Binder` is a binder for higher-ranked lifetimes or types. It is part of the
+/// compiler's representation for things like `for<'a> Fn(&'a isize)`
+/// (which would be represented by the type `PolyTraitRef ==
+/// Binder<'tcx, TraitRef>`). Note that when we instantiate,
+/// erase, or otherwise "discharge" these bound vars, we change the
+/// type from `Binder<'tcx, T>` to just `T` (see
+/// e.g., `liberate_late_bound_regions`).
+///
+/// `Decodable` and `Encodable` are implemented for `Binder<T>` using the `impl_binder_encode_decode!` macro.
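+///
+/// Illustrative sketch of working with a binder without discharging it
+/// (hypothetical `poly_sig: PolyFnSig<'_>`):
+/// ```ignore (illustrative)
+/// let output = poly_sig.output();       // Binder<'_, Ty<'_>>: stays bound
+/// let abi = poly_sig.skip_binder().abi; // ok: the ABI mentions no bound vars
+/// ```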
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
+#[derive(HashStable)]
+pub struct Binder<'tcx, T>(T, &'tcx List<BoundVariableKind>);
+
+impl<'tcx, T> Binder<'tcx, T>
+where
+ T: TypeVisitable<'tcx>,
+{
+ /// Wraps `value` in a binder, asserting that `value` does not
+ /// contain any bound vars that would be bound by the
+ /// binder. This is commonly used to 'inject' a value T into a
+ /// different binding level.
+ pub fn dummy(value: T) -> Binder<'tcx, T> {
+ assert!(!value.has_escaping_bound_vars());
+ Binder(value, ty::List::empty())
+ }
+
+ pub fn bind_with_vars(value: T, vars: &'tcx List<BoundVariableKind>) -> Binder<'tcx, T> {
+ if cfg!(debug_assertions) {
+ let mut validator = ValidateBoundVars::new(vars);
+ value.visit_with(&mut validator);
+ }
+ Binder(value, vars)
+ }
+}
+
+impl<'tcx, T> Binder<'tcx, T> {
+ /// Skips the binder and returns the "bound" value. This is a
+ /// risky thing to do because it's easy to get confused about
+ /// De Bruijn indices and the like. It is usually better to
+ /// discharge the binder using `no_bound_vars` or
+ /// `replace_late_bound_regions` or something like
+ /// that. `skip_binder` is only valid when you are either
+ /// extracting data that has nothing to do with bound vars, you
+ /// are doing some sort of test that does not involve bound
+ /// regions, or you are being very careful about your depth
+ /// accounting.
+ ///
+ /// Some examples where `skip_binder` is reasonable:
+ ///
+ /// - extracting the `DefId` from a PolyTraitRef;
+ /// - comparing the self type of a PolyTraitRef to see if it is equal to
+ /// a type parameter `X`, since the type `X` does not reference any regions
+ pub fn skip_binder(self) -> T {
+ self.0
+ }
+
+ pub fn bound_vars(&self) -> &'tcx List<BoundVariableKind> {
+ self.1
+ }
+
+ pub fn as_ref(&self) -> Binder<'tcx, &T> {
+ Binder(&self.0, self.1)
+ }
+
+ pub fn as_deref(&self) -> Binder<'tcx, &T::Target>
+ where
+ T: Deref,
+ {
+ Binder(&self.0, self.1)
+ }
+
+ pub fn map_bound_ref_unchecked<F, U>(&self, f: F) -> Binder<'tcx, U>
+ where
+ F: FnOnce(&T) -> U,
+ {
+ let value = f(&self.0);
+ Binder(value, self.1)
+ }
+
+ pub fn map_bound_ref<F, U: TypeVisitable<'tcx>>(&self, f: F) -> Binder<'tcx, U>
+ where
+ F: FnOnce(&T) -> U,
+ {
+ self.as_ref().map_bound(f)
+ }
+
+ pub fn map_bound<F, U: TypeVisitable<'tcx>>(self, f: F) -> Binder<'tcx, U>
+ where
+ F: FnOnce(T) -> U,
+ {
+ let value = f(self.0);
+ if cfg!(debug_assertions) {
+ let mut validator = ValidateBoundVars::new(self.1);
+ value.visit_with(&mut validator);
+ }
+ Binder(value, self.1)
+ }
+
+ pub fn try_map_bound<F, U: TypeVisitable<'tcx>, E>(self, f: F) -> Result<Binder<'tcx, U>, E>
+ where
+ F: FnOnce(T) -> Result<U, E>,
+ {
+ let value = f(self.0)?;
+ if cfg!(debug_assertions) {
+ let mut validator = ValidateBoundVars::new(self.1);
+ value.visit_with(&mut validator);
+ }
+ Ok(Binder(value, self.1))
+ }
+
+ /// Wraps a `value` in a binder, using the same bound variables as the
+ /// current `Binder`. This should not be used if the new value *changes*
+ /// the bound variables. Note: the (old or new) value itself does not
+ /// necessarily need to *name* all the bound variables.
+ ///
+ /// This currently doesn't do anything different from `bind_with_vars`,
+ /// because we don't actually track bound vars. However, semantically, it is
+ /// different because bound vars aren't allowed to change here, whereas they
+ /// are in `bind_with_vars`. This may be (debug) asserted in the future.
+ pub fn rebind<U>(&self, value: U) -> Binder<'tcx, U>
+ where
+ U: TypeVisitable<'tcx>,
+ {
+ if cfg!(debug_assertions) {
+ let mut validator = ValidateBoundVars::new(self.bound_vars());
+ value.visit_with(&mut validator);
+ }
+ Binder(value, self.1)
+ }
+
+ /// Unwraps and returns the value within, but only if it contains
+ /// no bound vars at all. (In other words, if this binder --
+ /// and indeed any enclosing binder -- doesn't bind anything at
+ /// all.) Otherwise, returns `None`.
+ ///
+ /// (One could imagine having a method that just unwraps a single
+ /// binder, permitting late-bound vars bound by enclosing
+ /// binders, but that would require adjusting the De Bruijn
+ /// indices, and given the shallow binding structure we often use,
+ /// would not be that useful.)
+ pub fn no_bound_vars(self) -> Option<T>
+ where
+ T: TypeVisitable<'tcx>,
+ {
+ if self.0.has_escaping_bound_vars() { None } else { Some(self.skip_binder()) }
+ }
+
+ /// Splits the contents into two things that share the same binder
+ /// level as the original, returning two distinct binders.
+ ///
+ /// `f` should consider bound regions at depth 1 to be free, and
+ /// anything it produces with bound regions at depth 1 will be
+ /// bound in the resulting return values.
+ pub fn split<U, V, F>(self, f: F) -> (Binder<'tcx, U>, Binder<'tcx, V>)
+ where
+ F: FnOnce(T) -> (U, V),
+ {
+ let (u, v) = f(self.0);
+ (Binder(u, self.1), Binder(v, self.1))
+ }
+}
+
+impl<'tcx, T> Binder<'tcx, Option<T>> {
+ pub fn transpose(self) -> Option<Binder<'tcx, T>> {
+ let bound_vars = self.1;
+ self.0.map(|v| Binder(v, bound_vars))
+ }
+}
+
+/// Represents the projection of an associated type. In explicit UFCS
+/// form this would be written `<T as Trait<..>>::N`.
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)]
+#[derive(HashStable, TypeFoldable, TypeVisitable)]
+pub struct ProjectionTy<'tcx> {
+ /// The parameters of the associated item.
+ pub substs: SubstsRef<'tcx>,
+
+ /// The `DefId` of the `TraitItem` for the associated type `N`.
+ ///
+ /// Note that this is not the `DefId` of the `TraitRef` containing this
+ /// associated type, which is in `tcx.associated_item(item_def_id).container`,
+ /// aka. `tcx.parent(item_def_id)`.
+ pub item_def_id: DefId,
+}
+
+impl<'tcx> ProjectionTy<'tcx> {
+ pub fn trait_def_id(&self, tcx: TyCtxt<'tcx>) -> DefId {
+ tcx.parent(self.item_def_id)
+ }
+
+ /// Extracts the underlying trait reference and own substs from this projection.
+ /// For example, if this is a projection of `<T as StreamingIterator>::Item<'a>`,
+ /// then this function would return a `T: StreamingIterator` trait reference and `['a]` as the own substs.
+ pub fn trait_ref_and_own_substs(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ ) -> (ty::TraitRef<'tcx>, &'tcx [ty::GenericArg<'tcx>]) {
+ let def_id = tcx.parent(self.item_def_id);
+ let trait_generics = tcx.generics_of(def_id);
+ (
+ ty::TraitRef { def_id, substs: self.substs.truncate_to(tcx, trait_generics) },
+ &self.substs[trait_generics.count()..],
+ )
+ }
+
+ /// Extracts the underlying trait reference from this projection.
+ /// For example, if this is a projection of `<T as Iterator>::Item`,
+ /// then this function would return a `T: Iterator` trait reference.
+ ///
+ /// WARNING: This will drop the substs for generic associated types;
+ /// consider calling [`Self::trait_ref_and_own_substs`] to get those
+ /// as well.
+ pub fn trait_ref(&self, tcx: TyCtxt<'tcx>) -> ty::TraitRef<'tcx> {
+ let def_id = self.trait_def_id(tcx);
+ ty::TraitRef { def_id, substs: self.substs.truncate_to(tcx, tcx.generics_of(def_id)) }
+ }
+
+ pub fn self_ty(&self) -> Ty<'tcx> {
+ self.substs.type_at(0)
+ }
+}
+
+#[derive(Copy, Clone, Debug, TypeFoldable, TypeVisitable)]
+pub struct GenSig<'tcx> {
+ pub resume_ty: Ty<'tcx>,
+ pub yield_ty: Ty<'tcx>,
+ pub return_ty: Ty<'tcx>,
+}
+
+pub type PolyGenSig<'tcx> = Binder<'tcx, GenSig<'tcx>>;
+
+/// Signature of a function type, which we have arbitrarily
+/// decided to use to refer to the input/output types.
+///
+/// - `inputs`: the list of arguments and their modes.
+/// - `output`: the return type.
+/// - `c_variadic`: indicates whether this is a C-variadic function.
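+///
+/// The inputs and the output are stored as a single interned list, with the
+/// output last; as a sketch:
+/// ```ignore (illustrative)
+/// // for `fn(u32, f32) -> bool`:
+/// // inputs_and_output == [u32, f32, bool]
+/// // inputs()          == [u32, f32]
+/// // output()          == bool
+/// ```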
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, TyEncodable, TyDecodable)]
+#[derive(HashStable, TypeFoldable, TypeVisitable)]
+pub struct FnSig<'tcx> {
+ pub inputs_and_output: &'tcx List<Ty<'tcx>>,
+ pub c_variadic: bool,
+ pub unsafety: hir::Unsafety,
+ pub abi: abi::Abi,
+}
+
+impl<'tcx> FnSig<'tcx> {
+ pub fn inputs(&self) -> &'tcx [Ty<'tcx>] {
+ &self.inputs_and_output[..self.inputs_and_output.len() - 1]
+ }
+
+ pub fn output(&self) -> Ty<'tcx> {
+ self.inputs_and_output[self.inputs_and_output.len() - 1]
+ }
+
+ // Creates a minimal `FnSig` to be used when encountering a `TyKind::Error` in a fallible
+ // method.
+ fn fake() -> FnSig<'tcx> {
+ FnSig {
+ inputs_and_output: List::empty(),
+ c_variadic: false,
+ unsafety: hir::Unsafety::Normal,
+ abi: abi::Abi::Rust,
+ }
+ }
+}
+
+pub type PolyFnSig<'tcx> = Binder<'tcx, FnSig<'tcx>>;
+
+impl<'tcx> PolyFnSig<'tcx> {
+ #[inline]
+ pub fn inputs(&self) -> Binder<'tcx, &'tcx [Ty<'tcx>]> {
+ self.map_bound_ref_unchecked(|fn_sig| fn_sig.inputs())
+ }
+ #[inline]
+ pub fn input(&self, index: usize) -> ty::Binder<'tcx, Ty<'tcx>> {
+ self.map_bound_ref(|fn_sig| fn_sig.inputs()[index])
+ }
+ pub fn inputs_and_output(&self) -> ty::Binder<'tcx, &'tcx List<Ty<'tcx>>> {
+ self.map_bound_ref(|fn_sig| fn_sig.inputs_and_output)
+ }
+ #[inline]
+ pub fn output(&self) -> ty::Binder<'tcx, Ty<'tcx>> {
+ self.map_bound_ref(|fn_sig| fn_sig.output())
+ }
+ pub fn c_variadic(&self) -> bool {
+ self.skip_binder().c_variadic
+ }
+ pub fn unsafety(&self) -> hir::Unsafety {
+ self.skip_binder().unsafety
+ }
+ pub fn abi(&self) -> abi::Abi {
+ self.skip_binder().abi
+ }
+}
+
+pub type CanonicalPolyFnSig<'tcx> = Canonical<'tcx, Binder<'tcx, FnSig<'tcx>>>;
+
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, TyEncodable, TyDecodable)]
+#[derive(HashStable)]
+pub struct ParamTy {
+ pub index: u32,
+ pub name: Symbol,
+}
+
+impl<'tcx> ParamTy {
+ pub fn new(index: u32, name: Symbol) -> ParamTy {
+ ParamTy { index, name }
+ }
+
+ pub fn for_def(def: &ty::GenericParamDef) -> ParamTy {
+ ParamTy::new(def.index, def.name)
+ }
+
+ #[inline]
+ pub fn to_ty(self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
+ tcx.mk_ty_param(self.index, self.name)
+ }
+}
+
+#[derive(Copy, Clone, Hash, TyEncodable, TyDecodable, Eq, PartialEq, Ord, PartialOrd)]
+#[derive(HashStable)]
+pub struct ParamConst {
+ pub index: u32,
+ pub name: Symbol,
+}
+
+impl ParamConst {
+ pub fn new(index: u32, name: Symbol) -> ParamConst {
+ ParamConst { index, name }
+ }
+
+ pub fn for_def(def: &ty::GenericParamDef) -> ParamConst {
+ ParamConst::new(def.index, def.name)
+ }
+}
+
+/// Use this rather than `RegionKind`, whenever possible.
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, HashStable)]
+#[rustc_pass_by_value]
+pub struct Region<'tcx>(pub Interned<'tcx, RegionKind<'tcx>>);
+
+impl<'tcx> Deref for Region<'tcx> {
+ type Target = RegionKind<'tcx>;
+
+ #[inline]
+ fn deref(&self) -> &RegionKind<'tcx> {
+ &self.0.0
+ }
+}
+
+impl<'tcx> fmt::Debug for Region<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{:?}", self.kind())
+ }
+}
+
+#[derive(Copy, Clone, PartialEq, Eq, Hash, TyEncodable, TyDecodable, PartialOrd, Ord)]
+#[derive(HashStable)]
+pub struct EarlyBoundRegion {
+ pub def_id: DefId,
+ pub index: u32,
+ pub name: Symbol,
+}
+
+impl fmt::Debug for EarlyBoundRegion {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "{}, {}", self.index, self.name)
+ }
+}
+
+/// A **`const`** **v**ariable **ID**.
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
+#[derive(HashStable, TyEncodable, TyDecodable)]
+pub struct ConstVid<'tcx> {
+ pub index: u32,
+ pub phantom: PhantomData<&'tcx ()>,
+}
+
+rustc_index::newtype_index! {
+ /// A **region** (lifetime) **v**ariable **ID**.
+ #[derive(HashStable)]
+ pub struct RegionVid {
+ DEBUG_FORMAT = custom,
+ }
+}
+
+impl Atom for RegionVid {
+ fn index(self) -> usize {
+ Idx::index(self)
+ }
+}
+
+rustc_index::newtype_index! {
+ #[derive(HashStable)]
+ pub struct BoundVar { .. }
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)]
+#[derive(HashStable)]
+pub struct BoundTy {
+ pub var: BoundVar,
+ pub kind: BoundTyKind,
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)]
+#[derive(HashStable)]
+pub enum BoundTyKind {
+ Anon,
+ Param(Symbol),
+}
+
+impl From<BoundVar> for BoundTy {
+ fn from(var: BoundVar) -> Self {
+ BoundTy { var, kind: BoundTyKind::Anon }
+ }
+}
+
+/// A `ProjectionPredicate` for an `ExistentialTraitRef`.
+#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)]
+#[derive(HashStable, TypeFoldable, TypeVisitable)]
+pub struct ExistentialProjection<'tcx> {
+ pub item_def_id: DefId,
+ pub substs: SubstsRef<'tcx>,
+ pub term: Term<'tcx>,
+}
+
+pub type PolyExistentialProjection<'tcx> = Binder<'tcx, ExistentialProjection<'tcx>>;
+
+impl<'tcx> ExistentialProjection<'tcx> {
+ /// Extracts the underlying existential trait reference from this projection.
+ /// For example, if this is a projection of `exists T. <T as Iterator>::Item == X`,
+ /// then this function would return an `exists T. T: Iterator` existential trait
+ /// reference.
+ pub fn trait_ref(&self, tcx: TyCtxt<'tcx>) -> ty::ExistentialTraitRef<'tcx> {
+ let def_id = tcx.parent(self.item_def_id);
+ let subst_count = tcx.generics_of(def_id).count() - 1;
+ let substs = tcx.intern_substs(&self.substs[..subst_count]);
+ ty::ExistentialTraitRef { def_id, substs }
+ }
+
+ pub fn with_self_ty(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ self_ty: Ty<'tcx>,
+ ) -> ty::ProjectionPredicate<'tcx> {
+ // otherwise the escaping regions would be captured by the binders
+ debug_assert!(!self_ty.has_escaping_bound_vars());
+
+ ty::ProjectionPredicate {
+ projection_ty: ty::ProjectionTy {
+ item_def_id: self.item_def_id,
+ substs: tcx.mk_substs_trait(self_ty, self.substs),
+ },
+ term: self.term,
+ }
+ }
+
+ pub fn erase_self_ty(
+ tcx: TyCtxt<'tcx>,
+ projection_predicate: ty::ProjectionPredicate<'tcx>,
+ ) -> Self {
+ // Assert there is a Self.
+ projection_predicate.projection_ty.substs.type_at(0);
+
+ Self {
+ item_def_id: projection_predicate.projection_ty.item_def_id,
+ substs: tcx.intern_substs(&projection_predicate.projection_ty.substs[1..]),
+ term: projection_predicate.term,
+ }
+ }
+}
+
+impl<'tcx> PolyExistentialProjection<'tcx> {
+ pub fn with_self_ty(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ self_ty: Ty<'tcx>,
+ ) -> ty::PolyProjectionPredicate<'tcx> {
+ self.map_bound(|p| p.with_self_ty(tcx, self_ty))
+ }
+
+ pub fn item_def_id(&self) -> DefId {
+ self.skip_binder().item_def_id
+ }
+}
+
+/// Region utilities
+impl<'tcx> Region<'tcx> {
+ pub fn kind(self) -> RegionKind<'tcx> {
+ *self.0.0
+ }
+
+ /// Is this region named by the user?
+ pub fn has_name(self) -> bool {
+ match *self {
+ ty::ReEarlyBound(ebr) => ebr.has_name(),
+ ty::ReLateBound(_, br) => br.kind.is_named(),
+ ty::ReFree(fr) => fr.bound_region.is_named(),
+ ty::ReStatic => true,
+ ty::ReVar(..) => false,
+ ty::RePlaceholder(placeholder) => placeholder.name.is_named(),
+ ty::ReEmpty(_) => false,
+ ty::ReErased => false,
+ }
+ }
+
+ #[inline]
+ pub fn is_static(self) -> bool {
+ matches!(*self, ty::ReStatic)
+ }
+
+ #[inline]
+ pub fn is_erased(self) -> bool {
+ matches!(*self, ty::ReErased)
+ }
+
+ #[inline]
+ pub fn is_late_bound(self) -> bool {
+ matches!(*self, ty::ReLateBound(..))
+ }
+
+ #[inline]
+ pub fn is_placeholder(self) -> bool {
+ matches!(*self, ty::RePlaceholder(..))
+ }
+
+ #[inline]
+ pub fn is_empty(self) -> bool {
+ matches!(*self, ty::ReEmpty(..))
+ }
+
+ #[inline]
+ pub fn bound_at_or_above_binder(self, index: ty::DebruijnIndex) -> bool {
+ match *self {
+ ty::ReLateBound(debruijn, _) => debruijn >= index,
+ _ => false,
+ }
+ }
+
+ pub fn type_flags(self) -> TypeFlags {
+ let mut flags = TypeFlags::empty();
+
+ match *self {
+ ty::ReVar(..) => {
+ flags = flags | TypeFlags::HAS_FREE_REGIONS;
+ flags = flags | TypeFlags::HAS_FREE_LOCAL_REGIONS;
+ flags = flags | TypeFlags::HAS_RE_INFER;
+ }
+ ty::RePlaceholder(..) => {
+ flags = flags | TypeFlags::HAS_FREE_REGIONS;
+ flags = flags | TypeFlags::HAS_FREE_LOCAL_REGIONS;
+ flags = flags | TypeFlags::HAS_RE_PLACEHOLDER;
+ }
+ ty::ReEarlyBound(..) => {
+ flags = flags | TypeFlags::HAS_FREE_REGIONS;
+ flags = flags | TypeFlags::HAS_FREE_LOCAL_REGIONS;
+ flags = flags | TypeFlags::HAS_RE_PARAM;
+ }
+ ty::ReFree { .. } => {
+ flags = flags | TypeFlags::HAS_FREE_REGIONS;
+ flags = flags | TypeFlags::HAS_FREE_LOCAL_REGIONS;
+ }
+ ty::ReEmpty(_) | ty::ReStatic => {
+ flags = flags | TypeFlags::HAS_FREE_REGIONS;
+ }
+ ty::ReLateBound(..) => {
+ flags = flags | TypeFlags::HAS_RE_LATE_BOUND;
+ }
+ ty::ReErased => {
+ flags = flags | TypeFlags::HAS_RE_ERASED;
+ }
+ }
+
+ debug!("type_flags({:?}) = {:?}", self, flags);
+
+ flags
+ }
+
+ /// Given an early-bound or free region, returns the `DefId` where it was bound.
+ /// For example, consider the regions in this snippet of code:
+ ///
+ /// ```ignore (illustrative)
+ /// impl<'a> Foo {
+ /// //   ^^ -- early bound, declared on an impl
+ ///
+ ///     fn bar<'b, 'c>(x: &self, y: &'b u32, z: &'c u64) where 'static: 'c
+ /// //         ^^  ^^     ^ anonymous, late-bound
+ /// //         |   early-bound, appears in where-clauses
+ /// //         late-bound, appears only in fn args
+ ///     {..}
+ /// }
+ /// ```
+ ///
+ /// Here, `free_region_binding_scope('a)` would return the `DefId`
+ /// of the impl, and for all the other highlighted regions, it
+ /// would return the `DefId` of the function. In other cases (not shown), this
+ /// function might return the `DefId` of a closure.
+ pub fn free_region_binding_scope(self, tcx: TyCtxt<'_>) -> DefId {
+ match *self {
+ ty::ReEarlyBound(br) => tcx.parent(br.def_id),
+ ty::ReFree(fr) => fr.scope,
+ _ => bug!("free_region_binding_scope invoked on inappropriate region: {:?}", self),
+ }
+ }
+
+ /// True for free regions other than `'static`.
+ pub fn is_free(self) -> bool {
+ matches!(*self, ty::ReEarlyBound(_) | ty::ReFree(_))
+ }
+
+ /// True if `self` is a free region or static.
+ pub fn is_free_or_static(self) -> bool {
+ match *self {
+ ty::ReStatic => true,
+ _ => self.is_free(),
+ }
+ }
+}
+
+/// Type utilities
+impl<'tcx> Ty<'tcx> {
+ #[inline(always)]
+ pub fn kind(self) -> &'tcx TyKind<'tcx> {
+ &self.0.0.kind
+ }
+
+ #[inline(always)]
+ pub fn flags(self) -> TypeFlags {
+ self.0.0.flags
+ }
+
+ #[inline]
+ pub fn is_unit(self) -> bool {
+ match self.kind() {
+ Tuple(ref tys) => tys.is_empty(),
+ _ => false,
+ }
+ }
+
+ #[inline]
+ pub fn is_never(self) -> bool {
+ matches!(self.kind(), Never)
+ }
+
+ #[inline]
+ pub fn is_primitive(self) -> bool {
+ self.kind().is_primitive()
+ }
+
+ #[inline]
+ pub fn is_adt(self) -> bool {
+ matches!(self.kind(), Adt(..))
+ }
+
+ #[inline]
+ pub fn is_ref(self) -> bool {
+ matches!(self.kind(), Ref(..))
+ }
+
+ #[inline]
+ pub fn is_ty_var(self) -> bool {
+ matches!(self.kind(), Infer(TyVar(_)))
+ }
+
+ #[inline]
+ pub fn ty_vid(self) -> Option<ty::TyVid> {
+ match self.kind() {
+ &Infer(TyVar(vid)) => Some(vid),
+ _ => None,
+ }
+ }
+
+ #[inline]
+ pub fn is_ty_infer(self) -> bool {
+ matches!(self.kind(), Infer(_))
+ }
+
+ #[inline]
+ pub fn is_phantom_data(self) -> bool {
+ if let Adt(def, _) = self.kind() { def.is_phantom_data() } else { false }
+ }
+
+ #[inline]
+ pub fn is_bool(self) -> bool {
+ *self.kind() == Bool
+ }
+
+ /// Returns `true` if this type is a `str`.
+ #[inline]
+ pub fn is_str(self) -> bool {
+ *self.kind() == Str
+ }
+
+ #[inline]
+ pub fn is_param(self, index: u32) -> bool {
+ match self.kind() {
+ ty::Param(ref data) => data.index == index,
+ _ => false,
+ }
+ }
+
+ #[inline]
+ pub fn is_slice(self) -> bool {
+ matches!(self.kind(), Slice(_))
+ }
+
+ #[inline]
+ pub fn is_array_slice(self) -> bool {
+ match self.kind() {
+ Slice(_) => true,
+ RawPtr(TypeAndMut { ty, .. }) | Ref(_, ty, _) => matches!(ty.kind(), Slice(_)),
+ _ => false,
+ }
+ }
+
+ #[inline]
+ pub fn is_array(self) -> bool {
+ matches!(self.kind(), Array(..))
+ }
+
+ #[inline]
+ pub fn is_simd(self) -> bool {
+ match self.kind() {
+ Adt(def, _) => def.repr().simd(),
+ _ => false,
+ }
+ }
+
+ pub fn sequence_element_type(self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
+ match self.kind() {
+ Array(ty, _) | Slice(ty) => *ty,
+ Str => tcx.types.u8,
+ _ => bug!("`sequence_element_type` called on non-sequence value: {}", self),
+ }
+ }
+
+ pub fn simd_size_and_type(self, tcx: TyCtxt<'tcx>) -> (u64, Ty<'tcx>) {
+ match self.kind() {
+ Adt(def, substs) => {
+ assert!(def.repr().simd(), "`simd_size_and_type` called on non-SIMD type");
+ let variant = def.non_enum_variant();
+ let f0_ty = variant.fields[0].ty(tcx, substs);
+
+ match f0_ty.kind() {
+ // If the first field is an array, we assume it is the only field and its
+ // elements are the SIMD components.
+ Array(f0_elem_ty, f0_len) => {
+ // FIXME(repr_simd): https://github.com/rust-lang/rust/pull/78863#discussion_r522784112
+ // The way we evaluate the `N` in `[T; N]` here only works since we use
+ // `simd_size_and_type` post-monomorphization. It will probably start to ICE
+ // if we use it in generic code. See the `simd-array-trait` ui test.
+ (f0_len.eval_usize(tcx, ParamEnv::empty()) as u64, *f0_elem_ty)
+ }
+ // Otherwise, the fields of this Adt are the SIMD components (and we assume they
+ // all have the same type).
+ _ => (variant.fields.len() as u64, f0_ty),
+ }
+ }
+ _ => bug!("`simd_size_and_type` called on invalid type"),
+ }
+ }
+
+ #[inline]
+ pub fn is_region_ptr(self) -> bool {
+ matches!(self.kind(), Ref(..))
+ }
+
+ #[inline]
+ pub fn is_mutable_ptr(self) -> bool {
+ matches!(
+ self.kind(),
+ RawPtr(TypeAndMut { mutbl: hir::Mutability::Mut, .. })
+ | Ref(_, _, hir::Mutability::Mut)
+ )
+ }
+
+ /// Get the mutability of the reference or `None` when not a reference
+ #[inline]
+ pub fn ref_mutability(self) -> Option<hir::Mutability> {
+ match self.kind() {
+ Ref(_, _, mutability) => Some(*mutability),
+ _ => None,
+ }
+ }
+
+ #[inline]
+ pub fn is_unsafe_ptr(self) -> bool {
+ matches!(self.kind(), RawPtr(_))
+ }
+
+ /// Tests if this is any kind of primitive pointer type (reference, raw pointer, fn pointer).
+ #[inline]
+ pub fn is_any_ptr(self) -> bool {
+ self.is_region_ptr() || self.is_unsafe_ptr() || self.is_fn_ptr()
+ }
+
+ #[inline]
+ pub fn is_box(self) -> bool {
+ match self.kind() {
+ Adt(def, _) => def.is_box(),
+ _ => false,
+ }
+ }
+
+ /// Panics if called on any type other than `Box<T>`.
+ pub fn boxed_ty(self) -> Ty<'tcx> {
+ match self.kind() {
+ Adt(def, substs) if def.is_box() => substs.type_at(0),
+ _ => bug!("`boxed_ty` is called on non-box type {:?}", self),
+ }
+ }
+
+ /// A scalar type is one that denotes an atomic datum, with no sub-components.
+ /// (A RawPtr is scalar because it represents a non-managed pointer, so its
+ /// contents are abstract to rustc.)
+ #[inline]
+ pub fn is_scalar(self) -> bool {
+ matches!(
+ self.kind(),
+ Bool | Char
+ | Int(_)
+ | Float(_)
+ | Uint(_)
+ | FnDef(..)
+ | FnPtr(_)
+ | RawPtr(_)
+ | Infer(IntVar(_) | FloatVar(_))
+ )
+ }
+
+ /// Returns `true` if this type is a floating point type.
+ #[inline]
+ pub fn is_floating_point(self) -> bool {
+ matches!(self.kind(), Float(_) | Infer(FloatVar(_)))
+ }
+
+ #[inline]
+ pub fn is_trait(self) -> bool {
+ matches!(self.kind(), Dynamic(..))
+ }
+
+ #[inline]
+ pub fn is_enum(self) -> bool {
+ matches!(self.kind(), Adt(adt_def, _) if adt_def.is_enum())
+ }
+
+ #[inline]
+ pub fn is_union(self) -> bool {
+ matches!(self.kind(), Adt(adt_def, _) if adt_def.is_union())
+ }
+
+ #[inline]
+ pub fn is_closure(self) -> bool {
+ matches!(self.kind(), Closure(..))
+ }
+
+ #[inline]
+ pub fn is_generator(self) -> bool {
+ matches!(self.kind(), Generator(..))
+ }
+
+ #[inline]
+ pub fn is_integral(self) -> bool {
+ matches!(self.kind(), Infer(IntVar(_)) | Int(_) | Uint(_))
+ }
+
+ #[inline]
+ pub fn is_fresh_ty(self) -> bool {
+ matches!(self.kind(), Infer(FreshTy(_)))
+ }
+
+ #[inline]
+ pub fn is_fresh(self) -> bool {
+ matches!(self.kind(), Infer(FreshTy(_) | FreshIntTy(_) | FreshFloatTy(_)))
+ }
+
+ #[inline]
+ pub fn is_char(self) -> bool {
+ matches!(self.kind(), Char)
+ }
+
+ #[inline]
+ pub fn is_numeric(self) -> bool {
+ self.is_integral() || self.is_floating_point()
+ }
+
+ #[inline]
+ pub fn is_signed(self) -> bool {
+ matches!(self.kind(), Int(_))
+ }
+
+ #[inline]
+ pub fn is_ptr_sized_integral(self) -> bool {
+ matches!(self.kind(), Int(ty::IntTy::Isize) | Uint(ty::UintTy::Usize))
+ }
+
+ #[inline]
+ pub fn has_concrete_skeleton(self) -> bool {
+ !matches!(self.kind(), Param(_) | Infer(_) | Error(_))
+ }
+
+ /// Checks whether a type recursively contains another type
+ ///
+ /// Example: `Option<()>` contains `()`
+ pub fn contains(self, other: Ty<'tcx>) -> bool {
+ struct ContainsTyVisitor<'tcx>(Ty<'tcx>);
+
+ impl<'tcx> TypeVisitor<'tcx> for ContainsTyVisitor<'tcx> {
+ type BreakTy = ();
+
+ fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ if self.0 == t { ControlFlow::BREAK } else { t.super_visit_with(self) }
+ }
+ }
+
+ let cf = self.visit_with(&mut ContainsTyVisitor(other));
+ cf.is_break()
+ }
+
+ /// Returns the type and mutability of `*ty`.
+ ///
+ /// The parameter `explicit` indicates if this is an *explicit* dereference.
+ /// Some types -- notably unsafe ptrs -- can only be dereferenced explicitly.
+ pub fn builtin_deref(self, explicit: bool) -> Option<TypeAndMut<'tcx>> {
+ match self.kind() {
+ Adt(def, _) if def.is_box() => {
+ Some(TypeAndMut { ty: self.boxed_ty(), mutbl: hir::Mutability::Not })
+ }
+ Ref(_, ty, mutbl) => Some(TypeAndMut { ty: *ty, mutbl: *mutbl }),
+ RawPtr(mt) if explicit => Some(*mt),
+ _ => None,
+ }
+ }
+
+ /// Returns the type of `ty[i]`.
+ pub fn builtin_index(self) -> Option<Ty<'tcx>> {
+ match self.kind() {
+ Array(ty, _) | Slice(ty) => Some(*ty),
+ _ => None,
+ }
+ }
+
+ pub fn fn_sig(self, tcx: TyCtxt<'tcx>) -> PolyFnSig<'tcx> {
+ match self.kind() {
+ FnDef(def_id, substs) => tcx.bound_fn_sig(*def_id).subst(tcx, substs),
+ FnPtr(f) => *f,
+ Error(_) => {
+ // ignore errors (#54954)
+ ty::Binder::dummy(FnSig::fake())
+ }
+ Closure(..) => bug!(
+ "to get the signature of a closure, use `substs.as_closure().sig()` not `fn_sig()`",
+ ),
+ _ => bug!("Ty::fn_sig() called on non-fn type: {:?}", self),
+ }
+ }
+
+ #[inline]
+ pub fn is_fn(self) -> bool {
+ matches!(self.kind(), FnDef(..) | FnPtr(_))
+ }
+
+ #[inline]
+ pub fn is_fn_ptr(self) -> bool {
+ matches!(self.kind(), FnPtr(_))
+ }
+
+ #[inline]
+ pub fn is_impl_trait(self) -> bool {
+ matches!(self.kind(), Opaque(..))
+ }
+
+ #[inline]
+ pub fn ty_adt_def(self) -> Option<AdtDef<'tcx>> {
+ match self.kind() {
+ Adt(adt, _) => Some(*adt),
+ _ => None,
+ }
+ }
+
+ /// Iterates over tuple fields.
+ /// Panics when called on anything but a tuple.
+ #[inline]
+ pub fn tuple_fields(self) -> &'tcx List<Ty<'tcx>> {
+ match self.kind() {
+ Tuple(substs) => substs,
+ _ => bug!("tuple_fields called on non-tuple"),
+ }
+ }
+
+ /// If the type contains variants, returns the valid range of variant indices.
+ //
+ // FIXME: This requires the optimized MIR in the case of generators.
+ #[inline]
+ pub fn variant_range(self, tcx: TyCtxt<'tcx>) -> Option<Range<VariantIdx>> {
+ match self.kind() {
+ TyKind::Adt(adt, _) => Some(adt.variant_range()),
+ TyKind::Generator(def_id, substs, _) => {
+ Some(substs.as_generator().variant_range(*def_id, tcx))
+ }
+ _ => None,
+ }
+ }
+
+ /// If the type contains variants, returns the variant for `variant_index`.
+ /// Panics if `variant_index` is out of range.
+ //
+ // FIXME: This requires the optimized MIR in the case of generators.
+ #[inline]
+ pub fn discriminant_for_variant(
+ self,
+ tcx: TyCtxt<'tcx>,
+ variant_index: VariantIdx,
+ ) -> Option<Discr<'tcx>> {
+ match self.kind() {
+ TyKind::Adt(adt, _) if adt.variants().is_empty() => {
+ // This can actually happen during CTFE, see
+ // https://github.com/rust-lang/rust/issues/89765.
+ None
+ }
+ TyKind::Adt(adt, _) if adt.is_enum() => {
+ Some(adt.discriminant_for_variant(tcx, variant_index))
+ }
+ TyKind::Generator(def_id, substs, _) => {
+ Some(substs.as_generator().discriminant_for_variant(*def_id, tcx, variant_index))
+ }
+ _ => None,
+ }
+ }
+
+ /// Returns the type of the discriminant of this type.
+ pub fn discriminant_ty(self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
+ match self.kind() {
+ ty::Adt(adt, _) if adt.is_enum() => adt.repr().discr_type().to_ty(tcx),
+ ty::Generator(_, substs, _) => substs.as_generator().discr_ty(tcx),
+
+ ty::Param(_) | ty::Projection(_) | ty::Opaque(..) | ty::Infer(ty::TyVar(_)) => {
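+ // The discriminant type can't be computed for these kinds yet, so
+ // defer by projecting the `DiscriminantKind` lang item's associated
+ // type for `self`.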
+ let assoc_items = tcx.associated_item_def_ids(
+ tcx.require_lang_item(hir::LangItem::DiscriminantKind, None),
+ );
+ tcx.mk_projection(assoc_items[0], tcx.intern_substs(&[self.into()]))
+ }
+
+ ty::Bool
+ | ty::Char
+ | ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_)
+ | ty::Adt(..)
+ | ty::Foreign(_)
+ | ty::Str
+ | ty::Array(..)
+ | ty::Slice(_)
+ | ty::RawPtr(_)
+ | ty::Ref(..)
+ | ty::FnDef(..)
+ | ty::FnPtr(..)
+ | ty::Dynamic(..)
+ | ty::Closure(..)
+ | ty::GeneratorWitness(..)
+ | ty::Never
+ | ty::Tuple(_)
+ | ty::Error(_)
+ | ty::Infer(IntVar(_) | FloatVar(_)) => tcx.types.u8,
+
+ ty::Bound(..)
+ | ty::Placeholder(_)
+ | ty::Infer(FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
+ bug!("`discriminant_ty` applied to unexpected type: {:?}", self)
+ }
+ }
+ }
+
+ /// Returns the type of metadata for (potentially fat) pointers to this type,
+ /// and a boolean signifying if this is conditional on this type being `Sized`.
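+ ///
+ /// Illustrative sketch of the mapping implemented below:
+ /// ```ignore (illustrative)
+ /// // u32            -> ((), false)             thin pointer
+ /// // [u8] / str     -> (usize, false)          length
+ /// // dyn Trait      -> (DynMetadata<..>, false)
+ /// // T (type param) -> ((), true)              unit only if `T: Sized`
+ /// ```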
+ pub fn ptr_metadata_ty(
+ self,
+ tcx: TyCtxt<'tcx>,
+ normalize: impl FnMut(Ty<'tcx>) -> Ty<'tcx>,
+ ) -> (Ty<'tcx>, bool) {
+ let tail = tcx.struct_tail_with_normalize(self, normalize, || {});
+ match tail.kind() {
+ // Sized types
+ ty::Infer(ty::IntVar(_) | ty::FloatVar(_))
+ | ty::Uint(_)
+ | ty::Int(_)
+ | ty::Bool
+ | ty::Float(_)
+ | ty::FnDef(..)
+ | ty::FnPtr(_)
+ | ty::RawPtr(..)
+ | ty::Char
+ | ty::Ref(..)
+ | ty::Generator(..)
+ | ty::GeneratorWitness(..)
+ | ty::Array(..)
+ | ty::Closure(..)
+ | ty::Never
+ | ty::Error(_)
+ // Extern types have metadata = ().
+ | ty::Foreign(..)
+ // If returned by `struct_tail_without_normalization` this is a unit struct
+ // without any fields, or not a struct, and therefore is Sized.
+ | ty::Adt(..)
+ // If returned by `struct_tail_without_normalization` this is the empty tuple,
+ // a.k.a. unit type, which is Sized
+ | ty::Tuple(..) => (tcx.types.unit, false),
+
+ ty::Str | ty::Slice(_) => (tcx.types.usize, false),
+ ty::Dynamic(..) => {
+ let dyn_metadata = tcx.lang_items().dyn_metadata().unwrap();
+ (tcx.bound_type_of(dyn_metadata).subst(tcx, &[tail.into()]), false)
+ },
+
+ // type parameters only have unit metadata if they're sized, so return true
+ // to make sure we double check this during confirmation
+ ty::Param(_) | ty::Projection(_) | ty::Opaque(..) => (tcx.types.unit, true),
+
+ ty::Infer(ty::TyVar(_))
+ | ty::Bound(..)
+ | ty::Placeholder(..)
+ | ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
+ bug!("`ptr_metadata_ty` applied to unexpected type: {:?} (tail = {:?})", self, tail)
+ }
+ }
+ }
+
+ /// When we create a closure, we record its kind (i.e., what trait
+ /// it implements) into its `ClosureSubsts` using a type
+ /// parameter. This is kind of a phantom type, except that the
+ /// most convenient thing for us to use are the integral types. This
+ /// function converts such a special type into the closure
+ /// kind. To go the other way, use
+ /// `tcx.closure_kind_ty(closure_kind)`.
+ ///
+ /// Note that during type checking, we use an inference variable
+ /// to represent the closure kind, because it has not yet been
+ /// inferred. Once upvar inference (in `rustc_typeck/src/check/upvar.rs`)
+ /// is complete, that type variable will be unified.
+ pub fn to_opt_closure_kind(self) -> Option<ty::ClosureKind> {
+ match self.kind() {
+ Int(int_ty) => match int_ty {
+ ty::IntTy::I8 => Some(ty::ClosureKind::Fn),
+ ty::IntTy::I16 => Some(ty::ClosureKind::FnMut),
+ ty::IntTy::I32 => Some(ty::ClosureKind::FnOnce),
+ _ => bug!("cannot convert type `{:?}` to a closure kind", self),
+ },
+
+ // "Bound" types appear in canonical queries when the
+ // closure type is not yet known
+ Bound(..) | Infer(_) => None,
+
+ Error(_) => Some(ty::ClosureKind::Fn),
+
+ _ => bug!("cannot convert type `{:?}` to a closure kind", self),
+ }
+ }
+
+ /// Fast path helper for testing if a type is `Sized`.
+ ///
+ /// Returning true means the type is known to be sized. Returning
+ /// `false` means nothing -- could be sized, might not be.
+ ///
+ /// Note that we could never rely on the fact that a type such as `[_]` is
+ /// trivially `!Sized` because we could be in a type environment with a
+ /// bound such as `[_]: Copy`. A function with such a bound obviously never
+ /// can be called, but that doesn't mean it shouldn't typecheck. This is why
+ /// this method doesn't return `Option<bool>`.
+ pub fn is_trivially_sized(self, tcx: TyCtxt<'tcx>) -> bool {
+ match self.kind() {
+ ty::Infer(ty::IntVar(_) | ty::FloatVar(_))
+ | ty::Uint(_)
+ | ty::Int(_)
+ | ty::Bool
+ | ty::Float(_)
+ | ty::FnDef(..)
+ | ty::FnPtr(_)
+ | ty::RawPtr(..)
+ | ty::Char
+ | ty::Ref(..)
+ | ty::Generator(..)
+ | ty::GeneratorWitness(..)
+ | ty::Array(..)
+ | ty::Closure(..)
+ | ty::Never
+ | ty::Error(_) => true,
+
+ ty::Str | ty::Slice(_) | ty::Dynamic(..) | ty::Foreign(..) => false,
+
+ ty::Tuple(tys) => tys.iter().all(|ty| ty.is_trivially_sized(tcx)),
+
+ ty::Adt(def, _substs) => def.sized_constraint(tcx).0.is_empty(),
+
+ ty::Projection(_) | ty::Param(_) | ty::Opaque(..) => false,
+
+ ty::Infer(ty::TyVar(_)) => false,
+
+ ty::Bound(..)
+ | ty::Placeholder(..)
+ | ty::Infer(ty::FreshTy(_) | ty::FreshIntTy(_) | ty::FreshFloatTy(_)) => {
+ bug!("`is_trivially_sized` applied to unexpected type: {:?}", self)
+ }
+ }
+ }
+
+ /// Fast path helper for primitives which are always `Copy` and which
+ /// have a side-effect-free `Clone` impl.
+ ///
+ /// Returning true means the type is known to be pure and `Copy+Clone`.
+ /// Returning `false` means nothing -- could be `Copy`, might not be.
+ ///
+ /// This is mostly useful for optimizations, as these are the types
+ /// on which we can replace cloning with dereferencing.
+ pub fn is_trivially_pure_clone_copy(self) -> bool {
+ match self.kind() {
+ ty::Bool | ty::Char | ty::Never => true,
+
+ // These aren't even `Clone`
+ ty::Str | ty::Slice(..) | ty::Foreign(..) | ty::Dynamic(..) => false,
+
+ ty::Int(..) | ty::Uint(..) | ty::Float(..) => true,
+
+ // The voldemort ZSTs are fine.
+ ty::FnDef(..) => true,
+
+ ty::Array(element_ty, _len) => element_ty.is_trivially_pure_clone_copy(),
+
+ // A 100-tuple isn't "trivial", so doing this only for reasonable sizes.
+ ty::Tuple(field_tys) => {
+ field_tys.len() <= 3 && field_tys.iter().all(Self::is_trivially_pure_clone_copy)
+ }
+
+ // Sometimes traits aren't implemented for every ABI or arity,
+ // because we can't be generic over everything yet.
+ ty::FnPtr(..) => false,
+
+ // Definitely absolutely not copy.
+ ty::Ref(_, _, hir::Mutability::Mut) => false,
+
+ // Thin pointers & thin shared references are pure-clone-copy, but for
+ // anything with custom metadata it might be more complicated.
+ ty::Ref(_, _, hir::Mutability::Not) | ty::RawPtr(..) => false,
+
+ ty::Generator(..) | ty::GeneratorWitness(..) => false,
+
+ // Might be, but not "trivial" so just giving the safe answer.
+ ty::Adt(..) | ty::Closure(..) | ty::Opaque(..) => false,
+
+ ty::Projection(..) | ty::Param(..) | ty::Infer(..) | ty::Error(..) => false,
+
+ ty::Bound(..) | ty::Placeholder(..) => {
+ bug!("`is_trivially_pure_clone_copy` applied to unexpected type: {:?}", self);
+ }
+ }
+ }
+}
+
+/// Extra information about why we ended up with a particular variance.
+/// This is only used to add more information to error messages, and
+/// has no effect on soundness. While choosing the 'wrong' `VarianceDiagInfo`
+/// may lead to confusing notes in error messages, it will never cause
+/// a miscompilation or unsoundness.
+///
+/// When in doubt, use `VarianceDiagInfo::default()`.
+#[derive(Copy, Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord)]
+pub enum VarianceDiagInfo<'tcx> {
+ /// No additional information - this is the default.
+ /// We will not add any additional information to error messages.
+ #[default]
+ None,
+ /// We switched our variance because a generic argument occurs inside
+ /// the invariant generic argument of another type.
+ Invariant {
+ /// The generic type containing the generic parameter
+ /// that changes the variance (e.g. `*mut T`, `MyStruct<T>`)
+ ty: Ty<'tcx>,
+ /// The index of the generic parameter being used
+ /// (e.g. `0` for `*mut T`, `1` for `MyStruct<'CovariantParam, 'InvariantParam>`)
+ param_index: u32,
+ },
+}
+
+impl<'tcx> VarianceDiagInfo<'tcx> {
+ /// Mirrors `Variance::xform` - used to 'combine' the existing
+ /// and new `VarianceDiagInfo`s when our variance changes.
+ pub fn xform(self, other: VarianceDiagInfo<'tcx>) -> VarianceDiagInfo<'tcx> {
+ // For now, just use the first `VarianceDiagInfo::Invariant` that we see
+ match self {
+ VarianceDiagInfo::None => other,
+ VarianceDiagInfo::Invariant { .. } => self,
+ }
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/subst.rs b/compiler/rustc_middle/src/ty/subst.rs
new file mode 100644
index 000000000..6262aa180
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/subst.rs
@@ -0,0 +1,785 @@
+// Type substitutions.
+
+use crate::mir;
+use crate::ty::codec::{TyDecoder, TyEncoder};
+use crate::ty::fold::{FallibleTypeFolder, TypeFoldable, TypeFolder, TypeSuperFoldable};
+use crate::ty::sty::{ClosureSubsts, GeneratorSubsts, InlineConstSubsts};
+use crate::ty::visit::{TypeVisitable, TypeVisitor};
+use crate::ty::{self, Lift, List, ParamConst, Ty, TyCtxt};
+
+use rustc_data_structures::intern::{Interned, WithStableHash};
+use rustc_hir::def_id::DefId;
+use rustc_macros::HashStable;
+use rustc_serialize::{self, Decodable, Encodable};
+use smallvec::SmallVec;
+
+use core::intrinsics;
+use std::cmp::Ordering;
+use std::fmt;
+use std::marker::PhantomData;
+use std::mem;
+use std::num::NonZeroUsize;
+use std::ops::ControlFlow;
+use std::slice;
+
+/// An entity in the Rust type system, which can be one of
+/// several kinds (types, lifetimes, and consts).
+/// To reduce memory usage, a `GenericArg` is an interned pointer,
+/// with the lowest 2 bits being reserved for a tag to
+/// indicate the type (`Ty`, `Region`, or `Const`) it points to.
+///
+/// Note: the `PartialEq`, `Eq` and `Hash` derives are only valid because `Ty`,
+/// `Region` and `Const` are all interned.
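+///
+/// Illustrative sketch of the tagged-pointer round trip (hypothetical `ty`
+/// value):
+/// ```ignore (illustrative)
+/// let arg: GenericArg<'_> = ty.into(); // packs the pointer with TYPE_TAG
+/// match arg.unpack() {
+///     GenericArgKind::Type(t) => assert_eq!(t, ty),
+///     _ => unreachable!(),
+/// }
+/// ```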
+#[derive(Copy, Clone, PartialEq, Eq, Hash)]
+pub struct GenericArg<'tcx> {
+ ptr: NonZeroUsize,
+ marker: PhantomData<(Ty<'tcx>, ty::Region<'tcx>, ty::Const<'tcx>)>,
+}
+
+const TAG_MASK: usize = 0b11;
+const TYPE_TAG: usize = 0b00;
+const REGION_TAG: usize = 0b01;
+const CONST_TAG: usize = 0b10;
+
+#[derive(Debug, TyEncodable, TyDecodable, PartialEq, Eq, PartialOrd, Ord)]
+pub enum GenericArgKind<'tcx> {
+ Lifetime(ty::Region<'tcx>),
+ Type(Ty<'tcx>),
+ Const(ty::Const<'tcx>),
+}
+
+/// This function goes from `&'a [Ty<'tcx>]` to `&'a [GenericArg<'tcx>]`
+///
+/// This is sound as, for types, `GenericArg` is just
+/// `NonZeroUsize::new_unchecked(ty as *const _ as usize)` as
+/// long as we use `0` for the `TYPE_TAG`.
+pub fn ty_slice_as_generic_args<'a, 'tcx>(ts: &'a [Ty<'tcx>]) -> &'a [GenericArg<'tcx>] {
+ assert_eq!(TYPE_TAG, 0);
+ // SAFETY: the whole slice is valid and immutable.
+ // `Ty` and `GenericArg` are explained above.
+ unsafe { slice::from_raw_parts(ts.as_ptr().cast(), ts.len()) }
+}
+
+impl<'tcx> List<Ty<'tcx>> {
+ /// Allows freely switching between `List<Ty<'tcx>>` and `List<GenericArg<'tcx>>`.
+ ///
+ /// As lists are interned, `List<Ty<'tcx>>` and `List<GenericArg<'tcx>>` have
+ /// to be interned together; see `intern_type_list` for more details.
+ #[inline]
+ pub fn as_substs(&'tcx self) -> SubstsRef<'tcx> {
+ assert_eq!(TYPE_TAG, 0);
+ // SAFETY: `List<T>` is `#[repr(C)]`. `Ty` and `GenericArg` are explained above.
+ unsafe { &*(self as *const List<Ty<'tcx>> as *const List<GenericArg<'tcx>>) }
+ }
+}
+
+impl<'tcx> GenericArgKind<'tcx> {
+ #[inline]
+ fn pack(self) -> GenericArg<'tcx> {
+ let (tag, ptr) = match self {
+ GenericArgKind::Lifetime(lt) => {
+ // Ensure we can use the tag bits.
+ assert_eq!(mem::align_of_val(&*lt.0.0) & TAG_MASK, 0);
+ (REGION_TAG, lt.0.0 as *const ty::RegionKind<'tcx> as usize)
+ }
+ GenericArgKind::Type(ty) => {
+ // Ensure we can use the tag bits.
+ assert_eq!(mem::align_of_val(&*ty.0.0) & TAG_MASK, 0);
+ (TYPE_TAG, ty.0.0 as *const WithStableHash<ty::TyS<'tcx>> as usize)
+ }
+ GenericArgKind::Const(ct) => {
+ // Ensure we can use the tag bits.
+ assert_eq!(mem::align_of_val(&*ct.0.0) & TAG_MASK, 0);
+ (CONST_TAG, ct.0.0 as *const ty::ConstS<'tcx> as usize)
+ }
+ };
+
+ GenericArg { ptr: unsafe { NonZeroUsize::new_unchecked(ptr | tag) }, marker: PhantomData }
+ }
+}
+
+impl<'tcx> fmt::Debug for GenericArg<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self.unpack() {
+ GenericArgKind::Lifetime(lt) => lt.fmt(f),
+ GenericArgKind::Type(ty) => ty.fmt(f),
+ GenericArgKind::Const(ct) => ct.fmt(f),
+ }
+ }
+}
+
+impl<'tcx> Ord for GenericArg<'tcx> {
+ fn cmp(&self, other: &GenericArg<'tcx>) -> Ordering {
+ self.unpack().cmp(&other.unpack())
+ }
+}
+
+impl<'tcx> PartialOrd for GenericArg<'tcx> {
+ fn partial_cmp(&self, other: &GenericArg<'tcx>) -> Option<Ordering> {
+ Some(self.cmp(&other))
+ }
+}
+
+impl<'tcx> From<ty::Region<'tcx>> for GenericArg<'tcx> {
+ #[inline]
+ fn from(r: ty::Region<'tcx>) -> GenericArg<'tcx> {
+ GenericArgKind::Lifetime(r).pack()
+ }
+}
+
+impl<'tcx> From<Ty<'tcx>> for GenericArg<'tcx> {
+ #[inline]
+ fn from(ty: Ty<'tcx>) -> GenericArg<'tcx> {
+ GenericArgKind::Type(ty).pack()
+ }
+}
+
+impl<'tcx> From<ty::Const<'tcx>> for GenericArg<'tcx> {
+ #[inline]
+ fn from(c: ty::Const<'tcx>) -> GenericArg<'tcx> {
+ GenericArgKind::Const(c).pack()
+ }
+}
+
+impl<'tcx> GenericArg<'tcx> {
+ #[inline]
+ pub fn unpack(self) -> GenericArgKind<'tcx> {
+ let ptr = self.ptr.get();
+ // SAFETY: use of `Interned::new_unchecked` here is ok because these
+ // pointers were originally created from `Interned` types in `pack()`,
+ // and this is just going in the other direction.
+ unsafe {
+ match ptr & TAG_MASK {
+ REGION_TAG => GenericArgKind::Lifetime(ty::Region(Interned::new_unchecked(
+ &*((ptr & !TAG_MASK) as *const ty::RegionKind<'tcx>),
+ ))),
+ TYPE_TAG => GenericArgKind::Type(Ty(Interned::new_unchecked(
+ &*((ptr & !TAG_MASK) as *const WithStableHash<ty::TyS<'tcx>>),
+ ))),
+ CONST_TAG => GenericArgKind::Const(ty::Const(Interned::new_unchecked(
+ &*((ptr & !TAG_MASK) as *const ty::ConstS<'tcx>),
+ ))),
+ _ => intrinsics::unreachable(),
+ }
+ }
+ }
+
+ /// Unpack the `GenericArg` as a region when it is known certainly to be a region.
+ pub fn expect_region(self) -> ty::Region<'tcx> {
+ match self.unpack() {
+ GenericArgKind::Lifetime(lt) => lt,
+ _ => bug!("expected a region, but found another kind"),
+ }
+ }
+
+ /// Unpack the `GenericArg` as a type when it is known certainly to be a type.
+ /// This is true when `Substs` is used in places where the kinds are known
+ /// to be limited (e.g. in tuples, where the only parameters are type parameters).
+ pub fn expect_ty(self) -> Ty<'tcx> {
+ match self.unpack() {
+ GenericArgKind::Type(ty) => ty,
+ _ => bug!("expected a type, but found another kind"),
+ }
+ }
+
+ /// Unpack the `GenericArg` as a const when it is known certainly to be a const.
+ pub fn expect_const(self) -> ty::Const<'tcx> {
+ match self.unpack() {
+ GenericArgKind::Const(c) => c,
+ _ => bug!("expected a const, but found another kind"),
+ }
+ }
+}
+
+impl<'a, 'tcx> Lift<'tcx> for GenericArg<'a> {
+ type Lifted = GenericArg<'tcx>;
+
+ fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> {
+ match self.unpack() {
+ GenericArgKind::Lifetime(lt) => tcx.lift(lt).map(|lt| lt.into()),
+ GenericArgKind::Type(ty) => tcx.lift(ty).map(|ty| ty.into()),
+ GenericArgKind::Const(ct) => tcx.lift(ct).map(|ct| ct.into()),
+ }
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for GenericArg<'tcx> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ match self.unpack() {
+ GenericArgKind::Lifetime(lt) => lt.try_fold_with(folder).map(Into::into),
+ GenericArgKind::Type(ty) => ty.try_fold_with(folder).map(Into::into),
+ GenericArgKind::Const(ct) => ct.try_fold_with(folder).map(Into::into),
+ }
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for GenericArg<'tcx> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ match self.unpack() {
+ GenericArgKind::Lifetime(lt) => lt.visit_with(visitor),
+ GenericArgKind::Type(ty) => ty.visit_with(visitor),
+ GenericArgKind::Const(ct) => ct.visit_with(visitor),
+ }
+ }
+}
+
+impl<'tcx, E: TyEncoder<I = TyCtxt<'tcx>>> Encodable<E> for GenericArg<'tcx> {
+ fn encode(&self, e: &mut E) {
+ self.unpack().encode(e)
+ }
+}
+
+impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> Decodable<D> for GenericArg<'tcx> {
+ fn decode(d: &mut D) -> GenericArg<'tcx> {
+ GenericArgKind::decode(d).pack()
+ }
+}
+
+/// A substitution mapping generic parameters to new values.
+pub type InternalSubsts<'tcx> = List<GenericArg<'tcx>>;
+
+pub type SubstsRef<'tcx> = &'tcx InternalSubsts<'tcx>;
+
+impl<'tcx> InternalSubsts<'tcx> {
+ /// Checks whether all elements of this list are types, if so, transmute.
+ pub fn try_as_type_list(&'tcx self) -> Option<&'tcx List<Ty<'tcx>>> {
+ if self.iter().all(|arg| matches!(arg.unpack(), GenericArgKind::Type(_))) {
+ assert_eq!(TYPE_TAG, 0);
+ // SAFETY: All elements are types, see `List<Ty<'tcx>>::as_substs`.
+ Some(unsafe { &*(self as *const List<GenericArg<'tcx>> as *const List<Ty<'tcx>>) })
+ } else {
+ None
+ }
+ }
+
+ /// Interpret these substitutions as the substitutions of a closure type.
+ /// Closure substitutions have a particular structure controlled by the
+ /// compiler that encodes information like the signature and closure kind;
+ /// see `ty::ClosureSubsts` struct for more comments.
+ pub fn as_closure(&'tcx self) -> ClosureSubsts<'tcx> {
+ ClosureSubsts { substs: self }
+ }
+
+ /// Interpret these substitutions as the substitutions of a generator type.
+ /// Generator substitutions have a particular structure controlled by the
+ /// compiler that encodes information like the signature and generator kind;
+ /// see `ty::GeneratorSubsts` struct for more comments.
+ pub fn as_generator(&'tcx self) -> GeneratorSubsts<'tcx> {
+ GeneratorSubsts { substs: self }
+ }
+
+ /// Interpret these substitutions as the substitutions of an inline const.
+ /// Inline const substitutions have a particular structure controlled by the
+ /// compiler that encodes information like the inferred type;
+ /// see `ty::InlineConstSubsts` struct for more comments.
+ pub fn as_inline_const(&'tcx self) -> InlineConstSubsts<'tcx> {
+ InlineConstSubsts { substs: self }
+ }
+
+ /// Creates an `InternalSubsts` that maps each generic parameter to itself.
+ pub fn identity_for_item(tcx: TyCtxt<'tcx>, def_id: DefId) -> SubstsRef<'tcx> {
+ Self::for_item(tcx, def_id, |param, _| tcx.mk_param_from_def(param))
+ }
+
+ /// Creates an `InternalSubsts` for generic parameter definitions,
+ /// by calling closures to obtain each kind.
+ /// The closures get to observe the `InternalSubsts` as they're
+ /// being built, which can be used to correctly
+ /// substitute defaults of generic parameters.
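+ ///
+ /// Illustrative sketch: `identity_for_item` above is implemented as
+ /// ```ignore (illustrative)
+ /// InternalSubsts::for_item(tcx, def_id, |param, _| tcx.mk_param_from_def(param))
+ /// ```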
+ pub fn for_item<F>(tcx: TyCtxt<'tcx>, def_id: DefId, mut mk_kind: F) -> SubstsRef<'tcx>
+ where
+ F: FnMut(&ty::GenericParamDef, &[GenericArg<'tcx>]) -> GenericArg<'tcx>,
+ {
+ let defs = tcx.generics_of(def_id);
+ let count = defs.count();
+ let mut substs = SmallVec::with_capacity(count);
+ Self::fill_item(&mut substs, tcx, defs, &mut mk_kind);
+ tcx.intern_substs(&substs)
+ }
+
+ pub fn extend_to<F>(&self, tcx: TyCtxt<'tcx>, def_id: DefId, mut mk_kind: F) -> SubstsRef<'tcx>
+ where
+ F: FnMut(&ty::GenericParamDef, &[GenericArg<'tcx>]) -> GenericArg<'tcx>,
+ {
+ Self::for_item(tcx, def_id, |param, substs| {
+ self.get(param.index as usize).cloned().unwrap_or_else(|| mk_kind(param, substs))
+ })
+ }
+
+ pub fn fill_item<F>(
+ substs: &mut SmallVec<[GenericArg<'tcx>; 8]>,
+ tcx: TyCtxt<'tcx>,
+ defs: &ty::Generics,
+ mk_kind: &mut F,
+ ) where
+ F: FnMut(&ty::GenericParamDef, &[GenericArg<'tcx>]) -> GenericArg<'tcx>,
+ {
+ if let Some(def_id) = defs.parent {
+ let parent_defs = tcx.generics_of(def_id);
+ Self::fill_item(substs, tcx, parent_defs, mk_kind);
+ }
+ Self::fill_single(substs, defs, mk_kind)
+ }
+
+ pub fn fill_single<F>(
+ substs: &mut SmallVec<[GenericArg<'tcx>; 8]>,
+ defs: &ty::Generics,
+ mk_kind: &mut F,
+ ) where
+ F: FnMut(&ty::GenericParamDef, &[GenericArg<'tcx>]) -> GenericArg<'tcx>,
+ {
+ substs.reserve(defs.params.len());
+ for param in &defs.params {
+ let kind = mk_kind(param, substs);
+ assert_eq!(param.index as usize, substs.len());
+ substs.push(kind);
+ }
+ }
+
+ #[inline]
+ pub fn types(&'tcx self) -> impl DoubleEndedIterator<Item = Ty<'tcx>> + 'tcx {
+ self.iter()
+ .filter_map(|k| if let GenericArgKind::Type(ty) = k.unpack() { Some(ty) } else { None })
+ }
+
+ #[inline]
+ pub fn regions(&'tcx self) -> impl DoubleEndedIterator<Item = ty::Region<'tcx>> + 'tcx {
+ self.iter().filter_map(|k| {
+ if let GenericArgKind::Lifetime(lt) = k.unpack() { Some(lt) } else { None }
+ })
+ }
+
+ #[inline]
+ pub fn consts(&'tcx self) -> impl DoubleEndedIterator<Item = ty::Const<'tcx>> + 'tcx {
+ self.iter().filter_map(|k| {
+ if let GenericArgKind::Const(ct) = k.unpack() { Some(ct) } else { None }
+ })
+ }
+
+ #[inline]
+ pub fn non_erasable_generics(
+ &'tcx self,
+ ) -> impl DoubleEndedIterator<Item = GenericArgKind<'tcx>> + 'tcx {
+ self.iter().filter_map(|k| match k.unpack() {
+ GenericArgKind::Lifetime(_) => None,
+ generic => Some(generic),
+ })
+ }
+
+ #[inline]
+ pub fn type_at(&self, i: usize) -> Ty<'tcx> {
+ if let GenericArgKind::Type(ty) = self[i].unpack() {
+ ty
+ } else {
+ bug!("expected type for param #{} in {:?}", i, self);
+ }
+ }
+
+ #[inline]
+ pub fn region_at(&self, i: usize) -> ty::Region<'tcx> {
+ if let GenericArgKind::Lifetime(lt) = self[i].unpack() {
+ lt
+ } else {
+ bug!("expected region for param #{} in {:?}", i, self);
+ }
+ }
+
+ #[inline]
+ pub fn const_at(&self, i: usize) -> ty::Const<'tcx> {
+ if let GenericArgKind::Const(ct) = self[i].unpack() {
+ ct
+ } else {
+ bug!("expected const for param #{} in {:?}", i, self);
+ }
+ }
+
+ #[inline]
+ pub fn type_for_def(&self, def: &ty::GenericParamDef) -> GenericArg<'tcx> {
+ self.type_at(def.index as usize).into()
+ }
+
+ /// Transform from substitutions for a child of `source_ancestor`
+ /// (e.g., a trait or impl) to substitutions for the same child
+ /// in a different item, with `target_substs` as the base for
+ /// the target impl/trait, with the source child-specific
+ /// parameters (e.g., method parameters) on top of that base.
+ ///
+ /// For example given:
+ ///
+ /// ```no_run
+ /// trait X<S> { fn f<T>(); }
+ /// impl<U> X<U> for U { fn f<V>() {} }
+ /// ```
+ ///
+ /// * If `self` is `[Self, S, T]` (the identity substs of `f` in the trait),
+ /// * `source_ancestor` is the def_id of the trait, and
+ /// * `target_substs` is `[U]` (the substs for the impl),
+ /// * then we will return `[U, T]`, the substs for `f` in the impl that
+ ///   are needed for it to match the trait.
+ pub fn rebase_onto(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ source_ancestor: DefId,
+ target_substs: SubstsRef<'tcx>,
+ ) -> SubstsRef<'tcx> {
+ let defs = tcx.generics_of(source_ancestor);
+ tcx.mk_substs(target_substs.iter().chain(self.iter().skip(defs.params.len())))
+ }
+
+ pub fn truncate_to(&self, tcx: TyCtxt<'tcx>, generics: &ty::Generics) -> SubstsRef<'tcx> {
+ tcx.mk_substs(self.iter().take(generics.count()))
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for SubstsRef<'tcx> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ // This code is hot enough that it's worth specializing for the most
+ // common length lists, to avoid the overhead of `SmallVec` creation.
+ // The match arms are in order of frequency. The 1, 2, and 0 cases are
+ // typically hit in 90--99.99% of cases. When folding doesn't change
+ // the substs, it's faster to reuse the existing substs rather than
+ // calling `intern_substs`.
+ match self.len() {
+ 1 => {
+ let param0 = self[0].try_fold_with(folder)?;
+ if param0 == self[0] { Ok(self) } else { Ok(folder.tcx().intern_substs(&[param0])) }
+ }
+ 2 => {
+ let param0 = self[0].try_fold_with(folder)?;
+ let param1 = self[1].try_fold_with(folder)?;
+ if param0 == self[0] && param1 == self[1] {
+ Ok(self)
+ } else {
+ Ok(folder.tcx().intern_substs(&[param0, param1]))
+ }
+ }
+ 0 => Ok(self),
+ _ => ty::util::fold_list(self, folder, |tcx, v| tcx.intern_substs(v)),
+ }
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for SubstsRef<'tcx> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ self.iter().try_for_each(|t| t.visit_with(visitor))
+ }
+}
+
+impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List<Ty<'tcx>> {
+ fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
+ // This code is fairly hot, though not as hot as `SubstsRef`.
+ //
+ // When compiling stage 2, I get the following results:
+ //
+ // len | total | %
+ // --- | --------- | -----
+ // 2 | 15083590 | 48.1
+ // 3 | 7540067 | 24.0
+ // 1 | 5300377 | 16.9
+ // 4 | 1351897 | 4.3
+ // 0 | 1256849 | 4.0
+ //
+ // I've tried it with some private repositories and got
+ // close to the same result, with 4 and 0 swapping places
+ // sometimes.
+ match self.len() {
+ 2 => {
+ let param0 = self[0].try_fold_with(folder)?;
+ let param1 = self[1].try_fold_with(folder)?;
+ if param0 == self[0] && param1 == self[1] {
+ Ok(self)
+ } else {
+ Ok(folder.tcx().intern_type_list(&[param0, param1]))
+ }
+ }
+ _ => ty::util::fold_list(self, folder, |tcx, v| tcx.intern_type_list(v)),
+ }
+ }
+}
+
+impl<'tcx> TypeVisitable<'tcx> for &'tcx ty::List<Ty<'tcx>> {
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
+ self.iter().try_for_each(|t| t.visit_with(visitor))
+ }
+}
+
+// Just call `foo.subst(tcx, substs)` to perform a substitution across `foo`.
+#[rustc_on_unimplemented(message = "Calling `subst` must now be done through an `EarlyBinder`")]
+pub trait Subst<'tcx>: Sized {
+ type Inner;
+
+ fn subst(self, tcx: TyCtxt<'tcx>, substs: &[GenericArg<'tcx>]) -> Self::Inner;
+}
+
+impl<'tcx, T: TypeFoldable<'tcx>> Subst<'tcx> for ty::EarlyBinder<T> {
+ type Inner = T;
+
+ fn subst(self, tcx: TyCtxt<'tcx>, substs: &[GenericArg<'tcx>]) -> Self::Inner {
+ let mut folder = SubstFolder { tcx, substs, binders_passed: 0 };
+ self.0.fold_with(&mut folder)
+ }
+}
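+
+// Illustrative usage (a hypothetical sketch, not part of this file): the only
+// way to call `subst` is through an `EarlyBinder`, e.g.:
+//
+//     let field_ty = tcx.bound_type_of(field_def_id).subst(tcx, substs);
+//
+// where `bound_type_of` (see `ty::util`) wraps the raw query result in a
+// `ty::EarlyBinder`.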
+
+///////////////////////////////////////////////////////////////////////////
+// The actual substitution engine itself is a type folder.
+
+struct SubstFolder<'a, 'tcx> {
+ tcx: TyCtxt<'tcx>,
+ substs: &'a [GenericArg<'tcx>],
+
+ /// Number of region binders we have passed through while doing the substitution
+ binders_passed: u32,
+}
+
+impl<'a, 'tcx> TypeFolder<'tcx> for SubstFolder<'a, 'tcx> {
+ #[inline]
+ fn tcx<'b>(&'b self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn fold_binder<T: TypeFoldable<'tcx>>(
+ &mut self,
+ t: ty::Binder<'tcx, T>,
+ ) -> ty::Binder<'tcx, T> {
+ self.binders_passed += 1;
+ let t = t.super_fold_with(self);
+ self.binders_passed -= 1;
+ t
+ }
+
+ fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
+ #[cold]
+ #[inline(never)]
+ fn region_param_out_of_range(data: ty::EarlyBoundRegion) -> ! {
+ bug!(
+ "Region parameter out of range when substituting in region {} (index={})",
+ data.name,
+ data.index
+ )
+ }
+
+ // Note: This routine only handles regions that are bound on
+ // type declarations and other outer declarations, not those
+ // bound in *fn types*. Region substitution of the bound
+ // regions that appear in a function signature is done using
+ // the specialized routine `ty::replace_late_bound_regions()`.
+ match *r {
+ ty::ReEarlyBound(data) => {
+ let rk = self.substs.get(data.index as usize).map(|k| k.unpack());
+ match rk {
+ Some(GenericArgKind::Lifetime(lt)) => self.shift_region_through_binders(lt),
+ _ => region_param_out_of_range(data),
+ }
+ }
+ _ => r,
+ }
+ }
+
+ fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
+ if !t.needs_subst() {
+ return t;
+ }
+
+ match *t.kind() {
+ ty::Param(p) => self.ty_for_param(p, t),
+ _ => t.super_fold_with(self),
+ }
+ }
+
+ fn fold_const(&mut self, c: ty::Const<'tcx>) -> ty::Const<'tcx> {
+ if let ty::ConstKind::Param(p) = c.kind() {
+ self.const_for_param(p, c)
+ } else {
+ c.super_fold_with(self)
+ }
+ }
+
+ #[inline]
+ fn fold_mir_const(&mut self, c: mir::ConstantKind<'tcx>) -> mir::ConstantKind<'tcx> {
+ c.super_fold_with(self)
+ }
+}
+
+impl<'a, 'tcx> SubstFolder<'a, 'tcx> {
+ fn ty_for_param(&self, p: ty::ParamTy, source_ty: Ty<'tcx>) -> Ty<'tcx> {
+ // Look up the type in the substitutions. It really should be in there.
+ let opt_ty = self.substs.get(p.index as usize).map(|k| k.unpack());
+ let ty = match opt_ty {
+ Some(GenericArgKind::Type(ty)) => ty,
+ Some(kind) => self.type_param_expected(p, source_ty, kind),
+ None => self.type_param_out_of_range(p, source_ty),
+ };
+
+ self.shift_vars_through_binders(ty)
+ }
+
+ #[cold]
+ #[inline(never)]
+ fn type_param_expected(&self, p: ty::ParamTy, ty: Ty<'tcx>, kind: GenericArgKind<'tcx>) -> ! {
+ bug!(
+ "expected type for `{:?}` ({:?}/{}) but found {:?} when substituting, substs={:?}",
+ p,
+ ty,
+ p.index,
+ kind,
+ self.substs,
+ )
+ }
+
+ #[cold]
+ #[inline(never)]
+ fn type_param_out_of_range(&self, p: ty::ParamTy, ty: Ty<'tcx>) -> ! {
+ bug!(
+ "type parameter `{:?}` ({:?}/{}) out of range when substituting, substs={:?}",
+ p,
+ ty,
+ p.index,
+ self.substs,
+ )
+ }
+
+ fn const_for_param(&self, p: ParamConst, source_ct: ty::Const<'tcx>) -> ty::Const<'tcx> {
+ // Look up the const in the substitutions. It really should be in there.
+ let opt_ct = self.substs.get(p.index as usize).map(|k| k.unpack());
+ let ct = match opt_ct {
+ Some(GenericArgKind::Const(ct)) => ct,
+ Some(kind) => self.const_param_expected(p, source_ct, kind),
+ None => self.const_param_out_of_range(p, source_ct),
+ };
+
+ self.shift_vars_through_binders(ct)
+ }
+
+ #[cold]
+ #[inline(never)]
+ fn const_param_expected(
+ &self,
+ p: ty::ParamConst,
+ ct: ty::Const<'tcx>,
+ kind: GenericArgKind<'tcx>,
+ ) -> ! {
+ bug!(
+ "expected const for `{:?}` ({:?}/{}) but found {:?} when substituting substs={:?}",
+ p,
+ ct,
+ p.index,
+ kind,
+ self.substs,
+ )
+ }
+
+ #[cold]
+ #[inline(never)]
+ fn const_param_out_of_range(&self, p: ty::ParamConst, ct: ty::Const<'tcx>) -> ! {
+ bug!(
+ "const parameter `{:?}` ({:?}/{}) out of range when substituting substs={:?}",
+ p,
+ ct,
+ p.index,
+ self.substs,
+ )
+ }
+
+ /// It is sometimes necessary to adjust the De Bruijn indices during substitution. This occurs
+ /// when we are substituting a type with escaping bound vars into a context where we have
+ /// passed through binders. That's quite a mouthful. Let's see an example:
+ ///
+ /// ```
+ /// type Func<A> = fn(A);
+ /// type MetaFunc = for<'a> fn(Func<&'a i32>);
+ /// ```
+ ///
+ /// The type `MetaFunc`, when fully expanded, will be
+ /// ```ignore (illustrative)
+ /// for<'a> fn(fn(&'a i32))
+ /// // ^~ ^~ ^~~
+ /// // | | |
+ /// // | | DebruijnIndex of 2
+ /// // Binders
+ /// ```
+ /// Here the `'a` lifetime is bound in the outer function, but appears as an argument of the
+ /// inner one. Therefore, that appearance will have a DebruijnIndex of 2, because we must skip
+ /// over the inner binder (remember that we count De Bruijn indices from 1). However, in the
+ /// definition of `MetaFunc`, the binder is not visible, so the type `&'a i32` will have a
+ /// De Bruijn index of 1. It's only during the substitution that we can see we must increase the
+ /// depth by 1 to account for the binder that we passed through.
+ ///
+ /// As a second example, consider this twist:
+ ///
+ /// ```
+ /// type FuncTuple<A> = (A,fn(A));
+ /// type MetaFuncTuple = for<'a> fn(FuncTuple<&'a i32>);
+ /// ```
+ ///
+ /// Here the final type will be:
+ /// ```ignore (illustrative)
+ /// for<'a> fn((&'a i32, fn(&'a i32)))
+ /// // ^~~ ^~~
+ /// // | |
+ /// // DebruijnIndex of 1 |
+ /// // DebruijnIndex of 2
+ /// ```
+ /// As indicated in the diagram, here the same type `&'a i32` is substituted once, but in the
+ /// first case we do not increase the De Bruijn index and in the second case we do. The reason
+ /// is that only in the second case have we passed through a fn binder.
+ fn shift_vars_through_binders<T: TypeFoldable<'tcx>>(&self, val: T) -> T {
+ debug!(
+ "shift_vars(val={:?}, binders_passed={:?}, has_escaping_bound_vars={:?})",
+ val,
+ self.binders_passed,
+ val.has_escaping_bound_vars()
+ );
+
+ if self.binders_passed == 0 || !val.has_escaping_bound_vars() {
+ return val;
+ }
+
+ let result = ty::fold::shift_vars(TypeFolder::tcx(self), val, self.binders_passed);
+ debug!("shift_vars: shifted result = {:?}", result);
+
+ result
+ }
+
+ fn shift_region_through_binders(&self, region: ty::Region<'tcx>) -> ty::Region<'tcx> {
+ if self.binders_passed == 0 || !region.has_escaping_bound_vars() {
+ return region;
+ }
+ ty::fold::shift_region(self.tcx, region, self.binders_passed)
+ }
+}
+
+/// Stores the user-given substs to reach some fully qualified path
+/// (e.g., `<T>::Item` or `<T as Trait>::Item`).
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, TyEncodable, TyDecodable)]
+#[derive(HashStable, TypeFoldable, TypeVisitable, Lift)]
+pub struct UserSubsts<'tcx> {
+ /// The substitutions for the item as given by the user.
+ pub substs: SubstsRef<'tcx>,
+
+ /// The self type, in the case of a `<T>::Item` path (when applied
+ /// to an inherent impl). See `UserSelfTy` below.
+ pub user_self_ty: Option<UserSelfTy<'tcx>>,
+}
+
+/// Specifies the user-given self type. In the case of a path that
+/// refers to a member in an inherent impl, this self type is
+/// sometimes needed to constrain the type parameters on the impl. For
+/// example, in this code:
+///
+/// ```ignore (illustrative)
+/// struct Foo<T> { }
+/// impl<A> Foo<A> { fn method() { } }
+/// ```
+///
+/// when you then have a path like `<Foo<&'static u32>>::method`,
+/// this struct would carry the `DefId` of the impl along with the
+/// self type `Foo<&'static u32>`. Then we can instantiate the parameters
+/// of the impl (with the substs from `UserSubsts`) and apply those to
+/// the self type, giving `Foo<?A>`. Finally, we unify that with
+/// the self type here, which constrains `?A` to be `&'static u32`.
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, TyEncodable, TyDecodable)]
+#[derive(HashStable, TypeFoldable, TypeVisitable, Lift)]
+pub struct UserSelfTy<'tcx> {
+ pub impl_def_id: DefId,
+ pub self_ty: Ty<'tcx>,
+}
diff --git a/compiler/rustc_middle/src/ty/trait_def.rs b/compiler/rustc_middle/src/ty/trait_def.rs
new file mode 100644
index 000000000..541dace5c
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/trait_def.rs
@@ -0,0 +1,272 @@
+use crate::traits::specialization_graph;
+use crate::ty::fast_reject::{self, SimplifiedType, TreatParams};
+use crate::ty::visit::TypeVisitable;
+use crate::ty::{Ident, Ty, TyCtxt};
+use hir::def_id::LOCAL_CRATE;
+use rustc_hir as hir;
+use rustc_hir::def_id::DefId;
+use std::iter;
+
+use rustc_data_structures::fx::FxIndexMap;
+use rustc_errors::ErrorGuaranteed;
+use rustc_macros::HashStable;
+
+/// A trait's definition with type information.
+#[derive(HashStable, Encodable, Decodable)]
+pub struct TraitDef {
+ pub def_id: DefId,
+
+ pub unsafety: hir::Unsafety,
+
+ /// If `true`, then this trait had the `#[rustc_paren_sugar]`
+ /// attribute, indicating that it should be used with `Foo()`
+ /// sugar. This is a temporary thing -- eventually any trait will
+ /// be usable with the sugar (or without it).
+ pub paren_sugar: bool,
+
+ pub has_auto_impl: bool,
+
+ /// If `true`, then this trait has the `#[marker]` attribute, indicating
+ /// that all its associated items have defaults that cannot be overridden,
+ /// and thus `impl`s of it are allowed to overlap.
+ pub is_marker: bool,
+
+ /// If `true`, then this trait has the `#[rustc_skip_array_during_method_dispatch]`
+ /// attribute, indicating that editions before 2021 should not consider this trait
+ /// during method dispatch if the receiver is an array.
+ pub skip_array_during_method_dispatch: bool,
+
+ /// Used to determine whether the standard library is allowed to specialize
+ /// on this trait.
+ pub specialization_kind: TraitSpecializationKind,
+
+ /// List of functions from `#[rustc_must_implement_one_of]` attribute one of which
+ /// must be implemented.
+ pub must_implement_one_of: Option<Box<[Ident]>>,
+}
+
+/// Whether this trait is treated specially by the standard library
+/// specialization lint.
+#[derive(HashStable, PartialEq, Clone, Copy, Encodable, Decodable)]
+pub enum TraitSpecializationKind {
+ /// The default. Specializing on this trait is not allowed.
+ None,
+ /// Specializing on this trait is allowed because it doesn't have any
+ /// methods. For example `Sized` or `FusedIterator`.
+ /// Applies to traits with the `rustc_unsafe_specialization_marker`
+ /// attribute.
+ Marker,
+ /// Specializing on this trait is allowed because all of the impls of this
+ /// trait are "always applicable". Always applicable means that if
+ /// `X<'x>: T<'y>` for any lifetimes, then `for<'a, 'b> X<'a>: T<'b>`.
+ /// Applies to traits with the `rustc_specialization_trait` attribute.
+ AlwaysApplicable,
+}
+
+#[derive(Default, Debug, HashStable)]
+pub struct TraitImpls {
+ blanket_impls: Vec<DefId>,
+ /// Impls indexed by their simplified self type, for fast lookup.
+ non_blanket_impls: FxIndexMap<SimplifiedType, Vec<DefId>>,
+}
+
+impl TraitImpls {
+ pub fn blanket_impls(&self) -> &[DefId] {
+ self.blanket_impls.as_slice()
+ }
+
+ pub fn non_blanket_impls(&self) -> &FxIndexMap<SimplifiedType, Vec<DefId>> {
+ &self.non_blanket_impls
+ }
+}
+
+impl<'tcx> TraitDef {
+ pub fn new(
+ def_id: DefId,
+ unsafety: hir::Unsafety,
+ paren_sugar: bool,
+ has_auto_impl: bool,
+ is_marker: bool,
+ skip_array_during_method_dispatch: bool,
+ specialization_kind: TraitSpecializationKind,
+ must_implement_one_of: Option<Box<[Ident]>>,
+ ) -> TraitDef {
+ TraitDef {
+ def_id,
+ unsafety,
+ paren_sugar,
+ has_auto_impl,
+ is_marker,
+ skip_array_during_method_dispatch,
+ specialization_kind,
+ must_implement_one_of,
+ }
+ }
+
+ pub fn ancestors(
+ &self,
+ tcx: TyCtxt<'tcx>,
+ of_impl: DefId,
+ ) -> Result<specialization_graph::Ancestors<'tcx>, ErrorGuaranteed> {
+ specialization_graph::ancestors(tcx, self.def_id, of_impl)
+ }
+}
+
+impl<'tcx> TyCtxt<'tcx> {
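+ /// Calls `f` on every impl of the trait `def_id`, blanket and
+ /// non-blanket alike.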
+ pub fn for_each_impl<F: FnMut(DefId)>(self, def_id: DefId, mut f: F) {
+ let impls = self.trait_impls_of(def_id);
+
+ for &impl_def_id in impls.blanket_impls.iter() {
+ f(impl_def_id);
+ }
+
+ for v in impls.non_blanket_impls.values() {
+ for &impl_def_id in v {
+ f(impl_def_id);
+ }
+ }
+ }
+
+ /// Iterates over every impl that could possibly match the
+ /// self type `self_ty`.
+ pub fn for_each_relevant_impl<F: FnMut(DefId)>(
+ self,
+ def_id: DefId,
+ self_ty: Ty<'tcx>,
+ mut f: F,
+ ) {
+ let _: Option<()> = self.find_map_relevant_impl(def_id, self_ty, |did| {
+ f(did);
+ None
+ });
+ }
+
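+ /// Returns an iterator over the non-blanket impls of the trait `def_id`
+ /// whose simplified self type matches `self_ty`. Blanket impls are never
+ /// yielded; if `self_ty` cannot be simplified, the iterator is empty.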
+ pub fn non_blanket_impls_for_ty(
+ self,
+ def_id: DefId,
+ self_ty: Ty<'tcx>,
+ ) -> impl Iterator<Item = DefId> + 'tcx {
+ let impls = self.trait_impls_of(def_id);
+ if let Some(simp) = fast_reject::simplify_type(self, self_ty, TreatParams::AsInfer) {
+ if let Some(impls) = impls.non_blanket_impls.get(&simp) {
+ return impls.iter().copied();
+ }
+ }
+
+ [].iter().copied()
+ }
+
+ /// Applies the given function to every impl that could possibly match the
+ /// self type `self_ty`, and returns the first non-`None` value.
+ pub fn find_map_relevant_impl<T, F: FnMut(DefId) -> Option<T>>(
+ self,
+ def_id: DefId,
+ self_ty: Ty<'tcx>,
+ mut f: F,
+ ) -> Option<T> {
+ // FIXME: This depends on the set of all impls for the trait. That is
+ // unfortunate wrt. incremental compilation.
+ //
+ // If we want to be faster, we could have separate queries for
+ // blanket and non-blanket impls, and compare them separately.
+ let impls = self.trait_impls_of(def_id);
+
+ for &impl_def_id in impls.blanket_impls.iter() {
+ if let result @ Some(_) = f(impl_def_id) {
+ return result;
+ }
+ }
+
+ // Note that we're using `TreatParams::AsPlaceholder` to query
+ // `non_blanket_impls`, whereas the impls were added to the map using
+ // `TreatParams::AsInfer`.
+ //
+ // This way, when searching for some impl for `T: Trait`, we do not look at any impls
+ // whose outer level is not a parameter or projection. Especially for things like
+ // `T: Clone` this is incredibly useful as we would otherwise look at all the impls
+ // of `Clone` for `Option<T>`, `Vec<T>`, `ConcreteType` and so on.
+ if let Some(simp) = fast_reject::simplify_type(self, self_ty, TreatParams::AsPlaceholder) {
+ if let Some(impls) = impls.non_blanket_impls.get(&simp) {
+ for &impl_def_id in impls {
+ if let result @ Some(_) = f(impl_def_id) {
+ return result;
+ }
+ }
+ }
+ } else {
+ for &impl_def_id in impls.non_blanket_impls.values().flatten() {
+ if let result @ Some(_) = f(impl_def_id) {
+ return result;
+ }
+ }
+ }
+
+ None
+ }
+
+ /// Returns an iterator over all impls of the trait `def_id`.
+ pub fn all_impls(self, def_id: DefId) -> impl Iterator<Item = DefId> + 'tcx {
+ let TraitImpls { blanket_impls, non_blanket_impls } = self.trait_impls_of(def_id);
+
+ blanket_impls.iter().chain(non_blanket_impls.iter().flat_map(|(_, v)| v)).cloned()
+ }
+}
+
+// Query provider for `trait_impls_of`.
+pub(super) fn trait_impls_of_provider(tcx: TyCtxt<'_>, trait_id: DefId) -> TraitImpls {
+ let mut impls = TraitImpls::default();
+
+ // Traits defined in the current crate can't have impls in upstream
+ // crates, so we don't bother querying the cstore.
+ if !trait_id.is_local() {
+ for &cnum in tcx.crates(()).iter() {
+ for &(impl_def_id, simplified_self_ty) in
+ tcx.implementations_of_trait((cnum, trait_id)).iter()
+ {
+ if let Some(simplified_self_ty) = simplified_self_ty {
+ impls
+ .non_blanket_impls
+ .entry(simplified_self_ty)
+ .or_default()
+ .push(impl_def_id);
+ } else {
+ impls.blanket_impls.push(impl_def_id);
+ }
+ }
+ }
+ }
+
+ for &impl_def_id in tcx.hir().trait_impls(trait_id) {
+ let impl_def_id = impl_def_id.to_def_id();
+
+ let impl_self_ty = tcx.type_of(impl_def_id);
+ if impl_self_ty.references_error() {
+ continue;
+ }
+
+ if let Some(simplified_self_ty) =
+ fast_reject::simplify_type(tcx, impl_self_ty, TreatParams::AsInfer)
+ {
+ impls.non_blanket_impls.entry(simplified_self_ty).or_default().push(impl_def_id);
+ } else {
+ impls.blanket_impls.push(impl_def_id);
+ }
+ }
+
+ impls
+}
+
+// Query provider for `incoherent_impls`.
+#[instrument(level = "debug", skip(tcx))]
+pub(super) fn incoherent_impls_provider(tcx: TyCtxt<'_>, simp: SimplifiedType) -> &[DefId] {
+ let mut impls = Vec::new();
+
+ for cnum in iter::once(LOCAL_CRATE).chain(tcx.crates(()).iter().copied()) {
+ for &impl_def_id in tcx.crate_incoherent_impls((cnum, simp)) {
+ impls.push(impl_def_id)
+ }
+ }
+
+ debug!(?impls);
+
+ tcx.arena.alloc_slice(&impls)
+}
diff --git a/compiler/rustc_middle/src/ty/util.rs b/compiler/rustc_middle/src/ty/util.rs
new file mode 100644
index 000000000..591bb7831
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/util.rs
@@ -0,0 +1,1294 @@
+//! Miscellaneous type-system utilities that are too small to deserve their own modules.
+
+use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
+use crate::ty::layout::IntegerExt;
+use crate::ty::query::TyCtxtAt;
+use crate::ty::subst::{GenericArgKind, Subst, SubstsRef};
+use crate::ty::{
+ self, DefIdTree, FallibleTypeFolder, Ty, TyCtxt, TypeFoldable, TypeFolder, TypeSuperFoldable,
+ TypeVisitable,
+};
+use rustc_apfloat::Float as _;
+use rustc_ast as ast;
+use rustc_attr::{self as attr, SignedInt, UnsignedInt};
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
+use rustc_errors::ErrorGuaranteed;
+use rustc_hir as hir;
+use rustc_hir::def::{CtorOf, DefKind, Res};
+use rustc_hir::def_id::DefId;
+use rustc_index::bit_set::GrowableBitSet;
+use rustc_macros::HashStable;
+use rustc_span::{sym, DUMMY_SP};
+use rustc_target::abi::{Integer, Size, TargetDataLayout};
+use rustc_target::spec::abi::Abi;
+use smallvec::SmallVec;
+use std::{fmt, iter};
+
+#[derive(Copy, Clone, Debug)]
+pub struct Discr<'tcx> {
+ /// Bit representation of the discriminant (e.g., `-128i8` is `0x80_u128`).
+ pub val: u128,
+ pub ty: Ty<'tcx>,
+}
+
+/// Used as an input to [`TyCtxt::uses_unique_generic_params`].
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum IgnoreRegions {
+ Yes,
+ No,
+}
+
+#[derive(Copy, Clone, Debug)]
+pub enum NotUniqueParam<'tcx> {
+ DuplicateParam(ty::GenericArg<'tcx>),
+ NotParam(ty::GenericArg<'tcx>),
+}
+
+impl<'tcx> fmt::Display for Discr<'tcx> {
+ fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self.ty.kind() {
+ ty::Int(ity) => {
+ let size = ty::tls::with(|tcx| Integer::from_int_ty(&tcx, ity).size());
+ let x = self.val;
+ // sign extend the raw representation to be an i128
+ let x = size.sign_extend(x) as i128;
+ write!(fmt, "{}", x)
+ }
+ _ => write!(fmt, "{}", self.val),
+ }
+ }
+}
+
+fn int_size_and_signed<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> (Size, bool) {
+ let (int, signed) = match *ty.kind() {
+ ty::Int(ity) => (Integer::from_int_ty(&tcx, ity), true),
+ ty::Uint(uty) => (Integer::from_uint_ty(&tcx, uty), false),
+ _ => bug!("non integer discriminant"),
+ };
+ (int.size(), signed)
+}
+
+impl<'tcx> Discr<'tcx> {
+ /// Adds `1` to the value and wraps around if the maximum for the type is reached.
+ pub fn wrap_incr(self, tcx: TyCtxt<'tcx>) -> Self {
+ self.checked_add(tcx, 1).0
+ }
+ pub fn checked_add(self, tcx: TyCtxt<'tcx>, n: u128) -> (Self, bool) {
+ let (size, signed) = int_size_and_signed(tcx, self.ty);
+ let (val, oflo) = if signed {
+ let min = size.signed_int_min();
+ let max = size.signed_int_max();
+ let val = size.sign_extend(self.val) as i128;
+ assert!(n < (i128::MAX as u128));
+ let n = n as i128;
+ let oflo = val > max - n;
+ let val = if oflo { min + (n - (max - val) - 1) } else { val + n };
+ // zero the upper bits
+ let val = val as u128;
+ let val = size.truncate(val);
+ (val, oflo)
+ } else {
+ let max = size.unsigned_int_max();
+ let val = self.val;
+ let oflo = val > max - n;
+ let val = if oflo { n - (max - val) - 1 } else { val + n };
+ (val, oflo)
+ };
+ (Self { val, ty: self.ty }, oflo)
+ }
+}
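+
+// Illustrative (hypothetical values): for a `u8` discriminant,
+// `Discr { val: 255, ty }.checked_add(tcx, 1)` overflows and wraps around,
+// yielding `(Discr { val: 0, ty }, true)`; `wrap_incr` simply discards the
+// overflow flag.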
+
+pub trait IntTypeExt {
+ fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx>;
+ fn disr_incr<'tcx>(&self, tcx: TyCtxt<'tcx>, val: Option<Discr<'tcx>>) -> Option<Discr<'tcx>>;
+ fn initial_discriminant<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Discr<'tcx>;
+}
+
+impl IntTypeExt for attr::IntType {
+ fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Ty<'tcx> {
+ match *self {
+ SignedInt(ast::IntTy::I8) => tcx.types.i8,
+ SignedInt(ast::IntTy::I16) => tcx.types.i16,
+ SignedInt(ast::IntTy::I32) => tcx.types.i32,
+ SignedInt(ast::IntTy::I64) => tcx.types.i64,
+ SignedInt(ast::IntTy::I128) => tcx.types.i128,
+ SignedInt(ast::IntTy::Isize) => tcx.types.isize,
+ UnsignedInt(ast::UintTy::U8) => tcx.types.u8,
+ UnsignedInt(ast::UintTy::U16) => tcx.types.u16,
+ UnsignedInt(ast::UintTy::U32) => tcx.types.u32,
+ UnsignedInt(ast::UintTy::U64) => tcx.types.u64,
+ UnsignedInt(ast::UintTy::U128) => tcx.types.u128,
+ UnsignedInt(ast::UintTy::Usize) => tcx.types.usize,
+ }
+ }
+
+ fn initial_discriminant<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Discr<'tcx> {
+ Discr { val: 0, ty: self.to_ty(tcx) }
+ }
+
+ fn disr_incr<'tcx>(&self, tcx: TyCtxt<'tcx>, val: Option<Discr<'tcx>>) -> Option<Discr<'tcx>> {
+ if let Some(val) = val {
+ assert_eq!(self.to_ty(tcx), val.ty);
+ let (new, oflo) = val.checked_add(tcx, 1);
+ if oflo { None } else { Some(new) }
+ } else {
+ Some(self.initial_discriminant(tcx))
+ }
+ }
+}
+
+impl<'tcx> TyCtxt<'tcx> {
+ /// Creates a hash of the type `Ty` which will be the same no matter what crate
+ /// context it's calculated within. This is used by the `type_id` intrinsic.
+ pub fn type_id_hash(self, ty: Ty<'tcx>) -> u64 {
+ // We want the type_id to be independent of the type's free regions, so we
+ // erase them. The erase_regions() call will also anonymize bound
+ // regions, which is desirable too.
+ let ty = self.erase_regions(ty);
+
+ self.with_stable_hashing_context(|mut hcx| {
+ let mut hasher = StableHasher::new();
+ hcx.while_hashing_spans(false, |hcx| ty.hash_stable(hcx, &mut hasher));
+ hasher.finish()
+ })
+ }
+
+ pub fn res_generics_def_id(self, res: Res) -> Option<DefId> {
+ match res {
+ Res::Def(DefKind::Ctor(CtorOf::Variant, _), def_id) => {
+ Some(self.parent(self.parent(def_id)))
+ }
+ Res::Def(DefKind::Variant | DefKind::Ctor(CtorOf::Struct, _), def_id) => {
+ Some(self.parent(def_id))
+ }
+ // Other `DefKind`s don't have generics and would ICE when calling
+ // `generics_of`.
+ Res::Def(
+ DefKind::Struct
+ | DefKind::Union
+ | DefKind::Enum
+ | DefKind::Trait
+ | DefKind::OpaqueTy
+ | DefKind::TyAlias
+ | DefKind::ForeignTy
+ | DefKind::TraitAlias
+ | DefKind::AssocTy
+ | DefKind::Fn
+ | DefKind::AssocFn
+ | DefKind::AssocConst
+ | DefKind::Impl,
+ def_id,
+ ) => Some(def_id),
+ Res::Err => None,
+ _ => None,
+ }
+ }
+
+ pub fn has_error_field(self, ty: Ty<'tcx>) -> bool {
+ if let ty::Adt(def, substs) = *ty.kind() {
+ for field in def.all_fields() {
+ let field_ty = field.ty(self, substs);
+ if let ty::Error(_) = field_ty.kind() {
+ return true;
+ }
+ }
+ }
+ false
+ }
+
+ /// Attempts to return the deeply last field of nested structures, but
+ /// does not apply any normalization in its search. Returns the same type
+ /// if the input `ty` is not a structure at all.
+ pub fn struct_tail_without_normalization(self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ let tcx = self;
+ tcx.struct_tail_with_normalize(ty, |ty| ty, || {})
+ }
+
+ /// Returns the deeply last field of nested structures, or the same type if
+ /// not a structure at all. Corresponds to the only possible unsized field,
+ /// and its type can be used to determine unsizing strategy.
+ ///
+ /// Should only be called if `ty` has no inference variables and does not
+ /// need its lifetimes preserved (e.g. as part of codegen); otherwise the
+ /// normalization attempt may cause compiler bugs.
+ pub fn struct_tail_erasing_lifetimes(
+ self,
+ ty: Ty<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ ) -> Ty<'tcx> {
+ let tcx = self;
+ tcx.struct_tail_with_normalize(ty, |ty| tcx.normalize_erasing_regions(param_env, ty), || {})
+ }
+
+ /// Returns the deeply last field of nested structures, or the same type if
+ /// not a structure at all. Corresponds to the only possible unsized field,
+ /// and its type can be used to determine unsizing strategy.
+ ///
+ /// This is parameterized over the normalization strategy (i.e. how to
+ /// handle `<T as Trait>::Assoc` and `impl Trait`); pass the identity
+ /// function to indicate no normalization should take place.
+ ///
+ /// See also `struct_tail_erasing_lifetimes`, which is suitable for use
+ /// during codegen.
+ pub fn struct_tail_with_normalize(
+ self,
+ mut ty: Ty<'tcx>,
+ mut normalize: impl FnMut(Ty<'tcx>) -> Ty<'tcx>,
+ // This is currently used to allow us to walk a ValTree
+ // in lockstep with the type in order to get the ValTree branch that
+ // corresponds to an unsized field.
+ mut f: impl FnMut() -> (),
+ ) -> Ty<'tcx> {
+ let recursion_limit = self.recursion_limit();
+ for iteration in 0.. {
+ if !recursion_limit.value_within_limit(iteration) {
+ return self.ty_error_with_message(
+ DUMMY_SP,
+ &format!("reached the recursion limit finding the struct tail for {}", ty),
+ );
+ }
+ match *ty.kind() {
+ ty::Adt(def, substs) => {
+ if !def.is_struct() {
+ break;
+ }
+ match def.non_enum_variant().fields.last() {
+ Some(field) => {
+ f();
+ ty = field.ty(self, substs);
+ }
+ None => break,
+ }
+ }
+
+ ty::Tuple(tys) if let Some((&last_ty, _)) = tys.split_last() => {
+ f();
+ ty = last_ty;
+ }
+
+ ty::Tuple(_) => break,
+
+ ty::Projection(_) | ty::Opaque(..) => {
+ let normalized = normalize(ty);
+ if ty == normalized {
+ return ty;
+ } else {
+ ty = normalized;
+ }
+ }
+
+ _ => {
+ break;
+ }
+ }
+ }
+ ty
+ }
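+
+ // Illustrative (hypothetical type): given `struct Wrapper<T: ?Sized>(u8, T)`,
+ // the struct tail of `Wrapper<Wrapper<[u8]>>` is found by following the last
+ // field twice, yielding `[u8]` -- the only possibly-unsized component.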
+
+ /// Same as applying `struct_tail` on `source` and `target`, but only
+ /// keeps going as long as the two types are instances of the same
+ /// structure definitions.
+ /// For `(Foo<Foo<T>>, Foo<dyn Trait>)`, the result will be `(Foo<T>, Trait)`,
+ /// whereas `struct_tail` produces `T` and `Trait`, respectively.
+ ///
+ /// Should only be called if the types have no inference variables and do
+ /// not need their lifetimes preserved (e.g., as part of codegen); otherwise,
+ /// the normalization attempt may cause compiler bugs.
+ pub fn struct_lockstep_tails_erasing_lifetimes(
+ self,
+ source: Ty<'tcx>,
+ target: Ty<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ ) -> (Ty<'tcx>, Ty<'tcx>) {
+ let tcx = self;
+ tcx.struct_lockstep_tails_with_normalize(source, target, |ty| {
+ tcx.normalize_erasing_regions(param_env, ty)
+ })
+ }
+
+ /// Same as applying `struct_tail` on `source` and `target`, but only
+ /// keeps going as long as the two types are instances of the same
+ /// structure definitions.
+ /// For `(Foo<Foo<T>>, Foo<dyn Trait>)`, the result will be `(Foo<T>, Trait)`,
+ /// whereas `struct_tail` produces `T` and `Trait`, respectively.
+ ///
+ /// See also `struct_lockstep_tails_erasing_lifetimes`, which is suitable for use
+ /// during codegen.
+ pub fn struct_lockstep_tails_with_normalize(
+ self,
+ source: Ty<'tcx>,
+ target: Ty<'tcx>,
+ normalize: impl Fn(Ty<'tcx>) -> Ty<'tcx>,
+ ) -> (Ty<'tcx>, Ty<'tcx>) {
+ let (mut a, mut b) = (source, target);
+ loop {
+ match (&a.kind(), &b.kind()) {
+ (&ty::Adt(a_def, a_substs), &ty::Adt(b_def, b_substs))
+ if a_def == b_def && a_def.is_struct() =>
+ {
+ if let Some(f) = a_def.non_enum_variant().fields.last() {
+ a = f.ty(self, a_substs);
+ b = f.ty(self, b_substs);
+ } else {
+ break;
+ }
+ }
+ (&ty::Tuple(a_tys), &ty::Tuple(b_tys)) if a_tys.len() == b_tys.len() => {
+ if let Some(&a_last) = a_tys.last() {
+ a = a_last;
+ b = *b_tys.last().unwrap();
+ } else {
+ break;
+ }
+ }
+ (ty::Projection(_) | ty::Opaque(..), _)
+ | (_, ty::Projection(_) | ty::Opaque(..)) => {
+ // If either side is a projection, attempt to
+ // progress via normalization. (Should be safe to
+ // apply to both sides as normalization is
+ // idempotent.)
+ let a_norm = normalize(a);
+ let b_norm = normalize(b);
+ if a == a_norm && b == b_norm {
+ break;
+ } else {
+ a = a_norm;
+ b = b_norm;
+ }
+ }
+
+ _ => break,
+ }
+ }
+ (a, b)
+ }
+
+ /// Calculate the destructor of a given type.
+ pub fn calculate_dtor(
+ self,
+ adt_did: DefId,
+ validate: impl Fn(Self, DefId) -> Result<(), ErrorGuaranteed>,
+ ) -> Option<ty::Destructor> {
+ let drop_trait = self.lang_items().drop_trait()?;
+ self.ensure().coherent_trait(drop_trait);
+
+ let ty = self.type_of(adt_did);
+ let (did, constness) = self.find_map_relevant_impl(drop_trait, ty, |impl_did| {
+ if let Some(item_id) = self.associated_item_def_ids(impl_did).first() {
+ if validate(self, impl_did).is_ok() {
+ return Some((*item_id, self.constness(impl_did)));
+ }
+ }
+ None
+ })?;
+
+ Some(ty::Destructor { did, constness })
+ }
+
+ /// Returns the set of types that are required to be alive in
+ /// order to run the destructor of `def` (see RFCs 769 and
+ /// 1238).
+ ///
+ /// Note that this returns only the constraints for the
+ /// destructor of `def` itself. For the destructors of the
+ /// contents, you need `adt_dtorck_constraint`.
+ pub fn destructor_constraints(self, def: ty::AdtDef<'tcx>) -> Vec<ty::subst::GenericArg<'tcx>> {
+ let dtor = match def.destructor(self) {
+ None => {
+ debug!("destructor_constraints({:?}) - no dtor", def.did());
+ return vec![];
+ }
+ Some(dtor) => dtor.did,
+ };
+
+ let impl_def_id = self.parent(dtor);
+ let impl_generics = self.generics_of(impl_def_id);
+
+ // We have a destructor - all the parameters that are not
+ // pure_wrt_drop (i.e, don't have a #[may_dangle] attribute)
+ // must be live.
+
+ // We need to return the list of parameters from the ADTs
+ // generics/substs that correspond to impure parameters on the
+ // impl's generics. This is a bit ugly, but conceptually simple:
+ //
+ // Suppose our ADT looks like the following
+ //
+ // struct S<X, Y, Z>(X, Y, Z);
+ //
+ // and the impl is
+ //
+ // impl<#[may_dangle] P0, P1, P2> Drop for S<P1, P2, P0>
+ //
+ // We want to return the parameters (X, Y). For that, we match
+ // up the item-substs <X, Y, Z> with the substs on the impl ADT,
+ // <P1, P2, P0>, and then look up which of the impl substs refer to
+ // parameters marked as pure.
+
+ let impl_substs = match *self.type_of(impl_def_id).kind() {
+ ty::Adt(def_, substs) if def_ == def => substs,
+ _ => bug!(),
+ };
+
+ let item_substs = match *self.type_of(def.did()).kind() {
+ ty::Adt(def_, substs) if def_ == def => substs,
+ _ => bug!(),
+ };
+
+ let result = iter::zip(item_substs, impl_substs)
+ .filter(|&(_, k)| {
+ match k.unpack() {
+ GenericArgKind::Lifetime(region) => match region.kind() {
+ ty::ReEarlyBound(ref ebr) => {
+ !impl_generics.region_param(ebr, self).pure_wrt_drop
+ }
+ // Error: not a region param
+ _ => false,
+ },
+ GenericArgKind::Type(ty) => match ty.kind() {
+ ty::Param(ref pt) => !impl_generics.type_param(pt, self).pure_wrt_drop,
+ // Error: not a type param
+ _ => false,
+ },
+ GenericArgKind::Const(ct) => match ct.kind() {
+ ty::ConstKind::Param(ref pc) => {
+ !impl_generics.const_param(pc, self).pure_wrt_drop
+ }
+ // Error: not a const param
+ _ => false,
+ },
+ }
+ })
+ .map(|(item_param, _)| item_param)
+ .collect();
+ debug!("destructor_constraint({:?}) = {:?}", def.did(), result);
+ result
+ }
+
+ /// Checks whether each generic argument is simply a unique generic parameter.
+ pub fn uses_unique_generic_params(
+ self,
+ substs: SubstsRef<'tcx>,
+ ignore_regions: IgnoreRegions,
+ ) -> Result<(), NotUniqueParam<'tcx>> {
+ let mut seen = GrowableBitSet::default();
+ for arg in substs {
+ match arg.unpack() {
+ GenericArgKind::Lifetime(lt) => {
+ if ignore_regions == IgnoreRegions::No {
+ let ty::ReEarlyBound(p) = lt.kind() else {
+ return Err(NotUniqueParam::NotParam(lt.into()))
+ };
+ if !seen.insert(p.index) {
+ return Err(NotUniqueParam::DuplicateParam(lt.into()));
+ }
+ }
+ }
+ GenericArgKind::Type(t) => match t.kind() {
+ ty::Param(p) => {
+ if !seen.insert(p.index) {
+ return Err(NotUniqueParam::DuplicateParam(t.into()));
+ }
+ }
+ _ => return Err(NotUniqueParam::NotParam(t.into())),
+ },
+ GenericArgKind::Const(c) => match c.kind() {
+ ty::ConstKind::Param(p) => {
+ if !seen.insert(p.index) {
+ return Err(NotUniqueParam::DuplicateParam(c.into()));
+ }
+ }
+ _ => return Err(NotUniqueParam::NotParam(c.into())),
+ },
+ }
+ }
+
+ Ok(())
+ }
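+
+ // Illustrative (hypothetical substs): `[T, U]` is `Ok(())`; `[T, T]` is
+ // `Err(NotUniqueParam::DuplicateParam(T))`; and `[u32]` is
+ // `Err(NotUniqueParam::NotParam(u32))`, since `u32` is not a parameter.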
+
+ /// Returns `true` if `def_id` refers to a closure (e.g., `|x| x * 2`). Note
+ /// that closures have a `DefId`, but the closure *expression* also
+ /// has a `HirId` that is located within the context where the
+ /// closure appears (and, sadly, a corresponding `NodeId`, since
+ /// those are not yet phased out). The parent of the closure's
+ /// `DefId` will also be the context where it appears.
+ pub fn is_closure(self, def_id: DefId) -> bool {
+ matches!(self.def_kind(def_id), DefKind::Closure | DefKind::Generator)
+ }
+
+ /// Returns `true` if `def_id` refers to a definition that does not have its own
+ /// type-checking context, i.e. closure, generator or inline const.
+ pub fn is_typeck_child(self, def_id: DefId) -> bool {
+ matches!(
+ self.def_kind(def_id),
+ DefKind::Closure | DefKind::Generator | DefKind::InlineConst
+ )
+ }
+
+ /// Returns `true` if `def_id` refers to a trait (i.e., `trait Foo { ... }`).
+ pub fn is_trait(self, def_id: DefId) -> bool {
+ self.def_kind(def_id) == DefKind::Trait
+ }
+
+ /// Returns `true` if `def_id` refers to a trait alias (i.e., `trait Foo = ...;`),
+ /// and `false` otherwise.
+ pub fn is_trait_alias(self, def_id: DefId) -> bool {
+ self.def_kind(def_id) == DefKind::TraitAlias
+ }
+
+ /// Returns `true` if this `DefId` refers to the implicit constructor for
+ /// a tuple struct like `struct Foo(u32)`, and `false` otherwise.
+ pub fn is_constructor(self, def_id: DefId) -> bool {
+ matches!(self.def_kind(def_id), DefKind::Ctor(..))
+ }
+
+ /// Given the `DefId`, returns the `DefId` of the innermost item that
+ /// has its own type-checking context or "inference environment".
+ ///
+ /// For example, a closure has its own `DefId`, but it is type-checked
+ /// with the containing item. Similarly, an inline const block has its
+ /// own `DefId` but it is type-checked together with the containing item.
+ ///
+ /// Therefore, when we fetch the
+ /// `typeck` of the closure, for example, we really wind up
+ /// fetching the `typeck` of the enclosing fn item.
+ pub fn typeck_root_def_id(self, def_id: DefId) -> DefId {
+ let mut def_id = def_id;
+ while self.is_typeck_child(def_id) {
+ def_id = self.parent(def_id);
+ }
+ def_id
+ }
+
+ /// Given the `DefId` and substs of a closure, creates the type of
+ /// `self` argument that the closure expects. For example, for a
+ /// `Fn` closure, this would return a reference type `&T` where
+ /// `T = closure_ty`.
+ ///
+ /// Returns `None` if this closure's kind has not yet been inferred.
+ /// This should only be possible during type checking.
+ ///
+ /// Note that the returned type is built from the late-bound `env_region`,
+ /// and hence is typically wrapped in a binder by callers.
+ pub fn closure_env_ty(
+ self,
+ closure_def_id: DefId,
+ closure_substs: SubstsRef<'tcx>,
+ env_region: ty::RegionKind<'tcx>,
+ ) -> Option<Ty<'tcx>> {
+ let closure_ty = self.mk_closure(closure_def_id, closure_substs);
+ let closure_kind_ty = closure_substs.as_closure().kind_ty();
+ let closure_kind = closure_kind_ty.to_opt_closure_kind()?;
+ let env_ty = match closure_kind {
+ ty::ClosureKind::Fn => self.mk_imm_ref(self.mk_region(env_region), closure_ty),
+ ty::ClosureKind::FnMut => self.mk_mut_ref(self.mk_region(env_region), closure_ty),
+ ty::ClosureKind::FnOnce => closure_ty,
+ };
+ Some(env_ty)
+ }
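+
+ // Illustrative: for a closure of type `C`, the expected `self` argument is
+ // `&C` for `Fn`, `&mut C` for `FnMut`, and `C` by value for `FnOnce`,
+ // matching the three arms above.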
+
+ /// Returns `true` if the node pointed to by `def_id` is a `static` item.
+ #[inline]
+ pub fn is_static(self, def_id: DefId) -> bool {
+ matches!(self.def_kind(def_id), DefKind::Static(_))
+ }
+
+ #[inline]
+ pub fn static_mutability(self, def_id: DefId) -> Option<hir::Mutability> {
+ if let DefKind::Static(mt) = self.def_kind(def_id) { Some(mt) } else { None }
+ }
+
+ /// Returns `true` if this is a `static` item with the `#[thread_local]` attribute.
+ pub fn is_thread_local_static(self, def_id: DefId) -> bool {
+ self.codegen_fn_attrs(def_id).flags.contains(CodegenFnAttrFlags::THREAD_LOCAL)
+ }
+
+ /// Returns `true` if the node pointed to by `def_id` is a mutable `static` item.
+ #[inline]
+ pub fn is_mutable_static(self, def_id: DefId) -> bool {
+ self.static_mutability(def_id) == Some(hir::Mutability::Mut)
+ }
+
+ /// Get the type of the pointer to the static that we use in MIR.
+ pub fn static_ptr_ty(self, def_id: DefId) -> Ty<'tcx> {
+ // Make sure that any constants in the static's type are evaluated.
+ let static_ty = self.normalize_erasing_regions(ty::ParamEnv::empty(), self.type_of(def_id));
+
+ // Make sure that accesses to unsafe statics end up using raw pointers.
+ // For thread-locals, this needs to be kept in sync with `Rvalue::ty`.
+ if self.is_mutable_static(def_id) {
+ self.mk_mut_ptr(static_ty)
+ } else if self.is_foreign_item(def_id) {
+ self.mk_imm_ptr(static_ty)
+ } else {
+ self.mk_imm_ref(self.lifetimes.re_erased, static_ty)
+ }
+ }
+
+ /// Expands the given impl trait type, stopping if the type is recursive.
+ #[instrument(skip(self), level = "debug")]
+ pub fn try_expand_impl_trait_type(
+ self,
+ def_id: DefId,
+ substs: SubstsRef<'tcx>,
+ ) -> Result<Ty<'tcx>, Ty<'tcx>> {
+ let mut visitor = OpaqueTypeExpander {
+ seen_opaque_tys: FxHashSet::default(),
+ expanded_cache: FxHashMap::default(),
+ primary_def_id: Some(def_id),
+ found_recursion: false,
+ found_any_recursion: false,
+ check_recursion: true,
+ tcx: self,
+ };
+
+ let expanded_type = visitor.expand_opaque_ty(def_id, substs).unwrap();
+ trace!(?expanded_type);
+ if visitor.found_recursion { Err(expanded_type) } else { Ok(expanded_type) }
+ }
+
+ pub fn bound_type_of(self, def_id: DefId) -> ty::EarlyBinder<Ty<'tcx>> {
+ ty::EarlyBinder(self.type_of(def_id))
+ }
+
+ pub fn bound_fn_sig(self, def_id: DefId) -> ty::EarlyBinder<ty::PolyFnSig<'tcx>> {
+ ty::EarlyBinder(self.fn_sig(def_id))
+ }
+
+ pub fn bound_impl_trait_ref(
+ self,
+ def_id: DefId,
+ ) -> Option<ty::EarlyBinder<ty::TraitRef<'tcx>>> {
+ self.impl_trait_ref(def_id).map(|i| ty::EarlyBinder(i))
+ }
+
+ pub fn bound_explicit_item_bounds(
+ self,
+ def_id: DefId,
+ ) -> ty::EarlyBinder<&'tcx [(ty::Predicate<'tcx>, rustc_span::Span)]> {
+ ty::EarlyBinder(self.explicit_item_bounds(def_id))
+ }
+
+ pub fn bound_item_bounds(
+ self,
+ def_id: DefId,
+ ) -> ty::EarlyBinder<&'tcx ty::List<ty::Predicate<'tcx>>> {
+ ty::EarlyBinder(self.item_bounds(def_id))
+ }
+
+ pub fn bound_const_param_default(self, def_id: DefId) -> ty::EarlyBinder<ty::Const<'tcx>> {
+ ty::EarlyBinder(self.const_param_default(def_id))
+ }
+
+ pub fn bound_predicates_of(
+ self,
+ def_id: DefId,
+ ) -> ty::EarlyBinder<ty::generics::GenericPredicates<'tcx>> {
+ ty::EarlyBinder(self.predicates_of(def_id))
+ }
+
+ pub fn bound_explicit_predicates_of(
+ self,
+ def_id: DefId,
+ ) -> ty::EarlyBinder<ty::generics::GenericPredicates<'tcx>> {
+ ty::EarlyBinder(self.explicit_predicates_of(def_id))
+ }
+
+ pub fn bound_impl_subject(self, def_id: DefId) -> ty::EarlyBinder<ty::ImplSubject<'tcx>> {
+ ty::EarlyBinder(self.impl_subject(def_id))
+ }
+}
+
+struct OpaqueTypeExpander<'tcx> {
+ // Contains the DefIds of the opaque types that are currently being
+ // expanded. When we expand an opaque type we insert the DefId of
+ // that type, and when we finish expanding that type we remove its
+ // DefId.
+ seen_opaque_tys: FxHashSet<DefId>,
+ // Cache of all expansions we've seen so far. This is a critical
+ // optimization for some large types produced by async fn trees.
+ expanded_cache: FxHashMap<(DefId, SubstsRef<'tcx>), Ty<'tcx>>,
+ primary_def_id: Option<DefId>,
+ found_recursion: bool,
+ found_any_recursion: bool,
+ /// Whether or not to check for recursive opaque types.
+ /// This is `true` when we're explicitly checking for opaque type
+ /// recursion, and `false` otherwise to avoid unnecessary work.
+ check_recursion: bool,
+ tcx: TyCtxt<'tcx>,
+}
+
+impl<'tcx> OpaqueTypeExpander<'tcx> {
+ fn expand_opaque_ty(&mut self, def_id: DefId, substs: SubstsRef<'tcx>) -> Option<Ty<'tcx>> {
+ if self.found_any_recursion {
+ return None;
+ }
+ let substs = substs.fold_with(self);
+ if !self.check_recursion || self.seen_opaque_tys.insert(def_id) {
+ let expanded_ty = match self.expanded_cache.get(&(def_id, substs)) {
+ Some(expanded_ty) => *expanded_ty,
+ None => {
+ let generic_ty = self.tcx.bound_type_of(def_id);
+ let concrete_ty = generic_ty.subst(self.tcx, substs);
+ let expanded_ty = self.fold_ty(concrete_ty);
+ self.expanded_cache.insert((def_id, substs), expanded_ty);
+ expanded_ty
+ }
+ };
+ if self.check_recursion {
+ self.seen_opaque_tys.remove(&def_id);
+ }
+ Some(expanded_ty)
+ } else {
+ // If another opaque type that we contain is recursive, then it
+ // will report the error, so we don't have to.
+ self.found_any_recursion = true;
+ self.found_recursion = def_id == *self.primary_def_id.as_ref().unwrap();
+ None
+ }
+ }
+}
+
+impl<'tcx> TypeFolder<'tcx> for OpaqueTypeExpander<'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ fn fold_ty(&mut self, t: Ty<'tcx>) -> Ty<'tcx> {
+ if let ty::Opaque(def_id, substs) = *t.kind() {
+ self.expand_opaque_ty(def_id, substs).unwrap_or(t)
+ } else if t.has_opaque_types() {
+ t.super_fold_with(self)
+ } else {
+ t
+ }
+ }
+}
+
+impl<'tcx> Ty<'tcx> {
+ /// Returns the maximum value for the given numeric type (including `char`s)
+ /// or returns `None` if the type is not numeric.
+ pub fn numeric_max_val(self, tcx: TyCtxt<'tcx>) -> Option<ty::Const<'tcx>> {
+ let val = match self.kind() {
+ ty::Int(_) | ty::Uint(_) => {
+ let (size, signed) = int_size_and_signed(tcx, self);
+ let val =
+ if signed { size.signed_int_max() as u128 } else { size.unsigned_int_max() };
+ Some(val)
+ }
+ ty::Char => Some(std::char::MAX as u128),
+ ty::Float(fty) => Some(match fty {
+ ty::FloatTy::F32 => rustc_apfloat::ieee::Single::INFINITY.to_bits(),
+ ty::FloatTy::F64 => rustc_apfloat::ieee::Double::INFINITY.to_bits(),
+ }),
+ _ => None,
+ };
+
+ val.map(|v| ty::Const::from_bits(tcx, v, ty::ParamEnv::empty().and(self)))
+ }
+
+ /// Returns the minimum value for the given numeric type (including `char`s)
+ /// or returns `None` if the type is not numeric.
+ pub fn numeric_min_val(self, tcx: TyCtxt<'tcx>) -> Option<ty::Const<'tcx>> {
+ let val = match self.kind() {
+ ty::Int(_) | ty::Uint(_) => {
+ let (size, signed) = int_size_and_signed(tcx, self);
+ let val = if signed { size.truncate(size.signed_int_min() as u128) } else { 0 };
+ Some(val)
+ }
+ ty::Char => Some(0),
+ ty::Float(fty) => Some(match fty {
+ ty::FloatTy::F32 => (-::rustc_apfloat::ieee::Single::INFINITY).to_bits(),
+ ty::FloatTy::F64 => (-::rustc_apfloat::ieee::Double::INFINITY).to_bits(),
+ }),
+ _ => None,
+ };
+
+ val.map(|v| ty::Const::from_bits(tcx, v, ty::ParamEnv::empty().and(self)))
+ }
+
+ /// Checks whether values of this type `T` are *moved* or *copied*
+ /// when referenced -- this amounts to a check for whether `T:
+ /// Copy`, but note that we **don't** consider lifetimes when
+ /// doing this check. This means that we may generate MIR which
+ /// does copies even when the type actually doesn't satisfy the
+ /// full requirements for the `Copy` trait (cc #29149) -- this
+ /// winds up being reported as an error during NLL borrow check.
+ pub fn is_copy_modulo_regions(
+ self,
+ tcx_at: TyCtxtAt<'tcx>,
+ param_env: ty::ParamEnv<'tcx>,
+ ) -> bool {
+ self.is_trivially_pure_clone_copy() || tcx_at.is_copy_raw(param_env.and(self))
+ }
+
+ /// Checks whether values of this type `T` have a size known at
+ /// compile time (i.e., whether `T: Sized`). Lifetimes are ignored
+ /// for the purposes of this check, so it can be an
+ /// over-approximation in generic contexts, where one can have
+ /// strange rules like `<T as Foo<'static>>::Bar: Sized` that
+ /// actually carry lifetime requirements.
+ pub fn is_sized(self, tcx_at: TyCtxtAt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
+ self.is_trivially_sized(tcx_at.tcx) || tcx_at.is_sized_raw(param_env.and(self))
+ }
+
+ /// Checks whether values of this type `T` implement the `Freeze`
+ /// trait -- frozen types are those that do not contain an
+ /// `UnsafeCell` anywhere. This is a language concept used to
+ /// distinguish "true immutability", which is relevant to
+ /// optimization as well as the rules around static values. Note
+ /// that the `Freeze` trait is not exposed to end users and is
+ /// effectively an implementation detail.
+ pub fn is_freeze(self, tcx_at: TyCtxtAt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
+ self.is_trivially_freeze() || tcx_at.is_freeze_raw(param_env.and(self))
+ }
+
+ /// Fast path helper for testing if a type is `Freeze`.
+ ///
+ /// Returning `true` means the type is known to be `Freeze`. Returning
+ /// `false` means nothing -- could be `Freeze`, might not be.
+ fn is_trivially_freeze(self) -> bool {
+ match self.kind() {
+ ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_)
+ | ty::Bool
+ | ty::Char
+ | ty::Str
+ | ty::Never
+ | ty::Ref(..)
+ | ty::RawPtr(_)
+ | ty::FnDef(..)
+ | ty::Error(_)
+ | ty::FnPtr(_) => true,
+ ty::Tuple(fields) => fields.iter().all(Self::is_trivially_freeze),
+ ty::Slice(elem_ty) | ty::Array(elem_ty, _) => elem_ty.is_trivially_freeze(),
+ ty::Adt(..)
+ | ty::Bound(..)
+ | ty::Closure(..)
+ | ty::Dynamic(..)
+ | ty::Foreign(_)
+ | ty::Generator(..)
+ | ty::GeneratorWitness(_)
+ | ty::Infer(_)
+ | ty::Opaque(..)
+ | ty::Param(_)
+ | ty::Placeholder(_)
+ | ty::Projection(_) => false,
+ }
+ }
+
+ /// Checks whether values of this type `T` implement the `Unpin` trait.
+ pub fn is_unpin(self, tcx_at: TyCtxtAt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
+ self.is_trivially_unpin() || tcx_at.is_unpin_raw(param_env.and(self))
+ }
+
+ /// Fast path helper for testing if a type is `Unpin`.
+ ///
+ /// Returning `true` means the type is known to be `Unpin`. Returning
+ /// `false` means nothing -- could be `Unpin`, might not be.
+ fn is_trivially_unpin(self) -> bool {
+ match self.kind() {
+ ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_)
+ | ty::Bool
+ | ty::Char
+ | ty::Str
+ | ty::Never
+ | ty::Ref(..)
+ | ty::RawPtr(_)
+ | ty::FnDef(..)
+ | ty::Error(_)
+ | ty::FnPtr(_) => true,
+ ty::Tuple(fields) => fields.iter().all(Self::is_trivially_unpin),
+ ty::Slice(elem_ty) | ty::Array(elem_ty, _) => elem_ty.is_trivially_unpin(),
+ ty::Adt(..)
+ | ty::Bound(..)
+ | ty::Closure(..)
+ | ty::Dynamic(..)
+ | ty::Foreign(_)
+ | ty::Generator(..)
+ | ty::GeneratorWitness(_)
+ | ty::Infer(_)
+ | ty::Opaque(..)
+ | ty::Param(_)
+ | ty::Placeholder(_)
+ | ty::Projection(_) => false,
+ }
+ }
+
+ /// If `ty.needs_drop(...)` returns `true`, then `ty` is definitely
+ /// non-copy and *might* have a destructor attached; if it returns
+ /// `false`, then `ty` definitely has no destructor (i.e., no drop glue).
+ ///
+ /// (Note that this implies that if `ty` has a destructor attached,
+ /// then `needs_drop` will definitely return `true` for `ty`.)
+ ///
+ /// Note that this method is used to check which types are eligible for use in unions.
+ #[inline]
+ pub fn needs_drop(self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
+ // Avoid querying in simple cases.
+ match needs_drop_components(self, &tcx.data_layout) {
+ Err(AlwaysRequiresDrop) => true,
+ Ok(components) => {
+ let query_ty = match *components {
+ [] => return false,
+ // If we've got a single component, call the query with that
+ // to increase the chance that we hit the query cache.
+ [component_ty] => component_ty,
+ _ => self,
+ };
+
+ // This doesn't depend on regions, so try to minimize distinct
+ // query keys used.
+ // If normalization fails, we just use `query_ty`.
+ let query_ty =
+ tcx.try_normalize_erasing_regions(param_env, query_ty).unwrap_or(query_ty);
+
+ tcx.needs_drop_raw(param_env.and(query_ty))
+ }
+ }
+ }
+
+ /// Checks if `ty` has a significant drop.
+ ///
+ /// Note that this method can return `false` even if `ty` has a destructor
+ /// attached; if that is the case, the ADT has been marked with the
+ /// attribute `rustc_insignificant_dtor`.
+ ///
+ /// Note that this method is used to check for change in drop order for
+ /// 2229 drop reorder migration analysis.
+ #[inline]
+ pub fn has_significant_drop(self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
+ // Avoid querying in simple cases.
+ match needs_drop_components(self, &tcx.data_layout) {
+ Err(AlwaysRequiresDrop) => true,
+ Ok(components) => {
+ let query_ty = match *components {
+ [] => return false,
+ // If we've got a single component, call the query with that
+ // to increase the chance that we hit the query cache.
+ [component_ty] => component_ty,
+ _ => self,
+ };
+
+ // FIXME(#86868): We should be canonicalizing, or else moving this to a method of inference
+ // context, or *something* like that, but for now just avoid passing inference
+ // variables to queries that can't cope with them. Instead, conservatively
+ // return "true" (may change drop order).
+ if query_ty.needs_infer() {
+ return true;
+ }
+
+ // This doesn't depend on regions, so try to minimize distinct
+ // query keys used.
+ let erased = tcx.normalize_erasing_regions(param_env, query_ty);
+ tcx.has_significant_drop_raw(param_env.and(erased))
+ }
+ }
+ }
+
+ /// Returns `true` if equality for this type is both reflexive and structural.
+ ///
+ /// Reflexive equality for a type is indicated by an `Eq` impl for that type.
+ ///
+ /// Primitive types (`u32`, `str`) have structural equality by definition. For composite data
+ /// types, equality for the type as a whole is structural when it is the same as equality
+ /// between all components (fields, array elements, etc.) of that type. For ADTs, structural
+ /// equality is indicated by an implementation of `StructuralPartialEq` and `StructuralEq` for
+ /// that type.
+ ///
+ /// This function is "shallow" because it may return `true` for a composite type whose fields
+ /// are not `StructuralEq`. For example, `[T; 4]` has structural equality regardless of `T`
+ /// because equality for arrays is determined by the equality of each array element. If you
+ /// want to know whether a given call to `PartialEq::eq` will proceed structurally all the way
+ /// down, you will need to use a type visitor.
+ #[inline]
+ pub fn is_structural_eq_shallow(self, tcx: TyCtxt<'tcx>) -> bool {
+ match self.kind() {
+ // Look for an impl of both `StructuralPartialEq` and `StructuralEq`.
+ ty::Adt(..) => tcx.has_structural_eq_impls(self),
+
+ // Primitive types that satisfy `Eq`.
+ ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) | ty::Str | ty::Never => true,
+
+ // Composite types that satisfy `Eq` when all of their fields do.
+ //
+ // Because this function is "shallow", we return `true` for these composites regardless
+ // of the type(s) contained within.
+ ty::Ref(..) | ty::Array(..) | ty::Slice(_) | ty::Tuple(..) => true,
+
+ // Raw pointers use bitwise comparison.
+ ty::RawPtr(_) | ty::FnPtr(_) => true,
+
+ // Floating point numbers are not `Eq`.
+ ty::Float(_) => false,
+
+ // Conservatively return `false` for all others...
+
+ // Anonymous function types
+ ty::FnDef(..) | ty::Closure(..) | ty::Dynamic(..) | ty::Generator(..) => false,
+
+ // Generic or inferred types
+ //
+ // FIXME(ecstaticmorse): Maybe we should `bug` here? This should probably only be
+ // called for known, fully-monomorphized types.
+ ty::Projection(_)
+ | ty::Opaque(..)
+ | ty::Param(_)
+ | ty::Bound(..)
+ | ty::Placeholder(_)
+ | ty::Infer(_) => false,
+
+ ty::Foreign(_) | ty::GeneratorWitness(..) | ty::Error(_) => false,
+ }
+ }
+
+ /// Peel off all reference types in this type until there are none left.
+ ///
+ /// This method is idempotent, i.e. `ty.peel_refs().peel_refs() == ty.peel_refs()`.
+ ///
+ /// # Examples
+ ///
+ /// - `u8` -> `u8`
+ /// - `&'a mut u8` -> `u8`
+ /// - `&'a &'b u8` -> `u8`
+ /// - `&'a *const &'b u8` -> `*const &'b u8`
+ pub fn peel_refs(self) -> Ty<'tcx> {
+ let mut ty = self;
+ while let ty::Ref(_, inner_ty, _) = ty.kind() {
+ ty = *inner_ty;
+ }
+ ty
+ }
+
+ #[inline]
+ pub fn outer_exclusive_binder(self) -> ty::DebruijnIndex {
+ self.0.outer_exclusive_binder
+ }
+}
+
+pub enum ExplicitSelf<'tcx> {
+ ByValue,
+ ByReference(ty::Region<'tcx>, hir::Mutability),
+ ByRawPointer(hir::Mutability),
+ ByBox,
+ Other,
+}
+
+impl<'tcx> ExplicitSelf<'tcx> {
+ /// Categorizes an explicit self declaration like `self: SomeType`
+ /// into either `self`, `&self`, `&mut self`, `Box<self>`, or
+ /// `Other`.
+ /// This is mainly used to require the arbitrary_self_types feature
+ /// in the case of `Other`, to improve error messages in the common cases,
+ /// and to make `Other` non-object-safe.
+ ///
+ /// Examples:
+ ///
+ /// ```ignore (illustrative)
+ /// impl<'a> Foo for &'a T {
+ /// // Legal declarations:
+ /// fn method1(self: &&'a T); // ExplicitSelf::ByReference
+ /// fn method2(self: &'a T); // ExplicitSelf::ByValue
+ /// fn method3(self: Box<&'a T>); // ExplicitSelf::ByBox
+ /// fn method4(self: Rc<&'a T>); // ExplicitSelf::Other
+ ///
+ /// // Invalid cases will be caught by `check_method_receiver`:
+ /// fn method_err1(self: &'a mut T); // ExplicitSelf::Other
+ /// fn method_err2(self: &'static T) // ExplicitSelf::ByValue
+ /// fn method_err3(self: &&T) // ExplicitSelf::ByReference
+ /// }
+ /// ```
+ ///
+ pub fn determine<P>(self_arg_ty: Ty<'tcx>, is_self_ty: P) -> ExplicitSelf<'tcx>
+ where
+ P: Fn(Ty<'tcx>) -> bool,
+ {
+ use self::ExplicitSelf::*;
+
+ match *self_arg_ty.kind() {
+ _ if is_self_ty(self_arg_ty) => ByValue,
+ ty::Ref(region, ty, mutbl) if is_self_ty(ty) => ByReference(region, mutbl),
+ ty::RawPtr(ty::TypeAndMut { ty, mutbl }) if is_self_ty(ty) => ByRawPointer(mutbl),
+ ty::Adt(def, _) if def.is_box() && is_self_ty(self_arg_ty.boxed_ty()) => ByBox,
+ _ => Other,
+ }
+ }
+}
+
+/// Returns a list of types such that the given type needs drop if and only if
+/// *any* of the returned types need drop. Returns `Err(AlwaysRequiresDrop)` if
+/// this type always needs drop.
+pub fn needs_drop_components<'tcx>(
+ ty: Ty<'tcx>,
+ target_layout: &TargetDataLayout,
+) -> Result<SmallVec<[Ty<'tcx>; 2]>, AlwaysRequiresDrop> {
+ match ty.kind() {
+ ty::Infer(ty::FreshIntTy(_))
+ | ty::Infer(ty::FreshFloatTy(_))
+ | ty::Bool
+ | ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_)
+ | ty::Never
+ | ty::FnDef(..)
+ | ty::FnPtr(_)
+ | ty::Char
+ | ty::GeneratorWitness(..)
+ | ty::RawPtr(_)
+ | ty::Ref(..)
+ | ty::Str => Ok(SmallVec::new()),
+
+ // Foreign types can never have destructors.
+ ty::Foreign(..) => Ok(SmallVec::new()),
+
+ ty::Dynamic(..) | ty::Error(_) => Err(AlwaysRequiresDrop),
+
+ ty::Slice(ty) => needs_drop_components(*ty, target_layout),
+ ty::Array(elem_ty, size) => {
+ match needs_drop_components(*elem_ty, target_layout) {
+ Ok(v) if v.is_empty() => Ok(v),
+ res => match size.kind().try_to_bits(target_layout.pointer_size) {
+ // Arrays of size zero don't need drop, even if their element
+ // type does.
+ Some(0) => Ok(SmallVec::new()),
+ Some(_) => res,
+ // We don't know which of the cases above we are in, so
+ // return the whole type and let the caller decide what to
+ // do.
+ None => Ok(smallvec![ty]),
+ },
+ }
+ }
+ // If any field needs drop, then the whole tuple does.
+ ty::Tuple(fields) => fields.iter().try_fold(SmallVec::new(), move |mut acc, elem| {
+ acc.extend(needs_drop_components(elem, target_layout)?);
+ Ok(acc)
+ }),
+
+ // These require checking for `Copy` bounds or `Adt` destructors.
+ ty::Adt(..)
+ | ty::Projection(..)
+ | ty::Param(_)
+ | ty::Bound(..)
+ | ty::Placeholder(..)
+ | ty::Opaque(..)
+ | ty::Infer(_)
+ | ty::Closure(..)
+ | ty::Generator(..) => Ok(smallvec![ty]),
+ }
+}
+
+pub fn is_trivially_const_drop<'tcx>(ty: Ty<'tcx>) -> bool {
+ match *ty.kind() {
+ ty::Bool
+ | ty::Char
+ | ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_)
+ | ty::Infer(ty::IntVar(_))
+ | ty::Infer(ty::FloatVar(_))
+ | ty::Str
+ | ty::RawPtr(_)
+ | ty::Ref(..)
+ | ty::FnDef(..)
+ | ty::FnPtr(_)
+ | ty::Never
+ | ty::Foreign(_) => true,
+
+ ty::Opaque(..)
+ | ty::Dynamic(..)
+ | ty::Error(_)
+ | ty::Bound(..)
+ | ty::Param(_)
+ | ty::Placeholder(_)
+ | ty::Projection(_)
+ | ty::Infer(_) => false,
+
+ // Not trivial because they have components, and instead of looking inside,
+ // we'll just perform trait selection.
+ ty::Closure(..) | ty::Generator(..) | ty::GeneratorWitness(_) | ty::Adt(..) => false,
+
+ ty::Array(ty, _) | ty::Slice(ty) => is_trivially_const_drop(ty),
+
+ ty::Tuple(tys) => tys.iter().all(|ty| is_trivially_const_drop(ty)),
+ }
+}
+
+// Does the equivalent of
+// ```
+// let v = self.iter().map(|p| p.fold_with(folder)).collect::<SmallVec<[_; 8]>>();
+// folder.tcx().intern_*(&v)
+// ```
+pub fn fold_list<'tcx, F, T>(
+ list: &'tcx ty::List<T>,
+ folder: &mut F,
+ intern: impl FnOnce(TyCtxt<'tcx>, &[T]) -> &'tcx ty::List<T>,
+) -> Result<&'tcx ty::List<T>, F::Error>
+where
+ F: FallibleTypeFolder<'tcx>,
+ T: TypeFoldable<'tcx> + PartialEq + Copy,
+{
+ let mut iter = list.iter();
+ // Look for the first element that changed
+ match iter.by_ref().enumerate().find_map(|(i, t)| match t.try_fold_with(folder) {
+ Ok(new_t) if new_t == t => None,
+ new_t => Some((i, new_t)),
+ }) {
+ Some((i, Ok(new_t))) => {
+ // An element changed, prepare to intern the resulting list
+ let mut new_list = SmallVec::<[_; 8]>::with_capacity(list.len());
+ new_list.extend_from_slice(&list[..i]);
+ new_list.push(new_t);
+ for t in iter {
+ new_list.push(t.try_fold_with(folder)?)
+ }
+ Ok(intern(folder.tcx(), &new_list))
+ }
+ Some((_, Err(err))) => {
+ return Err(err);
+ }
+ None => Ok(list),
+ }
+}
+
+#[derive(Copy, Clone, Debug, HashStable, TyEncodable, TyDecodable)]
+pub struct AlwaysRequiresDrop;
+
+/// Normalizes all opaque types in the given value, replacing them
+/// with their underlying types.
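+///
+/// A sketch of the effect on a hypothetical opaque type (illustrative):
+///
+/// ```ignore (illustrative)
+/// fn make() -> impl Clone { 0u8 }
+/// // A predicate mentioning `make`'s opaque return type is rewritten in
+/// // terms of the underlying type, e.g. `impl Clone: Clone` becomes
+/// // `u8: Clone`.
+/// ```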
+pub fn normalize_opaque_types<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ val: &'tcx ty::List<ty::Predicate<'tcx>>,
+) -> &'tcx ty::List<ty::Predicate<'tcx>> {
+ let mut visitor = OpaqueTypeExpander {
+ seen_opaque_tys: FxHashSet::default(),
+ expanded_cache: FxHashMap::default(),
+ primary_def_id: None,
+ found_recursion: false,
+ found_any_recursion: false,
+ check_recursion: false,
+ tcx,
+ };
+ val.fold_with(&mut visitor)
+}
+
+/// Determines whether an item is annotated with `doc(hidden)`.
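+///
+/// ```ignore (illustrative)
+/// #[doc(hidden)] // `is_doc_hidden` returns `true` for this item
+/// pub struct Internal;
+/// ```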
+pub fn is_doc_hidden(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
+ tcx.get_attrs(def_id, sym::doc)
+ .filter_map(|attr| attr.meta_item_list())
+ .any(|items| items.iter().any(|item| item.has_name(sym::hidden)))
+}
+
+/// Determines whether an item is an intrinsic, based on its ABI.
+pub fn is_intrinsic(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
+ matches!(tcx.fn_sig(def_id).abi(), Abi::RustIntrinsic | Abi::PlatformIntrinsic)
+}
+
+pub fn provide(providers: &mut ty::query::Providers) {
+ *providers =
+ ty::query::Providers { normalize_opaque_types, is_doc_hidden, is_intrinsic, ..*providers }
+}
diff --git a/compiler/rustc_middle/src/ty/visit.rs b/compiler/rustc_middle/src/ty/visit.rs
new file mode 100644
index 000000000..536506720
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/visit.rs
@@ -0,0 +1,745 @@
+//! A visiting traversal mechanism for complex data structures that contain type
+//! information.
+//!
+//! This is a read-only traversal of the data structure.
+//!
+//! This traversal has limited flexibility. Only a small number of "types of
+//! interest" within the complex data structures can receive custom
+//! visitation. These are the ones containing the most important type-related
+//! information, such as `Ty`, `Predicate`, `Region`, and `Const`.
+//!
+//! There are three groups of traits involved in each traversal.
+//! - `TypeVisitable`. This is implemented once for many types, including:
+//!   - Types of interest, for which the methods delegate to the
+//! visitor.
+//! - All other types, including generic containers like `Vec` and `Option`.
+//! It defines a "skeleton" of how they should be visited.
+//! - `TypeSuperVisitable`. This is implemented only for each type of interest,
+//! and defines the visiting "skeleton" for these types.
+//! - `TypeVisitor`. This is implemented for each visitor. This defines how
+//! types of interest are visited.
+//!
+//! This means each visit is a mixture of (a) generic visiting operations, and (b)
+//! custom visit operations that are specific to the visitor.
+//! - The `TypeVisitable` impls handle most of the traversal, and call into
+//! `TypeVisitor` when they encounter a type of interest.
+//! - A `TypeVisitor` may call into another `TypeVisitable` impl, because some of
+//! the types of interest are recursive and can contain other types of interest.
+//! - A `TypeVisitor` may also call into a `TypeSuperVisitable` impl, because each
+//! visitor might provide custom handling only for some types of interest, or
+//! only for some variants of each type of interest, and then use default
+//! traversal for the remaining cases.
+//!
+//! For example, if you have `struct S(Ty, U)` where `S: TypeVisitable` and `U:
+//! TypeVisitable`, and an instance `s = S(ty, u)`, it would be visited like so:
+//! ```text
+//! s.visit_with(visitor) calls
+//! - ty.visit_with(visitor) calls
+//! - visitor.visit_ty(ty) may call
+//! - ty.super_visit_with(visitor)
+//! - u.visit_with(visitor)
+//! ```
+use crate::mir;
+use crate::ty::{self, flags::FlagComputation, Binder, Ty, TyCtxt, TypeFlags};
+use rustc_errors::ErrorGuaranteed;
+
+use rustc_data_structures::fx::FxHashSet;
+use rustc_data_structures::sso::SsoHashSet;
+use std::fmt;
+use std::ops::ControlFlow;
+
+/// This trait is implemented for every type that can be visited,
+/// providing the skeleton of the traversal.
+///
+/// To implement this conveniently, use the derive macro located in
+/// `rustc_macros`.
+pub trait TypeVisitable<'tcx>: fmt::Debug + Clone {
+ /// The entry point for visiting. To visit a value `t` with a visitor `v`
+ /// call: `t.visit_with(v)`.
+ ///
+ /// For most types, this just traverses the value, calling `visit_with` on
+ /// each field/element.
+ ///
+    /// For types of interest (such as `Ty`), the implementation of this
+    /// method calls a visitor method specifically for that type (such as
+    /// `V::visit_ty`). This is where control transfers from `TypeVisitable`
+    /// to `TypeVisitor`.
+ fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy>;
+
+ /// Returns `true` if `self` has any late-bound regions that are either
+ /// bound by `binder` or bound by some binder outside of `binder`.
+ /// If `binder` is `ty::INNERMOST`, this indicates whether
+ /// there are any late-bound regions that appear free.
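+    ///
+    /// An illustrative example:
+    ///
+    /// ```ignore (illustrative)
+    /// // Given `for<'a> fn(&'a u32)`, the inner type `&'a u32` (with the
+    /// // binder peeled off) satisfies
+    /// // `has_vars_bound_at_or_above(ty::INNERMOST)`: `'a` is no longer
+    /// // enclosed by its binder, so it appears free.
+    /// ```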
+ fn has_vars_bound_at_or_above(&self, binder: ty::DebruijnIndex) -> bool {
+ self.visit_with(&mut HasEscapingVarsVisitor { outer_index: binder }).is_break()
+ }
+
+    /// Returns `true` if `self` has any regions that escape `binder` (and
+ /// hence are not bound by it).
+ fn has_vars_bound_above(&self, binder: ty::DebruijnIndex) -> bool {
+ self.has_vars_bound_at_or_above(binder.shifted_in(1))
+ }
+
+ fn has_escaping_bound_vars(&self) -> bool {
+ self.has_vars_bound_at_or_above(ty::INNERMOST)
+ }
+
+ #[instrument(level = "trace")]
+ fn has_type_flags(&self, flags: TypeFlags) -> bool {
+ self.visit_with(&mut HasTypeFlagsVisitor { flags }).break_value() == Some(FoundFlags)
+ }
+ fn has_projections(&self) -> bool {
+ self.has_type_flags(TypeFlags::HAS_PROJECTION)
+ }
+ fn has_opaque_types(&self) -> bool {
+ self.has_type_flags(TypeFlags::HAS_TY_OPAQUE)
+ }
+ fn references_error(&self) -> bool {
+ self.has_type_flags(TypeFlags::HAS_ERROR)
+ }
+ fn error_reported(&self) -> Option<ErrorGuaranteed> {
+ if self.references_error() {
+ Some(ErrorGuaranteed::unchecked_claim_error_was_emitted())
+ } else {
+ None
+ }
+ }
+ fn has_param_types_or_consts(&self) -> bool {
+ self.has_type_flags(TypeFlags::HAS_TY_PARAM | TypeFlags::HAS_CT_PARAM)
+ }
+ fn has_infer_regions(&self) -> bool {
+ self.has_type_flags(TypeFlags::HAS_RE_INFER)
+ }
+ fn has_infer_types(&self) -> bool {
+ self.has_type_flags(TypeFlags::HAS_TY_INFER)
+ }
+ fn has_infer_types_or_consts(&self) -> bool {
+ self.has_type_flags(TypeFlags::HAS_TY_INFER | TypeFlags::HAS_CT_INFER)
+ }
+ fn needs_infer(&self) -> bool {
+ self.has_type_flags(TypeFlags::NEEDS_INFER)
+ }
+ fn has_placeholders(&self) -> bool {
+ self.has_type_flags(
+ TypeFlags::HAS_RE_PLACEHOLDER
+ | TypeFlags::HAS_TY_PLACEHOLDER
+ | TypeFlags::HAS_CT_PLACEHOLDER,
+ )
+ }
+ fn needs_subst(&self) -> bool {
+ self.has_type_flags(TypeFlags::NEEDS_SUBST)
+ }
+ /// "Free" regions in this context means that it has any region
+ /// that is not (a) erased or (b) late-bound.
+ fn has_free_regions(&self) -> bool {
+ self.has_type_flags(TypeFlags::HAS_FREE_REGIONS)
+ }
+
+ fn has_erased_regions(&self) -> bool {
+ self.has_type_flags(TypeFlags::HAS_RE_ERASED)
+ }
+
+ /// True if there are any un-erased free regions.
+ fn has_erasable_regions(&self) -> bool {
+ self.has_type_flags(TypeFlags::HAS_FREE_REGIONS)
+ }
+
+ /// Indicates whether this value references only 'global'
+ /// generic parameters that are the same regardless of what fn we are
+ /// in. This is used for caching.
+ fn is_global(&self) -> bool {
+ !self.has_type_flags(TypeFlags::HAS_FREE_LOCAL_NAMES)
+ }
+
+ /// True if there are any late-bound regions
+ fn has_late_bound_regions(&self) -> bool {
+ self.has_type_flags(TypeFlags::HAS_RE_LATE_BOUND)
+ }
+
+ /// Indicates whether this value still has parameters/placeholders/inference variables
+ /// which could be replaced later, in a way that would change the results of `impl`
+ /// specialization.
+ fn still_further_specializable(&self) -> bool {
+ self.has_type_flags(TypeFlags::STILL_FURTHER_SPECIALIZABLE)
+ }
+}
+
+pub trait TypeSuperVisitable<'tcx>: TypeVisitable<'tcx> {
+ /// Provides a default visit for a type of interest. This should only be
+ /// called within `TypeVisitor` methods, when a non-custom traversal is
+ /// desired for the value of the type of interest passed to that method.
+ /// For example, in `MyVisitor::visit_ty(ty)`, it is valid to call
+ /// `ty.super_visit_with(self)`, but any other visiting should be done
+ /// with `xyz.visit_with(self)`.
+ fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy>;
+}
+
+/// This trait is implemented for every visiting traversal. There is a visit
+/// method defined for every type of interest. Each such method has a default
+/// that recurses into the type's fields in a non-custom fashion.
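+///
+/// A minimal sketch of a custom visitor (illustrative):
+///
+/// ```ignore (illustrative)
+/// struct FindErrors;
+///
+/// impl<'tcx> TypeVisitor<'tcx> for FindErrors {
+///     type BreakTy = ();
+///
+///     fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+///         if let ty::Error(_) = t.kind() {
+///             ControlFlow::BREAK
+///         } else {
+///             t.super_visit_with(self)
+///         }
+///     }
+/// }
+/// ```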
+pub trait TypeVisitor<'tcx>: Sized {
+ type BreakTy = !;
+
+ fn visit_binder<T: TypeVisitable<'tcx>>(
+ &mut self,
+ t: &Binder<'tcx, T>,
+ ) -> ControlFlow<Self::BreakTy> {
+ t.super_visit_with(self)
+ }
+
+ fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ t.super_visit_with(self)
+ }
+
+ fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<Self::BreakTy> {
+ r.super_visit_with(self)
+ }
+
+ fn visit_const(&mut self, c: ty::Const<'tcx>) -> ControlFlow<Self::BreakTy> {
+ c.super_visit_with(self)
+ }
+
+ fn visit_unevaluated(&mut self, uv: ty::Unevaluated<'tcx>) -> ControlFlow<Self::BreakTy> {
+ uv.super_visit_with(self)
+ }
+
+ fn visit_predicate(&mut self, p: ty::Predicate<'tcx>) -> ControlFlow<Self::BreakTy> {
+ p.super_visit_with(self)
+ }
+
+ fn visit_mir_const(&mut self, c: mir::ConstantKind<'tcx>) -> ControlFlow<Self::BreakTy> {
+ c.super_visit_with(self)
+ }
+}
+
+///////////////////////////////////////////////////////////////////////////
+// Region folder
+
+impl<'tcx> TyCtxt<'tcx> {
+ /// Invoke `callback` on every region appearing free in `value`.
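+    ///
+    /// Usage sketch (illustrative):
+    ///
+    /// ```ignore (illustrative)
+    /// // For `&'r for<'x> fn(&'x u32)`, the callback sees only `'r`;
+    /// // `'x` is late-bound, so it does not appear free.
+    /// tcx.for_each_free_region(&ty, |r| println!("free region: {:?}", r));
+    /// ```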
+ pub fn for_each_free_region(
+ self,
+ value: &impl TypeVisitable<'tcx>,
+ mut callback: impl FnMut(ty::Region<'tcx>),
+ ) {
+ self.any_free_region_meets(value, |r| {
+ callback(r);
+ false
+ });
+ }
+
+ /// Returns `true` if `callback` returns true for every region appearing free in `value`.
+ pub fn all_free_regions_meet(
+ self,
+ value: &impl TypeVisitable<'tcx>,
+ mut callback: impl FnMut(ty::Region<'tcx>) -> bool,
+ ) -> bool {
+ !self.any_free_region_meets(value, |r| !callback(r))
+ }
+
+ /// Returns `true` if `callback` returns true for some region appearing free in `value`.
+ pub fn any_free_region_meets(
+ self,
+ value: &impl TypeVisitable<'tcx>,
+ callback: impl FnMut(ty::Region<'tcx>) -> bool,
+ ) -> bool {
+ struct RegionVisitor<F> {
+ /// The index of a binder *just outside* the things we have
+ /// traversed. If we encounter a bound region bound by this
+ /// binder or one outer to it, it appears free. Example:
+ ///
+ /// ```ignore (illustrative)
+ /// for<'a> fn(for<'b> fn(), T)
+ /// // ^ ^ ^ ^
+ /// // | | | | here, would be shifted in 1
+ /// // | | | here, would be shifted in 2
+ /// // | | here, would be `INNERMOST` shifted in by 1
+ /// // | here, initially, binder would be `INNERMOST`
+ /// ```
+ ///
+ /// You see that, initially, *any* bound value is free,
+ /// because we've not traversed any binders. As we pass
+ /// through a binder, we shift the `outer_index` by 1 to
+ /// account for the new binder that encloses us.
+ outer_index: ty::DebruijnIndex,
+ callback: F,
+ }
+
+ impl<'tcx, F> TypeVisitor<'tcx> for RegionVisitor<F>
+ where
+ F: FnMut(ty::Region<'tcx>) -> bool,
+ {
+ type BreakTy = ();
+
+ fn visit_binder<T: TypeVisitable<'tcx>>(
+ &mut self,
+ t: &Binder<'tcx, T>,
+ ) -> ControlFlow<Self::BreakTy> {
+ self.outer_index.shift_in(1);
+ let result = t.super_visit_with(self);
+ self.outer_index.shift_out(1);
+ result
+ }
+
+ fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<Self::BreakTy> {
+ match *r {
+ ty::ReLateBound(debruijn, _) if debruijn < self.outer_index => {
+ ControlFlow::CONTINUE
+ }
+ _ => {
+ if (self.callback)(r) {
+ ControlFlow::BREAK
+ } else {
+ ControlFlow::CONTINUE
+ }
+ }
+ }
+ }
+
+ fn visit_ty(&mut self, ty: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ // We're only interested in types involving regions
+ if ty.flags().intersects(TypeFlags::HAS_FREE_REGIONS) {
+ ty.super_visit_with(self)
+ } else {
+ ControlFlow::CONTINUE
+ }
+ }
+ }
+
+ value.visit_with(&mut RegionVisitor { outer_index: ty::INNERMOST, callback }).is_break()
+ }
+
+ /// Returns a set of all late-bound regions that are constrained
+ /// by `value`, meaning that if we instantiate those LBR with
+ /// variables and equate `value` with something else, those
+ /// variables will also be equated.
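+    ///
+    /// An illustrative example:
+    ///
+    /// ```ignore (illustrative)
+    /// // In `for<'a, 'b> fn(&'a u32) -> <&'b u32 as Trait>::Assoc`,
+    /// // `'a` is constrained, while `'b` appears only in the inputs of a
+    /// // projection and so is returned by
+    /// // `collect_referenced_late_bound_regions` but not by this method.
+    /// ```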
+ pub fn collect_constrained_late_bound_regions<T>(
+ self,
+ value: &Binder<'tcx, T>,
+ ) -> FxHashSet<ty::BoundRegionKind>
+ where
+ T: TypeVisitable<'tcx>,
+ {
+ self.collect_late_bound_regions(value, true)
+ }
+
+ /// Returns a set of all late-bound regions that appear in `value` anywhere.
+ pub fn collect_referenced_late_bound_regions<T>(
+ self,
+ value: &Binder<'tcx, T>,
+ ) -> FxHashSet<ty::BoundRegionKind>
+ where
+ T: TypeVisitable<'tcx>,
+ {
+ self.collect_late_bound_regions(value, false)
+ }
+
+ fn collect_late_bound_regions<T>(
+ self,
+ value: &Binder<'tcx, T>,
+ just_constraint: bool,
+ ) -> FxHashSet<ty::BoundRegionKind>
+ where
+ T: TypeVisitable<'tcx>,
+ {
+ let mut collector = LateBoundRegionsCollector::new(just_constraint);
+ let result = value.as_ref().skip_binder().visit_with(&mut collector);
+ assert!(result.is_continue()); // should never have stopped early
+ collector.regions
+ }
+}
+
+pub struct ValidateBoundVars<'tcx> {
+ bound_vars: &'tcx ty::List<ty::BoundVariableKind>,
+ binder_index: ty::DebruijnIndex,
+ // We may encounter the same variable at different levels of binding, so
+ // this can't just be `Ty`
+ visited: SsoHashSet<(ty::DebruijnIndex, Ty<'tcx>)>,
+}
+
+impl<'tcx> ValidateBoundVars<'tcx> {
+ pub fn new(bound_vars: &'tcx ty::List<ty::BoundVariableKind>) -> Self {
+ ValidateBoundVars {
+ bound_vars,
+ binder_index: ty::INNERMOST,
+ visited: SsoHashSet::default(),
+ }
+ }
+}
+
+impl<'tcx> TypeVisitor<'tcx> for ValidateBoundVars<'tcx> {
+ type BreakTy = ();
+
+ fn visit_binder<T: TypeVisitable<'tcx>>(
+ &mut self,
+ t: &Binder<'tcx, T>,
+ ) -> ControlFlow<Self::BreakTy> {
+ self.binder_index.shift_in(1);
+ let result = t.super_visit_with(self);
+ self.binder_index.shift_out(1);
+ result
+ }
+
+ fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ if t.outer_exclusive_binder() < self.binder_index
+ || !self.visited.insert((self.binder_index, t))
+ {
+ return ControlFlow::BREAK;
+ }
+ match *t.kind() {
+ ty::Bound(debruijn, bound_ty) if debruijn == self.binder_index => {
+ if self.bound_vars.len() <= bound_ty.var.as_usize() {
+ bug!("Not enough bound vars: {:?} not found in {:?}", t, self.bound_vars);
+ }
+ let list_var = self.bound_vars[bound_ty.var.as_usize()];
+ match list_var {
+ ty::BoundVariableKind::Ty(kind) => {
+ if kind != bound_ty.kind {
+ bug!(
+ "Mismatched type kinds: {:?} doesn't var in list {:?}",
+ bound_ty.kind,
+ list_var
+ );
+ }
+ }
+ _ => {
+ bug!("Mismatched bound variable kinds! Expected type, found {:?}", list_var)
+ }
+ }
+ }
+
+ _ => (),
+ };
+
+ t.super_visit_with(self)
+ }
+
+ fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<Self::BreakTy> {
+ match *r {
+ ty::ReLateBound(index, br) if index == self.binder_index => {
+ if self.bound_vars.len() <= br.var.as_usize() {
+ bug!("Not enough bound vars: {:?} not found in {:?}", br, self.bound_vars);
+ }
+ let list_var = self.bound_vars[br.var.as_usize()];
+ match list_var {
+ ty::BoundVariableKind::Region(kind) => {
+ if kind != br.kind {
+ bug!(
+ "Mismatched region kinds: {:?} doesn't match var ({:?}) in list ({:?})",
+ br.kind,
+ list_var,
+ self.bound_vars
+ );
+ }
+ }
+ _ => bug!(
+ "Mismatched bound variable kinds! Expected region, found {:?}",
+ list_var
+ ),
+ }
+ }
+
+ _ => (),
+ };
+
+ r.super_visit_with(self)
+ }
+}
+
+#[derive(Debug, PartialEq, Eq, Copy, Clone)]
+struct FoundEscapingVars;
+
+/// An "escaping var" is a bound var whose binder is not part of `t`. A bound var can be a
+/// bound region or a bound type.
+///
+/// So, for example, consider a type like the following, which has two binders:
+///
+/// ```text
+/// for<'a> fn(x: for<'b> fn(&'a isize, &'b isize))
+/// ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ outer scope
+///               ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~  inner scope
+/// ```
+///
+/// This type has *bound regions* (`'a`, `'b`), but it does not have escaping regions, because the
+/// binders of both `'a` and `'b` are part of the type itself. However, if we consider the *inner
+/// fn type*, that type has an escaping region: `'a`.
+///
+/// Note that what I'm calling an "escaping var" is often just called a "free var". However,
+/// we already use the term "free var". It refers to the regions or types that we use to represent
+/// bound regions or type params on a fn definition while we are type checking its body.
+///
+/// To clarify, conceptually there is no particular difference between
+/// an "escaping" var and a "free" var. However, there is a big
+/// difference in practice. Basically, when "entering" a binding
+/// level, one is generally required to do some sort of processing to
+/// a bound var, such as replacing it with a fresh/placeholder
+/// var, or making an entry in the environment to represent the
+/// scope to which it is attached, etc. An escaping var represents
+/// a bound var for which this processing has not yet been done.
+struct HasEscapingVarsVisitor {
+ /// Anything bound by `outer_index` or "above" is escaping.
+ outer_index: ty::DebruijnIndex,
+}
+
+impl<'tcx> TypeVisitor<'tcx> for HasEscapingVarsVisitor {
+ type BreakTy = FoundEscapingVars;
+
+ fn visit_binder<T: TypeVisitable<'tcx>>(
+ &mut self,
+ t: &Binder<'tcx, T>,
+ ) -> ControlFlow<Self::BreakTy> {
+ self.outer_index.shift_in(1);
+ let result = t.super_visit_with(self);
+ self.outer_index.shift_out(1);
+ result
+ }
+
+ #[inline]
+ fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ // If the outer-exclusive-binder is *strictly greater* than
+ // `outer_index`, that means that `t` contains some content
+ // bound at `outer_index` or above (because
+ // `outer_exclusive_binder` is always 1 higher than the
+ // content in `t`). Therefore, `t` has some escaping vars.
+ if t.outer_exclusive_binder() > self.outer_index {
+ ControlFlow::Break(FoundEscapingVars)
+ } else {
+ ControlFlow::CONTINUE
+ }
+ }
+
+ #[inline]
+ fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<Self::BreakTy> {
+ // If the region is bound by `outer_index` or anything outside
+ // of outer index, then it escapes the binders we have
+ // visited.
+ if r.bound_at_or_above_binder(self.outer_index) {
+ ControlFlow::Break(FoundEscapingVars)
+ } else {
+ ControlFlow::CONTINUE
+ }
+ }
+
+ fn visit_const(&mut self, ct: ty::Const<'tcx>) -> ControlFlow<Self::BreakTy> {
+ // we don't have a `visit_infer_const` callback, so we have to
+ // hook in here to catch this case (annoying...), but
+ // otherwise we do want to remember to visit the rest of the
+ // const, as it has types/regions embedded in a lot of other
+ // places.
+ match ct.kind() {
+ ty::ConstKind::Bound(debruijn, _) if debruijn >= self.outer_index => {
+ ControlFlow::Break(FoundEscapingVars)
+ }
+ _ => ct.super_visit_with(self),
+ }
+ }
+
+ #[inline]
+ fn visit_predicate(&mut self, predicate: ty::Predicate<'tcx>) -> ControlFlow<Self::BreakTy> {
+ if predicate.outer_exclusive_binder() > self.outer_index {
+ ControlFlow::Break(FoundEscapingVars)
+ } else {
+ ControlFlow::CONTINUE
+ }
+ }
+}
+
+#[derive(Debug, PartialEq, Eq, Copy, Clone)]
+struct FoundFlags;
+
+// FIXME: Optimize for checking for infer flags
+struct HasTypeFlagsVisitor {
+ flags: ty::TypeFlags,
+}
+
+impl std::fmt::Debug for HasTypeFlagsVisitor {
+ fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ self.flags.fmt(fmt)
+ }
+}
+
+impl<'tcx> TypeVisitor<'tcx> for HasTypeFlagsVisitor {
+ type BreakTy = FoundFlags;
+
+ #[inline]
+ #[instrument(skip(self), level = "trace")]
+ fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ let flags = t.flags();
+ trace!(t.flags=?t.flags());
+ if flags.intersects(self.flags) {
+ ControlFlow::Break(FoundFlags)
+ } else {
+ ControlFlow::CONTINUE
+ }
+ }
+
+ #[inline]
+ #[instrument(skip(self), level = "trace")]
+ fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<Self::BreakTy> {
+ let flags = r.type_flags();
+ trace!(r.flags=?flags);
+ if flags.intersects(self.flags) {
+ ControlFlow::Break(FoundFlags)
+ } else {
+ ControlFlow::CONTINUE
+ }
+ }
+
+ #[inline]
+ #[instrument(level = "trace")]
+ fn visit_const(&mut self, c: ty::Const<'tcx>) -> ControlFlow<Self::BreakTy> {
+ let flags = FlagComputation::for_const(c);
+        trace!(c.flags=?flags);
+ if flags.intersects(self.flags) {
+ ControlFlow::Break(FoundFlags)
+ } else {
+ ControlFlow::CONTINUE
+ }
+ }
+
+ #[inline]
+ #[instrument(level = "trace")]
+ fn visit_unevaluated(&mut self, uv: ty::Unevaluated<'tcx>) -> ControlFlow<Self::BreakTy> {
+ let flags = FlagComputation::for_unevaluated_const(uv);
+        trace!(uv.flags=?flags);
+ if flags.intersects(self.flags) {
+ ControlFlow::Break(FoundFlags)
+ } else {
+ ControlFlow::CONTINUE
+ }
+ }
+
+ #[inline]
+ #[instrument(level = "trace")]
+ fn visit_predicate(&mut self, predicate: ty::Predicate<'tcx>) -> ControlFlow<Self::BreakTy> {
+ debug!(
+ "HasTypeFlagsVisitor: predicate={:?} predicate.flags={:?} self.flags={:?}",
+ predicate,
+ predicate.flags(),
+ self.flags
+ );
+ if predicate.flags().intersects(self.flags) {
+ ControlFlow::Break(FoundFlags)
+ } else {
+ ControlFlow::CONTINUE
+ }
+ }
+}
+
+/// Collects all the late-bound regions at the innermost binding level
+/// into a hash set.
+struct LateBoundRegionsCollector {
+ current_index: ty::DebruijnIndex,
+ regions: FxHashSet<ty::BoundRegionKind>,
+
+ /// `true` if we only want regions that are known to be
+ /// "constrained" when you equate this type with another type. In
+ /// particular, if you have e.g., `&'a u32` and `&'b u32`, equating
+    /// them constrains `'a == 'b`. But if you have `<&'a u32 as
+ /// Trait>::Foo` and `<&'b u32 as Trait>::Foo`, normalizing those
+ /// types may mean that `'a` and `'b` don't appear in the results,
+ /// so they are not considered *constrained*.
+ just_constrained: bool,
+}
+
+impl LateBoundRegionsCollector {
+ fn new(just_constrained: bool) -> Self {
+ LateBoundRegionsCollector {
+ current_index: ty::INNERMOST,
+ regions: Default::default(),
+ just_constrained,
+ }
+ }
+}
+
+impl<'tcx> TypeVisitor<'tcx> for LateBoundRegionsCollector {
+ fn visit_binder<T: TypeVisitable<'tcx>>(
+ &mut self,
+ t: &Binder<'tcx, T>,
+ ) -> ControlFlow<Self::BreakTy> {
+ self.current_index.shift_in(1);
+ let result = t.super_visit_with(self);
+ self.current_index.shift_out(1);
+ result
+ }
+
+ fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+        // if we are only looking for "constrained" regions, we have to
+ // ignore the inputs to a projection, as they may not appear
+ // in the normalized form
+ if self.just_constrained {
+ if let ty::Projection(..) = t.kind() {
+ return ControlFlow::CONTINUE;
+ }
+ }
+
+ t.super_visit_with(self)
+ }
+
+ fn visit_const(&mut self, c: ty::Const<'tcx>) -> ControlFlow<Self::BreakTy> {
+        // if we are only looking for "constrained" regions, we have to
+ // ignore the inputs of an unevaluated const, as they may not appear
+ // in the normalized form
+ if self.just_constrained {
+ if let ty::ConstKind::Unevaluated(..) = c.kind() {
+ return ControlFlow::CONTINUE;
+ }
+ }
+
+ c.super_visit_with(self)
+ }
+
+ fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<Self::BreakTy> {
+ if let ty::ReLateBound(debruijn, br) = *r {
+ if debruijn == self.current_index {
+ self.regions.insert(br.kind);
+ }
+ }
+ ControlFlow::CONTINUE
+ }
+}
+
+/// Finds the maximum universe index present in a value.
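+///
+/// Usage sketch (illustrative):
+///
+/// ```ignore (illustrative)
+/// let mut collector = MaxUniverse::new();
+/// value.visit_with(&mut collector);
+/// let max = collector.max_universe();
+/// ```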
+pub struct MaxUniverse {
+ max_universe: ty::UniverseIndex,
+}
+
+impl MaxUniverse {
+ pub fn new() -> Self {
+ MaxUniverse { max_universe: ty::UniverseIndex::ROOT }
+ }
+
+ pub fn max_universe(self) -> ty::UniverseIndex {
+ self.max_universe
+ }
+}
+
+impl<'tcx> TypeVisitor<'tcx> for MaxUniverse {
+ fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> {
+ if let ty::Placeholder(placeholder) = t.kind() {
+ self.max_universe = ty::UniverseIndex::from_u32(
+ self.max_universe.as_u32().max(placeholder.universe.as_u32()),
+ );
+ }
+
+ t.super_visit_with(self)
+ }
+
+ fn visit_const(&mut self, c: ty::consts::Const<'tcx>) -> ControlFlow<Self::BreakTy> {
+ if let ty::ConstKind::Placeholder(placeholder) = c.kind() {
+ self.max_universe = ty::UniverseIndex::from_u32(
+ self.max_universe.as_u32().max(placeholder.universe.as_u32()),
+ );
+ }
+
+ c.super_visit_with(self)
+ }
+
+ fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<Self::BreakTy> {
+ if let ty::RePlaceholder(placeholder) = *r {
+ self.max_universe = ty::UniverseIndex::from_u32(
+ self.max_universe.as_u32().max(placeholder.universe.as_u32()),
+ );
+ }
+
+ ControlFlow::CONTINUE
+ }
+}
diff --git a/compiler/rustc_middle/src/ty/vtable.rs b/compiler/rustc_middle/src/ty/vtable.rs
new file mode 100644
index 000000000..04a9fd1f7
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/vtable.rs
@@ -0,0 +1,117 @@
+use std::convert::TryFrom;
+use std::fmt;
+
+use crate::mir::interpret::{alloc_range, AllocId, Allocation, Pointer, Scalar, ScalarMaybeUninit};
+use crate::ty::{self, Instance, PolyTraitRef, Ty, TyCtxt};
+use rustc_ast::Mutability;
+
+#[derive(Clone, Copy, PartialEq, HashStable)]
+pub enum VtblEntry<'tcx> {
+ /// destructor of this type (used in vtable header)
+ MetadataDropInPlace,
+ /// layout size of this type (used in vtable header)
+ MetadataSize,
+ /// layout align of this type (used in vtable header)
+ MetadataAlign,
+ /// non-dispatchable associated function that is excluded from trait object
+ Vacant,
+ /// dispatchable associated function
+ Method(Instance<'tcx>),
+ /// pointer to a separate supertrait vtable, can be used by trait upcasting coercion
+ TraitVPtr(PolyTraitRef<'tcx>),
+}
+
+impl<'tcx> fmt::Debug for VtblEntry<'tcx> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // We want to call `Display` on `Instance` and `PolyTraitRef`,
+ // so we implement this manually.
+ match self {
+ VtblEntry::MetadataDropInPlace => write!(f, "MetadataDropInPlace"),
+ VtblEntry::MetadataSize => write!(f, "MetadataSize"),
+ VtblEntry::MetadataAlign => write!(f, "MetadataAlign"),
+ VtblEntry::Vacant => write!(f, "Vacant"),
+ VtblEntry::Method(instance) => write!(f, "Method({})", instance),
+ VtblEntry::TraitVPtr(trait_ref) => write!(f, "TraitVPtr({})", trait_ref),
+ }
+ }
+}
+
+// Needs to be associated with the `'tcx` lifetime
+impl<'tcx> TyCtxt<'tcx> {
+ pub const COMMON_VTABLE_ENTRIES: &'tcx [VtblEntry<'tcx>] =
+ &[VtblEntry::MetadataDropInPlace, VtblEntry::MetadataSize, VtblEntry::MetadataAlign];
+}
+
+pub const COMMON_VTABLE_ENTRIES_DROPINPLACE: usize = 0;
+pub const COMMON_VTABLE_ENTRIES_SIZE: usize = 1;
+pub const COMMON_VTABLE_ENTRIES_ALIGN: usize = 2;
+
+/// Retrieves an allocation that represents the contents of a vtable.
+/// Since this is a query, allocations are cached and not duplicated.
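+///
+/// Layout sketch (illustrative): for a trait object with two methods and
+/// no supertrait vptrs, the allocation built below is, slot by slot (each
+/// slot `ptr_size` wide):
+///
+/// ```text
+/// [ drop_in_place fn ptr | size | align | method 0 | method 1 ]
+/// ```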
+pub(super) fn vtable_allocation_provider<'tcx>(
+ tcx: TyCtxt<'tcx>,
+ key: (Ty<'tcx>, Option<ty::PolyExistentialTraitRef<'tcx>>),
+) -> AllocId {
+ let (ty, poly_trait_ref) = key;
+
+ let vtable_entries = if let Some(poly_trait_ref) = poly_trait_ref {
+ let trait_ref = poly_trait_ref.with_self_ty(tcx, ty);
+ let trait_ref = tcx.erase_regions(trait_ref);
+
+ tcx.vtable_entries(trait_ref)
+ } else {
+ TyCtxt::COMMON_VTABLE_ENTRIES
+ };
+
+ let layout = tcx
+ .layout_of(ty::ParamEnv::reveal_all().and(ty))
+ .expect("failed to build vtable representation");
+ assert!(!layout.is_unsized(), "can't create a vtable for an unsized type");
+ let size = layout.size.bytes();
+ let align = layout.align.abi.bytes();
+
+ let ptr_size = tcx.data_layout.pointer_size;
+ let ptr_align = tcx.data_layout.pointer_align.abi;
+
+ let vtable_size = ptr_size * u64::try_from(vtable_entries.len()).unwrap();
+ let mut vtable = Allocation::uninit(vtable_size, ptr_align, /* panic_on_fail */ true).unwrap();
+
+ // No need to do any alignment checks on the memory accesses below, because we know the
+ // allocation is correctly aligned as we created it above. Also we're only offsetting by
+ // multiples of `ptr_align`, which means that it will stay aligned to `ptr_align`.
+
+ for (idx, entry) in vtable_entries.iter().enumerate() {
+ let idx: u64 = u64::try_from(idx).unwrap();
+ let scalar = match entry {
+ VtblEntry::MetadataDropInPlace => {
+ let instance = ty::Instance::resolve_drop_in_place(tcx, ty);
+ let fn_alloc_id = tcx.create_fn_alloc(instance);
+ let fn_ptr = Pointer::from(fn_alloc_id);
+ ScalarMaybeUninit::from_pointer(fn_ptr, &tcx)
+ }
+ VtblEntry::MetadataSize => Scalar::from_uint(size, ptr_size).into(),
+ VtblEntry::MetadataAlign => Scalar::from_uint(align, ptr_size).into(),
+ VtblEntry::Vacant => continue,
+ VtblEntry::Method(instance) => {
+ // Prepare the fn ptr we write into the vtable.
+ let instance = instance.polymorphize(tcx);
+ let fn_alloc_id = tcx.create_fn_alloc(instance);
+ let fn_ptr = Pointer::from(fn_alloc_id);
+ ScalarMaybeUninit::from_pointer(fn_ptr, &tcx)
+ }
+ VtblEntry::TraitVPtr(trait_ref) => {
+ let super_trait_ref = trait_ref
+ .map_bound(|trait_ref| ty::ExistentialTraitRef::erase_self_ty(tcx, trait_ref));
+ let supertrait_alloc_id = tcx.vtable_allocation((ty, Some(super_trait_ref)));
+ let vptr = Pointer::from(supertrait_alloc_id);
+ ScalarMaybeUninit::from_pointer(vptr, &tcx)
+ }
+ };
+ vtable
+ .write_scalar(&tcx, alloc_range(ptr_size * idx, ptr_size), scalar)
+ .expect("failed to build vtable representation");
+ }
+
+ vtable.mutability = Mutability::Not;
+ tcx.create_memory_alloc(tcx.intern_const_alloc(vtable))
+}
diff --git a/compiler/rustc_middle/src/ty/walk.rs b/compiler/rustc_middle/src/ty/walk.rs
new file mode 100644
index 000000000..02fe1f3a7
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/walk.rs
@@ -0,0 +1,207 @@
+//! An iterator over the type substructure.
+//! WARNING: this does not keep track of the region depth.
+
+use crate::ty::subst::{GenericArg, GenericArgKind};
+use crate::ty::{self, Ty};
+use rustc_data_structures::sso::SsoHashSet;
+use smallvec::{self, SmallVec};
+
+// The TypeWalker's stack is hot enough that it's worth going to some effort to
+// avoid heap allocations.
+type TypeWalkerStack<'tcx> = SmallVec<[GenericArg<'tcx>; 8]>;
+
+pub struct TypeWalker<'tcx> {
+ stack: TypeWalkerStack<'tcx>,
+ last_subtree: usize,
+ pub visited: SsoHashSet<GenericArg<'tcx>>,
+}
+
+/// An iterator for walking the type tree.
+///
+/// It's very easy to produce a deeply nested type tree with a lot of
+/// identical subtrees. In order to work efficiently in this situation,
+/// the walker only visits each type once. It maintains a set of visited
+/// types and skips any types that are already there.
+impl<'tcx> TypeWalker<'tcx> {
+ pub fn new(root: GenericArg<'tcx>) -> Self {
+ Self { stack: smallvec![root], last_subtree: 1, visited: SsoHashSet::new() }
+ }
+
+ /// Skips the subtree corresponding to the last type
+ /// returned by `next()`.
+ ///
+ /// Example: Imagine you are walking `Foo<Bar<i32>, usize>`.
+ ///
+ /// ```ignore (illustrative)
+ /// let mut iter: TypeWalker = ...;
+ /// iter.next(); // yields Foo
+ /// iter.next(); // yields Bar<i32>
+ /// iter.skip_current_subtree(); // skips i32
+ /// iter.next(); // yields usize
+ /// ```
+ pub fn skip_current_subtree(&mut self) {
+ self.stack.truncate(self.last_subtree);
+ }
+}
+
+impl<'tcx> Iterator for TypeWalker<'tcx> {
+ type Item = GenericArg<'tcx>;
+
+ fn next(&mut self) -> Option<GenericArg<'tcx>> {
+ debug!("next(): stack={:?}", self.stack);
+ loop {
+ let next = self.stack.pop()?;
+ self.last_subtree = self.stack.len();
+ if self.visited.insert(next) {
+ push_inner(&mut self.stack, next);
+ debug!("next: stack={:?}", self.stack);
+ return Some(next);
+ }
+ }
+ }
+}
+
+impl<'tcx> GenericArg<'tcx> {
+ /// Iterator that walks `self` and any types reachable from
+    /// `self`, in depth-first order. Note that this just walks the types
+    /// that appear in `self`; it does not descend into the fields of
+ /// structs or variants. For example:
+ ///
+ /// ```text
+ /// isize => { isize }
+ /// Foo<Bar<isize>> => { Foo<Bar<isize>>, Bar<isize>, isize }
+ /// [isize] => { [isize], isize }
+ /// ```
+ pub fn walk(self) -> TypeWalker<'tcx> {
+ TypeWalker::new(self)
+ }
+
+ /// Iterator that walks the immediate children of `self`. Hence
+ /// `Foo<Bar<i32>, u32>` yields the sequence `[Bar<i32>, u32]`
+    /// (but not `i32`, unlike `walk`).
+    ///
+    /// The iterator only walks items once: it takes a visited set, updates
+    /// it with all visited types, and skips any types that are already
+    /// there.
+ pub fn walk_shallow(
+ self,
+ visited: &mut SsoHashSet<GenericArg<'tcx>>,
+ ) -> impl Iterator<Item = GenericArg<'tcx>> {
+ let mut stack = SmallVec::new();
+ push_inner(&mut stack, self);
+ stack.retain(|a| visited.insert(*a));
+ stack.into_iter()
+ }
+}
+
+impl<'tcx> Ty<'tcx> {
+ /// Iterator that walks `self` and any types reachable from
+    /// `self`, in depth-first order. Note that this just walks the types
+    /// that appear in `self`; it does not descend into the fields of
+ /// structs or variants. For example:
+ ///
+ /// ```text
+ /// isize => { isize }
+ /// Foo<Bar<isize>> => { Foo<Bar<isize>>, Bar<isize>, isize }
+ /// [isize] => { [isize], isize }
+ /// ```
+ pub fn walk(self) -> TypeWalker<'tcx> {
+ TypeWalker::new(self.into())
+ }
+}
+
+/// We push `GenericArg`s on the stack in reverse order so as to
+/// maintain a pre-order traversal. As of the time of this
+/// writing, the fact that the traversal is pre-order is not
+/// known to be significant to any code, but it seems like the
+/// natural order one would expect (basically, the order of the
+/// types as they are written).
+fn push_inner<'tcx>(stack: &mut TypeWalkerStack<'tcx>, parent: GenericArg<'tcx>) {
+ match parent.unpack() {
+ GenericArgKind::Type(parent_ty) => match *parent_ty.kind() {
+ ty::Bool
+ | ty::Char
+ | ty::Int(_)
+ | ty::Uint(_)
+ | ty::Float(_)
+ | ty::Str
+ | ty::Infer(_)
+ | ty::Param(_)
+ | ty::Never
+ | ty::Error(_)
+ | ty::Placeholder(..)
+ | ty::Bound(..)
+ | ty::Foreign(..) => {}
+
+ ty::Array(ty, len) => {
+ stack.push(len.into());
+ stack.push(ty.into());
+ }
+ ty::Slice(ty) => {
+ stack.push(ty.into());
+ }
+ ty::RawPtr(mt) => {
+ stack.push(mt.ty.into());
+ }
+ ty::Ref(lt, ty, _) => {
+ stack.push(ty.into());
+ stack.push(lt.into());
+ }
+ ty::Projection(data) => {
+ stack.extend(data.substs.iter().rev());
+ }
+ ty::Dynamic(obj, lt) => {
+ stack.push(lt.into());
+ stack.extend(obj.iter().rev().flat_map(|predicate| {
+ let (substs, opt_ty) = match predicate.skip_binder() {
+ ty::ExistentialPredicate::Trait(tr) => (tr.substs, None),
+ ty::ExistentialPredicate::Projection(p) => (p.substs, Some(p.term)),
+                    // Empty iterator
+                    ty::ExistentialPredicate::AutoTrait(_) => {
+                        (ty::InternalSubsts::empty(), None)
+                    }
+ };
+
+ substs.iter().rev().chain(opt_ty.map(|term| match term {
+ ty::Term::Ty(ty) => ty.into(),
+ ty::Term::Const(ct) => ct.into(),
+ }))
+ }));
+ }
+ ty::Adt(_, substs)
+ | ty::Opaque(_, substs)
+ | ty::Closure(_, substs)
+ | ty::Generator(_, substs, _)
+ | ty::FnDef(_, substs) => {
+ stack.extend(substs.iter().rev());
+ }
+ ty::Tuple(ts) => stack.extend(ts.as_substs().iter().rev()),
+ ty::GeneratorWitness(ts) => {
+ stack.extend(ts.skip_binder().iter().rev().map(|ty| ty.into()));
+ }
+ ty::FnPtr(sig) => {
+ stack.push(sig.skip_binder().output().into());
+ stack.extend(sig.skip_binder().inputs().iter().copied().rev().map(|ty| ty.into()));
+ }
+ },
+ GenericArgKind::Lifetime(_) => {}
+ GenericArgKind::Const(parent_ct) => {
+ stack.push(parent_ct.ty().into());
+ match parent_ct.kind() {
+ ty::ConstKind::Infer(_)
+ | ty::ConstKind::Param(_)
+ | ty::ConstKind::Placeholder(_)
+ | ty::ConstKind::Bound(..)
+ | ty::ConstKind::Value(_)
+ | ty::ConstKind::Error(_) => {}
+
+ ty::ConstKind::Unevaluated(ct) => {
+ stack.extend(ct.substs.iter().rev());
+ }
+ }
+ }
+ }
+}
diff --git a/compiler/rustc_middle/src/util/bug.rs b/compiler/rustc_middle/src/util/bug.rs
new file mode 100644
index 000000000..fd7045d6a
--- /dev/null
+++ b/compiler/rustc_middle/src/util/bug.rs
@@ -0,0 +1,54 @@
+// These functions are used by macro expansion for bug! and span_bug!
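+//
+// Usage sketch (illustrative): `bug!("unexpected {:?}", kind)` expands to
+// a call to `bug_fmt` with the formatted arguments, and `span_bug!(sp, ...)`
+// likewise bottoms out in `span_bug_fmt`.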
+
+use crate::ty::{tls, TyCtxt};
+use rustc_errors::MultiSpan;
+use rustc_span::Span;
+use std::fmt;
+use std::panic::{panic_any, Location};
+
+#[cold]
+#[inline(never)]
+#[track_caller]
+pub fn bug_fmt(args: fmt::Arguments<'_>) -> ! {
+ // this wrapper mostly exists so I don't have to write a fully
+ // qualified path of None::<Span> inside the bug!() macro definition
+ opt_span_bug_fmt(None::<Span>, args, Location::caller());
+}
+
+#[cold]
+#[inline(never)]
+#[track_caller]
+pub fn span_bug_fmt<S: Into<MultiSpan>>(span: S, args: fmt::Arguments<'_>) -> ! {
+ opt_span_bug_fmt(Some(span), args, Location::caller());
+}
+
+#[track_caller]
+fn opt_span_bug_fmt<S: Into<MultiSpan>>(
+ span: Option<S>,
+ args: fmt::Arguments<'_>,
+ location: &Location<'_>,
+) -> ! {
+ tls::with_opt(move |tcx| {
+ let msg = format!("{}: {}", location, args);
+ match (tcx, span) {
+ (Some(tcx), Some(span)) => tcx.sess.diagnostic().span_bug(span, &msg),
+ (Some(tcx), None) => tcx.sess.diagnostic().bug(&msg),
+ (None, _) => panic_any(msg),
+ }
+ });
+ unreachable!();
+}
+
+/// A query to trigger a `delay_span_bug`. Clearly, if one has a `tcx` one can already trigger a
+/// `delay_span_bug`, so what is the point of this? It exists to help us test `delay_span_bug`'s
+/// interactions with the query system and incremental.
+pub fn trigger_delay_span_bug(tcx: TyCtxt<'_>, key: rustc_hir::def_id::DefId) {
+ tcx.sess.delay_span_bug(
+ tcx.def_span(key),
+ "delayed span bug triggered by #[rustc_error(delay_span_bug_from_inside_query)]",
+ );
+}
+
+pub fn provide(providers: &mut crate::ty::query::Providers) {
+ *providers = crate::ty::query::Providers { trigger_delay_span_bug, ..*providers };
+}
diff --git a/compiler/rustc_middle/src/util/common.rs b/compiler/rustc_middle/src/util/common.rs
new file mode 100644
index 000000000..08977049d
--- /dev/null
+++ b/compiler/rustc_middle/src/util/common.rs
@@ -0,0 +1,67 @@
+use rustc_data_structures::sync::Lock;
+
+use std::fmt::Debug;
+use std::time::{Duration, Instant};
+
+#[cfg(test)]
+mod tests;
+
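+/// Formats a number with `_` as a thousands separator, e.g. `1234567`
+/// becomes `"1_234_567"` (see the accompanying unit tests for more cases).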
+pub fn to_readable_str(mut val: usize) -> String {
+ let mut groups = vec![];
+ loop {
+ let group = val % 1000;
+
+ val /= 1000;
+
+ if val == 0 {
+ groups.push(group.to_string());
+ break;
+ } else {
+ groups.push(format!("{:03}", group));
+ }
+ }
+
+ groups.reverse();
+
+ groups.join("_")
+}
+
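+/// Runs `f`, adding its elapsed wall-clock time to `accu`.
+///
+/// Usage sketch (illustrative):
+///
+/// ```ignore (illustrative)
+/// let total = Lock::new(Duration::default());
+/// let result = record_time(&total, || expensive_computation());
+/// ```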
+pub fn record_time<T, F>(accu: &Lock<Duration>, f: F) -> T
+where
+ F: FnOnce() -> T,
+{
+ let start = Instant::now();
+ let rv = f();
+ let duration = start.elapsed();
+ let mut accu = accu.lock();
+ *accu += duration;
+ rv
+}
+
+pub fn indent<R, F>(op: F) -> R
+where
+ R: Debug,
+ F: FnOnce() -> R,
+{
+ // Use in conjunction with the log post-processor like `src/etc/indenter`
+ // to make debug output more readable.
+ debug!(">>");
+ let r = op();
+ debug!("<< (Result = {:?})", r);
+ r
+}
+
+pub struct Indenter {
+ _cannot_construct_outside_of_this_module: (),
+}
+
+impl Drop for Indenter {
+ fn drop(&mut self) {
+ debug!("<<");
+ }
+}
+
+pub fn indenter() -> Indenter {
+ debug!(">>");
+ Indenter { _cannot_construct_outside_of_this_module: () }
+}
diff --git a/compiler/rustc_middle/src/util/common/tests.rs b/compiler/rustc_middle/src/util/common/tests.rs
new file mode 100644
index 000000000..9a9fb203c
--- /dev/null
+++ b/compiler/rustc_middle/src/util/common/tests.rs
@@ -0,0 +1,14 @@
+use super::*;
+
+#[test]
+fn test_to_readable_str() {
+ assert_eq!("0", to_readable_str(0));
+ assert_eq!("1", to_readable_str(1));
+ assert_eq!("99", to_readable_str(99));
+ assert_eq!("999", to_readable_str(999));
+ assert_eq!("1_000", to_readable_str(1_000));
+ assert_eq!("1_001", to_readable_str(1_001));
+ assert_eq!("999_999", to_readable_str(999_999));
+ assert_eq!("1_000_000", to_readable_str(1_000_000));
+ assert_eq!("1_234_567", to_readable_str(1_234_567));
+}