Diffstat (limited to 'compiler/rustc_middle')
-rw-r--r--  compiler/rustc_middle/Cargo.toml | 2
-rw-r--r--  compiler/rustc_middle/benches/lib.rs | 54
-rw-r--r--  compiler/rustc_middle/src/arena.rs | 5
-rw-r--r--  compiler/rustc_middle/src/dep_graph/dep_node.rs | 152
-rw-r--r--  compiler/rustc_middle/src/dep_graph/mod.rs | 52
-rw-r--r--  compiler/rustc_middle/src/error.rs | 27
-rw-r--r--  compiler/rustc_middle/src/hir/map/mod.rs | 129
-rw-r--r--  compiler/rustc_middle/src/hir/mod.rs | 36
-rw-r--r--  compiler/rustc_middle/src/infer/unify_key.rs | 2
-rw-r--r--  compiler/rustc_middle/src/lib.rs | 6
-rw-r--r--  compiler/rustc_middle/src/lint.rs | 329
-rw-r--r--  compiler/rustc_middle/src/macros.rs | 13
-rw-r--r--  compiler/rustc_middle/src/middle/limits.rs | 2
-rw-r--r--  compiler/rustc_middle/src/middle/privacy.rs | 214
-rw-r--r--  compiler/rustc_middle/src/middle/resolve_lifetime.rs | 15
-rw-r--r--  compiler/rustc_middle/src/middle/stability.rs | 11
-rw-r--r--  compiler/rustc_middle/src/mir/interpret/error.rs | 19
-rw-r--r--  compiler/rustc_middle/src/mir/interpret/pointer.rs | 2
-rw-r--r--  compiler/rustc_middle/src/mir/interpret/queries.rs | 42
-rw-r--r--  compiler/rustc_middle/src/mir/mod.rs | 114
-rw-r--r--  compiler/rustc_middle/src/mir/mono.rs | 6
-rw-r--r--  compiler/rustc_middle/src/mir/pretty.rs | 3
-rw-r--r--  compiler/rustc_middle/src/mir/query.rs | 9
-rw-r--r--  compiler/rustc_middle/src/mir/syntax.rs | 26
-rw-r--r--  compiler/rustc_middle/src/mir/tcx.rs | 5
-rw-r--r--  compiler/rustc_middle/src/mir/type_foldable.rs | 28
-rw-r--r--  compiler/rustc_middle/src/mir/type_visitable.rs | 19
-rw-r--r--  compiler/rustc_middle/src/mir/visit.rs | 7
-rw-r--r--  compiler/rustc_middle/src/query/mod.rs | 302
-rw-r--r--  compiler/rustc_middle/src/thir.rs | 9
-rw-r--r--  compiler/rustc_middle/src/traits/mod.rs | 16
-rw-r--r--  compiler/rustc_middle/src/traits/query.rs | 4
-rw-r--r--  compiler/rustc_middle/src/traits/select.rs | 12
-rw-r--r--  compiler/rustc_middle/src/traits/structural_impls.rs | 2
-rw-r--r--  compiler/rustc_middle/src/ty/abstract_const.rs | 12
-rw-r--r--  compiler/rustc_middle/src/ty/adjustment.rs | 3
-rw-r--r--  compiler/rustc_middle/src/ty/adt.rs | 27
-rw-r--r--  compiler/rustc_middle/src/ty/cast.rs | 28
-rw-r--r--  compiler/rustc_middle/src/ty/codec.rs | 1
-rw-r--r--  compiler/rustc_middle/src/ty/consts.rs | 52
-rw-r--r--  compiler/rustc_middle/src/ty/consts/kind.rs | 52
-rw-r--r--  compiler/rustc_middle/src/ty/context.rs | 176
-rw-r--r--  compiler/rustc_middle/src/ty/diagnostics.rs | 8
-rw-r--r--  compiler/rustc_middle/src/ty/erase_regions.rs | 5
-rw-r--r--  compiler/rustc_middle/src/ty/error.rs | 8
-rw-r--r--  compiler/rustc_middle/src/ty/fast_reject.rs | 11
-rw-r--r--  compiler/rustc_middle/src/ty/flags.rs | 18
-rw-r--r--  compiler/rustc_middle/src/ty/fold.rs | 40
-rw-r--r--  compiler/rustc_middle/src/ty/generics.rs | 3
-rw-r--r--  compiler/rustc_middle/src/ty/inhabitedness/def_id_forest.rs | 145
-rw-r--r--  compiler/rustc_middle/src/ty/inhabitedness/inhabited_predicate.rs | 204
-rw-r--r--  compiler/rustc_middle/src/ty/inhabitedness/mod.rs | 258
-rw-r--r--  compiler/rustc_middle/src/ty/instance.rs | 6
-rw-r--r--  compiler/rustc_middle/src/ty/layout.rs | 2375
-rw-r--r--  compiler/rustc_middle/src/ty/layout_sanity_check.rs | 303
-rw-r--r--  compiler/rustc_middle/src/ty/mod.rs | 141
-rw-r--r--  compiler/rustc_middle/src/ty/normalize_erasing_regions.rs | 24
-rw-r--r--  compiler/rustc_middle/src/ty/opaque_types.rs | 218
-rw-r--r--  compiler/rustc_middle/src/ty/parameterized.rs | 7
-rw-r--r--  compiler/rustc_middle/src/ty/print/mod.rs | 13
-rw-r--r--  compiler/rustc_middle/src/ty/print/pretty.rs | 230
-rw-r--r--  compiler/rustc_middle/src/ty/query.rs | 24
-rw-r--r--  compiler/rustc_middle/src/ty/relate.rs | 14
-rw-r--r--  compiler/rustc_middle/src/ty/rvalue_scopes.rs | 2
-rw-r--r--  compiler/rustc_middle/src/ty/structural_impls.rs | 55
-rw-r--r--  compiler/rustc_middle/src/ty/sty.rs | 150
-rw-r--r--  compiler/rustc_middle/src/ty/subst.rs | 134
-rw-r--r--  compiler/rustc_middle/src/ty/util.rs | 41
-rw-r--r--  compiler/rustc_middle/src/ty/visit.rs | 32
-rw-r--r--  compiler/rustc_middle/src/ty/walk.rs | 16
-rw-r--r--  compiler/rustc_middle/src/values.rs | 180
71 files changed, 2415 insertions, 4275 deletions
diff --git a/compiler/rustc_middle/Cargo.toml b/compiler/rustc_middle/Cargo.toml
index cca17a4ec..de916ea8c 100644
--- a/compiler/rustc_middle/Cargo.toml
+++ b/compiler/rustc_middle/Cargo.toml
@@ -12,8 +12,6 @@ chalk-ir = "0.80.0"
either = "1.5.0"
gsgdt = "0.1.2"
polonius-engine = "0.13.0"
-rand = "0.8.4"
-rand_xoshiro = "0.6.0"
rustc_apfloat = { path = "../rustc_apfloat" }
rustc_arena = { path = "../rustc_arena" }
rustc_ast = { path = "../rustc_ast" }
diff --git a/compiler/rustc_middle/benches/lib.rs b/compiler/rustc_middle/benches/lib.rs
deleted file mode 100644
index 237751bcb..000000000
--- a/compiler/rustc_middle/benches/lib.rs
+++ /dev/null
@@ -1,54 +0,0 @@
-#![feature(test)]
-
-extern crate test;
-
-use test::Bencher;
-
-// Static/dynamic method dispatch
-
-struct Struct {
- field: isize,
-}
-
-trait Trait {
- fn method(&self) -> isize;
-}
-
-impl Trait for Struct {
- fn method(&self) -> isize {
- self.field
- }
-}
-
-#[bench]
-fn trait_vtable_method_call(b: &mut Bencher) {
- let s = Struct { field: 10 };
- let t = &s as &dyn Trait;
- b.iter(|| t.method());
-}
-
-#[bench]
-fn trait_static_method_call(b: &mut Bencher) {
- let s = Struct { field: 10 };
- b.iter(|| s.method());
-}
-
-// Overhead of various match forms
-
-#[bench]
-fn option_some(b: &mut Bencher) {
- let x = Some(10);
- b.iter(|| match x {
- Some(y) => y,
- None => 11,
- });
-}
-
-#[bench]
-fn vec_pattern(b: &mut Bencher) {
- let x = [1, 2, 3, 4, 5, 6];
- b.iter(|| match x {
- [1, 2, 3, ..] => 10,
- _ => 11,
- });
-}
diff --git a/compiler/rustc_middle/src/arena.rs b/compiler/rustc_middle/src/arena.rs
index 6bdf5a023..f8aae86fe 100644
--- a/compiler/rustc_middle/src/arena.rs
+++ b/compiler/rustc_middle/src/arena.rs
@@ -77,7 +77,7 @@ macro_rules! arena_types {
rustc_middle::infer::canonical::QueryResponse<'tcx, rustc_middle::ty::Ty<'tcx>>
>,
[] all_traits: Vec<rustc_hir::def_id::DefId>,
- [] privacy_access_levels: rustc_middle::middle::privacy::AccessLevels,
+ [] effective_visibilities: rustc_middle::middle::privacy::EffectiveVisibilities,
[] foreign_module: rustc_session::cstore::ForeignModule,
[] foreign_modules: Vec<rustc_session::cstore::ForeignModule>,
[] upvars_mentioned: rustc_data_structures::fx::FxIndexMap<rustc_hir::HirId, rustc_hir::Upvar>,
@@ -96,13 +96,14 @@ macro_rules! arena_types {
// since we need to allocate this type on both the `rustc_hir` arena
// (during lowering) and the `librustc_middle` arena (for decoding MIR)
[decode] asm_template: rustc_ast::InlineAsmTemplatePiece,
- [decode] used_trait_imports: rustc_data_structures::fx::FxHashSet<rustc_hir::def_id::LocalDefId>,
+ [decode] used_trait_imports: rustc_data_structures::unord::UnordSet<rustc_hir::def_id::LocalDefId>,
[decode] is_late_bound_map: rustc_data_structures::fx::FxIndexSet<rustc_hir::def_id::LocalDefId>,
[decode] impl_source: rustc_middle::traits::ImplSource<'tcx, ()>,
[] dep_kind: rustc_middle::dep_graph::DepKindStruct<'tcx>,
[decode] trait_impl_trait_tys: rustc_data_structures::fx::FxHashMap<rustc_hir::def_id::DefId, rustc_middle::ty::Ty<'tcx>>,
+ [] bit_set_u32: rustc_index::bit_set::BitSet<u32>,
]);
)
}
diff --git a/compiler/rustc_middle/src/dep_graph/dep_node.rs b/compiler/rustc_middle/src/dep_graph/dep_node.rs
index 1fa0c6bab..6b5568269 100644
--- a/compiler/rustc_middle/src/dep_graph/dep_node.rs
+++ b/compiler/rustc_middle/src/dep_graph/dep_node.rs
@@ -62,86 +62,13 @@ use crate::ty::TyCtxt;
use rustc_data_structures::fingerprint::Fingerprint;
use rustc_hir::def_id::{CrateNum, DefId, LocalDefId};
use rustc_hir::definitions::DefPathHash;
-use rustc_hir::HirId;
+use rustc_hir::{HirId, ItemLocalId, OwnerId};
use rustc_query_system::dep_graph::FingerprintStyle;
use rustc_span::symbol::Symbol;
use std::hash::Hash;
pub use rustc_query_system::dep_graph::{DepContext, DepNodeParams};
-/// This struct stores metadata about each DepKind.
-///
-/// Information is retrieved by indexing the `DEP_KINDS` array using the integer value
-/// of the `DepKind`. Overall, this allows to implement `DepContext` using this manual
-/// jump table instead of large matches.
-pub struct DepKindStruct<'tcx> {
- /// Anonymous queries cannot be replayed from one compiler invocation to the next.
- /// When their result is needed, it is recomputed. They are useful for fine-grained
- /// dependency tracking, and caching within one compiler invocation.
- pub is_anon: bool,
-
- /// Eval-always queries do not track their dependencies, and are always recomputed, even if
- /// their inputs have not changed since the last compiler invocation. The result is still
- /// cached within one compiler invocation.
- pub is_eval_always: bool,
-
- /// Whether the query key can be recovered from the hashed fingerprint.
- /// See [DepNodeParams] trait for the behaviour of each key type.
- pub fingerprint_style: FingerprintStyle,
-
- /// The red/green evaluation system will try to mark a specific DepNode in the
- /// dependency graph as green by recursively trying to mark the dependencies of
- /// that `DepNode` as green. While doing so, it will sometimes encounter a `DepNode`
- /// where we don't know if it is red or green and we therefore actually have
- /// to recompute its value in order to find out. Since the only piece of
- /// information that we have at that point is the `DepNode` we are trying to
- /// re-evaluate, we need some way to re-run a query from just that. This is what
- /// `force_from_dep_node()` implements.
- ///
- /// In the general case, a `DepNode` consists of a `DepKind` and an opaque
- /// GUID/fingerprint that will uniquely identify the node. This GUID/fingerprint
- /// is usually constructed by computing a stable hash of the query-key that the
- /// `DepNode` corresponds to. Consequently, it is not in general possible to go
- /// back from hash to query-key (since hash functions are not reversible). For
- /// this reason `force_from_dep_node()` is expected to fail from time to time
- /// because we just cannot find out, from the `DepNode` alone, what the
- /// corresponding query-key is and therefore cannot re-run the query.
- ///
- /// The system deals with this case letting `try_mark_green` fail which forces
- /// the root query to be re-evaluated.
- ///
- /// Now, if `force_from_dep_node()` would always fail, it would be pretty useless.
- /// Fortunately, we can use some contextual information that will allow us to
- /// reconstruct query-keys for certain kinds of `DepNode`s. In particular, we
- /// enforce by construction that the GUID/fingerprint of certain `DepNode`s is a
- /// valid `DefPathHash`. Since we also always build a huge table that maps every
- /// `DefPathHash` in the current codebase to the corresponding `DefId`, we have
- /// everything we need to re-run the query.
- ///
- /// Take the `mir_promoted` query as an example. Like many other queries, it
- /// just has a single parameter: the `DefId` of the item it will compute the
- /// validated MIR for. Now, when we call `force_from_dep_node()` on a `DepNode`
- /// with kind `MirValidated`, we know that the GUID/fingerprint of the `DepNode`
- /// is actually a `DefPathHash`, and can therefore just look up the corresponding
- /// `DefId` in `tcx.def_path_hash_to_def_id`.
- pub force_from_dep_node: Option<fn(tcx: TyCtxt<'tcx>, dep_node: DepNode) -> bool>,
-
- /// Invoke a query to put the on-disk cached value in memory.
- pub try_load_from_on_disk_cache: Option<fn(TyCtxt<'tcx>, DepNode)>,
-}
-
-impl DepKind {
- #[inline(always)]
- pub fn fingerprint_style(self, tcx: TyCtxt<'_>) -> FingerprintStyle {
- // Only fetch the DepKindStruct once.
- let data = tcx.query_kind(self);
- if data.is_anon {
- return FingerprintStyle::Opaque;
- }
- data.fingerprint_style
- }
-}
-
macro_rules! define_dep_nodes {
(
$($(#[$attr:meta])*
@@ -159,7 +86,7 @@ macro_rules! define_dep_nodes {
$( $( #[$attr] )* $variant),*
}
- fn dep_kind_from_label_string(label: &str) -> Result<DepKind, ()> {
+ pub(super) fn dep_kind_from_label_string(label: &str) -> Result<DepKind, ()> {
match label {
$(stringify!($variant) => Ok(DepKind::$variant),)*
_ => Err(()),
@@ -214,11 +141,6 @@ static_assert_size!(DepNode, 18);
static_assert_size!(DepNode, 24);
pub trait DepNodeExt: Sized {
- /// Construct a DepNode from the given DepKind and DefPathHash. This
- /// method will assert that the given DepKind actually requires a
- /// single DefId/DefPathHash parameter.
- fn from_def_path_hash(tcx: TyCtxt<'_>, def_path_hash: DefPathHash, kind: DepKind) -> Self;
-
/// Extracts the DefId corresponding to this DepNode. This will work
/// if two conditions are met:
///
@@ -243,14 +165,6 @@ pub trait DepNodeExt: Sized {
}
impl DepNodeExt for DepNode {
- /// Construct a DepNode from the given DepKind and DefPathHash. This
- /// method will assert that the given DepKind actually requires a
- /// single DefId/DefPathHash parameter.
- fn from_def_path_hash(tcx: TyCtxt<'_>, def_path_hash: DefPathHash, kind: DepKind) -> DepNode {
- debug_assert!(kind.fingerprint_style(tcx) == FingerprintStyle::DefPathHash);
- DepNode { kind, hash: def_path_hash.0.into() }
- }
-
/// Extracts the DefId corresponding to this DepNode. This will work
/// if two conditions are met:
///
@@ -262,7 +176,7 @@ impl DepNodeExt for DepNode {
/// refers to something from the previous compilation session that
/// has been removed.
fn extract_def_id<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Option<DefId> {
- if self.kind.fingerprint_style(tcx) == FingerprintStyle::DefPathHash {
+ if tcx.fingerprint_style(self.kind) == FingerprintStyle::DefPathHash {
Some(tcx.def_path_hash_to_def_id(DefPathHash(self.hash.into()), &mut || {
panic!("Failed to extract DefId: {:?} {}", self.kind, self.hash)
}))
@@ -279,8 +193,8 @@ impl DepNodeExt for DepNode {
) -> Result<DepNode, ()> {
let kind = dep_kind_from_label_string(label)?;
- match kind.fingerprint_style(tcx) {
- FingerprintStyle::Opaque => Err(()),
+ match tcx.fingerprint_style(kind) {
+ FingerprintStyle::Opaque | FingerprintStyle::HirId => Err(()),
FingerprintStyle::Unit => Ok(DepNode::new_no_params(tcx, kind)),
FingerprintStyle::DefPathHash => {
Ok(DepNode::from_def_path_hash(tcx, def_path_hash, kind))
@@ -355,6 +269,28 @@ impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for LocalDefId {
}
}
+impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for OwnerId {
+ #[inline(always)]
+ fn fingerprint_style() -> FingerprintStyle {
+ FingerprintStyle::DefPathHash
+ }
+
+ #[inline(always)]
+ fn to_fingerprint(&self, tcx: TyCtxt<'tcx>) -> Fingerprint {
+ self.to_def_id().to_fingerprint(tcx)
+ }
+
+ #[inline(always)]
+ fn to_debug_str(&self, tcx: TyCtxt<'tcx>) -> String {
+ self.to_def_id().to_debug_str(tcx)
+ }
+
+ #[inline(always)]
+ fn recover(tcx: TyCtxt<'tcx>, dep_node: &DepNode) -> Option<Self> {
+ dep_node.extract_def_id(tcx).map(|id| OwnerId { def_id: id.expect_local() })
+ }
+}
+
impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for CrateNum {
#[inline(always)]
fn fingerprint_style() -> FingerprintStyle {
@@ -408,7 +344,7 @@ impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for (DefId, DefId) {
impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for HirId {
#[inline(always)]
fn fingerprint_style() -> FingerprintStyle {
- FingerprintStyle::Opaque
+ FingerprintStyle::HirId
}
// We actually would not need to specialize the implementation of this
@@ -417,10 +353,36 @@ impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for HirId {
#[inline(always)]
fn to_fingerprint(&self, tcx: TyCtxt<'tcx>) -> Fingerprint {
let HirId { owner, local_id } = *self;
-
let def_path_hash = tcx.def_path_hash(owner.to_def_id());
- let local_id = Fingerprint::from_smaller_hash(local_id.as_u32().into());
+ Fingerprint::new(
+ // `owner` is local, so is completely defined by the local hash
+ def_path_hash.local_hash(),
+ local_id.as_u32().into(),
+ )
+ }
+
+ #[inline(always)]
+ fn to_debug_str(&self, tcx: TyCtxt<'tcx>) -> String {
+ let HirId { owner, local_id } = *self;
+ format!("{}.{}", tcx.def_path_str(owner.to_def_id()), local_id.as_u32())
+ }
- def_path_hash.0.combine(local_id)
+ #[inline(always)]
+ fn recover(tcx: TyCtxt<'tcx>, dep_node: &DepNode) -> Option<Self> {
+ if tcx.fingerprint_style(dep_node.kind) == FingerprintStyle::HirId {
+ let (local_hash, local_id) = Fingerprint::from(dep_node.hash).as_value();
+ let def_path_hash = DefPathHash::new(tcx.sess.local_stable_crate_id(), local_hash);
+ let def_id = tcx
+ .def_path_hash_to_def_id(def_path_hash, &mut || {
+ panic!("Failed to extract HirId: {:?} {}", dep_node.kind, dep_node.hash)
+ })
+ .expect_local();
+ let local_id = local_id
+ .try_into()
+ .unwrap_or_else(|_| panic!("local id should be u32, found {:?}", local_id));
+ Some(HirId { owner: OwnerId { def_id }, local_id: ItemLocalId::from_u32(local_id) })
+ } else {
+ None
+ }
}
}
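
Since `HirId` dep nodes now get their own `FingerprintStyle::HirId`, the fingerprint becomes a reversible encoding rather than an opaque hash. A minimal sketch of that round trip, with made-up helper names; only the `Fingerprint` calls mirror the code above:

```rust
// Illustrative sketch (not part of the patch): the packing done by the new
// `to_fingerprint` and the split that `recover` performs.
use rustc_data_structures::fingerprint::Fingerprint;
use rustc_hir::definitions::DefPathHash;
use rustc_hir::ItemLocalId;

fn pack_hir_id(owner_hash: DefPathHash, local_id: ItemLocalId) -> Fingerprint {
    // High half: the owner's crate-local DefPathHash; low half: the item-local index.
    Fingerprint::new(owner_hash.local_hash(), local_id.as_u32().into())
}

fn unpack_hir_id(fingerprint: Fingerprint) -> (u64, u64) {
    // `recover` reverses the packing and rebuilds a `DefPathHash` from the first half.
    fingerprint.as_value()
}
```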
diff --git a/compiler/rustc_middle/src/dep_graph/mod.rs b/compiler/rustc_middle/src/dep_graph/mod.rs
index c8b3b52b0..2e62bebc8 100644
--- a/compiler/rustc_middle/src/dep_graph/mod.rs
+++ b/compiler/rustc_middle/src/dep_graph/mod.rs
@@ -11,15 +11,17 @@ pub use rustc_query_system::dep_graph::{
SerializedDepNodeIndex, WorkProduct, WorkProductId,
};
-pub use dep_node::{label_strs, DepKind, DepKindStruct, DepNode, DepNodeExt};
+pub use dep_node::{label_strs, DepKind, DepNode, DepNodeExt};
pub(crate) use dep_node::{make_compile_codegen_unit, make_compile_mono_item};
pub type DepGraph = rustc_query_system::dep_graph::DepGraph<DepKind>;
+
pub type TaskDeps = rustc_query_system::dep_graph::TaskDeps<DepKind>;
pub type TaskDepsRef<'a> = rustc_query_system::dep_graph::TaskDepsRef<'a, DepKind>;
pub type DepGraphQuery = rustc_query_system::dep_graph::DepGraphQuery<DepKind>;
pub type SerializedDepGraph = rustc_query_system::dep_graph::SerializedDepGraph<DepKind>;
pub type EdgeFilter = rustc_query_system::dep_graph::debug::EdgeFilter<DepKind>;
+pub type DepKindStruct<'tcx> = rustc_query_system::dep_graph::DepKindStruct<TyCtxt<'tcx>>;
impl rustc_query_system::dep_graph::DepKind for DepKind {
const NULL: Self = DepKind::Null;
@@ -91,50 +93,8 @@ impl<'tcx> DepContext for TyCtxt<'tcx> {
self.sess
}
- #[inline(always)]
- fn fingerprint_style(&self, kind: DepKind) -> rustc_query_system::dep_graph::FingerprintStyle {
- kind.fingerprint_style(*self)
- }
-
- #[inline(always)]
- fn is_eval_always(&self, kind: DepKind) -> bool {
- self.query_kind(kind).is_eval_always
- }
-
- fn try_force_from_dep_node(&self, dep_node: DepNode) -> bool {
- debug!("try_force_from_dep_node({:?}) --- trying to force", dep_node);
-
- // We must avoid ever having to call `force_from_dep_node()` for a
- // `DepNode::codegen_unit`:
- // Since we cannot reconstruct the query key of a `DepNode::codegen_unit`, we
- // would always end up having to evaluate the first caller of the
- // `codegen_unit` query that *is* reconstructible. This might very well be
- // the `compile_codegen_unit` query, thus re-codegenning the whole CGU just
- // to re-trigger calling the `codegen_unit` query with the right key. At
- // that point we would already have re-done all the work we are trying to
- // avoid doing in the first place.
- // The solution is simple: Just explicitly call the `codegen_unit` query for
- // each CGU, right after partitioning. This way `try_mark_green` will always
- // hit the cache instead of having to go through `force_from_dep_node`.
- // This assertion makes sure, we actually keep applying the solution above.
- debug_assert!(
- dep_node.kind != DepKind::codegen_unit,
- "calling force_from_dep_node() on DepKind::codegen_unit"
- );
-
- let cb = self.query_kind(dep_node.kind);
- if let Some(f) = cb.force_from_dep_node {
- f(*self, dep_node);
- true
- } else {
- false
- }
- }
-
- fn try_load_from_on_disk_cache(&self, dep_node: DepNode) {
- let cb = self.query_kind(dep_node.kind);
- if let Some(f) = cb.try_load_from_on_disk_cache {
- f(*self, dep_node)
- }
+ #[inline]
+ fn dep_kind_info(&self, dep_kind: DepKind) -> &DepKindStruct<'tcx> {
+ &self.query_kinds[dep_kind as usize]
}
}
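
With the `DepKindStruct` table now exposed through the single `dep_kind_info` hook, the per-kind logic deleted above (fingerprint style, eval-always, forcing) can be written generically in `rustc_query_system`. A rough sketch of how a caller recovers the fingerprint style, assuming the struct kept the `is_anon` and `fingerprint_style` fields shown earlier in this diff; the helper itself is illustrative, not from the patch:

```rust
// Sketch only: derive the fingerprint style from the new `dep_kind_info` hook.
fn fingerprint_style_of<'tcx>(tcx: TyCtxt<'tcx>, kind: DepKind) -> FingerprintStyle {
    let info = tcx.dep_kind_info(kind);
    // Anonymous queries have no reconstructible key, so their style is opaque.
    if info.is_anon {
        return FingerprintStyle::Opaque;
    }
    info.fingerprint_style
}
```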
diff --git a/compiler/rustc_middle/src/error.rs b/compiler/rustc_middle/src/error.rs
index 18b31a75b..a7a7ac059 100644
--- a/compiler/rustc_middle/src/error.rs
+++ b/compiler/rustc_middle/src/error.rs
@@ -1,10 +1,10 @@
-use rustc_macros::SessionDiagnostic;
+use rustc_macros::Diagnostic;
use rustc_span::Span;
use crate::ty::Ty;
-#[derive(SessionDiagnostic)]
-#[diag(middle::drop_check_overflow, code = "E0320")]
+#[derive(Diagnostic)]
+#[diag(middle_drop_check_overflow, code = "E0320")]
#[note]
pub struct DropCheckOverflow<'tcx> {
#[primary_span]
@@ -13,8 +13,8 @@ pub struct DropCheckOverflow<'tcx> {
pub overflow_ty: Ty<'tcx>,
}
-#[derive(SessionDiagnostic)]
-#[diag(middle::opaque_hidden_type_mismatch)]
+#[derive(Diagnostic)]
+#[diag(middle_opaque_hidden_type_mismatch)]
pub struct OpaqueHiddenTypeMismatch<'tcx> {
pub self_ty: Ty<'tcx>,
pub other_ty: Ty<'tcx>,
@@ -25,22 +25,22 @@ pub struct OpaqueHiddenTypeMismatch<'tcx> {
pub sub: TypeMismatchReason,
}
-#[derive(SessionSubdiagnostic)]
+#[derive(Subdiagnostic)]
pub enum TypeMismatchReason {
- #[label(middle::conflict_types)]
+ #[label(middle_conflict_types)]
ConflictType {
#[primary_span]
span: Span,
},
- #[note(middle::previous_use_here)]
+ #[note(middle_previous_use_here)]
PreviousUse {
#[primary_span]
span: Span,
},
}
-#[derive(SessionDiagnostic)]
-#[diag(middle::limit_invalid)]
+#[derive(Diagnostic)]
+#[diag(middle_limit_invalid)]
pub struct LimitInvalid<'a> {
#[primary_span]
pub span: Span,
@@ -48,3 +48,10 @@ pub struct LimitInvalid<'a> {
pub value_span: Span,
pub error_str: &'a str,
}
+
+#[derive(Diagnostic)]
+#[diag(middle_const_eval_non_int)]
+pub struct ConstEvalNonIntError {
+ #[primary_span]
+ pub span: Span,
+}
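
For reference, a minimal sketch of a diagnostic under the renamed derive and the new underscore slug style; the struct and the `middle_example_error` slug below are hypothetical, only the attribute shapes mirror this file:

```rust
use rustc_macros::Diagnostic;
use rustc_span::Span;

// Hypothetical example: the derive was renamed from `SessionDiagnostic` to
// `Diagnostic`, and slugs switched from `middle::example_error` to
// `middle_example_error`.
#[derive(Diagnostic)]
#[diag(middle_example_error)]
pub struct ExampleError {
    #[primary_span]
    pub span: Span,
}
```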
diff --git a/compiler/rustc_middle/src/hir/map/mod.rs b/compiler/rustc_middle/src/hir/map/mod.rs
index 30a23c342..83a4d16d7 100644
--- a/compiler/rustc_middle/src/hir/map/mod.rs
+++ b/compiler/rustc_middle/src/hir/map/mod.rs
@@ -14,7 +14,7 @@ use rustc_index::vec::Idx;
use rustc_middle::hir::nested_filter;
use rustc_span::def_id::StableCrateId;
use rustc_span::symbol::{kw, sym, Ident, Symbol};
-use rustc_span::Span;
+use rustc_span::{Span, DUMMY_SP};
use rustc_target::spec::abi::Abi;
#[inline]
@@ -61,7 +61,7 @@ pub struct ParentHirIterator<'hir> {
}
impl<'hir> Iterator for ParentHirIterator<'hir> {
- type Item = (HirId, Node<'hir>);
+ type Item = HirId;
fn next(&mut self) -> Option<Self::Item> {
if self.current_id == CRATE_HIR_ID {
@@ -77,10 +77,7 @@ impl<'hir> Iterator for ParentHirIterator<'hir> {
}
self.current_id = parent_id;
- if let Some(node) = self.map.find(parent_id) {
- return Some((parent_id, node));
- }
- // If this `HirId` doesn't have an entry, skip it and look for its `parent_id`.
+ return Some(parent_id);
}
}
}
@@ -93,7 +90,7 @@ pub struct ParentOwnerIterator<'hir> {
}
impl<'hir> Iterator for ParentOwnerIterator<'hir> {
- type Item = (LocalDefId, OwnerNode<'hir>);
+ type Item = (OwnerId, OwnerNode<'hir>);
fn next(&mut self) -> Option<Self::Item> {
if self.current_id.local_id.index() != 0 {
@@ -107,13 +104,13 @@ impl<'hir> Iterator for ParentOwnerIterator<'hir> {
}
loop {
// There are nodes that do not have entries, so we need to skip them.
- let parent_id = self.map.def_key(self.current_id.owner).parent;
+ let parent_id = self.map.def_key(self.current_id.owner.def_id).parent;
- let parent_id = parent_id.map_or(CRATE_HIR_ID.owner, |local_def_index| {
+ let parent_id = parent_id.map_or(CRATE_OWNER_ID, |local_def_index| {
let def_id = LocalDefId { local_def_index };
self.map.local_def_id_to_hir_id(def_id).owner
});
- self.current_id = HirId::make_owner(parent_id);
+ self.current_id = HirId::make_owner(parent_id.def_id);
// If this `HirId` doesn't have an entry, skip it and look for its `parent_id`.
if let Some(node) = self.map.tcx.hir_owner(self.current_id.owner) {
@@ -131,7 +128,7 @@ impl<'hir> Map<'hir> {
#[inline]
pub fn root_module(self) -> &'hir Mod<'hir> {
- match self.tcx.hir_owner(CRATE_DEF_ID).map(|o| o.node) {
+ match self.tcx.hir_owner(CRATE_OWNER_ID).map(|o| o.node) {
Some(OwnerNode::Crate(item)) => item,
_ => bug!(),
}
@@ -186,7 +183,7 @@ impl<'hir> Map<'hir> {
#[inline]
pub fn opt_local_def_id(self, hir_id: HirId) -> Option<LocalDefId> {
if hir_id.local_id == ItemLocalId::new(0) {
- Some(hir_id.owner)
+ Some(hir_id.owner.def_id)
} else {
self.tcx
.hir_owner_nodes(hir_id.owner)
@@ -244,7 +241,7 @@ impl<'hir> Map<'hir> {
Node::ImplItem(item) => match item.kind {
ImplItemKind::Const(..) => DefKind::AssocConst,
ImplItemKind::Fn(..) => DefKind::AssocFn,
- ImplItemKind::TyAlias(..) => DefKind::AssocTy,
+ ImplItemKind::Type(..) => DefKind::AssocTy,
},
Node::Variant(_) => DefKind::Variant,
Node::Ctor(variant_data) => {
@@ -352,24 +349,24 @@ impl<'hir> Map<'hir> {
}
pub fn get_generics(self, id: LocalDefId) -> Option<&'hir Generics<'hir>> {
- let node = self.tcx.hir_owner(id)?;
+ let node = self.tcx.hir_owner(OwnerId { def_id: id })?;
node.node.generics()
}
pub fn item(self, id: ItemId) -> &'hir Item<'hir> {
- self.tcx.hir_owner(id.def_id).unwrap().node.expect_item()
+ self.tcx.hir_owner(id.owner_id).unwrap().node.expect_item()
}
pub fn trait_item(self, id: TraitItemId) -> &'hir TraitItem<'hir> {
- self.tcx.hir_owner(id.def_id).unwrap().node.expect_trait_item()
+ self.tcx.hir_owner(id.owner_id).unwrap().node.expect_trait_item()
}
pub fn impl_item(self, id: ImplItemId) -> &'hir ImplItem<'hir> {
- self.tcx.hir_owner(id.def_id).unwrap().node.expect_impl_item()
+ self.tcx.hir_owner(id.owner_id).unwrap().node.expect_impl_item()
}
pub fn foreign_item(self, id: ForeignItemId) -> &'hir ForeignItem<'hir> {
- self.tcx.hir_owner(id.def_id).unwrap().node.expect_foreign_item()
+ self.tcx.hir_owner(id.owner_id).unwrap().node.expect_foreign_item()
}
pub fn body(self, id: BodyId) -> &'hir Body<'hir> {
@@ -393,8 +390,8 @@ impl<'hir> Map<'hir> {
}
pub fn enclosing_body_owner(self, hir_id: HirId) -> LocalDefId {
- for (parent, _) in self.parent_iter(hir_id) {
- if let Some(body) = self.find(parent).map(associated_body).flatten() {
+ for (_, node) in self.parent_iter(hir_id) {
+ if let Some(body) = associated_body(node) {
return self.body_owner_def_id(body);
}
}
@@ -532,7 +529,7 @@ impl<'hir> Map<'hir> {
pub fn get_module(self, module: LocalDefId) -> (&'hir Mod<'hir>, Span, HirId) {
let hir_id = HirId::make_owner(module);
- match self.tcx.hir_owner(module).map(|o| o.node) {
+ match self.tcx.hir_owner(hir_id.owner).map(|o| o.node) {
Some(OwnerNode::Item(&Item { span, kind: ItemKind::Mod(ref m), .. })) => {
(m, span, hir_id)
}
@@ -622,26 +619,33 @@ impl<'hir> Map<'hir> {
pub fn for_each_module(self, mut f: impl FnMut(LocalDefId)) {
let crate_items = self.tcx.hir_crate_items(());
for module in crate_items.submodules.iter() {
- f(*module)
+ f(module.def_id)
}
}
#[inline]
pub fn par_for_each_module(self, f: impl Fn(LocalDefId) + Sync + Send) {
let crate_items = self.tcx.hir_crate_items(());
- par_for_each_in(&crate_items.submodules[..], |module| f(*module))
+ par_for_each_in(&crate_items.submodules[..], |module| f(module.def_id))
}
/// Returns an iterator for the nodes in the ancestor tree of the `current_id`
/// until the crate root is reached. Prefer this over your own loop using `get_parent_node`.
#[inline]
- pub fn parent_iter(self, current_id: HirId) -> ParentHirIterator<'hir> {
+ pub fn parent_id_iter(self, current_id: HirId) -> impl Iterator<Item = HirId> + 'hir {
ParentHirIterator { current_id, map: self }
}
/// Returns an iterator for the nodes in the ancestor tree of the `current_id`
/// until the crate root is reached. Prefer this over your own loop using `get_parent_node`.
#[inline]
+ pub fn parent_iter(self, current_id: HirId) -> impl Iterator<Item = (HirId, Node<'hir>)> {
+ self.parent_id_iter(current_id).filter_map(move |id| Some((id, self.find(id)?)))
+ }
+
+ /// Returns an iterator for the nodes in the ancestor tree of the `current_id`
+ /// until the crate root is reached. Prefer this over your own loop using `get_parent_node`.
+ #[inline]
pub fn parent_owner_iter(self, current_id: HirId) -> ParentOwnerIterator<'hir> {
ParentOwnerIterator { current_id, map: self }
}
@@ -721,27 +725,27 @@ impl<'hir> Map<'hir> {
None
}
- /// Retrieves the `HirId` for `id`'s parent item, or `id` itself if no
+ /// Retrieves the `OwnerId` for `id`'s parent item, or `id` itself if no
/// parent item is in this map. The "parent item" is the closest parent node
/// in the HIR which is recorded by the map and is an item, either an item
/// in a module, trait, or impl.
- pub fn get_parent_item(self, hir_id: HirId) -> LocalDefId {
+ pub fn get_parent_item(self, hir_id: HirId) -> OwnerId {
if let Some((def_id, _node)) = self.parent_owner_iter(hir_id).next() {
def_id
} else {
- CRATE_DEF_ID
+ CRATE_OWNER_ID
}
}
- /// Returns the `HirId` of `id`'s nearest module parent, or `id` itself if no
+ /// Returns the `OwnerId` of `id`'s nearest module parent, or `id` itself if no
/// module parent is in this map.
- pub(super) fn get_module_parent_node(self, hir_id: HirId) -> LocalDefId {
+ pub(super) fn get_module_parent_node(self, hir_id: HirId) -> OwnerId {
for (def_id, node) in self.parent_owner_iter(hir_id) {
if let OwnerNode::Item(&Item { kind: ItemKind::Mod(_), .. }) = node {
return def_id;
}
}
- CRATE_DEF_ID
+ CRATE_OWNER_ID
}
/// When on an if expression, a match arm tail expression or a match arm, give back
@@ -814,30 +818,30 @@ impl<'hir> Map<'hir> {
}
bug!(
"expected foreign mod or inlined parent, found {}",
- self.node_to_string(HirId::make_owner(parent))
+ self.node_to_string(HirId::make_owner(parent.def_id))
)
}
- pub fn expect_owner(self, id: LocalDefId) -> OwnerNode<'hir> {
+ pub fn expect_owner(self, id: OwnerId) -> OwnerNode<'hir> {
self.tcx.hir_owner(id).unwrap_or_else(|| bug!("expected owner for {:?}", id)).node
}
pub fn expect_item(self, id: LocalDefId) -> &'hir Item<'hir> {
- match self.tcx.hir_owner(id) {
+ match self.tcx.hir_owner(OwnerId { def_id: id }) {
Some(Owner { node: OwnerNode::Item(item), .. }) => item,
_ => bug!("expected item, found {}", self.node_to_string(HirId::make_owner(id))),
}
}
pub fn expect_impl_item(self, id: LocalDefId) -> &'hir ImplItem<'hir> {
- match self.tcx.hir_owner(id) {
+ match self.tcx.hir_owner(OwnerId { def_id: id }) {
Some(Owner { node: OwnerNode::ImplItem(item), .. }) => item,
_ => bug!("expected impl item, found {}", self.node_to_string(HirId::make_owner(id))),
}
}
pub fn expect_trait_item(self, id: LocalDefId) -> &'hir TraitItem<'hir> {
- match self.tcx.hir_owner(id) {
+ match self.tcx.hir_owner(OwnerId { def_id: id }) {
Some(Owner { node: OwnerNode::TraitItem(item), .. }) => item,
_ => bug!("expected trait item, found {}", self.node_to_string(HirId::make_owner(id))),
}
@@ -850,11 +854,14 @@ impl<'hir> Map<'hir> {
}
}
- pub fn expect_foreign_item(self, id: LocalDefId) -> &'hir ForeignItem<'hir> {
+ pub fn expect_foreign_item(self, id: OwnerId) -> &'hir ForeignItem<'hir> {
match self.tcx.hir_owner(id) {
Some(Owner { node: OwnerNode::ForeignItem(item), .. }) => item,
_ => {
- bug!("expected foreign item, found {}", self.node_to_string(HirId::make_owner(id)))
+ bug!(
+ "expected foreign item, found {}",
+ self.node_to_string(HirId::make_owner(id.def_id))
+ )
}
}
}
@@ -934,9 +941,19 @@ impl<'hir> Map<'hir> {
let span = match self.find(hir_id)? {
// Function-like.
- Node::Item(Item { kind: ItemKind::Fn(sig, ..), .. })
- | Node::TraitItem(TraitItem { kind: TraitItemKind::Fn(sig, ..), .. })
- | Node::ImplItem(ImplItem { kind: ImplItemKind::Fn(sig, ..), .. }) => sig.span,
+ Node::Item(Item { kind: ItemKind::Fn(sig, ..), span: outer_span, .. })
+ | Node::TraitItem(TraitItem {
+ kind: TraitItemKind::Fn(sig, ..),
+ span: outer_span,
+ ..
+ })
+ | Node::ImplItem(ImplItem {
+ kind: ImplItemKind::Fn(sig, ..), span: outer_span, ..
+ }) => {
+ // Ensure that the returned span has the item's SyntaxContext, and not the
+ // SyntaxContext of the visibility.
+ sig.span.find_ancestor_in_same_ctxt(*outer_span).unwrap_or(*outer_span)
+ }
// Constants and Statics.
Node::Item(Item {
kind:
@@ -978,7 +995,11 @@ impl<'hir> Map<'hir> {
}
// Other cases.
Node::Item(item) => match &item.kind {
- ItemKind::Use(path, _) => path.span,
+ ItemKind::Use(path, _) => {
+ // Ensure that the returned span has the item's SyntaxContext, and not the
+ // SyntaxContext of the path.
+ path.span.find_ancestor_in_same_ctxt(item.span).unwrap_or(item.span)
+ }
_ => named_span(item.span, item.ident, item.kind.generics()),
},
Node::Variant(variant) => named_span(variant.span, variant.ident, None),
@@ -988,11 +1009,17 @@ impl<'hir> Map<'hir> {
_ => named_span(item.span, item.ident, None),
},
Node::Ctor(_) => return self.opt_span(self.get_parent_node(hir_id)),
- Node::Expr(Expr { kind: ExprKind::Closure(Closure { fn_decl_span, .. }), .. }) => {
- *fn_decl_span
+ Node::Expr(Expr {
+ kind: ExprKind::Closure(Closure { fn_decl_span, .. }),
+ span,
+ ..
+ }) => {
+ // Ensure that the returned span has the item's SyntaxContext.
+ fn_decl_span.find_ancestor_in_same_ctxt(*span).unwrap_or(*span)
}
_ => self.span_with_body(hir_id),
};
+ debug_assert_eq!(span.ctxt(), self.span_with_body(hir_id).ctxt());
Some(span)
}
@@ -1128,7 +1155,7 @@ pub(super) fn crate_hash(tcx: TyCtxt<'_>, crate_num: CrateNum) -> Svh {
.filter_map(|(def_id, info)| {
let _ = info.as_owner()?;
let def_path_hash = definitions.def_path_hash(def_id);
- let span = resolutions.source_span[def_id];
+ let span = resolutions.source_span.get(def_id).unwrap_or(&DUMMY_SP);
debug_assert_eq!(span.parent(), None);
Some((def_path_hash, span))
})
@@ -1217,7 +1244,7 @@ fn hir_id_to_string(map: Map<'_>, id: HirId) -> String {
format!("assoc const {} in {}{}", ii.ident, path_str(), id_str)
}
ImplItemKind::Fn(..) => format!("method {} in {}{}", ii.ident, path_str(), id_str),
- ImplItemKind::TyAlias(_) => {
+ ImplItemKind::Type(_) => {
format!("assoc type {} in {}{}", ii.ident, path_str(), id_str)
}
},
@@ -1290,7 +1317,7 @@ pub(crate) fn hir_crate_items(tcx: TyCtxt<'_>, _: ()) -> ModuleItems {
// A "crate collector" and "module collector" start at a
// module item (the former starts at the crate root) but only
// the former needs to collect it. ItemCollector does not do this for us.
- collector.submodules.push(CRATE_DEF_ID);
+ collector.submodules.push(CRATE_OWNER_ID);
tcx.hir().walk_toplevel_module(&mut collector);
let ItemCollector {
@@ -1318,7 +1345,7 @@ struct ItemCollector<'tcx> {
// otherwise it collects items in some module.
crate_collector: bool,
tcx: TyCtxt<'tcx>,
- submodules: Vec<LocalDefId>,
+ submodules: Vec<OwnerId>,
items: Vec<ItemId>,
trait_items: Vec<TraitItemId>,
impl_items: Vec<ImplItemId>,
@@ -1350,14 +1377,14 @@ impl<'hir> Visitor<'hir> for ItemCollector<'hir> {
fn visit_item(&mut self, item: &'hir Item<'hir>) {
if associated_body(Node::Item(item)).is_some() {
- self.body_owners.push(item.def_id);
+ self.body_owners.push(item.owner_id.def_id);
}
self.items.push(item.item_id());
// Items that are modules are handled here instead of in visit_mod.
if let ItemKind::Mod(module) = &item.kind {
- self.submodules.push(item.def_id);
+ self.submodules.push(item.owner_id);
// A module collector does not recurse inside nested modules.
if self.crate_collector {
intravisit::walk_mod(self, module, item.hir_id());
@@ -1386,7 +1413,7 @@ impl<'hir> Visitor<'hir> for ItemCollector<'hir> {
fn visit_trait_item(&mut self, item: &'hir TraitItem<'hir>) {
if associated_body(Node::TraitItem(item)).is_some() {
- self.body_owners.push(item.def_id);
+ self.body_owners.push(item.owner_id.def_id);
}
self.trait_items.push(item.trait_item_id());
@@ -1395,7 +1422,7 @@ impl<'hir> Visitor<'hir> for ItemCollector<'hir> {
fn visit_impl_item(&mut self, item: &'hir ImplItem<'hir>) {
if associated_body(Node::ImplItem(item)).is_some() {
- self.body_owners.push(item.def_id);
+ self.body_owners.push(item.owner_id.def_id);
}
self.impl_items.push(item.impl_item_id());
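
A small usage sketch of the split iterators (not from the patch; the helper below is made up): `parent_id_iter` yields every ancestor `HirId`, while `parent_iter` keeps only ancestors that have a HIR node, which is what `enclosing_body_owner` now relies on.

```rust
use rustc_hir::{HirId, ItemKind, Node};
use rustc_middle::hir::map::Map;
use rustc_span::Span;

// Hypothetical helper: walk the ancestors of `id` and return the span of the
// first enclosing `fn` item, if any.
fn enclosing_fn_span<'hir>(map: Map<'hir>, id: HirId) -> Option<Span> {
    map.parent_iter(id).find_map(|(_id, node)| match node {
        Node::Item(item) if matches!(item.kind, ItemKind::Fn(..)) => Some(item.span),
        _ => None,
    })
}
```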
diff --git a/compiler/rustc_middle/src/hir/mod.rs b/compiler/rustc_middle/src/hir/mod.rs
index 211a61471..1c6264ad0 100644
--- a/compiler/rustc_middle/src/hir/mod.rs
+++ b/compiler/rustc_middle/src/hir/mod.rs
@@ -39,7 +39,7 @@ impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for Owner<'tcx> {
/// bodies. The Ids are in visitor order. This is used to partition a pass between modules.
#[derive(Debug, HashStable, Encodable, Decodable)]
pub struct ModuleItems {
- submodules: Box<[LocalDefId]>,
+ submodules: Box<[OwnerId]>,
items: Box<[ItemId]>,
trait_items: Box<[TraitItemId]>,
impl_items: Box<[ImplItemId]>,
@@ -67,10 +67,10 @@ impl ModuleItems {
pub fn definitions(&self) -> impl Iterator<Item = LocalDefId> + '_ {
self.items
.iter()
- .map(|id| id.def_id)
- .chain(self.trait_items.iter().map(|id| id.def_id))
- .chain(self.impl_items.iter().map(|id| id.def_id))
- .chain(self.foreign_items.iter().map(|id| id.def_id))
+ .map(|id| id.owner_id.def_id)
+ .chain(self.trait_items.iter().map(|id| id.owner_id.def_id))
+ .chain(self.impl_items.iter().map(|id| id.owner_id.def_id))
+ .chain(self.foreign_items.iter().map(|id| id.owner_id.def_id))
}
pub fn par_items(&self, f: impl Fn(ItemId) + Send + Sync) {
@@ -97,7 +97,7 @@ impl<'tcx> TyCtxt<'tcx> {
}
pub fn parent_module(self, id: HirId) -> LocalDefId {
- self.parent_module_from_def_id(id.owner)
+ self.parent_module_from_def_id(id.owner.def_id)
}
pub fn impl_subject(self, def_id: DefId) -> ImplSubject<'tcx> {
@@ -110,13 +110,13 @@ impl<'tcx> TyCtxt<'tcx> {
pub fn provide(providers: &mut Providers) {
providers.parent_module_from_def_id = |tcx, id| {
let hir = tcx.hir();
- hir.get_module_parent_node(hir.local_def_id_to_hir_id(id))
+ hir.get_module_parent_node(hir.local_def_id_to_hir_id(id)).def_id
};
providers.hir_crate_items = map::hir_crate_items;
providers.crate_hash = map::crate_hash;
providers.hir_module_items = map::hir_module_items;
providers.hir_owner = |tcx, id| {
- let owner = tcx.hir_crate(()).owners.get(id)?.as_owner()?;
+ let owner = tcx.hir_crate(()).owners.get(id.def_id)?.as_owner()?;
let node = owner.node();
Some(Owner { node, hash_without_bodies: owner.nodes.hash_without_bodies })
};
@@ -128,21 +128,24 @@ pub fn provide(providers: &mut Providers) {
MaybeOwner::NonOwner(hir_id) => hir_id,
}
};
- providers.hir_owner_nodes = |tcx, id| tcx.hir_crate(()).owners[id].map(|i| &i.nodes);
+ providers.hir_owner_nodes = |tcx, id| tcx.hir_crate(()).owners[id.def_id].map(|i| &i.nodes);
providers.hir_owner_parent = |tcx, id| {
// Accessing the local_parent is ok since its value is hashed as part of `id`'s DefPathHash.
- tcx.opt_local_parent(id).map_or(CRATE_HIR_ID, |parent| {
+ tcx.opt_local_parent(id.def_id).map_or(CRATE_HIR_ID, |parent| {
let mut parent_hir_id = tcx.hir().local_def_id_to_hir_id(parent);
- if let Some(local_id) =
- tcx.hir_crate(()).owners[parent_hir_id.owner].unwrap().parenting.get(&id)
+ if let Some(local_id) = tcx.hir_crate(()).owners[parent_hir_id.owner.def_id]
+ .unwrap()
+ .parenting
+ .get(&id.def_id)
{
parent_hir_id.local_id = *local_id;
}
parent_hir_id
})
};
- providers.hir_attrs =
- |tcx, id| tcx.hir_crate(()).owners[id].as_owner().map_or(AttributeMap::EMPTY, |o| &o.attrs);
+ providers.hir_attrs = |tcx, id| {
+ tcx.hir_crate(()).owners[id.def_id].as_owner().map_or(AttributeMap::EMPTY, |o| &o.attrs)
+ };
providers.source_span =
|tcx, def_id| tcx.resolutions(()).source_span.get(def_id).copied().unwrap_or(DUMMY_SP);
providers.def_span = |tcx, def_id| {
@@ -177,6 +180,7 @@ pub fn provide(providers: &mut Providers) {
let id = id.expect_local();
tcx.resolutions(()).expn_that_defined.get(&id).copied().unwrap_or(ExpnId::root())
};
- providers.in_scope_traits_map =
- |tcx, id| tcx.hir_crate(()).owners[id].as_owner().map(|owner_info| &owner_info.trait_map);
+ providers.in_scope_traits_map = |tcx, id| {
+ tcx.hir_crate(()).owners[id.def_id].as_owner().map(|owner_info| &owner_info.trait_map)
+ };
}
diff --git a/compiler/rustc_middle/src/infer/unify_key.rs b/compiler/rustc_middle/src/infer/unify_key.rs
index f2627885d..41d8c7ffd 100644
--- a/compiler/rustc_middle/src/infer/unify_key.rs
+++ b/compiler/rustc_middle/src/infer/unify_key.rs
@@ -129,7 +129,7 @@ impl<'tcx> UnifyKey for ty::ConstVid<'tcx> {
}
impl<'tcx> UnifyValue for ConstVarValue<'tcx> {
- type Error = (ty::Const<'tcx>, ty::Const<'tcx>);
+ type Error = NoError;
fn unify_values(&value1: &Self, &value2: &Self) -> Result<Self, Self::Error> {
Ok(match (value1.val, value2.val) {
diff --git a/compiler/rustc_middle/src/lib.rs b/compiler/rustc_middle/src/lib.rs
index 01b9277b9..a58cbc376 100644
--- a/compiler/rustc_middle/src/lib.rs
+++ b/compiler/rustc_middle/src/lib.rs
@@ -31,22 +31,19 @@
#![feature(discriminant_kind)]
#![feature(exhaustive_patterns)]
#![feature(get_mut_unchecked)]
-#![cfg_attr(bootstrap, feature(generic_associated_types))]
#![feature(if_let_guard)]
-#![feature(map_first_last)]
#![feature(negative_impls)]
#![feature(never_type)]
#![feature(extern_types)]
#![feature(new_uninit)]
#![feature(once_cell)]
#![feature(let_chains)]
-#![cfg_attr(bootstrap, feature(let_else))]
#![feature(min_specialization)]
#![feature(trusted_len)]
#![feature(type_alias_impl_trait)]
#![feature(associated_type_bounds)]
#![feature(rustc_attrs)]
-#![feature(half_open_range_patterns)]
+#![cfg_attr(bootstrap, feature(half_open_range_patterns))]
#![feature(control_flow_enum)]
#![feature(associated_type_defaults)]
#![feature(trusted_step)]
@@ -58,6 +55,7 @@
#![feature(drain_filter)]
#![feature(intra_doc_pointers)]
#![feature(yeet_expr)]
+#![feature(result_option_inspect)]
#![feature(const_option)]
#![recursion_limit = "512"]
#![allow(rustc::potential_query_instability)]
diff --git a/compiler/rustc_middle/src/lint.rs b/compiler/rustc_middle/src/lint.rs
index 2f45222de..79522bd0b 100644
--- a/compiler/rustc_middle/src/lint.rs
+++ b/compiler/rustc_middle/src/lint.rs
@@ -1,20 +1,20 @@
use std::cmp;
use rustc_data_structures::fx::FxHashMap;
-use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
-use rustc_errors::{Diagnostic, DiagnosticId, LintDiagnosticBuilder, MultiSpan};
-use rustc_hir::HirId;
-use rustc_index::vec::IndexVec;
-use rustc_query_system::ich::StableHashingContext;
+use rustc_data_structures::sorted_map::SortedMap;
+use rustc_errors::{Diagnostic, DiagnosticBuilder, DiagnosticId, DiagnosticMessage, MultiSpan};
+use rustc_hir::{HirId, ItemLocalId};
use rustc_session::lint::{
builtin::{self, FORBIDDEN_LINT_GROUPS},
- FutureIncompatibilityReason, Level, Lint, LintExpectationId, LintId,
+ FutureIncompatibilityReason, Level, Lint, LintId,
};
use rustc_session::Session;
use rustc_span::hygiene::MacroKind;
use rustc_span::source_map::{DesugaringKind, ExpnKind};
use rustc_span::{symbol, Span, Symbol, DUMMY_SP};
+use crate::ty::TyCtxt;
+
/// How a lint level was set.
#[derive(Clone, Copy, PartialEq, Eq, HashStable, Debug)]
pub enum LintLevelSource {
@@ -23,7 +23,12 @@ pub enum LintLevelSource {
Default,
/// Lint level was set by an attribute.
- Node(Symbol, Span, Option<Symbol> /* RFC 2383 reason */),
+ Node {
+ name: Symbol,
+ span: Span,
+ /// RFC 2383 reason
+ reason: Option<Symbol>,
+ },
/// Lint level was set by a command-line flag.
/// The provided `Level` is the level specified on the command line.
@@ -35,7 +40,7 @@ impl LintLevelSource {
pub fn name(&self) -> Symbol {
match *self {
LintLevelSource::Default => symbol::kw::Default,
- LintLevelSource::Node(name, _, _) => name,
+ LintLevelSource::Node { name, .. } => name,
LintLevelSource::CommandLine(name, _) => name,
}
}
@@ -43,7 +48,7 @@ impl LintLevelSource {
pub fn span(&self) -> Span {
match *self {
LintLevelSource::Default => DUMMY_SP,
- LintLevelSource::Node(_, span, _) => span,
+ LintLevelSource::Node { span, .. } => span,
LintLevelSource::CommandLine(_, _) => DUMMY_SP,
}
}
@@ -52,145 +57,137 @@ impl LintLevelSource {
/// A tuple of a lint level and its source.
pub type LevelAndSource = (Level, LintLevelSource);
-#[derive(Debug, HashStable)]
-pub struct LintLevelSets {
- pub list: IndexVec<LintStackIndex, LintSet>,
- pub lint_cap: Level,
-}
-
-rustc_index::newtype_index! {
- #[derive(HashStable)]
- pub struct LintStackIndex {
- const COMMAND_LINE = 0,
- }
-}
-
-#[derive(Debug, HashStable)]
-pub struct LintSet {
- // -A,-W,-D flags, a `Symbol` for the flag itself and `Level` for which
- // flag.
- pub specs: FxHashMap<LintId, LevelAndSource>,
-
- pub parent: LintStackIndex,
+/// Return type for the `shallow_lint_levels_on` query.
+///
+/// This map represents the set of allowed lints and allowance levels given
+/// by the attributes for *a single HirId*.
+#[derive(Default, Debug, HashStable)]
+pub struct ShallowLintLevelMap {
+ pub specs: SortedMap<ItemLocalId, FxHashMap<LintId, LevelAndSource>>,
}
-impl LintLevelSets {
- pub fn new() -> Self {
- LintLevelSets { list: IndexVec::new(), lint_cap: Level::Forbid }
- }
-
- pub fn get_lint_level(
- &self,
- lint: &'static Lint,
- idx: LintStackIndex,
- aux: Option<&FxHashMap<LintId, LevelAndSource>>,
- sess: &Session,
- ) -> LevelAndSource {
- let (level, mut src) = self.get_lint_id_level(LintId::of(lint), idx, aux);
-
- // If `level` is none then we actually assume the default level for this
- // lint.
- let mut level = level.unwrap_or_else(|| lint.default_level(sess.edition()));
-
- // If we're about to issue a warning, check at the last minute for any
- // directives against the warnings "lint". If, for example, there's an
- // `allow(warnings)` in scope then we want to respect that instead.
- //
- // We exempt `FORBIDDEN_LINT_GROUPS` from this because it specifically
- // triggers in cases (like #80988) where you have `forbid(warnings)`,
- // and so if we turned that into an error, it'd defeat the purpose of the
- // future compatibility warning.
- if level == Level::Warn && LintId::of(lint) != LintId::of(FORBIDDEN_LINT_GROUPS) {
- let (warnings_level, warnings_src) =
- self.get_lint_id_level(LintId::of(builtin::WARNINGS), idx, aux);
- if let Some(configured_warning_level) = warnings_level {
- if configured_warning_level != Level::Warn {
- level = configured_warning_level;
- src = warnings_src;
- }
+/// From an initial level and source, verify the effect of special annotations:
+/// `warnings` lint level and lint caps.
+///
+/// The return of this function is suitable for diagnostics.
+pub fn reveal_actual_level(
+ level: Option<Level>,
+ src: &mut LintLevelSource,
+ sess: &Session,
+ lint: LintId,
+ probe_for_lint_level: impl FnOnce(LintId) -> (Option<Level>, LintLevelSource),
+) -> Level {
+ // If `level` is none then we actually assume the default level for this lint.
+ let mut level = level.unwrap_or_else(|| lint.lint.default_level(sess.edition()));
+
+ // If we're about to issue a warning, check at the last minute for any
+ // directives against the warnings "lint". If, for example, there's an
+ // `allow(warnings)` in scope then we want to respect that instead.
+ //
+ // We exempt `FORBIDDEN_LINT_GROUPS` from this because it specifically
+ // triggers in cases (like #80988) where you have `forbid(warnings)`,
+ // and so if we turned that into an error, it'd defeat the purpose of the
+ // future compatibility warning.
+ if level == Level::Warn && lint != LintId::of(FORBIDDEN_LINT_GROUPS) {
+ let (warnings_level, warnings_src) = probe_for_lint_level(LintId::of(builtin::WARNINGS));
+ if let Some(configured_warning_level) = warnings_level {
+ if configured_warning_level != Level::Warn {
+ level = configured_warning_level;
+ *src = warnings_src;
}
}
+ }
- // Ensure that we never exceed the `--cap-lints` argument
- // unless the source is a --force-warn
- level = if let LintLevelSource::CommandLine(_, Level::ForceWarn(_)) = src {
- level
- } else {
- cmp::min(level, self.lint_cap)
- };
-
- if let Some(driver_level) = sess.driver_lint_caps.get(&LintId::of(lint)) {
- // Ensure that we never exceed driver level.
- level = cmp::min(*driver_level, level);
- }
+ // Ensure that we never exceed the `--cap-lints` argument unless the source is a --force-warn
+ level = if let LintLevelSource::CommandLine(_, Level::ForceWarn(_)) = src {
+ level
+ } else {
+ cmp::min(level, sess.opts.lint_cap.unwrap_or(Level::Forbid))
+ };
- (level, src)
+ if let Some(driver_level) = sess.driver_lint_caps.get(&lint) {
+ // Ensure that we never exceed driver level.
+ level = cmp::min(*driver_level, level);
}
- pub fn get_lint_id_level(
+ level
+}
+
+impl ShallowLintLevelMap {
+ /// Perform a deep probe in the HIR tree looking for the actual level for the lint.
+ /// This lint level is not usable for diagnostics, it needs to be corrected by
+ /// `reveal_actual_level` beforehand.
+ #[instrument(level = "trace", skip(self, tcx), ret)]
+ fn probe_for_lint_level(
&self,
+ tcx: TyCtxt<'_>,
id: LintId,
- mut idx: LintStackIndex,
- aux: Option<&FxHashMap<LintId, LevelAndSource>>,
+ start: HirId,
) -> (Option<Level>, LintLevelSource) {
- if let Some(specs) = aux {
- if let Some(&(level, src)) = specs.get(&id) {
- return (Some(level), src);
- }
+ if let Some(map) = self.specs.get(&start.local_id)
+ && let Some(&(level, src)) = map.get(&id)
+ {
+ return (Some(level), src);
}
- loop {
- let LintSet { ref specs, parent } = self.list[idx];
- if let Some(&(level, src)) = specs.get(&id) {
- return (Some(level), src);
+
+ let mut owner = start.owner;
+ let mut specs = &self.specs;
+
+ for parent in tcx.hir().parent_id_iter(start) {
+ if parent.owner != owner {
+ owner = parent.owner;
+ specs = &tcx.shallow_lint_levels_on(owner).specs;
}
- if idx == COMMAND_LINE {
- return (None, LintLevelSource::Default);
+ if let Some(map) = specs.get(&parent.local_id)
+ && let Some(&(level, src)) = map.get(&id)
+ {
+ return (Some(level), src);
}
- idx = parent;
}
- }
-}
-#[derive(Debug)]
-pub struct LintLevelMap {
- /// This is a collection of lint expectations as described in RFC 2383, that
- /// can be fulfilled during this compilation session. This means that at least
- /// one expected lint is currently registered in the lint store.
- ///
- /// The [`LintExpectationId`] is stored as a part of the [`Expect`](Level::Expect)
- /// lint level.
- pub lint_expectations: Vec<(LintExpectationId, LintExpectation)>,
- pub sets: LintLevelSets,
- pub id_to_set: FxHashMap<HirId, LintStackIndex>,
-}
+ (None, LintLevelSource::Default)
+ }
-impl LintLevelMap {
- /// If the `id` was previously registered with `register_id` when building
- /// this `LintLevelMap` this returns the corresponding lint level and source
- /// of the lint level for the lint provided.
- ///
- /// If the `id` was not previously registered, returns `None`. If `None` is
- /// returned then the parent of `id` should be acquired and this function
- /// should be called again.
- pub fn level_and_source(
+ /// Fetch and return the user-visible lint level for the given lint at the given HirId.
+ #[instrument(level = "trace", skip(self, tcx), ret)]
+ pub fn lint_level_id_at_node(
&self,
- lint: &'static Lint,
- id: HirId,
- session: &Session,
- ) -> Option<LevelAndSource> {
- self.id_to_set.get(&id).map(|idx| self.sets.get_lint_level(lint, *idx, None, session))
+ tcx: TyCtxt<'_>,
+ lint: LintId,
+ cur: HirId,
+ ) -> (Level, LintLevelSource) {
+ let (level, mut src) = self.probe_for_lint_level(tcx, lint, cur);
+ let level = reveal_actual_level(level, &mut src, tcx.sess, lint, |lint| {
+ self.probe_for_lint_level(tcx, lint, cur)
+ });
+ (level, src)
}
}
-impl<'a> HashStable<StableHashingContext<'a>> for LintLevelMap {
- #[inline]
- fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
- let LintLevelMap { ref sets, ref id_to_set, ref lint_expectations } = *self;
+impl TyCtxt<'_> {
+ /// Fetch and return the user-visible lint level for the given lint at the given HirId.
+ pub fn lint_level_at_node(self, lint: &'static Lint, id: HirId) -> (Level, LintLevelSource) {
+ self.shallow_lint_levels_on(id.owner).lint_level_id_at_node(self, LintId::of(lint), id)
+ }
- id_to_set.hash_stable(hcx, hasher);
- lint_expectations.hash_stable(hcx, hasher);
+ /// Walks upwards from `id` to find a node which might change lint levels with attributes.
+ /// It stops at `bound` and just returns it if reached.
+ pub fn maybe_lint_level_root_bounded(self, mut id: HirId, bound: HirId) -> HirId {
+ let hir = self.hir();
+ loop {
+ if id == bound {
+ return bound;
+ }
- hcx.while_hashing_spans(true, |hcx| sets.hash_stable(hcx, hasher))
+ if hir.attrs(id).iter().any(|attr| Level::from_attr(attr).is_some()) {
+ return id;
+ }
+ let next = hir.get_parent_node(id);
+ if next == id {
+ bug!("lint traversal reached the root of the crate");
+ }
+ id = next;
+ }
}
}
@@ -261,11 +258,11 @@ pub fn explain_lint_level_source(
));
}
}
- LintLevelSource::Node(lint_attr_name, src, reason) => {
+ LintLevelSource::Node { name: lint_attr_name, span, reason, .. } => {
if let Some(rationale) = reason {
err.note(rationale.as_str());
}
- err.span_note_once(src, "the lint level is defined here");
+ err.span_note_once(span, "the lint level is defined here");
if lint_attr_name.as_str() != name {
let level_str = level.as_str();
err.note_once(&format!(
@@ -277,23 +274,65 @@ pub fn explain_lint_level_source(
}
}
-pub fn struct_lint_level<'s, 'd>(
- sess: &'s Session,
+/// The innermost function for emitting lints.
+///
+/// If you are looking to implement a lint, look for higher level functions,
+/// for example:
+/// - [`TyCtxt::emit_spanned_lint`]
+/// - [`TyCtxt::struct_span_lint_hir`]
+/// - [`TyCtxt::emit_lint`]
+/// - [`TyCtxt::struct_lint_node`]
+/// - `LintContext::lookup`
+///
+/// ## `decorate` signature
+///
+/// The return value of `decorate` is ignored by this function. So what is the
+/// point of returning `&'b mut DiagnosticBuilder<'a, ()>`?
+///
+/// There are 2 reasons for this signature.
+///
+/// First of all, it prevents accidental use of `.emit()` -- it's clear that the
+/// builder will be later used and shouldn't be emitted right away (this is
+/// especially important because the old API expected you to call `.emit()` in
+/// the closure).
+///
+/// Second of all, it makes the most common case of adding just a single label
+/// /suggestion much nicer, since [`DiagnosticBuilder`] methods return
+/// `&mut DiagnosticBuilder`, you can just chain methods, without needing
+/// awkward `{ ...; }`:
+/// ```ignore pseudo-code
+/// struct_lint_level(
+/// ...,
+/// |lint| lint.span_label(sp, "lbl")
+/// // ^^^^^^^^^^^^^^^^^^^^^ returns `&mut DiagnosticBuilder` by default
+/// )
+/// ```
+pub fn struct_lint_level(
+ sess: &Session,
lint: &'static Lint,
level: Level,
src: LintLevelSource,
span: Option<MultiSpan>,
- decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a, ()>) + 'd,
+ msg: impl Into<DiagnosticMessage>,
+ decorate: impl for<'a, 'b> FnOnce(
+ &'b mut DiagnosticBuilder<'a, ()>,
+ ) -> &'b mut DiagnosticBuilder<'a, ()>,
) {
// Avoid codegen bloat from monomorphization by immediately doing dyn dispatch of `decorate` to
// the "real" work.
- fn struct_lint_level_impl<'s, 'd>(
- sess: &'s Session,
+ fn struct_lint_level_impl(
+ sess: &Session,
lint: &'static Lint,
level: Level,
src: LintLevelSource,
span: Option<MultiSpan>,
- decorate: Box<dyn for<'b> FnOnce(LintDiagnosticBuilder<'b, ()>) + 'd>,
+ msg: impl Into<DiagnosticMessage>,
+ decorate: Box<
+ dyn '_
+ + for<'a, 'b> FnOnce(
+ &'b mut DiagnosticBuilder<'a, ()>,
+ ) -> &'b mut DiagnosticBuilder<'a, ()>,
+ >,
) {
// Check for future incompatibility lints and issue a stronger warning.
let future_incompatible = lint.future_incompatible;
@@ -344,6 +383,8 @@ pub fn struct_lint_level<'s, 'd>(
(Level::Deny | Level::Forbid, None) => sess.diagnostic().struct_err_lint(""),
};
+ err.set_is_lint();
+
// If this code originates in a foreign macro, aka something that this crate
// did not itself author, then it's likely that there's nothing this crate
// can do about it. We probably want to skip the lint entirely.
@@ -366,6 +407,10 @@ pub fn struct_lint_level<'s, 'd>(
}
}
+ // Delay evaluating and setting the primary message until after we've
+ // suppressed the lint due to macros.
+ err.set_primary_message(msg);
+
// Lint diagnostics that are covered by the expect level will not be emitted outside
// the compiler. It is therefore not necessary to add any information for the user.
// This will therefore directly call the decorate function which will in turn emit
@@ -373,12 +418,12 @@ pub fn struct_lint_level<'s, 'd>(
if let Level::Expect(_) = level {
let name = lint.name_lower();
err.code(DiagnosticId::Lint { name, has_future_breakage, is_force_warn: false });
- decorate(LintDiagnosticBuilder::new(err));
+
+ decorate(&mut err);
+ err.emit();
return;
}
- explain_lint_level_source(lint, level, src, &mut err);
-
let name = lint.name_lower();
let is_force_warn = matches!(level, Level::ForceWarn(_));
err.code(DiagnosticId::Lint { name, has_future_breakage, is_force_warn });
@@ -417,10 +462,12 @@ pub fn struct_lint_level<'s, 'd>(
}
}
- // Finally, run `decorate`. This function is also responsible for emitting the diagnostic.
- decorate(LintDiagnosticBuilder::new(err));
+ // Finally, run `decorate`.
+ decorate(&mut err);
+ explain_lint_level_source(lint, level, src, &mut *err);
+ err.emit()
}
- struct_lint_level_impl(sess, lint, level, src, span, Box::new(decorate))
+ struct_lint_level_impl(sess, lint, level, src, span, msg, Box::new(decorate))
}
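
As a hedged illustration of the API change above (not part of this patch), here is roughly how a caller migrates to the message-up-front signature; the lint, ids and strings below are placeholders, and the only assumed API is the `struct_span_lint_hir` form visible later in this diff (see the stability.rs hunk):

use rustc_hir::HirId;
use rustc_middle::ty::TyCtxt;
use rustc_session::lint::Lint;
use rustc_span::Span;

fn report_example_lint<'tcx>(tcx: TyCtxt<'tcx>, lint: &'static Lint, hir_id: HirId, span: Span) {
    // The primary message is now passed up front; the decorate closure only
    // chains builder methods and returns the builder, and emission happens
    // inside the lint machinery (`struct_lint_level` calls `.emit()` itself).
    tcx.struct_span_lint_hir(lint, hir_id, span, "example primary message", |diag| {
        diag.span_label(span, "example label")
    });
}
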
/// Returns whether `span` originates in a foreign crate's external macro.
@@ -432,7 +479,9 @@ pub fn in_external_macro(sess: &Session, span: Span) -> bool {
match expn_data.kind {
ExpnKind::Inlined
| ExpnKind::Root
- | ExpnKind::Desugaring(DesugaringKind::ForLoop | DesugaringKind::WhileLoop) => false,
+ | ExpnKind::Desugaring(
+ DesugaringKind::ForLoop | DesugaringKind::WhileLoop | DesugaringKind::OpaqueTy,
+ ) => false,
ExpnKind::AstPass(_) | ExpnKind::Desugaring(_) => true, // well, it's "external"
ExpnKind::Macro(MacroKind::Bang, _) => {
// Dummy span for the `def_site` means it's an external macro.
diff --git a/compiler/rustc_middle/src/macros.rs b/compiler/rustc_middle/src/macros.rs
index 0e85c60a3..01fe72de6 100644
--- a/compiler/rustc_middle/src/macros.rs
+++ b/compiler/rustc_middle/src/macros.rs
@@ -54,13 +54,22 @@ macro_rules! TrivialTypeTraversalImpls {
impl<$tcx> $crate::ty::fold::TypeFoldable<$tcx> for $ty {
fn try_fold_with<F: $crate::ty::fold::FallibleTypeFolder<$tcx>>(
self,
- _: &mut F
- ) -> ::std::result::Result<$ty, F::Error> {
+ _: &mut F,
+ ) -> ::std::result::Result<Self, F::Error> {
Ok(self)
}
+
+ #[inline]
+ fn fold_with<F: $crate::ty::fold::TypeFolder<$tcx>>(
+ self,
+ _: &mut F,
+ ) -> Self {
+ self
+ }
}
impl<$tcx> $crate::ty::visit::TypeVisitable<$tcx> for $ty {
+ #[inline]
fn visit_with<F: $crate::ty::visit::TypeVisitor<$tcx>>(
&self,
_: &mut F)
diff --git a/compiler/rustc_middle/src/middle/limits.rs b/compiler/rustc_middle/src/middle/limits.rs
index 53c4d9267..12aef66bc 100644
--- a/compiler/rustc_middle/src/middle/limits.rs
+++ b/compiler/rustc_middle/src/middle/limits.rs
@@ -38,7 +38,7 @@ pub fn provide(providers: &mut ty::query::Providers) {
tcx.hir().krate_attrs(),
tcx.sess,
sym::const_eval_limit,
- 1_000_000,
+ 2_000_000,
),
}
}
diff --git a/compiler/rustc_middle/src/middle/privacy.rs b/compiler/rustc_middle/src/middle/privacy.rs
index 751c7f464..9c68c7504 100644
--- a/compiler/rustc_middle/src/middle/privacy.rs
+++ b/compiler/rustc_middle/src/middle/privacy.rs
@@ -1,64 +1,218 @@
//! A pass that checks to make sure private fields and methods aren't used
//! outside their scopes. This pass will also generate a set of exported items
//! which are available for use externally when compiled as a library.
-
+use crate::ty::{DefIdTree, Visibility};
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_macros::HashStable;
use rustc_query_system::ich::StableHashingContext;
-use rustc_span::def_id::LocalDefId;
+use rustc_span::def_id::{DefId, LocalDefId};
use std::hash::Hash;
-/// Represents the levels of accessibility an item can have.
+/// Represents the levels of effective visibility an item can have.
///
-/// The variants are sorted in ascending order of accessibility.
+/// The variants are sorted in ascending order of directness.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, HashStable)]
-pub enum AccessLevel {
- /// Superset of `AccessLevel::Reachable` used to mark impl Trait items.
- ReachableFromImplTrait,
- /// Exported items + items participating in various kinds of public interfaces,
- /// but not directly nameable. For example, if function `fn f() -> T {...}` is
- /// public, then type `T` is reachable. Its values can be obtained by other crates
- /// even if the type itself is not nameable.
+pub enum Level {
+ /// Superset of `Reachable` including items leaked through return position `impl Trait`.
+ ReachableThroughImplTrait,
+ /// Item is either reexported, or leaked through any kind of interface.
+ /// For example, if function `fn f() -> T {...}` is directly public, then type `T` is publicly
+ /// reachable and its values can be obtained by other crates even if the type itself is not
+ /// nameable.
Reachable,
- /// Public items + items accessible to other crates with the help of `pub use` re-exports.
- Exported,
- /// Items accessible to other crates directly, without the help of re-exports.
- Public,
+    /// Item is accessible either directly, or with the help of `use` reexports.
+    Reexported,
+    /// Item is directly accessible, without the help of reexports.
+ Direct,
+}
+
+impl Level {
+ pub fn all_levels() -> [Level; 4] {
+ [Level::Direct, Level::Reexported, Level::Reachable, Level::ReachableThroughImplTrait]
+ }
+}
+
+#[derive(Clone, Copy, PartialEq, Eq, Debug, HashStable)]
+pub struct EffectiveVisibility {
+ direct: Visibility,
+ reexported: Visibility,
+ reachable: Visibility,
+ reachable_through_impl_trait: Visibility,
+}
+
+impl EffectiveVisibility {
+ pub fn at_level(&self, level: Level) -> &Visibility {
+ match level {
+ Level::Direct => &self.direct,
+ Level::Reexported => &self.reexported,
+ Level::Reachable => &self.reachable,
+ Level::ReachableThroughImplTrait => &self.reachable_through_impl_trait,
+ }
+ }
+
+ fn at_level_mut(&mut self, level: Level) -> &mut Visibility {
+ match level {
+ Level::Direct => &mut self.direct,
+ Level::Reexported => &mut self.reexported,
+ Level::Reachable => &mut self.reachable,
+ Level::ReachableThroughImplTrait => &mut self.reachable_through_impl_trait,
+ }
+ }
+
+ pub fn is_public_at_level(&self, level: Level) -> bool {
+ self.at_level(level).is_public()
+ }
+
+ pub fn from_vis(vis: Visibility) -> EffectiveVisibility {
+ EffectiveVisibility {
+ direct: vis,
+ reexported: vis,
+ reachable: vis,
+ reachable_through_impl_trait: vis,
+ }
+ }
}
-/// Holds a map of accessibility levels for reachable HIR nodes.
+/// Holds a map of effective visibilities for reachable HIR nodes.
#[derive(Debug, Clone)]
-pub struct AccessLevels<Id = LocalDefId> {
- pub map: FxHashMap<Id, AccessLevel>,
+pub struct EffectiveVisibilities<Id = LocalDefId> {
+ map: FxHashMap<Id, EffectiveVisibility>,
}
-impl<Id: Hash + Eq> AccessLevels<Id> {
- /// See `AccessLevel::Reachable`.
+impl<Id: Hash + Eq + Copy> EffectiveVisibilities<Id> {
+ pub fn is_public_at_level(&self, id: Id, level: Level) -> bool {
+ self.effective_vis(id)
+ .map_or(false, |effective_vis| effective_vis.is_public_at_level(level))
+ }
+
+ /// See `Level::Reachable`.
pub fn is_reachable(&self, id: Id) -> bool {
- self.map.get(&id) >= Some(&AccessLevel::Reachable)
+ self.is_public_at_level(id, Level::Reachable)
}
- /// See `AccessLevel::Exported`.
+ /// See `Level::Reexported`.
pub fn is_exported(&self, id: Id) -> bool {
- self.map.get(&id) >= Some(&AccessLevel::Exported)
+ self.is_public_at_level(id, Level::Reexported)
+ }
+
+ /// See `Level::Direct`.
+ pub fn is_directly_public(&self, id: Id) -> bool {
+ self.is_public_at_level(id, Level::Direct)
+ }
+
+ pub fn public_at_level(&self, id: Id) -> Option<Level> {
+ self.effective_vis(id).and_then(|effective_vis| {
+ for level in Level::all_levels() {
+ if effective_vis.is_public_at_level(level) {
+ return Some(level);
+ }
+ }
+ None
+ })
+ }
+
+ pub fn effective_vis(&self, id: Id) -> Option<&EffectiveVisibility> {
+ self.map.get(&id)
}
- /// See `AccessLevel::Public`.
- pub fn is_public(&self, id: Id) -> bool {
- self.map.get(&id) >= Some(&AccessLevel::Public)
+ pub fn iter(&self) -> impl Iterator<Item = (&Id, &EffectiveVisibility)> {
+ self.map.iter()
+ }
+
+ pub fn map_id<OutId: Hash + Eq + Copy>(
+ &self,
+ f: impl Fn(Id) -> OutId,
+ ) -> EffectiveVisibilities<OutId> {
+ EffectiveVisibilities { map: self.map.iter().map(|(k, v)| (f(*k), *v)).collect() }
+ }
+
+ pub fn set_public_at_level(
+ &mut self,
+ id: Id,
+ default_vis: impl FnOnce() -> Visibility,
+ level: Level,
+ ) {
+ let mut effective_vis = self
+ .effective_vis(id)
+ .copied()
+ .unwrap_or_else(|| EffectiveVisibility::from_vis(default_vis()));
+ for l in Level::all_levels() {
+ if l <= level {
+ *effective_vis.at_level_mut(l) = Visibility::Public;
+ }
+ }
+ self.map.insert(id, effective_vis);
+ }
+}
+
+impl<Id: Hash + Eq + Copy + Into<DefId>> EffectiveVisibilities<Id> {
+ // `parent_id` is not necessarily a parent in source code tree,
+ // it is the node from which the maximum effective visibility is inherited.
+ pub fn update(
+ &mut self,
+ id: Id,
+ nominal_vis: Visibility,
+ default_vis: impl FnOnce() -> Visibility,
+ parent_id: Id,
+ level: Level,
+ tree: impl DefIdTree,
+ ) -> bool {
+ let mut changed = false;
+ let mut current_effective_vis = self.effective_vis(id).copied().unwrap_or_else(|| {
+ if id.into().is_crate_root() {
+ EffectiveVisibility::from_vis(Visibility::Public)
+ } else {
+ EffectiveVisibility::from_vis(default_vis())
+ }
+ });
+ if let Some(inherited_effective_vis) = self.effective_vis(parent_id) {
+ let mut inherited_effective_vis_at_prev_level =
+ *inherited_effective_vis.at_level(level);
+ let mut calculated_effective_vis = inherited_effective_vis_at_prev_level;
+ for l in Level::all_levels() {
+ if level >= l {
+ let inherited_effective_vis_at_level = *inherited_effective_vis.at_level(l);
+ let current_effective_vis_at_level = current_effective_vis.at_level_mut(l);
+                    // The effective visibility for `id` shouldn't be recalculated if the
+                    // effective visibility inherited from `parent_id` isn't changed at the next level.
+ if !(inherited_effective_vis_at_prev_level == inherited_effective_vis_at_level
+ && level != l)
+ {
+ calculated_effective_vis =
+ if nominal_vis.is_at_least(inherited_effective_vis_at_level, tree) {
+ inherited_effective_vis_at_level
+ } else {
+ nominal_vis
+ };
+ }
+                    // The effective visibility can't be decreased by a later `update` call for
+                    // the same id.
+ if *current_effective_vis_at_level != calculated_effective_vis
+ && calculated_effective_vis
+ .is_at_least(*current_effective_vis_at_level, tree)
+ {
+ changed = true;
+ *current_effective_vis_at_level = calculated_effective_vis;
+ }
+ inherited_effective_vis_at_prev_level = inherited_effective_vis_at_level;
+ }
+ }
+ }
+ self.map.insert(id, current_effective_vis);
+ changed
}
}
-impl<Id> Default for AccessLevels<Id> {
+impl<Id> Default for EffectiveVisibilities<Id> {
fn default() -> Self {
- AccessLevels { map: Default::default() }
+ EffectiveVisibilities { map: Default::default() }
}
}
-impl<'a> HashStable<StableHashingContext<'a>> for AccessLevels {
+impl<'a> HashStable<StableHashingContext<'a>> for EffectiveVisibilities {
fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) {
- let AccessLevels { ref map } = *self;
+ let EffectiveVisibilities { ref map } = *self;
map.hash_stable(hcx, hasher);
}
}
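
A hedged sketch (not part of this patch) of how the new `EffectiveVisibilities` table is queried and updated through the methods added above; the item id and its private visibility are assumed to come from the privacy pass:

use rustc_middle::middle::privacy::{EffectiveVisibilities, Level};
use rustc_middle::ty::Visibility;
use rustc_span::def_id::LocalDefId;

fn record_reexport(
    table: &mut EffectiveVisibilities,
    id: LocalDefId,
    private_vis: Visibility,
) -> bool {
    // Mark `id` as public at `Reexported` and at every less direct level
    // (`Reachable`, `ReachableThroughImplTrait`); `Direct` keeps the default.
    table.set_public_at_level(id, || private_vis, Level::Reexported);

    // Level-specific questions replace the old single `AccessLevel` lookup:
    // here, "exported but not directly nameable".
    table.is_exported(id) && !table.is_directly_public(id)
}
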
diff --git a/compiler/rustc_middle/src/middle/resolve_lifetime.rs b/compiler/rustc_middle/src/middle/resolve_lifetime.rs
index a171f5711..c3bf1c717 100644
--- a/compiler/rustc_middle/src/middle/resolve_lifetime.rs
+++ b/compiler/rustc_middle/src/middle/resolve_lifetime.rs
@@ -2,9 +2,9 @@
use crate::ty;
-use rustc_data_structures::fx::{FxHashMap, FxHashSet};
-use rustc_hir::def_id::{DefId, LocalDefId};
-use rustc_hir::ItemLocalId;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_hir::def_id::DefId;
+use rustc_hir::{ItemLocalId, OwnerId};
use rustc_macros::HashStable;
#[derive(Clone, Copy, PartialEq, Eq, Hash, TyEncodable, TyDecodable, Debug, HashStable)]
@@ -49,12 +49,7 @@ pub enum ObjectLifetimeDefault {
pub struct ResolveLifetimes {
/// Maps from every use of a named (not anonymous) lifetime to a
/// `Region` describing how that region is bound
- pub defs: FxHashMap<LocalDefId, FxHashMap<ItemLocalId, Region>>,
+ pub defs: FxHashMap<OwnerId, FxHashMap<ItemLocalId, Region>>,
- /// Set of lifetime def ids that are late-bound; a region can
- /// be late-bound if (a) it does NOT appear in a where-clause and
- /// (b) it DOES appear in the arguments.
- pub late_bound: FxHashMap<LocalDefId, FxHashSet<LocalDefId>>,
-
- pub late_bound_vars: FxHashMap<LocalDefId, FxHashMap<ItemLocalId, Vec<ty::BoundVariableKind>>>,
+ pub late_bound_vars: FxHashMap<OwnerId, FxHashMap<ItemLocalId, Vec<ty::BoundVariableKind>>>,
}
diff --git a/compiler/rustc_middle/src/middle/stability.rs b/compiler/rustc_middle/src/middle/stability.rs
index d182929c4..61bc089e4 100644
--- a/compiler/rustc_middle/src/middle/stability.rs
+++ b/compiler/rustc_middle/src/middle/stability.rs
@@ -253,13 +253,12 @@ fn late_report_deprecation(
return;
}
let method_span = method_span.unwrap_or(span);
- tcx.struct_span_lint_hir(lint, hir_id, method_span, |lint| {
- let mut diag = lint.build(message);
+ tcx.struct_span_lint_hir(lint, hir_id, method_span, message, |diag| {
if let hir::Node::Expr(_) = tcx.hir().get(hir_id) {
let kind = tcx.def_kind(def_id).descr(def_id);
- deprecation_suggestion(&mut diag, kind, suggestion, method_span);
+ deprecation_suggestion(diag, kind, suggestion, method_span);
}
- diag.emit();
+ diag
});
}
@@ -621,9 +620,7 @@ impl<'tcx> TyCtxt<'tcx> {
unmarked: impl FnOnce(Span, DefId),
) -> bool {
let soft_handler = |lint, span, msg: &_| {
- self.struct_span_lint_hir(lint, id.unwrap_or(hir::CRATE_HIR_ID), span, |lint| {
- lint.build(msg).emit();
- })
+ self.struct_span_lint_hir(lint, id.unwrap_or(hir::CRATE_HIR_ID), span, msg, |lint| lint)
};
let eval_result =
self.eval_stability_allow_unstable(def_id, id, span, method_span, allow_unstable);
diff --git a/compiler/rustc_middle/src/mir/interpret/error.rs b/compiler/rustc_middle/src/mir/interpret/error.rs
index e4039cc7c..b5a50cc15 100644
--- a/compiler/rustc_middle/src/mir/interpret/error.rs
+++ b/compiler/rustc_middle/src/mir/interpret/error.rs
@@ -479,12 +479,7 @@ impl<T: Any> AsAny for T {
}
/// A trait for machine-specific errors (or other "machine stop" conditions).
-pub trait MachineStopType: AsAny + fmt::Display + Send {
- /// If `true`, emit a hard error instead of going through the `CONST_ERR` lint
- fn is_hard_err(&self) -> bool {
- false
- }
-}
+pub trait MachineStopType: AsAny + fmt::Display + Send {}
impl dyn MachineStopType {
#[inline(always)]
@@ -543,16 +538,4 @@ impl InterpError<'_> {
| InterpError::UndefinedBehavior(UndefinedBehaviorInfo::Ub(_))
)
}
-
- /// Should this error be reported as a hard error, preventing compilation, or a soft error,
- /// causing a deny-by-default lint?
- pub fn is_hard_err(&self) -> bool {
- use InterpError::*;
- match *self {
- MachineStop(ref err) => err.is_hard_err(),
- UndefinedBehavior(_) => true,
- ResourceExhaustion(ResourceExhaustionInfo::MemoryExhausted) => true,
- _ => false,
- }
- }
}
diff --git a/compiler/rustc_middle/src/mir/interpret/pointer.rs b/compiler/rustc_middle/src/mir/interpret/pointer.rs
index 95e52e391..23c2ce647 100644
--- a/compiler/rustc_middle/src/mir/interpret/pointer.rs
+++ b/compiler/rustc_middle/src/mir/interpret/pointer.rs
@@ -43,7 +43,7 @@ pub trait PointerArithmetic: HasDataLayout {
let val = val as i64;
// Now wrap-around into the machine_isize range.
if val > self.machine_isize_max() {
- // This can only happen the the ptr size is < 64, so we know max_usize_plus_1 fits into
+ // This can only happen if the ptr size is < 64, so we know max_usize_plus_1 fits into
// i64.
debug_assert!(self.pointer_size().bits() < 64);
let max_usize_plus_1 = 1u128 << self.pointer_size().bits();
diff --git a/compiler/rustc_middle/src/mir/interpret/queries.rs b/compiler/rustc_middle/src/mir/interpret/queries.rs
index 4207988d7..473894ac1 100644
--- a/compiler/rustc_middle/src/mir/interpret/queries.rs
+++ b/compiler/rustc_middle/src/mir/interpret/queries.rs
@@ -4,7 +4,9 @@ use crate::mir;
use crate::ty::subst::InternalSubsts;
use crate::ty::visit::TypeVisitable;
use crate::ty::{self, query::TyCtxtAt, query::TyCtxtEnsure, TyCtxt};
+use rustc_hir::def::DefKind;
use rustc_hir::def_id::DefId;
+use rustc_session::lint;
use rustc_span::{Span, DUMMY_SP};
impl<'tcx> TyCtxt<'tcx> {
@@ -36,7 +38,7 @@ impl<'tcx> TyCtxt<'tcx> {
pub fn const_eval_resolve(
self,
param_env: ty::ParamEnv<'tcx>,
- ct: ty::Unevaluated<'tcx>,
+ ct: mir::UnevaluatedConst<'tcx>,
span: Option<Span>,
) -> EvalToConstValueResult<'tcx> {
// Cannot resolve `Unevaluated` constants that contain inference
@@ -45,11 +47,15 @@ impl<'tcx> TyCtxt<'tcx> {
//
// When trying to evaluate constants containing inference variables,
// use `Infcx::const_eval_resolve` instead.
- if ct.substs.has_infer_types_or_consts() {
+ if ct.substs.has_non_region_infer() {
bug!("did not expect inference variables here");
}
- match ty::Instance::resolve_opt_const_arg(self, param_env, ct.def, ct.substs) {
+ match ty::Instance::resolve_opt_const_arg(
+ self, param_env,
+ // FIXME: maybe have a separate version for resolving mir::UnevaluatedConst?
+ ct.def, ct.substs,
+ ) {
Ok(Some(instance)) => {
let cid = GlobalId { instance, promoted: ct.promoted };
self.const_eval_global_id(param_env, cid, span)
@@ -63,7 +69,7 @@ impl<'tcx> TyCtxt<'tcx> {
pub fn const_eval_resolve_for_typeck(
self,
param_env: ty::ParamEnv<'tcx>,
- ct: ty::Unevaluated<'tcx, ()>,
+ ct: ty::UnevaluatedConst<'tcx>,
span: Option<Span>,
) -> EvalToValTreeResult<'tcx> {
// Cannot resolve `Unevaluated` constants that contain inference
@@ -72,14 +78,36 @@ impl<'tcx> TyCtxt<'tcx> {
//
// When trying to evaluate constants containing inference variables,
// use `Infcx::const_eval_resolve` instead.
- if ct.substs.has_infer_types_or_consts() {
+ if ct.substs.has_non_region_infer() {
bug!("did not expect inference variables here");
}
match ty::Instance::resolve_opt_const_arg(self, param_env, ct.def, ct.substs) {
Ok(Some(instance)) => {
let cid = GlobalId { instance, promoted: None };
- self.const_eval_global_id_for_typeck(param_env, cid, span)
+ self.const_eval_global_id_for_typeck(param_env, cid, span).inspect(|_| {
+ // We are emitting the lint here instead of in `is_const_evaluatable`
+ // as we normalize obligations before checking them, and normalization
+ // uses this function to evaluate this constant.
+ //
+                    // @lcnr believes that successfully evaluating even though there are
+                    // used generic parameters is a bug in evaluation, so checking for it
+                    // here does feel somewhat sensible.
+ if !self.features().generic_const_exprs && ct.substs.has_non_region_param() {
+ assert!(matches!(self.def_kind(ct.def.did), DefKind::AnonConst));
+ let mir_body = self.mir_for_ctfe_opt_const_arg(ct.def);
+ if mir_body.is_polymorphic {
+ let Some(local_def_id) = ct.def.did.as_local() else { return };
+ self.struct_span_lint_hir(
+ lint::builtin::CONST_EVALUATABLE_UNCHECKED,
+ self.hir().local_def_id_to_hir_id(local_def_id),
+ self.def_span(ct.def.did),
+ "cannot use constants which depend on generic parameters in types",
+ |err| err,
+ )
+ }
+ }
+ })
}
Ok(None) => Err(ErrorHandled::TooGeneric),
Err(error_reported) => Err(ErrorHandled::Reported(error_reported)),
@@ -211,7 +239,7 @@ impl<'tcx> TyCtxt<'tcx> {
self,
param_env: ty::ParamEnv<'tcx>,
constant: mir::ConstantKind<'tcx>,
- ) -> mir::DestructuredMirConstant<'tcx> {
+ ) -> mir::DestructuredConstant<'tcx> {
self.try_destructure_mir_constant(param_env.and(constant)).unwrap()
}
}
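
A hedged sketch (not part of this patch) of evaluating a constant through the renamed `mir::UnevaluatedConst` (defined in the mir/mod.rs hunk below); `def_id` is a placeholder and identity substs are used only to keep the example self-contained:

use rustc_hir::def_id::DefId;
use rustc_middle::mir;
use rustc_middle::mir::interpret::EvalToConstValueResult;
use rustc_middle::ty::subst::InternalSubsts;
use rustc_middle::ty::{self, TyCtxt};

fn eval_const<'tcx>(
    tcx: TyCtxt<'tcx>,
    param_env: ty::ParamEnv<'tcx>,
    def_id: DefId,
) -> EvalToConstValueResult<'tcx> {
    // `mir::UnevaluatedConst` replaces `ty::Unevaluated<'tcx, Option<Promoted>>`;
    // `new` leaves the promoted field as `None`.
    let uneval = mir::UnevaluatedConst::new(
        ty::WithOptConstParam::unknown(def_id),
        InternalSubsts::identity_for_item(tcx, def_id),
    );
    tcx.const_eval_resolve(param_env, uneval, None)
}
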
diff --git a/compiler/rustc_middle/src/mir/mod.rs b/compiler/rustc_middle/src/mir/mod.rs
index 3d7a6230e..79db35a76 100644
--- a/compiler/rustc_middle/src/mir/mod.rs
+++ b/compiler/rustc_middle/src/mir/mod.rs
@@ -7,12 +7,12 @@ use crate::mir::interpret::{
};
use crate::mir::visit::MirVisitable;
use crate::ty::codec::{TyDecoder, TyEncoder};
-use crate::ty::fold::{FallibleTypeFolder, TypeFoldable, TypeSuperFoldable};
+use crate::ty::fold::{FallibleTypeFolder, TypeFoldable};
use crate::ty::print::{FmtPrinter, Printer};
-use crate::ty::subst::{GenericArg, InternalSubsts, Subst, SubstsRef};
-use crate::ty::visit::{TypeSuperVisitable, TypeVisitable, TypeVisitor};
+use crate::ty::visit::{TypeVisitable, TypeVisitor};
use crate::ty::{self, List, Ty, TyCtxt};
use crate::ty::{AdtDef, InstanceDef, ScalarInt, UserTypeAnnotationIndex};
+use crate::ty::{GenericArg, InternalSubsts, SubstsRef};
use rustc_data_structures::captures::Captures;
use rustc_errors::ErrorGuaranteed;
@@ -116,11 +116,6 @@ pub trait MirPass<'tcx> {
fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>);
- /// If this pass causes the MIR to enter a new phase, return that phase.
- fn phase_change(&self) -> Option<MirPhase> {
- None
- }
-
fn is_mir_dump_enabled(&self) -> bool {
true
}
@@ -145,6 +140,35 @@ impl MirPhase {
}
}
+impl Display for MirPhase {
+ fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+ match self {
+ MirPhase::Built => write!(f, "built"),
+ MirPhase::Analysis(p) => write!(f, "analysis-{}", p),
+ MirPhase::Runtime(p) => write!(f, "runtime-{}", p),
+ }
+ }
+}
+
+impl Display for AnalysisPhase {
+ fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+ match self {
+ AnalysisPhase::Initial => write!(f, "initial"),
+ AnalysisPhase::PostCleanup => write!(f, "post_cleanup"),
+ }
+ }
+}
+
+impl Display for RuntimePhase {
+ fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+ match self {
+ RuntimePhase::Initial => write!(f, "initial"),
+ RuntimePhase::PostCleanup => write!(f, "post_cleanup"),
+ RuntimePhase::Optimized => write!(f, "optimized"),
+ }
+ }
+}
+
/// Where a specific `mir::Body` comes from.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[derive(HashStable, TyEncodable, TyDecodable, TypeFoldable, TypeVisitable)]
@@ -207,6 +231,9 @@ pub struct Body<'tcx> {
/// us to see the difference and forego optimization on the inlined promoted items.
pub phase: MirPhase,
+    /// How many passes we have executed since starting the current phase. Used for debug output.
+ pub pass_count: usize,
+
pub source: MirSource<'tcx>,
/// A list of source scopes; these are referenced by statements
@@ -292,6 +319,7 @@ impl<'tcx> Body<'tcx> {
let mut body = Body {
phase: MirPhase::Built,
+ pass_count: 1,
source,
basic_blocks: BasicBlocks::new(basic_blocks),
source_scopes,
@@ -313,7 +341,7 @@ impl<'tcx> Body<'tcx> {
is_polymorphic: false,
tainted_by_errors,
};
- body.is_polymorphic = body.has_param_types_or_consts();
+ body.is_polymorphic = body.has_non_region_param();
body
}
@@ -325,6 +353,7 @@ impl<'tcx> Body<'tcx> {
pub fn new_cfg_only(basic_blocks: IndexVec<BasicBlock, BasicBlockData<'tcx>>) -> Self {
let mut body = Body {
phase: MirPhase::Built,
+ pass_count: 1,
source: MirSource::item(CRATE_DEF_ID.to_def_id()),
basic_blocks: BasicBlocks::new(basic_blocks),
source_scopes: IndexVec::new(),
@@ -339,7 +368,7 @@ impl<'tcx> Body<'tcx> {
is_polymorphic: false,
tainted_by_errors: None,
};
- body.is_polymorphic = body.has_param_types_or_consts();
+ body.is_polymorphic = body.has_non_region_param();
body
}
@@ -1380,6 +1409,7 @@ impl<V, T> ProjectionElem<V, T> {
Self::Field(_, _)
| Self::Index(_)
+ | Self::OpaqueCast(_)
| Self::ConstantIndex { .. }
| Self::Subslice { .. }
| Self::Downcast(_, _) => false,
@@ -1574,7 +1604,9 @@ impl Debug for Place<'_> {
fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result {
for elem in self.projection.iter().rev() {
match elem {
- ProjectionElem::Downcast(_, _) | ProjectionElem::Field(_, _) => {
+ ProjectionElem::OpaqueCast(_)
+ | ProjectionElem::Downcast(_, _)
+ | ProjectionElem::Field(_, _) => {
write!(fmt, "(").unwrap();
}
ProjectionElem::Deref => {
@@ -1590,6 +1622,9 @@ impl Debug for Place<'_> {
for elem in self.projection.iter() {
match elem {
+ ProjectionElem::OpaqueCast(ty) => {
+ write!(fmt, " as {})", ty)?;
+ }
ProjectionElem::Downcast(Some(name), _index) => {
write!(fmt, " as {})", name)?;
}
@@ -1818,7 +1853,6 @@ impl<'tcx> Rvalue<'tcx> {
// While the model is undecided, we should be conservative. See
// <https://www.ralfj.de/blog/2022/04/11/provenance-exposed.html>
Rvalue::Cast(CastKind::PointerExposeAddress, _, _) => false,
- Rvalue::Cast(CastKind::DynStar, _, _) => false,
Rvalue::Use(_)
| Rvalue::CopyForDeref(_)
@@ -1828,7 +1862,15 @@ impl<'tcx> Rvalue<'tcx> {
| Rvalue::AddressOf(_, _)
| Rvalue::Len(_)
| Rvalue::Cast(
- CastKind::Misc | CastKind::Pointer(_) | CastKind::PointerFromExposedAddress,
+ CastKind::IntToInt
+ | CastKind::FloatToInt
+ | CastKind::FloatToFloat
+ | CastKind::IntToFloat
+ | CastKind::FnPtrToPtr
+ | CastKind::PtrToPtr
+ | CastKind::Pointer(_)
+ | CastKind::PointerFromExposedAddress
+ | CastKind::DynStar,
_,
_,
)
@@ -2043,13 +2085,13 @@ pub struct Constant<'tcx> {
}
#[derive(Clone, Copy, PartialEq, Eq, TyEncodable, TyDecodable, Hash, HashStable, Debug)]
-#[derive(Lift)]
+#[derive(Lift, TypeFoldable, TypeVisitable)]
pub enum ConstantKind<'tcx> {
/// This constant came from the type system
Ty(ty::Const<'tcx>),
/// An unevaluated mir constant which is not part of the type system.
- Unevaluated(ty::Unevaluated<'tcx, Option<Promoted>>, Ty<'tcx>),
+ Unevaluated(UnevaluatedConst<'tcx>, Ty<'tcx>),
/// This constant cannot go back into the type system, as it represents
/// something the type system cannot handle (e.g. pointers).
@@ -2309,12 +2351,11 @@ impl<'tcx> ConstantKind<'tcx> {
ty::InlineConstSubsts::new(tcx, ty::InlineConstSubstsParts { parent_substs, ty })
.substs;
- let uneval = ty::Unevaluated {
+ let uneval = UnevaluatedConst {
def: ty::WithOptConstParam::unknown(def_id).to_global(),
substs,
promoted: None,
};
-
debug_assert!(!uneval.has_free_regions());
Self::Unevaluated(uneval, ty)
@@ -2398,7 +2439,7 @@ impl<'tcx> ConstantKind<'tcx> {
let hir_id = tcx.hir().local_def_id_to_hir_id(def.did);
let span = tcx.hir().span(hir_id);
- let uneval = ty::Unevaluated::new(def.to_global(), substs);
+ let uneval = UnevaluatedConst::new(def.to_global(), substs);
debug!(?span, ?param_env);
match tcx.const_eval_resolve(param_env, uneval, Some(span)) {
@@ -2411,7 +2452,7 @@ impl<'tcx> ConstantKind<'tcx> {
// Error was handled in `const_eval_resolve`. Here we just create a
// new unevaluated const and error hard later in codegen
Self::Unevaluated(
- ty::Unevaluated {
+ UnevaluatedConst {
def: def.to_global(),
substs: InternalSubsts::identity_for_item(tcx, def.did.to_def_id()),
promoted: None,
@@ -2434,6 +2475,34 @@ impl<'tcx> ConstantKind<'tcx> {
}
}
+/// An unevaluated (potentially generic) constant used in MIR.
+#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable, Lift)]
+#[derive(Hash, HashStable, TypeFoldable, TypeVisitable)]
+pub struct UnevaluatedConst<'tcx> {
+ pub def: ty::WithOptConstParam<DefId>,
+ pub substs: SubstsRef<'tcx>,
+ pub promoted: Option<Promoted>,
+}
+
+impl<'tcx> UnevaluatedConst<'tcx> {
+ // FIXME: probably should get rid of this method. It's also wrong to
+ // shrink and then later expand a promoted.
+ #[inline]
+ pub fn shrink(self) -> ty::UnevaluatedConst<'tcx> {
+ ty::UnevaluatedConst { def: self.def, substs: self.substs }
+ }
+}
+
+impl<'tcx> UnevaluatedConst<'tcx> {
+ #[inline]
+ pub fn new(
+ def: ty::WithOptConstParam<DefId>,
+ substs: SubstsRef<'tcx>,
+ ) -> UnevaluatedConst<'tcx> {
+ UnevaluatedConst { def, substs, promoted: Default::default() }
+ }
+}
+
/// A collection of projections into user types.
///
/// They are projections because a binding can occur a part of a
@@ -2727,7 +2796,7 @@ fn pretty_print_const_value<'tcx>(
}
// Aggregates, printed as array/tuple/struct/variant construction syntax.
//
- // NB: the `has_param_types_or_consts` check ensures that we can use
+ // NB: the `has_non_region_param` check ensures that we can use
// the `destructure_const` query with an empty `ty::ParamEnv` without
// introducing ICEs (e.g. via `layout_of`) from missing bounds.
// E.g. `transmute([0usize; 2]): (u8, *mut T)` needs to know `T: Sized`
@@ -2735,7 +2804,7 @@ fn pretty_print_const_value<'tcx>(
//
// FIXME(eddyb) for `--emit=mir`/`-Z dump-mir`, we should provide the
// correct `ty::ParamEnv` to allow printing *all* constant values.
- (_, ty::Array(..) | ty::Tuple(..) | ty::Adt(..)) if !ty.has_param_types_or_consts() => {
+ (_, ty::Array(..) | ty::Tuple(..) | ty::Adt(..)) if !ty.has_non_region_param() => {
let ct = tcx.lift(ct).unwrap();
let ty = tcx.lift(ty).unwrap();
if let Some(contents) = tcx.try_destructure_mir_constant(
@@ -2906,11 +2975,12 @@ impl Location {
mod size_asserts {
use super::*;
use rustc_data_structures::static_assert_size;
- // These are in alphabetical order, which is easy to maintain.
+ // tidy-alphabetical-start
static_assert_size!(BasicBlockData<'_>, 144);
static_assert_size!(LocalDecl<'_>, 56);
static_assert_size!(Statement<'_>, 32);
static_assert_size!(StatementKind<'_>, 16);
static_assert_size!(Terminator<'_>, 112);
static_assert_size!(TerminatorKind<'_>, 96);
+ // tidy-alphabetical-end
}
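
A hedged example (not part of this patch) of what the new `Display` impls above produce; the exact strings follow directly from the `write!` calls in the impls, while their use in pass naming/dump output is an assumption based on the new `pass_count` field:

use rustc_middle::mir::{AnalysisPhase, MirPhase, RuntimePhase};

fn phase_names() {
    assert_eq!(MirPhase::Built.to_string(), "built");
    assert_eq!(MirPhase::Analysis(AnalysisPhase::PostCleanup).to_string(), "analysis-post_cleanup");
    assert_eq!(MirPhase::Runtime(RuntimePhase::Optimized).to_string(), "runtime-optimized");
}
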
diff --git a/compiler/rustc_middle/src/mir/mono.rs b/compiler/rustc_middle/src/mir/mono.rs
index 21ae121e1..15a24aa4a 100644
--- a/compiler/rustc_middle/src/mir/mono.rs
+++ b/compiler/rustc_middle/src/mir/mono.rs
@@ -81,7 +81,7 @@ impl<'tcx> MonoItem<'tcx> {
MonoItem::Fn(instance) => tcx.symbol_name(instance),
MonoItem::Static(def_id) => tcx.symbol_name(Instance::mono(tcx, def_id)),
MonoItem::GlobalAsm(item_id) => {
- SymbolName::new(tcx, &format!("global_asm_{:?}", item_id.def_id))
+ SymbolName::new(tcx, &format!("global_asm_{:?}", item_id.owner_id))
}
}
}
@@ -182,7 +182,7 @@ impl<'tcx> MonoItem<'tcx> {
match *self {
MonoItem::Fn(Instance { def, .. }) => def.def_id().as_local(),
MonoItem::Static(def_id) => def_id.as_local(),
- MonoItem::GlobalAsm(item_id) => Some(item_id.def_id),
+ MonoItem::GlobalAsm(item_id) => Some(item_id.owner_id.def_id),
}
.map(|def_id| tcx.def_span(def_id))
}
@@ -373,7 +373,7 @@ impl<'tcx> CodegenUnit<'tcx> {
}
}
MonoItem::Static(def_id) => def_id.as_local().map(Idx::index),
- MonoItem::GlobalAsm(item_id) => Some(item_id.def_id.index()),
+ MonoItem::GlobalAsm(item_id) => Some(item_id.owner_id.def_id.index()),
},
item.symbol_name(tcx),
)
diff --git a/compiler/rustc_middle/src/mir/pretty.rs b/compiler/rustc_middle/src/mir/pretty.rs
index 0b42137d4..05dcfba77 100644
--- a/compiler/rustc_middle/src/mir/pretty.rs
+++ b/compiler/rustc_middle/src/mir/pretty.rs
@@ -466,10 +466,9 @@ impl<'tcx> Visitor<'tcx> for ExtraComments<'tcx> {
ty::ConstKind::Param(p) => format!("Param({})", p),
ty::ConstKind::Unevaluated(uv) => {
format!(
- "Unevaluated({}, {:?}, {:?})",
+ "Unevaluated({}, {:?})",
self.tcx.def_path_str(uv.def.did),
uv.substs,
- uv.promoted,
)
}
ty::ConstKind::Value(val) => format!("Value({})", fmt_valtree(&val)),
diff --git a/compiler/rustc_middle/src/mir/query.rs b/compiler/rustc_middle/src/mir/query.rs
index d89efe2b3..efd7357af 100644
--- a/compiler/rustc_middle/src/mir/query.rs
+++ b/compiler/rustc_middle/src/mir/query.rs
@@ -392,16 +392,9 @@ pub enum ClosureOutlivesSubject<'tcx> {
Region(ty::RegionVid),
}
-/// The constituent parts of a type level constant of kind ADT or array.
-#[derive(Copy, Clone, Debug, HashStable)]
-pub struct DestructuredConst<'tcx> {
- pub variant: Option<VariantIdx>,
- pub fields: &'tcx [ty::Const<'tcx>],
-}
-
/// The constituent parts of a mir constant of kind ADT or array.
#[derive(Copy, Clone, Debug, HashStable)]
-pub struct DestructuredMirConstant<'tcx> {
+pub struct DestructuredConstant<'tcx> {
pub variant: Option<VariantIdx>,
pub fields: &'tcx [ConstantKind<'tcx>],
}
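
A hedged sketch (not part of this patch) of consuming the renamed `DestructuredConstant` through the infallible wrapper shown in the interpret/queries.rs hunk (assumed here to be `TyCtxt::destructure_mir_constant`); `ct` is assumed to be an ADT or array constant, otherwise the underlying query returns `None` and the wrapper panics:

use rustc_middle::mir;
use rustc_middle::ty::{self, TyCtxt};

fn count_fields<'tcx>(
    tcx: TyCtxt<'tcx>,
    param_env: ty::ParamEnv<'tcx>,
    ct: mir::ConstantKind<'tcx>,
) -> usize {
    // `DestructuredConstant` carries MIR-level `ConstantKind` fields rather
    // than type-level `ty::Const`s, now that `DestructuredConst` is gone.
    let mir::DestructuredConstant { variant: _, fields } =
        tcx.destructure_mir_constant(param_env, ct);
    fields.len()
}
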
diff --git a/compiler/rustc_middle/src/mir/syntax.rs b/compiler/rustc_middle/src/mir/syntax.rs
index c7d0283aa..85ef51f12 100644
--- a/compiler/rustc_middle/src/mir/syntax.rs
+++ b/compiler/rustc_middle/src/mir/syntax.rs
@@ -82,9 +82,10 @@ pub enum MirPhase {
/// access to. This occurs in generator bodies. Such locals do not behave like other locals,
/// because they eg may be aliased in surprising ways. Runtime MIR has no such special locals -
/// all generator bodies are lowered and so all places that look like locals really are locals.
- /// - Const prop lints: The lint pass which reports eg `200_u8 + 200_u8` as an error is run as a
- /// part of analysis to runtime MIR lowering. This means that transformations which may supress
- /// such errors may not run on analysis MIR.
+ ///
+    /// Also note that the lint pass which reports e.g. `200_u8 + 200_u8` as an error is run as
+    /// part of the analysis-to-runtime MIR lowering. To ensure lints are reported reliably,
+    /// transformations which may suppress such errors should not run on analysis MIR.
Runtime(RuntimePhase),
}
@@ -829,6 +830,9 @@ pub type AssertMessage<'tcx> = AssertKind<Operand<'tcx>>;
/// generator has more than one variant, the parent place's variant index must be set, indicating
/// which variant is being used. If it has just one variant, the variant index may or may not be
/// included - the single possible variant is inferred if it is not included.
+/// - [`OpaqueCast`](ProjectionElem::OpaqueCast): This projection changes the place's type to the
+///   given one, and makes no other changes. An `OpaqueCast` projection on any type other than an
+/// opaque type from the current crate is not well-formed.
/// - [`ConstantIndex`](ProjectionElem::ConstantIndex): Computes an offset in units of `T` into the
/// place as described in the documentation for the `ProjectionElem`. The resulting address is
/// the parent's address plus that offset, and the type is `T`. This is only legal if the parent
@@ -928,6 +932,10 @@ pub enum ProjectionElem<V, T> {
///
/// The included Symbol is the name of the variant, used for printing MIR.
Downcast(Option<Symbol>, VariantIdx),
+
+ /// Like an explicit cast from an opaque type to a concrete type, but without
+ /// requiring an intermediate variable.
+ OpaqueCast(T),
}
/// Alias for projections as they appear in places, where the base is a place
@@ -1141,8 +1149,12 @@ pub enum CastKind {
Pointer(PointerCast),
/// Cast into a dyn* object.
DynStar,
- /// Remaining unclassified casts.
- Misc,
+ IntToInt,
+ FloatToInt,
+ FloatToFloat,
+ IntToFloat,
+ PtrToPtr,
+ FnPtrToPtr,
}
#[derive(Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, Hash, HashStable)]
@@ -1233,11 +1245,11 @@ pub enum BinOp {
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
mod size_asserts {
use super::*;
- // These are in alphabetical order, which is easy to maintain.
- #[cfg(not(bootstrap))]
+ // tidy-alphabetical-start
static_assert_size!(AggregateKind<'_>, 40);
static_assert_size!(Operand<'_>, 24);
static_assert_size!(Place<'_>, 16);
static_assert_size!(PlaceElem<'_>, 24);
static_assert_size!(Rvalue<'_>, 40);
+ // tidy-alphabetical-end
}
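
A hedged illustration (not part of this patch) of matching on the named cast kinds that replace `CastKind::Misc`; the helper and its grouping are purely illustrative:

use rustc_middle::mir::CastKind;

fn involves_float(kind: CastKind) -> bool {
    // With `Misc` split up, numeric casts can be distinguished directly on the
    // MIR `Rvalue::Cast` kind instead of re-deriving this from the operand and
    // target types.
    matches!(
        kind,
        CastKind::FloatToInt | CastKind::FloatToFloat | CastKind::IntToFloat
    )
}
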
diff --git a/compiler/rustc_middle/src/mir/tcx.rs b/compiler/rustc_middle/src/mir/tcx.rs
index 405003156..fa3adafd4 100644
--- a/compiler/rustc_middle/src/mir/tcx.rs
+++ b/compiler/rustc_middle/src/mir/tcx.rs
@@ -4,7 +4,6 @@
*/
use crate::mir::*;
-use crate::ty::subst::Subst;
use crate::ty::{self, Ty, TyCtxt};
use rustc_hir as hir;
use rustc_target::abi::VariantIdx;
@@ -57,7 +56,7 @@ impl<'tcx> PlaceTy<'tcx> {
/// `PlaceElem`, where we can just use the `Ty` that is already
/// stored inline on field projection elems.
pub fn projection_ty(self, tcx: TyCtxt<'tcx>, elem: PlaceElem<'tcx>) -> PlaceTy<'tcx> {
- self.projection_ty_core(tcx, ty::ParamEnv::empty(), &elem, |_, _, ty| ty)
+ self.projection_ty_core(tcx, ty::ParamEnv::empty(), &elem, |_, _, ty| ty, |_, ty| ty)
}
/// `place_ty.projection_ty_core(tcx, elem, |...| { ... })`
@@ -71,6 +70,7 @@ impl<'tcx> PlaceTy<'tcx> {
param_env: ty::ParamEnv<'tcx>,
elem: &ProjectionElem<V, T>,
mut handle_field: impl FnMut(&Self, Field, T) -> Ty<'tcx>,
+ mut handle_opaque_cast: impl FnMut(&Self, T) -> Ty<'tcx>,
) -> PlaceTy<'tcx>
where
V: ::std::fmt::Debug,
@@ -109,6 +109,7 @@ impl<'tcx> PlaceTy<'tcx> {
PlaceTy { ty: self.ty, variant_index: Some(index) }
}
ProjectionElem::Field(f, fty) => PlaceTy::from_ty(handle_field(&self, f, fty)),
+ ProjectionElem::OpaqueCast(ty) => PlaceTy::from_ty(handle_opaque_cast(&self, ty)),
};
debug!("projection_ty self: {:?} elem: {:?} yields: {:?}", self, elem, answer);
answer
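
A hedged sketch (not part of this patch) of adapting a caller to the extra `handle_opaque_cast` callback on `projection_ty_core`; a caller that needs no special handling can return the stored types unchanged, which is what `projection_ty` above now does:

use rustc_middle::mir::tcx::PlaceTy;
use rustc_middle::mir::PlaceElem;
use rustc_middle::ty::{self, TyCtxt};

fn project<'tcx>(tcx: TyCtxt<'tcx>, base: PlaceTy<'tcx>, elem: PlaceElem<'tcx>) -> PlaceTy<'tcx> {
    base.projection_ty_core(
        tcx,
        ty::ParamEnv::empty(),
        &elem,
        // handle_field: keep the field type already stored in the projection
        |_, _, field_ty| field_ty,
        // handle_opaque_cast: keep the type the `OpaqueCast` casts to
        |_, opaque_ty| opaque_ty,
    )
}
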
diff --git a/compiler/rustc_middle/src/mir/type_foldable.rs b/compiler/rustc_middle/src/mir/type_foldable.rs
index 9d098c808..4c0974f86 100644
--- a/compiler/rustc_middle/src/mir/type_foldable.rs
+++ b/compiler/rustc_middle/src/mir/type_foldable.rs
@@ -26,6 +26,12 @@ TrivialTypeTraversalAndLiftImpls! {
GeneratorSavedLocal,
}
+TrivialTypeTraversalImpls! {
+ for <'tcx> {
+ ConstValue<'tcx>,
+ }
+}
+
impl<'tcx> TypeFoldable<'tcx> for &'tcx [InlineAsmTemplatePiece] {
fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, _folder: &mut F) -> Result<Self, F::Error> {
Ok(self)
@@ -49,25 +55,3 @@ impl<'tcx, R: Idx, C: Idx> TypeFoldable<'tcx> for BitMatrix<R, C> {
Ok(self)
}
}
-
-impl<'tcx> TypeFoldable<'tcx> for ConstantKind<'tcx> {
- #[inline(always)]
- fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
- folder.try_fold_mir_const(self)
- }
-}
-
-impl<'tcx> TypeSuperFoldable<'tcx> for ConstantKind<'tcx> {
- fn try_super_fold_with<F: FallibleTypeFolder<'tcx>>(
- self,
- folder: &mut F,
- ) -> Result<Self, F::Error> {
- match self {
- ConstantKind::Ty(c) => Ok(ConstantKind::Ty(c.try_fold_with(folder)?)),
- ConstantKind::Val(v, t) => Ok(ConstantKind::Val(v, t.try_fold_with(folder)?)),
- ConstantKind::Unevaluated(uv, t) => {
- Ok(ConstantKind::Unevaluated(uv.try_fold_with(folder)?, t.try_fold_with(folder)?))
- }
- }
- }
-}
diff --git a/compiler/rustc_middle/src/mir/type_visitable.rs b/compiler/rustc_middle/src/mir/type_visitable.rs
index be19bb486..e7cd497b2 100644
--- a/compiler/rustc_middle/src/mir/type_visitable.rs
+++ b/compiler/rustc_middle/src/mir/type_visitable.rs
@@ -7,22 +7,3 @@ impl<'tcx, R: Idx, C: Idx> TypeVisitable<'tcx> for BitMatrix<R, C> {
ControlFlow::CONTINUE
}
}
-
-impl<'tcx> TypeVisitable<'tcx> for ConstantKind<'tcx> {
- fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
- visitor.visit_mir_const(*self)
- }
-}
-
-impl<'tcx> TypeSuperVisitable<'tcx> for ConstantKind<'tcx> {
- fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
- match *self {
- ConstantKind::Ty(c) => c.visit_with(visitor),
- ConstantKind::Val(_, t) => t.visit_with(visitor),
- ConstantKind::Unevaluated(uv, t) => {
- uv.visit_with(visitor)?;
- t.visit_with(visitor)
- }
- }
- }
-}
diff --git a/compiler/rustc_middle/src/mir/visit.rs b/compiler/rustc_middle/src/mir/visit.rs
index d9b24566b..ddcf3711b 100644
--- a/compiler/rustc_middle/src/mir/visit.rs
+++ b/compiler/rustc_middle/src/mir/visit.rs
@@ -1084,6 +1084,11 @@ macro_rules! visit_place_fns {
self.visit_ty(&mut new_ty, TyContext::Location(location));
if ty != new_ty { Some(PlaceElem::Field(field, new_ty)) } else { None }
}
+ PlaceElem::OpaqueCast(ty) => {
+ let mut new_ty = ty;
+ self.visit_ty(&mut new_ty, TyContext::Location(location));
+ if ty != new_ty { Some(PlaceElem::OpaqueCast(new_ty)) } else { None }
+ }
PlaceElem::Deref
| PlaceElem::ConstantIndex { .. }
| PlaceElem::Subslice { .. }
@@ -1153,7 +1158,7 @@ macro_rules! visit_place_fns {
location: Location,
) {
match elem {
- ProjectionElem::Field(_field, ty) => {
+ ProjectionElem::OpaqueCast(ty) | ProjectionElem::Field(_, ty) => {
self.visit_ty(ty, TyContext::Location(location));
}
ProjectionElem::Index(local) => {
diff --git a/compiler/rustc_middle/src/query/mod.rs b/compiler/rustc_middle/src/query/mod.rs
index 4b336ea62..3d720f09b 100644
--- a/compiler/rustc_middle/src/query/mod.rs
+++ b/compiler/rustc_middle/src/query/mod.rs
@@ -4,6 +4,9 @@
//! ["Queries: demand-driven compilation"](https://rustc-dev-guide.rust-lang.org/query.html).
//! This chapter includes instructions for adding new queries.
+use crate::ty::{self, print::describe_as_module, TyCtxt};
+use rustc_span::def_id::LOCAL_CRATE;
+
// Each of these queries corresponds to a function pointer field in the
// `Providers` struct for requesting a value of that type, and a method
// on `tcx: TyCtxt` (and `tcx.at(span)`) for doing that request in a way
@@ -17,19 +20,19 @@
// as they will raise an fatal error on query cycles instead.
rustc_queries! {
query trigger_delay_span_bug(key: DefId) -> () {
- desc { "trigger a delay span bug" }
+ desc { "triggering a delay span bug" }
}
- query resolutions(_: ()) -> &'tcx ty::ResolverOutputs {
+ query resolutions(_: ()) -> &'tcx ty::ResolverGlobalCtxt {
eval_always
no_hash
- desc { "get the resolver outputs" }
+ desc { "getting the resolver outputs" }
}
query resolver_for_lowering(_: ()) -> &'tcx Steal<ty::ResolverAstLowering> {
eval_always
no_hash
- desc { "get the resolver for lowering" }
+ desc { "getting the resolver for lowering" }
}
/// Return the span for a definition.
@@ -37,7 +40,7 @@ rustc_queries! {
/// This span is meant for dep-tracking rather than diagnostics. It should not be used outside
/// of rustc_middle::hir::source_map.
query source_span(key: LocalDefId) -> Span {
- desc { "get the source span" }
+ desc { "getting the source span" }
}
/// Represents crate as a whole (as distinct from the top-level crate module).
@@ -49,14 +52,14 @@ rustc_queries! {
query hir_crate(key: ()) -> Crate<'tcx> {
arena_cache
eval_always
- desc { "get the crate HIR" }
+ desc { "getting the crate HIR" }
}
/// All items in the crate.
query hir_crate_items(_: ()) -> rustc_middle::hir::ModuleItems {
arena_cache
eval_always
- desc { "get HIR crate items" }
+ desc { "getting HIR crate items" }
}
/// The items in a module.
@@ -65,7 +68,7 @@ rustc_queries! {
/// Avoid calling this query directly.
query hir_module_items(key: LocalDefId) -> rustc_middle::hir::ModuleItems {
arena_cache
- desc { |tcx| "HIR module items in `{}`", tcx.def_path_str(key.to_def_id()) }
+ desc { |tcx| "getting HIR module items in `{}`", tcx.def_path_str(key.to_def_id()) }
cache_on_disk_if { true }
}
@@ -73,8 +76,8 @@ rustc_queries! {
///
/// This can be conveniently accessed by methods on `tcx.hir()`.
/// Avoid calling this query directly.
- query hir_owner(key: LocalDefId) -> Option<crate::hir::Owner<'tcx>> {
- desc { |tcx| "HIR owner of `{}`", tcx.def_path_str(key.to_def_id()) }
+ query hir_owner(key: hir::OwnerId) -> Option<crate::hir::Owner<'tcx>> {
+ desc { |tcx| "getting HIR owner of `{}`", tcx.def_path_str(key.to_def_id()) }
}
/// Gives access to the HIR ID for the given `LocalDefId` owner `key`.
@@ -82,31 +85,31 @@ rustc_queries! {
/// This can be conveniently accessed by methods on `tcx.hir()`.
/// Avoid calling this query directly.
query local_def_id_to_hir_id(key: LocalDefId) -> hir::HirId {
- desc { |tcx| "HIR ID of `{}`", tcx.def_path_str(key.to_def_id()) }
+ desc { |tcx| "getting HIR ID of `{}`", tcx.def_path_str(key.to_def_id()) }
}
/// Gives access to the HIR node's parent for the HIR owner `key`.
///
/// This can be conveniently accessed by methods on `tcx.hir()`.
/// Avoid calling this query directly.
- query hir_owner_parent(key: LocalDefId) -> hir::HirId {
- desc { |tcx| "HIR parent of `{}`", tcx.def_path_str(key.to_def_id()) }
+ query hir_owner_parent(key: hir::OwnerId) -> hir::HirId {
+ desc { |tcx| "getting HIR parent of `{}`", tcx.def_path_str(key.to_def_id()) }
}
/// Gives access to the HIR nodes and bodies inside the HIR owner `key`.
///
/// This can be conveniently accessed by methods on `tcx.hir()`.
/// Avoid calling this query directly.
- query hir_owner_nodes(key: LocalDefId) -> hir::MaybeOwner<&'tcx hir::OwnerNodes<'tcx>> {
- desc { |tcx| "HIR owner items in `{}`", tcx.def_path_str(key.to_def_id()) }
+ query hir_owner_nodes(key: hir::OwnerId) -> hir::MaybeOwner<&'tcx hir::OwnerNodes<'tcx>> {
+ desc { |tcx| "getting HIR owner items in `{}`", tcx.def_path_str(key.to_def_id()) }
}
/// Gives access to the HIR attributes inside the HIR owner `key`.
///
/// This can be conveniently accessed by methods on `tcx.hir()`.
/// Avoid calling this query directly.
- query hir_attrs(key: LocalDefId) -> &'tcx hir::AttributeMap<'tcx> {
- desc { |tcx| "HIR owner attributes in `{}`", tcx.def_path_str(key.to_def_id()) }
+ query hir_attrs(key: hir::OwnerId) -> &'tcx hir::AttributeMap<'tcx> {
+ desc { |tcx| "getting HIR owner attributes in `{}`", tcx.def_path_str(key.to_def_id()) }
}
/// Computes the `DefId` of the corresponding const parameter in case the `key` is a
@@ -135,7 +138,7 @@ rustc_queries! {
/// Given the def_id of a const-generic parameter, computes the associated default const
/// parameter. e.g. `fn example<const N: usize=3>` called on `N` would return `3`.
query const_param_default(param: DefId) -> ty::Const<'tcx> {
- desc { |tcx| "compute const default for a given parameter `{}`", tcx.def_path_str(param) }
+ desc { |tcx| "computing const default for a given parameter `{}`", tcx.def_path_str(param) }
cache_on_disk_if { param.is_local() }
separate_provide_extern
}
@@ -164,7 +167,7 @@ rustc_queries! {
query collect_trait_impl_trait_tys(key: DefId)
-> Result<&'tcx FxHashMap<DefId, Ty<'tcx>>, ErrorGuaranteed>
{
- desc { "compare an impl and trait method signature, inferring any hidden `impl Trait` types in the process" }
+ desc { "comparing an impl and trait method signature, inferring any hidden `impl Trait` types in the process" }
cache_on_disk_if { key.is_local() }
separate_provide_extern
}
@@ -274,19 +277,24 @@ rustc_queries! {
separate_provide_extern
}
- query lint_levels(_: ()) -> LintLevelMap {
+ query shallow_lint_levels_on(key: hir::OwnerId) -> rustc_middle::lint::ShallowLintLevelMap {
+ eval_always // fetches `resolutions`
arena_cache
- eval_always
- desc { "computing the lint levels for items in this crate" }
+ desc { |tcx| "looking up lint levels for `{}`", tcx.def_path_str(key.to_def_id()) }
+ }
+
+ query lint_expectations(_: ()) -> Vec<(LintExpectationId, LintExpectation)> {
+ arena_cache
+ desc { "computing `#[expect]`ed lints in this crate" }
}
query parent_module_from_def_id(key: LocalDefId) -> LocalDefId {
eval_always
- desc { |tcx| "parent module of `{}`", tcx.def_path_str(key.to_def_id()) }
+ desc { |tcx| "getting the parent module of `{}`", tcx.def_path_str(key.to_def_id()) }
}
query expn_that_defined(key: DefId) -> rustc_span::ExpnId {
- desc { |tcx| "expansion that defined `{}`", tcx.def_path_str(key) }
+ desc { |tcx| "getting the expansion that defined `{}`", tcx.def_path_str(key) }
separate_provide_extern
}
@@ -296,6 +304,32 @@ rustc_queries! {
separate_provide_extern
}
+ /// Checks whether a type is representable or infinitely sized
+ query representability(_: LocalDefId) -> rustc_middle::ty::Representability {
+ desc { "checking if `{}` is representable", tcx.def_path_str(key.to_def_id()) }
+ // infinitely sized types will cause a cycle
+ cycle_delay_bug
+ // we don't want recursive representability calls to be forced with
+ // incremental compilation because, if a cycle occurs, we need the
+ // entire cycle to be in memory for diagnostics
+ anon
+ }
+
+ /// An implementation detail for the `representability` query
+ query representability_adt_ty(_: Ty<'tcx>) -> rustc_middle::ty::Representability {
+ desc { "checking if `{}` is representable", key }
+ cycle_delay_bug
+ anon
+ }
+
+ /// Set of param indexes for type params that are in the type's representation
+ query params_in_repr(key: DefId) -> rustc_index::bit_set::BitSet<u32> {
+ desc { "finding type parameters in the representation" }
+ arena_cache
+ no_hash
+ separate_provide_extern
+ }
+
/// Fetch the THIR for a given body. If typeck for that body failed, returns an empty `Thir`.
query thir_body(key: ty::WithOptConstParam<LocalDefId>)
-> Result<(&'tcx Steal<thir::Thir<'tcx>>, thir::ExprId), ErrorGuaranteed>
@@ -349,7 +383,7 @@ rustc_queries! {
/// See the README for the `mir` module for details.
query mir_const(key: ty::WithOptConstParam<LocalDefId>) -> &'tcx Steal<mir::Body<'tcx>> {
desc {
- |tcx| "processing MIR for {}`{}`",
+ |tcx| "preparing {}`{}` for borrow checking",
if key.const_param_did.is_some() { "the const argument " } else { "" },
tcx.def_path_str(key.did.to_def_id()),
}
@@ -361,7 +395,7 @@ rustc_queries! {
key: DefId
) -> Result<Option<&'tcx [ty::abstract_const::Node<'tcx>]>, ErrorGuaranteed> {
desc {
- |tcx| "building an abstract representation for {}", tcx.def_path_str(key),
+ |tcx| "building an abstract representation for `{}`", tcx.def_path_str(key),
}
separate_provide_extern
}
@@ -371,16 +405,16 @@ rustc_queries! {
) -> Result<Option<&'tcx [ty::abstract_const::Node<'tcx>]>, ErrorGuaranteed> {
desc {
|tcx|
- "building an abstract representation for the const argument {}",
+ "building an abstract representation for the const argument `{}`",
tcx.def_path_str(key.0.to_def_id()),
}
}
query try_unify_abstract_consts(key:
- ty::ParamEnvAnd<'tcx, (ty::Unevaluated<'tcx, ()>, ty::Unevaluated<'tcx, ()>
+ ty::ParamEnvAnd<'tcx, (ty::UnevaluatedConst<'tcx>, ty::UnevaluatedConst<'tcx>
)>) -> bool {
desc {
- |tcx| "trying to unify the generic constants {} and {}",
+ |tcx| "trying to unify the generic constants `{}` and `{}`",
tcx.def_path_str(key.value.0.def.did), tcx.def_path_str(key.value.1.def.did)
}
}
@@ -402,7 +436,7 @@ rustc_queries! {
query mir_for_ctfe_of_const_arg(key: (LocalDefId, DefId)) -> &'tcx mir::Body<'tcx> {
desc {
- |tcx| "MIR for CTFE of the const argument `{}`",
+ |tcx| "caching MIR for CTFE of the const argument `{}`",
tcx.def_path_str(key.0.to_def_id())
}
}
@@ -414,7 +448,7 @@ rustc_queries! {
) {
no_hash
desc {
- |tcx| "processing {}`{}`",
+ |tcx| "processing MIR for {}`{}`",
if key.const_param_did.is_some() { "the const argument " } else { "" },
tcx.def_path_str(key.did.to_def_id()),
}
@@ -425,7 +459,7 @@ rustc_queries! {
) -> Vec<rustc_span::Symbol> {
arena_cache
desc {
- |tcx| "symbols for captures of closure `{}` in `{}`",
+ |tcx| "finding symbols for captures of closure `{}` in `{}`",
tcx.def_path_str(key.1.to_def_id()),
tcx.def_path_str(key.0.to_def_id())
}
@@ -487,12 +521,12 @@ rustc_queries! {
// queries). Making it anonymous avoids hashing the result, which
// may save a bit of time.
anon
- desc { "erasing regions from `{:?}`", ty }
+ desc { "erasing regions from `{}`", ty }
}
query wasm_import_module_map(_: CrateNum) -> FxHashMap<DefId, String> {
arena_cache
- desc { "wasm import module map" }
+ desc { "getting wasm import module map" }
}
/// Maps from the `DefId` of an item (trait/struct/enum/fn) to the
@@ -582,16 +616,8 @@ rustc_queries! {
separate_provide_extern
}
- // The cycle error here should be reported as an error by `check_representable`.
- // We consider the type as Sized in the meanwhile to avoid
- // further errors (done in impl Value for AdtSizedConstraint).
- // Use `cycle_delay_bug` to delay the cycle error here to be emitted later
- // in case we accidentally otherwise don't emit an error.
- query adt_sized_constraint(
- key: DefId
- ) -> AdtSizedConstraint<'tcx> {
+ query adt_sized_constraint(key: DefId) -> &'tcx [Ty<'tcx>] {
desc { |tcx| "computing `Sized` constraints for `{}`", tcx.def_path_str(key) }
- cycle_delay_bug
}
query adt_dtorck_constraint(
@@ -680,7 +706,7 @@ rustc_queries! {
/// Collects the associated items defined on a trait or impl.
query associated_items(key: DefId) -> ty::AssocItems<'tcx> {
arena_cache
- desc { |tcx| "collecting associated items of {}", tcx.def_path_str(key) }
+ desc { |tcx| "collecting associated items of `{}`", tcx.def_path_str(key) }
}
/// Maps from associated items on a trait to the corresponding associated
@@ -706,7 +732,7 @@ rustc_queries! {
///`{ trait_f: impl_f, trait_g: impl_g }`
query impl_item_implementor_ids(impl_id: DefId) -> FxHashMap<DefId, DefId> {
arena_cache
- desc { |tcx| "comparing impl items against trait for {}", tcx.def_path_str(impl_id) }
+ desc { |tcx| "comparing impl items against trait for `{}`", tcx.def_path_str(impl_id) }
}
/// Given an `impl_id`, return the trait it implements.
@@ -778,7 +804,7 @@ rustc_queries! {
/// Note that we've liberated the late bound regions of function signatures, so
/// this can not be used to check whether these types are well formed.
query assumed_wf_types(key: DefId) -> &'tcx ty::List<Ty<'tcx>> {
- desc { |tcx| "computing the implied bounds of {}", tcx.def_path_str(key) }
+ desc { |tcx| "computing the implied bounds of `{}`", tcx.def_path_str(key) }
}
/// Computes the signature of the function.
@@ -827,7 +853,7 @@ rustc_queries! {
}
query check_liveness(key: DefId) {
- desc { |tcx| "checking liveness of variables in {}", tcx.def_path_str(key) }
+ desc { |tcx| "checking liveness of variables in `{}`", tcx.def_path_str(key) }
}
/// Return the live symbols in the crate for dead code check.
@@ -839,7 +865,7 @@ rustc_queries! {
FxHashMap<LocalDefId, Vec<(DefId, DefId)>>
) {
arena_cache
- desc { "find live symbols in crate" }
+ desc { "finding live symbols in crate" }
}
query check_mod_deathness(key: LocalDefId) -> () {
@@ -886,8 +912,8 @@ rustc_queries! {
cache_on_disk_if { true }
}
- query used_trait_imports(key: LocalDefId) -> &'tcx FxHashSet<LocalDefId> {
- desc { |tcx| "used_trait_imports `{}`", tcx.def_path_str(key.to_def_id()) }
+ query used_trait_imports(key: LocalDefId) -> &'tcx UnordSet<LocalDefId> {
+ desc { |tcx| "finding used_trait_imports `{}`", tcx.def_path_str(key.to_def_id()) }
cache_on_disk_if { true }
}
@@ -916,7 +942,7 @@ rustc_queries! {
/// Not meant to be used directly outside of coherence.
query crate_inherent_impls(k: ()) -> CrateInherentImpls {
arena_cache
- desc { "all inherent impls defined in crate" }
+ desc { "finding all inherent impls defined in crate" }
}
/// Checks all types in the crate for overlap in their inherent impls. Reports errors.
@@ -1003,8 +1029,10 @@ rustc_queries! {
/// Tries to destructure an `mir::ConstantKind` ADT or array into its variant index
/// and its field values.
- query try_destructure_mir_constant(key: ty::ParamEnvAnd<'tcx, mir::ConstantKind<'tcx>>) -> Option<mir::DestructuredMirConstant<'tcx>> {
- desc { "destructuring mir constant"}
+ query try_destructure_mir_constant(
+ key: ty::ParamEnvAnd<'tcx, mir::ConstantKind<'tcx>>
+ ) -> Option<mir::DestructuredConstant<'tcx>> {
+ desc { "destructuring MIR constant"}
remap_env_constness
}
@@ -1013,12 +1041,12 @@ rustc_queries! {
query deref_mir_constant(
key: ty::ParamEnvAnd<'tcx, mir::ConstantKind<'tcx>>
) -> mir::ConstantKind<'tcx> {
- desc { "dereferencing mir constant" }
+ desc { "dereferencing MIR constant" }
remap_env_constness
}
query const_caller_location(key: (rustc_span::Symbol, u32, u32)) -> ConstValue<'tcx> {
- desc { "get a &core::panic::Location referring to a span" }
+ desc { "getting a &core::panic::Location referring to a span" }
}
// FIXME get rid of this with valtrees
@@ -1037,10 +1065,10 @@ rustc_queries! {
cache_on_disk_if { key.is_local() }
}
- /// Performs part of the privacy check and computes "access levels".
- query privacy_access_levels(_: ()) -> &'tcx AccessLevels {
+ /// Performs part of the privacy check and computes effective visibilities.
+ query effective_visibilities(_: ()) -> &'tcx EffectiveVisibilities {
eval_always
- desc { "privacy access levels" }
+ desc { "checking effective visibilities" }
}
query check_private_in_public(_: ()) -> () {
eval_always
@@ -1124,6 +1152,11 @@ rustc_queries! {
desc { |tcx| "checking whether `{}` is `doc(hidden)`", tcx.def_path_str(def_id) }
}
+ /// Determines whether an item is annotated with `doc(notable_trait)`.
+ query is_doc_notable_trait(def_id: DefId) -> bool {
+ desc { |tcx| "checking whether `{}` is `doc(notable_trait)`", tcx.def_path_str(def_id) }
+ }
+
/// Returns the attributes on the item at `def_id`.
///
/// Do not use this directly, use `tcx.get_attrs` instead.
@@ -1163,29 +1196,29 @@ rustc_queries! {
}
query is_ctfe_mir_available(key: DefId) -> bool {
- desc { |tcx| "checking if item has ctfe mir available: `{}`", tcx.def_path_str(key) }
+ desc { |tcx| "checking if item has CTFE MIR available: `{}`", tcx.def_path_str(key) }
cache_on_disk_if { key.is_local() }
separate_provide_extern
}
query is_mir_available(key: DefId) -> bool {
- desc { |tcx| "checking if item has mir available: `{}`", tcx.def_path_str(key) }
+ desc { |tcx| "checking if item has MIR available: `{}`", tcx.def_path_str(key) }
cache_on_disk_if { key.is_local() }
separate_provide_extern
}
query own_existential_vtable_entries(
- key: ty::PolyExistentialTraitRef<'tcx>
+ key: DefId
) -> &'tcx [DefId] {
- desc { |tcx| "finding all existential vtable entries for trait {}", tcx.def_path_str(key.def_id()) }
+ desc { |tcx| "finding all existential vtable entries for trait `{}`", tcx.def_path_str(key) }
}
query vtable_entries(key: ty::PolyTraitRef<'tcx>)
-> &'tcx [ty::VtblEntry<'tcx>] {
- desc { |tcx| "finding all vtable entries for trait {}", tcx.def_path_str(key.def_id()) }
+ desc { |tcx| "finding all vtable entries for trait `{}`", tcx.def_path_str(key.def_id()) }
}
- query vtable_trait_upcasting_coercion_new_vptr_slot(key: (ty::Ty<'tcx>, ty::Ty<'tcx>)) -> Option<usize> {
- desc { |tcx| "finding the slot within vtable for trait object {} vtable ptr during trait upcasting coercion from {} vtable",
+ query vtable_trait_upcasting_coercion_new_vptr_slot(key: (Ty<'tcx>, Ty<'tcx>)) -> Option<usize> {
+ desc { |tcx| "finding the slot within vtable for trait object `{}` vtable ptr during trait upcasting coercion from `{}` vtable",
key.1, key.0 }
}
@@ -1205,13 +1238,13 @@ rustc_queries! {
/// Return all `impl` blocks in the current crate.
query all_local_trait_impls(_: ()) -> &'tcx rustc_data_structures::fx::FxIndexMap<DefId, Vec<LocalDefId>> {
- desc { "local trait impls" }
+ desc { "finding local trait impls" }
}
/// Given a trait `trait_id`, return all known `impl` blocks.
query trait_impls_of(trait_id: DefId) -> ty::trait_def::TraitImpls {
arena_cache
- desc { |tcx| "trait impls of `{}`", tcx.def_path_str(trait_id) }
+ desc { |tcx| "finding trait impls of `{}`", tcx.def_path_str(trait_id) }
}
query specialization_graph_of(trait_id: DefId) -> specialization_graph::Graph {
@@ -1220,7 +1253,7 @@ rustc_queries! {
cache_on_disk_if { true }
}
query object_safety_violations(trait_id: DefId) -> &'tcx [traits::ObjectSafetyViolation] {
- desc { |tcx| "determine object safety of trait `{}`", tcx.def_path_str(trait_id) }
+ desc { |tcx| "determining object safety of trait `{}`", tcx.def_path_str(trait_id) }
}
/// Gets the ParameterEnvironment for a given item; this environment
@@ -1278,7 +1311,7 @@ rustc_queries! {
/// correctly.
query has_structural_eq_impls(ty: Ty<'tcx>) -> bool {
desc {
- "computing whether `{:?}` implements `PartialStructuralEq` and `StructuralEq`",
+ "computing whether `{}` implements `PartialStructuralEq` and `StructuralEq`",
ty
}
}
@@ -1337,13 +1370,13 @@ rustc_queries! {
query dylib_dependency_formats(_: CrateNum)
-> &'tcx [(CrateNum, LinkagePreference)] {
- desc { "dylib dependency formats of crate" }
+ desc { "getting dylib dependency formats of crate" }
separate_provide_extern
}
query dependency_formats(_: ()) -> Lrc<crate::middle::dependency_format::Dependencies> {
arena_cache
- desc { "get the linkage format of all dependencies" }
+ desc { "getting the linkage format of all dependencies" }
}
query is_compiler_builtins(_: CrateNum) -> bool {
@@ -1365,31 +1398,31 @@ rustc_queries! {
}
query is_profiler_runtime(_: CrateNum) -> bool {
fatal_cycle
- desc { "query a crate is `#![profiler_runtime]`" }
+ desc { "checking if a crate is `#![profiler_runtime]`" }
separate_provide_extern
}
query has_ffi_unwind_calls(key: LocalDefId) -> bool {
- desc { |tcx| "check if `{}` contains FFI-unwind calls", tcx.def_path_str(key.to_def_id()) }
+ desc { |tcx| "checking if `{}` contains FFI-unwind calls", tcx.def_path_str(key.to_def_id()) }
cache_on_disk_if { true }
}
query required_panic_strategy(_: CrateNum) -> Option<PanicStrategy> {
fatal_cycle
- desc { "query a crate's required panic strategy" }
+ desc { "getting a crate's required panic strategy" }
separate_provide_extern
}
query panic_in_drop_strategy(_: CrateNum) -> PanicStrategy {
fatal_cycle
- desc { "query a crate's configured panic-in-drop strategy" }
+ desc { "getting a crate's configured panic-in-drop strategy" }
separate_provide_extern
}
query is_no_builtins(_: CrateNum) -> bool {
fatal_cycle
- desc { "test whether a crate has `#![no_builtins]`" }
+ desc { "getting whether a crate has `#![no_builtins]`" }
separate_provide_extern
}
query symbol_mangling_version(_: CrateNum) -> SymbolManglingVersion {
fatal_cycle
- desc { "query a crate's symbol mangling version" }
+ desc { "getting a crate's symbol mangling version" }
separate_provide_extern
}
@@ -1402,9 +1435,9 @@ rustc_queries! {
query specializes(_: (DefId, DefId)) -> bool {
desc { "computing whether impls specialize one another" }
}
- query in_scope_traits_map(_: LocalDefId)
+ query in_scope_traits_map(_: hir::OwnerId)
-> Option<&'tcx FxHashMap<ItemLocalId, Box<[TraitCandidate]>>> {
- desc { "traits in scope at a block" }
+ desc { "getting traits in scope at a block" }
}
query module_reexports(def_id: LocalDefId) -> Option<&'tcx [ModChild]> {
@@ -1417,7 +1450,7 @@ rustc_queries! {
separate_provide_extern
}
- query check_well_formed(key: LocalDefId) -> () {
+ query check_well_formed(key: hir::OwnerId) -> () {
desc { |tcx| "checking that `{}` is well-formed", tcx.def_path_str(key.to_def_id()) }
}
@@ -1554,18 +1587,8 @@ rustc_queries! {
separate_provide_extern
}
- query is_dllimport_foreign_item(def_id: DefId) -> bool {
- desc { |tcx| "is_dllimport_foreign_item({})", tcx.def_path_str(def_id) }
- }
- query is_statically_included_foreign_item(def_id: DefId) -> bool {
- desc { |tcx| "is_statically_included_foreign_item({})", tcx.def_path_str(def_id) }
- }
- query native_library_kind(def_id: DefId)
- -> Option<NativeLibKind> {
- desc { |tcx| "native_library_kind({})", tcx.def_path_str(def_id) }
- }
query native_library(def_id: DefId) -> Option<&'tcx NativeLib> {
- desc { |tcx| "native_library({})", tcx.def_path_str(def_id) }
+ desc { |tcx| "getting the native library for `{}`", tcx.def_path_str(def_id) }
}
/// Does lifetime resolution, but does not descend into trait items. This
@@ -1584,7 +1607,7 @@ rustc_queries! {
arena_cache
desc { "resolving lifetimes" }
}
- query named_region_map(_: LocalDefId) ->
+ query named_region_map(_: hir::OwnerId) ->
Option<&'tcx FxHashMap<ItemLocalId, Region>> {
desc { "looking up a named region" }
}
@@ -1600,7 +1623,7 @@ rustc_queries! {
desc { "looking up lifetime defaults for generic parameter `{}`", tcx.def_path_str(key) }
separate_provide_extern
}
- query late_bound_vars_map(_: LocalDefId)
+ query late_bound_vars_map(_: hir::OwnerId)
-> Option<&'tcx FxHashMap<ItemLocalId, Vec<ty::BoundVariableKind>>> {
desc { "looking up late bound vars" }
}
@@ -1620,14 +1643,13 @@ rustc_queries! {
separate_provide_extern
}
- /// Computes the set of modules from which this type is visibly uninhabited.
- /// To check whether a type is uninhabited at all (not just from a given module), you could
- /// check whether the forest is empty.
- query type_uninhabited_from(
- key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>
- ) -> ty::inhabitedness::DefIdForest<'tcx> {
- desc { "computing the inhabitedness of `{:?}`", key }
- remap_env_constness
+ query inhabited_predicate_adt(key: DefId) -> ty::inhabitedness::InhabitedPredicate<'tcx> {
+ desc { "computing the uninhabited predicate of `{:?}`", key }
+ }
+
+ /// Do not call this query directly: invoke `Ty::inhabited_predicate` instead.
+ query inhabited_predicate_type(key: Ty<'tcx>) -> ty::inhabitedness::InhabitedPredicate<'tcx> {
+ desc { "computing the uninhabited predicate of `{}`", key }
}
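
The `DefIdForest`-based `type_uninhabited_from` query is replaced by `inhabited_predicate_adt`/`inhabited_predicate_type`, which build a composable predicate and answer "inhabited from module M?" on demand. A rough, self-contained sketch of that shape, using made-up names rather than the real `ty::inhabitedness::InhabitedPredicate`:

    #[derive(Clone, Debug)]
    enum InhabitedPred {
        True,
        False,
        NotInModule(u32),        // inhabited unless viewed from this (made-up) module id
        All(Vec<InhabitedPred>), // struct/tuple: every field must be inhabited
        Any(Vec<InhabitedPred>), // enum: some variant must be inhabited
    }

    impl InhabitedPred {
        fn apply(&self, module: u32) -> bool {
            match self {
                InhabitedPred::True => true,
                InhabitedPred::False => false,
                InhabitedPred::NotInModule(m) => *m != module,
                InhabitedPred::All(ps) => ps.iter().all(|p| p.apply(module)),
                InhabitedPred::Any(ps) => ps.iter().any(|p| p.apply(module)),
            }
        }
    }

    fn main() {
        // A struct whose only field has an uninhabited type and is private to module 1:
        // visibly uninhabited from inside module 1, treated as inhabited elsewhere.
        let pred = InhabitedPred::All(vec![InhabitedPred::NotInModule(1)]);
        assert!(pred.apply(0));
        assert!(!pred.apply(1));
    }
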
query dep_kind(_: CrateNum) -> CrateDepKind {
@@ -1665,7 +1687,7 @@ rustc_queries! {
}
/// Whether the function is an intrinsic
query is_intrinsic(def_id: DefId) -> bool {
- desc { |tcx| "is_intrinsic({})", tcx.def_path_str(def_id) }
+ desc { |tcx| "checking whether `{}` is an intrinsic", tcx.def_path_str(def_id) }
separate_provide_extern
}
/// Returns the lang items defined in another crate by loading it from metadata.
@@ -1732,12 +1754,12 @@ rustc_queries! {
/// is marked as a private dependency
query is_private_dep(c: CrateNum) -> bool {
eval_always
- desc { "check whether crate {} is a private dependency", c }
+ desc { "checking whether crate `{}` is a private dependency", c }
separate_provide_extern
}
query allocator_kind(_: ()) -> Option<AllocatorKind> {
eval_always
- desc { "allocator kind for the current crate" }
+ desc { "getting the allocator kind for the current crate" }
}
query upvars_mentioned(def_id: DefId) -> Option<&'tcx FxIndexMap<hir::HirId, hir::Upvar>> {
@@ -1750,7 +1772,7 @@ rustc_queries! {
desc { "looking up all possibly unused extern crates" }
}
query names_imported_by_glob_use(def_id: LocalDefId) -> &'tcx FxHashSet<Symbol> {
- desc { |tcx| "names_imported_by_glob_use for `{}`", tcx.def_path_str(def_id.to_def_id()) }
+ desc { |tcx| "finding names imported by glob use for `{}`", tcx.def_path_str(def_id.to_def_id()) }
}
query stability_index(_: ()) -> stability::Index {
@@ -1776,7 +1798,7 @@ rustc_queries! {
/// correspond to a publicly visible symbol in `cnum` machine code.
/// - The `exported_symbols` sets of different crates do not intersect.
query exported_symbols(cnum: CrateNum) -> &'tcx [(ExportedSymbol<'tcx>, SymbolExportInfo)] {
- desc { "exported_symbols" }
+ desc { "collecting exported symbols for crate `{}`", cnum}
cache_on_disk_if { *cnum == LOCAL_CRATE }
separate_provide_extern
}
@@ -1785,6 +1807,7 @@ rustc_queries! {
eval_always
desc { "collect_and_partition_mono_items" }
}
+
query is_codegened_item(def_id: DefId) -> bool {
desc { |tcx| "determining whether `{}` needs codegen", tcx.def_path_str(def_id) }
}
@@ -1792,12 +1815,13 @@ rustc_queries! {
/// All items participating in code generation together with items inlined into them.
query codegened_and_inlined_items(_: ()) -> &'tcx DefIdSet {
eval_always
- desc { "codegened_and_inlined_items" }
+ desc { "collecting codegened and inlined items" }
}
- query codegen_unit(_: Symbol) -> &'tcx CodegenUnit<'tcx> {
- desc { "codegen_unit" }
+ query codegen_unit(sym: Symbol) -> &'tcx CodegenUnit<'tcx> {
+ desc { "getting codegen unit `{sym}`" }
}
+
query unused_generic_params(key: ty::InstanceDef<'tcx>) -> FiniteBitSet<u32> {
cache_on_disk_if { key.def_id().is_local() }
desc {
@@ -1806,6 +1830,7 @@ rustc_queries! {
}
separate_provide_extern
}
+
query backend_optimization_level(_: ()) -> OptLevel {
desc { "optimization level used by backend" }
}
@@ -1816,7 +1841,7 @@ rustc_queries! {
/// has been destroyed.
query output_filenames(_: ()) -> &'tcx Arc<OutputFilenames> {
eval_always
- desc { "output_filenames" }
+ desc { "getting output filenames" }
}
/// Do not call this query directly: invoke `normalize` instead.
@@ -1826,7 +1851,7 @@ rustc_queries! {
&'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, NormalizationResult<'tcx>>>,
NoSolution,
> {
- desc { "normalizing `{:?}`", goal }
+ desc { "normalizing `{}`", goal.value.value }
remap_env_constness
}
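
The switch from `{:?} goal` to `{} goal.value.value` in these descriptions follows the nesting of canonical goals: the outer `Canonical` wraps a `ParamEnvAnd`, whose `value` is the payload actually worth printing (the `type_op_normalize_*` goals below add one more wrapper, hence `.value.value.value`). A small stand-alone illustration with simplified stand-in structs; only the field names mirror the real ones:

    struct Canonical<V> {
        // canonical variable info elided
        value: V,
    }

    struct ParamEnvAnd<T> {
        param_env: (), // stand-in for ty::ParamEnv<'tcx>
        value: T,
    }

    fn main() {
        let goal = Canonical { value: ParamEnvAnd { param_env: (), value: "Vec<T>" } };
        // `{:?}` on `goal` would dump the whole canonicalized wrapper; the payload
        // being normalized is two `.value` hops in.
        println!("normalizing `{}`", goal.value.value);
    }
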
@@ -1838,21 +1863,13 @@ rustc_queries! {
remap_env_constness
}
- /// Do not call this query directly: invoke `try_normalize_erasing_regions` instead.
- query try_normalize_mir_const_after_erasing_regions(
- goal: ParamEnvAnd<'tcx, mir::ConstantKind<'tcx>>
- ) -> Result<mir::ConstantKind<'tcx>, NoSolution> {
- desc { "normalizing `{}`", goal.value }
- remap_env_constness
- }
-
query implied_outlives_bounds(
goal: CanonicalTyGoal<'tcx>
) -> Result<
&'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, Vec<OutlivesBound<'tcx>>>>,
NoSolution,
> {
- desc { "computing implied outlives bounds for `{:?}`", goal }
+ desc { "computing implied outlives bounds for `{}`", goal.value.value }
remap_env_constness
}
@@ -1864,7 +1881,7 @@ rustc_queries! {
&'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, DropckOutlivesResult<'tcx>>>,
NoSolution,
> {
- desc { "computing dropck types for `{:?}`", goal }
+ desc { "computing dropck types for `{}`", goal.value.value }
remap_env_constness
}
@@ -1892,7 +1909,7 @@ rustc_queries! {
&'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, ()>>,
NoSolution,
> {
- desc { "evaluating `type_op_ascribe_user_type` `{:?}`", goal }
+ desc { "evaluating `type_op_ascribe_user_type` `{:?}`", goal.value.value }
remap_env_constness
}
@@ -1903,7 +1920,7 @@ rustc_queries! {
&'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, ()>>,
NoSolution,
> {
- desc { "evaluating `type_op_eq` `{:?}`", goal }
+ desc { "evaluating `type_op_eq` `{:?}`", goal.value.value }
remap_env_constness
}
@@ -1914,7 +1931,7 @@ rustc_queries! {
&'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, ()>>,
NoSolution,
> {
- desc { "evaluating `type_op_subtype` `{:?}`", goal }
+ desc { "evaluating `type_op_subtype` `{:?}`", goal.value.value }
remap_env_constness
}
@@ -1925,7 +1942,7 @@ rustc_queries! {
&'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, ()>>,
NoSolution,
> {
- desc { "evaluating `type_op_prove_predicate` `{:?}`", goal }
+ desc { "evaluating `type_op_prove_predicate` `{:?}`", goal.value.value }
}
/// Do not call this query directly: part of the `Normalize` type-op
@@ -1935,7 +1952,7 @@ rustc_queries! {
&'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, Ty<'tcx>>>,
NoSolution,
> {
- desc { "normalizing `{:?}`", goal }
+ desc { "normalizing `{}`", goal.value.value.value }
remap_env_constness
}
@@ -1946,7 +1963,7 @@ rustc_queries! {
&'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, ty::Predicate<'tcx>>>,
NoSolution,
> {
- desc { "normalizing `{:?}`", goal }
+ desc { "normalizing `{:?}`", goal.value.value.value }
remap_env_constness
}
@@ -1957,7 +1974,7 @@ rustc_queries! {
&'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, ty::PolyFnSig<'tcx>>>,
NoSolution,
> {
- desc { "normalizing `{:?}`", goal }
+ desc { "normalizing `{:?}`", goal.value.value.value }
remap_env_constness
}
@@ -1968,20 +1985,20 @@ rustc_queries! {
&'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, ty::FnSig<'tcx>>>,
NoSolution,
> {
- desc { "normalizing `{:?}`", goal }
+ desc { "normalizing `{:?}`", goal.value.value.value }
remap_env_constness
}
query subst_and_check_impossible_predicates(key: (DefId, SubstsRef<'tcx>)) -> bool {
desc { |tcx|
- "impossible substituted predicates:`{}`",
+ "checking impossible substituted predicates: `{}`",
tcx.def_path_str(key.0)
}
}
query is_impossible_method(key: (DefId, DefId)) -> bool {
desc { |tcx|
- "checking if {} is impossible to call within {}",
+ "checking if `{}` is impossible to call within `{}`",
tcx.def_path_str(key.1),
tcx.def_path_str(key.0),
}
@@ -1990,7 +2007,7 @@ rustc_queries! {
query method_autoderef_steps(
goal: CanonicalTyGoal<'tcx>
) -> MethodAutoderefStepsResult<'tcx> {
- desc { "computing autoderef types for `{:?}`", goal }
+ desc { "computing autoderef types for `{}`", goal.value.value }
remap_env_constness
}
@@ -2038,7 +2055,7 @@ rustc_queries! {
}
query normalize_opaque_types(key: &'tcx ty::List<ty::Predicate<'tcx>>) -> &'tcx ty::List<ty::Predicate<'tcx>> {
- desc { "normalizing opaque types in {:?}", key }
+ desc { "normalizing opaque types in `{:?}`", key }
}
/// Checks whether a type is definitely uninhabited. This is
@@ -2048,7 +2065,7 @@ rustc_queries! {
/// will be `Abi::Uninhabited`. (Note that uninhabited types may have nonzero
/// size, to account for partial initialisation. See #49298 for details.)
query conservative_is_privately_uninhabited(key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool {
- desc { "conservatively checking if {:?} is privately uninhabited", key }
+ desc { "conservatively checking if `{}` is privately uninhabited", key.value }
remap_env_constness
}
@@ -2068,7 +2085,7 @@ rustc_queries! {
arena_cache
eval_always
no_hash
- desc { "performing HIR wf-checking for predicate {:?} at item {:?}", key.0, key.1 }
+ desc { "performing HIR wf-checking for predicate `{:?}` at item `{:?}`", key.0, key.1 }
}
@@ -2087,10 +2104,21 @@ rustc_queries! {
}
query permits_uninit_init(key: TyAndLayout<'tcx>) -> bool {
- desc { "checking to see if {:?} permits being left uninit", key.ty }
+ desc { "checking to see if `{}` permits being left uninit", key.ty }
}
query permits_zero_init(key: TyAndLayout<'tcx>) -> bool {
- desc { "checking to see if {:?} permits being left zeroed", key.ty }
+ desc { "checking to see if `{}` permits being left zeroed", key.ty }
+ }
+
+ query compare_assoc_const_impl_item_with_trait_item(
+ key: (LocalDefId, DefId)
+ ) -> Result<(), ErrorGuaranteed> {
+ desc { |tcx| "checking assoc const `{}` has the same type as trait item", tcx.def_path_str(key.0.to_def_id()) }
+ }
+
+ query deduced_param_attrs(def_id: DefId) -> &'tcx [ty::DeducedParamAttrs] {
+ desc { |tcx| "deducing parameter attributes for {}", tcx.def_path_str(def_id) }
+ separate_provide_extern
}
}
diff --git a/compiler/rustc_middle/src/thir.rs b/compiler/rustc_middle/src/thir.rs
index 86b415050..ea7a507d7 100644
--- a/compiler/rustc_middle/src/thir.rs
+++ b/compiler/rustc_middle/src/thir.rs
@@ -203,7 +203,7 @@ pub enum StmtKind<'tcx> {
/// `let pat: ty = <INIT>`
initializer: Option<ExprId>,
- /// `let pat: ty = <INIT> else { <ELSE> }
+ /// `let pat: ty = <INIT> else { <ELSE> }`
else_block: Option<BlockId>,
/// The lint level for this `let` statement.
@@ -848,16 +848,13 @@ impl<'tcx> fmt::Display for Pat<'tcx> {
#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))]
mod size_asserts {
use super::*;
- // These are in alphabetical order, which is easy to maintain.
+ // tidy-alphabetical-start
static_assert_size!(Block, 56);
static_assert_size!(Expr<'_>, 64);
static_assert_size!(ExprKind<'_>, 40);
- #[cfg(not(bootstrap))]
static_assert_size!(Pat<'_>, 72);
- #[cfg(not(bootstrap))]
static_assert_size!(PatKind<'_>, 56);
- #[cfg(not(bootstrap))]
static_assert_size!(Stmt<'_>, 48);
- #[cfg(not(bootstrap))]
static_assert_size!(StmtKind<'_>, 40);
+ // tidy-alphabetical-end
}
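
For readers unfamiliar with these asserts: `static_assert_size!` turns a size regression into a compile error. A sketch of the underlying mechanism (not the exact macro expansion used in rustc):

    struct Expr([u8; 64]); // stand-in for thir::Expr<'_>

    // Fails to compile as soon as the two lengths disagree.
    const _: [(); 64] = [(); std::mem::size_of::<Expr>()];

    fn main() {}
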
diff --git a/compiler/rustc_middle/src/traits/mod.rs b/compiler/rustc_middle/src/traits/mod.rs
index 68a7af0b8..e73d44bbb 100644
--- a/compiler/rustc_middle/src/traits/mod.rs
+++ b/compiler/rustc_middle/src/traits/mod.rs
@@ -598,11 +598,6 @@ pub type SelectionResult<'tcx, T> = Result<Option<T>, SelectionError<'tcx>>;
/// // type parameters, ImplSource will carry resolutions for those as well:
/// concrete.clone(); // ImplSource(Impl_1, [ImplSource(Impl_2, [ImplSource(Impl_3)])])
///
-/// // Case A: ImplSource points at a specific impl. Only possible when
-/// // type is concretely known. If the impl itself has bounded
-/// // type parameters, ImplSource will carry resolutions for those as well:
-/// concrete.clone(); // ImplSource(Impl_1, [ImplSource(Impl_2, [ImplSource(Impl_3)])])
-///
/// // Case B: ImplSource must be provided by caller. This applies when
/// // type is a type parameter.
/// param.clone(); // ImplSource::Param
@@ -664,10 +659,6 @@ pub enum ImplSource<'tcx, N> {
/// ImplSource for a `const Drop` implementation.
ConstDestruct(ImplSourceConstDestructData<N>),
-
- /// ImplSource for a `std::marker::Tuple` implementation.
- /// This has no nested predicates ever, so no data.
- Tuple,
}
impl<'tcx, N> ImplSource<'tcx, N> {
@@ -682,8 +673,7 @@ impl<'tcx, N> ImplSource<'tcx, N> {
ImplSource::Object(d) => d.nested,
ImplSource::FnPointer(d) => d.nested,
ImplSource::DiscriminantKind(ImplSourceDiscriminantKindData)
- | ImplSource::Pointee(ImplSourcePointeeData)
- | ImplSource::Tuple => Vec::new(),
+ | ImplSource::Pointee(ImplSourcePointeeData) => vec![],
ImplSource::TraitAlias(d) => d.nested,
ImplSource::TraitUpcasting(d) => d.nested,
ImplSource::ConstDestruct(i) => i.nested,
@@ -701,8 +691,7 @@ impl<'tcx, N> ImplSource<'tcx, N> {
ImplSource::Object(d) => &d.nested,
ImplSource::FnPointer(d) => &d.nested,
ImplSource::DiscriminantKind(ImplSourceDiscriminantKindData)
- | ImplSource::Pointee(ImplSourcePointeeData)
- | ImplSource::Tuple => &[],
+ | ImplSource::Pointee(ImplSourcePointeeData) => &[],
ImplSource::TraitAlias(d) => &d.nested,
ImplSource::TraitUpcasting(d) => &d.nested,
ImplSource::ConstDestruct(i) => &i.nested,
@@ -769,7 +758,6 @@ impl<'tcx, N> ImplSource<'tcx, N> {
nested: i.nested.into_iter().map(f).collect(),
})
}
- ImplSource::Tuple => ImplSource::Tuple,
}
}
}
diff --git a/compiler/rustc_middle/src/traits/query.rs b/compiler/rustc_middle/src/traits/query.rs
index 0e6cacb9f..fb152b63f 100644
--- a/compiler/rustc_middle/src/traits/query.rs
+++ b/compiler/rustc_middle/src/traits/query.rs
@@ -8,8 +8,9 @@
use crate::error::DropCheckOverflow;
use crate::infer::canonical::{Canonical, QueryResponse};
use crate::ty::error::TypeError;
-use crate::ty::subst::GenericArg;
+use crate::ty::subst::{GenericArg, SubstsRef};
use crate::ty::{self, Ty, TyCtxt};
+use rustc_hir::def_id::DefId;
use rustc_span::source_map::Span;
use std::iter::FromIterator;
@@ -219,4 +220,5 @@ pub enum OutlivesBound<'tcx> {
RegionSubRegion(ty::Region<'tcx>, ty::Region<'tcx>),
RegionSubParam(ty::Region<'tcx>, ty::ParamTy),
RegionSubProjection(ty::Region<'tcx>, ty::ProjectionTy<'tcx>),
+ RegionSubOpaque(ty::Region<'tcx>, DefId, SubstsRef<'tcx>),
}
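
The new `RegionSubOpaque` variant lets implied outlives bounds relate a region to an opaque type and its substitutions, alongside the existing parameter and projection cases. The kind of source code where such a bound shows up (illustrative only; the exact direction and encoding of the bound are internal):

    use std::fmt::Debug;

    // The opaque return type captures `'a`, so callers get outlives information
    // connecting `'a` and the opaque type.
    fn captures<'a>(x: &'a u8) -> impl Debug + 'a {
        x
    }

    fn main() {
        let v = 7u8;
        println!("{:?}", captures(&v));
    }
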
diff --git a/compiler/rustc_middle/src/traits/select.rs b/compiler/rustc_middle/src/traits/select.rs
index 53af3e905..85ead3171 100644
--- a/compiler/rustc_middle/src/traits/select.rs
+++ b/compiler/rustc_middle/src/traits/select.rs
@@ -115,12 +115,13 @@ pub enum SelectionCandidate<'tcx> {
ParamCandidate(ty::PolyTraitPredicate<'tcx>),
ImplCandidate(DefId),
- AutoImplCandidate(DefId),
+ AutoImplCandidate,
/// This is a trait matching with a projected type as `Self`, and we found
/// an applicable bound in the trait definition. The `usize` is an index
- /// into the list returned by `tcx.item_bounds`.
- ProjectionCandidate(usize),
+ /// into the list returned by `tcx.item_bounds`. The constness is the
+ /// constness of the bound in the trait.
+ ProjectionCandidate(usize, ty::BoundConstness),
/// Implementation of a `Fn`-family trait by one of the anonymous types
/// generated for an `||` expression.
@@ -142,7 +143,7 @@ pub enum SelectionCandidate<'tcx> {
/// Builtin implementation of `Pointee`.
PointeeCandidate,
- TraitAliasCandidate(DefId),
+ TraitAliasCandidate,
/// Matching `dyn Trait` with a supertrait of `Trait`. The index is the
/// position in the iterator returned by
@@ -160,9 +161,6 @@ pub enum SelectionCandidate<'tcx> {
/// Implementation of `const Destruct`, optionally from a custom `impl const Drop`.
ConstDestructCandidate(Option<DefId>),
-
- /// Witnesses the fact that a type is a tuple.
- TupleCandidate,
}
/// The result of trait evaluation. The order is important
diff --git a/compiler/rustc_middle/src/traits/structural_impls.rs b/compiler/rustc_middle/src/traits/structural_impls.rs
index c526344e1..7fbd57ac7 100644
--- a/compiler/rustc_middle/src/traits/structural_impls.rs
+++ b/compiler/rustc_middle/src/traits/structural_impls.rs
@@ -34,8 +34,6 @@ impl<'tcx, N: fmt::Debug> fmt::Debug for traits::ImplSource<'tcx, N> {
super::ImplSource::TraitUpcasting(ref d) => write!(f, "{:?}", d),
super::ImplSource::ConstDestruct(ref d) => write!(f, "{:?}", d),
-
- super::ImplSource::Tuple => write!(f, "ImplSource::Tuple"),
}
}
}
diff --git a/compiler/rustc_middle/src/ty/abstract_const.rs b/compiler/rustc_middle/src/ty/abstract_const.rs
index 8f79b4705..1aa4df778 100644
--- a/compiler/rustc_middle/src/ty/abstract_const.rs
+++ b/compiler/rustc_middle/src/ty/abstract_const.rs
@@ -1,7 +1,7 @@
//! A subset of a mir body used for const evaluatability checking.
use crate::mir;
use crate::ty::visit::TypeVisitable;
-use crate::ty::{self, subst::Subst, DelaySpanBugEmitted, EarlyBinder, SubstsRef, Ty, TyCtxt};
+use crate::ty::{self, DelaySpanBugEmitted, EarlyBinder, SubstsRef, Ty, TyCtxt};
use rustc_errors::ErrorGuaranteed;
use rustc_hir::def_id::DefId;
use std::cmp;
@@ -30,7 +30,7 @@ pub struct AbstractConst<'tcx> {
impl<'tcx> AbstractConst<'tcx> {
pub fn new(
tcx: TyCtxt<'tcx>,
- uv: ty::Unevaluated<'tcx, ()>,
+ uv: ty::UnevaluatedConst<'tcx>,
) -> Result<Option<AbstractConst<'tcx>>, ErrorGuaranteed> {
let inner = tcx.thir_abstract_const_opt_const_arg(uv.def)?;
debug!("AbstractConst::new({:?}) = {:?}", uv, inner);
@@ -71,16 +71,16 @@ impl<'tcx> AbstractConst<'tcx> {
walk_abstract_const::<!, _>(tcx, self, |node| {
match node.root(tcx) {
Node::Leaf(leaf) => {
- if leaf.has_infer_types_or_consts() {
+ if leaf.has_non_region_infer() {
failure_kind = FailureKind::MentionsInfer;
- } else if leaf.has_param_types_or_consts() {
+ } else if leaf.has_non_region_param() {
failure_kind = cmp::min(failure_kind, FailureKind::MentionsParam);
}
}
Node::Cast(_, _, ty) => {
- if ty.has_infer_types_or_consts() {
+ if ty.has_non_region_infer() {
failure_kind = FailureKind::MentionsInfer;
- } else if ty.has_param_types_or_consts() {
+ } else if ty.has_non_region_param() {
failure_kind = cmp::min(failure_kind, FailureKind::MentionsParam);
}
}
diff --git a/compiler/rustc_middle/src/ty/adjustment.rs b/compiler/rustc_middle/src/ty/adjustment.rs
index b809f1767..4682ac96b 100644
--- a/compiler/rustc_middle/src/ty/adjustment.rs
+++ b/compiler/rustc_middle/src/ty/adjustment.rs
@@ -101,6 +101,9 @@ pub enum Adjust<'tcx> {
Borrow(AutoBorrow<'tcx>),
Pointer(PointerCast),
+
+ /// Cast into a dyn* object.
+ DynStar,
}
/// An overloaded autoderef step, representing a `Deref(Mut)::deref(_mut)`
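
`Adjust::DynStar` records the new `dyn*` coercion. A rough source-level sketch of the kind of code that produces it; `dyn*` was nightly-only and still in flux at this point, so the exact syntax and coercion rules may not match a given toolchain:

    #![feature(dyn_star)]
    #![allow(incomplete_features)]

    use std::fmt::Debug;

    fn main() {
        let x: usize = 42;
        // A `dyn*` object packs a pointer-sized value inline next to the vtable
        // pointer, instead of going through a wide `&dyn`/`Box<dyn>` pointer.
        let d: dyn* Debug = x;
        println!("{d:?}");
    }
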
diff --git a/compiler/rustc_middle/src/ty/adt.rs b/compiler/rustc_middle/src/ty/adt.rs
index 2e596b275..b0a2412ab 100644
--- a/compiler/rustc_middle/src/ty/adt.rs
+++ b/compiler/rustc_middle/src/ty/adt.rs
@@ -26,9 +26,6 @@ use super::{
Destructor, FieldDef, GenericPredicates, ReprOptions, Ty, TyCtxt, VariantDef, VariantDiscr,
};
-#[derive(Copy, Clone, HashStable, Debug)]
-pub struct AdtSizedConstraint<'tcx>(pub &'tcx [Ty<'tcx>]);
-
bitflags! {
#[derive(HashStable, TyEncodable, TyDecodable)]
pub struct AdtFlags: u32 {
@@ -332,13 +329,13 @@ impl<'tcx> AdtDef<'tcx> {
self.flags().contains(AdtFlags::IS_PHANTOM_DATA)
}
- /// Returns `true` if this is Box<T>.
+ /// Returns `true` if this is `Box<T>`.
#[inline]
pub fn is_box(self) -> bool {
self.flags().contains(AdtFlags::IS_BOX)
}
- /// Returns `true` if this is UnsafeCell<T>.
+ /// Returns `true` if this is `UnsafeCell<T>`.
#[inline]
pub fn is_unsafe_cell(self) -> bool {
self.flags().contains(AdtFlags::IS_UNSAFE_CELL)
@@ -438,7 +435,8 @@ impl<'tcx> AdtDef<'tcx> {
| Res::Def(DefKind::Union, _)
| Res::Def(DefKind::TyAlias, _)
| Res::Def(DefKind::AssocTy, _)
- | Res::SelfTy { .. }
+ | Res::SelfTyParam { .. }
+ | Res::SelfTyAlias { .. }
| Res::SelfCtor(..) => self.non_enum_variant(),
_ => bug!("unexpected res {:?} in variant_of_res", res),
}
@@ -457,11 +455,9 @@ impl<'tcx> AdtDef<'tcx> {
Some(Discr { val: b, ty })
} else {
info!("invalid enum discriminant: {:#?}", val);
- crate::mir::interpret::struct_error(
- tcx.at(tcx.def_span(expr_did)),
- "constant evaluation of enum discriminant resulted in non-integer",
- )
- .emit();
+ tcx.sess.emit_err(crate::error::ConstEvalNonIntError {
+ span: tcx.def_span(expr_did),
+ });
None
}
}
@@ -564,6 +560,13 @@ impl<'tcx> AdtDef<'tcx> {
/// Due to normalization being eager, this applies even if
/// the associated type is behind a pointer (e.g., issue #31299).
pub fn sized_constraint(self, tcx: TyCtxt<'tcx>) -> ty::EarlyBinder<&'tcx [Ty<'tcx>]> {
- ty::EarlyBinder(tcx.adt_sized_constraint(self.did()).0)
+ ty::EarlyBinder(tcx.adt_sized_constraint(self.did()))
}
}
+
+#[derive(Clone, Copy, Debug)]
+#[derive(HashStable)]
+pub enum Representability {
+ Representable,
+ Infinite,
+}
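
The new `Representability` enum distinguishes types whose layout can exist from those that would need infinite size. In source terms:

    // `Infinite`: a type that contains itself by value (rejected with E0072):
    //
    //     struct List {
    //         value: u32,
    //         next: List, // error[E0072]: recursive type has infinite size
    //     }
    //
    // `Representable`: indirection breaks the cycle, so the size is finite.
    struct List {
        value: u32,
        next: Option<Box<List>>,
    }

    fn main() {
        let tail = List { value: 1, next: None };
        let _head = List { value: 0, next: Some(Box::new(tail)) };
    }
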
diff --git a/compiler/rustc_middle/src/ty/cast.rs b/compiler/rustc_middle/src/ty/cast.rs
index 981e2d3b6..e65585955 100644
--- a/compiler/rustc_middle/src/ty/cast.rs
+++ b/compiler/rustc_middle/src/ty/cast.rs
@@ -2,6 +2,7 @@
// typeck and codegen.
use crate::ty::{self, Ty};
+use rustc_middle::mir;
use rustc_macros::HashStable;
@@ -38,7 +39,7 @@ pub enum CastTy<'tcx> {
}
/// Cast Kind. See [RFC 401](https://rust-lang.github.io/rfcs/0401-coercions.html)
-/// (or librustc_typeck/check/cast.rs).
+/// (or rustc_hir_analysis/check/cast.rs).
#[derive(Copy, Clone, Debug, TyEncodable, TyDecodable, HashStable)]
pub enum CastKind {
CoercionCast,
@@ -75,3 +76,28 @@ impl<'tcx> CastTy<'tcx> {
}
}
}
+
+/// Returns `mir::CastKind` from the given parameters.
+pub fn mir_cast_kind<'tcx>(from_ty: Ty<'tcx>, cast_ty: Ty<'tcx>) -> mir::CastKind {
+ let from = CastTy::from_ty(from_ty);
+ let cast = CastTy::from_ty(cast_ty);
+ let cast_kind = match (from, cast) {
+ (Some(CastTy::Ptr(_) | CastTy::FnPtr), Some(CastTy::Int(_))) => {
+ mir::CastKind::PointerExposeAddress
+ }
+ (Some(CastTy::Int(_)), Some(CastTy::Ptr(_))) => mir::CastKind::PointerFromExposedAddress,
+ (_, Some(CastTy::DynStar)) => mir::CastKind::DynStar,
+ (Some(CastTy::Int(_)), Some(CastTy::Int(_))) => mir::CastKind::IntToInt,
+ (Some(CastTy::FnPtr), Some(CastTy::Ptr(_))) => mir::CastKind::FnPtrToPtr,
+
+ (Some(CastTy::Float), Some(CastTy::Int(_))) => mir::CastKind::FloatToInt,
+ (Some(CastTy::Int(_)), Some(CastTy::Float)) => mir::CastKind::IntToFloat,
+ (Some(CastTy::Float), Some(CastTy::Float)) => mir::CastKind::FloatToFloat,
+ (Some(CastTy::Ptr(_)), Some(CastTy::Ptr(_))) => mir::CastKind::PtrToPtr,
+
+ (_, _) => {
+ bug!("Attempting to cast non-castable types {:?} and {:?}", from_ty, cast_ty)
+ }
+ };
+ cast_kind
+}
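
For orientation, these are the source-level casts that the arms of `mir_cast_kind` classify (`mir_cast_kind` itself works on `Ty<'tcx>`, not on surface syntax; the nightly-only `DynStar` arm is omitted):

    fn main() {
        let x: i32 = -1;
        let p: *const i32 = &x;

        let _addr = p as usize;              // PointerExposeAddress
        let _back = _addr as *const i32;     // PointerFromExposedAddress
        let _wide = x as i64;                // IntToInt
        let _f = x as f64;                   // IntToFloat
        let _i = 3.7f64 as i32;              // FloatToInt
        let _g = 3.7f64 as f32;              // FloatToFloat
        let _q = p as *const u8;             // PtrToPtr
        let _fp = main as fn() as *const (); // FnPtrToPtr
    }
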
diff --git a/compiler/rustc_middle/src/ty/codec.rs b/compiler/rustc_middle/src/ty/codec.rs
index 51137c526..14ec88b7e 100644
--- a/compiler/rustc_middle/src/ty/codec.rs
+++ b/compiler/rustc_middle/src/ty/codec.rs
@@ -455,6 +455,7 @@ impl_arena_copy_decoder! {<'tcx>
rustc_span::def_id::DefId,
rustc_span::def_id::LocalDefId,
(rustc_middle::middle::exported_symbols::ExportedSymbol<'tcx>, rustc_middle::middle::exported_symbols::SymbolExportInfo),
+ ty::DeducedParamAttrs,
}
#[macro_export]
diff --git a/compiler/rustc_middle/src/ty/consts.rs b/compiler/rustc_middle/src/ty/consts.rs
index 339ff4d35..f998e6083 100644
--- a/compiler/rustc_middle/src/ty/consts.rs
+++ b/compiler/rustc_middle/src/ty/consts.rs
@@ -1,9 +1,6 @@
use crate::mir::interpret::LitToConstInput;
use crate::mir::ConstantKind;
-use crate::ty::{
- self, InlineConstSubsts, InlineConstSubstsParts, InternalSubsts, ParamEnv, ParamEnvAnd, Ty,
- TyCtxt, TypeVisitable,
-};
+use crate::ty::{self, InternalSubsts, ParamEnv, ParamEnvAnd, Ty, TyCtxt};
use rustc_data_structures::intern::Interned;
use rustc_errors::ErrorGuaranteed;
use rustc_hir as hir;
@@ -81,10 +78,9 @@ impl<'tcx> Const<'tcx> {
match Self::try_eval_lit_or_param(tcx, ty, expr) {
Some(v) => v,
None => tcx.mk_const(ty::ConstS {
- kind: ty::ConstKind::Unevaluated(ty::Unevaluated {
+ kind: ty::ConstKind::Unevaluated(ty::UnevaluatedConst {
def: def.to_global(),
substs: InternalSubsts::identity_for_item(tcx, def.did.to_def_id()),
- promoted: (),
}),
ty,
}),
@@ -151,46 +147,6 @@ impl<'tcx> Const<'tcx> {
}
}
- pub fn from_inline_const(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> Self {
- debug!("Const::from_inline_const(def_id={:?})", def_id);
-
- let hir_id = tcx.hir().local_def_id_to_hir_id(def_id);
-
- let body_id = match tcx.hir().get(hir_id) {
- hir::Node::AnonConst(ac) => ac.body,
- _ => span_bug!(
- tcx.def_span(def_id.to_def_id()),
- "from_inline_const can only process anonymous constants"
- ),
- };
-
- let expr = &tcx.hir().body(body_id).value;
-
- let ty = tcx.typeck(def_id).node_type(hir_id);
-
- let ret = match Self::try_eval_lit_or_param(tcx, ty, expr) {
- Some(v) => v,
- None => {
- let typeck_root_def_id = tcx.typeck_root_def_id(def_id.to_def_id());
- let parent_substs =
- tcx.erase_regions(InternalSubsts::identity_for_item(tcx, typeck_root_def_id));
- let substs =
- InlineConstSubsts::new(tcx, InlineConstSubstsParts { parent_substs, ty })
- .substs;
- tcx.mk_const(ty::ConstS {
- kind: ty::ConstKind::Unevaluated(ty::Unevaluated {
- def: ty::WithOptConstParam::unknown(def_id).to_global(),
- substs,
- promoted: (),
- }),
- ty,
- })
- }
- };
- debug_assert!(!ret.has_free_regions());
- ret
- }
-
/// Interns the given value as a constant.
#[inline]
pub fn from_value(tcx: TyCtxt<'tcx>, val: ty::ValTree<'tcx>, ty: Ty<'tcx>) -> Self {
@@ -307,6 +263,10 @@ impl<'tcx> Const<'tcx> {
self.try_eval_usize(tcx, param_env)
.unwrap_or_else(|| bug!("expected usize, got {:#?}", self))
}
+
+ pub fn is_ct_infer(self) -> bool {
+ matches!(self.kind(), ty::ConstKind::Infer(_))
+ }
}
pub fn const_param_default<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> Const<'tcx> {
diff --git a/compiler/rustc_middle/src/ty/consts/kind.rs b/compiler/rustc_middle/src/ty/consts/kind.rs
index 455015280..4ab761e07 100644
--- a/compiler/rustc_middle/src/ty/consts/kind.rs
+++ b/compiler/rustc_middle/src/ty/consts/kind.rs
@@ -1,10 +1,11 @@
use std::convert::TryInto;
+use crate::mir;
use crate::mir::interpret::{AllocId, ConstValue, Scalar};
-use crate::mir::Promoted;
use crate::ty::subst::{InternalSubsts, SubstsRef};
use crate::ty::ParamEnv;
use crate::ty::{self, TyCtxt, TypeVisitable};
+use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_errors::ErrorGuaranteed;
use rustc_hir::def_id::DefId;
use rustc_macros::HashStable;
@@ -12,40 +13,34 @@ use rustc_target::abi::Size;
use super::ScalarInt;
-/// An unevaluated, potentially generic, constant.
+/// An unevaluated (potentially generic) constant used in the type-system.
#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable, Lift)]
-#[derive(Hash, HashStable)]
-pub struct Unevaluated<'tcx, P = Option<Promoted>> {
+#[derive(Hash, HashStable, TypeFoldable, TypeVisitable)]
+pub struct UnevaluatedConst<'tcx> {
pub def: ty::WithOptConstParam<DefId>,
pub substs: SubstsRef<'tcx>,
- pub promoted: P,
}
-impl rustc_errors::IntoDiagnosticArg for Unevaluated<'_> {
+impl rustc_errors::IntoDiagnosticArg for UnevaluatedConst<'_> {
fn into_diagnostic_arg(self) -> rustc_errors::DiagnosticArgValue<'static> {
format!("{:?}", self).into_diagnostic_arg()
}
}
-impl<'tcx> Unevaluated<'tcx> {
- #[inline]
- pub fn shrink(self) -> Unevaluated<'tcx, ()> {
- debug_assert_eq!(self.promoted, None);
- Unevaluated { def: self.def, substs: self.substs, promoted: () }
- }
-}
-
-impl<'tcx> Unevaluated<'tcx, ()> {
+impl<'tcx> UnevaluatedConst<'tcx> {
#[inline]
- pub fn expand(self) -> Unevaluated<'tcx> {
- Unevaluated { def: self.def, substs: self.substs, promoted: None }
+ pub fn expand(self) -> mir::UnevaluatedConst<'tcx> {
+ mir::UnevaluatedConst { def: self.def, substs: self.substs, promoted: None }
}
}
-impl<'tcx, P: Default> Unevaluated<'tcx, P> {
+impl<'tcx> UnevaluatedConst<'tcx> {
#[inline]
- pub fn new(def: ty::WithOptConstParam<DefId>, substs: SubstsRef<'tcx>) -> Unevaluated<'tcx, P> {
- Unevaluated { def, substs, promoted: Default::default() }
+ pub fn new(
+ def: ty::WithOptConstParam<DefId>,
+ substs: SubstsRef<'tcx>,
+ ) -> UnevaluatedConst<'tcx> {
+ UnevaluatedConst { def, substs }
}
}
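
After this split, the type-system constant no longer carries a `promoted` slot; promotion stays a MIR-level notion, and `expand` bridges the two by filling in `promoted: None`. A simplified stand-in sketch (the real types hold `WithOptConstParam<DefId>` and `SubstsRef<'tcx>`):

    struct TyUnevaluatedConst {
        def: u32,    // stand-in for ty::WithOptConstParam<DefId>
        substs: u32, // stand-in for SubstsRef<'tcx>
    }

    struct MirUnevaluatedConst {
        def: u32,
        substs: u32,
        promoted: Option<u32>, // stand-in for Option<Promoted>
    }

    impl TyUnevaluatedConst {
        // Mirrors `ty::UnevaluatedConst::expand` above.
        fn expand(self) -> MirUnevaluatedConst {
            MirUnevaluatedConst { def: self.def, substs: self.substs, promoted: None }
        }
    }

    fn main() {
        let uv = TyUnevaluatedConst { def: 0, substs: 0 };
        assert!(uv.expand().promoted.is_none());
    }
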
@@ -67,7 +62,7 @@ pub enum ConstKind<'tcx> {
/// Used in the HIR by using `Unevaluated` everywhere and later normalizing to one of the other
/// variants when the code is monomorphic enough for that.
- Unevaluated(Unevaluated<'tcx, ()>),
+ Unevaluated(UnevaluatedConst<'tcx>),
/// Used to hold computed value.
Value(ty::ValTree<'tcx>),
@@ -114,7 +109,6 @@ impl<'tcx> ConstKind<'tcx> {
/// An inference variable for a const, for use in const generics.
#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable, Hash)]
-#[derive(HashStable)]
pub enum InferConst<'tcx> {
/// Infer the value of the const.
Var(ty::ConstVid<'tcx>),
@@ -122,6 +116,15 @@ pub enum InferConst<'tcx> {
Fresh(u32),
}
+impl<CTX> HashStable<CTX> for InferConst<'_> {
+ fn hash_stable(&self, hcx: &mut CTX, hasher: &mut StableHasher) {
+ match self {
+ InferConst::Var(_) => panic!("const variables should not be hashed: {self:?}"),
+ InferConst::Fresh(i) => i.hash_stable(hcx, hasher),
+ }
+ }
+}
+
enum EvalMode {
Typeck,
Mir,
@@ -185,8 +188,6 @@ impl<'tcx> ConstKind<'tcx> {
if let ConstKind::Unevaluated(unevaluated) = self {
use crate::mir::interpret::ErrorHandled;
- assert_eq!(unevaluated.promoted, ());
-
// HACK(eddyb) this erases lifetimes even though `const_eval_resolve`
// also does later, but we want to do it before checking for
// inference variables.
@@ -204,10 +205,9 @@ impl<'tcx> ConstKind<'tcx> {
// FIXME(eddyb, skinny121) pass `InferCtxt` into here when it's available, so that
// we can call `infcx.const_eval_resolve` which handles inference variables.
let param_env_and = if param_env_and.needs_infer() {
- tcx.param_env(unevaluated.def.did).and(ty::Unevaluated {
+ tcx.param_env(unevaluated.def.did).and(ty::UnevaluatedConst {
def: unevaluated.def,
substs: InternalSubsts::identity_for_item(tcx, unevaluated.def.did),
- promoted: (),
})
} else {
param_env_and
diff --git a/compiler/rustc_middle/src/ty/context.rs b/compiler/rustc_middle/src/ty/context.rs
index 0b497fa4a..3d7e2a083 100644
--- a/compiler/rustc_middle/src/ty/context.rs
+++ b/compiler/rustc_middle/src/ty/context.rs
@@ -1,10 +1,10 @@
//! Type context book-keeping.
use crate::arena::Arena;
-use crate::dep_graph::{DepGraph, DepKind, DepKindStruct};
+use crate::dep_graph::{DepGraph, DepKindStruct};
use crate::hir::place::Place as HirPlace;
use crate::infer::canonical::{Canonical, CanonicalVarInfo, CanonicalVarInfos};
-use crate::lint::{struct_lint_level, LintLevelSource};
+use crate::lint::struct_lint_level;
use crate::middle::codegen_fn_attrs::CodegenFnAttrs;
use crate::middle::resolve_lifetime;
use crate::middle::stability;
@@ -15,7 +15,6 @@ use crate::mir::{
use crate::thir::Thir;
use crate::traits;
use crate::ty::query::{self, TyCtxtAt};
-use crate::ty::subst::{GenericArg, GenericArgKind, InternalSubsts, Subst, SubstsRef, UserSubsts};
use crate::ty::{
self, AdtDef, AdtDefData, AdtKind, Binder, BindingMode, BoundVar, CanonicalPolyFnSig,
ClosureSizeProfileData, Const, ConstS, ConstVid, DefIdTree, ExistentialPredicate, FloatTy,
@@ -24,6 +23,7 @@ use crate::ty::{
RegionKind, ReprOptions, TraitObjectVisitor, Ty, TyKind, TyS, TyVar, TyVid, TypeAndMut, UintTy,
Visibility,
};
+use crate::ty::{GenericArg, GenericArgKind, InternalSubsts, SubstsRef, UserSubsts};
use rustc_ast as ast;
use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::fx::{FxHashMap, FxHashSet};
@@ -34,12 +34,16 @@ use rustc_data_structures::sharded::{IntoPointer, ShardedHashMap};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::steal::Steal;
use rustc_data_structures::sync::{self, Lock, Lrc, ReadGuard, RwLock, WorkerLocal};
+use rustc_data_structures::unord::UnordSet;
use rustc_data_structures::vec_map::VecMap;
-use rustc_errors::{DecorateLint, ErrorGuaranteed, LintDiagnosticBuilder, MultiSpan};
+use rustc_errors::{
+ DecorateLint, DiagnosticBuilder, DiagnosticMessage, ErrorGuaranteed, MultiSpan,
+};
use rustc_hir as hir;
use rustc_hir::def::{DefKind, Res};
use rustc_hir::def_id::{CrateNum, DefId, DefIdMap, LocalDefId, LOCAL_CRATE};
use rustc_hir::definitions::Definitions;
+use rustc_hir::hir_id::OwnerId;
use rustc_hir::intravisit::Visitor;
use rustc_hir::lang_items::LangItem;
use rustc_hir::{
@@ -53,7 +57,7 @@ use rustc_query_system::ich::StableHashingContext;
use rustc_serialize::opaque::{FileEncodeResult, FileEncoder};
use rustc_session::config::{CrateType, OutputFilenames};
use rustc_session::cstore::CrateStoreDyn;
-use rustc_session::lint::{Level, Lint};
+use rustc_session::lint::Lint;
use rustc_session::Limit;
use rustc_session::Session;
use rustc_span::def_id::{DefPathHash, StableCrateId};
@@ -76,7 +80,7 @@ use std::mem;
use std::ops::{Bound, Deref};
use std::sync::Arc;
-use super::{ImplPolarity, RvalueScopes};
+use super::{ImplPolarity, ResolverOutputs, RvalueScopes};
pub trait OnDiskCache<'tcx>: rustc_data_structures::sync::Sync {
/// Creates a new `OnDiskCache` instance from the serialized data in `data`.
@@ -195,9 +199,9 @@ impl<'tcx> CtxtInterners<'tcx> {
.intern(kind, |kind| {
let flags = super::flags::FlagComputation::for_kind(&kind);
- // It's impossible to hash inference regions (and will ICE), so we don't need to try to cache them.
+ // It's impossible to hash inference variables (and will ICE), so we don't need to try to cache them.
// Without incremental, we rarely stable-hash types, so let's not do it proactively.
- let stable_hash = if flags.flags.intersects(TypeFlags::HAS_RE_INFER)
+ let stable_hash = if flags.flags.intersects(TypeFlags::NEEDS_INFER)
|| sess.opts.incremental.is_none()
{
Fingerprint::ZERO
@@ -288,7 +292,7 @@ pub struct CommonConsts<'tcx> {
}
pub struct LocalTableInContext<'a, V> {
- hir_owner: LocalDefId,
+ hir_owner: OwnerId,
data: &'a ItemLocalMap<V>,
}
@@ -300,7 +304,7 @@ pub struct LocalTableInContext<'a, V> {
/// would result in lookup errors, or worse, in silently wrong data being
/// stored/returned.
#[inline]
-fn validate_hir_id_for_typeck_results(hir_owner: LocalDefId, hir_id: hir::HirId) {
+fn validate_hir_id_for_typeck_results(hir_owner: OwnerId, hir_id: hir::HirId) {
if hir_id.owner != hir_owner {
invalid_hir_id_for_typeck_results(hir_owner, hir_id);
}
@@ -308,7 +312,7 @@ fn validate_hir_id_for_typeck_results(hir_owner: LocalDefId, hir_id: hir::HirId)
#[cold]
#[inline(never)]
-fn invalid_hir_id_for_typeck_results(hir_owner: LocalDefId, hir_id: hir::HirId) {
+fn invalid_hir_id_for_typeck_results(hir_owner: OwnerId, hir_id: hir::HirId) {
ty::tls::with(|tcx| {
bug!(
"node {} with HirId::owner {:?} cannot be placed in TypeckResults with hir_owner {:?}",
@@ -344,7 +348,7 @@ impl<'a, V> ::std::ops::Index<hir::HirId> for LocalTableInContext<'a, V> {
}
pub struct LocalTableInContextMut<'a, V> {
- hir_owner: LocalDefId,
+ hir_owner: OwnerId,
data: &'a mut ItemLocalMap<V>,
}
@@ -416,7 +420,7 @@ pub struct GeneratorDiagnosticData<'tcx> {
#[derive(TyEncodable, TyDecodable, Debug, HashStable)]
pub struct TypeckResults<'tcx> {
/// The `HirId::owner` all `ItemLocalId`s in this table are relative to.
- pub hir_owner: LocalDefId,
+ pub hir_owner: OwnerId,
/// Resolved definitions for `<T>::X` associated paths and
/// method calls, including those of overloaded operators.
@@ -528,19 +532,17 @@ pub struct TypeckResults<'tcx> {
/// This is used for warning unused imports. During type
/// checking, this `Lrc` should not be cloned: it must have a ref-count
/// of 1 so that we can insert things into the set mutably.
- pub used_trait_imports: Lrc<FxHashSet<LocalDefId>>,
+ pub used_trait_imports: Lrc<UnordSet<LocalDefId>>,
/// If any errors occurred while type-checking this body,
/// this field will be set to `Some(ErrorGuaranteed)`.
pub tainted_by_errors: Option<ErrorGuaranteed>,
/// All the opaque types that have hidden types set
- /// by this function. For return-position-impl-trait we also store the
- /// type here, so that mir-borrowck can figure out hidden types,
+ /// by this function. We also store the
+ /// type here, so that mir-borrowck can use it as a hint for figuring out hidden types,
/// even if they are only set in dead code (which doesn't show up in MIR).
- /// For type-alias-impl-trait, this map is only used to prevent query cycles,
- /// so the hidden types are all `None`.
- pub concrete_opaque_types: VecMap<LocalDefId, Option<Ty<'tcx>>>,
+ pub concrete_opaque_types: VecMap<LocalDefId, ty::OpaqueHiddenType<'tcx>>,
/// Tracks the minimum captures required for a closure;
/// see `MinCaptureInformationMap` for more details.
@@ -572,7 +574,7 @@ pub struct TypeckResults<'tcx> {
/// Tracks the rvalue scoping rules which defines finer scoping for rvalue expressions
/// by applying extended parameter rules.
- /// Details may be find in `rustc_typeck::check::rvalue_scopes`.
+ /// Details may be find in `rustc_hir_analysis::check::rvalue_scopes`.
pub rvalue_scopes: RvalueScopes,
/// Stores the type, expression, span and optional scope span of all types
@@ -591,7 +593,7 @@ pub struct TypeckResults<'tcx> {
}
impl<'tcx> TypeckResults<'tcx> {
- pub fn new(hir_owner: LocalDefId) -> TypeckResults<'tcx> {
+ pub fn new(hir_owner: OwnerId) -> TypeckResults<'tcx> {
TypeckResults {
hir_owner,
type_dependent_defs: Default::default(),
@@ -1066,10 +1068,9 @@ pub struct GlobalCtxt<'tcx> {
pub consts: CommonConsts<'tcx>,
definitions: RwLock<Definitions>,
- cstore: Box<CrateStoreDyn>,
/// Output of the resolver.
- pub(crate) untracked_resolutions: ty::ResolverOutputs,
+ pub(crate) untracked_resolutions: ty::ResolverGlobalCtxt,
untracked_resolver_for_lowering: Steal<ty::ResolverAstLowering>,
/// The entire crate as AST. This field serves as the input for the hir_crate query,
/// which lowers it from AST to HIR. It must not be read or used by anything else.
@@ -1083,7 +1084,7 @@ pub struct GlobalCtxt<'tcx> {
pub queries: &'tcx dyn query::QueryEngine<'tcx>,
pub query_caches: query::QueryCaches<'tcx>,
- query_kinds: &'tcx [DepKindStruct<'tcx>],
+ pub(crate) query_kinds: &'tcx [DepKindStruct<'tcx>],
// Internal caches for metadata decoding. No need to track deps on this.
pub ty_rcache: Lock<FxHashMap<ty::CReaderCacheKey, Ty<'tcx>>>,
@@ -1232,10 +1233,7 @@ impl<'tcx> TyCtxt<'tcx> {
lint_store: Lrc<dyn Any + sync::Send + sync::Sync>,
arena: &'tcx WorkerLocal<Arena<'tcx>>,
hir_arena: &'tcx WorkerLocal<hir::Arena<'tcx>>,
- definitions: Definitions,
- cstore: Box<CrateStoreDyn>,
- untracked_resolutions: ty::ResolverOutputs,
- untracked_resolver_for_lowering: ty::ResolverAstLowering,
+ resolver_outputs: ResolverOutputs,
krate: Lrc<ast::Crate>,
dep_graph: DepGraph,
on_disk_cache: Option<&'tcx dyn OnDiskCache<'tcx>>,
@@ -1244,6 +1242,11 @@ impl<'tcx> TyCtxt<'tcx> {
crate_name: &str,
output_filenames: OutputFilenames,
) -> GlobalCtxt<'tcx> {
+ let ResolverOutputs {
+ definitions,
+ global_ctxt: untracked_resolutions,
+ ast_lowering: untracked_resolver_for_lowering,
+ } = resolver_outputs;
let data_layout = TargetDataLayout::parse(&s.target).unwrap_or_else(|err| {
s.emit_fatal(err);
});
@@ -1252,7 +1255,7 @@ impl<'tcx> TyCtxt<'tcx> {
&interners,
s,
&definitions,
- &*cstore,
+ &*untracked_resolutions.cstore,
// This is only used to create a stable hashing context.
&untracked_resolutions.source_span,
);
@@ -1267,7 +1270,6 @@ impl<'tcx> TyCtxt<'tcx> {
interners,
dep_graph,
definitions: RwLock::new(definitions),
- cstore,
prof: s.prof.clone(),
types: common_types,
lifetimes: common_lifetimes,
@@ -1290,10 +1292,6 @@ impl<'tcx> TyCtxt<'tcx> {
}
}
- pub(crate) fn query_kind(self, k: DepKind) -> &'tcx DepKindStruct<'tcx> {
- &self.query_kinds[k as usize]
- }
-
/// Constructs a `TyKind::Error` type and registers a `delay_span_bug` to ensure it gets used.
#[track_caller]
pub fn ty_error(self) -> Ty<'tcx> {
@@ -1372,7 +1370,7 @@ impl<'tcx> TyCtxt<'tcx> {
if let Some(id) = id.as_local() {
self.definitions_untracked().def_key(id)
} else {
- self.cstore.def_key(id)
+ self.untracked_resolutions.cstore.def_key(id)
}
}
@@ -1386,7 +1384,7 @@ impl<'tcx> TyCtxt<'tcx> {
if let Some(id) = id.as_local() {
self.definitions_untracked().def_path(id)
} else {
- self.cstore.def_path(id)
+ self.untracked_resolutions.cstore.def_path(id)
}
}
@@ -1396,7 +1394,7 @@ impl<'tcx> TyCtxt<'tcx> {
if let Some(def_id) = def_id.as_local() {
self.definitions_untracked().def_path_hash(def_id)
} else {
- self.cstore.def_path_hash(def_id)
+ self.untracked_resolutions.cstore.def_path_hash(def_id)
}
}
@@ -1405,7 +1403,7 @@ impl<'tcx> TyCtxt<'tcx> {
if crate_num == LOCAL_CRATE {
self.sess.local_stable_crate_id()
} else {
- self.cstore.stable_crate_id(crate_num)
+ self.untracked_resolutions.cstore.stable_crate_id(crate_num)
}
}
@@ -1416,7 +1414,7 @@ impl<'tcx> TyCtxt<'tcx> {
if stable_crate_id == self.sess.local_stable_crate_id() {
LOCAL_CRATE
} else {
- self.cstore.stable_crate_id_to_crate_num(stable_crate_id)
+ self.untracked_resolutions.cstore.stable_crate_id_to_crate_num(stable_crate_id)
}
}
@@ -1435,8 +1433,9 @@ impl<'tcx> TyCtxt<'tcx> {
} else {
// If this is a DefPathHash from an upstream crate, let the CrateStore map
// it to a DefId.
- let cnum = self.cstore.stable_crate_id_to_crate_num(stable_crate_id);
- self.cstore.def_path_hash_to_def_id(cnum, hash)
+ let cstore = &*self.untracked_resolutions.cstore;
+ let cnum = cstore.stable_crate_id_to_crate_num(stable_crate_id);
+ cstore.def_path_hash_to_def_id(cnum, hash)
}
}
@@ -1448,7 +1447,7 @@ impl<'tcx> TyCtxt<'tcx> {
let (crate_name, stable_crate_id) = if def_id.is_local() {
(self.crate_name, self.sess.local_stable_crate_id())
} else {
- let cstore = &self.cstore;
+ let cstore = &*self.untracked_resolutions.cstore;
(cstore.crate_name(def_id.krate), cstore.stable_crate_id(def_id.krate))
};
@@ -1523,7 +1522,7 @@ impl<'tcx> TyCtxt<'tcx> {
/// Note that this is *untracked* and should only be used within the query
/// system if the result is otherwise tracked through queries
pub fn cstore_untracked(self) -> &'tcx CrateStoreDyn {
- &*self.cstore
+ &*self.untracked_resolutions.cstore
}
/// Note that this is *untracked* and should only be used within the query
@@ -1549,7 +1548,7 @@ impl<'tcx> TyCtxt<'tcx> {
let hcx = StableHashingContext::new(
self.sess,
&*definitions,
- &*self.cstore,
+ &*self.untracked_resolutions.cstore,
&self.untracked_resolutions.source_span,
);
f(hcx)
@@ -1725,7 +1724,7 @@ impl<'tcx> TyCtxt<'tcx> {
#[inline]
pub fn local_visibility(self, def_id: LocalDefId) -> Visibility {
- self.visibility(def_id.to_def_id()).expect_local()
+ self.visibility(def_id).expect_local()
}
}
@@ -2367,7 +2366,7 @@ impl<'tcx> TyCtxt<'tcx> {
st,
self.sess,
&self.definitions.read(),
- &*self.cstore,
+ &*self.untracked_resolutions.cstore,
// This is only used to create a stable hashing context.
&self.untracked_resolutions.source_span,
)
@@ -2812,44 +2811,6 @@ impl<'tcx> TyCtxt<'tcx> {
iter.intern_with(|xs| self.intern_bound_variable_kinds(xs))
}
- /// Walks upwards from `id` to find a node which might change lint levels with attributes.
- /// It stops at `bound` and just returns it if reached.
- pub fn maybe_lint_level_root_bounded(self, mut id: HirId, bound: HirId) -> HirId {
- let hir = self.hir();
- loop {
- if id == bound {
- return bound;
- }
-
- if hir.attrs(id).iter().any(|attr| Level::from_attr(attr).is_some()) {
- return id;
- }
- let next = hir.get_parent_node(id);
- if next == id {
- bug!("lint traversal reached the root of the crate");
- }
- id = next;
- }
- }
-
- pub fn lint_level_at_node(
- self,
- lint: &'static Lint,
- mut id: hir::HirId,
- ) -> (Level, LintLevelSource) {
- let sets = self.lint_levels(());
- loop {
- if let Some(pair) = sets.level_and_source(lint, id, self.sess) {
- return pair;
- }
- let next = self.hir().get_parent_node(id);
- if next == id {
- bug!("lint traversal reached the root of the crate");
- }
- id = next;
- }
- }
-
/// Emit a lint at `span` from a lint struct (some type that implements `DecorateLint`,
/// typically generated by `#[derive(LintDiagnostic)]`).
pub fn emit_spanned_lint(
@@ -2859,18 +2820,28 @@ impl<'tcx> TyCtxt<'tcx> {
span: impl Into<MultiSpan>,
decorator: impl for<'a> DecorateLint<'a, ()>,
) {
- self.struct_span_lint_hir(lint, hir_id, span, |diag| decorator.decorate_lint(diag))
+ self.struct_span_lint_hir(lint, hir_id, span, decorator.msg(), |diag| {
+ decorator.decorate_lint(diag)
+ })
}
+ /// Emit a lint at the appropriate level for a hir node, with an associated span.
+ ///
+ /// Return value of the `decorate` closure is ignored, see [`struct_lint_level`] for a detailed explanation.
+ ///
+ /// [`struct_lint_level`]: rustc_middle::lint::struct_lint_level#decorate-signature
pub fn struct_span_lint_hir(
self,
lint: &'static Lint,
hir_id: HirId,
span: impl Into<MultiSpan>,
- decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a, ()>),
+ msg: impl Into<DiagnosticMessage>,
+ decorate: impl for<'a, 'b> FnOnce(
+ &'b mut DiagnosticBuilder<'a, ()>,
+ ) -> &'b mut DiagnosticBuilder<'a, ()>,
) {
let (level, src) = self.lint_level_at_node(lint, hir_id);
- struct_lint_level(self.sess, lint, level, src, Some(span.into()), decorate);
+ struct_lint_level(self.sess, lint, level, src, Some(span.into()), msg, decorate);
}
/// Emit a lint from a lint struct (some type that implements `DecorateLint`, typically
@@ -2881,17 +2852,25 @@ impl<'tcx> TyCtxt<'tcx> {
id: HirId,
decorator: impl for<'a> DecorateLint<'a, ()>,
) {
- self.struct_lint_node(lint, id, |diag| decorator.decorate_lint(diag))
+ self.struct_lint_node(lint, id, decorator.msg(), |diag| decorator.decorate_lint(diag))
}
+ /// Emit a lint at the appropriate level for a hir node.
+ ///
+ /// Return value of the `decorate` closure is ignored, see [`struct_lint_level`] for a detailed explanation.
+ ///
+ /// [`struct_lint_level`]: rustc_middle::lint::struct_lint_level#decorate-signature
pub fn struct_lint_node(
self,
lint: &'static Lint,
id: HirId,
- decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a, ()>),
+ msg: impl Into<DiagnosticMessage>,
+ decorate: impl for<'a, 'b> FnOnce(
+ &'b mut DiagnosticBuilder<'a, ()>,
+ ) -> &'b mut DiagnosticBuilder<'a, ()>,
) {
let (level, src) = self.lint_level_at_node(lint, id);
- struct_lint_level(self.sess, lint, level, src, None, decorate);
+ struct_lint_level(self.sess, lint, level, src, None, msg, decorate);
}
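
Call-site shape after this signature change (a sketch only, since it needs a `TyCtxt` and a registered lint in scope to compile): the message is now passed up front and the closure merely decorates the already-created diagnostic; many call sites use the identity closure `|diag| diag`.

    tcx.struct_span_lint_hir(
        SOME_LINT, // placeholder: a `&'static Lint` the caller has in scope
        hir_id,
        span,
        "describe the problem here",
        |diag| diag.note("extra context, if any"),
    );

Previously the closure received a `LintDiagnosticBuilder` and typically called `.build(msg)` (and emitted the result) itself.
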
pub fn in_scope_traits(self, id: HirId) -> Option<&'tcx [TraitCandidate]> {
@@ -2906,7 +2885,7 @@ impl<'tcx> TyCtxt<'tcx> {
}
pub fn is_late_bound(self, id: HirId) -> bool {
- self.is_late_bound_map(id.owner).map_or(false, |set| {
+ self.is_late_bound_map(id.owner.def_id).map_or(false, |set| {
let def_id = self.hir().local_def_id(id);
set.contains(&def_id)
})
@@ -2977,6 +2956,21 @@ impl<'tcx> TyCtxtAt<'tcx> {
}
}
+/// Parameter attributes that can only be determined by examining the body of a function instead
+/// of just its signature.
+///
+/// These can be useful for optimization purposes when a function is directly called. We compute
+/// them and store them into the crate metadata so that downstream crates can make use of them.
+///
+/// Right now, we only have `read_only`, but `no_capture` and `no_alias` might be useful in the
+/// future.
+#[derive(Clone, Copy, PartialEq, Debug, Default, TyDecodable, TyEncodable, HashStable)]
+pub struct DeducedParamAttrs {
+ /// The parameter is marked immutable in the function and contains no `UnsafeCell` (i.e. its
+ /// type is freeze).
+ pub read_only: bool,
+}
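
To make the new `read_only` flag concrete: it marks by-value parameters that the callee never writes through and whose type has no interior mutability, which can be exploited for optimization when the function is called directly. A small example of the property being deduced (not of the deduction algorithm itself):

    struct Big([u8; 1024]);

    // `arg` is taken by value but never mutated, and `Big` contains no
    // `UnsafeCell`, so the parameter is effectively read-only.
    fn checksum(arg: Big) -> u32 {
        arg.0.iter().map(|b| *b as u32).sum()
    }

    fn main() {
        let b = Big([1; 1024]);
        println!("{}", checksum(b));
    }
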
+
// We are comparing types with different invariant lifetimes, so `ptr::eq`
// won't work for us.
fn ptr_eq<T, U>(t: *const T, u: *const U) -> bool {
diff --git a/compiler/rustc_middle/src/ty/diagnostics.rs b/compiler/rustc_middle/src/ty/diagnostics.rs
index 855917fb8..b8fd01e6a 100644
--- a/compiler/rustc_middle/src/ty/diagnostics.rs
+++ b/compiler/rustc_middle/src/ty/diagnostics.rs
@@ -511,3 +511,11 @@ impl<'tcx> TypeVisitor<'tcx> for IsSuggestableVisitor<'tcx> {
c.super_visit_with(self)
}
}
+
+#[derive(Diagnostic)]
+#[diag(borrowck_const_not_used_in_type_alias)]
+pub(super) struct ConstNotUsedTraitAlias {
+ pub ct: String,
+ #[primary_span]
+ pub span: Span,
+}
diff --git a/compiler/rustc_middle/src/ty/erase_regions.rs b/compiler/rustc_middle/src/ty/erase_regions.rs
index 3226950e7..ffdac93bc 100644
--- a/compiler/rustc_middle/src/ty/erase_regions.rs
+++ b/compiler/rustc_middle/src/ty/erase_regions.rs
@@ -1,4 +1,3 @@
-use crate::mir;
use crate::ty::fold::{TypeFoldable, TypeFolder, TypeSuperFoldable};
use crate::ty::visit::TypeVisitable;
use crate::ty::{self, Ty, TyCtxt, TypeFlags};
@@ -67,8 +66,4 @@ impl<'tcx> TypeFolder<'tcx> for RegionEraserVisitor<'tcx> {
_ => self.tcx.lifetimes.re_erased,
}
}
-
- fn fold_mir_const(&mut self, c: mir::ConstantKind<'tcx>) -> mir::ConstantKind<'tcx> {
- c.super_fold_with(self)
- }
}
diff --git a/compiler/rustc_middle/src/ty/error.rs b/compiler/rustc_middle/src/ty/error.rs
index 01e1e97b2..4e6cdb786 100644
--- a/compiler/rustc_middle/src/ty/error.rs
+++ b/compiler/rustc_middle/src/ty/error.rs
@@ -861,7 +861,7 @@ fn foo(&self) -> Self::T { String::new() }
// When `body_owner` is an `impl` or `trait` item, look in its associated types for
// `expected` and point at it.
let parent_id = self.hir().get_parent_item(hir_id);
- let item = self.hir().find_by_def_id(parent_id);
+ let item = self.hir().find_by_def_id(parent_id.def_id);
debug!("expected_projection parent item {:?}", item);
match item {
Some(hir::Node::Item(hir::Item { kind: hir::ItemKind::Trait(.., items), .. })) => {
@@ -872,9 +872,9 @@ fn foo(&self) -> Self::T { String::new() }
// FIXME: account for returning some type in a trait fn impl that has
// an assoc type as a return type (#72076).
if let hir::Defaultness::Default { has_value: true } =
- self.impl_defaultness(item.id.def_id)
+ self.impl_defaultness(item.id.owner_id)
{
- if self.type_of(item.id.def_id) == found {
+ if self.type_of(item.id.owner_id) == found {
diag.span_label(
item.span,
"associated type defaults can't be assumed inside the \
@@ -894,7 +894,7 @@ fn foo(&self) -> Self::T { String::new() }
})) => {
for item in &items[..] {
if let hir::AssocItemKind::Type = item.kind {
- if self.type_of(item.id.def_id) == found {
+ if self.type_of(item.id.owner_id) == found {
diag.span_label(item.span, "expected this associated type");
return true;
}
diff --git a/compiler/rustc_middle/src/ty/fast_reject.rs b/compiler/rustc_middle/src/ty/fast_reject.rs
index 8d019a3ba..3be0bc4de 100644
--- a/compiler/rustc_middle/src/ty/fast_reject.rs
+++ b/compiler/rustc_middle/src/ty/fast_reject.rs
@@ -132,7 +132,7 @@ pub fn simplify_type<'tcx>(
// don't unify with anything else as long as they are fully normalized.
//
// We will have to be careful with lazy normalization here.
- TreatParams::AsPlaceholder if !ty.has_infer_types_or_consts() => {
+ TreatParams::AsPlaceholder if !ty.has_non_region_infer() => {
debug!("treating `{}` as a placeholder", ty);
Some(PlaceholderSimplifiedType)
}
@@ -384,14 +384,7 @@ impl DeepRejectCtxt {
// they might unify with any value.
ty::ConstKind::Unevaluated(_) | ty::ConstKind::Error(_) => true,
ty::ConstKind::Value(obl) => match k {
- ty::ConstKind::Value(imp) => {
- // FIXME(valtrees): Once we have valtrees, we can just
- // compare them directly here.
- match (obl.try_to_scalar_int(), imp.try_to_scalar_int()) {
- (Some(obl), Some(imp)) => obl == imp,
- _ => true,
- }
- }
+ ty::ConstKind::Value(imp) => obl == imp,
_ => true,
},
diff --git a/compiler/rustc_middle/src/ty/flags.rs b/compiler/rustc_middle/src/ty/flags.rs
index 98b8a7386..7201737be 100644
--- a/compiler/rustc_middle/src/ty/flags.rs
+++ b/compiler/rustc_middle/src/ty/flags.rs
@@ -34,12 +34,6 @@ impl FlagComputation {
result.flags
}
- pub fn for_unevaluated_const(uv: ty::Unevaluated<'_>) -> TypeFlags {
- let mut result = FlagComputation::new();
- result.add_unevaluated_const(uv);
- result.flags
- }
-
fn add_flags(&mut self, flags: TypeFlags) {
self.flags = self.flags | flags;
}
@@ -256,7 +250,7 @@ impl FlagComputation {
self.add_substs(substs);
}
ty::PredicateKind::ConstEvaluatable(uv) => {
- self.add_unevaluated_const(uv);
+ self.add_const(uv);
}
ty::PredicateKind::ConstEquate(expected, found) => {
self.add_const(expected);
@@ -289,7 +283,10 @@ impl FlagComputation {
fn add_const(&mut self, c: ty::Const<'_>) {
self.add_ty(c.ty());
match c.kind() {
- ty::ConstKind::Unevaluated(unevaluated) => self.add_unevaluated_const(unevaluated),
+ ty::ConstKind::Unevaluated(uv) => {
+ self.add_substs(uv.substs);
+ self.add_flags(TypeFlags::HAS_CT_PROJECTION);
+ }
ty::ConstKind::Infer(infer) => {
self.add_flags(TypeFlags::STILL_FURTHER_SPECIALIZABLE);
match infer {
@@ -313,11 +310,6 @@ impl FlagComputation {
}
}
- fn add_unevaluated_const<P>(&mut self, ct: ty::Unevaluated<'_, P>) {
- self.add_substs(ct.substs);
- self.add_flags(TypeFlags::HAS_CT_PROJECTION);
- }
-
fn add_existential_projection(&mut self, projection: &ty::ExistentialProjection<'_>) {
self.add_substs(projection.substs);
match projection.term.unpack() {
diff --git a/compiler/rustc_middle/src/ty/fold.rs b/compiler/rustc_middle/src/ty/fold.rs
index cac95e14a..54f1499eb 100644
--- a/compiler/rustc_middle/src/ty/fold.rs
+++ b/compiler/rustc_middle/src/ty/fold.rs
@@ -13,8 +13,7 @@
//!
//! There are three groups of traits involved in each traversal.
//! - `TypeFoldable`. This is implemented once for many types, including:
-//! - Types of interest, for which the the methods delegate to the
-//! folder.
+//! - Types of interest, for which the methods delegate to the folder.
//! - All other types, including generic containers like `Vec` and `Option`.
//! It defines a "skeleton" of how they should be folded.
//! - `TypeSuperFoldable`. This is implemented only for each type of interest,
@@ -43,7 +42,6 @@
//! - ty.super_fold_with(folder)
//! - u.fold_with(folder)
//! ```
-use crate::mir;
use crate::ty::{self, Binder, BoundTy, Ty, TyCtxt, TypeVisitable};
use rustc_data_structures::fx::FxIndexMap;
use rustc_hir::def_id::DefId;
@@ -128,17 +126,9 @@ pub trait TypeFolder<'tcx>: FallibleTypeFolder<'tcx, Error = !> {
c.super_fold_with(self)
}
- fn fold_unevaluated(&mut self, uv: ty::Unevaluated<'tcx>) -> ty::Unevaluated<'tcx> {
- uv.super_fold_with(self)
- }
-
fn fold_predicate(&mut self, p: ty::Predicate<'tcx>) -> ty::Predicate<'tcx> {
p.super_fold_with(self)
}
-
- fn fold_mir_const(&mut self, c: mir::ConstantKind<'tcx>) -> mir::ConstantKind<'tcx> {
- bug!("most type folders should not be folding MIR datastructures: {:?}", c)
- }
}
/// This trait is implemented for every folding traversal. There is a fold
@@ -172,26 +162,12 @@ pub trait FallibleTypeFolder<'tcx>: Sized {
c.try_super_fold_with(self)
}
- fn try_fold_unevaluated(
- &mut self,
- c: ty::Unevaluated<'tcx>,
- ) -> Result<ty::Unevaluated<'tcx>, Self::Error> {
- c.try_super_fold_with(self)
- }
-
fn try_fold_predicate(
&mut self,
p: ty::Predicate<'tcx>,
) -> Result<ty::Predicate<'tcx>, Self::Error> {
p.try_super_fold_with(self)
}
-
- fn try_fold_mir_const(
- &mut self,
- c: mir::ConstantKind<'tcx>,
- ) -> Result<mir::ConstantKind<'tcx>, Self::Error> {
- bug!("most type folders should not be folding MIR datastructures: {:?}", c)
- }
}
// This blanket implementation of the fallible trait for infallible folders
@@ -225,23 +201,9 @@ where
Ok(self.fold_const(c))
}
- fn try_fold_unevaluated(
- &mut self,
- c: ty::Unevaluated<'tcx>,
- ) -> Result<ty::Unevaluated<'tcx>, !> {
- Ok(self.fold_unevaluated(c))
- }
-
fn try_fold_predicate(&mut self, p: ty::Predicate<'tcx>) -> Result<ty::Predicate<'tcx>, !> {
Ok(self.fold_predicate(p))
}
-
- fn try_fold_mir_const(
- &mut self,
- c: mir::ConstantKind<'tcx>,
- ) -> Result<mir::ConstantKind<'tcx>, !> {
- Ok(self.fold_mir_const(c))
- }
}
///////////////////////////////////////////////////////////////////////////
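The doc comment at the top of `fold.rs` (partly visible in the hunk above) describes how `TypeFoldable` delegates "types of interest" to the folder while `super_fold_with` supplies the structural recursion. The following is a much-simplified standalone model of that call structure, using made-up `Ty`, `Foldable`, and `Folder` types rather than the real rustc traits:

```rust
// A toy model of the fold pattern: not rustc's API, just the call structure.
#[derive(Debug, PartialEq)]
enum Ty {
    Unit,
    Ref(Box<Ty>),
    Tuple(Vec<Ty>),
}

trait Folder {
    // Default behaviour: no override, just recurse into the structure.
    fn fold_ty(&mut self, ty: Ty) -> Ty {
        ty.super_fold_with(self)
    }
}

trait Foldable {
    fn fold_with<F: Folder>(self, folder: &mut F) -> Self;
}

impl Foldable for Ty {
    // `Ty` is a "type of interest", so it delegates to the folder...
    fn fold_with<F: Folder>(self, folder: &mut F) -> Self {
        folder.fold_ty(self)
    }
}

impl Ty {
    // ...and `super_fold_with` is the structural "skeleton" traversal.
    fn super_fold_with<F: Folder + ?Sized>(self, folder: &mut F) -> Self {
        match self {
            Ty::Unit => Ty::Unit,
            Ty::Ref(inner) => Ty::Ref(Box::new(folder.fold_ty(*inner))),
            Ty::Tuple(tys) => Ty::Tuple(tys.into_iter().map(|t| folder.fold_ty(t)).collect()),
        }
    }
}

// A folder that rewrites every unit type into a reference to unit.
struct WrapUnits;
impl Folder for WrapUnits {
    fn fold_ty(&mut self, ty: Ty) -> Ty {
        match ty {
            Ty::Unit => Ty::Ref(Box::new(Ty::Unit)),
            other => other.super_fold_with(self),
        }
    }
}

fn main() {
    let folded = Ty::Tuple(vec![Ty::Unit, Ty::Ref(Box::new(Ty::Unit))]).fold_with(&mut WrapUnits);
    assert_eq!(
        folded,
        Ty::Tuple(vec![
            Ty::Ref(Box::new(Ty::Unit)),
            Ty::Ref(Box::new(Ty::Ref(Box::new(Ty::Unit)))),
        ])
    );
}
```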
diff --git a/compiler/rustc_middle/src/ty/generics.rs b/compiler/rustc_middle/src/ty/generics.rs
index 0c8bdde9c..19754d145 100644
--- a/compiler/rustc_middle/src/ty/generics.rs
+++ b/compiler/rustc_middle/src/ty/generics.rs
@@ -1,6 +1,5 @@
use crate::ty;
-use crate::ty::subst::{Subst, SubstsRef};
-use crate::ty::EarlyBinder;
+use crate::ty::{EarlyBinder, SubstsRef};
use rustc_ast as ast;
use rustc_data_structures::fx::FxHashMap;
use rustc_hir::def_id::DefId;
diff --git a/compiler/rustc_middle/src/ty/inhabitedness/def_id_forest.rs b/compiler/rustc_middle/src/ty/inhabitedness/def_id_forest.rs
deleted file mode 100644
index c4ad698ba..000000000
--- a/compiler/rustc_middle/src/ty/inhabitedness/def_id_forest.rs
+++ /dev/null
@@ -1,145 +0,0 @@
-use crate::ty::context::TyCtxt;
-use crate::ty::{DefId, DefIdTree};
-use rustc_span::def_id::CRATE_DEF_ID;
-use smallvec::SmallVec;
-use std::mem;
-
-use DefIdForest::*;
-
-/// Represents a forest of `DefId`s closed under the ancestor relation. That is,
-/// if a `DefId` representing a module is contained in the forest then all
-/// `DefId`s defined in that module or submodules are also implicitly contained
-/// in the forest.
-///
-/// This is used to represent a set of modules in which a type is visibly
-/// uninhabited.
-///
-/// We store the minimal set of `DefId`s required to represent the whole set. If A and B are
-/// `DefId`s in the `DefIdForest`, and A is a parent of B, then only A will be stored. When this is
-/// used with `type_uninhabited_from`, there will very rarely be more than one `DefId` stored.
-#[derive(Copy, Clone, HashStable, Debug)]
-pub enum DefIdForest<'a> {
- Empty,
- Single(DefId),
- /// This variant is very rare.
- /// Invariant: >1 elements
- Multiple(&'a [DefId]),
-}
-
-/// Tests whether a slice of roots contains a given DefId.
-#[inline]
-fn slice_contains<'tcx>(tcx: TyCtxt<'tcx>, slice: &[DefId], id: DefId) -> bool {
- slice.iter().any(|root_id| tcx.is_descendant_of(id, *root_id))
-}
-
-impl<'tcx> DefIdForest<'tcx> {
- /// Creates an empty forest.
- pub fn empty() -> DefIdForest<'tcx> {
- DefIdForest::Empty
- }
-
- /// Creates a forest consisting of a single tree representing the entire
- /// crate.
- #[inline]
- pub fn full() -> DefIdForest<'tcx> {
- DefIdForest::from_id(CRATE_DEF_ID.to_def_id())
- }
-
- /// Creates a forest containing a `DefId` and all its descendants.
- pub fn from_id(id: DefId) -> DefIdForest<'tcx> {
- DefIdForest::Single(id)
- }
-
- fn as_slice(&self) -> &[DefId] {
- match self {
- Empty => &[],
- Single(id) => std::slice::from_ref(id),
- Multiple(root_ids) => root_ids,
- }
- }
-
- // Only allocates in the rare `Multiple` case.
- fn from_vec(tcx: TyCtxt<'tcx>, root_ids: SmallVec<[DefId; 1]>) -> DefIdForest<'tcx> {
- match &root_ids[..] {
- [] => Empty,
- [id] => Single(*id),
- _ => DefIdForest::Multiple(tcx.arena.alloc_from_iter(root_ids)),
- }
- }
-
- /// Tests whether the forest is empty.
- pub fn is_empty(&self) -> bool {
- match self {
- Empty => true,
- Single(..) | Multiple(..) => false,
- }
- }
-
- /// Iterate over the set of roots.
- fn iter(&self) -> impl Iterator<Item = DefId> + '_ {
- self.as_slice().iter().copied()
- }
-
- /// Tests whether the forest contains a given DefId.
- pub fn contains(&self, tcx: TyCtxt<'tcx>, id: DefId) -> bool {
- slice_contains(tcx, self.as_slice(), id)
- }
-
- /// Calculate the intersection of a collection of forests.
- pub fn intersection<I>(tcx: TyCtxt<'tcx>, iter: I) -> DefIdForest<'tcx>
- where
- I: IntoIterator<Item = DefIdForest<'tcx>>,
- {
- let mut iter = iter.into_iter();
- let mut ret: SmallVec<[_; 1]> = if let Some(first) = iter.next() {
- SmallVec::from_slice(first.as_slice())
- } else {
- return DefIdForest::full();
- };
-
- let mut next_ret: SmallVec<[_; 1]> = SmallVec::new();
- for next_forest in iter {
- // No need to continue if the intersection is already empty.
- if ret.is_empty() || next_forest.is_empty() {
- return DefIdForest::empty();
- }
-
- // We keep the elements in `ret` that are also in `next_forest`.
- next_ret.extend(ret.iter().copied().filter(|&id| next_forest.contains(tcx, id)));
- // We keep the elements in `next_forest` that are also in `ret`.
- next_ret.extend(next_forest.iter().filter(|&id| slice_contains(tcx, &ret, id)));
-
- mem::swap(&mut next_ret, &mut ret);
- next_ret.clear();
- }
- DefIdForest::from_vec(tcx, ret)
- }
-
- /// Calculate the union of a collection of forests.
- pub fn union<I>(tcx: TyCtxt<'tcx>, iter: I) -> DefIdForest<'tcx>
- where
- I: IntoIterator<Item = DefIdForest<'tcx>>,
- {
- let mut ret: SmallVec<[_; 1]> = SmallVec::new();
- let mut next_ret: SmallVec<[_; 1]> = SmallVec::new();
- for next_forest in iter {
- // Union with the empty set is a no-op.
- if next_forest.is_empty() {
- continue;
- }
-
- // We add everything in `ret` that is not in `next_forest`.
- next_ret.extend(ret.iter().copied().filter(|&id| !next_forest.contains(tcx, id)));
- // We add everything in `next_forest` that we haven't added yet.
- for id in next_forest.iter() {
- if !slice_contains(tcx, &next_ret, id) {
- next_ret.push(id);
- }
- }
-
- mem::swap(&mut next_ret, &mut ret);
- next_ret.clear();
- }
- DefIdForest::from_vec(tcx, ret)
- }
-}
diff --git a/compiler/rustc_middle/src/ty/inhabitedness/inhabited_predicate.rs b/compiler/rustc_middle/src/ty/inhabitedness/inhabited_predicate.rs
new file mode 100644
index 000000000..b7aa45572
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/inhabitedness/inhabited_predicate.rs
@@ -0,0 +1,204 @@
+use crate::ty::context::TyCtxt;
+use crate::ty::{self, DefId, DefIdTree, ParamEnv, Ty};
+
+/// Represents whether some type is inhabited in a given context.
+/// Examples of uninhabited types are `!`, `enum Void {}`, or a struct
+/// containing either of those types.
+/// A type's inhabitedness may depend on the `ParamEnv` as well as what types
+/// are visible in the current module.
+#[derive(Clone, Copy, Debug, PartialEq, HashStable)]
+pub enum InhabitedPredicate<'tcx> {
+ /// Inhabited
+ True,
+ /// Uninhabited
+ False,
+ /// Uninhabited when a const value is non-zero. This occurs when there is an
+ /// array of uninhabited items, but the array is inhabited if it is empty.
+ ConstIsZero(ty::Const<'tcx>),
+ /// Uninhabited if within a certain module. This occurs when an uninhabited
+ /// type has restricted visibility.
+ NotInModule(DefId),
+ /// Inhabited if some generic type is inhabited.
+ /// These are replaced by calling [`Self::subst`].
+ GenericType(Ty<'tcx>),
+ /// A AND B
+ And(&'tcx [InhabitedPredicate<'tcx>; 2]),
+ /// A OR B
+ Or(&'tcx [InhabitedPredicate<'tcx>; 2]),
+}
+
+impl<'tcx> InhabitedPredicate<'tcx> {
+ /// Returns true if the corresponding type is inhabited in the given
+ /// `ParamEnv` and module
+ pub fn apply(self, tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>, module_def_id: DefId) -> bool {
+ let Ok(result) = self
+ .apply_inner::<!>(tcx, param_env, &|id| Ok(tcx.is_descendant_of(module_def_id, id)));
+ result
+ }
+
+ /// Same as `apply`, but returns `None` if self contains a module predicate
+ pub fn apply_any_module(self, tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>) -> Option<bool> {
+ self.apply_inner(tcx, param_env, &|_| Err(())).ok()
+ }
+
+ fn apply_inner<E>(
+ self,
+ tcx: TyCtxt<'tcx>,
+ param_env: ParamEnv<'tcx>,
+ in_module: &impl Fn(DefId) -> Result<bool, E>,
+ ) -> Result<bool, E> {
+ match self {
+ Self::False => Ok(false),
+ Self::True => Ok(true),
+ Self::ConstIsZero(const_) => match const_.try_eval_usize(tcx, param_env) {
+ None | Some(0) => Ok(true),
+ Some(1..) => Ok(false),
+ },
+ Self::NotInModule(id) => in_module(id).map(|in_mod| !in_mod),
+ Self::GenericType(_) => Ok(true),
+ Self::And([a, b]) => try_and(a, b, |x| x.apply_inner(tcx, param_env, in_module)),
+ Self::Or([a, b]) => try_or(a, b, |x| x.apply_inner(tcx, param_env, in_module)),
+ }
+ }
+
+ pub fn and(self, tcx: TyCtxt<'tcx>, other: Self) -> Self {
+ self.reduce_and(tcx, other).unwrap_or_else(|| Self::And(tcx.arena.alloc([self, other])))
+ }
+
+ pub fn or(self, tcx: TyCtxt<'tcx>, other: Self) -> Self {
+ self.reduce_or(tcx, other).unwrap_or_else(|| Self::Or(tcx.arena.alloc([self, other])))
+ }
+
+ pub fn all(tcx: TyCtxt<'tcx>, iter: impl IntoIterator<Item = Self>) -> Self {
+ let mut result = Self::True;
+ for pred in iter {
+ if matches!(pred, Self::False) {
+ return Self::False;
+ }
+ result = result.and(tcx, pred);
+ }
+ result
+ }
+
+ pub fn any(tcx: TyCtxt<'tcx>, iter: impl IntoIterator<Item = Self>) -> Self {
+ let mut result = Self::False;
+ for pred in iter {
+ if matches!(pred, Self::True) {
+ return Self::True;
+ }
+ result = result.or(tcx, pred);
+ }
+ result
+ }
+
+ fn reduce_and(self, tcx: TyCtxt<'tcx>, other: Self) -> Option<Self> {
+ match (self, other) {
+ (Self::True, a) | (a, Self::True) => Some(a),
+ (Self::False, _) | (_, Self::False) => Some(Self::False),
+ (Self::ConstIsZero(a), Self::ConstIsZero(b)) if a == b => Some(Self::ConstIsZero(a)),
+ (Self::NotInModule(a), Self::NotInModule(b)) if a == b => Some(Self::NotInModule(a)),
+ (Self::NotInModule(a), Self::NotInModule(b)) if tcx.is_descendant_of(a, b) => {
+ Some(Self::NotInModule(b))
+ }
+ (Self::NotInModule(a), Self::NotInModule(b)) if tcx.is_descendant_of(b, a) => {
+ Some(Self::NotInModule(a))
+ }
+ (Self::GenericType(a), Self::GenericType(b)) if a == b => Some(Self::GenericType(a)),
+ (Self::And(&[a, b]), c) | (c, Self::And(&[a, b])) => {
+ if let Some(ac) = a.reduce_and(tcx, c) {
+ Some(ac.and(tcx, b))
+ } else if let Some(bc) = b.reduce_and(tcx, c) {
+ Some(Self::And(tcx.arena.alloc([a, bc])))
+ } else {
+ None
+ }
+ }
+ _ => None,
+ }
+ }
+
+ fn reduce_or(self, tcx: TyCtxt<'tcx>, other: Self) -> Option<Self> {
+ match (self, other) {
+ (Self::True, _) | (_, Self::True) => Some(Self::True),
+ (Self::False, a) | (a, Self::False) => Some(a),
+ (Self::ConstIsZero(a), Self::ConstIsZero(b)) if a == b => Some(Self::ConstIsZero(a)),
+ (Self::NotInModule(a), Self::NotInModule(b)) if a == b => Some(Self::NotInModule(a)),
+ (Self::NotInModule(a), Self::NotInModule(b)) if tcx.is_descendant_of(a, b) => {
+ Some(Self::NotInModule(a))
+ }
+ (Self::NotInModule(a), Self::NotInModule(b)) if tcx.is_descendant_of(b, a) => {
+ Some(Self::NotInModule(b))
+ }
+ (Self::GenericType(a), Self::GenericType(b)) if a == b => Some(Self::GenericType(a)),
+ (Self::Or(&[a, b]), c) | (c, Self::Or(&[a, b])) => {
+ if let Some(ac) = a.reduce_or(tcx, c) {
+ Some(ac.or(tcx, b))
+ } else if let Some(bc) = b.reduce_or(tcx, c) {
+ Some(Self::Or(tcx.arena.alloc([a, bc])))
+ } else {
+ None
+ }
+ }
+ _ => None,
+ }
+ }
+
+ /// Replaces generic types with their corresponding predicates

+ pub fn subst(self, tcx: TyCtxt<'tcx>, substs: ty::SubstsRef<'tcx>) -> Self {
+ self.subst_opt(tcx, substs).unwrap_or(self)
+ }
+
+ fn subst_opt(self, tcx: TyCtxt<'tcx>, substs: ty::SubstsRef<'tcx>) -> Option<Self> {
+ match self {
+ Self::ConstIsZero(c) => {
+ let c = ty::EarlyBinder(c).subst(tcx, substs);
+ let pred = match c.kind().try_to_machine_usize(tcx) {
+ Some(0) => Self::True,
+ Some(1..) => Self::False,
+ None => Self::ConstIsZero(c),
+ };
+ Some(pred)
+ }
+ Self::GenericType(t) => {
+ Some(ty::EarlyBinder(t).subst(tcx, substs).inhabited_predicate(tcx))
+ }
+ Self::And(&[a, b]) => match a.subst_opt(tcx, substs) {
+ None => b.subst_opt(tcx, substs).map(|b| a.and(tcx, b)),
+ Some(InhabitedPredicate::False) => Some(InhabitedPredicate::False),
+ Some(a) => Some(a.and(tcx, b.subst_opt(tcx, substs).unwrap_or(b))),
+ },
+ Self::Or(&[a, b]) => match a.subst_opt(tcx, substs) {
+ None => b.subst_opt(tcx, substs).map(|b| a.or(tcx, b)),
+ Some(InhabitedPredicate::True) => Some(InhabitedPredicate::True),
+ Some(a) => Some(a.or(tcx, b.subst_opt(tcx, substs).unwrap_or(b))),
+ },
+ _ => None,
+ }
+ }
+}
+
+// this is basically like `f(a)? && f(b)?` but different in the case of
+// `Ok(false) && Err(_) -> Ok(false)`
+fn try_and<T, E>(a: T, b: T, f: impl Fn(T) -> Result<bool, E>) -> Result<bool, E> {
+ let a = f(a);
+ if matches!(a, Ok(false)) {
+ return Ok(false);
+ }
+ match (a, f(b)) {
+ (_, Ok(false)) | (Ok(false), _) => Ok(false),
+ (Ok(true), Ok(true)) => Ok(true),
+ (Err(e), _) | (_, Err(e)) => Err(e),
+ }
+}
+
+fn try_or<T, E>(a: T, b: T, f: impl Fn(T) -> Result<bool, E>) -> Result<bool, E> {
+ let a = f(a);
+ if matches!(a, Ok(true)) {
+ return Ok(true);
+ }
+ match (a, f(b)) {
+ (_, Ok(true)) | (Ok(true), _) => Ok(true),
+ (Ok(false), Ok(false)) => Ok(false),
+ (Err(e), _) | (_, Err(e)) => Err(e),
+ }
+}
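As a sanity check on the short-circuiting described in the comment above `try_and`, here is the same helper copied into a standalone program (the `f` closure and its error string are made up for illustration): a definite `Ok(false)` from either operand wins over an `Err(_)`, which a plain `f(a)? && f(b)?` would not do.

```rust
fn try_and<T, E>(a: T, b: T, f: impl Fn(T) -> Result<bool, E>) -> Result<bool, E> {
    let a = f(a);
    if matches!(a, Ok(false)) {
        return Ok(false);
    }
    match (a, f(b)) {
        (_, Ok(false)) | (Ok(false), _) => Ok(false),
        (Ok(true), Ok(true)) => Ok(true),
        (Err(e), _) | (_, Err(e)) => Err(e),
    }
}

fn main() {
    // `f` fails for 0 and answers "false" for odd numbers.
    let f = |x: u32| if x == 0 { Err("no module info") } else { Ok(x % 2 == 0) };

    assert_eq!(try_and(3, 0, f), Ok(false)); // Ok(false) first: never even calls f(0)
    assert_eq!(try_and(0, 3, f), Ok(false)); // Err, then Ok(false): still Ok(false)
    assert_eq!(try_and(2, 4, f), Ok(true));
    assert_eq!(try_and(2, 0, f), Err("no module info"));
}
```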
diff --git a/compiler/rustc_middle/src/ty/inhabitedness/mod.rs b/compiler/rustc_middle/src/ty/inhabitedness/mod.rs
index aaa66deb2..279a728ea 100644
--- a/compiler/rustc_middle/src/ty/inhabitedness/mod.rs
+++ b/compiler/rustc_middle/src/ty/inhabitedness/mod.rs
@@ -1,57 +1,60 @@
-pub use self::def_id_forest::DefIdForest;
+//! This module contains logic for determining whether a type is inhabited or
+//! uninhabited. The [`InhabitedPredicate`] type captures the minimum
+//! information needed to determine whether a type is inhabited given a
+//! `ParamEnv` and module ID.
+//!
+//! # Example
+//! ```rust
+//! enum Void {}
+//! mod a {
+//! pub mod b {
+//! pub struct SecretlyUninhabited {
+//! _priv: !,
+//! }
+//! }
+//! }
+//!
+//! mod c {
+//! pub struct AlsoSecretlyUninhabited {
+//! _priv: Void,
+//! }
+//! mod d {
+//! }
+//! }
+//!
+//! struct Foo {
+//! x: a::b::SecretlyUninhabited,
+//! y: c::AlsoSecretlyUninhabited,
+//! }
+//! ```
+//! In this code, the type `Foo` will only be visibly uninhabited inside the
+//! modules `b`, `c` and `d`. Calling `inhabited_predicate` on `Foo` will
+//! return `NotInModule(b) AND NotInModule(c)`.
+//!
+//! We need this information for pattern-matching on `Foo` or types that contain
+//! `Foo`.
+//!
+//! # Example
+//! ```rust
+//! let foo_result: Result<T, Foo> = ... ;
+//! let Ok(t) = foo_result;
+//! ```
+//! This code should only compile in modules where the uninhabitedness of `Foo`
+//! is visible.
-use crate::ty;
use crate::ty::context::TyCtxt;
-use crate::ty::{AdtDef, FieldDef, Ty, VariantDef};
-use crate::ty::{AdtKind, Visibility};
-use crate::ty::{DefId, SubstsRef};
+use crate::ty::{self, DefId, Ty, VariantDef, Visibility};
use rustc_type_ir::sty::TyKind::*;
-mod def_id_forest;
+pub mod inhabited_predicate;
-// The methods in this module calculate `DefIdForest`s of modules in which an
-// `AdtDef`/`VariantDef`/`FieldDef` is visibly uninhabited.
-//
-// # Example
-// ```rust
-// enum Void {}
-// mod a {
-// pub mod b {
-// pub struct SecretlyUninhabited {
-// _priv: !,
-// }
-// }
-// }
-//
-// mod c {
-// pub struct AlsoSecretlyUninhabited {
-// _priv: Void,
-// }
-// mod d {
-// }
-// }
-//
-// struct Foo {
-// x: a::b::SecretlyUninhabited,
-// y: c::AlsoSecretlyUninhabited,
-// }
-// ```
-// In this code, the type `Foo` will only be visibly uninhabited inside the
-// modules `b`, `c` and `d`. Calling `uninhabited_from` on `Foo` or its `AdtDef` will
-// return the forest of modules {`b`, `c`->`d`} (represented in a `DefIdForest` by the
-// set {`b`, `c`}).
-//
-// We need this information for pattern-matching on `Foo` or types that contain
-// `Foo`.
-//
-// # Example
-// ```rust
-// let foo_result: Result<T, Foo> = ... ;
-// let Ok(t) = foo_result;
-// ```
-// This code should only compile in modules where the uninhabitedness of `Foo` is
-// visible.
+pub use inhabited_predicate::InhabitedPredicate;
+
+pub(crate) fn provide(providers: &mut ty::query::Providers) {
+ *providers =
+ ty::query::Providers { inhabited_predicate_adt, inhabited_predicate_type, ..*providers };
+}
impl<'tcx> TyCtxt<'tcx> {
/// Checks whether a type is visibly uninhabited from a particular module.
@@ -100,131 +103,92 @@ impl<'tcx> TyCtxt<'tcx> {
ty: Ty<'tcx>,
param_env: ty::ParamEnv<'tcx>,
) -> bool {
- // To check whether this type is uninhabited at all (not just from the
- // given node), you could check whether the forest is empty.
- // ```
- // forest.is_empty()
- // ```
- ty.uninhabited_from(self, param_env).contains(self, module)
+ !ty.inhabited_predicate(self).apply(self, param_env, module)
}
}
-impl<'tcx> AdtDef<'tcx> {
- /// Calculates the forest of `DefId`s from which this ADT is visibly uninhabited.
- fn uninhabited_from(
- self,
- tcx: TyCtxt<'tcx>,
- substs: SubstsRef<'tcx>,
- param_env: ty::ParamEnv<'tcx>,
- ) -> DefIdForest<'tcx> {
- // Non-exhaustive ADTs from other crates are always considered inhabited.
- if self.is_variant_list_non_exhaustive() && !self.did().is_local() {
- DefIdForest::empty()
- } else {
- DefIdForest::intersection(
- tcx,
- self.variants()
- .iter()
- .map(|v| v.uninhabited_from(tcx, substs, self.adt_kind(), param_env)),
- )
+/// Returns an `InhabitedPredicate` that is generic over type parameters and
+/// requires calling [`InhabitedPredicate::subst`]
+fn inhabited_predicate_adt(tcx: TyCtxt<'_>, def_id: DefId) -> InhabitedPredicate<'_> {
+ if let Some(def_id) = def_id.as_local() {
+ if matches!(tcx.representability(def_id), ty::Representability::Infinite) {
+ return InhabitedPredicate::True;
}
}
+ let adt = tcx.adt_def(def_id);
+ InhabitedPredicate::any(
+ tcx,
+ adt.variants().iter().map(|variant| variant.inhabited_predicate(tcx, adt)),
+ )
}
impl<'tcx> VariantDef {
/// Calculates the forest of `DefId`s from which this variant is visibly uninhabited.
- pub fn uninhabited_from(
+ pub fn inhabited_predicate(
&self,
tcx: TyCtxt<'tcx>,
- substs: SubstsRef<'tcx>,
- adt_kind: AdtKind,
- param_env: ty::ParamEnv<'tcx>,
- ) -> DefIdForest<'tcx> {
- let is_enum = match adt_kind {
- // For now, `union`s are never considered uninhabited.
- // The precise semantics of inhabitedness with respect to unions is currently undecided.
- AdtKind::Union => return DefIdForest::empty(),
- AdtKind::Enum => true,
- AdtKind::Struct => false,
- };
- // Non-exhaustive variants from other crates are always considered inhabited.
+ adt: ty::AdtDef<'_>,
+ ) -> InhabitedPredicate<'tcx> {
+ debug_assert!(!adt.is_union());
if self.is_field_list_non_exhaustive() && !self.def_id.is_local() {
- DefIdForest::empty()
- } else {
- DefIdForest::union(
- tcx,
- self.fields.iter().map(|f| f.uninhabited_from(tcx, substs, is_enum, param_env)),
- )
+ // Non-exhaustive variants from other crates are always considered inhabited.
+ return InhabitedPredicate::True;
}
- }
-}
-
-impl<'tcx> FieldDef {
- /// Calculates the forest of `DefId`s from which this field is visibly uninhabited.
- fn uninhabited_from(
- &self,
- tcx: TyCtxt<'tcx>,
- substs: SubstsRef<'tcx>,
- is_enum: bool,
- param_env: ty::ParamEnv<'tcx>,
- ) -> DefIdForest<'tcx> {
- let data_uninhabitedness = move || self.ty(tcx, substs).uninhabited_from(tcx, param_env);
- if is_enum {
- data_uninhabitedness()
- } else {
- match self.vis {
- Visibility::Restricted(from) => {
- let forest = DefIdForest::from_id(from);
- let iter = Some(forest).into_iter().chain(Some(data_uninhabitedness()));
- DefIdForest::intersection(tcx, iter)
+ InhabitedPredicate::all(
+ tcx,
+ self.fields.iter().map(|field| {
+ let pred = tcx.type_of(field.did).inhabited_predicate(tcx);
+ if adt.is_enum() {
+ return pred;
}
- Visibility::Public => data_uninhabitedness(),
- }
- }
+ match field.vis {
+ Visibility::Public => pred,
+ Visibility::Restricted(from) => {
+ pred.or(tcx, InhabitedPredicate::NotInModule(from))
+ }
+ }
+ }),
+ )
}
}
impl<'tcx> Ty<'tcx> {
- /// Calculates the forest of `DefId`s from which this type is visibly uninhabited.
- fn uninhabited_from(
- self,
- tcx: TyCtxt<'tcx>,
- param_env: ty::ParamEnv<'tcx>,
- ) -> DefIdForest<'tcx> {
- tcx.type_uninhabited_from(param_env.and(self))
+ pub fn inhabited_predicate(self, tcx: TyCtxt<'tcx>) -> InhabitedPredicate<'tcx> {
+ match self.kind() {
+ // For now, `union`s are always considered inhabited
+ Adt(adt, _) if adt.is_union() => InhabitedPredicate::True,
+ // Non-exhaustive ADTs from other crates are always considered inhabited
+ Adt(adt, _) if adt.is_variant_list_non_exhaustive() && !adt.did().is_local() => {
+ InhabitedPredicate::True
+ }
+ Never => InhabitedPredicate::False,
+ Param(_) | Projection(_) => InhabitedPredicate::GenericType(self),
+ Tuple(tys) if tys.is_empty() => InhabitedPredicate::True,
+ // use a query for more complex cases
+ Adt(..) | Array(..) | Tuple(_) => tcx.inhabited_predicate_type(self),
+ // references and other types are inhabited
+ _ => InhabitedPredicate::True,
+ }
}
}
-// Query provider for `type_uninhabited_from`.
-pub(crate) fn type_uninhabited_from<'tcx>(
- tcx: TyCtxt<'tcx>,
- key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
-) -> DefIdForest<'tcx> {
- let ty = key.value;
- let param_env = key.param_env;
+/// N.B. this query should only be called through `Ty::inhabited_predicate`
+fn inhabited_predicate_type<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> InhabitedPredicate<'tcx> {
match *ty.kind() {
- Adt(def, substs) => def.uninhabited_from(tcx, substs, param_env),
+ Adt(adt, substs) => tcx.inhabited_predicate_adt(adt.did()).subst(tcx, substs),
- Never => DefIdForest::full(),
-
- Tuple(ref tys) => {
- DefIdForest::union(tcx, tys.iter().map(|ty| ty.uninhabited_from(tcx, param_env)))
+ Tuple(tys) => {
+ InhabitedPredicate::all(tcx, tys.iter().map(|ty| ty.inhabited_predicate(tcx)))
}
- Array(ty, len) => match len.try_eval_usize(tcx, param_env) {
- Some(0) | None => DefIdForest::empty(),
- // If the array is definitely non-empty, it's uninhabited if
- // the type of its elements is uninhabited.
- Some(1..) => ty.uninhabited_from(tcx, param_env),
+ // If we can evaluate the array length before having a `ParamEnv`, then
+ // we can simplify the predicate. This is an optimization.
+ Array(ty, len) => match len.kind().try_to_machine_usize(tcx) {
+ Some(0) => InhabitedPredicate::True,
+ Some(1..) => ty.inhabited_predicate(tcx),
+ None => ty.inhabited_predicate(tcx).or(tcx, InhabitedPredicate::ConstIsZero(len)),
},
- // References to uninitialised memory are valid for any type, including
- // uninhabited types, in unsafe code, so we treat all references as
- // inhabited.
- // The precise semantics of inhabitedness with respect to references is currently
- // undecided.
- Ref(..) => DefIdForest::empty(),
-
- _ => DefIdForest::empty(),
+ _ => bug!("unexpected TyKind, use `Ty::inhabited_predicate`"),
}
}
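To make the `Foo` example from the new module doc concrete, here is a much-simplified standalone model of how a predicate like `NotInModule(b) AND NotInModule(c)` evaluates. The `Pred` enum and the path-prefix stand-in for `is_descendant_of` are illustrative only, not the compiler's arena-allocated `InhabitedPredicate`:

```rust
enum Pred {
    True,
    False,
    NotInModule(&'static str),
    And(Box<Pred>, Box<Pred>),
    Or(Box<Pred>, Box<Pred>),
}

impl Pred {
    // `module` is the module we are checking visibility from, given as a path.
    fn apply(&self, module: &str) -> bool {
        match self {
            Pred::True => true,
            Pred::False => false,
            // Uninhabited when viewed from inside `m` or one of its submodules.
            Pred::NotInModule(m) => !module.starts_with(*m),
            Pred::And(a, b) => a.apply(module) && b.apply(module),
            Pred::Or(a, b) => a.apply(module) || b.apply(module),
        }
    }
}

fn main() {
    // Predicate for `Foo` from the doc comment: uninhabited inside `a::b` and inside `c`.
    let foo = Pred::And(
        Box::new(Pred::NotInModule("crate::a::b")),
        Box::new(Pred::NotInModule("crate::c")),
    );

    assert!(foo.apply("crate"));        // visibly inhabited at the crate root
    assert!(!foo.apply("crate::a::b")); // visibly uninhabited inside `b`
    assert!(!foo.apply("crate::c::d")); // and inside `c`'s submodule `d`
}
```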
diff --git a/compiler/rustc_middle/src/ty/instance.rs b/compiler/rustc_middle/src/ty/instance.rs
index 9afd66207..6c1414f7b 100644
--- a/compiler/rustc_middle/src/ty/instance.rs
+++ b/compiler/rustc_middle/src/ty/instance.rs
@@ -1,9 +1,7 @@
use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use crate::ty::print::{FmtPrinter, Printer};
-use crate::ty::subst::{InternalSubsts, Subst};
-use crate::ty::{
- self, EarlyBinder, SubstsRef, Ty, TyCtxt, TypeFoldable, TypeSuperFoldable, TypeVisitable,
-};
+use crate::ty::{self, Ty, TyCtxt, TypeFoldable, TypeSuperFoldable, TypeVisitable};
+use crate::ty::{EarlyBinder, InternalSubsts, SubstsRef};
use rustc_errors::ErrorGuaranteed;
use rustc_hir::def::Namespace;
use rustc_hir::def_id::{CrateNum, DefId};
diff --git a/compiler/rustc_middle/src/ty/layout.rs b/compiler/rustc_middle/src/ty/layout.rs
index 042eeec3f..3312f44c6 100644
--- a/compiler/rustc_middle/src/ty/layout.rs
+++ b/compiler/rustc_middle/src/ty/layout.rs
@@ -1,41 +1,23 @@
use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
-use crate::mir::{GeneratorLayout, GeneratorSavedLocal};
use crate::ty::normalize_erasing_regions::NormalizationError;
-use crate::ty::subst::Subst;
-use crate::ty::{
- self, layout_sanity_check::sanity_check_layout, subst::SubstsRef, EarlyBinder, ReprOptions, Ty,
- TyCtxt, TypeVisitable,
-};
+use crate::ty::{self, ReprOptions, Ty, TyCtxt, TypeVisitable};
use rustc_ast as ast;
use rustc_attr as attr;
+use rustc_errors::{DiagnosticBuilder, Handler, IntoDiagnostic};
use rustc_hir as hir;
use rustc_hir::def_id::DefId;
-use rustc_hir::lang_items::LangItem;
-use rustc_index::bit_set::BitSet;
-use rustc_index::vec::{Idx, IndexVec};
-use rustc_session::{config::OptLevel, DataTypeKind, FieldInfo, SizeKind, VariantInfo};
-use rustc_span::symbol::Symbol;
+use rustc_index::vec::Idx;
+use rustc_session::config::OptLevel;
use rustc_span::{Span, DUMMY_SP};
-use rustc_target::abi::call::{
- ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, Reg, RegKind,
-};
+use rustc_target::abi::call::FnAbi;
use rustc_target::abi::*;
use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy, Target};
-use std::cmp::{self, Ordering};
+use std::cmp::{self};
use std::fmt;
-use std::iter;
use std::num::NonZeroUsize;
use std::ops::Bound;
-use rand::{seq::SliceRandom, SeedableRng};
-use rand_xoshiro::Xoshiro128StarStar;
-
-pub fn provide(providers: &mut ty::query::Providers) {
- *providers =
- ty::query::Providers { layout_of, fn_abi_of_fn_ptr, fn_abi_of_instance, ..*providers };
-}
-
pub trait IntegerExt {
fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>;
fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer;
@@ -207,6 +189,31 @@ pub enum LayoutError<'tcx> {
NormalizationFailure(Ty<'tcx>, NormalizationError<'tcx>),
}
+impl<'a> IntoDiagnostic<'a, !> for LayoutError<'a> {
+ fn into_diagnostic(self, handler: &'a Handler) -> DiagnosticBuilder<'a, !> {
+ let mut diag = handler.struct_fatal("");
+
+ match self {
+ LayoutError::Unknown(ty) => {
+ diag.set_arg("ty", ty);
+ diag.set_primary_message(rustc_errors::fluent::middle_unknown_layout);
+ }
+ LayoutError::SizeOverflow(ty) => {
+ diag.set_arg("ty", ty);
+ diag.set_primary_message(rustc_errors::fluent::middle_values_too_big);
+ }
+ LayoutError::NormalizationFailure(ty, e) => {
+ diag.set_arg("ty", ty);
+ diag.set_arg("failure_ty", e.get_type_for_failure());
+ diag.set_primary_message(rustc_errors::fluent::middle_cannot_be_normalized);
+ }
+ }
+ diag
+ }
+}
+
+// FIXME: Once the other errors that embed this error have been converted to translatable
+// diagnostics, this Display impl should be removed.
impl<'tcx> fmt::Display for LayoutError<'tcx> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
@@ -224,1814 +231,12 @@ impl<'tcx> fmt::Display for LayoutError<'tcx> {
}
}
-#[instrument(skip(tcx, query), level = "debug")]
-fn layout_of<'tcx>(
- tcx: TyCtxt<'tcx>,
- query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>,
-) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> {
- let (param_env, ty) = query.into_parts();
- debug!(?ty);
-
- let param_env = param_env.with_reveal_all_normalized(tcx);
- let unnormalized_ty = ty;
-
- // FIXME: We might want to have two different versions of `layout_of`:
- // One that can be called after typecheck has completed and can use
- // `normalize_erasing_regions` here and another one that can be called
- // before typecheck has completed and uses `try_normalize_erasing_regions`.
- let ty = match tcx.try_normalize_erasing_regions(param_env, ty) {
- Ok(t) => t,
- Err(normalization_error) => {
- return Err(LayoutError::NormalizationFailure(ty, normalization_error));
- }
- };
-
- if ty != unnormalized_ty {
- // Ensure this layout is also cached for the normalized type.
- return tcx.layout_of(param_env.and(ty));
- }
-
- let cx = LayoutCx { tcx, param_env };
-
- let layout = cx.layout_of_uncached(ty)?;
- let layout = TyAndLayout { ty, layout };
-
- cx.record_layout_for_printing(layout);
-
- sanity_check_layout(&cx, &layout);
-
- Ok(layout)
-}
-
#[derive(Clone, Copy)]
pub struct LayoutCx<'tcx, C> {
pub tcx: C,
pub param_env: ty::ParamEnv<'tcx>,
}
-#[derive(Copy, Clone, Debug)]
-enum StructKind {
- /// A tuple, closure, or univariant which cannot be coerced to unsized.
- AlwaysSized,
- /// A univariant, the last field of which may be coerced to unsized.
- MaybeUnsized,
- /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag).
- Prefixed(Size, Align),
-}
-
-// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`.
-// This is used to go between `memory_index` (source field order to memory order)
-// and `inverse_memory_index` (memory order to source field order).
-// See also `FieldsShape::Arbitrary::memory_index` for more details.
-// FIXME(eddyb) build a better abstraction for permutations, if possible.
-fn invert_mapping(map: &[u32]) -> Vec<u32> {
- let mut inverse = vec![0; map.len()];
- for i in 0..map.len() {
- inverse[map[i] as usize] = i as u32;
- }
- inverse
-}
-
-impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
- fn scalar_pair(&self, a: Scalar, b: Scalar) -> LayoutS<'tcx> {
- let dl = self.data_layout();
- let b_align = b.align(dl);
- let align = a.align(dl).max(b_align).max(dl.aggregate_align);
- let b_offset = a.size(dl).align_to(b_align.abi);
- let size = (b_offset + b.size(dl)).align_to(align.abi);
-
- // HACK(nox): We iter on `b` and then `a` because `max_by_key`
- // returns the last maximum.
- let largest_niche = Niche::from_scalar(dl, b_offset, b)
- .into_iter()
- .chain(Niche::from_scalar(dl, Size::ZERO, a))
- .max_by_key(|niche| niche.available(dl));
-
- LayoutS {
- variants: Variants::Single { index: VariantIdx::new(0) },
- fields: FieldsShape::Arbitrary {
- offsets: vec![Size::ZERO, b_offset],
- memory_index: vec![0, 1],
- },
- abi: Abi::ScalarPair(a, b),
- largest_niche,
- align,
- size,
- }
- }
-
- fn univariant_uninterned(
- &self,
- ty: Ty<'tcx>,
- fields: &[TyAndLayout<'_>],
- repr: &ReprOptions,
- kind: StructKind,
- ) -> Result<LayoutS<'tcx>, LayoutError<'tcx>> {
- let dl = self.data_layout();
- let pack = repr.pack;
- if pack.is_some() && repr.align.is_some() {
- self.tcx.sess.delay_span_bug(DUMMY_SP, "struct cannot be packed and aligned");
- return Err(LayoutError::Unknown(ty));
- }
-
- let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align };
-
- let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect();
-
- let optimize = !repr.inhibit_struct_field_reordering_opt();
- if optimize {
- let end =
- if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() };
- let optimizing = &mut inverse_memory_index[..end];
- let field_align = |f: &TyAndLayout<'_>| {
- if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi }
- };
-
- // If `-Z randomize-layout` was enabled for the type definition we can shuffle
- // the field ordering to try and catch some code making assumptions about layouts
- // we don't guarantee
- if repr.can_randomize_type_layout() {
- // `ReprOptions.layout_seed` is a deterministic seed that we can use to
- // randomize field ordering with
- let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed);
-
- // Shuffle the ordering of the fields
- optimizing.shuffle(&mut rng);
-
- // Otherwise we just leave things alone and actually optimize the type's fields
- } else {
- match kind {
- StructKind::AlwaysSized | StructKind::MaybeUnsized => {
- optimizing.sort_by_key(|&x| {
- // Place ZSTs first to avoid "interesting offsets",
- // especially with only one or two non-ZST fields.
- let f = &fields[x as usize];
- (!f.is_zst(), cmp::Reverse(field_align(f)))
- });
- }
-
- StructKind::Prefixed(..) => {
- // Sort in ascending alignment so that the layout stays optimal
- // regardless of the prefix
- optimizing.sort_by_key(|&x| field_align(&fields[x as usize]));
- }
- }
-
- // FIXME(Kixiron): We can always shuffle fields within a given alignment class
- // regardless of the status of `-Z randomize-layout`
- }
- }
-
- // inverse_memory_index holds field indices by increasing memory offset.
- // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5.
- // We now write field offsets to the corresponding offset slot;
- // field 5 with offset 0 puts 0 in offsets[5].
- // At the bottom of this function, we invert `inverse_memory_index` to
- // produce `memory_index` (see `invert_mapping`).
-
- let mut sized = true;
- let mut offsets = vec![Size::ZERO; fields.len()];
- let mut offset = Size::ZERO;
- let mut largest_niche = None;
- let mut largest_niche_available = 0;
-
- if let StructKind::Prefixed(prefix_size, prefix_align) = kind {
- let prefix_align =
- if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align };
- align = align.max(AbiAndPrefAlign::new(prefix_align));
- offset = prefix_size.align_to(prefix_align);
- }
-
- for &i in &inverse_memory_index {
- let field = fields[i as usize];
- if !sized {
- self.tcx.sess.delay_span_bug(
- DUMMY_SP,
- &format!(
- "univariant: field #{} of `{}` comes after unsized field",
- offsets.len(),
- ty
- ),
- );
- }
-
- if field.is_unsized() {
- sized = false;
- }
-
- // Invariant: offset < dl.obj_size_bound() <= 1<<61
- let field_align = if let Some(pack) = pack {
- field.align.min(AbiAndPrefAlign::new(pack))
- } else {
- field.align
- };
- offset = offset.align_to(field_align.abi);
- align = align.max(field_align);
-
- debug!("univariant offset: {:?} field: {:#?}", offset, field);
- offsets[i as usize] = offset;
-
- if let Some(mut niche) = field.largest_niche {
- let available = niche.available(dl);
- if available > largest_niche_available {
- largest_niche_available = available;
- niche.offset += offset;
- largest_niche = Some(niche);
- }
- }
-
- offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?;
- }
-
- if let Some(repr_align) = repr.align {
- align = align.max(AbiAndPrefAlign::new(repr_align));
- }
-
- debug!("univariant min_size: {:?}", offset);
- let min_size = offset;
-
- // As stated above, inverse_memory_index holds field indices by increasing offset.
- // This makes it an already-sorted view of the offsets vec.
- // To invert it, consider:
- // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0.
- // Field 5 would be the first element, so memory_index is i:
- // Note: if we didn't optimize, it's already right.
-
- let memory_index =
- if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index };
-
- let size = min_size.align_to(align.abi);
- let mut abi = Abi::Aggregate { sized };
-
- // Unpack newtype ABIs and find scalar pairs.
- if sized && size.bytes() > 0 {
- // All other fields must be ZSTs.
- let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst());
-
- match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) {
- // We have exactly one non-ZST field.
- (Some((i, field)), None, None) => {
- // Field fills the struct and it has a scalar or scalar pair ABI.
- if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size
- {
- match field.abi {
- // For plain scalars, or vectors of them, we can't unpack
- // newtypes for `#[repr(C)]`, as that affects C ABIs.
- Abi::Scalar(_) | Abi::Vector { .. } if optimize => {
- abi = field.abi;
- }
- // But scalar pairs are Rust-specific and get
- // treated as aggregates by C ABIs anyway.
- Abi::ScalarPair(..) => {
- abi = field.abi;
- }
- _ => {}
- }
- }
- }
-
- // Two non-ZST fields, and they're both scalars.
- (Some((i, a)), Some((j, b)), None) => {
- match (a.abi, b.abi) {
- (Abi::Scalar(a), Abi::Scalar(b)) => {
- // Order by the memory placement, not source order.
- let ((i, a), (j, b)) = if offsets[i] < offsets[j] {
- ((i, a), (j, b))
- } else {
- ((j, b), (i, a))
- };
- let pair = self.scalar_pair(a, b);
- let pair_offsets = match pair.fields {
- FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
- assert_eq!(memory_index, &[0, 1]);
- offsets
- }
- _ => bug!(),
- };
- if offsets[i] == pair_offsets[0]
- && offsets[j] == pair_offsets[1]
- && align == pair.align
- && size == pair.size
- {
- // We can use `ScalarPair` only when it matches our
- // already computed layout (including `#[repr(C)]`).
- abi = pair.abi;
- }
- }
- _ => {}
- }
- }
-
- _ => {}
- }
- }
-
- if fields.iter().any(|f| f.abi.is_uninhabited()) {
- abi = Abi::Uninhabited;
- }
-
- Ok(LayoutS {
- variants: Variants::Single { index: VariantIdx::new(0) },
- fields: FieldsShape::Arbitrary { offsets, memory_index },
- abi,
- largest_niche,
- align,
- size,
- })
- }
-
- fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
- let tcx = self.tcx;
- let param_env = self.param_env;
- let dl = self.data_layout();
- let scalar_unit = |value: Primitive| {
- let size = value.size(dl);
- assert!(size.bits() <= 128);
- Scalar::Initialized { value, valid_range: WrappingRange::full(size) }
- };
- let scalar =
- |value: Primitive| tcx.intern_layout(LayoutS::scalar(self, scalar_unit(value)));
-
- let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| {
- Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?))
- };
- debug_assert!(!ty.has_infer_types_or_consts());
-
- Ok(match *ty.kind() {
- // Basic scalars.
- ty::Bool => tcx.intern_layout(LayoutS::scalar(
- self,
- Scalar::Initialized {
- value: Int(I8, false),
- valid_range: WrappingRange { start: 0, end: 1 },
- },
- )),
- ty::Char => tcx.intern_layout(LayoutS::scalar(
- self,
- Scalar::Initialized {
- value: Int(I32, false),
- valid_range: WrappingRange { start: 0, end: 0x10FFFF },
- },
- )),
- ty::Int(ity) => scalar(Int(Integer::from_int_ty(dl, ity), true)),
- ty::Uint(ity) => scalar(Int(Integer::from_uint_ty(dl, ity), false)),
- ty::Float(fty) => scalar(match fty {
- ty::FloatTy::F32 => F32,
- ty::FloatTy::F64 => F64,
- }),
- ty::FnPtr(_) => {
- let mut ptr = scalar_unit(Pointer);
- ptr.valid_range_mut().start = 1;
- tcx.intern_layout(LayoutS::scalar(self, ptr))
- }
-
- // The never type.
- ty::Never => tcx.intern_layout(LayoutS {
- variants: Variants::Single { index: VariantIdx::new(0) },
- fields: FieldsShape::Primitive,
- abi: Abi::Uninhabited,
- largest_niche: None,
- align: dl.i8_align,
- size: Size::ZERO,
- }),
-
- // Potentially-wide pointers.
- ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => {
- let mut data_ptr = scalar_unit(Pointer);
- if !ty.is_unsafe_ptr() {
- data_ptr.valid_range_mut().start = 1;
- }
-
- let pointee = tcx.normalize_erasing_regions(param_env, pointee);
- if pointee.is_sized(tcx.at(DUMMY_SP), param_env) {
- return Ok(tcx.intern_layout(LayoutS::scalar(self, data_ptr)));
- }
-
- let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
- let metadata = match unsized_part.kind() {
- ty::Foreign(..) => {
- return Ok(tcx.intern_layout(LayoutS::scalar(self, data_ptr)));
- }
- ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)),
- ty::Dynamic(..) => {
- let mut vtable = scalar_unit(Pointer);
- vtable.valid_range_mut().start = 1;
- vtable
- }
- _ => return Err(LayoutError::Unknown(unsized_part)),
- };
-
- // Effectively a (ptr, meta) tuple.
- tcx.intern_layout(self.scalar_pair(data_ptr, metadata))
- }
-
- ty::Dynamic(_, _, ty::DynStar) => {
- let mut data = scalar_unit(Int(dl.ptr_sized_integer(), false));
- data.valid_range_mut().start = 0;
- let mut vtable = scalar_unit(Pointer);
- vtable.valid_range_mut().start = 1;
- tcx.intern_layout(self.scalar_pair(data, vtable))
- }
-
- // Arrays and slices.
- ty::Array(element, mut count) => {
- if count.has_projections() {
- count = tcx.normalize_erasing_regions(param_env, count);
- if count.has_projections() {
- return Err(LayoutError::Unknown(ty));
- }
- }
-
- let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?;
- let element = self.layout_of(element)?;
- let size =
- element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?;
-
- let abi =
- if count != 0 && tcx.conservative_is_privately_uninhabited(param_env.and(ty)) {
- Abi::Uninhabited
- } else {
- Abi::Aggregate { sized: true }
- };
-
- let largest_niche = if count != 0 { element.largest_niche } else { None };
-
- tcx.intern_layout(LayoutS {
- variants: Variants::Single { index: VariantIdx::new(0) },
- fields: FieldsShape::Array { stride: element.size, count },
- abi,
- largest_niche,
- align: element.align,
- size,
- })
- }
- ty::Slice(element) => {
- let element = self.layout_of(element)?;
- tcx.intern_layout(LayoutS {
- variants: Variants::Single { index: VariantIdx::new(0) },
- fields: FieldsShape::Array { stride: element.size, count: 0 },
- abi: Abi::Aggregate { sized: false },
- largest_niche: None,
- align: element.align,
- size: Size::ZERO,
- })
- }
- ty::Str => tcx.intern_layout(LayoutS {
- variants: Variants::Single { index: VariantIdx::new(0) },
- fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 },
- abi: Abi::Aggregate { sized: false },
- largest_niche: None,
- align: dl.i8_align,
- size: Size::ZERO,
- }),
-
- // Odd unit types.
- ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?,
- ty::Dynamic(_, _, ty::Dyn) | ty::Foreign(..) => {
- let mut unit = self.univariant_uninterned(
- ty,
- &[],
- &ReprOptions::default(),
- StructKind::AlwaysSized,
- )?;
- match unit.abi {
- Abi::Aggregate { ref mut sized } => *sized = false,
- _ => bug!(),
- }
- tcx.intern_layout(unit)
- }
-
- ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?,
-
- ty::Closure(_, ref substs) => {
- let tys = substs.as_closure().upvar_tys();
- univariant(
- &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?,
- &ReprOptions::default(),
- StructKind::AlwaysSized,
- )?
- }
-
- ty::Tuple(tys) => {
- let kind =
- if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized };
-
- univariant(
- &tys.iter().map(|k| self.layout_of(k)).collect::<Result<Vec<_>, _>>()?,
- &ReprOptions::default(),
- kind,
- )?
- }
-
- // SIMD vector types.
- ty::Adt(def, substs) if def.repr().simd() => {
- if !def.is_struct() {
- // Should have yielded E0517 by now.
- tcx.sess.delay_span_bug(
- DUMMY_SP,
- "#[repr(simd)] was applied to an ADT that is not a struct",
- );
- return Err(LayoutError::Unknown(ty));
- }
-
- // Supported SIMD vectors are homogeneous ADTs with at least one field:
- //
- // * #[repr(simd)] struct S(T, T, T, T);
- // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T }
- // * #[repr(simd)] struct S([T; 4])
- //
- // where T is a primitive scalar (integer/float/pointer).
-
- // SIMD vectors with zero fields are not supported.
- // (should be caught by typeck)
- if def.non_enum_variant().fields.is_empty() {
- tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
- }
-
- // Type of the first ADT field:
- let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs);
-
- // Heterogeneous SIMD vectors are not supported:
- // (should be caught by typeck)
- for fi in &def.non_enum_variant().fields {
- if fi.ty(tcx, substs) != f0_ty {
- tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty));
- }
- }
-
- // The element type and number of elements of the SIMD vector
- // are obtained from:
- //
- // * the element type and length of the single array field, if
- // the first field is of array type, or
- //
- // * the homogeneous field type and the number of fields.
- let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() {
- // First ADT field is an array:
-
- // SIMD vectors with multiple array fields are not supported:
- // (should be caught by typeck)
- if def.non_enum_variant().fields.len() != 1 {
- tcx.sess.fatal(&format!(
- "monomorphising SIMD type `{}` with more than one array field",
- ty
- ));
- }
-
- // Extract the number of elements from the layout of the array field:
- let FieldsShape::Array { count, .. } = self.layout_of(f0_ty)?.layout.fields() else {
- return Err(LayoutError::Unknown(ty));
- };
-
- (*e_ty, *count, true)
- } else {
- // First ADT field is not an array:
- (f0_ty, def.non_enum_variant().fields.len() as _, false)
- };
-
- // SIMD vectors of zero length are not supported.
- // Additionally, lengths are capped at 2^16 as a fixed maximum backends must
- // support.
- //
- // Can't be caught in typeck if the array length is generic.
- if e_len == 0 {
- tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty));
- } else if e_len > MAX_SIMD_LANES {
- tcx.sess.fatal(&format!(
- "monomorphising SIMD type `{}` of length greater than {}",
- ty, MAX_SIMD_LANES,
- ));
- }
-
- // Compute the ABI of the element type:
- let e_ly = self.layout_of(e_ty)?;
- let Abi::Scalar(e_abi) = e_ly.abi else {
- // This error isn't caught in typeck, e.g., if
- // the element type of the vector is generic.
- tcx.sess.fatal(&format!(
- "monomorphising SIMD type `{}` with a non-primitive-scalar \
- (integer/float/pointer) element type `{}`",
- ty, e_ty
- ))
- };
-
- // Compute the size and alignment of the vector:
- let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?;
- let align = dl.vector_align(size);
- let size = size.align_to(align.abi);
-
- // Compute the placement of the vector fields:
- let fields = if is_array {
- FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] }
- } else {
- FieldsShape::Array { stride: e_ly.size, count: e_len }
- };
-
- tcx.intern_layout(LayoutS {
- variants: Variants::Single { index: VariantIdx::new(0) },
- fields,
- abi: Abi::Vector { element: e_abi, count: e_len },
- largest_niche: e_ly.largest_niche,
- size,
- align,
- })
- }
-
- // ADTs.
- ty::Adt(def, substs) => {
- // Cache the field layouts.
- let variants = def
- .variants()
- .iter()
- .map(|v| {
- v.fields
- .iter()
- .map(|field| self.layout_of(field.ty(tcx, substs)))
- .collect::<Result<Vec<_>, _>>()
- })
- .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
-
- if def.is_union() {
- if def.repr().pack.is_some() && def.repr().align.is_some() {
- self.tcx.sess.delay_span_bug(
- tcx.def_span(def.did()),
- "union cannot be packed and aligned",
- );
- return Err(LayoutError::Unknown(ty));
- }
-
- let mut align =
- if def.repr().pack.is_some() { dl.i8_align } else { dl.aggregate_align };
-
- if let Some(repr_align) = def.repr().align {
- align = align.max(AbiAndPrefAlign::new(repr_align));
- }
-
- let optimize = !def.repr().inhibit_union_abi_opt();
- let mut size = Size::ZERO;
- let mut abi = Abi::Aggregate { sized: true };
- let index = VariantIdx::new(0);
- for field in &variants[index] {
- assert!(!field.is_unsized());
- align = align.max(field.align);
-
- // If all non-ZST fields have the same ABI, forward this ABI
- if optimize && !field.is_zst() {
- // Discard valid range information and allow undef
- let field_abi = match field.abi {
- Abi::Scalar(x) => Abi::Scalar(x.to_union()),
- Abi::ScalarPair(x, y) => {
- Abi::ScalarPair(x.to_union(), y.to_union())
- }
- Abi::Vector { element: x, count } => {
- Abi::Vector { element: x.to_union(), count }
- }
- Abi::Uninhabited | Abi::Aggregate { .. } => {
- Abi::Aggregate { sized: true }
- }
- };
-
- if size == Size::ZERO {
- // first non ZST: initialize 'abi'
- abi = field_abi;
- } else if abi != field_abi {
- // different fields have different ABI: reset to Aggregate
- abi = Abi::Aggregate { sized: true };
- }
- }
-
- size = cmp::max(size, field.size);
- }
-
- if let Some(pack) = def.repr().pack {
- align = align.min(AbiAndPrefAlign::new(pack));
- }
-
- return Ok(tcx.intern_layout(LayoutS {
- variants: Variants::Single { index },
- fields: FieldsShape::Union(
- NonZeroUsize::new(variants[index].len())
- .ok_or(LayoutError::Unknown(ty))?,
- ),
- abi,
- largest_niche: None,
- align,
- size: size.align_to(align.abi),
- }));
- }
-
- // A variant is absent if it's uninhabited and only has ZST fields.
- // Present uninhabited variants only require space for their fields,
- // but *not* an encoding of the discriminant (e.g., a tag value).
- // See issue #49298 for more details on the need to leave space
- // for non-ZST uninhabited data (mostly partial initialization).
- let absent = |fields: &[TyAndLayout<'_>]| {
- let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited());
- let is_zst = fields.iter().all(|f| f.is_zst());
- uninhabited && is_zst
- };
- let (present_first, present_second) = {
- let mut present_variants = variants
- .iter_enumerated()
- .filter_map(|(i, v)| if absent(v) { None } else { Some(i) });
- (present_variants.next(), present_variants.next())
- };
- let present_first = match present_first {
- Some(present_first) => present_first,
- // Uninhabited because it has no variants, or only absent ones.
- None if def.is_enum() => {
- return Ok(tcx.layout_of(param_env.and(tcx.types.never))?.layout);
- }
- // If it's a struct, still compute a layout so that we can still compute the
- // field offsets.
- None => VariantIdx::new(0),
- };
-
- let is_struct = !def.is_enum() ||
- // Only one variant is present.
- (present_second.is_none() &&
- // Representation optimizations are allowed.
- !def.repr().inhibit_enum_layout_opt());
- if is_struct {
- // Struct, or univariant enum equivalent to a struct.
- // (Typechecking will reject discriminant-sizing attrs.)
-
- let v = present_first;
- let kind = if def.is_enum() || variants[v].is_empty() {
- StructKind::AlwaysSized
- } else {
- let param_env = tcx.param_env(def.did());
- let last_field = def.variant(v).fields.last().unwrap();
- let always_sized =
- tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env);
- if !always_sized {
- StructKind::MaybeUnsized
- } else {
- StructKind::AlwaysSized
- }
- };
-
- let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr(), kind)?;
- st.variants = Variants::Single { index: v };
-
- if def.is_unsafe_cell() {
- let hide_niches = |scalar: &mut _| match scalar {
- Scalar::Initialized { value, valid_range } => {
- *valid_range = WrappingRange::full(value.size(dl))
- }
- // Already doesn't have any niches
- Scalar::Union { .. } => {}
- };
- match &mut st.abi {
- Abi::Uninhabited => {}
- Abi::Scalar(scalar) => hide_niches(scalar),
- Abi::ScalarPair(a, b) => {
- hide_niches(a);
- hide_niches(b);
- }
- Abi::Vector { element, count: _ } => hide_niches(element),
- Abi::Aggregate { sized: _ } => {}
- }
- st.largest_niche = None;
- return Ok(tcx.intern_layout(st));
- }
-
- let (start, end) = self.tcx.layout_scalar_valid_range(def.did());
- match st.abi {
- Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => {
- // the asserts ensure that we are not using the
- // `#[rustc_layout_scalar_valid_range(n)]`
- // attribute to widen the range of anything as that would probably
- // result in UB somewhere
- // FIXME(eddyb) the asserts are probably not needed,
- // as larger validity ranges would result in missed
- // optimizations, *not* wrongly assuming the inner
- // value is valid. e.g. unions enlarge validity ranges,
- // because the values may be uninitialized.
- if let Bound::Included(start) = start {
- // FIXME(eddyb) this might be incorrect - it doesn't
- // account for wrap-around (end < start) ranges.
- let valid_range = scalar.valid_range_mut();
- assert!(valid_range.start <= start);
- valid_range.start = start;
- }
- if let Bound::Included(end) = end {
- // FIXME(eddyb) this might be incorrect - it doesn't
- // account for wrap-around (end < start) ranges.
- let valid_range = scalar.valid_range_mut();
- assert!(valid_range.end >= end);
- valid_range.end = end;
- }
-
- // Update `largest_niche` if we have introduced a larger niche.
- let niche = Niche::from_scalar(dl, Size::ZERO, *scalar);
- if let Some(niche) = niche {
- match st.largest_niche {
- Some(largest_niche) => {
- // Replace the existing niche even if they're equal,
- // because this one is at a lower offset.
- if largest_niche.available(dl) <= niche.available(dl) {
- st.largest_niche = Some(niche);
- }
- }
- None => st.largest_niche = Some(niche),
- }
- }
- }
- _ => assert!(
- start == Bound::Unbounded && end == Bound::Unbounded,
- "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}",
- def,
- st,
- ),
- }
-
- return Ok(tcx.intern_layout(st));
- }
-
- // At this point, we have handled all unions and
- // structs. (We have also handled univariant enums
- // that allow representation optimization.)
- assert!(def.is_enum());
-
- // Until we've decided whether to use the tagged or
- // niche filling LayoutS, we don't want to intern the
- // variant layouts, so we can't store them in the
- // overall LayoutS. Store the overall LayoutS
- // and the variant LayoutSs here until then.
- struct TmpLayout<'tcx> {
- layout: LayoutS<'tcx>,
- variants: IndexVec<VariantIdx, LayoutS<'tcx>>,
- }
-
- let calculate_niche_filling_layout =
- || -> Result<Option<TmpLayout<'tcx>>, LayoutError<'tcx>> {
- // The current code for niche-filling relies on variant indices
- // instead of actual discriminants, so enums with
- // explicit discriminants (RFC #2363) would misbehave.
- if def.repr().inhibit_enum_layout_opt()
- || def
- .variants()
- .iter_enumerated()
- .any(|(i, v)| v.discr != ty::VariantDiscr::Relative(i.as_u32()))
- {
- return Ok(None);
- }
-
- if variants.len() < 2 {
- return Ok(None);
- }
-
- let mut align = dl.aggregate_align;
- let mut variant_layouts = variants
- .iter_enumerated()
- .map(|(j, v)| {
- let mut st = self.univariant_uninterned(
- ty,
- v,
- &def.repr(),
- StructKind::AlwaysSized,
- )?;
- st.variants = Variants::Single { index: j };
-
- align = align.max(st.align);
-
- Ok(st)
- })
- .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
-
- let largest_variant_index = match variant_layouts
- .iter_enumerated()
- .max_by_key(|(_i, layout)| layout.size.bytes())
- .map(|(i, _layout)| i)
- {
- None => return Ok(None),
- Some(i) => i,
- };
-
- let all_indices = VariantIdx::new(0)..=VariantIdx::new(variants.len() - 1);
- let needs_disc = |index: VariantIdx| {
- index != largest_variant_index && !absent(&variants[index])
- };
- let niche_variants = all_indices.clone().find(|v| needs_disc(*v)).unwrap()
- ..=all_indices.rev().find(|v| needs_disc(*v)).unwrap();
-
- let count = niche_variants.size_hint().1.unwrap() as u128;
-
- // Find the field with the largest niche
- let (field_index, niche, (niche_start, niche_scalar)) = match variants
- [largest_variant_index]
- .iter()
- .enumerate()
- .filter_map(|(j, field)| Some((j, field.largest_niche?)))
- .max_by_key(|(_, niche)| niche.available(dl))
- .and_then(|(j, niche)| Some((j, niche, niche.reserve(self, count)?)))
- {
- None => return Ok(None),
- Some(x) => x,
- };
-
- let niche_offset = niche.offset
- + variant_layouts[largest_variant_index].fields.offset(field_index);
- let niche_size = niche.value.size(dl);
- let size = variant_layouts[largest_variant_index].size.align_to(align.abi);
-
- let all_variants_fit =
- variant_layouts.iter_enumerated_mut().all(|(i, layout)| {
- if i == largest_variant_index {
- return true;
- }
-
- layout.largest_niche = None;
-
- if layout.size <= niche_offset {
- // This variant will fit before the niche.
- return true;
- }
-
- // Determine if it'll fit after the niche.
- let this_align = layout.align.abi;
- let this_offset = (niche_offset + niche_size).align_to(this_align);
-
- if this_offset + layout.size > size {
- return false;
- }
-
- // It'll fit, but we need to make some adjustments.
- match layout.fields {
- FieldsShape::Arbitrary { ref mut offsets, .. } => {
- for (j, offset) in offsets.iter_mut().enumerate() {
- if !variants[i][j].is_zst() {
- *offset += this_offset;
- }
- }
- }
- _ => {
- panic!("Layout of fields should be Arbitrary for variants")
- }
- }
-
- // It can't be a Scalar or ScalarPair because the offset isn't 0.
- if !layout.abi.is_uninhabited() {
- layout.abi = Abi::Aggregate { sized: true };
- }
- layout.size += this_offset;
-
- true
- });
-
- if !all_variants_fit {
- return Ok(None);
- }
-
- let largest_niche = Niche::from_scalar(dl, niche_offset, niche_scalar);
-
- let others_zst = variant_layouts.iter_enumerated().all(|(i, layout)| {
- i == largest_variant_index || layout.size == Size::ZERO
- });
- let same_size = size == variant_layouts[largest_variant_index].size;
- let same_align = align == variant_layouts[largest_variant_index].align;
-
- let abi = if variant_layouts.iter().all(|v| v.abi.is_uninhabited()) {
- Abi::Uninhabited
- } else if same_size && same_align && others_zst {
- match variant_layouts[largest_variant_index].abi {
- // When the total alignment and size match, we can use the
- // same ABI as the scalar variant with the reserved niche.
- Abi::Scalar(_) => Abi::Scalar(niche_scalar),
- Abi::ScalarPair(first, second) => {
- // Only the niche is guaranteed to be initialised,
- // so use union layouts for the other primitive.
- if niche_offset == Size::ZERO {
- Abi::ScalarPair(niche_scalar, second.to_union())
- } else {
- Abi::ScalarPair(first.to_union(), niche_scalar)
- }
- }
- _ => Abi::Aggregate { sized: true },
- }
- } else {
- Abi::Aggregate { sized: true }
- };
-
- let layout = LayoutS {
- variants: Variants::Multiple {
- tag: niche_scalar,
- tag_encoding: TagEncoding::Niche {
- untagged_variant: largest_variant_index,
- niche_variants,
- niche_start,
- },
- tag_field: 0,
- variants: IndexVec::new(),
- },
- fields: FieldsShape::Arbitrary {
- offsets: vec![niche_offset],
- memory_index: vec![0],
- },
- abi,
- largest_niche,
- size,
- align,
- };
-
- Ok(Some(TmpLayout { layout, variants: variant_layouts }))
- };
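
What `niche.reserve(self, count)` above is drawing on: a scalar whose validity range does not cover its full integer leaves spare bit patterns that can encode extra variants. A minimal standalone sketch of counting those spare values, assuming a non-wrapping range (the real `Niche::available` also handles wrap-around and the niche's offset):

    // Number of bit patterns of a `bits`-wide integer that fall outside the
    // valid range `start..=end` (non-wrapping case only; illustrative sketch).
    fn available(bits: u32, start: u128, end: u128) -> u128 {
        let max = if bits == 128 { u128::MAX } else { (1u128 << bits) - 1 };
        max - (end - start)
    }

    fn main() {
        // `bool` occupies a u8 but only 0 and 1 are valid: 254 spare values,
        // which is why `Option<bool>` still fits in a single byte.
        assert_eq!(available(8, 0, 1), 254);
    }
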
-
- let niche_filling_layout = calculate_niche_filling_layout()?;
-
- let (mut min, mut max) = (i128::MAX, i128::MIN);
- let discr_type = def.repr().discr_type();
- let bits = Integer::from_attr(self, discr_type).size().bits();
- for (i, discr) in def.discriminants(tcx) {
- if variants[i].iter().any(|f| f.abi.is_uninhabited()) {
- continue;
- }
- let mut x = discr.val as i128;
- if discr_type.is_signed() {
- // sign extend the raw representation to be an i128
- x = (x << (128 - bits)) >> (128 - bits);
- }
- if x < min {
- min = x;
- }
- if x > max {
- max = x;
- }
- }
- // We might have no inhabited variants, so pretend there's at least one.
- if (min, max) == (i128::MAX, i128::MIN) {
- min = 0;
- max = 0;
- }
- assert!(min <= max, "discriminant range is {}...{}", min, max);
- let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr(), min, max);
-
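
The `(x << (128 - bits)) >> (128 - bits)` step above sign-extends the raw discriminant bits into a full `i128` so that the signed `min`/`max` comparisons work. A tiny standalone sketch of that shift trick (hypothetical helper, for illustration only):

    // Sign-extend the low `bits` bits of `raw` into an i128 (1 <= bits <= 128).
    fn sign_extend(raw: u128, bits: u32) -> i128 {
        let shift = 128 - bits;
        ((raw as i128) << shift) >> shift
    }

    fn main() {
        // With an 8-bit signed discriminant type, the raw byte 0xFF means -1.
        assert_eq!(sign_extend(0xFF, 8), -1);
        assert_eq!(sign_extend(0x7F, 8), 127);
    }
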
- let mut align = dl.aggregate_align;
- let mut size = Size::ZERO;
-
- // We're interested in the smallest alignment, so start large.
- let mut start_align = Align::from_bytes(256).unwrap();
- assert_eq!(Integer::for_align(dl, start_align), None);
-
- // repr(C) on an enum tells us to make a (tag, union) layout,
- // so we need to grow the prefix alignment to be at least
- // the alignment of the union. (This value is used both for
- // determining the alignment of the overall enum, and for
- // determining the alignment of the payload after the tag.)
- let mut prefix_align = min_ity.align(dl).abi;
- if def.repr().c() {
- for fields in &variants {
- for field in fields {
- prefix_align = prefix_align.max(field.align.abi);
- }
- }
- }
-
- // Create the set of structs that represent each variant.
- let mut layout_variants = variants
- .iter_enumerated()
- .map(|(i, field_layouts)| {
- let mut st = self.univariant_uninterned(
- ty,
- &field_layouts,
- &def.repr(),
- StructKind::Prefixed(min_ity.size(), prefix_align),
- )?;
- st.variants = Variants::Single { index: i };
- // Find the first field we can't move later
- // to make room for a larger discriminant.
- for field in
- st.fields.index_by_increasing_offset().map(|j| field_layouts[j])
- {
- if !field.is_zst() || field.align.abi.bytes() != 1 {
- start_align = start_align.min(field.align.abi);
- break;
- }
- }
- size = cmp::max(size, st.size);
- align = align.max(st.align);
- Ok(st)
- })
- .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
-
- // Align the maximum variant size to the largest alignment.
- size = size.align_to(align.abi);
-
- if size.bytes() >= dl.obj_size_bound() {
- return Err(LayoutError::SizeOverflow(ty));
- }
-
- let typeck_ity = Integer::from_attr(dl, def.repr().discr_type());
- if typeck_ity < min_ity {
- // It is a bug if layout decided on a greater discriminant size than typeck
- // did at this point (based on the values the discriminant can take on),
- // because this discriminant will be loaded and then stored into a variable
- // of the type computed by typeck. Consider such a case (a bug): typeck
- // decided on a byte-sized discriminant, but layout thinks 16 bits are needed
- // to store all discriminant values. Then, in codegen, storing this 16-bit
- // discriminant into an 8-bit temporary would have to discard some of the
- // bits needed to represent it (or layout is wrong in thinking it needs
- // 16 bits).
- bug!(
- "layout decided on a larger discriminant type ({:?}) than typeck ({:?})",
- min_ity,
- typeck_ity
- );
- // However, it is fine to make the discriminant type arbitrarily large (as an optimisation)
- // after this point – we’ll just truncate the value we load in codegen.
- }
-
- // Check to see if we should use a different type for the
- // discriminant. We can safely use a type with the same size
- // as the alignment of the first field of each variant.
- // We increase the size of the discriminant to avoid LLVM copying
- // padding when it doesn't need to. This normally causes unaligned
- // load/stores and excessive memcpy/memset operations. By using a
- // bigger integer size, LLVM can be sure about its contents and
- // won't be so conservative.
-
- // Use the initial field alignment
- let mut ity = if def.repr().c() || def.repr().int.is_some() {
- min_ity
- } else {
- Integer::for_align(dl, start_align).unwrap_or(min_ity)
- };
-
- // If the alignment is not larger than the chosen discriminant size,
- // don't use the alignment as the final size.
- if ity <= min_ity {
- ity = min_ity;
- } else {
- // Patch up the variants' first few fields.
- let old_ity_size = min_ity.size();
- let new_ity_size = ity.size();
- for variant in &mut layout_variants {
- match variant.fields {
- FieldsShape::Arbitrary { ref mut offsets, .. } => {
- for i in offsets {
- if *i <= old_ity_size {
- assert_eq!(*i, old_ity_size);
- *i = new_ity_size;
- }
- }
- // We might be making the struct larger.
- if variant.size <= old_ity_size {
- variant.size = new_ity_size;
- }
- }
- _ => bug!(),
- }
- }
- }
-
- let tag_mask = ity.size().unsigned_int_max();
- let tag = Scalar::Initialized {
- value: Int(ity, signed),
- valid_range: WrappingRange {
- start: (min as u128 & tag_mask),
- end: (max as u128 & tag_mask),
- },
- };
- let mut abi = Abi::Aggregate { sized: true };
-
- if layout_variants.iter().all(|v| v.abi.is_uninhabited()) {
- abi = Abi::Uninhabited;
- } else if tag.size(dl) == size {
- // Make sure we only use scalar layout when the enum is entirely its
- // own tag (i.e. it has no padding nor any non-ZST variant fields).
- abi = Abi::Scalar(tag);
- } else {
- // Try to use a ScalarPair for all tagged enums.
- let mut common_prim = None;
- let mut common_prim_initialized_in_all_variants = true;
- for (field_layouts, layout_variant) in iter::zip(&variants, &layout_variants) {
- let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else {
- bug!();
- };
- let mut fields =
- iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst());
- let (field, offset) = match (fields.next(), fields.next()) {
- (None, None) => {
- common_prim_initialized_in_all_variants = false;
- continue;
- }
- (Some(pair), None) => pair,
- _ => {
- common_prim = None;
- break;
- }
- };
- let prim = match field.abi {
- Abi::Scalar(scalar) => {
- common_prim_initialized_in_all_variants &=
- matches!(scalar, Scalar::Initialized { .. });
- scalar.primitive()
- }
- _ => {
- common_prim = None;
- break;
- }
- };
- if let Some(pair) = common_prim {
- // This is pretty conservative. We could go fancier
- // by conflating things like i32 and u32, or even
- // realising that (u8, u8) could just cohabit with
- // u16 or even u32.
- if pair != (prim, offset) {
- common_prim = None;
- break;
- }
- } else {
- common_prim = Some((prim, offset));
- }
- }
- if let Some((prim, offset)) = common_prim {
- let prim_scalar = if common_prim_initialized_in_all_variants {
- scalar_unit(prim)
- } else {
- // Common prim might be uninit.
- Scalar::Union { value: prim }
- };
- let pair = self.scalar_pair(tag, prim_scalar);
- let pair_offsets = match pair.fields {
- FieldsShape::Arbitrary { ref offsets, ref memory_index } => {
- assert_eq!(memory_index, &[0, 1]);
- offsets
- }
- _ => bug!(),
- };
- if pair_offsets[0] == Size::ZERO
- && pair_offsets[1] == *offset
- && align == pair.align
- && size == pair.size
- {
- // We can use `ScalarPair` only when it matches our
- // already computed layout (including `#[repr(C)]`).
- abi = pair.abi;
- }
- }
- }
-
- // If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the
- // variants to ensure they are consistent. This is because a downcast is
- // semantically a NOP, and thus should not affect layout.
- if matches!(abi, Abi::Scalar(..) | Abi::ScalarPair(..)) {
- for variant in &mut layout_variants {
- // We only do this for variants with fields; the others are not accessed anyway.
- // Also do not overwrite any already existing "clever" ABIs.
- if variant.fields.count() > 0
- && matches!(variant.abi, Abi::Aggregate { .. })
- {
- variant.abi = abi;
- // Also need to bump up the size and alignment, so that the entire value fits in here.
- variant.size = cmp::max(variant.size, size);
- variant.align.abi = cmp::max(variant.align.abi, align.abi);
- }
- }
- }
-
- let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag);
-
- let tagged_layout = LayoutS {
- variants: Variants::Multiple {
- tag,
- tag_encoding: TagEncoding::Direct,
- tag_field: 0,
- variants: IndexVec::new(),
- },
- fields: FieldsShape::Arbitrary {
- offsets: vec![Size::ZERO],
- memory_index: vec![0],
- },
- largest_niche,
- abi,
- align,
- size,
- };
-
- let tagged_layout = TmpLayout { layout: tagged_layout, variants: layout_variants };
-
- let mut best_layout = match (tagged_layout, niche_filling_layout) {
- (tl, Some(nl)) => {
- // Pick the smaller layout; otherwise,
- // pick the layout with the larger niche; otherwise,
- // pick tagged as it has simpler codegen.
- use Ordering::*;
- let niche_size = |tmp_l: &TmpLayout<'_>| {
- tmp_l.layout.largest_niche.map_or(0, |n| n.available(dl))
- };
- match (
- tl.layout.size.cmp(&nl.layout.size),
- niche_size(&tl).cmp(&niche_size(&nl)),
- ) {
- (Greater, _) => nl,
- (Equal, Less) => nl,
- _ => tl,
- }
- }
- (tl, None) => tl,
- };
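
The selection above is a two-key comparison: prefer the smaller layout, break size ties in favour of the larger niche, and otherwise keep the tagged layout for its simpler codegen. A toy sketch of that rule over `(size_in_bytes, niche_size)` pairs, outside the rustc types:

    use std::cmp::Ordering::*;

    // Should the niche-filling candidate win over the tagged candidate?
    fn prefer_niche(tagged: (u64, u128), niche: (u64, u128)) -> bool {
        match (tagged.0.cmp(&niche.0), tagged.1.cmp(&niche.1)) {
            (Greater, _) => true,  // niche-filling is strictly smaller
            (Equal, Less) => true, // same size, but the niche candidate keeps a larger niche
            _ => false,            // otherwise tagged wins (simpler codegen)
        }
    }

    fn main() {
        assert!(prefer_niche((8, 1), (4, 1)));
        assert!(prefer_niche((8, 1), (8, 200)));
        assert!(!prefer_niche((8, 1), (8, 1)));
    }
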
-
- // Now we can intern the variant layouts and store them in the enum layout.
- best_layout.layout.variants = match best_layout.layout.variants {
- Variants::Multiple { tag, tag_encoding, tag_field, .. } => Variants::Multiple {
- tag,
- tag_encoding,
- tag_field,
- variants: best_layout
- .variants
- .into_iter()
- .map(|layout| tcx.intern_layout(layout))
- .collect(),
- },
- _ => bug!(),
- };
-
- tcx.intern_layout(best_layout.layout)
- }
-
- // Types with no meaningful known layout.
- ty::Projection(_) | ty::Opaque(..) => {
- // NOTE(eddyb) `layout_of` query should've normalized these away,
- // if that was possible, so there's no reason to try again here.
- return Err(LayoutError::Unknown(ty));
- }
-
- ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => {
- bug!("Layout::compute: unexpected type `{}`", ty)
- }
-
- ty::Bound(..) | ty::Param(_) | ty::Error(_) => {
- return Err(LayoutError::Unknown(ty));
- }
- })
- }
-}
-
-/// Overlap eligibility and variant assignment for each GeneratorSavedLocal.
-#[derive(Clone, Debug, PartialEq)]
-enum SavedLocalEligibility {
- Unassigned,
- Assigned(VariantIdx),
- // FIXME: Use newtype_index so we aren't wasting bytes
- Ineligible(Option<u32>),
-}
-
-// When laying out generators, we divide our saved local fields into two
-// categories: overlap-eligible and overlap-ineligible.
-//
-// Those fields which are ineligible for overlap go in a "prefix" at the
-// beginning of the layout, and always have space reserved for them.
-//
-// Overlap-eligible fields are only assigned to one variant, so we lay
-// those fields out for each variant and put them right after the
-// prefix.
-//
-// Finally, in the layout details, we point to the fields from the
-// variants they are assigned to. It is possible for some fields to be
-// included in multiple variants. No field ever "moves around" in the
-// layout; its offset is always the same.
-//
-// Also included in the layout are the upvars and the discriminant.
-// These are included as fields on the "outer" layout; they are not part
-// of any variant.
-impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
- /// Compute the eligibility and assignment of each local.
- fn generator_saved_local_eligibility(
- &self,
- info: &GeneratorLayout<'tcx>,
- ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) {
- use SavedLocalEligibility::*;
-
- let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> =
- IndexVec::from_elem_n(Unassigned, info.field_tys.len());
-
- // The saved locals not eligible for overlap. These will get
- // "promoted" to the prefix of our generator.
- let mut ineligible_locals = BitSet::new_empty(info.field_tys.len());
-
- // Figure out which of our saved locals are fields in only
- // one variant. The rest are deemed ineligible for overlap.
- for (variant_index, fields) in info.variant_fields.iter_enumerated() {
- for local in fields {
- match assignments[*local] {
- Unassigned => {
- assignments[*local] = Assigned(variant_index);
- }
- Assigned(idx) => {
- // We've already seen this local at another suspension
- // point, so it is no longer a candidate.
- trace!(
- "removing local {:?} in >1 variant ({:?}, {:?})",
- local,
- variant_index,
- idx
- );
- ineligible_locals.insert(*local);
- assignments[*local] = Ineligible(None);
- }
- Ineligible(_) => {}
- }
- }
- }
-
- // Next, check every pair of eligible locals to see if they
- // conflict.
- for local_a in info.storage_conflicts.rows() {
- let conflicts_a = info.storage_conflicts.count(local_a);
- if ineligible_locals.contains(local_a) {
- continue;
- }
-
- for local_b in info.storage_conflicts.iter(local_a) {
- // local_a and local_b are storage-live at the same time, so they
- // cannot overlap in the generator layout. The only way to guarantee
- // this is if they are in the same variant, or one is ineligible
- // (which means it is stored in every variant).
- if ineligible_locals.contains(local_b)
- || assignments[local_a] == assignments[local_b]
- {
- continue;
- }
-
- // If they conflict, we will choose one to make ineligible.
- // This is not always optimal; it's just a greedy heuristic that
- // seems to produce good results most of the time.
- let conflicts_b = info.storage_conflicts.count(local_b);
- let (remove, other) =
- if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) };
- ineligible_locals.insert(remove);
- assignments[remove] = Ineligible(None);
- trace!("removing local {:?} due to conflict with {:?}", remove, other);
- }
- }
-
- // Count the number of variants in use. If only one of them, then it is
- // impossible to overlap any locals in our layout. In this case it's
- // always better to make the remaining locals ineligible, so we can
- // lay them out with the other locals in the prefix and eliminate
- // unnecessary padding bytes.
- {
- let mut used_variants = BitSet::new_empty(info.variant_fields.len());
- for assignment in &assignments {
- if let Assigned(idx) = assignment {
- used_variants.insert(*idx);
- }
- }
- if used_variants.count() < 2 {
- for assignment in assignments.iter_mut() {
- *assignment = Ineligible(None);
- }
- ineligible_locals.insert_all();
- }
- }
-
- // Write down the order of our locals that will be promoted to the prefix.
- {
- for (idx, local) in ineligible_locals.iter().enumerate() {
- assignments[local] = Ineligible(Some(idx as u32));
- }
- }
- debug!("generator saved local assignments: {:?}", assignments);
-
- (ineligible_locals, assignments)
- }
-
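
The conflict loop above is a greedy heuristic: whenever two overlap candidates are storage-live at the same time, the one involved in more conflicts is demoted to the always-present prefix. A small standalone sketch of that policy over toy conflict pairs (plain `usize` locals rather than the rustc bitset types):

    use std::collections::HashSet;

    // Greedily pick locals to demote so that no two remaining locals conflict.
    fn demote_conflicting(conflicts: &[(usize, usize)]) -> HashSet<usize> {
        let count = |l: usize| conflicts.iter().filter(|&&(a, b)| a == l || b == l).count();
        let mut ineligible = HashSet::new();
        for &(a, b) in conflicts {
            if ineligible.contains(&a) || ineligible.contains(&b) {
                continue; // already resolved by an earlier demotion
            }
            // Demote whichever local participates in more conflicts.
            ineligible.insert(if count(a) > count(b) { a } else { b });
        }
        ineligible
    }

    fn main() {
        // Local 0 conflicts with 1 and 2; demoting 0 alone resolves both conflicts.
        assert_eq!(demote_conflicting(&[(0, 1), (0, 2)]), HashSet::from([0]));
    }
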
- /// Compute the full generator layout.
- fn generator_layout(
- &self,
- ty: Ty<'tcx>,
- def_id: hir::def_id::DefId,
- substs: SubstsRef<'tcx>,
- ) -> Result<Layout<'tcx>, LayoutError<'tcx>> {
- use SavedLocalEligibility::*;
- let tcx = self.tcx;
- let subst_field = |ty: Ty<'tcx>| EarlyBinder(ty).subst(tcx, substs);
-
- let Some(info) = tcx.generator_layout(def_id) else {
- return Err(LayoutError::Unknown(ty));
- };
- let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info);
-
- // Build a prefix layout, including "promoting" all ineligible
- // locals as part of the prefix. We compute the layout of all of
- // these fields at once to get optimal packing.
- let tag_index = substs.as_generator().prefix_tys().count();
-
- // `info.variant_fields` already accounts for the reserved variants, so no need to add them.
- let max_discr = (info.variant_fields.len() - 1) as u128;
- let discr_int = Integer::fit_unsigned(max_discr);
- let discr_int_ty = discr_int.to_ty(tcx, false);
- let tag = Scalar::Initialized {
- value: Primitive::Int(discr_int, false),
- valid_range: WrappingRange { start: 0, end: max_discr },
- };
- let tag_layout = self.tcx.intern_layout(LayoutS::scalar(self, tag));
- let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout };
-
- let promoted_layouts = ineligible_locals
- .iter()
- .map(|local| subst_field(info.field_tys[local]))
- .map(|ty| tcx.mk_maybe_uninit(ty))
- .map(|ty| self.layout_of(ty));
- let prefix_layouts = substs
- .as_generator()
- .prefix_tys()
- .map(|ty| self.layout_of(ty))
- .chain(iter::once(Ok(tag_layout)))
- .chain(promoted_layouts)
- .collect::<Result<Vec<_>, _>>()?;
- let prefix = self.univariant_uninterned(
- ty,
- &prefix_layouts,
- &ReprOptions::default(),
- StructKind::AlwaysSized,
- )?;
-
- let (prefix_size, prefix_align) = (prefix.size, prefix.align);
-
- // Split the prefix layout into the "outer" fields (upvars and
- // discriminant) and the "promoted" fields. Promoted fields will
- // get included in each variant that requested them in
- // GeneratorLayout.
- debug!("prefix = {:#?}", prefix);
- let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields {
- FieldsShape::Arbitrary { mut offsets, memory_index } => {
- let mut inverse_memory_index = invert_mapping(&memory_index);
-
- // "a" (`0..b_start`) and "b" (`b_start..`) correspond to
- // "outer" and "promoted" fields respectively.
- let b_start = (tag_index + 1) as u32;
- let offsets_b = offsets.split_off(b_start as usize);
- let offsets_a = offsets;
-
- // Disentangle the "a" and "b" components of `inverse_memory_index`
- // by preserving the order but keeping only one disjoint "half" each.
- // FIXME(eddyb) build a better abstraction for permutations, if possible.
- let inverse_memory_index_b: Vec<_> =
- inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
- inverse_memory_index.retain(|&i| i < b_start);
- let inverse_memory_index_a = inverse_memory_index;
-
- // Since `inverse_memory_index_{a,b}` each only refer to their
- // respective fields, they can be safely inverted
- let memory_index_a = invert_mapping(&inverse_memory_index_a);
- let memory_index_b = invert_mapping(&inverse_memory_index_b);
-
- let outer_fields =
- FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a };
- (outer_fields, offsets_b, memory_index_b)
- }
- _ => bug!(),
- };
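
The `invert_mapping` helper used above flips between a `memory_index` (source field order to memory order) and its inverse (memory order to source field order), which is what makes the split-and-reinvert step work. A sketch of what such an inversion does, assuming the input is a permutation of `0..len`:

    // If map[i] == j, then the inverse satisfies inv[j] == i.
    fn invert_mapping(map: &[u32]) -> Vec<u32> {
        let mut inverse = vec![u32::MAX; map.len()];
        for (i, &j) in map.iter().enumerate() {
            assert_eq!(inverse[j as usize], u32::MAX, "input is not a permutation");
            inverse[j as usize] = i as u32;
        }
        inverse
    }

    fn main() {
        let memory_index = vec![2, 0, 1]; // field 0 sits 3rd in memory, field 1 sits 1st, ...
        let inverse = invert_mapping(&memory_index);
        assert_eq!(inverse, vec![1, 2, 0]); // memory slot 0 holds field 1, and so on
        assert_eq!(invert_mapping(&inverse), memory_index); // inverting twice round-trips
    }
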
-
- let mut size = prefix.size;
- let mut align = prefix.align;
- let variants = info
- .variant_fields
- .iter_enumerated()
- .map(|(index, variant_fields)| {
- // Only include overlap-eligible fields when we compute our variant layout.
- let variant_only_tys = variant_fields
- .iter()
- .filter(|local| match assignments[**local] {
- Unassigned => bug!(),
- Assigned(v) if v == index => true,
- Assigned(_) => bug!("assignment does not match variant"),
- Ineligible(_) => false,
- })
- .map(|local| subst_field(info.field_tys[*local]));
-
- let mut variant = self.univariant_uninterned(
- ty,
- &variant_only_tys
- .map(|ty| self.layout_of(ty))
- .collect::<Result<Vec<_>, _>>()?,
- &ReprOptions::default(),
- StructKind::Prefixed(prefix_size, prefix_align.abi),
- )?;
- variant.variants = Variants::Single { index };
-
- let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else {
- bug!();
- };
-
- // Now, stitch the promoted and variant-only fields back together in
- // the order they are mentioned by our GeneratorLayout.
- // Because we only use some subset (that can differ between variants)
- // of the promoted fields, we can't just pick those elements of the
- // `promoted_memory_index` (as we'd end up with gaps).
- // So instead, we build an "inverse memory_index", as if all of the
- // promoted fields were being used, but leave the elements not in the
- // subset as `INVALID_FIELD_IDX`, which we can filter out later to
- // obtain a valid (bijective) mapping.
- const INVALID_FIELD_IDX: u32 = !0;
- let mut combined_inverse_memory_index =
- vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()];
- let mut offsets_and_memory_index = iter::zip(offsets, memory_index);
- let combined_offsets = variant_fields
- .iter()
- .enumerate()
- .map(|(i, local)| {
- let (offset, memory_index) = match assignments[*local] {
- Unassigned => bug!(),
- Assigned(_) => {
- let (offset, memory_index) =
- offsets_and_memory_index.next().unwrap();
- (offset, promoted_memory_index.len() as u32 + memory_index)
- }
- Ineligible(field_idx) => {
- let field_idx = field_idx.unwrap() as usize;
- (promoted_offsets[field_idx], promoted_memory_index[field_idx])
- }
- };
- combined_inverse_memory_index[memory_index as usize] = i as u32;
- offset
- })
- .collect();
-
- // Remove the unused slots and invert the mapping to obtain the
- // combined `memory_index` (also see previous comment).
- combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX);
- let combined_memory_index = invert_mapping(&combined_inverse_memory_index);
-
- variant.fields = FieldsShape::Arbitrary {
- offsets: combined_offsets,
- memory_index: combined_memory_index,
- };
-
- size = size.max(variant.size);
- align = align.max(variant.align);
- Ok(tcx.intern_layout(variant))
- })
- .collect::<Result<IndexVec<VariantIdx, _>, _>>()?;
-
- size = size.align_to(align.abi);
-
- let abi =
- if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi().is_uninhabited()) {
- Abi::Uninhabited
- } else {
- Abi::Aggregate { sized: true }
- };
-
- let layout = tcx.intern_layout(LayoutS {
- variants: Variants::Multiple {
- tag,
- tag_encoding: TagEncoding::Direct,
- tag_field: tag_index,
- variants,
- },
- fields: outer_fields,
- abi,
- largest_niche: prefix.largest_niche,
- size,
- align,
- });
- debug!("generator layout ({:?}): {:#?}", ty, layout);
- Ok(layout)
- }
-
- /// This is invoked by the `layout_of` query to record the final
- /// layout of each type.
- #[inline(always)]
- fn record_layout_for_printing(&self, layout: TyAndLayout<'tcx>) {
- // If we are running with `-Zprint-type-sizes`, maybe record layouts
- // for dumping later.
- if self.tcx.sess.opts.unstable_opts.print_type_sizes {
- self.record_layout_for_printing_outlined(layout)
- }
- }
-
- fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) {
- // Ignore layouts computed with non-empty environments or for
- // non-monomorphic types, as the user only wants to see what results
- // from the final codegen session.
- if layout.ty.has_param_types_or_consts() || !self.param_env.caller_bounds().is_empty() {
- return;
- }
-
- // (delay format until we actually need it)
- let record = |kind, packed, opt_discr_size, variants| {
- let type_desc = format!("{:?}", layout.ty);
- self.tcx.sess.code_stats.record_type_size(
- kind,
- type_desc,
- layout.align.abi,
- layout.size,
- packed,
- opt_discr_size,
- variants,
- );
- };
-
- let adt_def = match *layout.ty.kind() {
- ty::Adt(ref adt_def, _) => {
- debug!("print-type-size t: `{:?}` process adt", layout.ty);
- adt_def
- }
-
- ty::Closure(..) => {
- debug!("print-type-size t: `{:?}` record closure", layout.ty);
- record(DataTypeKind::Closure, false, None, vec![]);
- return;
- }
-
- _ => {
- debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty);
- return;
- }
- };
-
- let adt_kind = adt_def.adt_kind();
- let adt_packed = adt_def.repr().pack.is_some();
-
- let build_variant_info = |n: Option<Symbol>, flds: &[Symbol], layout: TyAndLayout<'tcx>| {
- let mut min_size = Size::ZERO;
- let field_info: Vec<_> = flds
- .iter()
- .enumerate()
- .map(|(i, &name)| {
- let field_layout = layout.field(self, i);
- let offset = layout.fields.offset(i);
- let field_end = offset + field_layout.size;
- if min_size < field_end {
- min_size = field_end;
- }
- FieldInfo {
- name,
- offset: offset.bytes(),
- size: field_layout.size.bytes(),
- align: field_layout.align.abi.bytes(),
- }
- })
- .collect();
-
- VariantInfo {
- name: n,
- kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact },
- align: layout.align.abi.bytes(),
- size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() },
- fields: field_info,
- }
- };
-
- match layout.variants {
- Variants::Single { index } => {
- if !adt_def.variants().is_empty() && layout.fields != FieldsShape::Primitive {
- debug!(
- "print-type-size `{:#?}` variant {}",
- layout,
- adt_def.variant(index).name
- );
- let variant_def = &adt_def.variant(index);
- let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
- record(
- adt_kind.into(),
- adt_packed,
- None,
- vec![build_variant_info(Some(variant_def.name), &fields, layout)],
- );
- } else {
- // (This case arises for *empty* enums, so give it
- // zero variants.)
- record(adt_kind.into(), adt_packed, None, vec![]);
- }
- }
-
- Variants::Multiple { tag, ref tag_encoding, .. } => {
- debug!(
- "print-type-size `{:#?}` adt general variants def {}",
- layout.ty,
- adt_def.variants().len()
- );
- let variant_infos: Vec<_> = adt_def
- .variants()
- .iter_enumerated()
- .map(|(i, variant_def)| {
- let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect();
- build_variant_info(
- Some(variant_def.name),
- &fields,
- layout.for_variant(self, i),
- )
- })
- .collect();
- record(
- adt_kind.into(),
- adt_packed,
- match tag_encoding {
- TagEncoding::Direct => Some(tag.size(self)),
- _ => None,
- },
- variant_infos,
- );
- }
- }
- }
-}
-
/// Type size "skeleton", i.e., the only information determining a type's size.
/// While this is conservative (aside from constant sizes, only pointers,
/// newtypes thereof and null pointer optimized enums are allowed), it is
@@ -2058,7 +263,7 @@ impl<'tcx> SizeSkeleton<'tcx> {
tcx: TyCtxt<'tcx>,
param_env: ty::ParamEnv<'tcx>,
) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> {
- debug_assert!(!ty.has_infer_types_or_consts());
+ debug_assert!(!ty.has_non_region_infer());
// First try computing a static layout.
let err = match tcx.layout_of(param_env.and(ty)) {
@@ -2074,7 +279,7 @@ impl<'tcx> SizeSkeleton<'tcx> {
let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env);
match tail.kind() {
ty::Param(_) | ty::Projection(_) => {
- debug_assert!(tail.has_param_types_or_consts());
+ debug_assert!(tail.has_non_region_param());
Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(tail) })
}
_ => bug!(
@@ -2625,7 +830,7 @@ where
} else {
match mt {
hir::Mutability::Not => {
- if ty.is_freeze(tcx.at(DUMMY_SP), cx.param_env()) {
+ if ty.is_freeze(tcx, cx.param_env()) {
PointerKind::Frozen
} else {
PointerKind::SharedMutable
@@ -2636,7 +841,7 @@ where
// noalias, as another pointer to the structure can be obtained, that
// is not based-on the original reference. We consider all !Unpin
// types to be potentially self-referential here.
- if ty.is_unpin(tcx.at(DUMMY_SP), cx.param_env()) {
+ if ty.is_unpin(tcx, cx.param_env()) {
PointerKind::UniqueBorrowed
} else {
PointerKind::UniqueBorrowedPinned
@@ -2748,112 +953,6 @@ where
}
}
-impl<'tcx> ty::Instance<'tcx> {
- // NOTE(eddyb) this is private to avoid using it from outside of
- // `fn_abi_of_instance` - any other uses are either too high-level
- // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead),
- // or should go through `FnAbi` instead, to avoid losing any
- // adjustments `fn_abi_of_instance` might be performing.
- #[tracing::instrument(level = "debug", skip(tcx, param_env))]
- fn fn_sig_for_fn_abi(
- &self,
- tcx: TyCtxt<'tcx>,
- param_env: ty::ParamEnv<'tcx>,
- ) -> ty::PolyFnSig<'tcx> {
- let ty = self.ty(tcx, param_env);
- match *ty.kind() {
- ty::FnDef(..) => {
- // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering
- // parameters unused if they show up in the signature, but not in the `mir::Body`
- // (i.e. due to being inside a projection that got normalized, see
- // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping
- // track of a polymorphization `ParamEnv` to allow normalizing later.
- let mut sig = match *ty.kind() {
- ty::FnDef(def_id, substs) => tcx
- .normalize_erasing_regions(tcx.param_env(def_id), tcx.bound_fn_sig(def_id))
- .subst(tcx, substs),
- _ => unreachable!(),
- };
-
- if let ty::InstanceDef::VTableShim(..) = self.def {
- // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`.
- sig = sig.map_bound(|mut sig| {
- let mut inputs_and_output = sig.inputs_and_output.to_vec();
- inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]);
- sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output);
- sig
- });
- }
- sig
- }
- ty::Closure(def_id, substs) => {
- let sig = substs.as_closure().sig();
-
- let bound_vars = tcx.mk_bound_variable_kinds(
- sig.bound_vars()
- .iter()
- .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
- );
- let br = ty::BoundRegion {
- var: ty::BoundVar::from_usize(bound_vars.len() - 1),
- kind: ty::BoundRegionKind::BrEnv,
- };
- let env_region = ty::ReLateBound(ty::INNERMOST, br);
- let env_ty = tcx.closure_env_ty(def_id, substs, env_region).unwrap();
-
- let sig = sig.skip_binder();
- ty::Binder::bind_with_vars(
- tcx.mk_fn_sig(
- iter::once(env_ty).chain(sig.inputs().iter().cloned()),
- sig.output(),
- sig.c_variadic,
- sig.unsafety,
- sig.abi,
- ),
- bound_vars,
- )
- }
- ty::Generator(_, substs, _) => {
- let sig = substs.as_generator().poly_sig();
-
- let bound_vars = tcx.mk_bound_variable_kinds(
- sig.bound_vars()
- .iter()
- .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))),
- );
- let br = ty::BoundRegion {
- var: ty::BoundVar::from_usize(bound_vars.len() - 1),
- kind: ty::BoundRegionKind::BrEnv,
- };
- let env_region = ty::ReLateBound(ty::INNERMOST, br);
- let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty);
-
- let pin_did = tcx.require_lang_item(LangItem::Pin, None);
- let pin_adt_ref = tcx.adt_def(pin_did);
- let pin_substs = tcx.intern_substs(&[env_ty.into()]);
- let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs);
-
- let sig = sig.skip_binder();
- let state_did = tcx.require_lang_item(LangItem::GeneratorState, None);
- let state_adt_ref = tcx.adt_def(state_did);
- let state_substs = tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]);
- let ret_ty = tcx.mk_adt(state_adt_ref, state_substs);
- ty::Binder::bind_with_vars(
- tcx.mk_fn_sig(
- [env_ty, sig.resume_ty].iter(),
- &ret_ty,
- false,
- hir::Unsafety::Normal,
- rustc_target::spec::abi::Abi::Rust,
- ),
- bound_vars,
- )
- }
- _ => bug!("unexpected type {:?} in Instance::fn_sig", ty),
- }
- }
-}
-
/// Calculates whether a function's ABI can unwind or not.
///
/// This takes two primary parameters:
@@ -2996,40 +1095,6 @@ pub fn fn_can_unwind<'tcx>(tcx: TyCtxt<'tcx>, fn_def_id: Option<DefId>, abi: Spe
}
}
-#[inline]
-pub fn conv_from_spec_abi(tcx: TyCtxt<'_>, abi: SpecAbi) -> Conv {
- use rustc_target::spec::abi::Abi::*;
- match tcx.sess.target.adjust_abi(abi) {
- RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust,
- RustCold => Conv::RustCold,
-
- // It's the ABI's job to select this, not ours.
- System { .. } => bug!("system abi should be selected elsewhere"),
- EfiApi => bug!("eficall abi should be selected elsewhere"),
-
- Stdcall { .. } => Conv::X86Stdcall,
- Fastcall { .. } => Conv::X86Fastcall,
- Vectorcall { .. } => Conv::X86VectorCall,
- Thiscall { .. } => Conv::X86ThisCall,
- C { .. } => Conv::C,
- Unadjusted => Conv::C,
- Win64 { .. } => Conv::X86_64Win64,
- SysV64 { .. } => Conv::X86_64SysV,
- Aapcs { .. } => Conv::ArmAapcs,
- CCmseNonSecureCall => Conv::CCmseNonSecureCall,
- PtxKernel => Conv::PtxKernel,
- Msp430Interrupt => Conv::Msp430Intr,
- X86Interrupt => Conv::X86Intr,
- AmdGpuKernel => Conv::AmdGpuKernel,
- AvrInterrupt => Conv::AvrInterrupt,
- AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt,
- Wasm => Conv::C,
-
- // These API constants ought to be more specific...
- Cdecl { .. } => Conv::C,
- }
-}
-
/// Error produced by attempting to compute or adjust a `FnAbi`.
#[derive(Copy, Clone, Debug, HashStable)]
pub enum FnAbiError<'tcx> {
@@ -3061,6 +1126,12 @@ impl<'tcx> fmt::Display for FnAbiError<'tcx> {
}
}
+impl<'tcx> IntoDiagnostic<'tcx, !> for FnAbiError<'tcx> {
+ fn into_diagnostic(self, handler: &'tcx Handler) -> DiagnosticBuilder<'tcx, !> {
+ handler.struct_fatal(self.to_string())
+ }
+}
+
// FIXME(eddyb) maybe use something like this for a unified `fn_abi_of`, not
// just for error handling.
#[derive(Debug)]
@@ -3142,367 +1213,3 @@ pub trait FnAbiOf<'tcx>: FnAbiOfHelpers<'tcx> {
}
impl<'tcx, C: FnAbiOfHelpers<'tcx>> FnAbiOf<'tcx> for C {}
-
-fn fn_abi_of_fn_ptr<'tcx>(
- tcx: TyCtxt<'tcx>,
- query: ty::ParamEnvAnd<'tcx, (ty::PolyFnSig<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
-) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
- let (param_env, (sig, extra_args)) = query.into_parts();
-
- LayoutCx { tcx, param_env }.fn_abi_new_uncached(sig, extra_args, None, None, false)
-}
-
-fn fn_abi_of_instance<'tcx>(
- tcx: TyCtxt<'tcx>,
- query: ty::ParamEnvAnd<'tcx, (ty::Instance<'tcx>, &'tcx ty::List<Ty<'tcx>>)>,
-) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
- let (param_env, (instance, extra_args)) = query.into_parts();
-
- let sig = instance.fn_sig_for_fn_abi(tcx, param_env);
-
- let caller_location = if instance.def.requires_caller_location(tcx) {
- Some(tcx.caller_location_ty())
- } else {
- None
- };
-
- LayoutCx { tcx, param_env }.fn_abi_new_uncached(
- sig,
- extra_args,
- caller_location,
- Some(instance.def_id()),
- matches!(instance.def, ty::InstanceDef::Virtual(..)),
- )
-}
-
-// Handle safe Rust thin and fat pointers.
-pub fn adjust_for_rust_scalar<'tcx>(
- cx: LayoutCx<'tcx, TyCtxt<'tcx>>,
- attrs: &mut ArgAttributes,
- scalar: Scalar,
- layout: TyAndLayout<'tcx>,
- offset: Size,
- is_return: bool,
-) {
- // Booleans are always a noundef i1 that needs to be zero-extended.
- if scalar.is_bool() {
- attrs.ext(ArgExtension::Zext);
- attrs.set(ArgAttribute::NoUndef);
- return;
- }
-
- // Scalars which have invalid values cannot be undef.
- if !scalar.is_always_valid(&cx) {
- attrs.set(ArgAttribute::NoUndef);
- }
-
- // Only pointer types handled below.
- let Scalar::Initialized { value: Pointer, valid_range} = scalar else { return };
-
- if !valid_range.contains(0) {
- attrs.set(ArgAttribute::NonNull);
- }
-
- if let Some(pointee) = layout.pointee_info_at(&cx, offset) {
- if let Some(kind) = pointee.safe {
- attrs.pointee_align = Some(pointee.align);
-
- // `Box` pointers (`UniqueOwned`) are not necessarily dereferenceable
- // for the entire duration of the function as they can be deallocated
- // at any time. Same for shared mutable references. If LLVM had a
- // way to say "dereferenceable on entry" we could use it here.
- attrs.pointee_size = match kind {
- PointerKind::UniqueBorrowed
- | PointerKind::UniqueBorrowedPinned
- | PointerKind::Frozen => pointee.size,
- PointerKind::SharedMutable | PointerKind::UniqueOwned => Size::ZERO,
- };
-
- // `Box`, `&T`, and `&mut T` cannot be undef.
- // Note that this only applies to the value of the pointer itself;
- // this attribute doesn't make it UB for the pointed-to data to be undef.
- attrs.set(ArgAttribute::NoUndef);
-
- // The aliasing rules for `Box<T>` are still not decided, but currently we emit
- // `noalias` for it. This can be turned off using an unstable flag.
- // See https://github.com/rust-lang/unsafe-code-guidelines/issues/326
- let noalias_for_box = cx.tcx.sess.opts.unstable_opts.box_noalias.unwrap_or(true);
-
- // `&mut` pointer parameters never alias other parameters,
- // or mutable global data
- //
- // `&T` where `T` contains no `UnsafeCell<U>` is immutable,
- // and can be marked as both `readonly` and `noalias`, as
- // LLVM's definition of `noalias` is based solely on memory
- // dependencies rather than pointer equality
- //
- // Due to past miscompiles in LLVM, we apply a separate NoAliasMutRef attribute
- // for UniqueBorrowed arguments, so that the codegen backend can decide whether
- // or not to actually emit the attribute. It can also be controlled with the
- // `-Zmutable-noalias` debugging option.
- let no_alias = match kind {
- PointerKind::SharedMutable
- | PointerKind::UniqueBorrowed
- | PointerKind::UniqueBorrowedPinned => false,
- PointerKind::UniqueOwned => noalias_for_box,
- PointerKind::Frozen => !is_return,
- };
- if no_alias {
- attrs.set(ArgAttribute::NoAlias);
- }
-
- if kind == PointerKind::Frozen && !is_return {
- attrs.set(ArgAttribute::ReadOnly);
- }
-
- if kind == PointerKind::UniqueBorrowed && !is_return {
- attrs.set(ArgAttribute::NoAliasMutRef);
- }
- }
- }
-}
-
-impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> {
- // FIXME(eddyb) perhaps group the signature/type-containing (or all of them?)
- // arguments of this method, into a separate `struct`.
- #[tracing::instrument(
- level = "debug",
- skip(self, caller_location, fn_def_id, force_thin_self_ptr)
- )]
- fn fn_abi_new_uncached(
- &self,
- sig: ty::PolyFnSig<'tcx>,
- extra_args: &[Ty<'tcx>],
- caller_location: Option<Ty<'tcx>>,
- fn_def_id: Option<DefId>,
- // FIXME(eddyb) replace this with something typed, like an `enum`.
- force_thin_self_ptr: bool,
- ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> {
- let sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, sig);
-
- let conv = conv_from_spec_abi(self.tcx(), sig.abi);
-
- let mut inputs = sig.inputs();
- let extra_args = if sig.abi == RustCall {
- assert!(!sig.c_variadic && extra_args.is_empty());
-
- if let Some(input) = sig.inputs().last() {
- if let ty::Tuple(tupled_arguments) = input.kind() {
- inputs = &sig.inputs()[0..sig.inputs().len() - 1];
- tupled_arguments
- } else {
- bug!(
- "argument to function with \"rust-call\" ABI \
- is not a tuple"
- );
- }
- } else {
- bug!(
- "argument to function with \"rust-call\" ABI \
- is not a tuple"
- );
- }
- } else {
- assert!(sig.c_variadic || extra_args.is_empty());
- extra_args
- };
-
- let target = &self.tcx.sess.target;
- let target_env_gnu_like = matches!(&target.env[..], "gnu" | "musl" | "uclibc");
- let win_x64_gnu = target.os == "windows" && target.arch == "x86_64" && target.env == "gnu";
- let linux_s390x_gnu_like =
- target.os == "linux" && target.arch == "s390x" && target_env_gnu_like;
- let linux_sparc64_gnu_like =
- target.os == "linux" && target.arch == "sparc64" && target_env_gnu_like;
- let linux_powerpc_gnu_like =
- target.os == "linux" && target.arch == "powerpc" && target_env_gnu_like;
- use SpecAbi::*;
- let rust_abi = matches!(sig.abi, RustIntrinsic | PlatformIntrinsic | Rust | RustCall);
-
- let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| -> Result<_, FnAbiError<'tcx>> {
- let span = tracing::debug_span!("arg_of");
- let _entered = span.enter();
- let is_return = arg_idx.is_none();
-
- let layout = self.layout_of(ty)?;
- let layout = if force_thin_self_ptr && arg_idx == Some(0) {
- // Don't pass the vtable, it's not an argument of the virtual fn.
- // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait`
- // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen
- make_thin_self_ptr(self, layout)
- } else {
- layout
- };
-
- let mut arg = ArgAbi::new(self, layout, |layout, scalar, offset| {
- let mut attrs = ArgAttributes::new();
- adjust_for_rust_scalar(*self, &mut attrs, scalar, *layout, offset, is_return);
- attrs
- });
-
- if arg.layout.is_zst() {
- // For some forsaken reason, x86_64-pc-windows-gnu
- // doesn't ignore zero-sized struct arguments.
- // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl,uclibc}.
- if is_return
- || rust_abi
- || (!win_x64_gnu
- && !linux_s390x_gnu_like
- && !linux_sparc64_gnu_like
- && !linux_powerpc_gnu_like)
- {
- arg.mode = PassMode::Ignore;
- }
- }
-
- Ok(arg)
- };
-
- let mut fn_abi = FnAbi {
- ret: arg_of(sig.output(), None)?,
- args: inputs
- .iter()
- .copied()
- .chain(extra_args.iter().copied())
- .chain(caller_location)
- .enumerate()
- .map(|(i, ty)| arg_of(ty, Some(i)))
- .collect::<Result<_, _>>()?,
- c_variadic: sig.c_variadic,
- fixed_count: inputs.len() as u32,
- conv,
- can_unwind: fn_can_unwind(self.tcx(), fn_def_id, sig.abi),
- };
- self.fn_abi_adjust_for_abi(&mut fn_abi, sig.abi)?;
- debug!("fn_abi_new_uncached = {:?}", fn_abi);
- Ok(self.tcx.arena.alloc(fn_abi))
- }
-
- #[tracing::instrument(level = "trace", skip(self))]
- fn fn_abi_adjust_for_abi(
- &self,
- fn_abi: &mut FnAbi<'tcx, Ty<'tcx>>,
- abi: SpecAbi,
- ) -> Result<(), FnAbiError<'tcx>> {
- if abi == SpecAbi::Unadjusted {
- return Ok(());
- }
-
- if abi == SpecAbi::Rust
- || abi == SpecAbi::RustCall
- || abi == SpecAbi::RustIntrinsic
- || abi == SpecAbi::PlatformIntrinsic
- {
- let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| {
- if arg.is_ignore() {
- return;
- }
-
- match arg.layout.abi {
- Abi::Aggregate { .. } => {}
-
- // This is a fun case! The gist of what this is doing is
- // that we want callers and callees to always agree on the
- // ABI of how they pass SIMD arguments. If we were to *not*
- // make these arguments indirect then they'd be immediates
- // in LLVM, which means that they'd use whatever the
- // appropriate ABI is for the callee and the caller. That
- // means, for example, if the caller doesn't have AVX
- // enabled but the callee does, then passing an AVX argument
- // across this boundary would cause corrupt data to show up.
- //
- // This problem is fixed by unconditionally passing SIMD
- // arguments through memory between callers and callees
- // which should get them all to agree on ABI regardless of
- // target feature sets. Some more information about this
- // issue can be found in #44367.
- //
- // Note that the platform intrinsic ABI is exempt here as
- // that's how we connect up to LLVM and it's unstable
- // anyway, we control all calls to it in libstd.
- Abi::Vector { .. }
- if abi != SpecAbi::PlatformIntrinsic
- && self.tcx.sess.target.simd_types_indirect =>
- {
- arg.make_indirect();
- return;
- }
-
- _ => return,
- }
-
- let size = arg.layout.size;
- if arg.layout.is_unsized() || size > Pointer.size(self) {
- arg.make_indirect();
- } else {
- // We want to pass small aggregates as immediates, but using
- // an LLVM aggregate type for this leads to bad optimizations,
- // so we pick an appropriately sized integer type instead.
- arg.cast_to(Reg { kind: RegKind::Integer, size });
- }
- };
- fixup(&mut fn_abi.ret);
- for arg in fn_abi.args.iter_mut() {
- fixup(arg);
- }
- } else {
- fn_abi.adjust_for_foreign_abi(self, abi)?;
- }
-
- Ok(())
- }
-}
-
-#[tracing::instrument(level = "debug", skip(cx))]
-fn make_thin_self_ptr<'tcx>(
- cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>),
- layout: TyAndLayout<'tcx>,
-) -> TyAndLayout<'tcx> {
- let tcx = cx.tcx();
- let fat_pointer_ty = if layout.is_unsized() {
- // unsized `self` is passed as a pointer to `self`
- // FIXME (mikeyhew) change this to use &own if it is ever added to the language
- tcx.mk_mut_ptr(layout.ty)
- } else {
- match layout.abi {
- Abi::ScalarPair(..) | Abi::Scalar(..) => (),
- _ => bug!("receiver type has unsupported layout: {:?}", layout),
- }
-
- // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self>
- // with a Scalar (not ScalarPair) ABI. This is a hack that is understood
- // elsewhere in the compiler as a method on a `dyn Trait`.
- // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we
- // get a built-in pointer type
- let mut fat_pointer_layout = layout;
- 'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr()
- && !fat_pointer_layout.ty.is_region_ptr()
- {
- for i in 0..fat_pointer_layout.fields.count() {
- let field_layout = fat_pointer_layout.field(cx, i);
-
- if !field_layout.is_zst() {
- fat_pointer_layout = field_layout;
- continue 'descend_newtypes;
- }
- }
-
- bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout);
- }
-
- fat_pointer_layout.ty
- };
-
- // we now have a type like `*mut RcBox<dyn Trait>`
- // change its layout to that of `*mut ()`, a thin pointer, but keep the same type
- // this is understood as a special case elsewhere in the compiler
- let unit_ptr_ty = tcx.mk_mut_ptr(tcx.mk_unit());
-
- TyAndLayout {
- ty: fat_pointer_ty,
-
- // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing the `Result`
- // should always work because the type is always `*mut ()`.
- ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap()
- }
-}
diff --git a/compiler/rustc_middle/src/ty/layout_sanity_check.rs b/compiler/rustc_middle/src/ty/layout_sanity_check.rs
deleted file mode 100644
index 87c85dcff..000000000
--- a/compiler/rustc_middle/src/ty/layout_sanity_check.rs
+++ /dev/null
@@ -1,303 +0,0 @@
-use crate::ty::{
- layout::{LayoutCx, TyAndLayout},
- TyCtxt,
-};
-use rustc_target::abi::*;
-
-use std::cmp;
-
-/// Enforce some basic invariants on layouts.
-pub(super) fn sanity_check_layout<'tcx>(
- cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
- layout: &TyAndLayout<'tcx>,
-) {
- // Type-level uninhabitedness should always imply ABI uninhabitedness.
- if cx.tcx.conservative_is_privately_uninhabited(cx.param_env.and(layout.ty)) {
- assert!(layout.abi.is_uninhabited());
- }
-
- if layout.size.bytes() % layout.align.abi.bytes() != 0 {
- bug!("size is not a multiple of align, in the following layout:\n{layout:#?}");
- }
-
- if cfg!(debug_assertions) {
- /// Yields non-ZST fields of the type
- fn non_zst_fields<'tcx, 'a>(
- cx: &'a LayoutCx<'tcx, TyCtxt<'tcx>>,
- layout: &'a TyAndLayout<'tcx>,
- ) -> impl Iterator<Item = (Size, TyAndLayout<'tcx>)> + 'a {
- (0..layout.layout.fields().count()).filter_map(|i| {
- let field = layout.field(cx, i);
- // Also checking `align == 1` here leads to test failures in
- // `layout/zero-sized-array-union.rs`, where a type has a zero-size field with
- // alignment 4 that still gets ignored during layout computation (which is okay
- // since other fields already force alignment 4).
- let zst = field.is_zst();
- (!zst).then(|| (layout.fields.offset(i), field))
- })
- }
-
- fn skip_newtypes<'tcx>(
- cx: &LayoutCx<'tcx, TyCtxt<'tcx>>,
- layout: &TyAndLayout<'tcx>,
- ) -> TyAndLayout<'tcx> {
- if matches!(layout.layout.variants(), Variants::Multiple { .. }) {
- // Definitely not a newtype of anything.
- return *layout;
- }
- let mut fields = non_zst_fields(cx, layout);
- let Some(first) = fields.next() else {
- // No fields here, so this could be a primitive or enum -- either way it's not a newtype around a thing
- return *layout
- };
- if fields.next().is_none() {
- let (offset, first) = first;
- if offset == Size::ZERO && first.layout.size() == layout.size {
- // This is a newtype, so keep recursing.
- // FIXME(RalfJung): I don't think it would be correct to do any checks for
- // alignment here, so we don't. Is that correct?
- return skip_newtypes(cx, &first);
- }
- }
- // No more newtypes here.
- *layout
- }
-
- fn check_layout_abi<'tcx>(cx: &LayoutCx<'tcx, TyCtxt<'tcx>>, layout: &TyAndLayout<'tcx>) {
- match layout.layout.abi() {
- Abi::Scalar(scalar) => {
- // No padding in scalars.
- let size = scalar.size(cx);
- let align = scalar.align(cx).abi;
- assert_eq!(
- layout.layout.size(),
- size,
- "size mismatch between ABI and layout in {layout:#?}"
- );
- assert_eq!(
- layout.layout.align().abi,
- align,
- "alignment mismatch between ABI and layout in {layout:#?}"
- );
- // Check that this matches the underlying field.
- let inner = skip_newtypes(cx, layout);
- assert!(
- matches!(inner.layout.abi(), Abi::Scalar(_)),
- "`Scalar` type {} is newtype around non-`Scalar` type {}",
- layout.ty,
- inner.ty
- );
- match inner.layout.fields() {
- FieldsShape::Primitive => {
- // Fine.
- }
- FieldsShape::Union(..) => {
- // FIXME: I guess we could also check something here? Like, look at all fields?
- return;
- }
- FieldsShape::Arbitrary { .. } => {
- // Should be an enum, the only field is the discriminant.
- assert!(
- inner.ty.is_enum(),
- "`Scalar` layout for non-primitive non-enum type {}",
- inner.ty
- );
- assert_eq!(
- inner.layout.fields().count(),
- 1,
- "`Scalar` layout for multiple-field type in {inner:#?}",
- );
- let offset = inner.layout.fields().offset(0);
- let field = inner.field(cx, 0);
- // The field should be at the right offset, and match the `scalar` layout.
- assert_eq!(
- offset,
- Size::ZERO,
- "`Scalar` field at non-0 offset in {inner:#?}",
- );
- assert_eq!(
- field.size, size,
- "`Scalar` field with bad size in {inner:#?}",
- );
- assert_eq!(
- field.align.abi, align,
- "`Scalar` field with bad align in {inner:#?}",
- );
- assert!(
- matches!(field.abi, Abi::Scalar(_)),
- "`Scalar` field with bad ABI in {inner:#?}",
- );
- }
- _ => {
- panic!("`Scalar` layout for non-primitive non-enum type {}", inner.ty);
- }
- }
- }
- Abi::ScalarPair(scalar1, scalar2) => {
- // Sanity-check scalar pairs. These are a bit more flexible and support
- // padding, but we can at least ensure both fields actually fit into the layout
- // and the alignment requirement has not been weakened.
- let size1 = scalar1.size(cx);
- let align1 = scalar1.align(cx).abi;
- let size2 = scalar2.size(cx);
- let align2 = scalar2.align(cx).abi;
- assert!(
- layout.layout.align().abi >= cmp::max(align1, align2),
- "alignment mismatch between ABI and layout in {layout:#?}",
- );
- let field2_offset = size1.align_to(align2);
- assert!(
- layout.layout.size() >= field2_offset + size2,
- "size mismatch between ABI and layout in {layout:#?}"
- );
- // Check that the underlying pair of fields matches.
- let inner = skip_newtypes(cx, layout);
- assert!(
- matches!(inner.layout.abi(), Abi::ScalarPair(..)),
- "`ScalarPair` type {} is newtype around non-`ScalarPair` type {}",
- layout.ty,
- inner.ty
- );
- if matches!(inner.layout.variants(), Variants::Multiple { .. }) {
- // FIXME: ScalarPair for enums is enormously complicated and it is very hard
- // to check anything about them.
- return;
- }
- match inner.layout.fields() {
- FieldsShape::Arbitrary { .. } => {
- // Checked below.
- }
- FieldsShape::Union(..) => {
- // FIXME: I guess we could also check something here? Like, look at all fields?
- return;
- }
- _ => {
- panic!("`ScalarPair` layout with unexpected field shape in {inner:#?}");
- }
- }
- let mut fields = non_zst_fields(cx, &inner);
- let (offset1, field1) = fields.next().unwrap_or_else(|| {
- panic!("`ScalarPair` layout for type with not even one non-ZST field: {inner:#?}")
- });
- let (offset2, field2) = fields.next().unwrap_or_else(|| {
- panic!("`ScalarPair` layout for type with less than two non-ZST fields: {inner:#?}")
- });
- assert!(
- fields.next().is_none(),
- "`ScalarPair` layout for type with at least three non-ZST fields: {inner:#?}"
- );
- // The fields might be in opposite order.
- let (offset1, field1, offset2, field2) = if offset1 <= offset2 {
- (offset1, field1, offset2, field2)
- } else {
- (offset2, field2, offset1, field1)
- };
- // The fields should be at the right offset, and match the `scalar` layout.
- assert_eq!(
- offset1,
- Size::ZERO,
- "`ScalarPair` first field at non-0 offset in {inner:#?}",
- );
- assert_eq!(
- field1.size, size1,
- "`ScalarPair` first field with bad size in {inner:#?}",
- );
- assert_eq!(
- field1.align.abi, align1,
- "`ScalarPair` first field with bad align in {inner:#?}",
- );
- assert!(
- matches!(field1.abi, Abi::Scalar(_)),
- "`ScalarPair` first field with bad ABI in {inner:#?}",
- );
- assert_eq!(
- offset2, field2_offset,
- "`ScalarPair` second field at bad offset in {inner:#?}",
- );
- assert_eq!(
- field2.size, size2,
- "`ScalarPair` second field with bad size in {inner:#?}",
- );
- assert_eq!(
- field2.align.abi, align2,
- "`ScalarPair` second field with bad align in {inner:#?}",
- );
- assert!(
- matches!(field2.abi, Abi::Scalar(_)),
- "`ScalarPair` second field with bad ABI in {inner:#?}",
- );
- }
- Abi::Vector { count, element } => {
- // No padding in vectors. Alignment can be strengthened, though.
- assert!(
- layout.layout.align().abi >= element.align(cx).abi,
- "alignment mismatch between ABI and layout in {layout:#?}"
- );
- let size = element.size(cx) * count;
- assert_eq!(
- layout.layout.size(),
- size.align_to(cx.data_layout().vector_align(size).abi),
- "size mismatch between ABI and layout in {layout:#?}"
- );
- }
- Abi::Uninhabited | Abi::Aggregate { .. } => {} // Nothing to check.
- }
- }
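
The `field2_offset = size1.align_to(align2)` computation in the `ScalarPair` checks above is plain round-up-to-alignment arithmetic: the second scalar starts at the first scalar's size rounded up to the second's alignment. A tiny sketch with byte values, assuming power-of-two alignments:

    // Round `size` up to the next multiple of `align` (align is a power of two).
    fn align_to(size: u64, align: u64) -> u64 {
        (size + align - 1) & !(align - 1)
    }

    fn main() {
        // For a (u8, u32) scalar pair, the u32 starts at offset 4,
        // so the pair occupies at least 4 + 4 = 8 bytes.
        assert_eq!(align_to(1, 4), 4);
    }
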
-
- check_layout_abi(cx, layout);
-
- if let Variants::Multiple { variants, .. } = &layout.variants {
- for variant in variants.iter() {
- // No nested "multiple".
- assert!(matches!(variant.variants(), Variants::Single { .. }));
- // Variants should have the same or a smaller size as the full thing,
- // and same for alignment.
- if variant.size() > layout.size {
- bug!(
- "Type with size {} bytes has variant with size {} bytes: {layout:#?}",
- layout.size.bytes(),
- variant.size().bytes(),
- )
- }
- if variant.align().abi > layout.align.abi {
- bug!(
- "Type with alignment {} bytes has variant with alignment {} bytes: {layout:#?}",
- layout.align.abi.bytes(),
- variant.align().abi.bytes(),
- )
- }
- // Skip empty variants.
- if variant.size() == Size::ZERO
- || variant.fields().count() == 0
- || variant.abi().is_uninhabited()
- {
- // These are never actually accessed anyway, so we can skip the coherence check
- // for them. They also fail that check, since they have
- // `Aggregate`/`Uninhabited` ABI even when the main type is
- // `Scalar`/`ScalarPair`. (Note that sometimes, variants with fields have size
- // 0, and sometimes, variants without fields have non-0 size.)
- continue;
- }
- // The top-level ABI and the ABI of the variants should be coherent.
- let scalar_coherent = |s1: Scalar, s2: Scalar| {
- s1.size(cx) == s2.size(cx) && s1.align(cx) == s2.align(cx)
- };
- let abi_coherent = match (layout.abi, variant.abi()) {
- (Abi::Scalar(s1), Abi::Scalar(s2)) => scalar_coherent(s1, s2),
- (Abi::ScalarPair(a1, b1), Abi::ScalarPair(a2, b2)) => {
- scalar_coherent(a1, a2) && scalar_coherent(b1, b2)
- }
- (Abi::Uninhabited, _) => true,
- (Abi::Aggregate { .. }, _) => true,
- _ => false,
- };
- if !abi_coherent {
- bug!(
- "Variant ABI is incompatible with top-level ABI:\nvariant={:#?}\nTop-level: {layout:#?}",
- variant
- );
- }
- }
- }
- }
-}
diff --git a/compiler/rustc_middle/src/ty/mod.rs b/compiler/rustc_middle/src/ty/mod.rs
index 3f9871190..a42d05706 100644
--- a/compiler/rustc_middle/src/ty/mod.rs
+++ b/compiler/rustc_middle/src/ty/mod.rs
@@ -17,7 +17,7 @@ pub use self::IntVarValue::*;
pub use self::Variance::*;
use crate::error::{OpaqueHiddenTypeMismatch, TypeMismatchReason};
use crate::metadata::ModChild;
-use crate::middle::privacy::AccessLevels;
+use crate::middle::privacy::EffectiveVisibilities;
use crate::mir::{Body, GeneratorLayout};
use crate::traits::{self, Reveal};
use crate::ty;
@@ -26,6 +26,7 @@ use crate::ty::util::Discr;
pub use adt::*;
pub use assoc::*;
pub use generics::*;
+use hir::OpaqueTyOrigin;
use rustc_ast as ast;
use rustc_ast::node_id::NodeMap;
use rustc_attr as attr;
@@ -37,11 +38,13 @@ use rustc_data_structures::tagged_ptr::CopyTaggedPtr;
use rustc_hir as hir;
use rustc_hir::def::{CtorKind, CtorOf, DefKind, LifetimeRes, Res};
use rustc_hir::def_id::{CrateNum, DefId, LocalDefId, LocalDefIdMap};
+use rustc_hir::definitions::Definitions;
use rustc_hir::Node;
use rustc_index::vec::IndexVec;
use rustc_macros::HashStable;
use rustc_query_system::ich::StableHashingContext;
use rustc_serialize::{Decodable, Encodable};
+use rustc_session::cstore::CrateStoreDyn;
use rustc_span::hygiene::MacroKind;
use rustc_span::symbol::{kw, sym, Ident, Symbol};
use rustc_span::{ExpnId, Span};
@@ -73,11 +76,11 @@ pub use self::closure::{
CAPTURE_STRUCT_LOCAL,
};
pub use self::consts::{
- Const, ConstInt, ConstKind, ConstS, InferConst, ScalarInt, Unevaluated, ValTree,
+ Const, ConstInt, ConstKind, ConstS, InferConst, ScalarInt, UnevaluatedConst, ValTree,
};
pub use self::context::{
tls, CanonicalUserType, CanonicalUserTypeAnnotation, CanonicalUserTypeAnnotations,
- CtxtInterners, DelaySpanBugEmitted, FreeRegionInfo, GeneratorDiagnosticData,
+ CtxtInterners, DeducedParamAttrs, DelaySpanBugEmitted, FreeRegionInfo, GeneratorDiagnosticData,
GeneratorInteriorTypeCause, GlobalCtxt, Lift, OnDiskCache, TyCtxt, TypeckResults, UserType,
UserTypeAnnotationIndex,
};
@@ -89,9 +92,9 @@ pub use self::sty::BoundRegionKind::*;
pub use self::sty::{
Article, Binder, BoundRegion, BoundRegionKind, BoundTy, BoundTyKind, BoundVar,
BoundVariableKind, CanonicalPolyFnSig, ClosureSubsts, ClosureSubstsParts, ConstVid,
- EarlyBinder, EarlyBoundRegion, ExistentialPredicate, ExistentialProjection,
- ExistentialTraitRef, FnSig, FreeRegion, GenSig, GeneratorSubsts, GeneratorSubstsParts,
- InlineConstSubsts, InlineConstSubstsParts, ParamConst, ParamTy, PolyExistentialProjection,
+ EarlyBoundRegion, ExistentialPredicate, ExistentialProjection, ExistentialTraitRef, FnSig,
+ FreeRegion, GenSig, GeneratorSubsts, GeneratorSubstsParts, InlineConstSubsts,
+ InlineConstSubstsParts, ParamConst, ParamTy, PolyExistentialProjection,
PolyExistentialTraitRef, PolyFnSig, PolyGenSig, PolyTraitRef, ProjectionTy, Region, RegionKind,
RegionVid, TraitRef, TyKind, TypeAndMut, UpvarSubsts, VarianceDiagInfo,
};
@@ -130,8 +133,8 @@ mod erase_regions;
mod generics;
mod impls_ty;
mod instance;
-mod layout_sanity_check;
mod list;
+mod opaque_types;
mod parameterized;
mod rvalue_scopes;
mod structural_impls;
@@ -141,8 +144,15 @@ mod sty;
pub type RegisteredTools = FxHashSet<Ident>;
-#[derive(Debug)]
pub struct ResolverOutputs {
+ pub definitions: Definitions,
+ pub global_ctxt: ResolverGlobalCtxt,
+ pub ast_lowering: ResolverAstLowering,
+}
+
+#[derive(Debug)]
+pub struct ResolverGlobalCtxt {
+ pub cstore: Box<CrateStoreDyn>,
pub visibilities: FxHashMap<LocalDefId, Visibility>,
/// This field is used to decide whether we should make `PRIVATE_IN_PUBLIC` a hard error.
pub has_pub_restricted: bool,
@@ -150,7 +160,7 @@ pub struct ResolverOutputs {
pub expn_that_defined: FxHashMap<LocalDefId, ExpnId>,
/// Reference span for definitions.
pub source_span: IndexVec<LocalDefId, Span>,
- pub access_levels: AccessLevels,
+ pub effective_visibilities: EffectiveVisibilities,
pub extern_crate_map: FxHashMap<LocalDefId, CrateNum>,
pub maybe_unused_trait_imports: FxIndexSet<LocalDefId>,
pub maybe_unused_extern_crates: Vec<(LocalDefId, Span)>,
@@ -682,7 +692,7 @@ pub enum PredicateKind<'tcx> {
Coerce(CoercePredicate<'tcx>),
/// Constant initializer must evaluate successfully.
- ConstEvaluatable(ty::Unevaluated<'tcx, ()>),
+ ConstEvaluatable(ty::Const<'tcx>),
/// Constants must be equal. The first component is the const that is expected.
ConstEquate(Const<'tcx>, Const<'tcx>),
@@ -861,6 +871,11 @@ impl<'tcx> TraitPredicate<'tcx> {
(BoundConstness::ConstIfConst, hir::Constness::NotConst) => false,
}
}
+
+ pub fn without_const(mut self) -> Self {
+ self.constness = BoundConstness::NotConst;
+ self
+ }
}
impl<'tcx> PolyTraitPredicate<'tcx> {
@@ -1296,6 +1311,106 @@ impl<'tcx> OpaqueHiddenType<'tcx> {
sub: sub_diag,
});
}
+
+ #[instrument(level = "debug", skip(tcx), ret)]
+ pub fn remap_generic_params_to_declaration_params(
+ self,
+ opaque_type_key: OpaqueTypeKey<'tcx>,
+ tcx: TyCtxt<'tcx>,
+ // typeck errors have subpar spans for opaque types, so delay error reporting until borrowck.
+ ignore_errors: bool,
+ origin: OpaqueTyOrigin,
+ ) -> Self {
+ let OpaqueTypeKey { def_id, substs } = opaque_type_key;
+
+ // Use substs to build up a reverse map from regions to their
+        // identity mappings. This is necessary because `impl
+ // Trait` lifetimes are computed by replacing existing
+ // lifetimes with 'static and remapping only those used in the
+ // `impl Trait` return type, resulting in the parameters
+ // shifting.
+ let id_substs = InternalSubsts::identity_for_item(tcx, def_id.to_def_id());
+ debug!(?id_substs);
+
+ let map = substs.iter().zip(id_substs);
+
+ let map: FxHashMap<GenericArg<'tcx>, GenericArg<'tcx>> = match origin {
+ // HACK: The HIR lowering for async fn does not generate
+ // any `+ Captures<'x>` bounds for the `impl Future<...>`, so all async fns with lifetimes
+ // would now fail to compile. We should probably just make hir lowering fill this in properly.
+ OpaqueTyOrigin::AsyncFn(_) => map.collect(),
+ OpaqueTyOrigin::FnReturn(_) | OpaqueTyOrigin::TyAlias => {
+ // Opaque types may only use regions that are bound. So for
+ // ```rust
+ // type Foo<'a, 'b, 'c> = impl Trait<'a> + 'b;
+ // ```
+ // we may not use `'c` in the hidden type.
+ struct OpaqueTypeLifetimeCollector<'tcx> {
+ lifetimes: FxHashSet<ty::Region<'tcx>>,
+ }
+
+ impl<'tcx> ty::TypeVisitor<'tcx> for OpaqueTypeLifetimeCollector<'tcx> {
+ fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<Self::BreakTy> {
+ self.lifetimes.insert(r);
+ r.super_visit_with(self)
+ }
+ }
+
+ let mut collector = OpaqueTypeLifetimeCollector { lifetimes: Default::default() };
+
+ for pred in tcx.bound_explicit_item_bounds(def_id.to_def_id()).transpose_iter() {
+ let pred = pred.map_bound(|(pred, _)| *pred).subst(tcx, id_substs);
+
+ trace!(pred=?pred.kind());
+
+ // We only ignore opaque type substs if the opaque type is the outermost type.
+ // The opaque type may be nested within itself via recursion in e.g.
+ // type Foo<'a> = impl PartialEq<Foo<'a>>;
+                    // which thus mentions `'a` and should therefore accept hidden types that borrow 'a
+ // instead of requiring an additional `+ 'a`.
+ match pred.kind().skip_binder() {
+ ty::PredicateKind::Trait(TraitPredicate {
+ trait_ref: ty::TraitRef { def_id: _, substs },
+ constness: _,
+ polarity: _,
+ }) => {
+ trace!(?substs);
+ for subst in &substs[1..] {
+ subst.visit_with(&mut collector);
+ }
+ }
+ ty::PredicateKind::Projection(ty::ProjectionPredicate {
+ projection_ty: ty::ProjectionTy { substs, item_def_id: _ },
+ term,
+ }) => {
+ for subst in &substs[1..] {
+ subst.visit_with(&mut collector);
+ }
+ term.visit_with(&mut collector);
+ }
+ _ => {
+ pred.visit_with(&mut collector);
+ }
+ }
+ }
+ let lifetimes = collector.lifetimes;
+ trace!(?lifetimes);
+ map.filter(|(_, v)| {
+ let ty::GenericArgKind::Lifetime(lt) = v.unpack() else {
+ return true;
+ };
+ lifetimes.contains(&lt)
+ })
+ .collect()
+ }
+ };
+ debug!("map = {:#?}", map);
+
+ // Convert the type from the function into a type valid outside
+ // the function, by replacing invalid regions with 'static,
+ // after producing an error for each of them.
+ self.fold_with(&mut opaque_types::ReverseMapper::new(tcx, map, self.span, ignore_errors))
+ }
}
/// The "placeholder index" fully defines a placeholder region, type, or const. Placeholders are
@@ -2590,7 +2705,7 @@ pub fn provide(providers: &mut ty::query::Providers) {
closure::provide(providers);
context::provide(providers);
erase_regions::provide(providers);
- layout::provide(providers);
+ inhabitedness::provide(providers);
util::provide(providers);
print::provide(providers);
super::util::bug::provide(providers);
@@ -2598,7 +2713,6 @@ pub fn provide(providers: &mut ty::query::Providers) {
*providers = ty::query::Providers {
trait_impls_of: trait_def::trait_impls_of_provider,
incoherent_impls: trait_def::incoherent_impls_provider,
- type_uninhabited_from: inhabitedness::type_uninhabited_from,
const_param_default: consts::const_param_default,
vtable_allocation: vtable::vtable_allocation_provider,
..*providers
@@ -2667,8 +2781,9 @@ pub struct DestructuredConst<'tcx> {
mod size_asserts {
use super::*;
use rustc_data_structures::static_assert_size;
- // These are in alphabetical order, which is easy to maintain.
+ // tidy-alphabetical-start
static_assert_size!(PredicateS<'_>, 48);
static_assert_size!(TyS<'_>, 40);
static_assert_size!(WithStableHash<TyS<'_>>, 56);
+ // tidy-alphabetical-end
}
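
The capture rule that `remap_generic_params_to_declaration_params` implements above can be seen from user-level code. The sketch below is hypothetical (it is not part of this patch) and mirrors the example in the new doc comment: only lifetimes that appear in the opaque type's own bounds may occur in the hidden type, so a defining use that smuggles in `'c` is expected to be rejected.

    #![feature(type_alias_impl_trait)]

    trait Trait<'a> {}
    impl<'a, T: ?Sized> Trait<'a> for T {}

    // Only `'a` and `'b` appear in the bounds, so `'c` may not occur in the
    // hidden type even though it is a parameter of the alias.
    type Foo<'a, 'b, 'c> = impl Trait<'a> + 'b;

    fn defining<'a, 'b, 'c>(x: &'c u32) -> Foo<'a, 'b, 'c>
    where
        'c: 'b,
    {
        x // expected to be rejected: the hidden type `&'c u32` mentions `'c`
    }

    fn main() {}
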
diff --git a/compiler/rustc_middle/src/ty/normalize_erasing_regions.rs b/compiler/rustc_middle/src/ty/normalize_erasing_regions.rs
index 9db5a2894..ee13920d5 100644
--- a/compiler/rustc_middle/src/ty/normalize_erasing_regions.rs
+++ b/compiler/rustc_middle/src/ty/normalize_erasing_regions.rs
@@ -10,8 +10,7 @@
use crate::mir;
use crate::traits::query::NoSolution;
use crate::ty::fold::{FallibleTypeFolder, TypeFoldable, TypeFolder};
-use crate::ty::subst::{Subst, SubstsRef};
-use crate::ty::{self, EarlyBinder, Ty, TyCtxt};
+use crate::ty::{self, EarlyBinder, SubstsRef, Ty, TyCtxt};
#[derive(Debug, Copy, Clone, HashStable, TyEncodable, TyDecodable)]
pub enum NormalizationError<'tcx> {
@@ -215,15 +214,6 @@ impl<'tcx> TypeFolder<'tcx> for NormalizeAfterErasingRegionsFolder<'tcx> {
fn fold_const(&mut self, c: ty::Const<'tcx>) -> ty::Const<'tcx> {
self.normalize_generic_arg_after_erasing_regions(c.into()).expect_const()
}
-
- #[inline]
- fn fold_mir_const(&mut self, c: mir::ConstantKind<'tcx>) -> mir::ConstantKind<'tcx> {
- // FIXME: This *probably* needs canonicalization too!
- let arg = self.param_env.and(c);
- self.tcx
- .try_normalize_mir_const_after_erasing_regions(arg)
- .unwrap_or_else(|_| bug!("failed to normalize {:?}", c))
- }
}
struct TryNormalizeAfterErasingRegionsFolder<'tcx> {
@@ -268,16 +258,4 @@ impl<'tcx> FallibleTypeFolder<'tcx> for TryNormalizeAfterErasingRegionsFolder<'t
Err(_) => Err(NormalizationError::Const(c)),
}
}
-
- fn try_fold_mir_const(
- &mut self,
- c: mir::ConstantKind<'tcx>,
- ) -> Result<mir::ConstantKind<'tcx>, Self::Error> {
- // FIXME: This *probably* needs canonicalization too!
- let arg = self.param_env.and(c);
- match self.tcx.try_normalize_mir_const_after_erasing_regions(arg) {
- Ok(c) => Ok(c),
- Err(_) => Err(NormalizationError::ConstantKind(c)),
- }
- }
}
diff --git a/compiler/rustc_middle/src/ty/opaque_types.rs b/compiler/rustc_middle/src/ty/opaque_types.rs
new file mode 100644
index 000000000..b05c63109
--- /dev/null
+++ b/compiler/rustc_middle/src/ty/opaque_types.rs
@@ -0,0 +1,218 @@
+use rustc_data_structures::fx::FxHashMap;
+use rustc_middle::ty::fold::{TypeFolder, TypeSuperFoldable};
+use rustc_middle::ty::subst::{GenericArg, GenericArgKind};
+use rustc_middle::ty::{self, Ty, TyCtxt, TypeFoldable};
+use rustc_span::Span;
+
+/// Converts generic params of a TypeFoldable from one
+/// item's generics to another. Usually from a function's generics
+/// list to the opaque type's own generics.
+pub(super) struct ReverseMapper<'tcx> {
+ tcx: TyCtxt<'tcx>,
+ map: FxHashMap<GenericArg<'tcx>, GenericArg<'tcx>>,
+ /// see call sites to fold_kind_no_missing_regions_error
+ /// for an explanation of this field.
+ do_not_error: bool,
+
+ /// We do not want to emit any errors in typeck because
+ /// the spans in typeck are subpar at the moment.
+ /// Borrowck will do the same work again (this time with
+ /// lifetime information) and thus report better errors.
+ ignore_errors: bool,
+
+ /// Span of function being checked.
+ span: Span,
+}
+
+impl<'tcx> ReverseMapper<'tcx> {
+ pub(super) fn new(
+ tcx: TyCtxt<'tcx>,
+ map: FxHashMap<GenericArg<'tcx>, GenericArg<'tcx>>,
+ span: Span,
+ ignore_errors: bool,
+ ) -> Self {
+ Self { tcx, map, do_not_error: false, ignore_errors, span }
+ }
+
+ fn fold_kind_no_missing_regions_error(&mut self, kind: GenericArg<'tcx>) -> GenericArg<'tcx> {
+ assert!(!self.do_not_error);
+ self.do_not_error = true;
+ let kind = kind.fold_with(self);
+ self.do_not_error = false;
+ kind
+ }
+
+ fn fold_kind_normally(&mut self, kind: GenericArg<'tcx>) -> GenericArg<'tcx> {
+ assert!(!self.do_not_error);
+ kind.fold_with(self)
+ }
+}
+
+impl<'tcx> TypeFolder<'tcx> for ReverseMapper<'tcx> {
+ fn tcx(&self) -> TyCtxt<'tcx> {
+ self.tcx
+ }
+
+ #[instrument(skip(self), level = "debug")]
+ fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
+ match *r {
+ // Ignore bound regions and `'static` regions that appear in the
+            // type; we only need to remap regions that reference lifetimes
+ // from the function declaration.
+ // This would ignore `'r` in a type like `for<'r> fn(&'r u32)`.
+ ty::ReLateBound(..) | ty::ReStatic => return r,
+
+ // If regions have been erased (by writeback), don't try to unerase
+ // them.
+ ty::ReErased => return r,
+
+ // The regions that we expect from borrow checking.
+ ty::ReEarlyBound(_) | ty::ReFree(_) => {}
+
+ ty::RePlaceholder(_) | ty::ReVar(_) => {
+ // All of the regions in the type should either have been
+ // erased by writeback, or mapped back to named regions by
+ // borrow checking.
+ bug!("unexpected region kind in opaque type: {:?}", r);
+ }
+ }
+
+ match self.map.get(&r.into()).map(|k| k.unpack()) {
+ Some(GenericArgKind::Lifetime(r1)) => r1,
+ Some(u) => panic!("region mapped to unexpected kind: {:?}", u),
+ None if self.do_not_error => self.tcx.lifetimes.re_static,
+ None => {
+ self.tcx
+ .sess
+ .struct_span_err(self.span, "non-defining opaque type use in defining scope")
+ .span_label(
+ self.span,
+ format!(
+ "lifetime `{}` is part of concrete type but not used in \
+ parameter list of the `impl Trait` type alias",
+ r
+ ),
+ )
+ .emit();
+
+ self.tcx().lifetimes.re_static
+ }
+ }
+ }
+
+ fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> {
+ match *ty.kind() {
+ ty::Closure(def_id, substs) => {
+ // I am a horrible monster and I pray for death. When
+ // we encounter a closure here, it is always a closure
+ // from within the function that we are currently
+ // type-checking -- one that is now being encapsulated
+ // in an opaque type. Ideally, we would
+ // go through the types/lifetimes that it references
+ // and treat them just like we would any other type,
+ // which means we would error out if we find any
+ // reference to a type/region that is not in the
+ // "reverse map".
+ //
+ // **However,** in the case of closures, there is a
+ // somewhat subtle (read: hacky) consideration. The
+ // problem is that our closure types currently include
+ // all the lifetime parameters declared on the
+ // enclosing function, even if they are unused by the
+ // closure itself. We can't readily filter them out,
+ // so here we replace those values with `'empty`. This
+ // can't really make a difference to the rest of the
+ // compiler; those regions are ignored for the
+ // outlives relation, and hence don't affect trait
+ // selection or auto traits, and they are erased
+ // during codegen.
+
+ let generics = self.tcx.generics_of(def_id);
+ let substs = self.tcx.mk_substs(substs.iter().enumerate().map(|(index, kind)| {
+ if index < generics.parent_count {
+ // Accommodate missing regions in the parent kinds...
+ self.fold_kind_no_missing_regions_error(kind)
+ } else {
+ // ...but not elsewhere.
+ self.fold_kind_normally(kind)
+ }
+ }));
+
+ self.tcx.mk_closure(def_id, substs)
+ }
+
+ ty::Generator(def_id, substs, movability) => {
+ let generics = self.tcx.generics_of(def_id);
+ let substs = self.tcx.mk_substs(substs.iter().enumerate().map(|(index, kind)| {
+ if index < generics.parent_count {
+ // Accommodate missing regions in the parent kinds...
+ self.fold_kind_no_missing_regions_error(kind)
+ } else {
+ // ...but not elsewhere.
+ self.fold_kind_normally(kind)
+ }
+ }));
+
+ self.tcx.mk_generator(def_id, substs, movability)
+ }
+
+ ty::Param(param) => {
+ // Look it up in the substitution list.
+ match self.map.get(&ty.into()).map(|k| k.unpack()) {
+ // Found it in the substitution list; replace with the parameter from the
+ // opaque type.
+ Some(GenericArgKind::Type(t1)) => t1,
+ Some(u) => panic!("type mapped to unexpected kind: {:?}", u),
+ None => {
+ debug!(?param, ?self.map);
+ if !self.ignore_errors {
+ self.tcx
+ .sess
+ .struct_span_err(
+ self.span,
+ &format!(
+ "type parameter `{}` is part of concrete type but not \
+ used in parameter list for the `impl Trait` type alias",
+ ty
+ ),
+ )
+ .emit();
+ }
+
+ self.tcx().ty_error()
+ }
+ }
+ }
+
+ _ => ty.super_fold_with(self),
+ }
+ }
+
+ fn fold_const(&mut self, ct: ty::Const<'tcx>) -> ty::Const<'tcx> {
+ trace!("checking const {:?}", ct);
+ // Find a const parameter
+ match ct.kind() {
+ ty::ConstKind::Param(..) => {
+ // Look it up in the substitution list.
+ match self.map.get(&ct.into()).map(|k| k.unpack()) {
+ // Found it in the substitution list, replace with the parameter from the
+ // opaque type.
+ Some(GenericArgKind::Const(c1)) => c1,
+ Some(u) => panic!("const mapped to unexpected kind: {:?}", u),
+ None => {
+ if !self.ignore_errors {
+ self.tcx.sess.emit_err(ty::ConstNotUsedTraitAlias {
+ ct: ct.to_string(),
+ span: self.span,
+ });
+ }
+
+ self.tcx().const_error(ct.ty())
+ }
+ }
+ }
+
+ _ => ct,
+ }
+ }
+}
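
The error reported in the `ty::Param` arm of `fold_ty` above is easiest to see from user code. A hypothetical compile-fail sketch (not part of this patch): the hidden type of a type-alias-impl-trait uses a type parameter of the defining function that is not in the alias's parameter list, so `ReverseMapper` finds no mapping for it and emits the diagnostic quoted in the code.

    #![feature(type_alias_impl_trait)]

    type NotGeneric = impl Sized;

    fn defining<T: Default>() -> NotGeneric {
        // expected error: type parameter `T` is part of concrete type but not
        // used in parameter list for the `impl Trait` type alias
        T::default()
    }

    fn main() {}
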
diff --git a/compiler/rustc_middle/src/ty/parameterized.rs b/compiler/rustc_middle/src/ty/parameterized.rs
index 9c8dc30e2..e1e705a92 100644
--- a/compiler/rustc_middle/src/ty/parameterized.rs
+++ b/compiler/rustc_middle/src/ty/parameterized.rs
@@ -1,3 +1,4 @@
+use rustc_data_structures::fx::FxHashMap;
use rustc_hir::def_id::{DefId, DefIndex};
use rustc_index::vec::{Idx, IndexVec};
@@ -29,6 +30,10 @@ impl<I: Idx + 'static, T: ParameterizedOverTcx> ParameterizedOverTcx for IndexVe
type Value<'tcx> = IndexVec<I, T::Value<'tcx>>;
}
+impl<I: 'static, T: ParameterizedOverTcx> ParameterizedOverTcx for FxHashMap<I, T> {
+ type Value<'tcx> = FxHashMap<I, T::Value<'tcx>>;
+}
+
impl<T: ParameterizedOverTcx> ParameterizedOverTcx for ty::Binder<'static, T> {
type Value<'tcx> = ty::Binder<'tcx, T::Value<'tcx>>;
}
@@ -56,6 +61,7 @@ trivially_parameterized_over_tcx! {
crate::middle::resolve_lifetime::ObjectLifetimeDefault,
crate::mir::ConstQualifs,
ty::AssocItemContainer,
+ ty::DeducedParamAttrs,
ty::Generics,
ty::ImplPolarity,
ty::ReprOptions,
@@ -77,6 +83,7 @@ trivially_parameterized_over_tcx! {
rustc_hir::def::DefKind,
rustc_hir::def_id::DefIndex,
rustc_hir::definitions::DefKey,
+ rustc_index::bit_set::BitSet<u32>,
rustc_index::bit_set::FiniteBitSet<u32>,
rustc_session::cstore::ForeignModule,
rustc_session::cstore::LinkagePreference,
diff --git a/compiler/rustc_middle/src/ty/print/mod.rs b/compiler/rustc_middle/src/ty/print/mod.rs
index d57cf8f01..44b9548db 100644
--- a/compiler/rustc_middle/src/ty/print/mod.rs
+++ b/compiler/rustc_middle/src/ty/print/mod.rs
@@ -1,9 +1,9 @@
-use crate::ty::subst::{GenericArg, Subst};
+use crate::ty::GenericArg;
use crate::ty::{self, DefIdTree, Ty, TyCtxt};
use rustc_data_structures::fx::FxHashSet;
use rustc_data_structures::sso::SsoHashSet;
-use rustc_hir::def_id::{CrateNum, DefId};
+use rustc_hir::def_id::{CrateNum, DefId, LocalDefId};
use rustc_hir::definitions::{DefPathData, DisambiguatedDefPathData};
// `pretty` is a separate module only for organization.
@@ -325,3 +325,12 @@ impl<'tcx, P: Printer<'tcx>> Print<'tcx, P> for ty::Const<'tcx> {
cx.print_const(*self)
}
}
+
+// This is only used by query descriptions
+pub fn describe_as_module(def_id: LocalDefId, tcx: TyCtxt<'_>) -> String {
+ if def_id.is_top_level_module() {
+ "top-level module".to_string()
+ } else {
+ format!("module `{}`", tcx.def_path_str(def_id.to_def_id()))
+ }
+}
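
`describe_as_module` is meant to be called from query descriptions. A hedged sketch of such a call site in the query DSL (the concrete query shown is illustrative, modeled on the per-module analysis queries):

    query check_mod_privacy(key: LocalDefId) -> () {
        desc { |tcx| "checking privacy in {}", describe_as_module(key, tcx) }
    }

For the crate root this renders as "checking privacy in top-level module"; for any other module it renders as, e.g., "checking privacy in module `foo::bar`".
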
diff --git a/compiler/rustc_middle/src/ty/print/pretty.rs b/compiler/rustc_middle/src/ty/print/pretty.rs
index 97bddb93e..ef9aa236b 100644
--- a/compiler/rustc_middle/src/ty/print/pretty.rs
+++ b/compiler/rustc_middle/src/ty/print/pretty.rs
@@ -1,9 +1,9 @@
use crate::mir::interpret::{AllocRange, GlobalAlloc, Pointer, Provenance, Scalar};
-use crate::ty::subst::{GenericArg, GenericArgKind, Subst};
use crate::ty::{
self, ConstInt, DefIdTree, ParamConst, ScalarInt, Term, TermKind, Ty, TyCtxt, TypeFoldable,
TypeSuperFoldable, TypeSuperVisitable, TypeVisitable,
};
+use crate::ty::{GenericArg, GenericArgKind};
use rustc_apfloat::ieee::{Double, Single};
use rustc_data_structures::fx::{FxHashMap, FxIndexMap};
use rustc_data_structures::sso::SsoHashSet;
@@ -16,6 +16,7 @@ use rustc_session::cstore::{ExternCrate, ExternCrateSource};
use rustc_span::symbol::{kw, Ident, Symbol};
use rustc_target::abi::Size;
use rustc_target::spec::abi::Abi;
+use smallvec::SmallVec;
use std::cell::Cell;
use std::char;
@@ -62,6 +63,7 @@ thread_local! {
static NO_TRIMMED_PATH: Cell<bool> = const { Cell::new(false) };
static NO_QUERIES: Cell<bool> = const { Cell::new(false) };
static NO_VISIBLE_PATH: Cell<bool> = const { Cell::new(false) };
+ static NO_VERBOSE_CONSTANTS: Cell<bool> = const { Cell::new(false) };
}
macro_rules! define_helper {
@@ -116,6 +118,9 @@ define_helper!(
/// Prevent selection of visible paths. `Display` impl of DefId will prefer
/// visible (public) reexports of types as paths.
fn with_no_visible_paths(NoVisibleGuard, NO_VISIBLE_PATH);
+ /// Prevent verbose printing of constants. Verbose printing of constants is
+    /// not desirable in some contexts, such as `std::any::type_name`.
+ fn with_no_verbose_constants(NoVerboseConstantsGuard, NO_VERBOSE_CONSTANTS);
);
/// The "region highlights" are used to control region printing during
@@ -637,7 +642,9 @@ pub trait PrettyPrinter<'tcx>:
p!(print_def_path(def_id, &[]));
}
ty::Projection(ref data) => {
- if self.tcx().def_kind(data.item_def_id) == DefKind::ImplTraitPlaceholder {
+ if !(self.tcx().sess.verbose() || NO_QUERIES.with(|q| q.get()))
+ && self.tcx().def_kind(data.item_def_id) == DefKind::ImplTraitPlaceholder
+ {
return self.pretty_print_opaque_impl_type(data.item_def_id, data.substs);
} else {
p!(print(data))
@@ -756,7 +763,7 @@ pub trait PrettyPrinter<'tcx>:
}
ty::Array(ty, sz) => {
p!("[", print(ty), "; ");
- if self.tcx().sess.verbose() {
+ if !NO_VERBOSE_CONSTANTS.with(|flag| flag.get()) && self.tcx().sess.verbose() {
p!(write("{:?}", sz));
} else if let ty::ConstKind::Unevaluated(..) = sz.kind() {
// Do not try to evaluate unevaluated constants. If we are const evaluating an
@@ -792,9 +799,9 @@ pub trait PrettyPrinter<'tcx>:
let mut traits = FxIndexMap::default();
let mut fn_traits = FxIndexMap::default();
let mut is_sized = false;
+ let mut lifetimes = SmallVec::<[ty::Region<'tcx>; 1]>::new();
- for predicate in bounds.transpose_iter().map(|e| e.map_bound(|(p, _)| *p)) {
- let predicate = predicate.subst(tcx, substs);
+ for (predicate, _) in bounds.subst_iter_copied(tcx, substs) {
let bound_predicate = predicate.kind();
match bound_predicate.skip_binder() {
@@ -823,6 +830,9 @@ pub trait PrettyPrinter<'tcx>:
&mut fn_traits,
);
}
+ ty::PredicateKind::TypeOutlives(outlives) => {
+ lifetimes.push(outlives.1);
+ }
_ => {}
}
}
@@ -927,7 +937,7 @@ pub trait PrettyPrinter<'tcx>:
// unless we can find out what generator return type it comes from.
let term = if let Some(ty) = term.skip_binder().ty()
&& let ty::Projection(proj) = ty.kind()
- && let assoc = tcx.associated_item(proj.item_def_id)
+ && let Some(assoc) = tcx.opt_associated_item(proj.item_def_id)
&& assoc.trait_container(tcx) == tcx.lang_items().gen_trait()
&& assoc.name == rustc_span::sym::Return
{
@@ -976,6 +986,11 @@ pub trait PrettyPrinter<'tcx>:
write!(self, "Sized")?;
}
+ for re in lifetimes {
+ write!(self, " + ")?;
+ self = self.print_region(re)?;
+ }
+
Ok(self)
}
@@ -1088,17 +1103,9 @@ pub trait PrettyPrinter<'tcx>:
.generics_of(principal.def_id)
.own_substs_no_defaults(cx.tcx(), principal.substs);
- // Don't print `'_` if there's no unerased regions.
- let print_regions = args.iter().any(|arg| match arg.unpack() {
- GenericArgKind::Lifetime(r) => !r.is_erased(),
- _ => false,
- });
- let mut args = args.iter().cloned().filter(|arg| match arg.unpack() {
- GenericArgKind::Lifetime(_) => print_regions,
- _ => true,
- });
let mut projections = predicates.projection_bounds();
+ let mut args = args.iter().cloned();
let arg0 = args.next();
let projection0 = projections.next();
if arg0.is_some() || projection0.is_some() {
@@ -1178,7 +1185,7 @@ pub trait PrettyPrinter<'tcx>:
) -> Result<Self::Const, Self::Error> {
define_scoped_cx!(self);
- if self.tcx().sess.verbose() {
+ if !NO_VERBOSE_CONSTANTS.with(|flag| flag.get()) && self.tcx().sess.verbose() {
p!(write("Const({:?}: {:?})", ct.kind(), ct.ty()));
return Ok(self);
}
@@ -1201,9 +1208,7 @@ pub trait PrettyPrinter<'tcx>:
}
match ct.kind() {
- ty::ConstKind::Unevaluated(ty::Unevaluated { def, substs, promoted }) => {
- assert_eq!(promoted, ());
-
+ ty::ConstKind::Unevaluated(ty::UnevaluatedConst { def, substs }) => {
match self.tcx().def_kind(def.did) {
DefKind::Static(..) | DefKind::Const | DefKind::AssocConst => {
p!(print_value_path(def.did, substs))
@@ -1415,7 +1420,7 @@ pub trait PrettyPrinter<'tcx>:
) -> Result<Self::Const, Self::Error> {
define_scoped_cx!(self);
- if self.tcx().sess.verbose() {
+ if !NO_VERBOSE_CONSTANTS.with(|flag| flag.get()) && self.tcx().sess.verbose() {
p!(write("ValTree({:?}: ", valtree), print(ty), ")");
return Ok(self);
}
@@ -1572,7 +1577,9 @@ pub struct FmtPrinterData<'a, 'tcx> {
in_value: bool,
pub print_alloc_ids: bool,
+ // set of all named (non-anonymous) region names
used_region_names: FxHashSet<Symbol>,
+
region_index: usize,
binder_depth: usize,
printed_type_count: usize,
@@ -1847,22 +1854,11 @@ impl<'tcx> Printer<'tcx> for FmtPrinter<'_, 'tcx> {
) -> Result<Self::Path, Self::Error> {
self = print_prefix(self)?;
- // Don't print `'_` if there's no unerased regions.
- let print_regions = self.tcx.sess.verbose()
- || args.iter().any(|arg| match arg.unpack() {
- GenericArgKind::Lifetime(r) => !r.is_erased(),
- _ => false,
- });
- let args = args.iter().cloned().filter(|arg| match arg.unpack() {
- GenericArgKind::Lifetime(_) => print_regions,
- _ => true,
- });
-
- if args.clone().next().is_some() {
+ if args.first().is_some() {
if self.in_value {
write!(self, "::")?;
}
- self.generic_delimiters(|cx| cx.comma_sep(args))
+ self.generic_delimiters(|cx| cx.comma_sep(args.iter().cloned()))
} else {
Ok(self)
}
@@ -2074,7 +2070,14 @@ struct RegionFolder<'a, 'tcx> {
tcx: TyCtxt<'tcx>,
current_index: ty::DebruijnIndex,
region_map: BTreeMap<ty::BoundRegion, ty::Region<'tcx>>,
- name: &'a mut (dyn FnMut(ty::BoundRegion) -> ty::Region<'tcx> + 'a),
+ name: &'a mut (
+ dyn FnMut(
+ Option<ty::DebruijnIndex>, // Debruijn index of the folded late-bound region
+ ty::DebruijnIndex, // Index corresponding to binder level
+ ty::BoundRegion,
+ ) -> ty::Region<'tcx>
+ + 'a
+ ),
}
impl<'a, 'tcx> ty::TypeFolder<'tcx> for RegionFolder<'a, 'tcx> {
@@ -2105,7 +2108,9 @@ impl<'a, 'tcx> ty::TypeFolder<'tcx> for RegionFolder<'a, 'tcx> {
fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
let name = &mut self.name;
let region = match *r {
- ty::ReLateBound(_, br) => *self.region_map.entry(br).or_insert_with(|| name(br)),
+ ty::ReLateBound(db, br) if db >= self.current_index => {
+ *self.region_map.entry(br).or_insert_with(|| name(Some(db), self.current_index, br))
+ }
ty::RePlaceholder(ty::PlaceholderRegion { name: kind, .. }) => {
// If this is an anonymous placeholder, don't rename. Otherwise, in some
// async fns, we get a `for<'r> Send` bound
@@ -2114,7 +2119,10 @@ impl<'a, 'tcx> ty::TypeFolder<'tcx> for RegionFolder<'a, 'tcx> {
_ => {
// Index doesn't matter, since this is just for naming and these never get bound
let br = ty::BoundRegion { var: ty::BoundVar::from_u32(0), kind };
- *self.region_map.entry(br).or_insert_with(|| name(br))
+ *self
+ .region_map
+ .entry(br)
+ .or_insert_with(|| name(None, self.current_index, br))
}
}
}
@@ -2139,23 +2147,31 @@ impl<'tcx> FmtPrinter<'_, 'tcx> {
where
T: Print<'tcx, Self, Output = Self, Error = fmt::Error> + TypeFoldable<'tcx>,
{
- fn name_by_region_index(index: usize) -> Symbol {
- match index {
- 0 => Symbol::intern("'r"),
- 1 => Symbol::intern("'s"),
- i => Symbol::intern(&format!("'t{}", i - 2)),
+ fn name_by_region_index(
+ index: usize,
+ available_names: &mut Vec<Symbol>,
+ num_available: usize,
+ ) -> Symbol {
+ if let Some(name) = available_names.pop() {
+ name
+ } else {
+ Symbol::intern(&format!("'z{}", index - num_available))
}
}
+ debug!("name_all_regions");
+
// Replace any anonymous late-bound regions with named
// variants, using new unique identifiers, so that we can
// clearly differentiate between named and unnamed regions in
// the output. We'll probably want to tweak this over time to
// decide just how much information to give.
if self.binder_depth == 0 {
- self.prepare_late_bound_region_info(value);
+ self.prepare_region_info(value);
}
+ debug!("self.used_region_names: {:?}", &self.used_region_names);
+
let mut empty = true;
let mut start_or_continue = |cx: &mut Self, start: &str, cont: &str| {
let w = if empty {
@@ -2172,13 +2188,30 @@ impl<'tcx> FmtPrinter<'_, 'tcx> {
define_scoped_cx!(self);
+ let possible_names =
+ ('a'..='z').rev().map(|s| Symbol::intern(&format!("'{s}"))).collect::<Vec<_>>();
+
+ let mut available_names = possible_names
+ .into_iter()
+ .filter(|name| !self.used_region_names.contains(&name))
+ .collect::<Vec<_>>();
+ debug!(?available_names);
+ let num_available = available_names.len();
+
let mut region_index = self.region_index;
- let mut next_name = |this: &Self| loop {
- let name = name_by_region_index(region_index);
- region_index += 1;
- if !this.used_region_names.contains(&name) {
- break name;
+ let mut next_name = |this: &Self| {
+ let mut name;
+
+ loop {
+ name = name_by_region_index(region_index, &mut available_names, num_available);
+ region_index += 1;
+
+ if !this.used_region_names.contains(&name) {
+ break;
+ }
}
+
+ name
};
// If we want to print verbosely, then print *all* binders, even if they
@@ -2199,6 +2232,7 @@ impl<'tcx> FmtPrinter<'_, 'tcx> {
ty::BrAnon(_) | ty::BrEnv => {
start_or_continue(&mut self, "for<", ", ");
let name = next_name(&self);
+ debug!(?name);
do_continue(&mut self, name);
ty::BrNamed(CRATE_DEF_ID.to_def_id(), name)
}
@@ -2227,24 +2261,63 @@ impl<'tcx> FmtPrinter<'_, 'tcx> {
})
} else {
let tcx = self.tcx;
- let mut name = |br: ty::BoundRegion| {
- start_or_continue(&mut self, "for<", ", ");
- let kind = match br.kind {
+
+ // Closure used in `RegionFolder` to create names for anonymous late-bound
+ // regions. We use two `DebruijnIndex`es (one for the currently folded
+ // late-bound region and the other for the binder level) to determine
+ // whether a name has already been created for the currently folded region,
+ // see issue #102392.
+ let mut name = |lifetime_idx: Option<ty::DebruijnIndex>,
+ binder_level_idx: ty::DebruijnIndex,
+ br: ty::BoundRegion| {
+ let (name, kind) = match br.kind {
ty::BrAnon(_) | ty::BrEnv => {
let name = next_name(&self);
- do_continue(&mut self, name);
- ty::BrNamed(CRATE_DEF_ID.to_def_id(), name)
+
+ if let Some(lt_idx) = lifetime_idx {
+ if lt_idx > binder_level_idx {
+ let kind = ty::BrNamed(CRATE_DEF_ID.to_def_id(), name);
+ return tcx.mk_region(ty::ReLateBound(
+ ty::INNERMOST,
+ ty::BoundRegion { var: br.var, kind },
+ ));
+ }
+ }
+
+ (name, ty::BrNamed(CRATE_DEF_ID.to_def_id(), name))
}
ty::BrNamed(def_id, kw::UnderscoreLifetime) => {
let name = next_name(&self);
- do_continue(&mut self, name);
- ty::BrNamed(def_id, name)
+
+ if let Some(lt_idx) = lifetime_idx {
+ if lt_idx > binder_level_idx {
+ let kind = ty::BrNamed(def_id, name);
+ return tcx.mk_region(ty::ReLateBound(
+ ty::INNERMOST,
+ ty::BoundRegion { var: br.var, kind },
+ ));
+ }
+ }
+
+ (name, ty::BrNamed(def_id, name))
}
ty::BrNamed(_, name) => {
- do_continue(&mut self, name);
- br.kind
+ if let Some(lt_idx) = lifetime_idx {
+ if lt_idx > binder_level_idx {
+ let kind = br.kind;
+ return tcx.mk_region(ty::ReLateBound(
+ ty::INNERMOST,
+ ty::BoundRegion { var: br.var, kind },
+ ));
+ }
+ }
+
+ (name, br.kind)
}
};
+
+ start_or_continue(&mut self, "for<", ", ");
+ do_continue(&mut self, name);
tcx.mk_region(ty::ReLateBound(ty::INNERMOST, ty::BoundRegion { var: br.var, kind }))
};
let mut folder = RegionFolder {
@@ -2292,29 +2365,37 @@ impl<'tcx> FmtPrinter<'_, 'tcx> {
Ok(inner)
}
- fn prepare_late_bound_region_info<T>(&mut self, value: &ty::Binder<'tcx, T>)
+ fn prepare_region_info<T>(&mut self, value: &ty::Binder<'tcx, T>)
where
T: TypeVisitable<'tcx>,
{
- struct LateBoundRegionNameCollector<'a, 'tcx> {
- used_region_names: &'a mut FxHashSet<Symbol>,
+ struct RegionNameCollector<'tcx> {
+ used_region_names: FxHashSet<Symbol>,
type_collector: SsoHashSet<Ty<'tcx>>,
}
- impl<'tcx> ty::visit::TypeVisitor<'tcx> for LateBoundRegionNameCollector<'_, 'tcx> {
+ impl<'tcx> RegionNameCollector<'tcx> {
+ fn new() -> Self {
+ RegionNameCollector {
+ used_region_names: Default::default(),
+ type_collector: SsoHashSet::new(),
+ }
+ }
+ }
+
+ impl<'tcx> ty::visit::TypeVisitor<'tcx> for RegionNameCollector<'tcx> {
type BreakTy = ();
fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<Self::BreakTy> {
trace!("address: {:p}", r.0.0);
- if let ty::ReLateBound(_, ty::BoundRegion { kind: ty::BrNamed(_, name), .. }) = *r {
- self.used_region_names.insert(name);
- } else if let ty::RePlaceholder(ty::PlaceholderRegion {
- name: ty::BrNamed(_, name),
- ..
- }) = *r
- {
+
+ // Collect all named lifetimes. These allow us to prevent duplication
+ // of already existing lifetime names when introducing names for
+ // anonymous late-bound regions.
+ if let Some(name) = r.get_name() {
self.used_region_names.insert(name);
}
+
r.super_visit_with(self)
}
@@ -2330,12 +2411,9 @@ impl<'tcx> FmtPrinter<'_, 'tcx> {
}
}
- self.used_region_names.clear();
- let mut collector = LateBoundRegionNameCollector {
- used_region_names: &mut self.used_region_names,
- type_collector: SsoHashSet::new(),
- };
+ let mut collector = RegionNameCollector::new();
value.visit_with(&mut collector);
+ self.used_region_names = collector.used_region_names;
self.region_index = 0;
}
}
@@ -2637,8 +2715,8 @@ define_print_and_forward_display! {
print_value_path(closure_def_id, &[]),
write("` implements the trait `{}`", kind))
}
- ty::PredicateKind::ConstEvaluatable(uv) => {
- p!("the constant `", print_value_path(uv.def.did, uv.substs), "` can be evaluated")
+ ty::PredicateKind::ConstEvaluatable(ct) => {
+ p!("the constant `", print(ct), "` can be evaluated")
}
ty::PredicateKind::ConstEquate(c1, c2) => {
p!("the constant `", print(c1), "` equals `", print(c2), "`")
@@ -2662,7 +2740,7 @@ fn for_each_def(tcx: TyCtxt<'_>, mut collect_fn: impl for<'b> FnMut(&'b Ident, N
// Iterate all local crate items no matter where they are defined.
let hir = tcx.hir();
for id in hir.items() {
- if matches!(tcx.def_kind(id.def_id), DefKind::Use) {
+ if matches!(tcx.def_kind(id.owner_id), DefKind::Use) {
continue;
}
@@ -2671,7 +2749,7 @@ fn for_each_def(tcx: TyCtxt<'_>, mut collect_fn: impl for<'b> FnMut(&'b Ident, N
continue;
}
- let def_id = item.def_id.to_def_id();
+ let def_id = item.owner_id.to_def_id();
let ns = tcx.def_kind(def_id).ns().unwrap_or(Namespace::TypeNS);
collect_fn(&item.ident, ns, def_id);
}
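
Two printer behaviors change above: `with_no_verbose_constants` lets callers such as `std::any::type_name` suppress `-Zverbose` constant printing, and the anonymous-region naming replaces the fixed `'r`, `'s`, `'t0`... scheme. Single letters `'a`..=`'z` are now handed out first (skipping names already used in the value being printed), and only then does the printer fall back to synthesized `'z0`, `'z1`, ... names. A self-contained sketch of that fallback logic, outside the compiler:

    fn main() {
        // Names already appearing in the printed value (collected by
        // RegionNameCollector in the real code).
        let used = ["'b"];
        let mut available: Vec<String> =
            ('a'..='z').rev().map(|c| format!("'{c}")).collect();
        available.retain(|n| !used.contains(&n.as_str()));
        let num_available = available.len();

        let mut names = Vec::new();
        for index in 0..28 {
            let name = available
                .pop()
                .unwrap_or_else(|| format!("'z{}", index - num_available));
            names.push(name);
        }
        // -> ["'a", "'c", "'d", ..., "'z", "'z0", "'z1", "'z2"]
        println!("{names:?}");
    }
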
diff --git a/compiler/rustc_middle/src/ty/query.rs b/compiler/rustc_middle/src/ty/query.rs
index a300a8df2..ec90590ad 100644
--- a/compiler/rustc_middle/src/ty/query.rs
+++ b/compiler/rustc_middle/src/ty/query.rs
@@ -1,11 +1,11 @@
use crate::dep_graph;
use crate::infer::canonical::{self, Canonical};
-use crate::lint::LintLevelMap;
+use crate::lint::LintExpectation;
use crate::metadata::ModChild;
use crate::middle::codegen_fn_attrs::CodegenFnAttrs;
use crate::middle::exported_symbols::{ExportedSymbol, SymbolExportInfo};
use crate::middle::lib_features::LibFeatures;
-use crate::middle::privacy::AccessLevels;
+use crate::middle::privacy::EffectiveVisibilities;
use crate::middle::resolve_lifetime::{ObjectLifetimeDefault, Region, ResolveLifetimes};
use crate::middle::stability::{self, DeprecationEntry};
use crate::mir;
@@ -32,7 +32,7 @@ use crate::ty::layout::TyAndLayout;
use crate::ty::subst::{GenericArg, SubstsRef};
use crate::ty::util::AlwaysRequiresDrop;
use crate::ty::GeneratorDiagnosticData;
-use crate::ty::{self, AdtSizedConstraint, CrateInherentImpls, ParamEnvAnd, Ty, TyCtxt};
+use crate::ty::{self, CrateInherentImpls, ParamEnvAnd, Ty, TyCtxt};
use rustc_ast as ast;
use rustc_ast::expand::allocator::AllocatorKind;
use rustc_attr as attr;
@@ -40,17 +40,19 @@ use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap, FxIndexSet};
use rustc_data_structures::steal::Steal;
use rustc_data_structures::svh::Svh;
use rustc_data_structures::sync::Lrc;
+use rustc_data_structures::unord::UnordSet;
use rustc_errors::ErrorGuaranteed;
use rustc_hir as hir;
use rustc_hir::def::DefKind;
use rustc_hir::def_id::{CrateNum, DefId, DefIdMap, DefIdSet, LocalDefId};
+use rustc_hir::hir_id::OwnerId;
use rustc_hir::lang_items::{LangItem, LanguageItems};
use rustc_hir::{Crate, ItemLocalId, TraitCandidate};
use rustc_index::{bit_set::FiniteBitSet, vec::IndexVec};
use rustc_session::config::{EntryFnType, OptLevel, OutputFilenames, SymbolManglingVersion};
use rustc_session::cstore::{CrateDepKind, CrateSource};
use rustc_session::cstore::{ExternCrate, ForeignModule, LinkagePreference, NativeLib};
-use rustc_session::utils::NativeLibKind;
+use rustc_session::lint::LintExpectationId;
use rustc_session::Limits;
use rustc_span::symbol::Symbol;
use rustc_span::{Span, DUMMY_SP};
@@ -275,8 +277,9 @@ macro_rules! define_callbacks {
fn default() -> Self {
Providers {
$($name: |_, key| bug!(
- "`tcx.{}({:?})` unsupported by its crate; \
- perhaps the `{}` query was never assigned a provider function",
+ "`tcx.{}({:?})` is not supported for external or local crate;\n
+                    hint: a query can be made either for the local crate or for an external crate; this error means it was used for a crate that is not supported here (likely the local crate).\n
+                    If that is not the case, the `{}` query was likely never assigned a provider function.\n",
stringify!($name),
key,
stringify!($name),
@@ -335,7 +338,7 @@ macro_rules! define_callbacks {
rustc_query_append! { define_callbacks! }
mod sealed {
- use super::{DefId, LocalDefId};
+ use super::{DefId, LocalDefId, OwnerId};
/// An analogue of the `Into` trait that's intended only for query parameters.
///
@@ -365,6 +368,13 @@ mod sealed {
self.to_def_id()
}
}
+
+ impl IntoQueryParam<DefId> for OwnerId {
+ #[inline(always)]
+ fn into_query_param(self) -> DefId {
+ self.to_def_id()
+ }
+ }
}
use sealed::IntoQueryParam;
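
The new `IntoQueryParam<DefId>` impl for `OwnerId` is what lets the HIR-walking code earlier in this patch pass `id.owner_id` straight to `DefId`-keyed queries. A minimal sketch of the pattern (illustrative, assuming the usual `TyCtxt` context):

    use rustc_hir::def::DefKind;
    use rustc_middle::ty::TyCtxt;

    fn item_def_kinds(tcx: TyCtxt<'_>) -> Vec<DefKind> {
        // `id.owner_id` is an `OwnerId`; no explicit `.to_def_id()` is needed
        // because `def_kind` accepts any `IntoQueryParam<DefId>`.
        tcx.hir().items().map(|id| tcx.def_kind(id.owner_id)).collect()
    }
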
diff --git a/compiler/rustc_middle/src/ty/relate.rs b/compiler/rustc_middle/src/ty/relate.rs
index 61c34730d..b25b4bd4f 100644
--- a/compiler/rustc_middle/src/ty/relate.rs
+++ b/compiler/rustc_middle/src/ty/relate.rs
@@ -5,8 +5,8 @@
//! subtyping, type equality, etc.
use crate::ty::error::{ExpectedFound, TypeError};
-use crate::ty::subst::{GenericArg, GenericArgKind, Subst, SubstsRef};
use crate::ty::{self, ImplSubject, Term, TermKind, Ty, TyCtxt, TypeFoldable};
+use crate::ty::{GenericArg, GenericArgKind, SubstsRef};
use rustc_hir as ast;
use rustc_hir::def_id::DefId;
use rustc_span::DUMMY_SP;
@@ -632,11 +632,7 @@ pub fn super_relate_consts<'tcx, R: TypeRelation<'tcx>>(
// While this is slightly incorrect, it shouldn't matter for `min_const_generics`
// and is the better alternative to waiting until `generic_const_exprs` can
// be stabilized.
- (ty::ConstKind::Unevaluated(au), ty::ConstKind::Unevaluated(bu))
- if au.def == bu.def && au.promoted == bu.promoted =>
- {
- assert_eq!(au.promoted, ());
-
+ (ty::ConstKind::Unevaluated(au), ty::ConstKind::Unevaluated(bu)) if au.def == bu.def => {
let substs = relation.relate_with_variance(
ty::Variance::Invariant,
ty::VarianceDiagInfo::default(),
@@ -644,11 +640,7 @@ pub fn super_relate_consts<'tcx, R: TypeRelation<'tcx>>(
bu.substs,
)?;
return Ok(tcx.mk_const(ty::ConstS {
- kind: ty::ConstKind::Unevaluated(ty::Unevaluated {
- def: au.def,
- substs,
- promoted: (),
- }),
+ kind: ty::ConstKind::Unevaluated(ty::UnevaluatedConst { def: au.def, substs }),
ty: a.ty(),
}));
}
diff --git a/compiler/rustc_middle/src/ty/rvalue_scopes.rs b/compiler/rustc_middle/src/ty/rvalue_scopes.rs
index e86dafae3..e79b79a25 100644
--- a/compiler/rustc_middle/src/ty/rvalue_scopes.rs
+++ b/compiler/rustc_middle/src/ty/rvalue_scopes.rs
@@ -3,7 +3,7 @@ use rustc_data_structures::fx::FxHashMap;
use rustc_hir as hir;
/// `RvalueScopes` is a mapping from sub-expressions to _extended_ lifetime as determined by
-/// rules laid out in `rustc_typeck::check::rvalue_scopes`.
+/// rules laid out in `rustc_hir_analysis::check::rvalue_scopes`.
#[derive(TyEncodable, TyDecodable, Clone, Debug, Default, Eq, PartialEq, HashStable)]
pub struct RvalueScopes {
map: FxHashMap<hir::ItemLocalId, Option<Scope>>,
diff --git a/compiler/rustc_middle/src/ty/structural_impls.rs b/compiler/rustc_middle/src/ty/structural_impls.rs
index 84d6a8b97..2cad333e3 100644
--- a/compiler/rustc_middle/src/ty/structural_impls.rs
+++ b/compiler/rustc_middle/src/ty/structural_impls.rs
@@ -166,8 +166,8 @@ impl<'tcx> fmt::Debug for ty::PredicateKind<'tcx> {
ty::PredicateKind::ClosureKind(closure_def_id, closure_substs, kind) => {
write!(f, "ClosureKind({:?}, {:?}, {:?})", closure_def_id, closure_substs, kind)
}
- ty::PredicateKind::ConstEvaluatable(uv) => {
- write!(f, "ConstEvaluatable({:?}, {:?})", uv.def, uv.substs)
+ ty::PredicateKind::ConstEvaluatable(ct) => {
+ write!(f, "ConstEvaluatable({ct:?})")
}
ty::PredicateKind::ConstEquate(c1, c2) => write!(f, "ConstEquate({:?}, {:?})", c1, c2),
ty::PredicateKind::TypeWellFormedFromEnv(ty) => {
@@ -560,18 +560,6 @@ impl<'tcx, T: TypeVisitable<'tcx>> TypeVisitable<'tcx> for Box<[T]> {
}
}
-impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for ty::EarlyBinder<T> {
- fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
- self.try_map_bound(|ty| ty.try_fold_with(folder))
- }
-}
-
-impl<'tcx, T: TypeVisitable<'tcx>> TypeVisitable<'tcx> for ty::EarlyBinder<T> {
- fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
- self.as_ref().0.visit_with(visitor)
- }
-}
-
impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for ty::Binder<'tcx, T> {
fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
folder.try_fold_binder(self)
@@ -844,45 +832,8 @@ impl<'tcx> TypeVisitable<'tcx> for InferConst<'tcx> {
}
}
-impl<'tcx> TypeFoldable<'tcx> for ty::Unevaluated<'tcx> {
- fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
- folder.try_fold_unevaluated(self)
- }
-}
-
-impl<'tcx> TypeVisitable<'tcx> for ty::Unevaluated<'tcx> {
- fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
- visitor.visit_unevaluated(*self)
- }
-}
-
-impl<'tcx> TypeSuperFoldable<'tcx> for ty::Unevaluated<'tcx> {
- fn try_super_fold_with<F: FallibleTypeFolder<'tcx>>(
- self,
- folder: &mut F,
- ) -> Result<Self, F::Error> {
- Ok(ty::Unevaluated {
- def: self.def,
- substs: self.substs.try_fold_with(folder)?,
- promoted: self.promoted,
- })
- }
-}
-
-impl<'tcx> TypeSuperVisitable<'tcx> for ty::Unevaluated<'tcx> {
+impl<'tcx> TypeSuperVisitable<'tcx> for ty::UnevaluatedConst<'tcx> {
fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
self.substs.visit_with(visitor)
}
}
-
-impl<'tcx> TypeFoldable<'tcx> for ty::Unevaluated<'tcx, ()> {
- fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> {
- Ok(self.expand().try_fold_with(folder)?.shrink())
- }
-}
-
-impl<'tcx> TypeVisitable<'tcx> for ty::Unevaluated<'tcx, ()> {
- fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
- self.expand().visit_with(visitor)
- }
-}
diff --git a/compiler/rustc_middle/src/ty/sty.rs b/compiler/rustc_middle/src/ty/sty.rs
index 36e560850..cf420bafe 100644
--- a/compiler/rustc_middle/src/ty/sty.rs
+++ b/compiler/rustc_middle/src/ty/sty.rs
@@ -3,7 +3,7 @@
#![allow(rustc::usage_of_ty_tykind)]
use crate::infer::canonical::Canonical;
-use crate::ty::subst::{GenericArg, InternalSubsts, Subst, SubstsRef};
+use crate::ty::subst::{GenericArg, InternalSubsts, SubstsRef};
use crate::ty::visit::ValidateBoundVars;
use crate::ty::InferTy::*;
use crate::ty::{
@@ -19,7 +19,7 @@ use rustc_hir as hir;
use rustc_hir::def_id::DefId;
use rustc_index::vec::Idx;
use rustc_macros::HashStable;
-use rustc_span::symbol::{kw, Symbol};
+use rustc_span::symbol::{kw, sym, Symbol};
use rustc_target::abi::VariantIdx;
use rustc_target::spec::abi;
use std::borrow::Cow;
@@ -85,6 +85,17 @@ impl BoundRegionKind {
_ => false,
}
}
+
+ pub fn get_name(&self) -> Option<Symbol> {
+ if self.is_named() {
+ match *self {
+ BoundRegionKind::BrNamed(_, name) => return Some(name),
+ _ => unreachable!(),
+ }
+ }
+
+ None
+ }
}
pub trait Article {
@@ -304,7 +315,7 @@ impl<'tcx> ClosureSubsts<'tcx> {
/// closure.
// FIXME(eddyb) this should be unnecessary, as the shallowly resolved
// type is known at the time of the creation of `ClosureSubsts`,
- // see `rustc_typeck::check::closure`.
+ // see `rustc_hir_analysis::check::closure`.
pub fn sig_as_fn_ptr_ty(self) -> Ty<'tcx> {
self.split().closure_sig_as_fn_ptr_ty.expect_ty()
}
@@ -551,7 +562,7 @@ impl<'tcx> GeneratorSubsts<'tcx> {
layout.variant_fields.iter().map(move |variant| {
variant
.iter()
- .map(move |field| EarlyBinder(layout.field_tys[*field]).subst(tcx, self.substs))
+ .map(move |field| ty::EarlyBinder(layout.field_tys[*field]).subst(tcx, self.substs))
})
}
@@ -915,73 +926,6 @@ impl<'tcx> PolyExistentialTraitRef<'tcx> {
}
}
-#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
-#[derive(Encodable, Decodable, HashStable)]
-pub struct EarlyBinder<T>(pub T);
-
-impl<T> EarlyBinder<T> {
- pub fn as_ref(&self) -> EarlyBinder<&T> {
- EarlyBinder(&self.0)
- }
-
- pub fn map_bound_ref<F, U>(&self, f: F) -> EarlyBinder<U>
- where
- F: FnOnce(&T) -> U,
- {
- self.as_ref().map_bound(f)
- }
-
- pub fn map_bound<F, U>(self, f: F) -> EarlyBinder<U>
- where
- F: FnOnce(T) -> U,
- {
- let value = f(self.0);
- EarlyBinder(value)
- }
-
- pub fn try_map_bound<F, U, E>(self, f: F) -> Result<EarlyBinder<U>, E>
- where
- F: FnOnce(T) -> Result<U, E>,
- {
- let value = f(self.0)?;
- Ok(EarlyBinder(value))
- }
-
- pub fn rebind<U>(&self, value: U) -> EarlyBinder<U> {
- EarlyBinder(value)
- }
-}
-
-impl<T> EarlyBinder<Option<T>> {
- pub fn transpose(self) -> Option<EarlyBinder<T>> {
- self.0.map(|v| EarlyBinder(v))
- }
-}
-
-impl<T, U> EarlyBinder<(T, U)> {
- pub fn transpose_tuple2(self) -> (EarlyBinder<T>, EarlyBinder<U>) {
- (EarlyBinder(self.0.0), EarlyBinder(self.0.1))
- }
-}
-
-pub struct EarlyBinderIter<T> {
- t: T,
-}
-
-impl<T: IntoIterator> EarlyBinder<T> {
- pub fn transpose_iter(self) -> EarlyBinderIter<T::IntoIter> {
- EarlyBinderIter { t: self.0.into_iter() }
- }
-}
-
-impl<T: Iterator> Iterator for EarlyBinderIter<T> {
- type Item = EarlyBinder<T::Item>;
-
- fn next(&mut self) -> Option<Self::Item> {
- self.t.next().map(|i| EarlyBinder(i))
- }
-}
-
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)]
#[derive(HashStable)]
pub enum BoundVariableKind {
@@ -1200,9 +1144,13 @@ pub struct ProjectionTy<'tcx> {
impl<'tcx> ProjectionTy<'tcx> {
pub fn trait_def_id(&self, tcx: TyCtxt<'tcx>) -> DefId {
- let parent = tcx.parent(self.item_def_id);
- assert_eq!(tcx.def_kind(parent), DefKind::Trait);
- parent
+ match tcx.def_kind(self.item_def_id) {
+ DefKind::AssocTy | DefKind::AssocConst => tcx.parent(self.item_def_id),
+ DefKind::ImplTraitPlaceholder => {
+ tcx.parent(tcx.impl_trait_in_trait_parent(self.item_def_id))
+ }
+ kind => bug!("unexpected DefKind in ProjectionTy: {kind:?}"),
+ }
}
/// Extracts the underlying trait reference and own substs from this projection.
@@ -1213,6 +1161,7 @@ impl<'tcx> ProjectionTy<'tcx> {
tcx: TyCtxt<'tcx>,
) -> (ty::TraitRef<'tcx>, &'tcx [ty::GenericArg<'tcx>]) {
let def_id = tcx.parent(self.item_def_id);
+ assert_eq!(tcx.def_kind(def_id), DefKind::Trait);
let trait_generics = tcx.generics_of(def_id);
(
ty::TraitRef { def_id, substs: self.substs.truncate_to(tcx, trait_generics) },
@@ -1508,6 +1457,23 @@ impl<'tcx> Region<'tcx> {
*self.0.0
}
+ pub fn get_name(self) -> Option<Symbol> {
+ if self.has_name() {
+ let name = match *self {
+ ty::ReEarlyBound(ebr) => Some(ebr.name),
+ ty::ReLateBound(_, br) => br.kind.get_name(),
+ ty::ReFree(fr) => fr.bound_region.get_name(),
+ ty::ReStatic => Some(kw::StaticLifetime),
+ ty::RePlaceholder(placeholder) => placeholder.name.get_name(),
+ _ => None,
+ };
+
+ return name;
+ }
+
+ None
+ }
+
/// Is this region named by the user?
pub fn has_name(self) -> bool {
match *self {
@@ -2156,7 +2122,7 @@ impl<'tcx> Ty<'tcx> {
///
/// Note that during type checking, we use an inference variable
/// to represent the closure kind, because it has not yet been
- /// inferred. Once upvar inference (in `rustc_typeck/src/check/upvar.rs`)
+ /// inferred. Once upvar inference (in `rustc_hir_analysis/src/check/upvar.rs`)
/// is complete, that type variable will be unified.
pub fn to_opt_closure_kind(self) -> Option<ty::ClosureKind> {
match self.kind() {
@@ -2239,7 +2205,10 @@ impl<'tcx> Ty<'tcx> {
// These aren't even `Clone`
ty::Str | ty::Slice(..) | ty::Foreign(..) | ty::Dynamic(..) => false,
- ty::Int(..) | ty::Uint(..) | ty::Float(..) => true,
+ ty::Infer(ty::InferTy::FloatVar(_) | ty::InferTy::IntVar(_))
+ | ty::Int(..)
+ | ty::Uint(..)
+ | ty::Float(..) => true,
// The voldemort ZSTs are fine.
ty::FnDef(..) => true,
@@ -2274,6 +2243,35 @@ impl<'tcx> Ty<'tcx> {
}
}
}
+
+ // If `self` is a primitive, return its [`Symbol`].
+ pub fn primitive_symbol(self) -> Option<Symbol> {
+ match self.kind() {
+ ty::Bool => Some(sym::bool),
+ ty::Char => Some(sym::char),
+ ty::Float(f) => match f {
+ ty::FloatTy::F32 => Some(sym::f32),
+ ty::FloatTy::F64 => Some(sym::f64),
+ },
+ ty::Int(f) => match f {
+ ty::IntTy::Isize => Some(sym::isize),
+ ty::IntTy::I8 => Some(sym::i8),
+ ty::IntTy::I16 => Some(sym::i16),
+ ty::IntTy::I32 => Some(sym::i32),
+ ty::IntTy::I64 => Some(sym::i64),
+ ty::IntTy::I128 => Some(sym::i128),
+ },
+ ty::Uint(f) => match f {
+ ty::UintTy::Usize => Some(sym::usize),
+ ty::UintTy::U8 => Some(sym::u8),
+ ty::UintTy::U16 => Some(sym::u16),
+ ty::UintTy::U32 => Some(sym::u32),
+ ty::UintTy::U64 => Some(sym::u64),
+ ty::UintTy::U128 => Some(sym::u128),
+ },
+ _ => None,
+ }
+ }
}
/// Extra information about why we ended up with a particular variance.
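
The two small helpers added here, `Region::get_name` and `Ty::primitive_symbol`, back the pretty-printer changes (named-region collection) and primitive-name lookups respectively. A short illustrative use, assuming a `TyCtxt` is available:

    use rustc_middle::ty::TyCtxt;
    use rustc_span::sym;

    fn demo(tcx: TyCtxt<'_>) {
        assert_eq!(tcx.types.u32.primitive_symbol(), Some(sym::u32));
        // Non-primitive types (references, ADTs, str, ...) return None.
        assert_eq!(tcx.types.str_.primitive_symbol(), None);
    }
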
diff --git a/compiler/rustc_middle/src/ty/subst.rs b/compiler/rustc_middle/src/ty/subst.rs
index 8e69bf067..0660e9b79 100644
--- a/compiler/rustc_middle/src/ty/subst.rs
+++ b/compiler/rustc_middle/src/ty/subst.rs
@@ -1,12 +1,12 @@
// Type substitutions.
-use crate::mir;
use crate::ty::codec::{TyDecoder, TyEncoder};
use crate::ty::fold::{FallibleTypeFolder, TypeFoldable, TypeFolder, TypeSuperFoldable};
use crate::ty::sty::{ClosureSubsts, GeneratorSubsts, InlineConstSubsts};
use crate::ty::visit::{TypeVisitable, TypeVisitor};
use crate::ty::{self, Lift, List, ParamConst, Ty, TyCtxt};
+use rustc_data_structures::captures::Captures;
use rustc_data_structures::intern::{Interned, WithStableHash};
use rustc_hir::def_id::DefId;
use rustc_macros::HashStable;
@@ -189,6 +189,14 @@ impl<'tcx> GenericArg<'tcx> {
_ => bug!("expected a const, but found another kind"),
}
}
+
+ pub fn is_non_region_infer(self) -> bool {
+ match self.unpack() {
+ GenericArgKind::Lifetime(_) => false,
+ GenericArgKind::Type(ty) => ty.is_ty_infer(),
+ GenericArgKind::Const(ct) => ct.is_ct_infer(),
+ }
+ }
}
impl<'a, 'tcx> Lift<'tcx> for GenericArg<'a> {
@@ -492,23 +500,107 @@ impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List<Ty<'tcx>> {
}
impl<'tcx, T: TypeVisitable<'tcx>> TypeVisitable<'tcx> for &'tcx ty::List<T> {
+ #[inline]
fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> {
self.iter().try_for_each(|t| t.visit_with(visitor))
}
}
-// Just call `foo.subst(tcx, substs)` to perform a substitution across `foo`.
-#[rustc_on_unimplemented(message = "Calling `subst` must now be done through an `EarlyBinder`")]
-pub trait Subst<'tcx>: Sized {
- type Inner;
+#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
+#[derive(Encodable, Decodable, HashStable)]
+pub struct EarlyBinder<T>(pub T);
+
+/// For early binders, you should first call `subst` before using any visitors.
+impl<'tcx, T> !TypeFoldable<'tcx> for ty::EarlyBinder<T> {}
+impl<'tcx, T> !TypeVisitable<'tcx> for ty::EarlyBinder<T> {}
+
+impl<T> EarlyBinder<T> {
+ pub fn as_ref(&self) -> EarlyBinder<&T> {
+ EarlyBinder(&self.0)
+ }
- fn subst(self, tcx: TyCtxt<'tcx>, substs: &[GenericArg<'tcx>]) -> Self::Inner;
+ pub fn map_bound_ref<F, U>(&self, f: F) -> EarlyBinder<U>
+ where
+ F: FnOnce(&T) -> U,
+ {
+ self.as_ref().map_bound(f)
+ }
+
+ pub fn map_bound<F, U>(self, f: F) -> EarlyBinder<U>
+ where
+ F: FnOnce(T) -> U,
+ {
+ let value = f(self.0);
+ EarlyBinder(value)
+ }
+
+ pub fn try_map_bound<F, U, E>(self, f: F) -> Result<EarlyBinder<U>, E>
+ where
+ F: FnOnce(T) -> Result<U, E>,
+ {
+ let value = f(self.0)?;
+ Ok(EarlyBinder(value))
+ }
+
+ pub fn rebind<U>(&self, value: U) -> EarlyBinder<U> {
+ EarlyBinder(value)
+ }
}
-impl<'tcx, T: TypeFoldable<'tcx>> Subst<'tcx> for ty::EarlyBinder<T> {
- type Inner = T;
+impl<T> EarlyBinder<Option<T>> {
+ pub fn transpose(self) -> Option<EarlyBinder<T>> {
+ self.0.map(|v| EarlyBinder(v))
+ }
+}
+
+impl<T, U> EarlyBinder<(T, U)> {
+ pub fn transpose_tuple2(self) -> (EarlyBinder<T>, EarlyBinder<U>) {
+ (EarlyBinder(self.0.0), EarlyBinder(self.0.1))
+ }
+}
- fn subst(self, tcx: TyCtxt<'tcx>, substs: &[GenericArg<'tcx>]) -> Self::Inner {
+impl<'tcx, 's, T: IntoIterator<Item = I>, I: TypeFoldable<'tcx>> EarlyBinder<T> {
+ pub fn subst_iter(
+ self,
+ tcx: TyCtxt<'tcx>,
+ substs: &'s [GenericArg<'tcx>],
+ ) -> impl Iterator<Item = I> + Captures<'s> + Captures<'tcx> {
+ self.0.into_iter().map(move |t| EarlyBinder(t).subst(tcx, substs))
+ }
+}
+
+impl<'tcx, 's, 'a, T: IntoIterator<Item = &'a I>, I: Copy + TypeFoldable<'tcx> + 'a>
+ EarlyBinder<T>
+{
+ pub fn subst_iter_copied(
+ self,
+ tcx: TyCtxt<'tcx>,
+ substs: &'s [GenericArg<'tcx>],
+ ) -> impl Iterator<Item = I> + Captures<'s> + Captures<'tcx> + Captures<'a> {
+ self.0.into_iter().map(move |t| EarlyBinder(*t).subst(tcx, substs))
+ }
+}
+
+pub struct EarlyBinderIter<T> {
+ t: T,
+}
+
+impl<T: IntoIterator> EarlyBinder<T> {
+ pub fn transpose_iter(self) -> EarlyBinderIter<T::IntoIter> {
+ EarlyBinderIter { t: self.0.into_iter() }
+ }
+}
+
+impl<T: Iterator> Iterator for EarlyBinderIter<T> {
+ type Item = EarlyBinder<T::Item>;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ self.t.next().map(|i| EarlyBinder(i))
+ }
+}
+
+impl<'tcx, T: TypeFoldable<'tcx>> ty::EarlyBinder<T> {
+ pub fn subst(self, tcx: TyCtxt<'tcx>, substs: &[GenericArg<'tcx>]) -> T {
let mut folder = SubstFolder { tcx, substs, binders_passed: 0 };
self.0.fold_with(&mut folder)
}
@@ -544,9 +636,21 @@ impl<'a, 'tcx> TypeFolder<'tcx> for SubstFolder<'a, 'tcx> {
fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> {
#[cold]
#[inline(never)]
- fn region_param_out_of_range(data: ty::EarlyBoundRegion) -> ! {
+ fn region_param_out_of_range(data: ty::EarlyBoundRegion, substs: &[GenericArg<'_>]) -> ! {
bug!(
- "Region parameter out of range when substituting in region {} (index={})",
+ "Region parameter out of range when substituting in region {} (index={}, substs = {:?})",
+ data.name,
+ data.index,
+ substs,
+ )
+ }
+
+ #[cold]
+ #[inline(never)]
+ fn region_param_invalid(data: ty::EarlyBoundRegion, other: GenericArgKind<'_>) -> ! {
+ bug!(
+ "Unexpected parameter {:?} when substituting in region {} (index={})",
+ other,
data.name,
data.index
)
@@ -562,7 +666,8 @@ impl<'a, 'tcx> TypeFolder<'tcx> for SubstFolder<'a, 'tcx> {
let rk = self.substs.get(data.index as usize).map(|k| k.unpack());
match rk {
Some(GenericArgKind::Lifetime(lt)) => self.shift_region_through_binders(lt),
- _ => region_param_out_of_range(data),
+ Some(other) => region_param_invalid(data, other),
+ None => region_param_out_of_range(data, self.substs),
}
}
_ => r,
@@ -587,11 +692,6 @@ impl<'a, 'tcx> TypeFolder<'tcx> for SubstFolder<'a, 'tcx> {
c.super_fold_with(self)
}
}
-
- #[inline]
- fn fold_mir_const(&mut self, c: mir::ConstantKind<'tcx>) -> mir::ConstantKind<'tcx> {
- c.super_fold_with(self)
- }
}
impl<'a, 'tcx> SubstFolder<'a, 'tcx> {
diff --git a/compiler/rustc_middle/src/ty/util.rs b/compiler/rustc_middle/src/ty/util.rs
index 23ad4d27d..f72e236ed 100644
--- a/compiler/rustc_middle/src/ty/util.rs
+++ b/compiler/rustc_middle/src/ty/util.rs
@@ -2,12 +2,11 @@
use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags;
use crate::ty::layout::IntegerExt;
-use crate::ty::query::TyCtxtAt;
-use crate::ty::subst::{GenericArgKind, Subst, SubstsRef};
use crate::ty::{
self, DefIdTree, FallibleTypeFolder, Ty, TyCtxt, TypeFoldable, TypeFolder, TypeSuperFoldable,
TypeVisitable,
};
+use crate::ty::{GenericArgKind, SubstsRef};
use rustc_apfloat::Float as _;
use rustc_ast as ast;
use rustc_attr::{self as attr, SignedInt, UnsignedInt};
@@ -821,12 +820,8 @@ impl<'tcx> Ty<'tcx> {
/// does copies even when the type actually doesn't satisfy the
/// full requirements for the `Copy` trait (cc #29149) -- this
/// winds up being reported as an error during NLL borrow check.
- pub fn is_copy_modulo_regions(
- self,
- tcx_at: TyCtxtAt<'tcx>,
- param_env: ty::ParamEnv<'tcx>,
- ) -> bool {
- self.is_trivially_pure_clone_copy() || tcx_at.is_copy_raw(param_env.and(self))
+ pub fn is_copy_modulo_regions(self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
+ self.is_trivially_pure_clone_copy() || tcx.is_copy_raw(param_env.and(self))
}
/// Checks whether values of this type `T` have a size known at
@@ -835,8 +830,8 @@ impl<'tcx> Ty<'tcx> {
/// over-approximation in generic contexts, where one can have
/// strange rules like `<T as Foo<'static>>::Bar: Sized` that
/// actually carry lifetime requirements.
- pub fn is_sized(self, tcx_at: TyCtxtAt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
- self.is_trivially_sized(tcx_at.tcx) || tcx_at.is_sized_raw(param_env.and(self))
+ pub fn is_sized(self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
+ self.is_trivially_sized(tcx) || tcx.is_sized_raw(param_env.and(self))
}
/// Checks whether values of this type `T` implement the `Freeze`
@@ -846,8 +841,8 @@ impl<'tcx> Ty<'tcx> {
/// optimization as well as the rules around static values. Note
/// that the `Freeze` trait is not exposed to end users and is
/// effectively an implementation detail.
- pub fn is_freeze(self, tcx_at: TyCtxtAt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
- self.is_trivially_freeze() || tcx_at.is_freeze_raw(param_env.and(self))
+ pub fn is_freeze(self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
+ self.is_trivially_freeze() || tcx.is_freeze_raw(param_env.and(self))
}
/// Fast path helper for testing if a type is `Freeze`.
@@ -886,8 +881,8 @@ impl<'tcx> Ty<'tcx> {
}
/// Checks whether values of this type `T` implement the `Unpin` trait.
- pub fn is_unpin(self, tcx_at: TyCtxtAt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
- self.is_trivially_unpin() || tcx_at.is_unpin_raw(param_env.and(self))
+ pub fn is_unpin(self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool {
+ self.is_trivially_unpin() || tcx.is_unpin_raw(param_env.and(self))
}
/// Fast path helper for testing if a type is `Unpin`.
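
These trait-query helpers now take a plain `TyCtxt` instead of a span-carrying `TyCtxtAt`, so call sites drop the `.at(span)` wrapper. A hedged before/after sketch of a call site (the helper name and bindings are hypothetical, not from the patch):

```rust
use rustc_middle::ty::{self, Ty, TyCtxt};

// Illustrative caller only.
fn needs_copy_check<'tcx>(tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>, ty: Ty<'tcx>) -> bool {
    // Previously this required a span-carrying context:
    //     ty.is_copy_modulo_regions(tcx.at(span), param_env)
    // With this change the plain `TyCtxt` is enough:
    !ty.is_copy_modulo_regions(tcx, param_env)
}
```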
@@ -958,7 +953,7 @@ impl<'tcx> Ty<'tcx> {
}
}
- /// Checks if `ty` has has a significant drop.
+ /// Checks if `ty` has a significant drop.
///
/// Note that this method can return false even if `ty` has a destructor
/// attached; even if that is the case then the adt has been marked with
@@ -1289,12 +1284,24 @@ pub fn is_doc_hidden(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
.any(|items| items.iter().any(|item| item.has_name(sym::hidden)))
}
+/// Determines whether an item is annotated with `doc(notable_trait)`.
+pub fn is_doc_notable_trait(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
+ tcx.get_attrs(def_id, sym::doc)
+ .filter_map(|attr| attr.meta_item_list())
+ .any(|items| items.iter().any(|item| item.has_name(sym::notable_trait)))
+}
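
The new query mirrors `is_doc_hidden` just above: it scans the item's `#[doc(...)]` attribute lists for a `notable_trait` meta item. An illustrative item the query would report as notable (the trait itself is made up; outside the compiler the attribute sits behind the `doc_notable_trait` feature):

```rust
#![feature(doc_notable_trait)]

// Hypothetical trait, for illustration only: `is_doc_notable_trait` returns
// true for any item carrying `doc(notable_trait)` among its doc attributes.
#[doc(notable_trait)]
pub trait Frobnicate {
    fn frob(&self);
}
```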
+
/// Determines whether an item is an intrinsic by Abi.
pub fn is_intrinsic(tcx: TyCtxt<'_>, def_id: DefId) -> bool {
matches!(tcx.fn_sig(def_id).abi(), Abi::RustIntrinsic | Abi::PlatformIntrinsic)
}
pub fn provide(providers: &mut ty::query::Providers) {
- *providers =
- ty::query::Providers { normalize_opaque_types, is_doc_hidden, is_intrinsic, ..*providers }
+ *providers = ty::query::Providers {
+ normalize_opaque_types,
+ is_doc_hidden,
+ is_doc_notable_trait,
+ is_intrinsic,
+ ..*providers
+ }
}
diff --git a/compiler/rustc_middle/src/ty/visit.rs b/compiler/rustc_middle/src/ty/visit.rs
index 5f8cb5782..c09f71f9a 100644
--- a/compiler/rustc_middle/src/ty/visit.rs
+++ b/compiler/rustc_middle/src/ty/visit.rs
@@ -10,8 +10,7 @@
//!
//! There are three groups of traits involved in each traversal.
//! - `TypeVisitable`. This is implemented once for many types, including:
-//! - Types of interest, for which the the methods delegate to the
-//! visitor.
+//! - Types of interest, for which the methods delegate to the visitor.
//! - All other types, including generic containers like `Vec` and `Option`.
//! It defines a "skeleton" of how they should be visited.
//! - `TypeSuperVisitable`. This is implemented only for each type of interest,
@@ -39,7 +38,6 @@
//! - ty.super_visit_with(visitor)
//! - u.visit_with(visitor)
//! ```
-use crate::mir;
use crate::ty::{self, flags::FlagComputation, Binder, Ty, TyCtxt, TypeFlags};
use rustc_errors::ErrorGuaranteed;
@@ -104,8 +102,8 @@ pub trait TypeVisitable<'tcx>: fmt::Debug + Clone {
None
}
}
- fn has_param_types_or_consts(&self) -> bool {
- self.has_type_flags(TypeFlags::HAS_TY_PARAM | TypeFlags::HAS_CT_PARAM)
+ fn has_non_region_param(&self) -> bool {
+ self.has_type_flags(TypeFlags::NEEDS_SUBST - TypeFlags::HAS_RE_PARAM)
}
fn has_infer_regions(&self) -> bool {
self.has_type_flags(TypeFlags::HAS_RE_INFER)
@@ -113,8 +111,8 @@ pub trait TypeVisitable<'tcx>: fmt::Debug + Clone {
fn has_infer_types(&self) -> bool {
self.has_type_flags(TypeFlags::HAS_TY_INFER)
}
- fn has_infer_types_or_consts(&self) -> bool {
- self.has_type_flags(TypeFlags::HAS_TY_INFER | TypeFlags::HAS_CT_INFER)
+ fn has_non_region_infer(&self) -> bool {
+ self.has_type_flags(TypeFlags::NEEDS_INFER - TypeFlags::HAS_RE_INFER)
}
fn needs_infer(&self) -> bool {
self.has_type_flags(TypeFlags::NEEDS_INFER)
@@ -199,17 +197,9 @@ pub trait TypeVisitor<'tcx>: Sized {
c.super_visit_with(self)
}
- fn visit_unevaluated(&mut self, uv: ty::Unevaluated<'tcx>) -> ControlFlow<Self::BreakTy> {
- uv.super_visit_with(self)
- }
-
fn visit_predicate(&mut self, p: ty::Predicate<'tcx>) -> ControlFlow<Self::BreakTy> {
p.super_visit_with(self)
}
-
- fn visit_mir_const(&mut self, c: mir::ConstantKind<'tcx>) -> ControlFlow<Self::BreakTy> {
- c.super_visit_with(self)
- }
}
///////////////////////////////////////////////////////////////////////////
@@ -597,18 +587,6 @@ impl<'tcx> TypeVisitor<'tcx> for HasTypeFlagsVisitor {
#[inline]
#[instrument(level = "trace", ret)]
- fn visit_unevaluated(&mut self, uv: ty::Unevaluated<'tcx>) -> ControlFlow<Self::BreakTy> {
- let flags = FlagComputation::for_unevaluated_const(uv);
- trace!(r.flags=?flags);
- if flags.intersects(self.flags) {
- ControlFlow::Break(FoundFlags)
- } else {
- ControlFlow::CONTINUE
- }
- }
-
- #[inline]
- #[instrument(level = "trace", ret)]
fn visit_predicate(&mut self, predicate: ty::Predicate<'tcx>) -> ControlFlow<Self::BreakTy> {
debug!(
"HasTypeFlagsVisitor: predicate={:?} predicate.flags={:?} self.flags={:?}",
diff --git a/compiler/rustc_middle/src/ty/walk.rs b/compiler/rustc_middle/src/ty/walk.rs
index a3e11bbf0..91db9698c 100644
--- a/compiler/rustc_middle/src/ty/walk.rs
+++ b/compiler/rustc_middle/src/ty/walk.rs
@@ -112,6 +112,22 @@ impl<'tcx> Ty<'tcx> {
}
}
+impl<'tcx> ty::Const<'tcx> {
+ /// Iterator that walks `self` and any types reachable from
+ /// `self`, in depth-first order. Note that just walks the types
+ /// that appear in `self`, it does not descend into the fields of
+ /// structs or variants. For example:
+ ///
+ /// ```text
+ /// isize => { isize }
+ /// Foo<Bar<isize>> => { Foo<Bar<isize>>, Bar<isize>, isize }
+ /// [isize] => { [isize], isize }
+ /// ```
+ pub fn walk(self) -> TypeWalker<'tcx> {
+ TypeWalker::new(self.into())
+ }
+}
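
As with `Ty::walk`, the returned `TypeWalker` yields `GenericArg`s, so callers typically unpack each step. A hedged usage sketch (the constant `ct` is assumed to be in scope):

```rust
// Illustrative only: `ct: ty::Const<'tcx>` is assumed to be in scope.
use rustc_middle::ty::GenericArgKind;

for arg in ct.walk() {
    match arg.unpack() {
        // Each type reachable from the constant is visited in depth-first order.
        GenericArgKind::Type(ty) => println!("type: {ty:?}"),
        GenericArgKind::Const(c) => println!("const: {c:?}"),
        GenericArgKind::Lifetime(_) => {}
    }
}
```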
+
/// We push `GenericArg`s on the stack in reverse order so as to
/// maintain a pre-order traversal. As of the time of this
/// writing, the fact that the traversal is pre-order is not
diff --git a/compiler/rustc_middle/src/values.rs b/compiler/rustc_middle/src/values.rs
index 7fbe9ae2a..f4b4c3fb0 100644
--- a/compiler/rustc_middle/src/values.rs
+++ b/compiler/rustc_middle/src/values.rs
@@ -1,8 +1,18 @@
-use rustc_middle::ty::{self, AdtSizedConstraint, Ty, TyCtxt};
+use rustc_data_structures::fx::FxHashSet;
+use rustc_errors::{pluralize, struct_span_err, Applicability, MultiSpan};
+use rustc_hir as hir;
+use rustc_hir::def::DefKind;
+use rustc_middle::ty::Representability;
+use rustc_middle::ty::{self, DefIdTree, Ty, TyCtxt};
+use rustc_query_system::query::QueryInfo;
use rustc_query_system::Value;
+use rustc_span::def_id::LocalDefId;
+use rustc_span::Span;
+
+use std::fmt::Write;
impl<'tcx> Value<TyCtxt<'tcx>> for Ty<'_> {
- fn from_cycle_error(tcx: TyCtxt<'tcx>) -> Self {
+ fn from_cycle_error(tcx: TyCtxt<'tcx>, _: &[QueryInfo]) -> Self {
// SAFETY: This is never called when `Self` is not `Ty<'tcx>`.
// FIXME: Represent the above fact in the trait system somehow.
unsafe { std::mem::transmute::<Ty<'tcx>, Ty<'_>>(tcx.ty_error()) }
@@ -10,7 +20,7 @@ impl<'tcx> Value<TyCtxt<'tcx>> for Ty<'_> {
}
impl<'tcx> Value<TyCtxt<'tcx>> for ty::SymbolName<'_> {
- fn from_cycle_error(tcx: TyCtxt<'tcx>) -> Self {
+ fn from_cycle_error(tcx: TyCtxt<'tcx>, _: &[QueryInfo]) -> Self {
// SAFETY: This is never called when `Self` is not `SymbolName<'tcx>`.
// FIXME: Represent the above fact in the trait system somehow.
unsafe {
@@ -21,20 +31,8 @@ impl<'tcx> Value<TyCtxt<'tcx>> for ty::SymbolName<'_> {
}
}
-impl<'tcx> Value<TyCtxt<'tcx>> for AdtSizedConstraint<'_> {
- fn from_cycle_error(tcx: TyCtxt<'tcx>) -> Self {
- // SAFETY: This is never called when `Self` is not `AdtSizedConstraint<'tcx>`.
- // FIXME: Represent the above fact in the trait system somehow.
- unsafe {
- std::mem::transmute::<AdtSizedConstraint<'tcx>, AdtSizedConstraint<'_>>(
- AdtSizedConstraint(tcx.intern_type_list(&[tcx.ty_error()])),
- )
- }
- }
-}
-
impl<'tcx> Value<TyCtxt<'tcx>> for ty::Binder<'_, ty::FnSig<'_>> {
- fn from_cycle_error(tcx: TyCtxt<'tcx>) -> Self {
+ fn from_cycle_error(tcx: TyCtxt<'tcx>, _: &[QueryInfo]) -> Self {
let err = tcx.ty_error();
// FIXME(compiler-errors): It would be nice if we could get the
// query key, so we could at least generate a fn signature that
@@ -52,3 +50,153 @@ impl<'tcx> Value<TyCtxt<'tcx>> for ty::Binder<'_, ty::FnSig<'_>> {
unsafe { std::mem::transmute::<ty::PolyFnSig<'tcx>, ty::Binder<'_, ty::FnSig<'_>>>(fn_sig) }
}
}
+
+impl<'tcx> Value<TyCtxt<'tcx>> for Representability {
+ fn from_cycle_error(tcx: TyCtxt<'tcx>, cycle: &[QueryInfo]) -> Self {
+ let mut item_and_field_ids = Vec::new();
+ let mut representable_ids = FxHashSet::default();
+ for info in cycle {
+ if info.query.name == "representability"
+ && let Some(field_id) = info.query.def_id
+ && let Some(field_id) = field_id.as_local()
+ && let Some(DefKind::Field) = info.query.def_kind
+ {
+ let parent_id = tcx.parent(field_id.to_def_id());
+ let item_id = match tcx.def_kind(parent_id) {
+ DefKind::Variant => tcx.parent(parent_id),
+ _ => parent_id,
+ };
+ item_and_field_ids.push((item_id.expect_local(), field_id));
+ }
+ }
+ for info in cycle {
+ if info.query.name == "representability_adt_ty"
+ && let Some(def_id) = info.query.ty_adt_id
+ && let Some(def_id) = def_id.as_local()
+ && !item_and_field_ids.iter().any(|&(id, _)| id == def_id)
+ {
+ representable_ids.insert(def_id);
+ }
+ }
+ recursive_type_error(tcx, item_and_field_ids, &representable_ids);
+ Representability::Infinite
+ }
+}
+
+// item_and_field_ids should form a cycle where each field contains the
+// type in the next element in the list
+pub fn recursive_type_error(
+ tcx: TyCtxt<'_>,
+ mut item_and_field_ids: Vec<(LocalDefId, LocalDefId)>,
+ representable_ids: &FxHashSet<LocalDefId>,
+) {
+ const ITEM_LIMIT: usize = 5;
+
+ // Rotate the cycle so that the item with the lowest span is first
+ let start_index = item_and_field_ids
+ .iter()
+ .enumerate()
+ .min_by_key(|&(_, &(id, _))| tcx.def_span(id))
+ .unwrap()
+ .0;
+ item_and_field_ids.rotate_left(start_index);
+
+ let cycle_len = item_and_field_ids.len();
+ let show_cycle_len = cycle_len.min(ITEM_LIMIT);
+
+ let mut err_span = MultiSpan::from_spans(
+ item_and_field_ids[..show_cycle_len]
+ .iter()
+ .map(|(id, _)| tcx.def_span(id.to_def_id()))
+ .collect(),
+ );
+ let mut suggestion = Vec::with_capacity(show_cycle_len * 2);
+ for i in 0..show_cycle_len {
+ let (_, field_id) = item_and_field_ids[i];
+ let (next_item_id, _) = item_and_field_ids[(i + 1) % cycle_len];
+ // Find the span(s) that contain the next item in the cycle
+ let hir_id = tcx.hir().local_def_id_to_hir_id(field_id);
+ let hir::Node::Field(field) = tcx.hir().get(hir_id) else { bug!("expected field") };
+ let mut found = Vec::new();
+ find_item_ty_spans(tcx, field.ty, next_item_id, &mut found, representable_ids);
+
+ // Couldn't find the type. Maybe it's behind a type alias?
+ // In any case, we'll just suggest boxing the whole field.
+ if found.is_empty() {
+ found.push(field.ty.span);
+ }
+
+ for span in found {
+ err_span.push_span_label(span, "recursive without indirection");
+ // FIXME(compiler-errors): This suggestion might be erroneous if Box is shadowed
+ suggestion.push((span.shrink_to_lo(), "Box<".to_string()));
+ suggestion.push((span.shrink_to_hi(), ">".to_string()));
+ }
+ }
+ let items_list = {
+ let mut s = String::new();
+ for (i, (item_id, _)) in item_and_field_ids.iter().enumerate() {
+ let path = tcx.def_path_str(item_id.to_def_id());
+ write!(&mut s, "`{path}`").unwrap();
+ if i == (ITEM_LIMIT - 1) && cycle_len > ITEM_LIMIT {
+ write!(&mut s, " and {} more", cycle_len - 5).unwrap();
+ break;
+ }
+ if cycle_len > 1 && i < cycle_len - 2 {
+ s.push_str(", ");
+ } else if cycle_len > 1 && i == cycle_len - 2 {
+ s.push_str(" and ")
+ }
+ }
+ s
+ };
+ let mut err = struct_span_err!(
+ tcx.sess,
+ err_span,
+ E0072,
+ "recursive type{} {} {} infinite size",
+ pluralize!(cycle_len),
+ items_list,
+ pluralize!("has", cycle_len),
+ );
+ err.multipart_suggestion(
+ "insert some indirection (e.g., a `Box`, `Rc`, or `&`) to break the cycle",
+ suggestion,
+ Applicability::HasPlaceholders,
+ );
+ err.emit();
+}
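
For reference, a hedged sketch of the kind of source that reaches `recursive_type_error` and the shape of the resulting diagnostic (exact rendering may differ):

```rust
// A self-referential type with no indirection; its `representability` query
// cycles, which routes through `Representability::from_cycle_error` above.
struct List {
    value: i32,
    next: List, // labeled "recursive without indirection"
}

// The emitted error is E0072, roughly:
//     error[E0072]: recursive type `List` has infinite size
// with a multipart suggestion that rewrites the field as `next: Box<List>`.
```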
+
+fn find_item_ty_spans(
+ tcx: TyCtxt<'_>,
+ ty: &hir::Ty<'_>,
+ needle: LocalDefId,
+ spans: &mut Vec<Span>,
+ seen_representable: &FxHashSet<LocalDefId>,
+) {
+ match ty.kind {
+ hir::TyKind::Path(hir::QPath::Resolved(_, path)) => {
+ if let Some(def_id) = path.res.opt_def_id() {
+ let check_params = def_id.as_local().map_or(true, |def_id| {
+ if def_id == needle {
+ spans.push(ty.span);
+ }
+ seen_representable.contains(&def_id)
+ });
+ if check_params && let Some(args) = path.segments.last().unwrap().args {
+ let params_in_repr = tcx.params_in_repr(def_id);
+ for (i, arg) in args.args.iter().enumerate() {
+ if let hir::GenericArg::Type(ty) = arg && params_in_repr.contains(i as u32) {
+ find_item_ty_spans(tcx, ty, needle, spans, seen_representable);
+ }
+ }
+ }
+ }
+ }
+ hir::TyKind::Array(ty, _) => find_item_ty_spans(tcx, ty, needle, spans, seen_representable),
+ hir::TyKind::Tup(tys) => {
+ tys.iter().for_each(|ty| find_item_ty_spans(tcx, ty, needle, spans, seen_representable))
+ }
+ _ => {}
+ }
+}
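
Because the walk only descends into generic arguments listed in `params_in_repr`, the "recursive without indirection" label lands on the innermost occurrence of the cycling type rather than on the whole field type. A hedged illustration (behavior inferred from the function above):

```rust
// `Option`'s type parameter participates in its representation, so
// `find_item_ty_spans` recurses into the generic argument and labels the
// inner `Tree`, not the surrounding `Option<Tree>`.
struct Tree {
    children: Option<Tree>, // label points at the inner `Tree`
}
```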