author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-17 12:11:28 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-17 12:11:28 +0000
commit     94a0819fe3a0d679c3042a77bfe6a2afc505daea (patch)
tree       2b827afe6a05f3538db3f7803a88c4587fe85648 /compiler/rustc_middle
parent     Adding upstream version 1.64.0+dfsg1. (diff)
Adding upstream version 1.66.0+dfsg1. (upstream/1.66.0+dfsg1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_middle')
90 files changed, 4195 insertions, 5998 deletions
diff --git a/compiler/rustc_middle/Cargo.toml b/compiler/rustc_middle/Cargo.toml
index 008d2c709..de916ea8c 100644
--- a/compiler/rustc_middle/Cargo.toml
+++ b/compiler/rustc_middle/Cargo.toml
@@ -7,34 +7,33 @@ edition = "2021"
 doctest = false
 
 [dependencies]
-rustc_arena = { path = "../rustc_arena" }
 bitflags = "1.2.1"
+chalk-ir = "0.80.0"
 either = "1.5.0"
 gsgdt = "0.1.2"
-tracing = "0.1"
-rustc-rayon = { version = "0.4.0", optional = true }
-rustc-rayon-core = { version = "0.4.0", optional = true }
 polonius-engine = "0.13.0"
 rustc_apfloat = { path = "../rustc_apfloat" }
+rustc_arena = { path = "../rustc_arena" }
+rustc_ast = { path = "../rustc_ast" }
 rustc_attr = { path = "../rustc_attr" }
-rustc_feature = { path = "../rustc_feature" }
-rustc_hir = { path = "../rustc_hir" }
-rustc_target = { path = "../rustc_target" }
-rustc_macros = { path = "../rustc_macros" }
 rustc_data_structures = { path = "../rustc_data_structures" }
-rustc_query_system = { path = "../rustc_query_system" }
 rustc_errors = { path = "../rustc_errors" }
+rustc_feature = { path = "../rustc_feature" }
 rustc_graphviz = { path = "../rustc_graphviz" }
+rustc_hir = { path = "../rustc_hir" }
 rustc_index = { path = "../rustc_index" }
+rustc_macros = { path = "../rustc_macros" }
+rustc_query_system = { path = "../rustc_query_system" }
+rustc-rayon-core = { version = "0.4.0", optional = true }
+rustc-rayon = { version = "0.4.0", optional = true }
 rustc_serialize = { path = "../rustc_serialize" }
-rustc_ast = { path = "../rustc_ast" }
-rustc_span = { path = "../rustc_span" }
-chalk-ir = "0.80.0"
-smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
 rustc_session = { path = "../rustc_session" }
+rustc_span = { path = "../rustc_span" }
+rustc_target = { path = "../rustc_target" }
 rustc_type_ir = { path = "../rustc_type_ir" }
-rand = "0.8.4"
-rand_xoshiro = "0.6.0"
+smallvec = { version = "1.8.1", features = ["union", "may_dangle"] }
+thin-vec = "0.2.8"
+tracing = "0.1"
 
 [features]
 rustc_use_parallel_compiler = ["rustc-rayon", "rustc-rayon-core"]
diff --git a/compiler/rustc_middle/benches/lib.rs b/compiler/rustc_middle/benches/lib.rs
deleted file mode 100644
index 237751bcb..000000000
--- a/compiler/rustc_middle/benches/lib.rs
+++ /dev/null
@@ -1,54 +0,0 @@
-#![feature(test)]
-
-extern crate test;
-
-use test::Bencher;
-
-// Static/dynamic method dispatch
-
-struct Struct {
-    field: isize,
-}
-
-trait Trait {
-    fn method(&self) -> isize;
-}
-
-impl Trait for Struct {
-    fn method(&self) -> isize {
-        self.field
-    }
-}
-
-#[bench]
-fn trait_vtable_method_call(b: &mut Bencher) {
-    let s = Struct { field: 10 };
-    let t = &s as &dyn Trait;
-    b.iter(|| t.method());
-}
-
-#[bench]
-fn trait_static_method_call(b: &mut Bencher) {
-    let s = Struct { field: 10 };
-    b.iter(|| s.method());
-}
-
-// Overhead of various match forms
-
-#[bench]
-fn option_some(b: &mut Bencher) {
-    let x = Some(10);
-    b.iter(|| match x {
-        Some(y) => y,
-        None => 11,
-    });
-}
-
-#[bench]
-fn vec_pattern(b: &mut Bencher) {
-    let x = [1, 2, 3, 4, 5, 6];
-    b.iter(|| match x {
-        [1, 2, 3, ..] => 10,
-        _ => 11,
-    });
-}
diff --git a/compiler/rustc_middle/src/arena.rs b/compiler/rustc_middle/src/arena.rs
index b94de537d..f8aae86fe 100644
--- a/compiler/rustc_middle/src/arena.rs
+++ b/compiler/rustc_middle/src/arena.rs
@@ -77,7 +77,7 @@ macro_rules!
arena_types { rustc_middle::infer::canonical::QueryResponse<'tcx, rustc_middle::ty::Ty<'tcx>> >, [] all_traits: Vec<rustc_hir::def_id::DefId>, - [] privacy_access_levels: rustc_middle::middle::privacy::AccessLevels, + [] effective_visibilities: rustc_middle::middle::privacy::EffectiveVisibilities, [] foreign_module: rustc_session::cstore::ForeignModule, [] foreign_modules: Vec<rustc_session::cstore::ForeignModule>, [] upvars_mentioned: rustc_data_structures::fx::FxIndexMap<rustc_hir::HirId, rustc_hir::Upvar>, @@ -96,11 +96,14 @@ macro_rules! arena_types { // since we need to allocate this type on both the `rustc_hir` arena // (during lowering) and the `librustc_middle` arena (for decoding MIR) [decode] asm_template: rustc_ast::InlineAsmTemplatePiece, - [decode] used_trait_imports: rustc_data_structures::fx::FxHashSet<rustc_hir::def_id::LocalDefId>, + [decode] used_trait_imports: rustc_data_structures::unord::UnordSet<rustc_hir::def_id::LocalDefId>, [decode] is_late_bound_map: rustc_data_structures::fx::FxIndexSet<rustc_hir::def_id::LocalDefId>, [decode] impl_source: rustc_middle::traits::ImplSource<'tcx, ()>, - [] dep_kind: rustc_middle::dep_graph::DepKindStruct, + [] dep_kind: rustc_middle::dep_graph::DepKindStruct<'tcx>, + + [decode] trait_impl_trait_tys: rustc_data_structures::fx::FxHashMap<rustc_hir::def_id::DefId, rustc_middle::ty::Ty<'tcx>>, + [] bit_set_u32: rustc_index::bit_set::BitSet<u32>, ]); ) } diff --git a/compiler/rustc_middle/src/dep_graph/dep_node.rs b/compiler/rustc_middle/src/dep_graph/dep_node.rs index 2d095438f..6b5568269 100644 --- a/compiler/rustc_middle/src/dep_graph/dep_node.rs +++ b/compiler/rustc_middle/src/dep_graph/dep_node.rs @@ -62,93 +62,18 @@ use crate::ty::TyCtxt; use rustc_data_structures::fingerprint::Fingerprint; use rustc_hir::def_id::{CrateNum, DefId, LocalDefId}; use rustc_hir::definitions::DefPathHash; -use rustc_hir::HirId; +use rustc_hir::{HirId, ItemLocalId, OwnerId}; use rustc_query_system::dep_graph::FingerprintStyle; use rustc_span::symbol::Symbol; use std::hash::Hash; pub use rustc_query_system::dep_graph::{DepContext, DepNodeParams}; -/// This struct stores metadata about each DepKind. -/// -/// Information is retrieved by indexing the `DEP_KINDS` array using the integer value -/// of the `DepKind`. Overall, this allows to implement `DepContext` using this manual -/// jump table instead of large matches. -pub struct DepKindStruct { - /// Anonymous queries cannot be replayed from one compiler invocation to the next. - /// When their result is needed, it is recomputed. They are useful for fine-grained - /// dependency tracking, and caching within one compiler invocation. - pub is_anon: bool, - - /// Eval-always queries do not track their dependencies, and are always recomputed, even if - /// their inputs have not changed since the last compiler invocation. The result is still - /// cached within one compiler invocation. - pub is_eval_always: bool, - - /// Whether the query key can be recovered from the hashed fingerprint. - /// See [DepNodeParams] trait for the behaviour of each key type. - pub fingerprint_style: FingerprintStyle, - - /// The red/green evaluation system will try to mark a specific DepNode in the - /// dependency graph as green by recursively trying to mark the dependencies of - /// that `DepNode` as green. While doing so, it will sometimes encounter a `DepNode` - /// where we don't know if it is red or green and we therefore actually have - /// to recompute its value in order to find out. 
Since the only piece of - /// information that we have at that point is the `DepNode` we are trying to - /// re-evaluate, we need some way to re-run a query from just that. This is what - /// `force_from_dep_node()` implements. - /// - /// In the general case, a `DepNode` consists of a `DepKind` and an opaque - /// GUID/fingerprint that will uniquely identify the node. This GUID/fingerprint - /// is usually constructed by computing a stable hash of the query-key that the - /// `DepNode` corresponds to. Consequently, it is not in general possible to go - /// back from hash to query-key (since hash functions are not reversible). For - /// this reason `force_from_dep_node()` is expected to fail from time to time - /// because we just cannot find out, from the `DepNode` alone, what the - /// corresponding query-key is and therefore cannot re-run the query. - /// - /// The system deals with this case letting `try_mark_green` fail which forces - /// the root query to be re-evaluated. - /// - /// Now, if `force_from_dep_node()` would always fail, it would be pretty useless. - /// Fortunately, we can use some contextual information that will allow us to - /// reconstruct query-keys for certain kinds of `DepNode`s. In particular, we - /// enforce by construction that the GUID/fingerprint of certain `DepNode`s is a - /// valid `DefPathHash`. Since we also always build a huge table that maps every - /// `DefPathHash` in the current codebase to the corresponding `DefId`, we have - /// everything we need to re-run the query. - /// - /// Take the `mir_promoted` query as an example. Like many other queries, it - /// just has a single parameter: the `DefId` of the item it will compute the - /// validated MIR for. Now, when we call `force_from_dep_node()` on a `DepNode` - /// with kind `MirValidated`, we know that the GUID/fingerprint of the `DepNode` - /// is actually a `DefPathHash`, and can therefore just look up the corresponding - /// `DefId` in `tcx.def_path_hash_to_def_id`. - pub force_from_dep_node: Option<fn(tcx: TyCtxt<'_>, dep_node: DepNode) -> bool>, - - /// Invoke a query to put the on-disk cached value in memory. - pub try_load_from_on_disk_cache: Option<fn(TyCtxt<'_>, DepNode)>, -} - -impl DepKind { - #[inline(always)] - pub fn fingerprint_style(self, tcx: TyCtxt<'_>) -> FingerprintStyle { - // Only fetch the DepKindStruct once. - let data = tcx.query_kind(self); - if data.is_anon { - return FingerprintStyle::Opaque; - } - data.fingerprint_style - } -} - macro_rules! define_dep_nodes { - (<$tcx:tt> - $( - [$($attrs:tt)*] - $variant:ident $(( $tuple_arg_ty:ty $(,)? ))* - ,)* - ) => ( + ( + $($(#[$attr:meta])* + [$($modifiers:tt)*] fn $variant:ident($($K:tt)*) -> $V:ty,)*) => { + #[macro_export] macro_rules! make_dep_kind_array { ($mod:ident) => {[ $($mod::$variant()),* ]}; @@ -158,10 +83,10 @@ macro_rules! define_dep_nodes { #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Encodable, Decodable)] #[allow(non_camel_case_types)] pub enum DepKind { - $($variant),* + $( $( #[$attr] )* $variant),* } - fn dep_kind_from_label_string(label: &str) -> Result<DepKind, ()> { + pub(super) fn dep_kind_from_label_string(label: &str) -> Result<DepKind, ()> { match label { $(stringify!($variant) => Ok(DepKind::$variant),)* _ => Err(()), @@ -176,24 +101,17 @@ macro_rules! define_dep_nodes { pub const $variant: &str = stringify!($variant); )* } - ); + }; } -rustc_dep_node_append!([define_dep_nodes!][ <'tcx> - // We use this for most things when incr. comp. is turned off. 
- [] Null, - - // We use this to create a forever-red node. - [] Red, - - [anon] TraitSelect, - - // WARNING: if `Symbol` is changed, make sure you update `make_compile_codegen_unit` below. - [] CompileCodegenUnit(Symbol), - - // WARNING: if `MonoItem` is changed, make sure you update `make_compile_mono_item` below. - // Only used by rustc_codegen_cranelift - [] CompileMonoItem(MonoItem), +rustc_query_append!(define_dep_nodes![ + /// We use this for most things when incr. comp. is turned off. + [] fn Null() -> (), + /// We use this to create a forever-red node. + [] fn Red() -> (), + [] fn TraitSelect() -> (), + [] fn CompileCodegenUnit() -> (), + [] fn CompileMonoItem() -> (), ]); // WARNING: `construct` is generic and does not know that `CompileCodegenUnit` takes `Symbol`s as keys. @@ -223,11 +141,6 @@ static_assert_size!(DepNode, 18); static_assert_size!(DepNode, 24); pub trait DepNodeExt: Sized { - /// Construct a DepNode from the given DepKind and DefPathHash. This - /// method will assert that the given DepKind actually requires a - /// single DefId/DefPathHash parameter. - fn from_def_path_hash(tcx: TyCtxt<'_>, def_path_hash: DefPathHash, kind: DepKind) -> Self; - /// Extracts the DefId corresponding to this DepNode. This will work /// if two conditions are met: /// @@ -252,14 +165,6 @@ pub trait DepNodeExt: Sized { } impl DepNodeExt for DepNode { - /// Construct a DepNode from the given DepKind and DefPathHash. This - /// method will assert that the given DepKind actually requires a - /// single DefId/DefPathHash parameter. - fn from_def_path_hash(tcx: TyCtxt<'_>, def_path_hash: DefPathHash, kind: DepKind) -> DepNode { - debug_assert!(kind.fingerprint_style(tcx) == FingerprintStyle::DefPathHash); - DepNode { kind, hash: def_path_hash.0.into() } - } - /// Extracts the DefId corresponding to this DepNode. This will work /// if two conditions are met: /// @@ -271,7 +176,7 @@ impl DepNodeExt for DepNode { /// refers to something from the previous compilation session that /// has been removed. 
fn extract_def_id<'tcx>(&self, tcx: TyCtxt<'tcx>) -> Option<DefId> { - if self.kind.fingerprint_style(tcx) == FingerprintStyle::DefPathHash { + if tcx.fingerprint_style(self.kind) == FingerprintStyle::DefPathHash { Some(tcx.def_path_hash_to_def_id(DefPathHash(self.hash.into()), &mut || { panic!("Failed to extract DefId: {:?} {}", self.kind, self.hash) })) @@ -288,8 +193,8 @@ impl DepNodeExt for DepNode { ) -> Result<DepNode, ()> { let kind = dep_kind_from_label_string(label)?; - match kind.fingerprint_style(tcx) { - FingerprintStyle::Opaque => Err(()), + match tcx.fingerprint_style(kind) { + FingerprintStyle::Opaque | FingerprintStyle::HirId => Err(()), FingerprintStyle::Unit => Ok(DepNode::new_no_params(tcx, kind)), FingerprintStyle::DefPathHash => { Ok(DepNode::from_def_path_hash(tcx, def_path_hash, kind)) @@ -364,6 +269,28 @@ impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for LocalDefId { } } +impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for OwnerId { + #[inline(always)] + fn fingerprint_style() -> FingerprintStyle { + FingerprintStyle::DefPathHash + } + + #[inline(always)] + fn to_fingerprint(&self, tcx: TyCtxt<'tcx>) -> Fingerprint { + self.to_def_id().to_fingerprint(tcx) + } + + #[inline(always)] + fn to_debug_str(&self, tcx: TyCtxt<'tcx>) -> String { + self.to_def_id().to_debug_str(tcx) + } + + #[inline(always)] + fn recover(tcx: TyCtxt<'tcx>, dep_node: &DepNode) -> Option<Self> { + dep_node.extract_def_id(tcx).map(|id| OwnerId { def_id: id.expect_local() }) + } +} + impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for CrateNum { #[inline(always)] fn fingerprint_style() -> FingerprintStyle { @@ -417,7 +344,7 @@ impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for (DefId, DefId) { impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for HirId { #[inline(always)] fn fingerprint_style() -> FingerprintStyle { - FingerprintStyle::Opaque + FingerprintStyle::HirId } // We actually would not need to specialize the implementation of this @@ -426,10 +353,36 @@ impl<'tcx> DepNodeParams<TyCtxt<'tcx>> for HirId { #[inline(always)] fn to_fingerprint(&self, tcx: TyCtxt<'tcx>) -> Fingerprint { let HirId { owner, local_id } = *self; - let def_path_hash = tcx.def_path_hash(owner.to_def_id()); - let local_id = Fingerprint::from_smaller_hash(local_id.as_u32().into()); + Fingerprint::new( + // `owner` is local, so is completely defined by the local hash + def_path_hash.local_hash(), + local_id.as_u32().into(), + ) + } - def_path_hash.0.combine(local_id) + #[inline(always)] + fn to_debug_str(&self, tcx: TyCtxt<'tcx>) -> String { + let HirId { owner, local_id } = *self; + format!("{}.{}", tcx.def_path_str(owner.to_def_id()), local_id.as_u32()) + } + + #[inline(always)] + fn recover(tcx: TyCtxt<'tcx>, dep_node: &DepNode) -> Option<Self> { + if tcx.fingerprint_style(dep_node.kind) == FingerprintStyle::HirId { + let (local_hash, local_id) = Fingerprint::from(dep_node.hash).as_value(); + let def_path_hash = DefPathHash::new(tcx.sess.local_stable_crate_id(), local_hash); + let def_id = tcx + .def_path_hash_to_def_id(def_path_hash, &mut || { + panic!("Failed to extract HirId: {:?} {}", dep_node.kind, dep_node.hash) + }) + .expect_local(); + let local_id = local_id + .try_into() + .unwrap_or_else(|_| panic!("local id should be u32, found {:?}", local_id)); + Some(HirId { owner: OwnerId { def_id }, local_id: ItemLocalId::from_u32(local_id) }) + } else { + None + } } } diff --git a/compiler/rustc_middle/src/dep_graph/mod.rs b/compiler/rustc_middle/src/dep_graph/mod.rs index c8b3b52b0..2e62bebc8 100644 --- a/compiler/rustc_middle/src/dep_graph/mod.rs +++ 
b/compiler/rustc_middle/src/dep_graph/mod.rs @@ -11,15 +11,17 @@ pub use rustc_query_system::dep_graph::{ SerializedDepNodeIndex, WorkProduct, WorkProductId, }; -pub use dep_node::{label_strs, DepKind, DepKindStruct, DepNode, DepNodeExt}; +pub use dep_node::{label_strs, DepKind, DepNode, DepNodeExt}; pub(crate) use dep_node::{make_compile_codegen_unit, make_compile_mono_item}; pub type DepGraph = rustc_query_system::dep_graph::DepGraph<DepKind>; + pub type TaskDeps = rustc_query_system::dep_graph::TaskDeps<DepKind>; pub type TaskDepsRef<'a> = rustc_query_system::dep_graph::TaskDepsRef<'a, DepKind>; pub type DepGraphQuery = rustc_query_system::dep_graph::DepGraphQuery<DepKind>; pub type SerializedDepGraph = rustc_query_system::dep_graph::SerializedDepGraph<DepKind>; pub type EdgeFilter = rustc_query_system::dep_graph::debug::EdgeFilter<DepKind>; +pub type DepKindStruct<'tcx> = rustc_query_system::dep_graph::DepKindStruct<TyCtxt<'tcx>>; impl rustc_query_system::dep_graph::DepKind for DepKind { const NULL: Self = DepKind::Null; @@ -91,50 +93,8 @@ impl<'tcx> DepContext for TyCtxt<'tcx> { self.sess } - #[inline(always)] - fn fingerprint_style(&self, kind: DepKind) -> rustc_query_system::dep_graph::FingerprintStyle { - kind.fingerprint_style(*self) - } - - #[inline(always)] - fn is_eval_always(&self, kind: DepKind) -> bool { - self.query_kind(kind).is_eval_always - } - - fn try_force_from_dep_node(&self, dep_node: DepNode) -> bool { - debug!("try_force_from_dep_node({:?}) --- trying to force", dep_node); - - // We must avoid ever having to call `force_from_dep_node()` for a - // `DepNode::codegen_unit`: - // Since we cannot reconstruct the query key of a `DepNode::codegen_unit`, we - // would always end up having to evaluate the first caller of the - // `codegen_unit` query that *is* reconstructible. This might very well be - // the `compile_codegen_unit` query, thus re-codegenning the whole CGU just - // to re-trigger calling the `codegen_unit` query with the right key. At - // that point we would already have re-done all the work we are trying to - // avoid doing in the first place. - // The solution is simple: Just explicitly call the `codegen_unit` query for - // each CGU, right after partitioning. This way `try_mark_green` will always - // hit the cache instead of having to go through `force_from_dep_node`. - // This assertion makes sure, we actually keep applying the solution above. 
- debug_assert!( - dep_node.kind != DepKind::codegen_unit, - "calling force_from_dep_node() on DepKind::codegen_unit" - ); - - let cb = self.query_kind(dep_node.kind); - if let Some(f) = cb.force_from_dep_node { - f(*self, dep_node); - true - } else { - false - } - } - - fn try_load_from_on_disk_cache(&self, dep_node: DepNode) { - let cb = self.query_kind(dep_node.kind); - if let Some(f) = cb.try_load_from_on_disk_cache { - f(*self, dep_node) - } + #[inline] + fn dep_kind_info(&self, dep_kind: DepKind) -> &DepKindStruct<'tcx> { + &self.query_kinds[dep_kind as usize] } } diff --git a/compiler/rustc_middle/src/error.rs b/compiler/rustc_middle/src/error.rs new file mode 100644 index 000000000..a7a7ac059 --- /dev/null +++ b/compiler/rustc_middle/src/error.rs @@ -0,0 +1,57 @@ +use rustc_macros::Diagnostic; +use rustc_span::Span; + +use crate::ty::Ty; + +#[derive(Diagnostic)] +#[diag(middle_drop_check_overflow, code = "E0320")] +#[note] +pub struct DropCheckOverflow<'tcx> { + #[primary_span] + pub span: Span, + pub ty: Ty<'tcx>, + pub overflow_ty: Ty<'tcx>, +} + +#[derive(Diagnostic)] +#[diag(middle_opaque_hidden_type_mismatch)] +pub struct OpaqueHiddenTypeMismatch<'tcx> { + pub self_ty: Ty<'tcx>, + pub other_ty: Ty<'tcx>, + #[primary_span] + #[label] + pub other_span: Span, + #[subdiagnostic] + pub sub: TypeMismatchReason, +} + +#[derive(Subdiagnostic)] +pub enum TypeMismatchReason { + #[label(middle_conflict_types)] + ConflictType { + #[primary_span] + span: Span, + }, + #[note(middle_previous_use_here)] + PreviousUse { + #[primary_span] + span: Span, + }, +} + +#[derive(Diagnostic)] +#[diag(middle_limit_invalid)] +pub struct LimitInvalid<'a> { + #[primary_span] + pub span: Span, + #[label] + pub value_span: Span, + pub error_str: &'a str, +} + +#[derive(Diagnostic)] +#[diag(middle_const_eval_non_int)] +pub struct ConstEvalNonIntError { + #[primary_span] + pub span: Span, +} diff --git a/compiler/rustc_middle/src/hir/map/mod.rs b/compiler/rustc_middle/src/hir/map/mod.rs index 47b04c33e..83a4d16d7 100644 --- a/compiler/rustc_middle/src/hir/map/mod.rs +++ b/compiler/rustc_middle/src/hir/map/mod.rs @@ -14,31 +14,9 @@ use rustc_index::vec::Idx; use rustc_middle::hir::nested_filter; use rustc_span::def_id::StableCrateId; use rustc_span::symbol::{kw, sym, Ident, Symbol}; -use rustc_span::Span; +use rustc_span::{Span, DUMMY_SP}; use rustc_target::spec::abi::Abi; -fn fn_decl<'hir>(node: Node<'hir>) -> Option<&'hir FnDecl<'hir>> { - match node { - Node::Item(Item { kind: ItemKind::Fn(sig, _, _), .. }) - | Node::TraitItem(TraitItem { kind: TraitItemKind::Fn(sig, _), .. }) - | Node::ImplItem(ImplItem { kind: ImplItemKind::Fn(sig, _), .. }) => Some(&sig.decl), - Node::Expr(Expr { kind: ExprKind::Closure(Closure { fn_decl, .. }), .. }) - | Node::ForeignItem(ForeignItem { kind: ForeignItemKind::Fn(fn_decl, ..), .. }) => { - Some(fn_decl) - } - _ => None, - } -} - -pub fn fn_sig<'hir>(node: Node<'hir>) -> Option<&'hir FnSig<'hir>> { - match &node { - Node::Item(Item { kind: ItemKind::Fn(sig, _, _), .. }) - | Node::TraitItem(TraitItem { kind: TraitItemKind::Fn(sig, _), .. }) - | Node::ImplItem(ImplItem { kind: ImplItemKind::Fn(sig, _), .. 
}) => Some(sig), - _ => None, - } -} - #[inline] pub fn associated_body<'hir>(node: Node<'hir>) -> Option<BodyId> { match node { @@ -83,7 +61,7 @@ pub struct ParentHirIterator<'hir> { } impl<'hir> Iterator for ParentHirIterator<'hir> { - type Item = (HirId, Node<'hir>); + type Item = HirId; fn next(&mut self) -> Option<Self::Item> { if self.current_id == CRATE_HIR_ID { @@ -99,10 +77,7 @@ impl<'hir> Iterator for ParentHirIterator<'hir> { } self.current_id = parent_id; - if let Some(node) = self.map.find(parent_id) { - return Some((parent_id, node)); - } - // If this `HirId` doesn't have an entry, skip it and look for its `parent_id`. + return Some(parent_id); } } } @@ -115,7 +90,7 @@ pub struct ParentOwnerIterator<'hir> { } impl<'hir> Iterator for ParentOwnerIterator<'hir> { - type Item = (LocalDefId, OwnerNode<'hir>); + type Item = (OwnerId, OwnerNode<'hir>); fn next(&mut self) -> Option<Self::Item> { if self.current_id.local_id.index() != 0 { @@ -129,13 +104,13 @@ impl<'hir> Iterator for ParentOwnerIterator<'hir> { } loop { // There are nodes that do not have entries, so we need to skip them. - let parent_id = self.map.def_key(self.current_id.owner).parent; + let parent_id = self.map.def_key(self.current_id.owner.def_id).parent; - let parent_id = parent_id.map_or(CRATE_HIR_ID.owner, |local_def_index| { + let parent_id = parent_id.map_or(CRATE_OWNER_ID, |local_def_index| { let def_id = LocalDefId { local_def_index }; self.map.local_def_id_to_hir_id(def_id).owner }); - self.current_id = HirId::make_owner(parent_id); + self.current_id = HirId::make_owner(parent_id.def_id); // If this `HirId` doesn't have an entry, skip it and look for its `parent_id`. if let Some(node) = self.map.tcx.hir_owner(self.current_id.owner) { @@ -146,25 +121,30 @@ impl<'hir> Iterator for ParentOwnerIterator<'hir> { } impl<'hir> Map<'hir> { + #[inline] pub fn krate(self) -> &'hir Crate<'hir> { self.tcx.hir_crate(()) } + #[inline] pub fn root_module(self) -> &'hir Mod<'hir> { - match self.tcx.hir_owner(CRATE_DEF_ID).map(|o| o.node) { + match self.tcx.hir_owner(CRATE_OWNER_ID).map(|o| o.node) { Some(OwnerNode::Crate(item)) => item, _ => bug!(), } } + #[inline] pub fn items(self) -> impl Iterator<Item = ItemId> + 'hir { self.tcx.hir_crate_items(()).items.iter().copied() } + #[inline] pub fn module_items(self, module: LocalDefId) -> impl Iterator<Item = ItemId> + 'hir { self.tcx.hir_module_items(module).items() } + #[inline] pub fn par_for_each_item(self, f: impl Fn(ItemId) + Sync + Send) { par_for_each_in(&self.tcx.hir_crate_items(()).items[..], |id| f(*id)); } @@ -203,7 +183,7 @@ impl<'hir> Map<'hir> { #[inline] pub fn opt_local_def_id(self, hir_id: HirId) -> Option<LocalDefId> { if hir_id.local_id == ItemLocalId::new(0) { - Some(hir_id.owner) + Some(hir_id.owner.def_id) } else { self.tcx .hir_owner_nodes(hir_id.owner) @@ -229,7 +209,13 @@ impl<'hir> Map<'hir> { ItemKind::Fn(..) => DefKind::Fn, ItemKind::Macro(_, macro_kind) => DefKind::Macro(macro_kind), ItemKind::Mod(..) => DefKind::Mod, - ItemKind::OpaqueTy(..) => DefKind::OpaqueTy, + ItemKind::OpaqueTy(ref opaque) => { + if opaque.in_trait { + DefKind::ImplTraitPlaceholder + } else { + DefKind::OpaqueTy + } + } ItemKind::TyAlias(..) => DefKind::TyAlias, ItemKind::Enum(..) => DefKind::Enum, ItemKind::Struct(..) => DefKind::Struct, @@ -255,7 +241,7 @@ impl<'hir> Map<'hir> { Node::ImplItem(item) => match item.kind { ImplItemKind::Const(..) => DefKind::AssocConst, ImplItemKind::Fn(..) => DefKind::AssocFn, - ImplItemKind::TyAlias(..) 
=> DefKind::AssocTy, + ImplItemKind::Type(..) => DefKind::AssocTy, }, Node::Variant(_) => DefKind::Variant, Node::Ctor(variant_data) => { @@ -297,6 +283,8 @@ impl<'hir> Map<'hir> { | Node::Infer(_) | Node::TraitRef(_) | Node::Pat(_) + | Node::PatField(_) + | Node::ExprField(_) | Node::Local(_) | Node::Param(_) | Node::Arm(_) @@ -306,6 +294,9 @@ impl<'hir> Map<'hir> { Some(def_kind) } + /// Finds the id of the parent node to this one. + /// + /// If calling repeatedly and iterating over parents, prefer [`Map::parent_iter`]. pub fn find_parent_node(self, id: HirId) -> Option<HirId> { if id.local_id == ItemLocalId::from_u32(0) { Some(self.tcx.hir_owner_parent(id.owner)) @@ -313,6 +304,8 @@ impl<'hir> Map<'hir> { let owner = self.tcx.hir_owner_nodes(id.owner).as_owner()?; let node = owner.nodes[id.local_id].as_ref()?; let hir_id = HirId { owner: id.owner, local_id: node.parent }; + // HIR indexing should have checked that. + debug_assert_ne!(id.local_id, node.parent); Some(hir_id) } } @@ -356,24 +349,24 @@ impl<'hir> Map<'hir> { } pub fn get_generics(self, id: LocalDefId) -> Option<&'hir Generics<'hir>> { - let node = self.tcx.hir_owner(id)?; + let node = self.tcx.hir_owner(OwnerId { def_id: id })?; node.node.generics() } pub fn item(self, id: ItemId) -> &'hir Item<'hir> { - self.tcx.hir_owner(id.def_id).unwrap().node.expect_item() + self.tcx.hir_owner(id.owner_id).unwrap().node.expect_item() } pub fn trait_item(self, id: TraitItemId) -> &'hir TraitItem<'hir> { - self.tcx.hir_owner(id.def_id).unwrap().node.expect_trait_item() + self.tcx.hir_owner(id.owner_id).unwrap().node.expect_trait_item() } pub fn impl_item(self, id: ImplItemId) -> &'hir ImplItem<'hir> { - self.tcx.hir_owner(id.def_id).unwrap().node.expect_impl_item() + self.tcx.hir_owner(id.owner_id).unwrap().node.expect_impl_item() } pub fn foreign_item(self, id: ForeignItemId) -> &'hir ForeignItem<'hir> { - self.tcx.hir_owner(id.def_id).unwrap().node.expect_foreign_item() + self.tcx.hir_owner(id.owner_id).unwrap().node.expect_foreign_item() } pub fn body(self, id: BodyId) -> &'hir Body<'hir> { @@ -382,7 +375,7 @@ impl<'hir> Map<'hir> { pub fn fn_decl_by_hir_id(self, hir_id: HirId) -> Option<&'hir FnDecl<'hir>> { if let Some(node) = self.find(hir_id) { - fn_decl(node) + node.fn_decl() } else { bug!("no node for hir_id `{}`", hir_id) } @@ -390,15 +383,15 @@ impl<'hir> Map<'hir> { pub fn fn_sig_by_hir_id(self, hir_id: HirId) -> Option<&'hir FnSig<'hir>> { if let Some(node) = self.find(hir_id) { - fn_sig(node) + node.fn_sig() } else { bug!("no node for hir_id `{}`", hir_id) } } pub fn enclosing_body_owner(self, hir_id: HirId) -> LocalDefId { - for (parent, _) in self.parent_iter(hir_id) { - if let Some(body) = self.find(parent).map(associated_body).flatten() { + for (_, node) in self.parent_iter(hir_id) { + if let Some(body) = associated_body(node) { return self.body_owner_def_id(body); } } @@ -487,11 +480,13 @@ impl<'hir> Map<'hir> { /// Returns an iterator of the `DefId`s for all body-owners in this /// crate. If you would prefer to iterate over the bodies /// themselves, you can do `self.hir().krate().body_ids.iter()`. 
+ #[inline] pub fn body_owners(self) -> impl Iterator<Item = LocalDefId> + 'hir { self.tcx.hir_crate_items(()).body_owners.iter().copied() } - pub fn par_body_owners<F: Fn(LocalDefId) + Sync + Send>(self, f: F) { + #[inline] + pub fn par_body_owners(self, f: impl Fn(LocalDefId) + Sync + Send) { par_for_each_in(&self.tcx.hir_crate_items(()).body_owners[..], |&def_id| f(def_id)); } @@ -499,7 +494,9 @@ impl<'hir> Map<'hir> { let def_kind = self.tcx.def_kind(def_id); match def_kind { DefKind::Trait | DefKind::TraitAlias => def_id, - DefKind::TyParam | DefKind::ConstParam => self.tcx.local_parent(def_id), + DefKind::LifetimeParam | DefKind::TyParam | DefKind::ConstParam => { + self.tcx.local_parent(def_id) + } _ => bug!("ty_param_owner: {:?} is a {:?} not a type parameter", def_id, def_kind), } } @@ -508,7 +505,9 @@ impl<'hir> Map<'hir> { let def_kind = self.tcx.def_kind(def_id); match def_kind { DefKind::Trait | DefKind::TraitAlias => kw::SelfUpper, - DefKind::TyParam | DefKind::ConstParam => self.tcx.item_name(def_id.to_def_id()), + DefKind::LifetimeParam | DefKind::TyParam | DefKind::ConstParam => { + self.tcx.item_name(def_id.to_def_id()) + } _ => bug!("ty_param_name: {:?} is a {:?} not a type parameter", def_id, def_kind), } } @@ -530,7 +529,7 @@ impl<'hir> Map<'hir> { pub fn get_module(self, module: LocalDefId) -> (&'hir Mod<'hir>, Span, HirId) { let hir_id = HirId::make_owner(module); - match self.tcx.hir_owner(module).map(|o| o.node) { + match self.tcx.hir_owner(hir_id.owner).map(|o| o.node) { Some(OwnerNode::Item(&Item { span, kind: ItemKind::Mod(ref m), .. })) => { (m, span, hir_id) } @@ -620,39 +619,33 @@ impl<'hir> Map<'hir> { pub fn for_each_module(self, mut f: impl FnMut(LocalDefId)) { let crate_items = self.tcx.hir_crate_items(()); for module in crate_items.submodules.iter() { - f(*module) + f(module.def_id) } } - #[cfg(not(parallel_compiler))] #[inline] - pub fn par_for_each_module(self, f: impl Fn(LocalDefId)) { - self.for_each_module(f) - } - - #[cfg(parallel_compiler)] - pub fn par_for_each_module(self, f: impl Fn(LocalDefId) + Sync) { - use rustc_data_structures::sync::{par_iter, ParallelIterator}; - par_iter_submodules(self.tcx, CRATE_DEF_ID, &f); - - fn par_iter_submodules<F>(tcx: TyCtxt<'_>, module: LocalDefId, f: &F) - where - F: Fn(LocalDefId) + Sync, - { - (*f)(module); - let items = tcx.hir_module_items(module); - par_iter(&items.submodules[..]).for_each(|&sm| par_iter_submodules(tcx, sm, f)); - } + pub fn par_for_each_module(self, f: impl Fn(LocalDefId) + Sync + Send) { + let crate_items = self.tcx.hir_crate_items(()); + par_for_each_in(&crate_items.submodules[..], |module| f(module.def_id)) } /// Returns an iterator for the nodes in the ancestor tree of the `current_id` /// until the crate root is reached. Prefer this over your own loop using `get_parent_node`. - pub fn parent_iter(self, current_id: HirId) -> ParentHirIterator<'hir> { + #[inline] + pub fn parent_id_iter(self, current_id: HirId) -> impl Iterator<Item = HirId> + 'hir { ParentHirIterator { current_id, map: self } } /// Returns an iterator for the nodes in the ancestor tree of the `current_id` /// until the crate root is reached. Prefer this over your own loop using `get_parent_node`. + #[inline] + pub fn parent_iter(self, current_id: HirId) -> impl Iterator<Item = (HirId, Node<'hir>)> { + self.parent_id_iter(current_id).filter_map(move |id| Some((id, self.find(id)?))) + } + + /// Returns an iterator for the nodes in the ancestor tree of the `current_id` + /// until the crate root is reached. 
Prefer this over your own loop using `get_parent_node`. + #[inline] pub fn parent_owner_iter(self, current_id: HirId) -> ParentOwnerIterator<'hir> { ParentOwnerIterator { current_id, map: self } } @@ -732,27 +725,27 @@ impl<'hir> Map<'hir> { None } - /// Retrieves the `HirId` for `id`'s parent item, or `id` itself if no + /// Retrieves the `OwnerId` for `id`'s parent item, or `id` itself if no /// parent item is in this map. The "parent item" is the closest parent node /// in the HIR which is recorded by the map and is an item, either an item /// in a module, trait, or impl. - pub fn get_parent_item(self, hir_id: HirId) -> LocalDefId { + pub fn get_parent_item(self, hir_id: HirId) -> OwnerId { if let Some((def_id, _node)) = self.parent_owner_iter(hir_id).next() { def_id } else { - CRATE_DEF_ID + CRATE_OWNER_ID } } - /// Returns the `HirId` of `id`'s nearest module parent, or `id` itself if no + /// Returns the `OwnerId` of `id`'s nearest module parent, or `id` itself if no /// module parent is in this map. - pub(super) fn get_module_parent_node(self, hir_id: HirId) -> LocalDefId { + pub(super) fn get_module_parent_node(self, hir_id: HirId) -> OwnerId { for (def_id, node) in self.parent_owner_iter(hir_id) { if let OwnerNode::Item(&Item { kind: ItemKind::Mod(_), .. }) = node { return def_id; } } - CRATE_DEF_ID + CRATE_OWNER_ID } /// When on an if expression, a match arm tail expression or a match arm, give back @@ -825,30 +818,30 @@ impl<'hir> Map<'hir> { } bug!( "expected foreign mod or inlined parent, found {}", - self.node_to_string(HirId::make_owner(parent)) + self.node_to_string(HirId::make_owner(parent.def_id)) ) } - pub fn expect_owner(self, id: LocalDefId) -> OwnerNode<'hir> { + pub fn expect_owner(self, id: OwnerId) -> OwnerNode<'hir> { self.tcx.hir_owner(id).unwrap_or_else(|| bug!("expected owner for {:?}", id)).node } pub fn expect_item(self, id: LocalDefId) -> &'hir Item<'hir> { - match self.tcx.hir_owner(id) { + match self.tcx.hir_owner(OwnerId { def_id: id }) { Some(Owner { node: OwnerNode::Item(item), .. }) => item, _ => bug!("expected item, found {}", self.node_to_string(HirId::make_owner(id))), } } pub fn expect_impl_item(self, id: LocalDefId) -> &'hir ImplItem<'hir> { - match self.tcx.hir_owner(id) { + match self.tcx.hir_owner(OwnerId { def_id: id }) { Some(Owner { node: OwnerNode::ImplItem(item), .. }) => item, _ => bug!("expected impl item, found {}", self.node_to_string(HirId::make_owner(id))), } } pub fn expect_trait_item(self, id: LocalDefId) -> &'hir TraitItem<'hir> { - match self.tcx.hir_owner(id) { + match self.tcx.hir_owner(OwnerId { def_id: id }) { Some(Owner { node: OwnerNode::TraitItem(item), .. }) => item, _ => bug!("expected trait item, found {}", self.node_to_string(HirId::make_owner(id))), } @@ -861,11 +854,14 @@ impl<'hir> Map<'hir> { } } - pub fn expect_foreign_item(self, id: LocalDefId) -> &'hir ForeignItem<'hir> { + pub fn expect_foreign_item(self, id: OwnerId) -> &'hir ForeignItem<'hir> { match self.tcx.hir_owner(id) { Some(Owner { node: OwnerNode::ForeignItem(item), .. }) => item, _ => { - bug!("expected foreign item, found {}", self.node_to_string(HirId::make_owner(id))) + bug!( + "expected foreign item, found {}", + self.node_to_string(HirId::make_owner(id.def_id)) + ) } } } @@ -945,9 +941,19 @@ impl<'hir> Map<'hir> { let span = match self.find(hir_id)? { // Function-like. - Node::Item(Item { kind: ItemKind::Fn(sig, ..), .. }) - | Node::TraitItem(TraitItem { kind: TraitItemKind::Fn(sig, ..), .. 
}) - | Node::ImplItem(ImplItem { kind: ImplItemKind::Fn(sig, ..), .. }) => sig.span, + Node::Item(Item { kind: ItemKind::Fn(sig, ..), span: outer_span, .. }) + | Node::TraitItem(TraitItem { + kind: TraitItemKind::Fn(sig, ..), + span: outer_span, + .. + }) + | Node::ImplItem(ImplItem { + kind: ImplItemKind::Fn(sig, ..), span: outer_span, .. + }) => { + // Ensure that the returned span has the item's SyntaxContext, and not the + // SyntaxContext of the visibility. + sig.span.find_ancestor_in_same_ctxt(*outer_span).unwrap_or(*outer_span) + } // Constants and Statics. Node::Item(Item { kind: @@ -989,7 +995,11 @@ impl<'hir> Map<'hir> { } // Other cases. Node::Item(item) => match &item.kind { - ItemKind::Use(path, _) => path.span, + ItemKind::Use(path, _) => { + // Ensure that the returned span has the item's SyntaxContext, and not the + // SyntaxContext of the path. + path.span.find_ancestor_in_same_ctxt(item.span).unwrap_or(item.span) + } _ => named_span(item.span, item.ident, item.kind.generics()), }, Node::Variant(variant) => named_span(variant.span, variant.ident, None), @@ -999,11 +1009,17 @@ impl<'hir> Map<'hir> { _ => named_span(item.span, item.ident, None), }, Node::Ctor(_) => return self.opt_span(self.get_parent_node(hir_id)), - Node::Expr(Expr { kind: ExprKind::Closure(Closure { fn_decl_span, .. }), .. }) => { - *fn_decl_span + Node::Expr(Expr { + kind: ExprKind::Closure(Closure { fn_decl_span, .. }), + span, + .. + }) => { + // Ensure that the returned span has the item's SyntaxContext. + fn_decl_span.find_ancestor_in_same_ctxt(*span).unwrap_or(*span) } _ => self.span_with_body(hir_id), }; + debug_assert_eq!(span.ctxt(), self.span_with_body(hir_id).ctxt()); Some(span) } @@ -1020,6 +1036,7 @@ impl<'hir> Map<'hir> { Node::Field(field) => field.span, Node::AnonConst(constant) => self.body(constant.body).value.span, Node::Expr(expr) => expr.span, + Node::ExprField(field) => field.span, Node::Stmt(stmt) => stmt.span, Node::PathSegment(seg) => { let ident_span = seg.ident.span; @@ -1030,6 +1047,7 @@ impl<'hir> Map<'hir> { Node::TypeBinding(tb) => tb.span, Node::TraitRef(tr) => tr.path.span, Node::Pat(pat) => pat.span, + Node::PatField(field) => field.span, Node::Arm(arm) => arm.span, Node::Block(block) => block.span, Node::Ctor(..) => self.span_with_body(self.get_parent_node(hir_id)), @@ -1137,7 +1155,7 @@ pub(super) fn crate_hash(tcx: TyCtxt<'_>, crate_num: CrateNum) -> Svh { .filter_map(|(def_id, info)| { let _ = info.as_owner()?; let def_path_hash = definitions.def_path_hash(def_id); - let span = resolutions.source_span[def_id]; + let span = resolutions.source_span.get(def_id).unwrap_or(&DUMMY_SP); debug_assert_eq!(span.parent(), None); Some((def_path_hash, span)) }) @@ -1204,7 +1222,13 @@ fn hir_id_to_string(map: Map<'_>, id: HirId) -> String { ItemKind::ForeignMod { .. } => "foreign mod", ItemKind::GlobalAsm(..) => "global asm", ItemKind::TyAlias(..) => "ty", - ItemKind::OpaqueTy(..) => "opaque type", + ItemKind::OpaqueTy(ref opaque) => { + if opaque.in_trait { + "opaque type in trait" + } else { + "opaque type" + } + } ItemKind::Enum(..) => "enum", ItemKind::Struct(..) => "struct", ItemKind::Union(..) => "union", @@ -1220,7 +1244,7 @@ fn hir_id_to_string(map: Map<'_>, id: HirId) -> String { format!("assoc const {} in {}{}", ii.ident, path_str(), id_str) } ImplItemKind::Fn(..) 
=> format!("method {} in {}{}", ii.ident, path_str(), id_str), - ImplItemKind::TyAlias(_) => { + ImplItemKind::Type(_) => { format!("assoc type {} in {}{}", ii.ident, path_str(), id_str) } }, @@ -1241,12 +1265,14 @@ fn hir_id_to_string(map: Map<'_>, id: HirId) -> String { } Some(Node::AnonConst(_)) => node_str("const"), Some(Node::Expr(_)) => node_str("expr"), + Some(Node::ExprField(_)) => node_str("expr field"), Some(Node::Stmt(_)) => node_str("stmt"), Some(Node::PathSegment(_)) => node_str("path segment"), Some(Node::Ty(_)) => node_str("type"), Some(Node::TypeBinding(_)) => node_str("type binding"), Some(Node::TraitRef(_)) => node_str("trait ref"), Some(Node::Pat(_)) => node_str("pat"), + Some(Node::PatField(_)) => node_str("pattern field"), Some(Node::Param(_)) => node_str("param"), Some(Node::Arm(_)) => node_str("arm"), Some(Node::Block(_)) => node_str("block"), @@ -1291,7 +1317,7 @@ pub(crate) fn hir_crate_items(tcx: TyCtxt<'_>, _: ()) -> ModuleItems { // A "crate collector" and "module collector" start at a // module item (the former starts at the crate root) but only // the former needs to collect it. ItemCollector does not do this for us. - collector.submodules.push(CRATE_DEF_ID); + collector.submodules.push(CRATE_OWNER_ID); tcx.hir().walk_toplevel_module(&mut collector); let ItemCollector { @@ -1319,7 +1345,7 @@ struct ItemCollector<'tcx> { // otherwise it collects items in some module. crate_collector: bool, tcx: TyCtxt<'tcx>, - submodules: Vec<LocalDefId>, + submodules: Vec<OwnerId>, items: Vec<ItemId>, trait_items: Vec<TraitItemId>, impl_items: Vec<ImplItemId>, @@ -1351,14 +1377,14 @@ impl<'hir> Visitor<'hir> for ItemCollector<'hir> { fn visit_item(&mut self, item: &'hir Item<'hir>) { if associated_body(Node::Item(item)).is_some() { - self.body_owners.push(item.def_id); + self.body_owners.push(item.owner_id.def_id); } self.items.push(item.item_id()); // Items that are modules are handled here instead of in visit_mod. if let ItemKind::Mod(module) = &item.kind { - self.submodules.push(item.def_id); + self.submodules.push(item.owner_id); // A module collector does not recurse inside nested modules. if self.crate_collector { intravisit::walk_mod(self, module, item.hir_id()); @@ -1387,7 +1413,7 @@ impl<'hir> Visitor<'hir> for ItemCollector<'hir> { fn visit_trait_item(&mut self, item: &'hir TraitItem<'hir>) { if associated_body(Node::TraitItem(item)).is_some() { - self.body_owners.push(item.def_id); + self.body_owners.push(item.owner_id.def_id); } self.trait_items.push(item.trait_item_id()); @@ -1396,7 +1422,7 @@ impl<'hir> Visitor<'hir> for ItemCollector<'hir> { fn visit_impl_item(&mut self, item: &'hir ImplItem<'hir>) { if associated_body(Node::ImplItem(item)).is_some() { - self.body_owners.push(item.def_id); + self.body_owners.push(item.owner_id.def_id); } self.impl_items.push(item.impl_item_id()); diff --git a/compiler/rustc_middle/src/hir/mod.rs b/compiler/rustc_middle/src/hir/mod.rs index 211a61471..1c6264ad0 100644 --- a/compiler/rustc_middle/src/hir/mod.rs +++ b/compiler/rustc_middle/src/hir/mod.rs @@ -39,7 +39,7 @@ impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for Owner<'tcx> { /// bodies. The Ids are in visitor order. This is used to partition a pass between modules. 
#[derive(Debug, HashStable, Encodable, Decodable)] pub struct ModuleItems { - submodules: Box<[LocalDefId]>, + submodules: Box<[OwnerId]>, items: Box<[ItemId]>, trait_items: Box<[TraitItemId]>, impl_items: Box<[ImplItemId]>, @@ -67,10 +67,10 @@ impl ModuleItems { pub fn definitions(&self) -> impl Iterator<Item = LocalDefId> + '_ { self.items .iter() - .map(|id| id.def_id) - .chain(self.trait_items.iter().map(|id| id.def_id)) - .chain(self.impl_items.iter().map(|id| id.def_id)) - .chain(self.foreign_items.iter().map(|id| id.def_id)) + .map(|id| id.owner_id.def_id) + .chain(self.trait_items.iter().map(|id| id.owner_id.def_id)) + .chain(self.impl_items.iter().map(|id| id.owner_id.def_id)) + .chain(self.foreign_items.iter().map(|id| id.owner_id.def_id)) } pub fn par_items(&self, f: impl Fn(ItemId) + Send + Sync) { @@ -97,7 +97,7 @@ impl<'tcx> TyCtxt<'tcx> { } pub fn parent_module(self, id: HirId) -> LocalDefId { - self.parent_module_from_def_id(id.owner) + self.parent_module_from_def_id(id.owner.def_id) } pub fn impl_subject(self, def_id: DefId) -> ImplSubject<'tcx> { @@ -110,13 +110,13 @@ impl<'tcx> TyCtxt<'tcx> { pub fn provide(providers: &mut Providers) { providers.parent_module_from_def_id = |tcx, id| { let hir = tcx.hir(); - hir.get_module_parent_node(hir.local_def_id_to_hir_id(id)) + hir.get_module_parent_node(hir.local_def_id_to_hir_id(id)).def_id }; providers.hir_crate_items = map::hir_crate_items; providers.crate_hash = map::crate_hash; providers.hir_module_items = map::hir_module_items; providers.hir_owner = |tcx, id| { - let owner = tcx.hir_crate(()).owners.get(id)?.as_owner()?; + let owner = tcx.hir_crate(()).owners.get(id.def_id)?.as_owner()?; let node = owner.node(); Some(Owner { node, hash_without_bodies: owner.nodes.hash_without_bodies }) }; @@ -128,21 +128,24 @@ pub fn provide(providers: &mut Providers) { MaybeOwner::NonOwner(hir_id) => hir_id, } }; - providers.hir_owner_nodes = |tcx, id| tcx.hir_crate(()).owners[id].map(|i| &i.nodes); + providers.hir_owner_nodes = |tcx, id| tcx.hir_crate(()).owners[id.def_id].map(|i| &i.nodes); providers.hir_owner_parent = |tcx, id| { // Accessing the local_parent is ok since its value is hashed as part of `id`'s DefPathHash. 
- tcx.opt_local_parent(id).map_or(CRATE_HIR_ID, |parent| { + tcx.opt_local_parent(id.def_id).map_or(CRATE_HIR_ID, |parent| { let mut parent_hir_id = tcx.hir().local_def_id_to_hir_id(parent); - if let Some(local_id) = - tcx.hir_crate(()).owners[parent_hir_id.owner].unwrap().parenting.get(&id) + if let Some(local_id) = tcx.hir_crate(()).owners[parent_hir_id.owner.def_id] + .unwrap() + .parenting + .get(&id.def_id) { parent_hir_id.local_id = *local_id; } parent_hir_id }) }; - providers.hir_attrs = - |tcx, id| tcx.hir_crate(()).owners[id].as_owner().map_or(AttributeMap::EMPTY, |o| &o.attrs); + providers.hir_attrs = |tcx, id| { + tcx.hir_crate(()).owners[id.def_id].as_owner().map_or(AttributeMap::EMPTY, |o| &o.attrs) + }; providers.source_span = |tcx, def_id| tcx.resolutions(()).source_span.get(def_id).copied().unwrap_or(DUMMY_SP); providers.def_span = |tcx, def_id| { @@ -177,6 +180,7 @@ pub fn provide(providers: &mut Providers) { let id = id.expect_local(); tcx.resolutions(()).expn_that_defined.get(&id).copied().unwrap_or(ExpnId::root()) }; - providers.in_scope_traits_map = - |tcx, id| tcx.hir_crate(()).owners[id].as_owner().map(|owner_info| &owner_info.trait_map); + providers.in_scope_traits_map = |tcx, id| { + tcx.hir_crate(()).owners[id.def_id].as_owner().map(|owner_info| &owner_info.trait_map) + }; } diff --git a/compiler/rustc_middle/src/infer/canonical.rs b/compiler/rustc_middle/src/infer/canonical.rs index 200de9079..d3cf519b6 100644 --- a/compiler/rustc_middle/src/infer/canonical.rs +++ b/compiler/rustc_middle/src/infer/canonical.rs @@ -22,6 +22,7 @@ //! [c]: https://rust-lang.github.io/chalk/book/canonical_queries/canonicalization.html use crate::infer::MemberConstraint; +use crate::mir::ConstraintCategory; use crate::ty::subst::GenericArg; use crate::ty::{self, BoundVar, List, Region, Ty, TyCtxt}; use rustc_index::vec::IndexVec; @@ -43,6 +44,15 @@ pub struct Canonical<'tcx, V> { pub type CanonicalVarInfos<'tcx> = &'tcx List<CanonicalVarInfo<'tcx>>; +impl<'tcx> ty::TypeFoldable<'tcx> for CanonicalVarInfos<'tcx> { + fn try_fold_with<F: ty::FallibleTypeFolder<'tcx>>( + self, + folder: &mut F, + ) -> Result<Self, F::Error> { + ty::util::fold_list(self, folder, |tcx, v| tcx.intern_canonical_var_infos(v)) + } +} + /// A set of values corresponding to the canonical variables from some /// `Canonical`. You can give these values to /// `canonical_value.substitute` to substitute them into the canonical @@ -89,6 +99,7 @@ impl<'tcx> Default for OriginalQueryValues<'tcx> { /// a copy of the canonical value in some other inference context, /// with fresh inference variables replacing the canonical values. #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, TyDecodable, TyEncodable, HashStable)] +#[derive(TypeFoldable, TypeVisitable)] pub struct CanonicalVarInfo<'tcx> { pub kind: CanonicalVarKind<'tcx>, } @@ -114,6 +125,7 @@ impl<'tcx> CanonicalVarInfo<'tcx> { /// in the type-theory sense of the term -- i.e., a "meta" type system /// that analyzes type-like values. #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, TyDecodable, TyEncodable, HashStable)] +#[derive(TypeFoldable, TypeVisitable)] pub enum CanonicalVarKind<'tcx> { /// Some kind of type inference variable. 
Ty(CanonicalTyVarKind), @@ -290,20 +302,15 @@ impl<'tcx, V> Canonical<'tcx, V> { } } -pub type QueryOutlivesConstraint<'tcx> = - ty::Binder<'tcx, ty::OutlivesPredicate<GenericArg<'tcx>, Region<'tcx>>>; +pub type QueryOutlivesConstraint<'tcx> = ( + ty::Binder<'tcx, ty::OutlivesPredicate<GenericArg<'tcx>, Region<'tcx>>>, + ConstraintCategory<'tcx>, +); TrivialTypeTraversalAndLiftImpls! { for <'tcx> { crate::infer::canonical::Certainty, - crate::infer::canonical::CanonicalVarInfo<'tcx>, - crate::infer::canonical::CanonicalVarKind<'tcx>, - } -} - -TrivialTypeTraversalImpls! { - for <'tcx> { - crate::infer::canonical::CanonicalVarInfos<'tcx>, + crate::infer::canonical::CanonicalTyVarKind, } } diff --git a/compiler/rustc_middle/src/infer/unify_key.rs b/compiler/rustc_middle/src/infer/unify_key.rs index f2627885d..41d8c7ffd 100644 --- a/compiler/rustc_middle/src/infer/unify_key.rs +++ b/compiler/rustc_middle/src/infer/unify_key.rs @@ -129,7 +129,7 @@ impl<'tcx> UnifyKey for ty::ConstVid<'tcx> { } impl<'tcx> UnifyValue for ConstVarValue<'tcx> { - type Error = (ty::Const<'tcx>, ty::Const<'tcx>); + type Error = NoError; fn unify_values(&value1: &Self, &value2: &Self) -> Result<Self, Self::Error> { Ok(match (value1.val, value2.val) { diff --git a/compiler/rustc_middle/src/lib.rs b/compiler/rustc_middle/src/lib.rs index ef06c457b..a58cbc376 100644 --- a/compiler/rustc_middle/src/lib.rs +++ b/compiler/rustc_middle/src/lib.rs @@ -26,28 +26,24 @@ #![feature(allocator_api)] #![feature(array_windows)] #![feature(assert_matches)] -#![feature(backtrace)] #![feature(box_patterns)] #![feature(core_intrinsics)] #![feature(discriminant_kind)] #![feature(exhaustive_patterns)] #![feature(get_mut_unchecked)] -#![feature(generic_associated_types)] #![feature(if_let_guard)] -#![feature(map_first_last)] #![feature(negative_impls)] #![feature(never_type)] #![feature(extern_types)] #![feature(new_uninit)] #![feature(once_cell)] #![feature(let_chains)] -#![feature(let_else)] #![feature(min_specialization)] #![feature(trusted_len)] #![feature(type_alias_impl_trait)] #![feature(associated_type_bounds)] #![feature(rustc_attrs)] -#![feature(half_open_range_patterns)] +#![cfg_attr(bootstrap, feature(half_open_range_patterns))] #![feature(control_flow_enum)] #![feature(associated_type_defaults)] #![feature(trusted_step)] @@ -59,6 +55,7 @@ #![feature(drain_filter)] #![feature(intra_doc_pointers)] #![feature(yeet_expr)] +#![feature(result_option_inspect)] #![feature(const_option)] #![recursion_limit = "512"] #![allow(rustc::potential_query_instability)] @@ -87,6 +84,7 @@ pub mod query; pub mod arena; #[macro_use] pub mod dep_graph; +pub(crate) mod error; pub mod hir; pub mod infer; pub mod lint; @@ -96,6 +94,7 @@ pub mod mir; pub mod thir; pub mod traits; pub mod ty; +mod values; pub mod util { pub mod bug; diff --git a/compiler/rustc_middle/src/lint.rs b/compiler/rustc_middle/src/lint.rs index 2f45222de..79522bd0b 100644 --- a/compiler/rustc_middle/src/lint.rs +++ b/compiler/rustc_middle/src/lint.rs @@ -1,20 +1,20 @@ use std::cmp; use rustc_data_structures::fx::FxHashMap; -use rustc_data_structures::stable_hasher::{HashStable, StableHasher}; -use rustc_errors::{Diagnostic, DiagnosticId, LintDiagnosticBuilder, MultiSpan}; -use rustc_hir::HirId; -use rustc_index::vec::IndexVec; -use rustc_query_system::ich::StableHashingContext; +use rustc_data_structures::sorted_map::SortedMap; +use rustc_errors::{Diagnostic, DiagnosticBuilder, DiagnosticId, DiagnosticMessage, MultiSpan}; +use rustc_hir::{HirId, ItemLocalId}; use 
rustc_session::lint::{ builtin::{self, FORBIDDEN_LINT_GROUPS}, - FutureIncompatibilityReason, Level, Lint, LintExpectationId, LintId, + FutureIncompatibilityReason, Level, Lint, LintId, }; use rustc_session::Session; use rustc_span::hygiene::MacroKind; use rustc_span::source_map::{DesugaringKind, ExpnKind}; use rustc_span::{symbol, Span, Symbol, DUMMY_SP}; +use crate::ty::TyCtxt; + /// How a lint level was set. #[derive(Clone, Copy, PartialEq, Eq, HashStable, Debug)] pub enum LintLevelSource { @@ -23,7 +23,12 @@ pub enum LintLevelSource { Default, /// Lint level was set by an attribute. - Node(Symbol, Span, Option<Symbol> /* RFC 2383 reason */), + Node { + name: Symbol, + span: Span, + /// RFC 2383 reason + reason: Option<Symbol>, + }, /// Lint level was set by a command-line flag. /// The provided `Level` is the level specified on the command line. @@ -35,7 +40,7 @@ impl LintLevelSource { pub fn name(&self) -> Symbol { match *self { LintLevelSource::Default => symbol::kw::Default, - LintLevelSource::Node(name, _, _) => name, + LintLevelSource::Node { name, .. } => name, LintLevelSource::CommandLine(name, _) => name, } } @@ -43,7 +48,7 @@ impl LintLevelSource { pub fn span(&self) -> Span { match *self { LintLevelSource::Default => DUMMY_SP, - LintLevelSource::Node(_, span, _) => span, + LintLevelSource::Node { span, .. } => span, LintLevelSource::CommandLine(_, _) => DUMMY_SP, } } @@ -52,145 +57,137 @@ impl LintLevelSource { /// A tuple of a lint level and its source. pub type LevelAndSource = (Level, LintLevelSource); -#[derive(Debug, HashStable)] -pub struct LintLevelSets { - pub list: IndexVec<LintStackIndex, LintSet>, - pub lint_cap: Level, -} - -rustc_index::newtype_index! { - #[derive(HashStable)] - pub struct LintStackIndex { - const COMMAND_LINE = 0, - } -} - -#[derive(Debug, HashStable)] -pub struct LintSet { - // -A,-W,-D flags, a `Symbol` for the flag itself and `Level` for which - // flag. - pub specs: FxHashMap<LintId, LevelAndSource>, - - pub parent: LintStackIndex, +/// Return type for the `shallow_lint_levels_on` query. +/// +/// This map represents the set of allowed lints and allowance levels given +/// by the attributes for *a single HirId*. +#[derive(Default, Debug, HashStable)] +pub struct ShallowLintLevelMap { + pub specs: SortedMap<ItemLocalId, FxHashMap<LintId, LevelAndSource>>, } -impl LintLevelSets { - pub fn new() -> Self { - LintLevelSets { list: IndexVec::new(), lint_cap: Level::Forbid } - } - - pub fn get_lint_level( - &self, - lint: &'static Lint, - idx: LintStackIndex, - aux: Option<&FxHashMap<LintId, LevelAndSource>>, - sess: &Session, - ) -> LevelAndSource { - let (level, mut src) = self.get_lint_id_level(LintId::of(lint), idx, aux); - - // If `level` is none then we actually assume the default level for this - // lint. - let mut level = level.unwrap_or_else(|| lint.default_level(sess.edition())); - - // If we're about to issue a warning, check at the last minute for any - // directives against the warnings "lint". If, for example, there's an - // `allow(warnings)` in scope then we want to respect that instead. - // - // We exempt `FORBIDDEN_LINT_GROUPS` from this because it specifically - // triggers in cases (like #80988) where you have `forbid(warnings)`, - // and so if we turned that into an error, it'd defeat the purpose of the - // future compatibility warning. 
- if level == Level::Warn && LintId::of(lint) != LintId::of(FORBIDDEN_LINT_GROUPS) { - let (warnings_level, warnings_src) = - self.get_lint_id_level(LintId::of(builtin::WARNINGS), idx, aux); - if let Some(configured_warning_level) = warnings_level { - if configured_warning_level != Level::Warn { - level = configured_warning_level; - src = warnings_src; - } +/// From an initial level and source, verify the effect of special annotations: +/// `warnings` lint level and lint caps. +/// +/// The return of this function is suitable for diagnostics. +pub fn reveal_actual_level( + level: Option<Level>, + src: &mut LintLevelSource, + sess: &Session, + lint: LintId, + probe_for_lint_level: impl FnOnce(LintId) -> (Option<Level>, LintLevelSource), +) -> Level { + // If `level` is none then we actually assume the default level for this lint. + let mut level = level.unwrap_or_else(|| lint.lint.default_level(sess.edition())); + + // If we're about to issue a warning, check at the last minute for any + // directives against the warnings "lint". If, for example, there's an + // `allow(warnings)` in scope then we want to respect that instead. + // + // We exempt `FORBIDDEN_LINT_GROUPS` from this because it specifically + // triggers in cases (like #80988) where you have `forbid(warnings)`, + // and so if we turned that into an error, it'd defeat the purpose of the + // future compatibility warning. + if level == Level::Warn && lint != LintId::of(FORBIDDEN_LINT_GROUPS) { + let (warnings_level, warnings_src) = probe_for_lint_level(LintId::of(builtin::WARNINGS)); + if let Some(configured_warning_level) = warnings_level { + if configured_warning_level != Level::Warn { + level = configured_warning_level; + *src = warnings_src; } } + } - // Ensure that we never exceed the `--cap-lints` argument - // unless the source is a --force-warn - level = if let LintLevelSource::CommandLine(_, Level::ForceWarn(_)) = src { - level - } else { - cmp::min(level, self.lint_cap) - }; - - if let Some(driver_level) = sess.driver_lint_caps.get(&LintId::of(lint)) { - // Ensure that we never exceed driver level. - level = cmp::min(*driver_level, level); - } + // Ensure that we never exceed the `--cap-lints` argument unless the source is a --force-warn + level = if let LintLevelSource::CommandLine(_, Level::ForceWarn(_)) = src { + level + } else { + cmp::min(level, sess.opts.lint_cap.unwrap_or(Level::Forbid)) + }; - (level, src) + if let Some(driver_level) = sess.driver_lint_caps.get(&lint) { + // Ensure that we never exceed driver level. + level = cmp::min(*driver_level, level); } - pub fn get_lint_id_level( + level +} + +impl ShallowLintLevelMap { + /// Perform a deep probe in the HIR tree looking for the actual level for the lint. + /// This lint level is not usable for diagnostics, it needs to be corrected by + /// `reveal_actual_level` beforehand. 
+ #[instrument(level = "trace", skip(self, tcx), ret)] + fn probe_for_lint_level( &self, + tcx: TyCtxt<'_>, id: LintId, - mut idx: LintStackIndex, - aux: Option<&FxHashMap<LintId, LevelAndSource>>, + start: HirId, ) -> (Option<Level>, LintLevelSource) { - if let Some(specs) = aux { - if let Some(&(level, src)) = specs.get(&id) { - return (Some(level), src); - } + if let Some(map) = self.specs.get(&start.local_id) + && let Some(&(level, src)) = map.get(&id) + { + return (Some(level), src); } - loop { - let LintSet { ref specs, parent } = self.list[idx]; - if let Some(&(level, src)) = specs.get(&id) { - return (Some(level), src); + + let mut owner = start.owner; + let mut specs = &self.specs; + + for parent in tcx.hir().parent_id_iter(start) { + if parent.owner != owner { + owner = parent.owner; + specs = &tcx.shallow_lint_levels_on(owner).specs; } - if idx == COMMAND_LINE { - return (None, LintLevelSource::Default); + if let Some(map) = specs.get(&parent.local_id) + && let Some(&(level, src)) = map.get(&id) + { + return (Some(level), src); } - idx = parent; } - } -} -#[derive(Debug)] -pub struct LintLevelMap { - /// This is a collection of lint expectations as described in RFC 2383, that - /// can be fulfilled during this compilation session. This means that at least - /// one expected lint is currently registered in the lint store. - /// - /// The [`LintExpectationId`] is stored as a part of the [`Expect`](Level::Expect) - /// lint level. - pub lint_expectations: Vec<(LintExpectationId, LintExpectation)>, - pub sets: LintLevelSets, - pub id_to_set: FxHashMap<HirId, LintStackIndex>, -} + (None, LintLevelSource::Default) + } -impl LintLevelMap { - /// If the `id` was previously registered with `register_id` when building - /// this `LintLevelMap` this returns the corresponding lint level and source - /// of the lint level for the lint provided. - /// - /// If the `id` was not previously registered, returns `None`. If `None` is - /// returned then the parent of `id` should be acquired and this function - /// should be called again. - pub fn level_and_source( + /// Fetch and return the user-visible lint level for the given lint at the given HirId. + #[instrument(level = "trace", skip(self, tcx), ret)] + pub fn lint_level_id_at_node( &self, - lint: &'static Lint, - id: HirId, - session: &Session, - ) -> Option<LevelAndSource> { - self.id_to_set.get(&id).map(|idx| self.sets.get_lint_level(lint, *idx, None, session)) + tcx: TyCtxt<'_>, + lint: LintId, + cur: HirId, + ) -> (Level, LintLevelSource) { + let (level, mut src) = self.probe_for_lint_level(tcx, lint, cur); + let level = reveal_actual_level(level, &mut src, tcx.sess, lint, |lint| { + self.probe_for_lint_level(tcx, lint, cur) + }); + (level, src) } } -impl<'a> HashStable<StableHashingContext<'a>> for LintLevelMap { - #[inline] - fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { - let LintLevelMap { ref sets, ref id_to_set, ref lint_expectations } = *self; +impl TyCtxt<'_> { + /// Fetch and return the user-visible lint level for the given lint at the given HirId. + pub fn lint_level_at_node(self, lint: &'static Lint, id: HirId) -> (Level, LintLevelSource) { + self.shallow_lint_levels_on(id.owner).lint_level_id_at_node(self, LintId::of(lint), id) + } - id_to_set.hash_stable(hcx, hasher); - lint_expectations.hash_stable(hcx, hasher); + /// Walks upwards from `id` to find a node which might change lint levels with attributes. + /// It stops at `bound` and just returns it if reached. 
+ pub fn maybe_lint_level_root_bounded(self, mut id: HirId, bound: HirId) -> HirId { + let hir = self.hir(); + loop { + if id == bound { + return bound; + } - hcx.while_hashing_spans(true, |hcx| sets.hash_stable(hcx, hasher)) + if hir.attrs(id).iter().any(|attr| Level::from_attr(attr).is_some()) { + return id; + } + let next = hir.get_parent_node(id); + if next == id { + bug!("lint traversal reached the root of the crate"); + } + id = next; + } } } @@ -261,11 +258,11 @@ pub fn explain_lint_level_source( )); } } - LintLevelSource::Node(lint_attr_name, src, reason) => { + LintLevelSource::Node { name: lint_attr_name, span, reason, .. } => { if let Some(rationale) = reason { err.note(rationale.as_str()); } - err.span_note_once(src, "the lint level is defined here"); + err.span_note_once(span, "the lint level is defined here"); if lint_attr_name.as_str() != name { let level_str = level.as_str(); err.note_once(&format!( @@ -277,23 +274,65 @@ pub fn explain_lint_level_source( } } -pub fn struct_lint_level<'s, 'd>( - sess: &'s Session, +/// The innermost function for emitting lints. +/// +/// If you are looking to implement a lint, look for higher level functions, +/// for example: +/// - [`TyCtxt::emit_spanned_lint`] +/// - [`TyCtxt::struct_span_lint_hir`] +/// - [`TyCtxt::emit_lint`] +/// - [`TyCtxt::struct_lint_node`] +/// - `LintContext::lookup` +/// +/// ## `decorate` signature +/// +/// The return value of `decorate` is ignored by this function. So what is the +/// point of returning `&'b mut DiagnosticBuilder<'a, ()>`? +/// +/// There are 2 reasons for this signature. +/// +/// First of all, it prevents accidental use of `.emit()` -- it's clear that the +/// builder will be later used and shouldn't be emitted right away (this is +/// especially important because the old API expected you to call `.emit()` in +/// the closure). +/// +/// Second of all, it makes the most common case of adding just a single label +/// /suggestion much nicer, since [`DiagnosticBuilder`] methods return +/// `&mut DiagnosticBuilder`, you can just chain methods, without needing +/// awkward `{ ...; }`: +/// ```ignore pseudo-code +/// struct_lint_level( +/// ..., +/// |lint| lint.span_label(sp, "lbl") +/// // ^^^^^^^^^^^^^^^^^^^^^ returns `&mut DiagnosticBuilder` by default +/// ) +/// ``` +pub fn struct_lint_level( + sess: &Session, lint: &'static Lint, level: Level, src: LintLevelSource, span: Option<MultiSpan>, - decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a, ()>) + 'd, + msg: impl Into<DiagnosticMessage>, + decorate: impl for<'a, 'b> FnOnce( + &'b mut DiagnosticBuilder<'a, ()>, + ) -> &'b mut DiagnosticBuilder<'a, ()>, ) { // Avoid codegen bloat from monomorphization by immediately doing dyn dispatch of `decorate` to // the "real" work. - fn struct_lint_level_impl<'s, 'd>( - sess: &'s Session, + fn struct_lint_level_impl( + sess: &Session, lint: &'static Lint, level: Level, src: LintLevelSource, span: Option<MultiSpan>, - decorate: Box<dyn for<'b> FnOnce(LintDiagnosticBuilder<'b, ()>) + 'd>, + msg: impl Into<DiagnosticMessage>, + decorate: Box< + dyn '_ + + for<'a, 'b> FnOnce( + &'b mut DiagnosticBuilder<'a, ()>, + ) -> &'b mut DiagnosticBuilder<'a, ()>, + >, ) { // Check for future incompatibility lints and issue a stronger warning.
let future_incompatible = lint.future_incompatible; @@ -344,6 +383,8 @@ pub fn struct_lint_level<'s, 'd>( (Level::Deny | Level::Forbid, None) => sess.diagnostic().struct_err_lint(""), }; + err.set_is_lint(); + // If this code originates in a foreign macro, aka something that this crate // did not itself author, then it's likely that there's nothing this crate // can do about it. We probably want to skip the lint entirely. @@ -366,6 +407,10 @@ pub fn struct_lint_level<'s, 'd>( } } + // Delay evaluating and setting the primary message until after we've + // suppressed the lint due to macros. + err.set_primary_message(msg); + // Lint diagnostics that are covered by the expect level will not be emitted outside // the compiler. It is therefore not necessary to add any information for the user. // This will therefore directly call the decorate function which will in turn emit @@ -373,12 +418,12 @@ pub fn struct_lint_level<'s, 'd>( if let Level::Expect(_) = level { let name = lint.name_lower(); err.code(DiagnosticId::Lint { name, has_future_breakage, is_force_warn: false }); - decorate(LintDiagnosticBuilder::new(err)); + + decorate(&mut err); + err.emit(); return; } - explain_lint_level_source(lint, level, src, &mut err); - let name = lint.name_lower(); let is_force_warn = matches!(level, Level::ForceWarn(_)); err.code(DiagnosticId::Lint { name, has_future_breakage, is_force_warn }); @@ -417,10 +462,12 @@ pub fn struct_lint_level<'s, 'd>( } } - // Finally, run `decorate`. This function is also responsible for emitting the diagnostic. - decorate(LintDiagnosticBuilder::new(err)); + // Finally, run `decorate`. + decorate(&mut err); + explain_lint_level_source(lint, level, src, &mut *err); + err.emit() } - struct_lint_level_impl(sess, lint, level, src, span, Box::new(decorate)) + struct_lint_level_impl(sess, lint, level, src, span, msg, Box::new(decorate)) } /// Returns whether `span` originates in a foreign crate's external macro. @@ -432,7 +479,9 @@ pub fn in_external_macro(sess: &Session, span: Span) -> bool { match expn_data.kind { ExpnKind::Inlined | ExpnKind::Root - | ExpnKind::Desugaring(DesugaringKind::ForLoop | DesugaringKind::WhileLoop) => false, + | ExpnKind::Desugaring( + DesugaringKind::ForLoop | DesugaringKind::WhileLoop | DesugaringKind::OpaqueTy, + ) => false, ExpnKind::AstPass(_) | ExpnKind::Desugaring(_) => true, // well, it's "external" ExpnKind::Macro(MacroKind::Bang, _) => { // Dummy span for the `def_site` means it's an external macro. diff --git a/compiler/rustc_middle/src/macros.rs b/compiler/rustc_middle/src/macros.rs index 0e85c60a3..01fe72de6 100644 --- a/compiler/rustc_middle/src/macros.rs +++ b/compiler/rustc_middle/src/macros.rs @@ -54,13 +54,22 @@ macro_rules! 
TrivialTypeTraversalImpls { impl<$tcx> $crate::ty::fold::TypeFoldable<$tcx> for $ty { fn try_fold_with<F: $crate::ty::fold::FallibleTypeFolder<$tcx>>( self, - _: &mut F - ) -> ::std::result::Result<$ty, F::Error> { + _: &mut F, + ) -> ::std::result::Result<Self, F::Error> { Ok(self) } + + #[inline] + fn fold_with<F: $crate::ty::fold::TypeFolder<$tcx>>( + self, + _: &mut F, + ) -> Self { + self + } } impl<$tcx> $crate::ty::visit::TypeVisitable<$tcx> for $ty { + #[inline] fn visit_with<F: $crate::ty::visit::TypeVisitor<$tcx>>( &self, _: &mut F) diff --git a/compiler/rustc_middle/src/metadata.rs b/compiler/rustc_middle/src/metadata.rs index c8e78747d..5ff014c78 100644 --- a/compiler/rustc_middle/src/metadata.rs +++ b/compiler/rustc_middle/src/metadata.rs @@ -2,6 +2,7 @@ use crate::ty; use rustc_hir::def::Res; use rustc_macros::HashStable; +use rustc_span::def_id::DefId; use rustc_span::symbol::Ident; use rustc_span::Span; @@ -18,7 +19,7 @@ pub struct ModChild { /// Local variables cannot be exported, so this `Res` doesn't need the ID parameter. pub res: Res<!>, /// Visibility of the item. - pub vis: ty::Visibility, + pub vis: ty::Visibility<DefId>, /// Span of the item. pub span: Span, /// A proper `macro_rules` item (not a reexport). diff --git a/compiler/rustc_middle/src/middle/lang_items.rs b/compiler/rustc_middle/src/middle/lang_items.rs index cc9706f2d..31c20fa14 100644 --- a/compiler/rustc_middle/src/middle/lang_items.rs +++ b/compiler/rustc_middle/src/middle/lang_items.rs @@ -18,11 +18,11 @@ impl<'tcx> TyCtxt<'tcx> { /// Returns the `DefId` for a given `LangItem`. /// If not found, fatally aborts compilation. pub fn require_lang_item(self, lang_item: LangItem, span: Option<Span>) -> DefId { - self.lang_items().require(lang_item).unwrap_or_else(|msg| { + self.lang_items().require(lang_item).unwrap_or_else(|err| { if let Some(span) = span { - self.sess.span_fatal(span, &msg) + self.sess.span_fatal(span, err.to_string()) } else { - self.sess.fatal(&msg) + self.sess.fatal(err.to_string()) } }) } diff --git a/compiler/rustc_middle/src/middle/limits.rs b/compiler/rustc_middle/src/middle/limits.rs index acced0492..12aef66bc 100644 --- a/compiler/rustc_middle/src/middle/limits.rs +++ b/compiler/rustc_middle/src/middle/limits.rs @@ -10,6 +10,7 @@ //! just peeks and looks for that attribute. 
use crate::bug; +use crate::error::LimitInvalid; use crate::ty; use rustc_ast::Attribute; use rustc_session::Session; @@ -37,7 +38,7 @@ pub fn provide(providers: &mut ty::query::Providers) { tcx.hir().krate_attrs(), tcx.sess, sym::const_eval_limit, - 1_000_000, + 2_000_000, ), } } @@ -56,9 +57,6 @@ fn get_limit(krate_attrs: &[Attribute], sess: &Session, name: Symbol, default: u match s.as_str().parse() { Ok(n) => return Limit::new(n), Err(e) => { - let mut err = - sess.struct_span_err(attr.span, "`limit` must be a non-negative integer"); - let value_span = attr .meta() .and_then(|meta| meta.name_value_literal_span()) @@ -74,9 +72,7 @@ fn get_limit(krate_attrs: &[Attribute], sess: &Session, name: Symbol, default: u IntErrorKind::Zero => bug!("zero is a valid `limit`"), kind => bug!("unimplemented IntErrorKind variant: {:?}", kind), }; - - err.span_label(value_span, error_str); - err.emit(); + sess.emit_err(LimitInvalid { span: attr.span, value_span, error_str }); } } } diff --git a/compiler/rustc_middle/src/middle/privacy.rs b/compiler/rustc_middle/src/middle/privacy.rs index 751c7f464..9c68c7504 100644 --- a/compiler/rustc_middle/src/middle/privacy.rs +++ b/compiler/rustc_middle/src/middle/privacy.rs @@ -1,64 +1,218 @@ //! A pass that checks to make sure private fields and methods aren't used //! outside their scopes. This pass will also generate a set of exported items //! which are available for use externally when compiled as a library. - +use crate::ty::{DefIdTree, Visibility}; use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::stable_hasher::{HashStable, StableHasher}; use rustc_macros::HashStable; use rustc_query_system::ich::StableHashingContext; -use rustc_span::def_id::LocalDefId; +use rustc_span::def_id::{DefId, LocalDefId}; use std::hash::Hash; -/// Represents the levels of accessibility an item can have. +/// Represents the levels of effective visibility an item can have. /// -/// The variants are sorted in ascending order of accessibility. +/// The variants are sorted in ascending order of directness. #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, HashStable)] -pub enum AccessLevel { - /// Superset of `AccessLevel::Reachable` used to mark impl Trait items. - ReachableFromImplTrait, - /// Exported items + items participating in various kinds of public interfaces, - /// but not directly nameable. For example, if function `fn f() -> T {...}` is - /// public, then type `T` is reachable. Its values can be obtained by other crates - /// even if the type itself is not nameable. +pub enum Level { + /// Superset of `Reachable` including items leaked through return position `impl Trait`. + ReachableThroughImplTrait, + /// Item is either reexported, or leaked through any kind of interface. + /// For example, if function `fn f() -> T {...}` is directly public, then type `T` is publicly + /// reachable and its values can be obtained by other crates even if the type itself is not + /// nameable. Reachable, - /// Public items + items accessible to other crates with the help of `pub use` re-exports. - Exported, - /// Items accessible to other crates directly, without the help of re-exports. - Public, + /// Item is accessible either directly, or with help of `use` reexports. + Reexported, + /// Item is directly accessible, without help of reexports. 
+ Direct, +} + +impl Level { + pub fn all_levels() -> [Level; 4] { + [Level::Direct, Level::Reexported, Level::Reachable, Level::ReachableThroughImplTrait] + } +} + +#[derive(Clone, Copy, PartialEq, Eq, Debug, HashStable)] +pub struct EffectiveVisibility { + direct: Visibility, + reexported: Visibility, + reachable: Visibility, + reachable_through_impl_trait: Visibility, +} + +impl EffectiveVisibility { + pub fn at_level(&self, level: Level) -> &Visibility { + match level { + Level::Direct => &self.direct, + Level::Reexported => &self.reexported, + Level::Reachable => &self.reachable, + Level::ReachableThroughImplTrait => &self.reachable_through_impl_trait, + } + } + + fn at_level_mut(&mut self, level: Level) -> &mut Visibility { + match level { + Level::Direct => &mut self.direct, + Level::Reexported => &mut self.reexported, + Level::Reachable => &mut self.reachable, + Level::ReachableThroughImplTrait => &mut self.reachable_through_impl_trait, + } + } + + pub fn is_public_at_level(&self, level: Level) -> bool { + self.at_level(level).is_public() + } + + pub fn from_vis(vis: Visibility) -> EffectiveVisibility { + EffectiveVisibility { + direct: vis, + reexported: vis, + reachable: vis, + reachable_through_impl_trait: vis, + } + } } -/// Holds a map of accessibility levels for reachable HIR nodes. +/// Holds a map of effective visibilities for reachable HIR nodes. #[derive(Debug, Clone)] -pub struct AccessLevels<Id = LocalDefId> { - pub map: FxHashMap<Id, AccessLevel>, +pub struct EffectiveVisibilities<Id = LocalDefId> { + map: FxHashMap<Id, EffectiveVisibility>, } -impl<Id: Hash + Eq> AccessLevels<Id> { - /// See `AccessLevel::Reachable`. +impl<Id: Hash + Eq + Copy> EffectiveVisibilities<Id> { + pub fn is_public_at_level(&self, id: Id, level: Level) -> bool { + self.effective_vis(id) + .map_or(false, |effective_vis| effective_vis.is_public_at_level(level)) + } + + /// See `Level::Reachable`. pub fn is_reachable(&self, id: Id) -> bool { - self.map.get(&id) >= Some(&AccessLevel::Reachable) + self.is_public_at_level(id, Level::Reachable) } - /// See `AccessLevel::Exported`. + /// See `Level::Reexported`. pub fn is_exported(&self, id: Id) -> bool { - self.map.get(&id) >= Some(&AccessLevel::Exported) + self.is_public_at_level(id, Level::Reexported) + } + + /// See `Level::Direct`. + pub fn is_directly_public(&self, id: Id) -> bool { + self.is_public_at_level(id, Level::Direct) + } + + pub fn public_at_level(&self, id: Id) -> Option<Level> { + self.effective_vis(id).and_then(|effective_vis| { + for level in Level::all_levels() { + if effective_vis.is_public_at_level(level) { + return Some(level); + } + } + None + }) + } + + pub fn effective_vis(&self, id: Id) -> Option<&EffectiveVisibility> { + self.map.get(&id) } - /// See `AccessLevel::Public`. 
- pub fn is_public(&self, id: Id) -> bool { - self.map.get(&id) >= Some(&AccessLevel::Public) + pub fn iter(&self) -> impl Iterator<Item = (&Id, &EffectiveVisibility)> { + self.map.iter() + } + + pub fn map_id<OutId: Hash + Eq + Copy>( + &self, + f: impl Fn(Id) -> OutId, + ) -> EffectiveVisibilities<OutId> { + EffectiveVisibilities { map: self.map.iter().map(|(k, v)| (f(*k), *v)).collect() } + } + + pub fn set_public_at_level( + &mut self, + id: Id, + default_vis: impl FnOnce() -> Visibility, + level: Level, + ) { + let mut effective_vis = self + .effective_vis(id) + .copied() + .unwrap_or_else(|| EffectiveVisibility::from_vis(default_vis())); + for l in Level::all_levels() { + if l <= level { + *effective_vis.at_level_mut(l) = Visibility::Public; + } + } + self.map.insert(id, effective_vis); + } +} + +impl<Id: Hash + Eq + Copy + Into<DefId>> EffectiveVisibilities<Id> { + // `parent_id` is not necessarily a parent in source code tree, + // it is the node from which the maximum effective visibility is inherited. + pub fn update( + &mut self, + id: Id, + nominal_vis: Visibility, + default_vis: impl FnOnce() -> Visibility, + parent_id: Id, + level: Level, + tree: impl DefIdTree, + ) -> bool { + let mut changed = false; + let mut current_effective_vis = self.effective_vis(id).copied().unwrap_or_else(|| { + if id.into().is_crate_root() { + EffectiveVisibility::from_vis(Visibility::Public) + } else { + EffectiveVisibility::from_vis(default_vis()) + } + }); + if let Some(inherited_effective_vis) = self.effective_vis(parent_id) { + let mut inherited_effective_vis_at_prev_level = + *inherited_effective_vis.at_level(level); + let mut calculated_effective_vis = inherited_effective_vis_at_prev_level; + for l in Level::all_levels() { + if level >= l { + let inherited_effective_vis_at_level = *inherited_effective_vis.at_level(l); + let current_effective_vis_at_level = current_effective_vis.at_level_mut(l); + // effective visibility for id shouldn't be recalculated if + // inherited from parent_id effective visibility isn't changed at next level + if !(inherited_effective_vis_at_prev_level == inherited_effective_vis_at_level + && level != l) + { + calculated_effective_vis = + if nominal_vis.is_at_least(inherited_effective_vis_at_level, tree) { + inherited_effective_vis_at_level + } else { + nominal_vis + }; + } + // effective visibility can't be decreased at next update call for the + // same id + if *current_effective_vis_at_level != calculated_effective_vis + && calculated_effective_vis + .is_at_least(*current_effective_vis_at_level, tree) + { + changed = true; + *current_effective_vis_at_level = calculated_effective_vis; + } + inherited_effective_vis_at_prev_level = inherited_effective_vis_at_level; + } + } + } + self.map.insert(id, current_effective_vis); + changed } } -impl<Id> Default for AccessLevels<Id> { +impl<Id> Default for EffectiveVisibilities<Id> { fn default() -> Self { - AccessLevels { map: Default::default() } + EffectiveVisibilities { map: Default::default() } } } -impl<'a> HashStable<StableHashingContext<'a>> for AccessLevels { +impl<'a> HashStable<StableHashingContext<'a>> for EffectiveVisibilities { fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { - let AccessLevels { ref map } = *self; + let EffectiveVisibilities { ref map } = *self; map.hash_stable(hcx, hasher); } } diff --git a/compiler/rustc_middle/src/middle/resolve_lifetime.rs b/compiler/rustc_middle/src/middle/resolve_lifetime.rs index 9b2f44567..c3bf1c717 100644 --- 
a/compiler/rustc_middle/src/middle/resolve_lifetime.rs +++ b/compiler/rustc_middle/src/middle/resolve_lifetime.rs @@ -2,15 +2,15 @@ use crate::ty; -use rustc_data_structures::fx::{FxHashMap, FxHashSet}; -use rustc_hir::def_id::{DefId, LocalDefId}; -use rustc_hir::ItemLocalId; +use rustc_data_structures::fx::FxHashMap; +use rustc_hir::def_id::DefId; +use rustc_hir::{ItemLocalId, OwnerId}; use rustc_macros::HashStable; #[derive(Clone, Copy, PartialEq, Eq, Hash, TyEncodable, TyDecodable, Debug, HashStable)] pub enum Region { Static, - EarlyBound(/* index */ u32, /* lifetime decl */ DefId), + EarlyBound(/* lifetime decl */ DefId), LateBound(ty::DebruijnIndex, /* late-bound index */ u32, /* lifetime decl */ DefId), Free(DefId, /* lifetime decl */ DefId), } @@ -35,7 +35,13 @@ impl<T: PartialEq> Set1<T> { } } -pub type ObjectLifetimeDefault = Set1<Region>; +#[derive(Copy, Clone, Debug, HashStable, Encodable, Decodable)] +pub enum ObjectLifetimeDefault { + Empty, + Static, + Ambiguous, + Param(DefId), +} /// Maps the id of each lifetime reference to the lifetime decl /// that it corresponds to. @@ -43,12 +49,7 @@ pub type ObjectLifetimeDefault = Set1<Region>; pub struct ResolveLifetimes { /// Maps from every use of a named (not anonymous) lifetime to a /// `Region` describing how that region is bound - pub defs: FxHashMap<LocalDefId, FxHashMap<ItemLocalId, Region>>, - - /// Set of lifetime def ids that are late-bound; a region can - /// be late-bound if (a) it does NOT appear in a where-clause and - /// (b) it DOES appear in the arguments. - pub late_bound: FxHashMap<LocalDefId, FxHashSet<LocalDefId>>, + pub defs: FxHashMap<OwnerId, FxHashMap<ItemLocalId, Region>>, - pub late_bound_vars: FxHashMap<LocalDefId, FxHashMap<ItemLocalId, Vec<ty::BoundVariableKind>>>, + pub late_bound_vars: FxHashMap<OwnerId, FxHashMap<ItemLocalId, Vec<ty::BoundVariableKind>>>, } diff --git a/compiler/rustc_middle/src/middle/stability.rs b/compiler/rustc_middle/src/middle/stability.rs index 414912dd0..61bc089e4 100644 --- a/compiler/rustc_middle/src/middle/stability.rs +++ b/compiler/rustc_middle/src/middle/stability.rs @@ -5,7 +5,7 @@ pub use self::StabilityLevel::*; use crate::ty::{self, DefIdTree, TyCtxt}; use rustc_ast::NodeId; -use rustc_attr::{self as attr, ConstStability, Deprecation, Stability}; +use rustc_attr::{self as attr, ConstStability, DefaultBodyStability, Deprecation, Stability}; use rustc_data_structures::fx::FxHashMap; use rustc_errors::{Applicability, Diagnostic}; use rustc_feature::GateIssue; @@ -61,6 +61,7 @@ pub struct Index { /// are filled by the annotator. pub stab_map: FxHashMap<LocalDefId, Stability>, pub const_stab_map: FxHashMap<LocalDefId, ConstStability>, + pub default_body_stab_map: FxHashMap<LocalDefId, DefaultBodyStability>, pub depr_map: FxHashMap<LocalDefId, DeprecationEntry>, /// Mapping from feature name to feature name based on the `implied_by` field of `#[unstable]` /// attributes. 
If a `#[unstable(feature = "implier", implied_by = "impliee")]` attribute @@ -86,6 +87,10 @@ impl Index { self.const_stab_map.get(&def_id).copied() } + pub fn local_default_body_stability(&self, def_id: LocalDefId) -> Option<DefaultBodyStability> { + self.default_body_stab_map.get(&def_id).copied() + } + pub fn local_deprecation_entry(&self, def_id: LocalDefId) -> Option<DeprecationEntry> { self.depr_map.get(&def_id).cloned() } @@ -248,13 +253,12 @@ fn late_report_deprecation( return; } let method_span = method_span.unwrap_or(span); - tcx.struct_span_lint_hir(lint, hir_id, method_span, |lint| { - let mut diag = lint.build(message); + tcx.struct_span_lint_hir(lint, hir_id, method_span, message, |diag| { if let hir::Node::Expr(_) = tcx.hir().get(hir_id) { let kind = tcx.def_kind(def_id).descr(def_id); - deprecation_suggestion(&mut diag, kind, suggestion, method_span); + deprecation_suggestion(diag, kind, suggestion, method_span); } - diag.emit(); + diag }); } @@ -288,7 +292,7 @@ fn skip_stability_check_due_to_privacy(tcx: TyCtxt<'_>, def_id: DefId) -> bool { // These are not visible outside crate; therefore // stability markers are irrelevant, if even present. - ty::Visibility::Restricted(..) | ty::Visibility::Invisible => true, + ty::Visibility::Restricted(..) => true, } } @@ -416,6 +420,12 @@ impl<'tcx> TyCtxt<'tcx> { return EvalResult::Allow; } + // Only the cross-crate scenario matters when checking unstable APIs + let cross_crate = !def_id.is_local(); + if !cross_crate { + return EvalResult::Allow; + } + let stability = self.lookup_stability(def_id); debug!( "stability: \ @@ -423,12 +433,6 @@ impl<'tcx> TyCtxt<'tcx> { def_id, span, stability ); - // Only the cross-crate scenario matters when checking unstable APIs - let cross_crate = !def_id.is_local(); - if !cross_crate { - return EvalResult::Allow; - } - // Issue #38412: private items lack stability markers. if skip_stability_check_due_to_privacy(self, def_id) { return EvalResult::Allow; @@ -492,6 +496,62 @@ impl<'tcx> TyCtxt<'tcx> { } } + /// Evaluates the default-impl stability of an item. + /// + /// Returns `EvalResult::Allow` if the item's default implementation is stable, or unstable but the corresponding + /// `#![feature]` has been provided. Returns `EvalResult::Deny` which describes the offending + /// unstable feature otherwise. + pub fn eval_default_body_stability(self, def_id: DefId, span: Span) -> EvalResult { + let is_staged_api = self.lookup_stability(def_id.krate.as_def_id()).is_some(); + if !is_staged_api { + return EvalResult::Allow; + } + + // Only the cross-crate scenario matters when checking unstable APIs + let cross_crate = !def_id.is_local(); + if !cross_crate { + return EvalResult::Allow; + } + + let stability = self.lookup_default_body_stability(def_id); + debug!( + "body stability: inspecting def_id={def_id:?} span={span:?} of stability={stability:?}" + ); + + // Issue #38412: private items lack stability markers. + if skip_stability_check_due_to_privacy(self, def_id) { + return EvalResult::Allow; + } + + match stability { + Some(DefaultBodyStability { + level: attr::Unstable { reason, issue, is_soft, .. 
}, + feature, + }) => { + if span.allows_unstable(feature) { + debug!("body stability: skipping span={:?} since it is internal", span); + return EvalResult::Allow; + } + if self.features().active(feature) { + return EvalResult::Allow; + } + + EvalResult::Deny { + feature, + reason: reason.to_opt_reason(), + issue, + suggestion: None, + is_soft, + } + } + Some(_) => { + // Stable APIs are always ok to call + EvalResult::Allow + } + None => EvalResult::Unmarked, + } + } + /// Checks if an item is stable or error out. /// /// If the item defined by `def_id` is unstable and the corresponding `#![feature]` does not @@ -560,9 +620,7 @@ impl<'tcx> TyCtxt<'tcx> { unmarked: impl FnOnce(Span, DefId), ) -> bool { let soft_handler = |lint, span, msg: &_| { - self.struct_span_lint_hir(lint, id.unwrap_or(hir::CRATE_HIR_ID), span, |lint| { - lint.build(msg).emit(); - }) + self.struct_span_lint_hir(lint, id.unwrap_or(hir::CRATE_HIR_ID), span, msg, |lint| lint) }; let eval_result = self.eval_stability_allow_unstable(def_id, id, span, method_span, allow_unstable); diff --git a/compiler/rustc_middle/src/mir/basic_blocks.rs b/compiler/rustc_middle/src/mir/basic_blocks.rs index 78080fcd5..752cbdeae 100644 --- a/compiler/rustc_middle/src/mir/basic_blocks.rs +++ b/compiler/rustc_middle/src/mir/basic_blocks.rs @@ -86,7 +86,7 @@ impl<'tcx> BasicBlocks<'tcx> { /// /// You will only ever need this if you have also called [`BasicBlocks::as_mut_preserves_cfg`]. /// All other methods that allow you to mutate the basic blocks also call this method - /// themselves, thereby avoiding any risk of accidentaly cache invalidation. + /// themselves, thereby avoiding any risk of accidentally cache invalidation. pub fn invalidate_cfg_cache(&mut self) { self.predecessor_cache.invalidate(); self.switch_source_cache.invalidate(); diff --git a/compiler/rustc_middle/src/mir/generic_graph.rs b/compiler/rustc_middle/src/mir/generic_graph.rs index f3621cd99..d1f3561c0 100644 --- a/compiler/rustc_middle/src/mir/generic_graph.rs +++ b/compiler/rustc_middle/src/mir/generic_graph.rs @@ -12,14 +12,14 @@ pub fn mir_fn_to_generic_graph<'tcx>(tcx: TyCtxt<'tcx>, body: &Body<'_>) -> Grap // Nodes let nodes: Vec<Node> = body - .basic_blocks() + .basic_blocks .iter_enumerated() .map(|(block, _)| bb_to_graph_node(block, body, dark_mode)) .collect(); // Edges let mut edges = Vec::new(); - for (source, _) in body.basic_blocks().iter_enumerated() { + for (source, _) in body.basic_blocks.iter_enumerated() { let def_id = body.source.def_id(); let terminator = body[source].terminator(); let labels = terminator.kind.fmt_successor_labels(); diff --git a/compiler/rustc_middle/src/mir/interpret/allocation.rs b/compiler/rustc_middle/src/mir/interpret/allocation.rs index db7e0fb8a..37ec04b07 100644 --- a/compiler/rustc_middle/src/mir/interpret/allocation.rs +++ b/compiler/rustc_middle/src/mir/interpret/allocation.rs @@ -16,8 +16,8 @@ use rustc_target::abi::{Align, HasDataLayout, Size}; use super::{ read_target_uint, write_target_uint, AllocId, InterpError, InterpResult, Pointer, Provenance, - ResourceExhaustionInfo, Scalar, ScalarMaybeUninit, ScalarSizeMismatch, UndefinedBehaviorInfo, - UninitBytesAccess, UnsupportedOpInfo, + ResourceExhaustionInfo, Scalar, ScalarSizeMismatch, UndefinedBehaviorInfo, UninitBytesAccess, + UnsupportedOpInfo, }; use crate::ty; @@ -34,11 +34,11 @@ pub struct Allocation<Prov = AllocId, Extra = ()> { /// The actual bytes of the allocation. /// Note that the bytes of a pointer represent the offset of the pointer. 
bytes: Box<[u8]>, - /// Maps from byte addresses to extra data for each pointer. + /// Maps from byte addresses to extra provenance data for each pointer. /// Only the first byte of a pointer is inserted into the map; i.e., /// every entry in this map applies to `pointer_size` consecutive bytes starting /// at the given offset. - relocations: Relocations<Prov>, + provenance: ProvenanceMap<Prov>, /// Denotes which part of this allocation is initialized. init_mask: InitMask, /// The alignment of the allocation to detect unaligned reads. @@ -84,7 +84,7 @@ impl hash::Hash for Allocation { } // Hash the other fields as usual. - self.relocations.hash(state); + self.provenance.hash(state); self.init_mask.hash(state); self.align.hash(state); self.mutability.hash(state); @@ -130,6 +130,8 @@ pub enum AllocError { ReadPointerAsBytes, /// Partially overwriting a pointer. PartialPointerOverwrite(Size), + /// Partially copying a pointer. + PartialPointerCopy(Size), /// Using uninitialized data where it is not allowed. InvalidUninitBytes(Option<UninitBytesAccess>), } @@ -152,6 +154,9 @@ impl AllocError { PartialPointerOverwrite(offset) => InterpError::Unsupported( UnsupportedOpInfo::PartialPointerOverwrite(Pointer::new(alloc_id, offset)), ), + PartialPointerCopy(offset) => InterpError::Unsupported( + UnsupportedOpInfo::PartialPointerCopy(Pointer::new(alloc_id, offset)), + ), InvalidUninitBytes(info) => InterpError::UndefinedBehavior( UndefinedBehaviorInfo::InvalidUninitBytes(info.map(|b| (alloc_id, b))), ), @@ -211,7 +216,7 @@ impl<Prov> Allocation<Prov> { let size = Size::from_bytes(bytes.len()); Self { bytes, - relocations: Relocations::new(), + provenance: ProvenanceMap::new(), init_mask: InitMask::new(size, true), align, mutability, @@ -246,7 +251,7 @@ impl<Prov> Allocation<Prov> { let bytes = unsafe { bytes.assume_init() }; Ok(Allocation { bytes, - relocations: Relocations::new(), + provenance: ProvenanceMap::new(), init_mask: InitMask::new(size, false), align, mutability: Mutability::Mut, @@ -266,22 +271,22 @@ impl Allocation { ) -> Result<Allocation<Prov, Extra>, Err> { // Compute new pointer provenance, which also adjusts the bytes. let mut bytes = self.bytes; - let mut new_relocations = Vec::with_capacity(self.relocations.0.len()); + let mut new_provenance = Vec::with_capacity(self.provenance.0.len()); let ptr_size = cx.data_layout().pointer_size.bytes_usize(); let endian = cx.data_layout().endian; - for &(offset, alloc_id) in self.relocations.iter() { + for &(offset, alloc_id) in self.provenance.iter() { let idx = offset.bytes_usize(); let ptr_bytes = &mut bytes[idx..idx + ptr_size]; let bits = read_target_uint(endian, ptr_bytes).unwrap(); let (ptr_prov, ptr_offset) = adjust_ptr(Pointer::new(alloc_id, Size::from_bytes(bits)))?.into_parts(); write_target_uint(endian, ptr_bytes, ptr_offset.bytes().into()).unwrap(); - new_relocations.push((offset, ptr_prov)); + new_provenance.push((offset, ptr_prov)); } // Create allocation. Ok(Allocation { bytes, - relocations: Relocations::from_presorted(new_relocations), + provenance: ProvenanceMap::from_presorted(new_provenance), init_mask: self.init_mask, align: self.align, mutability: self.mutability, @@ -300,8 +305,8 @@ impl<Prov, Extra> Allocation<Prov, Extra> { Size::from_bytes(self.len()) } - /// Looks at a slice which may describe uninitialized bytes or describe a relocation. 
This differs - /// from `get_bytes_with_uninit_and_ptr` in that it does no relocation checks (even on the + /// Looks at a slice which may contain uninitialized bytes or provenance. This differs + /// from `get_bytes_with_uninit_and_ptr` in that it does no provenance checks (even on the /// edges) at all. /// This must not be used for reads affecting the interpreter execution. pub fn inspect_with_uninit_and_ptr_outside_interpreter(&self, range: Range<usize>) -> &[u8] { @@ -313,74 +318,47 @@ impl<Prov, Extra> Allocation<Prov, Extra> { &self.init_mask } - /// Returns the relocation list. - pub fn relocations(&self) -> &Relocations<Prov> { - &self.relocations + /// Returns the provenance map. + pub fn provenance(&self) -> &ProvenanceMap<Prov> { + &self.provenance } } /// Byte accessors. impl<Prov: Provenance, Extra> Allocation<Prov, Extra> { /// This is the entirely abstraction-violating way to just grab the raw bytes without - /// caring about relocations. It just deduplicates some code between `read_scalar` - /// and `get_bytes_internal`. - fn get_bytes_even_more_internal(&self, range: AllocRange) -> &[u8] { - &self.bytes[range.start.bytes_usize()..range.end().bytes_usize()] - } - - /// The last argument controls whether we error out when there are uninitialized or pointer - /// bytes. However, we *always* error when there are relocations overlapping the edges of the - /// range. - /// - /// You should never call this, call `get_bytes` or `get_bytes_with_uninit_and_ptr` instead, + /// caring about provenance or initialization. /// /// This function also guarantees that the resulting pointer will remain stable /// even when new allocations are pushed to the `HashMap`. `mem_copy_repeatedly` relies /// on that. - /// - /// It is the caller's responsibility to check bounds and alignment beforehand. - fn get_bytes_internal( - &self, - cx: &impl HasDataLayout, - range: AllocRange, - check_init_and_ptr: bool, - ) -> AllocResult<&[u8]> { - if check_init_and_ptr { - self.check_init(range)?; - self.check_relocations(cx, range)?; - } else { - // We still don't want relocations on the *edges*. - self.check_relocation_edges(cx, range)?; - } - - Ok(self.get_bytes_even_more_internal(range)) + #[inline] + pub fn get_bytes_unchecked(&self, range: AllocRange) -> &[u8] { + &self.bytes[range.start.bytes_usize()..range.end().bytes_usize()] } - /// Checks that these bytes are initialized and not pointer bytes, and then return them - /// as a slice. + /// Checks that these bytes are initialized, and then strip provenance (if possible) and return + /// them. /// /// It is the caller's responsibility to check bounds and alignment beforehand. /// Most likely, you want to use the `PlaceTy` and `OperandTy`-based methods /// on `InterpCx` instead. #[inline] - pub fn get_bytes(&self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult<&[u8]> { - self.get_bytes_internal(cx, range, true) - } - - /// It is the caller's responsibility to handle uninitialized and pointer bytes. - /// However, this still checks that there are no relocations on the *edges*. - /// - /// It is the caller's responsibility to check bounds and alignment beforehand. 
- #[inline] - pub fn get_bytes_with_uninit_and_ptr( + pub fn get_bytes_strip_provenance( &self, cx: &impl HasDataLayout, range: AllocRange, ) -> AllocResult<&[u8]> { - self.get_bytes_internal(cx, range, false) + self.check_init(range)?; + if !Prov::OFFSET_IS_ADDR { + if self.range_has_provenance(cx, range) { + return Err(AllocError::ReadPointerAsBytes); + } + } + Ok(self.get_bytes_unchecked(range)) } - /// Just calling this already marks everything as defined and removes relocations, + /// Just calling this already marks everything as defined and removes provenance, /// so be sure to actually put data there! /// /// It is the caller's responsibility to check bounds and alignment beforehand. @@ -392,7 +370,7 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> { range: AllocRange, ) -> AllocResult<&mut [u8]> { self.mark_init(range, true); - self.clear_relocations(cx, range)?; + self.clear_provenance(cx, range)?; Ok(&mut self.bytes[range.start.bytes_usize()..range.end().bytes_usize()]) } @@ -404,7 +382,7 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> { range: AllocRange, ) -> AllocResult<*mut [u8]> { self.mark_init(range, true); - self.clear_relocations(cx, range)?; + self.clear_provenance(cx, range)?; assert!(range.end().bytes_usize() <= self.bytes.len()); // need to do our own bounds-check let begin_ptr = self.bytes.as_mut_ptr().wrapping_add(range.start.bytes_usize()); @@ -415,28 +393,6 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> { /// Reading and writing. impl<Prov: Provenance, Extra> Allocation<Prov, Extra> { - /// Validates that `ptr.offset` and `ptr.offset + size` do not point to the middle of a - /// relocation. If `allow_uninit`/`allow_ptr` is `false`, also enforces that the memory in the - /// given range contains no uninitialized bytes/relocations. - pub fn check_bytes( - &self, - cx: &impl HasDataLayout, - range: AllocRange, - allow_uninit: bool, - allow_ptr: bool, - ) -> AllocResult { - // Check bounds and relocations on the edges. - self.get_bytes_with_uninit_and_ptr(cx, range)?; - // Check uninit and ptr. - if !allow_uninit { - self.check_init(range)?; - } - if !allow_ptr { - self.check_relocations(cx, range)?; - } - Ok(()) - } - /// Reads a *non-ZST* scalar. /// /// If `read_provenance` is `true`, this will also read provenance; otherwise (if the machine @@ -452,47 +408,55 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> { cx: &impl HasDataLayout, range: AllocRange, read_provenance: bool, - ) -> AllocResult<ScalarMaybeUninit<Prov>> { - if read_provenance { - assert_eq!(range.size, cx.data_layout().pointer_size); - } - + ) -> AllocResult<Scalar<Prov>> { // First and foremost, if anything is uninit, bail. if self.is_init(range).is_err() { - // This inflates uninitialized bytes to the entire scalar, even if only a few - // bytes are uninitialized. - return Ok(ScalarMaybeUninit::Uninit); + return Err(AllocError::InvalidUninitBytes(None)); } - // If we are doing a pointer read, and there is a relocation exactly where we - // are reading, then we can put data and relocation back together and return that. - if read_provenance && let Some(&prov) = self.relocations.get(&range.start) { - // We already checked init and relocations, so we can use this function. - let bytes = self.get_bytes_even_more_internal(range); - let bits = read_target_uint(cx.data_layout().endian, bytes).unwrap(); - let ptr = Pointer::new(prov, Size::from_bytes(bits)); - return Ok(ScalarMaybeUninit::from_pointer(ptr, cx)); - } + // Get the integer part of the result. 
We HAVE TO check provenance before returning this! + let bytes = self.get_bytes_unchecked(range); + let bits = read_target_uint(cx.data_layout().endian, bytes).unwrap(); - // If we are *not* reading a pointer, and we can just ignore relocations, - // then do exactly that. - if !read_provenance && Prov::OFFSET_IS_ADDR { - // We just strip provenance. - let bytes = self.get_bytes_even_more_internal(range); - let bits = read_target_uint(cx.data_layout().endian, bytes).unwrap(); - return Ok(ScalarMaybeUninit::Scalar(Scalar::from_uint(bits, range.size))); + if read_provenance { + assert_eq!(range.size, cx.data_layout().pointer_size); + + // When reading data with provenance, the easy case is finding provenance exactly where we + // are reading, then we can put data and provenance back together and return that. + if let Some(&prov) = self.provenance.get(&range.start) { + // Now we can return the bits, with their appropriate provenance. + let ptr = Pointer::new(prov, Size::from_bytes(bits)); + return Ok(Scalar::from_pointer(ptr, cx)); + } + + // If we can work on pointers byte-wise, join the byte-wise provenances. + if Prov::OFFSET_IS_ADDR { + let mut prov = self.offset_get_provenance(cx, range.start); + for offset in 1..range.size.bytes() { + let this_prov = + self.offset_get_provenance(cx, range.start + Size::from_bytes(offset)); + prov = Prov::join(prov, this_prov); + } + // Now use this provenance. + let ptr = Pointer::new(prov, Size::from_bytes(bits)); + return Ok(Scalar::from_maybe_pointer(ptr, cx)); + } + } else { + // We are *not* reading a pointer. + // If we can just ignore provenance, do exactly that. + if Prov::OFFSET_IS_ADDR { + // We just strip provenance. + return Ok(Scalar::from_uint(bits, range.size)); + } } - // It's complicated. Better make sure there is no provenance anywhere. - // FIXME: If !OFFSET_IS_ADDR, this is the best we can do. But if OFFSET_IS_ADDR, then - // `read_pointer` is true and we ideally would distinguish the following two cases: - // - The entire `range` is covered by 2 relocations for the same provenance. - // Then we should return a pointer with that provenance. - // - The range has inhomogeneous provenance. Then we should return just the - // underlying bits. - let bytes = self.get_bytes(cx, range)?; - let bits = read_target_uint(cx.data_layout().endian, bytes).unwrap(); - Ok(ScalarMaybeUninit::Scalar(Scalar::from_uint(bits, range.size))) + // Fallback path for when we cannot treat provenance bytewise or ignore it. + assert!(!Prov::OFFSET_IS_ADDR); + if self.range_has_provenance(cx, range) { + return Err(AllocError::ReadPointerAsBytes); + } + // There is no provenance, we can just return the bits. + Ok(Scalar::from_uint(bits, range.size)) } /// Writes a *non-ZST* scalar. @@ -507,17 +471,10 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> { &mut self, cx: &impl HasDataLayout, range: AllocRange, - val: ScalarMaybeUninit<Prov>, + val: Scalar<Prov>, ) -> AllocResult { assert!(self.mutability == Mutability::Mut); - let val = match val { - ScalarMaybeUninit::Scalar(scalar) => scalar, - ScalarMaybeUninit::Uninit => { - return self.write_uninit(cx, range); - } - }; - // `to_bits_or_ptr_internal` is the right method because we just want to store this data // as-is into memory. let (bytes, provenance) = match val.to_bits_or_ptr_internal(range.size)? 
{ @@ -532,9 +489,9 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> { let dst = self.get_bytes_mut(cx, range)?; write_target_uint(endian, dst, bytes).unwrap(); - // See if we have to also write a relocation. + // See if we have to also store some provenance. if let Some(provenance) = provenance { - self.relocations.0.insert(range.start, provenance); + self.provenance.0.insert(range.start, provenance); } Ok(()) @@ -543,64 +500,65 @@ impl<Prov: Provenance, Extra> Allocation<Prov, Extra> { /// Write "uninit" to the given memory range. pub fn write_uninit(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult { self.mark_init(range, false); - self.clear_relocations(cx, range)?; + self.clear_provenance(cx, range)?; return Ok(()); } } -/// Relocations. +/// Provenance. impl<Prov: Copy, Extra> Allocation<Prov, Extra> { - /// Returns all relocations overlapping with the given pointer-offset pair. - fn get_relocations(&self, cx: &impl HasDataLayout, range: AllocRange) -> &[(Size, Prov)] { + /// Returns all provenance overlapping with the given pointer-offset pair. + fn range_get_provenance(&self, cx: &impl HasDataLayout, range: AllocRange) -> &[(Size, Prov)] { // We have to go back `pointer_size - 1` bytes, as that one would still overlap with // the beginning of this range. let start = range.start.bytes().saturating_sub(cx.data_layout().pointer_size.bytes() - 1); - self.relocations.range(Size::from_bytes(start)..range.end()) + self.provenance.range(Size::from_bytes(start)..range.end()) } - /// Returns whether this allocation has relocations overlapping with the given range. - /// - /// Note: this function exists to allow `get_relocations` to be private, in order to somewhat - /// limit access to relocations outside of the `Allocation` abstraction. - /// - pub fn has_relocations(&self, cx: &impl HasDataLayout, range: AllocRange) -> bool { - !self.get_relocations(cx, range).is_empty() + /// Get the provenance of a single byte. + fn offset_get_provenance(&self, cx: &impl HasDataLayout, offset: Size) -> Option<Prov> { + let prov = self.range_get_provenance(cx, alloc_range(offset, Size::from_bytes(1))); + assert!(prov.len() <= 1); + prov.first().map(|(_offset, prov)| *prov) } - /// Checks that there are no relocations overlapping with the given range. - #[inline(always)] - fn check_relocations(&self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult { - if self.has_relocations(cx, range) { Err(AllocError::ReadPointerAsBytes) } else { Ok(()) } + /// Returns whether this allocation has provenance overlapping with the given range. + /// + /// Note: this function exists to allow `range_get_provenance` to be private, in order to somewhat + /// limit access to provenance outside of the `Allocation` abstraction. + /// + pub fn range_has_provenance(&self, cx: &impl HasDataLayout, range: AllocRange) -> bool { + !self.range_get_provenance(cx, range).is_empty() } - /// Removes all relocations inside the given range. - /// If there are relocations overlapping with the edges, they + /// Removes all provenance inside the given range. + /// If there are provenance entries overlapping with the edges, they /// are removed as well *and* the bytes they cover are marked as /// uninitialized. This is a somewhat odd "spooky action at a distance", /// but it allows strictly more code to run than if we would just error /// immediately in that case.
- fn clear_relocations(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult + fn clear_provenance(&mut self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult where Prov: Provenance, { - // Find the start and end of the given range and its outermost relocations. + // Find the start and end of the given range and its outermost provenance. let (first, last) = { - // Find all relocations overlapping the given range. - let relocations = self.get_relocations(cx, range); - if relocations.is_empty() { + // Find all provenance overlapping the given range. + let provenance = self.range_get_provenance(cx, range); + if provenance.is_empty() { return Ok(()); } ( - relocations.first().unwrap().0, - relocations.last().unwrap().0 + cx.data_layout().pointer_size, + provenance.first().unwrap().0, + provenance.last().unwrap().0 + cx.data_layout().pointer_size, ) }; let start = range.start; let end = range.end(); - // We need to handle clearing the relocations from parts of a pointer. - // FIXME: Miri should preserve partial relocations; see + // We need to handle clearing the provenance from parts of a pointer. + // FIXME: Miri should preserve partial provenance; see // https://github.com/rust-lang/miri/issues/2181. if first < start { if Prov::ERR_ON_PARTIAL_PTR_OVERWRITE { @@ -623,41 +581,32 @@ impl<Prov: Copy, Extra> Allocation<Prov, Extra> { self.init_mask.set_range(end, last, false); } - // Forget all the relocations. - // Since relocations do not overlap, we know that removing until `last` (exclusive) is fine, - // i.e., this will not remove any other relocations just after the ones we care about. - self.relocations.0.remove_range(first..last); + // Forget all the provenance. + // Since provenance do not overlap, we know that removing until `last` (exclusive) is fine, + // i.e., this will not remove any other provenance just after the ones we care about. + self.provenance.0.remove_range(first..last); Ok(()) } - - /// Errors if there are relocations overlapping with the edges of the - /// given memory range. - #[inline] - fn check_relocation_edges(&self, cx: &impl HasDataLayout, range: AllocRange) -> AllocResult { - self.check_relocations(cx, alloc_range(range.start, Size::ZERO))?; - self.check_relocations(cx, alloc_range(range.end(), Size::ZERO))?; - Ok(()) - } } -/// "Relocations" stores the provenance information of pointers stored in memory. +/// Stores the provenance information of pointers stored in memory. #[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)] -pub struct Relocations<Prov = AllocId>(SortedMap<Size, Prov>); +pub struct ProvenanceMap<Prov = AllocId>(SortedMap<Size, Prov>); -impl<Prov> Relocations<Prov> { +impl<Prov> ProvenanceMap<Prov> { pub fn new() -> Self { - Relocations(SortedMap::new()) + ProvenanceMap(SortedMap::new()) } - // The caller must guarantee that the given relocations are already sorted + // The caller must guarantee that the given provenance list is already sorted // by address and contain no duplicates. pub fn from_presorted(r: Vec<(Size, Prov)>) -> Self { - Relocations(SortedMap::from_presorted_elements(r)) + ProvenanceMap(SortedMap::from_presorted_elements(r)) } } -impl<Prov> Deref for Relocations<Prov> { +impl<Prov> Deref for ProvenanceMap<Prov> { type Target = SortedMap<Size, Prov>; fn deref(&self) -> &Self::Target { @@ -665,36 +614,36 @@ impl<Prov> Deref for Relocations<Prov> { } } -/// A partial, owned list of relocations to transfer into another allocation. 
+/// A partial, owned list of provenance to transfer into another allocation. /// /// Offsets are already adjusted to the destination allocation. -pub struct AllocationRelocations<Prov> { - dest_relocations: Vec<(Size, Prov)>, +pub struct AllocationProvenance<Prov> { + dest_provenance: Vec<(Size, Prov)>, } impl<Prov: Copy, Extra> Allocation<Prov, Extra> { - pub fn prepare_relocation_copy( + pub fn prepare_provenance_copy( &self, cx: &impl HasDataLayout, src: AllocRange, dest: Size, count: u64, - ) -> AllocationRelocations<Prov> { - let relocations = self.get_relocations(cx, src); - if relocations.is_empty() { - return AllocationRelocations { dest_relocations: Vec::new() }; + ) -> AllocationProvenance<Prov> { + let provenance = self.range_get_provenance(cx, src); + if provenance.is_empty() { + return AllocationProvenance { dest_provenance: Vec::new() }; } let size = src.size; - let mut new_relocations = Vec::with_capacity(relocations.len() * (count as usize)); + let mut new_provenance = Vec::with_capacity(provenance.len() * (count as usize)); // If `count` is large, this is rather wasteful -- we are allocating a big array here, which // is mostly filled with redundant information since it's just N copies of the same `Prov`s - // at slightly adjusted offsets. The reason we do this is so that in `mark_relocation_range` + // at slightly adjusted offsets. The reason we do this is so that in `mark_provenance_range` // we can use `insert_presorted`. That wouldn't work with an `Iterator` that just produces - // the right sequence of relocations for all N copies. + // the right sequence of provenance for all N copies. for i in 0..count { - new_relocations.extend(relocations.iter().map(|&(offset, reloc)| { + new_provenance.extend(provenance.iter().map(|&(offset, reloc)| { // compute offset for current repetition let dest_offset = dest + size * i; // `Size` operations ( @@ -705,17 +654,17 @@ impl<Prov: Copy, Extra> Allocation<Prov, Extra> { })); } - AllocationRelocations { dest_relocations: new_relocations } + AllocationProvenance { dest_provenance: new_provenance } } - /// Applies a relocation copy. - /// The affected range, as defined in the parameters to `prepare_relocation_copy` is expected - /// to be clear of relocations. + /// Applies a provenance copy. + /// The affected range, as defined in the parameters to `prepare_provenance_copy` is expected + /// to be clear of provenance. /// /// This is dangerous to use as it can violate internal `Allocation` invariants! /// It only exists to support an efficient implementation of `mem_copy_repeatedly`. - pub fn mark_relocation_range(&mut self, relocations: AllocationRelocations<Prov>) { - self.relocations.0.insert_presorted(relocations.dest_relocations); + pub fn mark_provenance_range(&mut self, provenance: AllocationProvenance<Prov>) { + self.provenance.0.insert_presorted(provenance.dest_provenance); } } diff --git a/compiler/rustc_middle/src/mir/interpret/error.rs b/compiler/rustc_middle/src/mir/interpret/error.rs index cecb55578..b5a50cc15 100644 --- a/compiler/rustc_middle/src/mir/interpret/error.rs +++ b/compiler/rustc_middle/src/mir/interpret/error.rs @@ -401,14 +401,18 @@ impl fmt::Display for UndefinedBehaviorInfo { pub enum UnsupportedOpInfo { /// Free-form case. Only for errors that are never caught! Unsupported(String), - /// Encountered a pointer where we needed raw bytes. - ReadPointerAsBytes, /// Overwriting parts of a pointer; the resulting state cannot be represented in our /// `Allocation` data structure. 
See <https://github.com/rust-lang/miri/issues/2181>. PartialPointerOverwrite(Pointer<AllocId>), + /// Attempting to `copy` parts of a pointer to somewhere else; the resulting state cannot be + /// represented in our `Allocation` data structure. See + /// <https://github.com/rust-lang/miri/issues/2181>. + PartialPointerCopy(Pointer<AllocId>), // // The variants below are only reachable from CTFE/const prop, miri will never emit them. // + /// Encountered a pointer where we needed raw bytes. + ReadPointerAsBytes, /// Accessing thread local statics ThreadLocalStatic(DefId), /// Accessing an unsupported extern static. @@ -420,10 +424,13 @@ impl fmt::Display for UnsupportedOpInfo { use UnsupportedOpInfo::*; match self { Unsupported(ref msg) => write!(f, "{msg}"), - ReadPointerAsBytes => write!(f, "unable to turn pointer into raw bytes"), PartialPointerOverwrite(ptr) => { write!(f, "unable to overwrite parts of a pointer in memory at {ptr:?}") } + PartialPointerCopy(ptr) => { + write!(f, "unable to copy parts of a pointer from memory at {ptr:?}") + } + ReadPointerAsBytes => write!(f, "unable to turn pointer into raw bytes"), ThreadLocalStatic(did) => write!(f, "cannot access thread local static ({did:?})"), ReadExternStatic(did) => write!(f, "cannot read from extern static ({did:?})"), } @@ -472,12 +479,7 @@ impl<T: Any> AsAny for T { } /// A trait for machine-specific errors (or other "machine stop" conditions). -pub trait MachineStopType: AsAny + fmt::Display + Send { - /// If `true`, emit a hard error instead of going through the `CONST_ERR` lint - fn is_hard_err(&self) -> bool { - false - } -} +pub trait MachineStopType: AsAny + fmt::Display + Send {} impl dyn MachineStopType { #[inline(always)] @@ -536,16 +538,4 @@ impl InterpError<'_> { | InterpError::UndefinedBehavior(UndefinedBehaviorInfo::Ub(_)) ) } - - /// Should this error be reported as a hard error, preventing compilation, or a soft error, - /// causing a deny-by-default lint? - pub fn is_hard_err(&self) -> bool { - use InterpError::*; - match *self { - MachineStop(ref err) => err.is_hard_err(), - UndefinedBehavior(_) => true, - ResourceExhaustion(ResourceExhaustionInfo::MemoryExhausted) => true, - _ => false, - } - } } diff --git a/compiler/rustc_middle/src/mir/interpret/mod.rs b/compiler/rustc_middle/src/mir/interpret/mod.rs index 967f8ece1..5e3dfcbcc 100644 --- a/compiler/rustc_middle/src/mir/interpret/mod.rs +++ b/compiler/rustc_middle/src/mir/interpret/mod.rs @@ -124,11 +124,11 @@ pub use self::error::{ UninitBytesAccess, UnsupportedOpInfo, }; -pub use self::value::{get_slice_bytes, ConstAlloc, ConstValue, Scalar, ScalarMaybeUninit}; +pub use self::value::{get_slice_bytes, ConstAlloc, ConstValue, Scalar}; pub use self::allocation::{ alloc_range, AllocRange, Allocation, ConstAllocation, InitChunk, InitChunkIter, InitMask, - Relocations, + ProvenanceMap, }; pub use self::pointer::{Pointer, PointerArithmetic, Provenance}; @@ -137,7 +137,7 @@ pub use self::pointer::{Pointer, PointerArithmetic, Provenance}; /// - A constant /// - A static #[derive(Copy, Clone, Debug, Eq, PartialEq, Hash, TyEncodable, TyDecodable)] -#[derive(HashStable, Lift)] +#[derive(HashStable, Lift, TypeFoldable, TypeVisitable)] pub struct GlobalId<'tcx> { /// For a constant or static, the `Instance` of the item itself. /// For a promoted global, the `Instance` of the function they belong to. 
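Aside from the diff itself: the error.rs hunk above removes the `is_hard_err` hook from `MachineStopType` (together with `InterpError::is_hard_err`), so a machine-specific stop condition now only needs the `AsAny + fmt::Display + Send` bounds. Below is a minimal, self-contained sketch of an implementor under that shape; the `AsAny` and `MachineStopType` traits here are local stand-ins mirroring the post-patch definitions so the snippet builds on its own, and `TerminationInfo` is a hypothetical type, not taken from this patch.

use std::any::Any;
use std::fmt;

// Local stand-ins mirroring the post-patch trait shapes from
// compiler/rustc_middle/src/mir/interpret/error.rs, so this sketch builds on its own.
trait AsAny: Any {
    fn as_any(&self) -> &dyn Any;
}

impl<T: Any> AsAny for T {
    fn as_any(&self) -> &dyn Any {
        self
    }
}

// After this patch the trait carries no `is_hard_err` method any more.
trait MachineStopType: AsAny + fmt::Display + Send {}

// Hypothetical machine-specific stop conditions (the name and variants are illustrative).
#[derive(Debug)]
enum TerminationInfo {
    Exit(i64),
    Abort(String),
}

impl fmt::Display for TerminationInfo {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            TerminationInfo::Exit(code) => write!(f, "the evaluated program exited with code {code}"),
            TerminationInfo::Abort(msg) => write!(f, "abort: {msg}"),
        }
    }
}

// The point of the change: nothing is left to implement beyond the supertraits.
impl MachineStopType for TerminationInfo {}

fn main() {
    let stops: Vec<Box<dyn MachineStopType>> = vec![
        Box::new(TerminationInfo::Exit(0)),
        Box::new(TerminationInfo::Abort("explicit abort".to_string())),
    ];
    for stop in &stops {
        // Downcasting still works through the `AsAny` supertrait.
        assert!(stop.as_any().is::<TerminationInfo>());
        println!("interpreter stopped: {stop}");
    }
}

With the hook gone, whether an interpreter error is reported as a hard error is no longer decided by the error type itself.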
diff --git a/compiler/rustc_middle/src/mir/interpret/pointer.rs b/compiler/rustc_middle/src/mir/interpret/pointer.rs index 384954cbb..23c2ce647 100644 --- a/compiler/rustc_middle/src/mir/interpret/pointer.rs +++ b/compiler/rustc_middle/src/mir/interpret/pointer.rs @@ -43,7 +43,7 @@ pub trait PointerArithmetic: HasDataLayout { let val = val as i64; // Now wrap-around into the machine_isize range. if val > self.machine_isize_max() { - // This can only happen the the ptr size is < 64, so we know max_usize_plus_1 fits into + // This can only happen if the ptr size is < 64, so we know max_usize_plus_1 fits into // i64. debug_assert!(self.pointer_size().bits() < 64); let max_usize_plus_1 = 1u128 << self.pointer_size().bits(); @@ -107,8 +107,12 @@ impl<T: HasDataLayout> PointerArithmetic for T {} /// pointer), but `derive` adds some unnecessary bounds. pub trait Provenance: Copy + fmt::Debug { /// Says whether the `offset` field of `Pointer`s with this provenance is the actual physical address. - /// If `true, ptr-to-int casts work by simply discarding the provenance. - /// If `false`, ptr-to-int casts are not supported. The offset *must* be relative in that case. + /// - If `false`, the offset *must* be relative. This means the bytes representing a pointer are + /// different from what the Abstract Machine prescribes, so the interpreter must prevent any + /// operation that would inspect the underlying bytes of a pointer, such as ptr-to-int + /// transmutation. A `ReadPointerAsBytes` error will be raised in such situations. + /// - If `true`, the interpreter will permit operations to inspect the underlying bytes of a + /// pointer, and implement ptr-to-int transmutation by stripping provenance. const OFFSET_IS_ADDR: bool; /// We also use this trait to control whether to abort execution when a pointer is being partially overwritten @@ -125,6 +129,9 @@ pub trait Provenance: Copy + fmt::Debug { /// Otherwise this function is best-effort (but must agree with `Machine::ptr_get_alloc`). /// (Identifying the offset in that allocation, however, is harder -- use `Memory::ptr_get_alloc` for that.) fn get_alloc_id(self) -> Option<AllocId>; + + /// Defines the 'join' of provenance: what happens when doing a pointer load and different bytes have different provenance. + fn join(left: Option<Self>, right: Option<Self>) -> Option<Self>; } impl Provenance for AllocId { @@ -152,6 +159,10 @@ impl Provenance for AllocId { fn get_alloc_id(self) -> Option<AllocId> { Some(self) } + + fn join(_left: Option<Self>, _right: Option<Self>) -> Option<Self> { + panic!("merging provenance is not supported when `OFFSET_IS_ADDR` is false") + } } /// Represents a pointer in the Miri engine. 
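The new `join` hook above leaves the merging policy entirely to the machine; the `AllocId` impl simply panics, since with `OFFSET_IS_ADDR == false` the interpreter never inspects pointer bytes in a way that would require merging. For a machine where `OFFSET_IS_ADDR` is true, one plausible policy is to keep provenance only when every part agrees. A self-contained sketch of that policy with a hypothetical `Tag` provenance type (not part of this crate):

// Hypothetical provenance tag, standing in for whatever a machine attaches to pointers.
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
struct Tag(u64);

// One possible `join`: the loaded value keeps provenance only if both inputs
// carry the same tag; any disagreement (or missing side) erases it.
fn join(left: Option<Tag>, right: Option<Tag>) -> Option<Tag> {
    if left == right { left } else { None }
}

fn main() {
    assert_eq!(join(Some(Tag(1)), Some(Tag(1))), Some(Tag(1)));
    assert_eq!(join(Some(Tag(1)), Some(Tag(2))), None);
    assert_eq!(join(None, Some(Tag(2))), None);
}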
diff --git a/compiler/rustc_middle/src/mir/interpret/queries.rs b/compiler/rustc_middle/src/mir/interpret/queries.rs index 786927e2d..473894ac1 100644 --- a/compiler/rustc_middle/src/mir/interpret/queries.rs +++ b/compiler/rustc_middle/src/mir/interpret/queries.rs @@ -4,7 +4,9 @@ use crate::mir; use crate::ty::subst::InternalSubsts; use crate::ty::visit::TypeVisitable; use crate::ty::{self, query::TyCtxtAt, query::TyCtxtEnsure, TyCtxt}; +use rustc_hir::def::DefKind; use rustc_hir::def_id::DefId; +use rustc_session::lint; use rustc_span::{Span, DUMMY_SP}; impl<'tcx> TyCtxt<'tcx> { @@ -36,7 +38,7 @@ impl<'tcx> TyCtxt<'tcx> { pub fn const_eval_resolve( self, param_env: ty::ParamEnv<'tcx>, - ct: ty::Unevaluated<'tcx>, + ct: mir::UnevaluatedConst<'tcx>, span: Option<Span>, ) -> EvalToConstValueResult<'tcx> { // Cannot resolve `Unevaluated` constants that contain inference @@ -45,11 +47,15 @@ impl<'tcx> TyCtxt<'tcx> { // // When trying to evaluate constants containing inference variables, // use `Infcx::const_eval_resolve` instead. - if ct.substs.has_infer_types_or_consts() { + if ct.substs.has_non_region_infer() { bug!("did not expect inference variables here"); } - match ty::Instance::resolve_opt_const_arg(self, param_env, ct.def, ct.substs) { + match ty::Instance::resolve_opt_const_arg( + self, param_env, + // FIXME: maybe have a separate version for resolving mir::UnevaluatedConst? + ct.def, ct.substs, + ) { Ok(Some(instance)) => { let cid = GlobalId { instance, promoted: ct.promoted }; self.const_eval_global_id(param_env, cid, span) @@ -63,7 +69,7 @@ impl<'tcx> TyCtxt<'tcx> { pub fn const_eval_resolve_for_typeck( self, param_env: ty::ParamEnv<'tcx>, - ct: ty::Unevaluated<'tcx>, + ct: ty::UnevaluatedConst<'tcx>, span: Option<Span>, ) -> EvalToValTreeResult<'tcx> { // Cannot resolve `Unevaluated` constants that contain inference @@ -72,14 +78,36 @@ impl<'tcx> TyCtxt<'tcx> { // // When trying to evaluate constants containing inference variables, // use `Infcx::const_eval_resolve` instead. - if ct.substs.has_infer_types_or_consts() { + if ct.substs.has_non_region_infer() { bug!("did not expect inference variables here"); } match ty::Instance::resolve_opt_const_arg(self, param_env, ct.def, ct.substs) { Ok(Some(instance)) => { - let cid = GlobalId { instance, promoted: ct.promoted }; - self.const_eval_global_id_for_typeck(param_env, cid, span) + let cid = GlobalId { instance, promoted: None }; + self.const_eval_global_id_for_typeck(param_env, cid, span).inspect(|_| { + // We are emitting the lint here instead of in `is_const_evaluatable` + // as we normalize obligations before checking them, and normalization + // uses this function to evaluate this constant. + // + // @lcnr believes that successfully evaluating even though there are + // used generic parameters is a bug of evaluation, so checking for it + // here does feel somewhat sensible. 
+ if !self.features().generic_const_exprs && ct.substs.has_non_region_param() { + assert!(matches!(self.def_kind(ct.def.did), DefKind::AnonConst)); + let mir_body = self.mir_for_ctfe_opt_const_arg(ct.def); + if mir_body.is_polymorphic { + let Some(local_def_id) = ct.def.did.as_local() else { return }; + self.struct_span_lint_hir( + lint::builtin::CONST_EVALUATABLE_UNCHECKED, + self.hir().local_def_id_to_hir_id(local_def_id), + self.def_span(ct.def.did), + "cannot use constants which depend on generic parameters in types", + |err| err, + ) + } + } + }) } Ok(None) => Err(ErrorHandled::TooGeneric), Err(error_reported) => Err(ErrorHandled::Reported(error_reported)), @@ -211,7 +239,7 @@ impl<'tcx> TyCtxt<'tcx> { self, param_env: ty::ParamEnv<'tcx>, constant: mir::ConstantKind<'tcx>, - ) -> mir::DestructuredMirConstant<'tcx> { + ) -> mir::DestructuredConstant<'tcx> { self.try_destructure_mir_constant(param_env.and(constant)).unwrap() } } diff --git a/compiler/rustc_middle/src/mir/interpret/value.rs b/compiler/rustc_middle/src/mir/interpret/value.rs index 834c114ee..ac5fddb7a 100644 --- a/compiler/rustc_middle/src/mir/interpret/value.rs +++ b/compiler/rustc_middle/src/mir/interpret/value.rs @@ -8,7 +8,7 @@ use rustc_apfloat::{ use rustc_macros::HashStable; use rustc_target::abi::{HasDataLayout, Size}; -use crate::ty::{Lift, ParamEnv, ScalarInt, Ty, TyCtxt}; +use crate::ty::{ParamEnv, ScalarInt, Ty, TyCtxt}; use super::{ AllocId, AllocRange, ConstAllocation, InterpResult, Pointer, PointerArithmetic, Provenance, @@ -27,7 +27,7 @@ pub struct ConstAlloc<'tcx> { /// Represents a constant value in Rust. `Scalar` and `Slice` are optimizations for /// array length computations, enum discriminants and the pattern matching logic. #[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable, Hash)] -#[derive(HashStable)] +#[derive(HashStable, Lift)] pub enum ConstValue<'tcx> { /// Used only for types with `layout::abi::Scalar` ABI. /// @@ -53,22 +53,6 @@ pub enum ConstValue<'tcx> { #[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))] static_assert_size!(ConstValue<'_>, 32); -impl<'a, 'tcx> Lift<'tcx> for ConstValue<'a> { - type Lifted = ConstValue<'tcx>; - fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<ConstValue<'tcx>> { - Some(match self { - ConstValue::Scalar(s) => ConstValue::Scalar(s), - ConstValue::ZeroSized => ConstValue::ZeroSized, - ConstValue::Slice { data, start, end } => { - ConstValue::Slice { data: tcx.lift(data)?, start, end } - } - ConstValue::ByRef { alloc, offset } => { - ConstValue::ByRef { alloc: tcx.lift(alloc)?, offset } - } - }) - } -} - impl<'tcx> ConstValue<'tcx> { #[inline] pub fn try_to_scalar(&self) -> Option<Scalar<AllocId>> { @@ -79,7 +63,7 @@ impl<'tcx> ConstValue<'tcx> { } pub fn try_to_scalar_int(&self) -> Option<ScalarInt> { - Some(self.try_to_scalar()?.assert_int()) + self.try_to_scalar()?.try_to_int().ok() } pub fn try_to_bits(&self, size: Size) -> Option<u128> { @@ -130,9 +114,7 @@ pub enum Scalar<Prov = AllocId> { /// The raw bytes of a simple value. Int(ScalarInt), - /// A pointer into an `Allocation`. An `Allocation` in the `memory` module has a list of - /// relocations, but a `Scalar` is only large enough to contain one, so we just represent the - /// relocation and its associated offset together as a `Pointer` here. + /// A pointer. /// /// We also store the size of the pointer, such that a `Scalar` always knows how big it is. 
/// The size is always the pointer size of the current target, but this is not information @@ -368,6 +350,7 @@ impl<'tcx, Prov: Provenance> Scalar<Prov> { } #[inline(always)] + #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980) pub fn assert_int(self) -> ScalarInt { self.try_to_int().unwrap() } @@ -389,6 +372,7 @@ impl<'tcx, Prov: Provenance> Scalar<Prov> { } #[inline(always)] + #[cfg_attr(debug_assertions, track_caller)] // only in debug builds due to perf (see #98980) pub fn assert_bits(self, target_size: Size) -> u128 { self.to_bits(target_size).unwrap() } @@ -502,145 +486,12 @@ impl<'tcx, Prov: Provenance> Scalar<Prov> { } } -#[derive(Clone, Copy, Eq, PartialEq, TyEncodable, TyDecodable, HashStable, Hash)] -pub enum ScalarMaybeUninit<Prov = AllocId> { - Scalar(Scalar<Prov>), - Uninit, -} - -#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))] -static_assert_size!(ScalarMaybeUninit, 24); - -impl<Prov> From<Scalar<Prov>> for ScalarMaybeUninit<Prov> { - #[inline(always)] - fn from(s: Scalar<Prov>) -> Self { - ScalarMaybeUninit::Scalar(s) - } -} - -// We want the `Debug` output to be readable as it is used by `derive(Debug)` for -// all the Miri types. -impl<Prov: Provenance> fmt::Debug for ScalarMaybeUninit<Prov> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - ScalarMaybeUninit::Uninit => write!(f, "<uninitialized>"), - ScalarMaybeUninit::Scalar(s) => write!(f, "{:?}", s), - } - } -} - -impl<Prov: Provenance> fmt::LowerHex for ScalarMaybeUninit<Prov> { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - ScalarMaybeUninit::Uninit => write!(f, "uninitialized bytes"), - ScalarMaybeUninit::Scalar(s) => write!(f, "{:x}", s), - } - } -} - -impl<Prov> ScalarMaybeUninit<Prov> { - #[inline] - pub fn from_pointer(ptr: Pointer<Prov>, cx: &impl HasDataLayout) -> Self { - ScalarMaybeUninit::Scalar(Scalar::from_pointer(ptr, cx)) - } - - #[inline] - pub fn from_maybe_pointer(ptr: Pointer<Option<Prov>>, cx: &impl HasDataLayout) -> Self { - ScalarMaybeUninit::Scalar(Scalar::from_maybe_pointer(ptr, cx)) - } - - #[inline] - pub fn check_init<'tcx>(self) -> InterpResult<'tcx, Scalar<Prov>> { - match self { - ScalarMaybeUninit::Scalar(scalar) => Ok(scalar), - ScalarMaybeUninit::Uninit => throw_ub!(InvalidUninitBytes(None)), - } - } -} - -impl<'tcx, Prov: Provenance> ScalarMaybeUninit<Prov> { - #[inline(always)] - pub fn to_pointer(self, cx: &impl HasDataLayout) -> InterpResult<'tcx, Pointer<Option<Prov>>> { - self.check_init()?.to_pointer(cx) - } - - #[inline(always)] - pub fn to_bool(self) -> InterpResult<'tcx, bool> { - self.check_init()?.to_bool() - } - - #[inline(always)] - pub fn to_char(self) -> InterpResult<'tcx, char> { - self.check_init()?.to_char() - } - - #[inline(always)] - pub fn to_f32(self) -> InterpResult<'tcx, Single> { - self.check_init()?.to_f32() - } - - #[inline(always)] - pub fn to_f64(self) -> InterpResult<'tcx, Double> { - self.check_init()?.to_f64() - } - - #[inline(always)] - pub fn to_u8(self) -> InterpResult<'tcx, u8> { - self.check_init()?.to_u8() - } - - #[inline(always)] - pub fn to_u16(self) -> InterpResult<'tcx, u16> { - self.check_init()?.to_u16() - } - - #[inline(always)] - pub fn to_u32(self) -> InterpResult<'tcx, u32> { - self.check_init()?.to_u32() - } - - #[inline(always)] - pub fn to_u64(self) -> InterpResult<'tcx, u64> { - self.check_init()?.to_u64() - } - - #[inline(always)] - pub fn to_machine_usize(self, cx: &impl HasDataLayout) -> InterpResult<'tcx, u64> 
{ - self.check_init()?.to_machine_usize(cx) - } - - #[inline(always)] - pub fn to_i8(self) -> InterpResult<'tcx, i8> { - self.check_init()?.to_i8() - } - - #[inline(always)] - pub fn to_i16(self) -> InterpResult<'tcx, i16> { - self.check_init()?.to_i16() - } - - #[inline(always)] - pub fn to_i32(self) -> InterpResult<'tcx, i32> { - self.check_init()?.to_i32() - } - - #[inline(always)] - pub fn to_i64(self) -> InterpResult<'tcx, i64> { - self.check_init()?.to_i64() - } - - #[inline(always)] - pub fn to_machine_isize(self, cx: &impl HasDataLayout) -> InterpResult<'tcx, i64> { - self.check_init()?.to_machine_isize(cx) - } -} - /// Gets the bytes of a constant slice value. pub fn get_slice_bytes<'tcx>(cx: &impl HasDataLayout, val: ConstValue<'tcx>) -> &'tcx [u8] { if let ConstValue::Slice { data, start, end } = val { let len = end - start; data.inner() - .get_bytes( + .get_bytes_strip_provenance( cx, AllocRange { start: Size::from_bytes(start), size: Size::from_bytes(len) }, ) diff --git a/compiler/rustc_middle/src/mir/mod.rs b/compiler/rustc_middle/src/mir/mod.rs index 7ab71f900..79db35a76 100644 --- a/compiler/rustc_middle/src/mir/mod.rs +++ b/compiler/rustc_middle/src/mir/mod.rs @@ -3,22 +3,22 @@ //! [rustc dev guide]: https://rustc-dev-guide.rust-lang.org/mir/index.html use crate::mir::interpret::{ - AllocRange, ConstAllocation, ConstValue, GlobalAlloc, LitToConstInput, Scalar, + AllocRange, ConstAllocation, ConstValue, ErrorHandled, GlobalAlloc, LitToConstInput, Scalar, }; use crate::mir::visit::MirVisitable; use crate::ty::codec::{TyDecoder, TyEncoder}; -use crate::ty::fold::{FallibleTypeFolder, TypeFoldable, TypeSuperFoldable}; +use crate::ty::fold::{FallibleTypeFolder, TypeFoldable}; use crate::ty::print::{FmtPrinter, Printer}; -use crate::ty::subst::{GenericArg, InternalSubsts, Subst, SubstsRef}; -use crate::ty::visit::{TypeSuperVisitable, TypeVisitable, TypeVisitor}; +use crate::ty::visit::{TypeVisitable, TypeVisitor}; use crate::ty::{self, List, Ty, TyCtxt}; use crate::ty::{AdtDef, InstanceDef, ScalarInt, UserTypeAnnotationIndex}; +use crate::ty::{GenericArg, InternalSubsts, SubstsRef}; use rustc_data_structures::captures::Captures; use rustc_errors::ErrorGuaranteed; use rustc_hir::def::{CtorKind, Namespace}; use rustc_hir::def_id::{DefId, LocalDefId, CRATE_DEF_ID}; -use rustc_hir::{self, GeneratorKind}; +use rustc_hir::{self, GeneratorKind, ImplicitSelfKind}; use rustc_hir::{self as hir, HirId}; use rustc_session::Session; use rustc_target::abi::{Size, VariantIdx}; @@ -116,11 +116,6 @@ pub trait MirPass<'tcx> { fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>); - /// If this pass causes the MIR to enter a new phase, return that phase. - fn phase_change(&self) -> Option<MirPhase> { - None - } - fn is_mir_dump_enabled(&self) -> bool { true } @@ -128,8 +123,49 @@ pub trait MirPass<'tcx> { impl MirPhase { /// Gets the index of the current MirPhase within the set of all `MirPhase`s. + /// + /// FIXME(JakobDegen): Return a `(usize, usize)` instead. 
pub fn phase_index(&self) -> usize { - *self as usize + const BUILT_PHASE_COUNT: usize = 1; + const ANALYSIS_PHASE_COUNT: usize = 2; + match self { + MirPhase::Built => 1, + MirPhase::Analysis(analysis_phase) => { + 1 + BUILT_PHASE_COUNT + (*analysis_phase as usize) + } + MirPhase::Runtime(runtime_phase) => { + 1 + BUILT_PHASE_COUNT + ANALYSIS_PHASE_COUNT + (*runtime_phase as usize) + } + } + } +} + +impl Display for MirPhase { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + MirPhase::Built => write!(f, "built"), + MirPhase::Analysis(p) => write!(f, "analysis-{}", p), + MirPhase::Runtime(p) => write!(f, "runtime-{}", p), + } + } +} + +impl Display for AnalysisPhase { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + AnalysisPhase::Initial => write!(f, "initial"), + AnalysisPhase::PostCleanup => write!(f, "post_cleanup"), + } + } +} + +impl Display for RuntimePhase { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + match self { + RuntimePhase::Initial => write!(f, "initial"), + RuntimePhase::PostCleanup => write!(f, "post_cleanup"), + RuntimePhase::Optimized => write!(f, "optimized"), + } } } @@ -195,6 +231,9 @@ pub struct Body<'tcx> { /// us to see the difference and forego optimization on the inlined promoted items. pub phase: MirPhase, + /// How many passses we have executed since starting the current phase. Used for debug output. + pub pass_count: usize, + pub source: MirSource<'tcx>, /// A list of source scopes; these are referenced by statements @@ -280,6 +319,7 @@ impl<'tcx> Body<'tcx> { let mut body = Body { phase: MirPhase::Built, + pass_count: 1, source, basic_blocks: BasicBlocks::new(basic_blocks), source_scopes, @@ -301,7 +341,7 @@ impl<'tcx> Body<'tcx> { is_polymorphic: false, tainted_by_errors, }; - body.is_polymorphic = body.has_param_types_or_consts(); + body.is_polymorphic = body.has_non_region_param(); body } @@ -313,6 +353,7 @@ impl<'tcx> Body<'tcx> { pub fn new_cfg_only(basic_blocks: IndexVec<BasicBlock, BasicBlockData<'tcx>>) -> Self { let mut body = Body { phase: MirPhase::Built, + pass_count: 1, source: MirSource::item(CRATE_DEF_ID.to_def_id()), basic_blocks: BasicBlocks::new(basic_blocks), source_scopes: IndexVec::new(), @@ -327,16 +368,11 @@ impl<'tcx> Body<'tcx> { is_polymorphic: false, tainted_by_errors: None, }; - body.is_polymorphic = body.has_param_types_or_consts(); + body.is_polymorphic = body.has_non_region_param(); body } #[inline] - pub fn basic_blocks(&self) -> &IndexVec<BasicBlock, BasicBlockData<'tcx>> { - &self.basic_blocks - } - - #[inline] pub fn basic_blocks_mut(&mut self) -> &mut IndexVec<BasicBlock, BasicBlockData<'tcx>> { self.basic_blocks.as_mut() } @@ -490,7 +526,7 @@ impl<'tcx> Index<BasicBlock> for Body<'tcx> { #[inline] fn index(&self, index: BasicBlock) -> &BasicBlockData<'tcx> { - &self.basic_blocks()[index] + &self.basic_blocks[index] } } @@ -646,22 +682,6 @@ pub enum BindingForm<'tcx> { RefForGuard, } -/// Represents what type of implicit self a function has, if any. -#[derive(Clone, Copy, PartialEq, Debug, TyEncodable, TyDecodable, HashStable)] -pub enum ImplicitSelfKind { - /// Represents a `fn x(self);`. - Imm, - /// Represents a `fn x(mut self);`. - Mut, - /// Represents a `fn x(&self);`. - ImmRef, - /// Represents a `fn x(&mut self);`. - MutRef, - /// Represents when a function does not have a self argument or - /// when a function has a `self: X` argument. - None, -} - TrivialTypeTraversalAndLiftImpls! 
{ BindingForm<'tcx>, } mod binding_form_impl { @@ -832,10 +852,6 @@ pub struct LocalDecl<'tcx> { pub source_info: SourceInfo, } -// `LocalDecl` is used a lot. Make sure it doesn't unintentionally get bigger. -#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))] -static_assert_size!(LocalDecl<'_>, 56); - /// Extra information about a some locals that's used for diagnostics and for /// classifying variables into local variables, statics, etc, which is needed e.g. /// for unsafety checking. @@ -1310,10 +1326,6 @@ pub struct Statement<'tcx> { pub kind: StatementKind<'tcx>, } -// `Statement` is used a lot. Make sure it doesn't unintentionally get bigger. -#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))] -static_assert_size!(Statement<'_>, 32); - impl Statement<'_> { /// Changes a statement to a nop. This is both faster than deleting instructions and avoids /// invalidating statement indices in `Location`s. @@ -1363,13 +1375,7 @@ impl Debug for Statement<'_> { write!(fmt, "Coverage::{:?} for {:?}", kind, rgn) } Coverage(box ref coverage) => write!(fmt, "Coverage::{:?}", coverage.kind), - CopyNonOverlapping(box crate::mir::CopyNonOverlapping { - ref src, - ref dst, - ref count, - }) => { - write!(fmt, "copy_nonoverlapping(src={:?}, dst={:?}, count={:?})", src, dst, count) - } + Intrinsic(box ref intrinsic) => write!(fmt, "{intrinsic}"), Nop => write!(fmt, "nop"), } } @@ -1403,6 +1409,7 @@ impl<V, T> ProjectionElem<V, T> { Self::Field(_, _) | Self::Index(_) + | Self::OpaqueCast(_) | Self::ConstantIndex { .. } | Self::Subslice { .. } | Self::Downcast(_, _) => false, @@ -1450,7 +1457,7 @@ pub struct PlaceRef<'tcx> { // Once we stop implementing `Ord` for `DefId`, // this impl will be unnecessary. Until then, we'll // leave this impl in place to prevent re-adding a -// dependnecy on the `Ord` impl for `DefId` +// dependency on the `Ord` impl for `DefId` impl<'tcx> !PartialOrd for PlaceRef<'tcx> {} impl<'tcx> Place<'tcx> { @@ -1471,7 +1478,9 @@ impl<'tcx> Place<'tcx> { /// It's guaranteed to be in the first place pub fn has_deref(&self) -> bool { // To make sure this is not accidently used in wrong mir phase - debug_assert!(!self.projection[1..].contains(&PlaceElem::Deref)); + debug_assert!( + self.projection.is_empty() || !self.projection[1..].contains(&PlaceElem::Deref) + ); self.projection.first() == Some(&PlaceElem::Deref) } @@ -1531,6 +1540,7 @@ impl<'tcx> Place<'tcx> { } impl From<Local> for Place<'_> { + #[inline] fn from(local: Local) -> Self { Place { local, projection: List::empty() } } @@ -1594,7 +1604,9 @@ impl Debug for Place<'_> { fn fmt(&self, fmt: &mut Formatter<'_>) -> fmt::Result { for elem in self.projection.iter().rev() { match elem { - ProjectionElem::Downcast(_, _) | ProjectionElem::Field(_, _) => { + ProjectionElem::OpaqueCast(_) + | ProjectionElem::Downcast(_, _) + | ProjectionElem::Field(_, _) => { write!(fmt, "(").unwrap(); } ProjectionElem::Deref => { @@ -1610,6 +1622,9 @@ impl Debug for Place<'_> { for elem in self.projection.iter() { match elem { + ProjectionElem::OpaqueCast(ty) => { + write!(fmt, " as {})", ty)?; + } ProjectionElem::Downcast(Some(name), _index) => { write!(fmt, " as {})", name)?; } @@ -1847,7 +1862,15 @@ impl<'tcx> Rvalue<'tcx> { | Rvalue::AddressOf(_, _) | Rvalue::Len(_) | Rvalue::Cast( - CastKind::Misc | CastKind::Pointer(_) | CastKind::PointerFromExposedAddress, + CastKind::IntToInt + | CastKind::FloatToInt + | CastKind::FloatToFloat + | CastKind::IntToFloat + | CastKind::FnPtrToPtr + | CastKind::PtrToPtr + | 
CastKind::Pointer(_) + | CastKind::PointerFromExposedAddress + | CastKind::DynStar, _, _, ) @@ -2047,6 +2070,7 @@ impl<'tcx> Debug for Rvalue<'tcx> { /// particular, one must be wary of `NaN`! #[derive(Clone, Copy, PartialEq, TyEncodable, TyDecodable, Hash, HashStable)] +#[derive(TypeFoldable, TypeVisitable)] pub struct Constant<'tcx> { pub span: Span, @@ -2061,10 +2085,14 @@ pub struct Constant<'tcx> { } #[derive(Clone, Copy, PartialEq, Eq, TyEncodable, TyDecodable, Hash, HashStable, Debug)] -#[derive(Lift)] +#[derive(Lift, TypeFoldable, TypeVisitable)] pub enum ConstantKind<'tcx> { /// This constant came from the type system Ty(ty::Const<'tcx>), + + /// An unevaluated mir constant which is not part of the type system. + Unevaluated(UnevaluatedConst<'tcx>, Ty<'tcx>), + /// This constant cannot go back into the type system, as it represents /// something the type system cannot handle (e.g. pointers). Val(interpret::ConstValue<'tcx>, Ty<'tcx>), @@ -2090,20 +2118,11 @@ impl<'tcx> Constant<'tcx> { } impl<'tcx> ConstantKind<'tcx> { - /// Returns `None` if the constant is not trivially safe for use in the type system. - #[inline] - pub fn const_for_ty(&self) -> Option<ty::Const<'tcx>> { - match self { - ConstantKind::Ty(c) => Some(*c), - ConstantKind::Val(..) => None, - } - } - #[inline(always)] pub fn ty(&self) -> Ty<'tcx> { match self { ConstantKind::Ty(c) => c.ty(), - ConstantKind::Val(_, ty) => *ty, + ConstantKind::Val(_, ty) | ConstantKind::Unevaluated(_, ty) => *ty, } } @@ -2115,6 +2134,7 @@ impl<'tcx> ConstantKind<'tcx> { _ => None, }, ConstantKind::Val(val, _) => Some(val), + ConstantKind::Unevaluated(..) => None, } } @@ -2129,6 +2149,7 @@ impl<'tcx> ConstantKind<'tcx> { _ => None, }, ConstantKind::Val(val, _) => val.try_to_scalar(), + ConstantKind::Unevaluated(..) => None, } } @@ -2161,6 +2182,14 @@ impl<'tcx> ConstantKind<'tcx> { } } Self::Val(_, _) => self, + Self::Unevaluated(uneval, ty) => { + // FIXME: We might want to have a `try_eval`-like function on `Unevaluated` + match tcx.const_eval_resolve(param_env, uneval, None) { + Ok(val) => Self::Val(val, ty), + Err(ErrorHandled::TooGeneric | ErrorHandled::Linted) => self, + Err(_) => Self::Ty(tcx.const_error(ty)), + } + } } } @@ -2186,6 +2215,18 @@ impl<'tcx> ConstantKind<'tcx> { tcx.layout_of(param_env.with_reveal_all_normalized(tcx).and(ty)).ok()?.size; val.try_to_bits(size) } + Self::Unevaluated(uneval, ty) => { + match tcx.const_eval_resolve(param_env, *uneval, None) { + Ok(val) => { + let size = tcx + .layout_of(param_env.with_reveal_all_normalized(tcx).and(*ty)) + .ok()? 
+ .size; + val.try_to_bits(size) + } + Err(_) => None, + } + } } } @@ -2194,6 +2235,12 @@ impl<'tcx> ConstantKind<'tcx> { match self { Self::Ty(ct) => ct.try_eval_bool(tcx, param_env), Self::Val(val, _) => val.try_to_bool(), + Self::Unevaluated(uneval, _) => { + match tcx.const_eval_resolve(param_env, *uneval, None) { + Ok(val) => val.try_to_bool(), + Err(_) => None, + } + } } } @@ -2202,6 +2249,12 @@ impl<'tcx> ConstantKind<'tcx> { match self { Self::Ty(ct) => ct.try_eval_usize(tcx, param_env), Self::Val(val, _) => val.try_to_machine_usize(tcx), + Self::Unevaluated(uneval, _) => { + match tcx.const_eval_resolve(param_env, *uneval, None) { + Ok(val) => val.try_to_machine_usize(tcx), + Err(_) => None, + } + } } } @@ -2259,7 +2312,7 @@ impl<'tcx> ConstantKind<'tcx> { Self::from_opt_const_arg_anon_const(tcx, ty::WithOptConstParam::unknown(def_id), param_env) } - #[instrument(skip(tcx), level = "debug")] + #[instrument(skip(tcx), level = "debug", ret)] pub fn from_inline_const(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> Self { let hir_id = tcx.hir().local_def_id_to_hir_id(def_id); let body_id = match tcx.hir().get(hir_id) { @@ -2297,21 +2350,18 @@ impl<'tcx> ConstantKind<'tcx> { let substs = ty::InlineConstSubsts::new(tcx, ty::InlineConstSubstsParts { parent_substs, ty }) .substs; - let uneval_const = tcx.mk_const(ty::ConstS { - kind: ty::ConstKind::Unevaluated(ty::Unevaluated { - def: ty::WithOptConstParam::unknown(def_id).to_global(), - substs, - promoted: None, - }), - ty, - }); - debug!(?uneval_const); - debug_assert!(!uneval_const.has_free_regions()); - Self::Ty(uneval_const) + let uneval = UnevaluatedConst { + def: ty::WithOptConstParam::unknown(def_id).to_global(), + substs, + promoted: None, + }; + debug_assert!(!uneval.has_free_regions()); + + Self::Unevaluated(uneval, ty) } - #[instrument(skip(tcx), level = "debug")] + #[instrument(skip(tcx), level = "debug", ret)] fn from_opt_const_arg_anon_const( tcx: TyCtxt<'tcx>, def: ty::WithOptConstParam<LocalDefId>, @@ -2389,29 +2439,26 @@ impl<'tcx> ConstantKind<'tcx> { let hir_id = tcx.hir().local_def_id_to_hir_id(def.did); let span = tcx.hir().span(hir_id); - let uneval = ty::Unevaluated::new(def.to_global(), substs); + let uneval = UnevaluatedConst::new(def.to_global(), substs); debug!(?span, ?param_env); match tcx.const_eval_resolve(param_env, uneval, Some(span)) { Ok(val) => { - debug!("evaluated const value: {:?}", val); + debug!("evaluated const value"); Self::Val(val, ty) } Err(_) => { debug!("error encountered during evaluation"); // Error was handled in `const_eval_resolve`. Here we just create a // new unevaluated const and error hard later in codegen - let ty_const = tcx.mk_const(ty::ConstS { - kind: ty::ConstKind::Unevaluated(ty::Unevaluated { + Self::Unevaluated( + UnevaluatedConst { def: def.to_global(), substs: InternalSubsts::identity_for_item(tcx, def.did.to_def_id()), promoted: None, - }), + }, ty, - }); - debug!(?ty_const); - - Self::Ty(ty_const) + ) } } } @@ -2422,11 +2469,40 @@ impl<'tcx> ConstantKind<'tcx> { let const_val = tcx.valtree_to_const_val((c.ty(), valtree)); Self::Val(const_val, c.ty()) } + ty::ConstKind::Unevaluated(uv) => Self::Unevaluated(uv.expand(), c.ty()), _ => Self::Ty(c), } } } +/// An unevaluated (potentially generic) constant used in MIR. 
+#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable, Lift)] +#[derive(Hash, HashStable, TypeFoldable, TypeVisitable)] +pub struct UnevaluatedConst<'tcx> { + pub def: ty::WithOptConstParam<DefId>, + pub substs: SubstsRef<'tcx>, + pub promoted: Option<Promoted>, +} + +impl<'tcx> UnevaluatedConst<'tcx> { + // FIXME: probably should get rid of this method. It's also wrong to + // shrink and then later expand a promoted. + #[inline] + pub fn shrink(self) -> ty::UnevaluatedConst<'tcx> { + ty::UnevaluatedConst { def: self.def, substs: self.substs } + } +} + +impl<'tcx> UnevaluatedConst<'tcx> { + #[inline] + pub fn new( + def: ty::WithOptConstParam<DefId>, + substs: SubstsRef<'tcx>, + ) -> UnevaluatedConst<'tcx> { + UnevaluatedConst { def, substs, promoted: Default::default() } + } +} + /// A collection of projections into user types. /// /// They are projections because a binding can occur a part of a @@ -2576,8 +2652,6 @@ impl UserTypeProjection { } } -TrivialTypeTraversalAndLiftImpls! { ProjectionKind, } - impl<'tcx> TypeFoldable<'tcx> for UserTypeProjection { fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> { Ok(UserTypeProjection { @@ -2622,6 +2696,11 @@ impl<'tcx> Display for ConstantKind<'tcx> { match *self { ConstantKind::Ty(c) => pretty_print_const(c, fmt, true), ConstantKind::Val(val, ty) => pretty_print_const_value(val, ty, fmt, true), + // FIXME(valtrees): Correctly print mir constants. + ConstantKind::Unevaluated(..) => { + fmt.write_str("_")?; + Ok(()) + } } } } @@ -2643,15 +2722,7 @@ fn pretty_print_const<'tcx>( } fn pretty_print_byte_str(fmt: &mut Formatter<'_>, byte_str: &[u8]) -> fmt::Result { - fmt.write_str("b\"")?; - for &c in byte_str { - for e in std::ascii::escape_default(c) { - fmt.write_char(e as char)?; - } - } - fmt.write_str("\"")?; - - Ok(()) + write!(fmt, "b\"{}\"", byte_str.escape_ascii()) } fn comma_sep<'tcx>(fmt: &mut Formatter<'_>, elems: Vec<ConstantKind<'tcx>>) -> fmt::Result { @@ -2691,8 +2762,8 @@ fn pretty_print_const_value<'tcx>( match inner.kind() { ty::Slice(t) => { if *t == u8_type { - // The `inspect` here is okay since we checked the bounds, and there are - // no relocations (we have an active slice reference here). We don't use + // The `inspect` here is okay since we checked the bounds, and `u8` carries + // no provenance (we have an active slice reference here). We don't use // this result to affect interpreter execution. let byte_str = data .inner() @@ -2702,8 +2773,8 @@ fn pretty_print_const_value<'tcx>( } } ty::Str => { - // The `inspect` here is okay since we checked the bounds, and there are no - // relocations (we have an active `str` reference here). We don't use this + // The `inspect` here is okay since we checked the bounds, and `str` carries + // no provenance (we have an active `str` reference here). We don't use this // result to affect interpreter execution. let slice = data .inner() @@ -2718,14 +2789,14 @@ fn pretty_print_const_value<'tcx>( let n = n.kind().try_to_bits(tcx.data_layout.pointer_size).unwrap(); // cast is ok because we already checked for pointer size (32 or 64 bit) above let range = AllocRange { start: offset, size: Size::from_bytes(n) }; - let byte_str = alloc.inner().get_bytes(&tcx, range).unwrap(); + let byte_str = alloc.inner().get_bytes_strip_provenance(&tcx, range).unwrap(); fmt.write_str("*")?; pretty_print_byte_str(fmt, byte_str)?; return Ok(()); } // Aggregates, printed as array/tuple/struct/variant construction syntax. 
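The rewritten `pretty_print_byte_str` above leans on the standard library: `<[u8]>::escape_ascii()` returns an iterator that implements `Display`, so the old hand-rolled `escape_default` loop collapses into a single `write!`. A quick standalone check of the resulting output:

fn pretty_byte_str(bytes: &[u8]) -> String {
    // Same shape as the one-liner in the diff: escape_ascii handles quotes,
    // backslashes, and non-printable bytes.
    format!("b\"{}\"", bytes.escape_ascii())
}

fn main() {
    assert_eq!(pretty_byte_str(b"hi\n\0"), r#"b"hi\n\x00""#);
    assert_eq!(pretty_byte_str(b"\"quote\""), r#"b"\"quote\"""#);
}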
// - // NB: the `has_param_types_or_consts` check ensures that we can use + // NB: the `has_non_region_param` check ensures that we can use // the `destructure_const` query with an empty `ty::ParamEnv` without // introducing ICEs (e.g. via `layout_of`) from missing bounds. // E.g. `transmute([0usize; 2]): (u8, *mut T)` needs to know `T: Sized` @@ -2733,7 +2804,7 @@ fn pretty_print_const_value<'tcx>( // // FIXME(eddyb) for `--emit=mir`/`-Z dump-mir`, we should provide the // correct `ty::ParamEnv` to allow printing *all* constant values. - (_, ty::Array(..) | ty::Tuple(..) | ty::Adt(..)) if !ty.has_param_types_or_consts() => { + (_, ty::Array(..) | ty::Tuple(..) | ty::Adt(..)) if !ty.has_non_region_param() => { let ct = tcx.lift(ct).unwrap(); let ty = tcx.lift(ty).unwrap(); if let Some(contents) = tcx.try_destructure_mir_constant( @@ -2898,3 +2969,18 @@ impl Location { } } } + +// Some nodes are used a lot. Make sure they don't unintentionally get bigger. +#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))] +mod size_asserts { + use super::*; + use rustc_data_structures::static_assert_size; + // tidy-alphabetical-start + static_assert_size!(BasicBlockData<'_>, 144); + static_assert_size!(LocalDecl<'_>, 56); + static_assert_size!(Statement<'_>, 32); + static_assert_size!(StatementKind<'_>, 16); + static_assert_size!(Terminator<'_>, 112); + static_assert_size!(TerminatorKind<'_>, 96); + // tidy-alphabetical-end +} diff --git a/compiler/rustc_middle/src/mir/mono.rs b/compiler/rustc_middle/src/mir/mono.rs index 21ae121e1..15a24aa4a 100644 --- a/compiler/rustc_middle/src/mir/mono.rs +++ b/compiler/rustc_middle/src/mir/mono.rs @@ -81,7 +81,7 @@ impl<'tcx> MonoItem<'tcx> { MonoItem::Fn(instance) => tcx.symbol_name(instance), MonoItem::Static(def_id) => tcx.symbol_name(Instance::mono(tcx, def_id)), MonoItem::GlobalAsm(item_id) => { - SymbolName::new(tcx, &format!("global_asm_{:?}", item_id.def_id)) + SymbolName::new(tcx, &format!("global_asm_{:?}", item_id.owner_id)) } } } @@ -182,7 +182,7 @@ impl<'tcx> MonoItem<'tcx> { match *self { MonoItem::Fn(Instance { def, .. 
}) => def.def_id().as_local(), MonoItem::Static(def_id) => def_id.as_local(), - MonoItem::GlobalAsm(item_id) => Some(item_id.def_id), + MonoItem::GlobalAsm(item_id) => Some(item_id.owner_id.def_id), } .map(|def_id| tcx.def_span(def_id)) } @@ -373,7 +373,7 @@ impl<'tcx> CodegenUnit<'tcx> { } } MonoItem::Static(def_id) => def_id.as_local().map(Idx::index), - MonoItem::GlobalAsm(item_id) => Some(item_id.def_id.index()), + MonoItem::GlobalAsm(item_id) => Some(item_id.owner_id.def_id.index()), }, item.symbol_name(tcx), ) diff --git a/compiler/rustc_middle/src/mir/patch.rs b/compiler/rustc_middle/src/mir/patch.rs index 15496842d..24fe3b472 100644 --- a/compiler/rustc_middle/src/mir/patch.rs +++ b/compiler/rustc_middle/src/mir/patch.rs @@ -19,7 +19,7 @@ pub struct MirPatch<'tcx> { impl<'tcx> MirPatch<'tcx> { pub fn new(body: &Body<'tcx>) -> Self { let mut result = MirPatch { - patch_map: IndexVec::from_elem(None, body.basic_blocks()), + patch_map: IndexVec::from_elem(None, &body.basic_blocks), new_blocks: vec![], new_statements: vec![], new_locals: vec![], @@ -29,7 +29,7 @@ impl<'tcx> MirPatch<'tcx> { }; // Check if we already have a resume block - for (bb, block) in body.basic_blocks().iter_enumerated() { + for (bb, block) in body.basic_blocks.iter_enumerated() { if let TerminatorKind::Resume = block.terminator().kind && block.statements.is_empty() { result.resume_block = Some(bb); break; @@ -61,14 +61,14 @@ impl<'tcx> MirPatch<'tcx> { } pub fn terminator_loc(&self, body: &Body<'tcx>, bb: BasicBlock) -> Location { - let offset = match bb.index().checked_sub(body.basic_blocks().len()) { + let offset = match bb.index().checked_sub(body.basic_blocks.len()) { Some(index) => self.new_blocks[index].statements.len(), None => body[bb].statements.len(), }; Location { block: bb, statement_index: offset } } - pub fn new_local_with_info( + pub fn new_internal_with_info( &mut self, ty: Ty<'tcx>, span: Span, @@ -76,14 +76,17 @@ impl<'tcx> MirPatch<'tcx> { ) -> Local { let index = self.next_local; self.next_local += 1; - let mut new_decl = LocalDecl::new(ty, span); + let mut new_decl = LocalDecl::new(ty, span).internal(); new_decl.local_info = local_info; self.new_locals.push(new_decl); Local::new(index as usize) } pub fn new_temp(&mut self, ty: Ty<'tcx>, span: Span) -> Local { - self.new_local_with_info(ty, span, None) + let index = self.next_local; + self.next_local += 1; + self.new_locals.push(LocalDecl::new(ty, span)); + Local::new(index as usize) } pub fn new_internal(&mut self, ty: Ty<'tcx>, span: Span) -> Local { @@ -126,7 +129,7 @@ impl<'tcx> MirPatch<'tcx> { debug!( "MirPatch: {} new blocks, starting from index {}", self.new_blocks.len(), - body.basic_blocks().len() + body.basic_blocks.len() ); let bbs = if self.patch_map.is_empty() && self.new_blocks.is_empty() { body.basic_blocks.as_mut_preserves_cfg() @@ -147,7 +150,6 @@ impl<'tcx> MirPatch<'tcx> { let mut delta = 0; let mut last_bb = START_BLOCK; - let mut stmts_and_targets: Vec<(Statement<'_>, BasicBlock)> = Vec::new(); for (mut loc, stmt) in new_statements { if loc.block != last_bb { delta = 0; @@ -156,27 +158,11 @@ impl<'tcx> MirPatch<'tcx> { debug!("MirPatch: adding statement {:?} at loc {:?}+{}", stmt, loc, delta); loc.statement_index += delta; let source_info = Self::source_info_for_index(&body[loc.block], loc); - - // For mir-opt `Derefer` to work in all cases we need to - // get terminator's targets and apply the statement to all of them. 
- if loc.statement_index > body[loc.block].statements.len() { - let term = body[loc.block].terminator(); - for i in term.successors() { - stmts_and_targets.push((Statement { source_info, kind: stmt.clone() }, i)); - } - delta += 1; - continue; - } - body[loc.block] .statements .insert(loc.statement_index, Statement { source_info, kind: stmt }); delta += 1; } - - for (stmt, target) in stmts_and_targets.into_iter().rev() { - body[target].statements.insert(0, stmt); - } } pub fn source_info_for_index(data: &BasicBlockData<'_>, loc: Location) -> SourceInfo { @@ -187,7 +173,7 @@ impl<'tcx> MirPatch<'tcx> { } pub fn source_info_for_location(&self, body: &Body<'tcx>, loc: Location) -> SourceInfo { - let data = match loc.block.index().checked_sub(body.basic_blocks().len()) { + let data = match loc.block.index().checked_sub(body.basic_blocks.len()) { Some(new) => &self.new_blocks[new], None => &body[loc.block], }; diff --git a/compiler/rustc_middle/src/mir/pretty.rs b/compiler/rustc_middle/src/mir/pretty.rs index 0ce41337b..05dcfba77 100644 --- a/compiler/rustc_middle/src/mir/pretty.rs +++ b/compiler/rustc_middle/src/mir/pretty.rs @@ -318,10 +318,10 @@ where F: FnMut(PassWhere, &mut dyn Write) -> io::Result<()>, { write_mir_intro(tcx, body, w)?; - for block in body.basic_blocks().indices() { + for block in body.basic_blocks.indices() { extra_data(PassWhere::BeforeBlock(block), w)?; write_basic_block(tcx, block, body, extra_data, w)?; - if block.index() + 1 != body.basic_blocks().len() { + if block.index() + 1 != body.basic_blocks.len() { writeln!(w)?; } } @@ -464,12 +464,13 @@ impl<'tcx> Visitor<'tcx> for ExtraComments<'tcx> { let val = match literal { ConstantKind::Ty(ct) => match ct.kind() { ty::ConstKind::Param(p) => format!("Param({})", p), - ty::ConstKind::Unevaluated(uv) => format!( - "Unevaluated({}, {:?}, {:?})", - self.tcx.def_path_str(uv.def.did), - uv.substs, - uv.promoted, - ), + ty::ConstKind::Unevaluated(uv) => { + format!( + "Unevaluated({}, {:?})", + self.tcx.def_path_str(uv.def.did), + uv.substs, + ) + } ty::ConstKind::Value(val) => format!("Value({})", fmt_valtree(&val)), ty::ConstKind::Error(_) => "Error".to_string(), // These variants shouldn't exist in the MIR. @@ -477,6 +478,14 @@ impl<'tcx> Visitor<'tcx> for ExtraComments<'tcx> { | ty::ConstKind::Infer(_) | ty::ConstKind::Bound(..) => bug!("unexpected MIR constant: {:?}", literal), }, + ConstantKind::Unevaluated(uv, _) => { + format!( + "Unevaluated({}, {:?}, {:?})", + self.tcx.def_path_str(uv.def.did), + uv.substs, + uv.promoted, + ) + } // To keep the diffs small, we render this like we render `ty::Const::Value`. // // This changes once `ty::Const::Value` is represented using valtrees. @@ -676,7 +685,7 @@ pub fn write_allocations<'tcx>( fn alloc_ids_from_alloc( alloc: ConstAllocation<'_>, ) -> impl DoubleEndedIterator<Item = AllocId> + '_ { - alloc.inner().relocations().values().map(|id| *id) + alloc.inner().provenance().values().map(|id| *id) } fn alloc_ids_from_const_val(val: ConstValue<'_>) -> impl Iterator<Item = AllocId> + '_ { @@ -696,9 +705,9 @@ pub fn write_allocations<'tcx>( struct CollectAllocIds(BTreeSet<AllocId>); impl<'tcx> Visitor<'tcx> for CollectAllocIds { - fn visit_constant(&mut self, c: &Constant<'tcx>, loc: Location) { + fn visit_constant(&mut self, c: &Constant<'tcx>, _: Location) { match c.literal { - ConstantKind::Ty(c) => self.visit_const(c, loc), + ConstantKind::Ty(_) | ConstantKind::Unevaluated(..) 
=> {} ConstantKind::Val(val, _) => { self.0.extend(alloc_ids_from_const_val(val)); } @@ -778,7 +787,7 @@ pub fn write_allocations<'tcx>( /// If the allocation is small enough to fit into a single line, no start address is given. /// After the hex dump, an ascii dump follows, replacing all unprintable characters (control /// characters or characters whose value is larger than 127) with a `.` -/// This also prints relocations adequately. +/// This also prints provenance adequately. pub fn display_allocation<'a, 'tcx, Prov, Extra>( tcx: TyCtxt<'tcx>, alloc: &'a Allocation<Prov, Extra>, @@ -873,34 +882,34 @@ fn write_allocation_bytes<'tcx, Prov: Provenance, Extra>( if i != line_start { write!(w, " ")?; } - if let Some(&prov) = alloc.relocations().get(&i) { - // Memory with a relocation must be defined + if let Some(&prov) = alloc.provenance().get(&i) { + // Memory with provenance must be defined assert!(alloc.init_mask().is_range_initialized(i, i + ptr_size).is_ok()); let j = i.bytes_usize(); let offset = alloc .inspect_with_uninit_and_ptr_outside_interpreter(j..j + ptr_size.bytes_usize()); let offset = read_target_uint(tcx.data_layout.endian, offset).unwrap(); let offset = Size::from_bytes(offset); - let relocation_width = |bytes| bytes * 3; + let provenance_width = |bytes| bytes * 3; let ptr = Pointer::new(prov, offset); let mut target = format!("{:?}", ptr); - if target.len() > relocation_width(ptr_size.bytes_usize() - 1) { + if target.len() > provenance_width(ptr_size.bytes_usize() - 1) { // This is too long, try to save some space. target = format!("{:#?}", ptr); } if ((i - line_start) + ptr_size).bytes_usize() > BYTES_PER_LINE { - // This branch handles the situation where a relocation starts in the current line + // This branch handles the situation where a provenance starts in the current line // but ends in the next one. let remainder = Size::from_bytes(BYTES_PER_LINE) - (i - line_start); let overflow = ptr_size - remainder; - let remainder_width = relocation_width(remainder.bytes_usize()) - 2; - let overflow_width = relocation_width(overflow.bytes_usize() - 1) + 1; + let remainder_width = provenance_width(remainder.bytes_usize()) - 2; + let overflow_width = provenance_width(overflow.bytes_usize() - 1) + 1; ascii.push('╾'); for _ in 0..remainder.bytes() - 1 { ascii.push('─'); } if overflow_width > remainder_width && overflow_width >= target.len() { - // The case where the relocation fits into the part in the next line + // The case where the provenance fits into the part in the next line write!(w, "╾{0:─^1$}", "", remainder_width)?; line_start = write_allocation_newline(w, line_start, &ascii, pos_width, prefix)?; @@ -921,11 +930,11 @@ fn write_allocation_bytes<'tcx, Prov: Provenance, Extra>( i += ptr_size; continue; } else { - // This branch handles a relocation that starts and ends in the current line. - let relocation_width = relocation_width(ptr_size.bytes_usize() - 1); - oversized_ptr(&mut target, relocation_width); + // This branch handles a provenance that starts and ends in the current line. 
+ let provenance_width = provenance_width(ptr_size.bytes_usize() - 1); + oversized_ptr(&mut target, provenance_width); ascii.push('╾'); - write!(w, "╾{0:─^1$}╼", target, relocation_width)?; + write!(w, "╾{0:─^1$}╼", target, provenance_width)?; for _ in 0..ptr_size.bytes() - 2 { ascii.push('─'); } @@ -935,7 +944,7 @@ fn write_allocation_bytes<'tcx, Prov: Provenance, Extra>( } else if alloc.init_mask().is_range_initialized(i, i + Size::from_bytes(1)).is_ok() { let j = i.bytes_usize(); - // Checked definedness (and thus range) and relocations. This access also doesn't + // Checked definedness (and thus range) and provenance. This access also doesn't // influence interpreter execution but is only for debugging. let c = alloc.inspect_with_uninit_and_ptr_outside_interpreter(j..j + 1)[0]; write!(w, "{:02x}", c)?; diff --git a/compiler/rustc_middle/src/mir/query.rs b/compiler/rustc_middle/src/mir/query.rs index dd9f8795f..efd7357af 100644 --- a/compiler/rustc_middle/src/mir/query.rs +++ b/compiler/rustc_middle/src/mir/query.rs @@ -2,7 +2,7 @@ use crate::mir::{Body, ConstantKind, Promoted}; use crate::ty::{self, OpaqueHiddenType, Ty, TyCtxt}; -use rustc_data_structures::fx::FxHashMap; +use rustc_data_structures::fx::FxHashSet; use rustc_data_structures::vec_map::VecMap; use rustc_errors::ErrorGuaranteed; use rustc_hir as hir; @@ -115,21 +115,6 @@ pub enum UnusedUnsafe { /// `unsafe` block nested under another (used) `unsafe` block /// > ``… because it's nested under this `unsafe` block`` InUnsafeBlock(hir::HirId), - /// `unsafe` block nested under `unsafe fn` - /// > ``… because it's nested under this `unsafe fn` `` - /// - /// the second HirId here indicates the first usage of the `unsafe` block, - /// which allows retrieval of the LintLevelSource for why that operation would - /// have been permitted without the block - InUnsafeFn(hir::HirId, hir::HirId), -} - -#[derive(Copy, Clone, PartialEq, TyEncodable, TyDecodable, HashStable, Debug)] -pub enum UsedUnsafeBlockData { - SomeDisallowedInUnsafeFn, - // the HirId here indicates the first usage of the `unsafe` block - // (i.e. the one that's first encountered in the MIR traversal of the unsafety check) - AllAllowedInUnsafeFn(hir::HirId), } #[derive(TyEncodable, TyDecodable, HashStable, Debug)] @@ -138,10 +123,7 @@ pub struct UnsafetyCheckResult { pub violations: Vec<UnsafetyViolation>, /// Used `unsafe` blocks in this function. This is used for the "unused_unsafe" lint. - /// - /// The keys are the used `unsafe` blocks, the UnusedUnsafeKind indicates whether - /// or not any of the usages happen at a place that doesn't allow `unsafe_op_in_unsafe_fn`. - pub used_unsafe_blocks: FxHashMap<hir::HirId, UsedUnsafeBlockData>, + pub used_unsafe_blocks: FxHashSet<hir::HirId>, /// This is `Some` iff the item is not a closure. pub unused_unsafes: Option<Vec<(hir::HirId, UnusedUnsafe)>>, @@ -345,7 +327,7 @@ rustc_data_structures::static_assert_size!(ConstraintCategory<'_>, 16); /// /// See also `rustc_const_eval::borrow_check::constraints`. 
#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash)] -#[derive(TyEncodable, TyDecodable, HashStable)] +#[derive(TyEncodable, TyDecodable, HashStable, Lift, TypeVisitable, TypeFoldable)] pub enum ConstraintCategory<'tcx> { Return(ReturnConstraint), Yield, @@ -387,7 +369,7 @@ pub enum ConstraintCategory<'tcx> { } #[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, Hash)] -#[derive(TyEncodable, TyDecodable, HashStable)] +#[derive(TyEncodable, TyDecodable, HashStable, TypeVisitable, TypeFoldable)] pub enum ReturnConstraint { Normal, ClosureUpvar(Field), @@ -410,16 +392,9 @@ pub enum ClosureOutlivesSubject<'tcx> { Region(ty::RegionVid), } -/// The constituent parts of a type level constant of kind ADT or array. -#[derive(Copy, Clone, Debug, HashStable)] -pub struct DestructuredConst<'tcx> { - pub variant: Option<VariantIdx>, - pub fields: &'tcx [ty::Const<'tcx>], -} - /// The constituent parts of a mir constant of kind ADT or array. #[derive(Copy, Clone, Debug, HashStable)] -pub struct DestructuredMirConstant<'tcx> { +pub struct DestructuredConstant<'tcx> { pub variant: Option<VariantIdx>, pub fields: &'tcx [ConstantKind<'tcx>], } diff --git a/compiler/rustc_middle/src/mir/spanview.rs b/compiler/rustc_middle/src/mir/spanview.rs index 4418b848e..4e06d9101 100644 --- a/compiler/rustc_middle/src/mir/spanview.rs +++ b/compiler/rustc_middle/src/mir/spanview.rs @@ -105,7 +105,7 @@ where } let body_span = hir_body.unwrap().value.span; let mut span_viewables = Vec::new(); - for (bb, data) in body.basic_blocks().iter_enumerated() { + for (bb, data) in body.basic_blocks.iter_enumerated() { match spanview { MirSpanview::Statement => { for (i, statement) in data.statements.iter().enumerate() { @@ -249,7 +249,7 @@ pub fn statement_kind_name(statement: &Statement<'_>) -> &'static str { Retag(..) => "Retag", AscribeUserType(..) => "AscribeUserType", Coverage(..) => "Coverage", - CopyNonOverlapping(..) => "CopyNonOverlapping", + Intrinsic(..) => "Intrinsic", Nop => "Nop", } } diff --git a/compiler/rustc_middle/src/mir/syntax.rs b/compiler/rustc_middle/src/mir/syntax.rs index eb90169d0..85ef51f12 100644 --- a/compiler/rustc_middle/src/mir/syntax.rs +++ b/compiler/rustc_middle/src/mir/syntax.rs @@ -23,75 +23,111 @@ use rustc_span::symbol::Symbol; use rustc_span::Span; use rustc_target::asm::InlineAsmRegOrRegClass; -/// The various "big phases" that MIR goes through. +/// Represents the "flavors" of MIR. /// -/// These phases all describe dialects of MIR. Since all MIR uses the same datastructures, the -/// dialects forbid certain variants or values in certain phases. The sections below summarize the -/// changes, but do not document them thoroughly. The full documentation is found in the appropriate -/// documentation for the thing the change is affecting. +/// All flavors of MIR use the same data structure, but there are some important differences. These +/// differences come in two forms: Dialects and phases. /// -/// Warning: ordering of variants is significant. +/// Dialects represent a stronger distinction than phases. This is because the transitions between +/// dialects are semantic changes, and therefore technically *lowerings* between distinct IRs. In +/// other words, the same [`Body`](crate::mir::Body) might be well-formed for multiple dialects, but +/// have different semantic meaning and different behavior at runtime. +/// +/// Each dialect additionally has a number of phases. However, phase changes never involve semantic +/// changes. 
If some MIR is well-formed both before and after a phase change, it is also guaranteed +/// that it has the same semantic meaning. In this sense, phase changes can only add additional +/// restrictions on what MIR is well-formed. +/// +/// When adding phases, remember to update [`MirPhase::phase_index`]. #[derive(Copy, Clone, TyEncodable, TyDecodable, Debug, PartialEq, Eq, PartialOrd, Ord)] #[derive(HashStable)] pub enum MirPhase { - /// The dialect of MIR used during all phases before `DropsLowered` is the same. This is also - /// the MIR that analysis such as borrowck uses. - /// - /// One important thing to remember about the behavior of this section of MIR is that drop terminators - /// (including drop and replace) are *conditional*. The elaborate drops pass will then replace each - /// instance of a drop terminator with a nop, an unconditional drop, or a drop conditioned on a drop - /// flag. Of course, this means that it is important that the drop elaboration can accurately recognize - /// when things are initialized and when things are de-initialized. That means any code running on this - /// version of MIR must be sure to produce output that drop elaboration can reason about. See the - /// section on the drop terminatorss for more details. - Built = 0, - // FIXME(oli-obk): it's unclear whether we still need this phase (and its corresponding query). - // We used to have this for pre-miri MIR based const eval. - Const = 1, - /// This phase checks the MIR for promotable elements and takes them out of the main MIR body - /// by creating a new MIR body per promoted element. After this phase (and thus the termination - /// of the `mir_promoted` query), these promoted elements are available in the `promoted_mir` - /// query. - ConstsPromoted = 2, - /// After this projections may only contain deref projections as the first element. - Derefered = 3, - /// Beginning with this phase, the following variants are disallowed: - /// * [`TerminatorKind::DropAndReplace`] + /// The MIR that is generated by MIR building. + /// + /// The only things that operate on this dialect are unsafeck, the various MIR lints, and const + /// qualifs. + /// + /// This has no distinct phases. + Built, + /// The MIR used for most analysis. + /// + /// The only semantic change between analysis and built MIR is constant promotion. In built MIR, + /// sequences of statements that would generally be subject to constant promotion are + /// semantically constants, while in analysis MIR all constants are explicit. + /// + /// The result of const promotion is available from the `mir_promoted` and `promoted_mir` queries. + /// + /// This is the version of MIR used by borrowck and friends. + Analysis(AnalysisPhase), + /// The MIR used for CTFE, optimizations, and codegen. + /// + /// The semantic changes that occur in the lowering from analysis to runtime MIR are as follows: + /// + /// - Drops: In analysis MIR, `Drop` terminators represent *conditional* drops; roughly speaking, + /// if dataflow analysis determines that the place being dropped is uninitialized, the drop will + /// not be executed. The exact semantics of this aren't written down anywhere, which means they + /// are essentially "what drop elaboration does." In runtime MIR, the drops are unconditional; + /// when a `Drop` terminator is reached, if the type has drop glue that drop glue is always + /// executed. This may be UB if the underlying place is not initialized. 
+ /// - Packed drops: Places might in general be misaligned - in most cases this is UB, the exception + /// is fields of packed structs. In analysis MIR, `Drop(P)` for a `P` that might be misaligned + /// for this reason implicitly moves `P` to a temporary before dropping. Runtime MIR has no such + /// rules, and dropping a misaligned place is simply UB. + /// - Unwinding: in analysis MIR, unwinding from a function which may not unwind aborts. In runtime + /// MIR, this is UB. + /// - Retags: If `-Zmir-emit-retag` is enabled, analysis MIR has "implicit" retags in the same way + /// that Rust itself has them. Where exactly these are is generally subject to change, and so we + /// don't document this here. Runtime MIR has all retags explicit. + /// - Generator bodies: In analysis MIR, locals may actually be behind a pointer that user code has + /// access to. This occurs in generator bodies. Such locals do not behave like other locals, + /// because they eg may be aliased in surprising ways. Runtime MIR has no such special locals - + /// all generator bodies are lowered and so all places that look like locals really are locals. + /// + /// Also note that the lint pass which reports eg `200_u8 + 200_u8` as an error is run as a part + /// of analysis to runtime MIR lowering. To ensure lints are reported reliably, this means that + /// transformations which may supress such errors should not run on analysis MIR. + Runtime(RuntimePhase), +} + +/// See [`MirPhase::Analysis`]. +#[derive(Copy, Clone, TyEncodable, TyDecodable, Debug, PartialEq, Eq, PartialOrd, Ord)] +#[derive(HashStable)] +pub enum AnalysisPhase { + Initial = 0, + /// Beginning in this phase, the following variants are disallowed: /// * [`TerminatorKind::FalseUnwind`] /// * [`TerminatorKind::FalseEdge`] /// * [`StatementKind::FakeRead`] /// * [`StatementKind::AscribeUserType`] /// * [`Rvalue::Ref`] with `BorrowKind::Shallow` /// - /// And the following variant is allowed: - /// * [`StatementKind::Retag`] - /// - /// Furthermore, `Drop` now uses explicit drop flags visible in the MIR and reaching a `Drop` - /// terminator means that the auto-generated drop glue will be invoked. Also, `Copy` operands - /// are allowed for non-`Copy` types. - DropsLowered = 4, - /// Beginning with this phase, the following variant is disallowed: + /// Furthermore, `Deref` projections must be the first projection within any place (if they + /// appear at all) + PostCleanup = 1, +} + +/// See [`MirPhase::Runtime`]. +#[derive(Copy, Clone, TyEncodable, TyDecodable, Debug, PartialEq, Eq, PartialOrd, Ord)] +#[derive(HashStable)] +pub enum RuntimePhase { + /// In addition to the semantic changes, beginning with this phase, the following variants are + /// disallowed: + /// * [`TerminatorKind::DropAndReplace`] + /// * [`TerminatorKind::Yield`] + /// * [`TerminatorKind::GeneratorDrop`] /// * [`Rvalue::Aggregate`] for any `AggregateKind` except `Array` /// - /// And the following variant is allowed: + /// And the following variants are allowed: + /// * [`StatementKind::Retag`] /// * [`StatementKind::SetDiscriminant`] - Deaggregated = 5, - /// Before this phase, generators are in the "source code" form, featuring `yield` statements - /// and such. With this phase change, they are transformed into a proper state machine. Running - /// optimizations before this change can be potentially dangerous because the source code is to - /// some extent a "lie." In particular, `yield` terminators effectively make the value of all - /// locals visible to the caller. 
This means that dead store elimination before them, or code - /// motion across them, is not correct in general. This is also exasperated by type checking - /// having pre-computed a list of the types that it thinks are ok to be live across a yield - /// point - this is necessary to decide eg whether autotraits are implemented. Introducing new - /// types across a yield point will lead to ICEs becaues of this. - /// - /// Beginning with this phase, the following variants are disallowed: - /// * [`TerminatorKind::Yield`] - /// * [`TerminatorKind::GeneratorDrop`] + /// * [`StatementKind::Deinit`] + /// + /// Furthermore, `Copy` operands are allowed for non-`Copy` types. + Initial = 0, + /// Beginning with this phase, the following variant is disallowed: /// * [`ProjectionElem::Deref`] of `Box` - GeneratorsLowered = 6, - Optimized = 7, + PostCleanup = 1, + Optimized = 2, } /////////////////////////////////////////////////////////////////////////// @@ -292,12 +328,40 @@ pub enum StatementKind<'tcx> { /// executed. Coverage(Box<Coverage>), + /// Denotes a call to an intrinsic that does not require an unwind path and always returns. + /// This avoids adding a new block and a terminator for simple intrinsics. + Intrinsic(Box<NonDivergingIntrinsic<'tcx>>), + + /// No-op. Useful for deleting instructions without affecting statement indices. + Nop, +} + +#[derive( + Clone, + TyEncodable, + TyDecodable, + Debug, + PartialEq, + Hash, + HashStable, + TypeFoldable, + TypeVisitable +)] +pub enum NonDivergingIntrinsic<'tcx> { + /// Denotes a call to the intrinsic function `assume`. + /// + /// The operand must be a boolean. Optimizers may use the value of the boolean to backtrack its + /// computation to infer information about other variables. So if the boolean came from a + /// `x < y` operation, subsequent operations on `x` and `y` could elide various bound checks. + /// If the argument is `false`, this operation is equivalent to `TerminatorKind::Unreachable`. + Assume(Operand<'tcx>), + /// Denotes a call to the intrinsic function `copy_nonoverlapping`. /// /// First, all three operands are evaluated. `src` and `dest` must each be a reference, pointer, /// or `Box` pointing to the same type `T`. `count` must evaluate to a `usize`. Then, `src` and /// `dest` are dereferenced, and `count * size_of::<T>()` bytes beginning with the first byte of - /// the `src` place are copied to the continguous range of bytes beginning with the first byte + /// the `src` place are copied to the contiguous range of bytes beginning with the first byte /// of `dest`. /// /// **Needs clarification**: In what order are operands computed and dereferenced? It should @@ -305,10 +369,18 @@ pub enum StatementKind<'tcx> { /// /// **Needs clarification**: Is this typed or not, ie is there a typed load and store involved? /// I vaguely remember Ralf saying somewhere that he thought it should not be. - CopyNonOverlapping(Box<CopyNonOverlapping<'tcx>>), + CopyNonOverlapping(CopyNonOverlapping<'tcx>), +} - /// No-op. Useful for deleting instructions without affecting statement indices. - Nop, +impl std::fmt::Display for NonDivergingIntrinsic<'_> { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::Assume(op) => write!(f, "assume({op:?})"), + Self::CopyNonOverlapping(CopyNonOverlapping { src, dst, count }) => { + write!(f, "copy_nonoverlapping(dst = {dst:?}, src = {src:?}, count = {count:?})") + } + } + } } /// Describes what kind of retag is to be performed. 
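For orientation, here is a small self-contained sketch of how the two-level `MirPhase`/`AnalysisPhase`/`RuntimePhase` split introduced in this hunk linearizes into a total order. The enum variants mirror the ones shown above, but the numbering and the body of `phase_index` are illustrative assumptions, not the actual rustc_middle implementation.

// Stand-in enums mirroring the variants above (the real types also derive
// TyEncodable/TyDecodable/HashStable and live in rustc_middle).
#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
enum AnalysisPhase { Initial = 0, PostCleanup = 1 }

#[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
enum RuntimePhase { Initial = 0, PostCleanup = 1, Optimized = 2 }

#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum MirPhase { Built, Analysis(AnalysisPhase), Runtime(RuntimePhase) }

impl MirPhase {
    // Illustrative linearization: Built < Analysis(_) < Runtime(_); later
    // phases only add well-formedness restrictions, never remove them.
    fn phase_index(self) -> usize {
        match self {
            MirPhase::Built => 0,
            MirPhase::Analysis(p) => 1 + p as usize,
            MirPhase::Runtime(p) => 3 + p as usize,
        }
    }
}

fn main() {
    let analysis = MirPhase::Analysis(AnalysisPhase::PostCleanup);
    let runtime = MirPhase::Runtime(RuntimePhase::Initial);
    // A pass that relies on runtime-only guarantees (e.g. unconditional drops)
    // must not be scheduled while the body is still in an analysis phase.
    assert!(analysis.phase_index() < runtime.phase_index());
    println!("{analysis:?} = {}", analysis.phase_index());
}

The ordering check is the point: because phase changes only ever add restrictions, a pass can be gated on "at least phase X" by comparing indices.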
@@ -343,7 +415,7 @@ pub enum FakeReadCause { /// Some(closure_def_id). /// Otherwise, the value of the optional LocalDefId will be None. // - // We can use LocaDefId here since fake read statements are removed + // We can use LocalDefId here since fake read statements are removed // before codegen in the `CleanupNonCodegenStatements` pass. ForMatchedPlace(Option<LocalDefId>), @@ -417,7 +489,7 @@ pub struct CopyNonOverlapping<'tcx> { /// must also be `cleanup`. This is a part of the type system and checked statically, so it is /// still an error to have such an edge in the CFG even if it's known that it won't be taken at /// runtime. -#[derive(Clone, TyEncodable, TyDecodable, Hash, HashStable, PartialEq)] +#[derive(Clone, TyEncodable, TyDecodable, Hash, HashStable, PartialEq, TypeFoldable, TypeVisitable)] pub enum TerminatorKind<'tcx> { /// Block has one successor; we continue execution there. Goto { target: BasicBlock }, @@ -670,7 +742,7 @@ pub enum TerminatorKind<'tcx> { } /// Information about an assertion failure. -#[derive(Clone, TyEncodable, TyDecodable, Hash, HashStable, PartialEq, PartialOrd)] +#[derive(Clone, TyEncodable, TyDecodable, Hash, HashStable, PartialEq, TypeFoldable, TypeVisitable)] pub enum AssertKind<O> { BoundsCheck { len: O, index: O }, Overflow(BinOp, O, O), @@ -758,6 +830,9 @@ pub type AssertMessage<'tcx> = AssertKind<Operand<'tcx>>; /// generator has more than one variant, the parent place's variant index must be set, indicating /// which variant is being used. If it has just one variant, the variant index may or may not be /// included - the single possible variant is inferred if it is not included. +/// - [`OpaqueCast`](ProjectionElem::OpaqueCast): This projection changes the place's type to the +/// given one, and makes no other changes. A `OpaqueCast` projection on any type other than an +/// opaque type from the current crate is not well-formed. /// - [`ConstantIndex`](ProjectionElem::ConstantIndex): Computes an offset in units of `T` into the /// place as described in the documentation for the `ProjectionElem`. The resulting address is /// the parent's address plus that offset, and the type is `T`. This is only legal if the parent @@ -792,7 +867,7 @@ pub type AssertMessage<'tcx> = AssertKind<Operand<'tcx>>; /// /// Rust currently requires that every place obey those two rules. This is checked by MIRI and taken /// advantage of by codegen (via `gep inbounds`). That is possibly subject to change. -#[derive(Copy, Clone, PartialEq, Eq, Hash, TyEncodable, HashStable)] +#[derive(Copy, Clone, PartialEq, Eq, Hash, TyEncodable, HashStable, TypeFoldable, TypeVisitable)] pub struct Place<'tcx> { pub local: Local, @@ -801,7 +876,7 @@ pub struct Place<'tcx> { } #[derive(Copy, Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] -#[derive(TyEncodable, TyDecodable, HashStable)] +#[derive(TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)] pub enum ProjectionElem<V, T> { Deref, Field(Field, T), @@ -857,6 +932,10 @@ pub enum ProjectionElem<V, T> { /// /// The included Symbol is the name of the variant, used for printing MIR. Downcast(Option<Symbol>, VariantIdx), + + /// Like an explicit cast from an opaque type to a concrete type, but without + /// requiring an intermediate variable. + OpaqueCast(T), } /// Alias for projections as they appear in places, where the base is a place @@ -884,7 +963,7 @@ pub type PlaceElem<'tcx> = ProjectionElem<Local, Ty<'tcx>>; /// **Needs clarifiation:** Is loading a place that has its variant index set well-formed? 
Miri /// currently implements it, but it seems like this may be something to check against in the /// validator. -#[derive(Clone, PartialEq, TyEncodable, TyDecodable, Hash, HashStable)] +#[derive(Clone, PartialEq, TyEncodable, TyDecodable, Hash, HashStable, TypeFoldable, TypeVisitable)] pub enum Operand<'tcx> { /// Creates a value by loading the given place. /// @@ -915,7 +994,7 @@ pub enum Operand<'tcx> { /// Computing any rvalue begins by evaluating the places and operands in some order (**Needs /// clarification**: Which order?). These are then used to produce a "value" - the same kind of /// value that an [`Operand`] produces. -#[derive(Clone, TyEncodable, TyDecodable, Hash, HashStable, PartialEq)] +#[derive(Clone, TyEncodable, TyDecodable, Hash, HashStable, PartialEq, TypeFoldable, TypeVisitable)] pub enum Rvalue<'tcx> { /// Yields the operand unchanged Use(Operand<'tcx>), @@ -1068,11 +1147,18 @@ pub enum CastKind { /// All sorts of pointer-to-pointer casts. Note that reference-to-raw-ptr casts are /// translated into `&raw mut/const *r`, i.e., they are not actually casts. Pointer(PointerCast), - /// Remaining unclassified casts. - Misc, + /// Cast into a dyn* object. + DynStar, + IntToInt, + FloatToInt, + FloatToFloat, + IntToFloat, + PtrToPtr, + FnPtrToPtr, } #[derive(Clone, Debug, PartialEq, Eq, TyEncodable, TyDecodable, Hash, HashStable)] +#[derive(TypeFoldable, TypeVisitable)] pub enum AggregateKind<'tcx> { /// The type is of the element Array(Ty<'tcx>), @@ -1159,10 +1245,11 @@ pub enum BinOp { #[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))] mod size_asserts { use super::*; - // These are in alphabetical order, which is easy to maintain. - static_assert_size!(AggregateKind<'_>, 48); + // tidy-alphabetical-start + static_assert_size!(AggregateKind<'_>, 40); static_assert_size!(Operand<'_>, 24); static_assert_size!(Place<'_>, 16); static_assert_size!(PlaceElem<'_>, 24); static_assert_size!(Rvalue<'_>, 40); + // tidy-alphabetical-end } diff --git a/compiler/rustc_middle/src/mir/tcx.rs b/compiler/rustc_middle/src/mir/tcx.rs index 405003156..fa3adafd4 100644 --- a/compiler/rustc_middle/src/mir/tcx.rs +++ b/compiler/rustc_middle/src/mir/tcx.rs @@ -4,7 +4,6 @@ */ use crate::mir::*; -use crate::ty::subst::Subst; use crate::ty::{self, Ty, TyCtxt}; use rustc_hir as hir; use rustc_target::abi::VariantIdx; @@ -57,7 +56,7 @@ impl<'tcx> PlaceTy<'tcx> { /// `PlaceElem`, where we can just use the `Ty` that is already /// stored inline on field projection elems. pub fn projection_ty(self, tcx: TyCtxt<'tcx>, elem: PlaceElem<'tcx>) -> PlaceTy<'tcx> { - self.projection_ty_core(tcx, ty::ParamEnv::empty(), &elem, |_, _, ty| ty) + self.projection_ty_core(tcx, ty::ParamEnv::empty(), &elem, |_, _, ty| ty, |_, ty| ty) } /// `place_ty.projection_ty_core(tcx, elem, |...| { ... 
})` @@ -71,6 +70,7 @@ impl<'tcx> PlaceTy<'tcx> { param_env: ty::ParamEnv<'tcx>, elem: &ProjectionElem<V, T>, mut handle_field: impl FnMut(&Self, Field, T) -> Ty<'tcx>, + mut handle_opaque_cast: impl FnMut(&Self, T) -> Ty<'tcx>, ) -> PlaceTy<'tcx> where V: ::std::fmt::Debug, @@ -109,6 +109,7 @@ impl<'tcx> PlaceTy<'tcx> { PlaceTy { ty: self.ty, variant_index: Some(index) } } ProjectionElem::Field(f, fty) => PlaceTy::from_ty(handle_field(&self, f, fty)), + ProjectionElem::OpaqueCast(ty) => PlaceTy::from_ty(handle_opaque_cast(&self, ty)), }; debug!("projection_ty self: {:?} elem: {:?} yields: {:?}", self, elem, answer); answer diff --git a/compiler/rustc_middle/src/mir/terminator.rs b/compiler/rustc_middle/src/mir/terminator.rs index 9ccf5aea6..4ea333cff 100644 --- a/compiler/rustc_middle/src/mir/terminator.rs +++ b/compiler/rustc_middle/src/mir/terminator.rs @@ -14,7 +14,7 @@ use std::slice; pub use super::query::*; -#[derive(Debug, Clone, TyEncodable, TyDecodable, Hash, HashStable, PartialEq, PartialOrd)] +#[derive(Debug, Clone, TyEncodable, TyDecodable, Hash, HashStable, PartialEq)] pub struct SwitchTargets { /// Possible values. The locations to branch to in each case /// are found in the corresponding indices from the `targets` vector. @@ -102,7 +102,7 @@ impl<'a> Iterator for SwitchTargetsIter<'a> { impl<'a> ExactSizeIterator for SwitchTargetsIter<'a> {} -#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable)] +#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)] pub struct Terminator<'tcx> { pub source_info: SourceInfo, pub kind: TerminatorKind<'tcx>, diff --git a/compiler/rustc_middle/src/mir/traversal.rs b/compiler/rustc_middle/src/mir/traversal.rs index 627dc32f3..55b2c5927 100644 --- a/compiler/rustc_middle/src/mir/traversal.rs +++ b/compiler/rustc_middle/src/mir/traversal.rs @@ -37,7 +37,7 @@ impl<'a, 'tcx> Preorder<'a, 'tcx> { Preorder { body, - visited: BitSet::new_empty(body.basic_blocks().len()), + visited: BitSet::new_empty(body.basic_blocks.len()), worklist, root_is_start_block: root == START_BLOCK, } @@ -71,7 +71,7 @@ impl<'a, 'tcx> Iterator for Preorder<'a, 'tcx> { fn size_hint(&self) -> (usize, Option<usize>) { // All the blocks, minus the number of blocks we've visited. - let upper = self.body.basic_blocks().len() - self.visited.count(); + let upper = self.body.basic_blocks.len() - self.visited.count(); let lower = if self.root_is_start_block { // We will visit all remaining blocks exactly once. diff --git a/compiler/rustc_middle/src/mir/type_foldable.rs b/compiler/rustc_middle/src/mir/type_foldable.rs index 82a6b0c50..4c0974f86 100644 --- a/compiler/rustc_middle/src/mir/type_foldable.rs +++ b/compiler/rustc_middle/src/mir/type_foldable.rs @@ -1,8 +1,9 @@ //! `TypeFoldable` implementations for MIR types +use rustc_ast::InlineAsmTemplatePiece; + use super::*; use crate::ty; -use rustc_data_structures::functor::IdFunctor; TrivialTypeTraversalAndLiftImpls! { BlockTailInfo, @@ -13,96 +14,33 @@ TrivialTypeTraversalAndLiftImpls! 
{ SourceScope, SourceScopeLocalData, UserTypeAnnotationIndex, + BorrowKind, + CastKind, + BinOp, + NullOp, + UnOp, + hir::Movability, + BasicBlock, + SwitchTargets, + GeneratorKind, + GeneratorSavedLocal, } -impl<'tcx> TypeFoldable<'tcx> for Terminator<'tcx> { - fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> { - use crate::mir::TerminatorKind::*; - - let kind = match self.kind { - Goto { target } => Goto { target }, - SwitchInt { discr, switch_ty, targets } => SwitchInt { - discr: discr.try_fold_with(folder)?, - switch_ty: switch_ty.try_fold_with(folder)?, - targets, - }, - Drop { place, target, unwind } => { - Drop { place: place.try_fold_with(folder)?, target, unwind } - } - DropAndReplace { place, value, target, unwind } => DropAndReplace { - place: place.try_fold_with(folder)?, - value: value.try_fold_with(folder)?, - target, - unwind, - }, - Yield { value, resume, resume_arg, drop } => Yield { - value: value.try_fold_with(folder)?, - resume, - resume_arg: resume_arg.try_fold_with(folder)?, - drop, - }, - Call { func, args, destination, target, cleanup, from_hir_call, fn_span } => Call { - func: func.try_fold_with(folder)?, - args: args.try_fold_with(folder)?, - destination: destination.try_fold_with(folder)?, - target, - cleanup, - from_hir_call, - fn_span, - }, - Assert { cond, expected, msg, target, cleanup } => { - use AssertKind::*; - let msg = match msg { - BoundsCheck { len, index } => BoundsCheck { - len: len.try_fold_with(folder)?, - index: index.try_fold_with(folder)?, - }, - Overflow(op, l, r) => { - Overflow(op, l.try_fold_with(folder)?, r.try_fold_with(folder)?) - } - OverflowNeg(op) => OverflowNeg(op.try_fold_with(folder)?), - DivisionByZero(op) => DivisionByZero(op.try_fold_with(folder)?), - RemainderByZero(op) => RemainderByZero(op.try_fold_with(folder)?), - ResumedAfterReturn(_) | ResumedAfterPanic(_) => msg, - }; - Assert { cond: cond.try_fold_with(folder)?, expected, msg, target, cleanup } - } - GeneratorDrop => GeneratorDrop, - Resume => Resume, - Abort => Abort, - Return => Return, - Unreachable => Unreachable, - FalseEdge { real_target, imaginary_target } => { - FalseEdge { real_target, imaginary_target } - } - FalseUnwind { real_target, unwind } => FalseUnwind { real_target, unwind }, - InlineAsm { template, operands, options, line_spans, destination, cleanup } => { - InlineAsm { - template, - operands: operands.try_fold_with(folder)?, - options, - line_spans, - destination, - cleanup, - } - } - }; - Ok(Terminator { source_info: self.source_info, kind }) +TrivialTypeTraversalImpls! 
{ + for <'tcx> { + ConstValue<'tcx>, } } -impl<'tcx> TypeFoldable<'tcx> for GeneratorKind { - fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, _: &mut F) -> Result<Self, F::Error> { +impl<'tcx> TypeFoldable<'tcx> for &'tcx [InlineAsmTemplatePiece] { + fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, _folder: &mut F) -> Result<Self, F::Error> { Ok(self) } } -impl<'tcx> TypeFoldable<'tcx> for Place<'tcx> { - fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> { - Ok(Place { - local: self.local.try_fold_with(folder)?, - projection: self.projection.try_fold_with(folder)?, - }) +impl<'tcx> TypeFoldable<'tcx> for &'tcx [Span] { + fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, _folder: &mut F) -> Result<Self, F::Error> { + Ok(self) } } @@ -112,129 +50,8 @@ impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List<PlaceElem<'tcx>> { } } -impl<'tcx> TypeFoldable<'tcx> for Rvalue<'tcx> { - fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> { - use crate::mir::Rvalue::*; - Ok(match self { - Use(op) => Use(op.try_fold_with(folder)?), - Repeat(op, len) => Repeat(op.try_fold_with(folder)?, len.try_fold_with(folder)?), - ThreadLocalRef(did) => ThreadLocalRef(did.try_fold_with(folder)?), - Ref(region, bk, place) => { - Ref(region.try_fold_with(folder)?, bk, place.try_fold_with(folder)?) - } - CopyForDeref(place) => CopyForDeref(place.try_fold_with(folder)?), - AddressOf(mutability, place) => AddressOf(mutability, place.try_fold_with(folder)?), - Len(place) => Len(place.try_fold_with(folder)?), - Cast(kind, op, ty) => Cast(kind, op.try_fold_with(folder)?, ty.try_fold_with(folder)?), - BinaryOp(op, box (rhs, lhs)) => { - BinaryOp(op, Box::new((rhs.try_fold_with(folder)?, lhs.try_fold_with(folder)?))) - } - CheckedBinaryOp(op, box (rhs, lhs)) => CheckedBinaryOp( - op, - Box::new((rhs.try_fold_with(folder)?, lhs.try_fold_with(folder)?)), - ), - UnaryOp(op, val) => UnaryOp(op, val.try_fold_with(folder)?), - Discriminant(place) => Discriminant(place.try_fold_with(folder)?), - NullaryOp(op, ty) => NullaryOp(op, ty.try_fold_with(folder)?), - Aggregate(kind, fields) => { - let kind = kind.try_map_id(|kind| { - Ok(match kind { - AggregateKind::Array(ty) => AggregateKind::Array(ty.try_fold_with(folder)?), - AggregateKind::Tuple => AggregateKind::Tuple, - AggregateKind::Adt(def, v, substs, user_ty, n) => AggregateKind::Adt( - def, - v, - substs.try_fold_with(folder)?, - user_ty.try_fold_with(folder)?, - n, - ), - AggregateKind::Closure(id, substs) => { - AggregateKind::Closure(id, substs.try_fold_with(folder)?) - } - AggregateKind::Generator(id, substs, movablity) => { - AggregateKind::Generator(id, substs.try_fold_with(folder)?, movablity) - } - }) - })?; - Aggregate(kind, fields.try_fold_with(folder)?) - } - ShallowInitBox(op, ty) => { - ShallowInitBox(op.try_fold_with(folder)?, ty.try_fold_with(folder)?) 
- } - }) - } -} - -impl<'tcx> TypeFoldable<'tcx> for Operand<'tcx> { - fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> { - Ok(match self { - Operand::Copy(place) => Operand::Copy(place.try_fold_with(folder)?), - Operand::Move(place) => Operand::Move(place.try_fold_with(folder)?), - Operand::Constant(c) => Operand::Constant(c.try_fold_with(folder)?), - }) - } -} - -impl<'tcx> TypeFoldable<'tcx> for PlaceElem<'tcx> { - fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> { - use crate::mir::ProjectionElem::*; - - Ok(match self { - Deref => Deref, - Field(f, ty) => Field(f, ty.try_fold_with(folder)?), - Index(v) => Index(v.try_fold_with(folder)?), - Downcast(symbol, variantidx) => Downcast(symbol, variantidx), - ConstantIndex { offset, min_length, from_end } => { - ConstantIndex { offset, min_length, from_end } - } - Subslice { from, to, from_end } => Subslice { from, to, from_end }, - }) - } -} - -impl<'tcx> TypeFoldable<'tcx> for Field { - fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, _: &mut F) -> Result<Self, F::Error> { - Ok(self) - } -} - -impl<'tcx> TypeFoldable<'tcx> for GeneratorSavedLocal { - fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, _: &mut F) -> Result<Self, F::Error> { - Ok(self) - } -} - impl<'tcx, R: Idx, C: Idx> TypeFoldable<'tcx> for BitMatrix<R, C> { fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, _: &mut F) -> Result<Self, F::Error> { Ok(self) } } - -impl<'tcx> TypeFoldable<'tcx> for Constant<'tcx> { - fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> { - Ok(Constant { - span: self.span, - user_ty: self.user_ty.try_fold_with(folder)?, - literal: self.literal.try_fold_with(folder)?, - }) - } -} - -impl<'tcx> TypeFoldable<'tcx> for ConstantKind<'tcx> { - #[inline(always)] - fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> { - folder.try_fold_mir_const(self) - } -} - -impl<'tcx> TypeSuperFoldable<'tcx> for ConstantKind<'tcx> { - fn try_super_fold_with<F: FallibleTypeFolder<'tcx>>( - self, - folder: &mut F, - ) -> Result<Self, F::Error> { - match self { - ConstantKind::Ty(c) => Ok(ConstantKind::Ty(c.try_fold_with(folder)?)), - ConstantKind::Val(v, t) => Ok(ConstantKind::Val(v, t.try_fold_with(folder)?)), - } - } -} diff --git a/compiler/rustc_middle/src/mir/type_visitable.rs b/compiler/rustc_middle/src/mir/type_visitable.rs index 6a0801cb0..e7cd497b2 100644 --- a/compiler/rustc_middle/src/mir/type_visitable.rs +++ b/compiler/rustc_middle/src/mir/type_visitable.rs @@ -1,190 +1,9 @@ //! `TypeVisitable` implementations for MIR types use super::*; -use crate::ty; - -impl<'tcx> TypeVisitable<'tcx> for Terminator<'tcx> { - fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> { - use crate::mir::TerminatorKind::*; - - match self.kind { - SwitchInt { ref discr, switch_ty, .. } => { - discr.visit_with(visitor)?; - switch_ty.visit_with(visitor) - } - Drop { ref place, .. } => place.visit_with(visitor), - DropAndReplace { ref place, ref value, .. } => { - place.visit_with(visitor)?; - value.visit_with(visitor) - } - Yield { ref value, .. } => value.visit_with(visitor), - Call { ref func, ref args, ref destination, .. } => { - destination.visit_with(visitor)?; - func.visit_with(visitor)?; - args.visit_with(visitor) - } - Assert { ref cond, ref msg, .. 
} => { - cond.visit_with(visitor)?; - use AssertKind::*; - match msg { - BoundsCheck { ref len, ref index } => { - len.visit_with(visitor)?; - index.visit_with(visitor) - } - Overflow(_, l, r) => { - l.visit_with(visitor)?; - r.visit_with(visitor) - } - OverflowNeg(op) | DivisionByZero(op) | RemainderByZero(op) => { - op.visit_with(visitor) - } - ResumedAfterReturn(_) | ResumedAfterPanic(_) => ControlFlow::CONTINUE, - } - } - InlineAsm { ref operands, .. } => operands.visit_with(visitor), - Goto { .. } - | Resume - | Abort - | Return - | GeneratorDrop - | Unreachable - | FalseEdge { .. } - | FalseUnwind { .. } => ControlFlow::CONTINUE, - } - } -} - -impl<'tcx> TypeVisitable<'tcx> for GeneratorKind { - fn visit_with<V: TypeVisitor<'tcx>>(&self, _: &mut V) -> ControlFlow<V::BreakTy> { - ControlFlow::CONTINUE - } -} - -impl<'tcx> TypeVisitable<'tcx> for Place<'tcx> { - fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> { - self.local.visit_with(visitor)?; - self.projection.visit_with(visitor) - } -} - -impl<'tcx> TypeVisitable<'tcx> for &'tcx ty::List<PlaceElem<'tcx>> { - fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> { - self.iter().try_for_each(|t| t.visit_with(visitor)) - } -} - -impl<'tcx> TypeVisitable<'tcx> for Rvalue<'tcx> { - fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> { - use crate::mir::Rvalue::*; - match *self { - Use(ref op) => op.visit_with(visitor), - CopyForDeref(ref place) => { - let op = &Operand::Copy(*place); - op.visit_with(visitor) - } - Repeat(ref op, _) => op.visit_with(visitor), - ThreadLocalRef(did) => did.visit_with(visitor), - Ref(region, _, ref place) => { - region.visit_with(visitor)?; - place.visit_with(visitor) - } - AddressOf(_, ref place) => place.visit_with(visitor), - Len(ref place) => place.visit_with(visitor), - Cast(_, ref op, ty) => { - op.visit_with(visitor)?; - ty.visit_with(visitor) - } - BinaryOp(_, box (ref rhs, ref lhs)) | CheckedBinaryOp(_, box (ref rhs, ref lhs)) => { - rhs.visit_with(visitor)?; - lhs.visit_with(visitor) - } - UnaryOp(_, ref val) => val.visit_with(visitor), - Discriminant(ref place) => place.visit_with(visitor), - NullaryOp(_, ty) => ty.visit_with(visitor), - Aggregate(ref kind, ref fields) => { - match **kind { - AggregateKind::Array(ty) => { - ty.visit_with(visitor)?; - } - AggregateKind::Tuple => {} - AggregateKind::Adt(_, _, substs, user_ty, _) => { - substs.visit_with(visitor)?; - user_ty.visit_with(visitor)?; - } - AggregateKind::Closure(_, substs) => { - substs.visit_with(visitor)?; - } - AggregateKind::Generator(_, substs, _) => { - substs.visit_with(visitor)?; - } - } - fields.visit_with(visitor) - } - ShallowInitBox(ref op, ty) => { - op.visit_with(visitor)?; - ty.visit_with(visitor) - } - } - } -} - -impl<'tcx> TypeVisitable<'tcx> for Operand<'tcx> { - fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> { - match *self { - Operand::Copy(ref place) | Operand::Move(ref place) => place.visit_with(visitor), - Operand::Constant(ref c) => c.visit_with(visitor), - } - } -} - -impl<'tcx> TypeVisitable<'tcx> for PlaceElem<'tcx> { - fn visit_with<Vs: TypeVisitor<'tcx>>(&self, visitor: &mut Vs) -> ControlFlow<Vs::BreakTy> { - use crate::mir::ProjectionElem::*; - - match self { - Field(_, ty) => ty.visit_with(visitor), - Index(v) => v.visit_with(visitor), - _ => ControlFlow::CONTINUE, - } - } -} - -impl<'tcx> TypeVisitable<'tcx> for Field { - fn visit_with<V: 
TypeVisitor<'tcx>>(&self, _: &mut V) -> ControlFlow<V::BreakTy> { - ControlFlow::CONTINUE - } -} - -impl<'tcx> TypeVisitable<'tcx> for GeneratorSavedLocal { - fn visit_with<V: TypeVisitor<'tcx>>(&self, _: &mut V) -> ControlFlow<V::BreakTy> { - ControlFlow::CONTINUE - } -} impl<'tcx, R: Idx, C: Idx> TypeVisitable<'tcx> for BitMatrix<R, C> { fn visit_with<V: TypeVisitor<'tcx>>(&self, _: &mut V) -> ControlFlow<V::BreakTy> { ControlFlow::CONTINUE } } - -impl<'tcx> TypeVisitable<'tcx> for Constant<'tcx> { - fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> { - self.literal.visit_with(visitor)?; - self.user_ty.visit_with(visitor) - } -} - -impl<'tcx> TypeVisitable<'tcx> for ConstantKind<'tcx> { - fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> { - visitor.visit_mir_const(*self) - } -} - -impl<'tcx> TypeSuperVisitable<'tcx> for ConstantKind<'tcx> { - fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> { - match *self { - ConstantKind::Ty(c) => c.visit_with(visitor), - ConstantKind::Val(_, t) => t.visit_with(visitor), - } - } -} diff --git a/compiler/rustc_middle/src/mir/visit.rs b/compiler/rustc_middle/src/mir/visit.rs index 891608764..ddcf3711b 100644 --- a/compiler/rustc_middle/src/mir/visit.rs +++ b/compiler/rustc_middle/src/mir/visit.rs @@ -80,6 +80,8 @@ macro_rules! make_mir_visitor { self.super_body(body); } + extra_body_methods!($($mutability)?); + fn visit_basic_block_data( &mut self, block: BasicBlock, @@ -235,14 +237,6 @@ macro_rules! make_mir_visitor { self.super_region(region); } - fn visit_const( - &mut self, - constant: $(& $mutability)? ty::Const<'tcx>, - _: Location, - ) { - self.super_const(constant); - } - fn visit_substs( &mut self, substs: & $($mutability)? SubstsRef<'tcx>, @@ -287,63 +281,7 @@ macro_rules! make_mir_visitor { &mut self, body: &$($mutability)? Body<'tcx>, ) { - let span = body.span; - if let Some(gen) = &$($mutability)? body.generator { - if let Some(yield_ty) = $(& $mutability)? gen.yield_ty { - self.visit_ty( - yield_ty, - TyContext::YieldTy(SourceInfo::outermost(span)) - ); - } - } - - // for best performance, we want to use an iterator rather - // than a for-loop, to avoid calling `body::Body::invalidate` for - // each basic block. - #[allow(unused_macro_rules)] - macro_rules! basic_blocks { - (mut) => (body.basic_blocks_mut().iter_enumerated_mut()); - () => (body.basic_blocks().iter_enumerated()); - } - for (bb, data) in basic_blocks!($($mutability)?) { - self.visit_basic_block_data(bb, data); - } - - for scope in &$($mutability)? body.source_scopes { - self.visit_source_scope_data(scope); - } - - self.visit_ty( - $(& $mutability)? body.return_ty(), - TyContext::ReturnTy(SourceInfo::outermost(body.span)) - ); - - for local in body.local_decls.indices() { - self.visit_local_decl(local, & $($mutability)? body.local_decls[local]); - } - - #[allow(unused_macro_rules)] - macro_rules! type_annotations { - (mut) => (body.user_type_annotations.iter_enumerated_mut()); - () => (body.user_type_annotations.iter_enumerated()); - } - - for (index, annotation) in type_annotations!($($mutability)?) { - self.visit_user_type_annotation( - index, annotation - ); - } - - for var_debug_info in &$($mutability)? body.var_debug_info { - self.visit_var_debug_info(var_debug_info); - } - - self.visit_span($(& $mutability)? body.span); - - for const_ in &$($mutability)? 
body.required_consts { - let location = START_BLOCK.start_location(); - self.visit_constant(const_, location); - } + super_body!(self, body, $($mutability, true)?); } fn super_basic_block_data(&mut self, @@ -479,14 +417,15 @@ macro_rules! make_mir_visitor { location ) } - StatementKind::CopyNonOverlapping(box crate::mir::CopyNonOverlapping{ - src, - dst, - count, - }) => { - self.visit_operand(src, location); - self.visit_operand(dst, location); - self.visit_operand(count, location) + StatementKind::Intrinsic(box ref $($mutability)? intrinsic) => { + match intrinsic { + NonDivergingIntrinsic::Assume(op) => self.visit_operand(op, location), + NonDivergingIntrinsic::CopyNonOverlapping(CopyNonOverlapping { src, dst, count }) => { + self.visit_operand(src, location); + self.visit_operand(dst, location); + self.visit_operand(count, location); + } + } } StatementKind::Nop => {} } @@ -930,8 +869,9 @@ macro_rules! make_mir_visitor { self.visit_span($(& $mutability)? *span); drop(user_ty); // no visit method for this match literal { - ConstantKind::Ty(ct) => self.visit_const($(& $mutability)? *ct, location), + ConstantKind::Ty(_) => {} ConstantKind::Val(_, ty) => self.visit_ty($(& $mutability)? *ty, TyContext::Location(location)), + ConstantKind::Unevaluated(_, ty) => self.visit_ty($(& $mutability)? *ty, TyContext::Location(location)), } } @@ -969,9 +909,6 @@ macro_rules! make_mir_visitor { fn super_region(&mut self, _region: $(& $mutability)? ty::Region<'tcx>) { } - fn super_const(&mut self, _const: $(& $mutability)? ty::Const<'tcx>) { - } - fn super_substs(&mut self, _substs: & $($mutability)? SubstsRef<'tcx>) { } @@ -982,12 +919,7 @@ macro_rules! make_mir_visitor { body: &$($mutability)? Body<'tcx>, location: Location ) { - #[allow(unused_macro_rules)] - macro_rules! basic_blocks { - (mut) => (body.basic_blocks_mut()); - () => (body.basic_blocks()); - } - let basic_block = & $($mutability)? basic_blocks!($($mutability)?)[location.block]; + let basic_block = & $($mutability)? basic_blocks!(body, $($mutability, true)?)[location.block]; if basic_block.statements.len() == location.statement_index { if let Some(ref $($mutability)? terminator) = basic_block.terminator { self.visit_terminator(terminator, location) @@ -1002,6 +934,94 @@ macro_rules! make_mir_visitor { } } +macro_rules! basic_blocks { + ($body:ident, mut, true) => { + $body.basic_blocks.as_mut() + }; + ($body:ident, mut, false) => { + $body.basic_blocks.as_mut_preserves_cfg() + }; + ($body:ident,) => { + $body.basic_blocks + }; +} + +macro_rules! basic_blocks_iter { + ($body:ident, mut, $invalidate:tt) => { + basic_blocks!($body, mut, $invalidate).iter_enumerated_mut() + }; + ($body:ident,) => { + basic_blocks!($body,).iter_enumerated() + }; +} + +macro_rules! extra_body_methods { + (mut) => { + fn visit_body_preserves_cfg(&mut self, body: &mut Body<'tcx>) { + self.super_body_preserves_cfg(body); + } + + fn super_body_preserves_cfg(&mut self, body: &mut Body<'tcx>) { + super_body!(self, body, mut, false); + } + }; + () => {}; +} + +macro_rules! super_body { + ($self:ident, $body:ident, $($mutability:ident, $invalidate:tt)?) => { + let span = $body.span; + if let Some(gen) = &$($mutability)? $body.generator { + if let Some(yield_ty) = $(& $mutability)? gen.yield_ty { + $self.visit_ty( + yield_ty, + TyContext::YieldTy(SourceInfo::outermost(span)) + ); + } + } + + for (bb, data) in basic_blocks_iter!($body, $($mutability, $invalidate)?) { + $self.visit_basic_block_data(bb, data); + } + + for scope in &$($mutability)? 
$body.source_scopes { + $self.visit_source_scope_data(scope); + } + + $self.visit_ty( + $(& $mutability)? $body.return_ty(), + TyContext::ReturnTy(SourceInfo::outermost($body.span)) + ); + + for local in $body.local_decls.indices() { + $self.visit_local_decl(local, & $($mutability)? $body.local_decls[local]); + } + + #[allow(unused_macro_rules)] + macro_rules! type_annotations { + (mut) => ($body.user_type_annotations.iter_enumerated_mut()); + () => ($body.user_type_annotations.iter_enumerated()); + } + + for (index, annotation) in type_annotations!($($mutability)?) { + $self.visit_user_type_annotation( + index, annotation + ); + } + + for var_debug_info in &$($mutability)? $body.var_debug_info { + $self.visit_var_debug_info(var_debug_info); + } + + $self.visit_span($(& $mutability)? $body.span); + + for const_ in &$($mutability)? $body.required_consts { + let location = START_BLOCK.start_location(); + $self.visit_constant(const_, location); + } + } +} + macro_rules! visit_place_fns { (mut) => { fn tcx<'a>(&'a self) -> TyCtxt<'tcx>; @@ -1064,6 +1084,11 @@ macro_rules! visit_place_fns { self.visit_ty(&mut new_ty, TyContext::Location(location)); if ty != new_ty { Some(PlaceElem::Field(field, new_ty)) } else { None } } + PlaceElem::OpaqueCast(ty) => { + let mut new_ty = ty; + self.visit_ty(&mut new_ty, TyContext::Location(location)); + if ty != new_ty { Some(PlaceElem::OpaqueCast(new_ty)) } else { None } + } PlaceElem::Deref | PlaceElem::ConstantIndex { .. } | PlaceElem::Subslice { .. } @@ -1133,7 +1158,7 @@ macro_rules! visit_place_fns { location: Location, ) { match elem { - ProjectionElem::Field(_field, ty) => { + ProjectionElem::OpaqueCast(ty) | ProjectionElem::Field(_, ty) => { self.visit_ty(ty, TyContext::Location(location)); } ProjectionElem::Index(local) => { diff --git a/compiler/rustc_middle/src/query/mod.rs b/compiler/rustc_middle/src/query/mod.rs index d8483e7e4..3d720f09b 100644 --- a/compiler/rustc_middle/src/query/mod.rs +++ b/compiler/rustc_middle/src/query/mod.rs @@ -4,6 +4,9 @@ //! ["Queries: demand-driven compilation"](https://rustc-dev-guide.rust-lang.org/query.html). //! This chapter includes instructions for adding new queries. +use crate::ty::{self, print::describe_as_module, TyCtxt}; +use rustc_span::def_id::LOCAL_CRATE; + // Each of these queries corresponds to a function pointer field in the // `Providers` struct for requesting a value of that type, and a method // on `tcx: TyCtxt` (and `tcx.at(span)`) for doing that request in a way @@ -17,19 +20,19 @@ // as they will raise an fatal error on query cycles instead. rustc_queries! { query trigger_delay_span_bug(key: DefId) -> () { - desc { "trigger a delay span bug" } + desc { "triggering a delay span bug" } } - query resolutions(_: ()) -> &'tcx ty::ResolverOutputs { + query resolutions(_: ()) -> &'tcx ty::ResolverGlobalCtxt { eval_always no_hash - desc { "get the resolver outputs" } + desc { "getting the resolver outputs" } } query resolver_for_lowering(_: ()) -> &'tcx Steal<ty::ResolverAstLowering> { eval_always no_hash - desc { "get the resolver for lowering" } + desc { "getting the resolver for lowering" } } /// Return the span for a definition. @@ -37,7 +40,7 @@ rustc_queries! { /// This span is meant for dep-tracking rather than diagnostics. It should not be used outside /// of rustc_middle::hir::source_map. query source_span(key: LocalDefId) -> Span { - desc { "get the source span" } + desc { "getting the source span" } } /// Represents crate as a whole (as distinct from the top-level crate module). 
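The comment at the top of query/mod.rs above describes each entry of `rustc_queries!` as a provider function plus a memoizing accessor on `tcx`. As a rough conceptual sketch only (the names `MiniTcx`, `Providers::source_span`, and the `u32` key are stand-ins, not the real rustc_middle or rustc_query_system types):

use std::collections::HashMap;

// A toy model of a demand-driven, memoized query: the provider does the work,
// the accessor caches results by key.
struct Providers {
    // stand-in for `query source_span(key: LocalDefId) -> Span`
    source_span: fn(u32) -> String,
}

struct MiniTcx {
    providers: Providers,
    source_span_cache: HashMap<u32, String>,
}

impl MiniTcx {
    // Consults the cache first and only runs the provider on a miss, which is
    // what makes the query graph demand-driven.
    fn source_span(&mut self, key: u32) -> String {
        let provider = self.providers.source_span; // fn pointers are `Copy`
        self.source_span_cache
            .entry(key)
            .or_insert_with(|| provider(key))
            .clone()
    }
}

fn main() {
    let mut tcx = MiniTcx {
        providers: Providers { source_span: |def_id| format!("src/lib.rs:{def_id}") },
        source_span_cache: HashMap::new(),
    };
    assert_eq!(tcx.source_span(7), "src/lib.rs:7"); // computed by the provider
    assert_eq!(tcx.source_span(7), "src/lib.rs:7"); // served from the cache
}

The real system additionally tracks dependencies between queries, hashes results for incremental reuse, and (as the `arena_cache`/`cache_on_disk_if` modifiers above show) chooses per query how results are stored; none of that is modeled here.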
@@ -47,16 +50,16 @@ rustc_queries! { /// To avoid this fate, do not call `tcx.hir().krate()`; instead, /// prefer wrappers like `tcx.visit_all_items_in_krate()`. query hir_crate(key: ()) -> Crate<'tcx> { - storage(ArenaCacheSelector<'tcx>) + arena_cache eval_always - desc { "get the crate HIR" } + desc { "getting the crate HIR" } } /// All items in the crate. query hir_crate_items(_: ()) -> rustc_middle::hir::ModuleItems { - storage(ArenaCacheSelector<'tcx>) + arena_cache eval_always - desc { "get HIR crate items" } + desc { "getting HIR crate items" } } /// The items in a module. @@ -64,8 +67,8 @@ rustc_queries! { /// This can be conveniently accessed by `tcx.hir().visit_item_likes_in_module`. /// Avoid calling this query directly. query hir_module_items(key: LocalDefId) -> rustc_middle::hir::ModuleItems { - storage(ArenaCacheSelector<'tcx>) - desc { |tcx| "HIR module items in `{}`", tcx.def_path_str(key.to_def_id()) } + arena_cache + desc { |tcx| "getting HIR module items in `{}`", tcx.def_path_str(key.to_def_id()) } cache_on_disk_if { true } } @@ -73,8 +76,8 @@ rustc_queries! { /// /// This can be conveniently accessed by methods on `tcx.hir()`. /// Avoid calling this query directly. - query hir_owner(key: LocalDefId) -> Option<crate::hir::Owner<'tcx>> { - desc { |tcx| "HIR owner of `{}`", tcx.def_path_str(key.to_def_id()) } + query hir_owner(key: hir::OwnerId) -> Option<crate::hir::Owner<'tcx>> { + desc { |tcx| "getting HIR owner of `{}`", tcx.def_path_str(key.to_def_id()) } } /// Gives access to the HIR ID for the given `LocalDefId` owner `key`. @@ -82,31 +85,31 @@ rustc_queries! { /// This can be conveniently accessed by methods on `tcx.hir()`. /// Avoid calling this query directly. query local_def_id_to_hir_id(key: LocalDefId) -> hir::HirId { - desc { |tcx| "HIR ID of `{}`", tcx.def_path_str(key.to_def_id()) } + desc { |tcx| "getting HIR ID of `{}`", tcx.def_path_str(key.to_def_id()) } } /// Gives access to the HIR node's parent for the HIR owner `key`. /// /// This can be conveniently accessed by methods on `tcx.hir()`. /// Avoid calling this query directly. - query hir_owner_parent(key: LocalDefId) -> hir::HirId { - desc { |tcx| "HIR parent of `{}`", tcx.def_path_str(key.to_def_id()) } + query hir_owner_parent(key: hir::OwnerId) -> hir::HirId { + desc { |tcx| "getting HIR parent of `{}`", tcx.def_path_str(key.to_def_id()) } } /// Gives access to the HIR nodes and bodies inside the HIR owner `key`. /// /// This can be conveniently accessed by methods on `tcx.hir()`. /// Avoid calling this query directly. - query hir_owner_nodes(key: LocalDefId) -> hir::MaybeOwner<&'tcx hir::OwnerNodes<'tcx>> { - desc { |tcx| "HIR owner items in `{}`", tcx.def_path_str(key.to_def_id()) } + query hir_owner_nodes(key: hir::OwnerId) -> hir::MaybeOwner<&'tcx hir::OwnerNodes<'tcx>> { + desc { |tcx| "getting HIR owner items in `{}`", tcx.def_path_str(key.to_def_id()) } } /// Gives access to the HIR attributes inside the HIR owner `key`. /// /// This can be conveniently accessed by methods on `tcx.hir()`. /// Avoid calling this query directly. - query hir_attrs(key: LocalDefId) -> &'tcx hir::AttributeMap<'tcx> { - desc { |tcx| "HIR owner attributes in `{}`", tcx.def_path_str(key.to_def_id()) } + query hir_attrs(key: hir::OwnerId) -> &'tcx hir::AttributeMap<'tcx> { + desc { |tcx| "getting HIR owner attributes in `{}`", tcx.def_path_str(key.to_def_id()) } } /// Computes the `DefId` of the corresponding const parameter in case the `key` is a @@ -135,7 +138,7 @@ rustc_queries! 
{ /// Given the def_id of a const-generic parameter, computes the associated default const /// parameter. e.g. `fn example<const N: usize=3>` called on `N` would return `3`. query const_param_default(param: DefId) -> ty::Const<'tcx> { - desc { |tcx| "compute const default for a given parameter `{}`", tcx.def_path_str(param) } + desc { |tcx| "computing const default for a given parameter `{}`", tcx.def_path_str(param) } cache_on_disk_if { param.is_local() } separate_provide_extern } @@ -161,6 +164,14 @@ rustc_queries! { separate_provide_extern } + query collect_trait_impl_trait_tys(key: DefId) + -> Result<&'tcx FxHashMap<DefId, Ty<'tcx>>, ErrorGuaranteed> + { + desc { "comparing an impl and trait method signature, inferring any hidden `impl Trait` types in the process" } + cache_on_disk_if { key.is_local() } + separate_provide_extern + } + query analysis(key: ()) -> Result<(), ErrorGuaranteed> { eval_always desc { "running analysis passes on this crate" } @@ -189,7 +200,7 @@ rustc_queries! { /// associated generics. query generics_of(key: DefId) -> ty::Generics { desc { |tcx| "computing generics of `{}`", tcx.def_path_str(key) } - storage(ArenaCacheSelector<'tcx>) + arena_cache cache_on_disk_if { key.is_local() } separate_provide_extern } @@ -261,24 +272,29 @@ rustc_queries! { } query native_libraries(_: CrateNum) -> Vec<NativeLib> { - storage(ArenaCacheSelector<'tcx>) + arena_cache desc { "looking up the native libraries of a linked crate" } separate_provide_extern } - query lint_levels(_: ()) -> LintLevelMap { - storage(ArenaCacheSelector<'tcx>) - eval_always - desc { "computing the lint levels for items in this crate" } + query shallow_lint_levels_on(key: hir::OwnerId) -> rustc_middle::lint::ShallowLintLevelMap { + eval_always // fetches `resolutions` + arena_cache + desc { |tcx| "looking up lint levels for `{}`", tcx.def_path_str(key.to_def_id()) } + } + + query lint_expectations(_: ()) -> Vec<(LintExpectationId, LintExpectation)> { + arena_cache + desc { "computing `#[expect]`ed lints in this crate" } } query parent_module_from_def_id(key: LocalDefId) -> LocalDefId { eval_always - desc { |tcx| "parent module of `{}`", tcx.def_path_str(key.to_def_id()) } + desc { |tcx| "getting the parent module of `{}`", tcx.def_path_str(key.to_def_id()) } } query expn_that_defined(key: DefId) -> rustc_span::ExpnId { - desc { |tcx| "expansion that defined `{}`", tcx.def_path_str(key) } + desc { |tcx| "getting the expansion that defined `{}`", tcx.def_path_str(key) } separate_provide_extern } @@ -288,6 +304,32 @@ rustc_queries! 
{ separate_provide_extern } + /// Checks whether a type is representable or infinitely sized + query representability(_: LocalDefId) -> rustc_middle::ty::Representability { + desc { "checking if `{}` is representable", tcx.def_path_str(key.to_def_id()) } + // infinitely sized types will cause a cycle + cycle_delay_bug + // we don't want recursive representability calls to be forced with + // incremental compilation because, if a cycle occurs, we need the + // entire cycle to be in memory for diagnostics + anon + } + + /// An implementation detail for the `representability` query + query representability_adt_ty(_: Ty<'tcx>) -> rustc_middle::ty::Representability { + desc { "checking if `{}` is representable", key } + cycle_delay_bug + anon + } + + /// Set of param indexes for type params that are in the type's representation + query params_in_repr(key: DefId) -> rustc_index::bit_set::BitSet<u32> { + desc { "finding type parameters in the representation" } + arena_cache + no_hash + separate_provide_extern + } + /// Fetch the THIR for a given body. If typeck for that body failed, returns an empty `Thir`. query thir_body(key: ty::WithOptConstParam<LocalDefId>) -> Result<(&'tcx Steal<thir::Thir<'tcx>>, thir::ExprId), ErrorGuaranteed> @@ -300,7 +342,7 @@ rustc_queries! { /// Create a THIR tree for debugging. query thir_tree(key: ty::WithOptConstParam<LocalDefId>) -> String { no_hash - storage(ArenaCacheSelector<'tcx>) + arena_cache desc { |tcx| "constructing THIR tree for `{}`", tcx.def_path_str(key.did.to_def_id()) } } @@ -308,7 +350,7 @@ rustc_queries! { /// them. This includes all the body owners, but also things like struct /// constructors. query mir_keys(_: ()) -> rustc_data_structures::fx::FxIndexSet<LocalDefId> { - storage(ArenaCacheSelector<'tcx>) + arena_cache desc { "getting a list of all mir_keys" } } @@ -341,7 +383,7 @@ rustc_queries! { /// See the README for the `mir` module for details. query mir_const(key: ty::WithOptConstParam<LocalDefId>) -> &'tcx Steal<mir::Body<'tcx>> { desc { - |tcx| "processing MIR for {}`{}`", + |tcx| "preparing {}`{}` for borrow checking", if key.const_param_did.is_some() { "the const argument " } else { "" }, tcx.def_path_str(key.did.to_def_id()), } @@ -353,7 +395,7 @@ rustc_queries! { key: DefId ) -> Result<Option<&'tcx [ty::abstract_const::Node<'tcx>]>, ErrorGuaranteed> { desc { - |tcx| "building an abstract representation for {}", tcx.def_path_str(key), + |tcx| "building an abstract representation for `{}`", tcx.def_path_str(key), } separate_provide_extern } @@ -363,16 +405,16 @@ rustc_queries! { ) -> Result<Option<&'tcx [ty::abstract_const::Node<'tcx>]>, ErrorGuaranteed> { desc { |tcx| - "building an abstract representation for the const argument {}", + "building an abstract representation for the const argument `{}`", tcx.def_path_str(key.0.to_def_id()), } } query try_unify_abstract_consts(key: - ty::ParamEnvAnd<'tcx, (ty::Unevaluated<'tcx, ()>, ty::Unevaluated<'tcx, ()> + ty::ParamEnvAnd<'tcx, (ty::UnevaluatedConst<'tcx>, ty::UnevaluatedConst<'tcx> )>) -> bool { desc { - |tcx| "trying to unify the generic constants {} and {}", + |tcx| "trying to unify the generic constants `{}` and `{}`", tcx.def_path_str(key.value.0.def.did), tcx.def_path_str(key.value.1.def.did) } } @@ -394,7 +436,7 @@ rustc_queries! 
{ query mir_for_ctfe_of_const_arg(key: (LocalDefId, DefId)) -> &'tcx mir::Body<'tcx> { desc { - |tcx| "MIR for CTFE of the const argument `{}`", + |tcx| "caching MIR for CTFE of the const argument `{}`", tcx.def_path_str(key.0.to_def_id()) } } @@ -406,7 +448,7 @@ rustc_queries! { ) { no_hash desc { - |tcx| "processing {}`{}`", + |tcx| "processing MIR for {}`{}`", if key.const_param_did.is_some() { "the const argument " } else { "" }, tcx.def_path_str(key.did.to_def_id()), } @@ -415,9 +457,9 @@ rustc_queries! { query symbols_for_closure_captures( key: (LocalDefId, LocalDefId) ) -> Vec<rustc_span::Symbol> { - storage(ArenaCacheSelector<'tcx>) + arena_cache desc { - |tcx| "symbols for captures of closure `{}` in `{}`", + |tcx| "finding symbols for captures of closure `{}` in `{}`", tcx.def_path_str(key.1.to_def_id()), tcx.def_path_str(key.0.to_def_id()) } @@ -435,7 +477,7 @@ rustc_queries! { /// MIR pass (assuming the -Cinstrument-coverage option is enabled). query coverageinfo(key: ty::InstanceDef<'tcx>) -> mir::CoverageInfo { desc { |tcx| "retrieving coverage info from MIR for `{}`", tcx.def_path_str(key.def_id()) } - storage(ArenaCacheSelector<'tcx>) + arena_cache } /// Returns the `CodeRegions` for a function that has instrumented coverage, in case the @@ -445,7 +487,7 @@ rustc_queries! { |tcx| "retrieving the covered `CodeRegion`s, if instrumented, for `{}`", tcx.def_path_str(key) } - storage(ArenaCacheSelector<'tcx>) + arena_cache cache_on_disk_if { key.is_local() } } @@ -479,12 +521,12 @@ rustc_queries! { // queries). Making it anonymous avoids hashing the result, which // may save a bit of time. anon - desc { "erasing regions from `{:?}`", ty } + desc { "erasing regions from `{}`", ty } } query wasm_import_module_map(_: CrateNum) -> FxHashMap<DefId, String> { - storage(ArenaCacheSelector<'tcx>) - desc { "wasm import module map" } + arena_cache + desc { "getting wasm import module map" } } /// Maps from the `DefId` of an item (trait/struct/enum/fn) to the @@ -559,7 +601,7 @@ rustc_queries! { query trait_def(key: DefId) -> ty::TraitDef { desc { |tcx| "computing trait definition for `{}`", tcx.def_path_str(key) } - storage(ArenaCacheSelector<'tcx>) + arena_cache cache_on_disk_if { key.is_local() } separate_provide_extern } @@ -574,16 +616,8 @@ rustc_queries! { separate_provide_extern } - // The cycle error here should be reported as an error by `check_representable`. - // We consider the type as Sized in the meanwhile to avoid - // further errors (done in impl Value for AdtSizedConstraint). - // Use `cycle_delay_bug` to delay the cycle error here to be emitted later - // in case we accidentally otherwise don't emit an error. - query adt_sized_constraint( - key: DefId - ) -> AdtSizedConstraint<'tcx> { + query adt_sized_constraint(key: DefId) -> &'tcx [Ty<'tcx>] { desc { |tcx| "computing `Sized` constraints for `{}`", tcx.def_path_str(key) } - cycle_delay_bug } query adt_dtorck_constraint( @@ -637,7 +671,7 @@ rustc_queries! { /// Gets a map with the variance of every item; use `item_variance` instead. query crate_variances(_: ()) -> ty::CrateVariancesMap<'tcx> { - storage(ArenaCacheSelector<'tcx>) + arena_cache desc { "computing the variances for items in this crate" } } @@ -650,7 +684,7 @@ rustc_queries! { /// Maps from thee `DefId` of a type to its (inferred) outlives. 
query inferred_outlives_crate(_: ()) -> ty::CratePredicatesMap<'tcx> { - storage(ArenaCacheSelector<'tcx>) + arena_cache desc { "computing the inferred outlives predicates for items in this crate" } } @@ -664,15 +698,15 @@ rustc_queries! { /// Maps from a trait item to the trait item "descriptor". query associated_item(key: DefId) -> ty::AssocItem { desc { |tcx| "computing associated item data for `{}`", tcx.def_path_str(key) } - storage(ArenaCacheSelector<'tcx>) + arena_cache cache_on_disk_if { key.is_local() } separate_provide_extern } /// Collects the associated items defined on a trait or impl. query associated_items(key: DefId) -> ty::AssocItems<'tcx> { - storage(ArenaCacheSelector<'tcx>) - desc { |tcx| "collecting associated items of {}", tcx.def_path_str(key) } + arena_cache + desc { |tcx| "collecting associated items of `{}`", tcx.def_path_str(key) } } /// Maps from associated items on a trait to the corresponding associated @@ -697,8 +731,8 @@ rustc_queries! { /// The map returned for `tcx.impl_item_implementor_ids(impl_id)` would be ///`{ trait_f: impl_f, trait_g: impl_g }` query impl_item_implementor_ids(impl_id: DefId) -> FxHashMap<DefId, DefId> { - storage(ArenaCacheSelector<'tcx>) - desc { |tcx| "comparing impl items against trait for {}", tcx.def_path_str(impl_id) } + arena_cache + desc { |tcx| "comparing impl items against trait for `{}`", tcx.def_path_str(impl_id) } } /// Given an `impl_id`, return the trait it implements. @@ -765,11 +799,20 @@ rustc_queries! { desc { |tcx| "processing `{}`", tcx.def_path_str(key.to_def_id()) } } + /// Returns the types assumed to be well formed while "inside" of the given item. + /// + /// Note that we've liberated the late bound regions of function signatures, so + /// this can not be used to check whether these types are well formed. + query assumed_wf_types(key: DefId) -> &'tcx ty::List<Ty<'tcx>> { + desc { |tcx| "computing the implied bounds of `{}`", tcx.def_path_str(key) } + } + /// Computes the signature of the function. query fn_sig(key: DefId) -> ty::PolyFnSig<'tcx> { desc { |tcx| "computing function signature of `{}`", tcx.def_path_str(key) } cache_on_disk_if { key.is_local() } separate_provide_extern + cycle_delay_bug } /// Performs lint checking for the module. @@ -809,8 +852,8 @@ rustc_queries! { desc { |tcx| "checking privacy in {}", describe_as_module(key, tcx) } } - query check_mod_liveness(key: LocalDefId) -> () { - desc { |tcx| "checking liveness of variables in {}", describe_as_module(key, tcx) } + query check_liveness(key: DefId) { + desc { |tcx| "checking liveness of variables in `{}`", tcx.def_path_str(key) } } /// Return the live symbols in the crate for dead code check. @@ -821,8 +864,8 @@ rustc_queries! { FxHashSet<LocalDefId>, FxHashMap<LocalDefId, Vec<(DefId, DefId)>> ) { - storage(ArenaCacheSelector<'tcx>) - desc { "find live symbols in crate" } + arena_cache + desc { "finding live symbols in crate" } } query check_mod_deathness(key: LocalDefId) -> () { @@ -867,17 +910,10 @@ rustc_queries! 
{ query diagnostic_only_typeck(key: LocalDefId) -> &'tcx ty::TypeckResults<'tcx> { desc { |tcx| "type-checking `{}`", tcx.def_path_str(key.to_def_id()) } cache_on_disk_if { true } - load_cached(tcx, id) { - let typeck_results: Option<ty::TypeckResults<'tcx>> = tcx - .on_disk_cache().as_ref() - .and_then(|c| c.try_load_query_result(*tcx, id)); - - typeck_results.map(|x| &*tcx.arena.alloc(x)) - } } - query used_trait_imports(key: LocalDefId) -> &'tcx FxHashSet<LocalDefId> { - desc { |tcx| "used_trait_imports `{}`", tcx.def_path_str(key.to_def_id()) } + query used_trait_imports(key: LocalDefId) -> &'tcx UnordSet<LocalDefId> { + desc { |tcx| "finding used_trait_imports `{}`", tcx.def_path_str(key.to_def_id()) } cache_on_disk_if { true } } @@ -905,8 +941,8 @@ rustc_queries! { /// Gets a complete map from all types to their inherent impls. /// Not meant to be used directly outside of coherence. query crate_inherent_impls(k: ()) -> CrateInherentImpls { - storage(ArenaCacheSelector<'tcx>) - desc { "all inherent impls defined in crate" } + arena_cache + desc { "finding all inherent impls defined in crate" } } /// Checks all types in the crate for overlap in their inherent impls. Reports errors. @@ -993,8 +1029,10 @@ rustc_queries! { /// Tries to destructure an `mir::ConstantKind` ADT or array into its variant index /// and its field values. - query try_destructure_mir_constant(key: ty::ParamEnvAnd<'tcx, mir::ConstantKind<'tcx>>) -> Option<mir::DestructuredMirConstant<'tcx>> { - desc { "destructuring mir constant"} + query try_destructure_mir_constant( + key: ty::ParamEnvAnd<'tcx, mir::ConstantKind<'tcx>> + ) -> Option<mir::DestructuredConstant<'tcx>> { + desc { "destructuring MIR constant"} remap_env_constness } @@ -1003,12 +1041,12 @@ rustc_queries! { query deref_mir_constant( key: ty::ParamEnvAnd<'tcx, mir::ConstantKind<'tcx>> ) -> mir::ConstantKind<'tcx> { - desc { "dereferencing mir constant" } + desc { "dereferencing MIR constant" } remap_env_constness } query const_caller_location(key: (rustc_span::Symbol, u32, u32)) -> ConstValue<'tcx> { - desc { "get a &core::panic::Location referring to a span" } + desc { "getting a &core::panic::Location referring to a span" } } // FIXME get rid of this with valtrees @@ -1027,10 +1065,10 @@ rustc_queries! { cache_on_disk_if { key.is_local() } } - /// Performs part of the privacy check and computes "access levels". - query privacy_access_levels(_: ()) -> &'tcx AccessLevels { + /// Performs part of the privacy check and computes effective visibilities. + query effective_visibilities(_: ()) -> &'tcx EffectiveVisibilities { eval_always - desc { "privacy access levels" } + desc { "checking effective visibilities" } } query check_private_in_public(_: ()) -> () { eval_always @@ -1038,7 +1076,7 @@ rustc_queries! { } query reachable_set(_: ()) -> FxHashSet<LocalDefId> { - storage(ArenaCacheSelector<'tcx>) + arena_cache desc { "reachability" } } @@ -1050,7 +1088,7 @@ rustc_queries! { /// Generates a MIR body for the shim. query mir_shims(key: ty::InstanceDef<'tcx>) -> mir::Body<'tcx> { - storage(ArenaCacheSelector<'tcx>) + arena_cache desc { |tcx| "generating MIR shim for `{}`", tcx.def_path_str(key.def_id()) } } @@ -1094,6 +1132,11 @@ rustc_queries! 
{ separate_provide_extern } + query lookup_default_body_stability(def_id: DefId) -> Option<attr::DefaultBodyStability> { + desc { |tcx| "looking up default body stability of `{}`", tcx.def_path_str(def_id) } + separate_provide_extern + } + query should_inherit_track_caller(def_id: DefId) -> bool { desc { |tcx| "computing should_inherit_track_caller of `{}`", tcx.def_path_str(def_id) } } @@ -1109,6 +1152,11 @@ rustc_queries! { desc { |tcx| "checking whether `{}` is `doc(hidden)`", tcx.def_path_str(def_id) } } + /// Determines whether an item is annotated with `doc(notable_trait)`. + query is_doc_notable_trait(def_id: DefId) -> bool { + desc { |tcx| "checking whether `{}` is `doc(notable_trait)`", tcx.def_path_str(def_id) } + } + /// Returns the attributes on the item at `def_id`. /// /// Do not use this directly, use `tcx.get_attrs` instead. @@ -1119,7 +1167,7 @@ rustc_queries! { query codegen_fn_attrs(def_id: DefId) -> CodegenFnAttrs { desc { |tcx| "computing codegen attributes of `{}`", tcx.def_path_str(def_id) } - storage(ArenaCacheSelector<'tcx>) + arena_cache cache_on_disk_if { def_id.is_local() } separate_provide_extern } @@ -1136,8 +1184,8 @@ rustc_queries! { /// Gets the rendered value of the specified constant or associated constant. /// Used by rustdoc. query rendered_const(def_id: DefId) -> String { - storage(ArenaCacheSelector<'tcx>) - desc { |tcx| "rendering constant intializer of `{}`", tcx.def_path_str(def_id) } + arena_cache + desc { |tcx| "rendering constant initializer of `{}`", tcx.def_path_str(def_id) } cache_on_disk_if { def_id.is_local() } separate_provide_extern } @@ -1148,29 +1196,29 @@ rustc_queries! { } query is_ctfe_mir_available(key: DefId) -> bool { - desc { |tcx| "checking if item has ctfe mir available: `{}`", tcx.def_path_str(key) } + desc { |tcx| "checking if item has CTFE MIR available: `{}`", tcx.def_path_str(key) } cache_on_disk_if { key.is_local() } separate_provide_extern } query is_mir_available(key: DefId) -> bool { - desc { |tcx| "checking if item has mir available: `{}`", tcx.def_path_str(key) } + desc { |tcx| "checking if item has MIR available: `{}`", tcx.def_path_str(key) } cache_on_disk_if { key.is_local() } separate_provide_extern } query own_existential_vtable_entries( - key: ty::PolyExistentialTraitRef<'tcx> + key: DefId ) -> &'tcx [DefId] { - desc { |tcx| "finding all existential vtable entries for trait {}", tcx.def_path_str(key.def_id()) } + desc { |tcx| "finding all existential vtable entries for trait `{}`", tcx.def_path_str(key) } } query vtable_entries(key: ty::PolyTraitRef<'tcx>) -> &'tcx [ty::VtblEntry<'tcx>] { - desc { |tcx| "finding all vtable entries for trait {}", tcx.def_path_str(key.def_id()) } + desc { |tcx| "finding all vtable entries for trait `{}`", tcx.def_path_str(key.def_id()) } } - query vtable_trait_upcasting_coercion_new_vptr_slot(key: (ty::Ty<'tcx>, ty::Ty<'tcx>)) -> Option<usize> { - desc { |tcx| "finding the slot within vtable for trait object {} vtable ptr during trait upcasting coercion from {} vtable", + query vtable_trait_upcasting_coercion_new_vptr_slot(key: (Ty<'tcx>, Ty<'tcx>)) -> Option<usize> { + desc { |tcx| "finding the slot within vtable for trait object `{}` vtable ptr during trait upcasting coercion from `{}` vtable", key.1, key.0 } } @@ -1181,34 +1229,31 @@ rustc_queries! 
{ } } - query codegen_fulfill_obligation( + query codegen_select_candidate( key: (ty::ParamEnv<'tcx>, ty::PolyTraitRef<'tcx>) ) -> Result<&'tcx ImplSource<'tcx, ()>, traits::CodegenObligationError> { cache_on_disk_if { true } - desc { |tcx| - "checking if `{}` fulfills its obligations", - tcx.def_path_str(key.1.def_id()) - } + desc { |tcx| "computing candidate for `{}`", key.1 } } /// Return all `impl` blocks in the current crate. query all_local_trait_impls(_: ()) -> &'tcx rustc_data_structures::fx::FxIndexMap<DefId, Vec<LocalDefId>> { - desc { "local trait impls" } + desc { "finding local trait impls" } } /// Given a trait `trait_id`, return all known `impl` blocks. query trait_impls_of(trait_id: DefId) -> ty::trait_def::TraitImpls { - storage(ArenaCacheSelector<'tcx>) - desc { |tcx| "trait impls of `{}`", tcx.def_path_str(trait_id) } + arena_cache + desc { |tcx| "finding trait impls of `{}`", tcx.def_path_str(trait_id) } } query specialization_graph_of(trait_id: DefId) -> specialization_graph::Graph { - storage(ArenaCacheSelector<'tcx>) + arena_cache desc { |tcx| "building specialization graph of trait `{}`", tcx.def_path_str(trait_id) } cache_on_disk_if { true } } query object_safety_violations(trait_id: DefId) -> &'tcx [traits::ObjectSafetyViolation] { - desc { |tcx| "determine object safety of trait `{}`", tcx.def_path_str(trait_id) } + desc { |tcx| "determining object safety of trait `{}`", tcx.def_path_str(trait_id) } } /// Gets the ParameterEnvironment for a given item; this environment @@ -1266,7 +1311,7 @@ rustc_queries! { /// correctly. query has_structural_eq_impls(ty: Ty<'tcx>) -> bool { desc { - "computing whether `{:?}` implements `PartialStructuralEq` and `StructuralEq`", + "computing whether `{}` implements `PartialStructuralEq` and `StructuralEq`", ty } } @@ -1295,6 +1340,7 @@ rustc_queries! { query layout_of( key: ty::ParamEnvAnd<'tcx, Ty<'tcx>> ) -> Result<ty::layout::TyAndLayout<'tcx>, ty::layout::LayoutError<'tcx>> { + depth_limit desc { "computing layout of `{}`", key.value } remap_env_constness } @@ -1324,13 +1370,13 @@ rustc_queries! { query dylib_dependency_formats(_: CrateNum) -> &'tcx [(CrateNum, LinkagePreference)] { - desc { "dylib dependency formats of crate" } + desc { "getting dylib dependency formats of crate" } separate_provide_extern } query dependency_formats(_: ()) -> Lrc<crate::middle::dependency_format::Dependencies> { - storage(ArenaCacheSelector<'tcx>) - desc { "get the linkage format of all dependencies" } + arena_cache + desc { "getting the linkage format of all dependencies" } } query is_compiler_builtins(_: CrateNum) -> bool { @@ -1352,31 +1398,31 @@ rustc_queries! 
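[Illustration, not part of this diff: `layout_of` above (now marked `depth_limit`) computes type layouts. A minimal sketch of what that computation means at the language surface, using only `std::mem`.]

```rust
// Minimal sketch (not compiler code): `size_of`/`align_of` expose the results
// of the layout computation for monomorphic types.
use std::mem::{align_of, size_of};

fn main() {
    // Niche optimization: `None` reuses the null pointer value.
    assert_eq!(size_of::<Option<&u8>>(), size_of::<&u8>());
    // Padding and alignment are part of the computed layout.
    println!("size = {}, align = {}", size_of::<(u8, u32)>(), align_of::<(u8, u32)>());
}
```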
{ } query is_profiler_runtime(_: CrateNum) -> bool { fatal_cycle - desc { "query a crate is `#![profiler_runtime]`" } + desc { "checking if a crate is `#![profiler_runtime]`" } separate_provide_extern } query has_ffi_unwind_calls(key: LocalDefId) -> bool { - desc { |tcx| "check if `{}` contains FFI-unwind calls", tcx.def_path_str(key.to_def_id()) } + desc { |tcx| "checking if `{}` contains FFI-unwind calls", tcx.def_path_str(key.to_def_id()) } cache_on_disk_if { true } } query required_panic_strategy(_: CrateNum) -> Option<PanicStrategy> { fatal_cycle - desc { "query a crate's required panic strategy" } + desc { "getting a crate's required panic strategy" } separate_provide_extern } query panic_in_drop_strategy(_: CrateNum) -> PanicStrategy { fatal_cycle - desc { "query a crate's configured panic-in-drop strategy" } + desc { "getting a crate's configured panic-in-drop strategy" } separate_provide_extern } query is_no_builtins(_: CrateNum) -> bool { fatal_cycle - desc { "test whether a crate has `#![no_builtins]`" } + desc { "getting whether a crate has `#![no_builtins]`" } separate_provide_extern } query symbol_mangling_version(_: CrateNum) -> SymbolManglingVersion { fatal_cycle - desc { "query a crate's symbol mangling version" } + desc { "getting a crate's symbol mangling version" } separate_provide_extern } @@ -1389,9 +1435,9 @@ rustc_queries! { query specializes(_: (DefId, DefId)) -> bool { desc { "computing whether impls specialize one another" } } - query in_scope_traits_map(_: LocalDefId) + query in_scope_traits_map(_: hir::OwnerId) -> Option<&'tcx FxHashMap<ItemLocalId, Box<[TraitCandidate]>>> { - desc { "traits in scope at a block" } + desc { "getting traits in scope at a block" } } query module_reexports(def_id: LocalDefId) -> Option<&'tcx [ModChild]> { @@ -1404,7 +1450,7 @@ rustc_queries! { separate_provide_extern } - query check_well_formed(key: LocalDefId) -> () { + query check_well_formed(key: hir::OwnerId) -> () { desc { |tcx| "checking that `{}` is well-formed", tcx.def_path_str(key.to_def_id()) } } @@ -1422,7 +1468,7 @@ rustc_queries! { // like the compiler-generated `main` function and so on. query reachable_non_generics(_: CrateNum) -> DefIdMap<SymbolExportInfo> { - storage(ArenaCacheSelector<'tcx>) + arena_cache desc { "looking up the exported symbols of a crate" } separate_provide_extern } @@ -1445,7 +1491,7 @@ rustc_queries! { /// `upstream_monomorphizations_for`, `upstream_drop_glue_for`, or, even /// better, `Instance::upstream_monomorphization()`. query upstream_monomorphizations(_: ()) -> DefIdMap<FxHashMap<SubstsRef<'tcx>, CrateNum>> { - storage(ArenaCacheSelector<'tcx>) + arena_cache desc { "collecting available upstream monomorphizations" } } @@ -1459,7 +1505,7 @@ rustc_queries! { query upstream_monomorphizations_for(def_id: DefId) -> Option<&'tcx FxHashMap<SubstsRef<'tcx>, CrateNum>> { - storage(ArenaCacheSelector<'tcx>) + arena_cache desc { |tcx| "collecting available upstream monomorphizations for `{}`", tcx.def_path_str(def_id), @@ -1487,7 +1533,7 @@ rustc_queries! { } query foreign_modules(_: CrateNum) -> FxHashMap<DefId, ForeignModule> { - storage(ArenaCacheSelector<'tcx>) + arena_cache desc { "looking up the foreign modules of a linked crate" } separate_provide_extern } @@ -1513,13 +1559,13 @@ rustc_queries! 
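[Illustration, not part of this diff: the `required_panic_strategy` and `panic_in_drop_strategy` queries above track whether a crate unwinds or aborts on panic. A minimal sketch of the observable difference, assuming the default `panic = "unwind"` profile.]

```rust
// Minimal sketch (not compiler code): under `panic = "unwind"` this panic is
// caught; under `panic = "abort"` the process would terminate instead.
use std::panic;

fn main() {
    let result = panic::catch_unwind(|| {
        panic!("boom");
    });
    assert!(result.is_err());
    println!("caught the panic, so this build unwinds");
}
```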
{ separate_provide_extern } query extra_filename(_: CrateNum) -> String { - storage(ArenaCacheSelector<'tcx>) + arena_cache eval_always desc { "looking up the extra filename for a crate" } separate_provide_extern } query crate_extern_paths(_: CrateNum) -> Vec<PathBuf> { - storage(ArenaCacheSelector<'tcx>) + arena_cache eval_always desc { "looking up the paths for extern crates" } separate_provide_extern @@ -1541,15 +1587,8 @@ rustc_queries! { separate_provide_extern } - query is_dllimport_foreign_item(def_id: DefId) -> bool { - desc { |tcx| "is_dllimport_foreign_item({})", tcx.def_path_str(def_id) } - } - query is_statically_included_foreign_item(def_id: DefId) -> bool { - desc { |tcx| "is_statically_included_foreign_item({})", tcx.def_path_str(def_id) } - } - query native_library_kind(def_id: DefId) - -> Option<NativeLibKind> { - desc { |tcx| "native_library_kind({})", tcx.def_path_str(def_id) } + query native_library(def_id: DefId) -> Option<&'tcx NativeLib> { + desc { |tcx| "getting the native library for `{}`", tcx.def_path_str(def_id) } } /// Does lifetime resolution, but does not descend into trait items. This @@ -1558,48 +1597,59 @@ rustc_queries! { /// the same lifetimes and is responsible for diagnostics. /// See `rustc_resolve::late::lifetimes for details. query resolve_lifetimes_trait_definition(_: LocalDefId) -> ResolveLifetimes { - storage(ArenaCacheSelector<'tcx>) + arena_cache desc { "resolving lifetimes for a trait definition" } } /// Does lifetime resolution on items. Importantly, we can't resolve /// lifetimes directly on things like trait methods, because of trait params. /// See `rustc_resolve::late::lifetimes for details. query resolve_lifetimes(_: LocalDefId) -> ResolveLifetimes { - storage(ArenaCacheSelector<'tcx>) + arena_cache desc { "resolving lifetimes" } } - query named_region_map(_: LocalDefId) -> + query named_region_map(_: hir::OwnerId) -> Option<&'tcx FxHashMap<ItemLocalId, Region>> { desc { "looking up a named region" } } query is_late_bound_map(_: LocalDefId) -> Option<&'tcx FxIndexSet<LocalDefId>> { desc { "testing if a region is late bound" } } - /// For a given item (like a struct), gets the default lifetimes to be used + /// For a given item's generic parameter, gets the default lifetimes to be used /// for each parameter if a trait object were to be passed for that parameter. - /// For example, for `struct Foo<'a, T, U>`, this would be `['static, 'static]`. - /// For `struct Foo<'a, T: 'a, U>`, this would instead be `['a, 'static]`. - query object_lifetime_defaults(_: LocalDefId) -> Option<&'tcx [ObjectLifetimeDefault]> { - desc { "looking up lifetime defaults for a region on an item" } + /// For example, for `T` in `struct Foo<'a, T>`, this would be `'static`. + /// For `T` in `struct Foo<'a, T: 'a>`, this would instead be `'a`. + /// This query will panic if passed something that is not a type parameter. + query object_lifetime_default(key: DefId) -> ObjectLifetimeDefault { + desc { "looking up lifetime defaults for generic parameter `{}`", tcx.def_path_str(key) } + separate_provide_extern } - query late_bound_vars_map(_: LocalDefId) + query late_bound_vars_map(_: hir::OwnerId) -> Option<&'tcx FxHashMap<ItemLocalId, Vec<ty::BoundVariableKind>>> { desc { "looking up late bound vars" } } - query visibility(def_id: DefId) -> ty::Visibility { + /// Computes the visibility of the provided `def_id`. + /// + /// If the item from the `def_id` doesn't have a visibility, it will panic. 
For example + /// a generic type parameter will panic if you call this method on it: + /// + /// ``` + /// pub trait Foo<T: Debug> {} + /// ``` + /// + /// In here, if you call `visibility` on `T`, it'll panic. + query visibility(def_id: DefId) -> ty::Visibility<DefId> { desc { |tcx| "computing visibility of `{}`", tcx.def_path_str(def_id) } separate_provide_extern } - /// Computes the set of modules from which this type is visibly uninhabited. - /// To check whether a type is uninhabited at all (not just from a given module), you could - /// check whether the forest is empty. - query type_uninhabited_from( - key: ty::ParamEnvAnd<'tcx, Ty<'tcx>> - ) -> ty::inhabitedness::DefIdForest<'tcx> { - desc { "computing the inhabitedness of `{:?}`", key } - remap_env_constness + query inhabited_predicate_adt(key: DefId) -> ty::inhabitedness::InhabitedPredicate<'tcx> { + desc { "computing the uninhabited predicate of `{:?}`", key } + } + + /// Do not call this query directly: invoke `Ty::inhabited_predicate` instead. + query inhabited_predicate_type(key: Ty<'tcx>) -> ty::inhabitedness::InhabitedPredicate<'tcx> { + desc { "computing the uninhabited predicate of `{}`", key } } query dep_kind(_: CrateNum) -> CrateDepKind { @@ -1623,7 +1673,7 @@ rustc_queries! { } query lib_features(_: ()) -> LibFeatures { - storage(ArenaCacheSelector<'tcx>) + arena_cache desc { "calculating the lib features map" } } query defined_lib_features(_: CrateNum) -> &'tcx [(Symbol, Option<Symbol>)] { @@ -1631,25 +1681,25 @@ rustc_queries! { separate_provide_extern } query stability_implications(_: CrateNum) -> FxHashMap<Symbol, Symbol> { - storage(ArenaCacheSelector<'tcx>) + arena_cache desc { "calculating the implications between `#[unstable]` features defined in a crate" } separate_provide_extern } /// Whether the function is an intrinsic query is_intrinsic(def_id: DefId) -> bool { - desc { |tcx| "is_intrinsic({})", tcx.def_path_str(def_id) } + desc { |tcx| "checking whether `{}` is an intrinsic", tcx.def_path_str(def_id) } separate_provide_extern } /// Returns the lang items defined in another crate by loading it from metadata. query get_lang_items(_: ()) -> LanguageItems { - storage(ArenaCacheSelector<'tcx>) + arena_cache eval_always desc { "calculating the lang items map" } } /// Returns all diagnostic items defined in all crates. query all_diagnostic_items(_: ()) -> rustc_hir::diagnostic_items::DiagnosticItems { - storage(ArenaCacheSelector<'tcx>) + arena_cache eval_always desc { "calculating the diagnostic items map" } } @@ -1662,7 +1712,7 @@ rustc_queries! { /// Returns the diagnostic items defined in a crate. query diagnostic_items(_: CrateNum) -> rustc_hir::diagnostic_items::DiagnosticItems { - storage(ArenaCacheSelector<'tcx>) + arena_cache desc { "calculating the diagnostic items map in a crate" } separate_provide_extern } @@ -1672,11 +1722,11 @@ rustc_queries! { separate_provide_extern } query visible_parent_map(_: ()) -> DefIdMap<DefId> { - storage(ArenaCacheSelector<'tcx>) + arena_cache desc { "calculating the visible parent map" } } query trimmed_def_paths(_: ()) -> FxHashMap<DefId, Symbol> { - storage(ArenaCacheSelector<'tcx>) + arena_cache desc { "calculating trimmed def paths" } } query missing_extern_crate_item(_: CrateNum) -> bool { @@ -1685,14 +1735,14 @@ rustc_queries! 
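[Illustration, not part of this diff: the new `inhabited_predicate_adt`/`inhabited_predicate_type` queries above model whether a type has any values. A minimal sketch with a made-up uninhabited enum.]

```rust
// Minimal sketch (not compiler code): `Void` is uninhabited, so matching on it
// needs no arms and `Err(Void)` can be dismissed without a wildcard.
enum Void {}

fn unwrap_ok(res: Result<u32, Void>) -> u32 {
    match res {
        Ok(v) => v,
        Err(e) => match e {}, // uninhabited: no arms required
    }
}

fn main() {
    println!("{}", unwrap_ok(Ok(42)));
}
```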
{ separate_provide_extern } query used_crate_source(_: CrateNum) -> Lrc<CrateSource> { - storage(ArenaCacheSelector<'tcx>) + arena_cache eval_always desc { "looking at the source for a crate" } separate_provide_extern } /// Returns the debugger visualizers defined for this crate. query debugger_visualizers(_: CrateNum) -> Vec<rustc_span::DebuggerVisualizerFile> { - storage(ArenaCacheSelector<'tcx>) + arena_cache desc { "looking up the debugger visualizers for this crate" } separate_provide_extern } @@ -1704,12 +1754,12 @@ rustc_queries! { /// is marked as a private dependency query is_private_dep(c: CrateNum) -> bool { eval_always - desc { "check whether crate {} is a private dependency", c } + desc { "checking whether crate `{}` is a private dependency", c } separate_provide_extern } query allocator_kind(_: ()) -> Option<AllocatorKind> { eval_always - desc { "allocator kind for the current crate" } + desc { "getting the allocator kind for the current crate" } } query upvars_mentioned(def_id: DefId) -> Option<&'tcx FxIndexMap<hir::HirId, hir::Upvar>> { @@ -1722,11 +1772,11 @@ rustc_queries! { desc { "looking up all possibly unused extern crates" } } query names_imported_by_glob_use(def_id: LocalDefId) -> &'tcx FxHashSet<Symbol> { - desc { |tcx| "names_imported_by_glob_use for `{}`", tcx.def_path_str(def_id.to_def_id()) } + desc { |tcx| "finding names imported by glob use for `{}`", tcx.def_path_str(def_id.to_def_id()) } } query stability_index(_: ()) -> stability::Index { - storage(ArenaCacheSelector<'tcx>) + arena_cache eval_always desc { "calculating the stability index for the local crate" } } @@ -1748,7 +1798,7 @@ rustc_queries! { /// correspond to a publicly visible symbol in `cnum` machine code. /// - The `exported_symbols` sets of different crates do not intersect. query exported_symbols(cnum: CrateNum) -> &'tcx [(ExportedSymbol<'tcx>, SymbolExportInfo)] { - desc { "exported_symbols" } + desc { "collecting exported symbols for crate `{}`", cnum} cache_on_disk_if { *cnum == LOCAL_CRATE } separate_provide_extern } @@ -1757,6 +1807,7 @@ rustc_queries! { eval_always desc { "collect_and_partition_mono_items" } } + query is_codegened_item(def_id: DefId) -> bool { desc { |tcx| "determining whether `{}` needs codegen", tcx.def_path_str(def_id) } } @@ -1764,12 +1815,13 @@ rustc_queries! { /// All items participating in code generation together with items inlined into them. query codegened_and_inlined_items(_: ()) -> &'tcx DefIdSet { eval_always - desc { "codegened_and_inlined_items" } + desc { "collecting codegened and inlined items" } } - query codegen_unit(_: Symbol) -> &'tcx CodegenUnit<'tcx> { - desc { "codegen_unit" } + query codegen_unit(sym: Symbol) -> &'tcx CodegenUnit<'tcx> { + desc { "getting codegen unit `{sym}`" } } + query unused_generic_params(key: ty::InstanceDef<'tcx>) -> FiniteBitSet<u32> { cache_on_disk_if { key.def_id().is_local() } desc { @@ -1778,6 +1830,7 @@ rustc_queries! { } separate_provide_extern } + query backend_optimization_level(_: ()) -> OptLevel { desc { "optimization level used by backend" } } @@ -1788,7 +1841,7 @@ rustc_queries! { /// has been destroyed. query output_filenames(_: ()) -> &'tcx Arc<OutputFilenames> { eval_always - desc { "output_filenames" } + desc { "getting output filenames" } } /// Do not call this query directly: invoke `normalize` instead. @@ -1798,7 +1851,7 @@ rustc_queries! 
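[Illustration, not part of this diff: `upvars_mentioned` above records which variables a closure captures. A minimal sketch of the two capture modes involved; the variable names are made up.]

```rust
// Minimal sketch (not compiler code): this closure captures `count` by mutable
// reference and `name` by shared reference.
fn main() {
    let name = String::from("world");
    let mut count = 0;
    let mut greet = || {
        count += 1; // mutable-borrow capture
        println!("hello, {name} (call #{count})"); // shared-borrow capture
    };
    greet();
    greet();
}
```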
{ &'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, NormalizationResult<'tcx>>>, NoSolution, > { - desc { "normalizing `{:?}`", goal } + desc { "normalizing `{}`", goal.value.value } remap_env_constness } @@ -1810,21 +1863,13 @@ rustc_queries! { remap_env_constness } - /// Do not call this query directly: invoke `try_normalize_erasing_regions` instead. - query try_normalize_mir_const_after_erasing_regions( - goal: ParamEnvAnd<'tcx, mir::ConstantKind<'tcx>> - ) -> Result<mir::ConstantKind<'tcx>, NoSolution> { - desc { "normalizing `{}`", goal.value } - remap_env_constness - } - query implied_outlives_bounds( goal: CanonicalTyGoal<'tcx> ) -> Result< &'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, Vec<OutlivesBound<'tcx>>>>, NoSolution, > { - desc { "computing implied outlives bounds for `{:?}`", goal } + desc { "computing implied outlives bounds for `{}`", goal.value.value } remap_env_constness } @@ -1836,7 +1881,7 @@ rustc_queries! { &'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, DropckOutlivesResult<'tcx>>>, NoSolution, > { - desc { "computing dropck types for `{:?}`", goal } + desc { "computing dropck types for `{}`", goal.value.value } remap_env_constness } @@ -1864,7 +1909,7 @@ rustc_queries! { &'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, ()>>, NoSolution, > { - desc { "evaluating `type_op_ascribe_user_type` `{:?}`", goal } + desc { "evaluating `type_op_ascribe_user_type` `{:?}`", goal.value.value } remap_env_constness } @@ -1875,7 +1920,7 @@ rustc_queries! { &'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, ()>>, NoSolution, > { - desc { "evaluating `type_op_eq` `{:?}`", goal } + desc { "evaluating `type_op_eq` `{:?}`", goal.value.value } remap_env_constness } @@ -1886,7 +1931,7 @@ rustc_queries! { &'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, ()>>, NoSolution, > { - desc { "evaluating `type_op_subtype` `{:?}`", goal } + desc { "evaluating `type_op_subtype` `{:?}`", goal.value.value } remap_env_constness } @@ -1897,7 +1942,7 @@ rustc_queries! { &'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, ()>>, NoSolution, > { - desc { "evaluating `type_op_prove_predicate` `{:?}`", goal } + desc { "evaluating `type_op_prove_predicate` `{:?}`", goal.value.value } } /// Do not call this query directly: part of the `Normalize` type-op @@ -1907,7 +1952,7 @@ rustc_queries! { &'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, Ty<'tcx>>>, NoSolution, > { - desc { "normalizing `{:?}`", goal } + desc { "normalizing `{}`", goal.value.value.value } remap_env_constness } @@ -1918,7 +1963,7 @@ rustc_queries! { &'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, ty::Predicate<'tcx>>>, NoSolution, > { - desc { "normalizing `{:?}`", goal } + desc { "normalizing `{:?}`", goal.value.value.value } remap_env_constness } @@ -1929,7 +1974,7 @@ rustc_queries! { &'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, ty::PolyFnSig<'tcx>>>, NoSolution, > { - desc { "normalizing `{:?}`", goal } + desc { "normalizing `{:?}`", goal.value.value.value } remap_env_constness } @@ -1940,26 +1985,34 @@ rustc_queries! 
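[Illustration, not part of this diff: the `normalize_*` type-op queries above resolve projections such as associated types to concrete types. A minimal surface-level sketch; `first` is a made-up helper.]

```rust
// Minimal sketch (not compiler code): `<I as IntoIterator>::Item` is a
// projection; for `Vec<u8>` it normalizes to the concrete type `u8`.
fn first<I: IntoIterator>(iter: I) -> Option<<I as IntoIterator>::Item> {
    iter.into_iter().next()
}

fn main() {
    let x: Option<u8> = first(vec![1u8, 2, 3]);
    assert_eq!(x, Some(1));
}
```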
{ &'tcx Canonical<'tcx, canonical::QueryResponse<'tcx, ty::FnSig<'tcx>>>, NoSolution, > { - desc { "normalizing `{:?}`", goal } + desc { "normalizing `{:?}`", goal.value.value.value } remap_env_constness } query subst_and_check_impossible_predicates(key: (DefId, SubstsRef<'tcx>)) -> bool { desc { |tcx| - "impossible substituted predicates:`{}`", + "checking impossible substituted predicates: `{}`", tcx.def_path_str(key.0) } } + query is_impossible_method(key: (DefId, DefId)) -> bool { + desc { |tcx| + "checking if `{}` is impossible to call within `{}`", + tcx.def_path_str(key.1), + tcx.def_path_str(key.0), + } + } + query method_autoderef_steps( goal: CanonicalTyGoal<'tcx> ) -> MethodAutoderefStepsResult<'tcx> { - desc { "computing autoderef types for `{:?}`", goal } + desc { "computing autoderef types for `{}`", goal.value.value } remap_env_constness } query supported_target_features(_: CrateNum) -> FxHashMap<String, Option<Symbol>> { - storage(ArenaCacheSelector<'tcx>) + arena_cache eval_always desc { "looking up supported target features" } } @@ -2002,7 +2055,7 @@ rustc_queries! { } query normalize_opaque_types(key: &'tcx ty::List<ty::Predicate<'tcx>>) -> &'tcx ty::List<ty::Predicate<'tcx>> { - desc { "normalizing opaque types in {:?}", key } + desc { "normalizing opaque types in `{:?}`", key } } /// Checks whether a type is definitely uninhabited. This is @@ -2012,7 +2065,7 @@ rustc_queries! { /// will be `Abi::Uninhabited`. (Note that uninhabited types may have nonzero /// size, to account for partial initialisation. See #49298 for details.) query conservative_is_privately_uninhabited(key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>) -> bool { - desc { "conservatively checking if {:?} is privately uninhabited", key } + desc { "conservatively checking if `{}` is privately uninhabited", key.value } remap_env_constness } @@ -2029,32 +2082,43 @@ rustc_queries! { /// all of the cases that the normal `ty::Ty`-based wfcheck does. This is fine, /// because the `ty::Ty`-based wfcheck is always run. query diagnostic_hir_wf_check(key: (ty::Predicate<'tcx>, traits::WellFormedLoc)) -> Option<traits::ObligationCause<'tcx>> { - storage(ArenaCacheSelector<'tcx>) + arena_cache eval_always no_hash - desc { "performing HIR wf-checking for predicate {:?} at item {:?}", key.0, key.1 } + desc { "performing HIR wf-checking for predicate `{:?}` at item `{:?}`", key.0, key.1 } } /// The list of backend features computed from CLI flags (`-Ctarget-cpu`, `-Ctarget-feature`, /// `--target` and similar). 
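[Illustration, not part of this diff: `method_autoderef_steps` above computes the chain of dereferences tried during method resolution. A minimal sketch of that chain at the surface level.]

```rust
// Minimal sketch (not compiler code): method lookup peels references and
// `Deref` impls step by step until a matching method is found.
fn main() {
    let v = vec![1, 2, 3];
    let r: &&Vec<i32> = &&v;
    // `&&Vec<i32>` -> `&Vec<i32>` -> `Vec<i32>` finds the inherent `len`;
    // one more `Deref` step to `[i32]` finds `first`.
    assert_eq!(r.len(), 3);
    assert_eq!(r.first(), Some(&1));
}
```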
query global_backend_features(_: ()) -> Vec<String> { - storage(ArenaCacheSelector<'tcx>) + arena_cache eval_always desc { "computing the backend features for CLI flags" } } query generator_diagnostic_data(key: DefId) -> Option<GeneratorDiagnosticData<'tcx>> { - storage(ArenaCacheSelector<'tcx>) + arena_cache desc { |tcx| "looking up generator diagnostic data of `{}`", tcx.def_path_str(key) } separate_provide_extern } query permits_uninit_init(key: TyAndLayout<'tcx>) -> bool { - desc { "checking to see if {:?} permits being left uninit", key.ty } + desc { "checking to see if `{}` permits being left uninit", key.ty } } query permits_zero_init(key: TyAndLayout<'tcx>) -> bool { - desc { "checking to see if {:?} permits being left zeroed", key.ty } + desc { "checking to see if `{}` permits being left zeroed", key.ty } + } + + query compare_assoc_const_impl_item_with_trait_item( + key: (LocalDefId, DefId) + ) -> Result<(), ErrorGuaranteed> { + desc { |tcx| "checking assoc const `{}` has the same type as trait item", tcx.def_path_str(key.0.to_def_id()) } + } + + query deduced_param_attrs(def_id: DefId) -> &'tcx [ty::DeducedParamAttrs] { + desc { |tcx| "deducing parameter attributes for {}", tcx.def_path_str(def_id) } + separate_provide_extern } } diff --git a/compiler/rustc_middle/src/thir.rs b/compiler/rustc_middle/src/thir.rs index b856af1d8..ea7a507d7 100644 --- a/compiler/rustc_middle/src/thir.rs +++ b/compiler/rustc_middle/src/thir.rs @@ -15,50 +15,33 @@ use rustc_hir::def_id::DefId; use rustc_hir::RangeEnd; use rustc_index::newtype_index; use rustc_index::vec::IndexVec; -use rustc_middle::infer::canonical::Canonical; use rustc_middle::middle::region; use rustc_middle::mir::interpret::AllocId; use rustc_middle::mir::{self, BinOp, BorrowKind, FakeReadCause, Field, Mutability, UnOp}; use rustc_middle::ty::adjustment::PointerCast; use rustc_middle::ty::subst::SubstsRef; -use rustc_middle::ty::CanonicalUserTypeAnnotation; -use rustc_middle::ty::{self, AdtDef, Ty, UpvarSubsts, UserType}; -use rustc_span::{Span, Symbol, DUMMY_SP}; +use rustc_middle::ty::{self, AdtDef, Ty, UpvarSubsts}; +use rustc_middle::ty::{CanonicalUserType, CanonicalUserTypeAnnotation}; +use rustc_span::def_id::LocalDefId; +use rustc_span::{sym, Span, Symbol, DUMMY_SP}; use rustc_target::abi::VariantIdx; use rustc_target::asm::InlineAsmRegOrRegClass; - -use rustc_span::def_id::LocalDefId; use std::fmt; use std::ops::Index; pub mod visit; -newtype_index! { - /// An index to an [`Arm`] stored in [`Thir::arms`] - #[derive(HashStable)] - pub struct ArmId { - DEBUG_FORMAT = "a{}" - } -} - -newtype_index! { - /// An index to an [`Expr`] stored in [`Thir::exprs`] - #[derive(HashStable)] - pub struct ExprId { - DEBUG_FORMAT = "e{}" - } -} - -newtype_index! { - #[derive(HashStable)] - /// An index to a [`Stmt`] stored in [`Thir::stmts`] - pub struct StmtId { - DEBUG_FORMAT = "s{}" - } -} - macro_rules! thir_with_elements { - ($($name:ident: $id:ty => $value:ty,)*) => { + ($($name:ident: $id:ty => $value:ty => $format:literal,)*) => { + $( + newtype_index! { + #[derive(HashStable)] + pub struct $id { + DEBUG_FORMAT = $format + } + } + )* + /// A container for a THIR body. /// /// This can be indexed directly by any THIR index (e.g. [`ExprId`]). @@ -90,10 +73,29 @@ macro_rules! thir_with_elements { } } +pub const UPVAR_ENV_PARAM: ParamId = ParamId::from_u32(0); + thir_with_elements! 
{ - arms: ArmId => Arm<'tcx>, - exprs: ExprId => Expr<'tcx>, - stmts: StmtId => Stmt<'tcx>, + arms: ArmId => Arm<'tcx> => "a{}", + blocks: BlockId => Block => "b{}", + exprs: ExprId => Expr<'tcx> => "e{}", + stmts: StmtId => Stmt<'tcx> => "s{}", + params: ParamId => Param<'tcx> => "p{}", +} + +/// Description of a type-checked function parameter. +#[derive(Clone, Debug, HashStable)] +pub struct Param<'tcx> { + /// The pattern that appears in the parameter list, or None for implicit parameters. + pub pat: Option<Box<Pat<'tcx>>>, + /// The possibly inferred type. + pub ty: Ty<'tcx>, + /// Span of the explicitly provided type, or None if inferred for closures. + pub ty_span: Option<Span>, + /// Whether this param is `self`, and how it is bound. + pub self_kind: Option<hir::ImplicitSelfKind>, + /// HirId for lints. + pub hir_id: Option<hir::HirId>, } #[derive(Copy, Clone, Debug, HashStable)] @@ -121,8 +123,10 @@ pub struct Block { pub safety_mode: BlockSafety, } +type UserTy<'tcx> = Option<Box<CanonicalUserType<'tcx>>>; + #[derive(Clone, Debug, HashStable)] -pub struct Adt<'tcx> { +pub struct AdtExpr<'tcx> { /// The ADT we're constructing. pub adt_def: AdtDef<'tcx>, /// The variant of the ADT. @@ -131,13 +135,30 @@ pub struct Adt<'tcx> { /// Optional user-given substs: for something like `let x = /// Bar::<T> { ... }`. - pub user_ty: Option<Canonical<'tcx, UserType<'tcx>>>, + pub user_ty: UserTy<'tcx>, pub fields: Box<[FieldExpr]>, /// The base, e.g. `Foo {x: 1, .. base}`. pub base: Option<FruInfo<'tcx>>, } +#[derive(Clone, Debug, HashStable)] +pub struct ClosureExpr<'tcx> { + pub closure_id: LocalDefId, + pub substs: UpvarSubsts<'tcx>, + pub upvars: Box<[ExprId]>, + pub movability: Option<hir::Movability>, + pub fake_reads: Vec<(ExprId, FakeReadCause, hir::HirId)>, +} + +#[derive(Clone, Debug, HashStable)] +pub struct InlineAsmExpr<'tcx> { + pub template: &'tcx [InlineAsmTemplatePiece], + pub operands: Box<[InlineAsmOperand<'tcx>]>, + pub options: InlineAsmOptions, + pub line_spans: &'tcx [Span], +} + #[derive(Copy, Clone, Debug, HashStable)] pub enum BlockSafety { Safe, @@ -177,13 +198,13 @@ pub enum StmtKind<'tcx> { /// `let <PAT> = ...` /// /// If a type annotation is included, it is added as an ascription pattern. - pattern: Pat<'tcx>, + pattern: Box<Pat<'tcx>>, /// `let pat: ty = <INIT>` initializer: Option<ExprId>, - /// `let pat: ty = <INIT> else { <ELSE> } - else_block: Option<Block>, + /// `let pat: ty = <INIT> else { <ELSE> }` + else_block: Option<BlockId>, /// The lint level for this `let` statement. lint_level: LintLevel, @@ -298,7 +319,7 @@ pub enum ExprKind<'tcx> { }, Let { expr: ExprId, - pat: Pat<'tcx>, + pat: Box<Pat<'tcx>>, }, /// A `match` expression. Match { @@ -307,7 +328,7 @@ pub enum ExprKind<'tcx> { }, /// A block. Block { - body: Block, + block: BlockId, }, /// An assignment: `lhs = rhs`. Assign { @@ -387,27 +408,21 @@ pub enum ExprKind<'tcx> { fields: Box<[ExprId]>, }, /// An ADT constructor, e.g. `Foo {x: 1, y: 2}`. - Adt(Box<Adt<'tcx>>), + Adt(Box<AdtExpr<'tcx>>), /// A type ascription on a place. PlaceTypeAscription { source: ExprId, /// Type that the user gave to this expression - user_ty: Option<Canonical<'tcx, UserType<'tcx>>>, + user_ty: UserTy<'tcx>, }, /// A type ascription on a value, e.g. `42: i32`. ValueTypeAscription { source: ExprId, /// Type that the user gave to this expression - user_ty: Option<Canonical<'tcx, UserType<'tcx>>>, + user_ty: UserTy<'tcx>, }, /// A closure definition. 
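[Illustration, not part of this diff: the THIR `StmtKind::Let` above now stores its `else_block` as a `BlockId`. A minimal sketch of the surface `let ... else` form (stabilized in Rust 1.65) that this represents; the function is made up.]

```rust
// Minimal sketch (not compiler code): a `Let` statement with an else block.
fn parse_or_zero(input: &str) -> u32 {
    let Ok(n) = input.parse::<u32>() else {
        return 0; // the else block must diverge
    };
    n
}

fn main() {
    assert_eq!(parse_or_zero("42"), 42);
    assert_eq!(parse_or_zero("nope"), 0);
}
```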
- Closure { - closure_id: LocalDefId, - substs: UpvarSubsts<'tcx>, - upvars: Box<[ExprId]>, - movability: Option<hir::Movability>, - fake_reads: Vec<(ExprId, FakeReadCause, hir::HirId)>, - }, + Closure(Box<ClosureExpr<'tcx>>), /// A literal. Literal { lit: &'tcx hir::Lit, @@ -416,17 +431,17 @@ pub enum ExprKind<'tcx> { /// For literals that don't correspond to anything in the HIR NonHirLiteral { lit: ty::ScalarInt, - user_ty: Option<Canonical<'tcx, UserType<'tcx>>>, + user_ty: UserTy<'tcx>, }, /// A literal of a ZST type. ZstLiteral { - user_ty: Option<Canonical<'tcx, UserType<'tcx>>>, + user_ty: UserTy<'tcx>, }, /// Associated constants and named constants NamedConst { def_id: DefId, substs: SubstsRef<'tcx>, - user_ty: Option<Canonical<'tcx, UserType<'tcx>>>, + user_ty: UserTy<'tcx>, }, ConstParam { param: ty::ParamConst, @@ -443,12 +458,7 @@ pub enum ExprKind<'tcx> { def_id: DefId, }, /// Inline assembly, i.e. `asm!()`. - InlineAsm { - template: &'tcx [InlineAsmTemplatePiece], - operands: Box<[InlineAsmOperand<'tcx>]>, - options: InlineAsmOptions, - line_spans: &'tcx [Span], - }, + InlineAsm(Box<InlineAsmExpr<'tcx>>), /// An expression taking a reference to a thread local. ThreadLocalRef(DefId), /// A `yield` expression. @@ -475,7 +485,7 @@ pub struct FruInfo<'tcx> { /// A `match` arm. #[derive(Clone, Debug, HashStable)] pub struct Arm<'tcx> { - pub pattern: Pat<'tcx>, + pub pattern: Box<Pat<'tcx>>, pub guard: Option<Guard<'tcx>>, pub body: ExprId, pub lint_level: LintLevel, @@ -487,7 +497,7 @@ pub struct Arm<'tcx> { #[derive(Clone, Debug, HashStable)] pub enum Guard<'tcx> { If(ExprId), - IfLet(Pat<'tcx>, ExprId), + IfLet(Box<Pat<'tcx>>, ExprId), } #[derive(Copy, Clone, Debug, HashStable)] @@ -542,19 +552,28 @@ pub enum BindingMode { #[derive(Clone, Debug, HashStable)] pub struct FieldPat<'tcx> { pub field: Field, - pub pattern: Pat<'tcx>, + pub pattern: Box<Pat<'tcx>>, } #[derive(Clone, Debug, HashStable)] pub struct Pat<'tcx> { pub ty: Ty<'tcx>, pub span: Span, - pub kind: Box<PatKind<'tcx>>, + pub kind: PatKind<'tcx>, } impl<'tcx> Pat<'tcx> { pub fn wildcard_from_ty(ty: Ty<'tcx>) -> Self { - Pat { ty, span: DUMMY_SP, kind: Box::new(PatKind::Wild) } + Pat { ty, span: DUMMY_SP, kind: PatKind::Wild } + } + + pub fn simple_ident(&self) -> Option<Symbol> { + match self.kind { + PatKind::Binding { name, mode: BindingMode::ByValue, subpattern: None, .. } => { + Some(name) + } + _ => None, + } } } @@ -589,7 +608,7 @@ pub enum PatKind<'tcx> { AscribeUserType { ascription: Ascription<'tcx>, - subpattern: Pat<'tcx>, + subpattern: Box<Pat<'tcx>>, }, /// `x`, `ref x`, `x @ P`, etc. @@ -599,7 +618,7 @@ pub enum PatKind<'tcx> { mode: BindingMode, var: LocalVarId, ty: Ty<'tcx>, - subpattern: Option<Pat<'tcx>>, + subpattern: Option<Box<Pat<'tcx>>>, /// Is this the leftmost occurrence of the binding, i.e., is `var` the /// `HirId` of this pattern? is_primary: bool, @@ -622,7 +641,7 @@ pub enum PatKind<'tcx> { /// `box P`, `&P`, `&mut P`, etc. Deref { - subpattern: Pat<'tcx>, + subpattern: Box<Pat<'tcx>>, }, /// One of the following: @@ -636,32 +655,32 @@ pub enum PatKind<'tcx> { value: mir::ConstantKind<'tcx>, }, - Range(PatRange<'tcx>), + Range(Box<PatRange<'tcx>>), /// Matches against a slice, checking the length and extracting elements. /// irrefutable when there is a slice pattern and both `prefix` and `suffix` are empty. /// e.g., `&[ref xs @ ..]`. 
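[Illustration, not part of this diff: `PatKind::Slice`/`PatKind::Array` above hold prefix, rest, and suffix subpatterns. A minimal sketch of the surface patterns they represent; `describe` is a made-up helper.]

```rust
// Minimal sketch (not compiler code): slice patterns with prefix, rest (`..`)
// and suffix parts, including a `rest @ ..` style binding.
fn describe(xs: &[i32]) -> String {
    match xs {
        [] => "empty".to_string(),
        [only] => format!("just {only}"),
        [first, rest @ ..] => format!("{first} then {} more", rest.len()),
    }
}

fn main() {
    assert_eq!(describe(&[1, 2, 3]), "1 then 2 more");
}
```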
Slice { - prefix: Vec<Pat<'tcx>>, - slice: Option<Pat<'tcx>>, - suffix: Vec<Pat<'tcx>>, + prefix: Box<[Box<Pat<'tcx>>]>, + slice: Option<Box<Pat<'tcx>>>, + suffix: Box<[Box<Pat<'tcx>>]>, }, /// Fixed match against an array; irrefutable. Array { - prefix: Vec<Pat<'tcx>>, - slice: Option<Pat<'tcx>>, - suffix: Vec<Pat<'tcx>>, + prefix: Box<[Box<Pat<'tcx>>]>, + slice: Option<Box<Pat<'tcx>>>, + suffix: Box<[Box<Pat<'tcx>>]>, }, /// An or-pattern, e.g. `p | q`. /// Invariant: `pats.len() >= 2`. Or { - pats: Vec<Pat<'tcx>>, + pats: Box<[Box<Pat<'tcx>>]>, }, } -#[derive(Copy, Clone, Debug, PartialEq, HashStable)] +#[derive(Clone, Debug, PartialEq, HashStable)] pub struct PatRange<'tcx> { pub lo: mir::ConstantKind<'tcx>, pub hi: mir::ConstantKind<'tcx>, @@ -682,7 +701,7 @@ impl<'tcx> fmt::Display for Pat<'tcx> { }; let mut start_or_comma = || start_or_continue(", "); - match *self.kind { + match self.kind { PatKind::Wild => write!(f, "_"), PatKind::AscribeUserType { ref subpattern, .. } => write!(f, "{}: _", subpattern), PatKind::Binding { mutability, name, mode, ref subpattern, .. } => { @@ -703,17 +722,32 @@ impl<'tcx> fmt::Display for Pat<'tcx> { Ok(()) } PatKind::Variant { ref subpatterns, .. } | PatKind::Leaf { ref subpatterns } => { - let variant = match *self.kind { - PatKind::Variant { adt_def, variant_index, .. } => { - Some(adt_def.variant(variant_index)) - } - _ => self.ty.ty_adt_def().and_then(|adt| { - if !adt.is_enum() { Some(adt.non_enum_variant()) } else { None } + let variant_and_name = match self.kind { + PatKind::Variant { adt_def, variant_index, .. } => ty::tls::with(|tcx| { + let variant = adt_def.variant(variant_index); + let adt_did = adt_def.did(); + let name = if tcx.get_diagnostic_item(sym::Option) == Some(adt_did) + || tcx.get_diagnostic_item(sym::Result) == Some(adt_did) + { + variant.name.to_string() + } else { + format!("{}::{}", tcx.def_path_str(adt_def.did()), variant.name) + }; + Some((variant, name)) + }), + _ => self.ty.ty_adt_def().and_then(|adt_def| { + if !adt_def.is_enum() { + ty::tls::with(|tcx| { + Some((adt_def.non_enum_variant(), tcx.def_path_str(adt_def.did()))) + }) + } else { + None + } }), }; - if let Some(variant) = variant { - write!(f, "{}", variant.name)?; + if let Some((variant, name)) = &variant_and_name { + write!(f, "{}", name)?; // Only for Adt we can have `S {...}`, // which we handle separately here. 
@@ -722,7 +756,7 @@ impl<'tcx> fmt::Display for Pat<'tcx> { let mut printed = 0; for p in subpatterns { - if let PatKind::Wild = *p.pattern.kind { + if let PatKind::Wild = p.pattern.kind { continue; } let name = variant.fields[p.field.index()].name; @@ -738,8 +772,9 @@ impl<'tcx> fmt::Display for Pat<'tcx> { } } - let num_fields = variant.map_or(subpatterns.len(), |v| v.fields.len()); - if num_fields != 0 || variant.is_none() { + let num_fields = + variant_and_name.as_ref().map_or(subpatterns.len(), |(v, _)| v.fields.len()); + if num_fields != 0 || variant_and_name.is_none() { write!(f, "(")?; for i in 0..num_fields { write!(f, "{}", start_or_comma())?; @@ -775,7 +810,7 @@ impl<'tcx> fmt::Display for Pat<'tcx> { write!(f, "{}", subpattern) } PatKind::Constant { value } => write!(f, "{}", value), - PatKind::Range(PatRange { lo, hi, end }) => { + PatKind::Range(box PatRange { lo, hi, end }) => { write!(f, "{}", lo)?; write!(f, "{}", end)?; write!(f, "{}", hi) @@ -783,24 +818,24 @@ impl<'tcx> fmt::Display for Pat<'tcx> { PatKind::Slice { ref prefix, ref slice, ref suffix } | PatKind::Array { ref prefix, ref slice, ref suffix } => { write!(f, "[")?; - for p in prefix { + for p in prefix.iter() { write!(f, "{}{}", start_or_comma(), p)?; } if let Some(ref slice) = *slice { write!(f, "{}", start_or_comma())?; - match *slice.kind { + match slice.kind { PatKind::Wild => {} _ => write!(f, "{}", slice)?, } write!(f, "..")?; } - for p in suffix { + for p in suffix.iter() { write!(f, "{}{}", start_or_comma(), p)?; } write!(f, "]") } PatKind::Or { ref pats } => { - for pat in pats { + for pat in pats.iter() { write!(f, "{}{}", start_or_continue(" | "), pat)?; } Ok(()) @@ -813,9 +848,13 @@ impl<'tcx> fmt::Display for Pat<'tcx> { #[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))] mod size_asserts { use super::*; - // These are in alphabetical order, which is easy to maintain. 
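[Illustration, not part of this diff: the `Display` impl above prints `PatKind::Range` and `PatKind::Or` back to their surface syntax. A minimal sketch of that syntax; `classify` is a made-up helper.]

```rust
// Minimal sketch (not compiler code): range patterns and or-patterns.
fn classify(c: char) -> &'static str {
    match c {
        '0'..='9' => "digit",
        'a'..='z' | 'A'..='Z' => "letter",
        _ => "other",
    }
}

fn main() {
    assert_eq!(classify('7'), "digit");
    assert_eq!(classify('x'), "letter");
    assert_eq!(classify('!'), "other");
}
```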
- rustc_data_structures::static_assert_size!(Block, 56); - rustc_data_structures::static_assert_size!(Expr<'_>, 104); - rustc_data_structures::static_assert_size!(Pat<'_>, 24); - rustc_data_structures::static_assert_size!(Stmt<'_>, 120); + // tidy-alphabetical-start + static_assert_size!(Block, 56); + static_assert_size!(Expr<'_>, 64); + static_assert_size!(ExprKind<'_>, 40); + static_assert_size!(Pat<'_>, 72); + static_assert_size!(PatKind<'_>, 56); + static_assert_size!(Stmt<'_>, 48); + static_assert_size!(StmtKind<'_>, 40); + // tidy-alphabetical-end } diff --git a/compiler/rustc_middle/src/thir/visit.rs b/compiler/rustc_middle/src/thir/visit.rs index 97249fdd1..79a0e75aa 100644 --- a/compiler/rustc_middle/src/thir/visit.rs +++ b/compiler/rustc_middle/src/thir/visit.rs @@ -1,5 +1,6 @@ use super::{ - Arm, Block, Expr, ExprKind, Guard, InlineAsmOperand, Pat, PatKind, Stmt, StmtKind, Thir, + AdtExpr, Arm, Block, ClosureExpr, Expr, ExprKind, Guard, InlineAsmExpr, InlineAsmOperand, Pat, + PatKind, Stmt, StmtKind, Thir, }; pub trait Visitor<'a, 'tcx: 'a>: Sized { @@ -75,7 +76,7 @@ pub fn walk_expr<'a, 'tcx: 'a, V: Visitor<'a, 'tcx>>(visitor: &mut V, expr: &Exp visitor.visit_arm(&visitor.thir()[arm]); } } - Block { ref body } => visitor.visit_block(body), + Block { block } => visitor.visit_block(&visitor.thir()[block]), Assign { lhs, rhs } | AssignOp { lhs, rhs, op: _ } => { visitor.visit_expr(&visitor.thir()[lhs]); visitor.visit_expr(&visitor.thir()[rhs]); @@ -108,7 +109,7 @@ pub fn walk_expr<'a, 'tcx: 'a, V: Visitor<'a, 'tcx>>(visitor: &mut V, expr: &Exp visitor.visit_expr(&visitor.thir()[field]); } } - Adt(box crate::thir::Adt { + Adt(box AdtExpr { ref fields, ref base, adt_def: _, @@ -126,14 +127,20 @@ pub fn walk_expr<'a, 'tcx: 'a, V: Visitor<'a, 'tcx>>(visitor: &mut V, expr: &Exp PlaceTypeAscription { source, user_ty: _ } | ValueTypeAscription { source, user_ty: _ } => { visitor.visit_expr(&visitor.thir()[source]) } - Closure { closure_id: _, substs: _, upvars: _, movability: _, fake_reads: _ } => {} + Closure(box ClosureExpr { + closure_id: _, + substs: _, + upvars: _, + movability: _, + fake_reads: _, + }) => {} Literal { lit: _, neg: _ } => {} NonHirLiteral { lit: _, user_ty: _ } => {} ZstLiteral { user_ty: _ } => {} NamedConst { def_id: _, substs: _, user_ty: _ } => {} ConstParam { param: _, def_id: _ } => {} StaticRef { alloc_id: _, ty: _, def_id: _ } => {} - InlineAsm { ref operands, template: _, options: _, line_spans: _ } => { + InlineAsm(box InlineAsmExpr { ref operands, template: _, options: _, line_spans: _ }) => { for op in &**operands { use InlineAsmOperand::*; match op { @@ -174,7 +181,7 @@ pub fn walk_stmt<'a, 'tcx: 'a, V: Visitor<'a, 'tcx>>(visitor: &mut V, stmt: &Stm } visitor.visit_pat(pattern); if let Some(block) = else_block { - visitor.visit_block(block) + visitor.visit_block(&visitor.thir()[*block]) } } } @@ -204,7 +211,7 @@ pub fn walk_arm<'a, 'tcx: 'a, V: Visitor<'a, 'tcx>>(visitor: &mut V, arm: &Arm<' pub fn walk_pat<'a, 'tcx: 'a, V: Visitor<'a, 'tcx>>(visitor: &mut V, pat: &Pat<'tcx>) { use PatKind::*; - match pat.kind.as_ref() { + match &pat.kind { AscribeUserType { subpattern, ascription: _ } | Deref { subpattern } | Binding { @@ -225,18 +232,18 @@ pub fn walk_pat<'a, 'tcx: 'a, V: Visitor<'a, 'tcx>>(visitor: &mut V, pat: &Pat<' Constant { value: _ } => {} Range(_) => {} Slice { prefix, slice, suffix } | Array { prefix, slice, suffix } => { - for subpattern in prefix { + for subpattern in prefix.iter() { visitor.visit_pat(&subpattern); } if let Some(pat) = 
slice { - visitor.visit_pat(pat); + visitor.visit_pat(&pat); } - for subpattern in suffix { + for subpattern in suffix.iter() { visitor.visit_pat(&subpattern); } } Or { pats } => { - for pat in pats { + for pat in pats.iter() { visitor.visit_pat(&pat); } } diff --git a/compiler/rustc_middle/src/traits/mod.rs b/compiler/rustc_middle/src/traits/mod.rs index 72b848c3e..e73d44bbb 100644 --- a/compiler/rustc_middle/src/traits/mod.rs +++ b/compiler/rustc_middle/src/traits/mod.rs @@ -10,9 +10,10 @@ mod structural_impls; pub mod util; use crate::infer::canonical::Canonical; +use crate::mir::ConstraintCategory; use crate::ty::abstract_const::NotConstEvaluatable; use crate::ty::subst::SubstsRef; -use crate::ty::{self, AdtKind, Predicate, Ty, TyCtxt}; +use crate::ty::{self, AdtKind, Ty, TyCtxt}; use rustc_data_structures::sync::Lrc; use rustc_errors::{Applicability, Diagnostic}; @@ -183,6 +184,16 @@ impl<'tcx> ObligationCause<'tcx> { variant(DerivedObligationCause { parent_trait_pred, parent_code: self.code }).into(); self } + + pub fn to_constraint_category(&self) -> ConstraintCategory<'tcx> { + match self.code() { + MatchImpl(cause, _) => cause.to_constraint_category(), + AscribeUserTypeProvePredicate(predicate_span) => { + ConstraintCategory::Predicate(*predicate_span) + } + _ => ConstraintCategory::BoringNoLocation, + } + } } #[derive(Clone, Debug, PartialEq, Eq, Hash, Lift)] @@ -234,13 +245,23 @@ pub enum ObligationCauseCode<'tcx> { /// This is the trait reference from the given projection. ProjectionWf(ty::ProjectionTy<'tcx>), - /// In an impl of trait `X` for type `Y`, type `Y` must - /// also implement all supertraits of `X`. + /// Must satisfy all of the where-clause predicates of the + /// given item. ItemObligation(DefId), - /// Like `ItemObligation`, but with extra detail on the source of the obligation. + /// Like `ItemObligation`, but carries the span of the + /// predicate when it can be identified. BindingObligation(DefId, Span), + /// Like `ItemObligation`, but carries the `HirId` of the + /// expression that caused the obligation, and the `usize` + /// indicates exactly which predicate it is in the list of + /// instantiated predicates. + ExprItemObligation(DefId, rustc_hir::HirId, usize), + + /// Combines `ExprItemObligation` and `BindingObligation`. + ExprBindingObligation(DefId, Span, rustc_hir::HirId, usize), + /// A type like `&'a T` is WF only if `T: 'a`. ReferenceOutlivesReferent(Ty<'tcx>), @@ -406,8 +427,10 @@ pub enum ObligationCauseCode<'tcx> { BinOp { rhs_span: Option<Span>, is_lit: bool, - output_pred: Option<Predicate<'tcx>>, + output_ty: Option<Ty<'tcx>>, }, + + AscribeUserTypeProvePredicate(Span), } /// The 'location' at which we try to perform HIR-based wf checking. @@ -459,6 +482,13 @@ impl<'tcx> ObligationCauseCode<'tcx> { _ => None, } } + + pub fn peel_match_impls(&self) -> &Self { + match self { + MatchImpl(cause, _) => cause.code(), + _ => self, + } + } } // `ObligationCauseCode` is used a lot. Make sure it doesn't unintentionally get bigger. @@ -568,11 +598,6 @@ pub type SelectionResult<'tcx, T> = Result<Option<T>, SelectionError<'tcx>>; /// // type parameters, ImplSource will carry resolutions for those as well: /// concrete.clone(); // ImplSource(Impl_1, [ImplSource(Impl_2, [ImplSource(Impl_3)])]) /// -/// // Case A: ImplSource points at a specific impl. Only possible when -/// // type is concretely known. 
If the impl itself has bounded -/// // type parameters, ImplSource will carry resolutions for those as well: -/// concrete.clone(); // ImplSource(Impl_1, [ImplSource(Impl_2, [ImplSource(Impl_3)])]) -/// /// // Case B: ImplSource must be provided by caller. This applies when /// // type is a type parameter. /// param.clone(); // ImplSource::Param @@ -648,7 +673,7 @@ impl<'tcx, N> ImplSource<'tcx, N> { ImplSource::Object(d) => d.nested, ImplSource::FnPointer(d) => d.nested, ImplSource::DiscriminantKind(ImplSourceDiscriminantKindData) - | ImplSource::Pointee(ImplSourcePointeeData) => Vec::new(), + | ImplSource::Pointee(ImplSourcePointeeData) => vec![], ImplSource::TraitAlias(d) => d.nested, ImplSource::TraitUpcasting(d) => d.nested, ImplSource::ConstDestruct(i) => i.nested, @@ -893,6 +918,12 @@ impl ObjectSafetyViolation { } ObjectSafetyViolation::Method( name, + MethodViolationCode::ReferencesImplTraitInTrait, + _, + ) => format!("method `{}` references an `impl Trait` type in its return type", name) + .into(), + ObjectSafetyViolation::Method( + name, MethodViolationCode::WhereClauseReferencesSelf, _, ) => { @@ -997,6 +1028,9 @@ pub enum MethodViolationCode { /// e.g., `fn foo(&self) -> Self` ReferencesSelfOutput, + /// e.g., `fn foo(&self) -> impl Sized` + ReferencesImplTraitInTrait, + /// e.g., `fn foo(&self) where Self: Clone` WhereClauseReferencesSelf, @@ -1007,7 +1041,7 @@ pub enum MethodViolationCode { UndispatchableReceiver(Option<Span>), } -/// These are the error cases for `codegen_fulfill_obligation`. +/// These are the error cases for `codegen_select_candidate`. #[derive(Copy, Clone, Debug, Hash, HashStable, Encodable, Decodable)] pub enum CodegenObligationError { /// Ambiguity can happen when monomorphizing during trans diff --git a/compiler/rustc_middle/src/traits/query.rs b/compiler/rustc_middle/src/traits/query.rs index 1f9b474ad..fb152b63f 100644 --- a/compiler/rustc_middle/src/traits/query.rs +++ b/compiler/rustc_middle/src/traits/query.rs @@ -5,11 +5,12 @@ //! The providers for the queries defined here can be found in //! `rustc_traits`. 
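[Illustration, not part of this diff: `ObjectSafetyViolation`/`MethodViolationCode` above gained a `ReferencesImplTraitInTrait` case alongside the existing `Self`-related ones. A minimal sketch of the classic violation and the usual `where Self: Sized` escape hatch; the trait and names are made up.]

```rust
// Minimal sketch (not compiler code): a method returning `Self` is an
// object-safety violation unless it is excluded from trait objects.
trait Dupe {
    fn dupe(&self) -> Self
    where
        Self: Sized;

    fn describe(&self) -> String;
}

impl Dupe for u8 {
    fn dupe(&self) -> Self {
        *self
    }
    fn describe(&self) -> String {
        format!("byte {}", self)
    }
}

fn main() {
    // Allowed only because `dupe` is unavailable on the trait object.
    let obj: &dyn Dupe = &7u8;
    println!("{}", obj.describe());
}
```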
+use crate::error::DropCheckOverflow; use crate::infer::canonical::{Canonical, QueryResponse}; use crate::ty::error::TypeError; -use crate::ty::subst::GenericArg; +use crate::ty::subst::{GenericArg, SubstsRef}; use crate::ty::{self, Ty, TyCtxt}; -use rustc_errors::struct_span_err; +use rustc_hir::def_id::DefId; use rustc_span::source_map::Span; use std::iter::FromIterator; @@ -117,15 +118,7 @@ pub struct DropckOutlivesResult<'tcx> { impl<'tcx> DropckOutlivesResult<'tcx> { pub fn report_overflows(&self, tcx: TyCtxt<'tcx>, span: Span, ty: Ty<'tcx>) { if let Some(overflow_ty) = self.overflows.get(0) { - let mut err = struct_span_err!( - tcx.sess, - span, - E0320, - "overflow while adding drop-check rules for {}", - ty, - ); - err.note(&format!("overflowed on {}", overflow_ty)); - err.emit(); + tcx.sess.emit_err(DropCheckOverflow { span, ty, overflow_ty: *overflow_ty }); } } @@ -227,4 +220,5 @@ pub enum OutlivesBound<'tcx> { RegionSubRegion(ty::Region<'tcx>, ty::Region<'tcx>), RegionSubParam(ty::Region<'tcx>, ty::ParamTy), RegionSubProjection(ty::Region<'tcx>, ty::ProjectionTy<'tcx>), + RegionSubOpaque(ty::Region<'tcx>, DefId, SubstsRef<'tcx>), } diff --git a/compiler/rustc_middle/src/traits/select.rs b/compiler/rustc_middle/src/traits/select.rs index e836ba47e..85ead3171 100644 --- a/compiler/rustc_middle/src/traits/select.rs +++ b/compiler/rustc_middle/src/traits/select.rs @@ -115,12 +115,13 @@ pub enum SelectionCandidate<'tcx> { ParamCandidate(ty::PolyTraitPredicate<'tcx>), ImplCandidate(DefId), - AutoImplCandidate(DefId), + AutoImplCandidate, /// This is a trait matching with a projected type as `Self`, and we found /// an applicable bound in the trait definition. The `usize` is an index - /// into the list returned by `tcx.item_bounds`. - ProjectionCandidate(usize), + /// into the list returned by `tcx.item_bounds`. The constness is the + /// constness of the bound in the trait. + ProjectionCandidate(usize, ty::BoundConstness), /// Implementation of a `Fn`-family trait by one of the anonymous types /// generated for an `||` expression. @@ -142,7 +143,7 @@ pub enum SelectionCandidate<'tcx> { /// Builtin implementation of `Pointee`. PointeeCandidate, - TraitAliasCandidate(DefId), + TraitAliasCandidate, /// Matching `dyn Trait` with a supertrait of `Trait`. The index is the /// position in the iterator returned by diff --git a/compiler/rustc_middle/src/traits/specialization_graph.rs b/compiler/rustc_middle/src/traits/specialization_graph.rs index 2465f8e25..0a2819fee 100644 --- a/compiler/rustc_middle/src/traits/specialization_graph.rs +++ b/compiler/rustc_middle/src/traits/specialization_graph.rs @@ -115,7 +115,7 @@ impl Node { matches!(self, Node::Trait(..)) } - /// Trys to find the associated item that implements `trait_item_def_id` + /// Tries to find the associated item that implements `trait_item_def_id` /// defined in this node. /// /// If this returns `None`, the item can potentially still be found in diff --git a/compiler/rustc_middle/src/ty/abstract_const.rs b/compiler/rustc_middle/src/ty/abstract_const.rs index bed809930..1aa4df778 100644 --- a/compiler/rustc_middle/src/ty/abstract_const.rs +++ b/compiler/rustc_middle/src/ty/abstract_const.rs @@ -1,7 +1,7 @@ //! A subset of a mir body used for const evaluatability checking. 
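[Illustration, not part of this diff: `DropckOutlivesResult`/`dropck_outlives` above collect the types and regions that must outlive a value being dropped. A minimal sketch of the rule at the surface level; the struct is made up.]

```rust
// Minimal sketch (not compiler code): `Logger` has a `Drop` impl that can read
// the borrowed string, so the borrow must still be live when it is dropped.
struct Logger<'a> {
    message: &'a str,
}

impl Drop for Logger<'_> {
    fn drop(&mut self) {
        println!("dropping: {}", self.message);
    }
}

fn main() {
    let text = String::from("done");
    let logger = Logger { message: &text };
    drop(logger); // `text` is still alive here, so this is accepted
}
```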
use crate::mir; use crate::ty::visit::TypeVisitable; -use crate::ty::{self, subst::Subst, DelaySpanBugEmitted, EarlyBinder, SubstsRef, Ty, TyCtxt}; +use crate::ty::{self, DelaySpanBugEmitted, EarlyBinder, SubstsRef, Ty, TyCtxt}; use rustc_errors::ErrorGuaranteed; use rustc_hir::def_id::DefId; use std::cmp; @@ -30,7 +30,7 @@ pub struct AbstractConst<'tcx> { impl<'tcx> AbstractConst<'tcx> { pub fn new( tcx: TyCtxt<'tcx>, - uv: ty::Unevaluated<'tcx, ()>, + uv: ty::UnevaluatedConst<'tcx>, ) -> Result<Option<AbstractConst<'tcx>>, ErrorGuaranteed> { let inner = tcx.thir_abstract_const_opt_const_arg(uv.def)?; debug!("AbstractConst::new({:?}) = {:?}", uv, inner); @@ -42,7 +42,7 @@ impl<'tcx> AbstractConst<'tcx> { ct: ty::Const<'tcx>, ) -> Result<Option<AbstractConst<'tcx>>, ErrorGuaranteed> { match ct.kind() { - ty::ConstKind::Unevaluated(uv) => AbstractConst::new(tcx, uv.shrink()), + ty::ConstKind::Unevaluated(uv) => AbstractConst::new(tcx, uv), ty::ConstKind::Error(DelaySpanBugEmitted { reported, .. }) => Err(reported), _ => Ok(None), } @@ -71,16 +71,16 @@ impl<'tcx> AbstractConst<'tcx> { walk_abstract_const::<!, _>(tcx, self, |node| { match node.root(tcx) { Node::Leaf(leaf) => { - if leaf.has_infer_types_or_consts() { + if leaf.has_non_region_infer() { failure_kind = FailureKind::MentionsInfer; - } else if leaf.has_param_types_or_consts() { + } else if leaf.has_non_region_param() { failure_kind = cmp::min(failure_kind, FailureKind::MentionsParam); } } Node::Cast(_, _, ty) => { - if ty.has_infer_types_or_consts() { + if ty.has_non_region_infer() { failure_kind = FailureKind::MentionsInfer; - } else if ty.has_param_types_or_consts() { + } else if ty.has_non_region_param() { failure_kind = cmp::min(failure_kind, FailureKind::MentionsParam); } } diff --git a/compiler/rustc_middle/src/ty/adjustment.rs b/compiler/rustc_middle/src/ty/adjustment.rs index d36cf2fe3..4682ac96b 100644 --- a/compiler/rustc_middle/src/ty/adjustment.rs +++ b/compiler/rustc_middle/src/ty/adjustment.rs @@ -77,7 +77,7 @@ pub enum PointerCast { /// At some point, of course, `Box` should move out of the compiler, in which /// case this is analogous to transforming a struct. E.g., `Box<[i32; 4]>` -> /// `Box<[i32]>` is an `Adjust::Unsize` with the target `Box<[i32]>`. -#[derive(Clone, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)] +#[derive(Clone, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable, Lift)] pub struct Adjustment<'tcx> { pub kind: Adjust<'tcx>, pub target: Ty<'tcx>, @@ -89,7 +89,7 @@ impl<'tcx> Adjustment<'tcx> { } } -#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)] +#[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable, Lift)] pub enum Adjust<'tcx> { /// Go from ! to any type. NeverToAny, @@ -101,6 +101,9 @@ pub enum Adjust<'tcx> { Borrow(AutoBorrow<'tcx>), Pointer(PointerCast), + + /// Cast into a dyn* object. + DynStar, } /// An overloaded autoderef step, representing a `Deref(Mut)::deref(_mut)` @@ -108,7 +111,7 @@ pub enum Adjust<'tcx> { /// The target type is `U` in both cases, with the region and mutability /// being those shared by both the receiver and the returned reference. 
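[Illustration, not part of this diff: an `OverloadedDeref` adjustment, described above, stands for a hidden `Deref::deref`/`deref_mut` call. A minimal sketch with a made-up wrapper type.]

```rust
// Minimal sketch (not compiler code): calling `w.len()` inserts an overloaded
// deref adjustment, i.e. an implicit `Deref::deref(&w)` call.
use std::ops::Deref;

struct Wrapper(String);

impl Deref for Wrapper {
    type Target = str;

    fn deref(&self) -> &str {
        &self.0
    }
}

fn main() {
    let w = Wrapper(String::from("hello"));
    assert_eq!(w.len(), 5); // `str::len` found through `Deref`
    assert!(w.starts_with("he"));
}
```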
#[derive(Copy, Clone, PartialEq, Debug, TyEncodable, TyDecodable, HashStable)] -#[derive(TypeFoldable, TypeVisitable)] +#[derive(TypeFoldable, TypeVisitable, Lift)] pub struct OverloadedDeref<'tcx> { pub region: ty::Region<'tcx>, pub mutbl: hir::Mutability, @@ -167,7 +170,7 @@ impl From<AutoBorrowMutability> for hir::Mutability { } #[derive(Copy, Clone, PartialEq, Debug, TyEncodable, TyDecodable, HashStable)] -#[derive(TypeFoldable, TypeVisitable)] +#[derive(TypeFoldable, TypeVisitable, Lift)] pub enum AutoBorrow<'tcx> { /// Converts from T to &T. Ref(ty::Region<'tcx>, AutoBorrowMutability), diff --git a/compiler/rustc_middle/src/ty/adt.rs b/compiler/rustc_middle/src/ty/adt.rs index 2e596b275..b0a2412ab 100644 --- a/compiler/rustc_middle/src/ty/adt.rs +++ b/compiler/rustc_middle/src/ty/adt.rs @@ -26,9 +26,6 @@ use super::{ Destructor, FieldDef, GenericPredicates, ReprOptions, Ty, TyCtxt, VariantDef, VariantDiscr, }; -#[derive(Copy, Clone, HashStable, Debug)] -pub struct AdtSizedConstraint<'tcx>(pub &'tcx [Ty<'tcx>]); - bitflags! { #[derive(HashStable, TyEncodable, TyDecodable)] pub struct AdtFlags: u32 { @@ -332,13 +329,13 @@ impl<'tcx> AdtDef<'tcx> { self.flags().contains(AdtFlags::IS_PHANTOM_DATA) } - /// Returns `true` if this is Box<T>. + /// Returns `true` if this is `Box<T>`. #[inline] pub fn is_box(self) -> bool { self.flags().contains(AdtFlags::IS_BOX) } - /// Returns `true` if this is UnsafeCell<T>. + /// Returns `true` if this is `UnsafeCell<T>`. #[inline] pub fn is_unsafe_cell(self) -> bool { self.flags().contains(AdtFlags::IS_UNSAFE_CELL) @@ -438,7 +435,8 @@ impl<'tcx> AdtDef<'tcx> { | Res::Def(DefKind::Union, _) | Res::Def(DefKind::TyAlias, _) | Res::Def(DefKind::AssocTy, _) - | Res::SelfTy { .. } + | Res::SelfTyParam { .. } + | Res::SelfTyAlias { .. } | Res::SelfCtor(..) => self.non_enum_variant(), _ => bug!("unexpected res {:?} in variant_of_res", res), } @@ -457,11 +455,9 @@ impl<'tcx> AdtDef<'tcx> { Some(Discr { val: b, ty }) } else { info!("invalid enum discriminant: {:#?}", val); - crate::mir::interpret::struct_error( - tcx.at(tcx.def_span(expr_did)), - "constant evaluation of enum discriminant resulted in non-integer", - ) - .emit(); + tcx.sess.emit_err(crate::error::ConstEvalNonIntError { + span: tcx.def_span(expr_did), + }); None } } @@ -564,6 +560,13 @@ impl<'tcx> AdtDef<'tcx> { /// Due to normalization being eager, this applies even if /// the associated type is behind a pointer (e.g., issue #31299). 
pub fn sized_constraint(self, tcx: TyCtxt<'tcx>) -> ty::EarlyBinder<&'tcx [Ty<'tcx>]> { - ty::EarlyBinder(tcx.adt_sized_constraint(self.did()).0) + ty::EarlyBinder(tcx.adt_sized_constraint(self.did())) } } + +#[derive(Clone, Copy, Debug)] +#[derive(HashStable)] +pub enum Representability { + Representable, + Infinite, +} diff --git a/compiler/rustc_middle/src/ty/assoc.rs b/compiler/rustc_middle/src/ty/assoc.rs index c97156ac1..55ee5bd2f 100644 --- a/compiler/rustc_middle/src/ty/assoc.rs +++ b/compiler/rustc_middle/src/ty/assoc.rs @@ -42,7 +42,7 @@ impl AssocItem { } #[inline] - pub fn visibility(&self, tcx: TyCtxt<'_>) -> Visibility { + pub fn visibility(&self, tcx: TyCtxt<'_>) -> Visibility<DefId> { tcx.visibility(self.def_id) } diff --git a/compiler/rustc_middle/src/ty/binding.rs b/compiler/rustc_middle/src/ty/binding.rs index 3d65429f2..a5b05a4f9 100644 --- a/compiler/rustc_middle/src/ty/binding.rs +++ b/compiler/rustc_middle/src/ty/binding.rs @@ -1,6 +1,4 @@ -use rustc_hir::BindingAnnotation; -use rustc_hir::BindingAnnotation::*; -use rustc_hir::Mutability; +use rustc_hir::{BindingAnnotation, ByRef, Mutability}; #[derive(Clone, PartialEq, TyEncodable, TyDecodable, Debug, Copy, HashStable)] pub enum BindingMode { @@ -11,12 +9,10 @@ pub enum BindingMode { TrivialTypeTraversalAndLiftImpls! { BindingMode, } impl BindingMode { - pub fn convert(ba: BindingAnnotation) -> BindingMode { - match ba { - Unannotated => BindingMode::BindByValue(Mutability::Not), - Mutable => BindingMode::BindByValue(Mutability::Mut), - Ref => BindingMode::BindByReference(Mutability::Not), - RefMut => BindingMode::BindByReference(Mutability::Mut), + pub fn convert(BindingAnnotation(by_ref, mutbl): BindingAnnotation) -> BindingMode { + match by_ref { + ByRef::No => BindingMode::BindByValue(mutbl), + ByRef::Yes => BindingMode::BindByReference(mutbl), } } } diff --git a/compiler/rustc_middle/src/ty/cast.rs b/compiler/rustc_middle/src/ty/cast.rs index c4b743dd4..e65585955 100644 --- a/compiler/rustc_middle/src/ty/cast.rs +++ b/compiler/rustc_middle/src/ty/cast.rs @@ -2,6 +2,7 @@ // typeck and codegen. use crate::ty::{self, Ty}; +use rustc_middle::mir; use rustc_macros::HashStable; @@ -33,10 +34,12 @@ pub enum CastTy<'tcx> { FnPtr, /// Raw pointers. Ptr(ty::TypeAndMut<'tcx>), + /// Casting into a `dyn*` value. + DynStar, } /// Cast Kind. See [RFC 401](https://rust-lang.github.io/rfcs/0401-coercions.html) -/// (or librustc_typeck/check/cast.rs). +/// (or rustc_hir_analysis/check/cast.rs). #[derive(Copy, Clone, Debug, TyEncodable, TyDecodable, HashStable)] pub enum CastKind { CoercionCast, @@ -50,6 +53,7 @@ pub enum CastKind { ArrayPtrCast, FnPtrPtrCast, FnPtrAddrCast, + DynStarCast, } impl<'tcx> CastTy<'tcx> { @@ -67,7 +71,33 @@ impl<'tcx> CastTy<'tcx> { ty::Adt(d, _) if d.is_enum() && d.is_payloadfree() => Some(CastTy::Int(IntTy::CEnum)), ty::RawPtr(mt) => Some(CastTy::Ptr(mt)), ty::FnPtr(..) => Some(CastTy::FnPtr), + ty::Dynamic(_, _, ty::DynStar) => Some(CastTy::DynStar), _ => None, } } } + +/// Returns `mir::CastKind` from the given parameters. 
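[Illustration, not part of this diff: the new `mir_cast_kind` helper, shown just below, classifies `as` casts into `mir::CastKind` variants. A minimal sketch of the surface casts involved; variable names are made up.]

```rust
// Minimal sketch (not compiler code): `as` casts of the kinds that the cast
// classification below distinguishes.
fn main() {
    let x: i64 = 300;
    let a = x as u8; // int-to-int (truncates to 44)
    let b = x as f64; // int-to-float
    let c = 2.9_f64 as i32; // float-to-int (truncates toward zero: 2)
    let p = &x as *const i64;
    let addr = p as usize; // pointer-to-int exposes the address
    let q = addr as *const i64; // int-to-pointer from an exposed address
    println!("{a} {b} {c} {addr:#x} {:?}", q);
}
```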
+pub fn mir_cast_kind<'tcx>(from_ty: Ty<'tcx>, cast_ty: Ty<'tcx>) -> mir::CastKind { + let from = CastTy::from_ty(from_ty); + let cast = CastTy::from_ty(cast_ty); + let cast_kind = match (from, cast) { + (Some(CastTy::Ptr(_) | CastTy::FnPtr), Some(CastTy::Int(_))) => { + mir::CastKind::PointerExposeAddress + } + (Some(CastTy::Int(_)), Some(CastTy::Ptr(_))) => mir::CastKind::PointerFromExposedAddress, + (_, Some(CastTy::DynStar)) => mir::CastKind::DynStar, + (Some(CastTy::Int(_)), Some(CastTy::Int(_))) => mir::CastKind::IntToInt, + (Some(CastTy::FnPtr), Some(CastTy::Ptr(_))) => mir::CastKind::FnPtrToPtr, + + (Some(CastTy::Float), Some(CastTy::Int(_))) => mir::CastKind::FloatToInt, + (Some(CastTy::Int(_)), Some(CastTy::Float)) => mir::CastKind::IntToFloat, + (Some(CastTy::Float), Some(CastTy::Float)) => mir::CastKind::FloatToFloat, + (Some(CastTy::Ptr(_)), Some(CastTy::Ptr(_))) => mir::CastKind::PtrToPtr, + + (_, _) => { + bug!("Attempting to cast non-castable types {:?} and {:?}", from_ty, cast_ty) + } + }; + cast_kind +} diff --git a/compiler/rustc_middle/src/ty/codec.rs b/compiler/rustc_middle/src/ty/codec.rs index 51137c526..14ec88b7e 100644 --- a/compiler/rustc_middle/src/ty/codec.rs +++ b/compiler/rustc_middle/src/ty/codec.rs @@ -455,6 +455,7 @@ impl_arena_copy_decoder! {<'tcx> rustc_span::def_id::DefId, rustc_span::def_id::LocalDefId, (rustc_middle::middle::exported_symbols::ExportedSymbol<'tcx>, rustc_middle::middle::exported_symbols::SymbolExportInfo), + ty::DeducedParamAttrs, } #[macro_export] diff --git a/compiler/rustc_middle/src/ty/consts.rs b/compiler/rustc_middle/src/ty/consts.rs index f8792edc0..f998e6083 100644 --- a/compiler/rustc_middle/src/ty/consts.rs +++ b/compiler/rustc_middle/src/ty/consts.rs @@ -1,9 +1,6 @@ use crate::mir::interpret::LitToConstInput; use crate::mir::ConstantKind; -use crate::ty::{ - self, InlineConstSubsts, InlineConstSubstsParts, InternalSubsts, ParamEnv, ParamEnvAnd, Ty, - TyCtxt, TypeVisitable, -}; +use crate::ty::{self, InternalSubsts, ParamEnv, ParamEnvAnd, Ty, TyCtxt}; use rustc_data_structures::intern::Interned; use rustc_errors::ErrorGuaranteed; use rustc_hir as hir; @@ -41,7 +38,7 @@ pub struct ConstS<'tcx> { } #[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))] -static_assert_size!(ConstS<'_>, 48); +static_assert_size!(ConstS<'_>, 40); impl<'tcx> Const<'tcx> { #[inline] @@ -65,8 +62,6 @@ impl<'tcx> Const<'tcx> { tcx: TyCtxt<'tcx>, def: ty::WithOptConstParam<LocalDefId>, ) -> Self { - debug!("Const::from_anon_const(def={:?})", def); - let body_id = match tcx.hir().get_by_def_id(def.did) { hir::Node::AnonConst(ac) => ac.body, _ => span_bug!( @@ -83,10 +78,9 @@ impl<'tcx> Const<'tcx> { match Self::try_eval_lit_or_param(tcx, ty, expr) { Some(v) => v, None => tcx.mk_const(ty::ConstS { - kind: ty::ConstKind::Unevaluated(ty::Unevaluated { + kind: ty::ConstKind::Unevaluated(ty::UnevaluatedConst { def: def.to_global(), substs: InternalSubsts::identity_for_item(tcx, def.did.to_def_id()), - promoted: None, }), ty, }), @@ -153,46 +147,6 @@ impl<'tcx> Const<'tcx> { } } - pub fn from_inline_const(tcx: TyCtxt<'tcx>, def_id: LocalDefId) -> Self { - debug!("Const::from_inline_const(def_id={:?})", def_id); - - let hir_id = tcx.hir().local_def_id_to_hir_id(def_id); - - let body_id = match tcx.hir().get(hir_id) { - hir::Node::AnonConst(ac) => ac.body, - _ => span_bug!( - tcx.def_span(def_id.to_def_id()), - "from_inline_const can only process anonymous constants" - ), - }; - - let expr = &tcx.hir().body(body_id).value; - - let ty = 
tcx.typeck(def_id).node_type(hir_id); - - let ret = match Self::try_eval_lit_or_param(tcx, ty, expr) { - Some(v) => v, - None => { - let typeck_root_def_id = tcx.typeck_root_def_id(def_id.to_def_id()); - let parent_substs = - tcx.erase_regions(InternalSubsts::identity_for_item(tcx, typeck_root_def_id)); - let substs = - InlineConstSubsts::new(tcx, InlineConstSubstsParts { parent_substs, ty }) - .substs; - tcx.mk_const(ty::ConstS { - kind: ty::ConstKind::Unevaluated(ty::Unevaluated { - def: ty::WithOptConstParam::unknown(def_id).to_global(), - substs, - promoted: None, - }), - ty, - }) - } - }; - debug_assert!(!ret.has_free_regions()); - ret - } - /// Interns the given value as a constant. #[inline] pub fn from_value(tcx: TyCtxt<'tcx>, val: ty::ValTree<'tcx>, ty: Ty<'tcx>) -> Self { @@ -309,6 +263,10 @@ impl<'tcx> Const<'tcx> { self.try_eval_usize(tcx, param_env) .unwrap_or_else(|| bug!("expected usize, got {:#?}", self)) } + + pub fn is_ct_infer(self) -> bool { + matches!(self.kind(), ty::ConstKind::Infer(_)) + } } pub fn const_param_default<'tcx>(tcx: TyCtxt<'tcx>, def_id: DefId) -> Const<'tcx> { diff --git a/compiler/rustc_middle/src/ty/consts/kind.rs b/compiler/rustc_middle/src/ty/consts/kind.rs index cb0137d2e..4ab761e07 100644 --- a/compiler/rustc_middle/src/ty/consts/kind.rs +++ b/compiler/rustc_middle/src/ty/consts/kind.rs @@ -1,50 +1,52 @@ use std::convert::TryInto; +use crate::mir; use crate::mir::interpret::{AllocId, ConstValue, Scalar}; -use crate::mir::Promoted; use crate::ty::subst::{InternalSubsts, SubstsRef}; use crate::ty::ParamEnv; use crate::ty::{self, TyCtxt, TypeVisitable}; +use rustc_data_structures::stable_hasher::{HashStable, StableHasher}; use rustc_errors::ErrorGuaranteed; use rustc_hir::def_id::DefId; use rustc_macros::HashStable; use rustc_target::abi::Size; use super::ScalarInt; -/// An unevaluated, potentially generic, constant. + +/// An unevaluated (potentially generic) constant used in the type-system. #[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable, Lift)] -#[derive(Hash, HashStable)] -pub struct Unevaluated<'tcx, P = Option<Promoted>> { +#[derive(Hash, HashStable, TypeFoldable, TypeVisitable)] +pub struct UnevaluatedConst<'tcx> { pub def: ty::WithOptConstParam<DefId>, pub substs: SubstsRef<'tcx>, - pub promoted: P, } -impl<'tcx> Unevaluated<'tcx> { - #[inline] - pub fn shrink(self) -> Unevaluated<'tcx, ()> { - debug_assert_eq!(self.promoted, None); - Unevaluated { def: self.def, substs: self.substs, promoted: () } +impl rustc_errors::IntoDiagnosticArg for UnevaluatedConst<'_> { + fn into_diagnostic_arg(self) -> rustc_errors::DiagnosticArgValue<'static> { + format!("{:?}", self).into_diagnostic_arg() } } -impl<'tcx> Unevaluated<'tcx, ()> { +impl<'tcx> UnevaluatedConst<'tcx> { #[inline] - pub fn expand(self) -> Unevaluated<'tcx> { - Unevaluated { def: self.def, substs: self.substs, promoted: None } + pub fn expand(self) -> mir::UnevaluatedConst<'tcx> { + mir::UnevaluatedConst { def: self.def, substs: self.substs, promoted: None } } } -impl<'tcx, P: Default> Unevaluated<'tcx, P> { +impl<'tcx> UnevaluatedConst<'tcx> { #[inline] - pub fn new(def: ty::WithOptConstParam<DefId>, substs: SubstsRef<'tcx>) -> Unevaluated<'tcx, P> { - Unevaluated { def, substs, promoted: Default::default() } + pub fn new( + def: ty::WithOptConstParam<DefId>, + substs: SubstsRef<'tcx>, + ) -> UnevaluatedConst<'tcx> { + UnevaluatedConst { def, substs } } } /// Represents a constant in Rust. 
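// Illustrative sketch, not part of this diff: the type-system `UnevaluatedConst` above
// no longer carries a `promoted` field; converting to the MIR flavour goes through
// `expand()`, which fills in `promoted: None`. Assumes `def: ty::WithOptConstParam<DefId>`
// and `substs: SubstsRef<'tcx>` are in scope.
let uv = ty::UnevaluatedConst::new(def, substs);
let mir_uv = uv.expand(); // mir::UnevaluatedConst<'tcx>
assert!(mir_uv.promoted.is_none());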
#[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable)] -#[derive(Hash, HashStable)] +#[derive(Hash, HashStable, TypeFoldable, TypeVisitable)] pub enum ConstKind<'tcx> { /// A const generic parameter. Param(ty::ParamConst), @@ -60,7 +62,7 @@ pub enum ConstKind<'tcx> { /// Used in the HIR by using `Unevaluated` everywhere and later normalizing to one of the other /// variants when the code is monomorphic enough for that. - Unevaluated(Unevaluated<'tcx>), + Unevaluated(UnevaluatedConst<'tcx>), /// Used to hold computed value. Value(ty::ValTree<'tcx>), @@ -71,7 +73,7 @@ pub enum ConstKind<'tcx> { } #[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))] -static_assert_size!(ConstKind<'_>, 40); +static_assert_size!(ConstKind<'_>, 32); impl<'tcx> ConstKind<'tcx> { #[inline] @@ -107,7 +109,6 @@ impl<'tcx> ConstKind<'tcx> { /// An inference variable for a const, for use in const generics. #[derive(Copy, Clone, Debug, Eq, PartialEq, PartialOrd, Ord, TyEncodable, TyDecodable, Hash)] -#[derive(HashStable)] pub enum InferConst<'tcx> { /// Infer the value of the const. Var(ty::ConstVid<'tcx>), @@ -115,6 +116,15 @@ pub enum InferConst<'tcx> { Fresh(u32), } +impl<CTX> HashStable<CTX> for InferConst<'_> { + fn hash_stable(&self, hcx: &mut CTX, hasher: &mut StableHasher) { + match self { + InferConst::Var(_) => panic!("const variables should not be hashed: {self:?}"), + InferConst::Fresh(i) => i.hash_stable(hcx, hasher), + } + } +} + enum EvalMode { Typeck, Mir, @@ -174,6 +184,7 @@ impl<'tcx> ConstKind<'tcx> { param_env: ParamEnv<'tcx>, eval_mode: EvalMode, ) -> Option<Result<EvalResult<'tcx>, ErrorGuaranteed>> { + assert!(!self.has_escaping_bound_vars(), "escaping vars in {self:?}"); if let ConstKind::Unevaluated(unevaluated) = self { use crate::mir::interpret::ErrorHandled; @@ -194,10 +205,9 @@ impl<'tcx> ConstKind<'tcx> { // FIXME(eddyb, skinny121) pass `InferCtxt` into here when it's available, so that // we can call `infcx.const_eval_resolve` which handles inference variables. let param_env_and = if param_env_and.needs_infer() { - tcx.param_env(unevaluated.def.did).and(ty::Unevaluated { + tcx.param_env(unevaluated.def.did).and(ty::UnevaluatedConst { def: unevaluated.def, substs: InternalSubsts::identity_for_item(tcx, unevaluated.def.did), - promoted: unevaluated.promoted, }) } else { param_env_and @@ -221,7 +231,7 @@ impl<'tcx> ConstKind<'tcx> { } } EvalMode::Mir => { - match tcx.const_eval_resolve(param_env, unevaluated, None) { + match tcx.const_eval_resolve(param_env, unevaluated.expand(), None) { // NOTE(eddyb) `val` contains no lifetimes/types/consts, // and we use the original type, so nothing from `substs` // (which may be identity substs, see above), diff --git a/compiler/rustc_middle/src/ty/consts/valtree.rs b/compiler/rustc_middle/src/ty/consts/valtree.rs index 93707bb18..a803fca0d 100644 --- a/compiler/rustc_middle/src/ty/consts/valtree.rs +++ b/compiler/rustc_middle/src/ty/consts/valtree.rs @@ -18,7 +18,7 @@ use rustc_macros::{HashStable, TyDecodable, TyEncodable}; /// `ValTree` does not have this problem with representation, as it only contains integers or /// lists of (nested) `ValTree`. pub enum ValTree<'tcx> { - /// ZSTs, integers, `bool`, `char` are represented as scalars. + /// integers, `bool`, `char` are represented as scalars. /// See the `ScalarInt` documentation for how `ScalarInt` guarantees that equal values /// of these types have the same representation. 
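// Illustrative sketch, not part of this diff: per the documentation updated in this hunk
// (the ZST note lands on `Branch` just below), a zero-sized value such as `()` is an
// empty branch rather than a scalar leaf, while integers, `bool` and `char` stay as
// `Leaf(ScalarInt)`.
let unit_valtree: ty::ValTree<'_> = ty::ValTree::Branch(&[]); // ZST: no fields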
Leaf(ScalarInt), @@ -27,8 +27,11 @@ pub enum ValTree<'tcx> { // dont use SliceOrStr for now /// The fields of any kind of aggregate. Structs, tuples and arrays are represented by /// listing their fields' values in order. + /// /// Enums are represented by storing their discriminant as a field, followed by all /// the fields of the variant. + /// + /// ZST types are represented as an empty slice. Branch(&'tcx [ValTree<'tcx>]), } diff --git a/compiler/rustc_middle/src/ty/context.rs b/compiler/rustc_middle/src/ty/context.rs index 0a0f45ce1..3d7e2a083 100644 --- a/compiler/rustc_middle/src/ty/context.rs +++ b/compiler/rustc_middle/src/ty/context.rs @@ -1,10 +1,10 @@ //! Type context book-keeping. use crate::arena::Arena; -use crate::dep_graph::{DepGraph, DepKind, DepKindStruct}; +use crate::dep_graph::{DepGraph, DepKindStruct}; use crate::hir::place::Place as HirPlace; use crate::infer::canonical::{Canonical, CanonicalVarInfo, CanonicalVarInfos}; -use crate::lint::{struct_lint_level, LintLevelSource}; +use crate::lint::struct_lint_level; use crate::middle::codegen_fn_attrs::CodegenFnAttrs; use crate::middle::resolve_lifetime; use crate::middle::stability; @@ -15,14 +15,15 @@ use crate::mir::{ use crate::thir::Thir; use crate::traits; use crate::ty::query::{self, TyCtxtAt}; -use crate::ty::subst::{GenericArg, GenericArgKind, InternalSubsts, Subst, SubstsRef, UserSubsts}; use crate::ty::{ self, AdtDef, AdtDefData, AdtKind, Binder, BindingMode, BoundVar, CanonicalPolyFnSig, ClosureSizeProfileData, Const, ConstS, ConstVid, DefIdTree, ExistentialPredicate, FloatTy, FloatVar, FloatVid, GenericParamDefKind, InferConst, InferTy, IntTy, IntVar, IntVid, List, ParamConst, ParamTy, PolyFnSig, Predicate, PredicateKind, PredicateS, ProjectionTy, Region, RegionKind, ReprOptions, TraitObjectVisitor, Ty, TyKind, TyS, TyVar, TyVid, TypeAndMut, UintTy, + Visibility, }; +use crate::ty::{GenericArg, GenericArgKind, InternalSubsts, SubstsRef, UserSubsts}; use rustc_ast as ast; use rustc_data_structures::fingerprint::Fingerprint; use rustc_data_structures::fx::{FxHashMap, FxHashSet}; @@ -33,12 +34,16 @@ use rustc_data_structures::sharded::{IntoPointer, ShardedHashMap}; use rustc_data_structures::stable_hasher::{HashStable, StableHasher}; use rustc_data_structures::steal::Steal; use rustc_data_structures::sync::{self, Lock, Lrc, ReadGuard, RwLock, WorkerLocal}; +use rustc_data_structures::unord::UnordSet; use rustc_data_structures::vec_map::VecMap; -use rustc_errors::{DecorateLint, ErrorGuaranteed, LintDiagnosticBuilder, MultiSpan}; +use rustc_errors::{ + DecorateLint, DiagnosticBuilder, DiagnosticMessage, ErrorGuaranteed, MultiSpan, +}; use rustc_hir as hir; use rustc_hir::def::{DefKind, Res}; use rustc_hir::def_id::{CrateNum, DefId, DefIdMap, LocalDefId, LOCAL_CRATE}; use rustc_hir::definitions::Definitions; +use rustc_hir::hir_id::OwnerId; use rustc_hir::intravisit::Visitor; use rustc_hir::lang_items::LangItem; use rustc_hir::{ @@ -52,7 +57,7 @@ use rustc_query_system::ich::StableHashingContext; use rustc_serialize::opaque::{FileEncodeResult, FileEncoder}; use rustc_session::config::{CrateType, OutputFilenames}; use rustc_session::cstore::CrateStoreDyn; -use rustc_session::lint::{Level, Lint}; +use rustc_session::lint::Lint; use rustc_session::Limit; use rustc_session::Session; use rustc_span::def_id::{DefPathHash, StableCrateId}; @@ -62,7 +67,7 @@ use rustc_span::{Span, DUMMY_SP}; use rustc_target::abi::{Layout, LayoutS, TargetDataLayout, VariantIdx}; use rustc_target::spec::abi; use 
rustc_type_ir::sty::TyKind::*; -use rustc_type_ir::{InternAs, InternIteratorElement, Interner, TypeFlags}; +use rustc_type_ir::{DynKind, InternAs, InternIteratorElement, Interner, TypeFlags}; use std::any::Any; use std::borrow::Borrow; @@ -75,7 +80,7 @@ use std::mem; use std::ops::{Bound, Deref}; use std::sync::Arc; -use super::{ImplPolarity, RvalueScopes}; +use super::{ImplPolarity, ResolverOutputs, RvalueScopes}; pub trait OnDiskCache<'tcx>: rustc_data_structures::sync::Sync { /// Creates a new `OnDiskCache` instance from the serialized data in `data`. @@ -194,9 +199,9 @@ impl<'tcx> CtxtInterners<'tcx> { .intern(kind, |kind| { let flags = super::flags::FlagComputation::for_kind(&kind); - // It's impossible to hash inference regions (and will ICE), so we don't need to try to cache them. + // It's impossible to hash inference variables (and will ICE), so we don't need to try to cache them. // Without incremental, we rarely stable-hash types, so let's not do it proactively. - let stable_hash = if flags.flags.intersects(TypeFlags::HAS_RE_INFER) + let stable_hash = if flags.flags.intersects(TypeFlags::NEEDS_INFER) || sess.opts.incremental.is_none() { Fingerprint::ZERO @@ -275,9 +280,6 @@ pub struct CommonTypes<'tcx> { } pub struct CommonLifetimes<'tcx> { - /// `ReEmpty` in the root universe. - pub re_root_empty: Region<'tcx>, - /// `ReStatic` pub re_static: Region<'tcx>, @@ -290,7 +292,7 @@ pub struct CommonConsts<'tcx> { } pub struct LocalTableInContext<'a, V> { - hir_owner: LocalDefId, + hir_owner: OwnerId, data: &'a ItemLocalMap<V>, } @@ -302,7 +304,7 @@ pub struct LocalTableInContext<'a, V> { /// would result in lookup errors, or worse, in silently wrong data being /// stored/returned. #[inline] -fn validate_hir_id_for_typeck_results(hir_owner: LocalDefId, hir_id: hir::HirId) { +fn validate_hir_id_for_typeck_results(hir_owner: OwnerId, hir_id: hir::HirId) { if hir_id.owner != hir_owner { invalid_hir_id_for_typeck_results(hir_owner, hir_id); } @@ -310,7 +312,7 @@ fn validate_hir_id_for_typeck_results(hir_owner: LocalDefId, hir_id: hir::HirId) #[cold] #[inline(never)] -fn invalid_hir_id_for_typeck_results(hir_owner: LocalDefId, hir_id: hir::HirId) { +fn invalid_hir_id_for_typeck_results(hir_owner: OwnerId, hir_id: hir::HirId) { ty::tls::with(|tcx| { bug!( "node {} with HirId::owner {:?} cannot be placed in TypeckResults with hir_owner {:?}", @@ -346,7 +348,7 @@ impl<'a, V> ::std::ops::Index<hir::HirId> for LocalTableInContext<'a, V> { } pub struct LocalTableInContextMut<'a, V> { - hir_owner: LocalDefId, + hir_owner: OwnerId, data: &'a mut ItemLocalMap<V>, } @@ -418,7 +420,7 @@ pub struct GeneratorDiagnosticData<'tcx> { #[derive(TyEncodable, TyDecodable, Debug, HashStable)] pub struct TypeckResults<'tcx> { /// The `HirId::owner` all `ItemLocalId`s in this table are relative to. - pub hir_owner: LocalDefId, + pub hir_owner: OwnerId, /// Resolved definitions for `<T>::X` associated paths and /// method calls, including those of overloaded operators. @@ -530,19 +532,17 @@ pub struct TypeckResults<'tcx> { /// This is used for warning unused imports. During type /// checking, this `Lrc` should not be cloned: it must have a ref-count /// of 1 so that we can insert things into the set mutably. - pub used_trait_imports: Lrc<FxHashSet<LocalDefId>>, + pub used_trait_imports: Lrc<UnordSet<LocalDefId>>, /// If any errors occurred while type-checking this body, /// this field will be set to `Some(ErrorGuaranteed)`. 
pub tainted_by_errors: Option<ErrorGuaranteed>, /// All the opaque types that have hidden types set - /// by this function. For return-position-impl-trait we also store the - /// type here, so that mir-borrowck can figure out hidden types, + /// by this function. We also store the + /// type here, so that mir-borrowck can use it as a hint for figuring out hidden types, /// even if they are only set in dead code (which doesn't show up in MIR). - /// For type-alias-impl-trait, this map is only used to prevent query cycles, - /// so the hidden types are all `None`. - pub concrete_opaque_types: VecMap<LocalDefId, Option<Ty<'tcx>>>, + pub concrete_opaque_types: VecMap<LocalDefId, ty::OpaqueHiddenType<'tcx>>, /// Tracks the minimum captures required for a closure; /// see `MinCaptureInformationMap` for more details. @@ -574,7 +574,7 @@ pub struct TypeckResults<'tcx> { /// Tracks the rvalue scoping rules which defines finer scoping for rvalue expressions /// by applying extended parameter rules. - /// Details may be find in `rustc_typeck::check::rvalue_scopes`. + /// Details may be find in `rustc_hir_analysis::check::rvalue_scopes`. pub rvalue_scopes: RvalueScopes, /// Stores the type, expression, span and optional scope span of all types @@ -593,7 +593,7 @@ pub struct TypeckResults<'tcx> { } impl<'tcx> TypeckResults<'tcx> { - pub fn new(hir_owner: LocalDefId) -> TypeckResults<'tcx> { + pub fn new(hir_owner: OwnerId) -> TypeckResults<'tcx> { TypeckResults { hir_owner, type_dependent_defs: Default::default(), @@ -874,7 +874,7 @@ pub type CanonicalUserTypeAnnotations<'tcx> = #[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable, Lift)] pub struct CanonicalUserTypeAnnotation<'tcx> { - pub user_ty: CanonicalUserType<'tcx>, + pub user_ty: Box<CanonicalUserType<'tcx>>, pub span: Span, pub inferred_ty: Ty<'tcx>, } @@ -986,11 +986,7 @@ impl<'tcx> CommonLifetimes<'tcx> { )) }; - CommonLifetimes { - re_root_empty: mk(ty::ReEmpty(ty::UniverseIndex::ROOT)), - re_static: mk(ty::ReStatic), - re_erased: mk(ty::ReErased), - } + CommonLifetimes { re_static: mk(ty::ReStatic), re_erased: mk(ty::ReErased) } } } @@ -1072,10 +1068,9 @@ pub struct GlobalCtxt<'tcx> { pub consts: CommonConsts<'tcx>, definitions: RwLock<Definitions>, - cstore: Box<CrateStoreDyn>, /// Output of the resolver. - pub(crate) untracked_resolutions: ty::ResolverOutputs, + pub(crate) untracked_resolutions: ty::ResolverGlobalCtxt, untracked_resolver_for_lowering: Steal<ty::ResolverAstLowering>, /// The entire crate as AST. This field serves as the input for the hir_crate query, /// which lowers it from AST to HIR. It must not be read or used by anything else. @@ -1089,7 +1084,7 @@ pub struct GlobalCtxt<'tcx> { pub queries: &'tcx dyn query::QueryEngine<'tcx>, pub query_caches: query::QueryCaches<'tcx>, - query_kinds: &'tcx [DepKindStruct], + pub(crate) query_kinds: &'tcx [DepKindStruct<'tcx>], // Internal caches for metadata decoding. No need to track deps on this. 
pub ty_rcache: Lock<FxHashMap<ty::CReaderCacheKey, Ty<'tcx>>>, @@ -1238,27 +1233,29 @@ impl<'tcx> TyCtxt<'tcx> { lint_store: Lrc<dyn Any + sync::Send + sync::Sync>, arena: &'tcx WorkerLocal<Arena<'tcx>>, hir_arena: &'tcx WorkerLocal<hir::Arena<'tcx>>, - definitions: Definitions, - cstore: Box<CrateStoreDyn>, - untracked_resolutions: ty::ResolverOutputs, - untracked_resolver_for_lowering: ty::ResolverAstLowering, + resolver_outputs: ResolverOutputs, krate: Lrc<ast::Crate>, dep_graph: DepGraph, on_disk_cache: Option<&'tcx dyn OnDiskCache<'tcx>>, queries: &'tcx dyn query::QueryEngine<'tcx>, - query_kinds: &'tcx [DepKindStruct], + query_kinds: &'tcx [DepKindStruct<'tcx>], crate_name: &str, output_filenames: OutputFilenames, ) -> GlobalCtxt<'tcx> { + let ResolverOutputs { + definitions, + global_ctxt: untracked_resolutions, + ast_lowering: untracked_resolver_for_lowering, + } = resolver_outputs; let data_layout = TargetDataLayout::parse(&s.target).unwrap_or_else(|err| { - s.fatal(&err); + s.emit_fatal(err); }); let interners = CtxtInterners::new(arena); let common_types = CommonTypes::new( &interners, s, &definitions, - &*cstore, + &*untracked_resolutions.cstore, // This is only used to create a stable hashing context. &untracked_resolutions.source_span, ); @@ -1273,7 +1270,6 @@ impl<'tcx> TyCtxt<'tcx> { interners, dep_graph, definitions: RwLock::new(definitions), - cstore, prof: s.prof.clone(), types: common_types, lifetimes: common_lifetimes, @@ -1296,10 +1292,6 @@ impl<'tcx> TyCtxt<'tcx> { } } - pub(crate) fn query_kind(self, k: DepKind) -> &'tcx DepKindStruct { - &self.query_kinds[k as usize] - } - /// Constructs a `TyKind::Error` type and registers a `delay_span_bug` to ensure it gets used. #[track_caller] pub fn ty_error(self) -> Ty<'tcx> { @@ -1378,7 +1370,7 @@ impl<'tcx> TyCtxt<'tcx> { if let Some(id) = id.as_local() { self.definitions_untracked().def_key(id) } else { - self.cstore.def_key(id) + self.untracked_resolutions.cstore.def_key(id) } } @@ -1392,7 +1384,7 @@ impl<'tcx> TyCtxt<'tcx> { if let Some(id) = id.as_local() { self.definitions_untracked().def_path(id) } else { - self.cstore.def_path(id) + self.untracked_resolutions.cstore.def_path(id) } } @@ -1402,7 +1394,7 @@ impl<'tcx> TyCtxt<'tcx> { if let Some(def_id) = def_id.as_local() { self.definitions_untracked().def_path_hash(def_id) } else { - self.cstore.def_path_hash(def_id) + self.untracked_resolutions.cstore.def_path_hash(def_id) } } @@ -1411,7 +1403,7 @@ impl<'tcx> TyCtxt<'tcx> { if crate_num == LOCAL_CRATE { self.sess.local_stable_crate_id() } else { - self.cstore.stable_crate_id(crate_num) + self.untracked_resolutions.cstore.stable_crate_id(crate_num) } } @@ -1422,7 +1414,7 @@ impl<'tcx> TyCtxt<'tcx> { if stable_crate_id == self.sess.local_stable_crate_id() { LOCAL_CRATE } else { - self.cstore.stable_crate_id_to_crate_num(stable_crate_id) + self.untracked_resolutions.cstore.stable_crate_id_to_crate_num(stable_crate_id) } } @@ -1441,8 +1433,9 @@ impl<'tcx> TyCtxt<'tcx> { } else { // If this is a DefPathHash from an upstream crate, let the CrateStore map // it to a DefId. 
- let cnum = self.cstore.stable_crate_id_to_crate_num(stable_crate_id); - self.cstore.def_path_hash_to_def_id(cnum, hash) + let cstore = &*self.untracked_resolutions.cstore; + let cnum = cstore.stable_crate_id_to_crate_num(stable_crate_id); + cstore.def_path_hash_to_def_id(cnum, hash) } } @@ -1454,7 +1447,7 @@ impl<'tcx> TyCtxt<'tcx> { let (crate_name, stable_crate_id) = if def_id.is_local() { (self.crate_name, self.sess.local_stable_crate_id()) } else { - let cstore = &self.cstore; + let cstore = &*self.untracked_resolutions.cstore; (cstore.crate_name(def_id.krate), cstore.stable_crate_id(def_id.krate)) }; @@ -1498,17 +1491,17 @@ impl<'tcx> TyCtxt<'tcx> { // Create a dependency to the crate to be sure we re-execute this when the amount of // definitions change. self.ensure().hir_crate(()); - // Leak a read lock once we start iterating on definitions, to prevent adding new onces + // Leak a read lock once we start iterating on definitions, to prevent adding new ones // while iterating. If some query needs to add definitions, it should be `ensure`d above. let definitions = self.definitions.leak(); definitions.iter_local_def_id() } pub fn def_path_table(self) -> &'tcx rustc_hir::definitions::DefPathTable { - // Create a dependency to the crate to be sure we reexcute this when the amount of + // Create a dependency to the crate to be sure we re-execute this when the amount of // definitions change. self.ensure().hir_crate(()); - // Leak a read lock once we start iterating on definitions, to prevent adding new onces + // Leak a read lock once we start iterating on definitions, to prevent adding new ones // while iterating. If some query needs to add definitions, it should be `ensure`d above. let definitions = self.definitions.leak(); definitions.def_path_table() @@ -1517,10 +1510,10 @@ impl<'tcx> TyCtxt<'tcx> { pub fn def_path_hash_to_def_index_map( self, ) -> &'tcx rustc_hir::def_path_hash_map::DefPathHashMap { - // Create a dependency to the crate to be sure we reexcute this when the amount of + // Create a dependency to the crate to be sure we re-execute this when the amount of // definitions change. self.ensure().hir_crate(()); - // Leak a read lock once we start iterating on definitions, to prevent adding new onces + // Leak a read lock once we start iterating on definitions, to prevent adding new ones // while iterating. If some query needs to add definitions, it should be `ensure`d above. let definitions = self.definitions.leak(); definitions.def_path_hash_to_def_index_map() @@ -1529,7 +1522,7 @@ impl<'tcx> TyCtxt<'tcx> { /// Note that this is *untracked* and should only be used within the query /// system if the result is otherwise tracked through queries pub fn cstore_untracked(self) -> &'tcx CrateStoreDyn { - &*self.cstore + &*self.untracked_resolutions.cstore } /// Note that this is *untracked* and should only be used within the query @@ -1555,7 +1548,7 @@ impl<'tcx> TyCtxt<'tcx> { let hcx = StableHashingContext::new( self.sess, &*definitions, - &*self.cstore, + &*self.untracked_resolutions.cstore, &self.untracked_resolutions.source_span, ); f(hcx) @@ -1596,7 +1589,7 @@ impl<'tcx> TyCtxt<'tcx> { }) } - // Returns the `DefId` and the `BoundRegionKind` corresponding to the given region. + /// Returns the `DefId` and the `BoundRegionKind` corresponding to the given region. 
pub fn is_suitable_region(self, region: Region<'tcx>) -> Option<FreeRegionInfo> { let (suitable_region_binding_scope, bound_region) = match *region { ty::ReFree(ref free_region) => { @@ -1728,6 +1721,11 @@ impl<'tcx> TyCtxt<'tcx> { .chain(self.crates(()).iter().copied()) .flat_map(move |cnum| self.traits_in_crate(cnum).iter().copied()) } + + #[inline] + pub fn local_visibility(self, def_id: LocalDefId) -> Visibility { + self.visibility(def_id).expect_local() + } } /// A trait implemented for all `X<'a>` types that can be safely and @@ -1821,7 +1819,9 @@ nop_list_lift! {bound_variable_kinds; ty::BoundVariableKind => ty::BoundVariable // This is the impl for `&'a InternalSubsts<'a>`. nop_list_lift! {substs; GenericArg<'a> => GenericArg<'tcx>} -CloneLiftImpls! { for<'tcx> { Constness, traits::WellFormedLoc, } } +CloneLiftImpls! { for<'tcx> { + Constness, traits::WellFormedLoc, ImplPolarity, crate::mir::ReturnConstraint, +} } pub mod tls { use super::{ptr_eq, GlobalCtxt, TyCtxt}; @@ -1829,9 +1829,9 @@ pub mod tls { use crate::dep_graph::TaskDepsRef; use crate::ty::query; use rustc_data_structures::sync::{self, Lock}; - use rustc_data_structures::thin_vec::ThinVec; use rustc_errors::Diagnostic; use std::mem; + use thin_vec::ThinVec; #[cfg(not(parallel_compiler))] use std::cell::Cell; @@ -1857,8 +1857,8 @@ pub mod tls { /// This is updated by `JobOwner::start` in `ty::query::plumbing` when executing a query. pub diagnostics: Option<&'a Lock<ThinVec<Diagnostic>>>, - /// Used to prevent layout from recursing too deeply. - pub layout_depth: usize, + /// Used to prevent queries from calling too deeply. + pub query_depth: usize, /// The current dep graph task. This is used to add dependencies to queries /// when executing them. @@ -1872,7 +1872,7 @@ pub mod tls { tcx, query: None, diagnostics: None, - layout_depth: 0, + query_depth: 0, task_deps: TaskDepsRef::Ignore, } } @@ -2366,7 +2366,7 @@ impl<'tcx> TyCtxt<'tcx> { st, self.sess, &self.definitions.read(), - &*self.cstore, + &*self.untracked_resolutions.cstore, // This is only used to create a stable hashing context. &self.untracked_resolutions.source_span, ) @@ -2546,8 +2546,9 @@ impl<'tcx> TyCtxt<'tcx> { self, obj: &'tcx List<ty::Binder<'tcx, ExistentialPredicate<'tcx>>>, reg: ty::Region<'tcx>, + repr: DynKind, ) -> Ty<'tcx> { - self.mk_ty(Dynamic(obj, reg)) + self.mk_ty(Dynamic(obj, reg, repr)) } #[inline] @@ -2810,44 +2811,6 @@ impl<'tcx> TyCtxt<'tcx> { iter.intern_with(|xs| self.intern_bound_variable_kinds(xs)) } - /// Walks upwards from `id` to find a node which might change lint levels with attributes. - /// It stops at `bound` and just returns it if reached. 
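// Illustrative sketch, not part of this diff: `mk_dynamic`, changed earlier in this
// file's hunks, now takes a `DynKind`, distinguishing `dyn Trait` from the new
// `dyn* Trait` representation. Assumes `preds` (the interned list of existential
// predicates) and a `region` are in scope; `ty::Dyn`/`ty::DynStar` are taken to be the
// two `DynKind` variants.
let object_ty = tcx.mk_dynamic(preds, region, ty::Dyn);
let dyn_star_ty = tcx.mk_dynamic(preds, region, ty::DynStar);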
- pub fn maybe_lint_level_root_bounded(self, mut id: HirId, bound: HirId) -> HirId { - let hir = self.hir(); - loop { - if id == bound { - return bound; - } - - if hir.attrs(id).iter().any(|attr| Level::from_attr(attr).is_some()) { - return id; - } - let next = hir.get_parent_node(id); - if next == id { - bug!("lint traversal reached the root of the crate"); - } - id = next; - } - } - - pub fn lint_level_at_node( - self, - lint: &'static Lint, - mut id: hir::HirId, - ) -> (Level, LintLevelSource) { - let sets = self.lint_levels(()); - loop { - if let Some(pair) = sets.level_and_source(lint, id, self.sess) { - return pair; - } - let next = self.hir().get_parent_node(id); - if next == id { - bug!("lint traversal reached the root of the crate"); - } - id = next; - } - } - /// Emit a lint at `span` from a lint struct (some type that implements `DecorateLint`, /// typically generated by `#[derive(LintDiagnostic)]`). pub fn emit_spanned_lint( @@ -2857,18 +2820,28 @@ impl<'tcx> TyCtxt<'tcx> { span: impl Into<MultiSpan>, decorator: impl for<'a> DecorateLint<'a, ()>, ) { - self.struct_span_lint_hir(lint, hir_id, span, |diag| decorator.decorate_lint(diag)) + self.struct_span_lint_hir(lint, hir_id, span, decorator.msg(), |diag| { + decorator.decorate_lint(diag) + }) } + /// Emit a lint at the appropriate level for a hir node, with an associated span. + /// + /// Return value of the `decorate` closure is ignored, see [`struct_lint_level`] for a detailed explanation. + /// + /// [`struct_lint_level`]: rustc_middle::lint::struct_lint_level#decorate-signature pub fn struct_span_lint_hir( self, lint: &'static Lint, hir_id: HirId, span: impl Into<MultiSpan>, - decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a, ()>), + msg: impl Into<DiagnosticMessage>, + decorate: impl for<'a, 'b> FnOnce( + &'b mut DiagnosticBuilder<'a, ()>, + ) -> &'b mut DiagnosticBuilder<'a, ()>, ) { let (level, src) = self.lint_level_at_node(lint, hir_id); - struct_lint_level(self.sess, lint, level, src, Some(span.into()), decorate); + struct_lint_level(self.sess, lint, level, src, Some(span.into()), msg, decorate); } /// Emit a lint from a lint struct (some type that implements `DecorateLint`, typically @@ -2879,17 +2852,25 @@ impl<'tcx> TyCtxt<'tcx> { id: HirId, decorator: impl for<'a> DecorateLint<'a, ()>, ) { - self.struct_lint_node(lint, id, |diag| decorator.decorate_lint(diag)) + self.struct_lint_node(lint, id, decorator.msg(), |diag| decorator.decorate_lint(diag)) } + /// Emit a lint at the appropriate level for a hir node. + /// + /// Return value of the `decorate` closure is ignored, see [`struct_lint_level`] for a detailed explanation. 
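// Illustrative sketch, not part of this diff: with the signature change to
// `struct_span_lint_hir` above, the lint message is passed as its own argument and the
// `decorate` closure returns the builder instead of emitting it. Assumes
// `lint: &'static Lint`, `hir_id`, and `span` are in scope.
tcx.struct_span_lint_hir(lint, hir_id, span, "the message now goes here", |diag| {
    diag.note("decorate only adds notes/labels; `struct_lint_level` does the emitting")
});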
+ /// + /// [`struct_lint_level`]: rustc_middle::lint::struct_lint_level#decorate-signature pub fn struct_lint_node( self, lint: &'static Lint, id: HirId, - decorate: impl for<'a> FnOnce(LintDiagnosticBuilder<'a, ()>), + msg: impl Into<DiagnosticMessage>, + decorate: impl for<'a, 'b> FnOnce( + &'b mut DiagnosticBuilder<'a, ()>, + ) -> &'b mut DiagnosticBuilder<'a, ()>, ) { let (level, src) = self.lint_level_at_node(lint, id); - struct_lint_level(self.sess, lint, level, src, None, decorate); + struct_lint_level(self.sess, lint, level, src, None, msg, decorate); } pub fn in_scope_traits(self, id: HirId) -> Option<&'tcx [TraitCandidate]> { @@ -2904,7 +2885,7 @@ impl<'tcx> TyCtxt<'tcx> { } pub fn is_late_bound(self, id: HirId) -> bool { - self.is_late_bound_map(id.owner).map_or(false, |set| { + self.is_late_bound_map(id.owner.def_id).map_or(false, |set| { let def_id = self.hir().local_def_id(id); set.contains(&def_id) }) @@ -2975,6 +2956,21 @@ impl<'tcx> TyCtxtAt<'tcx> { } } +/// Parameter attributes that can only be determined by examining the body of a function instead +/// of just its signature. +/// +/// These can be useful for optimization purposes when a function is directly called. We compute +/// them and store them into the crate metadata so that downstream crates can make use of them. +/// +/// Right now, we only have `read_only`, but `no_capture` and `no_alias` might be useful in the +/// future. +#[derive(Clone, Copy, PartialEq, Debug, Default, TyDecodable, TyEncodable, HashStable)] +pub struct DeducedParamAttrs { + /// The parameter is marked immutable in the function and contains no `UnsafeCell` (i.e. its + /// type is freeze). + pub read_only: bool, +} + // We are comparing types with different invariant lifetimes, so `ptr::eq` // won't work for us. fn ptr_eq<T, U>(t: *const T, u: *const U) -> bool { diff --git a/compiler/rustc_middle/src/ty/diagnostics.rs b/compiler/rustc_middle/src/ty/diagnostics.rs index dd2f43210..b8fd01e6a 100644 --- a/compiler/rustc_middle/src/ty/diagnostics.rs +++ b/compiler/rustc_middle/src/ty/diagnostics.rs @@ -102,13 +102,25 @@ pub fn suggest_arbitrary_trait_bound<'tcx>( generics: &hir::Generics<'_>, err: &mut Diagnostic, trait_pred: PolyTraitPredicate<'tcx>, + associated_ty: Option<(&'static str, Ty<'tcx>)>, ) -> bool { if !trait_pred.is_suggestable(tcx, false) { return false; } let param_name = trait_pred.skip_binder().self_ty().to_string(); - let constraint = trait_pred.print_modifiers_and_trait_path().to_string(); + let mut constraint = trait_pred.print_modifiers_and_trait_path().to_string(); + + if let Some((name, term)) = associated_ty { + // FIXME: this case overlaps with code in TyCtxt::note_and_explain_type_err. + // That should be extracted into a helper function. 
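// For example, in the branch just below: with `constraint = "Add<u32>"` and
// `associated_ty = Some(("Output", ty))` where `ty` prints as `String`, the binding is
// spliced into the existing angle brackets to give `Add<u32, Output = String>`; with
// `constraint = "Iterator"` and `("Item", ty)` it appends to give `Iterator<Item = String>`.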
+ if constraint.ends_with('>') { + constraint = format!("{}, {} = {}>", &constraint[..constraint.len() - 1], name, term); + } else { + constraint.push_str(&format!("<{} = {}>", name, term)); + } + } + let param = generics.params.iter().find(|p| p.name.ident().as_str() == param_name); // Skip, there is a param named Self @@ -396,7 +408,7 @@ impl<'v> hir::intravisit::Visitor<'v> for TraitObjectVisitor<'v> { ) => { self.0.push(ty); } - hir::TyKind::OpaqueDef(item_id, _) => { + hir::TyKind::OpaqueDef(item_id, _, _) => { self.0.push(ty); let item = self.1.item(item_id); hir::intravisit::walk_item(self, item); @@ -455,7 +467,7 @@ impl<'tcx> TypeVisitor<'tcx> for IsSuggestableVisitor<'tcx> { } } - Dynamic(dty, _) => { + Dynamic(dty, _, _) => { for pred in *dty { match pred.skip_binder() { ExistentialPredicate::Trait(_) | ExistentialPredicate::Projection(_) => { @@ -499,3 +511,11 @@ impl<'tcx> TypeVisitor<'tcx> for IsSuggestableVisitor<'tcx> { c.super_visit_with(self) } } + +#[derive(Diagnostic)] +#[diag(borrowck_const_not_used_in_type_alias)] +pub(super) struct ConstNotUsedTraitAlias { + pub ct: String, + #[primary_span] + pub span: Span, +} diff --git a/compiler/rustc_middle/src/ty/erase_regions.rs b/compiler/rustc_middle/src/ty/erase_regions.rs index 3226950e7..ffdac93bc 100644 --- a/compiler/rustc_middle/src/ty/erase_regions.rs +++ b/compiler/rustc_middle/src/ty/erase_regions.rs @@ -1,4 +1,3 @@ -use crate::mir; use crate::ty::fold::{TypeFoldable, TypeFolder, TypeSuperFoldable}; use crate::ty::visit::TypeVisitable; use crate::ty::{self, Ty, TyCtxt, TypeFlags}; @@ -67,8 +66,4 @@ impl<'tcx> TypeFolder<'tcx> for RegionEraserVisitor<'tcx> { _ => self.tcx.lifetimes.re_erased, } } - - fn fold_mir_const(&mut self, c: mir::ConstantKind<'tcx>) -> mir::ConstantKind<'tcx> { - c.super_fold_with(self) - } } diff --git a/compiler/rustc_middle/src/ty/error.rs b/compiler/rustc_middle/src/ty/error.rs index 4b0bc3c11..4e6cdb786 100644 --- a/compiler/rustc_middle/src/ty/error.rs +++ b/compiler/rustc_middle/src/ty/error.rs @@ -2,6 +2,7 @@ use crate::traits::{ObligationCause, ObligationCauseCode}; use crate::ty::diagnostics::suggest_constraining_type_param; use crate::ty::print::{FmtPrinter, Printer}; use crate::ty::{self, BoundRegionKind, Region, Ty, TyCtxt}; +use hir::def::DefKind; use rustc_errors::Applicability::{MachineApplicable, MaybeIncorrect}; use rustc_errors::{pluralize, Diagnostic, MultiSpan}; use rustc_hir as hir; @@ -13,7 +14,7 @@ use rustc_target::spec::abi; use std::borrow::Cow; use std::fmt; -#[derive(Clone, Copy, Debug, PartialEq, Eq, TypeFoldable, TypeVisitable)] +#[derive(Clone, Copy, Debug, PartialEq, Eq, TypeFoldable, TypeVisitable, Lift)] pub struct ExpectedFound<T> { pub expected: T, pub found: T, @@ -30,7 +31,8 @@ impl<T> ExpectedFound<T> { } // Data structures used in type unification -#[derive(Clone, Debug, TypeFoldable, TypeVisitable)] +#[derive(Copy, Clone, Debug, TypeFoldable, TypeVisitable, Lift)] +#[rustc_pass_by_value] pub enum TypeError<'tcx> { Mismatch, ConstnessMismatch(ExpectedFound<ty::BoundConstness>), @@ -73,6 +75,18 @@ pub enum TypeError<'tcx> { TargetFeatureCast(DefId), } +impl TypeError<'_> { + pub fn involves_regions(self) -> bool { + match self { + TypeError::RegionsDoesNotOutlive(_, _) + | TypeError::RegionsInsufficientlyPolymorphic(_, _) + | TypeError::RegionsOverlyPolymorphic(_, _) + | TypeError::RegionsPlaceholderMismatch => true, + _ => false, + } + } +} + /// Explains the source of a type err in a short, human readable way. 
This is meant to be placed /// in parentheses after some larger message. You should also invoke `note_and_explain_type_err()` /// afterwards to present additional details, particularly when it comes to lifetime-related @@ -211,7 +225,7 @@ impl<'tcx> fmt::Display for TypeError<'tcx> { } impl<'tcx> TypeError<'tcx> { - pub fn must_include_note(&self) -> bool { + pub fn must_include_note(self) -> bool { use self::TypeError::*; match self { CyclicTy(_) | CyclicConst(_) | UnsafetyMismatch(_) | ConstnessMismatch(_) @@ -263,10 +277,23 @@ impl<'tcx> Ty<'tcx> { } ty::Slice(ty) if ty.is_simple_ty() => format!("slice `{}`", self).into(), ty::Slice(_) => "slice".into(), - ty::RawPtr(_) => "*-ptr".into(), + ty::RawPtr(tymut) => { + let tymut_string = match tymut.mutbl { + hir::Mutability::Mut => tymut.to_string(), + hir::Mutability::Not => format!("const {}", tymut.ty), + }; + + if tymut_string != "_" && (tymut.ty.is_simple_text() || tymut_string.len() < "const raw pointer".len()) { + format!("`*{}`", tymut_string).into() + } else { + // Unknown type name, it's long or has type arguments + "raw pointer".into() + } + }, ty::Ref(_, ty, mutbl) => { let tymut = ty::TypeAndMut { ty, mutbl }; let tymut_string = tymut.to_string(); + if tymut_string != "_" && (ty.is_simple_text() || tymut_string.len() < "mutable reference".len()) { @@ -347,7 +374,7 @@ impl<'tcx> TyCtxt<'tcx> { pub fn note_and_explain_type_err( self, diag: &mut Diagnostic, - err: &TypeError<'tcx>, + err: TypeError<'tcx>, cause: &ObligationCause<'tcx>, sp: Span, body_owner_def_id: DefId, @@ -512,7 +539,7 @@ impl<T> Trait<T> for X { diag.span_label(p_span, "this type parameter"); } } - (ty::Projection(proj_ty), _) => { + (ty::Projection(proj_ty), _) if self.def_kind(proj_ty.item_def_id) != DefKind::ImplTraitPlaceholder => { self.expected_projection( diag, proj_ty, @@ -521,7 +548,7 @@ impl<T> Trait<T> for X { cause.code(), ); } - (_, ty::Projection(proj_ty)) => { + (_, ty::Projection(proj_ty)) if self.def_kind(proj_ty.item_def_id) != DefKind::ImplTraitPlaceholder => { let msg = format!( "consider constraining the associated type `{}` to `{}`", values.found, values.expected, @@ -568,7 +595,7 @@ impl<T> Trait<T> for X { } TargetFeatureCast(def_id) => { let target_spans = - self.get_attrs(*def_id, sym::target_feature).map(|attr| attr.span); + self.get_attrs(def_id, sym::target_feature).map(|attr| attr.span); diag.note( "functions with `#[target_feature]` can only be coerced to `unsafe` function pointers" ); @@ -640,7 +667,7 @@ impl<T> Trait<T> for X { self, diag: &mut Diagnostic, proj_ty: &ty::ProjectionTy<'tcx>, - values: &ExpectedFound<Ty<'tcx>>, + values: ExpectedFound<Ty<'tcx>>, body_owner_def_id: DefId, cause_code: &ObligationCauseCode<'_>, ) { @@ -834,7 +861,7 @@ fn foo(&self) -> Self::T { String::new() } // When `body_owner` is an `impl` or `trait` item, look in its associated types for // `expected` and point at it. let parent_id = self.hir().get_parent_item(hir_id); - let item = self.hir().find_by_def_id(parent_id); + let item = self.hir().find_by_def_id(parent_id.def_id); debug!("expected_projection parent item {:?}", item); match item { Some(hir::Node::Item(hir::Item { kind: hir::ItemKind::Trait(.., items), .. })) => { @@ -845,9 +872,9 @@ fn foo(&self) -> Self::T { String::new() } // FIXME: account for returning some type in a trait fn impl that has // an assoc type as a return type (#72076). 
if let hir::Defaultness::Default { has_value: true } = - self.impl_defaultness(item.id.def_id) + self.impl_defaultness(item.id.owner_id) { - if self.type_of(item.id.def_id) == found { + if self.type_of(item.id.owner_id) == found { diag.span_label( item.span, "associated type defaults can't be assumed inside the \ @@ -867,7 +894,7 @@ fn foo(&self) -> Self::T { String::new() } })) => { for item in &items[..] { if let hir::AssocItemKind::Type = item.kind { - if self.type_of(item.id.def_id) == found { + if self.type_of(item.id.owner_id) == found { diag.span_label(item.span, "expected this associated type"); return true; } diff --git a/compiler/rustc_middle/src/ty/fast_reject.rs b/compiler/rustc_middle/src/ty/fast_reject.rs index 8d019a3ba..3be0bc4de 100644 --- a/compiler/rustc_middle/src/ty/fast_reject.rs +++ b/compiler/rustc_middle/src/ty/fast_reject.rs @@ -132,7 +132,7 @@ pub fn simplify_type<'tcx>( // don't unify with anything else as long as they are fully normalized. // // We will have to be careful with lazy normalization here. - TreatParams::AsPlaceholder if !ty.has_infer_types_or_consts() => { + TreatParams::AsPlaceholder if !ty.has_non_region_infer() => { debug!("treating `{}` as a placeholder", ty); Some(PlaceholderSimplifiedType) } @@ -384,14 +384,7 @@ impl DeepRejectCtxt { // they might unify with any value. ty::ConstKind::Unevaluated(_) | ty::ConstKind::Error(_) => true, ty::ConstKind::Value(obl) => match k { - ty::ConstKind::Value(imp) => { - // FIXME(valtrees): Once we have valtrees, we can just - // compare them directly here. - match (obl.try_to_scalar_int(), imp.try_to_scalar_int()) { - (Some(obl), Some(imp)) => obl == imp, - _ => true, - } - } + ty::ConstKind::Value(imp) => obl == imp, _ => true, }, diff --git a/compiler/rustc_middle/src/ty/flags.rs b/compiler/rustc_middle/src/ty/flags.rs index ea6bb8a7a..7201737be 100644 --- a/compiler/rustc_middle/src/ty/flags.rs +++ b/compiler/rustc_middle/src/ty/flags.rs @@ -1,5 +1,5 @@ use crate::ty::subst::{GenericArg, GenericArgKind}; -use crate::ty::{self, InferConst, Term, Ty, TypeFlags}; +use crate::ty::{self, InferConst, Ty, TypeFlags}; use std::slice; #[derive(Debug)] @@ -34,12 +34,6 @@ impl FlagComputation { result.flags } - pub fn for_unevaluated_const(uv: ty::Unevaluated<'_>) -> TypeFlags { - let mut result = FlagComputation::new(); - result.add_unevaluated_const(uv); - result.flags - } - fn add_flags(&mut self, flags: TypeFlags) { self.flags = self.flags | flags; } @@ -171,7 +165,7 @@ impl FlagComputation { self.add_substs(substs); } - &ty::Dynamic(obj, r) => { + &ty::Dynamic(obj, r, _) => { for predicate in obj.iter() { self.bound_computation(predicate, |computation, predicate| match predicate { ty::ExistentialPredicate::Trait(tr) => computation.add_substs(tr.substs), @@ -243,9 +237,9 @@ impl FlagComputation { } ty::PredicateKind::Projection(ty::ProjectionPredicate { projection_ty, term }) => { self.add_projection_ty(projection_ty); - match term { - Term::Ty(ty) => self.add_ty(ty), - Term::Const(c) => self.add_const(c), + match term.unpack() { + ty::TermKind::Ty(ty) => self.add_ty(ty), + ty::TermKind::Const(c) => self.add_const(c), } } ty::PredicateKind::WellFormed(arg) => { @@ -256,7 +250,7 @@ impl FlagComputation { self.add_substs(substs); } ty::PredicateKind::ConstEvaluatable(uv) => { - self.add_unevaluated_const(uv); + self.add_const(uv); } ty::PredicateKind::ConstEquate(expected, found) => { self.add_const(expected); @@ -289,7 +283,10 @@ impl FlagComputation { fn add_const(&mut self, c: ty::Const<'_>) { 
self.add_ty(c.ty()); match c.kind() { - ty::ConstKind::Unevaluated(unevaluated) => self.add_unevaluated_const(unevaluated), + ty::ConstKind::Unevaluated(uv) => { + self.add_substs(uv.substs); + self.add_flags(TypeFlags::HAS_CT_PROJECTION); + } ty::ConstKind::Infer(infer) => { self.add_flags(TypeFlags::STILL_FURTHER_SPECIALIZABLE); match infer { @@ -313,16 +310,11 @@ impl FlagComputation { } } - fn add_unevaluated_const<P>(&mut self, ct: ty::Unevaluated<'_, P>) { - self.add_substs(ct.substs); - self.add_flags(TypeFlags::HAS_CT_PROJECTION); - } - fn add_existential_projection(&mut self, projection: &ty::ExistentialProjection<'_>) { self.add_substs(projection.substs); - match projection.term { - ty::Term::Ty(ty) => self.add_ty(ty), - ty::Term::Const(ct) => self.add_const(ct), + match projection.term.unpack() { + ty::TermKind::Ty(ty) => self.add_ty(ty), + ty::TermKind::Const(ct) => self.add_const(ct), } } diff --git a/compiler/rustc_middle/src/ty/fold.rs b/compiler/rustc_middle/src/ty/fold.rs index 5e96e278b..54f1499eb 100644 --- a/compiler/rustc_middle/src/ty/fold.rs +++ b/compiler/rustc_middle/src/ty/fold.rs @@ -13,8 +13,7 @@ //! //! There are three groups of traits involved in each traversal. //! - `TypeFoldable`. This is implemented once for many types, including: -//! - Types of interest, for which the the methods delegate to the -//! folder. +//! - Types of interest, for which the methods delegate to the folder. //! - All other types, including generic containers like `Vec` and `Option`. //! It defines a "skeleton" of how they should be folded. //! - `TypeSuperFoldable`. This is implemented only for each type of interest, @@ -43,7 +42,6 @@ //! - ty.super_fold_with(folder) //! - u.fold_with(folder) //! ``` -use crate::mir; use crate::ty::{self, Binder, BoundTy, Ty, TyCtxt, TypeVisitable}; use rustc_data_structures::fx::FxIndexMap; use rustc_hir::def_id::DefId; @@ -128,17 +126,9 @@ pub trait TypeFolder<'tcx>: FallibleTypeFolder<'tcx, Error = !> { c.super_fold_with(self) } - fn fold_unevaluated(&mut self, uv: ty::Unevaluated<'tcx>) -> ty::Unevaluated<'tcx> { - uv.super_fold_with(self) - } - fn fold_predicate(&mut self, p: ty::Predicate<'tcx>) -> ty::Predicate<'tcx> { p.super_fold_with(self) } - - fn fold_mir_const(&mut self, c: mir::ConstantKind<'tcx>) -> mir::ConstantKind<'tcx> { - bug!("most type folders should not be folding MIR datastructures: {:?}", c) - } } /// This trait is implemented for every folding traversal. 
There is a fold @@ -172,26 +162,12 @@ pub trait FallibleTypeFolder<'tcx>: Sized { c.try_super_fold_with(self) } - fn try_fold_unevaluated( - &mut self, - c: ty::Unevaluated<'tcx>, - ) -> Result<ty::Unevaluated<'tcx>, Self::Error> { - c.try_super_fold_with(self) - } - fn try_fold_predicate( &mut self, p: ty::Predicate<'tcx>, ) -> Result<ty::Predicate<'tcx>, Self::Error> { p.try_super_fold_with(self) } - - fn try_fold_mir_const( - &mut self, - c: mir::ConstantKind<'tcx>, - ) -> Result<mir::ConstantKind<'tcx>, Self::Error> { - bug!("most type folders should not be folding MIR datastructures: {:?}", c) - } } // This blanket implementation of the fallible trait for infallible folders @@ -225,23 +201,9 @@ where Ok(self.fold_const(c)) } - fn try_fold_unevaluated( - &mut self, - c: ty::Unevaluated<'tcx>, - ) -> Result<ty::Unevaluated<'tcx>, !> { - Ok(self.fold_unevaluated(c)) - } - fn try_fold_predicate(&mut self, p: ty::Predicate<'tcx>) -> Result<ty::Predicate<'tcx>, !> { Ok(self.fold_predicate(p)) } - - fn try_fold_mir_const( - &mut self, - c: mir::ConstantKind<'tcx>, - ) -> Result<mir::ConstantKind<'tcx>, !> { - Ok(self.fold_mir_const(c)) - } } /////////////////////////////////////////////////////////////////////////// @@ -302,6 +264,17 @@ impl<'tcx> TyCtxt<'tcx> { { value.fold_with(&mut RegionFolder::new(self, &mut f)) } + + pub fn super_fold_regions<T>( + self, + value: T, + mut f: impl FnMut(ty::Region<'tcx>, ty::DebruijnIndex) -> ty::Region<'tcx>, + ) -> T + where + T: TypeSuperFoldable<'tcx>, + { + value.super_fold_with(&mut RegionFolder::new(self, &mut f)) + } } /// Folds over the substructure of a type, visiting its component @@ -353,7 +326,7 @@ impl<'a, 'tcx> TypeFolder<'tcx> for RegionFolder<'a, 'tcx> { t } - #[instrument(skip(self), level = "debug")] + #[instrument(skip(self), level = "debug", ret)] fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> { match *r { ty::ReLateBound(debruijn, _) if debruijn < self.current_index => { @@ -377,17 +350,13 @@ pub trait BoundVarReplacerDelegate<'tcx> { fn replace_const(&mut self, bv: ty::BoundVar, ty: Ty<'tcx>) -> ty::Const<'tcx>; } -pub struct FnMutDelegate<R, T, C> { - pub regions: R, - pub types: T, - pub consts: C, +pub struct FnMutDelegate<'a, 'tcx> { + pub regions: &'a mut (dyn FnMut(ty::BoundRegion) -> ty::Region<'tcx> + 'a), + pub types: &'a mut (dyn FnMut(ty::BoundTy) -> Ty<'tcx> + 'a), + pub consts: &'a mut (dyn FnMut(ty::BoundVar, Ty<'tcx>) -> ty::Const<'tcx> + 'a), } -impl<'tcx, R, T, C> BoundVarReplacerDelegate<'tcx> for FnMutDelegate<R, T, C> -where - R: FnMut(ty::BoundRegion) -> ty::Region<'tcx>, - T: FnMut(ty::BoundTy) -> Ty<'tcx>, - C: FnMut(ty::BoundVar, Ty<'tcx>) -> ty::Const<'tcx>, -{ + +impl<'a, 'tcx> BoundVarReplacerDelegate<'tcx> for FnMutDelegate<'a, 'tcx> { fn replace_region(&mut self, br: ty::BoundRegion) -> ty::Region<'tcx> { (self.regions)(br) } @@ -511,7 +480,7 @@ impl<'tcx> TyCtxt<'tcx> { pub fn replace_late_bound_regions_uncached<T, F>( self, value: Binder<'tcx, T>, - replace_regions: F, + mut replace_regions: F, ) -> T where F: FnMut(ty::BoundRegion) -> ty::Region<'tcx>, @@ -522,9 +491,9 @@ impl<'tcx> TyCtxt<'tcx> { value } else { let delegate = FnMutDelegate { - regions: replace_regions, - types: |b| bug!("unexpected bound ty in binder: {b:?}"), - consts: |b, ty| bug!("unexpected bound ct in binder: {b:?} {ty}"), + regions: &mut replace_regions, + types: &mut |b| bug!("unexpected bound ty in binder: {b:?}"), + consts: &mut |b, ty| bug!("unexpected bound ct in binder: {b:?} {ty}"), }; let mut 
replacer = BoundVarReplacer::new(self, delegate); value.fold_with(&mut replacer) @@ -584,19 +553,19 @@ impl<'tcx> TyCtxt<'tcx> { self.replace_escaping_bound_vars_uncached( value, FnMutDelegate { - regions: |r: ty::BoundRegion| { + regions: &mut |r: ty::BoundRegion| { self.mk_region(ty::ReLateBound( ty::INNERMOST, ty::BoundRegion { var: shift_bv(r.var), kind: r.kind }, )) }, - types: |t: ty::BoundTy| { + types: &mut |t: ty::BoundTy| { self.mk_ty(ty::Bound( ty::INNERMOST, ty::BoundTy { var: shift_bv(t.var), kind: t.kind }, )) }, - consts: |c, ty: Ty<'tcx>| { + consts: &mut |c, ty: Ty<'tcx>| { self.mk_const(ty::ConstS { kind: ty::ConstKind::Bound(ty::INNERMOST, shift_bv(c)), ty, diff --git a/compiler/rustc_middle/src/ty/generics.rs b/compiler/rustc_middle/src/ty/generics.rs index add2df258..19754d145 100644 --- a/compiler/rustc_middle/src/ty/generics.rs +++ b/compiler/rustc_middle/src/ty/generics.rs @@ -1,7 +1,5 @@ -use crate::middle::resolve_lifetime::ObjectLifetimeDefault; use crate::ty; -use crate::ty::subst::{Subst, SubstsRef}; -use crate::ty::EarlyBinder; +use crate::ty::{EarlyBinder, SubstsRef}; use rustc_ast as ast; use rustc_data_structures::fx::FxHashMap; use rustc_hir::def_id::DefId; @@ -13,7 +11,7 @@ use super::{EarlyBoundRegion, InstantiatedPredicates, ParamConst, ParamTy, Predi #[derive(Clone, Debug, TyEncodable, TyDecodable, HashStable)] pub enum GenericParamDefKind { Lifetime, - Type { has_default: bool, object_lifetime_default: ObjectLifetimeDefault, synthetic: bool }, + Type { has_default: bool, synthetic: bool }, Const { has_default: bool }, } @@ -28,8 +26,9 @@ impl GenericParamDefKind { pub fn to_ord(&self) -> ast::ParamKindOrd { match self { GenericParamDefKind::Lifetime => ast::ParamKindOrd::Lifetime, - GenericParamDefKind::Type { .. } => ast::ParamKindOrd::Type, - GenericParamDefKind::Const { .. } => ast::ParamKindOrd::Const, + GenericParamDefKind::Type { .. } | GenericParamDefKind::Const { .. } => { + ast::ParamKindOrd::TypeOrConst + } } } @@ -122,6 +121,21 @@ pub struct Generics { } impl<'tcx> Generics { + /// Looks through the generics and all parents to find the index of the + /// given param def-id. This is in comparison to the `param_def_id_to_index` + /// struct member, which only stores information about this item's own + /// generics. + pub fn param_def_id_to_index(&self, tcx: TyCtxt<'tcx>, def_id: DefId) -> Option<u32> { + if let Some(idx) = self.param_def_id_to_index.get(&def_id) { + Some(*idx) + } else if let Some(parent) = self.parent { + let parent = tcx.generics_of(parent); + parent.param_def_id_to_index(tcx, def_id) + } else { + None + } + } + #[inline] pub fn count(&self) -> usize { self.parent_count + self.params.len() @@ -252,7 +266,7 @@ impl<'tcx> Generics { // Filter the default arguments. // // This currently uses structural equality instead - // of semantic equivalance. While not ideal, that's + // of semantic equivalence. While not ideal, that's // good enough for now as this should only be used // for diagnostics anyways. 
own_params.end -= self @@ -314,6 +328,7 @@ impl<'tcx> GenericPredicates<'tcx> { } } + #[instrument(level = "debug", skip(self, tcx))] fn instantiate_into( &self, tcx: TyCtxt<'tcx>, diff --git a/compiler/rustc_middle/src/ty/impls_ty.rs b/compiler/rustc_middle/src/ty/impls_ty.rs index cd00b26b8..d1c0d62ac 100644 --- a/compiler/rustc_middle/src/ty/impls_ty.rs +++ b/compiler/rustc_middle/src/ty/impls_ty.rs @@ -113,7 +113,7 @@ impl<'a> HashStable<StableHashingContext<'a>> for mir::interpret::AllocId { } // `Relocations` with default type parameters is a sorted map. -impl<'a, Prov> HashStable<StableHashingContext<'a>> for mir::interpret::Relocations<Prov> +impl<'a, Prov> HashStable<StableHashingContext<'a>> for mir::interpret::ProvenanceMap<Prov> where Prov: HashStable<StableHashingContext<'a>>, { diff --git a/compiler/rustc_middle/src/ty/inhabitedness/def_id_forest.rs b/compiler/rustc_middle/src/ty/inhabitedness/def_id_forest.rs deleted file mode 100644 index c4ad698ba..000000000 --- a/compiler/rustc_middle/src/ty/inhabitedness/def_id_forest.rs +++ /dev/null @@ -1,145 +0,0 @@ -use crate::ty::context::TyCtxt; -use crate::ty::{DefId, DefIdTree}; -use rustc_span::def_id::CRATE_DEF_ID; -use smallvec::SmallVec; -use std::mem; - -use DefIdForest::*; - -/// Represents a forest of `DefId`s closed under the ancestor relation. That is, -/// if a `DefId` representing a module is contained in the forest then all -/// `DefId`s defined in that module or submodules are also implicitly contained -/// in the forest. -/// -/// This is used to represent a set of modules in which a type is visibly -/// uninhabited. -/// -/// We store the minimal set of `DefId`s required to represent the whole set. If A and B are -/// `DefId`s in the `DefIdForest`, and A is a parent of B, then only A will be stored. When this is -/// used with `type_uninhabited_from`, there will very rarely be more than one `DefId` stored. -#[derive(Copy, Clone, HashStable, Debug)] -pub enum DefIdForest<'a> { - Empty, - Single(DefId), - /// This variant is very rare. - /// Invariant: >1 elements - Multiple(&'a [DefId]), -} - -/// Tests whether a slice of roots contains a given DefId. -#[inline] -fn slice_contains<'tcx>(tcx: TyCtxt<'tcx>, slice: &[DefId], id: DefId) -> bool { - slice.iter().any(|root_id| tcx.is_descendant_of(id, *root_id)) -} - -impl<'tcx> DefIdForest<'tcx> { - /// Creates an empty forest. - pub fn empty() -> DefIdForest<'tcx> { - DefIdForest::Empty - } - - /// Creates a forest consisting of a single tree representing the entire - /// crate. - #[inline] - pub fn full() -> DefIdForest<'tcx> { - DefIdForest::from_id(CRATE_DEF_ID.to_def_id()) - } - - /// Creates a forest containing a `DefId` and all its descendants. - pub fn from_id(id: DefId) -> DefIdForest<'tcx> { - DefIdForest::Single(id) - } - - fn as_slice(&self) -> &[DefId] { - match self { - Empty => &[], - Single(id) => std::slice::from_ref(id), - Multiple(root_ids) => root_ids, - } - } - - // Only allocates in the rare `Multiple` case. - fn from_vec(tcx: TyCtxt<'tcx>, root_ids: SmallVec<[DefId; 1]>) -> DefIdForest<'tcx> { - match &root_ids[..] { - [] => Empty, - [id] => Single(*id), - _ => DefIdForest::Multiple(tcx.arena.alloc_from_iter(root_ids)), - } - } - - /// Tests whether the forest is empty. - pub fn is_empty(&self) -> bool { - match self { - Empty => true, - Single(..) | Multiple(..) => false, - } - } - - /// Iterate over the set of roots. 
- fn iter(&self) -> impl Iterator<Item = DefId> + '_ { - self.as_slice().iter().copied() - } - - /// Tests whether the forest contains a given DefId. - pub fn contains(&self, tcx: TyCtxt<'tcx>, id: DefId) -> bool { - slice_contains(tcx, self.as_slice(), id) - } - - /// Calculate the intersection of a collection of forests. - pub fn intersection<I>(tcx: TyCtxt<'tcx>, iter: I) -> DefIdForest<'tcx> - where - I: IntoIterator<Item = DefIdForest<'tcx>>, - { - let mut iter = iter.into_iter(); - let mut ret: SmallVec<[_; 1]> = if let Some(first) = iter.next() { - SmallVec::from_slice(first.as_slice()) - } else { - return DefIdForest::full(); - }; - - let mut next_ret: SmallVec<[_; 1]> = SmallVec::new(); - for next_forest in iter { - // No need to continue if the intersection is already empty. - if ret.is_empty() || next_forest.is_empty() { - return DefIdForest::empty(); - } - - // We keep the elements in `ret` that are also in `next_forest`. - next_ret.extend(ret.iter().copied().filter(|&id| next_forest.contains(tcx, id))); - // We keep the elements in `next_forest` that are also in `ret`. - next_ret.extend(next_forest.iter().filter(|&id| slice_contains(tcx, &ret, id))); - - mem::swap(&mut next_ret, &mut ret); - next_ret.clear(); - } - DefIdForest::from_vec(tcx, ret) - } - - /// Calculate the union of a collection of forests. - pub fn union<I>(tcx: TyCtxt<'tcx>, iter: I) -> DefIdForest<'tcx> - where - I: IntoIterator<Item = DefIdForest<'tcx>>, - { - let mut ret: SmallVec<[_; 1]> = SmallVec::new(); - let mut next_ret: SmallVec<[_; 1]> = SmallVec::new(); - for next_forest in iter { - // Union with the empty set is a no-op. - if next_forest.is_empty() { - continue; - } - - // We add everything in `ret` that is not in `next_forest`. - next_ret.extend(ret.iter().copied().filter(|&id| !next_forest.contains(tcx, id))); - // We add everything in `next_forest` that we haven't added yet. - for id in next_forest.iter() { - if !slice_contains(tcx, &next_ret, id) { - next_ret.push(id); - } - } - - mem::swap(&mut next_ret, &mut ret); - next_ret.clear(); - } - DefIdForest::from_vec(tcx, ret) - } -} diff --git a/compiler/rustc_middle/src/ty/inhabitedness/inhabited_predicate.rs b/compiler/rustc_middle/src/ty/inhabitedness/inhabited_predicate.rs new file mode 100644 index 000000000..b7aa45572 --- /dev/null +++ b/compiler/rustc_middle/src/ty/inhabitedness/inhabited_predicate.rs @@ -0,0 +1,204 @@ +use crate::ty::context::TyCtxt; +use crate::ty::{self, DefId, DefIdTree, ParamEnv, Ty}; + +/// Represents whether some type is inhabited in a given context. +/// Examples of uninhabited types are `!`, `enum Void {}`, or a struct +/// containing either of those types. +/// A type's inhabitedness may depend on the `ParamEnv` as well as what types +/// are visible in the current module. +#[derive(Clone, Copy, Debug, PartialEq, HashStable)] +pub enum InhabitedPredicate<'tcx> { + /// Inhabited + True, + /// Uninhabited + False, + /// Uninhabited when a const value is non-zero. This occurs when there is an + /// array of uninhabited items, but the array is inhabited if it is empty. + ConstIsZero(ty::Const<'tcx>), + /// Uninhabited if within a certain module. This occurs when an uninhabited + /// type has restricted visibility. + NotInModule(DefId), + /// Inhabited if some generic type is inhabited. + /// These are replaced by calling [`Self::subst`]. 
+ GenericType(Ty<'tcx>), + /// A AND B + And(&'tcx [InhabitedPredicate<'tcx>; 2]), + /// A OR B + Or(&'tcx [InhabitedPredicate<'tcx>; 2]), +} + +impl<'tcx> InhabitedPredicate<'tcx> { + /// Returns true if the corresponding type is inhabited in the given + /// `ParamEnv` and module + pub fn apply(self, tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>, module_def_id: DefId) -> bool { + let Ok(result) = self + .apply_inner::<!>(tcx, param_env, &|id| Ok(tcx.is_descendant_of(module_def_id, id))); + result + } + + /// Same as `apply`, but returns `None` if self contains a module predicate + pub fn apply_any_module(self, tcx: TyCtxt<'tcx>, param_env: ParamEnv<'tcx>) -> Option<bool> { + self.apply_inner(tcx, param_env, &|_| Err(())).ok() + } + + fn apply_inner<E>( + self, + tcx: TyCtxt<'tcx>, + param_env: ParamEnv<'tcx>, + in_module: &impl Fn(DefId) -> Result<bool, E>, + ) -> Result<bool, E> { + match self { + Self::False => Ok(false), + Self::True => Ok(true), + Self::ConstIsZero(const_) => match const_.try_eval_usize(tcx, param_env) { + None | Some(0) => Ok(true), + Some(1..) => Ok(false), + }, + Self::NotInModule(id) => in_module(id).map(|in_mod| !in_mod), + Self::GenericType(_) => Ok(true), + Self::And([a, b]) => try_and(a, b, |x| x.apply_inner(tcx, param_env, in_module)), + Self::Or([a, b]) => try_or(a, b, |x| x.apply_inner(tcx, param_env, in_module)), + } + } + + pub fn and(self, tcx: TyCtxt<'tcx>, other: Self) -> Self { + self.reduce_and(tcx, other).unwrap_or_else(|| Self::And(tcx.arena.alloc([self, other]))) + } + + pub fn or(self, tcx: TyCtxt<'tcx>, other: Self) -> Self { + self.reduce_or(tcx, other).unwrap_or_else(|| Self::Or(tcx.arena.alloc([self, other]))) + } + + pub fn all(tcx: TyCtxt<'tcx>, iter: impl IntoIterator<Item = Self>) -> Self { + let mut result = Self::True; + for pred in iter { + if matches!(pred, Self::False) { + return Self::False; + } + result = result.and(tcx, pred); + } + result + } + + pub fn any(tcx: TyCtxt<'tcx>, iter: impl IntoIterator<Item = Self>) -> Self { + let mut result = Self::False; + for pred in iter { + if matches!(pred, Self::True) { + return Self::True; + } + result = result.or(tcx, pred); + } + result + } + + fn reduce_and(self, tcx: TyCtxt<'tcx>, other: Self) -> Option<Self> { + match (self, other) { + (Self::True, a) | (a, Self::True) => Some(a), + (Self::False, _) | (_, Self::False) => Some(Self::False), + (Self::ConstIsZero(a), Self::ConstIsZero(b)) if a == b => Some(Self::ConstIsZero(a)), + (Self::NotInModule(a), Self::NotInModule(b)) if a == b => Some(Self::NotInModule(a)), + (Self::NotInModule(a), Self::NotInModule(b)) if tcx.is_descendant_of(a, b) => { + Some(Self::NotInModule(b)) + } + (Self::NotInModule(a), Self::NotInModule(b)) if tcx.is_descendant_of(b, a) => { + Some(Self::NotInModule(a)) + } + (Self::GenericType(a), Self::GenericType(b)) if a == b => Some(Self::GenericType(a)), + (Self::And(&[a, b]), c) | (c, Self::And(&[a, b])) => { + if let Some(ac) = a.reduce_and(tcx, c) { + Some(ac.and(tcx, b)) + } else if let Some(bc) = b.reduce_and(tcx, c) { + Some(Self::And(tcx.arena.alloc([a, bc]))) + } else { + None + } + } + _ => None, + } + } + + fn reduce_or(self, tcx: TyCtxt<'tcx>, other: Self) -> Option<Self> { + match (self, other) { + (Self::True, _) | (_, Self::True) => Some(Self::True), + (Self::False, a) | (a, Self::False) => Some(a), + (Self::ConstIsZero(a), Self::ConstIsZero(b)) if a == b => Some(Self::ConstIsZero(a)), + (Self::NotInModule(a), Self::NotInModule(b)) if a == b => Some(Self::NotInModule(a)), + (Self::NotInModule(a), 
Self::NotInModule(b)) if tcx.is_descendant_of(a, b) => { + Some(Self::NotInModule(a)) + } + (Self::NotInModule(a), Self::NotInModule(b)) if tcx.is_descendant_of(b, a) => { + Some(Self::NotInModule(b)) + } + (Self::GenericType(a), Self::GenericType(b)) if a == b => Some(Self::GenericType(a)), + (Self::Or(&[a, b]), c) | (c, Self::Or(&[a, b])) => { + if let Some(ac) = a.reduce_or(tcx, c) { + Some(ac.or(tcx, b)) + } else if let Some(bc) = b.reduce_or(tcx, c) { + Some(Self::Or(tcx.arena.alloc([a, bc]))) + } else { + None + } + } + _ => None, + } + } + + /// Replaces generic types with its corresponding predicate + pub fn subst(self, tcx: TyCtxt<'tcx>, substs: ty::SubstsRef<'tcx>) -> Self { + self.subst_opt(tcx, substs).unwrap_or(self) + } + + fn subst_opt(self, tcx: TyCtxt<'tcx>, substs: ty::SubstsRef<'tcx>) -> Option<Self> { + match self { + Self::ConstIsZero(c) => { + let c = ty::EarlyBinder(c).subst(tcx, substs); + let pred = match c.kind().try_to_machine_usize(tcx) { + Some(0) => Self::True, + Some(1..) => Self::False, + None => Self::ConstIsZero(c), + }; + Some(pred) + } + Self::GenericType(t) => { + Some(ty::EarlyBinder(t).subst(tcx, substs).inhabited_predicate(tcx)) + } + Self::And(&[a, b]) => match a.subst_opt(tcx, substs) { + None => b.subst_opt(tcx, substs).map(|b| a.and(tcx, b)), + Some(InhabitedPredicate::False) => Some(InhabitedPredicate::False), + Some(a) => Some(a.and(tcx, b.subst_opt(tcx, substs).unwrap_or(b))), + }, + Self::Or(&[a, b]) => match a.subst_opt(tcx, substs) { + None => b.subst_opt(tcx, substs).map(|b| a.or(tcx, b)), + Some(InhabitedPredicate::True) => Some(InhabitedPredicate::True), + Some(a) => Some(a.or(tcx, b.subst_opt(tcx, substs).unwrap_or(b))), + }, + _ => None, + } + } +} + +// this is basically like `f(a)? && f(b)?` but different in the case of +// `Ok(false) && Err(_) -> Ok(false)` +fn try_and<T, E>(a: T, b: T, f: impl Fn(T) -> Result<bool, E>) -> Result<bool, E> { + let a = f(a); + if matches!(a, Ok(false)) { + return Ok(false); + } + match (a, f(b)) { + (_, Ok(false)) | (Ok(false), _) => Ok(false), + (Ok(true), Ok(true)) => Ok(true), + (Err(e), _) | (_, Err(e)) => Err(e), + } +} + +fn try_or<T, E>(a: T, b: T, f: impl Fn(T) -> Result<bool, E>) -> Result<bool, E> { + let a = f(a); + if matches!(a, Ok(true)) { + return Ok(true); + } + match (a, f(b)) { + (_, Ok(true)) | (Ok(true), _) => Ok(true), + (Ok(false), Ok(false)) => Ok(false), + (Err(e), _) | (_, Err(e)) => Err(e), + } +} diff --git a/compiler/rustc_middle/src/ty/inhabitedness/mod.rs b/compiler/rustc_middle/src/ty/inhabitedness/mod.rs index 3d22f5a04..279a728ea 100644 --- a/compiler/rustc_middle/src/ty/inhabitedness/mod.rs +++ b/compiler/rustc_middle/src/ty/inhabitedness/mod.rs @@ -1,57 +1,60 @@ -pub use self::def_id_forest::DefIdForest; +//! This module contains logic for determining whether a type is inhabited or +//! uninhabited. The [`InhabitedPredicate`] type captures the minimum +//! information needed to determine whether a type is inhabited given a +//! `ParamEnv` and module ID. +//! +//! # Example +//! ```rust +//! enum Void {} +//! mod a { +//! pub mod b { +//! pub struct SecretlyUninhabited { +//! _priv: !, +//! } +//! } +//! } +//! +//! mod c { +//! pub struct AlsoSecretlyUninhabited { +//! _priv: Void, +//! } +//! mod d { +//! } +//! } +//! +//! struct Foo { +//! x: a::b::SecretlyUninhabited, +//! y: c::AlsoSecretlyUninhabited, +//! } +//! ``` +//! In this code, the type `Foo` will only be visibly uninhabited inside the +//! modules `b`, `c` and `d`. 
Calling `uninhabited_predicate` on `Foo` will +//! return `NotInModule(b) AND NotInModule(c)`. +//! +//! We need this information for pattern-matching on `Foo` or types that contain +//! `Foo`. +//! +//! # Example +//! ```rust +//! let foo_result: Result<T, Foo> = ... ; +//! let Ok(t) = foo_result; +//! ``` +//! This code should only compile in modules where the uninhabitedness of `Foo` +//! is visible. -use crate::ty; use crate::ty::context::TyCtxt; -use crate::ty::{AdtDef, FieldDef, Ty, VariantDef}; -use crate::ty::{AdtKind, Visibility}; -use crate::ty::{DefId, SubstsRef}; +use crate::ty::{self, DefId, Ty, VariantDef, Visibility}; use rustc_type_ir::sty::TyKind::*; -mod def_id_forest; +pub mod inhabited_predicate; -// The methods in this module calculate `DefIdForest`s of modules in which an -// `AdtDef`/`VariantDef`/`FieldDef` is visibly uninhabited. -// -// # Example -// ```rust -// enum Void {} -// mod a { -// pub mod b { -// pub struct SecretlyUninhabited { -// _priv: !, -// } -// } -// } -// -// mod c { -// pub struct AlsoSecretlyUninhabited { -// _priv: Void, -// } -// mod d { -// } -// } -// -// struct Foo { -// x: a::b::SecretlyUninhabited, -// y: c::AlsoSecretlyUninhabited, -// } -// ``` -// In this code, the type `Foo` will only be visibly uninhabited inside the -// modules `b`, `c` and `d`. Calling `uninhabited_from` on `Foo` or its `AdtDef` will -// return the forest of modules {`b`, `c`->`d`} (represented in a `DefIdForest` by the -// set {`b`, `c`}). -// -// We need this information for pattern-matching on `Foo` or types that contain -// `Foo`. -// -// # Example -// ```rust -// let foo_result: Result<T, Foo> = ... ; -// let Ok(t) = foo_result; -// ``` -// This code should only compile in modules where the uninhabitedness of `Foo` is -// visible. +pub use inhabited_predicate::InhabitedPredicate; + +pub(crate) fn provide(providers: &mut ty::query::Providers) { + *providers = + ty::query::Providers { inhabited_predicate_adt, inhabited_predicate_type, ..*providers }; +} impl<'tcx> TyCtxt<'tcx> { /// Checks whether a type is visibly uninhabited from a particular module. @@ -100,135 +103,92 @@ impl<'tcx> TyCtxt<'tcx> { ty: Ty<'tcx>, param_env: ty::ParamEnv<'tcx>, ) -> bool { - // To check whether this type is uninhabited at all (not just from the - // given node), you could check whether the forest is empty. - // ``` - // forest.is_empty() - // ``` - ty.uninhabited_from(self, param_env).contains(self, module) + !ty.inhabited_predicate(self).apply(self, param_env, module) } } -impl<'tcx> AdtDef<'tcx> { - /// Calculates the forest of `DefId`s from which this ADT is visibly uninhabited. - fn uninhabited_from( - self, - tcx: TyCtxt<'tcx>, - substs: SubstsRef<'tcx>, - param_env: ty::ParamEnv<'tcx>, - ) -> DefIdForest<'tcx> { - // Non-exhaustive ADTs from other crates are always considered inhabited. 
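The hunk above replaces the forest-based check with `!ty.inhabited_predicate(self).apply(self, param_env, module)`. A toy, std-only model of how such a predicate evaluates against a viewing module, assuming string paths stand in for `DefId`s and a path-prefix check stands in for `is_descendant_of` (the `ConstIsZero` and `GenericType` cases are left out):

#[allow(dead_code)]
enum Pred {
    True,
    False,
    // Inhabited from every viewpoint except inside the named module.
    NotInModule(&'static str),
    And(Box<Pred>, Box<Pred>),
    Or(Box<Pred>, Box<Pred>),
}

// Crude stand-in for `tcx.is_descendant_of` on string paths.
fn is_descendant_of(module: &str, ancestor: &str) -> bool {
    module == ancestor || module.starts_with(&format!("{ancestor}::"))
}

// Returns true if the type is considered inhabited from `module`'s viewpoint.
fn apply(pred: &Pred, module: &str) -> bool {
    match pred {
        Pred::True => true,
        Pred::False => false,
        Pred::NotInModule(m) => !is_descendant_of(module, m),
        Pred::And(a, b) => apply(a, module) && apply(b, module),
        Pred::Or(a, b) => apply(a, module) || apply(b, module),
    }
}

fn main() {
    // `Foo` from the module docs: NotInModule(b) AND NotInModule(c).
    let foo = Pred::And(
        Box::new(Pred::NotInModule("crate::a::b")),
        Box::new(Pred::NotInModule("crate::c")),
    );
    assert!(!apply(&foo, "crate::a::b")); // visibly uninhabited inside `b`
    assert!(!apply(&foo, "crate::c::d")); // and inside `d`, a child of `c`
    assert!(apply(&foo, "crate"));        // but treated as inhabited elsewhere
}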
- if self.is_variant_list_non_exhaustive() && !self.did().is_local() { - DefIdForest::empty() - } else { - DefIdForest::intersection( - tcx, - self.variants() - .iter() - .map(|v| v.uninhabited_from(tcx, substs, self.adt_kind(), param_env)), - ) +/// Returns an `InhabitedPredicate` that is generic over type parameters and +/// requires calling [`InhabitedPredicate::subst`] +fn inhabited_predicate_adt(tcx: TyCtxt<'_>, def_id: DefId) -> InhabitedPredicate<'_> { + if let Some(def_id) = def_id.as_local() { + if matches!(tcx.representability(def_id), ty::Representability::Infinite) { + return InhabitedPredicate::True; } } + let adt = tcx.adt_def(def_id); + InhabitedPredicate::any( + tcx, + adt.variants().iter().map(|variant| variant.inhabited_predicate(tcx, adt)), + ) } impl<'tcx> VariantDef { /// Calculates the forest of `DefId`s from which this variant is visibly uninhabited. - pub fn uninhabited_from( + pub fn inhabited_predicate( &self, tcx: TyCtxt<'tcx>, - substs: SubstsRef<'tcx>, - adt_kind: AdtKind, - param_env: ty::ParamEnv<'tcx>, - ) -> DefIdForest<'tcx> { - let is_enum = match adt_kind { - // For now, `union`s are never considered uninhabited. - // The precise semantics of inhabitedness with respect to unions is currently undecided. - AdtKind::Union => return DefIdForest::empty(), - AdtKind::Enum => true, - AdtKind::Struct => false, - }; - // Non-exhaustive variants from other crates are always considered inhabited. + adt: ty::AdtDef<'_>, + ) -> InhabitedPredicate<'tcx> { + debug_assert!(!adt.is_union()); if self.is_field_list_non_exhaustive() && !self.def_id.is_local() { - DefIdForest::empty() - } else { - DefIdForest::union( - tcx, - self.fields.iter().map(|f| f.uninhabited_from(tcx, substs, is_enum, param_env)), - ) + // Non-exhaustive variants from other crates are always considered inhabited. + return InhabitedPredicate::True; } - } -} - -impl<'tcx> FieldDef { - /// Calculates the forest of `DefId`s from which this field is visibly uninhabited. - fn uninhabited_from( - &self, - tcx: TyCtxt<'tcx>, - substs: SubstsRef<'tcx>, - is_enum: bool, - param_env: ty::ParamEnv<'tcx>, - ) -> DefIdForest<'tcx> { - let data_uninhabitedness = move || self.ty(tcx, substs).uninhabited_from(tcx, param_env); - // FIXME(canndrew): Currently enum fields are (incorrectly) stored with - // `Visibility::Invisible` so we need to override `self.vis` if we're - // dealing with an enum. - if is_enum { - data_uninhabitedness() - } else { - match self.vis { - Visibility::Invisible => DefIdForest::empty(), - Visibility::Restricted(from) => { - let forest = DefIdForest::from_id(from); - let iter = Some(forest).into_iter().chain(Some(data_uninhabitedness())); - DefIdForest::intersection(tcx, iter) + InhabitedPredicate::all( + tcx, + self.fields.iter().map(|field| { + let pred = tcx.type_of(field.did).inhabited_predicate(tcx); + if adt.is_enum() { + return pred; } - Visibility::Public => data_uninhabitedness(), - } - } + match field.vis { + Visibility::Public => pred, + Visibility::Restricted(from) => { + pred.or(tcx, InhabitedPredicate::NotInModule(from)) + } + } + }), + ) } } impl<'tcx> Ty<'tcx> { - /// Calculates the forest of `DefId`s from which this type is visibly uninhabited. 
- fn uninhabited_from( - self, - tcx: TyCtxt<'tcx>, - param_env: ty::ParamEnv<'tcx>, - ) -> DefIdForest<'tcx> { - tcx.type_uninhabited_from(param_env.and(self)) + pub fn inhabited_predicate(self, tcx: TyCtxt<'tcx>) -> InhabitedPredicate<'tcx> { + match self.kind() { + // For now, union`s are always considered inhabited + Adt(adt, _) if adt.is_union() => InhabitedPredicate::True, + // Non-exhaustive ADTs from other crates are always considered inhabited + Adt(adt, _) if adt.is_variant_list_non_exhaustive() && !adt.did().is_local() => { + InhabitedPredicate::True + } + Never => InhabitedPredicate::False, + Param(_) | Projection(_) => InhabitedPredicate::GenericType(self), + Tuple(tys) if tys.is_empty() => InhabitedPredicate::True, + // use a query for more complex cases + Adt(..) | Array(..) | Tuple(_) => tcx.inhabited_predicate_type(self), + // references and other types are inhabited + _ => InhabitedPredicate::True, + } } } -// Query provider for `type_uninhabited_from`. -pub(crate) fn type_uninhabited_from<'tcx>( - tcx: TyCtxt<'tcx>, - key: ty::ParamEnvAnd<'tcx, Ty<'tcx>>, -) -> DefIdForest<'tcx> { - let ty = key.value; - let param_env = key.param_env; +/// N.B. this query should only be called through `Ty::inhabited_predicate` +fn inhabited_predicate_type<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) -> InhabitedPredicate<'tcx> { match *ty.kind() { - Adt(def, substs) => def.uninhabited_from(tcx, substs, param_env), + Adt(adt, substs) => tcx.inhabited_predicate_adt(adt.did()).subst(tcx, substs), - Never => DefIdForest::full(), - - Tuple(ref tys) => { - DefIdForest::union(tcx, tys.iter().map(|ty| ty.uninhabited_from(tcx, param_env))) + Tuple(tys) => { + InhabitedPredicate::all(tcx, tys.iter().map(|ty| ty.inhabited_predicate(tcx))) } - Array(ty, len) => match len.try_eval_usize(tcx, param_env) { - Some(0) | None => DefIdForest::empty(), - // If the array is definitely non-empty, it's uninhabited if - // the type of its elements is uninhabited. - Some(1..) => ty.uninhabited_from(tcx, param_env), + // If we can evaluate the array length before having a `ParamEnv`, then + // we can simplify the predicate. This is an optimization. + Array(ty, len) => match len.kind().try_to_machine_usize(tcx) { + Some(0) => InhabitedPredicate::True, + Some(1..) => ty.inhabited_predicate(tcx), + None => ty.inhabited_predicate(tcx).or(tcx, InhabitedPredicate::ConstIsZero(len)), }, - // References to uninitialised memory are valid for any type, including - // uninhabited types, in unsafe code, so we treat all references as - // inhabited. - // The precise semantics of inhabitedness with respect to references is currently - // undecided. - Ref(..) 
=> DefIdForest::empty(), - - _ => DefIdForest::empty(), + _ => bug!("unexpected TyKind, use `Ty::inhabited_predicate`"), } } diff --git a/compiler/rustc_middle/src/ty/instance.rs b/compiler/rustc_middle/src/ty/instance.rs index 53218225d..6c1414f7b 100644 --- a/compiler/rustc_middle/src/ty/instance.rs +++ b/compiler/rustc_middle/src/ty/instance.rs @@ -1,9 +1,7 @@ use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags; use crate::ty::print::{FmtPrinter, Printer}; -use crate::ty::subst::{InternalSubsts, Subst}; -use crate::ty::{ - self, EarlyBinder, SubstsRef, Ty, TyCtxt, TypeFoldable, TypeSuperFoldable, TypeVisitable, -}; +use crate::ty::{self, Ty, TyCtxt, TypeFoldable, TypeSuperFoldable, TypeVisitable}; +use crate::ty::{EarlyBinder, InternalSubsts, SubstsRef}; use rustc_errors::ErrorGuaranteed; use rustc_hir::def::Namespace; use rustc_hir::def_id::{CrateNum, DefId}; @@ -20,14 +18,14 @@ use std::fmt; /// simply couples a potentially generic `InstanceDef` with some substs, and codegen and const eval /// will do all required substitution as they run. #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, TyEncodable, TyDecodable)] -#[derive(HashStable, Lift)] +#[derive(HashStable, Lift, TypeFoldable, TypeVisitable)] pub struct Instance<'tcx> { pub def: InstanceDef<'tcx>, pub substs: SubstsRef<'tcx>, } #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] -#[derive(TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable)] +#[derive(TyEncodable, TyDecodable, HashStable, TypeFoldable, TypeVisitable, Lift)] pub enum InstanceDef<'tcx> { /// A user-defined callable item. /// diff --git a/compiler/rustc_middle/src/ty/layout.rs b/compiler/rustc_middle/src/ty/layout.rs index ad78d24e9..3312f44c6 100644 --- a/compiler/rustc_middle/src/ty/layout.rs +++ b/compiler/rustc_middle/src/ty/layout.rs @@ -1,38 +1,23 @@ use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags; -use crate::mir::{GeneratorLayout, GeneratorSavedLocal}; use crate::ty::normalize_erasing_regions::NormalizationError; -use crate::ty::subst::Subst; -use crate::ty::{self, subst::SubstsRef, EarlyBinder, ReprOptions, Ty, TyCtxt, TypeVisitable}; +use crate::ty::{self, ReprOptions, Ty, TyCtxt, TypeVisitable}; use rustc_ast as ast; use rustc_attr as attr; +use rustc_errors::{DiagnosticBuilder, Handler, IntoDiagnostic}; use rustc_hir as hir; use rustc_hir::def_id::DefId; -use rustc_hir::lang_items::LangItem; -use rustc_index::bit_set::BitSet; -use rustc_index::vec::{Idx, IndexVec}; -use rustc_session::{config::OptLevel, DataTypeKind, FieldInfo, SizeKind, VariantInfo}; -use rustc_span::symbol::Symbol; +use rustc_index::vec::Idx; +use rustc_session::config::OptLevel; use rustc_span::{Span, DUMMY_SP}; -use rustc_target::abi::call::{ - ArgAbi, ArgAttribute, ArgAttributes, ArgExtension, Conv, FnAbi, PassMode, Reg, RegKind, -}; +use rustc_target::abi::call::FnAbi; use rustc_target::abi::*; use rustc_target::spec::{abi::Abi as SpecAbi, HasTargetSpec, PanicStrategy, Target}; -use std::cmp; +use std::cmp::{self}; use std::fmt; -use std::iter; use std::num::NonZeroUsize; use std::ops::Bound; -use rand::{seq::SliceRandom, SeedableRng}; -use rand_xoshiro::Xoshiro128StarStar; - -pub fn provide(providers: &mut ty::query::Providers) { - *providers = - ty::query::Providers { layout_of, fn_abi_of_fn_ptr, fn_abi_of_instance, ..*providers }; -} - pub trait IntegerExt { fn to_ty<'tcx>(&self, tcx: TyCtxt<'tcx>, signed: bool) -> Ty<'tcx>; fn from_attr<C: HasDataLayout>(cx: &C, ity: attr::IntType) -> Integer; @@ -204,6 +189,31 @@ pub enum 
LayoutError<'tcx> { NormalizationFailure(Ty<'tcx>, NormalizationError<'tcx>), } +impl<'a> IntoDiagnostic<'a, !> for LayoutError<'a> { + fn into_diagnostic(self, handler: &'a Handler) -> DiagnosticBuilder<'a, !> { + let mut diag = handler.struct_fatal(""); + + match self { + LayoutError::Unknown(ty) => { + diag.set_arg("ty", ty); + diag.set_primary_message(rustc_errors::fluent::middle_unknown_layout); + } + LayoutError::SizeOverflow(ty) => { + diag.set_arg("ty", ty); + diag.set_primary_message(rustc_errors::fluent::middle_values_too_big); + } + LayoutError::NormalizationFailure(ty, e) => { + diag.set_arg("ty", ty); + diag.set_arg("failure_ty", e.get_type_for_failure()); + diag.set_primary_message(rustc_errors::fluent::middle_cannot_be_normalized); + } + } + diag + } +} + +// FIXME: Once the other errors that embed this error have been converted to translateable +// diagnostics, this Display impl should be removed. impl<'tcx> fmt::Display for LayoutError<'tcx> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { @@ -221,1842 +231,12 @@ impl<'tcx> fmt::Display for LayoutError<'tcx> { } } -/// Enforce some basic invariants on layouts. -fn sanity_check_layout<'tcx>( - tcx: TyCtxt<'tcx>, - param_env: ty::ParamEnv<'tcx>, - layout: &TyAndLayout<'tcx>, -) { - // Type-level uninhabitedness should always imply ABI uninhabitedness. - if tcx.conservative_is_privately_uninhabited(param_env.and(layout.ty)) { - assert!(layout.abi.is_uninhabited()); - } - - if layout.size.bytes() % layout.align.abi.bytes() != 0 { - bug!("size is not a multiple of align, in the following layout:\n{layout:#?}"); - } - - if cfg!(debug_assertions) { - fn check_layout_abi<'tcx>(tcx: TyCtxt<'tcx>, layout: Layout<'tcx>) { - match layout.abi() { - Abi::Scalar(scalar) => { - // No padding in scalars. - assert_eq!( - layout.align().abi, - scalar.align(&tcx).abi, - "alignment mismatch between ABI and layout in {layout:#?}" - ); - assert_eq!( - layout.size(), - scalar.size(&tcx), - "size mismatch between ABI and layout in {layout:#?}" - ); - } - Abi::Vector { count, element } => { - // No padding in vectors. Alignment can be strengthened, though. - assert!( - layout.align().abi >= element.align(&tcx).abi, - "alignment mismatch between ABI and layout in {layout:#?}" - ); - let size = element.size(&tcx) * count; - assert_eq!( - layout.size(), - size.align_to(tcx.data_layout().vector_align(size).abi), - "size mismatch between ABI and layout in {layout:#?}" - ); - } - Abi::ScalarPair(scalar1, scalar2) => { - // Sanity-check scalar pairs. These are a bit more flexible and support - // padding, but we can at least ensure both fields actually fit into the layout - // and the alignment requirement has not been weakened. - let align1 = scalar1.align(&tcx).abi; - let align2 = scalar2.align(&tcx).abi; - assert!( - layout.align().abi >= cmp::max(align1, align2), - "alignment mismatch between ABI and layout in {layout:#?}", - ); - let field2_offset = scalar1.size(&tcx).align_to(align2); - assert!( - layout.size() >= field2_offset + scalar2.size(&tcx), - "size mismatch between ABI and layout in {layout:#?}" - ); - } - Abi::Uninhabited | Abi::Aggregate { .. } => {} // Nothing to check. - } - } - - check_layout_abi(tcx, layout.layout); - - if let Variants::Multiple { variants, .. } = &layout.variants { - for variant in variants { - check_layout_abi(tcx, *variant); - // No nested "multiple". - assert!(matches!(variant.variants(), Variants::Single { .. })); - // Skip empty variants. 
- if variant.size() == Size::ZERO - || variant.fields().count() == 0 - || variant.abi().is_uninhabited() - { - // These are never actually accessed anyway, so we can skip them. (Note that - // sometimes, variants with fields have size 0, and sometimes, variants without - // fields have non-0 size.) - continue; - } - // Variants should have the same or a smaller size as the full thing. - if variant.size() > layout.size { - bug!( - "Type with size {} bytes has variant with size {} bytes: {layout:#?}", - layout.size.bytes(), - variant.size().bytes(), - ) - } - // The top-level ABI and the ABI of the variants should be coherent. - let abi_coherent = match (layout.abi, variant.abi()) { - (Abi::Scalar(..), Abi::Scalar(..)) => true, - (Abi::ScalarPair(..), Abi::ScalarPair(..)) => true, - (Abi::Uninhabited, _) => true, - (Abi::Aggregate { .. }, _) => true, - _ => false, - }; - if !abi_coherent { - bug!( - "Variant ABI is incompatible with top-level ABI:\nvariant={:#?}\nTop-level: {layout:#?}", - variant - ); - } - } - } - } -} - -#[instrument(skip(tcx, query), level = "debug")] -fn layout_of<'tcx>( - tcx: TyCtxt<'tcx>, - query: ty::ParamEnvAnd<'tcx, Ty<'tcx>>, -) -> Result<TyAndLayout<'tcx>, LayoutError<'tcx>> { - ty::tls::with_related_context(tcx, move |icx| { - let (param_env, ty) = query.into_parts(); - debug!(?ty); - - if !tcx.recursion_limit().value_within_limit(icx.layout_depth) { - tcx.sess.fatal(&format!("overflow representing the type `{}`", ty)); - } - - // Update the ImplicitCtxt to increase the layout_depth - let icx = ty::tls::ImplicitCtxt { layout_depth: icx.layout_depth + 1, ..icx.clone() }; - - ty::tls::enter_context(&icx, |_| { - let param_env = param_env.with_reveal_all_normalized(tcx); - let unnormalized_ty = ty; - - // FIXME: We might want to have two different versions of `layout_of`: - // One that can be called after typecheck has completed and can use - // `normalize_erasing_regions` here and another one that can be called - // before typecheck has completed and uses `try_normalize_erasing_regions`. - let ty = match tcx.try_normalize_erasing_regions(param_env, ty) { - Ok(t) => t, - Err(normalization_error) => { - return Err(LayoutError::NormalizationFailure(ty, normalization_error)); - } - }; - - if ty != unnormalized_ty { - // Ensure this layout is also cached for the normalized type. - return tcx.layout_of(param_env.and(ty)); - } - - let cx = LayoutCx { tcx, param_env }; - - let layout = cx.layout_of_uncached(ty)?; - let layout = TyAndLayout { ty, layout }; - - cx.record_layout_for_printing(layout); - - sanity_check_layout(tcx, param_env, &layout); - - Ok(layout) - }) - }) -} - +#[derive(Clone, Copy)] pub struct LayoutCx<'tcx, C> { pub tcx: C, pub param_env: ty::ParamEnv<'tcx>, } -#[derive(Copy, Clone, Debug)] -enum StructKind { - /// A tuple, closure, or univariant which cannot be coerced to unsized. - AlwaysSized, - /// A univariant, the last field of which may be coerced to unsized. - MaybeUnsized, - /// A univariant, but with a prefix of an arbitrary size & alignment (e.g., enum tag). - Prefixed(Size, Align), -} - -// Invert a bijective mapping, i.e. `invert(map)[y] = x` if `map[x] = y`. -// This is used to go between `memory_index` (source field order to memory order) -// and `inverse_memory_index` (memory order to source field order). -// See also `FieldsShape::Arbitrary::memory_index` for more details. -// FIXME(eddyb) build a better abstraction for permutations, if possible. 
-fn invert_mapping(map: &[u32]) -> Vec<u32> { - let mut inverse = vec![0; map.len()]; - for i in 0..map.len() { - inverse[map[i] as usize] = i as u32; - } - inverse -} - -impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> { - fn scalar_pair(&self, a: Scalar, b: Scalar) -> LayoutS<'tcx> { - let dl = self.data_layout(); - let b_align = b.align(dl); - let align = a.align(dl).max(b_align).max(dl.aggregate_align); - let b_offset = a.size(dl).align_to(b_align.abi); - let size = (b_offset + b.size(dl)).align_to(align.abi); - - // HACK(nox): We iter on `b` and then `a` because `max_by_key` - // returns the last maximum. - let largest_niche = Niche::from_scalar(dl, b_offset, b) - .into_iter() - .chain(Niche::from_scalar(dl, Size::ZERO, a)) - .max_by_key(|niche| niche.available(dl)); - - LayoutS { - variants: Variants::Single { index: VariantIdx::new(0) }, - fields: FieldsShape::Arbitrary { - offsets: vec![Size::ZERO, b_offset], - memory_index: vec![0, 1], - }, - abi: Abi::ScalarPair(a, b), - largest_niche, - align, - size, - } - } - - fn univariant_uninterned( - &self, - ty: Ty<'tcx>, - fields: &[TyAndLayout<'_>], - repr: &ReprOptions, - kind: StructKind, - ) -> Result<LayoutS<'tcx>, LayoutError<'tcx>> { - let dl = self.data_layout(); - let pack = repr.pack; - if pack.is_some() && repr.align.is_some() { - self.tcx.sess.delay_span_bug(DUMMY_SP, "struct cannot be packed and aligned"); - return Err(LayoutError::Unknown(ty)); - } - - let mut align = if pack.is_some() { dl.i8_align } else { dl.aggregate_align }; - - let mut inverse_memory_index: Vec<u32> = (0..fields.len() as u32).collect(); - - let optimize = !repr.inhibit_struct_field_reordering_opt(); - if optimize { - let end = - if let StructKind::MaybeUnsized = kind { fields.len() - 1 } else { fields.len() }; - let optimizing = &mut inverse_memory_index[..end]; - let field_align = |f: &TyAndLayout<'_>| { - if let Some(pack) = pack { f.align.abi.min(pack) } else { f.align.abi } - }; - - // If `-Z randomize-layout` was enabled for the type definition we can shuffle - // the field ordering to try and catch some code making assumptions about layouts - // we don't guarantee - if repr.can_randomize_type_layout() { - // `ReprOptions.layout_seed` is a deterministic seed that we can use to - // randomize field ordering with - let mut rng = Xoshiro128StarStar::seed_from_u64(repr.field_shuffle_seed); - - // Shuffle the ordering of the fields - optimizing.shuffle(&mut rng); - - // Otherwise we just leave things alone and actually optimize the type's fields - } else { - match kind { - StructKind::AlwaysSized | StructKind::MaybeUnsized => { - optimizing.sort_by_key(|&x| { - // Place ZSTs first to avoid "interesting offsets", - // especially with only one or two non-ZST fields. - let f = &fields[x as usize]; - (!f.is_zst(), cmp::Reverse(field_align(f))) - }); - } - - StructKind::Prefixed(..) => { - // Sort in ascending alignment so that the layout stays optimal - // regardless of the prefix - optimizing.sort_by_key(|&x| field_align(&fields[x as usize])); - } - } - - // FIXME(Kixiron): We can always shuffle fields within a given alignment class - // regardless of the status of `-Z randomize-layout` - } - } - - // inverse_memory_index holds field indices by increasing memory offset. - // That is, if field 5 has offset 0, the first element of inverse_memory_index is 5. - // We now write field offsets to the corresponding offset slot; - // field 5 with offset 0 puts 0 in offsets[5]. 
- // At the bottom of this function, we invert `inverse_memory_index` to - // produce `memory_index` (see `invert_mapping`). - - let mut sized = true; - let mut offsets = vec![Size::ZERO; fields.len()]; - let mut offset = Size::ZERO; - let mut largest_niche = None; - let mut largest_niche_available = 0; - - if let StructKind::Prefixed(prefix_size, prefix_align) = kind { - let prefix_align = - if let Some(pack) = pack { prefix_align.min(pack) } else { prefix_align }; - align = align.max(AbiAndPrefAlign::new(prefix_align)); - offset = prefix_size.align_to(prefix_align); - } - - for &i in &inverse_memory_index { - let field = fields[i as usize]; - if !sized { - self.tcx.sess.delay_span_bug( - DUMMY_SP, - &format!( - "univariant: field #{} of `{}` comes after unsized field", - offsets.len(), - ty - ), - ); - } - - if field.is_unsized() { - sized = false; - } - - // Invariant: offset < dl.obj_size_bound() <= 1<<61 - let field_align = if let Some(pack) = pack { - field.align.min(AbiAndPrefAlign::new(pack)) - } else { - field.align - }; - offset = offset.align_to(field_align.abi); - align = align.max(field_align); - - debug!("univariant offset: {:?} field: {:#?}", offset, field); - offsets[i as usize] = offset; - - if let Some(mut niche) = field.largest_niche { - let available = niche.available(dl); - if available > largest_niche_available { - largest_niche_available = available; - niche.offset += offset; - largest_niche = Some(niche); - } - } - - offset = offset.checked_add(field.size, dl).ok_or(LayoutError::SizeOverflow(ty))?; - } - - if let Some(repr_align) = repr.align { - align = align.max(AbiAndPrefAlign::new(repr_align)); - } - - debug!("univariant min_size: {:?}", offset); - let min_size = offset; - - // As stated above, inverse_memory_index holds field indices by increasing offset. - // This makes it an already-sorted view of the offsets vec. - // To invert it, consider: - // If field 5 has offset 0, offsets[0] is 5, and memory_index[5] should be 0. - // Field 5 would be the first element, so memory_index is i: - // Note: if we didn't optimize, it's already right. - - let memory_index = - if optimize { invert_mapping(&inverse_memory_index) } else { inverse_memory_index }; - - let size = min_size.align_to(align.abi); - let mut abi = Abi::Aggregate { sized }; - - // Unpack newtype ABIs and find scalar pairs. - if sized && size.bytes() > 0 { - // All other fields must be ZSTs. - let mut non_zst_fields = fields.iter().enumerate().filter(|&(_, f)| !f.is_zst()); - - match (non_zst_fields.next(), non_zst_fields.next(), non_zst_fields.next()) { - // We have exactly one non-ZST field. - (Some((i, field)), None, None) => { - // Field fills the struct and it has a scalar or scalar pair ABI. - if offsets[i].bytes() == 0 && align.abi == field.align.abi && size == field.size - { - match field.abi { - // For plain scalars, or vectors of them, we can't unpack - // newtypes for `#[repr(C)]`, as that affects C ABIs. - Abi::Scalar(_) | Abi::Vector { .. } if optimize => { - abi = field.abi; - } - // But scalar pairs are Rust-specific and get - // treated as aggregates by C ABIs anyway. - Abi::ScalarPair(..) => { - abi = field.abi; - } - _ => {} - } - } - } - - // Two non-ZST fields, and they're both scalars. - (Some((i, a)), Some((j, b)), None) => { - match (a.abi, b.abi) { - (Abi::Scalar(a), Abi::Scalar(b)) => { - // Order by the memory placement, not source order. 
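The removed `univariant_uninterned` logic above reorders non-`#[repr(C)]` struct fields (ZSTs first, then decreasing alignment) to cut padding. A small illustration of the observable effect; only the `#[repr(C)]` size is guaranteed by the language, so the default-repr size is printed rather than asserted:

use std::mem::size_of;

#[allow(dead_code)]
struct Reordered {
    a: u8,
    b: u32,
    c: u16,
}

#[allow(dead_code)]
#[repr(C)]
struct DeclarationOrder {
    a: u8,
    b: u32,
    c: u16,
}

fn main() {
    // Guaranteed: 1 + 3 (padding) + 4 + 2 + 2 (tail padding) = 12 bytes.
    assert_eq!(size_of::<DeclarationOrder>(), 12);
    // Not guaranteed, but with reordering the compiler can pack the fields as
    // `b`, `c`, `a`: 4 + 2 + 1 + 1 (tail padding) = 8 bytes.
    println!("default-repr size: {}", size_of::<Reordered>());
}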
- let ((i, a), (j, b)) = if offsets[i] < offsets[j] { - ((i, a), (j, b)) - } else { - ((j, b), (i, a)) - }; - let pair = self.scalar_pair(a, b); - let pair_offsets = match pair.fields { - FieldsShape::Arbitrary { ref offsets, ref memory_index } => { - assert_eq!(memory_index, &[0, 1]); - offsets - } - _ => bug!(), - }; - if offsets[i] == pair_offsets[0] - && offsets[j] == pair_offsets[1] - && align == pair.align - && size == pair.size - { - // We can use `ScalarPair` only when it matches our - // already computed layout (including `#[repr(C)]`). - abi = pair.abi; - } - } - _ => {} - } - } - - _ => {} - } - } - - if fields.iter().any(|f| f.abi.is_uninhabited()) { - abi = Abi::Uninhabited; - } - - Ok(LayoutS { - variants: Variants::Single { index: VariantIdx::new(0) }, - fields: FieldsShape::Arbitrary { offsets, memory_index }, - abi, - largest_niche, - align, - size, - }) - } - - fn layout_of_uncached(&self, ty: Ty<'tcx>) -> Result<Layout<'tcx>, LayoutError<'tcx>> { - let tcx = self.tcx; - let param_env = self.param_env; - let dl = self.data_layout(); - let scalar_unit = |value: Primitive| { - let size = value.size(dl); - assert!(size.bits() <= 128); - Scalar::Initialized { value, valid_range: WrappingRange::full(size) } - }; - let scalar = - |value: Primitive| tcx.intern_layout(LayoutS::scalar(self, scalar_unit(value))); - - let univariant = |fields: &[TyAndLayout<'_>], repr: &ReprOptions, kind| { - Ok(tcx.intern_layout(self.univariant_uninterned(ty, fields, repr, kind)?)) - }; - debug_assert!(!ty.has_infer_types_or_consts()); - - Ok(match *ty.kind() { - // Basic scalars. - ty::Bool => tcx.intern_layout(LayoutS::scalar( - self, - Scalar::Initialized { - value: Int(I8, false), - valid_range: WrappingRange { start: 0, end: 1 }, - }, - )), - ty::Char => tcx.intern_layout(LayoutS::scalar( - self, - Scalar::Initialized { - value: Int(I32, false), - valid_range: WrappingRange { start: 0, end: 0x10FFFF }, - }, - )), - ty::Int(ity) => scalar(Int(Integer::from_int_ty(dl, ity), true)), - ty::Uint(ity) => scalar(Int(Integer::from_uint_ty(dl, ity), false)), - ty::Float(fty) => scalar(match fty { - ty::FloatTy::F32 => F32, - ty::FloatTy::F64 => F64, - }), - ty::FnPtr(_) => { - let mut ptr = scalar_unit(Pointer); - ptr.valid_range_mut().start = 1; - tcx.intern_layout(LayoutS::scalar(self, ptr)) - } - - // The never type. - ty::Never => tcx.intern_layout(LayoutS { - variants: Variants::Single { index: VariantIdx::new(0) }, - fields: FieldsShape::Primitive, - abi: Abi::Uninhabited, - largest_niche: None, - align: dl.i8_align, - size: Size::ZERO, - }), - - // Potentially-wide pointers. - ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => { - let mut data_ptr = scalar_unit(Pointer); - if !ty.is_unsafe_ptr() { - data_ptr.valid_range_mut().start = 1; - } - - let pointee = tcx.normalize_erasing_regions(param_env, pointee); - if pointee.is_sized(tcx.at(DUMMY_SP), param_env) { - return Ok(tcx.intern_layout(LayoutS::scalar(self, data_ptr))); - } - - let unsized_part = tcx.struct_tail_erasing_lifetimes(pointee, param_env); - let metadata = match unsized_part.kind() { - ty::Foreign(..) => { - return Ok(tcx.intern_layout(LayoutS::scalar(self, data_ptr))); - } - ty::Slice(_) | ty::Str => scalar_unit(Int(dl.ptr_sized_integer(), false)), - ty::Dynamic(..) => { - let mut vtable = scalar_unit(Pointer); - vtable.valid_range_mut().start = 1; - vtable - } - _ => return Err(LayoutError::Unknown(unsized_part)), - }; - - // Effectively a (ptr, meta) tuple. 
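The removed pointer-layout code above models a wide pointer as a `scalar_pair` of data pointer plus metadata (a length for slices, a vtable pointer for trait objects). A quick check of that shape from ordinary Rust; the wide-pointer sizes reflect how current targets behave rather than a hard language guarantee, so only the thin-pointer case is asserted:

use std::mem::size_of;

fn main() {
    // A reference to a `Sized` type is a thin pointer: just the data pointer.
    assert_eq!(size_of::<&u8>(), size_of::<usize>());
    // Slice and trait-object references carry metadata next to the pointer.
    println!("&[u8]:     {} bytes (pointer + length)", size_of::<&[u8]>());
    println!("&dyn Fn(): {} bytes (pointer + vtable)", size_of::<&dyn Fn()>());
}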
- tcx.intern_layout(self.scalar_pair(data_ptr, metadata)) - } - - // Arrays and slices. - ty::Array(element, mut count) => { - if count.has_projections() { - count = tcx.normalize_erasing_regions(param_env, count); - if count.has_projections() { - return Err(LayoutError::Unknown(ty)); - } - } - - let count = count.try_eval_usize(tcx, param_env).ok_or(LayoutError::Unknown(ty))?; - let element = self.layout_of(element)?; - let size = - element.size.checked_mul(count, dl).ok_or(LayoutError::SizeOverflow(ty))?; - - let abi = - if count != 0 && tcx.conservative_is_privately_uninhabited(param_env.and(ty)) { - Abi::Uninhabited - } else { - Abi::Aggregate { sized: true } - }; - - let largest_niche = if count != 0 { element.largest_niche } else { None }; - - tcx.intern_layout(LayoutS { - variants: Variants::Single { index: VariantIdx::new(0) }, - fields: FieldsShape::Array { stride: element.size, count }, - abi, - largest_niche, - align: element.align, - size, - }) - } - ty::Slice(element) => { - let element = self.layout_of(element)?; - tcx.intern_layout(LayoutS { - variants: Variants::Single { index: VariantIdx::new(0) }, - fields: FieldsShape::Array { stride: element.size, count: 0 }, - abi: Abi::Aggregate { sized: false }, - largest_niche: None, - align: element.align, - size: Size::ZERO, - }) - } - ty::Str => tcx.intern_layout(LayoutS { - variants: Variants::Single { index: VariantIdx::new(0) }, - fields: FieldsShape::Array { stride: Size::from_bytes(1), count: 0 }, - abi: Abi::Aggregate { sized: false }, - largest_niche: None, - align: dl.i8_align, - size: Size::ZERO, - }), - - // Odd unit types. - ty::FnDef(..) => univariant(&[], &ReprOptions::default(), StructKind::AlwaysSized)?, - ty::Dynamic(..) | ty::Foreign(..) => { - let mut unit = self.univariant_uninterned( - ty, - &[], - &ReprOptions::default(), - StructKind::AlwaysSized, - )?; - match unit.abi { - Abi::Aggregate { ref mut sized } => *sized = false, - _ => bug!(), - } - tcx.intern_layout(unit) - } - - ty::Generator(def_id, substs, _) => self.generator_layout(ty, def_id, substs)?, - - ty::Closure(_, ref substs) => { - let tys = substs.as_closure().upvar_tys(); - univariant( - &tys.map(|ty| self.layout_of(ty)).collect::<Result<Vec<_>, _>>()?, - &ReprOptions::default(), - StructKind::AlwaysSized, - )? - } - - ty::Tuple(tys) => { - let kind = - if tys.len() == 0 { StructKind::AlwaysSized } else { StructKind::MaybeUnsized }; - - univariant( - &tys.iter().map(|k| self.layout_of(k)).collect::<Result<Vec<_>, _>>()?, - &ReprOptions::default(), - kind, - )? - } - - // SIMD vector types. - ty::Adt(def, substs) if def.repr().simd() => { - if !def.is_struct() { - // Should have yielded E0517 by now. - tcx.sess.delay_span_bug( - DUMMY_SP, - "#[repr(simd)] was applied to an ADT that is not a struct", - ); - return Err(LayoutError::Unknown(ty)); - } - - // Supported SIMD vectors are homogeneous ADTs with at least one field: - // - // * #[repr(simd)] struct S(T, T, T, T); - // * #[repr(simd)] struct S { x: T, y: T, z: T, w: T } - // * #[repr(simd)] struct S([T; 4]) - // - // where T is a primitive scalar (integer/float/pointer). - - // SIMD vectors with zero fields are not supported. 
- // (should be caught by typeck) - if def.non_enum_variant().fields.is_empty() { - tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty)); - } - - // Type of the first ADT field: - let f0_ty = def.non_enum_variant().fields[0].ty(tcx, substs); - - // Heterogeneous SIMD vectors are not supported: - // (should be caught by typeck) - for fi in &def.non_enum_variant().fields { - if fi.ty(tcx, substs) != f0_ty { - tcx.sess.fatal(&format!("monomorphising heterogeneous SIMD type `{}`", ty)); - } - } - - // The element type and number of elements of the SIMD vector - // are obtained from: - // - // * the element type and length of the single array field, if - // the first field is of array type, or - // - // * the homogenous field type and the number of fields. - let (e_ty, e_len, is_array) = if let ty::Array(e_ty, _) = f0_ty.kind() { - // First ADT field is an array: - - // SIMD vectors with multiple array fields are not supported: - // (should be caught by typeck) - if def.non_enum_variant().fields.len() != 1 { - tcx.sess.fatal(&format!( - "monomorphising SIMD type `{}` with more than one array field", - ty - )); - } - - // Extract the number of elements from the layout of the array field: - let FieldsShape::Array { count, .. } = self.layout_of(f0_ty)?.layout.fields() else { - return Err(LayoutError::Unknown(ty)); - }; - - (*e_ty, *count, true) - } else { - // First ADT field is not an array: - (f0_ty, def.non_enum_variant().fields.len() as _, false) - }; - - // SIMD vectors of zero length are not supported. - // Additionally, lengths are capped at 2^16 as a fixed maximum backends must - // support. - // - // Can't be caught in typeck if the array length is generic. - if e_len == 0 { - tcx.sess.fatal(&format!("monomorphising SIMD type `{}` of zero length", ty)); - } else if e_len > MAX_SIMD_LANES { - tcx.sess.fatal(&format!( - "monomorphising SIMD type `{}` of length greater than {}", - ty, MAX_SIMD_LANES, - )); - } - - // Compute the ABI of the element type: - let e_ly = self.layout_of(e_ty)?; - let Abi::Scalar(e_abi) = e_ly.abi else { - // This error isn't caught in typeck, e.g., if - // the element type of the vector is generic. - tcx.sess.fatal(&format!( - "monomorphising SIMD type `{}` with a non-primitive-scalar \ - (integer/float/pointer) element type `{}`", - ty, e_ty - )) - }; - - // Compute the size and alignment of the vector: - let size = e_ly.size.checked_mul(e_len, dl).ok_or(LayoutError::SizeOverflow(ty))?; - let align = dl.vector_align(size); - let size = size.align_to(align.abi); - - // Compute the placement of the vector fields: - let fields = if is_array { - FieldsShape::Arbitrary { offsets: vec![Size::ZERO], memory_index: vec![0] } - } else { - FieldsShape::Array { stride: e_ly.size, count: e_len } - }; - - tcx.intern_layout(LayoutS { - variants: Variants::Single { index: VariantIdx::new(0) }, - fields, - abi: Abi::Vector { element: e_abi, count: e_len }, - largest_niche: e_ly.largest_niche, - size, - align, - }) - } - - // ADTs. - ty::Adt(def, substs) => { - // Cache the field layouts. 
- let variants = def - .variants() - .iter() - .map(|v| { - v.fields - .iter() - .map(|field| self.layout_of(field.ty(tcx, substs))) - .collect::<Result<Vec<_>, _>>() - }) - .collect::<Result<IndexVec<VariantIdx, _>, _>>()?; - - if def.is_union() { - if def.repr().pack.is_some() && def.repr().align.is_some() { - self.tcx.sess.delay_span_bug( - tcx.def_span(def.did()), - "union cannot be packed and aligned", - ); - return Err(LayoutError::Unknown(ty)); - } - - let mut align = - if def.repr().pack.is_some() { dl.i8_align } else { dl.aggregate_align }; - - if let Some(repr_align) = def.repr().align { - align = align.max(AbiAndPrefAlign::new(repr_align)); - } - - let optimize = !def.repr().inhibit_union_abi_opt(); - let mut size = Size::ZERO; - let mut abi = Abi::Aggregate { sized: true }; - let index = VariantIdx::new(0); - for field in &variants[index] { - assert!(!field.is_unsized()); - align = align.max(field.align); - - // If all non-ZST fields have the same ABI, forward this ABI - if optimize && !field.is_zst() { - // Discard valid range information and allow undef - let field_abi = match field.abi { - Abi::Scalar(x) => Abi::Scalar(x.to_union()), - Abi::ScalarPair(x, y) => { - Abi::ScalarPair(x.to_union(), y.to_union()) - } - Abi::Vector { element: x, count } => { - Abi::Vector { element: x.to_union(), count } - } - Abi::Uninhabited | Abi::Aggregate { .. } => { - Abi::Aggregate { sized: true } - } - }; - - if size == Size::ZERO { - // first non ZST: initialize 'abi' - abi = field_abi; - } else if abi != field_abi { - // different fields have different ABI: reset to Aggregate - abi = Abi::Aggregate { sized: true }; - } - } - - size = cmp::max(size, field.size); - } - - if let Some(pack) = def.repr().pack { - align = align.min(AbiAndPrefAlign::new(pack)); - } - - return Ok(tcx.intern_layout(LayoutS { - variants: Variants::Single { index }, - fields: FieldsShape::Union( - NonZeroUsize::new(variants[index].len()) - .ok_or(LayoutError::Unknown(ty))?, - ), - abi, - largest_niche: None, - align, - size: size.align_to(align.abi), - })); - } - - // A variant is absent if it's uninhabited and only has ZST fields. - // Present uninhabited variants only require space for their fields, - // but *not* an encoding of the discriminant (e.g., a tag value). - // See issue #49298 for more details on the need to leave space - // for non-ZST uninhabited data (mostly partial initialization). - let absent = |fields: &[TyAndLayout<'_>]| { - let uninhabited = fields.iter().any(|f| f.abi.is_uninhabited()); - let is_zst = fields.iter().all(|f| f.is_zst()); - uninhabited && is_zst - }; - let (present_first, present_second) = { - let mut present_variants = variants - .iter_enumerated() - .filter_map(|(i, v)| if absent(v) { None } else { Some(i) }); - (present_variants.next(), present_variants.next()) - }; - let present_first = match present_first { - Some(present_first) => present_first, - // Uninhabited because it has no variants, or only absent ones. - None if def.is_enum() => { - return Ok(tcx.layout_of(param_env.and(tcx.types.never))?.layout); - } - // If it's a struct, still compute a layout so that we can still compute the - // field offsets. - None => VariantIdx::new(0), - }; - - let is_struct = !def.is_enum() || - // Only one variant is present. - (present_second.is_none() && - // Representation optimizations are allowed. - !def.repr().inhibit_enum_layout_opt()); - if is_struct { - // Struct, or univariant enum equivalent to a struct. - // (Typechecking will reject discriminant-sizing attrs.) 
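For unions, the removed branch earlier in this hunk takes the largest field size and alignment and rounds the size up to that alignment. A std-only sketch of that arithmetic, with hypothetical `(size, align)` pairs in bytes and packing/`repr(align)` ignored:

// Round `size` up to a multiple of `align` (align must be non-zero).
fn align_to(size: u64, align: u64) -> u64 {
    (size + align - 1) / align * align
}

// (size, align) of the union: largest field size, largest field alignment,
// with the size rounded up to that alignment.
fn union_layout(fields: &[(u64, u64)]) -> (u64, u64) {
    let align = fields.iter().map(|&(_, a)| a).max().unwrap_or(1);
    let size = fields.iter().map(|&(s, _)| s).max().unwrap_or(0);
    (align_to(size, align), align)
}

fn main() {
    // union U { a: u8, b: u16, c: [u8; 3] } -> 4 bytes large, 2-byte aligned.
    assert_eq!(union_layout(&[(1, 1), (2, 2), (3, 1)]), (4, 2));
}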
- - let v = present_first; - let kind = if def.is_enum() || variants[v].is_empty() { - StructKind::AlwaysSized - } else { - let param_env = tcx.param_env(def.did()); - let last_field = def.variant(v).fields.last().unwrap(); - let always_sized = - tcx.type_of(last_field.did).is_sized(tcx.at(DUMMY_SP), param_env); - if !always_sized { - StructKind::MaybeUnsized - } else { - StructKind::AlwaysSized - } - }; - - let mut st = self.univariant_uninterned(ty, &variants[v], &def.repr(), kind)?; - st.variants = Variants::Single { index: v }; - - if def.is_unsafe_cell() { - let hide_niches = |scalar: &mut _| match scalar { - Scalar::Initialized { value, valid_range } => { - *valid_range = WrappingRange::full(value.size(dl)) - } - // Already doesn't have any niches - Scalar::Union { .. } => {} - }; - match &mut st.abi { - Abi::Uninhabited => {} - Abi::Scalar(scalar) => hide_niches(scalar), - Abi::ScalarPair(a, b) => { - hide_niches(a); - hide_niches(b); - } - Abi::Vector { element, count: _ } => hide_niches(element), - Abi::Aggregate { sized: _ } => {} - } - st.largest_niche = None; - return Ok(tcx.intern_layout(st)); - } - - let (start, end) = self.tcx.layout_scalar_valid_range(def.did()); - match st.abi { - Abi::Scalar(ref mut scalar) | Abi::ScalarPair(ref mut scalar, _) => { - // the asserts ensure that we are not using the - // `#[rustc_layout_scalar_valid_range(n)]` - // attribute to widen the range of anything as that would probably - // result in UB somewhere - // FIXME(eddyb) the asserts are probably not needed, - // as larger validity ranges would result in missed - // optimizations, *not* wrongly assuming the inner - // value is valid. e.g. unions enlarge validity ranges, - // because the values may be uninitialized. - if let Bound::Included(start) = start { - // FIXME(eddyb) this might be incorrect - it doesn't - // account for wrap-around (end < start) ranges. - let valid_range = scalar.valid_range_mut(); - assert!(valid_range.start <= start); - valid_range.start = start; - } - if let Bound::Included(end) = end { - // FIXME(eddyb) this might be incorrect - it doesn't - // account for wrap-around (end < start) ranges. - let valid_range = scalar.valid_range_mut(); - assert!(valid_range.end >= end); - valid_range.end = end; - } - - // Update `largest_niche` if we have introduced a larger niche. - let niche = Niche::from_scalar(dl, Size::ZERO, *scalar); - if let Some(niche) = niche { - match st.largest_niche { - Some(largest_niche) => { - // Replace the existing niche even if they're equal, - // because this one is at a lower offset. - if largest_niche.available(dl) <= niche.available(dl) { - st.largest_niche = Some(niche); - } - } - None => st.largest_niche = Some(niche), - } - } - } - _ => assert!( - start == Bound::Unbounded && end == Bound::Unbounded, - "nonscalar layout for layout_scalar_valid_range type {:?}: {:#?}", - def, - st, - ), - } - - return Ok(tcx.intern_layout(st)); - } - - // At this point, we have handled all unions and - // structs. (We have also handled univariant enums - // that allow representation optimization.) - assert!(def.is_enum()); - - // The current code for niche-filling relies on variant indices - // instead of actual discriminants, so dataful enums with - // explicit discriminants (RFC #2363) would misbehave. - let no_explicit_discriminants = def - .variants() - .iter_enumerated() - .all(|(i, v)| v.discr == ty::VariantDiscr::Relative(i.as_u32())); - - let mut niche_filling_layout = None; - - // Niche-filling enum optimization. 
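The niche-filling optimization implemented by the removed code below is observable from ordinary Rust: an enum can hide its tag in a forbidden value (a niche) of one of its fields. Both equalities below are documented guarantees for these standard-library types:

use std::mem::size_of;
use std::num::NonZeroU8;

fn main() {
    // `&u8` is never null, so `None` can be encoded as the null value and
    // `Option<&u8>` needs no separate discriminant.
    assert_eq!(size_of::<Option<&u8>>(), size_of::<&u8>());
    // `NonZeroU8` leaves the value 0 free, which encodes `None`.
    assert_eq!(size_of::<Option<NonZeroU8>>(), size_of::<NonZeroU8>());
}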
- if !def.repr().inhibit_enum_layout_opt() && no_explicit_discriminants { - let mut dataful_variant = None; - let mut niche_variants = VariantIdx::MAX..=VariantIdx::new(0); - - // Find one non-ZST variant. - 'variants: for (v, fields) in variants.iter_enumerated() { - if absent(fields) { - continue 'variants; - } - for f in fields { - if !f.is_zst() { - if dataful_variant.is_none() { - dataful_variant = Some(v); - continue 'variants; - } else { - dataful_variant = None; - break 'variants; - } - } - } - niche_variants = *niche_variants.start().min(&v)..=v; - } - - if niche_variants.start() > niche_variants.end() { - dataful_variant = None; - } - - if let Some(i) = dataful_variant { - let count = (niche_variants.end().as_u32() - - niche_variants.start().as_u32() - + 1) as u128; - - // Find the field with the largest niche - let niche_candidate = variants[i] - .iter() - .enumerate() - .filter_map(|(j, field)| Some((j, field.largest_niche?))) - .max_by_key(|(_, niche)| niche.available(dl)); - - if let Some((field_index, niche, (niche_start, niche_scalar))) = - niche_candidate.and_then(|(field_index, niche)| { - Some((field_index, niche, niche.reserve(self, count)?)) - }) - { - let mut align = dl.aggregate_align; - let st = variants - .iter_enumerated() - .map(|(j, v)| { - let mut st = self.univariant_uninterned( - ty, - v, - &def.repr(), - StructKind::AlwaysSized, - )?; - st.variants = Variants::Single { index: j }; - - align = align.max(st.align); - - Ok(tcx.intern_layout(st)) - }) - .collect::<Result<IndexVec<VariantIdx, _>, _>>()?; - - let offset = st[i].fields().offset(field_index) + niche.offset; - - // Align the total size to the largest alignment. - let size = st[i].size().align_to(align.abi); - - let abi = if st.iter().all(|v| v.abi().is_uninhabited()) { - Abi::Uninhabited - } else if align == st[i].align() && size == st[i].size() { - // When the total alignment and size match, we can use the - // same ABI as the scalar variant with the reserved niche. - match st[i].abi() { - Abi::Scalar(_) => Abi::Scalar(niche_scalar), - Abi::ScalarPair(first, second) => { - // Only the niche is guaranteed to be initialised, - // so use union layout for the other primitive. - if offset.bytes() == 0 { - Abi::ScalarPair(niche_scalar, second.to_union()) - } else { - Abi::ScalarPair(first.to_union(), niche_scalar) - } - } - _ => Abi::Aggregate { sized: true }, - } - } else { - Abi::Aggregate { sized: true } - }; - - let largest_niche = Niche::from_scalar(dl, offset, niche_scalar); - - niche_filling_layout = Some(LayoutS { - variants: Variants::Multiple { - tag: niche_scalar, - tag_encoding: TagEncoding::Niche { - dataful_variant: i, - niche_variants, - niche_start, - }, - tag_field: 0, - variants: st, - }, - fields: FieldsShape::Arbitrary { - offsets: vec![offset], - memory_index: vec![0], - }, - abi, - largest_niche, - size, - align, - }); - } - } - } - - let (mut min, mut max) = (i128::MAX, i128::MIN); - let discr_type = def.repr().discr_type(); - let bits = Integer::from_attr(self, discr_type).size().bits(); - for (i, discr) in def.discriminants(tcx) { - if variants[i].iter().any(|f| f.abi.is_uninhabited()) { - continue; - } - let mut x = discr.val as i128; - if discr_type.is_signed() { - // sign extend the raw representation to be an i128 - x = (x << (128 - bits)) >> (128 - bits); - } - if x < min { - min = x; - } - if x > max { - max = x; - } - } - // We might have no inhabited variants, so pretend there's at least one. 
- if (min, max) == (i128::MAX, i128::MIN) { - min = 0; - max = 0; - } - assert!(min <= max, "discriminant range is {}...{}", min, max); - let (min_ity, signed) = Integer::repr_discr(tcx, ty, &def.repr(), min, max); - - let mut align = dl.aggregate_align; - let mut size = Size::ZERO; - - // We're interested in the smallest alignment, so start large. - let mut start_align = Align::from_bytes(256).unwrap(); - assert_eq!(Integer::for_align(dl, start_align), None); - - // repr(C) on an enum tells us to make a (tag, union) layout, - // so we need to grow the prefix alignment to be at least - // the alignment of the union. (This value is used both for - // determining the alignment of the overall enum, and the - // determining the alignment of the payload after the tag.) - let mut prefix_align = min_ity.align(dl).abi; - if def.repr().c() { - for fields in &variants { - for field in fields { - prefix_align = prefix_align.max(field.align.abi); - } - } - } - - // Create the set of structs that represent each variant. - let mut layout_variants = variants - .iter_enumerated() - .map(|(i, field_layouts)| { - let mut st = self.univariant_uninterned( - ty, - &field_layouts, - &def.repr(), - StructKind::Prefixed(min_ity.size(), prefix_align), - )?; - st.variants = Variants::Single { index: i }; - // Find the first field we can't move later - // to make room for a larger discriminant. - for field in - st.fields.index_by_increasing_offset().map(|j| field_layouts[j]) - { - if !field.is_zst() || field.align.abi.bytes() != 1 { - start_align = start_align.min(field.align.abi); - break; - } - } - size = cmp::max(size, st.size); - align = align.max(st.align); - Ok(st) - }) - .collect::<Result<IndexVec<VariantIdx, _>, _>>()?; - - // Align the maximum variant size to the largest alignment. - size = size.align_to(align.abi); - - if size.bytes() >= dl.obj_size_bound() { - return Err(LayoutError::SizeOverflow(ty)); - } - - let typeck_ity = Integer::from_attr(dl, def.repr().discr_type()); - if typeck_ity < min_ity { - // It is a bug if Layout decided on a greater discriminant size than typeck for - // some reason at this point (based on values discriminant can take on). Mostly - // because this discriminant will be loaded, and then stored into variable of - // type calculated by typeck. Consider such case (a bug): typeck decided on - // byte-sized discriminant, but layout thinks we need a 16-bit to store all - // discriminant values. That would be a bug, because then, in codegen, in order - // to store this 16-bit discriminant into 8-bit sized temporary some of the - // space necessary to represent would have to be discarded (or layout is wrong - // on thinking it needs 16 bits) - bug!( - "layout decided on a larger discriminant type ({:?}) than typeck ({:?})", - min_ity, - typeck_ity - ); - // However, it is fine to make discr type however large (as an optimisation) - // after this point – we’ll just truncate the value we load in codegen. - } - - // Check to see if we should use a different type for the - // discriminant. We can safely use a type with the same size - // as the alignment of the first field of each variant. - // We increase the size of the discriminant to avoid LLVM copying - // padding when it doesn't need to. This normally causes unaligned - // load/stores and excessive memcpy/memset operations. By using a - // bigger integer size, LLVM can be sure about its contents and - // won't be so conservative. 
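Before the widening discussed in the comment above, the removed code asks `Integer::repr_discr` for the smallest integer able to hold the discriminant range. A rough, std-only sketch of that minimum-width selection (repr attributes, the alignment-driven widening, and niche considerations are omitted; `discr_int` is an illustrative name, not a rustc API):

// Returns (bits, signed) for the narrowest 8/16/32/64/128-bit integer that can
// represent every discriminant in `min..=max`.
fn discr_int(min: i128, max: i128) -> (u32, bool) {
    let signed = min < 0;
    for bits in [8u32, 16, 32, 64] {
        let fits = if signed {
            min >= -(1i128 << (bits - 1)) && max < (1i128 << (bits - 1))
        } else {
            (max as u128) < (1u128 << bits)
        };
        if fits {
            return (bits, signed);
        }
    }
    (128, signed)
}

fn main() {
    assert_eq!(discr_int(0, 255), (8, false));  // fits in an 8-bit unsigned tag
    assert_eq!(discr_int(-1, 200), (16, true)); // needs a signed 16-bit tag
}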
- - // Use the initial field alignment - let mut ity = if def.repr().c() || def.repr().int.is_some() { - min_ity - } else { - Integer::for_align(dl, start_align).unwrap_or(min_ity) - }; - - // If the alignment is not larger than the chosen discriminant size, - // don't use the alignment as the final size. - if ity <= min_ity { - ity = min_ity; - } else { - // Patch up the variants' first few fields. - let old_ity_size = min_ity.size(); - let new_ity_size = ity.size(); - for variant in &mut layout_variants { - match variant.fields { - FieldsShape::Arbitrary { ref mut offsets, .. } => { - for i in offsets { - if *i <= old_ity_size { - assert_eq!(*i, old_ity_size); - *i = new_ity_size; - } - } - // We might be making the struct larger. - if variant.size <= old_ity_size { - variant.size = new_ity_size; - } - } - _ => bug!(), - } - } - } - - let tag_mask = ity.size().unsigned_int_max(); - let tag = Scalar::Initialized { - value: Int(ity, signed), - valid_range: WrappingRange { - start: (min as u128 & tag_mask), - end: (max as u128 & tag_mask), - }, - }; - let mut abi = Abi::Aggregate { sized: true }; - - if layout_variants.iter().all(|v| v.abi.is_uninhabited()) { - abi = Abi::Uninhabited; - } else if tag.size(dl) == size { - // Make sure we only use scalar layout when the enum is entirely its - // own tag (i.e. it has no padding nor any non-ZST variant fields). - abi = Abi::Scalar(tag); - } else { - // Try to use a ScalarPair for all tagged enums. - let mut common_prim = None; - let mut common_prim_initialized_in_all_variants = true; - for (field_layouts, layout_variant) in iter::zip(&variants, &layout_variants) { - let FieldsShape::Arbitrary { ref offsets, .. } = layout_variant.fields else { - bug!(); - }; - let mut fields = - iter::zip(field_layouts, offsets).filter(|p| !p.0.is_zst()); - let (field, offset) = match (fields.next(), fields.next()) { - (None, None) => { - common_prim_initialized_in_all_variants = false; - continue; - } - (Some(pair), None) => pair, - _ => { - common_prim = None; - break; - } - }; - let prim = match field.abi { - Abi::Scalar(scalar) => { - common_prim_initialized_in_all_variants &= - matches!(scalar, Scalar::Initialized { .. }); - scalar.primitive() - } - _ => { - common_prim = None; - break; - } - }; - if let Some(pair) = common_prim { - // This is pretty conservative. We could go fancier - // by conflating things like i32 and u32, or even - // realising that (u8, u8) could just cohabit with - // u16 or even u32. - if pair != (prim, offset) { - common_prim = None; - break; - } - } else { - common_prim = Some((prim, offset)); - } - } - if let Some((prim, offset)) = common_prim { - let prim_scalar = if common_prim_initialized_in_all_variants { - scalar_unit(prim) - } else { - // Common prim might be uninit. - Scalar::Union { value: prim } - }; - let pair = self.scalar_pair(tag, prim_scalar); - let pair_offsets = match pair.fields { - FieldsShape::Arbitrary { ref offsets, ref memory_index } => { - assert_eq!(memory_index, &[0, 1]); - offsets - } - _ => bug!(), - }; - if pair_offsets[0] == Size::ZERO - && pair_offsets[1] == *offset - && align == pair.align - && size == pair.size - { - // We can use `ScalarPair` only when it matches our - // already computed layout (including `#[repr(C)]`). - abi = pair.abi; - } - } - } - - // If we pick a "clever" (by-value) ABI, we might have to adjust the ABI of the - // variants to ensure they are consistent. This is because a downcast is - // semantically a NOP, and thus should not affect layout. 
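The `tag_mask` computation above truncates the signed `min..=max` discriminant range to the tag's width, which can produce a wrapping valid range. A small standalone sketch of that arithmetic (the helper name is invented for illustration):

    /// Truncate a signed discriminant range to a tag of `bits` width,
    /// mirroring the `min as u128 & tag_mask` / `max as u128 & tag_mask`
    /// computation above. The result may wrap (start > end), e.g. for -1..=1.
    fn tag_valid_range(min: i128, max: i128, bits: u32) -> (u128, u128) {
        let tag_mask = if bits == 128 { u128::MAX } else { (1u128 << bits) - 1 };
        ((min as u128) & tag_mask, (max as u128) & tag_mask)
    }

    fn main() {
        // An i8-sized tag for discriminants -1, 0, 1: the valid range wraps
        // around zero, encoded as start = 0xff, end = 0x01.
        assert_eq!(tag_valid_range(-1, 1, 8), (0xff, 0x01));
        // An unsigned range stays as-is.
        assert_eq!(tag_valid_range(0, 5, 8), (0x00, 0x05));
    }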
- if matches!(abi, Abi::Scalar(..) | Abi::ScalarPair(..)) { - for variant in &mut layout_variants { - // We only do this for variants with fields; the others are not accessed anyway. - // Also do not overwrite any already existing "clever" ABIs. - if variant.fields.count() > 0 - && matches!(variant.abi, Abi::Aggregate { .. }) - { - variant.abi = abi; - // Also need to bump up the size and alignment, so that the entire value fits in here. - variant.size = cmp::max(variant.size, size); - variant.align.abi = cmp::max(variant.align.abi, align.abi); - } - } - } - - let largest_niche = Niche::from_scalar(dl, Size::ZERO, tag); - - let layout_variants = - layout_variants.into_iter().map(|v| tcx.intern_layout(v)).collect(); - - let tagged_layout = LayoutS { - variants: Variants::Multiple { - tag, - tag_encoding: TagEncoding::Direct, - tag_field: 0, - variants: layout_variants, - }, - fields: FieldsShape::Arbitrary { - offsets: vec![Size::ZERO], - memory_index: vec![0], - }, - largest_niche, - abi, - align, - size, - }; - - let best_layout = match (tagged_layout, niche_filling_layout) { - (tagged_layout, Some(niche_filling_layout)) => { - // Pick the smaller layout; otherwise, - // pick the layout with the larger niche; otherwise, - // pick tagged as it has simpler codegen. - cmp::min_by_key(tagged_layout, niche_filling_layout, |layout| { - let niche_size = layout.largest_niche.map_or(0, |n| n.available(dl)); - (layout.size, cmp::Reverse(niche_size)) - }) - } - (tagged_layout, None) => tagged_layout, - }; - - tcx.intern_layout(best_layout) - } - - // Types with no meaningful known layout. - ty::Projection(_) | ty::Opaque(..) => { - // NOTE(eddyb) `layout_of` query should've normalized these away, - // if that was possible, so there's no reason to try again here. - return Err(LayoutError::Unknown(ty)); - } - - ty::Placeholder(..) | ty::GeneratorWitness(..) | ty::Infer(_) => { - bug!("Layout::compute: unexpected type `{}`", ty) - } - - ty::Bound(..) | ty::Param(_) | ty::Error(_) => { - return Err(LayoutError::Unknown(ty)); - } - }) - } -} - -/// Overlap eligibility and variant assignment for each GeneratorSavedLocal. -#[derive(Clone, Debug, PartialEq)] -enum SavedLocalEligibility { - Unassigned, - Assigned(VariantIdx), - // FIXME: Use newtype_index so we aren't wasting bytes - Ineligible(Option<u32>), -} - -// When laying out generators, we divide our saved local fields into two -// categories: overlap-eligible and overlap-ineligible. -// -// Those fields which are ineligible for overlap go in a "prefix" at the -// beginning of the layout, and always have space reserved for them. -// -// Overlap-eligible fields are only assigned to one variant, so we lay -// those fields out for each variant and put them right after the -// prefix. -// -// Finally, in the layout details, we point to the fields from the -// variants they are assigned to. It is possible for some fields to be -// included in multiple variants. No field ever "moves around" in the -// layout; its offset is always the same. -// -// Also included in the layout are the upvars and the discriminant. -// These are included as fields on the "outer" layout; they are not part -// of any variant. -impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> { - /// Compute the eligibility and assignment of each local. 
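The niche-filling layout built above is what makes enums such as `Option<&T>` pointer-sized: spare bit patterns in one variant's field encode the remaining variants, so no separate tag is needed, and the tagged layout only wins when it is smaller or keeps a larger niche. A small demonstration with `size_of` (the `Option` cases are documented guarantees; the `Flag` case shows current behaviour, not a promise):

    use std::mem::size_of;
    use std::num::NonZeroU32;

    #[allow(dead_code)]
    enum Flag {
        Set(bool), // bool only uses values 0 and 1, leaving 2..=255 as a niche
        Unset,     // encoded as one of the spare values, so no extra tag byte
    }

    fn main() {
        // Guaranteed: the null pointer encodes `None`.
        assert_eq!(size_of::<Option<&u8>>(), size_of::<&u8>());
        assert_eq!(size_of::<Option<NonZeroU32>>(), size_of::<u32>());
        // Current behaviour: the whole enum fits in the bool's byte.
        println!("size_of::<Flag>() = {}", size_of::<Flag>());
    }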
- fn generator_saved_local_eligibility( - &self, - info: &GeneratorLayout<'tcx>, - ) -> (BitSet<GeneratorSavedLocal>, IndexVec<GeneratorSavedLocal, SavedLocalEligibility>) { - use SavedLocalEligibility::*; - - let mut assignments: IndexVec<GeneratorSavedLocal, SavedLocalEligibility> = - IndexVec::from_elem_n(Unassigned, info.field_tys.len()); - - // The saved locals not eligible for overlap. These will get - // "promoted" to the prefix of our generator. - let mut ineligible_locals = BitSet::new_empty(info.field_tys.len()); - - // Figure out which of our saved locals are fields in only - // one variant. The rest are deemed ineligible for overlap. - for (variant_index, fields) in info.variant_fields.iter_enumerated() { - for local in fields { - match assignments[*local] { - Unassigned => { - assignments[*local] = Assigned(variant_index); - } - Assigned(idx) => { - // We've already seen this local at another suspension - // point, so it is no longer a candidate. - trace!( - "removing local {:?} in >1 variant ({:?}, {:?})", - local, - variant_index, - idx - ); - ineligible_locals.insert(*local); - assignments[*local] = Ineligible(None); - } - Ineligible(_) => {} - } - } - } - - // Next, check every pair of eligible locals to see if they - // conflict. - for local_a in info.storage_conflicts.rows() { - let conflicts_a = info.storage_conflicts.count(local_a); - if ineligible_locals.contains(local_a) { - continue; - } - - for local_b in info.storage_conflicts.iter(local_a) { - // local_a and local_b are storage live at the same time, therefore they - // cannot overlap in the generator layout. The only way to guarantee - // this is if they are in the same variant, or one is ineligible - // (which means it is stored in every variant). - if ineligible_locals.contains(local_b) - || assignments[local_a] == assignments[local_b] - { - continue; - } - - // If they conflict, we will choose one to make ineligible. - // This is not always optimal; it's just a greedy heuristic that - // seems to produce good results most of the time. - let conflicts_b = info.storage_conflicts.count(local_b); - let (remove, other) = - if conflicts_a > conflicts_b { (local_a, local_b) } else { (local_b, local_a) }; - ineligible_locals.insert(remove); - assignments[remove] = Ineligible(None); - trace!("removing local {:?} due to conflict with {:?}", remove, other); - } - } - - // Count the number of variants in use. If only one of them, then it is - // impossible to overlap any locals in our layout. In this case it's - // always better to make the remaining locals ineligible, so we can - // lay them out with the other locals in the prefix and eliminate - // unnecessary padding bytes. - { - let mut used_variants = BitSet::new_empty(info.variant_fields.len()); - for assignment in &assignments { - if let Assigned(idx) = assignment { - used_variants.insert(*idx); - } - } - if used_variants.count() < 2 { - for assignment in assignments.iter_mut() { - *assignment = Ineligible(None); - } - ineligible_locals.insert_all(); - } - } - - // Write down the order of our locals that will be promoted to the prefix. - { - for (idx, local) in ineligible_locals.iter().enumerate() { - assignments[local] = Ineligible(Some(idx as u32)); - } - } - debug!("generator saved local assignments: {:?}", assignments); - - (ineligible_locals, assignments) - } - - /// Compute the full generator layout. 
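The eligibility pass above is a greedy heuristic over the storage-conflict graph: a local used by more than one variant, or conflicting with a local assigned to a different variant, is demoted to the always-present prefix. A simplified standalone sketch of that heuristic, with plain `Vec`s standing in for rustc's `IndexVec`/`BitSet` and invented types:

    #[derive(Clone, Copy, PartialEq, Debug)]
    enum Eligibility {
        Unassigned,
        Assigned(usize), // variant index
        Ineligible,      // promoted to the prefix
    }

    /// `variant_fields[v]` lists the saved locals used by variant `v`;
    /// `conflicts[a][b]` is true if locals `a` and `b` are storage-live at once.
    fn assign_locals(
        num_locals: usize,
        variant_fields: &[Vec<usize>],
        conflicts: &[Vec<bool>],
    ) -> Vec<Eligibility> {
        use Eligibility::*;
        let mut assignments = vec![Unassigned; num_locals];

        // A local mentioned by more than one variant cannot overlap anything.
        for (variant, fields) in variant_fields.iter().enumerate() {
            for &local in fields {
                assignments[local] = match assignments[local] {
                    Unassigned => Assigned(variant),
                    _ => Ineligible,
                };
            }
        }

        // Greedily break conflicts: of two conflicting locals assigned to
        // different variants, demote the one with more conflicts overall.
        for a in 0..num_locals {
            for b in (a + 1)..num_locals {
                if !conflicts[a][b]
                    || assignments[a] == Ineligible
                    || assignments[b] == Ineligible
                    || assignments[a] == assignments[b]
                {
                    continue;
                }
                let count = |x: usize| conflicts[x].iter().filter(|&&c| c).count();
                let demote = if count(a) > count(b) { a } else { b };
                assignments[demote] = Ineligible;
            }
        }
        assignments
    }

    fn main() {
        // Two variants, three locals; local 2 appears in both variants.
        let variants = vec![vec![0, 2], vec![1, 2]];
        let conflicts = vec![vec![false; 3], vec![false; 3], vec![false; 3]];
        println!("{:?}", assign_locals(3, &variants, &conflicts));
        // [Assigned(0), Assigned(1), Ineligible]
    }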
- fn generator_layout( - &self, - ty: Ty<'tcx>, - def_id: hir::def_id::DefId, - substs: SubstsRef<'tcx>, - ) -> Result<Layout<'tcx>, LayoutError<'tcx>> { - use SavedLocalEligibility::*; - let tcx = self.tcx; - let subst_field = |ty: Ty<'tcx>| EarlyBinder(ty).subst(tcx, substs); - - let Some(info) = tcx.generator_layout(def_id) else { - return Err(LayoutError::Unknown(ty)); - }; - let (ineligible_locals, assignments) = self.generator_saved_local_eligibility(&info); - - // Build a prefix layout, including "promoting" all ineligible - // locals as part of the prefix. We compute the layout of all of - // these fields at once to get optimal packing. - let tag_index = substs.as_generator().prefix_tys().count(); - - // `info.variant_fields` already accounts for the reserved variants, so no need to add them. - let max_discr = (info.variant_fields.len() - 1) as u128; - let discr_int = Integer::fit_unsigned(max_discr); - let discr_int_ty = discr_int.to_ty(tcx, false); - let tag = Scalar::Initialized { - value: Primitive::Int(discr_int, false), - valid_range: WrappingRange { start: 0, end: max_discr }, - }; - let tag_layout = self.tcx.intern_layout(LayoutS::scalar(self, tag)); - let tag_layout = TyAndLayout { ty: discr_int_ty, layout: tag_layout }; - - let promoted_layouts = ineligible_locals - .iter() - .map(|local| subst_field(info.field_tys[local])) - .map(|ty| tcx.mk_maybe_uninit(ty)) - .map(|ty| self.layout_of(ty)); - let prefix_layouts = substs - .as_generator() - .prefix_tys() - .map(|ty| self.layout_of(ty)) - .chain(iter::once(Ok(tag_layout))) - .chain(promoted_layouts) - .collect::<Result<Vec<_>, _>>()?; - let prefix = self.univariant_uninterned( - ty, - &prefix_layouts, - &ReprOptions::default(), - StructKind::AlwaysSized, - )?; - - let (prefix_size, prefix_align) = (prefix.size, prefix.align); - - // Split the prefix layout into the "outer" fields (upvars and - // discriminant) and the "promoted" fields. Promoted fields will - // get included in each variant that requested them in - // GeneratorLayout. - debug!("prefix = {:#?}", prefix); - let (outer_fields, promoted_offsets, promoted_memory_index) = match prefix.fields { - FieldsShape::Arbitrary { mut offsets, memory_index } => { - let mut inverse_memory_index = invert_mapping(&memory_index); - - // "a" (`0..b_start`) and "b" (`b_start..`) correspond to - // "outer" and "promoted" fields respectively. - let b_start = (tag_index + 1) as u32; - let offsets_b = offsets.split_off(b_start as usize); - let offsets_a = offsets; - - // Disentangle the "a" and "b" components of `inverse_memory_index` - // by preserving the order but keeping only one disjoint "half" each. - // FIXME(eddyb) build a better abstraction for permutations, if possible. 
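Promoted locals are wrapped in `MaybeUninit` above because the prefix exists for the generator's whole lifetime, including suspension points where those locals are dead. `MaybeUninit<T>` reserves the space without claiming the bytes are initialised, and it is documented to have the same size and alignment as `T`:

    use std::mem::{align_of, size_of, MaybeUninit};

    fn main() {
        // Guaranteed by the standard library documentation.
        assert_eq!(size_of::<MaybeUninit<u64>>(), size_of::<u64>());
        assert_eq!(align_of::<MaybeUninit<u64>>(), align_of::<u64>());

        // Space is reserved up front; the value is written only once it is live.
        let mut slot: MaybeUninit<u64> = MaybeUninit::uninit();
        slot.write(42);
        // SAFETY: `slot` was just initialised by `write`.
        assert_eq!(unsafe { slot.assume_init() }, 42);
    }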
- let inverse_memory_index_b: Vec<_> = - inverse_memory_index.iter().filter_map(|&i| i.checked_sub(b_start)).collect(); - inverse_memory_index.retain(|&i| i < b_start); - let inverse_memory_index_a = inverse_memory_index; - - // Since `inverse_memory_index_{a,b}` each only refer to their - // respective fields, they can be safely inverted - let memory_index_a = invert_mapping(&inverse_memory_index_a); - let memory_index_b = invert_mapping(&inverse_memory_index_b); - - let outer_fields = - FieldsShape::Arbitrary { offsets: offsets_a, memory_index: memory_index_a }; - (outer_fields, offsets_b, memory_index_b) - } - _ => bug!(), - }; - - let mut size = prefix.size; - let mut align = prefix.align; - let variants = info - .variant_fields - .iter_enumerated() - .map(|(index, variant_fields)| { - // Only include overlap-eligible fields when we compute our variant layout. - let variant_only_tys = variant_fields - .iter() - .filter(|local| match assignments[**local] { - Unassigned => bug!(), - Assigned(v) if v == index => true, - Assigned(_) => bug!("assignment does not match variant"), - Ineligible(_) => false, - }) - .map(|local| subst_field(info.field_tys[*local])); - - let mut variant = self.univariant_uninterned( - ty, - &variant_only_tys - .map(|ty| self.layout_of(ty)) - .collect::<Result<Vec<_>, _>>()?, - &ReprOptions::default(), - StructKind::Prefixed(prefix_size, prefix_align.abi), - )?; - variant.variants = Variants::Single { index }; - - let FieldsShape::Arbitrary { offsets, memory_index } = variant.fields else { - bug!(); - }; - - // Now, stitch the promoted and variant-only fields back together in - // the order they are mentioned by our GeneratorLayout. - // Because we only use some subset (that can differ between variants) - // of the promoted fields, we can't just pick those elements of the - // `promoted_memory_index` (as we'd end up with gaps). - // So instead, we build an "inverse memory_index", as if all of the - // promoted fields were being used, but leave the elements not in the - // subset as `INVALID_FIELD_IDX`, which we can filter out later to - // obtain a valid (bijective) mapping. - const INVALID_FIELD_IDX: u32 = !0; - let mut combined_inverse_memory_index = - vec![INVALID_FIELD_IDX; promoted_memory_index.len() + memory_index.len()]; - let mut offsets_and_memory_index = iter::zip(offsets, memory_index); - let combined_offsets = variant_fields - .iter() - .enumerate() - .map(|(i, local)| { - let (offset, memory_index) = match assignments[*local] { - Unassigned => bug!(), - Assigned(_) => { - let (offset, memory_index) = - offsets_and_memory_index.next().unwrap(); - (offset, promoted_memory_index.len() as u32 + memory_index) - } - Ineligible(field_idx) => { - let field_idx = field_idx.unwrap() as usize; - (promoted_offsets[field_idx], promoted_memory_index[field_idx]) - } - }; - combined_inverse_memory_index[memory_index as usize] = i as u32; - offset - }) - .collect(); - - // Remove the unused slots and invert the mapping to obtain the - // combined `memory_index` (also see previous comment). 
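The split above leans on `invert_mapping`, which turns a `memory_index` permutation (field order to memory order) into its inverse and back. A minimal sketch of that helper together with the `checked_sub`/`retain` trick for keeping one disjoint half, using made-up data:

    /// Invert a permutation: if `map[i] == j` then `inverse[j] == i`.
    fn invert_mapping(map: &[u32]) -> Vec<u32> {
        let mut inverse = vec![0; map.len()];
        for (i, &j) in map.iter().enumerate() {
            inverse[j as usize] = i as u32;
        }
        inverse
    }

    fn main() {
        // memory_index: field 0 is 2nd in memory, field 1 is 1st, field 2 is 3rd.
        let memory_index = [1u32, 0, 2];
        let inverse = invert_mapping(&memory_index);
        assert_eq!(inverse, vec![1, 0, 2]);

        // Keep only entries referring to fields >= 2 ("half b"), rebased to 0,
        // mirroring the `checked_sub(b_start)` / `retain` split above.
        let b_start = 2u32;
        let half_b: Vec<u32> =
            inverse.iter().filter_map(|&i| i.checked_sub(b_start)).collect();
        assert_eq!(half_b, vec![0]);
        assert_eq!(invert_mapping(&half_b), vec![0]);
    }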
- combined_inverse_memory_index.retain(|&i| i != INVALID_FIELD_IDX); - let combined_memory_index = invert_mapping(&combined_inverse_memory_index); - - variant.fields = FieldsShape::Arbitrary { - offsets: combined_offsets, - memory_index: combined_memory_index, - }; - - size = size.max(variant.size); - align = align.max(variant.align); - Ok(tcx.intern_layout(variant)) - }) - .collect::<Result<IndexVec<VariantIdx, _>, _>>()?; - - size = size.align_to(align.abi); - - let abi = - if prefix.abi.is_uninhabited() || variants.iter().all(|v| v.abi().is_uninhabited()) { - Abi::Uninhabited - } else { - Abi::Aggregate { sized: true } - }; - - let layout = tcx.intern_layout(LayoutS { - variants: Variants::Multiple { - tag, - tag_encoding: TagEncoding::Direct, - tag_field: tag_index, - variants, - }, - fields: outer_fields, - abi, - largest_niche: prefix.largest_niche, - size, - align, - }); - debug!("generator layout ({:?}): {:#?}", ty, layout); - Ok(layout) - } - - /// This is invoked by the `layout_of` query to record the final - /// layout of each type. - #[inline(always)] - fn record_layout_for_printing(&self, layout: TyAndLayout<'tcx>) { - // If we are running with `-Zprint-type-sizes`, maybe record layouts - // for dumping later. - if self.tcx.sess.opts.unstable_opts.print_type_sizes { - self.record_layout_for_printing_outlined(layout) - } - } - - fn record_layout_for_printing_outlined(&self, layout: TyAndLayout<'tcx>) { - // Ignore layouts that are done with non-empty environments or - // non-monomorphic layouts, as the user only wants to see the stuff - // resulting from the final codegen session. - if layout.ty.has_param_types_or_consts() || !self.param_env.caller_bounds().is_empty() { - return; - } - - // (delay format until we actually need it) - let record = |kind, packed, opt_discr_size, variants| { - let type_desc = format!("{:?}", layout.ty); - self.tcx.sess.code_stats.record_type_size( - kind, - type_desc, - layout.align.abi, - layout.size, - packed, - opt_discr_size, - variants, - ); - }; - - let adt_def = match *layout.ty.kind() { - ty::Adt(ref adt_def, _) => { - debug!("print-type-size t: `{:?}` process adt", layout.ty); - adt_def - } - - ty::Closure(..) 
=> { - debug!("print-type-size t: `{:?}` record closure", layout.ty); - record(DataTypeKind::Closure, false, None, vec![]); - return; - } - - _ => { - debug!("print-type-size t: `{:?}` skip non-nominal", layout.ty); - return; - } - }; - - let adt_kind = adt_def.adt_kind(); - let adt_packed = adt_def.repr().pack.is_some(); - - let build_variant_info = |n: Option<Symbol>, flds: &[Symbol], layout: TyAndLayout<'tcx>| { - let mut min_size = Size::ZERO; - let field_info: Vec<_> = flds - .iter() - .enumerate() - .map(|(i, &name)| { - let field_layout = layout.field(self, i); - let offset = layout.fields.offset(i); - let field_end = offset + field_layout.size; - if min_size < field_end { - min_size = field_end; - } - FieldInfo { - name, - offset: offset.bytes(), - size: field_layout.size.bytes(), - align: field_layout.align.abi.bytes(), - } - }) - .collect(); - - VariantInfo { - name: n, - kind: if layout.is_unsized() { SizeKind::Min } else { SizeKind::Exact }, - align: layout.align.abi.bytes(), - size: if min_size.bytes() == 0 { layout.size.bytes() } else { min_size.bytes() }, - fields: field_info, - } - }; - - match layout.variants { - Variants::Single { index } => { - if !adt_def.variants().is_empty() && layout.fields != FieldsShape::Primitive { - debug!( - "print-type-size `{:#?}` variant {}", - layout, - adt_def.variant(index).name - ); - let variant_def = &adt_def.variant(index); - let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect(); - record( - adt_kind.into(), - adt_packed, - None, - vec![build_variant_info(Some(variant_def.name), &fields, layout)], - ); - } else { - // (This case arises for *empty* enums; so give it - // zero variants.) - record(adt_kind.into(), adt_packed, None, vec![]); - } - } - - Variants::Multiple { tag, ref tag_encoding, .. } => { - debug!( - "print-type-size `{:#?}` adt general variants def {}", - layout.ty, - adt_def.variants().len() - ); - let variant_infos: Vec<_> = adt_def - .variants() - .iter_enumerated() - .map(|(i, variant_def)| { - let fields: Vec<_> = variant_def.fields.iter().map(|f| f.name).collect(); - build_variant_info( - Some(variant_def.name), - &fields, - layout.for_variant(self, i), - ) - }) - .collect(); - record( - adt_kind.into(), - adt_packed, - match tag_encoding { - TagEncoding::Direct => Some(tag.size(self)), - _ => None, - }, - variant_infos, - ); - } - } - } -} - /// Type size "skeleton", i.e., the only information determining a type's size. /// While this is conservative, (aside from constant sizes, only pointers, /// newtypes thereof and null pointer optimized enums are allowed), it is @@ -2083,7 +263,7 @@ impl<'tcx> SizeSkeleton<'tcx> { tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>, ) -> Result<SizeSkeleton<'tcx>, LayoutError<'tcx>> { - debug_assert!(!ty.has_infer_types_or_consts()); + debug_assert!(!ty.has_non_region_infer()); // First try computing a static layout. let err = match tcx.layout_of(param_env.and(ty)) { @@ -2099,7 +279,7 @@ impl<'tcx> SizeSkeleton<'tcx> { let tail = tcx.struct_tail_erasing_lifetimes(pointee, param_env); match tail.kind() { ty::Param(_) | ty::Projection(_) => { - debug_assert!(tail.has_param_types_or_consts()); + debug_assert!(tail.has_non_region_param()); Ok(SizeSkeleton::Pointer { non_zero, tail: tcx.erase_regions(tail) }) } _ => bug!( @@ -2468,7 +648,9 @@ where | ty::FnDef(..) | ty::GeneratorWitness(..) | ty::Foreign(..) - | ty::Dynamic(..) 
=> bug!("TyAndLayout::field({:?}): not applicable", this), + | ty::Dynamic(_, _, ty::Dyn) => { + bug!("TyAndLayout::field({:?}): not applicable", this) + } // Potentially-fat pointers. ty::Ref(_, pointee, _) | ty::RawPtr(ty::TypeAndMut { ty: pointee, .. }) => { @@ -2497,7 +679,7 @@ where match tcx.struct_tail_erasing_lifetimes(pointee, cx.param_env()).kind() { ty::Slice(_) | ty::Str => TyMaybeWithLayout::Ty(tcx.types.usize), - ty::Dynamic(_, _) => { + ty::Dynamic(_, _, ty::Dyn) => { TyMaybeWithLayout::Ty(tcx.mk_imm_ref( tcx.lifetimes.re_static, tcx.mk_array(tcx.types.usize, 3), @@ -2566,6 +748,22 @@ where } } + ty::Dynamic(_, _, ty::DynStar) => { + if i == 0 { + TyMaybeWithLayout::Ty(tcx.types.usize) + } else if i == 1 { + // FIXME(dyn-star) same FIXME as above applies here too + TyMaybeWithLayout::Ty( + tcx.mk_imm_ref( + tcx.lifetimes.re_static, + tcx.mk_array(tcx.types.usize, 3), + ), + ) + } else { + bug!("no field {i} on dyn*") + } + } + ty::Projection(_) | ty::Bound(..) | ty::Placeholder(..) @@ -2632,7 +830,7 @@ where } else { match mt { hir::Mutability::Not => { - if ty.is_freeze(tcx.at(DUMMY_SP), cx.param_env()) { + if ty.is_freeze(tcx, cx.param_env()) { PointerKind::Frozen } else { PointerKind::SharedMutable @@ -2643,7 +841,7 @@ where // noalias, as another pointer to the structure can be obtained, that // is not based-on the original reference. We consider all !Unpin // types to be potentially self-referential here. - if ty.is_unpin(tcx.at(DUMMY_SP), cx.param_env()) { + if ty.is_unpin(tcx, cx.param_env()) { PointerKind::UniqueBorrowed } else { PointerKind::UniqueBorrowedPinned @@ -2674,11 +872,11 @@ where // using more niches than just null (e.g., the first page of // the address space, or unaligned pointers). Variants::Multiple { - tag_encoding: TagEncoding::Niche { dataful_variant, .. }, + tag_encoding: TagEncoding::Niche { untagged_variant, .. }, tag_field, .. } if this.fields.offset(tag_field) == offset => { - Some(this.for_variant(cx, dataful_variant)) + Some(this.for_variant(cx, untagged_variant)) } _ => Some(this), }; @@ -2755,111 +953,6 @@ where } } -impl<'tcx> ty::Instance<'tcx> { - // NOTE(eddyb) this is private to avoid using it from outside of - // `fn_abi_of_instance` - any other uses are either too high-level - // for `Instance` (e.g. typeck would use `Ty::fn_sig` instead), - // or should go through `FnAbi` instead, to avoid losing any - // adjustments `fn_abi_of_instance` might be performing. - fn fn_sig_for_fn_abi( - &self, - tcx: TyCtxt<'tcx>, - param_env: ty::ParamEnv<'tcx>, - ) -> ty::PolyFnSig<'tcx> { - let ty = self.ty(tcx, param_env); - match *ty.kind() { - ty::FnDef(..) => { - // HACK(davidtwco,eddyb): This is a workaround for polymorphization considering - // parameters unused if they show up in the signature, but not in the `mir::Body` - // (i.e. due to being inside a projection that got normalized, see - // `src/test/ui/polymorphization/normalized_sig_types.rs`), and codegen not keeping - // track of a polymorphization `ParamEnv` to allow normalizing later. - let mut sig = match *ty.kind() { - ty::FnDef(def_id, substs) => tcx - .normalize_erasing_regions(tcx.param_env(def_id), tcx.bound_fn_sig(def_id)) - .subst(tcx, substs), - _ => unreachable!(), - }; - - if let ty::InstanceDef::VTableShim(..) = self.def { - // Modify `fn(self, ...)` to `fn(self: *mut Self, ...)`. 
- sig = sig.map_bound(|mut sig| { - let mut inputs_and_output = sig.inputs_and_output.to_vec(); - inputs_and_output[0] = tcx.mk_mut_ptr(inputs_and_output[0]); - sig.inputs_and_output = tcx.intern_type_list(&inputs_and_output); - sig - }); - } - sig - } - ty::Closure(def_id, substs) => { - let sig = substs.as_closure().sig(); - - let bound_vars = tcx.mk_bound_variable_kinds( - sig.bound_vars() - .iter() - .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))), - ); - let br = ty::BoundRegion { - var: ty::BoundVar::from_usize(bound_vars.len() - 1), - kind: ty::BoundRegionKind::BrEnv, - }; - let env_region = ty::ReLateBound(ty::INNERMOST, br); - let env_ty = tcx.closure_env_ty(def_id, substs, env_region).unwrap(); - - let sig = sig.skip_binder(); - ty::Binder::bind_with_vars( - tcx.mk_fn_sig( - iter::once(env_ty).chain(sig.inputs().iter().cloned()), - sig.output(), - sig.c_variadic, - sig.unsafety, - sig.abi, - ), - bound_vars, - ) - } - ty::Generator(_, substs, _) => { - let sig = substs.as_generator().poly_sig(); - - let bound_vars = tcx.mk_bound_variable_kinds( - sig.bound_vars() - .iter() - .chain(iter::once(ty::BoundVariableKind::Region(ty::BrEnv))), - ); - let br = ty::BoundRegion { - var: ty::BoundVar::from_usize(bound_vars.len() - 1), - kind: ty::BoundRegionKind::BrEnv, - }; - let env_region = ty::ReLateBound(ty::INNERMOST, br); - let env_ty = tcx.mk_mut_ref(tcx.mk_region(env_region), ty); - - let pin_did = tcx.require_lang_item(LangItem::Pin, None); - let pin_adt_ref = tcx.adt_def(pin_did); - let pin_substs = tcx.intern_substs(&[env_ty.into()]); - let env_ty = tcx.mk_adt(pin_adt_ref, pin_substs); - - let sig = sig.skip_binder(); - let state_did = tcx.require_lang_item(LangItem::GeneratorState, None); - let state_adt_ref = tcx.adt_def(state_did); - let state_substs = tcx.intern_substs(&[sig.yield_ty.into(), sig.return_ty.into()]); - let ret_ty = tcx.mk_adt(state_adt_ref, state_substs); - ty::Binder::bind_with_vars( - tcx.mk_fn_sig( - [env_ty, sig.resume_ty].iter(), - &ret_ty, - false, - hir::Unsafety::Normal, - rustc_target::spec::abi::Abi::Rust, - ), - bound_vars, - ) - } - _ => bug!("unexpected type {:?} in Instance::fn_sig", ty), - } - } -} - /// Calculates whether a function's ABI can unwind or not. /// /// This takes two primary parameters: @@ -2907,6 +1000,7 @@ impl<'tcx> ty::Instance<'tcx> { /// with `-Cpanic=abort` will look like they can't unwind when in fact they /// might (from a foreign exception or similar). #[inline] +#[tracing::instrument(level = "debug", skip(tcx))] pub fn fn_can_unwind<'tcx>(tcx: TyCtxt<'tcx>, fn_def_id: Option<DefId>, abi: SpecAbi) -> bool { if let Some(did) = fn_def_id { // Special attribute for functions which can't unwind. @@ -3001,40 +1095,6 @@ pub fn fn_can_unwind<'tcx>(tcx: TyCtxt<'tcx>, fn_def_id: Option<DefId>, abi: Spe } } -#[inline] -pub fn conv_from_spec_abi(tcx: TyCtxt<'_>, abi: SpecAbi) -> Conv { - use rustc_target::spec::abi::Abi::*; - match tcx.sess.target.adjust_abi(abi) { - RustIntrinsic | PlatformIntrinsic | Rust | RustCall => Conv::Rust, - RustCold => Conv::RustCold, - - // It's the ABI's job to select this, not ours. - System { .. } => bug!("system abi should be selected elsewhere"), - EfiApi => bug!("eficall abi should be selected elsewhere"), - - Stdcall { .. } => Conv::X86Stdcall, - Fastcall { .. } => Conv::X86Fastcall, - Vectorcall { .. } => Conv::X86VectorCall, - Thiscall { .. } => Conv::X86ThisCall, - C { .. } => Conv::C, - Unadjusted => Conv::C, - Win64 { .. } => Conv::X86_64Win64, - SysV64 { .. 
} => Conv::X86_64SysV, - Aapcs { .. } => Conv::ArmAapcs, - CCmseNonSecureCall => Conv::CCmseNonSecureCall, - PtxKernel => Conv::PtxKernel, - Msp430Interrupt => Conv::Msp430Intr, - X86Interrupt => Conv::X86Intr, - AmdGpuKernel => Conv::AmdGpuKernel, - AvrInterrupt => Conv::AvrInterrupt, - AvrNonBlockingInterrupt => Conv::AvrNonBlockingInterrupt, - Wasm => Conv::C, - - // These API constants ought to be more specific... - Cdecl { .. } => Conv::C, - } -} - /// Error produced by attempting to compute or adjust a `FnAbi`. #[derive(Copy, Clone, Debug, HashStable)] pub enum FnAbiError<'tcx> { @@ -3066,6 +1126,12 @@ impl<'tcx> fmt::Display for FnAbiError<'tcx> { } } +impl<'tcx> IntoDiagnostic<'tcx, !> for FnAbiError<'tcx> { + fn into_diagnostic(self, handler: &'tcx Handler) -> DiagnosticBuilder<'tcx, !> { + handler.struct_fatal(self.to_string()) + } +} + // FIXME(eddyb) maybe use something like this for an unified `fn_abi_of`, not // just for error handling. #[derive(Debug)] @@ -3123,6 +1189,7 @@ pub trait FnAbiOf<'tcx>: FnAbiOfHelpers<'tcx> { /// NB: that includes virtual calls, which are represented by "direct calls" /// to an `InstanceDef::Virtual` instance (of `<dyn Trait as Trait>::fn`). #[inline] + #[tracing::instrument(level = "debug", skip(self))] fn fn_abi_of_instance( &self, instance: ty::Instance<'tcx>, @@ -3146,359 +1213,3 @@ pub trait FnAbiOf<'tcx>: FnAbiOfHelpers<'tcx> { } impl<'tcx, C: FnAbiOfHelpers<'tcx>> FnAbiOf<'tcx> for C {} - -fn fn_abi_of_fn_ptr<'tcx>( - tcx: TyCtxt<'tcx>, - query: ty::ParamEnvAnd<'tcx, (ty::PolyFnSig<'tcx>, &'tcx ty::List<Ty<'tcx>>)>, -) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> { - let (param_env, (sig, extra_args)) = query.into_parts(); - - LayoutCx { tcx, param_env }.fn_abi_new_uncached(sig, extra_args, None, None, false) -} - -fn fn_abi_of_instance<'tcx>( - tcx: TyCtxt<'tcx>, - query: ty::ParamEnvAnd<'tcx, (ty::Instance<'tcx>, &'tcx ty::List<Ty<'tcx>>)>, -) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> { - let (param_env, (instance, extra_args)) = query.into_parts(); - - let sig = instance.fn_sig_for_fn_abi(tcx, param_env); - - let caller_location = if instance.def.requires_caller_location(tcx) { - Some(tcx.caller_location_ty()) - } else { - None - }; - - LayoutCx { tcx, param_env }.fn_abi_new_uncached( - sig, - extra_args, - caller_location, - Some(instance.def_id()), - matches!(instance.def, ty::InstanceDef::Virtual(..)), - ) -} - -impl<'tcx> LayoutCx<'tcx, TyCtxt<'tcx>> { - // FIXME(eddyb) perhaps group the signature/type-containing (or all of them?) - // arguments of this method, into a separate `struct`. - fn fn_abi_new_uncached( - &self, - sig: ty::PolyFnSig<'tcx>, - extra_args: &[Ty<'tcx>], - caller_location: Option<Ty<'tcx>>, - fn_def_id: Option<DefId>, - // FIXME(eddyb) replace this with something typed, like an `enum`. 
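`conv_from_spec_abi` above maps the ABI string written in source to a target calling convention. On the language side the ABI is chosen with an `extern` qualifier and becomes part of a function pointer's type:

    // "Rust" (the default) and "C" map to different calling conventions,
    // so the two pointer types below are not interchangeable.
    extern "C" fn add_c(a: i32, b: i32) -> i32 {
        a + b
    }

    fn add_rust(a: i32, b: i32) -> i32 {
        a + b
    }

    fn main() {
        let f: extern "C" fn(i32, i32) -> i32 = add_c;
        let g: fn(i32, i32) -> i32 = add_rust;
        assert_eq!(f(2, 3), g(2, 3));
        // `extern "system"` picks the platform's default system ABI
        // (e.g. "stdcall" on 32-bit Windows, "C" elsewhere), which is the
        // kind of selection the match above performs per target.
    }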
- force_thin_self_ptr: bool, - ) -> Result<&'tcx FnAbi<'tcx, Ty<'tcx>>, FnAbiError<'tcx>> { - debug!("fn_abi_new_uncached({:?}, {:?})", sig, extra_args); - - let sig = self.tcx.normalize_erasing_late_bound_regions(self.param_env, sig); - - let conv = conv_from_spec_abi(self.tcx(), sig.abi); - - let mut inputs = sig.inputs(); - let extra_args = if sig.abi == RustCall { - assert!(!sig.c_variadic && extra_args.is_empty()); - - if let Some(input) = sig.inputs().last() { - if let ty::Tuple(tupled_arguments) = input.kind() { - inputs = &sig.inputs()[0..sig.inputs().len() - 1]; - tupled_arguments - } else { - bug!( - "argument to function with \"rust-call\" ABI \ - is not a tuple" - ); - } - } else { - bug!( - "argument to function with \"rust-call\" ABI \ - is not a tuple" - ); - } - } else { - assert!(sig.c_variadic || extra_args.is_empty()); - extra_args - }; - - let target = &self.tcx.sess.target; - let target_env_gnu_like = matches!(&target.env[..], "gnu" | "musl" | "uclibc"); - let win_x64_gnu = target.os == "windows" && target.arch == "x86_64" && target.env == "gnu"; - let linux_s390x_gnu_like = - target.os == "linux" && target.arch == "s390x" && target_env_gnu_like; - let linux_sparc64_gnu_like = - target.os == "linux" && target.arch == "sparc64" && target_env_gnu_like; - let linux_powerpc_gnu_like = - target.os == "linux" && target.arch == "powerpc" && target_env_gnu_like; - use SpecAbi::*; - let rust_abi = matches!(sig.abi, RustIntrinsic | PlatformIntrinsic | Rust | RustCall); - - // Handle safe Rust thin and fat pointers. - let adjust_for_rust_scalar = |attrs: &mut ArgAttributes, - scalar: Scalar, - layout: TyAndLayout<'tcx>, - offset: Size, - is_return: bool| { - // Booleans are always a noundef i1 that needs to be zero-extended. - if scalar.is_bool() { - attrs.ext(ArgExtension::Zext); - attrs.set(ArgAttribute::NoUndef); - return; - } - - // Scalars which have invalid values cannot be undef. - if !scalar.is_always_valid(self) { - attrs.set(ArgAttribute::NoUndef); - } - - // Only pointer types handled below. - let Scalar::Initialized { value: Pointer, valid_range} = scalar else { return }; - - if !valid_range.contains(0) { - attrs.set(ArgAttribute::NonNull); - } - - if let Some(pointee) = layout.pointee_info_at(self, offset) { - if let Some(kind) = pointee.safe { - attrs.pointee_align = Some(pointee.align); - - // `Box` (`UniqueBorrowed`) are not necessarily dereferenceable - // for the entire duration of the function as they can be deallocated - // at any time. Same for shared mutable references. If LLVM had a - // way to say "dereferenceable on entry" we could use it here. - attrs.pointee_size = match kind { - PointerKind::UniqueBorrowed - | PointerKind::UniqueBorrowedPinned - | PointerKind::Frozen => pointee.size, - PointerKind::SharedMutable | PointerKind::UniqueOwned => Size::ZERO, - }; - - // `Box`, `&T`, and `&mut T` cannot be undef. - // Note that this only applies to the value of the pointer itself; - // this attribute doesn't make it UB for the pointed-to data to be undef. - attrs.set(ArgAttribute::NoUndef); - - // The aliasing rules for `Box<T>` are still not decided, but currently we emit - // `noalias` for it. This can be turned off using an unstable flag. 
- // See https://github.com/rust-lang/unsafe-code-guidelines/issues/326 - let noalias_for_box = - self.tcx().sess.opts.unstable_opts.box_noalias.unwrap_or(true); - - // `&mut` pointer parameters never alias other parameters, - // or mutable global data - // - // `&T` where `T` contains no `UnsafeCell<U>` is immutable, - // and can be marked as both `readonly` and `noalias`, as - // LLVM's definition of `noalias` is based solely on memory - // dependencies rather than pointer equality - // - // Due to past miscompiles in LLVM, we apply a separate NoAliasMutRef attribute - // for UniqueBorrowed arguments, so that the codegen backend can decide whether - // or not to actually emit the attribute. It can also be controlled with the - // `-Zmutable-noalias` debugging option. - let no_alias = match kind { - PointerKind::SharedMutable - | PointerKind::UniqueBorrowed - | PointerKind::UniqueBorrowedPinned => false, - PointerKind::UniqueOwned => noalias_for_box, - PointerKind::Frozen => !is_return, - }; - if no_alias { - attrs.set(ArgAttribute::NoAlias); - } - - if kind == PointerKind::Frozen && !is_return { - attrs.set(ArgAttribute::ReadOnly); - } - - if kind == PointerKind::UniqueBorrowed && !is_return { - attrs.set(ArgAttribute::NoAliasMutRef); - } - } - } - }; - - let arg_of = |ty: Ty<'tcx>, arg_idx: Option<usize>| -> Result<_, FnAbiError<'tcx>> { - let is_return = arg_idx.is_none(); - - let layout = self.layout_of(ty)?; - let layout = if force_thin_self_ptr && arg_idx == Some(0) { - // Don't pass the vtable, it's not an argument of the virtual fn. - // Instead, pass just the data pointer, but give it the type `*const/mut dyn Trait` - // or `&/&mut dyn Trait` because this is special-cased elsewhere in codegen - make_thin_self_ptr(self, layout) - } else { - layout - }; - - let mut arg = ArgAbi::new(self, layout, |layout, scalar, offset| { - let mut attrs = ArgAttributes::new(); - adjust_for_rust_scalar(&mut attrs, scalar, *layout, offset, is_return); - attrs - }); - - if arg.layout.is_zst() { - // For some forsaken reason, x86_64-pc-windows-gnu - // doesn't ignore zero-sized struct arguments. - // The same is true for {s390x,sparc64,powerpc}-unknown-linux-{gnu,musl,uclibc}. - if is_return - || rust_abi - || (!win_x64_gnu - && !linux_s390x_gnu_like - && !linux_sparc64_gnu_like - && !linux_powerpc_gnu_like) - { - arg.mode = PassMode::Ignore; - } - } - - Ok(arg) - }; - - let mut fn_abi = FnAbi { - ret: arg_of(sig.output(), None)?, - args: inputs - .iter() - .copied() - .chain(extra_args.iter().copied()) - .chain(caller_location) - .enumerate() - .map(|(i, ty)| arg_of(ty, Some(i))) - .collect::<Result<_, _>>()?, - c_variadic: sig.c_variadic, - fixed_count: inputs.len(), - conv, - can_unwind: fn_can_unwind(self.tcx(), fn_def_id, sig.abi), - }; - self.fn_abi_adjust_for_abi(&mut fn_abi, sig.abi)?; - debug!("fn_abi_new_uncached = {:?}", fn_abi); - Ok(self.tcx.arena.alloc(fn_abi)) - } - - fn fn_abi_adjust_for_abi( - &self, - fn_abi: &mut FnAbi<'tcx, Ty<'tcx>>, - abi: SpecAbi, - ) -> Result<(), FnAbiError<'tcx>> { - if abi == SpecAbi::Unadjusted { - return Ok(()); - } - - if abi == SpecAbi::Rust - || abi == SpecAbi::RustCall - || abi == SpecAbi::RustIntrinsic - || abi == SpecAbi::PlatformIntrinsic - { - let fixup = |arg: &mut ArgAbi<'tcx, Ty<'tcx>>| { - if arg.is_ignore() { - return; - } - - match arg.layout.abi { - Abi::Aggregate { .. } => {} - - // This is a fun case! The gist of what this is doing is - // that we want callers and callees to always agree on the - // ABI of how they pass SIMD arguments. 
If we were to *not* - // make these arguments indirect then they'd be immediates - // in LLVM, which means that they'd used whatever the - // appropriate ABI is for the callee and the caller. That - // means, for example, if the caller doesn't have AVX - // enabled but the callee does, then passing an AVX argument - // across this boundary would cause corrupt data to show up. - // - // This problem is fixed by unconditionally passing SIMD - // arguments through memory between callers and callees - // which should get them all to agree on ABI regardless of - // target feature sets. Some more information about this - // issue can be found in #44367. - // - // Note that the platform intrinsic ABI is exempt here as - // that's how we connect up to LLVM and it's unstable - // anyway, we control all calls to it in libstd. - Abi::Vector { .. } - if abi != SpecAbi::PlatformIntrinsic - && self.tcx.sess.target.simd_types_indirect => - { - arg.make_indirect(); - return; - } - - _ => return, - } - - let size = arg.layout.size; - if arg.layout.is_unsized() || size > Pointer.size(self) { - arg.make_indirect(); - } else { - // We want to pass small aggregates as immediates, but using - // a LLVM aggregate type for this leads to bad optimizations, - // so we pick an appropriately sized integer type instead. - arg.cast_to(Reg { kind: RegKind::Integer, size }); - } - }; - fixup(&mut fn_abi.ret); - for arg in &mut fn_abi.args { - fixup(arg); - } - } else { - fn_abi.adjust_for_foreign_abi(self, abi)?; - } - - Ok(()) - } -} - -fn make_thin_self_ptr<'tcx>( - cx: &(impl HasTyCtxt<'tcx> + HasParamEnv<'tcx>), - layout: TyAndLayout<'tcx>, -) -> TyAndLayout<'tcx> { - let tcx = cx.tcx(); - let fat_pointer_ty = if layout.is_unsized() { - // unsized `self` is passed as a pointer to `self` - // FIXME (mikeyhew) change this to use &own if it is ever added to the language - tcx.mk_mut_ptr(layout.ty) - } else { - match layout.abi { - Abi::ScalarPair(..) => (), - _ => bug!("receiver type has unsupported layout: {:?}", layout), - } - - // In the case of Rc<Self>, we need to explicitly pass a *mut RcBox<Self> - // with a Scalar (not ScalarPair) ABI. This is a hack that is understood - // elsewhere in the compiler as a method on a `dyn Trait`. - // To get the type `*mut RcBox<Self>`, we just keep unwrapping newtypes until we - // get a built-in pointer type - let mut fat_pointer_layout = layout; - 'descend_newtypes: while !fat_pointer_layout.ty.is_unsafe_ptr() - && !fat_pointer_layout.ty.is_region_ptr() - { - for i in 0..fat_pointer_layout.fields.count() { - let field_layout = fat_pointer_layout.field(cx, i); - - if !field_layout.is_zst() { - fat_pointer_layout = field_layout; - continue 'descend_newtypes; - } - } - - bug!("receiver has no non-zero-sized fields {:?}", fat_pointer_layout); - } - - fat_pointer_layout.ty - }; - - // we now have a type like `*mut RcBox<dyn Trait>` - // change its layout to that of `*mut ()`, a thin pointer, but keep the same type - // this is understood as a special case elsewhere in the compiler - let unit_ptr_ty = tcx.mk_mut_ptr(tcx.mk_unit()); - - TyAndLayout { - ty: fat_pointer_ty, - - // NOTE(eddyb) using an empty `ParamEnv`, and `unwrap`-ing the `Result` - // should always work because the type is always `*mut ()`. 
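`make_thin_self_ptr`, whose comments appear above, exists so that a virtual call through `Box<dyn Trait>`, `Rc<dyn Trait>`, or a plain `&dyn Trait` passes only the thin data pointer to the callee, with the caller performing the vtable lookup. From the surface language this is simply that smart-pointer receivers work on trait objects (types and names below are invented for illustration):

    use std::rc::Rc;

    trait Greet {
        fn hello(&self) -> String;
    }

    struct English;

    impl Greet for English {
        fn hello(&self) -> String {
            "hello".to_string()
        }
    }

    fn main() {
        // Virtual calls through fat-pointer receivers: the callee only ever
        // sees a thin `self` pointer; the vtable lookup happens at the call site.
        let boxed: Box<dyn Greet> = Box::new(English);
        let shared: Rc<dyn Greet> = Rc::new(English);
        assert_eq!(boxed.hello(), "hello");
        assert_eq!(shared.hello(), "hello");
    }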
- ..tcx.layout_of(ty::ParamEnv::reveal_all().and(unit_ptr_ty)).unwrap() - } -} diff --git a/compiler/rustc_middle/src/ty/list.rs b/compiler/rustc_middle/src/ty/list.rs index db3b5cfd1..79365ef28 100644 --- a/compiler/rustc_middle/src/ty/list.rs +++ b/compiler/rustc_middle/src/ty/list.rs @@ -65,6 +65,10 @@ impl<T> List<T> { pub fn len(&self) -> usize { self.len } + + pub fn as_slice(&self) -> &[T] { + self + } } impl<T: Copy> List<T> { diff --git a/compiler/rustc_middle/src/ty/mod.rs b/compiler/rustc_middle/src/ty/mod.rs index 02da02568..a42d05706 100644 --- a/compiler/rustc_middle/src/ty/mod.rs +++ b/compiler/rustc_middle/src/ty/mod.rs @@ -15,8 +15,9 @@ pub use self::AssocItemContainer::*; pub use self::BorrowKind::*; pub use self::IntVarValue::*; pub use self::Variance::*; +use crate::error::{OpaqueHiddenTypeMismatch, TypeMismatchReason}; use crate::metadata::ModChild; -use crate::middle::privacy::AccessLevels; +use crate::middle::privacy::EffectiveVisibilities; use crate::mir::{Body, GeneratorLayout}; use crate::traits::{self, Reveal}; use crate::ty; @@ -25,6 +26,7 @@ use crate::ty::util::Discr; pub use adt::*; pub use assoc::*; pub use generics::*; +use hir::OpaqueTyOrigin; use rustc_ast as ast; use rustc_ast::node_id::NodeMap; use rustc_attr as attr; @@ -36,10 +38,13 @@ use rustc_data_structures::tagged_ptr::CopyTaggedPtr; use rustc_hir as hir; use rustc_hir::def::{CtorKind, CtorOf, DefKind, LifetimeRes, Res}; use rustc_hir::def_id::{CrateNum, DefId, LocalDefId, LocalDefIdMap}; +use rustc_hir::definitions::Definitions; use rustc_hir::Node; use rustc_index::vec::IndexVec; use rustc_macros::HashStable; use rustc_query_system::ich::StableHashingContext; +use rustc_serialize::{Decodable, Encodable}; +use rustc_session::cstore::CrateStoreDyn; use rustc_span::hygiene::MacroKind; use rustc_span::symbol::{kw, sym, Ident, Symbol}; use rustc_span::{ExpnId, Span}; @@ -49,10 +54,14 @@ pub use vtable::*; use std::fmt::Debug; use std::hash::{Hash, Hasher}; +use std::marker::PhantomData; +use std::mem; +use std::num::NonZeroUsize; use std::ops::ControlFlow; use std::{fmt, str}; pub use crate::ty::diagnostics::*; +pub use rustc_type_ir::DynKind::*; pub use rustc_type_ir::InferTy::*; pub use rustc_type_ir::RegionKind::*; pub use rustc_type_ir::TyKind::*; @@ -67,11 +76,11 @@ pub use self::closure::{ CAPTURE_STRUCT_LOCAL, }; pub use self::consts::{ - Const, ConstInt, ConstKind, ConstS, InferConst, ScalarInt, Unevaluated, ValTree, + Const, ConstInt, ConstKind, ConstS, InferConst, ScalarInt, UnevaluatedConst, ValTree, }; pub use self::context::{ tls, CanonicalUserType, CanonicalUserTypeAnnotation, CanonicalUserTypeAnnotations, - CtxtInterners, DelaySpanBugEmitted, FreeRegionInfo, GeneratorDiagnosticData, + CtxtInterners, DeducedParamAttrs, DelaySpanBugEmitted, FreeRegionInfo, GeneratorDiagnosticData, GeneratorInteriorTypeCause, GlobalCtxt, Lift, OnDiskCache, TyCtxt, TypeckResults, UserType, UserTypeAnnotationIndex, }; @@ -83,9 +92,9 @@ pub use self::sty::BoundRegionKind::*; pub use self::sty::{ Article, Binder, BoundRegion, BoundRegionKind, BoundTy, BoundTyKind, BoundVar, BoundVariableKind, CanonicalPolyFnSig, ClosureSubsts, ClosureSubstsParts, ConstVid, - EarlyBinder, EarlyBoundRegion, ExistentialPredicate, ExistentialProjection, - ExistentialTraitRef, FnSig, FreeRegion, GenSig, GeneratorSubsts, GeneratorSubstsParts, - InlineConstSubsts, InlineConstSubstsParts, ParamConst, ParamTy, PolyExistentialProjection, + EarlyBoundRegion, ExistentialPredicate, ExistentialProjection, ExistentialTraitRef, FnSig, + 
FreeRegion, GenSig, GeneratorSubsts, GeneratorSubstsParts, InlineConstSubsts, + InlineConstSubstsParts, ParamConst, ParamTy, PolyExistentialProjection, PolyExistentialTraitRef, PolyFnSig, PolyGenSig, PolyTraitRef, ProjectionTy, Region, RegionKind, RegionVid, TraitRef, TyKind, TypeAndMut, UpvarSubsts, VarianceDiagInfo, }; @@ -125,6 +134,7 @@ mod generics; mod impls_ty; mod instance; mod list; +mod opaque_types; mod parameterized; mod rvalue_scopes; mod structural_impls; @@ -134,8 +144,15 @@ mod sty; pub type RegisteredTools = FxHashSet<Ident>; -#[derive(Debug)] pub struct ResolverOutputs { + pub definitions: Definitions, + pub global_ctxt: ResolverGlobalCtxt, + pub ast_lowering: ResolverAstLowering, +} + +#[derive(Debug)] +pub struct ResolverGlobalCtxt { + pub cstore: Box<CrateStoreDyn>, pub visibilities: FxHashMap<LocalDefId, Visibility>, /// This field is used to decide whether we should make `PRIVATE_IN_PUBLIC` a hard error. pub has_pub_restricted: bool, @@ -143,7 +160,7 @@ pub struct ResolverOutputs { pub expn_that_defined: FxHashMap<LocalDefId, ExpnId>, /// Reference span for definitions. pub source_span: IndexVec<LocalDefId, Span>, - pub access_levels: AccessLevels, + pub effective_visibilities: EffectiveVisibilities, pub extern_crate_map: FxHashMap<LocalDefId, CrateNum>, pub maybe_unused_trait_imports: FxIndexSet<LocalDefId>, pub maybe_unused_extern_crates: Vec<(LocalDefId, Span)>, @@ -177,11 +194,6 @@ pub struct ResolverAstLowering { pub label_res_map: NodeMap<ast::NodeId>, /// Resolutions for lifetimes. pub lifetimes_res_map: NodeMap<LifetimeRes>, - /// Mapping from generics `def_id`s to TAIT generics `def_id`s. - /// For each captured lifetime (e.g., 'a), we create a new lifetime parameter that is a generic - /// defined on the TAIT, so we have type Foo<'a1> = ... and we establish a mapping in this - /// field from the original parameter 'a to the new parameter 'a1. - pub generics_def_id_map: Vec<FxHashMap<LocalDefId, LocalDefId>>, /// Lifetime parameters that lowering will have to introduce. pub extra_lifetime_params_map: NodeMap<Vec<(Ident, ast::NodeId, LifetimeRes)>>, @@ -262,13 +274,11 @@ impl fmt::Display for ImplPolarity { } #[derive(Clone, Debug, PartialEq, Eq, Copy, Hash, Encodable, Decodable, HashStable)] -pub enum Visibility { +pub enum Visibility<Id = LocalDefId> { /// Visible everywhere (including in other crates). Public, /// Visible only in the given crate-local module. - Restricted(DefId), - /// Not visible anywhere in the local crate. This is the visibility of private external items. - Invisible, + Restricted(Id), } #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, HashStable, TyEncodable, TyDecodable)] @@ -359,31 +369,45 @@ impl<'tcx> DefIdTree for TyCtxt<'tcx> { } } -impl Visibility { - /// Returns `true` if an item with this visibility is accessible from the given block. - pub fn is_accessible_from<T: DefIdTree>(self, module: DefId, tree: T) -> bool { - let restriction = match self { - // Public items are visible everywhere. - Visibility::Public => return true, - // Private items from other crates are visible nowhere. - Visibility::Invisible => return false, - // Restricted items are visible in an arbitrary local module. 
- Visibility::Restricted(other) if other.krate != module.krate => return false, - Visibility::Restricted(module) => module, - }; +impl<Id> Visibility<Id> { + pub fn is_public(self) -> bool { + matches!(self, Visibility::Public) + } - tree.is_descendant_of(module, restriction) + pub fn map_id<OutId>(self, f: impl FnOnce(Id) -> OutId) -> Visibility<OutId> { + match self { + Visibility::Public => Visibility::Public, + Visibility::Restricted(id) => Visibility::Restricted(f(id)), + } + } +} + +impl<Id: Into<DefId>> Visibility<Id> { + pub fn to_def_id(self) -> Visibility<DefId> { + self.map_id(Into::into) + } + + /// Returns `true` if an item with this visibility is accessible from the given module. + pub fn is_accessible_from(self, module: impl Into<DefId>, tree: impl DefIdTree) -> bool { + match self { + // Public items are visible everywhere. + Visibility::Public => true, + Visibility::Restricted(id) => tree.is_descendant_of(module.into(), id.into()), + } } /// Returns `true` if this visibility is at least as accessible as the given visibility - pub fn is_at_least<T: DefIdTree>(self, vis: Visibility, tree: T) -> bool { - let vis_restriction = match vis { - Visibility::Public => return self == Visibility::Public, - Visibility::Invisible => return true, - Visibility::Restricted(module) => module, - }; + pub fn is_at_least(self, vis: Visibility<impl Into<DefId>>, tree: impl DefIdTree) -> bool { + match vis { + Visibility::Public => self.is_public(), + Visibility::Restricted(id) => self.is_accessible_from(id, tree), + } + } +} - self.is_accessible_from(vis_restriction, tree) +impl Visibility<DefId> { + pub fn expect_local(self) -> Visibility { + self.map_id(|id| id.expect_local()) } // Returns `true` if this item is visible anywhere in the local crate. @@ -391,13 +415,8 @@ impl Visibility { match self { Visibility::Public => true, Visibility::Restricted(def_id) => def_id.is_local(), - Visibility::Invisible => false, } } - - pub fn is_public(self) -> bool { - matches!(self, Visibility::Public) - } } /// The crate variances map is computed during typeck and contains the @@ -468,15 +487,6 @@ pub(crate) struct TyS<'tcx> { outer_exclusive_binder: ty::DebruijnIndex, } -// `TyS` is used a lot. Make sure it doesn't unintentionally get bigger. -#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))] -static_assert_size!(TyS<'_>, 40); - -// We are actually storing a stable hash cache next to the type, so let's -// also check the full size -#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))] -static_assert_size!(WithStableHash<TyS<'_>>, 56); - /// Use this rather than `TyS`, whenever possible. #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, HashStable)] #[rustc_diagnostic_item = "Ty"] @@ -533,10 +543,6 @@ pub(crate) struct PredicateS<'tcx> { outer_exclusive_binder: ty::DebruijnIndex, } -// This type is used a lot. Make sure it doesn't unintentionally get bigger. -#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))] -static_assert_size!(PredicateS<'_>, 56); - /// Use this rather than `PredicateS`, whenever possible. #[derive(Clone, Copy, PartialEq, Eq, Hash)] #[rustc_pass_by_value] @@ -593,6 +599,29 @@ impl<'tcx> Predicate<'tcx> { } self } + + /// Whether this projection can be soundly normalized. + /// + /// Wf predicates must not be normalized, as normalization + /// can remove required bounds which would cause us to + /// unsoundly accept some programs. See #91068. 
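The reworked `Visibility` above collapses accessibility to either `Public` or `Restricted(module)`, with `is_accessible_from` walking the module tree. Its surface-language counterpart is the `pub(...)` family of restrictions:

    mod outer {
        pub mod inner {
            pub(crate) fn crate_wide() {}     // Restricted(crate root)
            pub(super) fn outer_only() {}     // Restricted(outer)
            pub(in crate::outer) fn same() {} // the same restriction, spelled out
            pub fn public() {}                // Public
        }

        pub fn call_from_outer() {
            // `outer` is a descendant of itself, so all of these are accessible here.
            inner::crate_wide();
            inner::outer_only();
            inner::same();
            inner::public();
        }
    }

    fn main() {
        outer::call_from_outer();
        outer::inner::crate_wide(); // the crate root is an ancestor of every module
        outer::inner::public();
        // outer::inner::outer_only(); // error: only visible inside `outer`
    }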
+ #[inline] + pub fn allow_normalization(self) -> bool { + match self.kind().skip_binder() { + PredicateKind::WellFormed(_) => false, + PredicateKind::Trait(_) + | PredicateKind::RegionOutlives(_) + | PredicateKind::TypeOutlives(_) + | PredicateKind::Projection(_) + | PredicateKind::ObjectSafe(_) + | PredicateKind::ClosureKind(_, _, _) + | PredicateKind::Subtype(_) + | PredicateKind::Coerce(_) + | PredicateKind::ConstEvaluatable(_) + | PredicateKind::ConstEquate(_, _) + | PredicateKind::TypeWellFormedFromEnv(_) => true, + } + } } impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for Predicate<'tcx> { @@ -617,7 +646,7 @@ impl rustc_errors::IntoDiagnosticArg for Predicate<'_> { } #[derive(Clone, Copy, PartialEq, Eq, Hash, TyEncodable, TyDecodable)] -#[derive(HashStable, TypeFoldable, TypeVisitable)] +#[derive(HashStable, TypeFoldable, TypeVisitable, Lift)] pub enum PredicateKind<'tcx> { /// Corresponds to `where Foo: Bar<A, B, C>`. `Foo` here would be /// the `Self` type of the trait reference and `A`, `B`, and `C` @@ -663,7 +692,7 @@ pub enum PredicateKind<'tcx> { Coerce(CoercePredicate<'tcx>), /// Constant initializer must evaluate successfully. - ConstEvaluatable(ty::Unevaluated<'tcx, ()>), + ConstEvaluatable(ty::Const<'tcx>), /// Constants must be equal. The first component is the const that is expected. ConstEquate(Const<'tcx>, Const<'tcx>), @@ -789,7 +818,7 @@ impl<'tcx> Predicate<'tcx> { } #[derive(Clone, Copy, PartialEq, Eq, Hash, TyEncodable, TyDecodable)] -#[derive(HashStable, TypeFoldable, TypeVisitable)] +#[derive(HashStable, TypeFoldable, TypeVisitable, Lift)] pub struct TraitPredicate<'tcx> { pub trait_ref: TraitRef<'tcx>, @@ -842,6 +871,11 @@ impl<'tcx> TraitPredicate<'tcx> { (BoundConstness::ConstIfConst, hir::Constness::NotConst) => false, } } + + pub fn without_const(mut self) -> Self { + self.constness = BoundConstness::NotConst; + self + } } impl<'tcx> PolyTraitPredicate<'tcx> { @@ -869,7 +903,7 @@ impl<'tcx> PolyTraitPredicate<'tcx> { } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)] -#[derive(HashStable, TypeFoldable, TypeVisitable)] +#[derive(HashStable, TypeFoldable, TypeVisitable, Lift)] pub struct OutlivesPredicate<A, B>(pub A, pub B); // `A: B` pub type RegionOutlivesPredicate<'tcx> = OutlivesPredicate<ty::Region<'tcx>, ty::Region<'tcx>>; pub type TypeOutlivesPredicate<'tcx> = OutlivesPredicate<Ty<'tcx>, ty::Region<'tcx>>; @@ -880,7 +914,7 @@ pub type PolyTypeOutlivesPredicate<'tcx> = ty::Binder<'tcx, TypeOutlivesPredicat /// whether the `a` type is the type that we should label as "expected" when /// presenting user diagnostics. #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, TyEncodable, TyDecodable)] -#[derive(HashStable, TypeFoldable, TypeVisitable)] +#[derive(HashStable, TypeFoldable, TypeVisitable, Lift)] pub struct SubtypePredicate<'tcx> { pub a_is_expected: bool, pub a: Ty<'tcx>, @@ -890,49 +924,142 @@ pub type PolySubtypePredicate<'tcx> = ty::Binder<'tcx, SubtypePredicate<'tcx>>; /// Encodes that we have to coerce *from* the `a` type to the `b` type. 
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, TyEncodable, TyDecodable)] -#[derive(HashStable, TypeFoldable, TypeVisitable)] +#[derive(HashStable, TypeFoldable, TypeVisitable, Lift)] pub struct CoercePredicate<'tcx> { pub a: Ty<'tcx>, pub b: Ty<'tcx>, } pub type PolyCoercePredicate<'tcx> = ty::Binder<'tcx, CoercePredicate<'tcx>>; -#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, TyEncodable, TyDecodable)] -#[derive(HashStable, TypeFoldable, TypeVisitable)] -pub enum Term<'tcx> { - Ty(Ty<'tcx>), - Const(Const<'tcx>), +#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub struct Term<'tcx> { + ptr: NonZeroUsize, + marker: PhantomData<(Ty<'tcx>, Const<'tcx>)>, +} + +impl Debug for Term<'_> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let data = if let Some(ty) = self.ty() { + format!("Term::Ty({:?})", ty) + } else if let Some(ct) = self.ct() { + format!("Term::Ct({:?})", ct) + } else { + unreachable!() + }; + f.write_str(&data) + } } impl<'tcx> From<Ty<'tcx>> for Term<'tcx> { fn from(ty: Ty<'tcx>) -> Self { - Term::Ty(ty) + TermKind::Ty(ty).pack() } } impl<'tcx> From<Const<'tcx>> for Term<'tcx> { fn from(c: Const<'tcx>) -> Self { - Term::Const(c) + TermKind::Const(c).pack() + } +} + +impl<'a, 'tcx> HashStable<StableHashingContext<'a>> for Term<'tcx> { + fn hash_stable(&self, hcx: &mut StableHashingContext<'a>, hasher: &mut StableHasher) { + self.unpack().hash_stable(hcx, hasher); + } +} + +impl<'tcx> TypeFoldable<'tcx> for Term<'tcx> { + fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> { + Ok(self.unpack().try_fold_with(folder)?.pack()) + } +} + +impl<'tcx> TypeVisitable<'tcx> for Term<'tcx> { + fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> { + self.unpack().visit_with(visitor) + } +} + +impl<'tcx, E: TyEncoder<I = TyCtxt<'tcx>>> Encodable<E> for Term<'tcx> { + fn encode(&self, e: &mut E) { + self.unpack().encode(e) + } +} + +impl<'tcx, D: TyDecoder<I = TyCtxt<'tcx>>> Decodable<D> for Term<'tcx> { + fn decode(d: &mut D) -> Self { + let res: TermKind<'tcx> = Decodable::decode(d); + res.pack() } } impl<'tcx> Term<'tcx> { + #[inline] + pub fn unpack(self) -> TermKind<'tcx> { + let ptr = self.ptr.get(); + // SAFETY: use of `Interned::new_unchecked` here is ok because these + // pointers were originally created from `Interned` types in `pack()`, + // and this is just going in the other direction. 
+ unsafe { + match ptr & TAG_MASK { + TYPE_TAG => TermKind::Ty(Ty(Interned::new_unchecked( + &*((ptr & !TAG_MASK) as *const WithStableHash<ty::TyS<'tcx>>), + ))), + CONST_TAG => TermKind::Const(ty::Const(Interned::new_unchecked( + &*((ptr & !TAG_MASK) as *const ty::ConstS<'tcx>), + ))), + _ => core::intrinsics::unreachable(), + } + } + } + pub fn ty(&self) -> Option<Ty<'tcx>> { - if let Term::Ty(ty) = self { Some(*ty) } else { None } + if let TermKind::Ty(ty) = self.unpack() { Some(ty) } else { None } } pub fn ct(&self) -> Option<Const<'tcx>> { - if let Term::Const(c) = self { Some(*c) } else { None } + if let TermKind::Const(c) = self.unpack() { Some(c) } else { None } } pub fn into_arg(self) -> GenericArg<'tcx> { - match self { - Term::Ty(ty) => ty.into(), - Term::Const(c) => c.into(), + match self.unpack() { + TermKind::Ty(ty) => ty.into(), + TermKind::Const(c) => c.into(), } } } +const TAG_MASK: usize = 0b11; +const TYPE_TAG: usize = 0b00; +const CONST_TAG: usize = 0b01; + +#[derive(Debug, Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord, TyEncodable, TyDecodable)] +#[derive(HashStable, TypeFoldable, TypeVisitable)] +pub enum TermKind<'tcx> { + Ty(Ty<'tcx>), + Const(Const<'tcx>), +} + +impl<'tcx> TermKind<'tcx> { + #[inline] + fn pack(self) -> Term<'tcx> { + let (tag, ptr) = match self { + TermKind::Ty(ty) => { + // Ensure we can use the tag bits. + assert_eq!(mem::align_of_val(&*ty.0.0) & TAG_MASK, 0); + (TYPE_TAG, ty.0.0 as *const WithStableHash<ty::TyS<'tcx>> as usize) + } + TermKind::Const(ct) => { + // Ensure we can use the tag bits. + assert_eq!(mem::align_of_val(&*ct.0.0) & TAG_MASK, 0); + (CONST_TAG, ct.0.0 as *const ty::ConstS<'tcx> as usize) + } + }; + + Term { ptr: unsafe { NonZeroUsize::new_unchecked(ptr | tag) }, marker: PhantomData } + } +} + /// This kind of predicate has no *direct* correspondent in the /// syntax, but it roughly corresponds to the syntactic forms: /// @@ -946,7 +1073,7 @@ impl<'tcx> Term<'tcx> { /// Form #2 eventually yields one of these `ProjectionPredicate` /// instances to normalize the LHS. #[derive(Copy, Clone, PartialEq, Eq, Hash, TyEncodable, TyDecodable)] -#[derive(HashStable, TypeFoldable, TypeVisitable)] +#[derive(HashStable, TypeFoldable, TypeVisitable, Lift)] pub struct ProjectionPredicate<'tcx> { pub projection_ty: ProjectionTy<'tcx>, pub term: Term<'tcx>, @@ -1002,6 +1129,12 @@ pub trait ToPredicate<'tcx> { fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Predicate<'tcx>; } +impl<'tcx> ToPredicate<'tcx> for Predicate<'tcx> { + fn to_predicate(self, _tcx: TyCtxt<'tcx>) -> Predicate<'tcx> { + self + } +} + impl<'tcx> ToPredicate<'tcx> for Binder<'tcx, PredicateKind<'tcx>> { #[inline(always)] fn to_predicate(self, tcx: TyCtxt<'tcx>) -> Predicate<'tcx> { @@ -1166,20 +1299,117 @@ pub struct OpaqueHiddenType<'tcx> { impl<'tcx> OpaqueHiddenType<'tcx> { pub fn report_mismatch(&self, other: &Self, tcx: TyCtxt<'tcx>) { // Found different concrete types for the opaque type. 
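The `Term::pack`/`Term::unpack` pair introduced above packs a `Ty` or `Const` interned pointer plus a discriminant into a single tagged `NonZeroUsize`. A minimal standalone sketch of that low-bit tagging technique follows; every name in it (`Payload`, `Packed`, the statics) is invented for illustration and it is not part of the upstream change.

use std::num::NonZeroUsize;

const TAG_MASK: usize = 0b1;
const INT_TAG: usize = 0b0;
const FLT_TAG: usize = 0b1;

// Two payload kinds, discriminated by the low bit of a suitably aligned address.
enum Payload {
    Int(&'static i64),
    Flt(&'static f64),
}

struct Packed(NonZeroUsize);

impl Packed {
    fn pack(payload: Payload) -> Packed {
        let (tag, addr) = match payload {
            Payload::Int(r) => (INT_TAG, r as *const i64 as usize),
            Payload::Flt(r) => (FLT_TAG, r as *const f64 as usize),
        };
        // i64/f64 references are more than byte-aligned, so the low bit is free for the tag.
        assert_eq!(addr & TAG_MASK, 0);
        Packed(NonZeroUsize::new(addr | tag).unwrap())
    }

    fn unpack(&self) -> Payload {
        let raw = self.0.get();
        // SAFETY: `raw` was built in `pack` from a valid 'static reference;
        // masking off the tag restores the original address unchanged.
        unsafe {
            match raw & TAG_MASK {
                INT_TAG => Payload::Int(&*((raw & !TAG_MASK) as *const i64)),
                _ => Payload::Flt(&*((raw & !TAG_MASK) as *const f64)),
            }
        }
    }
}

fn main() {
    static ANSWER: i64 = 42;
    static RATIO: f64 = 1.5;
    for packed in [Packed::pack(Payload::Int(&ANSWER)), Packed::pack(Payload::Flt(&RATIO))] {
        match packed.unpack() {
            Payload::Int(n) => println!("int payload: {n}"),
            Payload::Flt(f) => println!("float payload: {f}"),
        }
    }
}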
- let mut err = tcx.sess.struct_span_err( - other.span, - "concrete type differs from previous defining opaque type use", - ); - err.span_label(other.span, format!("expected `{}`, got `{}`", self.ty, other.ty)); - if self.span == other.span { - err.span_label( - self.span, - "this expression supplies two conflicting concrete types for the same opaque type", - ); + let sub_diag = if self.span == other.span { + TypeMismatchReason::ConflictType { span: self.span } } else { - err.span_note(self.span, "previous use here"); - } - err.emit(); + TypeMismatchReason::PreviousUse { span: self.span } + }; + tcx.sess.emit_err(OpaqueHiddenTypeMismatch { + self_ty: self.ty, + other_ty: other.ty, + other_span: other.span, + sub: sub_diag, + }); + } + + #[instrument(level = "debug", skip(tcx), ret)] + pub fn remap_generic_params_to_declaration_params( + self, + opaque_type_key: OpaqueTypeKey<'tcx>, + tcx: TyCtxt<'tcx>, + // typeck errors have subpar spans for opaque types, so delay error reporting until borrowck. + ignore_errors: bool, + origin: OpaqueTyOrigin, + ) -> Self { + let OpaqueTypeKey { def_id, substs } = opaque_type_key; + + // Use substs to build up a reverse map from regions to their + // identity mappings. This is necessary because of `impl + // Trait` lifetimes are computed by replacing existing + // lifetimes with 'static and remapping only those used in the + // `impl Trait` return type, resulting in the parameters + // shifting. + let id_substs = InternalSubsts::identity_for_item(tcx, def_id.to_def_id()); + debug!(?id_substs); + + let map = substs.iter().zip(id_substs); + + let map: FxHashMap<GenericArg<'tcx>, GenericArg<'tcx>> = match origin { + // HACK: The HIR lowering for async fn does not generate + // any `+ Captures<'x>` bounds for the `impl Future<...>`, so all async fns with lifetimes + // would now fail to compile. We should probably just make hir lowering fill this in properly. + OpaqueTyOrigin::AsyncFn(_) => map.collect(), + OpaqueTyOrigin::FnReturn(_) | OpaqueTyOrigin::TyAlias => { + // Opaque types may only use regions that are bound. So for + // ```rust + // type Foo<'a, 'b, 'c> = impl Trait<'a> + 'b; + // ``` + // we may not use `'c` in the hidden type. + struct OpaqueTypeLifetimeCollector<'tcx> { + lifetimes: FxHashSet<ty::Region<'tcx>>, + } + + impl<'tcx> ty::TypeVisitor<'tcx> for OpaqueTypeLifetimeCollector<'tcx> { + fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<Self::BreakTy> { + self.lifetimes.insert(r); + r.super_visit_with(self) + } + } + + let mut collector = OpaqueTypeLifetimeCollector { lifetimes: Default::default() }; + + for pred in tcx.bound_explicit_item_bounds(def_id.to_def_id()).transpose_iter() { + let pred = pred.map_bound(|(pred, _)| *pred).subst(tcx, id_substs); + + trace!(pred=?pred.kind()); + + // We only ignore opaque type substs if the opaque type is the outermost type. + // The opaque type may be nested within itself via recursion in e.g. + // type Foo<'a> = impl PartialEq<Foo<'a>>; + // which thus mentions `'a` and should thus accept hidden types that borrow 'a + // instead of requiring an additional `+ 'a`. + match pred.kind().skip_binder() { + ty::PredicateKind::Trait(TraitPredicate { + trait_ref: ty::TraitRef { def_id: _, substs }, + constness: _, + polarity: _, + }) => { + trace!(?substs); + for subst in &substs[1..] { + subst.visit_with(&mut collector); + } + } + ty::PredicateKind::Projection(ty::ProjectionPredicate { + projection_ty: ty::ProjectionTy { substs, item_def_id: _ }, + term, + }) => { + for subst in &substs[1..] 
{ + subst.visit_with(&mut collector); + } + term.visit_with(&mut collector); + } + _ => { + pred.visit_with(&mut collector); + } + } + } + let lifetimes = collector.lifetimes; + trace!(?lifetimes); + map.filter(|(_, v)| { + let ty::GenericArgKind::Lifetime(lt) = v.unpack() else { + return true; + }; + lifetimes.contains(<) + }) + .collect() + } + }; + debug!("map = {:#?}", map); + + // Convert the type from the function into a type valid outside + // the function, by replacing invalid regions with 'static, + // after producing an error for each of them. + self.fold_with(&mut opaque_types::ReverseMapper::new(tcx, map, self.span, ignore_errors)) } } @@ -1411,7 +1641,7 @@ impl<'tcx> TypeFoldable<'tcx> for ParamEnv<'tcx> { Ok(ParamEnv::new( self.caller_bounds().try_fold_with(folder)?, self.reveal().try_fold_with(folder)?, - self.constness().try_fold_with(folder)?, + self.constness(), )) } } @@ -1419,8 +1649,7 @@ impl<'tcx> TypeFoldable<'tcx> for ParamEnv<'tcx> { impl<'tcx> TypeVisitable<'tcx> for ParamEnv<'tcx> { fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> { self.caller_bounds().visit_with(visitor)?; - self.reveal().visit_with(visitor)?; - self.constness().visit_with(visitor) + self.reveal().visit_with(visitor) } } @@ -1577,7 +1806,7 @@ impl<'tcx> PolyTraitRef<'tcx> { } #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash, TypeFoldable, TypeVisitable)] -#[derive(HashStable)] +#[derive(HashStable, Lift)] pub struct ParamEnvAnd<'tcx, T> { pub param_env: ParamEnv<'tcx>, pub value: T, @@ -1779,7 +2008,7 @@ pub enum VariantDiscr { pub struct FieldDef { pub did: DefId, pub name: Symbol, - pub vis: Visibility, + pub vis: Visibility<DefId>, } impl PartialEq for FieldDef { @@ -2256,7 +2485,11 @@ impl<'tcx> TyCtxt<'tcx> { } pub fn get_attr(self, did: DefId, attr: Symbol) -> Option<&'tcx ast::Attribute> { - self.get_attrs(did, attr).next() + if cfg!(debug_assertions) && !rustc_feature::is_valid_for_get_attr(attr) { + bug!("get_attr: unexpected called with DefId `{:?}`, attr `{:?}`", did, attr); + } else { + self.get_attrs(did, attr).next() + } } /// Determines whether an item is annotated with an attribute. @@ -2358,6 +2591,25 @@ impl<'tcx> TyCtxt<'tcx> { (ident, scope) } + /// Returns `true` if the debuginfo for `span` should be collapsed to the outermost expansion + /// site. Only applies when `Span` is the result of macro expansion. + /// + /// - If the `collapse_debuginfo` feature is enabled then debuginfo is not collapsed by default + /// and only when a macro definition is annotated with `#[collapse_debuginfo]`. + /// - If `collapse_debuginfo` is not enabled, then debuginfo is collapsed by default. + /// + /// When `-Zdebug-macros` is provided then debuginfo will never be collapsed. + pub fn should_collapse_debuginfo(self, span: Span) -> bool { + !self.sess.opts.unstable_opts.debug_macros + && if self.features().collapse_debuginfo { + span.in_macro_expansion_with_collapse_debuginfo() + } else { + // Inlined spans should not be collapsed as that leads to all of the + // inlined code being attributed to the inline callsite. 
+ span.from_expansion() && !span.is_inlined() + } + } + pub fn is_object_safe(self, key: DefId) -> bool { self.object_safety_violations(key).is_empty() } @@ -2372,6 +2624,14 @@ impl<'tcx> TyCtxt<'tcx> { pub fn is_const_default_method(self, def_id: DefId) -> bool { matches!(self.trait_of_item(def_id), Some(trait_id) if self.has_attr(trait_id, sym::const_trait)) } + + pub fn impl_trait_in_trait_parent(self, mut def_id: DefId) -> DefId { + while let def_kind = self.def_kind(def_id) && def_kind != DefKind::AssocFn { + debug_assert_eq!(def_kind, DefKind::ImplTraitPlaceholder); + def_id = self.parent(def_id); + } + def_id + } } /// Yields the parent function's `LocalDefId` if `def_id` is an `impl Trait` definition. @@ -2445,7 +2705,7 @@ pub fn provide(providers: &mut ty::query::Providers) { closure::provide(providers); context::provide(providers); erase_regions::provide(providers); - layout::provide(providers); + inhabitedness::provide(providers); util::provide(providers); print::provide(providers); super::util::bug::provide(providers); @@ -2453,7 +2713,6 @@ pub fn provide(providers: &mut ty::query::Providers) { *providers = ty::query::Providers { trait_impls_of: trait_def::trait_impls_of_provider, incoherent_impls: trait_def::incoherent_impls_provider, - type_uninhabited_from: inhabitedness::type_uninhabited_from, const_param_default: consts::const_param_default, vtable_allocation: vtable::vtable_allocation_provider, ..*providers @@ -2516,3 +2775,15 @@ pub struct DestructuredConst<'tcx> { pub variant: Option<VariantIdx>, pub fields: &'tcx [ty::Const<'tcx>], } + +// Some types are used a lot. Make sure they don't unintentionally get bigger. +#[cfg(all(target_arch = "x86_64", target_pointer_width = "64"))] +mod size_asserts { + use super::*; + use rustc_data_structures::static_assert_size; + // tidy-alphabetical-start + static_assert_size!(PredicateS<'_>, 48); + static_assert_size!(TyS<'_>, 40); + static_assert_size!(WithStableHash<TyS<'_>>, 56); + // tidy-alphabetical-end +} diff --git a/compiler/rustc_middle/src/ty/normalize_erasing_regions.rs b/compiler/rustc_middle/src/ty/normalize_erasing_regions.rs index 9d8a81165..ee13920d5 100644 --- a/compiler/rustc_middle/src/ty/normalize_erasing_regions.rs +++ b/compiler/rustc_middle/src/ty/normalize_erasing_regions.rs @@ -10,8 +10,7 @@ use crate::mir; use crate::traits::query::NoSolution; use crate::ty::fold::{FallibleTypeFolder, TypeFoldable, TypeFolder}; -use crate::ty::subst::{Subst, SubstsRef}; -use crate::ty::{self, EarlyBinder, Ty, TyCtxt}; +use crate::ty::{self, EarlyBinder, SubstsRef, Ty, TyCtxt}; #[derive(Debug, Copy, Clone, HashStable, TyEncodable, TyDecodable)] pub enum NormalizationError<'tcx> { @@ -36,6 +35,7 @@ impl<'tcx> TyCtxt<'tcx> { /// /// This should only be used outside of type inference. For example, /// it assumes that normalization will succeed. + #[tracing::instrument(level = "debug", skip(self, param_env))] pub fn normalize_erasing_regions<T>(self, param_env: ty::ParamEnv<'tcx>, value: T) -> T where T: TypeFoldable<'tcx>, @@ -100,6 +100,7 @@ impl<'tcx> TyCtxt<'tcx> { /// N.B., currently, higher-ranked type bounds inhibit /// normalization. Therefore, each time we erase them in /// codegen, we need to normalize the contents. 
+ #[tracing::instrument(level = "debug", skip(self, param_env))] pub fn normalize_erasing_late_bound_regions<T>( self, param_env: ty::ParamEnv<'tcx>, @@ -188,13 +189,11 @@ struct NormalizeAfterErasingRegionsFolder<'tcx> { } impl<'tcx> NormalizeAfterErasingRegionsFolder<'tcx> { - #[instrument(skip(self), level = "debug")] fn normalize_generic_arg_after_erasing_regions( &self, arg: ty::GenericArg<'tcx>, ) -> ty::GenericArg<'tcx> { let arg = self.param_env.and(arg); - debug!(?arg); self.tcx.try_normalize_generic_arg_after_erasing_regions(arg).unwrap_or_else(|_| bug!( "Failed to normalize {:?}, maybe try to call `try_normalize_erasing_regions` instead", @@ -215,15 +214,6 @@ impl<'tcx> TypeFolder<'tcx> for NormalizeAfterErasingRegionsFolder<'tcx> { fn fold_const(&mut self, c: ty::Const<'tcx>) -> ty::Const<'tcx> { self.normalize_generic_arg_after_erasing_regions(c.into()).expect_const() } - - #[inline] - fn fold_mir_const(&mut self, c: mir::ConstantKind<'tcx>) -> mir::ConstantKind<'tcx> { - // FIXME: This *probably* needs canonicalization too! - let arg = self.param_env.and(c); - self.tcx - .try_normalize_mir_const_after_erasing_regions(arg) - .unwrap_or_else(|_| bug!("failed to normalize {:?}", c)) - } } struct TryNormalizeAfterErasingRegionsFolder<'tcx> { @@ -268,16 +258,4 @@ impl<'tcx> FallibleTypeFolder<'tcx> for TryNormalizeAfterErasingRegionsFolder<'t Err(_) => Err(NormalizationError::Const(c)), } } - - fn try_fold_mir_const( - &mut self, - c: mir::ConstantKind<'tcx>, - ) -> Result<mir::ConstantKind<'tcx>, Self::Error> { - // FIXME: This *probably* needs canonicalization too! - let arg = self.param_env.and(c); - match self.tcx.try_normalize_mir_const_after_erasing_regions(arg) { - Ok(c) => Ok(c), - Err(_) => Err(NormalizationError::ConstantKind(c)), - } - } } diff --git a/compiler/rustc_middle/src/ty/opaque_types.rs b/compiler/rustc_middle/src/ty/opaque_types.rs new file mode 100644 index 000000000..b05c63109 --- /dev/null +++ b/compiler/rustc_middle/src/ty/opaque_types.rs @@ -0,0 +1,218 @@ +use rustc_data_structures::fx::FxHashMap; +use rustc_middle::ty::fold::{TypeFolder, TypeSuperFoldable}; +use rustc_middle::ty::subst::{GenericArg, GenericArgKind}; +use rustc_middle::ty::{self, Ty, TyCtxt, TypeFoldable}; +use rustc_span::Span; + +/// Converts generic params of a TypeFoldable from one +/// item's generics to another. Usually from a function's generics +/// list to the opaque type's own generics. +pub(super) struct ReverseMapper<'tcx> { + tcx: TyCtxt<'tcx>, + map: FxHashMap<GenericArg<'tcx>, GenericArg<'tcx>>, + /// see call sites to fold_kind_no_missing_regions_error + /// for an explanation of this field. + do_not_error: bool, + + /// We do not want to emit any errors in typeck because + /// the spans in typeck are subpar at the moment. + /// Borrowck will do the same work again (this time with + /// lifetime information) and thus report better errors. + ignore_errors: bool, + + /// Span of function being checked. 
+ span: Span, +} + +impl<'tcx> ReverseMapper<'tcx> { + pub(super) fn new( + tcx: TyCtxt<'tcx>, + map: FxHashMap<GenericArg<'tcx>, GenericArg<'tcx>>, + span: Span, + ignore_errors: bool, + ) -> Self { + Self { tcx, map, do_not_error: false, ignore_errors, span } + } + + fn fold_kind_no_missing_regions_error(&mut self, kind: GenericArg<'tcx>) -> GenericArg<'tcx> { + assert!(!self.do_not_error); + self.do_not_error = true; + let kind = kind.fold_with(self); + self.do_not_error = false; + kind + } + + fn fold_kind_normally(&mut self, kind: GenericArg<'tcx>) -> GenericArg<'tcx> { + assert!(!self.do_not_error); + kind.fold_with(self) + } +} + +impl<'tcx> TypeFolder<'tcx> for ReverseMapper<'tcx> { + fn tcx(&self) -> TyCtxt<'tcx> { + self.tcx + } + + #[instrument(skip(self), level = "debug")] + fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> { + match *r { + // Ignore bound regions and `'static` regions that appear in the + // type, we only need to remap regions that reference lifetimes + // from the function declaration. + // This would ignore `'r` in a type like `for<'r> fn(&'r u32)`. + ty::ReLateBound(..) | ty::ReStatic => return r, + + // If regions have been erased (by writeback), don't try to unerase + // them. + ty::ReErased => return r, + + // The regions that we expect from borrow checking. + ty::ReEarlyBound(_) | ty::ReFree(_) => {} + + ty::RePlaceholder(_) | ty::ReVar(_) => { + // All of the regions in the type should either have been + // erased by writeback, or mapped back to named regions by + // borrow checking. + bug!("unexpected region kind in opaque type: {:?}", r); + } + } + + match self.map.get(&r.into()).map(|k| k.unpack()) { + Some(GenericArgKind::Lifetime(r1)) => r1, + Some(u) => panic!("region mapped to unexpected kind: {:?}", u), + None if self.do_not_error => self.tcx.lifetimes.re_static, + None => { + self.tcx + .sess + .struct_span_err(self.span, "non-defining opaque type use in defining scope") + .span_label( + self.span, + format!( + "lifetime `{}` is part of concrete type but not used in \ + parameter list of the `impl Trait` type alias", + r + ), + ) + .emit(); + + self.tcx().lifetimes.re_static + } + } + } + + fn fold_ty(&mut self, ty: Ty<'tcx>) -> Ty<'tcx> { + match *ty.kind() { + ty::Closure(def_id, substs) => { + // I am a horrible monster and I pray for death. When + // we encounter a closure here, it is always a closure + // from within the function that we are currently + // type-checking -- one that is now being encapsulated + // in an opaque type. Ideally, we would + // go through the types/lifetimes that it references + // and treat them just like we would any other type, + // which means we would error out if we find any + // reference to a type/region that is not in the + // "reverse map". + // + // **However,** in the case of closures, there is a + // somewhat subtle (read: hacky) consideration. The + // problem is that our closure types currently include + // all the lifetime parameters declared on the + // enclosing function, even if they are unused by the + // closure itself. We can't readily filter them out, + // so here we replace those values with `'empty`. This + // can't really make a difference to the rest of the + // compiler; those regions are ignored for the + // outlives relation, and hence don't affect trait + // selection or auto traits, and they are erased + // during codegen. 
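The `ReverseMapper` folder above boils down to a lookup in the inverse substitution map, with an optional `'static` fallback (`do_not_error`) and an error report otherwise. A hedged sketch of that idea on plain data follows; `Arg`, `reverse_map`, and the sample names are invented for the example and are not upstream API.

use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
enum Arg {
    Region(&'static str),
    Type(&'static str),
}

/// Map a concrete argument back to the parameter it came from.
/// `missing_regions_ok` plays the role of `do_not_error`: absent regions are
/// silently replaced by `'static` instead of being reported.
fn reverse_map(arg: Arg, map: &HashMap<Arg, Arg>, missing_regions_ok: bool) -> Result<Arg, String> {
    match map.get(&arg) {
        Some(mapped) => Ok(*mapped),
        None if missing_regions_ok && matches!(arg, Arg::Region(_)) => Ok(Arg::Region("'static")),
        None => Err(format!("{arg:?} is part of the concrete type but not in the parameter list")),
    }
}

fn main() {
    let mut map = HashMap::new();
    // The concrete region `'r` used in the function body maps back to the
    // declared parameter `'a` of the opaque type.
    map.insert(Arg::Region("'r"), Arg::Region("'a"));
    map.insert(Arg::Type("String"), Arg::Type("T"));

    assert_eq!(reverse_map(Arg::Region("'r"), &map, false), Ok(Arg::Region("'a")));
    assert_eq!(reverse_map(Arg::Region("'x"), &map, true), Ok(Arg::Region("'static")));
    assert!(reverse_map(Arg::Type("Vec<u8>"), &map, false).is_err());
    println!("reverse mapping behaves as expected");
}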
+ + let generics = self.tcx.generics_of(def_id); + let substs = self.tcx.mk_substs(substs.iter().enumerate().map(|(index, kind)| { + if index < generics.parent_count { + // Accommodate missing regions in the parent kinds... + self.fold_kind_no_missing_regions_error(kind) + } else { + // ...but not elsewhere. + self.fold_kind_normally(kind) + } + })); + + self.tcx.mk_closure(def_id, substs) + } + + ty::Generator(def_id, substs, movability) => { + let generics = self.tcx.generics_of(def_id); + let substs = self.tcx.mk_substs(substs.iter().enumerate().map(|(index, kind)| { + if index < generics.parent_count { + // Accommodate missing regions in the parent kinds... + self.fold_kind_no_missing_regions_error(kind) + } else { + // ...but not elsewhere. + self.fold_kind_normally(kind) + } + })); + + self.tcx.mk_generator(def_id, substs, movability) + } + + ty::Param(param) => { + // Look it up in the substitution list. + match self.map.get(&ty.into()).map(|k| k.unpack()) { + // Found it in the substitution list; replace with the parameter from the + // opaque type. + Some(GenericArgKind::Type(t1)) => t1, + Some(u) => panic!("type mapped to unexpected kind: {:?}", u), + None => { + debug!(?param, ?self.map); + if !self.ignore_errors { + self.tcx + .sess + .struct_span_err( + self.span, + &format!( + "type parameter `{}` is part of concrete type but not \ + used in parameter list for the `impl Trait` type alias", + ty + ), + ) + .emit(); + } + + self.tcx().ty_error() + } + } + } + + _ => ty.super_fold_with(self), + } + } + + fn fold_const(&mut self, ct: ty::Const<'tcx>) -> ty::Const<'tcx> { + trace!("checking const {:?}", ct); + // Find a const parameter + match ct.kind() { + ty::ConstKind::Param(..) => { + // Look it up in the substitution list. + match self.map.get(&ct.into()).map(|k| k.unpack()) { + // Found it in the substitution list, replace with the parameter from the + // opaque type. + Some(GenericArgKind::Const(c1)) => c1, + Some(u) => panic!("const mapped to unexpected kind: {:?}", u), + None => { + if !self.ignore_errors { + self.tcx.sess.emit_err(ty::ConstNotUsedTraitAlias { + ct: ct.to_string(), + span: self.span, + }); + } + + self.tcx().const_error(ct.ty()) + } + } + } + + _ => ct, + } + } +} diff --git a/compiler/rustc_middle/src/ty/parameterized.rs b/compiler/rustc_middle/src/ty/parameterized.rs index e189ee2fc..e1e705a92 100644 --- a/compiler/rustc_middle/src/ty/parameterized.rs +++ b/compiler/rustc_middle/src/ty/parameterized.rs @@ -1,4 +1,5 @@ -use rustc_hir::def_id::DefId; +use rustc_data_structures::fx::FxHashMap; +use rustc_hir::def_id::{DefId, DefIndex}; use rustc_index::vec::{Idx, IndexVec}; use crate::middle::exported_symbols::ExportedSymbol; @@ -29,6 +30,10 @@ impl<I: Idx + 'static, T: ParameterizedOverTcx> ParameterizedOverTcx for IndexVe type Value<'tcx> = IndexVec<I, T::Value<'tcx>>; } +impl<I: 'static, T: ParameterizedOverTcx> ParameterizedOverTcx for FxHashMap<I, T> { + type Value<'tcx> = FxHashMap<I, T::Value<'tcx>>; +} + impl<T: ParameterizedOverTcx> ParameterizedOverTcx for ty::Binder<'static, T> { type Value<'tcx> = ty::Binder<'tcx, T::Value<'tcx>>; } @@ -53,17 +58,21 @@ trivially_parameterized_over_tcx! 
{ crate::metadata::ModChild, crate::middle::codegen_fn_attrs::CodegenFnAttrs, crate::middle::exported_symbols::SymbolExportInfo, + crate::middle::resolve_lifetime::ObjectLifetimeDefault, crate::mir::ConstQualifs, + ty::AssocItemContainer, + ty::DeducedParamAttrs, ty::Generics, ty::ImplPolarity, ty::ReprOptions, ty::TraitDef, - ty::Visibility, + ty::Visibility<DefIndex>, ty::adjustment::CoerceUnsizedInfo, ty::fast_reject::SimplifiedTypeGen<DefId>, rustc_ast::Attribute, rustc_ast::MacArgs, rustc_attr::ConstStability, + rustc_attr::DefaultBodyStability, rustc_attr::Deprecation, rustc_attr::Stability, rustc_hir::Constness, @@ -74,6 +83,7 @@ trivially_parameterized_over_tcx! { rustc_hir::def::DefKind, rustc_hir::def_id::DefIndex, rustc_hir::definitions::DefKey, + rustc_index::bit_set::BitSet<u32>, rustc_index::bit_set::FiniteBitSet<u32>, rustc_session::cstore::ForeignModule, rustc_session::cstore::LinkagePreference, diff --git a/compiler/rustc_middle/src/ty/print/mod.rs b/compiler/rustc_middle/src/ty/print/mod.rs index d57cf8f01..44b9548db 100644 --- a/compiler/rustc_middle/src/ty/print/mod.rs +++ b/compiler/rustc_middle/src/ty/print/mod.rs @@ -1,9 +1,9 @@ -use crate::ty::subst::{GenericArg, Subst}; +use crate::ty::GenericArg; use crate::ty::{self, DefIdTree, Ty, TyCtxt}; use rustc_data_structures::fx::FxHashSet; use rustc_data_structures::sso::SsoHashSet; -use rustc_hir::def_id::{CrateNum, DefId}; +use rustc_hir::def_id::{CrateNum, DefId, LocalDefId}; use rustc_hir::definitions::{DefPathData, DisambiguatedDefPathData}; // `pretty` is a separate module only for organization. @@ -325,3 +325,12 @@ impl<'tcx, P: Printer<'tcx>> Print<'tcx, P> for ty::Const<'tcx> { cx.print_const(*self) } } + +// This is only used by query descriptions +pub fn describe_as_module(def_id: LocalDefId, tcx: TyCtxt<'_>) -> String { + if def_id.is_top_level_module() { + "top-level module".to_string() + } else { + format!("module `{}`", tcx.def_path_str(def_id.to_def_id())) + } +} diff --git a/compiler/rustc_middle/src/ty/print/pretty.rs b/compiler/rustc_middle/src/ty/print/pretty.rs index 7f2e81a71..ef9aa236b 100644 --- a/compiler/rustc_middle/src/ty/print/pretty.rs +++ b/compiler/rustc_middle/src/ty/print/pretty.rs @@ -1,9 +1,9 @@ use crate::mir::interpret::{AllocRange, GlobalAlloc, Pointer, Provenance, Scalar}; -use crate::ty::subst::{GenericArg, GenericArgKind, Subst}; use crate::ty::{ - self, ConstInt, DefIdTree, ParamConst, ScalarInt, Term, Ty, TyCtxt, TypeFoldable, + self, ConstInt, DefIdTree, ParamConst, ScalarInt, Term, TermKind, Ty, TyCtxt, TypeFoldable, TypeSuperFoldable, TypeSuperVisitable, TypeVisitable, }; +use crate::ty::{GenericArg, GenericArgKind}; use rustc_apfloat::ieee::{Double, Single}; use rustc_data_structures::fx::{FxHashMap, FxIndexMap}; use rustc_data_structures::sso::SsoHashSet; @@ -16,6 +16,7 @@ use rustc_session::cstore::{ExternCrate, ExternCrateSource}; use rustc_span::symbol::{kw, Ident, Symbol}; use rustc_target::abi::Size; use rustc_target::spec::abi::Abi; +use smallvec::SmallVec; use std::cell::Cell; use std::char; @@ -62,6 +63,7 @@ thread_local! { static NO_TRIMMED_PATH: Cell<bool> = const { Cell::new(false) }; static NO_QUERIES: Cell<bool> = const { Cell::new(false) }; static NO_VISIBLE_PATH: Cell<bool> = const { Cell::new(false) }; + static NO_VERBOSE_CONSTANTS: Cell<bool> = const { Cell::new(false) }; } macro_rules! define_helper { @@ -116,6 +118,9 @@ define_helper!( /// Prevent selection of visible paths. 
`Display` impl of DefId will prefer /// visible (public) reexports of types as paths. fn with_no_visible_paths(NoVisibleGuard, NO_VISIBLE_PATH); + /// Prevent verbose printing of constants. Verbose printing of constants is + /// never desirable in some contexts like `std::any::type_name`. + fn with_no_verbose_constants(NoVerboseConstantsGuard, NO_VERBOSE_CONSTANTS); ); /// The "region highlights" are used to control region printing during @@ -619,12 +624,16 @@ pub trait PrettyPrinter<'tcx>: ty::Adt(def, substs) => { p!(print_def_path(def.did(), substs)); } - ty::Dynamic(data, r) => { + ty::Dynamic(data, r, repr) => { let print_r = self.should_print_region(r); if print_r { p!("("); } - p!("dyn ", print(data)); + match repr { + ty::Dyn => p!("dyn "), + ty::DynStar => p!("dyn* "), + } + p!(print(data)); if print_r { p!(" + ", print(r), ")"); } @@ -632,7 +641,15 @@ pub trait PrettyPrinter<'tcx>: ty::Foreign(def_id) => { p!(print_def_path(def_id, &[])); } - ty::Projection(ref data) => p!(print(data)), + ty::Projection(ref data) => { + if !(self.tcx().sess.verbose() || NO_QUERIES.with(|q| q.get())) + && self.tcx().def_kind(data.item_def_id) == DefKind::ImplTraitPlaceholder + { + return self.pretty_print_opaque_impl_type(data.item_def_id, data.substs); + } else { + p!(print(data)) + } + } ty::Placeholder(placeholder) => p!(write("Placeholder({:?})", placeholder)), ty::Opaque(def_id, substs) => { // FIXME(eddyb) print this with `print_def_path`. @@ -746,7 +763,7 @@ pub trait PrettyPrinter<'tcx>: } ty::Array(ty, sz) => { p!("[", print(ty), "; "); - if self.tcx().sess.verbose() { + if !NO_VERBOSE_CONSTANTS.with(|flag| flag.get()) && self.tcx().sess.verbose() { p!(write("{:?}", sz)); } else if let ty::ConstKind::Unevaluated(..) = sz.kind() { // Do not try to evaluate unevaluated constants. If we are const evaluating an @@ -782,9 +799,9 @@ pub trait PrettyPrinter<'tcx>: let mut traits = FxIndexMap::default(); let mut fn_traits = FxIndexMap::default(); let mut is_sized = false; + let mut lifetimes = SmallVec::<[ty::Region<'tcx>; 1]>::new(); - for predicate in bounds.transpose_iter().map(|e| e.map_bound(|(p, _)| *p)) { - let predicate = predicate.subst(tcx, substs); + for (predicate, _) in bounds.subst_iter_copied(tcx, substs) { let bound_predicate = predicate.kind(); match bound_predicate.skip_binder() { @@ -813,6 +830,9 @@ pub trait PrettyPrinter<'tcx>: &mut fn_traits, ); } + ty::PredicateKind::TypeOutlives(outlives) => { + lifetimes.push(outlives.1); + } _ => {} } } @@ -855,7 +875,7 @@ pub trait PrettyPrinter<'tcx>: } p!(")"); - if let Term::Ty(ty) = return_ty.skip_binder() { + if let Some(ty) = return_ty.skip_binder().ty() { if !ty.is_unit() { p!(" -> ", print(return_ty)); } @@ -916,12 +936,14 @@ pub trait PrettyPrinter<'tcx>: // Skip printing `<[generator@] as Generator<_>>::Return` from async blocks, // unless we can find out what generator return type it comes from. 
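The `with_no_verbose_constants` helper added above is expanded by `define_helper!` into a guard type around a thread-local `Cell<bool>`. A rough standalone sketch of that guard pattern, with all names (`VERBOSE`, `QuietGuard`) invented for the example:

use std::cell::Cell;

thread_local! {
    static VERBOSE: Cell<bool> = const { Cell::new(true) };
}

struct QuietGuard {
    previous: bool,
}

impl QuietGuard {
    fn new() -> Self {
        // Flip the flag for the lifetime of the guard, remembering the old value.
        let previous = VERBOSE.with(|flag| flag.replace(false));
        QuietGuard { previous }
    }
}

impl Drop for QuietGuard {
    fn drop(&mut self) {
        // Restore the previous value so guards can nest.
        VERBOSE.with(|flag| flag.set(self.previous));
    }
}

fn is_verbose() -> bool {
    VERBOSE.with(|flag| flag.get())
}

fn main() {
    assert!(is_verbose());
    {
        let _guard = QuietGuard::new();
        assert!(!is_verbose());
    }
    assert!(is_verbose());
    println!("guard restored the thread-local flag");
}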
let term = if let Some(ty) = term.skip_binder().ty() - && let ty::Projection(ty::ProjectionTy { item_def_id, substs }) = ty.kind() - && Some(*item_def_id) == tcx.lang_items().generator_return() + && let ty::Projection(proj) = ty.kind() + && let Some(assoc) = tcx.opt_associated_item(proj.item_def_id) + && assoc.trait_container(tcx) == tcx.lang_items().gen_trait() + && assoc.name == rustc_span::sym::Return { if let ty::Generator(_, substs, _) = substs.type_at(0).kind() { let return_ty = substs.as_generator().return_ty(); - if !return_ty.is_ty_infer() { + if !return_ty.is_ty_var() { return_ty.into() } else { continue; @@ -942,13 +964,9 @@ pub trait PrettyPrinter<'tcx>: p!(write("{} = ", tcx.associated_item(assoc_item_def_id).name)); - match term { - Term::Ty(ty) => { - p!(print(ty)) - } - Term::Const(c) => { - p!(print(c)); - } + match term.unpack() { + TermKind::Ty(ty) => p!(print(ty)), + TermKind::Const(c) => p!(print(c)), }; } @@ -968,6 +986,11 @@ pub trait PrettyPrinter<'tcx>: write!(self, "Sized")?; } + for re in lifetimes { + write!(self, " + ")?; + self = self.print_region(re)?; + } + Ok(self) } @@ -1080,17 +1103,9 @@ pub trait PrettyPrinter<'tcx>: .generics_of(principal.def_id) .own_substs_no_defaults(cx.tcx(), principal.substs); - // Don't print `'_` if there's no unerased regions. - let print_regions = args.iter().any(|arg| match arg.unpack() { - GenericArgKind::Lifetime(r) => !r.is_erased(), - _ => false, - }); - let mut args = args.iter().cloned().filter(|arg| match arg.unpack() { - GenericArgKind::Lifetime(_) => print_regions, - _ => true, - }); let mut projections = predicates.projection_bounds(); + let mut args = args.iter().cloned(); let arg0 = args.next(); let projection0 = projections.next(); if arg0.is_some() || projection0.is_some() { @@ -1170,7 +1185,7 @@ pub trait PrettyPrinter<'tcx>: ) -> Result<Self::Const, Self::Error> { define_scoped_cx!(self); - if self.tcx().sess.verbose() { + if !NO_VERBOSE_CONSTANTS.with(|flag| flag.get()) && self.tcx().sess.verbose() { p!(write("Const({:?}: {:?})", ct.kind(), ct.ty())); return Ok(self); } @@ -1193,15 +1208,7 @@ pub trait PrettyPrinter<'tcx>: } match ct.kind() { - ty::ConstKind::Unevaluated(ty::Unevaluated { - def, - substs, - promoted: Some(promoted), - }) => { - p!(print_value_path(def.did, substs)); - p!(write("::{:?}", promoted)); - } - ty::ConstKind::Unevaluated(ty::Unevaluated { def, substs, promoted: None }) => { + ty::ConstKind::Unevaluated(ty::UnevaluatedConst { def, substs }) => { match self.tcx().def_kind(def.did) { DefKind::Static(..) 
| DefKind::Const | DefKind::AssocConst => { p!(print_value_path(def.did, substs)) @@ -1275,7 +1282,7 @@ pub trait PrettyPrinter<'tcx>: let range = AllocRange { start: offset, size: Size::from_bytes(len) }; if let Ok(byte_str) = - alloc.inner().get_bytes(&self.tcx(), range) + alloc.inner().get_bytes_strip_provenance(&self.tcx(), range) { p!(pretty_print_byte_str(byte_str)) } else { @@ -1401,14 +1408,7 @@ pub trait PrettyPrinter<'tcx>: } fn pretty_print_byte_str(mut self, byte_str: &'tcx [u8]) -> Result<Self::Const, Self::Error> { - define_scoped_cx!(self); - p!("b\""); - for &c in byte_str { - for e in std::ascii::escape_default(c) { - self.write_char(e as char)?; - } - } - p!("\""); + write!(self, "b\"{}\"", byte_str.escape_ascii())?; Ok(self) } @@ -1420,7 +1420,7 @@ pub trait PrettyPrinter<'tcx>: ) -> Result<Self::Const, Self::Error> { define_scoped_cx!(self); - if self.tcx().sess.verbose() { + if !NO_VERBOSE_CONSTANTS.with(|flag| flag.get()) && self.tcx().sess.verbose() { p!(write("ValTree({:?}: ", valtree), print(ty), ")"); return Ok(self); } @@ -1513,6 +1513,10 @@ pub trait PrettyPrinter<'tcx>: } return Ok(self); } + (ty::ValTree::Leaf(leaf), ty::Ref(_, inner_ty, _)) => { + p!(write("&")); + return self.pretty_print_const_scalar_int(leaf, *inner_ty, print_ty); + } (ty::ValTree::Leaf(leaf), _) => { return self.pretty_print_const_scalar_int(leaf, ty, print_ty); } @@ -1532,6 +1536,34 @@ pub trait PrettyPrinter<'tcx>: } Ok(self) } + + fn pretty_closure_as_impl( + mut self, + closure: ty::ClosureSubsts<'tcx>, + ) -> Result<Self::Const, Self::Error> { + let sig = closure.sig(); + let kind = closure.kind_ty().to_opt_closure_kind().unwrap_or(ty::ClosureKind::Fn); + + write!(self, "impl ")?; + self.wrap_binder(&sig, |sig, mut cx| { + define_scoped_cx!(cx); + + p!(print(kind), "("); + for (i, arg) in sig.inputs()[0].tuple_fields().iter().enumerate() { + if i > 0 { + p!(", "); + } + p!(print(arg)); + } + p!(")"); + + if !sig.output().is_unit() { + p!(" -> ", print(sig.output())); + } + + Ok(cx) + }) + } } // HACK(eddyb) boxed to avoid moving around a large struct by-value. @@ -1545,7 +1577,9 @@ pub struct FmtPrinterData<'a, 'tcx> { in_value: bool, pub print_alloc_ids: bool, + // set of all named (non-anonymous) region names used_region_names: FxHashSet<Symbol>, + region_index: usize, binder_depth: usize, printed_type_count: usize, @@ -1820,22 +1854,11 @@ impl<'tcx> Printer<'tcx> for FmtPrinter<'_, 'tcx> { ) -> Result<Self::Path, Self::Error> { self = print_prefix(self)?; - // Don't print `'_` if there's no unerased regions. 
- let print_regions = self.tcx.sess.verbose() - || args.iter().any(|arg| match arg.unpack() { - GenericArgKind::Lifetime(r) => !r.is_erased(), - _ => false, - }); - let args = args.iter().cloned().filter(|arg| match arg.unpack() { - GenericArgKind::Lifetime(_) => print_regions, - _ => true, - }); - - if args.clone().next().is_some() { + if args.first().is_some() { if self.in_value { write!(self, "::")?; } - self.generic_delimiters(|cx| cx.comma_sep(args)) + self.generic_delimiters(|cx| cx.comma_sep(args.iter().cloned())) } else { Ok(self) } @@ -1950,7 +1973,7 @@ impl<'tcx> PrettyPrinter<'tcx> for FmtPrinter<'_, 'tcx> { ty::ReVar(_) | ty::ReErased => false, - ty::ReStatic | ty::ReEmpty(_) => true, + ty::ReStatic => true, } } @@ -2034,14 +2057,6 @@ impl<'tcx> FmtPrinter<'_, 'tcx> { p!("'static"); return Ok(self); } - ty::ReEmpty(ty::UniverseIndex::ROOT) => { - p!("'<empty>"); - return Ok(self); - } - ty::ReEmpty(ui) => { - p!(write("'<empty:{:?}>", ui)); - return Ok(self); - } } p!("'_"); @@ -2055,7 +2070,14 @@ struct RegionFolder<'a, 'tcx> { tcx: TyCtxt<'tcx>, current_index: ty::DebruijnIndex, region_map: BTreeMap<ty::BoundRegion, ty::Region<'tcx>>, - name: &'a mut (dyn FnMut(ty::BoundRegion) -> ty::Region<'tcx> + 'a), + name: &'a mut ( + dyn FnMut( + Option<ty::DebruijnIndex>, // Debruijn index of the folded late-bound region + ty::DebruijnIndex, // Index corresponding to binder level + ty::BoundRegion, + ) -> ty::Region<'tcx> + + 'a + ), } impl<'a, 'tcx> ty::TypeFolder<'tcx> for RegionFolder<'a, 'tcx> { @@ -2086,7 +2108,9 @@ impl<'a, 'tcx> ty::TypeFolder<'tcx> for RegionFolder<'a, 'tcx> { fn fold_region(&mut self, r: ty::Region<'tcx>) -> ty::Region<'tcx> { let name = &mut self.name; let region = match *r { - ty::ReLateBound(_, br) => *self.region_map.entry(br).or_insert_with(|| name(br)), + ty::ReLateBound(db, br) if db >= self.current_index => { + *self.region_map.entry(br).or_insert_with(|| name(Some(db), self.current_index, br)) + } ty::RePlaceholder(ty::PlaceholderRegion { name: kind, .. }) => { // If this is an anonymous placeholder, don't rename. Otherwise, in some // async fns, we get a `for<'r> Send` bound @@ -2095,7 +2119,10 @@ impl<'a, 'tcx> ty::TypeFolder<'tcx> for RegionFolder<'a, 'tcx> { _ => { // Index doesn't matter, since this is just for naming and these never get bound let br = ty::BoundRegion { var: ty::BoundVar::from_u32(0), kind }; - *self.region_map.entry(br).or_insert_with(|| name(br)) + *self + .region_map + .entry(br) + .or_insert_with(|| name(None, self.current_index, br)) } } } @@ -2120,23 +2147,31 @@ impl<'tcx> FmtPrinter<'_, 'tcx> { where T: Print<'tcx, Self, Output = Self, Error = fmt::Error> + TypeFoldable<'tcx>, { - fn name_by_region_index(index: usize) -> Symbol { - match index { - 0 => Symbol::intern("'r"), - 1 => Symbol::intern("'s"), - i => Symbol::intern(&format!("'t{}", i - 2)), + fn name_by_region_index( + index: usize, + available_names: &mut Vec<Symbol>, + num_available: usize, + ) -> Symbol { + if let Some(name) = available_names.pop() { + name + } else { + Symbol::intern(&format!("'z{}", index - num_available)) } } + debug!("name_all_regions"); + // Replace any anonymous late-bound regions with named // variants, using new unique identifiers, so that we can // clearly differentiate between named and unnamed regions in // the output. We'll probably want to tweak this over time to // decide just how much information to give. 
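The reworked `name_by_region_index` above draws fresh lifetime names from a pool of unused single letters and falls back to numbered `'z{n}` names once the pool runs dry. A small self-contained sketch of that scheme (the function name and the `String`-based signature are invented; the real code works over `Symbol` and the printer's `used_region_names` set):

use std::collections::HashSet;

fn fresh_region_names(used: &HashSet<String>, how_many: usize) -> Vec<String> {
    // Candidate pool 'a ..= 'z, reversed so that pop() yields 'a first.
    let mut pool: Vec<String> = ('a'..='z').rev().map(|c| format!("'{c}")).collect();
    pool.retain(|name| !used.contains(name));
    let num_available = pool.len();

    let mut out = Vec::with_capacity(how_many);
    let mut index = 0;
    while out.len() < how_many {
        let name = match pool.pop() {
            Some(name) => name,
            // Pool exhausted: synthesize 'z0, 'z1, ... like the fallback above.
            None => format!("'z{}", index - num_available),
        };
        index += 1;
        if !used.contains(&name) {
            out.push(name);
        }
    }
    out
}

fn main() {
    let used: HashSet<String> = ["'a", "'c"].into_iter().map(String::from).collect();
    // 'a and 'c are already taken, so the first fresh names are 'b, 'd, 'e, 'f.
    assert_eq!(fresh_region_names(&used, 4), ["'b", "'d", "'e", "'f"]);
    println!("fresh names: {:?}", fresh_region_names(&used, 4));
}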
if self.binder_depth == 0 { - self.prepare_late_bound_region_info(value); + self.prepare_region_info(value); } + debug!("self.used_region_names: {:?}", &self.used_region_names); + let mut empty = true; let mut start_or_continue = |cx: &mut Self, start: &str, cont: &str| { let w = if empty { @@ -2153,13 +2188,30 @@ impl<'tcx> FmtPrinter<'_, 'tcx> { define_scoped_cx!(self); + let possible_names = + ('a'..='z').rev().map(|s| Symbol::intern(&format!("'{s}"))).collect::<Vec<_>>(); + + let mut available_names = possible_names + .into_iter() + .filter(|name| !self.used_region_names.contains(&name)) + .collect::<Vec<_>>(); + debug!(?available_names); + let num_available = available_names.len(); + let mut region_index = self.region_index; - let mut next_name = |this: &Self| loop { - let name = name_by_region_index(region_index); - region_index += 1; - if !this.used_region_names.contains(&name) { - break name; + let mut next_name = |this: &Self| { + let mut name; + + loop { + name = name_by_region_index(region_index, &mut available_names, num_available); + region_index += 1; + + if !this.used_region_names.contains(&name) { + break; + } } + + name }; // If we want to print verbosely, then print *all* binders, even if they @@ -2180,6 +2232,7 @@ impl<'tcx> FmtPrinter<'_, 'tcx> { ty::BrAnon(_) | ty::BrEnv => { start_or_continue(&mut self, "for<", ", "); let name = next_name(&self); + debug!(?name); do_continue(&mut self, name); ty::BrNamed(CRATE_DEF_ID.to_def_id(), name) } @@ -2208,24 +2261,63 @@ impl<'tcx> FmtPrinter<'_, 'tcx> { }) } else { let tcx = self.tcx; - let mut name = |br: ty::BoundRegion| { - start_or_continue(&mut self, "for<", ", "); - let kind = match br.kind { + + // Closure used in `RegionFolder` to create names for anonymous late-bound + // regions. We use two `DebruijnIndex`es (one for the currently folded + // late-bound region and the other for the binder level) to determine + // whether a name has already been created for the currently folded region, + // see issue #102392. 
+ let mut name = |lifetime_idx: Option<ty::DebruijnIndex>, + binder_level_idx: ty::DebruijnIndex, + br: ty::BoundRegion| { + let (name, kind) = match br.kind { ty::BrAnon(_) | ty::BrEnv => { let name = next_name(&self); - do_continue(&mut self, name); - ty::BrNamed(CRATE_DEF_ID.to_def_id(), name) + + if let Some(lt_idx) = lifetime_idx { + if lt_idx > binder_level_idx { + let kind = ty::BrNamed(CRATE_DEF_ID.to_def_id(), name); + return tcx.mk_region(ty::ReLateBound( + ty::INNERMOST, + ty::BoundRegion { var: br.var, kind }, + )); + } + } + + (name, ty::BrNamed(CRATE_DEF_ID.to_def_id(), name)) } ty::BrNamed(def_id, kw::UnderscoreLifetime) => { let name = next_name(&self); - do_continue(&mut self, name); - ty::BrNamed(def_id, name) + + if let Some(lt_idx) = lifetime_idx { + if lt_idx > binder_level_idx { + let kind = ty::BrNamed(def_id, name); + return tcx.mk_region(ty::ReLateBound( + ty::INNERMOST, + ty::BoundRegion { var: br.var, kind }, + )); + } + } + + (name, ty::BrNamed(def_id, name)) } ty::BrNamed(_, name) => { - do_continue(&mut self, name); - br.kind + if let Some(lt_idx) = lifetime_idx { + if lt_idx > binder_level_idx { + let kind = br.kind; + return tcx.mk_region(ty::ReLateBound( + ty::INNERMOST, + ty::BoundRegion { var: br.var, kind }, + )); + } + } + + (name, br.kind) } }; + + start_or_continue(&mut self, "for<", ", "); + do_continue(&mut self, name); tcx.mk_region(ty::ReLateBound(ty::INNERMOST, ty::BoundRegion { var: br.var, kind })) }; let mut folder = RegionFolder { @@ -2273,29 +2365,37 @@ impl<'tcx> FmtPrinter<'_, 'tcx> { Ok(inner) } - fn prepare_late_bound_region_info<T>(&mut self, value: &ty::Binder<'tcx, T>) + fn prepare_region_info<T>(&mut self, value: &ty::Binder<'tcx, T>) where T: TypeVisitable<'tcx>, { - struct LateBoundRegionNameCollector<'a, 'tcx> { - used_region_names: &'a mut FxHashSet<Symbol>, + struct RegionNameCollector<'tcx> { + used_region_names: FxHashSet<Symbol>, type_collector: SsoHashSet<Ty<'tcx>>, } - impl<'tcx> ty::visit::TypeVisitor<'tcx> for LateBoundRegionNameCollector<'_, 'tcx> { + impl<'tcx> RegionNameCollector<'tcx> { + fn new() -> Self { + RegionNameCollector { + used_region_names: Default::default(), + type_collector: SsoHashSet::new(), + } + } + } + + impl<'tcx> ty::visit::TypeVisitor<'tcx> for RegionNameCollector<'tcx> { type BreakTy = (); fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<Self::BreakTy> { trace!("address: {:p}", r.0.0); - if let ty::ReLateBound(_, ty::BoundRegion { kind: ty::BrNamed(_, name), .. }) = *r { - self.used_region_names.insert(name); - } else if let ty::RePlaceholder(ty::PlaceholderRegion { - name: ty::BrNamed(_, name), - .. - }) = *r - { + + // Collect all named lifetimes. These allow us to prevent duplication + // of already existing lifetime names when introducing names for + // anonymous late-bound regions. 
+ if let Some(name) = r.get_name() { self.used_region_names.insert(name); } + r.super_visit_with(self) } @@ -2311,12 +2411,9 @@ impl<'tcx> FmtPrinter<'_, 'tcx> { } } - self.used_region_names.clear(); - let mut collector = LateBoundRegionNameCollector { - used_region_names: &mut self.used_region_names, - type_collector: SsoHashSet::new(), - }; + let mut collector = RegionNameCollector::new(); value.visit_with(&mut collector); + self.used_region_names = collector.used_region_names; self.region_index = 0; } } @@ -2446,6 +2543,11 @@ impl<'tcx> ty::PolyTraitPredicate<'tcx> { } } +#[derive(Debug, Copy, Clone, TypeFoldable, TypeVisitable, Lift)] +pub struct PrintClosureAsImpl<'tcx> { + pub closure: ty::ClosureSubsts<'tcx>, +} + forward_display_to_print! { ty::Region<'tcx>, Ty<'tcx>, @@ -2538,6 +2640,10 @@ define_print_and_forward_display! { p!(print(self.0.trait_ref.print_only_trait_path())); } + PrintClosureAsImpl<'tcx> { + p!(pretty_closure_as_impl(self.closure)) + } + ty::ParamTy { p!(write("{}", self.name)) } @@ -2567,9 +2673,9 @@ define_print_and_forward_display! { } ty::Term<'tcx> { - match self { - ty::Term::Ty(ty) => p!(print(ty)), - ty::Term::Const(c) => p!(print(c)), + match self.unpack() { + ty::TermKind::Ty(ty) => p!(print(ty)), + ty::TermKind::Const(c) => p!(print(c)), } } @@ -2609,8 +2715,8 @@ define_print_and_forward_display! { print_value_path(closure_def_id, &[]), write("` implements the trait `{}`", kind)) } - ty::PredicateKind::ConstEvaluatable(uv) => { - p!("the constant `", print_value_path(uv.def.did, uv.substs), "` can be evaluated") + ty::PredicateKind::ConstEvaluatable(ct) => { + p!("the constant `", print(ct), "` can be evaluated") } ty::PredicateKind::ConstEquate(c1, c2) => { p!("the constant `", print(c1), "` equals `", print(c2), "`") @@ -2634,7 +2740,7 @@ fn for_each_def(tcx: TyCtxt<'_>, mut collect_fn: impl for<'b> FnMut(&'b Ident, N // Iterate all local crate items no matter where they are defined. 
let hir = tcx.hir(); for id in hir.items() { - if matches!(tcx.def_kind(id.def_id), DefKind::Use) { + if matches!(tcx.def_kind(id.owner_id), DefKind::Use) { continue; } @@ -2643,7 +2749,7 @@ fn for_each_def(tcx: TyCtxt<'_>, mut collect_fn: impl for<'b> FnMut(&'b Ident, N continue; } - let def_id = item.def_id.to_def_id(); + let def_id = item.owner_id.to_def_id(); let ns = tcx.def_kind(def_id).ns().unwrap_or(Namespace::TypeNS); collect_fn(&item.ident, ns, def_id); } diff --git a/compiler/rustc_middle/src/ty/query.rs b/compiler/rustc_middle/src/ty/query.rs index 2452bcf6a..ec90590ad 100644 --- a/compiler/rustc_middle/src/ty/query.rs +++ b/compiler/rustc_middle/src/ty/query.rs @@ -1,11 +1,11 @@ use crate::dep_graph; use crate::infer::canonical::{self, Canonical}; -use crate::lint::LintLevelMap; +use crate::lint::LintExpectation; use crate::metadata::ModChild; use crate::middle::codegen_fn_attrs::CodegenFnAttrs; use crate::middle::exported_symbols::{ExportedSymbol, SymbolExportInfo}; use crate::middle::lib_features::LibFeatures; -use crate::middle::privacy::AccessLevels; +use crate::middle::privacy::EffectiveVisibilities; use crate::middle::resolve_lifetime::{ObjectLifetimeDefault, Region, ResolveLifetimes}; use crate::middle::stability::{self, DeprecationEntry}; use crate::mir; @@ -32,7 +32,7 @@ use crate::ty::layout::TyAndLayout; use crate::ty::subst::{GenericArg, SubstsRef}; use crate::ty::util::AlwaysRequiresDrop; use crate::ty::GeneratorDiagnosticData; -use crate::ty::{self, AdtSizedConstraint, CrateInherentImpls, ParamEnvAnd, Ty, TyCtxt}; +use crate::ty::{self, CrateInherentImpls, ParamEnvAnd, Ty, TyCtxt}; use rustc_ast as ast; use rustc_ast::expand::allocator::AllocatorKind; use rustc_attr as attr; @@ -40,17 +40,19 @@ use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap, FxIndexSet}; use rustc_data_structures::steal::Steal; use rustc_data_structures::svh::Svh; use rustc_data_structures::sync::Lrc; +use rustc_data_structures::unord::UnordSet; use rustc_errors::ErrorGuaranteed; use rustc_hir as hir; use rustc_hir::def::DefKind; use rustc_hir::def_id::{CrateNum, DefId, DefIdMap, DefIdSet, LocalDefId}; +use rustc_hir::hir_id::OwnerId; use rustc_hir::lang_items::{LangItem, LanguageItems}; use rustc_hir::{Crate, ItemLocalId, TraitCandidate}; use rustc_index::{bit_set::FiniteBitSet, vec::IndexVec}; use rustc_session::config::{EntryFnType, OptLevel, OutputFilenames, SymbolManglingVersion}; use rustc_session::cstore::{CrateDepKind, CrateSource}; use rustc_session::cstore::{ExternCrate, ForeignModule, LinkagePreference, NativeLib}; -use rustc_session::utils::NativeLibKind; +use rustc_session::lint::LintExpectationId; use rustc_session::Limits; use rustc_span::symbol::Symbol; use rustc_span::{Span, DUMMY_SP}; @@ -121,8 +123,8 @@ macro_rules! query_storage { ([][$K:ty, $V:ty]) => { <DefaultCacheSelector as CacheSelector<$K, $V>>::Cache }; - ([(storage $ty:ty) $($rest:tt)*][$K:ty, $V:ty]) => { - <$ty as CacheSelector<$K, $V>>::Cache + ([(arena_cache) $($rest:tt)*][$K:ty, $V:ty]) => { + <ArenaCacheSelector<'tcx> as CacheSelector<$K, $V>>::Cache }; ([$other:tt $($modifiers:tt)*][$($args:tt)*]) => { query_storage!([$($modifiers)*][$($args)*]) @@ -173,7 +175,7 @@ macro_rules! opt_remap_env_constness { } macro_rules! define_callbacks { - (<$tcx:tt> + ( $($(#[$attr:meta])* [$($modifiers:tt)*] fn $name:ident($($K:tt)*) -> $V:ty,)*) => { @@ -187,33 +189,33 @@ macro_rules! 
define_callbacks { pub mod query_keys { use super::*; - $(pub type $name<$tcx> = $($K)*;)* + $(pub type $name<'tcx> = $($K)*;)* } #[allow(nonstandard_style, unused_lifetimes)] pub mod query_values { use super::*; - $(pub type $name<$tcx> = $V;)* + $(pub type $name<'tcx> = $V;)* } #[allow(nonstandard_style, unused_lifetimes)] pub mod query_storage { use super::*; - $(pub type $name<$tcx> = query_storage!([$($modifiers)*][$($K)*, $V]);)* + $(pub type $name<'tcx> = query_storage!([$($modifiers)*][$($K)*, $V]);)* } #[allow(nonstandard_style, unused_lifetimes)] pub mod query_stored { use super::*; - $(pub type $name<$tcx> = <query_storage::$name<$tcx> as QueryStorage>::Stored;)* + $(pub type $name<'tcx> = <query_storage::$name<'tcx> as QueryStorage>::Stored;)* } #[derive(Default)] - pub struct QueryCaches<$tcx> { - $($(#[$attr])* pub $name: query_storage::$name<$tcx>,)* + pub struct QueryCaches<'tcx> { + $($(#[$attr])* pub $name: query_storage::$name<'tcx>,)* } - impl<$tcx> TyCtxtEnsure<$tcx> { + impl<'tcx> TyCtxtEnsure<'tcx> { $($(#[$attr])* #[inline(always)] pub fn $name(self, key: query_helper_param_ty!($($K)*)) { @@ -231,20 +233,20 @@ macro_rules! define_callbacks { })* } - impl<$tcx> TyCtxt<$tcx> { + impl<'tcx> TyCtxt<'tcx> { $($(#[$attr])* #[inline(always)] #[must_use] - pub fn $name(self, key: query_helper_param_ty!($($K)*)) -> query_stored::$name<$tcx> + pub fn $name(self, key: query_helper_param_ty!($($K)*)) -> query_stored::$name<'tcx> { self.at(DUMMY_SP).$name(key) })* } - impl<$tcx> TyCtxtAt<$tcx> { + impl<'tcx> TyCtxtAt<'tcx> { $($(#[$attr])* #[inline(always)] - pub fn $name(self, key: query_helper_param_ty!($($K)*)) -> query_stored::$name<$tcx> + pub fn $name(self, key: query_helper_param_ty!($($K)*)) -> query_stored::$name<'tcx> { let key = key.into_query_param(); opt_remap_env_constness!([$($modifiers)*][key]); @@ -275,8 +277,9 @@ macro_rules! define_callbacks { fn default() -> Self { Providers { $($name: |_, key| bug!( - "`tcx.{}({:?})` unsupported by its crate; \ - perhaps the `{}` query was never assigned a provider function", + "`tcx.{}({:?})` is not supported for external or local crate;\n + hint: Queries can be either made to the local crate, or the external crate. This error means you tried to use it for one that's not supported (likely the local crate).\n + If that's not the case, {} was likely never assigned to a provider function.\n", stringify!($name), key, stringify!($name), @@ -311,11 +314,11 @@ macro_rules! define_callbacks { $($(#[$attr])* fn $name( &'tcx self, - tcx: TyCtxt<$tcx>, + tcx: TyCtxt<'tcx>, span: Span, - key: query_keys::$name<$tcx>, + key: query_keys::$name<'tcx>, mode: QueryMode, - ) -> Option<query_stored::$name<$tcx>>;)* + ) -> Option<query_stored::$name<'tcx>>;)* } }; } @@ -332,10 +335,10 @@ macro_rules! define_callbacks { // Queries marked with `fatal_cycle` do not need the latter implementation, // as they will raise an fatal error on query cycles instead. -rustc_query_append! { [define_callbacks!][<'tcx>] } +rustc_query_append! { define_callbacks! } mod sealed { - use super::{DefId, LocalDefId}; + use super::{DefId, LocalDefId, OwnerId}; /// An analogue of the `Into` trait that's intended only for query parameters. 
/// @@ -365,6 +368,13 @@ mod sealed { self.to_def_id() } } + + impl IntoQueryParam<DefId> for OwnerId { + #[inline(always)] + fn into_query_param(self) -> DefId { + self.to_def_id() + } + } } use sealed::IntoQueryParam; diff --git a/compiler/rustc_middle/src/ty/relate.rs b/compiler/rustc_middle/src/ty/relate.rs index 818affa71..b25b4bd4f 100644 --- a/compiler/rustc_middle/src/ty/relate.rs +++ b/compiler/rustc_middle/src/ty/relate.rs @@ -5,8 +5,8 @@ //! subtyping, type equality, etc. use crate::ty::error::{ExpectedFound, TypeError}; -use crate::ty::subst::{GenericArg, GenericArgKind, Subst, SubstsRef}; -use crate::ty::{self, ImplSubject, Term, Ty, TyCtxt, TypeFoldable}; +use crate::ty::{self, ImplSubject, Term, TermKind, Ty, TyCtxt, TypeFoldable}; +use crate::ty::{GenericArg, GenericArgKind, SubstsRef}; use rustc_hir as ast; use rustc_hir::def_id::DefId; use rustc_span::DUMMY_SP; @@ -441,7 +441,9 @@ pub fn super_relate_tys<'tcx, R: TypeRelation<'tcx>>( (&ty::Foreign(a_id), &ty::Foreign(b_id)) if a_id == b_id => Ok(tcx.mk_foreign(a_id)), - (&ty::Dynamic(a_obj, a_region), &ty::Dynamic(b_obj, b_region)) => { + (&ty::Dynamic(a_obj, a_region, a_repr), &ty::Dynamic(b_obj, b_region, b_repr)) + if a_repr == b_repr => + { let region_bound = relation.with_cause(Cause::ExistentialRegionBound, |relation| { relation.relate_with_variance( ty::Contravariant, @@ -450,7 +452,7 @@ pub fn super_relate_tys<'tcx, R: TypeRelation<'tcx>>( b_region, ) })?; - Ok(tcx.mk_dynamic(relation.relate(a_obj, b_obj)?, region_bound)) + Ok(tcx.mk_dynamic(relation.relate(a_obj, b_obj)?, region_bound, a_repr)) } (&ty::Generator(a_id, a_substs, movability), &ty::Generator(b_id, b_substs, _)) @@ -572,8 +574,8 @@ pub fn super_relate_tys<'tcx, R: TypeRelation<'tcx>>( /// it. pub fn super_relate_consts<'tcx, R: TypeRelation<'tcx>>( relation: &mut R, - a: ty::Const<'tcx>, - b: ty::Const<'tcx>, + mut a: ty::Const<'tcx>, + mut b: ty::Const<'tcx>, ) -> RelateResult<'tcx, ty::Const<'tcx>> { debug!("{}.super_relate_consts(a = {:?}, b = {:?})", relation.tag(), a, b); let tcx = relation.tcx(); @@ -594,9 +596,16 @@ pub fn super_relate_consts<'tcx, R: TypeRelation<'tcx>>( ); } - let eagerly_eval = |x: ty::Const<'tcx>| x.eval(tcx, relation.param_env()); - let a = eagerly_eval(a); - let b = eagerly_eval(b); + // HACK(const_generics): We still need to eagerly evaluate consts when + // relating them because during `normalize_param_env_or_error`, + // we may relate an evaluated constant in a obligation against + // an unnormalized (i.e. unevaluated) const in the param-env. + // FIXME(generic_const_exprs): Once we always lazily unify unevaluated constants + // these `eval` calls can be removed. + if !relation.tcx().features().generic_const_exprs { + a = a.eval(tcx, relation.param_env()); + b = b.eval(tcx, relation.param_env()); + } // Currently, the values that can be unified are primitive types, // and those that derive both `PartialEq` and `Eq`, corresponding @@ -617,15 +626,13 @@ pub fn super_relate_consts<'tcx, R: TypeRelation<'tcx>>( (ty::ConstKind::Unevaluated(au), ty::ConstKind::Unevaluated(bu)) if tcx.features().generic_const_exprs => { - tcx.try_unify_abstract_consts(relation.param_env().and((au.shrink(), bu.shrink()))) + tcx.try_unify_abstract_consts(relation.param_env().and((au, bu))) } // While this is slightly incorrect, it shouldn't matter for `min_const_generics` // and is the better alternative to waiting until `generic_const_exprs` can // be stabilized. 
- (ty::ConstKind::Unevaluated(au), ty::ConstKind::Unevaluated(bu)) - if au.def == bu.def && au.promoted == bu.promoted => - { + (ty::ConstKind::Unevaluated(au), ty::ConstKind::Unevaluated(bu)) if au.def == bu.def => { let substs = relation.relate_with_variance( ty::Variance::Invariant, ty::VarianceDiagInfo::default(), @@ -633,11 +640,7 @@ pub fn super_relate_consts<'tcx, R: TypeRelation<'tcx>>( bu.substs, )?; return Ok(tcx.mk_const(ty::ConstS { - kind: ty::ConstKind::Unevaluated(ty::Unevaluated { - def: au.def, - substs, - promoted: au.promoted, - }), + kind: ty::ConstKind::Unevaluated(ty::UnevaluatedConst { def: au.def, substs }), ty: a.ty(), })); } @@ -803,15 +806,15 @@ impl<'tcx> Relate<'tcx> for ty::TraitPredicate<'tcx> { } } -impl<'tcx> Relate<'tcx> for ty::Term<'tcx> { +impl<'tcx> Relate<'tcx> for Term<'tcx> { fn relate<R: TypeRelation<'tcx>>( relation: &mut R, a: Self, b: Self, ) -> RelateResult<'tcx, Self> { - Ok(match (a, b) { - (Term::Ty(a), Term::Ty(b)) => relation.relate(a, b)?.into(), - (Term::Const(a), Term::Const(b)) => relation.relate(a, b)?.into(), + Ok(match (a.unpack(), b.unpack()) { + (TermKind::Ty(a), TermKind::Ty(b)) => relation.relate(a, b)?.into(), + (TermKind::Const(a), TermKind::Const(b)) => relation.relate(a, b)?.into(), _ => return Err(TypeError::Mismatch), }) } diff --git a/compiler/rustc_middle/src/ty/rvalue_scopes.rs b/compiler/rustc_middle/src/ty/rvalue_scopes.rs index e86dafae3..e79b79a25 100644 --- a/compiler/rustc_middle/src/ty/rvalue_scopes.rs +++ b/compiler/rustc_middle/src/ty/rvalue_scopes.rs @@ -3,7 +3,7 @@ use rustc_data_structures::fx::FxHashMap; use rustc_hir as hir; /// `RvalueScopes` is a mapping from sub-expressions to _extended_ lifetime as determined by -/// rules laid out in `rustc_typeck::check::rvalue_scopes`. +/// rules laid out in `rustc_hir_analysis::check::rvalue_scopes`. #[derive(TyEncodable, TyDecodable, Clone, Debug, Default, Eq, PartialEq, HashStable)] pub struct RvalueScopes { map: FxHashMap<hir::ItemLocalId, Option<Scope>>, diff --git a/compiler/rustc_middle/src/ty/structural_impls.rs b/compiler/rustc_middle/src/ty/structural_impls.rs index 7660a2f3a..2cad333e3 100644 --- a/compiler/rustc_middle/src/ty/structural_impls.rs +++ b/compiler/rustc_middle/src/ty/structural_impls.rs @@ -3,13 +3,12 @@ //! hand, though we've recently added some macros and proc-macros to help with the tedium. 
use crate::mir::interpret; -use crate::mir::ProjectionKind; +use crate::mir::{Field, ProjectionKind}; use crate::ty::fold::{FallibleTypeFolder, TypeFoldable, TypeSuperFoldable}; use crate::ty::print::{with_no_trimmed_paths, FmtPrinter, Printer}; use crate::ty::visit::{TypeSuperVisitable, TypeVisitable, TypeVisitor}; -use crate::ty::{self, InferConst, Lift, Term, Ty, TyCtxt}; +use crate::ty::{self, InferConst, Lift, Term, TermKind, Ty, TyCtxt}; use rustc_data_structures::functor::IdFunctor; -use rustc_hir as hir; use rustc_hir::def::Namespace; use rustc_index::vec::{Idx, IndexVec}; @@ -167,8 +166,8 @@ impl<'tcx> fmt::Debug for ty::PredicateKind<'tcx> { ty::PredicateKind::ClosureKind(closure_def_id, closure_substs, kind) => { write!(f, "ClosureKind({:?}, {:?}, {:?})", closure_def_id, closure_substs, kind) } - ty::PredicateKind::ConstEvaluatable(uv) => { - write!(f, "ConstEvaluatable({:?}, {:?})", uv.def, uv.substs) + ty::PredicateKind::ConstEvaluatable(ct) => { + write!(f, "ConstEvaluatable({ct:?})") } ty::PredicateKind::ConstEquate(c1, c2) => write!(f, "ConstEquate({:?}, {:?})", c1, c2), ty::PredicateKind::TypeWellFormedFromEnv(ty) => { @@ -238,12 +237,24 @@ TrivialTypeTraversalAndLiftImpls! { crate::ty::Variance, ::rustc_span::Span, ::rustc_errors::ErrorGuaranteed, + Field, + interpret::Scalar, + rustc_target::abi::Size, + ty::DelaySpanBugEmitted, + rustc_type_ir::DebruijnIndex, + ty::BoundVar, + ty::Placeholder<ty::BoundVar>, +} + +TrivialTypeTraversalAndLiftImpls! { + for<'tcx> { + ty::ValTree<'tcx>, + } } /////////////////////////////////////////////////////////////////////////// // Lift implementations -// FIXME(eddyb) replace all the uses of `Option::map` with `?`. impl<'tcx, A: Lift<'tcx>, B: Lift<'tcx>> Lift<'tcx> for (A, B) { type Lifted = (A::Lifted, B::Lifted); fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { @@ -261,10 +272,10 @@ impl<'tcx, A: Lift<'tcx>, B: Lift<'tcx>, C: Lift<'tcx>> Lift<'tcx> for (A, B, C) impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for Option<T> { type Lifted = Option<T::Lifted>; fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { - match self { - Some(x) => tcx.lift(x).map(Some), - None => Some(None), - } + Some(match self { + Some(x) => Some(tcx.lift(x)?), + None => None, + }) } } @@ -281,21 +292,21 @@ impl<'tcx, T: Lift<'tcx>, E: Lift<'tcx>> Lift<'tcx> for Result<T, E> { impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for Box<T> { type Lifted = Box<T::Lifted>; fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { - tcx.lift(*self).map(Box::new) + Some(Box::new(tcx.lift(*self)?)) } } impl<'tcx, T: Lift<'tcx> + Clone> Lift<'tcx> for Rc<T> { type Lifted = Rc<T::Lifted>; fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { - tcx.lift(self.as_ref().clone()).map(Rc::new) + Some(Rc::new(tcx.lift(self.as_ref().clone())?)) } } impl<'tcx, T: Lift<'tcx> + Clone> Lift<'tcx> for Arc<T> { type Lifted = Arc<T::Lifted>; fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { - tcx.lift(self.as_ref().clone()).map(Arc::new) + Some(Arc::new(tcx.lift(self.as_ref().clone())?)) } } impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for Vec<T> { @@ -312,159 +323,18 @@ impl<'tcx, I: Idx, T: Lift<'tcx>> Lift<'tcx> for IndexVec<I, T> { } } -impl<'a, 'tcx> Lift<'tcx> for ty::TraitRef<'a> { - type Lifted = ty::TraitRef<'tcx>; - fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { - tcx.lift(self.substs).map(|substs| ty::TraitRef { def_id: self.def_id, substs }) - } -} - -impl<'a, 'tcx> Lift<'tcx> for ty::ExistentialTraitRef<'a> { - type 
Lifted = ty::ExistentialTraitRef<'tcx>; - fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { - tcx.lift(self.substs).map(|substs| ty::ExistentialTraitRef { def_id: self.def_id, substs }) - } -} - -impl<'a, 'tcx> Lift<'tcx> for ty::ExistentialPredicate<'a> { - type Lifted = ty::ExistentialPredicate<'tcx>; - fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { - match self { - ty::ExistentialPredicate::Trait(x) => tcx.lift(x).map(ty::ExistentialPredicate::Trait), - ty::ExistentialPredicate::Projection(x) => { - tcx.lift(x).map(ty::ExistentialPredicate::Projection) - } - ty::ExistentialPredicate::AutoTrait(def_id) => { - Some(ty::ExistentialPredicate::AutoTrait(def_id)) - } - } - } -} - impl<'a, 'tcx> Lift<'tcx> for Term<'a> { type Lifted = ty::Term<'tcx>; fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { - Some(match self { - Term::Ty(ty) => Term::Ty(tcx.lift(ty)?), - Term::Const(c) => Term::Const(tcx.lift(c)?), - }) - } -} - -impl<'a, 'tcx> Lift<'tcx> for ty::TraitPredicate<'a> { - type Lifted = ty::TraitPredicate<'tcx>; - fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<ty::TraitPredicate<'tcx>> { - tcx.lift(self.trait_ref).map(|trait_ref| ty::TraitPredicate { - trait_ref, - constness: self.constness, - polarity: self.polarity, - }) - } -} - -impl<'a, 'tcx> Lift<'tcx> for ty::SubtypePredicate<'a> { - type Lifted = ty::SubtypePredicate<'tcx>; - fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<ty::SubtypePredicate<'tcx>> { - tcx.lift((self.a, self.b)).map(|(a, b)| ty::SubtypePredicate { - a_is_expected: self.a_is_expected, - a, - b, - }) - } -} - -impl<'a, 'tcx> Lift<'tcx> for ty::CoercePredicate<'a> { - type Lifted = ty::CoercePredicate<'tcx>; - fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<ty::CoercePredicate<'tcx>> { - tcx.lift((self.a, self.b)).map(|(a, b)| ty::CoercePredicate { a, b }) - } -} - -impl<'tcx, A: Copy + Lift<'tcx>, B: Copy + Lift<'tcx>> Lift<'tcx> for ty::OutlivesPredicate<A, B> { - type Lifted = ty::OutlivesPredicate<A::Lifted, B::Lifted>; - fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { - tcx.lift((self.0, self.1)).map(|(a, b)| ty::OutlivesPredicate(a, b)) - } -} - -impl<'a, 'tcx> Lift<'tcx> for ty::ProjectionTy<'a> { - type Lifted = ty::ProjectionTy<'tcx>; - fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<ty::ProjectionTy<'tcx>> { - tcx.lift(self.substs) - .map(|substs| ty::ProjectionTy { item_def_id: self.item_def_id, substs }) - } -} - -impl<'a, 'tcx> Lift<'tcx> for ty::ProjectionPredicate<'a> { - type Lifted = ty::ProjectionPredicate<'tcx>; - fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<ty::ProjectionPredicate<'tcx>> { - tcx.lift((self.projection_ty, self.term)) - .map(|(projection_ty, term)| ty::ProjectionPredicate { projection_ty, term }) - } -} - -impl<'a, 'tcx> Lift<'tcx> for ty::ExistentialProjection<'a> { - type Lifted = ty::ExistentialProjection<'tcx>; - fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { - tcx.lift(self.substs).map(|substs| ty::ExistentialProjection { - substs, - term: tcx.lift(self.term).expect("type must lift when substs do"), - item_def_id: self.item_def_id, - }) - } -} - -impl<'a, 'tcx> Lift<'tcx> for ty::PredicateKind<'a> { - type Lifted = ty::PredicateKind<'tcx>; - fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { - match self { - ty::PredicateKind::Trait(data) => tcx.lift(data).map(ty::PredicateKind::Trait), - ty::PredicateKind::Subtype(data) => tcx.lift(data).map(ty::PredicateKind::Subtype), - ty::PredicateKind::Coerce(data) => 
tcx.lift(data).map(ty::PredicateKind::Coerce), - ty::PredicateKind::RegionOutlives(data) => { - tcx.lift(data).map(ty::PredicateKind::RegionOutlives) - } - ty::PredicateKind::TypeOutlives(data) => { - tcx.lift(data).map(ty::PredicateKind::TypeOutlives) - } - ty::PredicateKind::Projection(data) => { - tcx.lift(data).map(ty::PredicateKind::Projection) - } - ty::PredicateKind::WellFormed(ty) => tcx.lift(ty).map(ty::PredicateKind::WellFormed), - ty::PredicateKind::ClosureKind(closure_def_id, closure_substs, kind) => { - tcx.lift(closure_substs).map(|closure_substs| { - ty::PredicateKind::ClosureKind(closure_def_id, closure_substs, kind) - }) + Some( + match self.unpack() { + TermKind::Ty(ty) => TermKind::Ty(tcx.lift(ty)?), + TermKind::Const(c) => TermKind::Const(tcx.lift(c)?), } - ty::PredicateKind::ObjectSafe(trait_def_id) => { - Some(ty::PredicateKind::ObjectSafe(trait_def_id)) - } - ty::PredicateKind::ConstEvaluatable(uv) => { - tcx.lift(uv).map(|uv| ty::PredicateKind::ConstEvaluatable(uv)) - } - ty::PredicateKind::ConstEquate(c1, c2) => { - tcx.lift((c1, c2)).map(|(c1, c2)| ty::PredicateKind::ConstEquate(c1, c2)) - } - ty::PredicateKind::TypeWellFormedFromEnv(ty) => { - tcx.lift(ty).map(ty::PredicateKind::TypeWellFormedFromEnv) - } - } - } -} - -impl<'a, 'tcx, T: Lift<'tcx>> Lift<'tcx> for ty::Binder<'a, T> -where - <T as Lift<'tcx>>::Lifted: TypeVisitable<'tcx>, -{ - type Lifted = ty::Binder<'tcx, T::Lifted>; - fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { - let bound_vars = tcx.lift(self.bound_vars()); - tcx.lift(self.skip_binder()) - .zip(bound_vars) - .map(|(value, vars)| ty::Binder::bind_with_vars(value, vars)) + .pack(), + ) } } - impl<'a, 'tcx> Lift<'tcx> for ty::ParamEnv<'a> { type Lifted = ty::ParamEnv<'tcx>; fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { @@ -473,178 +343,6 @@ impl<'a, 'tcx> Lift<'tcx> for ty::ParamEnv<'a> { } } -impl<'a, 'tcx, T: Lift<'tcx>> Lift<'tcx> for ty::ParamEnvAnd<'a, T> { - type Lifted = ty::ParamEnvAnd<'tcx, T::Lifted>; - fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { - tcx.lift(self.param_env).and_then(|param_env| { - tcx.lift(self.value).map(|value| ty::ParamEnvAnd { param_env, value }) - }) - } -} - -impl<'a, 'tcx> Lift<'tcx> for ty::ClosureSubsts<'a> { - type Lifted = ty::ClosureSubsts<'tcx>; - fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { - tcx.lift(self.substs).map(|substs| ty::ClosureSubsts { substs }) - } -} - -impl<'a, 'tcx> Lift<'tcx> for ty::GeneratorSubsts<'a> { - type Lifted = ty::GeneratorSubsts<'tcx>; - fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { - tcx.lift(self.substs).map(|substs| ty::GeneratorSubsts { substs }) - } -} - -impl<'a, 'tcx> Lift<'tcx> for ty::adjustment::Adjustment<'a> { - type Lifted = ty::adjustment::Adjustment<'tcx>; - fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { - let ty::adjustment::Adjustment { kind, target } = self; - tcx.lift(kind).and_then(|kind| { - tcx.lift(target).map(|target| ty::adjustment::Adjustment { kind, target }) - }) - } -} - -impl<'a, 'tcx> Lift<'tcx> for ty::adjustment::Adjust<'a> { - type Lifted = ty::adjustment::Adjust<'tcx>; - fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { - match self { - ty::adjustment::Adjust::NeverToAny => Some(ty::adjustment::Adjust::NeverToAny), - ty::adjustment::Adjust::Pointer(ptr) => Some(ty::adjustment::Adjust::Pointer(ptr)), - ty::adjustment::Adjust::Deref(overloaded) => { - tcx.lift(overloaded).map(ty::adjustment::Adjust::Deref) - } 
- ty::adjustment::Adjust::Borrow(autoref) => { - tcx.lift(autoref).map(ty::adjustment::Adjust::Borrow) - } - } - } -} - -impl<'a, 'tcx> Lift<'tcx> for ty::adjustment::OverloadedDeref<'a> { - type Lifted = ty::adjustment::OverloadedDeref<'tcx>; - fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { - tcx.lift(self.region).map(|region| ty::adjustment::OverloadedDeref { - region, - mutbl: self.mutbl, - span: self.span, - }) - } -} - -impl<'a, 'tcx> Lift<'tcx> for ty::adjustment::AutoBorrow<'a> { - type Lifted = ty::adjustment::AutoBorrow<'tcx>; - fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { - match self { - ty::adjustment::AutoBorrow::Ref(r, m) => { - tcx.lift(r).map(|r| ty::adjustment::AutoBorrow::Ref(r, m)) - } - ty::adjustment::AutoBorrow::RawPtr(m) => Some(ty::adjustment::AutoBorrow::RawPtr(m)), - } - } -} - -impl<'a, 'tcx> Lift<'tcx> for ty::GenSig<'a> { - type Lifted = ty::GenSig<'tcx>; - fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { - tcx.lift((self.resume_ty, self.yield_ty, self.return_ty)) - .map(|(resume_ty, yield_ty, return_ty)| ty::GenSig { resume_ty, yield_ty, return_ty }) - } -} - -impl<'a, 'tcx> Lift<'tcx> for ty::FnSig<'a> { - type Lifted = ty::FnSig<'tcx>; - fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { - tcx.lift(self.inputs_and_output).map(|x| ty::FnSig { - inputs_and_output: x, - c_variadic: self.c_variadic, - unsafety: self.unsafety, - abi: self.abi, - }) - } -} - -impl<'tcx, T: Lift<'tcx>> Lift<'tcx> for ty::error::ExpectedFound<T> { - type Lifted = ty::error::ExpectedFound<T::Lifted>; - fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { - let ty::error::ExpectedFound { expected, found } = self; - tcx.lift(expected).and_then(|expected| { - tcx.lift(found).map(|found| ty::error::ExpectedFound { expected, found }) - }) - } -} - -impl<'a, 'tcx> Lift<'tcx> for ty::error::TypeError<'a> { - type Lifted = ty::error::TypeError<'tcx>; - fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { - use crate::ty::error::TypeError::*; - - Some(match self { - Mismatch => Mismatch, - ConstnessMismatch(x) => ConstnessMismatch(x), - PolarityMismatch(x) => PolarityMismatch(x), - UnsafetyMismatch(x) => UnsafetyMismatch(x), - AbiMismatch(x) => AbiMismatch(x), - Mutability => Mutability, - ArgumentMutability(i) => ArgumentMutability(i), - TupleSize(x) => TupleSize(x), - FixedArraySize(x) => FixedArraySize(x), - ArgCount => ArgCount, - FieldMisMatch(x, y) => FieldMisMatch(x, y), - RegionsDoesNotOutlive(a, b) => { - return tcx.lift((a, b)).map(|(a, b)| RegionsDoesNotOutlive(a, b)); - } - RegionsInsufficientlyPolymorphic(a, b) => { - return tcx.lift(b).map(|b| RegionsInsufficientlyPolymorphic(a, b)); - } - RegionsOverlyPolymorphic(a, b) => { - return tcx.lift(b).map(|b| RegionsOverlyPolymorphic(a, b)); - } - RegionsPlaceholderMismatch => RegionsPlaceholderMismatch, - IntMismatch(x) => IntMismatch(x), - FloatMismatch(x) => FloatMismatch(x), - Traits(x) => Traits(x), - VariadicMismatch(x) => VariadicMismatch(x), - CyclicTy(t) => return tcx.lift(t).map(|t| CyclicTy(t)), - CyclicConst(ct) => return tcx.lift(ct).map(|ct| CyclicConst(ct)), - ProjectionMismatched(x) => ProjectionMismatched(x), - ArgumentSorts(x, i) => return tcx.lift(x).map(|x| ArgumentSorts(x, i)), - Sorts(x) => return tcx.lift(x).map(Sorts), - ExistentialMismatch(x) => return tcx.lift(x).map(ExistentialMismatch), - ConstMismatch(x) => return tcx.lift(x).map(ConstMismatch), - IntrinsicCast => IntrinsicCast, - TargetFeatureCast(x) => 
TargetFeatureCast(x), - ObjectUnsafeCoercion(x) => return tcx.lift(x).map(ObjectUnsafeCoercion), - }) - } -} - -impl<'a, 'tcx> Lift<'tcx> for ty::InstanceDef<'a> { - type Lifted = ty::InstanceDef<'tcx>; - fn lift_to_tcx(self, tcx: TyCtxt<'tcx>) -> Option<Self::Lifted> { - match self { - ty::InstanceDef::Item(def_id) => Some(ty::InstanceDef::Item(def_id)), - ty::InstanceDef::VTableShim(def_id) => Some(ty::InstanceDef::VTableShim(def_id)), - ty::InstanceDef::ReifyShim(def_id) => Some(ty::InstanceDef::ReifyShim(def_id)), - ty::InstanceDef::Intrinsic(def_id) => Some(ty::InstanceDef::Intrinsic(def_id)), - ty::InstanceDef::FnPtrShim(def_id, ty) => { - Some(ty::InstanceDef::FnPtrShim(def_id, tcx.lift(ty)?)) - } - ty::InstanceDef::Virtual(def_id, n) => Some(ty::InstanceDef::Virtual(def_id, n)), - ty::InstanceDef::ClosureOnceShim { call_once, track_caller } => { - Some(ty::InstanceDef::ClosureOnceShim { call_once, track_caller }) - } - ty::InstanceDef::DropGlue(def_id, ty) => { - Some(ty::InstanceDef::DropGlue(def_id, tcx.lift(ty)?)) - } - ty::InstanceDef::CloneShim(def_id, ty) => { - Some(ty::InstanceDef::CloneShim(def_id, tcx.lift(ty)?)) - } - } - } -} - /////////////////////////////////////////////////////////////////////////// // TypeFoldable implementations. @@ -844,27 +542,21 @@ impl<'tcx, T: TypeVisitable<'tcx>> TypeVisitable<'tcx> for Vec<T> { } } -impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Box<[T]> { - fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> { - self.try_map_id(|t| t.try_fold_with(folder)) - } -} - -impl<'tcx, T: TypeVisitable<'tcx>> TypeVisitable<'tcx> for Box<[T]> { +impl<'tcx, T: TypeVisitable<'tcx>> TypeVisitable<'tcx> for &[T] { fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> { self.iter().try_for_each(|t| t.visit_with(visitor)) } } -impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for ty::EarlyBinder<T> { +impl<'tcx, T: TypeFoldable<'tcx>> TypeFoldable<'tcx> for Box<[T]> { fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> { - self.try_map_bound(|ty| ty.try_fold_with(folder)) + self.try_map_id(|t| t.try_fold_with(folder)) } } -impl<'tcx, T: TypeVisitable<'tcx>> TypeVisitable<'tcx> for ty::EarlyBinder<T> { +impl<'tcx, T: TypeVisitable<'tcx>> TypeVisitable<'tcx> for Box<[T]> { fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> { - self.as_ref().0.visit_with(visitor) + self.iter().try_for_each(|t| t.visit_with(visitor)) } } @@ -901,88 +593,12 @@ impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List<ty::Binder<'tcx, ty::Existentia } } -impl<'tcx> TypeVisitable<'tcx> - for &'tcx ty::List<ty::Binder<'tcx, ty::ExistentialPredicate<'tcx>>> -{ - fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> { - self.iter().try_for_each(|p| p.visit_with(visitor)) - } -} - impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List<ProjectionKind> { fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> { ty::util::fold_list(self, folder, |tcx, v| tcx.intern_projs(v)) } } -impl<'tcx> TypeVisitable<'tcx> for &'tcx ty::List<ProjectionKind> { - fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> { - self.iter().try_for_each(|t| t.visit_with(visitor)) - } -} - -impl<'tcx> TypeFoldable<'tcx> for ty::instance::Instance<'tcx> { - fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> { - use 
crate::ty::InstanceDef::*; - Ok(Self { - substs: self.substs.try_fold_with(folder)?, - def: match self.def { - Item(def) => Item(def.try_fold_with(folder)?), - VTableShim(did) => VTableShim(did.try_fold_with(folder)?), - ReifyShim(did) => ReifyShim(did.try_fold_with(folder)?), - Intrinsic(did) => Intrinsic(did.try_fold_with(folder)?), - FnPtrShim(did, ty) => { - FnPtrShim(did.try_fold_with(folder)?, ty.try_fold_with(folder)?) - } - Virtual(did, i) => Virtual(did.try_fold_with(folder)?, i), - ClosureOnceShim { call_once, track_caller } => { - ClosureOnceShim { call_once: call_once.try_fold_with(folder)?, track_caller } - } - DropGlue(did, ty) => { - DropGlue(did.try_fold_with(folder)?, ty.try_fold_with(folder)?) - } - CloneShim(did, ty) => { - CloneShim(did.try_fold_with(folder)?, ty.try_fold_with(folder)?) - } - }, - }) - } -} - -impl<'tcx> TypeVisitable<'tcx> for ty::instance::Instance<'tcx> { - fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> { - use crate::ty::InstanceDef::*; - self.substs.visit_with(visitor)?; - match self.def { - Item(def) => def.visit_with(visitor), - VTableShim(did) | ReifyShim(did) | Intrinsic(did) | Virtual(did, _) => { - did.visit_with(visitor) - } - FnPtrShim(did, ty) | CloneShim(did, ty) => { - did.visit_with(visitor)?; - ty.visit_with(visitor) - } - DropGlue(did, ty) => { - did.visit_with(visitor)?; - ty.visit_with(visitor) - } - ClosureOnceShim { call_once, track_caller: _ } => call_once.visit_with(visitor), - } - } -} - -impl<'tcx> TypeFoldable<'tcx> for interpret::GlobalId<'tcx> { - fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> { - Ok(Self { instance: self.instance.try_fold_with(folder)?, promoted: self.promoted }) - } -} - -impl<'tcx> TypeVisitable<'tcx> for interpret::GlobalId<'tcx> { - fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> { - self.instance.visit_with(visitor) - } -} - impl<'tcx> TypeFoldable<'tcx> for Ty<'tcx> { fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> { folder.try_fold_ty(self) @@ -1005,9 +621,11 @@ impl<'tcx> TypeSuperFoldable<'tcx> for Ty<'tcx> { ty::Array(typ, sz) => ty::Array(typ.try_fold_with(folder)?, sz.try_fold_with(folder)?), ty::Slice(typ) => ty::Slice(typ.try_fold_with(folder)?), ty::Adt(tid, substs) => ty::Adt(tid, substs.try_fold_with(folder)?), - ty::Dynamic(trait_ty, region) => { - ty::Dynamic(trait_ty.try_fold_with(folder)?, region.try_fold_with(folder)?) 
- } + ty::Dynamic(trait_ty, region, representation) => ty::Dynamic( + trait_ty.try_fold_with(folder)?, + region.try_fold_with(folder)?, + representation, + ), ty::Tuple(ts) => ty::Tuple(ts.try_fold_with(folder)?), ty::FnDef(def_id, substs) => ty::FnDef(def_id, substs.try_fold_with(folder)?), ty::FnPtr(f) => ty::FnPtr(f.try_fold_with(folder)?), @@ -1051,7 +669,7 @@ impl<'tcx> TypeSuperVisitable<'tcx> for Ty<'tcx> { } ty::Slice(typ) => typ.visit_with(visitor), ty::Adt(_, substs) => substs.visit_with(visitor), - ty::Dynamic(ref trait_ty, ref reg) => { + ty::Dynamic(ref trait_ty, ref reg, _) => { trait_ty.visit_with(visitor)?; reg.visit_with(visitor) } @@ -1156,12 +774,6 @@ impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List<ty::Predicate<'tcx>> { } } -impl<'tcx> TypeVisitable<'tcx> for &'tcx ty::List<ty::Predicate<'tcx>> { - fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> { - self.iter().try_for_each(|p| p.visit_with(visitor)) - } -} - impl<'tcx, T: TypeFoldable<'tcx>, I: Idx> TypeFoldable<'tcx> for IndexVec<I, T> { fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> { self.try_map_id(|x| x.try_fold_with(folder)) @@ -1208,34 +820,6 @@ impl<'tcx> TypeSuperVisitable<'tcx> for ty::Const<'tcx> { } } -impl<'tcx> TypeFoldable<'tcx> for ty::ConstKind<'tcx> { - fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> { - Ok(match self { - ty::ConstKind::Infer(ic) => ty::ConstKind::Infer(ic.try_fold_with(folder)?), - ty::ConstKind::Param(p) => ty::ConstKind::Param(p.try_fold_with(folder)?), - ty::ConstKind::Unevaluated(uv) => ty::ConstKind::Unevaluated(uv.try_fold_with(folder)?), - ty::ConstKind::Value(_) - | ty::ConstKind::Bound(..) - | ty::ConstKind::Placeholder(..) - | ty::ConstKind::Error(_) => self, - }) - } -} - -impl<'tcx> TypeVisitable<'tcx> for ty::ConstKind<'tcx> { - fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> { - match *self { - ty::ConstKind::Infer(ic) => ic.visit_with(visitor), - ty::ConstKind::Param(p) => p.visit_with(visitor), - ty::ConstKind::Unevaluated(uv) => uv.visit_with(visitor), - ty::ConstKind::Value(_) - | ty::ConstKind::Bound(..) 
- | ty::ConstKind::Placeholder(_) - | ty::ConstKind::Error(_) => ControlFlow::CONTINUE, - } - } -} - impl<'tcx> TypeFoldable<'tcx> for InferConst<'tcx> { fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, _folder: &mut F) -> Result<Self, F::Error> { Ok(self) @@ -1248,57 +832,8 @@ impl<'tcx> TypeVisitable<'tcx> for InferConst<'tcx> { } } -impl<'tcx> TypeFoldable<'tcx> for ty::Unevaluated<'tcx> { - fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> { - folder.try_fold_unevaluated(self) - } -} - -impl<'tcx> TypeVisitable<'tcx> for ty::Unevaluated<'tcx> { - fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> { - visitor.visit_unevaluated(*self) - } -} - -impl<'tcx> TypeSuperFoldable<'tcx> for ty::Unevaluated<'tcx> { - fn try_super_fold_with<F: FallibleTypeFolder<'tcx>>( - self, - folder: &mut F, - ) -> Result<Self, F::Error> { - Ok(ty::Unevaluated { - def: self.def, - substs: self.substs.try_fold_with(folder)?, - promoted: self.promoted, - }) - } -} - -impl<'tcx> TypeSuperVisitable<'tcx> for ty::Unevaluated<'tcx> { +impl<'tcx> TypeSuperVisitable<'tcx> for ty::UnevaluatedConst<'tcx> { fn super_visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> { self.substs.visit_with(visitor) } } - -impl<'tcx> TypeFoldable<'tcx> for ty::Unevaluated<'tcx, ()> { - fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> { - Ok(self.expand().try_fold_with(folder)?.shrink()) - } -} - -impl<'tcx> TypeVisitable<'tcx> for ty::Unevaluated<'tcx, ()> { - fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> { - self.expand().visit_with(visitor) - } -} - -impl<'tcx> TypeFoldable<'tcx> for hir::Constness { - fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, _: &mut F) -> Result<Self, F::Error> { - Ok(self) - } -} - -impl<'tcx> TypeVisitable<'tcx> for hir::Constness { - fn visit_with<V: TypeVisitor<'tcx>>(&self, _: &mut V) -> ControlFlow<V::BreakTy> { - ControlFlow::CONTINUE - } -} diff --git a/compiler/rustc_middle/src/ty/sty.rs b/compiler/rustc_middle/src/ty/sty.rs index 52c3a3886..cf420bafe 100644 --- a/compiler/rustc_middle/src/ty/sty.rs +++ b/compiler/rustc_middle/src/ty/sty.rs @@ -3,7 +3,7 @@ #![allow(rustc::usage_of_ty_tykind)] use crate::infer::canonical::Canonical; -use crate::ty::subst::{GenericArg, InternalSubsts, Subst, SubstsRef}; +use crate::ty::subst::{GenericArg, InternalSubsts, SubstsRef}; use crate::ty::visit::ValidateBoundVars; use crate::ty::InferTy::*; use crate::ty::{ @@ -11,6 +11,7 @@ use crate::ty::{ TypeVisitor, }; use crate::ty::{List, ParamEnv}; +use hir::def::DefKind; use polonius_engine::Atom; use rustc_data_structures::captures::Captures; use rustc_data_structures::intern::Interned; @@ -18,7 +19,7 @@ use rustc_hir as hir; use rustc_hir::def_id::DefId; use rustc_index::vec::Idx; use rustc_macros::HashStable; -use rustc_span::symbol::{kw, Symbol}; +use rustc_span::symbol::{kw, sym, Symbol}; use rustc_target::abi::VariantIdx; use rustc_target::spec::abi; use std::borrow::Cow; @@ -84,6 +85,17 @@ impl BoundRegionKind { _ => false, } } + + pub fn get_name(&self) -> Option<Symbol> { + if self.is_named() { + match *self { + BoundRegionKind::BrNamed(_, name) => return Some(name), + _ => unreachable!(), + } + } + + None + } } pub trait Article { @@ -201,7 +213,7 @@ static_assert_size!(TyKind<'_>, 32); /// * `GR`: The "return type", which is the type of value returned upon /// completion of the generator. 
/// * `GW`: The "generator witness". -#[derive(Copy, Clone, Debug, TypeFoldable, TypeVisitable)] +#[derive(Copy, Clone, Debug, TypeFoldable, TypeVisitable, Lift)] pub struct ClosureSubsts<'tcx> { /// Lifetime and type parameters from the enclosing function, /// concatenated with a tuple containing the types of the upvars. @@ -303,7 +315,7 @@ impl<'tcx> ClosureSubsts<'tcx> { /// closure. // FIXME(eddyb) this should be unnecessary, as the shallowly resolved // type is known at the time of the creation of `ClosureSubsts`, - // see `rustc_typeck::check::closure`. + // see `rustc_hir_analysis::check::closure`. pub fn sig_as_fn_ptr_ty(self) -> Ty<'tcx> { self.split().closure_sig_as_fn_ptr_ty.expect_ty() } @@ -325,10 +337,14 @@ impl<'tcx> ClosureSubsts<'tcx> { _ => bug!("closure_sig_as_fn_ptr_ty is not a fn-ptr: {:?}", ty.kind()), } } + + pub fn print_as_impl_trait(self) -> ty::print::PrintClosureAsImpl<'tcx> { + ty::print::PrintClosureAsImpl { closure: self } + } } /// Similar to `ClosureSubsts`; see the above documentation for more. -#[derive(Copy, Clone, Debug, TypeFoldable, TypeVisitable)] +#[derive(Copy, Clone, Debug, TypeFoldable, TypeVisitable, Lift)] pub struct GeneratorSubsts<'tcx> { pub substs: SubstsRef<'tcx>, } @@ -546,7 +562,7 @@ impl<'tcx> GeneratorSubsts<'tcx> { layout.variant_fields.iter().map(move |variant| { variant .iter() - .map(move |field| EarlyBinder(layout.field_tys[*field]).subst(tcx, self.substs)) + .map(move |field| ty::EarlyBinder(layout.field_tys[*field]).subst(tcx, self.substs)) }) } @@ -655,7 +671,7 @@ impl<'tcx> InlineConstSubsts<'tcx> { } #[derive(Debug, Copy, Clone, PartialEq, PartialOrd, Ord, Eq, Hash, TyEncodable, TyDecodable)] -#[derive(HashStable, TypeFoldable, TypeVisitable)] +#[derive(HashStable, TypeFoldable, TypeVisitable, Lift)] pub enum ExistentialPredicate<'tcx> { /// E.g., `Iterator`. Trait(ExistentialTraitRef<'tcx>), @@ -687,6 +703,9 @@ impl<'tcx> ExistentialPredicate<'tcx> { } impl<'tcx> Binder<'tcx, ExistentialPredicate<'tcx>> { + /// Given an existential predicate like `?Self: PartialEq<u32>` (e.g., derived from `dyn PartialEq<u32>`), + /// and a concrete type `self_ty`, returns a full predicate where the existentially quantified variable `?Self` + /// has been replaced with `self_ty` (e.g., `self_ty: PartialEq<u32>`, in our example). pub fn with_self_ty(&self, tcx: TyCtxt<'tcx>, self_ty: Ty<'tcx>) -> ty::Predicate<'tcx> { use crate::ty::ToPredicate; match self.skip_binder() { @@ -781,7 +800,7 @@ impl<'tcx> List<ty::Binder<'tcx, ExistentialPredicate<'tcx>>> { /// Trait references also appear in object types like `Foo<U>`, but in /// that case the `Self` parameter is absent from the substitutions. #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, TyEncodable, TyDecodable)] -#[derive(HashStable, TypeFoldable, TypeVisitable)] +#[derive(HashStable, TypeFoldable, TypeVisitable, Lift)] pub struct TraitRef<'tcx> { pub def_id: DefId, pub substs: SubstsRef<'tcx>, @@ -845,6 +864,12 @@ impl<'tcx> PolyTraitRef<'tcx> { } } +impl rustc_errors::IntoDiagnosticArg for PolyTraitRef<'_> { + fn into_diagnostic_arg(self) -> rustc_errors::DiagnosticArgValue<'static> { + self.to_string().into_diagnostic_arg() + } +} + /// An existential reference to a trait, where `Self` is erased. /// For example, the trait object `Trait<'a, 'b, X, Y>` is: /// ```ignore (illustrative) @@ -853,7 +878,7 @@ impl<'tcx> PolyTraitRef<'tcx> { /// The substitutions don't include the erased `Self`, only trait /// type and lifetime parameters (`[X, Y]` and `['a, 'b]` above). 
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, TyEncodable, TyDecodable)] -#[derive(HashStable, TypeFoldable, TypeVisitable)] +#[derive(HashStable, TypeFoldable, TypeVisitable, Lift)] pub struct ExistentialTraitRef<'tcx> { pub def_id: DefId, pub substs: SubstsRef<'tcx>, @@ -901,73 +926,6 @@ impl<'tcx> PolyExistentialTraitRef<'tcx> { } } -#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] -#[derive(Encodable, Decodable, HashStable)] -pub struct EarlyBinder<T>(pub T); - -impl<T> EarlyBinder<T> { - pub fn as_ref(&self) -> EarlyBinder<&T> { - EarlyBinder(&self.0) - } - - pub fn map_bound_ref<F, U>(&self, f: F) -> EarlyBinder<U> - where - F: FnOnce(&T) -> U, - { - self.as_ref().map_bound(f) - } - - pub fn map_bound<F, U>(self, f: F) -> EarlyBinder<U> - where - F: FnOnce(T) -> U, - { - let value = f(self.0); - EarlyBinder(value) - } - - pub fn try_map_bound<F, U, E>(self, f: F) -> Result<EarlyBinder<U>, E> - where - F: FnOnce(T) -> Result<U, E>, - { - let value = f(self.0)?; - Ok(EarlyBinder(value)) - } - - pub fn rebind<U>(&self, value: U) -> EarlyBinder<U> { - EarlyBinder(value) - } -} - -impl<T> EarlyBinder<Option<T>> { - pub fn transpose(self) -> Option<EarlyBinder<T>> { - self.0.map(|v| EarlyBinder(v)) - } -} - -impl<T, U> EarlyBinder<(T, U)> { - pub fn transpose_tuple2(self) -> (EarlyBinder<T>, EarlyBinder<U>) { - (EarlyBinder(self.0.0), EarlyBinder(self.0.1)) - } -} - -pub struct EarlyBinderIter<T> { - t: T, -} - -impl<T: IntoIterator> EarlyBinder<T> { - pub fn transpose_iter(self) -> EarlyBinderIter<T::IntoIter> { - EarlyBinderIter { t: self.0.into_iter() } - } -} - -impl<T: Iterator> Iterator for EarlyBinderIter<T> { - type Item = EarlyBinder<T::Item>; - - fn next(&mut self) -> Option<Self::Item> { - self.t.next().map(|i| EarlyBinder(i)) - } -} - #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)] #[derive(HashStable)] pub enum BoundVariableKind { @@ -1009,7 +967,7 @@ impl BoundVariableKind { /// /// `Decodable` and `Encodable` are implemented for `Binder<T>` using the `impl_binder_encode_decode!` macro. #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] -#[derive(HashStable)] +#[derive(HashStable, Lift)] pub struct Binder<'tcx, T>(T, &'tcx List<BoundVariableKind>); impl<'tcx, T> Binder<'tcx, T> @@ -1171,7 +1129,7 @@ impl<'tcx, T> Binder<'tcx, Option<T>> { /// Represents the projection of an associated type. In explicit UFCS /// form this would be written `<T as Trait<..>>::N`. #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)] -#[derive(HashStable, TypeFoldable, TypeVisitable)] +#[derive(HashStable, TypeFoldable, TypeVisitable, Lift)] pub struct ProjectionTy<'tcx> { /// The parameters of the associated item. pub substs: SubstsRef<'tcx>, @@ -1186,7 +1144,13 @@ pub struct ProjectionTy<'tcx> { impl<'tcx> ProjectionTy<'tcx> { pub fn trait_def_id(&self, tcx: TyCtxt<'tcx>) -> DefId { - tcx.parent(self.item_def_id) + match tcx.def_kind(self.item_def_id) { + DefKind::AssocTy | DefKind::AssocConst => tcx.parent(self.item_def_id), + DefKind::ImplTraitPlaceholder => { + tcx.parent(tcx.impl_trait_in_trait_parent(self.item_def_id)) + } + kind => bug!("unexpected DefKind in ProjectionTy: {kind:?}"), + } } /// Extracts the underlying trait reference and own substs from this projection. 
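Editor's sketch (not part of the upstream diff): the hunks near the start of this section rewrite the container `Lift` impls (`Option`, `Box`, `Rc`, `Arc`) from chained `Option::map` calls to the `?` operator inside an `Option`-returning body. The standalone snippet below mirrors that refactor with a toy `lift_i32` helper in place of the real `tcx.lift(..)`; all names here are illustrative only.

```rust
// Toy stand-in for a fallible lifting step; the real code calls `tcx.lift(..)`,
// which likewise returns an `Option`.
fn lift_i32(x: i32) -> Option<i32> {
    if x >= 0 { Some(x + 1) } else { None }
}

// Old shape, as removed by the diff: chain `Option::map`.
fn lift_box_old(b: Box<i32>) -> Option<Box<i32>> {
    lift_i32(*b).map(Box::new)
}

// New shape, as introduced by the diff: propagate `None` with `?`, wrap once.
fn lift_box_new(b: Box<i32>) -> Option<Box<i32>> {
    Some(Box::new(lift_i32(*b)?))
}

fn main() {
    assert_eq!(lift_box_old(Box::new(1)), lift_box_new(Box::new(1)));
    assert_eq!(lift_box_old(Box::new(-1)), lift_box_new(Box::new(-1)));
}
```

Both shapes short-circuit on `None`; the `?` form simply reads closer to the infallible code it mirrors, which is presumably why the old FIXME asking for this rewrite could be dropped.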
@@ -1197,6 +1161,7 @@ impl<'tcx> ProjectionTy<'tcx> { tcx: TyCtxt<'tcx>, ) -> (ty::TraitRef<'tcx>, &'tcx [ty::GenericArg<'tcx>]) { let def_id = tcx.parent(self.item_def_id); + assert_eq!(tcx.def_kind(def_id), DefKind::Trait); let trait_generics = tcx.generics_of(def_id); ( ty::TraitRef { def_id, substs: self.substs.truncate_to(tcx, trait_generics) }, @@ -1221,7 +1186,7 @@ impl<'tcx> ProjectionTy<'tcx> { } } -#[derive(Copy, Clone, Debug, TypeFoldable, TypeVisitable)] +#[derive(Copy, Clone, Debug, TypeFoldable, TypeVisitable, Lift)] pub struct GenSig<'tcx> { pub resume_ty: Ty<'tcx>, pub yield_ty: Ty<'tcx>, @@ -1237,7 +1202,7 @@ pub type PolyGenSig<'tcx> = Binder<'tcx, GenSig<'tcx>>; /// - `output`: is the return type. /// - `c_variadic`: indicates whether this is a C-variadic function. #[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, TyEncodable, TyDecodable)] -#[derive(HashStable, TypeFoldable, TypeVisitable)] +#[derive(HashStable, TypeFoldable, TypeVisitable, Lift)] pub struct FnSig<'tcx> { pub inputs_and_output: &'tcx List<Ty<'tcx>>, pub c_variadic: bool, @@ -1419,7 +1384,7 @@ impl From<BoundVar> for BoundTy { /// A `ProjectionPredicate` for an `ExistentialTraitRef`. #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Debug, TyEncodable, TyDecodable)] -#[derive(HashStable, TypeFoldable, TypeVisitable)] +#[derive(HashStable, TypeFoldable, TypeVisitable, Lift)] pub struct ExistentialProjection<'tcx> { pub item_def_id: DefId, pub substs: SubstsRef<'tcx>, @@ -1492,6 +1457,23 @@ impl<'tcx> Region<'tcx> { *self.0.0 } + pub fn get_name(self) -> Option<Symbol> { + if self.has_name() { + let name = match *self { + ty::ReEarlyBound(ebr) => Some(ebr.name), + ty::ReLateBound(_, br) => br.kind.get_name(), + ty::ReFree(fr) => fr.bound_region.get_name(), + ty::ReStatic => Some(kw::StaticLifetime), + ty::RePlaceholder(placeholder) => placeholder.name.get_name(), + _ => None, + }; + + return name; + } + + None + } + /// Is this region named by the user? pub fn has_name(self) -> bool { match *self { @@ -1501,7 +1483,6 @@ impl<'tcx> Region<'tcx> { ty::ReStatic => true, ty::ReVar(..) => false, ty::RePlaceholder(placeholder) => placeholder.name.is_named(), - ty::ReEmpty(_) => false, ty::ReErased => false, } } @@ -1527,11 +1508,6 @@ impl<'tcx> Region<'tcx> { } #[inline] - pub fn is_empty(self) -> bool { - matches!(*self, ty::ReEmpty(..)) - } - - #[inline] pub fn bound_at_or_above_binder(self, index: ty::DebruijnIndex) -> bool { match *self { ty::ReLateBound(debruijn, _) => debruijn >= index, @@ -1562,7 +1538,7 @@ impl<'tcx> Region<'tcx> { flags = flags | TypeFlags::HAS_FREE_REGIONS; flags = flags | TypeFlags::HAS_FREE_LOCAL_REGIONS; } - ty::ReEmpty(_) | ty::ReStatic => { + ty::ReStatic => { flags = flags | TypeFlags::HAS_FREE_REGIONS; } ty::ReLateBound(..) => { @@ -1617,6 +1593,10 @@ impl<'tcx> Region<'tcx> { _ => self.is_free(), } } + + pub fn is_var(self) -> bool { + matches!(self.kind(), ty::ReVar(_)) + } } /// Type utilities @@ -1838,7 +1818,12 @@ impl<'tcx> Ty<'tcx> { #[inline] pub fn is_trait(self) -> bool { - matches!(self.kind(), Dynamic(..)) + matches!(self.kind(), Dynamic(_, _, ty::Dyn)) + } + + #[inline] + pub fn is_dyn_star(self) -> bool { + matches!(self.kind(), Dynamic(_, _, ty::DynStar)) } #[inline] @@ -2137,7 +2122,7 @@ impl<'tcx> Ty<'tcx> { /// /// Note that during type checking, we use an inference variable /// to represent the closure kind, because it has not yet been - /// inferred. Once upvar inference (in `rustc_typeck/src/check/upvar.rs`) + /// inferred. 
Once upvar inference (in `rustc_hir_analysis/src/check/upvar.rs`) /// is complete, that type variable will be unified. pub fn to_opt_closure_kind(self) -> Option<ty::ClosureKind> { match self.kind() { @@ -2220,7 +2205,10 @@ impl<'tcx> Ty<'tcx> { // These aren't even `Clone` ty::Str | ty::Slice(..) | ty::Foreign(..) | ty::Dynamic(..) => false, - ty::Int(..) | ty::Uint(..) | ty::Float(..) => true, + ty::Infer(ty::InferTy::FloatVar(_) | ty::InferTy::IntVar(_)) + | ty::Int(..) + | ty::Uint(..) + | ty::Float(..) => true, // The voldemort ZSTs are fine. ty::FnDef(..) => true, @@ -2255,6 +2243,35 @@ impl<'tcx> Ty<'tcx> { } } } + + // If `self` is a primitive, return its [`Symbol`]. + pub fn primitive_symbol(self) -> Option<Symbol> { + match self.kind() { + ty::Bool => Some(sym::bool), + ty::Char => Some(sym::char), + ty::Float(f) => match f { + ty::FloatTy::F32 => Some(sym::f32), + ty::FloatTy::F64 => Some(sym::f64), + }, + ty::Int(f) => match f { + ty::IntTy::Isize => Some(sym::isize), + ty::IntTy::I8 => Some(sym::i8), + ty::IntTy::I16 => Some(sym::i16), + ty::IntTy::I32 => Some(sym::i32), + ty::IntTy::I64 => Some(sym::i64), + ty::IntTy::I128 => Some(sym::i128), + }, + ty::Uint(f) => match f { + ty::UintTy::Usize => Some(sym::usize), + ty::UintTy::U8 => Some(sym::u8), + ty::UintTy::U16 => Some(sym::u16), + ty::UintTy::U32 => Some(sym::u32), + ty::UintTy::U64 => Some(sym::u64), + ty::UintTy::U128 => Some(sym::u128), + }, + _ => None, + } + } } /// Extra information about why we ended up with a particular variance. diff --git a/compiler/rustc_middle/src/ty/subst.rs b/compiler/rustc_middle/src/ty/subst.rs index 6262aa180..0660e9b79 100644 --- a/compiler/rustc_middle/src/ty/subst.rs +++ b/compiler/rustc_middle/src/ty/subst.rs @@ -1,12 +1,12 @@ // Type substitutions. -use crate::mir; use crate::ty::codec::{TyDecoder, TyEncoder}; use crate::ty::fold::{FallibleTypeFolder, TypeFoldable, TypeFolder, TypeSuperFoldable}; use crate::ty::sty::{ClosureSubsts, GeneratorSubsts, InlineConstSubsts}; use crate::ty::visit::{TypeVisitable, TypeVisitor}; use crate::ty::{self, Lift, List, ParamConst, Ty, TyCtxt}; +use rustc_data_structures::captures::Captures; use rustc_data_structures::intern::{Interned, WithStableHash}; use rustc_hir::def_id::DefId; use rustc_macros::HashStable; @@ -189,6 +189,14 @@ impl<'tcx> GenericArg<'tcx> { _ => bug!("expected a const, but found another kind"), } } + + pub fn is_non_region_infer(self) -> bool { + match self.unpack() { + GenericArgKind::Lifetime(_) => false, + GenericArgKind::Type(ty) => ty.is_ty_infer(), + GenericArgKind::Const(ct) => ct.is_ct_infer(), + } + } } impl<'a, 'tcx> Lift<'tcx> for GenericArg<'a> { @@ -459,12 +467,6 @@ impl<'tcx> TypeFoldable<'tcx> for SubstsRef<'tcx> { } } -impl<'tcx> TypeVisitable<'tcx> for SubstsRef<'tcx> { - fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> { - self.iter().try_for_each(|t| t.visit_with(visitor)) - } -} - impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List<Ty<'tcx>> { fn try_fold_with<F: FallibleTypeFolder<'tcx>>(self, folder: &mut F) -> Result<Self, F::Error> { // This code is fairly hot, though not as hot as `SubstsRef`. 
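Editor's sketch (not part of the upstream diff): the `sty.rs` hunks above add `BoundRegionKind::get_name` and `Region::get_name`, which surface a region's user-visible name only when `is_named`/`has_name` holds. Below is a self-contained toy model of that shape; the enums and string names are illustrative stand-ins, not rustc's actual region kinds.

```rust
/// Toy model of a bound-region kind: either anonymous or carrying a name.
#[derive(Copy, Clone, Debug)]
enum BoundKind {
    Anon,
    Named(&'static str),
}

impl BoundKind {
    /// In this toy, a region counts as named unless its name is the placeholder `'_`.
    fn is_named(self) -> bool {
        matches!(self, BoundKind::Named(name) if name != "'_")
    }

    /// Mirrors the new `BoundRegionKind::get_name`: only named kinds yield a name.
    fn get_name(self) -> Option<&'static str> {
        if self.is_named() {
            match self {
                BoundKind::Named(name) => return Some(name),
                _ => unreachable!(),
            }
        }
        None
    }
}

/// Toy model of a few of the region kinds handled by `Region::get_name`.
#[derive(Copy, Clone, Debug)]
enum Region {
    Static,
    LateBound(BoundKind),
    Var,
}

impl Region {
    /// `'static` has a fixed name, late-bound regions defer to their kind,
    /// and inference variables never have one.
    fn get_name(self) -> Option<&'static str> {
        match self {
            Region::Static => Some("'static"),
            Region::LateBound(kind) => kind.get_name(),
            Region::Var => None,
        }
    }
}

fn main() {
    assert_eq!(Region::LateBound(BoundKind::Named("'a")).get_name(), Some("'a"));
    assert_eq!(Region::LateBound(BoundKind::Anon).get_name(), None);
    assert_eq!(Region::Static.get_name(), Some("'static"));
    assert_eq!(Region::Var.get_name(), None);
}
```

The real implementations cover more kinds (early-bound, free, and placeholder regions) and return `Option<Symbol>` rather than `&str`, but the control flow is the same: check whether a name exists, then extract it.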
@@ -497,24 +499,108 @@ impl<'tcx> TypeFoldable<'tcx> for &'tcx ty::List<Ty<'tcx>> { } } -impl<'tcx> TypeVisitable<'tcx> for &'tcx ty::List<Ty<'tcx>> { +impl<'tcx, T: TypeVisitable<'tcx>> TypeVisitable<'tcx> for &'tcx ty::List<T> { + #[inline] fn visit_with<V: TypeVisitor<'tcx>>(&self, visitor: &mut V) -> ControlFlow<V::BreakTy> { self.iter().try_for_each(|t| t.visit_with(visitor)) } } -// Just call `foo.subst(tcx, substs)` to perform a substitution across `foo`. -#[rustc_on_unimplemented(message = "Calling `subst` must now be done through an `EarlyBinder`")] -pub trait Subst<'tcx>: Sized { - type Inner; +#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)] +#[derive(Encodable, Decodable, HashStable)] +pub struct EarlyBinder<T>(pub T); + +/// For early binders, you should first call `subst` before using any visitors. +impl<'tcx, T> !TypeFoldable<'tcx> for ty::EarlyBinder<T> {} +impl<'tcx, T> !TypeVisitable<'tcx> for ty::EarlyBinder<T> {} + +impl<T> EarlyBinder<T> { + pub fn as_ref(&self) -> EarlyBinder<&T> { + EarlyBinder(&self.0) + } + + pub fn map_bound_ref<F, U>(&self, f: F) -> EarlyBinder<U> + where + F: FnOnce(&T) -> U, + { + self.as_ref().map_bound(f) + } + + pub fn map_bound<F, U>(self, f: F) -> EarlyBinder<U> + where + F: FnOnce(T) -> U, + { + let value = f(self.0); + EarlyBinder(value) + } + + pub fn try_map_bound<F, U, E>(self, f: F) -> Result<EarlyBinder<U>, E> + where + F: FnOnce(T) -> Result<U, E>, + { + let value = f(self.0)?; + Ok(EarlyBinder(value)) + } + + pub fn rebind<U>(&self, value: U) -> EarlyBinder<U> { + EarlyBinder(value) + } +} + +impl<T> EarlyBinder<Option<T>> { + pub fn transpose(self) -> Option<EarlyBinder<T>> { + self.0.map(|v| EarlyBinder(v)) + } +} + +impl<T, U> EarlyBinder<(T, U)> { + pub fn transpose_tuple2(self) -> (EarlyBinder<T>, EarlyBinder<U>) { + (EarlyBinder(self.0.0), EarlyBinder(self.0.1)) + } +} - fn subst(self, tcx: TyCtxt<'tcx>, substs: &[GenericArg<'tcx>]) -> Self::Inner; +impl<'tcx, 's, T: IntoIterator<Item = I>, I: TypeFoldable<'tcx>> EarlyBinder<T> { + pub fn subst_iter( + self, + tcx: TyCtxt<'tcx>, + substs: &'s [GenericArg<'tcx>], + ) -> impl Iterator<Item = I> + Captures<'s> + Captures<'tcx> { + self.0.into_iter().map(move |t| EarlyBinder(t).subst(tcx, substs)) + } } -impl<'tcx, T: TypeFoldable<'tcx>> Subst<'tcx> for ty::EarlyBinder<T> { - type Inner = T; +impl<'tcx, 's, 'a, T: IntoIterator<Item = &'a I>, I: Copy + TypeFoldable<'tcx> + 'a> + EarlyBinder<T> +{ + pub fn subst_iter_copied( + self, + tcx: TyCtxt<'tcx>, + substs: &'s [GenericArg<'tcx>], + ) -> impl Iterator<Item = I> + Captures<'s> + Captures<'tcx> + Captures<'a> { + self.0.into_iter().map(move |t| EarlyBinder(*t).subst(tcx, substs)) + } +} + +pub struct EarlyBinderIter<T> { + t: T, +} + +impl<T: IntoIterator> EarlyBinder<T> { + pub fn transpose_iter(self) -> EarlyBinderIter<T::IntoIter> { + EarlyBinderIter { t: self.0.into_iter() } + } +} - fn subst(self, tcx: TyCtxt<'tcx>, substs: &[GenericArg<'tcx>]) -> Self::Inner { +impl<T: Iterator> Iterator for EarlyBinderIter<T> { + type Item = EarlyBinder<T::Item>; + + fn next(&mut self) -> Option<Self::Item> { + self.t.next().map(|i| EarlyBinder(i)) + } +} + +impl<'tcx, T: TypeFoldable<'tcx>> ty::EarlyBinder<T> { + pub fn subst(self, tcx: TyCtxt<'tcx>, substs: &[GenericArg<'tcx>]) -> T { let mut folder = SubstFolder { tcx, substs, binders_passed: 0 }; self.0.fold_with(&mut folder) } @@ -550,9 +636,21 @@ impl<'a, 'tcx> TypeFolder<'tcx> for SubstFolder<'a, 'tcx> { fn fold_region(&mut self, r: ty::Region<'tcx>) -> 
ty::Region<'tcx> { #[cold] #[inline(never)] - fn region_param_out_of_range(data: ty::EarlyBoundRegion) -> ! { + fn region_param_out_of_range(data: ty::EarlyBoundRegion, substs: &[GenericArg<'_>]) -> ! { + bug!( + "Region parameter out of range when substituting in region {} (index={}, substs = {:?})", + data.name, + data.index, + substs, + ) + } + + #[cold] + #[inline(never)] + fn region_param_invalid(data: ty::EarlyBoundRegion, other: GenericArgKind<'_>) -> ! { bug!( - "Region parameter out of range when substituting in region {} (index={})", + "Unexpected parameter {:?} when substituting in region {} (index={})", + other, data.name, data.index ) @@ -568,7 +666,8 @@ impl<'a, 'tcx> TypeFolder<'tcx> for SubstFolder<'a, 'tcx> { let rk = self.substs.get(data.index as usize).map(|k| k.unpack()); match rk { Some(GenericArgKind::Lifetime(lt)) => self.shift_region_through_binders(lt), - _ => region_param_out_of_range(data), + Some(other) => region_param_invalid(data, other), + None => region_param_out_of_range(data, self.substs), } } _ => r, @@ -593,11 +692,6 @@ impl<'a, 'tcx> TypeFolder<'tcx> for SubstFolder<'a, 'tcx> { c.super_fold_with(self) } } - - #[inline] - fn fold_mir_const(&mut self, c: mir::ConstantKind<'tcx>) -> mir::ConstantKind<'tcx> { - c.super_fold_with(self) - } } impl<'a, 'tcx> SubstFolder<'a, 'tcx> { diff --git a/compiler/rustc_middle/src/ty/trait_def.rs b/compiler/rustc_middle/src/ty/trait_def.rs index 541dace5c..ac79949fc 100644 --- a/compiler/rustc_middle/src/ty/trait_def.rs +++ b/compiler/rustc_middle/src/ty/trait_def.rs @@ -256,7 +256,6 @@ pub(super) fn trait_impls_of_provider(tcx: TyCtxt<'_>, trait_id: DefId) -> Trait } // Query provider for `incoherent_impls`. -#[instrument(level = "debug", skip(tcx))] pub(super) fn incoherent_impls_provider(tcx: TyCtxt<'_>, simp: SimplifiedType) -> &[DefId] { let mut impls = Vec::new(); diff --git a/compiler/rustc_middle/src/ty/util.rs b/compiler/rustc_middle/src/ty/util.rs index 591bb7831..f72e236ed 100644 --- a/compiler/rustc_middle/src/ty/util.rs +++ b/compiler/rustc_middle/src/ty/util.rs @@ -2,12 +2,11 @@ use crate::middle::codegen_fn_attrs::CodegenFnAttrFlags; use crate::ty::layout::IntegerExt; -use crate::ty::query::TyCtxtAt; -use crate::ty::subst::{GenericArgKind, Subst, SubstsRef}; use crate::ty::{ self, DefIdTree, FallibleTypeFolder, Ty, TyCtxt, TypeFoldable, TypeFolder, TypeSuperFoldable, TypeVisitable, }; +use crate::ty::{GenericArgKind, SubstsRef}; use rustc_apfloat::Float as _; use rustc_ast as ast; use rustc_attr::{self as attr, SignedInt, UnsignedInt}; @@ -627,7 +626,7 @@ impl<'tcx> TyCtxt<'tcx> { } /// Expands the given impl trait type, stopping if the type is recursive. 
- #[instrument(skip(self), level = "debug")] + #[instrument(skip(self), level = "debug", ret)] pub fn try_expand_impl_trait_type( self, def_id: DefId, @@ -644,7 +643,6 @@ impl<'tcx> TyCtxt<'tcx> { }; let expanded_type = visitor.expand_opaque_ty(def_id, substs).unwrap(); - trace!(?expanded_type); if visitor.found_recursion { Err(expanded_type) } else { Ok(expanded_type) } } @@ -652,6 +650,13 @@ impl<'tcx> TyCtxt<'tcx> { ty::EarlyBinder(self.type_of(def_id)) } + pub fn bound_trait_impl_trait_tys( + self, + def_id: DefId, + ) -> ty::EarlyBinder<Result<&'tcx FxHashMap<DefId, Ty<'tcx>>, ErrorGuaranteed>> { + ty::EarlyBinder(self.collect_trait_impl_trait_tys(def_id)) + } + pub fn bound_fn_sig(self, def_id: DefId) -> ty::EarlyBinder<ty::PolyFnSig<'tcx>> { ty::EarlyBinder(self.fn_sig(def_id)) } @@ -815,12 +820,8 @@ impl<'tcx> Ty<'tcx> { /// does copies even when the type actually doesn't satisfy the /// full requirements for the `Copy` trait (cc #29149) -- this /// winds up being reported as an error during NLL borrow check. - pub fn is_copy_modulo_regions( - self, - tcx_at: TyCtxtAt<'tcx>, - param_env: ty::ParamEnv<'tcx>, - ) -> bool { - self.is_trivially_pure_clone_copy() || tcx_at.is_copy_raw(param_env.and(self)) + pub fn is_copy_modulo_regions(self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool { + self.is_trivially_pure_clone_copy() || tcx.is_copy_raw(param_env.and(self)) } /// Checks whether values of this type `T` have a size known at @@ -829,8 +830,8 @@ impl<'tcx> Ty<'tcx> { /// over-approximation in generic contexts, where one can have /// strange rules like `<T as Foo<'static>>::Bar: Sized` that /// actually carry lifetime requirements. - pub fn is_sized(self, tcx_at: TyCtxtAt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool { - self.is_trivially_sized(tcx_at.tcx) || tcx_at.is_sized_raw(param_env.and(self)) + pub fn is_sized(self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool { + self.is_trivially_sized(tcx) || tcx.is_sized_raw(param_env.and(self)) } /// Checks whether values of this type `T` implement the `Freeze` @@ -840,8 +841,8 @@ impl<'tcx> Ty<'tcx> { /// optimization as well as the rules around static values. Note /// that the `Freeze` trait is not exposed to end users and is /// effectively an implementation detail. - pub fn is_freeze(self, tcx_at: TyCtxtAt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool { - self.is_trivially_freeze() || tcx_at.is_freeze_raw(param_env.and(self)) + pub fn is_freeze(self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool { + self.is_trivially_freeze() || tcx.is_freeze_raw(param_env.and(self)) } /// Fast path helper for testing if a type is `Freeze`. @@ -880,8 +881,8 @@ impl<'tcx> Ty<'tcx> { } /// Checks whether values of this type `T` implement the `Unpin` trait. - pub fn is_unpin(self, tcx_at: TyCtxtAt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool { - self.is_trivially_unpin() || tcx_at.is_unpin_raw(param_env.and(self)) + pub fn is_unpin(self, tcx: TyCtxt<'tcx>, param_env: ty::ParamEnv<'tcx>) -> bool { + self.is_trivially_unpin() || tcx.is_unpin_raw(param_env.and(self)) } /// Fast path helper for testing if a type is `Unpin`. @@ -952,7 +953,7 @@ impl<'tcx> Ty<'tcx> { } } - /// Checks if `ty` has has a significant drop. + /// Checks if `ty` has a significant drop. 
/// /// Note that this method can return false even if `ty` has a destructor /// attached; even if that is the case then the adt has been marked with @@ -1283,12 +1284,24 @@ pub fn is_doc_hidden(tcx: TyCtxt<'_>, def_id: DefId) -> bool { .any(|items| items.iter().any(|item| item.has_name(sym::hidden))) } +/// Determines whether an item is annotated with `doc(notable_trait)`. +pub fn is_doc_notable_trait(tcx: TyCtxt<'_>, def_id: DefId) -> bool { + tcx.get_attrs(def_id, sym::doc) + .filter_map(|attr| attr.meta_item_list()) + .any(|items| items.iter().any(|item| item.has_name(sym::notable_trait))) +} + /// Determines whether an item is an intrinsic by Abi. pub fn is_intrinsic(tcx: TyCtxt<'_>, def_id: DefId) -> bool { matches!(tcx.fn_sig(def_id).abi(), Abi::RustIntrinsic | Abi::PlatformIntrinsic) } pub fn provide(providers: &mut ty::query::Providers) { - *providers = - ty::query::Providers { normalize_opaque_types, is_doc_hidden, is_intrinsic, ..*providers } + *providers = ty::query::Providers { + normalize_opaque_types, + is_doc_hidden, + is_doc_notable_trait, + is_intrinsic, + ..*providers + } } diff --git a/compiler/rustc_middle/src/ty/visit.rs b/compiler/rustc_middle/src/ty/visit.rs index 536506720..c09f71f9a 100644 --- a/compiler/rustc_middle/src/ty/visit.rs +++ b/compiler/rustc_middle/src/ty/visit.rs @@ -10,8 +10,7 @@ //! //! There are three groups of traits involved in each traversal. //! - `TypeVisitable`. This is implemented once for many types, including: -//! - Types of interest, for which the the methods delegate to the -//! visitor. +//! - Types of interest, for which the methods delegate to the visitor. //! - All other types, including generic containers like `Vec` and `Option`. //! It defines a "skeleton" of how they should be visited. //! - `TypeSuperVisitable`. This is implemented only for each type of interest, @@ -39,7 +38,6 @@ //! - ty.super_visit_with(visitor) //! - u.visit_with(visitor) //! 
``` -use crate::mir; use crate::ty::{self, flags::FlagComputation, Binder, Ty, TyCtxt, TypeFlags}; use rustc_errors::ErrorGuaranteed; @@ -84,7 +82,7 @@ pub trait TypeVisitable<'tcx>: fmt::Debug + Clone { self.has_vars_bound_at_or_above(ty::INNERMOST) } - #[instrument(level = "trace")] + #[instrument(level = "trace", ret)] fn has_type_flags(&self, flags: TypeFlags) -> bool { self.visit_with(&mut HasTypeFlagsVisitor { flags }).break_value() == Some(FoundFlags) } @@ -104,8 +102,8 @@ pub trait TypeVisitable<'tcx>: fmt::Debug + Clone { None } } - fn has_param_types_or_consts(&self) -> bool { - self.has_type_flags(TypeFlags::HAS_TY_PARAM | TypeFlags::HAS_CT_PARAM) + fn has_non_region_param(&self) -> bool { + self.has_type_flags(TypeFlags::NEEDS_SUBST - TypeFlags::HAS_RE_PARAM) } fn has_infer_regions(&self) -> bool { self.has_type_flags(TypeFlags::HAS_RE_INFER) @@ -113,8 +111,8 @@ pub trait TypeVisitable<'tcx>: fmt::Debug + Clone { fn has_infer_types(&self) -> bool { self.has_type_flags(TypeFlags::HAS_TY_INFER) } - fn has_infer_types_or_consts(&self) -> bool { - self.has_type_flags(TypeFlags::HAS_TY_INFER | TypeFlags::HAS_CT_INFER) + fn has_non_region_infer(&self) -> bool { + self.has_type_flags(TypeFlags::NEEDS_INFER - TypeFlags::HAS_RE_INFER) } fn needs_infer(&self) -> bool { self.has_type_flags(TypeFlags::NEEDS_INFER) @@ -199,17 +197,9 @@ pub trait TypeVisitor<'tcx>: Sized { c.super_visit_with(self) } - fn visit_unevaluated(&mut self, uv: ty::Unevaluated<'tcx>) -> ControlFlow<Self::BreakTy> { - uv.super_visit_with(self) - } - fn visit_predicate(&mut self, p: ty::Predicate<'tcx>) -> ControlFlow<Self::BreakTy> { p.super_visit_with(self) } - - fn visit_mir_const(&mut self, c: mir::ConstantKind<'tcx>) -> ControlFlow<Self::BreakTy> { - c.super_visit_with(self) - } } /////////////////////////////////////////////////////////////////////////// @@ -560,7 +550,7 @@ impl<'tcx> TypeVisitor<'tcx> for HasTypeFlagsVisitor { type BreakTy = FoundFlags; #[inline] - #[instrument(skip(self), level = "trace")] + #[instrument(skip(self), level = "trace", ret)] fn visit_ty(&mut self, t: Ty<'tcx>) -> ControlFlow<Self::BreakTy> { let flags = t.flags(); trace!(t.flags=?t.flags()); @@ -572,7 +562,7 @@ impl<'tcx> TypeVisitor<'tcx> for HasTypeFlagsVisitor { } #[inline] - #[instrument(skip(self), level = "trace")] + #[instrument(skip(self), level = "trace", ret)] fn visit_region(&mut self, r: ty::Region<'tcx>) -> ControlFlow<Self::BreakTy> { let flags = r.type_flags(); trace!(r.flags=?flags); @@ -584,7 +574,7 @@ impl<'tcx> TypeVisitor<'tcx> for HasTypeFlagsVisitor { } #[inline] - #[instrument(level = "trace")] + #[instrument(level = "trace", ret)] fn visit_const(&mut self, c: ty::Const<'tcx>) -> ControlFlow<Self::BreakTy> { let flags = FlagComputation::for_const(c); trace!(r.flags=?flags); @@ -596,19 +586,7 @@ impl<'tcx> TypeVisitor<'tcx> for HasTypeFlagsVisitor { } #[inline] - #[instrument(level = "trace")] - fn visit_unevaluated(&mut self, uv: ty::Unevaluated<'tcx>) -> ControlFlow<Self::BreakTy> { - let flags = FlagComputation::for_unevaluated_const(uv); - trace!(r.flags=?flags); - if flags.intersects(self.flags) { - ControlFlow::Break(FoundFlags) - } else { - ControlFlow::CONTINUE - } - } - - #[inline] - #[instrument(level = "trace")] + #[instrument(level = "trace", ret)] fn visit_predicate(&mut self, predicate: ty::Predicate<'tcx>) -> ControlFlow<Self::BreakTy> { debug!( "HasTypeFlagsVisitor: predicate={:?} predicate.flags={:?} self.flags={:?}", @@ -666,7 +644,7 @@ impl<'tcx> TypeVisitor<'tcx> for 
LateBoundRegionsCollector { // ignore the inputs to a projection, as they may not appear // in the normalized form if self.just_constrained { - if let ty::Projection(..) = t.kind() { + if let ty::Projection(..) | ty::Opaque(..) = t.kind() { return ControlFlow::CONTINUE; } } diff --git a/compiler/rustc_middle/src/ty/vtable.rs b/compiler/rustc_middle/src/ty/vtable.rs index 04a9fd1f7..5ca51c25a 100644 --- a/compiler/rustc_middle/src/ty/vtable.rs +++ b/compiler/rustc_middle/src/ty/vtable.rs @@ -1,7 +1,7 @@ use std::convert::TryFrom; use std::fmt; -use crate::mir::interpret::{alloc_range, AllocId, Allocation, Pointer, Scalar, ScalarMaybeUninit}; +use crate::mir::interpret::{alloc_range, AllocId, Allocation, Pointer, Scalar}; use crate::ty::{self, Instance, PolyTraitRef, Ty, TyCtxt}; use rustc_ast::Mutability; @@ -87,7 +87,7 @@ pub(super) fn vtable_allocation_provider<'tcx>( let instance = ty::Instance::resolve_drop_in_place(tcx, ty); let fn_alloc_id = tcx.create_fn_alloc(instance); let fn_ptr = Pointer::from(fn_alloc_id); - ScalarMaybeUninit::from_pointer(fn_ptr, &tcx) + Scalar::from_pointer(fn_ptr, &tcx) } VtblEntry::MetadataSize => Scalar::from_uint(size, ptr_size).into(), VtblEntry::MetadataAlign => Scalar::from_uint(align, ptr_size).into(), @@ -97,14 +97,14 @@ pub(super) fn vtable_allocation_provider<'tcx>( let instance = instance.polymorphize(tcx); let fn_alloc_id = tcx.create_fn_alloc(instance); let fn_ptr = Pointer::from(fn_alloc_id); - ScalarMaybeUninit::from_pointer(fn_ptr, &tcx) + Scalar::from_pointer(fn_ptr, &tcx) } VtblEntry::TraitVPtr(trait_ref) => { let super_trait_ref = trait_ref .map_bound(|trait_ref| ty::ExistentialTraitRef::erase_self_ty(tcx, trait_ref)); let supertrait_alloc_id = tcx.vtable_allocation((ty, Some(super_trait_ref))); let vptr = Pointer::from(supertrait_alloc_id); - ScalarMaybeUninit::from_pointer(vptr, &tcx) + Scalar::from_pointer(vptr, &tcx) } }; vtable diff --git a/compiler/rustc_middle/src/ty/walk.rs b/compiler/rustc_middle/src/ty/walk.rs index 02fe1f3a7..91db9698c 100644 --- a/compiler/rustc_middle/src/ty/walk.rs +++ b/compiler/rustc_middle/src/ty/walk.rs @@ -112,6 +112,22 @@ impl<'tcx> Ty<'tcx> { } } +impl<'tcx> ty::Const<'tcx> { + /// Iterator that walks `self` and any types reachable from + /// `self`, in depth-first order. Note that just walks the types + /// that appear in `self`, it does not descend into the fields of + /// structs or variants. For example: + /// + /// ```text + /// isize => { isize } + /// Foo<Bar<isize>> => { Foo<Bar<isize>>, Bar<isize>, isize } + /// [isize] => { [isize], isize } + /// ``` + pub fn walk(self) -> TypeWalker<'tcx> { + TypeWalker::new(self.into()) + } +} + /// We push `GenericArg`s on the stack in reverse order so as to /// maintain a pre-order traversal. 
As of the time of this /// writing, the fact that the traversal is pre-order is not @@ -152,7 +168,7 @@ fn push_inner<'tcx>(stack: &mut TypeWalkerStack<'tcx>, parent: GenericArg<'tcx>) ty::Projection(data) => { stack.extend(data.substs.iter().rev()); } - ty::Dynamic(obj, lt) => { + ty::Dynamic(obj, lt, _) => { stack.push(lt.into()); stack.extend(obj.iter().rev().flat_map(|predicate| { let (substs, opt_ty) = match predicate.skip_binder() { @@ -165,9 +181,9 @@ fn push_inner<'tcx>(stack: &mut TypeWalkerStack<'tcx>, parent: GenericArg<'tcx>) } }; - substs.iter().rev().chain(opt_ty.map(|term| match term { - ty::Term::Ty(ty) => ty.into(), - ty::Term::Const(ct) => ct.into(), + substs.iter().rev().chain(opt_ty.map(|term| match term.unpack() { + ty::TermKind::Ty(ty) => ty.into(), + ty::TermKind::Const(ct) => ct.into(), })) })); } diff --git a/compiler/rustc_middle/src/values.rs b/compiler/rustc_middle/src/values.rs new file mode 100644 index 000000000..f4b4c3fb0 --- /dev/null +++ b/compiler/rustc_middle/src/values.rs @@ -0,0 +1,202 @@ +use rustc_data_structures::fx::FxHashSet; +use rustc_errors::{pluralize, struct_span_err, Applicability, MultiSpan}; +use rustc_hir as hir; +use rustc_hir::def::DefKind; +use rustc_middle::ty::Representability; +use rustc_middle::ty::{self, DefIdTree, Ty, TyCtxt}; +use rustc_query_system::query::QueryInfo; +use rustc_query_system::Value; +use rustc_span::def_id::LocalDefId; +use rustc_span::Span; + +use std::fmt::Write; + +impl<'tcx> Value<TyCtxt<'tcx>> for Ty<'_> { + fn from_cycle_error(tcx: TyCtxt<'tcx>, _: &[QueryInfo]) -> Self { + // SAFETY: This is never called when `Self` is not `Ty<'tcx>`. + // FIXME: Represent the above fact in the trait system somehow. + unsafe { std::mem::transmute::<Ty<'tcx>, Ty<'_>>(tcx.ty_error()) } + } +} + +impl<'tcx> Value<TyCtxt<'tcx>> for ty::SymbolName<'_> { + fn from_cycle_error(tcx: TyCtxt<'tcx>, _: &[QueryInfo]) -> Self { + // SAFETY: This is never called when `Self` is not `SymbolName<'tcx>`. + // FIXME: Represent the above fact in the trait system somehow. + unsafe { + std::mem::transmute::<ty::SymbolName<'tcx>, ty::SymbolName<'_>>(ty::SymbolName::new( + tcx, "<error>", + )) + } + } +} + +impl<'tcx> Value<TyCtxt<'tcx>> for ty::Binder<'_, ty::FnSig<'_>> { + fn from_cycle_error(tcx: TyCtxt<'tcx>, _: &[QueryInfo]) -> Self { + let err = tcx.ty_error(); + // FIXME(compiler-errors): It would be nice if we could get the + // query key, so we could at least generate a fn signature that + // has the right arity. + let fn_sig = ty::Binder::dummy(tcx.mk_fn_sig( + [].into_iter(), + err, + false, + rustc_hir::Unsafety::Normal, + rustc_target::spec::abi::Abi::Rust, + )); + + // SAFETY: This is never called when `Self` is not `ty::Binder<'tcx, ty::FnSig<'tcx>>`. + // FIXME: Represent the above fact in the trait system somehow. 
+ unsafe { std::mem::transmute::<ty::PolyFnSig<'tcx>, ty::Binder<'_, ty::FnSig<'_>>>(fn_sig) } + } +} + +impl<'tcx> Value<TyCtxt<'tcx>> for Representability { + fn from_cycle_error(tcx: TyCtxt<'tcx>, cycle: &[QueryInfo]) -> Self { + let mut item_and_field_ids = Vec::new(); + let mut representable_ids = FxHashSet::default(); + for info in cycle { + if info.query.name == "representability" + && let Some(field_id) = info.query.def_id + && let Some(field_id) = field_id.as_local() + && let Some(DefKind::Field) = info.query.def_kind + { + let parent_id = tcx.parent(field_id.to_def_id()); + let item_id = match tcx.def_kind(parent_id) { + DefKind::Variant => tcx.parent(parent_id), + _ => parent_id, + }; + item_and_field_ids.push((item_id.expect_local(), field_id)); + } + } + for info in cycle { + if info.query.name == "representability_adt_ty" + && let Some(def_id) = info.query.ty_adt_id + && let Some(def_id) = def_id.as_local() + && !item_and_field_ids.iter().any(|&(id, _)| id == def_id) + { + representable_ids.insert(def_id); + } + } + recursive_type_error(tcx, item_and_field_ids, &representable_ids); + Representability::Infinite + } +} + +// item_and_field_ids should form a cycle where each field contains the +// type in the next element in the list +pub fn recursive_type_error( + tcx: TyCtxt<'_>, + mut item_and_field_ids: Vec<(LocalDefId, LocalDefId)>, + representable_ids: &FxHashSet<LocalDefId>, +) { + const ITEM_LIMIT: usize = 5; + + // Rotate the cycle so that the item with the lowest span is first + let start_index = item_and_field_ids + .iter() + .enumerate() + .min_by_key(|&(_, &(id, _))| tcx.def_span(id)) + .unwrap() + .0; + item_and_field_ids.rotate_left(start_index); + + let cycle_len = item_and_field_ids.len(); + let show_cycle_len = cycle_len.min(ITEM_LIMIT); + + let mut err_span = MultiSpan::from_spans( + item_and_field_ids[..show_cycle_len] + .iter() + .map(|(id, _)| tcx.def_span(id.to_def_id())) + .collect(), + ); + let mut suggestion = Vec::with_capacity(show_cycle_len * 2); + for i in 0..show_cycle_len { + let (_, field_id) = item_and_field_ids[i]; + let (next_item_id, _) = item_and_field_ids[(i + 1) % cycle_len]; + // Find the span(s) that contain the next item in the cycle + let hir_id = tcx.hir().local_def_id_to_hir_id(field_id); + let hir::Node::Field(field) = tcx.hir().get(hir_id) else { bug!("expected field") }; + let mut found = Vec::new(); + find_item_ty_spans(tcx, field.ty, next_item_id, &mut found, representable_ids); + + // Couldn't find the type. Maybe it's behind a type alias? + // In any case, we'll just suggest boxing the whole field. 
+ if found.is_empty() { + found.push(field.ty.span); + } + + for span in found { + err_span.push_span_label(span, "recursive without indirection"); + // FIXME(compiler-errors): This suggestion might be erroneous if Box is shadowed + suggestion.push((span.shrink_to_lo(), "Box<".to_string())); + suggestion.push((span.shrink_to_hi(), ">".to_string())); + } + } + let items_list = { + let mut s = String::new(); + for (i, (item_id, _)) in item_and_field_ids.iter().enumerate() { + let path = tcx.def_path_str(item_id.to_def_id()); + write!(&mut s, "`{path}`").unwrap(); + if i == (ITEM_LIMIT - 1) && cycle_len > ITEM_LIMIT { + write!(&mut s, " and {} more", cycle_len - 5).unwrap(); + break; + } + if cycle_len > 1 && i < cycle_len - 2 { + s.push_str(", "); + } else if cycle_len > 1 && i == cycle_len - 2 { + s.push_str(" and ") + } + } + s + }; + let mut err = struct_span_err!( + tcx.sess, + err_span, + E0072, + "recursive type{} {} {} infinite size", + pluralize!(cycle_len), + items_list, + pluralize!("has", cycle_len), + ); + err.multipart_suggestion( + "insert some indirection (e.g., a `Box`, `Rc`, or `&`) to break the cycle", + suggestion, + Applicability::HasPlaceholders, + ); + err.emit(); +} + +fn find_item_ty_spans( + tcx: TyCtxt<'_>, + ty: &hir::Ty<'_>, + needle: LocalDefId, + spans: &mut Vec<Span>, + seen_representable: &FxHashSet<LocalDefId>, +) { + match ty.kind { + hir::TyKind::Path(hir::QPath::Resolved(_, path)) => { + if let Some(def_id) = path.res.opt_def_id() { + let check_params = def_id.as_local().map_or(true, |def_id| { + if def_id == needle { + spans.push(ty.span); + } + seen_representable.contains(&def_id) + }); + if check_params && let Some(args) = path.segments.last().unwrap().args { + let params_in_repr = tcx.params_in_repr(def_id); + for (i, arg) in args.args.iter().enumerate() { + if let hir::GenericArg::Type(ty) = arg && params_in_repr.contains(i as u32) { + find_item_ty_spans(tcx, ty, needle, spans, seen_representable); + } + } + } + } + } + hir::TyKind::Array(ty, _) => find_item_ty_spans(tcx, ty, needle, spans, seen_representable), + hir::TyKind::Tup(tys) => { + tys.iter().for_each(|ty| find_item_ty_spans(tcx, ty, needle, spans, seen_representable)) + } + _ => {} + } +} |
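Editor's sketch (not part of the upstream diff): a recurring change in the hunks above is that `EarlyBinder<T>` moves from `sty.rs` into `subst.rs`, the old `Subst` trait is deleted, and substitution becomes the inherent method `EarlyBinder::subst` (plus the new `subst_iter`/`subst_iter_copied` adapters), while `TypeFoldable`/`TypeVisitable` are explicitly not implemented for `EarlyBinder`, so callers substitute before folding or visiting. The standalone snippet below models only that calling convention, using a toy parameter substitution over a two-variant `Ty`; none of these names are the real rustc types.

```rust
/// Toy wrapper for a value whose early-bound parameters are not yet
/// substituted, mirroring the role of `ty::EarlyBinder<T>`.
struct EarlyBinder<T>(pub T);

/// Toy "type": either a concrete type or a numbered parameter.
#[derive(Copy, Clone, Debug, PartialEq)]
enum Ty {
    Bool,
    Param(usize),
}

impl EarlyBinder<Ty> {
    /// Inherent `subst`, like the diff's `EarlyBinder::subst`: call sites no
    /// longer need to import a separate `Subst` trait.
    fn subst(self, substs: &[Ty]) -> Ty {
        match self.0 {
            Ty::Param(i) => substs[i],
            other => other,
        }
    }
}

impl EarlyBinder<Vec<Ty>> {
    /// Rough analogue of the new `subst_iter`: substitute each element lazily.
    fn subst_iter<'s>(self, substs: &'s [Ty]) -> impl Iterator<Item = Ty> + 's {
        self.0.into_iter().map(move |t| EarlyBinder(t).subst(substs))
    }
}

fn main() {
    let substs = [Ty::Bool];
    assert_eq!(EarlyBinder(Ty::Param(0)).subst(&substs), Ty::Bool);

    let bound = EarlyBinder(vec![Ty::Param(0), Ty::Bool]);
    let substituted: Vec<Ty> = bound.subst_iter(&substs).collect();
    assert_eq!(substituted, vec![Ty::Bool, Ty::Bool]);
}
```

In the real compiler the work is done by a `SubstFolder` walking `GenericArg`s; the sketch only illustrates the API shape, where the binder is consumed by `subst` and iterator contents can be substituted one element at a time.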