| author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-17 12:19:13 +0000 |
|---|---|---|
| committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-17 12:19:13 +0000 |
| commit | 218caa410aa38c29984be31a5229b9fa717560ee (patch) | |
| tree | c54bd55eeb6e4c508940a30e94c0032fbd45d677 /compiler/rustc_query_system | |
| parent | Releasing progress-linux version 1.67.1+dfsg1-1~progress7.99u1. (diff) | |
| download | rustc-218caa410aa38c29984be31a5229b9fa717560ee.tar.xz rustc-218caa410aa38c29984be31a5229b9fa717560ee.zip | |
Merging upstream version 1.68.2+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_query_system')
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | compiler/rustc_query_system/src/dep_graph/debug.rs | 4 |
| -rw-r--r-- | compiler/rustc_query_system/src/dep_graph/dep_node.rs | 2 |
| -rw-r--r-- | compiler/rustc_query_system/src/dep_graph/graph.rs | 44 |
| -rw-r--r-- | compiler/rustc_query_system/src/dep_graph/serialized.rs | 23 |
| -rw-r--r-- | compiler/rustc_query_system/src/error.rs | 2 |
| -rw-r--r-- | compiler/rustc_query_system/src/ich/hcx.rs | 26 |
| -rw-r--r-- | compiler/rustc_query_system/src/query/caches.rs | 5 |
| -rw-r--r-- | compiler/rustc_query_system/src/query/config.rs | 55 |
| -rw-r--r-- | compiler/rustc_query_system/src/query/job.rs | 110 |
| -rw-r--r-- | compiler/rustc_query_system/src/query/mod.rs | 15 |
| -rw-r--r-- | compiler/rustc_query_system/src/query/plumbing.rs | 200 |
| -rw-r--r-- | compiler/rustc_query_system/src/values.rs | 10 |
12 files changed, 239 insertions, 257 deletions
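Much of the mechanical churn in the hunks below comes from upstream's switch to inline format-argument capture, e.g. `format!("{node:?}")` in place of `format!("{:?}", node)`. The following is a standalone sketch of that pattern for reference only; it is not part of the patch, and the `node` value is a made-up placeholder:

```rust
fn main() {
    let node = ("dep_node", 42);
    // Positional style, as seen on the removed (-) lines of the hunks below.
    let old = format!("{:?}", node);
    // Inline captured-identifier style, as seen on the added (+) lines.
    let new = format!("{node:?}");
    // Both forms produce identical output; only the call-site syntax differs.
    assert_eq!(old, new);
    println!("{new}");
}
```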
diff --git a/compiler/rustc_query_system/src/dep_graph/debug.rs b/compiler/rustc_query_system/src/dep_graph/debug.rs index f9f3169af..c2c9600f5 100644 --- a/compiler/rustc_query_system/src/dep_graph/debug.rs +++ b/compiler/rustc_query_system/src/dep_graph/debug.rs @@ -29,7 +29,7 @@ impl DepNodeFilter { /// Tests whether `node` meets the filter, returning true if so. pub fn test<K: DepKind>(&self, node: &DepNode<K>) -> bool { - let debug_str = format!("{:?}", node); + let debug_str = format!("{node:?}"); self.text.split('&').map(|s| s.trim()).all(|f| debug_str.contains(f)) } } @@ -46,7 +46,7 @@ impl<K: DepKind> EdgeFilter<K> { pub fn new(test: &str) -> Result<EdgeFilter<K>, Box<dyn Error>> { let parts: Vec<_> = test.split("->").collect(); if parts.len() != 2 { - Err(format!("expected a filter like `a&b -> c&d`, not `{}`", test).into()) + Err(format!("expected a filter like `a&b -> c&d`, not `{test}`").into()) } else { Ok(EdgeFilter { source: DepNodeFilter::new(parts[0]), diff --git a/compiler/rustc_query_system/src/dep_graph/dep_node.rs b/compiler/rustc_query_system/src/dep_graph/dep_node.rs index d79c5816a..9e1ca6ab5 100644 --- a/compiler/rustc_query_system/src/dep_graph/dep_node.rs +++ b/compiler/rustc_query_system/src/dep_graph/dep_node.rs @@ -120,7 +120,7 @@ pub trait DepNodeParams<Tcx: DepContext>: fmt::Debug + Sized { } fn to_debug_str(&self, _: Tcx) -> String { - format!("{:?}", self) + format!("{self:?}") } /// This method tries to recover the query key from the given `DepNode`, diff --git a/compiler/rustc_query_system/src/dep_graph/graph.rs b/compiler/rustc_query_system/src/dep_graph/graph.rs index 38c7c6cce..47b2fd8f8 100644 --- a/compiler/rustc_query_system/src/dep_graph/graph.rs +++ b/compiler/rustc_query_system/src/dep_graph/graph.rs @@ -37,7 +37,7 @@ pub struct DepGraph<K: DepKind> { } rustc_index::newtype_index! { - pub struct DepNodeIndex { .. } + pub struct DepNodeIndex {} } impl DepNodeIndex { @@ -46,7 +46,7 @@ impl DepNodeIndex { pub const FOREVER_RED_NODE: DepNodeIndex = DepNodeIndex::from_u32(1); } -impl std::convert::From<DepNodeIndex> for QueryInvocationId { +impl From<DepNodeIndex> for QueryInvocationId { #[inline] fn from(dep_node_index: DepNodeIndex) -> Self { QueryInvocationId(dep_node_index.as_u32()) @@ -316,10 +316,8 @@ impl<K: DepKind> DepGraph<K> { assert!( !self.dep_node_exists(&key), "forcing query with already existing `DepNode`\n\ - - query-key: {:?}\n\ - - dep-node: {:?}", - arg, - key + - query-key: {arg:?}\n\ + - dep-node: {key:?}" ); let task_deps = if cx.dep_context().is_eval_always(key.kind) { @@ -365,8 +363,7 @@ impl<K: DepKind> DepGraph<K> { debug_assert!( data.colors.get(prev_index).is_none(), "DepGraph::with_task() - Duplicate DepNodeColor \ - insertion for {:?}", - key + insertion for {key:?}" ); data.colors.insert(prev_index, color); @@ -447,7 +444,7 @@ impl<K: DepKind> DepGraph<K> { TaskDepsRef::Allow(deps) => deps.lock(), TaskDepsRef::Ignore => return, TaskDepsRef::Forbid => { - panic!("Illegal read of: {:?}", dep_node_index) + panic!("Illegal read of: {dep_node_index:?}") } }; let task_deps = &mut *task_deps; @@ -493,8 +490,8 @@ impl<K: DepKind> DepGraph<K> { /// This is used to remove cycles during type-checking const generic parameters. /// /// As usual in the query system, we consider the current state of the calling query - /// only depends on the list of dependencies up to now. As a consequence, the value - /// that this query gives us can only depend on those dependencies too. 
Therefore, + /// only depends on the list of dependencies up to now. As a consequence, the value + /// that this query gives us can only depend on those dependencies too. Therefore, /// it is sound to use the current dependency set for the created node. /// /// During replay, the order of the nodes is relevant in the dependency graph. @@ -513,9 +510,9 @@ impl<K: DepKind> DepGraph<K> { hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>, ) -> DepNodeIndex { if let Some(data) = self.data.as_ref() { - // The caller query has more dependencies than the node we are creating. We may + // The caller query has more dependencies than the node we are creating. We may // encounter a case where this created node is marked as green, but the caller query is - // subsequently marked as red or recomputed. In this case, we will end up feeding a + // subsequently marked as red or recomputed. In this case, we will end up feeding a // value to an existing node. // // For sanity, we still check that the loaded stable hash and the new one match. @@ -634,7 +631,7 @@ impl<K: DepKind> DepGraph<K> { if dep_node_debug.borrow().contains_key(&dep_node) { return; } - let debug_str = debug_str_gen(); + let debug_str = self.with_ignore(debug_str_gen); dep_node_debug.borrow_mut().insert(dep_node, debug_str); } @@ -824,12 +821,13 @@ impl<K: DepKind> DepGraph<K> { debug_assert!( data.colors.get(prev_dep_node_index).is_none(), "DepGraph::try_mark_previous_green() - Duplicate DepNodeColor \ - insertion for {:?}", - dep_node + insertion for {dep_node:?}" ); if !side_effects.is_empty() { - self.emit_side_effects(qcx, data, dep_node_index, side_effects); + self.with_query_deserialization(|| { + self.emit_side_effects(qcx, data, dep_node_index, side_effects) + }); } // ... and finally storing a "Green" entry in the color map. @@ -974,7 +972,7 @@ pub struct WorkProduct { // Index type for `DepNodeData`'s edges. rustc_index::newtype_index! { - struct EdgeIndex { .. } + struct EdgeIndex {} } /// `CurrentDepGraph` stores the dependency graph for the current session. It @@ -982,7 +980,7 @@ rustc_index::newtype_index! { /// graph: they are only added. /// /// The nodes in it are identified by a `DepNodeIndex`. We avoid keeping the nodes -/// in memory. This is important, because these graph structures are some of the +/// in memory. This is important, because these graph structures are some of the /// largest in the compiler. 
/// /// For this reason, we avoid storing `DepNode`s more than once as map @@ -1162,7 +1160,7 @@ impl<K: DepKind> CurrentDepGraph<K> { if let Some(fingerprint) = fingerprint { if fingerprint == prev_graph.fingerprint_by_index(prev_index) { if print_status { - eprintln!("[task::green] {:?}", key); + eprintln!("[task::green] {key:?}"); } // This is a green node: it existed in the previous compilation, @@ -1184,7 +1182,7 @@ impl<K: DepKind> CurrentDepGraph<K> { (dep_node_index, Some((prev_index, DepNodeColor::Green(dep_node_index)))) } else { if print_status { - eprintln!("[task::red] {:?}", key); + eprintln!("[task::red] {key:?}"); } // This is a red node: it existed in the previous compilation, its query @@ -1207,7 +1205,7 @@ impl<K: DepKind> CurrentDepGraph<K> { } } else { if print_status { - eprintln!("[task::unknown] {:?}", key); + eprintln!("[task::unknown] {key:?}"); } // This is a red node, effectively: it existed in the previous compilation @@ -1232,7 +1230,7 @@ impl<K: DepKind> CurrentDepGraph<K> { } } else { if print_status { - eprintln!("[task::new] {:?}", key); + eprintln!("[task::new] {key:?}"); } let fingerprint = fingerprint.unwrap_or(Fingerprint::ZERO); diff --git a/compiler/rustc_query_system/src/dep_graph/serialized.rs b/compiler/rustc_query_system/src/dep_graph/serialized.rs index 3b20ec70d..a81595b24 100644 --- a/compiler/rustc_query_system/src/dep_graph/serialized.rs +++ b/compiler/rustc_query_system/src/dep_graph/serialized.rs @@ -1,14 +1,14 @@ //! The data that we will serialize and deserialize. //! //! The dep-graph is serialized as a sequence of NodeInfo, with the dependencies -//! specified inline. The total number of nodes and edges are stored as the last +//! specified inline. The total number of nodes and edges are stored as the last //! 16 bytes of the file, so we can find them easily at decoding time. //! //! The serialisation is performed on-demand when each node is emitted. Using this //! scheme, we do not need to keep the current graph in memory. //! //! The deserialization is performed manually, in order to convert from the stored -//! sequence of NodeInfos to the different arrays in SerializedDepGraph. Since the +//! sequence of NodeInfos to the different arrays in SerializedDepGraph. Since the //! node and edge count are stored at the end of the file, all the arrays can be //! pre-allocated with the right length. @@ -22,15 +22,13 @@ use rustc_index::vec::{Idx, IndexVec}; use rustc_serialize::opaque::{FileEncodeResult, FileEncoder, IntEncodedWithFixedSize, MemDecoder}; use rustc_serialize::{Decodable, Decoder, Encodable}; use smallvec::SmallVec; -use std::convert::TryInto; // The maximum value of `SerializedDepNodeIndex` leaves the upper two bits // unused so that we can store multiple index types in `CompressedHybridIndex`, // and use those bits to encode which index type it contains. rustc_index::newtype_index! { - pub struct SerializedDepNodeIndex { - MAX = 0x7FFF_FFFF - } + #[max = 0x7FFF_FFFF] + pub struct SerializedDepNodeIndex {} } /// Data for use when recompiling the **current crate**. 
@@ -272,17 +270,14 @@ impl<K: DepKind + Encodable<FileEncoder>> GraphEncoder<K> { eprintln!("[incremental]"); eprintln!("[incremental] DepGraph Statistics"); - eprintln!("{}", SEPARATOR); + eprintln!("{SEPARATOR}"); eprintln!("[incremental]"); eprintln!("[incremental] Total Node Count: {}", status.total_node_count); eprintln!("[incremental] Total Edge Count: {}", status.total_edge_count); if cfg!(debug_assertions) { - eprintln!("[incremental] Total Edge Reads: {}", total_read_count); - eprintln!( - "[incremental] Total Duplicate Edge Reads: {}", - total_duplicate_read_count - ); + eprintln!("[incremental] Total Edge Reads: {total_read_count}"); + eprintln!("[incremental] Total Duplicate Edge Reads: {total_duplicate_read_count}"); } eprintln!("[incremental]"); @@ -290,7 +285,7 @@ impl<K: DepKind + Encodable<FileEncoder>> GraphEncoder<K> { "[incremental] {:<36}| {:<17}| {:<12}| {:<17}|", "Node Kind", "Node Frequency", "Node Count", "Avg. Edge Count" ); - eprintln!("{}", SEPARATOR); + eprintln!("{SEPARATOR}"); for stat in stats { let node_kind_ratio = @@ -306,7 +301,7 @@ impl<K: DepKind + Encodable<FileEncoder>> GraphEncoder<K> { ); } - eprintln!("{}", SEPARATOR); + eprintln!("{SEPARATOR}"); eprintln!("[incremental]"); } } diff --git a/compiler/rustc_query_system/src/error.rs b/compiler/rustc_query_system/src/error.rs index 7a20eaceb..cf2f04c74 100644 --- a/compiler/rustc_query_system/src/error.rs +++ b/compiler/rustc_query_system/src/error.rs @@ -49,7 +49,7 @@ pub struct Cycle { #[primary_span] pub span: Span, pub stack_bottom: String, - #[subdiagnostic(eager)] + #[subdiagnostic] pub cycle_stack: Vec<CycleStack>, #[subdiagnostic] pub stack_count: StackCount, diff --git a/compiler/rustc_query_system/src/ich/hcx.rs b/compiler/rustc_query_system/src/ich/hcx.rs index 6378ec108..163da59ed 100644 --- a/compiler/rustc_query_system/src/ich/hcx.rs +++ b/compiler/rustc_query_system/src/ich/hcx.rs @@ -6,9 +6,8 @@ use rustc_data_structures::stable_hasher::{HashStable, HashingControls, StableHa use rustc_data_structures::sync::Lrc; use rustc_hir as hir; use rustc_hir::def_id::{DefId, LocalDefId}; -use rustc_hir::definitions::{DefPathHash, Definitions}; -use rustc_index::vec::IndexVec; -use rustc_session::cstore::CrateStore; +use rustc_hir::definitions::DefPathHash; +use rustc_session::cstore::Untracked; use rustc_session::Session; use rustc_span::source_map::SourceMap; use rustc_span::symbol::Symbol; @@ -20,9 +19,7 @@ use rustc_span::{BytePos, CachingSourceMapView, SourceFile, Span, SpanData, DUMM /// things (e.g., each `DefId`/`DefPath` is only hashed once). #[derive(Clone)] pub struct StableHashingContext<'a> { - definitions: &'a Definitions, - cstore: &'a dyn CrateStore, - source_span: &'a IndexVec<LocalDefId, Span>, + untracked: &'a Untracked, // The value of `-Z incremental-ignore-spans`. 
// This field should only be used by `unstable_opts_incremental_ignore_span` incremental_ignore_spans: bool, @@ -49,19 +46,12 @@ pub(super) enum BodyResolver<'tcx> { impl<'a> StableHashingContext<'a> { #[inline] - pub fn new( - sess: &'a Session, - definitions: &'a Definitions, - cstore: &'a dyn CrateStore, - source_span: &'a IndexVec<LocalDefId, Span>, - ) -> Self { + pub fn new(sess: &'a Session, untracked: &'a Untracked) -> Self { let hash_spans_initial = !sess.opts.unstable_opts.incremental_ignore_spans; StableHashingContext { body_resolver: BodyResolver::Forbidden, - definitions, - cstore, - source_span, + untracked, incremental_ignore_spans: sess.opts.unstable_opts.incremental_ignore_spans, caching_source_map: None, raw_source_map: sess.source_map(), @@ -100,13 +90,13 @@ impl<'a> StableHashingContext<'a> { if let Some(def_id) = def_id.as_local() { self.local_def_path_hash(def_id) } else { - self.cstore.def_path_hash(def_id) + self.untracked.cstore.def_path_hash(def_id) } } #[inline] pub fn local_def_path_hash(&self, def_id: LocalDefId) -> DefPathHash { - self.definitions.def_path_hash(def_id) + self.untracked.definitions.read().def_path_hash(def_id) } #[inline] @@ -156,7 +146,7 @@ impl<'a> rustc_span::HashStableContext for StableHashingContext<'a> { #[inline] fn def_span(&self, def_id: LocalDefId) -> Span { - *self.source_span.get(def_id).unwrap_or(&DUMMY_SP) + *self.untracked.source_span.get(def_id).unwrap_or(&DUMMY_SP) } #[inline] diff --git a/compiler/rustc_query_system/src/query/caches.rs b/compiler/rustc_query_system/src/query/caches.rs index 4c4680b5d..77d0d0314 100644 --- a/compiler/rustc_query_system/src/query/caches.rs +++ b/compiler/rustc_query_system/src/query/caches.rs @@ -9,7 +9,6 @@ use rustc_data_structures::sharded::Sharded; use rustc_data_structures::sync::Lock; use rustc_data_structures::sync::WorkerLocal; use rustc_index::vec::{Idx, IndexVec}; -use std::default::Default; use std::fmt::Debug; use std::hash::Hash; use std::marker::PhantomData; @@ -117,7 +116,7 @@ where let mut lock = self.cache.get_shard_by_value(&key).lock(); #[cfg(not(parallel_compiler))] let mut lock = self.cache.lock(); - // We may be overwriting another value. This is all right, since the dep-graph + // We may be overwriting another value. This is all right, since the dep-graph // will check that the fingerprint matches. lock.insert(key, (value.clone(), index)); value @@ -204,7 +203,7 @@ where let mut lock = self.cache.get_shard_by_value(&key).lock(); #[cfg(not(parallel_compiler))] let mut lock = self.cache.lock(); - // We may be overwriting another value. This is all right, since the dep-graph + // We may be overwriting another value. This is all right, since the dep-graph // will check that the fingerprint matches. lock.insert(key, value); &value.0 diff --git a/compiler/rustc_query_system/src/query/config.rs b/compiler/rustc_query_system/src/query/config.rs index 7d1b62ab1..8c0330e43 100644 --- a/compiler/rustc_query_system/src/query/config.rs +++ b/compiler/rustc_query_system/src/query/config.rs @@ -1,7 +1,6 @@ //! Query configuration and description traits. 
-use crate::dep_graph::DepNode; -use crate::dep_graph::SerializedDepNodeIndex; +use crate::dep_graph::{DepNode, DepNodeParams, SerializedDepNodeIndex}; use crate::error::HandleCycleError; use crate::ich::StableHashingContext; use crate::query::caches::QueryCache; @@ -11,17 +10,23 @@ use rustc_data_structures::fingerprint::Fingerprint; use std::fmt::Debug; use std::hash::Hash; +pub type HashResult<Qcx, Q> = + Option<fn(&mut StableHashingContext<'_>, &<Q as QueryConfig<Qcx>>::Value) -> Fingerprint>; + +pub type TryLoadFromDisk<Qcx, Q> = + Option<fn(Qcx, SerializedDepNodeIndex) -> Option<<Q as QueryConfig<Qcx>>::Value>>; + pub trait QueryConfig<Qcx: QueryContext> { const NAME: &'static str; - type Key: Eq + Hash + Clone + Debug; + type Key: DepNodeParams<Qcx::DepContext> + Eq + Hash + Clone + Debug; type Value: Debug; type Stored: Debug + Clone + std::borrow::Borrow<Self::Value>; type Cache: QueryCache<Key = Self::Key, Stored = Self::Stored, Value = Self::Value>; // Don't use this method to access query results, instead use the methods on TyCtxt - fn query_state<'a>(tcx: Qcx) -> &'a QueryState<Self::Key> + fn query_state<'a>(tcx: Qcx) -> &'a QueryState<Self::Key, Qcx::DepKind> where Qcx: 'a; @@ -30,39 +35,27 @@ pub trait QueryConfig<Qcx: QueryContext> { where Qcx: 'a; - // Don't use this method to compute query results, instead use the methods on TyCtxt - fn make_vtable(tcx: Qcx, key: &Self::Key) -> QueryVTable<Qcx, Self::Key, Self::Value>; - fn cache_on_disk(tcx: Qcx::DepContext, key: &Self::Key) -> bool; // Don't use this method to compute query results, instead use the methods on TyCtxt fn execute_query(tcx: Qcx::DepContext, k: Self::Key) -> Self::Stored; -} -#[derive(Copy, Clone)] -pub struct QueryVTable<Qcx: QueryContext, K, V> { - pub anon: bool, - pub dep_kind: Qcx::DepKind, - pub eval_always: bool, - pub depth_limit: bool, - pub feedable: bool, - - pub compute: fn(Qcx::DepContext, K) -> V, - pub hash_result: Option<fn(&mut StableHashingContext<'_>, &V) -> Fingerprint>, - pub handle_cycle_error: HandleCycleError, - // NOTE: this is also `None` if `cache_on_disk()` returns false, not just if it's unsupported by the query - pub try_load_from_disk: Option<fn(Qcx, SerializedDepNodeIndex) -> Option<V>>, -} + fn compute(tcx: Qcx, key: &Self::Key) -> fn(Qcx::DepContext, Self::Key) -> Self::Value; -impl<Qcx: QueryContext, K, V> QueryVTable<Qcx, K, V> { - pub(crate) fn to_dep_node(&self, tcx: Qcx::DepContext, key: &K) -> DepNode<Qcx::DepKind> - where - K: crate::dep_graph::DepNodeParams<Qcx::DepContext>, - { - DepNode::construct(tcx, self.dep_kind, key) - } + fn try_load_from_disk(qcx: Qcx, idx: &Self::Key) -> TryLoadFromDisk<Qcx, Self>; + + const ANON: bool; + const EVAL_ALWAYS: bool; + const DEPTH_LIMIT: bool; + const FEEDABLE: bool; + + const DEP_KIND: Qcx::DepKind; + const HANDLE_CYCLE_ERROR: HandleCycleError; + + const HASH_RESULT: HashResult<Qcx, Self>; - pub(crate) fn compute(&self, tcx: Qcx::DepContext, key: K) -> V { - (self.compute)(tcx, key) + // Just here for convernience and checking that the key matches the kind, don't override this. 
+ fn construct_dep_node(tcx: Qcx::DepContext, key: &Self::Key) -> DepNode<Qcx::DepKind> { + DepNode::construct(tcx, Self::DEP_KIND, key) } } diff --git a/compiler/rustc_query_system/src/query/job.rs b/compiler/rustc_query_system/src/query/job.rs index 49bbcf578..a5a2f0093 100644 --- a/compiler/rustc_query_system/src/query/job.rs +++ b/compiler/rustc_query_system/src/query/job.rs @@ -1,6 +1,8 @@ +use crate::dep_graph::DepKind; use crate::error::CycleStack; use crate::query::plumbing::CycleError; use crate::query::{QueryContext, QueryStackFrame}; +use core::marker::PhantomData; use rustc_data_structures::fx::FxHashMap; use rustc_errors::{ @@ -22,54 +24,54 @@ use { rustc_data_structures::{jobserver, OnDrop}, rustc_rayon_core as rayon_core, rustc_span::DUMMY_SP, - std::iter::{self, FromIterator}, - std::{mem, process}, + std::iter, + std::process, }; /// Represents a span and a query key. #[derive(Clone, Debug)] -pub struct QueryInfo { +pub struct QueryInfo<D: DepKind> { /// The span corresponding to the reason for which this query was required. pub span: Span, - pub query: QueryStackFrame, + pub query: QueryStackFrame<D>, } -pub type QueryMap = FxHashMap<QueryJobId, QueryJobInfo>; +pub type QueryMap<D> = FxHashMap<QueryJobId, QueryJobInfo<D>>; /// A value uniquely identifying an active query job. #[derive(Copy, Clone, Eq, PartialEq, Hash)] pub struct QueryJobId(pub NonZeroU64); impl QueryJobId { - fn query(self, map: &QueryMap) -> QueryStackFrame { + fn query<D: DepKind>(self, map: &QueryMap<D>) -> QueryStackFrame<D> { map.get(&self).unwrap().query.clone() } #[cfg(parallel_compiler)] - fn span(self, map: &QueryMap) -> Span { + fn span<D: DepKind>(self, map: &QueryMap<D>) -> Span { map.get(&self).unwrap().job.span } #[cfg(parallel_compiler)] - fn parent(self, map: &QueryMap) -> Option<QueryJobId> { + fn parent<D: DepKind>(self, map: &QueryMap<D>) -> Option<QueryJobId> { map.get(&self).unwrap().job.parent } #[cfg(parallel_compiler)] - fn latch<'a>(self, map: &'a QueryMap) -> Option<&'a QueryLatch> { + fn latch<D: DepKind>(self, map: &QueryMap<D>) -> Option<&QueryLatch<D>> { map.get(&self).unwrap().job.latch.as_ref() } } #[derive(Clone)] -pub struct QueryJobInfo { - pub query: QueryStackFrame, - pub job: QueryJob, +pub struct QueryJobInfo<D: DepKind> { + pub query: QueryStackFrame<D>, + pub job: QueryJob<D>, } /// Represents an active query job. #[derive(Clone)] -pub struct QueryJob { +pub struct QueryJob<D: DepKind> { pub id: QueryJobId, /// The span corresponding to the reason for which this query was required. @@ -80,10 +82,11 @@ pub struct QueryJob { /// The latch that is used to wait on this job. #[cfg(parallel_compiler)] - latch: Option<QueryLatch>, + latch: Option<QueryLatch<D>>, + spooky: core::marker::PhantomData<D>, } -impl QueryJob { +impl<D: DepKind> QueryJob<D> { /// Creates a new query job. 
#[inline] pub fn new(id: QueryJobId, span: Span, parent: Option<QueryJobId>) -> Self { @@ -93,11 +96,12 @@ impl QueryJob { parent, #[cfg(parallel_compiler)] latch: None, + spooky: PhantomData, } } #[cfg(parallel_compiler)] - pub(super) fn latch(&mut self) -> QueryLatch { + pub(super) fn latch(&mut self) -> QueryLatch<D> { if self.latch.is_none() { self.latch = Some(QueryLatch::new()); } @@ -123,12 +127,12 @@ impl QueryJobId { #[cold] #[inline(never)] #[cfg(not(parallel_compiler))] - pub(super) fn find_cycle_in_stack( + pub(super) fn find_cycle_in_stack<D: DepKind>( &self, - query_map: QueryMap, + query_map: QueryMap<D>, current_job: &Option<QueryJobId>, span: Span, - ) -> CycleError { + ) -> CycleError<D> { // Find the waitee amongst `current_job` parents let mut cycle = Vec::new(); let mut current_job = Option::clone(current_job); @@ -162,14 +166,18 @@ impl QueryJobId { #[cold] #[inline(never)] - pub fn try_find_layout_root(&self, query_map: QueryMap) -> Option<(QueryJobInfo, usize)> { + pub fn try_find_layout_root<D: DepKind>( + &self, + query_map: QueryMap<D>, + ) -> Option<(QueryJobInfo<D>, usize)> { let mut last_layout = None; let mut current_id = Some(*self); let mut depth = 0; while let Some(id) = current_id { let info = query_map.get(&id).unwrap(); - if info.query.name == "layout_of" { + // FIXME: This string comparison should probably not be done. + if format!("{:?}", info.query.dep_kind) == "layout_of" { depth += 1; last_layout = Some((info.clone(), depth)); } @@ -180,15 +188,15 @@ impl QueryJobId { } #[cfg(parallel_compiler)] -struct QueryWaiter { +struct QueryWaiter<D: DepKind> { query: Option<QueryJobId>, condvar: Condvar, span: Span, - cycle: Lock<Option<CycleError>>, + cycle: Lock<Option<CycleError<D>>>, } #[cfg(parallel_compiler)] -impl QueryWaiter { +impl<D: DepKind> QueryWaiter<D> { fn notify(&self, registry: &rayon_core::Registry) { rayon_core::mark_unblocked(registry); self.condvar.notify_one(); @@ -196,19 +204,19 @@ impl QueryWaiter { } #[cfg(parallel_compiler)] -struct QueryLatchInfo { +struct QueryLatchInfo<D: DepKind> { complete: bool, - waiters: Vec<Lrc<QueryWaiter>>, + waiters: Vec<Lrc<QueryWaiter<D>>>, } #[cfg(parallel_compiler)] #[derive(Clone)] -pub(super) struct QueryLatch { - info: Lrc<Mutex<QueryLatchInfo>>, +pub(super) struct QueryLatch<D: DepKind> { + info: Lrc<Mutex<QueryLatchInfo<D>>>, } #[cfg(parallel_compiler)] -impl QueryLatch { +impl<D: DepKind> QueryLatch<D> { fn new() -> Self { QueryLatch { info: Lrc::new(Mutex::new(QueryLatchInfo { complete: false, waiters: Vec::new() })), @@ -216,7 +224,11 @@ impl QueryLatch { } /// Awaits for the query job to complete. - pub(super) fn wait_on(&self, query: Option<QueryJobId>, span: Span) -> Result<(), CycleError> { + pub(super) fn wait_on( + &self, + query: Option<QueryJobId>, + span: Span, + ) -> Result<(), CycleError<D>> { let waiter = Lrc::new(QueryWaiter { query, span, cycle: Lock::new(None), condvar: Condvar::new() }); self.wait_on_inner(&waiter); @@ -231,7 +243,7 @@ impl QueryLatch { } /// Awaits the caller on this latch by blocking the current thread. - fn wait_on_inner(&self, waiter: &Lrc<QueryWaiter>) { + fn wait_on_inner(&self, waiter: &Lrc<QueryWaiter<D>>) { let mut info = self.info.lock(); if !info.complete { // We push the waiter on to the `waiters` list. 
It can be accessed inside @@ -247,7 +259,7 @@ impl QueryLatch { jobserver::release_thread(); waiter.condvar.wait(&mut info); // Release the lock before we potentially block in `acquire_thread` - mem::drop(info); + drop(info); jobserver::acquire_thread(); } } @@ -265,7 +277,7 @@ impl QueryLatch { /// Removes a single waiter from the list of waiters. /// This is used to break query cycles. - fn extract_waiter(&self, waiter: usize) -> Lrc<QueryWaiter> { + fn extract_waiter(&self, waiter: usize) -> Lrc<QueryWaiter<D>> { let mut info = self.info.lock(); debug_assert!(!info.complete); // Remove the waiter from the list of waiters @@ -287,9 +299,14 @@ type Waiter = (QueryJobId, usize); /// required information to resume the waiter. /// If all `visit` calls returns None, this function also returns None. #[cfg(parallel_compiler)] -fn visit_waiters<F>(query_map: &QueryMap, query: QueryJobId, mut visit: F) -> Option<Option<Waiter>> +fn visit_waiters<F, D>( + query_map: &QueryMap<D>, + query: QueryJobId, + mut visit: F, +) -> Option<Option<Waiter>> where F: FnMut(Span, QueryJobId) -> Option<Option<Waiter>>, + D: DepKind, { // Visit the parent query which is a non-resumable waiter since it's on the same stack if let Some(parent) = query.parent(query_map) { @@ -318,8 +335,8 @@ where /// If a cycle is detected, this initial value is replaced with the span causing /// the cycle. #[cfg(parallel_compiler)] -fn cycle_check( - query_map: &QueryMap, +fn cycle_check<D: DepKind>( + query_map: &QueryMap<D>, query: QueryJobId, span: Span, stack: &mut Vec<(Span, QueryJobId)>, @@ -359,8 +376,8 @@ fn cycle_check( /// from `query` without going through any of the queries in `visited`. /// This is achieved with a depth first search. #[cfg(parallel_compiler)] -fn connected_to_root( - query_map: &QueryMap, +fn connected_to_root<D: DepKind>( + query_map: &QueryMap<D>, query: QueryJobId, visited: &mut FxHashSet<QueryJobId>, ) -> bool { @@ -382,9 +399,10 @@ fn connected_to_root( // Deterministically pick an query from a list #[cfg(parallel_compiler)] -fn pick_query<'a, T, F>(query_map: &QueryMap, queries: &'a [T], f: F) -> &'a T +fn pick_query<'a, T, F, D>(query_map: &QueryMap<D>, queries: &'a [T], f: F) -> &'a T where F: Fn(&T) -> (Span, QueryJobId), + D: DepKind, { // Deterministically pick an entry point // FIXME: Sort this instead @@ -408,10 +426,10 @@ where /// If a cycle was not found, the starting query is removed from `jobs` and /// the function returns false. #[cfg(parallel_compiler)] -fn remove_cycle( - query_map: &QueryMap, +fn remove_cycle<D: DepKind>( + query_map: &QueryMap<D>, jobs: &mut Vec<QueryJobId>, - wakelist: &mut Vec<Lrc<QueryWaiter>>, + wakelist: &mut Vec<Lrc<QueryWaiter<D>>>, ) -> bool { let mut visited = FxHashSet::default(); let mut stack = Vec::new(); @@ -513,7 +531,7 @@ fn remove_cycle( /// There may be multiple cycles involved in a deadlock, so this searches /// all active queries for cycles before finally resuming all the waiters at once. 
#[cfg(parallel_compiler)] -pub fn deadlock(query_map: QueryMap, registry: &rayon_core::Registry) { +pub fn deadlock<D: DepKind>(query_map: QueryMap<D>, registry: &rayon_core::Registry) { let on_panic = OnDrop(|| { eprintln!("deadlock handler panicked, aborting process"); process::abort(); @@ -549,9 +567,9 @@ pub fn deadlock(query_map: QueryMap, registry: &rayon_core::Registry) { #[inline(never)] #[cold] -pub(crate) fn report_cycle<'a>( +pub(crate) fn report_cycle<'a, D: DepKind>( sess: &'a Session, - CycleError { usage, cycle: stack }: &CycleError, + CycleError { usage, cycle: stack }: &CycleError<D>, ) -> DiagnosticBuilder<'a, ErrorGuaranteed> { assert!(!stack.is_empty()); @@ -617,7 +635,7 @@ pub fn print_query_stack<Qcx: QueryContext>( }; let mut diag = Diagnostic::new( Level::FailureNote, - &format!("#{} [{}] {}", i, query_info.query.name, query_info.query.description), + &format!("#{} [{:?}] {}", i, query_info.query.dep_kind, query_info.query.description), ); diag.span = query_info.job.span.into(); handler.force_print_diagnostic(diag); diff --git a/compiler/rustc_query_system/src/query/mod.rs b/compiler/rustc_query_system/src/query/mod.rs index 7f3dc50d2..d308af192 100644 --- a/compiler/rustc_query_system/src/query/mod.rs +++ b/compiler/rustc_query_system/src/query/mod.rs @@ -12,8 +12,9 @@ pub use self::caches::{ }; mod config; -pub use self::config::{QueryConfig, QueryVTable}; +pub use self::config::{HashResult, QueryConfig, TryLoadFromDisk}; +use crate::dep_graph::DepKind; use crate::dep_graph::{DepNodeIndex, HasDepContext, SerializedDepNodeIndex}; use rustc_data_structures::sync::Lock; use rustc_errors::Diagnostic; @@ -26,37 +27,37 @@ use thin_vec::ThinVec; /// /// This is mostly used in case of cycles for error reporting. #[derive(Clone, Debug)] -pub struct QueryStackFrame { - pub name: &'static str, +pub struct QueryStackFrame<D: DepKind> { pub description: String, span: Option<Span>, pub def_id: Option<DefId>, pub def_kind: Option<DefKind>, pub ty_adt_id: Option<DefId>, + pub dep_kind: D, /// This hash is used to deterministically pick /// a query to remove cycles in the parallel compiler. #[cfg(parallel_compiler)] hash: u64, } -impl QueryStackFrame { +impl<D: DepKind> QueryStackFrame<D> { #[inline] pub fn new( - name: &'static str, description: String, span: Option<Span>, def_id: Option<DefId>, def_kind: Option<DefKind>, + dep_kind: D, ty_adt_id: Option<DefId>, _hash: impl FnOnce() -> u64, ) -> Self { Self { - name, description, span, def_id, def_kind, ty_adt_id, + dep_kind, #[cfg(parallel_compiler)] hash: _hash(), } @@ -104,7 +105,7 @@ pub trait QueryContext: HasDepContext { /// Get the query information from the TLS context. fn current_query_job(&self) -> Option<QueryJobId>; - fn try_collect_active_jobs(&self) -> Option<QueryMap>; + fn try_collect_active_jobs(&self) -> Option<QueryMap<Self::DepKind>>; /// Load side effects associated to the node in the previous session. fn load_side_effects(&self, prev_dep_node_index: SerializedDepNodeIndex) -> QuerySideEffects; diff --git a/compiler/rustc_query_system/src/query/plumbing.rs b/compiler/rustc_query_system/src/query/plumbing.rs index 848fa67e3..b3b939eae 100644 --- a/compiler/rustc_query_system/src/query/plumbing.rs +++ b/compiler/rustc_query_system/src/query/plumbing.rs @@ -2,10 +2,9 @@ //! generate the actual methods on tcx which find and execute the provider, //! manage the caches, and so forth. 
-use crate::dep_graph::{DepContext, DepNode, DepNodeIndex, DepNodeParams}; +use crate::dep_graph::{DepContext, DepKind, DepNode, DepNodeIndex}; use crate::ich::StableHashingContext; use crate::query::caches::QueryCache; -use crate::query::config::QueryVTable; use crate::query::job::{report_cycle, QueryInfo, QueryJob, QueryJobId, QueryJobInfo}; use crate::query::{QueryContext, QueryMap, QuerySideEffects, QueryStackFrame}; use crate::values::Value; @@ -31,26 +30,27 @@ use thin_vec::ThinVec; use super::QueryConfig; -pub struct QueryState<K> { +pub struct QueryState<K, D: DepKind> { #[cfg(parallel_compiler)] - active: Sharded<FxHashMap<K, QueryResult>>, + active: Sharded<FxHashMap<K, QueryResult<D>>>, #[cfg(not(parallel_compiler))] - active: Lock<FxHashMap<K, QueryResult>>, + active: Lock<FxHashMap<K, QueryResult<D>>>, } /// Indicates the state of a query for a given key in a query map. -enum QueryResult { +enum QueryResult<D: DepKind> { /// An already executing query. The query job can be used to await for its completion. - Started(QueryJob), + Started(QueryJob<D>), /// The query panicked. Queries trying to wait on this will raise a fatal error which will /// silently panic. Poisoned, } -impl<K> QueryState<K> +impl<K, D> QueryState<K, D> where K: Eq + Hash + Clone + Debug, + D: DepKind, { pub fn all_inactive(&self) -> bool { #[cfg(parallel_compiler)] @@ -67,8 +67,8 @@ where pub fn try_collect_active_jobs<Qcx: Copy>( &self, qcx: Qcx, - make_query: fn(Qcx, K) -> QueryStackFrame, - jobs: &mut QueryMap, + make_query: fn(Qcx, K) -> QueryStackFrame<D>, + jobs: &mut QueryMap<D>, ) -> Option<()> { #[cfg(parallel_compiler)] { @@ -102,34 +102,34 @@ where } } -impl<K> Default for QueryState<K> { - fn default() -> QueryState<K> { +impl<K, D: DepKind> Default for QueryState<K, D> { + fn default() -> QueryState<K, D> { QueryState { active: Default::default() } } } /// A type representing the responsibility to execute the job in the `job` field. /// This will poison the relevant query if dropped. 
-struct JobOwner<'tcx, K> +struct JobOwner<'tcx, K, D: DepKind> where K: Eq + Hash + Clone, { - state: &'tcx QueryState<K>, + state: &'tcx QueryState<K, D>, key: K, id: QueryJobId, } #[cold] #[inline(never)] -fn mk_cycle<Qcx, V, R>( +fn mk_cycle<Qcx, V, R, D: DepKind>( qcx: Qcx, - cycle_error: CycleError, + cycle_error: CycleError<D>, handler: HandleCycleError, cache: &dyn crate::query::QueryStorage<Value = V, Stored = R>, ) -> R where - Qcx: QueryContext, - V: std::fmt::Debug + Value<Qcx::DepContext>, + Qcx: QueryContext + crate::query::HasDepContext<DepKind = D>, + V: std::fmt::Debug + Value<Qcx::DepContext, Qcx::DepKind>, R: Clone, { let error = report_cycle(qcx.dep_context().sess(), &cycle_error); @@ -139,13 +139,13 @@ where fn handle_cycle_error<Tcx, V>( tcx: Tcx, - cycle_error: &CycleError, + cycle_error: &CycleError<Tcx::DepKind>, mut error: DiagnosticBuilder<'_, ErrorGuaranteed>, handler: HandleCycleError, ) -> V where Tcx: DepContext, - V: Value<Tcx>, + V: Value<Tcx, Tcx::DepKind>, { use HandleCycleError::*; match handler { @@ -165,7 +165,7 @@ where } } -impl<'tcx, K> JobOwner<'tcx, K> +impl<'tcx, K, D: DepKind> JobOwner<'tcx, K, D> where K: Eq + Hash + Clone, { @@ -180,12 +180,12 @@ where #[inline(always)] fn try_start<'b, Qcx>( qcx: &'b Qcx, - state: &'b QueryState<K>, + state: &'b QueryState<K, Qcx::DepKind>, span: Span, key: K, - ) -> TryGetJob<'b, K> + ) -> TryGetJob<'b, K, D> where - Qcx: QueryContext, + Qcx: QueryContext + crate::query::HasDepContext<DepKind = D>, { #[cfg(parallel_compiler)] let mut state_lock = state.active.get_shard_by_value(&key).lock(); @@ -280,9 +280,10 @@ where } } -impl<'tcx, K> Drop for JobOwner<'tcx, K> +impl<'tcx, K, D> Drop for JobOwner<'tcx, K, D> where K: Eq + Hash + Clone, + D: DepKind, { #[inline(never)] #[cold] @@ -308,19 +309,20 @@ where } #[derive(Clone)] -pub(crate) struct CycleError { +pub(crate) struct CycleError<D: DepKind> { /// The query and related span that uses the cycle. - pub usage: Option<(Span, QueryStackFrame)>, - pub cycle: Vec<QueryInfo>, + pub usage: Option<(Span, QueryStackFrame<D>)>, + pub cycle: Vec<QueryInfo<D>>, } /// The result of `try_start`. -enum TryGetJob<'tcx, K> +enum TryGetJob<'tcx, K, D> where K: Eq + Hash + Clone, + D: DepKind, { /// The query is not yet started. Contains a guard to the cache eventually used to start it. - NotYetStarted(JobOwner<'tcx, K>), + NotYetStarted(JobOwner<'tcx, K, D>), /// The query was already completed. /// Returns the result of the query and its dep-node index @@ -329,7 +331,7 @@ where JobCompleted(TimingGuard<'tcx>), /// Trying to execute the query resulted in a cycle. - Cycle(CycleError), + Cycle(CycleError<D>), } /// Checks if the query is already computed and in the cache. @@ -337,9 +339,9 @@ where /// which will be used if the query is not in the cache and we need /// to compute it. 
#[inline] -pub fn try_get_cached<'a, Tcx, C, R, OnHit>( +pub fn try_get_cached<Tcx, C, R, OnHit>( tcx: Tcx, - cache: &'a C, + cache: &C, key: &C::Key, // `on_hit` can be called while holding a lock to the query cache on_hit: OnHit, @@ -358,36 +360,34 @@ where }) } -fn try_execute_query<Qcx, C>( +fn try_execute_query<Q, Qcx>( qcx: Qcx, - state: &QueryState<C::Key>, - cache: &C, + state: &QueryState<Q::Key, Qcx::DepKind>, + cache: &Q::Cache, span: Span, - key: C::Key, + key: Q::Key, dep_node: Option<DepNode<Qcx::DepKind>>, - query: &QueryVTable<Qcx, C::Key, C::Value>, -) -> (C::Stored, Option<DepNodeIndex>) +) -> (Q::Stored, Option<DepNodeIndex>) where - C: QueryCache, - C::Key: Clone + DepNodeParams<Qcx::DepContext>, - C::Value: Value<Qcx::DepContext>, - C::Stored: Debug + std::borrow::Borrow<C::Value>, + Q: QueryConfig<Qcx>, Qcx: QueryContext, { - match JobOwner::<'_, C::Key>::try_start(&qcx, state, span, key.clone()) { + match JobOwner::<'_, Q::Key, Qcx::DepKind>::try_start(&qcx, state, span, key.clone()) { TryGetJob::NotYetStarted(job) => { - let (result, dep_node_index) = execute_job(qcx, key.clone(), dep_node, query, job.id); - if query.feedable { + let (result, dep_node_index) = + execute_job::<Q, Qcx>(qcx, key.clone(), dep_node, job.id); + if Q::FEEDABLE { // We may have put a value inside the cache from inside the execution. // Verify that it has the same hash as what we have now, to ensure consistency. let _ = cache.lookup(&key, |cached_result, _| { - let hasher = query.hash_result.expect("feedable forbids no_hash"); + let hasher = Q::HASH_RESULT.expect("feedable forbids no_hash"); + let old_hash = qcx.dep_context().with_stable_hashing_context(|mut hcx| hasher(&mut hcx, cached_result.borrow())); let new_hash = qcx.dep_context().with_stable_hashing_context(|mut hcx| hasher(&mut hcx, &result)); debug_assert_eq!( old_hash, new_hash, "Computed query value for {:?}({:?}) is inconsistent with fed value,\ncomputed={:#?}\nfed={:#?}", - query.dep_kind, key, result, cached_result, + Q::DEP_KIND, key, result, cached_result, ); }); } @@ -395,7 +395,7 @@ where (result, Some(dep_node_index)) } TryGetJob::Cycle(error) => { - let result = mk_cycle(qcx, error, query.handle_cycle_error, cache); + let result = mk_cycle(qcx, error, Q::HANDLE_CYCLE_ERROR, cache); (result, None) } #[cfg(parallel_compiler)] @@ -414,16 +414,14 @@ where } } -fn execute_job<Qcx, K, V>( +fn execute_job<Q, Qcx>( qcx: Qcx, - key: K, + key: Q::Key, mut dep_node_opt: Option<DepNode<Qcx::DepKind>>, - query: &QueryVTable<Qcx, K, V>, job_id: QueryJobId, -) -> (V, DepNodeIndex) +) -> (Q::Value, DepNodeIndex) where - K: Clone + DepNodeParams<Qcx::DepContext>, - V: Debug, + Q: QueryConfig<Qcx>, Qcx: QueryContext, { let dep_graph = qcx.dep_context().dep_graph(); @@ -431,23 +429,23 @@ where // Fast path for when incr. comp. is off. if !dep_graph.is_fully_enabled() { let prof_timer = qcx.dep_context().profiler().query_provider(); - let result = qcx.start_query(job_id, query.depth_limit, None, || { - query.compute(*qcx.dep_context(), key) + let result = qcx.start_query(job_id, Q::DEPTH_LIMIT, None, || { + Q::compute(qcx, &key)(*qcx.dep_context(), key) }); let dep_node_index = dep_graph.next_virtual_depnode_index(); prof_timer.finish_with_query_invocation_id(dep_node_index.into()); return (result, dep_node_index); } - if !query.anon && !query.eval_always { + if !Q::ANON && !Q::EVAL_ALWAYS { // `to_dep_node` is expensive for some `DepKind`s. 
let dep_node = - dep_node_opt.get_or_insert_with(|| query.to_dep_node(*qcx.dep_context(), &key)); + dep_node_opt.get_or_insert_with(|| Q::construct_dep_node(*qcx.dep_context(), &key)); // The diagnostics for this query will be promoted to the current session during // `try_mark_green()`, so we can ignore them here. if let Some(ret) = qcx.start_query(job_id, false, None, || { - try_load_from_disk_and_cache_in_memory(qcx, &key, &dep_node, query) + try_load_from_disk_and_cache_in_memory::<Q, Qcx>(qcx, &key, &dep_node) }) { return ret; } @@ -457,18 +455,19 @@ where let diagnostics = Lock::new(ThinVec::new()); let (result, dep_node_index) = - qcx.start_query(job_id, query.depth_limit, Some(&diagnostics), || { - if query.anon { - return dep_graph.with_anon_task(*qcx.dep_context(), query.dep_kind, || { - query.compute(*qcx.dep_context(), key) + qcx.start_query(job_id, Q::DEPTH_LIMIT, Some(&diagnostics), || { + if Q::ANON { + return dep_graph.with_anon_task(*qcx.dep_context(), Q::DEP_KIND, || { + Q::compute(qcx, &key)(*qcx.dep_context(), key) }); } // `to_dep_node` is expensive for some `DepKind`s. let dep_node = - dep_node_opt.unwrap_or_else(|| query.to_dep_node(*qcx.dep_context(), &key)); + dep_node_opt.unwrap_or_else(|| Q::construct_dep_node(*qcx.dep_context(), &key)); - dep_graph.with_task(dep_node, *qcx.dep_context(), key, query.compute, query.hash_result) + let task = Q::compute(qcx, &key); + dep_graph.with_task(dep_node, *qcx.dep_context(), key, task, Q::HASH_RESULT) }); prof_timer.finish_with_query_invocation_id(dep_node_index.into()); @@ -477,7 +476,7 @@ where let side_effects = QuerySideEffects { diagnostics }; if std::intrinsics::unlikely(!side_effects.is_empty()) { - if query.anon { + if Q::ANON { qcx.store_side_effects_for_anon_node(dep_node_index, side_effects); } else { qcx.store_side_effects(dep_node_index, side_effects); @@ -487,16 +486,14 @@ where (result, dep_node_index) } -fn try_load_from_disk_and_cache_in_memory<Qcx, K, V>( +fn try_load_from_disk_and_cache_in_memory<Q, Qcx>( qcx: Qcx, - key: &K, + key: &Q::Key, dep_node: &DepNode<Qcx::DepKind>, - query: &QueryVTable<Qcx, K, V>, -) -> Option<(V, DepNodeIndex)> +) -> Option<(Q::Value, DepNodeIndex)> where - K: Clone, + Q: QueryConfig<Qcx>, Qcx: QueryContext, - V: Debug, { // Note this function can be called concurrently from the same query // We must ensure that this is handled correctly. @@ -508,7 +505,7 @@ where // First we try to load the result from the on-disk cache. // Some things are never cached on disk. - if let Some(try_load_from_disk) = query.try_load_from_disk { + if let Some(try_load_from_disk) = Q::try_load_from_disk(qcx, &key) { let prof_timer = qcx.dep_context().profiler().incr_cache_loading(); // The call to `with_query_deserialization` enforces that no new `DepNodes` @@ -542,7 +539,7 @@ where if std::intrinsics::unlikely( try_verify || qcx.dep_context().sess().opts.unstable_opts.incremental_verify_ich, ) { - incremental_verify_ich(*qcx.dep_context(), &result, dep_node, query.hash_result); + incremental_verify_ich(*qcx.dep_context(), &result, dep_node, Q::HASH_RESULT); } return Some((result, dep_node_index)); @@ -552,8 +549,7 @@ where // can be forced from `DepNode`. debug_assert!( !qcx.dep_context().fingerprint_style(dep_node.kind).reconstructible(), - "missing on-disk cache entry for {:?}", - dep_node + "missing on-disk cache entry for {dep_node:?}" ); } @@ -562,7 +558,7 @@ where let prof_timer = qcx.dep_context().profiler().query_provider(); // The dep-graph for this computation is already in-place. 
- let result = dep_graph.with_ignore(|| query.compute(*qcx.dep_context(), key.clone())); + let result = dep_graph.with_ignore(|| Q::compute(qcx, key)(*qcx.dep_context(), key.clone())); prof_timer.finish_with_query_invocation_id(dep_node_index.into()); @@ -575,7 +571,7 @@ where // // See issue #82920 for an example of a miscompilation that would get turned into // an ICE by this check - incremental_verify_ich(*qcx.dep_context(), &result, dep_node, query.hash_result); + incremental_verify_ich(*qcx.dep_context(), &result, dep_node, Q::HASH_RESULT); Some((result, dep_node_index)) } @@ -592,8 +588,7 @@ where { assert!( tcx.dep_graph().is_green(dep_node), - "fingerprint for green query instance not loaded from cache: {:?}", - dep_node, + "fingerprint for green query instance not loaded from cache: {dep_node:?}", ); let new_hash = hash_result.map_or(Fingerprint::ZERO, |f| { @@ -672,16 +667,16 @@ fn incremental_verify_ich_failed(sess: &Session, dep_node: DebugArg<'_>, result: sess.emit_err(crate::error::Reentrant); } else { let run_cmd = if let Some(crate_name) = &sess.opts.crate_name { - format!("`cargo clean -p {}` or `cargo clean`", crate_name) + format!("`cargo clean -p {crate_name}` or `cargo clean`") } else { "`cargo clean`".to_string() }; sess.emit_err(crate::error::IncrementCompilation { run_cmd, - dep_node: format!("{:?}", dep_node), + dep_node: format!("{dep_node:?}"), }); - panic!("Found unstable fingerprints for {:?}: {:?}", dep_node, result); + panic!("Found unstable fingerprints for {dep_node:?}: {result:?}"); } INSIDE_VERIFY_PANIC.with(|in_panic| in_panic.set(old_in_panic)); @@ -696,23 +691,19 @@ fn incremental_verify_ich_failed(sess: &Session, dep_node: DebugArg<'_>, result: /// /// Note: The optimization is only available during incr. comp. 
#[inline(never)] -fn ensure_must_run<Qcx, K, V>( - qcx: Qcx, - key: &K, - query: &QueryVTable<Qcx, K, V>, -) -> (bool, Option<DepNode<Qcx::DepKind>>) +fn ensure_must_run<Q, Qcx>(qcx: Qcx, key: &Q::Key) -> (bool, Option<DepNode<Qcx::DepKind>>) where - K: crate::dep_graph::DepNodeParams<Qcx::DepContext>, + Q: QueryConfig<Qcx>, Qcx: QueryContext, { - if query.eval_always { + if Q::EVAL_ALWAYS { return (true, None); } // Ensuring an anonymous query makes no sense - assert!(!query.anon); + assert!(!Q::ANON); - let dep_node = query.to_dep_node(*qcx.dep_context(), key); + let dep_node = Q::construct_dep_node(*qcx.dep_context(), key); let dep_graph = qcx.dep_context().dep_graph(); match dep_graph.try_mark_green(qcx, &dep_node) { @@ -739,16 +730,15 @@ pub enum QueryMode { Ensure, } -pub fn get_query<Q, Qcx>(qcx: Qcx, span: Span, key: Q::Key, mode: QueryMode) -> Option<Q::Stored> +pub fn get_query<Q, Qcx, D>(qcx: Qcx, span: Span, key: Q::Key, mode: QueryMode) -> Option<Q::Stored> where + D: DepKind, Q: QueryConfig<Qcx>, - Q::Key: DepNodeParams<Qcx::DepContext>, - Q::Value: Value<Qcx::DepContext>, + Q::Value: Value<Qcx::DepContext, D>, Qcx: QueryContext, { - let query = Q::make_vtable(qcx, &key); let dep_node = if let QueryMode::Ensure = mode { - let (must_run, dep_node) = ensure_must_run(qcx, &key, &query); + let (must_run, dep_node) = ensure_must_run::<Q, _>(qcx, &key); if !must_run { return None; } @@ -757,14 +747,13 @@ where None }; - let (result, dep_node_index) = try_execute_query( + let (result, dep_node_index) = try_execute_query::<Q, Qcx>( qcx, Q::query_state(qcx), Q::query_cache(qcx), span, key, dep_node, - &query, ); if let Some(dep_node_index) = dep_node_index { qcx.dep_context().dep_graph().read_index(dep_node_index) @@ -772,11 +761,11 @@ where Some(result) } -pub fn force_query<Q, Qcx>(qcx: Qcx, key: Q::Key, dep_node: DepNode<Qcx::DepKind>) +pub fn force_query<Q, Qcx, D>(qcx: Qcx, key: Q::Key, dep_node: DepNode<Qcx::DepKind>) where + D: DepKind, Q: QueryConfig<Qcx>, - Q::Key: DepNodeParams<Qcx::DepContext>, - Q::Value: Value<Qcx::DepContext>, + Q::Value: Value<Qcx::DepContext, D>, Qcx: QueryContext, { // We may be concurrently trying both execute and force a query. @@ -793,9 +782,8 @@ where Err(()) => {} } - let query = Q::make_vtable(qcx, &key); let state = Q::query_state(qcx); - debug_assert!(!query.anon); + debug_assert!(!Q::ANON); - try_execute_query(qcx, state, cache, DUMMY_SP, key, Some(dep_node), &query); + try_execute_query::<Q, _>(qcx, state, cache, DUMMY_SP, key, Some(dep_node)); } diff --git a/compiler/rustc_query_system/src/values.rs b/compiler/rustc_query_system/src/values.rs index 214656abe..b6e2cfa3d 100644 --- a/compiler/rustc_query_system/src/values.rs +++ b/compiler/rustc_query_system/src/values.rs @@ -1,12 +1,12 @@ -use crate::dep_graph::DepContext; +use crate::dep_graph::{DepContext, DepKind}; use crate::query::QueryInfo; -pub trait Value<Tcx: DepContext>: Sized { - fn from_cycle_error(tcx: Tcx, cycle: &[QueryInfo]) -> Self; +pub trait Value<Tcx: DepContext, D: DepKind>: Sized { + fn from_cycle_error(tcx: Tcx, cycle: &[QueryInfo<D>]) -> Self; } -impl<Tcx: DepContext, T> Value<Tcx> for T { - default fn from_cycle_error(tcx: Tcx, _: &[QueryInfo]) -> T { +impl<Tcx: DepContext, T, D: DepKind> Value<Tcx, D> for T { + default fn from_cycle_error(tcx: Tcx, _: &[QueryInfo<D>]) -> T { tcx.sess().abort_if_errors(); // Ideally we would use `bug!` here. But bug! is only defined in rustc_middle, and it's // non-trivial to define it earlier. |