author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:02:58 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:02:58 +0000
commit     698f8c2f01ea549d77d7dc3338a12e04c11057b9 (patch)
tree       173a775858bd501c378080a10dca74132f05bc50 /compiler/rustc_query_system/src/query
parent     Initial commit. (diff)
download   rustc-698f8c2f01ea549d77d7dc3338a12e04c11057b9.tar.xz
           rustc-698f8c2f01ea549d77d7dc3338a12e04c11057b9.zip

Adding upstream version 1.64.0+dfsg1. (upstream/1.64.0+dfsg1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_query_system/src/query')
-rw-r--r--  compiler/rustc_query_system/src/query/README.md      3
-rw-r--r--  compiler/rustc_query_system/src/query/caches.rs     226
-rw-r--r--  compiler/rustc_query_system/src/query/config.rs      75
-rw-r--r--  compiler/rustc_query_system/src/query/job.rs        612
-rw-r--r--  compiler/rustc_query_system/src/query/mod.rs        125
-rw-r--r--  compiler/rustc_query_system/src/query/plumbing.rs   742
6 files changed, 1783 insertions, 0 deletions
diff --git a/compiler/rustc_query_system/src/query/README.md b/compiler/rustc_query_system/src/query/README.md
new file mode 100644
index 000000000..8ec07b9fd
--- /dev/null
+++ b/compiler/rustc_query_system/src/query/README.md
@@ -0,0 +1,3 @@
+For more information about how the query system works, see the [rustc dev guide].
+
+[rustc dev guide]: https://rustc-dev-guide.rust-lang.org/query.html
diff --git a/compiler/rustc_query_system/src/query/caches.rs b/compiler/rustc_query_system/src/query/caches.rs
new file mode 100644
index 000000000..85c5af72e
--- /dev/null
+++ b/compiler/rustc_query_system/src/query/caches.rs
@@ -0,0 +1,226 @@
+use crate::dep_graph::DepNodeIndex;
+
+use rustc_arena::TypedArena;
+use rustc_data_structures::fx::FxHashMap;
+use rustc_data_structures::sharded;
+#[cfg(parallel_compiler)]
+use rustc_data_structures::sharded::Sharded;
+#[cfg(not(parallel_compiler))]
+use rustc_data_structures::sync::Lock;
+use rustc_data_structures::sync::WorkerLocal;
+use std::default::Default;
+use std::fmt::Debug;
+use std::hash::Hash;
+use std::marker::PhantomData;
+
+pub trait CacheSelector<K, V> {
+ type Cache;
+}
+
+pub trait QueryStorage {
+ type Value: Debug;
+ type Stored: Clone;
+
+ /// Store a value without putting it in the cache.
+ /// This is meant to be used with cycle errors.
+ fn store_nocache(&self, value: Self::Value) -> Self::Stored;
+}
+
+pub trait QueryCache: QueryStorage + Sized {
+ type Key: Hash + Eq + Clone + Debug;
+
+ /// Checks if the query is already computed and in the cache.
+ /// On a hit, calls `on_hit` with the cached value and its `DepNodeIndex`
+ /// and returns the closure's result; on a miss, returns `Err(())` so the
+ /// caller can go on to compute the query.
+ fn lookup<R, OnHit>(
+ &self,
+ key: &Self::Key,
+ // `on_hit` can be called while holding a lock to the query state shard.
+ on_hit: OnHit,
+ ) -> Result<R, ()>
+ where
+ OnHit: FnOnce(&Self::Stored, DepNodeIndex) -> R;
+
+ fn complete(&self, key: Self::Key, value: Self::Value, index: DepNodeIndex) -> Self::Stored;
+
+ fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex));
+}
+
+pub struct DefaultCacheSelector;
+
+impl<K: Eq + Hash, V: Clone> CacheSelector<K, V> for DefaultCacheSelector {
+ type Cache = DefaultCache<K, V>;
+}
+
+pub struct DefaultCache<K, V> {
+ #[cfg(parallel_compiler)]
+ cache: Sharded<FxHashMap<K, (V, DepNodeIndex)>>,
+ #[cfg(not(parallel_compiler))]
+ cache: Lock<FxHashMap<K, (V, DepNodeIndex)>>,
+}
+
+impl<K, V> Default for DefaultCache<K, V> {
+ fn default() -> Self {
+ DefaultCache { cache: Default::default() }
+ }
+}
+
+impl<K: Eq + Hash, V: Clone + Debug> QueryStorage for DefaultCache<K, V> {
+ type Value = V;
+ type Stored = V;
+
+ #[inline]
+ fn store_nocache(&self, value: Self::Value) -> Self::Stored {
+ // We have no dedicated storage
+ value
+ }
+}
+
+impl<K, V> QueryCache for DefaultCache<K, V>
+where
+ K: Eq + Hash + Clone + Debug,
+ V: Clone + Debug,
+{
+ type Key = K;
+
+ #[inline(always)]
+ fn lookup<R, OnHit>(&self, key: &K, on_hit: OnHit) -> Result<R, ()>
+ where
+ OnHit: FnOnce(&V, DepNodeIndex) -> R,
+ {
+ let key_hash = sharded::make_hash(key);
+ #[cfg(parallel_compiler)]
+ let lock = self.cache.get_shard_by_hash(key_hash).lock();
+ #[cfg(not(parallel_compiler))]
+ let lock = self.cache.lock();
+ let result = lock.raw_entry().from_key_hashed_nocheck(key_hash, key);
+
+ if let Some((_, value)) = result {
+ let hit_result = on_hit(&value.0, value.1);
+ Ok(hit_result)
+ } else {
+ Err(())
+ }
+ }
+
+ #[inline]
+ fn complete(&self, key: K, value: V, index: DepNodeIndex) -> Self::Stored {
+ #[cfg(parallel_compiler)]
+ let mut lock = self.cache.get_shard_by_value(&key).lock();
+ #[cfg(not(parallel_compiler))]
+ let mut lock = self.cache.lock();
+ lock.insert(key, (value.clone(), index));
+ value
+ }
+
+ fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
+ #[cfg(parallel_compiler)]
+ {
+ let shards = self.cache.lock_shards();
+ for shard in shards.iter() {
+ for (k, v) in shard.iter() {
+ f(k, &v.0, v.1);
+ }
+ }
+ }
+ #[cfg(not(parallel_compiler))]
+ {
+ let map = self.cache.lock();
+ for (k, v) in map.iter() {
+ f(k, &v.0, v.1);
+ }
+ }
+ }
+}
+
+pub struct ArenaCacheSelector<'tcx>(PhantomData<&'tcx ()>);
+
+impl<'tcx, K: Eq + Hash, V: 'tcx> CacheSelector<K, V> for ArenaCacheSelector<'tcx> {
+ type Cache = ArenaCache<'tcx, K, V>;
+}
+
+pub struct ArenaCache<'tcx, K, V> {
+ arena: WorkerLocal<TypedArena<(V, DepNodeIndex)>>,
+ #[cfg(parallel_compiler)]
+ cache: Sharded<FxHashMap<K, &'tcx (V, DepNodeIndex)>>,
+ #[cfg(not(parallel_compiler))]
+ cache: Lock<FxHashMap<K, &'tcx (V, DepNodeIndex)>>,
+}
+
+impl<'tcx, K, V> Default for ArenaCache<'tcx, K, V> {
+ fn default() -> Self {
+ ArenaCache { arena: WorkerLocal::new(|_| TypedArena::default()), cache: Default::default() }
+ }
+}
+
+impl<'tcx, K: Eq + Hash, V: Debug + 'tcx> QueryStorage for ArenaCache<'tcx, K, V> {
+ type Value = V;
+ type Stored = &'tcx V;
+
+ #[inline]
+ fn store_nocache(&self, value: Self::Value) -> Self::Stored {
+ let value = self.arena.alloc((value, DepNodeIndex::INVALID));
+ let value = unsafe { &*(&value.0 as *const _) };
+ &value
+ }
+}
+
+impl<'tcx, K, V: 'tcx> QueryCache for ArenaCache<'tcx, K, V>
+where
+ K: Eq + Hash + Clone + Debug,
+ V: Debug,
+{
+ type Key = K;
+
+ #[inline(always)]
+ fn lookup<R, OnHit>(&self, key: &K, on_hit: OnHit) -> Result<R, ()>
+ where
+ OnHit: FnOnce(&&'tcx V, DepNodeIndex) -> R,
+ {
+ let key_hash = sharded::make_hash(key);
+ #[cfg(parallel_compiler)]
+ let lock = self.cache.get_shard_by_hash(key_hash).lock();
+ #[cfg(not(parallel_compiler))]
+ let lock = self.cache.lock();
+ let result = lock.raw_entry().from_key_hashed_nocheck(key_hash, key);
+
+ if let Some((_, value)) = result {
+ let hit_result = on_hit(&&value.0, value.1);
+ Ok(hit_result)
+ } else {
+ Err(())
+ }
+ }
+
+ #[inline]
+ fn complete(&self, key: K, value: V, index: DepNodeIndex) -> Self::Stored {
+ let value = self.arena.alloc((value, index));
+ let value = unsafe { &*(value as *const _) };
+ #[cfg(parallel_compiler)]
+ let mut lock = self.cache.get_shard_by_value(&key).lock();
+ #[cfg(not(parallel_compiler))]
+ let mut lock = self.cache.lock();
+ lock.insert(key, value);
+ &value.0
+ }
+
+ fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
+ #[cfg(parallel_compiler)]
+ {
+ let shards = self.cache.lock_shards();
+ for shard in shards.iter() {
+ for (k, v) in shard.iter() {
+ f(k, &v.0, v.1);
+ }
+ }
+ }
+ #[cfg(not(parallel_compiler))]
+ {
+ let map = self.cache.lock();
+ for (k, v) in map.iter() {
+ f(k, &v.0, v.1);
+ }
+ }
+ }
+}
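
The cache layer above is what the rest of the query system talks to: `CacheSelector` picks a concrete cache type per query, `DefaultCache` clones values out of a sharded (or `Lock`ed) hash map, and `ArenaCache` allocates `(value, dep_node_index)` pairs in a worker-local arena and hands out `&'tcx` references instead. As a rough illustration of the `lookup`/`complete` contract only, here is a minimal, self-contained sketch using plain `std` types (`Mutex<HashMap>` standing in for `Sharded`/`Lock`, a bare `u32` standing in for `DepNodeIndex`); it is not rustc code and omits sharding, arenas, and the `iter` hook.

use std::collections::HashMap;
use std::hash::Hash;
use std::sync::Mutex;

// Stand-in for DepNodeIndex (assumption: a plain integer is enough for the sketch).
type NodeIndex = u32;

// Simplified version of the QueryCache contract from caches.rs:
// `lookup` calls `on_hit` on a hit, `complete` inserts a finished result.
struct SimpleCache<K, V> {
    map: Mutex<HashMap<K, (V, NodeIndex)>>,
}

impl<K: Eq + Hash + Clone, V: Clone> SimpleCache<K, V> {
    fn new() -> Self {
        SimpleCache { map: Mutex::new(HashMap::new()) }
    }

    // On a hit, run `on_hit` while the lock is held (as the real cache does)
    // and return its result; on a miss, return Err(()).
    fn lookup<R>(&self, key: &K, on_hit: impl FnOnce(&V, NodeIndex) -> R) -> Result<R, ()> {
        let map = self.map.lock().unwrap();
        match map.get(key) {
            Some((value, index)) => Ok(on_hit(value, *index)),
            None => Err(()),
        }
    }

    // Record the result of a finished query together with its dep-node index.
    fn complete(&self, key: K, value: V, index: NodeIndex) -> V {
        self.map.lock().unwrap().insert(key, (value.clone(), index));
        value
    }
}

fn main() {
    let cache: SimpleCache<&'static str, usize> = SimpleCache::new();
    assert!(cache.lookup(&"type_of", |_, _| ()).is_err()); // miss
    cache.complete("type_of", 42, 0);
    let hit = cache.lookup(&"type_of", |v, idx| (*v, idx));
    assert_eq!(hit, Ok((42, 0)));
}
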
diff --git a/compiler/rustc_query_system/src/query/config.rs b/compiler/rustc_query_system/src/query/config.rs
new file mode 100644
index 000000000..964914a13
--- /dev/null
+++ b/compiler/rustc_query_system/src/query/config.rs
@@ -0,0 +1,75 @@
+//! Query configuration and description traits.
+
+use crate::dep_graph::DepNode;
+use crate::dep_graph::SerializedDepNodeIndex;
+use crate::ich::StableHashingContext;
+use crate::query::caches::QueryCache;
+use crate::query::{QueryContext, QueryState};
+
+use rustc_data_structures::fingerprint::Fingerprint;
+use rustc_errors::{DiagnosticBuilder, ErrorGuaranteed};
+use std::fmt::Debug;
+use std::hash::Hash;
+
+pub trait QueryConfig {
+ const NAME: &'static str;
+
+ type Key: Eq + Hash + Clone + Debug;
+ type Value;
+ type Stored: Clone;
+}
+
+pub struct QueryVTable<CTX: QueryContext, K, V> {
+ pub anon: bool,
+ pub dep_kind: CTX::DepKind,
+ pub eval_always: bool,
+ pub cache_on_disk: bool,
+
+ pub compute: fn(CTX::DepContext, K) -> V,
+ pub hash_result: Option<fn(&mut StableHashingContext<'_>, &V) -> Fingerprint>,
+ pub handle_cycle_error: fn(CTX, DiagnosticBuilder<'_, ErrorGuaranteed>) -> V,
+ pub try_load_from_disk: Option<fn(CTX, SerializedDepNodeIndex) -> Option<V>>,
+}
+
+impl<CTX: QueryContext, K, V> QueryVTable<CTX, K, V> {
+ pub(crate) fn to_dep_node(&self, tcx: CTX::DepContext, key: &K) -> DepNode<CTX::DepKind>
+ where
+ K: crate::dep_graph::DepNodeParams<CTX::DepContext>,
+ {
+ DepNode::construct(tcx, self.dep_kind, key)
+ }
+
+ pub(crate) fn compute(&self, tcx: CTX::DepContext, key: K) -> V {
+ (self.compute)(tcx, key)
+ }
+
+ pub(crate) fn try_load_from_disk(&self, tcx: CTX, index: SerializedDepNodeIndex) -> Option<V> {
+ self.try_load_from_disk
+ .expect("QueryDescription::load_from_disk() called for an unsupported query.")(
+ tcx, index,
+ )
+ }
+}
+
+pub trait QueryDescription<CTX: QueryContext>: QueryConfig {
+ const TRY_LOAD_FROM_DISK: Option<fn(CTX, SerializedDepNodeIndex) -> Option<Self::Value>>;
+
+ type Cache: QueryCache<Key = Self::Key, Stored = Self::Stored, Value = Self::Value>;
+
+ fn describe(tcx: CTX, key: Self::Key) -> String;
+
+ // Don't use this method to access query results, instead use the methods on TyCtxt
+ fn query_state<'a>(tcx: CTX) -> &'a QueryState<Self::Key>
+ where
+ CTX: 'a;
+
+ // Don't use this method to access query results, instead use the methods on TyCtxt
+ fn query_cache<'a>(tcx: CTX) -> &'a Self::Cache
+ where
+ CTX: 'a;
+
+ // Don't use this method to compute query results, instead use the methods on TyCtxt
+ fn make_vtable(tcx: CTX, key: &Self::Key) -> QueryVTable<CTX, Self::Key, Self::Value>;
+
+ fn cache_on_disk(tcx: CTX::DepContext, key: &Self::Key) -> bool;
+}
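
`QueryVTable` erases each query's behaviour (provider function, result hashing, cycle handling, optional disk loading) into plain function pointers, so the generic plumbing can drive any query without knowing its concrete provider. The following is a hypothetical, much-reduced sketch of that shape; the names `MiniVTable`, `DummyCtx`, and the `double` query are invented for illustration, and only the `compute` slot is kept.

// Hypothetical, heavily simplified mirror of QueryVTable: the context and key
// types are stand-ins, and only `compute` / `cache_on_disk` are modelled.
struct MiniVTable<Ctx, K, V> {
    name: &'static str,
    cache_on_disk: bool,
    compute: fn(&Ctx, K) -> V,
}

impl<Ctx, K, V> MiniVTable<Ctx, K, V> {
    // The parenthesized call invokes the stored function pointer, mirroring
    // how QueryVTable::compute forwards to its `compute` field.
    fn compute(&self, ctx: &Ctx, key: K) -> V {
        (self.compute)(ctx, key)
    }
}

struct DummyCtx;

fn main() {
    // A "query" that just doubles its key, standing in for a real provider.
    let vtable: MiniVTable<DummyCtx, u32, u32> = MiniVTable {
        name: "double",
        cache_on_disk: false,
        compute: |_ctx: &DummyCtx, k: u32| k * 2,
    };
    assert_eq!(vtable.compute(&DummyCtx, 21), 42);
    println!("{} (cache_on_disk = {})", vtable.name, vtable.cache_on_disk);
}
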
diff --git a/compiler/rustc_query_system/src/query/job.rs b/compiler/rustc_query_system/src/query/job.rs
new file mode 100644
index 000000000..6d2aff381
--- /dev/null
+++ b/compiler/rustc_query_system/src/query/job.rs
@@ -0,0 +1,612 @@
+use crate::query::plumbing::CycleError;
+use crate::query::{QueryContext, QueryStackFrame};
+use rustc_hir::def::DefKind;
+
+use rustc_data_structures::fx::FxHashMap;
+use rustc_errors::{
+ struct_span_err, Diagnostic, DiagnosticBuilder, ErrorGuaranteed, Handler, Level,
+};
+use rustc_session::Session;
+use rustc_span::Span;
+
+use std::hash::Hash;
+use std::num::NonZeroU64;
+
+#[cfg(parallel_compiler)]
+use {
+ parking_lot::{Condvar, Mutex},
+ rustc_data_structures::fx::FxHashSet,
+ rustc_data_structures::sync::Lock,
+ rustc_data_structures::sync::Lrc,
+ rustc_data_structures::{jobserver, OnDrop},
+ rustc_rayon_core as rayon_core,
+ rustc_span::DUMMY_SP,
+ std::iter::{self, FromIterator},
+ std::{mem, process},
+};
+
+/// Represents a span and a query key.
+#[derive(Clone, Debug)]
+pub struct QueryInfo {
+ /// The span corresponding to the reason for which this query was required.
+ pub span: Span,
+ pub query: QueryStackFrame,
+}
+
+pub type QueryMap = FxHashMap<QueryJobId, QueryJobInfo>;
+
+/// A value uniquely identifying an active query job.
+#[derive(Copy, Clone, Eq, PartialEq, Hash)]
+pub struct QueryJobId(pub NonZeroU64);
+
+impl QueryJobId {
+ fn query(self, map: &QueryMap) -> QueryStackFrame {
+ map.get(&self).unwrap().query.clone()
+ }
+
+ #[cfg(parallel_compiler)]
+ fn span(self, map: &QueryMap) -> Span {
+ map.get(&self).unwrap().job.span
+ }
+
+ #[cfg(parallel_compiler)]
+ fn parent(self, map: &QueryMap) -> Option<QueryJobId> {
+ map.get(&self).unwrap().job.parent
+ }
+
+ #[cfg(parallel_compiler)]
+ fn latch<'a>(self, map: &'a QueryMap) -> Option<&'a QueryLatch> {
+ map.get(&self).unwrap().job.latch.as_ref()
+ }
+}
+
+pub struct QueryJobInfo {
+ pub query: QueryStackFrame,
+ pub job: QueryJob,
+}
+
+/// Represents an active query job.
+#[derive(Clone)]
+pub struct QueryJob {
+ pub id: QueryJobId,
+
+ /// The span corresponding to the reason for which this query was required.
+ pub span: Span,
+
+ /// The parent query job which created this job and is implicitly waiting on it.
+ pub parent: Option<QueryJobId>,
+
+ /// The latch that is used to wait on this job.
+ #[cfg(parallel_compiler)]
+ latch: Option<QueryLatch>,
+}
+
+impl QueryJob {
+ /// Creates a new query job.
+ #[inline]
+ pub fn new(id: QueryJobId, span: Span, parent: Option<QueryJobId>) -> Self {
+ QueryJob {
+ id,
+ span,
+ parent,
+ #[cfg(parallel_compiler)]
+ latch: None,
+ }
+ }
+
+ #[cfg(parallel_compiler)]
+ pub(super) fn latch(&mut self) -> QueryLatch {
+ if self.latch.is_none() {
+ self.latch = Some(QueryLatch::new());
+ }
+ self.latch.as_ref().unwrap().clone()
+ }
+
+ /// Signals to waiters that the query is complete.
+ ///
+ /// This does nothing for single threaded rustc,
+ /// as there are no concurrent jobs which could be waiting on us
+ #[inline]
+ pub fn signal_complete(self) {
+ #[cfg(parallel_compiler)]
+ {
+ if let Some(latch) = self.latch {
+ latch.set();
+ }
+ }
+ }
+}
+
+#[cfg(not(parallel_compiler))]
+impl QueryJobId {
+ #[cold]
+ #[inline(never)]
+ pub(super) fn find_cycle_in_stack(
+ &self,
+ query_map: QueryMap,
+ current_job: &Option<QueryJobId>,
+ span: Span,
+ ) -> CycleError {
+ // Find the waitee amongst `current_job` parents
+ let mut cycle = Vec::new();
+ let mut current_job = Option::clone(current_job);
+
+ while let Some(job) = current_job {
+ let info = query_map.get(&job).unwrap();
+ cycle.push(QueryInfo { span: info.job.span, query: info.query.clone() });
+
+ if job == *self {
+ cycle.reverse();
+
+ // This is the end of the cycle
+ // The span entry we included was for the usage
+ // of the cycle itself, and not part of the cycle
+ // Replace it with the span which caused the cycle to form
+ cycle[0].span = span;
+ // Find out why the cycle itself was used
+ let usage = info
+ .job
+ .parent
+ .as_ref()
+ .map(|parent| (info.job.span, parent.query(&query_map)));
+ return CycleError { usage, cycle };
+ }
+
+ current_job = info.job.parent;
+ }
+
+ panic!("did not find a cycle")
+ }
+}
+
+#[cfg(parallel_compiler)]
+struct QueryWaiter {
+ query: Option<QueryJobId>,
+ condvar: Condvar,
+ span: Span,
+ cycle: Lock<Option<CycleError>>,
+}
+
+#[cfg(parallel_compiler)]
+impl QueryWaiter {
+ fn notify(&self, registry: &rayon_core::Registry) {
+ rayon_core::mark_unblocked(registry);
+ self.condvar.notify_one();
+ }
+}
+
+#[cfg(parallel_compiler)]
+struct QueryLatchInfo {
+ complete: bool,
+ waiters: Vec<Lrc<QueryWaiter>>,
+}
+
+#[cfg(parallel_compiler)]
+#[derive(Clone)]
+pub(super) struct QueryLatch {
+ info: Lrc<Mutex<QueryLatchInfo>>,
+}
+
+#[cfg(parallel_compiler)]
+impl QueryLatch {
+ fn new() -> Self {
+ QueryLatch {
+ info: Lrc::new(Mutex::new(QueryLatchInfo { complete: false, waiters: Vec::new() })),
+ }
+ }
+
+ /// Waits for the query job to complete.
+ pub(super) fn wait_on(&self, query: Option<QueryJobId>, span: Span) -> Result<(), CycleError> {
+ let waiter =
+ Lrc::new(QueryWaiter { query, span, cycle: Lock::new(None), condvar: Condvar::new() });
+ self.wait_on_inner(&waiter);
+ // FIXME: Get rid of this lock. We have ownership of the QueryWaiter
+ // although another thread may still have a Lrc reference so we cannot
+ // use Lrc::get_mut
+ let mut cycle = waiter.cycle.lock();
+ match cycle.take() {
+ None => Ok(()),
+ Some(cycle) => Err(cycle),
+ }
+ }
+
+ /// Awaits the caller on this latch by blocking the current thread.
+ fn wait_on_inner(&self, waiter: &Lrc<QueryWaiter>) {
+ let mut info = self.info.lock();
+ if !info.complete {
+ // We push the waiter on to the `waiters` list. It can be accessed inside
+ // the `wait` call below, by 1) the `set` method or 2) by deadlock detection.
+ // Both of these will remove it from the `waiters` list before resuming
+ // this thread.
+ info.waiters.push(waiter.clone());
+
+ // If this detects a deadlock and the deadlock handler wants to resume this thread
+ // we have to be in the `wait` call. This is ensured by the deadlock handler
+ // getting the self.info lock.
+ rayon_core::mark_blocked();
+ jobserver::release_thread();
+ waiter.condvar.wait(&mut info);
+ // Release the lock before we potentially block in `acquire_thread`
+ mem::drop(info);
+ jobserver::acquire_thread();
+ }
+ }
+
+ /// Sets the latch and resumes all waiters on it
+ fn set(&self) {
+ let mut info = self.info.lock();
+ debug_assert!(!info.complete);
+ info.complete = true;
+ let registry = rayon_core::Registry::current();
+ for waiter in info.waiters.drain(..) {
+ waiter.notify(&registry);
+ }
+ }
+
+ /// Removes a single waiter from the list of waiters.
+ /// This is used to break query cycles.
+ fn extract_waiter(&self, waiter: usize) -> Lrc<QueryWaiter> {
+ let mut info = self.info.lock();
+ debug_assert!(!info.complete);
+ // Remove the waiter from the list of waiters
+ info.waiters.remove(waiter)
+ }
+}
+
+/// A resumable waiter of a query. The usize is the index into waiters in the query's latch
+#[cfg(parallel_compiler)]
+type Waiter = (QueryJobId, usize);
+
+/// Visits all the non-resumable and resumable waiters of a query.
+/// Only waiters in a query are visited.
+/// `visit` is called for every waiter and is passed a query waiting on `query_ref`
+/// and a span indicating the reason the query waited on `query_ref`.
+/// If `visit` returns Some, this function returns.
+/// For visits of non-resumable waiters it returns the return value of `visit`.
+/// For visits of resumable waiters it returns Some(Some(Waiter)) which has the
+/// required information to resume the waiter.
+/// If all `visit` calls return None, this function also returns None.
+#[cfg(parallel_compiler)]
+fn visit_waiters<F>(query_map: &QueryMap, query: QueryJobId, mut visit: F) -> Option<Option<Waiter>>
+where
+ F: FnMut(Span, QueryJobId) -> Option<Option<Waiter>>,
+{
+ // Visit the parent query which is a non-resumable waiter since it's on the same stack
+ if let Some(parent) = query.parent(query_map) {
+ if let Some(cycle) = visit(query.span(query_map), parent) {
+ return Some(cycle);
+ }
+ }
+
+ // Visit the explicit waiters which use condvars and are resumable
+ if let Some(latch) = query.latch(query_map) {
+ for (i, waiter) in latch.info.lock().waiters.iter().enumerate() {
+ if let Some(waiter_query) = waiter.query {
+ if visit(waiter.span, waiter_query).is_some() {
+ // Return a value which indicates that this waiter can be resumed
+ return Some(Some((query, i)));
+ }
+ }
+ }
+ }
+
+ None
+}
+
+/// Look for query cycles by doing a depth first search starting at `query`.
+/// `span` is the reason for the `query` to execute. This is initially DUMMY_SP.
+/// If a cycle is detected, this initial value is replaced with the span causing
+/// the cycle.
+#[cfg(parallel_compiler)]
+fn cycle_check(
+ query_map: &QueryMap,
+ query: QueryJobId,
+ span: Span,
+ stack: &mut Vec<(Span, QueryJobId)>,
+ visited: &mut FxHashSet<QueryJobId>,
+) -> Option<Option<Waiter>> {
+ if !visited.insert(query) {
+ return if let Some(p) = stack.iter().position(|q| q.1 == query) {
+ // We detected a query cycle, fix up the initial span and return Some
+
+ // Remove previous stack entries
+ stack.drain(0..p);
+ // Replace the span for the first query with the cycle cause
+ stack[0].0 = span;
+ Some(None)
+ } else {
+ None
+ };
+ }
+
+ // A query marked as visited is added to the stack
+ stack.push((span, query));
+
+ // Visit all the waiters
+ let r = visit_waiters(query_map, query, |span, successor| {
+ cycle_check(query_map, successor, span, stack, visited)
+ });
+
+ // Remove the entry in our stack if we didn't find a cycle
+ if r.is_none() {
+ stack.pop();
+ }
+
+ r
+}
+
+/// Finds out if there's a path to the compiler root (aka. code which isn't in a query)
+/// from `query` without going through any of the queries in `visited`.
+/// This is achieved with a depth first search.
+#[cfg(parallel_compiler)]
+fn connected_to_root(
+ query_map: &QueryMap,
+ query: QueryJobId,
+ visited: &mut FxHashSet<QueryJobId>,
+) -> bool {
+ // We already visited this or we're deliberately ignoring it
+ if !visited.insert(query) {
+ return false;
+ }
+
+ // This query is connected to the root (it has no query parent), return true
+ if query.parent(query_map).is_none() {
+ return true;
+ }
+
+ visit_waiters(query_map, query, |_, successor| {
+ connected_to_root(query_map, successor, visited).then_some(None)
+ })
+ .is_some()
+}
+
+// Deterministically pick a query from a list
+#[cfg(parallel_compiler)]
+fn pick_query<'a, T, F>(query_map: &QueryMap, queries: &'a [T], f: F) -> &'a T
+where
+ F: Fn(&T) -> (Span, QueryJobId),
+{
+ // Deterministically pick an entry point
+ // FIXME: Sort this instead
+ queries
+ .iter()
+ .min_by_key(|v| {
+ let (span, query) = f(v);
+ let hash = query.query(query_map).hash;
+ // Prefer entry points which have valid spans for nicer error messages
+ // We add an integer to the tuple ensuring that entry points
+ // with valid spans are picked first
+ let span_cmp = if span == DUMMY_SP { 1 } else { 0 };
+ (span_cmp, hash)
+ })
+ .unwrap()
+}
+
+/// Looks for query cycles starting from the last query in `jobs`.
+/// If a cycle is found, all queries in the cycle are removed from `jobs` and
+/// the function returns true.
+/// If a cycle was not found, the starting query is removed from `jobs` and
+/// the function returns false.
+#[cfg(parallel_compiler)]
+fn remove_cycle(
+ query_map: &QueryMap,
+ jobs: &mut Vec<QueryJobId>,
+ wakelist: &mut Vec<Lrc<QueryWaiter>>,
+) -> bool {
+ let mut visited = FxHashSet::default();
+ let mut stack = Vec::new();
+ // Look for a cycle starting with the last query in `jobs`
+ if let Some(waiter) =
+ cycle_check(query_map, jobs.pop().unwrap(), DUMMY_SP, &mut stack, &mut visited)
+ {
+ // The stack is a vector of pairs of spans and queries; reverse it so that
+ // the earlier entries require later entries
+ let (mut spans, queries): (Vec<_>, Vec<_>) = stack.into_iter().rev().unzip();
+
+ // Shift the spans so that queries are matched with the span for their waitee
+ spans.rotate_right(1);
+
+ // Zip them back together
+ let mut stack: Vec<_> = iter::zip(spans, queries).collect();
+
+ // Remove the queries in our cycle from the list of jobs to look at
+ for r in &stack {
+ if let Some(pos) = jobs.iter().position(|j| j == &r.1) {
+ jobs.remove(pos);
+ }
+ }
+
+ // Find the queries in the cycle which are
+ // connected to queries outside the cycle
+ let entry_points = stack
+ .iter()
+ .filter_map(|&(span, query)| {
+ if query.parent(query_map).is_none() {
+ // This query is connected to the root (it has no query parent)
+ Some((span, query, None))
+ } else {
+ let mut waiters = Vec::new();
+ // Find all the direct waiters who lead to the root
+ visit_waiters(query_map, query, |span, waiter| {
+ // Mark all the other queries in the cycle as already visited
+ let mut visited = FxHashSet::from_iter(stack.iter().map(|q| q.1));
+
+ if connected_to_root(query_map, waiter, &mut visited) {
+ waiters.push((span, waiter));
+ }
+
+ None
+ });
+ if waiters.is_empty() {
+ None
+ } else {
+ // Deterministically pick one of the waiters to show to the user
+ let waiter = *pick_query(query_map, &waiters, |s| *s);
+ Some((span, query, Some(waiter)))
+ }
+ }
+ })
+ .collect::<Vec<(Span, QueryJobId, Option<(Span, QueryJobId)>)>>();
+
+ // Deterministically pick an entry point
+ let (_, entry_point, usage) = pick_query(query_map, &entry_points, |e| (e.0, e.1));
+
+ // Shift the stack so that our entry point is first
+ let entry_point_pos = stack.iter().position(|(_, query)| query == entry_point);
+ if let Some(pos) = entry_point_pos {
+ stack.rotate_left(pos);
+ }
+
+ let usage = usage.as_ref().map(|(span, query)| (*span, query.query(query_map)));
+
+ // Create the cycle error
+ let error = CycleError {
+ usage,
+ cycle: stack
+ .iter()
+ .map(|&(s, ref q)| QueryInfo { span: s, query: q.query(query_map) })
+ .collect(),
+ };
+
+ // We unwrap `waiter` here since there must always be one
+ // edge which is resumable / waited using a query latch
+ let (waitee_query, waiter_idx) = waiter.unwrap();
+
+ // Extract the waiter we want to resume
+ let waiter = waitee_query.latch(query_map).unwrap().extract_waiter(waiter_idx);
+
+ // Set the cycle error so it will be picked up when resumed
+ *waiter.cycle.lock() = Some(error);
+
+ // Put the waiter on the list of things to resume
+ wakelist.push(waiter);
+
+ true
+ } else {
+ false
+ }
+}
+
+/// Detects query cycles by using depth first search over all active query jobs.
+/// If a query cycle is found it will break the cycle by finding an edge which
+/// uses a query latch and then resuming that waiter.
+/// There may be multiple cycles involved in a deadlock, so this searches
+/// all active queries for cycles before finally resuming all the waiters at once.
+#[cfg(parallel_compiler)]
+pub fn deadlock(query_map: QueryMap, registry: &rayon_core::Registry) {
+ let on_panic = OnDrop(|| {
+ eprintln!("deadlock handler panicked, aborting process");
+ process::abort();
+ });
+
+ let mut wakelist = Vec::new();
+ let mut jobs: Vec<QueryJobId> = query_map.keys().cloned().collect();
+
+ let mut found_cycle = false;
+
+ while jobs.len() > 0 {
+ if remove_cycle(&query_map, &mut jobs, &mut wakelist) {
+ found_cycle = true;
+ }
+ }
+
+ // Check that a cycle was found. It is possible for a deadlock to occur without
+ // a query cycle if a query which can be waited on uses Rayon to do multithreading
+ // internally. Such a query (X) may be executing on 2 threads (A and B) and A may
+ // wait using Rayon on B. Rayon may then switch to executing another query (Y)
+ // which in turn will wait on X causing a deadlock. We have a false dependency from
+ // X to Y due to Rayon waiting and a true dependency from Y to X. The algorithm here
+ // only considers the true dependency and won't detect a cycle.
+ assert!(found_cycle);
+
+ // FIXME: Ensure this won't cause a deadlock before we return
+ for waiter in wakelist.into_iter() {
+ waiter.notify(registry);
+ }
+
+ on_panic.disable();
+}
+
+#[inline(never)]
+#[cold]
+pub(crate) fn report_cycle<'a>(
+ sess: &'a Session,
+ CycleError { usage, cycle: stack }: CycleError,
+) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
+ assert!(!stack.is_empty());
+
+ let span = stack[0].query.default_span(stack[1 % stack.len()].span);
+ let mut err =
+ struct_span_err!(sess, span, E0391, "cycle detected when {}", stack[0].query.description);
+
+ for i in 1..stack.len() {
+ let query = &stack[i].query;
+ let span = query.default_span(stack[(i + 1) % stack.len()].span);
+ err.span_note(span, &format!("...which requires {}...", query.description));
+ }
+
+ if stack.len() == 1 {
+ err.note(&format!("...which immediately requires {} again", stack[0].query.description));
+ } else {
+ err.note(&format!(
+ "...which again requires {}, completing the cycle",
+ stack[0].query.description
+ ));
+ }
+
+ if stack.iter().all(|entry| {
+ entry
+ .query
+ .def_kind
+ .map_or(false, |def_kind| matches!(def_kind, DefKind::TyAlias | DefKind::TraitAlias))
+ }) {
+ if stack.iter().all(|entry| {
+ entry.query.def_kind.map_or(false, |def_kind| matches!(def_kind, DefKind::TyAlias))
+ }) {
+ err.note("type aliases cannot be recursive");
+ err.help("consider using a struct, enum, or union instead to break the cycle");
+ err.help("see <https://doc.rust-lang.org/reference/types.html#recursive-types> for more information");
+ } else {
+ err.note("trait aliases cannot be recursive");
+ }
+ }
+
+ if let Some((span, query)) = usage {
+ err.span_note(query.default_span(span), &format!("cycle used when {}", query.description));
+ }
+
+ err
+}
+
+pub fn print_query_stack<CTX: QueryContext>(
+ tcx: CTX,
+ mut current_query: Option<QueryJobId>,
+ handler: &Handler,
+ num_frames: Option<usize>,
+) -> usize {
+ // Be careful relying on global state here: this code is called from
+ // a panic hook, which means that the global `Handler` may be in a weird
+ // state if it was responsible for triggering the panic.
+ let mut i = 0;
+ let query_map = tcx.try_collect_active_jobs();
+
+ while let Some(query) = current_query {
+ if Some(i) == num_frames {
+ break;
+ }
+ let Some(query_info) = query_map.as_ref().and_then(|map| map.get(&query)) else {
+ break;
+ };
+ let mut diag = Diagnostic::new(
+ Level::FailureNote,
+ &format!("#{} [{}] {}", i, query_info.query.name, query_info.query.description),
+ );
+ diag.span = query_info.job.span.into();
+ handler.force_print_diagnostic(diag);
+
+ current_query = query_info.job.parent;
+ i += 1;
+ }
+
+ i
+}
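
`job.rs` detects query cycles in two ways: without the parallel compiler, `find_cycle_in_stack` simply walks the `parent` chain of the current job until it meets the job being waited on; with the parallel compiler, `cycle_check`/`remove_cycle` run a depth-first search over parent edges and latch waiters and then wake one waiter with a `CycleError`. The sketch below reproduces only the first, single-threaded idea over a toy job map (invented `JobId`/`JobInfo` types with `&'static str` frame names); it is an illustration, not the rustc implementation.

use std::collections::HashMap;

// Toy stand-ins: a job id and its parent edge, keyed in a map like QueryMap.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct JobId(u64);

struct JobInfo {
    name: &'static str,
    parent: Option<JobId>,
}

// Walk the parent chain from `current`, collecting frames until we meet
// `waited_on` again -- the same idea as QueryJobId::find_cycle_in_stack.
fn find_cycle(
    jobs: &HashMap<JobId, JobInfo>,
    waited_on: JobId,
    mut current: Option<JobId>,
) -> Option<Vec<&'static str>> {
    let mut cycle = Vec::new();
    while let Some(id) = current {
        let info = &jobs[&id];
        cycle.push(info.name);
        if id == waited_on {
            cycle.reverse(); // report the cycle outermost-first, like the real error
            return Some(cycle);
        }
        current = info.parent;
    }
    None // the waited-on job is not on our stack: no cycle
}

fn main() {
    // type_of -> layout_of, and layout_of now tries to wait on type_of again.
    let mut jobs = HashMap::new();
    jobs.insert(JobId(1), JobInfo { name: "type_of", parent: None });
    jobs.insert(JobId(2), JobInfo { name: "layout_of", parent: Some(JobId(1)) });
    let cycle = find_cycle(&jobs, JobId(1), Some(JobId(2)));
    assert_eq!(cycle, Some(vec!["type_of", "layout_of"]));
}
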
diff --git a/compiler/rustc_query_system/src/query/mod.rs b/compiler/rustc_query_system/src/query/mod.rs
new file mode 100644
index 000000000..fb2258434
--- /dev/null
+++ b/compiler/rustc_query_system/src/query/mod.rs
@@ -0,0 +1,125 @@
+mod plumbing;
+pub use self::plumbing::*;
+
+mod job;
+#[cfg(parallel_compiler)]
+pub use self::job::deadlock;
+pub use self::job::{print_query_stack, QueryInfo, QueryJob, QueryJobId, QueryJobInfo, QueryMap};
+
+mod caches;
+pub use self::caches::{
+ ArenaCacheSelector, CacheSelector, DefaultCacheSelector, QueryCache, QueryStorage,
+};
+
+mod config;
+pub use self::config::{QueryConfig, QueryDescription, QueryVTable};
+
+use crate::dep_graph::{DepNodeIndex, HasDepContext, SerializedDepNodeIndex};
+
+use rustc_data_structures::sync::Lock;
+use rustc_data_structures::thin_vec::ThinVec;
+use rustc_errors::Diagnostic;
+use rustc_hir::def::DefKind;
+use rustc_span::Span;
+
+/// Description of a frame in the query stack.
+///
+/// This is mostly used in case of cycles for error reporting.
+#[derive(Clone, Debug)]
+pub struct QueryStackFrame {
+ pub name: &'static str,
+ pub description: String,
+ span: Option<Span>,
+ def_kind: Option<DefKind>,
+ /// This hash is used to deterministically pick
+ /// a query to remove cycles in the parallel compiler.
+ #[cfg(parallel_compiler)]
+ hash: u64,
+}
+
+impl QueryStackFrame {
+ #[inline]
+ pub fn new(
+ name: &'static str,
+ description: String,
+ span: Option<Span>,
+ def_kind: Option<DefKind>,
+ _hash: impl FnOnce() -> u64,
+ ) -> Self {
+ Self {
+ name,
+ description,
+ span,
+ def_kind,
+ #[cfg(parallel_compiler)]
+ hash: _hash(),
+ }
+ }
+
+ // FIXME(eddyb) Get more valid `Span`s on queries.
+ #[inline]
+ pub fn default_span(&self, span: Span) -> Span {
+ if !span.is_dummy() {
+ return span;
+ }
+ self.span.unwrap_or(span)
+ }
+}
+
+/// Tracks 'side effects' for a particular query.
+/// This struct is saved to disk along with the query result,
+/// and loaded from disk if we mark the query as green.
+/// This allows us to 'replay' changes to global state
+/// that would otherwise only occur if we actually
+/// executed the query method.
+#[derive(Debug, Clone, Default, Encodable, Decodable)]
+pub struct QuerySideEffects {
+ /// Stores any diagnostics emitted during query execution.
+ /// These diagnostics will be re-emitted if we mark
+ /// the query as green.
+ pub(super) diagnostics: ThinVec<Diagnostic>,
+}
+
+impl QuerySideEffects {
+ #[inline]
+ pub fn is_empty(&self) -> bool {
+ let QuerySideEffects { diagnostics } = self;
+ diagnostics.is_empty()
+ }
+ pub fn append(&mut self, other: QuerySideEffects) {
+ let QuerySideEffects { diagnostics } = self;
+ diagnostics.extend(other.diagnostics);
+ }
+}
+
+pub trait QueryContext: HasDepContext {
+ fn next_job_id(&self) -> QueryJobId;
+
+ /// Get the query information from the TLS context.
+ fn current_query_job(&self) -> Option<QueryJobId>;
+
+ fn try_collect_active_jobs(&self) -> Option<QueryMap>;
+
+ /// Load side effects associated to the node in the previous session.
+ fn load_side_effects(&self, prev_dep_node_index: SerializedDepNodeIndex) -> QuerySideEffects;
+
+ /// Register diagnostics for the given node, for use in next session.
+ fn store_side_effects(&self, dep_node_index: DepNodeIndex, side_effects: QuerySideEffects);
+
+ /// Register diagnostics for the given node, for use in next session.
+ fn store_side_effects_for_anon_node(
+ &self,
+ dep_node_index: DepNodeIndex,
+ side_effects: QuerySideEffects,
+ );
+
+ /// Executes a job by changing the `ImplicitCtxt` to point to the
+ /// new query job while it executes. It returns the diagnostics
+ /// captured during execution and the actual result.
+ fn start_query<R>(
+ &self,
+ token: QueryJobId,
+ diagnostics: Option<&Lock<ThinVec<Diagnostic>>>,
+ compute: impl FnOnce() -> R,
+ ) -> R;
+}
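
`QuerySideEffects` exists so that diagnostics emitted while a query ran can be saved next to its result and re-emitted when the query is later marked green without being re-executed. Below is a toy sketch of that record-and-replay idea, with `String`s standing in for rustc's `Diagnostic`, a plain `Vec` standing in for `ThinVec`, and an invented `CachedResult` wrapper; none of these names are part of the real API.

// Toy model of side-effect replay: diagnostics produced while computing a
// result are stored next to it, and "re-emitted" when the cached result is
// reused instead of recomputed.
#[derive(Default)]
struct SideEffects {
    diagnostics: Vec<String>,
}

struct CachedResult<V> {
    value: V,
    side_effects: SideEffects,
}

fn compute_with_diagnostics() -> CachedResult<u32> {
    let mut side_effects = SideEffects::default();
    side_effects.diagnostics.push("warning: unused variable `x`".to_string());
    CachedResult { value: 7, side_effects }
}

fn replay(cached: &CachedResult<u32>) -> u32 {
    // The query is "green": reuse the value, but re-emit what it said last time.
    for diag in &cached.side_effects.diagnostics {
        eprintln!("{diag}");
    }
    cached.value
}

fn main() {
    let cached = compute_with_diagnostics(); // clean build: run the query
    assert_eq!(replay(&cached), 7);          // incremental build: replay, don't rerun
}
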
diff --git a/compiler/rustc_query_system/src/query/plumbing.rs b/compiler/rustc_query_system/src/query/plumbing.rs
new file mode 100644
index 000000000..5e8ea07d0
--- /dev/null
+++ b/compiler/rustc_query_system/src/query/plumbing.rs
@@ -0,0 +1,742 @@
+//! The implementation of the query system itself. This defines the macros that
+//! generate the actual methods on tcx which find and execute the provider,
+//! manage the caches, and so forth.
+
+use crate::dep_graph::{DepContext, DepNode, DepNodeIndex, DepNodeParams};
+use crate::query::caches::QueryCache;
+use crate::query::config::{QueryDescription, QueryVTable};
+use crate::query::job::{report_cycle, QueryInfo, QueryJob, QueryJobId, QueryJobInfo};
+use crate::query::{QueryContext, QueryMap, QuerySideEffects, QueryStackFrame};
+use rustc_data_structures::fingerprint::Fingerprint;
+use rustc_data_structures::fx::FxHashMap;
+#[cfg(parallel_compiler)]
+use rustc_data_structures::profiling::TimingGuard;
+#[cfg(parallel_compiler)]
+use rustc_data_structures::sharded::Sharded;
+use rustc_data_structures::sync::Lock;
+use rustc_data_structures::thin_vec::ThinVec;
+use rustc_errors::{DiagnosticBuilder, ErrorGuaranteed, FatalError};
+use rustc_session::Session;
+use rustc_span::{Span, DUMMY_SP};
+use std::cell::Cell;
+use std::collections::hash_map::Entry;
+use std::fmt::Debug;
+use std::hash::Hash;
+use std::mem;
+use std::ptr;
+
+pub struct QueryState<K> {
+ #[cfg(parallel_compiler)]
+ active: Sharded<FxHashMap<K, QueryResult>>,
+ #[cfg(not(parallel_compiler))]
+ active: Lock<FxHashMap<K, QueryResult>>,
+}
+
+/// Indicates the state of a query for a given key in a query map.
+enum QueryResult {
+ /// An already executing query. The query job can be used to await for its completion.
+ Started(QueryJob),
+
+ /// The query panicked. Queries trying to wait on this will raise a fatal error which will
+ /// silently panic.
+ Poisoned,
+}
+
+impl<K> QueryState<K>
+where
+ K: Eq + Hash + Clone + Debug,
+{
+ pub fn all_inactive(&self) -> bool {
+ #[cfg(parallel_compiler)]
+ {
+ let shards = self.active.lock_shards();
+ shards.iter().all(|shard| shard.is_empty())
+ }
+ #[cfg(not(parallel_compiler))]
+ {
+ self.active.lock().is_empty()
+ }
+ }
+
+ pub fn try_collect_active_jobs<CTX: Copy>(
+ &self,
+ tcx: CTX,
+ make_query: fn(CTX, K) -> QueryStackFrame,
+ jobs: &mut QueryMap,
+ ) -> Option<()> {
+ #[cfg(parallel_compiler)]
+ {
+ // We use try_lock_shards here since we are called from the
+ // deadlock handler, and this shouldn't be locked.
+ let shards = self.active.try_lock_shards()?;
+ for shard in shards.iter() {
+ for (k, v) in shard.iter() {
+ if let QueryResult::Started(ref job) = *v {
+ let query = make_query(tcx, k.clone());
+ jobs.insert(job.id, QueryJobInfo { query, job: job.clone() });
+ }
+ }
+ }
+ }
+ #[cfg(not(parallel_compiler))]
+ {
+ // We use try_lock here since we are called from the
+ // deadlock handler, and this shouldn't be locked.
+ // (FIXME: Is this relevant for non-parallel compilers? It doesn't
+ // really hurt much.)
+ for (k, v) in self.active.try_lock()?.iter() {
+ if let QueryResult::Started(ref job) = *v {
+ let query = make_query(tcx, k.clone());
+ jobs.insert(job.id, QueryJobInfo { query, job: job.clone() });
+ }
+ }
+ }
+
+ Some(())
+ }
+}
+
+impl<K> Default for QueryState<K> {
+ fn default() -> QueryState<K> {
+ QueryState { active: Default::default() }
+ }
+}
+
+/// A type representing the responsibility to execute the query job identified
+/// by the `id` field. This will poison the relevant query if dropped.
+struct JobOwner<'tcx, K>
+where
+ K: Eq + Hash + Clone,
+{
+ state: &'tcx QueryState<K>,
+ key: K,
+ id: QueryJobId,
+}
+
+#[cold]
+#[inline(never)]
+fn mk_cycle<CTX, V, R>(
+ tcx: CTX,
+ error: CycleError,
+ handle_cycle_error: fn(CTX, DiagnosticBuilder<'_, ErrorGuaranteed>) -> V,
+ cache: &dyn crate::query::QueryStorage<Value = V, Stored = R>,
+) -> R
+where
+ CTX: QueryContext,
+ V: std::fmt::Debug,
+ R: Clone,
+{
+ let error = report_cycle(tcx.dep_context().sess(), error);
+ let value = handle_cycle_error(tcx, error);
+ cache.store_nocache(value)
+}
+
+impl<'tcx, K> JobOwner<'tcx, K>
+where
+ K: Eq + Hash + Clone,
+{
+ /// Either gets a `JobOwner` corresponding to the query, allowing us to
+ /// start executing the query, or returns with the result of the query.
+ /// This function assumes that `try_get_cached` is already called and returned `lookup`.
+ /// If the query is executing elsewhere, this will wait for it and return the result.
+ /// If the query panicked, this will silently panic.
+ ///
+ /// This function is inlined because that results in a noticeable speed-up
+ /// for some compile-time benchmarks.
+ #[inline(always)]
+ fn try_start<'b, CTX>(
+ tcx: &'b CTX,
+ state: &'b QueryState<K>,
+ span: Span,
+ key: K,
+ ) -> TryGetJob<'b, K>
+ where
+ CTX: QueryContext,
+ {
+ #[cfg(parallel_compiler)]
+ let mut state_lock = state.active.get_shard_by_value(&key).lock();
+ #[cfg(not(parallel_compiler))]
+ let mut state_lock = state.active.lock();
+ let lock = &mut *state_lock;
+
+ match lock.entry(key) {
+ Entry::Vacant(entry) => {
+ let id = tcx.next_job_id();
+ let job = tcx.current_query_job();
+ let job = QueryJob::new(id, span, job);
+
+ let key = entry.key().clone();
+ entry.insert(QueryResult::Started(job));
+
+ let owner = JobOwner { state, id, key };
+ return TryGetJob::NotYetStarted(owner);
+ }
+ Entry::Occupied(mut entry) => {
+ match entry.get_mut() {
+ #[cfg(not(parallel_compiler))]
+ QueryResult::Started(job) => {
+ let id = job.id;
+ drop(state_lock);
+
+ // If we are single-threaded we know that we have a cycle error,
+ // so we just return the error.
+ return TryGetJob::Cycle(id.find_cycle_in_stack(
+ tcx.try_collect_active_jobs().unwrap(),
+ &tcx.current_query_job(),
+ span,
+ ));
+ }
+ #[cfg(parallel_compiler)]
+ QueryResult::Started(job) => {
+ // For parallel queries, we'll block and wait until the query running
+ // in another thread has completed. Record how long we wait in the
+ // self-profiler.
+ let query_blocked_prof_timer = tcx.dep_context().profiler().query_blocked();
+
+ // Get the latch out
+ let latch = job.latch();
+
+ drop(state_lock);
+
+ // With parallel queries we might just have to wait on some other
+ // thread.
+ let result = latch.wait_on(tcx.current_query_job(), span);
+
+ match result {
+ Ok(()) => TryGetJob::JobCompleted(query_blocked_prof_timer),
+ Err(cycle) => TryGetJob::Cycle(cycle),
+ }
+ }
+ QueryResult::Poisoned => FatalError.raise(),
+ }
+ }
+ }
+ }
+
+ /// Completes the query by updating the query cache with the `result`,
+ /// signals the waiter and forgets the JobOwner, so it won't poison the query
+ fn complete<C>(self, cache: &C, result: C::Value, dep_node_index: DepNodeIndex) -> C::Stored
+ where
+ C: QueryCache<Key = K>,
+ {
+ // We can move out of `self` here because we `mem::forget` it below
+ let key = unsafe { ptr::read(&self.key) };
+ let state = self.state;
+
+ // Forget ourself so our destructor won't poison the query
+ mem::forget(self);
+
+ let (job, result) = {
+ let job = {
+ #[cfg(parallel_compiler)]
+ let mut lock = state.active.get_shard_by_value(&key).lock();
+ #[cfg(not(parallel_compiler))]
+ let mut lock = state.active.lock();
+ match lock.remove(&key).unwrap() {
+ QueryResult::Started(job) => job,
+ QueryResult::Poisoned => panic!(),
+ }
+ };
+ let result = cache.complete(key, result, dep_node_index);
+ (job, result)
+ };
+
+ job.signal_complete();
+ result
+ }
+}
+
+impl<'tcx, K> Drop for JobOwner<'tcx, K>
+where
+ K: Eq + Hash + Clone,
+{
+ #[inline(never)]
+ #[cold]
+ fn drop(&mut self) {
+ // Poison the query so jobs waiting on it panic.
+ let state = self.state;
+ let job = {
+ #[cfg(parallel_compiler)]
+ let mut shard = state.active.get_shard_by_value(&self.key).lock();
+ #[cfg(not(parallel_compiler))]
+ let mut shard = state.active.lock();
+ let job = match shard.remove(&self.key).unwrap() {
+ QueryResult::Started(job) => job,
+ QueryResult::Poisoned => panic!(),
+ };
+ shard.insert(self.key.clone(), QueryResult::Poisoned);
+ job
+ };
+ // Also signal the completion of the job, so waiters
+ // will continue execution.
+ job.signal_complete();
+ }
+}
+
+#[derive(Clone)]
+pub(crate) struct CycleError {
+ /// The query and related span that uses the cycle.
+ pub usage: Option<(Span, QueryStackFrame)>,
+ pub cycle: Vec<QueryInfo>,
+}
+
+/// The result of `try_start`.
+enum TryGetJob<'tcx, K>
+where
+ K: Eq + Hash + Clone,
+{
+ /// The query is not yet started. Contains a guard to the cache eventually used to start it.
+ NotYetStarted(JobOwner<'tcx, K>),
+
+ /// The query was already completed.
+ /// Returns the result of the query and its dep-node index
+ /// if it succeeded or a cycle error if it failed.
+ #[cfg(parallel_compiler)]
+ JobCompleted(TimingGuard<'tcx>),
+
+ /// Trying to execute the query resulted in a cycle.
+ Cycle(CycleError),
+}
+
+/// Checks if the query is already computed and in the cache.
+/// On a hit, registers a dep-graph read of the cached entry, calls `on_hit`
+/// with the cached value, and returns its result; on a miss, returns
+/// `Err(())` so the caller can go on to compute the query.
+#[inline]
+pub fn try_get_cached<'a, CTX, C, R, OnHit>(
+ tcx: CTX,
+ cache: &'a C,
+ key: &C::Key,
+ // `on_hit` can be called while holding a lock to the query cache
+ on_hit: OnHit,
+) -> Result<R, ()>
+where
+ C: QueryCache,
+ CTX: DepContext,
+ OnHit: FnOnce(&C::Stored) -> R,
+{
+ cache.lookup(&key, |value, index| {
+ if std::intrinsics::unlikely(tcx.profiler().enabled()) {
+ tcx.profiler().query_cache_hit(index.into());
+ }
+ tcx.dep_graph().read_index(index);
+ on_hit(value)
+ })
+}
+
+fn try_execute_query<CTX, C>(
+ tcx: CTX,
+ state: &QueryState<C::Key>,
+ cache: &C,
+ span: Span,
+ key: C::Key,
+ dep_node: Option<DepNode<CTX::DepKind>>,
+ query: &QueryVTable<CTX, C::Key, C::Value>,
+) -> (C::Stored, Option<DepNodeIndex>)
+where
+ C: QueryCache,
+ C::Key: Clone + DepNodeParams<CTX::DepContext>,
+ CTX: QueryContext,
+{
+ match JobOwner::<'_, C::Key>::try_start(&tcx, state, span, key.clone()) {
+ TryGetJob::NotYetStarted(job) => {
+ let (result, dep_node_index) = execute_job(tcx, key, dep_node, query, job.id);
+ let result = job.complete(cache, result, dep_node_index);
+ (result, Some(dep_node_index))
+ }
+ TryGetJob::Cycle(error) => {
+ let result = mk_cycle(tcx, error, query.handle_cycle_error, cache);
+ (result, None)
+ }
+ #[cfg(parallel_compiler)]
+ TryGetJob::JobCompleted(query_blocked_prof_timer) => {
+ let (v, index) = cache
+ .lookup(&key, |value, index| (value.clone(), index))
+ .unwrap_or_else(|_| panic!("value must be in cache after waiting"));
+
+ if std::intrinsics::unlikely(tcx.dep_context().profiler().enabled()) {
+ tcx.dep_context().profiler().query_cache_hit(index.into());
+ }
+ query_blocked_prof_timer.finish_with_query_invocation_id(index.into());
+
+ (v, Some(index))
+ }
+ }
+}
+
+fn execute_job<CTX, K, V>(
+ tcx: CTX,
+ key: K,
+ mut dep_node_opt: Option<DepNode<CTX::DepKind>>,
+ query: &QueryVTable<CTX, K, V>,
+ job_id: QueryJobId,
+) -> (V, DepNodeIndex)
+where
+ K: Clone + DepNodeParams<CTX::DepContext>,
+ V: Debug,
+ CTX: QueryContext,
+{
+ let dep_graph = tcx.dep_context().dep_graph();
+
+ // Fast path for when incr. comp. is off.
+ if !dep_graph.is_fully_enabled() {
+ let prof_timer = tcx.dep_context().profiler().query_provider();
+ let result = tcx.start_query(job_id, None, || query.compute(*tcx.dep_context(), key));
+ let dep_node_index = dep_graph.next_virtual_depnode_index();
+ prof_timer.finish_with_query_invocation_id(dep_node_index.into());
+ return (result, dep_node_index);
+ }
+
+ if !query.anon && !query.eval_always {
+ // `to_dep_node` is expensive for some `DepKind`s.
+ let dep_node =
+ dep_node_opt.get_or_insert_with(|| query.to_dep_node(*tcx.dep_context(), &key));
+
+ // The diagnostics for this query will be promoted to the current session during
+ // `try_mark_green()`, so we can ignore them here.
+ if let Some(ret) = tcx.start_query(job_id, None, || {
+ try_load_from_disk_and_cache_in_memory(tcx, &key, &dep_node, query)
+ }) {
+ return ret;
+ }
+ }
+
+ let prof_timer = tcx.dep_context().profiler().query_provider();
+ let diagnostics = Lock::new(ThinVec::new());
+
+ let (result, dep_node_index) = tcx.start_query(job_id, Some(&diagnostics), || {
+ if query.anon {
+ return dep_graph.with_anon_task(*tcx.dep_context(), query.dep_kind, || {
+ query.compute(*tcx.dep_context(), key)
+ });
+ }
+
+ // `to_dep_node` is expensive for some `DepKind`s.
+ let dep_node = dep_node_opt.unwrap_or_else(|| query.to_dep_node(*tcx.dep_context(), &key));
+
+ dep_graph.with_task(dep_node, *tcx.dep_context(), key, query.compute, query.hash_result)
+ });
+
+ prof_timer.finish_with_query_invocation_id(dep_node_index.into());
+
+ let diagnostics = diagnostics.into_inner();
+ let side_effects = QuerySideEffects { diagnostics };
+
+ if std::intrinsics::unlikely(!side_effects.is_empty()) {
+ if query.anon {
+ tcx.store_side_effects_for_anon_node(dep_node_index, side_effects);
+ } else {
+ tcx.store_side_effects(dep_node_index, side_effects);
+ }
+ }
+
+ (result, dep_node_index)
+}
+
+fn try_load_from_disk_and_cache_in_memory<CTX, K, V>(
+ tcx: CTX,
+ key: &K,
+ dep_node: &DepNode<CTX::DepKind>,
+ query: &QueryVTable<CTX, K, V>,
+) -> Option<(V, DepNodeIndex)>
+where
+ K: Clone,
+ CTX: QueryContext,
+ V: Debug,
+{
+ // Note this function can be called concurrently from the same query
+ // We must ensure that this is handled correctly.
+
+ let dep_graph = tcx.dep_context().dep_graph();
+ let (prev_dep_node_index, dep_node_index) = dep_graph.try_mark_green(tcx, &dep_node)?;
+
+ debug_assert!(dep_graph.is_green(dep_node));
+
+ // First we try to load the result from the on-disk cache.
+ // Some things are never cached on disk.
+ if query.cache_on_disk {
+ let prof_timer = tcx.dep_context().profiler().incr_cache_loading();
+
+ // The call to `with_query_deserialization` enforces that no new `DepNodes`
+ // are created during deserialization. See the docs of that method for more
+ // details.
+ let result = dep_graph
+ .with_query_deserialization(|| query.try_load_from_disk(tcx, prev_dep_node_index));
+
+ prof_timer.finish_with_query_invocation_id(dep_node_index.into());
+
+ if let Some(result) = result {
+ if std::intrinsics::unlikely(
+ tcx.dep_context().sess().opts.unstable_opts.query_dep_graph,
+ ) {
+ dep_graph.mark_debug_loaded_from_disk(*dep_node)
+ }
+
+ let prev_fingerprint = tcx
+ .dep_context()
+ .dep_graph()
+ .prev_fingerprint_of(dep_node)
+ .unwrap_or(Fingerprint::ZERO);
+ // If `-Zincremental-verify-ich` is specified, re-hash results from
+ // the cache and make sure that they have the expected fingerprint.
+ //
+ // If not, we still seek to verify a subset of fingerprints loaded
+ // from disk. Re-hashing results is fairly expensive, so we can't
+ // currently afford to verify every hash. This subset should still
+ // give us some coverage of potential bugs though.
+ let try_verify = prev_fingerprint.as_value().1 % 32 == 0;
+ if std::intrinsics::unlikely(
+ try_verify || tcx.dep_context().sess().opts.unstable_opts.incremental_verify_ich,
+ ) {
+ incremental_verify_ich(*tcx.dep_context(), &result, dep_node, query);
+ }
+
+ return Some((result, dep_node_index));
+ }
+
+ // We always expect to find a cached result for things that
+ // can be forced from `DepNode`.
+ debug_assert!(
+ !tcx.dep_context().fingerprint_style(dep_node.kind).reconstructible(),
+ "missing on-disk cache entry for {:?}",
+ dep_node
+ );
+ }
+
+ // We could not load a result from the on-disk cache, so
+ // recompute.
+ let prof_timer = tcx.dep_context().profiler().query_provider();
+
+ // The dep-graph for this computation is already in-place.
+ let result = dep_graph.with_ignore(|| query.compute(*tcx.dep_context(), key.clone()));
+
+ prof_timer.finish_with_query_invocation_id(dep_node_index.into());
+
+ // Verify that re-running the query produced a result with the expected hash
+ // This catches bugs in query implementations, turning them into ICEs.
+ // For example, a query might sort its result by `DefId` - since `DefId`s are
+ // not stable across compilation sessions, the result could end up getting sorted
+ // in a different order when the query is re-run, even though all of the inputs
+ // (e.g. `DefPathHash` values) were green.
+ //
+ // See issue #82920 for an example of a miscompilation that would get turned into
+ // an ICE by this check
+ incremental_verify_ich(*tcx.dep_context(), &result, dep_node, query);
+
+ Some((result, dep_node_index))
+}
+
+fn incremental_verify_ich<CTX, K, V: Debug>(
+ tcx: CTX::DepContext,
+ result: &V,
+ dep_node: &DepNode<CTX::DepKind>,
+ query: &QueryVTable<CTX, K, V>,
+) where
+ CTX: QueryContext,
+{
+ assert!(
+ tcx.dep_graph().is_green(dep_node),
+ "fingerprint for green query instance not loaded from cache: {:?}",
+ dep_node,
+ );
+
+ debug!("BEGIN verify_ich({:?})", dep_node);
+ let new_hash = query.hash_result.map_or(Fingerprint::ZERO, |f| {
+ tcx.with_stable_hashing_context(|mut hcx| f(&mut hcx, result))
+ });
+ let old_hash = tcx.dep_graph().prev_fingerprint_of(dep_node);
+ debug!("END verify_ich({:?})", dep_node);
+
+ if Some(new_hash) != old_hash {
+ incremental_verify_ich_cold(tcx.sess(), DebugArg::from(&dep_node), DebugArg::from(&result));
+ }
+}
+
+// This DebugArg business is largely a mirror of std::fmt::ArgumentV1, which is
+// currently not exposed publicly.
+//
+// The PR which added this attempted to use `&dyn Debug` instead, but that
+// showed statistically significant worse compiler performance. It's not
+// actually clear what the cause there was -- the code should be cold. If this
+// can be replaced with `&dyn Debug` with no perf impact, then it probably
+// should be.
+extern "C" {
+ type Opaque;
+}
+
+struct DebugArg<'a> {
+ value: &'a Opaque,
+ fmt: fn(&Opaque, &mut std::fmt::Formatter<'_>) -> std::fmt::Result,
+}
+
+impl<'a, T> From<&'a T> for DebugArg<'a>
+where
+ T: std::fmt::Debug,
+{
+ fn from(value: &'a T) -> DebugArg<'a> {
+ DebugArg {
+ value: unsafe { std::mem::transmute(value) },
+ fmt: unsafe {
+ std::mem::transmute(<T as std::fmt::Debug>::fmt as fn(_, _) -> std::fmt::Result)
+ },
+ }
+ }
+}
+
+impl std::fmt::Debug for DebugArg<'_> {
+ fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+ (self.fmt)(self.value, f)
+ }
+}
+
+// Note that this is marked #[cold] and intentionally takes the equivalent of
+// `dyn Debug` for its arguments, as we want to avoid generating a bunch of
+// different implementations for LLVM to chew on (and filling up the final
+// binary, too).
+#[cold]
+fn incremental_verify_ich_cold(sess: &Session, dep_node: DebugArg<'_>, result: DebugArg<'_>) {
+ let run_cmd = if let Some(crate_name) = &sess.opts.crate_name {
+ format!("`cargo clean -p {}` or `cargo clean`", crate_name)
+ } else {
+ "`cargo clean`".to_string()
+ };
+
+ // When we emit an error message and panic, we try to debug-print the `DepNode`
+ // and query result. Unfortunately, this can cause us to run additional queries,
+ // which may result in another fingerprint mismatch while we're in the middle
+ // of processing this one. To avoid a double-panic (which kills the process
+ // before we can print out the query static), we print out a terse
+ // but 'safe' message if we detect a re-entrant call to this method.
+ thread_local! {
+ static INSIDE_VERIFY_PANIC: Cell<bool> = const { Cell::new(false) };
+ };
+
+ let old_in_panic = INSIDE_VERIFY_PANIC.with(|in_panic| in_panic.replace(true));
+
+ if old_in_panic {
+ sess.struct_err(
+ "internal compiler error: re-entrant incremental verify failure, suppressing message",
+ )
+ .emit();
+ } else {
+ sess.struct_err(&format!("internal compiler error: encountered incremental compilation error with {:?}", dep_node))
+ .help(&format!("This is a known issue with the compiler. Run {} to allow your project to compile", run_cmd))
+ .note("Please follow the instructions below to create a bug report with the provided information")
+ .note("See <https://github.com/rust-lang/rust/issues/84970> for more information")
+ .emit();
+ panic!("Found unstable fingerprints for {:?}: {:?}", dep_node, result);
+ }
+
+ INSIDE_VERIFY_PANIC.with(|in_panic| in_panic.set(old_in_panic));
+}
+
+/// Ensure that either this query has all green inputs or been executed.
+/// Executing `query::ensure(D)` is considered a read of the dep-node `D`.
+/// Returns true if the query should still run.
+///
+/// This function is particularly useful when executing passes for their
+/// side-effects -- e.g., in order to report errors for erroneous programs.
+///
+/// Note: The optimization is only available during incr. comp.
+#[inline(never)]
+fn ensure_must_run<CTX, K, V>(
+ tcx: CTX,
+ key: &K,
+ query: &QueryVTable<CTX, K, V>,
+) -> (bool, Option<DepNode<CTX::DepKind>>)
+where
+ K: crate::dep_graph::DepNodeParams<CTX::DepContext>,
+ CTX: QueryContext,
+{
+ if query.eval_always {
+ return (true, None);
+ }
+
+ // Ensuring an anonymous query makes no sense
+ assert!(!query.anon);
+
+ let dep_node = query.to_dep_node(*tcx.dep_context(), key);
+
+ let dep_graph = tcx.dep_context().dep_graph();
+ match dep_graph.try_mark_green(tcx, &dep_node) {
+ None => {
+ // A None return from `try_mark_green` means that this is either
+ // a new dep node or that the dep node has already been marked red.
+ // Either way, we can't call `dep_graph.read()` as we don't have the
+ // DepNodeIndex. We must invoke the query itself. The performance cost
+ // this introduces should be negligible as we'll immediately hit the
+ // in-memory cache, or another query down the line will.
+ (true, Some(dep_node))
+ }
+ Some((_, dep_node_index)) => {
+ dep_graph.read_index(dep_node_index);
+ tcx.dep_context().profiler().query_cache_hit(dep_node_index.into());
+ (false, None)
+ }
+ }
+}
+
+#[derive(Debug)]
+pub enum QueryMode {
+ Get,
+ Ensure,
+}
+
+pub fn get_query<Q, CTX>(tcx: CTX, span: Span, key: Q::Key, mode: QueryMode) -> Option<Q::Stored>
+where
+ Q: QueryDescription<CTX>,
+ Q::Key: DepNodeParams<CTX::DepContext>,
+ CTX: QueryContext,
+{
+ let query = Q::make_vtable(tcx, &key);
+ let dep_node = if let QueryMode::Ensure = mode {
+ let (must_run, dep_node) = ensure_must_run(tcx, &key, &query);
+ if !must_run {
+ return None;
+ }
+ dep_node
+ } else {
+ None
+ };
+
+ let (result, dep_node_index) = try_execute_query(
+ tcx,
+ Q::query_state(tcx),
+ Q::query_cache(tcx),
+ span,
+ key,
+ dep_node,
+ &query,
+ );
+ if let Some(dep_node_index) = dep_node_index {
+ tcx.dep_context().dep_graph().read_index(dep_node_index)
+ }
+ Some(result)
+}
+
+pub fn force_query<Q, CTX>(tcx: CTX, key: Q::Key, dep_node: DepNode<CTX::DepKind>)
+where
+ Q: QueryDescription<CTX>,
+ Q::Key: DepNodeParams<CTX::DepContext>,
+ CTX: QueryContext,
+{
+ // We may be concurrently trying to both execute and force a query.
+ // Ensure that only one of them runs the query.
+ let cache = Q::query_cache(tcx);
+ let cached = cache.lookup(&key, |_, index| {
+ if std::intrinsics::unlikely(tcx.dep_context().profiler().enabled()) {
+ tcx.dep_context().profiler().query_cache_hit(index.into());
+ }
+ });
+
+ match cached {
+ Ok(()) => return,
+ Err(()) => {}
+ }
+
+ let query = Q::make_vtable(tcx, &key);
+ let state = Q::query_state(tcx);
+ debug_assert!(!query.anon);
+
+ try_execute_query(tcx, state, cache, DUMMY_SP, key, Some(dep_node), &query);
+}