Diffstat (limited to 'compiler/rustc_query_system/src/query')
-rw-r--r--  compiler/rustc_query_system/src/query/caches.rs   |   5
-rw-r--r--  compiler/rustc_query_system/src/query/config.rs   |  55
-rw-r--r--  compiler/rustc_query_system/src/query/job.rs      | 110
-rw-r--r--  compiler/rustc_query_system/src/query/mod.rs      |  15
-rw-r--r--  compiler/rustc_query_system/src/query/plumbing.rs | 200
5 files changed, 192 insertions, 193 deletions
diff --git a/compiler/rustc_query_system/src/query/caches.rs b/compiler/rustc_query_system/src/query/caches.rs
index 4c4680b5d..77d0d0314 100644
--- a/compiler/rustc_query_system/src/query/caches.rs
+++ b/compiler/rustc_query_system/src/query/caches.rs
@@ -9,7 +9,6 @@ use rustc_data_structures::sharded::Sharded;
use rustc_data_structures::sync::Lock;
use rustc_data_structures::sync::WorkerLocal;
use rustc_index::vec::{Idx, IndexVec};
-use std::default::Default;
use std::fmt::Debug;
use std::hash::Hash;
use std::marker::PhantomData;
@@ -117,7 +116,7 @@ where
let mut lock = self.cache.get_shard_by_value(&key).lock();
#[cfg(not(parallel_compiler))]
let mut lock = self.cache.lock();
- // We may be overwriting another value. This is all right, since the dep-graph
+ // We may be overwriting another value. This is all right, since the dep-graph
// will check that the fingerprint matches.
lock.insert(key, (value.clone(), index));
value
@@ -204,7 +203,7 @@ where
let mut lock = self.cache.get_shard_by_value(&key).lock();
#[cfg(not(parallel_compiler))]
let mut lock = self.cache.lock();
- // We may be overwriting another value. This is all right, since the dep-graph
+ // We may be overwriting another value. This is all right, since the dep-graph
// will check that the fingerprint matches.
lock.insert(key, value);
&value.0
diff --git a/compiler/rustc_query_system/src/query/config.rs b/compiler/rustc_query_system/src/query/config.rs
index 7d1b62ab1..8c0330e43 100644
--- a/compiler/rustc_query_system/src/query/config.rs
+++ b/compiler/rustc_query_system/src/query/config.rs
@@ -1,7 +1,6 @@
//! Query configuration and description traits.
-use crate::dep_graph::DepNode;
-use crate::dep_graph::SerializedDepNodeIndex;
+use crate::dep_graph::{DepNode, DepNodeParams, SerializedDepNodeIndex};
use crate::error::HandleCycleError;
use crate::ich::StableHashingContext;
use crate::query::caches::QueryCache;
@@ -11,17 +10,23 @@ use rustc_data_structures::fingerprint::Fingerprint;
use std::fmt::Debug;
use std::hash::Hash;
+pub type HashResult<Qcx, Q> =
+ Option<fn(&mut StableHashingContext<'_>, &<Q as QueryConfig<Qcx>>::Value) -> Fingerprint>;
+
+pub type TryLoadFromDisk<Qcx, Q> =
+ Option<fn(Qcx, SerializedDepNodeIndex) -> Option<<Q as QueryConfig<Qcx>>::Value>>;
+
pub trait QueryConfig<Qcx: QueryContext> {
const NAME: &'static str;
- type Key: Eq + Hash + Clone + Debug;
+ type Key: DepNodeParams<Qcx::DepContext> + Eq + Hash + Clone + Debug;
type Value: Debug;
type Stored: Debug + Clone + std::borrow::Borrow<Self::Value>;
type Cache: QueryCache<Key = Self::Key, Stored = Self::Stored, Value = Self::Value>;
// Don't use this method to access query results, instead use the methods on TyCtxt
- fn query_state<'a>(tcx: Qcx) -> &'a QueryState<Self::Key>
+ fn query_state<'a>(tcx: Qcx) -> &'a QueryState<Self::Key, Qcx::DepKind>
where
Qcx: 'a;
@@ -30,39 +35,27 @@ pub trait QueryConfig<Qcx: QueryContext> {
where
Qcx: 'a;
- // Don't use this method to compute query results, instead use the methods on TyCtxt
- fn make_vtable(tcx: Qcx, key: &Self::Key) -> QueryVTable<Qcx, Self::Key, Self::Value>;
-
fn cache_on_disk(tcx: Qcx::DepContext, key: &Self::Key) -> bool;
// Don't use this method to compute query results, instead use the methods on TyCtxt
fn execute_query(tcx: Qcx::DepContext, k: Self::Key) -> Self::Stored;
-}
-#[derive(Copy, Clone)]
-pub struct QueryVTable<Qcx: QueryContext, K, V> {
- pub anon: bool,
- pub dep_kind: Qcx::DepKind,
- pub eval_always: bool,
- pub depth_limit: bool,
- pub feedable: bool,
-
- pub compute: fn(Qcx::DepContext, K) -> V,
- pub hash_result: Option<fn(&mut StableHashingContext<'_>, &V) -> Fingerprint>,
- pub handle_cycle_error: HandleCycleError,
- // NOTE: this is also `None` if `cache_on_disk()` returns false, not just if it's unsupported by the query
- pub try_load_from_disk: Option<fn(Qcx, SerializedDepNodeIndex) -> Option<V>>,
-}
+ fn compute(tcx: Qcx, key: &Self::Key) -> fn(Qcx::DepContext, Self::Key) -> Self::Value;
-impl<Qcx: QueryContext, K, V> QueryVTable<Qcx, K, V> {
- pub(crate) fn to_dep_node(&self, tcx: Qcx::DepContext, key: &K) -> DepNode<Qcx::DepKind>
- where
- K: crate::dep_graph::DepNodeParams<Qcx::DepContext>,
- {
- DepNode::construct(tcx, self.dep_kind, key)
- }
+ fn try_load_from_disk(qcx: Qcx, idx: &Self::Key) -> TryLoadFromDisk<Qcx, Self>;
+
+ const ANON: bool;
+ const EVAL_ALWAYS: bool;
+ const DEPTH_LIMIT: bool;
+ const FEEDABLE: bool;
+
+ const DEP_KIND: Qcx::DepKind;
+ const HANDLE_CYCLE_ERROR: HandleCycleError;
+
+ const HASH_RESULT: HashResult<Qcx, Self>;
- pub(crate) fn compute(&self, tcx: Qcx::DepContext, key: K) -> V {
- (self.compute)(tcx, key)
+ // Just here for convenience and checking that the key matches the kind, don't override this.
+ fn construct_dep_node(tcx: Qcx::DepContext, key: &Self::Key) -> DepNode<Qcx::DepKind> {
+ DepNode::construct(tcx, Self::DEP_KIND, key)
}
}
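
The config.rs change above removes the runtime QueryVTable struct and moves its fields (anon, eval_always, depth_limit, feedable, dep_kind, handle_cycle_error, hash_result, compute, try_load_from_disk) onto QueryConfig as associated constants and functions. A minimal, self-contained sketch of that pattern, using hypothetical names (MiniQueryConfig, TypeOfQuery) rather than the real rustc types:

    // Per-query metadata as associated consts on a config trait instead of
    // fields of a vtable value; the names here are illustrative only.
    trait MiniQueryConfig {
        const NAME: &'static str;
        const ANON: bool;
        const EVAL_ALWAYS: bool;

        type Key;
        type Value;

        // Replaces the `compute` function pointer the vtable used to carry.
        fn compute(key: &Self::Key) -> Self::Value;
    }

    struct TypeOfQuery;

    impl MiniQueryConfig for TypeOfQuery {
        const NAME: &'static str = "type_of";
        const ANON: bool = false;
        const EVAL_ALWAYS: bool = false;

        type Key = u32;      // stand-in for a DefId
        type Value = String; // stand-in for Ty<'tcx>

        fn compute(key: &Self::Key) -> Self::Value {
            format!("type of item #{key}")
        }
    }

    // Generic plumbing reads the flags as compile-time constants instead of
    // loading them out of a QueryVTable value passed in at runtime.
    fn execute<Q: MiniQueryConfig>(key: Q::Key) -> Q::Value {
        if Q::EVAL_ALWAYS {
            eprintln!("{}: always re-executed, never marked green", Q::NAME);
        }
        if Q::ANON {
            eprintln!("{}: runs as an anonymous dep-graph task", Q::NAME);
        }
        Q::compute(&key)
    }

    fn main() {
        println!("{}", execute::<TypeOfQuery>(7));
    }

The plumbing.rs hunks further below do the analogous thing in the real code, e.g. branching on Q::ANON and Q::EVAL_ALWAYS inside execute_job.
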
diff --git a/compiler/rustc_query_system/src/query/job.rs b/compiler/rustc_query_system/src/query/job.rs
index 49bbcf578..a5a2f0093 100644
--- a/compiler/rustc_query_system/src/query/job.rs
+++ b/compiler/rustc_query_system/src/query/job.rs
@@ -1,6 +1,8 @@
+use crate::dep_graph::DepKind;
use crate::error::CycleStack;
use crate::query::plumbing::CycleError;
use crate::query::{QueryContext, QueryStackFrame};
+use core::marker::PhantomData;
use rustc_data_structures::fx::FxHashMap;
use rustc_errors::{
@@ -22,54 +24,54 @@ use {
rustc_data_structures::{jobserver, OnDrop},
rustc_rayon_core as rayon_core,
rustc_span::DUMMY_SP,
- std::iter::{self, FromIterator},
- std::{mem, process},
+ std::iter,
+ std::process,
};
/// Represents a span and a query key.
#[derive(Clone, Debug)]
-pub struct QueryInfo {
+pub struct QueryInfo<D: DepKind> {
/// The span corresponding to the reason for which this query was required.
pub span: Span,
- pub query: QueryStackFrame,
+ pub query: QueryStackFrame<D>,
}
-pub type QueryMap = FxHashMap<QueryJobId, QueryJobInfo>;
+pub type QueryMap<D> = FxHashMap<QueryJobId, QueryJobInfo<D>>;
/// A value uniquely identifying an active query job.
#[derive(Copy, Clone, Eq, PartialEq, Hash)]
pub struct QueryJobId(pub NonZeroU64);
impl QueryJobId {
- fn query(self, map: &QueryMap) -> QueryStackFrame {
+ fn query<D: DepKind>(self, map: &QueryMap<D>) -> QueryStackFrame<D> {
map.get(&self).unwrap().query.clone()
}
#[cfg(parallel_compiler)]
- fn span(self, map: &QueryMap) -> Span {
+ fn span<D: DepKind>(self, map: &QueryMap<D>) -> Span {
map.get(&self).unwrap().job.span
}
#[cfg(parallel_compiler)]
- fn parent(self, map: &QueryMap) -> Option<QueryJobId> {
+ fn parent<D: DepKind>(self, map: &QueryMap<D>) -> Option<QueryJobId> {
map.get(&self).unwrap().job.parent
}
#[cfg(parallel_compiler)]
- fn latch<'a>(self, map: &'a QueryMap) -> Option<&'a QueryLatch> {
+ fn latch<D: DepKind>(self, map: &QueryMap<D>) -> Option<&QueryLatch<D>> {
map.get(&self).unwrap().job.latch.as_ref()
}
}
#[derive(Clone)]
-pub struct QueryJobInfo {
- pub query: QueryStackFrame,
- pub job: QueryJob,
+pub struct QueryJobInfo<D: DepKind> {
+ pub query: QueryStackFrame<D>,
+ pub job: QueryJob<D>,
}
/// Represents an active query job.
#[derive(Clone)]
-pub struct QueryJob {
+pub struct QueryJob<D: DepKind> {
pub id: QueryJobId,
/// The span corresponding to the reason for which this query was required.
@@ -80,10 +82,11 @@ pub struct QueryJob {
/// The latch that is used to wait on this job.
#[cfg(parallel_compiler)]
- latch: Option<QueryLatch>,
+ latch: Option<QueryLatch<D>>,
+ spooky: core::marker::PhantomData<D>,
}
-impl QueryJob {
+impl<D: DepKind> QueryJob<D> {
/// Creates a new query job.
#[inline]
pub fn new(id: QueryJobId, span: Span, parent: Option<QueryJobId>) -> Self {
@@ -93,11 +96,12 @@ impl QueryJob {
parent,
#[cfg(parallel_compiler)]
latch: None,
+ spooky: PhantomData,
}
}
#[cfg(parallel_compiler)]
- pub(super) fn latch(&mut self) -> QueryLatch {
+ pub(super) fn latch(&mut self) -> QueryLatch<D> {
if self.latch.is_none() {
self.latch = Some(QueryLatch::new());
}
@@ -123,12 +127,12 @@ impl QueryJobId {
#[cold]
#[inline(never)]
#[cfg(not(parallel_compiler))]
- pub(super) fn find_cycle_in_stack(
+ pub(super) fn find_cycle_in_stack<D: DepKind>(
&self,
- query_map: QueryMap,
+ query_map: QueryMap<D>,
current_job: &Option<QueryJobId>,
span: Span,
- ) -> CycleError {
+ ) -> CycleError<D> {
// Find the waitee amongst `current_job` parents
let mut cycle = Vec::new();
let mut current_job = Option::clone(current_job);
@@ -162,14 +166,18 @@ impl QueryJobId {
#[cold]
#[inline(never)]
- pub fn try_find_layout_root(&self, query_map: QueryMap) -> Option<(QueryJobInfo, usize)> {
+ pub fn try_find_layout_root<D: DepKind>(
+ &self,
+ query_map: QueryMap<D>,
+ ) -> Option<(QueryJobInfo<D>, usize)> {
let mut last_layout = None;
let mut current_id = Some(*self);
let mut depth = 0;
while let Some(id) = current_id {
let info = query_map.get(&id).unwrap();
- if info.query.name == "layout_of" {
+ // FIXME: This string comparison should probably not be done.
+ if format!("{:?}", info.query.dep_kind) == "layout_of" {
depth += 1;
last_layout = Some((info.clone(), depth));
}
@@ -180,15 +188,15 @@ impl QueryJobId {
}
#[cfg(parallel_compiler)]
-struct QueryWaiter {
+struct QueryWaiter<D: DepKind> {
query: Option<QueryJobId>,
condvar: Condvar,
span: Span,
- cycle: Lock<Option<CycleError>>,
+ cycle: Lock<Option<CycleError<D>>>,
}
#[cfg(parallel_compiler)]
-impl QueryWaiter {
+impl<D: DepKind> QueryWaiter<D> {
fn notify(&self, registry: &rayon_core::Registry) {
rayon_core::mark_unblocked(registry);
self.condvar.notify_one();
@@ -196,19 +204,19 @@ impl QueryWaiter {
}
#[cfg(parallel_compiler)]
-struct QueryLatchInfo {
+struct QueryLatchInfo<D: DepKind> {
complete: bool,
- waiters: Vec<Lrc<QueryWaiter>>,
+ waiters: Vec<Lrc<QueryWaiter<D>>>,
}
#[cfg(parallel_compiler)]
#[derive(Clone)]
-pub(super) struct QueryLatch {
- info: Lrc<Mutex<QueryLatchInfo>>,
+pub(super) struct QueryLatch<D: DepKind> {
+ info: Lrc<Mutex<QueryLatchInfo<D>>>,
}
#[cfg(parallel_compiler)]
-impl QueryLatch {
+impl<D: DepKind> QueryLatch<D> {
fn new() -> Self {
QueryLatch {
info: Lrc::new(Mutex::new(QueryLatchInfo { complete: false, waiters: Vec::new() })),
@@ -216,7 +224,11 @@ impl QueryLatch {
}
/// Waits for the query job to complete.
- pub(super) fn wait_on(&self, query: Option<QueryJobId>, span: Span) -> Result<(), CycleError> {
+ pub(super) fn wait_on(
+ &self,
+ query: Option<QueryJobId>,
+ span: Span,
+ ) -> Result<(), CycleError<D>> {
let waiter =
Lrc::new(QueryWaiter { query, span, cycle: Lock::new(None), condvar: Condvar::new() });
self.wait_on_inner(&waiter);
@@ -231,7 +243,7 @@ impl QueryLatch {
}
/// Awaits the caller on this latch by blocking the current thread.
- fn wait_on_inner(&self, waiter: &Lrc<QueryWaiter>) {
+ fn wait_on_inner(&self, waiter: &Lrc<QueryWaiter<D>>) {
let mut info = self.info.lock();
if !info.complete {
// We push the waiter on to the `waiters` list. It can be accessed inside
@@ -247,7 +259,7 @@ impl QueryLatch {
jobserver::release_thread();
waiter.condvar.wait(&mut info);
// Release the lock before we potentially block in `acquire_thread`
- mem::drop(info);
+ drop(info);
jobserver::acquire_thread();
}
}
@@ -265,7 +277,7 @@ impl QueryLatch {
/// Removes a single waiter from the list of waiters.
/// This is used to break query cycles.
- fn extract_waiter(&self, waiter: usize) -> Lrc<QueryWaiter> {
+ fn extract_waiter(&self, waiter: usize) -> Lrc<QueryWaiter<D>> {
let mut info = self.info.lock();
debug_assert!(!info.complete);
// Remove the waiter from the list of waiters
@@ -287,9 +299,14 @@ type Waiter = (QueryJobId, usize);
/// required information to resume the waiter.
/// If all `visit` calls returns None, this function also returns None.
#[cfg(parallel_compiler)]
-fn visit_waiters<F>(query_map: &QueryMap, query: QueryJobId, mut visit: F) -> Option<Option<Waiter>>
+fn visit_waiters<F, D>(
+ query_map: &QueryMap<D>,
+ query: QueryJobId,
+ mut visit: F,
+) -> Option<Option<Waiter>>
where
F: FnMut(Span, QueryJobId) -> Option<Option<Waiter>>,
+ D: DepKind,
{
// Visit the parent query which is a non-resumable waiter since it's on the same stack
if let Some(parent) = query.parent(query_map) {
@@ -318,8 +335,8 @@ where
/// If a cycle is detected, this initial value is replaced with the span causing
/// the cycle.
#[cfg(parallel_compiler)]
-fn cycle_check(
- query_map: &QueryMap,
+fn cycle_check<D: DepKind>(
+ query_map: &QueryMap<D>,
query: QueryJobId,
span: Span,
stack: &mut Vec<(Span, QueryJobId)>,
@@ -359,8 +376,8 @@ fn cycle_check(
/// from `query` without going through any of the queries in `visited`.
/// This is achieved with a depth first search.
#[cfg(parallel_compiler)]
-fn connected_to_root(
- query_map: &QueryMap,
+fn connected_to_root<D: DepKind>(
+ query_map: &QueryMap<D>,
query: QueryJobId,
visited: &mut FxHashSet<QueryJobId>,
) -> bool {
@@ -382,9 +399,10 @@ fn connected_to_root(
// Deterministically pick a query from a list
#[cfg(parallel_compiler)]
-fn pick_query<'a, T, F>(query_map: &QueryMap, queries: &'a [T], f: F) -> &'a T
+fn pick_query<'a, T, F, D>(query_map: &QueryMap<D>, queries: &'a [T], f: F) -> &'a T
where
F: Fn(&T) -> (Span, QueryJobId),
+ D: DepKind,
{
// Deterministically pick an entry point
// FIXME: Sort this instead
@@ -408,10 +426,10 @@ where
/// If a cycle was not found, the starting query is removed from `jobs` and
/// the function returns false.
#[cfg(parallel_compiler)]
-fn remove_cycle(
- query_map: &QueryMap,
+fn remove_cycle<D: DepKind>(
+ query_map: &QueryMap<D>,
jobs: &mut Vec<QueryJobId>,
- wakelist: &mut Vec<Lrc<QueryWaiter>>,
+ wakelist: &mut Vec<Lrc<QueryWaiter<D>>>,
) -> bool {
let mut visited = FxHashSet::default();
let mut stack = Vec::new();
@@ -513,7 +531,7 @@ fn remove_cycle(
/// There may be multiple cycles involved in a deadlock, so this searches
/// all active queries for cycles before finally resuming all the waiters at once.
#[cfg(parallel_compiler)]
-pub fn deadlock(query_map: QueryMap, registry: &rayon_core::Registry) {
+pub fn deadlock<D: DepKind>(query_map: QueryMap<D>, registry: &rayon_core::Registry) {
let on_panic = OnDrop(|| {
eprintln!("deadlock handler panicked, aborting process");
process::abort();
@@ -549,9 +567,9 @@ pub fn deadlock(query_map: QueryMap, registry: &rayon_core::Registry) {
#[inline(never)]
#[cold]
-pub(crate) fn report_cycle<'a>(
+pub(crate) fn report_cycle<'a, D: DepKind>(
sess: &'a Session,
- CycleError { usage, cycle: stack }: &CycleError,
+ CycleError { usage, cycle: stack }: &CycleError<D>,
) -> DiagnosticBuilder<'a, ErrorGuaranteed> {
assert!(!stack.is_empty());
@@ -617,7 +635,7 @@ pub fn print_query_stack<Qcx: QueryContext>(
};
let mut diag = Diagnostic::new(
Level::FailureNote,
- &format!("#{} [{}] {}", i, query_info.query.name, query_info.query.description),
+ &format!("#{} [{:?}] {}", i, query_info.query.dep_kind, query_info.query.description),
);
diag.span = query_info.job.span.into();
handler.force_print_diagnostic(diag);
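
Most of the job.rs changes thread a `D: DepKind` type parameter through QueryInfo, QueryMap, QueryJob, QueryLatch and friends. Because `QueryJob<D>` only mentions `D` in the cfg(parallel_compiler) latch field, a `spooky: PhantomData<D>` field is added so the parameter is always used. A small standalone sketch of why that is needed (hypothetical `Job` type, `u8` standing in for a DepKind):

    use std::marker::PhantomData;

    // Without the PhantomData field, a build in which no other field mentions
    // `D` is rejected with E0392 ("parameter `D` is never used").
    struct Job<D> {
        id: u64,
        _kind: PhantomData<D>,
    }

    impl<D> Job<D> {
        fn new(id: u64) -> Self {
            Job { id, _kind: PhantomData }
        }
    }

    fn main() {
        // `u8` stands in for a concrete DepKind enum.
        let job: Job<u8> = Job::new(1);
        println!("job {}", job.id);
    }
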
diff --git a/compiler/rustc_query_system/src/query/mod.rs b/compiler/rustc_query_system/src/query/mod.rs
index 7f3dc50d2..d308af192 100644
--- a/compiler/rustc_query_system/src/query/mod.rs
+++ b/compiler/rustc_query_system/src/query/mod.rs
@@ -12,8 +12,9 @@ pub use self::caches::{
};
mod config;
-pub use self::config::{QueryConfig, QueryVTable};
+pub use self::config::{HashResult, QueryConfig, TryLoadFromDisk};
+use crate::dep_graph::DepKind;
use crate::dep_graph::{DepNodeIndex, HasDepContext, SerializedDepNodeIndex};
use rustc_data_structures::sync::Lock;
use rustc_errors::Diagnostic;
@@ -26,37 +27,37 @@ use thin_vec::ThinVec;
///
/// This is mostly used in case of cycles for error reporting.
#[derive(Clone, Debug)]
-pub struct QueryStackFrame {
- pub name: &'static str,
+pub struct QueryStackFrame<D: DepKind> {
pub description: String,
span: Option<Span>,
pub def_id: Option<DefId>,
pub def_kind: Option<DefKind>,
pub ty_adt_id: Option<DefId>,
+ pub dep_kind: D,
/// This hash is used to deterministically pick
/// a query to remove cycles in the parallel compiler.
#[cfg(parallel_compiler)]
hash: u64,
}
-impl QueryStackFrame {
+impl<D: DepKind> QueryStackFrame<D> {
#[inline]
pub fn new(
- name: &'static str,
description: String,
span: Option<Span>,
def_id: Option<DefId>,
def_kind: Option<DefKind>,
+ dep_kind: D,
ty_adt_id: Option<DefId>,
_hash: impl FnOnce() -> u64,
) -> Self {
Self {
- name,
description,
span,
def_id,
def_kind,
ty_adt_id,
+ dep_kind,
#[cfg(parallel_compiler)]
hash: _hash(),
}
@@ -104,7 +105,7 @@ pub trait QueryContext: HasDepContext {
/// Get the query information from the TLS context.
fn current_query_job(&self) -> Option<QueryJobId>;
- fn try_collect_active_jobs(&self) -> Option<QueryMap>;
+ fn try_collect_active_jobs(&self) -> Option<QueryMap<Self::DepKind>>;
/// Load side effects associated to the node in the previous session.
fn load_side_effects(&self, prev_dep_node_index: SerializedDepNodeIndex) -> QuerySideEffects;
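
In mod.rs, QueryStackFrame loses its `name: &'static str` field and instead stores the query's `dep_kind: D`; call sites such as print_query_stack and try_find_layout_root now render the kind with `{:?}`. A simplified sketch of that substitution (MiniDepKind and StackFrame are illustrative stand-ins, not the rustc types):

    // A stack frame identified by a Debug-printable kind rather than a
    // stored query-name string.
    #[derive(Clone, Copy, Debug)]
    enum MiniDepKind {
        TypeOf,
        LayoutOf,
    }

    struct StackFrame {
        description: String,
        dep_kind: MiniDepKind,
    }

    // Mirrors the new formatting in print_query_stack:
    // "#{i} [{dep_kind:?}] {description}"
    fn print_frame(i: usize, frame: &StackFrame) {
        println!("#{} [{:?}] {}", i, frame.dep_kind, frame.description);
    }

    fn main() {
        let frames = [
            StackFrame {
                description: "computing type of `Foo`".to_string(),
                dep_kind: MiniDepKind::TypeOf,
            },
            StackFrame {
                description: "computing layout of `Foo`".to_string(),
                dep_kind: MiniDepKind::LayoutOf,
            },
        ];
        for (i, frame) in frames.iter().enumerate() {
            print_frame(i, frame);
        }
    }
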
diff --git a/compiler/rustc_query_system/src/query/plumbing.rs b/compiler/rustc_query_system/src/query/plumbing.rs
index 848fa67e3..b3b939eae 100644
--- a/compiler/rustc_query_system/src/query/plumbing.rs
+++ b/compiler/rustc_query_system/src/query/plumbing.rs
@@ -2,10 +2,9 @@
//! generate the actual methods on tcx which find and execute the provider,
//! manage the caches, and so forth.
-use crate::dep_graph::{DepContext, DepNode, DepNodeIndex, DepNodeParams};
+use crate::dep_graph::{DepContext, DepKind, DepNode, DepNodeIndex};
use crate::ich::StableHashingContext;
use crate::query::caches::QueryCache;
-use crate::query::config::QueryVTable;
use crate::query::job::{report_cycle, QueryInfo, QueryJob, QueryJobId, QueryJobInfo};
use crate::query::{QueryContext, QueryMap, QuerySideEffects, QueryStackFrame};
use crate::values::Value;
@@ -31,26 +30,27 @@ use thin_vec::ThinVec;
use super::QueryConfig;
-pub struct QueryState<K> {
+pub struct QueryState<K, D: DepKind> {
#[cfg(parallel_compiler)]
- active: Sharded<FxHashMap<K, QueryResult>>,
+ active: Sharded<FxHashMap<K, QueryResult<D>>>,
#[cfg(not(parallel_compiler))]
- active: Lock<FxHashMap<K, QueryResult>>,
+ active: Lock<FxHashMap<K, QueryResult<D>>>,
}
/// Indicates the state of a query for a given key in a query map.
-enum QueryResult {
+enum QueryResult<D: DepKind> {
/// An already executing query. The query job can be used to await for its completion.
- Started(QueryJob),
+ Started(QueryJob<D>),
/// The query panicked. Queries trying to wait on this will raise a fatal error which will
/// silently panic.
Poisoned,
}
-impl<K> QueryState<K>
+impl<K, D> QueryState<K, D>
where
K: Eq + Hash + Clone + Debug,
+ D: DepKind,
{
pub fn all_inactive(&self) -> bool {
#[cfg(parallel_compiler)]
@@ -67,8 +67,8 @@ where
pub fn try_collect_active_jobs<Qcx: Copy>(
&self,
qcx: Qcx,
- make_query: fn(Qcx, K) -> QueryStackFrame,
- jobs: &mut QueryMap,
+ make_query: fn(Qcx, K) -> QueryStackFrame<D>,
+ jobs: &mut QueryMap<D>,
) -> Option<()> {
#[cfg(parallel_compiler)]
{
@@ -102,34 +102,34 @@ where
}
}
-impl<K> Default for QueryState<K> {
- fn default() -> QueryState<K> {
+impl<K, D: DepKind> Default for QueryState<K, D> {
+ fn default() -> QueryState<K, D> {
QueryState { active: Default::default() }
}
}
/// A type representing the responsibility to execute the job in the `job` field.
/// This will poison the relevant query if dropped.
-struct JobOwner<'tcx, K>
+struct JobOwner<'tcx, K, D: DepKind>
where
K: Eq + Hash + Clone,
{
- state: &'tcx QueryState<K>,
+ state: &'tcx QueryState<K, D>,
key: K,
id: QueryJobId,
}
#[cold]
#[inline(never)]
-fn mk_cycle<Qcx, V, R>(
+fn mk_cycle<Qcx, V, R, D: DepKind>(
qcx: Qcx,
- cycle_error: CycleError,
+ cycle_error: CycleError<D>,
handler: HandleCycleError,
cache: &dyn crate::query::QueryStorage<Value = V, Stored = R>,
) -> R
where
- Qcx: QueryContext,
- V: std::fmt::Debug + Value<Qcx::DepContext>,
+ Qcx: QueryContext + crate::query::HasDepContext<DepKind = D>,
+ V: std::fmt::Debug + Value<Qcx::DepContext, Qcx::DepKind>,
R: Clone,
{
let error = report_cycle(qcx.dep_context().sess(), &cycle_error);
@@ -139,13 +139,13 @@ where
fn handle_cycle_error<Tcx, V>(
tcx: Tcx,
- cycle_error: &CycleError,
+ cycle_error: &CycleError<Tcx::DepKind>,
mut error: DiagnosticBuilder<'_, ErrorGuaranteed>,
handler: HandleCycleError,
) -> V
where
Tcx: DepContext,
- V: Value<Tcx>,
+ V: Value<Tcx, Tcx::DepKind>,
{
use HandleCycleError::*;
match handler {
@@ -165,7 +165,7 @@ where
}
}
-impl<'tcx, K> JobOwner<'tcx, K>
+impl<'tcx, K, D: DepKind> JobOwner<'tcx, K, D>
where
K: Eq + Hash + Clone,
{
@@ -180,12 +180,12 @@ where
#[inline(always)]
fn try_start<'b, Qcx>(
qcx: &'b Qcx,
- state: &'b QueryState<K>,
+ state: &'b QueryState<K, Qcx::DepKind>,
span: Span,
key: K,
- ) -> TryGetJob<'b, K>
+ ) -> TryGetJob<'b, K, D>
where
- Qcx: QueryContext,
+ Qcx: QueryContext + crate::query::HasDepContext<DepKind = D>,
{
#[cfg(parallel_compiler)]
let mut state_lock = state.active.get_shard_by_value(&key).lock();
@@ -280,9 +280,10 @@ where
}
}
-impl<'tcx, K> Drop for JobOwner<'tcx, K>
+impl<'tcx, K, D> Drop for JobOwner<'tcx, K, D>
where
K: Eq + Hash + Clone,
+ D: DepKind,
{
#[inline(never)]
#[cold]
@@ -308,19 +309,20 @@ where
}
#[derive(Clone)]
-pub(crate) struct CycleError {
+pub(crate) struct CycleError<D: DepKind> {
/// The query and related span that uses the cycle.
- pub usage: Option<(Span, QueryStackFrame)>,
- pub cycle: Vec<QueryInfo>,
+ pub usage: Option<(Span, QueryStackFrame<D>)>,
+ pub cycle: Vec<QueryInfo<D>>,
}
/// The result of `try_start`.
-enum TryGetJob<'tcx, K>
+enum TryGetJob<'tcx, K, D>
where
K: Eq + Hash + Clone,
+ D: DepKind,
{
/// The query is not yet started. Contains a guard to the cache eventually used to start it.
- NotYetStarted(JobOwner<'tcx, K>),
+ NotYetStarted(JobOwner<'tcx, K, D>),
/// The query was already completed.
/// Returns the result of the query and its dep-node index
@@ -329,7 +331,7 @@ where
JobCompleted(TimingGuard<'tcx>),
/// Trying to execute the query resulted in a cycle.
- Cycle(CycleError),
+ Cycle(CycleError<D>),
}
/// Checks if the query is already computed and in the cache.
@@ -337,9 +339,9 @@ where
/// which will be used if the query is not in the cache and we need
/// to compute it.
#[inline]
-pub fn try_get_cached<'a, Tcx, C, R, OnHit>(
+pub fn try_get_cached<Tcx, C, R, OnHit>(
tcx: Tcx,
- cache: &'a C,
+ cache: &C,
key: &C::Key,
// `on_hit` can be called while holding a lock to the query cache
on_hit: OnHit,
@@ -358,36 +360,34 @@ where
})
}
-fn try_execute_query<Qcx, C>(
+fn try_execute_query<Q, Qcx>(
qcx: Qcx,
- state: &QueryState<C::Key>,
- cache: &C,
+ state: &QueryState<Q::Key, Qcx::DepKind>,
+ cache: &Q::Cache,
span: Span,
- key: C::Key,
+ key: Q::Key,
dep_node: Option<DepNode<Qcx::DepKind>>,
- query: &QueryVTable<Qcx, C::Key, C::Value>,
-) -> (C::Stored, Option<DepNodeIndex>)
+) -> (Q::Stored, Option<DepNodeIndex>)
where
- C: QueryCache,
- C::Key: Clone + DepNodeParams<Qcx::DepContext>,
- C::Value: Value<Qcx::DepContext>,
- C::Stored: Debug + std::borrow::Borrow<C::Value>,
+ Q: QueryConfig<Qcx>,
Qcx: QueryContext,
{
- match JobOwner::<'_, C::Key>::try_start(&qcx, state, span, key.clone()) {
+ match JobOwner::<'_, Q::Key, Qcx::DepKind>::try_start(&qcx, state, span, key.clone()) {
TryGetJob::NotYetStarted(job) => {
- let (result, dep_node_index) = execute_job(qcx, key.clone(), dep_node, query, job.id);
- if query.feedable {
+ let (result, dep_node_index) =
+ execute_job::<Q, Qcx>(qcx, key.clone(), dep_node, job.id);
+ if Q::FEEDABLE {
// We may have put a value inside the cache from inside the execution.
// Verify that it has the same hash as what we have now, to ensure consistency.
let _ = cache.lookup(&key, |cached_result, _| {
- let hasher = query.hash_result.expect("feedable forbids no_hash");
+ let hasher = Q::HASH_RESULT.expect("feedable forbids no_hash");
+
let old_hash = qcx.dep_context().with_stable_hashing_context(|mut hcx| hasher(&mut hcx, cached_result.borrow()));
let new_hash = qcx.dep_context().with_stable_hashing_context(|mut hcx| hasher(&mut hcx, &result));
debug_assert_eq!(
old_hash, new_hash,
"Computed query value for {:?}({:?}) is inconsistent with fed value,\ncomputed={:#?}\nfed={:#?}",
- query.dep_kind, key, result, cached_result,
+ Q::DEP_KIND, key, result, cached_result,
);
});
}
@@ -395,7 +395,7 @@ where
(result, Some(dep_node_index))
}
TryGetJob::Cycle(error) => {
- let result = mk_cycle(qcx, error, query.handle_cycle_error, cache);
+ let result = mk_cycle(qcx, error, Q::HANDLE_CYCLE_ERROR, cache);
(result, None)
}
#[cfg(parallel_compiler)]
@@ -414,16 +414,14 @@ where
}
}
-fn execute_job<Qcx, K, V>(
+fn execute_job<Q, Qcx>(
qcx: Qcx,
- key: K,
+ key: Q::Key,
mut dep_node_opt: Option<DepNode<Qcx::DepKind>>,
- query: &QueryVTable<Qcx, K, V>,
job_id: QueryJobId,
-) -> (V, DepNodeIndex)
+) -> (Q::Value, DepNodeIndex)
where
- K: Clone + DepNodeParams<Qcx::DepContext>,
- V: Debug,
+ Q: QueryConfig<Qcx>,
Qcx: QueryContext,
{
let dep_graph = qcx.dep_context().dep_graph();
@@ -431,23 +429,23 @@ where
// Fast path for when incr. comp. is off.
if !dep_graph.is_fully_enabled() {
let prof_timer = qcx.dep_context().profiler().query_provider();
- let result = qcx.start_query(job_id, query.depth_limit, None, || {
- query.compute(*qcx.dep_context(), key)
+ let result = qcx.start_query(job_id, Q::DEPTH_LIMIT, None, || {
+ Q::compute(qcx, &key)(*qcx.dep_context(), key)
});
let dep_node_index = dep_graph.next_virtual_depnode_index();
prof_timer.finish_with_query_invocation_id(dep_node_index.into());
return (result, dep_node_index);
}
- if !query.anon && !query.eval_always {
+ if !Q::ANON && !Q::EVAL_ALWAYS {
// `to_dep_node` is expensive for some `DepKind`s.
let dep_node =
- dep_node_opt.get_or_insert_with(|| query.to_dep_node(*qcx.dep_context(), &key));
+ dep_node_opt.get_or_insert_with(|| Q::construct_dep_node(*qcx.dep_context(), &key));
// The diagnostics for this query will be promoted to the current session during
// `try_mark_green()`, so we can ignore them here.
if let Some(ret) = qcx.start_query(job_id, false, None, || {
- try_load_from_disk_and_cache_in_memory(qcx, &key, &dep_node, query)
+ try_load_from_disk_and_cache_in_memory::<Q, Qcx>(qcx, &key, &dep_node)
}) {
return ret;
}
@@ -457,18 +455,19 @@ where
let diagnostics = Lock::new(ThinVec::new());
let (result, dep_node_index) =
- qcx.start_query(job_id, query.depth_limit, Some(&diagnostics), || {
- if query.anon {
- return dep_graph.with_anon_task(*qcx.dep_context(), query.dep_kind, || {
- query.compute(*qcx.dep_context(), key)
+ qcx.start_query(job_id, Q::DEPTH_LIMIT, Some(&diagnostics), || {
+ if Q::ANON {
+ return dep_graph.with_anon_task(*qcx.dep_context(), Q::DEP_KIND, || {
+ Q::compute(qcx, &key)(*qcx.dep_context(), key)
});
}
// `to_dep_node` is expensive for some `DepKind`s.
let dep_node =
- dep_node_opt.unwrap_or_else(|| query.to_dep_node(*qcx.dep_context(), &key));
+ dep_node_opt.unwrap_or_else(|| Q::construct_dep_node(*qcx.dep_context(), &key));
- dep_graph.with_task(dep_node, *qcx.dep_context(), key, query.compute, query.hash_result)
+ let task = Q::compute(qcx, &key);
+ dep_graph.with_task(dep_node, *qcx.dep_context(), key, task, Q::HASH_RESULT)
});
prof_timer.finish_with_query_invocation_id(dep_node_index.into());
@@ -477,7 +476,7 @@ where
let side_effects = QuerySideEffects { diagnostics };
if std::intrinsics::unlikely(!side_effects.is_empty()) {
- if query.anon {
+ if Q::ANON {
qcx.store_side_effects_for_anon_node(dep_node_index, side_effects);
} else {
qcx.store_side_effects(dep_node_index, side_effects);
@@ -487,16 +486,14 @@ where
(result, dep_node_index)
}
-fn try_load_from_disk_and_cache_in_memory<Qcx, K, V>(
+fn try_load_from_disk_and_cache_in_memory<Q, Qcx>(
qcx: Qcx,
- key: &K,
+ key: &Q::Key,
dep_node: &DepNode<Qcx::DepKind>,
- query: &QueryVTable<Qcx, K, V>,
-) -> Option<(V, DepNodeIndex)>
+) -> Option<(Q::Value, DepNodeIndex)>
where
- K: Clone,
+ Q: QueryConfig<Qcx>,
Qcx: QueryContext,
- V: Debug,
{
// Note this function can be called concurrently from the same query
// We must ensure that this is handled correctly.
@@ -508,7 +505,7 @@ where
// First we try to load the result from the on-disk cache.
// Some things are never cached on disk.
- if let Some(try_load_from_disk) = query.try_load_from_disk {
+ if let Some(try_load_from_disk) = Q::try_load_from_disk(qcx, &key) {
let prof_timer = qcx.dep_context().profiler().incr_cache_loading();
// The call to `with_query_deserialization` enforces that no new `DepNodes`
@@ -542,7 +539,7 @@ where
if std::intrinsics::unlikely(
try_verify || qcx.dep_context().sess().opts.unstable_opts.incremental_verify_ich,
) {
- incremental_verify_ich(*qcx.dep_context(), &result, dep_node, query.hash_result);
+ incremental_verify_ich(*qcx.dep_context(), &result, dep_node, Q::HASH_RESULT);
}
return Some((result, dep_node_index));
@@ -552,8 +549,7 @@ where
// can be forced from `DepNode`.
debug_assert!(
!qcx.dep_context().fingerprint_style(dep_node.kind).reconstructible(),
- "missing on-disk cache entry for {:?}",
- dep_node
+ "missing on-disk cache entry for {dep_node:?}"
);
}
@@ -562,7 +558,7 @@ where
let prof_timer = qcx.dep_context().profiler().query_provider();
// The dep-graph for this computation is already in-place.
- let result = dep_graph.with_ignore(|| query.compute(*qcx.dep_context(), key.clone()));
+ let result = dep_graph.with_ignore(|| Q::compute(qcx, key)(*qcx.dep_context(), key.clone()));
prof_timer.finish_with_query_invocation_id(dep_node_index.into());
@@ -575,7 +571,7 @@ where
//
// See issue #82920 for an example of a miscompilation that would get turned into
// an ICE by this check
- incremental_verify_ich(*qcx.dep_context(), &result, dep_node, query.hash_result);
+ incremental_verify_ich(*qcx.dep_context(), &result, dep_node, Q::HASH_RESULT);
Some((result, dep_node_index))
}
@@ -592,8 +588,7 @@ where
{
assert!(
tcx.dep_graph().is_green(dep_node),
- "fingerprint for green query instance not loaded from cache: {:?}",
- dep_node,
+ "fingerprint for green query instance not loaded from cache: {dep_node:?}",
);
let new_hash = hash_result.map_or(Fingerprint::ZERO, |f| {
@@ -672,16 +667,16 @@ fn incremental_verify_ich_failed(sess: &Session, dep_node: DebugArg<'_>, result:
sess.emit_err(crate::error::Reentrant);
} else {
let run_cmd = if let Some(crate_name) = &sess.opts.crate_name {
- format!("`cargo clean -p {}` or `cargo clean`", crate_name)
+ format!("`cargo clean -p {crate_name}` or `cargo clean`")
} else {
"`cargo clean`".to_string()
};
sess.emit_err(crate::error::IncrementCompilation {
run_cmd,
- dep_node: format!("{:?}", dep_node),
+ dep_node: format!("{dep_node:?}"),
});
- panic!("Found unstable fingerprints for {:?}: {:?}", dep_node, result);
+ panic!("Found unstable fingerprints for {dep_node:?}: {result:?}");
}
INSIDE_VERIFY_PANIC.with(|in_panic| in_panic.set(old_in_panic));
@@ -696,23 +691,19 @@ fn incremental_verify_ich_failed(sess: &Session, dep_node: DebugArg<'_>, result:
///
/// Note: The optimization is only available during incr. comp.
#[inline(never)]
-fn ensure_must_run<Qcx, K, V>(
- qcx: Qcx,
- key: &K,
- query: &QueryVTable<Qcx, K, V>,
-) -> (bool, Option<DepNode<Qcx::DepKind>>)
+fn ensure_must_run<Q, Qcx>(qcx: Qcx, key: &Q::Key) -> (bool, Option<DepNode<Qcx::DepKind>>)
where
- K: crate::dep_graph::DepNodeParams<Qcx::DepContext>,
+ Q: QueryConfig<Qcx>,
Qcx: QueryContext,
{
- if query.eval_always {
+ if Q::EVAL_ALWAYS {
return (true, None);
}
// Ensuring an anonymous query makes no sense
- assert!(!query.anon);
+ assert!(!Q::ANON);
- let dep_node = query.to_dep_node(*qcx.dep_context(), key);
+ let dep_node = Q::construct_dep_node(*qcx.dep_context(), key);
let dep_graph = qcx.dep_context().dep_graph();
match dep_graph.try_mark_green(qcx, &dep_node) {
@@ -739,16 +730,15 @@ pub enum QueryMode {
Ensure,
}
-pub fn get_query<Q, Qcx>(qcx: Qcx, span: Span, key: Q::Key, mode: QueryMode) -> Option<Q::Stored>
+pub fn get_query<Q, Qcx, D>(qcx: Qcx, span: Span, key: Q::Key, mode: QueryMode) -> Option<Q::Stored>
where
+ D: DepKind,
Q: QueryConfig<Qcx>,
- Q::Key: DepNodeParams<Qcx::DepContext>,
- Q::Value: Value<Qcx::DepContext>,
+ Q::Value: Value<Qcx::DepContext, D>,
Qcx: QueryContext,
{
- let query = Q::make_vtable(qcx, &key);
let dep_node = if let QueryMode::Ensure = mode {
- let (must_run, dep_node) = ensure_must_run(qcx, &key, &query);
+ let (must_run, dep_node) = ensure_must_run::<Q, _>(qcx, &key);
if !must_run {
return None;
}
@@ -757,14 +747,13 @@ where
None
};
- let (result, dep_node_index) = try_execute_query(
+ let (result, dep_node_index) = try_execute_query::<Q, Qcx>(
qcx,
Q::query_state(qcx),
Q::query_cache(qcx),
span,
key,
dep_node,
- &query,
);
if let Some(dep_node_index) = dep_node_index {
qcx.dep_context().dep_graph().read_index(dep_node_index)
@@ -772,11 +761,11 @@ where
Some(result)
}
-pub fn force_query<Q, Qcx>(qcx: Qcx, key: Q::Key, dep_node: DepNode<Qcx::DepKind>)
+pub fn force_query<Q, Qcx, D>(qcx: Qcx, key: Q::Key, dep_node: DepNode<Qcx::DepKind>)
where
+ D: DepKind,
Q: QueryConfig<Qcx>,
- Q::Key: DepNodeParams<Qcx::DepContext>,
- Q::Value: Value<Qcx::DepContext>,
+ Q::Value: Value<Qcx::DepContext, D>,
Qcx: QueryContext,
{
// We may be concurrently trying both execute and force a query.
@@ -793,9 +782,8 @@ where
Err(()) => {}
}
- let query = Q::make_vtable(qcx, &key);
let state = Q::query_state(qcx);
- debug_assert!(!query.anon);
+ debug_assert!(!Q::ANON);
- try_execute_query(qcx, state, cache, DUMMY_SP, key, Some(dep_node), &query);
+ try_execute_query::<Q, _>(qcx, state, cache, DUMMY_SP, key, Some(dep_node));
}