author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:20:39 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:20:39 +0000
commit     1376c5a617be5c25655d0d7cb63e3beaa5a6e026 (patch)
tree       3bb8d61aee02bc7a15eab3f36e3b921afc2075d0 /compiler/rustc_query_system/src/query
parent     Releasing progress-linux version 1.69.0+dfsg1-1~progress7.99u1. (diff)
download   rustc-1376c5a617be5c25655d0d7cb63e3beaa5a6e026.tar.xz
           rustc-1376c5a617be5c25655d0d7cb63e3beaa5a6e026.zip
Merging upstream version 1.70.0+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_query_system/src/query')
-rw-r--r--  compiler/rustc_query_system/src/query/caches.rs   |  35
-rw-r--r--  compiler/rustc_query_system/src/query/config.rs   |  54
-rw-r--r--  compiler/rustc_query_system/src/query/job.rs      |   4
-rw-r--r--  compiler/rustc_query_system/src/query/mod.rs      |   3
-rw-r--r--  compiler/rustc_query_system/src/query/plumbing.rs | 619
5 files changed, 382 insertions, 333 deletions
diff --git a/compiler/rustc_query_system/src/query/caches.rs b/compiler/rustc_query_system/src/query/caches.rs
index 4b3cd16c2..29f6a07e8 100644
--- a/compiler/rustc_query_system/src/query/caches.rs
+++ b/compiler/rustc_query_system/src/query/caches.rs
@@ -16,17 +16,11 @@ pub trait CacheSelector<'tcx, V> {
         V: Copy;
 }
 
-pub trait QueryStorage {
-    type Value: Copy;
-}
-
-pub trait QueryCache: QueryStorage + Sized {
+pub trait QueryCache: Sized {
     type Key: Hash + Eq + Copy + Debug;
+    type Value: Copy;
 
     /// Checks if the query is already computed and in the cache.
-    /// It returns the shard index and a lock guard to the shard,
-    /// which will be used if the query is not in the cache and we need
-    /// to compute it.
     fn lookup(&self, key: &Self::Key) -> Option<(Self::Value, DepNodeIndex)>;
 
     fn complete(&self, key: Self::Key, value: Self::Value, index: DepNodeIndex);
@@ -55,16 +49,13 @@ impl<K, V> Default for DefaultCache<K, V> {
     }
 }
 
-impl<K: Eq + Hash, V: Copy + Debug> QueryStorage for DefaultCache<K, V> {
-    type Value = V;
-}
-
 impl<K, V> QueryCache for DefaultCache<K, V>
 where
     K: Eq + Hash + Copy + Debug,
-    V: Copy + Debug,
+    V: Copy,
 {
     type Key = K;
+    type Value = V;
 
     #[inline(always)]
     fn lookup(&self, key: &K) -> Option<(V, DepNodeIndex)> {
@@ -127,15 +118,12 @@ impl<V> Default for SingleCache<V> {
     }
 }
 
-impl<V: Copy + Debug> QueryStorage for SingleCache<V> {
-    type Value = V;
-}
-
 impl<V> QueryCache for SingleCache<V>
 where
-    V: Copy + Debug,
+    V: Copy,
 {
     type Key = ();
+    type Value = V;
 
     #[inline(always)]
     fn lookup(&self, _key: &()) -> Option<(V, DepNodeIndex)> {
@@ -148,7 +136,9 @@ where
     }
 
     fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
-        self.cache.lock().as_ref().map(|value| f(&(), &value.0, value.1));
+        if let Some(value) = self.cache.lock().as_ref() {
+            f(&(), &value.0, value.1)
+        }
     }
 }
 
@@ -173,16 +163,13 @@ impl<K: Idx, V> Default for VecCache<K, V> {
     }
 }
 
-impl<K: Eq + Idx, V: Copy + Debug> QueryStorage for VecCache<K, V> {
-    type Value = V;
-}
-
 impl<K, V> QueryCache for VecCache<K, V>
 where
     K: Eq + Idx + Copy + Debug,
-    V: Copy + Debug,
+    V: Copy,
 {
     type Key = K;
+    type Value = V;
 
     #[inline(always)]
     fn lookup(&self, key: &K) -> Option<(V, DepNodeIndex)> {
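The caches.rs hunks above fold the old two-trait split (`QueryStorage` plus `QueryCache`) into a single `QueryCache` trait that declares both `Key` and `Value`, and drop the `Debug` bound on values (failures are now formatted explicitly elsewhere via `format_value`). A rough sketch of the consolidated shape, using toy types only — `DepNodeIndex` is stubbed as a `u32` and a plain `Mutex<HashMap>` stands in for rustc's sharded storage:

```rust
use std::collections::HashMap;
use std::fmt::Debug;
use std::hash::Hash;
use std::sync::Mutex;

// Stand-in for rustc's DepNodeIndex, just for this sketch.
type DepNodeIndex = u32;

// The consolidated trait: `Value` now lives directly on `QueryCache`.
trait QueryCache: Sized {
    type Key: Hash + Eq + Copy + Debug;
    type Value: Copy;

    fn lookup(&self, key: &Self::Key) -> Option<(Self::Value, DepNodeIndex)>;
    fn complete(&self, key: Self::Key, value: Self::Value, index: DepNodeIndex);
}

// A toy map-backed cache; the real DefaultCache shards its locks.
struct ToyCache<K, V> {
    map: Mutex<HashMap<K, (V, DepNodeIndex)>>,
}

impl<K, V> QueryCache for ToyCache<K, V>
where
    K: Hash + Eq + Copy + Debug,
    V: Copy,
{
    type Key = K;
    type Value = V;

    fn lookup(&self, key: &K) -> Option<(V, DepNodeIndex)> {
        self.map.lock().unwrap().get(key).copied()
    }

    fn complete(&self, key: K, value: V, index: DepNodeIndex) {
        self.map.lock().unwrap().insert(key, (value, index));
    }
}
```

The point of the merge is that a cache implementation now declares its key and value types in one place, so generic plumbing can name `C::Value` without dragging in a second trait bound.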
diff --git a/compiler/rustc_query_system/src/query/config.rs b/compiler/rustc_query_system/src/query/config.rs
index d56373873..c8d779385 100644
--- a/compiler/rustc_query_system/src/query/config.rs
+++ b/compiler/rustc_query_system/src/query/config.rs
@@ -4,59 +4,67 @@
 use crate::dep_graph::{DepNode, DepNodeParams, SerializedDepNodeIndex};
 use crate::error::HandleCycleError;
 use crate::ich::StableHashingContext;
 use crate::query::caches::QueryCache;
-use crate::query::{QueryContext, QueryState};
+use crate::query::{QueryContext, QueryInfo, QueryState};
 
 use rustc_data_structures::fingerprint::Fingerprint;
 use std::fmt::Debug;
 use std::hash::Hash;
 
-pub type HashResult<Qcx, Q> =
-    Option<fn(&mut StableHashingContext<'_>, &<Q as QueryConfig<Qcx>>::Value) -> Fingerprint>;
+pub type HashResult<V> = Option<fn(&mut StableHashingContext<'_>, &V) -> Fingerprint>;
 
-pub type TryLoadFromDisk<Qcx, Q> =
-    Option<fn(Qcx, SerializedDepNodeIndex) -> Option<<Q as QueryConfig<Qcx>>::Value>>;
+pub type TryLoadFromDisk<Qcx, V> = Option<fn(Qcx, SerializedDepNodeIndex) -> Option<V>>;
 
-pub trait QueryConfig<Qcx: QueryContext> {
-    const NAME: &'static str;
+pub trait QueryConfig<Qcx: QueryContext>: Copy {
+    fn name(self) -> &'static str;
 
     // `Key` and `Value` are `Copy` instead of `Clone` to ensure copying them stays cheap,
     // but it isn't necessary.
     type Key: DepNodeParams<Qcx::DepContext> + Eq + Hash + Copy + Debug;
-    type Value: Debug + Copy;
+    type Value: Copy;
 
     type Cache: QueryCache<Key = Self::Key, Value = Self::Value>;
 
+    fn format_value(self) -> fn(&Self::Value) -> String;
+
     // Don't use this method to access query results, instead use the methods on TyCtxt
-    fn query_state<'a>(tcx: Qcx) -> &'a QueryState<Self::Key, Qcx::DepKind>
+    fn query_state<'a>(self, tcx: Qcx) -> &'a QueryState<Self::Key, Qcx::DepKind>
     where
        Qcx: 'a;
 
     // Don't use this method to access query results, instead use the methods on TyCtxt
-    fn query_cache<'a>(tcx: Qcx) -> &'a Self::Cache
+    fn query_cache<'a>(self, tcx: Qcx) -> &'a Self::Cache
     where
         Qcx: 'a;
 
-    fn cache_on_disk(tcx: Qcx::DepContext, key: &Self::Key) -> bool;
+    fn cache_on_disk(self, tcx: Qcx::DepContext, key: &Self::Key) -> bool;
 
     // Don't use this method to compute query results, instead use the methods on TyCtxt
-    fn execute_query(tcx: Qcx::DepContext, k: Self::Key) -> Self::Value;
+    fn execute_query(self, tcx: Qcx::DepContext, k: Self::Key) -> Self::Value;
+
+    fn compute(self, tcx: Qcx, key: Self::Key) -> Self::Value;
 
-    fn compute(tcx: Qcx, key: Self::Key) -> Self::Value;
+    fn try_load_from_disk(self, qcx: Qcx, idx: &Self::Key) -> TryLoadFromDisk<Qcx, Self::Value>;
 
-    fn try_load_from_disk(qcx: Qcx, idx: &Self::Key) -> TryLoadFromDisk<Qcx, Self>;
+    fn loadable_from_disk(self, qcx: Qcx, key: &Self::Key, idx: SerializedDepNodeIndex) -> bool;
 
-    const ANON: bool;
-    const EVAL_ALWAYS: bool;
-    const DEPTH_LIMIT: bool;
-    const FEEDABLE: bool;
+    /// Synthesize an error value to let compilation continue after a cycle.
+    fn value_from_cycle_error(
+        self,
+        tcx: Qcx::DepContext,
+        cycle: &[QueryInfo<Qcx::DepKind>],
+    ) -> Self::Value;
 
-    const DEP_KIND: Qcx::DepKind;
-    const HANDLE_CYCLE_ERROR: HandleCycleError;
+    fn anon(self) -> bool;
+    fn eval_always(self) -> bool;
+    fn depth_limit(self) -> bool;
+    fn feedable(self) -> bool;
 
-    const HASH_RESULT: HashResult<Qcx, Self>;
+    fn dep_kind(self) -> Qcx::DepKind;
+    fn handle_cycle_error(self) -> HandleCycleError;
+    fn hash_result(self) -> HashResult<Self::Value>;
 
     // Just here for convenience and checking that the key matches the kind, don't override this.
-    fn construct_dep_node(tcx: Qcx::DepContext, key: &Self::Key) -> DepNode<Qcx::DepKind> {
-        DepNode::construct(tcx, Self::DEP_KIND, key)
+    fn construct_dep_node(self, tcx: Qcx::DepContext, key: &Self::Key) -> DepNode<Qcx::DepKind> {
+        DepNode::construct(tcx, self.dep_kind(), key)
     }
 }
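The config.rs rewrite above turns every per-query property (`NAME`, `ANON`, `DEP_KIND`, `HASH_RESULT`, ...) from an associated constant into a method taking `self` on a `Copy` type, so query properties flow through a value rather than a type parameter alone. A minimal sketch of that migration pattern, using an invented `TraceQuery` marker type rather than anything from rustc:

```rust
// Before: properties are associated consts, resolvable only through the type.
trait QueryConfigOld {
    const ANON: bool;
    const NAME: &'static str;
}

// After: properties are methods on a cheap `Copy` value, so the plumbing
// can be handed a query *value* and call through it uniformly.
trait QueryConfigNew: Copy {
    fn anon(self) -> bool;
    fn name(self) -> &'static str;
}

// An invented zero-sized marker query for illustration.
#[derive(Clone, Copy)]
struct TraceQuery;

impl QueryConfigNew for TraceQuery {
    fn anon(self) -> bool {
        false
    }
    fn name(self) -> &'static str {
        "trace"
    }
}

fn describe<Q: QueryConfigNew>(query: Q) {
    // The query value, not just its type, flows through the call.
    println!("query `{}`, anon: {}", query.name(), query.anon());
}

fn main() {
    describe(TraceQuery);
}
```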
diff --git a/compiler/rustc_query_system/src/query/job.rs b/compiler/rustc_query_system/src/query/job.rs
index a5a2f0093..a534b5407 100644
--- a/compiler/rustc_query_system/src/query/job.rs
+++ b/compiler/rustc_query_system/src/query/job.rs
@@ -18,11 +18,11 @@ use std::num::NonZeroU64;
 #[cfg(parallel_compiler)]
 use {
     parking_lot::{Condvar, Mutex},
+    rayon_core,
     rustc_data_structures::fx::FxHashSet,
     rustc_data_structures::sync::Lock,
     rustc_data_structures::sync::Lrc,
     rustc_data_structures::{jobserver, OnDrop},
-    rustc_rayon_core as rayon_core,
     rustc_span::DUMMY_SP,
     std::iter,
     std::process,
@@ -124,8 +124,6 @@ impl<D: DepKind> QueryJob<D> {
 }
 
 impl QueryJobId {
-    #[cold]
-    #[inline(never)]
     #[cfg(not(parallel_compiler))]
     pub(super) fn find_cycle_in_stack<D: DepKind>(
         &self,
diff --git a/compiler/rustc_query_system/src/query/mod.rs b/compiler/rustc_query_system/src/query/mod.rs
index 383c63cd2..312b0e168 100644
--- a/compiler/rustc_query_system/src/query/mod.rs
+++ b/compiler/rustc_query_system/src/query/mod.rs
@@ -8,8 +8,7 @@ pub use self::job::{print_query_stack, QueryInfo, QueryJob, QueryJobId, QueryJob
 mod caches;
 pub use self::caches::{
-    CacheSelector, DefaultCacheSelector, QueryCache, QueryStorage, SingleCacheSelector,
-    VecCacheSelector,
+    CacheSelector, DefaultCacheSelector, QueryCache, SingleCacheSelector, VecCacheSelector,
 };
 
 mod config;
diff --git a/compiler/rustc_query_system/src/query/plumbing.rs b/compiler/rustc_query_system/src/query/plumbing.rs
index 5f003fa70..20310483d 100644
--- a/compiler/rustc_query_system/src/query/plumbing.rs
+++ b/compiler/rustc_query_system/src/query/plumbing.rs
@@ -3,22 +3,22 @@
 //! manage the caches, and so forth.
 
 use crate::dep_graph::{DepContext, DepKind, DepNode, DepNodeIndex, DepNodeParams};
+use crate::dep_graph::{DepGraphData, HasDepContext};
 use crate::ich::StableHashingContext;
 use crate::query::caches::QueryCache;
+#[cfg(parallel_compiler)]
+use crate::query::job::QueryLatch;
 use crate::query::job::{report_cycle, QueryInfo, QueryJob, QueryJobId, QueryJobInfo};
+use crate::query::SerializedDepNodeIndex;
 use crate::query::{QueryContext, QueryMap, QuerySideEffects, QueryStackFrame};
-use crate::values::Value;
 use crate::HandleCycleError;
 use rustc_data_structures::fingerprint::Fingerprint;
 use rustc_data_structures::fx::FxHashMap;
-#[cfg(parallel_compiler)]
-use rustc_data_structures::profiling::TimingGuard;
-#[cfg(parallel_compiler)]
-use rustc_data_structures::sharded::Sharded;
 use rustc_data_structures::stack::ensure_sufficient_stack;
 use rustc_data_structures::sync::Lock;
+#[cfg(parallel_compiler)]
+use rustc_data_structures::{cold_path, sharded::Sharded};
 use rustc_errors::{DiagnosticBuilder, ErrorGuaranteed, FatalError};
-use rustc_session::Session;
 use rustc_span::{Span, DUMMY_SP};
 use std::cell::Cell;
 use std::collections::hash_map::Entry;
@@ -115,48 +115,49 @@ where
 {
     state: &'tcx QueryState<K, D>,
     key: K,
-    id: QueryJobId,
 }
 
 #[cold]
 #[inline(never)]
-fn mk_cycle<Qcx, R, D: DepKind>(
+fn mk_cycle<Q, Qcx>(
+    query: Q,
     qcx: Qcx,
-    cycle_error: CycleError<D>,
+    cycle_error: CycleError<Qcx::DepKind>,
     handler: HandleCycleError,
-) -> R
+) -> Q::Value
 where
-    Qcx: QueryContext + crate::query::HasDepContext<DepKind = D>,
-    R: std::fmt::Debug + Value<Qcx::DepContext, Qcx::DepKind>,
+    Q: QueryConfig<Qcx>,
+    Qcx: QueryContext,
 {
     let error = report_cycle(qcx.dep_context().sess(), &cycle_error);
-    handle_cycle_error(*qcx.dep_context(), &cycle_error, error, handler)
+    handle_cycle_error(query, qcx, &cycle_error, error, handler)
 }
 
-fn handle_cycle_error<Tcx, V>(
-    tcx: Tcx,
-    cycle_error: &CycleError<Tcx::DepKind>,
+fn handle_cycle_error<Q, Qcx>(
+    query: Q,
+    qcx: Qcx,
+    cycle_error: &CycleError<Qcx::DepKind>,
     mut error: DiagnosticBuilder<'_, ErrorGuaranteed>,
     handler: HandleCycleError,
-) -> V
+) -> Q::Value
 where
-    Tcx: DepContext,
-    V: Value<Tcx, Tcx::DepKind>,
+    Q: QueryConfig<Qcx>,
+    Qcx: QueryContext,
 {
     use HandleCycleError::*;
     match handler {
         Error => {
             error.emit();
-            Value::from_cycle_error(tcx, &cycle_error.cycle)
+            query.value_from_cycle_error(*qcx.dep_context(), &cycle_error.cycle)
         }
         Fatal => {
             error.emit();
-            tcx.sess().abort_if_errors();
+            qcx.dep_context().sess().abort_if_errors();
             unreachable!()
         }
         DelayBug => {
             error.delay_as_bug();
-            Value::from_cycle_error(tcx, &cycle_error.cycle)
+            query.value_from_cycle_error(*qcx.dep_context(), &cycle_error.cycle)
         }
     }
 }
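With the `Value` trait import dropped above, cycle recovery is now dispatched through the query value itself: `handle_cycle_error` asks the query for a synthesized result via `value_from_cycle_error`. A freestanding sketch of that flow, with invented stand-ins for the query, the cycle error, and the recovery strategies:

```rust
// Invented stand-ins for this sketch.
#[derive(Debug)]
struct CycleError {
    description: String,
}

enum HandleCycleError {
    Error,    // report the cycle, keep compiling with a synthesized value
    Fatal,    // report and abort
    DelayBug, // delay as a bug, keep compiling
}

trait Query: Copy {
    type Value;
    // Each query decides what a "recovered from a cycle" value looks like.
    fn value_from_cycle_error(self, cycle: &CycleError) -> Self::Value;
    fn handle_cycle_error(self) -> HandleCycleError;
}

fn mk_cycle<Q: Query>(query: Q, cycle: CycleError) -> Q::Value {
    match query.handle_cycle_error() {
        HandleCycleError::Error | HandleCycleError::DelayBug => {
            eprintln!("cycle detected: {}", cycle.description);
            query.value_from_cycle_error(&cycle)
        }
        HandleCycleError::Fatal => panic!("fatal cycle: {}", cycle.description),
    }
}
```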
@@ -165,84 +166,6 @@ impl<'tcx, K, D: DepKind> JobOwner<'tcx, K, D>
 where
     K: Eq + Hash + Copy,
 {
-    /// Either gets a `JobOwner` corresponding to the query, allowing us to
-    /// start executing the query, or returns with the result of the query.
-    /// This function assumes that `try_get_cached` is already called and returned `lookup`.
-    /// If the query is executing elsewhere, this will wait for it and return the result.
-    /// If the query panicked, this will silently panic.
-    ///
-    /// This function is inlined because that results in a noticeable speed-up
-    /// for some compile-time benchmarks.
-    #[inline(always)]
-    fn try_start<'b, Qcx>(
-        qcx: &'b Qcx,
-        state: &'b QueryState<K, Qcx::DepKind>,
-        span: Span,
-        key: K,
-    ) -> TryGetJob<'b, K, D>
-    where
-        Qcx: QueryContext + crate::query::HasDepContext<DepKind = D>,
-    {
-        #[cfg(parallel_compiler)]
-        let mut state_lock = state.active.get_shard_by_value(&key).lock();
-        #[cfg(not(parallel_compiler))]
-        let mut state_lock = state.active.lock();
-        let lock = &mut *state_lock;
-        let current_job_id = qcx.current_query_job();
-
-        match lock.entry(key) {
-            Entry::Vacant(entry) => {
-                let id = qcx.next_job_id();
-                let job = QueryJob::new(id, span, current_job_id);
-
-                let key = *entry.key();
-                entry.insert(QueryResult::Started(job));
-
-                let owner = JobOwner { state, id, key };
-                return TryGetJob::NotYetStarted(owner);
-            }
-            Entry::Occupied(mut entry) => {
-                match entry.get_mut() {
-                    #[cfg(not(parallel_compiler))]
-                    QueryResult::Started(job) => {
-                        let id = job.id;
-                        drop(state_lock);
-
-                        // If we are single-threaded we know that we have a cycle error,
-                        // so we just return the error.
-                        return TryGetJob::Cycle(id.find_cycle_in_stack(
-                            qcx.try_collect_active_jobs().unwrap(),
-                            &current_job_id,
-                            span,
-                        ));
-                    }
-                    #[cfg(parallel_compiler)]
-                    QueryResult::Started(job) => {
-                        // For parallel queries, we'll block and wait until the query running
-                        // in another thread has completed. Record how long we wait in the
-                        // self-profiler.
-                        let query_blocked_prof_timer = qcx.dep_context().profiler().query_blocked();
-
-                        // Get the latch out
-                        let latch = job.latch();
-
-                        drop(state_lock);
-
-                        // With parallel queries we might just have to wait on some other
-                        // thread.
-                        let result = latch.wait_on(current_job_id, span);
-
-                        match result {
-                            Ok(()) => TryGetJob::JobCompleted(query_blocked_prof_timer),
-                            Err(cycle) => TryGetJob::Cycle(cycle),
-                        }
-                    }
-                    QueryResult::Poisoned => FatalError.raise(),
-                }
-            }
-        }
-    }
-
     /// Completes the query by updating the query cache with the `result`,
     /// signals the waiter and forgets the JobOwner, so it won't poison the query
     fn complete<C>(self, cache: &C, result: C::Value, dep_node_index: DepNodeIndex)
@@ -309,25 +232,6 @@ pub(crate) struct CycleError<D: DepKind> {
     pub cycle: Vec<QueryInfo<D>>,
 }
 
-/// The result of `try_start`.
-enum TryGetJob<'tcx, K, D>
-where
-    K: Eq + Hash + Copy,
-    D: DepKind,
-{
-    /// The query is not yet started. Contains a guard to the cache eventually used to start it.
-    NotYetStarted(JobOwner<'tcx, K, D>),
-
-    /// The query was already completed.
-    /// Returns the result of the query and its dep-node index
-    /// if it succeeded or a cycle error if it failed.
-    #[cfg(parallel_compiler)]
-    JobCompleted(TimingGuard<'tcx>),
-
-    /// Trying to execute the query resulted in a cycle.
-    Cycle(CycleError<D>),
-}
-
 /// Checks if the query is already computed and in the cache.
 /// It returns the shard index and a lock guard to the shard,
 /// which will be used if the query is not in the cache and we need
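The deleted `try_start`/`TryGetJob` state machine reappears, inlined, in the rewritten `try_execute_query` further down. Its core mechanism is slot-based deduplication via the `Entry` API: the first caller claims a slot and runs the query; later callers find the slot occupied and either wait (parallel) or report a cycle (serial). A simplified sketch of the claim step, with toy types and no latch or cycle detection:

```rust
use std::collections::hash_map::Entry;
use std::collections::HashMap;

enum Slot {
    Started { job_id: u64 },
    Poisoned,
}

enum Claim {
    // We own the slot and must compute the query.
    Run { job_id: u64 },
    // Someone else is computing: wait (parallel) or report a cycle (serial).
    Blocked { owner: u64 },
}

fn claim(active: &mut HashMap<&'static str, Slot>, key: &'static str, next_id: u64) -> Claim {
    match active.entry(key) {
        Entry::Vacant(entry) => {
            entry.insert(Slot::Started { job_id: next_id });
            Claim::Run { job_id: next_id }
        }
        Entry::Occupied(entry) => match entry.get() {
            Slot::Started { job_id } => Claim::Blocked { owner: *job_id },
            Slot::Poisoned => panic!("query previously panicked"),
        },
    }
}
```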
@@ -348,44 +252,54 @@ where
     }
 }
 
+#[cold]
 #[inline(never)]
-fn try_execute_query<Q, Qcx>(
+#[cfg(not(parallel_compiler))]
+fn cycle_error<Q, Qcx>(
+    query: Q,
+    qcx: Qcx,
+    try_execute: QueryJobId,
+    span: Span,
+) -> (Q::Value, Option<DepNodeIndex>)
+where
+    Q: QueryConfig<Qcx>,
+    Qcx: QueryContext,
+{
+    let error = try_execute.find_cycle_in_stack(
+        qcx.try_collect_active_jobs().unwrap(),
+        &qcx.current_query_job(),
+        span,
+    );
+    (mk_cycle(query, qcx, error, query.handle_cycle_error()), None)
+}
+
+#[inline(always)]
+#[cfg(parallel_compiler)]
+fn wait_for_query<Q, Qcx>(
+    query: Q,
     qcx: Qcx,
     span: Span,
     key: Q::Key,
-    dep_node: Option<DepNode<Qcx::DepKind>>,
+    latch: QueryLatch<Qcx::DepKind>,
+    current: Option<QueryJobId>,
 ) -> (Q::Value, Option<DepNodeIndex>)
 where
     Q: QueryConfig<Qcx>,
     Qcx: QueryContext,
 {
-    let state = Q::query_state(qcx);
-    match JobOwner::<'_, Q::Key, Qcx::DepKind>::try_start(&qcx, state, span, key) {
-        TryGetJob::NotYetStarted(job) => {
-            let (result, dep_node_index) = execute_job::<Q, Qcx>(qcx, key, dep_node, job.id);
-            let cache = Q::query_cache(qcx);
-            if Q::FEEDABLE {
-                // We should not compute queries that also got a value via feeding.
-                // This can't happen, as query feeding adds the very dependencies to the fed query
-                // as its feeding query had. So if the fed query is red, so is its feeder, which will
-                // get evaluated first, and re-feed the query.
-                if let Some((cached_result, _)) = cache.lookup(&key) {
-                    panic!(
-                        "fed query later has its value computed. The already cached value: {cached_result:?}"
-                    );
-                }
-            }
-            job.complete(cache, result, dep_node_index);
-            (result, Some(dep_node_index))
-        }
-        TryGetJob::Cycle(error) => {
-            let result = mk_cycle(qcx, error, Q::HANDLE_CYCLE_ERROR);
-            (result, None)
-        }
-        #[cfg(parallel_compiler)]
-        TryGetJob::JobCompleted(query_blocked_prof_timer) => {
-            let Some((v, index)) = Q::query_cache(qcx).lookup(&key) else {
-                panic!("value must be in cache after waiting")
+    // For parallel queries, we'll block and wait until the query running
+    // in another thread has completed. Record how long we wait in the
+    // self-profiler.
+    let query_blocked_prof_timer = qcx.dep_context().profiler().query_blocked();
+
+    // With parallel queries we might just have to wait on some other
+    // thread.
+    let result = latch.wait_on(current, span);
+
+    match result {
+        Ok(()) => {
+            let Some((v, index)) = query.query_cache(qcx).lookup(&key) else {
+                cold_path(|| panic!("value must be in cache after waiting"))
             };
 
             qcx.dep_context().profiler().query_cache_hit(index.into());
@@ -393,57 +307,178 @@ where
 
             (v, Some(index))
         }
+        Err(cycle) => (mk_cycle(query, qcx, cycle, query.handle_cycle_error()), None),
+    }
+}
+
+#[inline(never)]
+fn try_execute_query<Q, Qcx>(
+    query: Q,
+    qcx: Qcx,
+    span: Span,
+    key: Q::Key,
+    dep_node: Option<DepNode<Qcx::DepKind>>,
+) -> (Q::Value, Option<DepNodeIndex>)
+where
+    Q: QueryConfig<Qcx>,
+    Qcx: QueryContext,
+{
+    let state = query.query_state(qcx);
+    #[cfg(parallel_compiler)]
+    let mut state_lock = state.active.get_shard_by_value(&key).lock();
+    #[cfg(not(parallel_compiler))]
+    let mut state_lock = state.active.lock();
+
+    // For the parallel compiler we need to check both the query cache and query state structures
+    // while holding the state lock to ensure that 1) the query has not yet completed and 2) the
+    // query is not still executing. Without checking the query cache here, we can end up
+    // re-executing the query since `try_start` only checks that the query is not currently
+    // executing, but another thread may have already completed the query and stored its result
+    // in the query cache.
+    if cfg!(parallel_compiler) && qcx.dep_context().sess().threads() > 1 {
+        if let Some((value, index)) = query.query_cache(qcx).lookup(&key) {
+            qcx.dep_context().profiler().query_cache_hit(index.into());
+            return (value, Some(index));
+        }
+    }
+
+    let current_job_id = qcx.current_query_job();
+
+    match state_lock.entry(key) {
+        Entry::Vacant(entry) => {
+            // Nothing has computed or is computing the query, so we start a new job and insert it in the
+            // state map.
+            let id = qcx.next_job_id();
+            let job = QueryJob::new(id, span, current_job_id);
+            entry.insert(QueryResult::Started(job));
+
+            // Drop the lock before we start executing the query
+            drop(state_lock);
+
+            execute_job(query, qcx, state, key, id, dep_node)
+        }
+        Entry::Occupied(mut entry) => {
+            match entry.get_mut() {
+                #[cfg(not(parallel_compiler))]
+                QueryResult::Started(job) => {
+                    let id = job.id;
+                    drop(state_lock);
+
+                    // If we are single-threaded we know that we have a cycle error,
+                    // so we just return the error.
+                    cycle_error(query, qcx, id, span)
+                }
+                #[cfg(parallel_compiler)]
+                QueryResult::Started(job) => {
+                    // Get the latch out
+                    let latch = job.latch();
+                    drop(state_lock);
+
+                    wait_for_query(query, qcx, span, key, latch, current_job_id)
+                }
+                QueryResult::Poisoned => FatalError.raise(),
+            }
+        }
+    }
+}
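The cache re-check under the state lock in the new `try_execute_query` is a double-checked lookup: the first, lock-free check misses, and the check is repeated once the state lock is held, because another thread may have completed the query in between. A standalone sketch of why the second lookup exists, with plain `Mutex<HashMap>` maps standing in for the compiler's sharded structures, and with waiting and cycle handling left out entirely:

```rust
use std::collections::HashMap;
use std::sync::Mutex;

struct Engine {
    cache: Mutex<HashMap<u32, String>>, // finished results
    active: Mutex<HashMap<u32, ()>>,    // queries currently running
}

enum Claimed {
    Done(String), // completed by someone else between our two checks
    Run,          // we own the slot and must compute
    InFlight,     // rustc would wait on the job's latch here
}

impl Engine {
    fn claim(&self, key: u32) -> Claimed {
        // Fast path: no state lock taken yet.
        if let Some(v) = self.cache.lock().unwrap().get(&key) {
            return Claimed::Done(v.clone());
        }

        let mut active = self.active.lock().unwrap();
        // Second check, now under the state lock: another thread may have
        // finished the query between the first check and taking the lock.
        if let Some(v) = self.cache.lock().unwrap().get(&key) {
            return Claimed::Done(v.clone());
        }
        if active.contains_key(&key) {
            return Claimed::InFlight;
        }
        active.insert(key, ());
        Claimed::Run
    }
}
```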
 #[inline(always)]
 fn execute_job<Q, Qcx>(
+    query: Q,
     qcx: Qcx,
+    state: &QueryState<Q::Key, Qcx::DepKind>,
     key: Q::Key,
-    mut dep_node_opt: Option<DepNode<Qcx::DepKind>>,
-    job_id: QueryJobId,
-) -> (Q::Value, DepNodeIndex)
+    id: QueryJobId,
+    dep_node: Option<DepNode<Qcx::DepKind>>,
+) -> (Q::Value, Option<DepNodeIndex>)
 where
     Q: QueryConfig<Qcx>,
     Qcx: QueryContext,
 {
-    let dep_graph = qcx.dep_context().dep_graph();
+    // Use `JobOwner` so the query will be poisoned if executing it panics.
+    let job_owner = JobOwner { state, key };
+
+    let (result, dep_node_index) = match qcx.dep_context().dep_graph().data() {
+        None => execute_job_non_incr(query, qcx, key, id),
+        Some(data) => execute_job_incr(query, qcx, data, key, dep_node, id),
+    };
 
-    // Fast path for when incr. comp. is off.
-    if !dep_graph.is_fully_enabled() {
-        // Fingerprint the key, just to assert that it doesn't
-        // have anything we don't consider hashable
-        if cfg!(debug_assertions) {
-            let _ = key.to_fingerprint(*qcx.dep_context());
+    let cache = query.query_cache(qcx);
+    if query.feedable() {
+        // We should not compute queries that also got a value via feeding.
+        // This can't happen, as query feeding adds the very dependencies to the fed query
+        // as its feeding query had. So if the fed query is red, so is its feeder, which will
+        // get evaluated first, and re-feed the query.
+        if let Some((cached_result, _)) = cache.lookup(&key) {
+            panic!(
+                "fed query later has its value computed. The already cached value: {}",
+                (query.format_value())(&cached_result)
+            );
         }
+    }
+    job_owner.complete(cache, result, dep_node_index);
 
-        let prof_timer = qcx.dep_context().profiler().query_provider();
-        let result = qcx.start_query(job_id, Q::DEPTH_LIMIT, None, || Q::compute(qcx, key));
-        let dep_node_index = dep_graph.next_virtual_depnode_index();
-        prof_timer.finish_with_query_invocation_id(dep_node_index.into());
+    (result, Some(dep_node_index))
+}
 
-        // Similarly, fingerprint the result to assert that
-        // it doesn't have anything not considered hashable.
-        if cfg!(debug_assertions)
-            && let Some(hash_result) = Q::HASH_RESULT
-        {
-            qcx.dep_context().with_stable_hashing_context(|mut hcx| {
-                hash_result(&mut hcx, &result);
-            });
-        }
+// Fast path for when incr. comp. is off.
+#[inline(always)]
+fn execute_job_non_incr<Q, Qcx>(
+    query: Q,
+    qcx: Qcx,
+    key: Q::Key,
+    job_id: QueryJobId,
+) -> (Q::Value, DepNodeIndex)
+where
+    Q: QueryConfig<Qcx>,
+    Qcx: QueryContext,
+{
+    debug_assert!(!qcx.dep_context().dep_graph().is_fully_enabled());
 
-        return (result, dep_node_index);
+    // Fingerprint the key, just to assert that it doesn't
+    // have anything we don't consider hashable
+    if cfg!(debug_assertions) {
+        let _ = key.to_fingerprint(*qcx.dep_context());
     }
 
-    if !Q::ANON && !Q::EVAL_ALWAYS {
+    let prof_timer = qcx.dep_context().profiler().query_provider();
+    let result = qcx.start_query(job_id, query.depth_limit(), None, || query.compute(qcx, key));
+    let dep_node_index = qcx.dep_context().dep_graph().next_virtual_depnode_index();
+    prof_timer.finish_with_query_invocation_id(dep_node_index.into());
+
+    // Similarly, fingerprint the result to assert that
+    // it doesn't have anything not considered hashable.
+    if cfg!(debug_assertions) && let Some(hash_result) = query.hash_result() {
+        qcx.dep_context().with_stable_hashing_context(|mut hcx| {
+            hash_result(&mut hcx, &result);
+        });
+    }
+
+    (result, dep_node_index)
+}
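`execute_job` above wraps the whole computation in a `JobOwner` so that a panic inside the query poisons its slot rather than leaving other threads blocked on it forever; `complete` forgets the guard on the success path. A toy sketch of that drop-based poisoning idea (rustc's real guard also publishes the result and wakes waiters):

```rust
use std::collections::HashMap;
use std::sync::Mutex;

enum QueryResult {
    Started,
    Poisoned,
}

// Guard that marks the slot poisoned unless `complete` defuses it.
struct JobOwner<'a> {
    state: &'a Mutex<HashMap<u32, QueryResult>>,
    key: u32,
}

impl<'a> JobOwner<'a> {
    fn complete(self, _value: String) {
        // Remove the slot (in rustc: also publish the value to the cache),
        // then skip the Drop impl so the slot is not poisoned.
        self.state.lock().unwrap().remove(&self.key);
        std::mem::forget(self);
    }
}

impl<'a> Drop for JobOwner<'a> {
    fn drop(&mut self) {
        // Reached only on panic: later attempts to run this query see
        // `Poisoned` and abort instead of waiting forever.
        self.state.lock().unwrap().insert(self.key, QueryResult::Poisoned);
    }
}
```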
+
+#[inline(always)]
+fn execute_job_incr<Q, Qcx>(
+    query: Q,
+    qcx: Qcx,
+    dep_graph_data: &DepGraphData<Qcx::DepKind>,
+    key: Q::Key,
+    mut dep_node_opt: Option<DepNode<Qcx::DepKind>>,
+    job_id: QueryJobId,
+) -> (Q::Value, DepNodeIndex)
+where
+    Q: QueryConfig<Qcx>,
+    Qcx: QueryContext,
+{
+    if !query.anon() && !query.eval_always() {
         // `to_dep_node` is expensive for some `DepKind`s.
         let dep_node =
-            dep_node_opt.get_or_insert_with(|| Q::construct_dep_node(*qcx.dep_context(), &key));
+            dep_node_opt.get_or_insert_with(|| query.construct_dep_node(*qcx.dep_context(), &key));
 
         // The diagnostics for this query will be promoted to the current session during
         // `try_mark_green()`, so we can ignore them here.
         if let Some(ret) = qcx.start_query(job_id, false, None, || {
-            try_load_from_disk_and_cache_in_memory::<Q, Qcx>(qcx, &key, &dep_node)
+            try_load_from_disk_and_cache_in_memory(query, dep_graph_data, qcx, &key, &dep_node)
         }) {
             return ret;
         }
@@ -453,17 +488,24 @@ where
     let diagnostics = Lock::new(ThinVec::new());
 
     let (result, dep_node_index) =
-        qcx.start_query(job_id, Q::DEPTH_LIMIT, Some(&diagnostics), || {
-            if Q::ANON {
-                return dep_graph
-                    .with_anon_task(*qcx.dep_context(), Q::DEP_KIND, || Q::compute(qcx, key));
+        qcx.start_query(job_id, query.depth_limit(), Some(&diagnostics), || {
+            if query.anon() {
+                return dep_graph_data.with_anon_task(*qcx.dep_context(), query.dep_kind(), || {
+                    query.compute(qcx, key)
+                });
             }
 
             // `to_dep_node` is expensive for some `DepKind`s.
             let dep_node =
-                dep_node_opt.unwrap_or_else(|| Q::construct_dep_node(*qcx.dep_context(), &key));
-
-            dep_graph.with_task(dep_node, qcx, key, Q::compute, Q::HASH_RESULT)
+                dep_node_opt.unwrap_or_else(|| query.construct_dep_node(*qcx.dep_context(), &key));
+
+            dep_graph_data.with_task(
+                dep_node,
+                (qcx, query),
+                key,
+                |(qcx, query), key| query.compute(qcx, key),
+                query.hash_result(),
+            )
         });
 
     prof_timer.finish_with_query_invocation_id(dep_node_index.into());
@@ -472,7 +514,7 @@ where
     let side_effects = QuerySideEffects { diagnostics };
 
     if std::intrinsics::unlikely(!side_effects.is_empty()) {
-        if Q::ANON {
+        if query.anon() {
             qcx.store_side_effects_for_anon_node(dep_node_index, side_effects);
         } else {
             qcx.store_side_effects(dep_node_index, side_effects);
@@ -484,6 +526,8 @@ where
 
 #[inline(always)]
 fn try_load_from_disk_and_cache_in_memory<Q, Qcx>(
+    query: Q,
+    dep_graph_data: &DepGraphData<Qcx::DepKind>,
     qcx: Qcx,
     key: &Q::Key,
     dep_node: &DepNode<Qcx::DepKind>,
@@ -495,21 +539,22 @@ where
     // Note this function can be called concurrently from the same query
     // We must ensure that this is handled correctly.
 
-    let dep_graph = qcx.dep_context().dep_graph();
-    let (prev_dep_node_index, dep_node_index) = dep_graph.try_mark_green(qcx, &dep_node)?;
+    let (prev_dep_node_index, dep_node_index) = dep_graph_data.try_mark_green(qcx, &dep_node)?;
 
-    debug_assert!(dep_graph.is_green(dep_node));
+    debug_assert!(dep_graph_data.is_index_green(prev_dep_node_index));
 
     // First we try to load the result from the on-disk cache.
     // Some things are never cached on disk.
-    if let Some(try_load_from_disk) = Q::try_load_from_disk(qcx, &key) {
+    if let Some(try_load_from_disk) = query.try_load_from_disk(qcx, &key) {
         let prof_timer = qcx.dep_context().profiler().incr_cache_loading();
 
         // The call to `with_query_deserialization` enforces that no new `DepNodes`
         // are created during deserialization. See the docs of that method for more
        // details.
-        let result =
-            dep_graph.with_query_deserialization(|| try_load_from_disk(qcx, prev_dep_node_index));
+        let result = qcx
+            .dep_context()
+            .dep_graph()
+            .with_query_deserialization(|| try_load_from_disk(qcx, prev_dep_node_index));
 
         prof_timer.finish_with_query_invocation_id(dep_node_index.into());
@@ -517,14 +562,10 @@ where
         if std::intrinsics::unlikely(
             qcx.dep_context().sess().opts.unstable_opts.query_dep_graph,
         ) {
-            dep_graph.mark_debug_loaded_from_disk(*dep_node)
+            dep_graph_data.mark_debug_loaded_from_disk(*dep_node)
         }
 
-        let prev_fingerprint = qcx
-            .dep_context()
-            .dep_graph()
-            .prev_fingerprint_of(dep_node)
-            .unwrap_or(Fingerprint::ZERO);
+        let prev_fingerprint = dep_graph_data.prev_fingerprint_of(prev_dep_node_index);
         // If `-Zincremental-verify-ich` is specified, re-hash results from
         // the cache and make sure that they have the expected fingerprint.
         //
@@ -536,7 +577,14 @@ where
         if std::intrinsics::unlikely(
             try_verify || qcx.dep_context().sess().opts.unstable_opts.incremental_verify_ich,
         ) {
-            incremental_verify_ich(*qcx.dep_context(), &result, dep_node, Q::HASH_RESULT);
+            incremental_verify_ich(
+                *qcx.dep_context(),
+                dep_graph_data,
+                &result,
+                prev_dep_node_index,
+                query.hash_result(),
+                query.format_value(),
+            );
         }
 
         return Some((result, dep_node_index));
@@ -546,16 +594,23 @@ where
     // can be forced from `DepNode`.
         debug_assert!(
             !qcx.dep_context().fingerprint_style(dep_node.kind).reconstructible(),
-            "missing on-disk cache entry for {dep_node:?}"
+            "missing on-disk cache entry for reconstructible {dep_node:?}"
         );
     }
 
+    // Sanity check for the logic in `ensure`: if the node is green and the result loadable,
+    // we should actually be able to load it.
+    debug_assert!(
+        !query.loadable_from_disk(qcx, &key, prev_dep_node_index),
+        "missing on-disk cache entry for loadable {dep_node:?}"
+    );
+
     // We could not load a result from the on-disk cache, so
     // recompute.
     let prof_timer = qcx.dep_context().profiler().query_provider();
 
     // The dep-graph for this computation is already in-place.
-    let result = dep_graph.with_ignore(|| Q::compute(qcx, *key));
+    let result = qcx.dep_context().dep_graph().with_ignore(|| query.compute(qcx, *key));
 
     prof_timer.finish_with_query_invocation_id(dep_node_index.into());
 
@@ -568,87 +623,69 @@ where
     //
     // See issue #82920 for an example of a miscompilation that would get turned into
    // an ICE by this check
-    incremental_verify_ich(*qcx.dep_context(), &result, dep_node, Q::HASH_RESULT);
+    incremental_verify_ich(
+        *qcx.dep_context(),
+        dep_graph_data,
+        &result,
+        prev_dep_node_index,
+        query.hash_result(),
+        query.format_value(),
+    );
 
     Some((result, dep_node_index))
 }
 
 #[inline]
-#[instrument(skip(tcx, result, hash_result), level = "debug")]
-pub(crate) fn incremental_verify_ich<Tcx, V: Debug>(
+#[instrument(skip(tcx, dep_graph_data, result, hash_result, format_value), level = "debug")]
+pub(crate) fn incremental_verify_ich<Tcx, V>(
     tcx: Tcx,
+    dep_graph_data: &DepGraphData<Tcx::DepKind>,
     result: &V,
-    dep_node: &DepNode<Tcx::DepKind>,
+    prev_index: SerializedDepNodeIndex,
     hash_result: Option<fn(&mut StableHashingContext<'_>, &V) -> Fingerprint>,
-) -> Fingerprint
-where
+    format_value: fn(&V) -> String,
+) where
     Tcx: DepContext,
 {
-    assert!(
-        tcx.dep_graph().is_green(dep_node),
-        "fingerprint for green query instance not loaded from cache: {dep_node:?}",
-    );
+    if !dep_graph_data.is_index_green(prev_index) {
+        incremental_verify_ich_not_green(tcx, prev_index)
+    }
 
     let new_hash = hash_result.map_or(Fingerprint::ZERO, |f| {
         tcx.with_stable_hashing_context(|mut hcx| f(&mut hcx, result))
     });
 
-    let old_hash = tcx.dep_graph().prev_fingerprint_of(dep_node);
+    let old_hash = dep_graph_data.prev_fingerprint_of(prev_index);
 
-    if Some(new_hash) != old_hash {
-        incremental_verify_ich_failed(
-            tcx.sess(),
-            DebugArg::from(&dep_node),
-            DebugArg::from(&result),
-        );
+    if new_hash != old_hash {
+        incremental_verify_ich_failed(tcx, prev_index, &|| format_value(&result));
     }
-
-    new_hash
-}
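The reworked `incremental_verify_ich` above re-hashes a freshly loaded or recomputed result and compares it against the fingerprint recorded by the previous session, panicking on mismatch. A toy version of the check, using `u64` and `DefaultHasher` in place of rustc's 128-bit `Fingerprint` and stable hashing context:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Toy stand-in for the fingerprint stored by the previous session.
fn fingerprint<V: Hash>(value: &V) -> u64 {
    let mut hasher = DefaultHasher::new();
    value.hash(&mut hasher);
    hasher.finish()
}

// Panic if `result` no longer hashes to what the previous session recorded.
// Real rustc formats the value lazily via `format_value` only on failure.
fn verify_ich<V: Hash + std::fmt::Debug>(result: &V, old_hash: u64) {
    let new_hash = fingerprint(result);
    if new_hash != old_hash {
        panic!("found unstable fingerprints for {result:?}: {old_hash:#x} vs {new_hash:#x}");
    }
}

fn main() {
    let value = vec![1u32, 2, 3];
    let recorded = fingerprint(&value); // pretend this came from disk
    verify_ich(&value, recorded);       // passes: hashing is deterministic
}
```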
-// This DebugArg business is largely a mirror of std::fmt::ArgumentV1, which is
-// currently not exposed publicly.
-//
-// The PR which added this attempted to use `&dyn Debug` instead, but that
-// showed statistically significant worse compiler performance. It's not
-// actually clear what the cause there was -- the code should be cold. If this
-// can be replaced with `&dyn Debug` with no perf impact, then it probably
-// should be.
-extern "C" {
-    type Opaque;
-}
-
-struct DebugArg<'a> {
-    value: &'a Opaque,
-    fmt: fn(&Opaque, &mut std::fmt::Formatter<'_>) -> std::fmt::Result,
-}
-
-impl<'a, T> From<&'a T> for DebugArg<'a>
+#[cold]
+#[inline(never)]
+fn incremental_verify_ich_not_green<Tcx>(tcx: Tcx, prev_index: SerializedDepNodeIndex)
 where
-    T: std::fmt::Debug,
+    Tcx: DepContext,
 {
-    fn from(value: &'a T) -> DebugArg<'a> {
-        DebugArg {
-            value: unsafe { std::mem::transmute(value) },
-            fmt: unsafe {
-                std::mem::transmute(<T as std::fmt::Debug>::fmt as fn(_, _) -> std::fmt::Result)
-            },
-        }
-    }
+    panic!(
+        "fingerprint for green query instance not loaded from cache: {:?}",
+        tcx.dep_graph().data().unwrap().prev_node_of(prev_index)
+    )
 }
 
-impl std::fmt::Debug for DebugArg<'_> {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        (self.fmt)(self.value, f)
-    }
-}
-
-// Note that this is marked #[cold] and intentionally takes the equivalent of
-// `dyn Debug` for its arguments, as we want to avoid generating a bunch of
-// different implementations for LLVM to chew on (and filling up the final
-// binary, too).
+// Note that this is marked #[cold] and intentionally takes `dyn Debug` for `result`,
+// as we want to avoid generating a bunch of different implementations for LLVM to
+// chew on (and filling up the final binary, too).
 #[cold]
-fn incremental_verify_ich_failed(sess: &Session, dep_node: DebugArg<'_>, result: DebugArg<'_>) {
+#[inline(never)]
+fn incremental_verify_ich_failed<Tcx>(
+    tcx: Tcx,
+    prev_index: SerializedDepNodeIndex,
+    result: &dyn Fn() -> String,
+) where
+    Tcx: DepContext,
+{
     // When we emit an error message and panic, we try to debug-print the `DepNode`
     // and query result. Unfortunately, this can cause us to run additional queries,
     // which may result in another fingerprint mismatch while we're in the middle
@@ -662,19 +699,20 @@ fn incremental_verify_ich_failed(sess: &Session, dep_node: DebugArg<'_>, result:
     let old_in_panic = INSIDE_VERIFY_PANIC.with(|in_panic| in_panic.replace(true));
 
     if old_in_panic {
-        sess.emit_err(crate::error::Reentrant);
+        tcx.sess().emit_err(crate::error::Reentrant);
     } else {
-        let run_cmd = if let Some(crate_name) = &sess.opts.crate_name {
+        let run_cmd = if let Some(crate_name) = &tcx.sess().opts.crate_name {
             format!("`cargo clean -p {crate_name}` or `cargo clean`")
         } else {
             "`cargo clean`".to_string()
         };
 
-        sess.emit_err(crate::error::IncrementCompilation {
+        let dep_node = tcx.dep_graph().data().unwrap().prev_node_of(prev_index);
+        tcx.sess().emit_err(crate::error::IncrementCompilation {
            run_cmd,
             dep_node: format!("{dep_node:?}"),
         });
 
-        panic!("Found unstable fingerprints for {dep_node:?}: {result:?}");
+        panic!("Found unstable fingerprints for {dep_node:?}: {}", result());
     }
 
     INSIDE_VERIFY_PANIC.with(|in_panic| in_panic.set(old_in_panic));
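`incremental_verify_ich_failed` above guards its error path with a thread-local flag, because debug-formatting the offending value can itself run more queries and trip the same check re-entrantly. The pattern in isolation (the closure parameter mirrors the diff's lazy `&dyn Fn() -> String` result formatter):

```rust
use std::cell::Cell;

thread_local! {
    // Set while we are already reporting a mismatch on this thread.
    static INSIDE_VERIFY_PANIC: Cell<bool> = Cell::new(false);
}

fn report_mismatch(describe: &dyn Fn() -> String) {
    // `replace` returns the previous value: `true` means we re-entered
    // while already reporting, so formatting again could recurse forever.
    let old_in_panic = INSIDE_VERIFY_PANIC.with(|flag| flag.replace(true));

    if old_in_panic {
        eprintln!("re-entrant fingerprint mismatch; skipping detailed report");
    } else {
        // Calling `describe` may run arbitrary code (in rustc: more queries).
        eprintln!("unstable fingerprint for value: {}", describe());
    }

    INSIDE_VERIFY_PANIC.with(|flag| flag.set(old_in_panic));
}

fn main() {
    report_mismatch(&|| "example value".to_string());
}
```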
@@ -689,22 +727,27 @@ fn incremental_verify_ich_failed(sess: &Session, dep_node: DebugArg<'_>, result:
 ///
 /// Note: The optimization is only available during incr. comp.
 #[inline(never)]
-fn ensure_must_run<Q, Qcx>(qcx: Qcx, key: &Q::Key) -> (bool, Option<DepNode<Qcx::DepKind>>)
+fn ensure_must_run<Q, Qcx>(
+    query: Q,
+    qcx: Qcx,
+    key: &Q::Key,
+    check_cache: bool,
+) -> (bool, Option<DepNode<Qcx::DepKind>>)
 where
     Q: QueryConfig<Qcx>,
     Qcx: QueryContext,
 {
-    if Q::EVAL_ALWAYS {
+    if query.eval_always() {
         return (true, None);
     }
 
     // Ensuring an anonymous query makes no sense
-    assert!(!Q::ANON);
+    assert!(!query.anon());
 
-    let dep_node = Q::construct_dep_node(*qcx.dep_context(), key);
+    let dep_node = query.construct_dep_node(*qcx.dep_context(), key);
 
     let dep_graph = qcx.dep_context().dep_graph();
-    match dep_graph.try_mark_green(qcx, &dep_node) {
+    let serialized_dep_node_index = match dep_graph.try_mark_green(qcx, &dep_node) {
         None => {
             // A None return from `try_mark_green` means that this is either
             // a new dep node or that the dep node has already been marked red.
@@ -712,32 +755,44 @@ where
             // DepNodeIndex. We must invoke the query itself. The performance cost
             // this introduces should be negligible as we'll immediately hit the
             // in-memory cache, or another query down the line will.
-            (true, Some(dep_node))
+            return (true, Some(dep_node));
         }
-        Some((_, dep_node_index)) => {
+        Some((serialized_dep_node_index, dep_node_index)) => {
             dep_graph.read_index(dep_node_index);
             qcx.dep_context().profiler().query_cache_hit(dep_node_index.into());
-            (false, None)
+            serialized_dep_node_index
         }
+    };
+
+    // We do not need the value at all, so do not check the cache.
+    if !check_cache {
+        return (false, None);
     }
+
+    let loadable = query.loadable_from_disk(qcx, key, serialized_dep_node_index);
+    (!loadable, Some(dep_node))
 }
 
 #[derive(Debug)]
 pub enum QueryMode {
     Get,
-    Ensure,
+    Ensure { check_cache: bool },
 }
 
 #[inline(always)]
-pub fn get_query<Q, Qcx, D>(qcx: Qcx, span: Span, key: Q::Key, mode: QueryMode) -> Option<Q::Value>
+pub fn get_query<Q, Qcx>(
+    query: Q,
+    qcx: Qcx,
+    span: Span,
+    key: Q::Key,
+    mode: QueryMode,
+) -> Option<Q::Value>
 where
-    D: DepKind,
     Q: QueryConfig<Qcx>,
-    Q::Value: Value<Qcx::DepContext, D>,
     Qcx: QueryContext,
 {
-    let dep_node = if let QueryMode::Ensure = mode {
-        let (must_run, dep_node) = ensure_must_run::<Q, _>(qcx, &key);
+    let dep_node = if let QueryMode::Ensure { check_cache } = mode {
+        let (must_run, dep_node) = ensure_must_run(query, qcx, &key, check_cache);
         if !must_run {
             return None;
         }
@@ -747,28 +802,30 @@ where
    };
 
     let (result, dep_node_index) =
-        ensure_sufficient_stack(|| try_execute_query::<Q, Qcx>(qcx, span, key, dep_node));
+        ensure_sufficient_stack(|| try_execute_query(query, qcx, span, key, dep_node));
     if let Some(dep_node_index) = dep_node_index {
         qcx.dep_context().dep_graph().read_index(dep_node_index)
     }
     Some(result)
 }
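`QueryMode::Ensure` now carries a `check_cache` flag, distinguishing "make sure this query has run" from "make sure its result will be loadable later": with `check_cache: false` a green dep node alone suffices, while with `check_cache: true` `ensure_must_run` additionally calls `loadable_from_disk` and forces re-execution if the cached value is missing. A small sketch of the resulting semantics, illustrative only:

```rust
#[derive(Debug)]
enum QueryMode {
    Get,
    Ensure { check_cache: bool },
}

fn describe(mode: QueryMode) -> &'static str {
    match mode {
        // Always produce the value, computing or loading as needed.
        QueryMode::Get => "compute or load the value",
        // Green node + loadable result: nothing to do; otherwise re-run.
        QueryMode::Ensure { check_cache: true } => "run unless the cached result is loadable",
        // Green node alone is enough; we only wanted the side effects.
        QueryMode::Ensure { check_cache: false } => "run unless the node is already green",
    }
}

fn main() {
    println!("{}", describe(QueryMode::Ensure { check_cache: true }));
}
```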
-pub fn force_query<Q, Qcx, D>(qcx: Qcx, key: Q::Key, dep_node: DepNode<Qcx::DepKind>)
-where
-    D: DepKind,
+pub fn force_query<Q, Qcx>(
+    query: Q,
+    qcx: Qcx,
+    key: Q::Key,
+    dep_node: DepNode<<Qcx as HasDepContext>::DepKind>,
+) where
     Q: QueryConfig<Qcx>,
-    Q::Value: Value<Qcx::DepContext, D>,
     Qcx: QueryContext,
 {
     // We may be concurrently trying both execute and force a query.
     // Ensure that only one of them runs the query.
-    if let Some((_, index)) = Q::query_cache(qcx).lookup(&key) {
+    if let Some((_, index)) = query.query_cache(qcx).lookup(&key) {
         qcx.dep_context().profiler().query_cache_hit(index.into());
         return;
     }
 
-    debug_assert!(!Q::ANON);
+    debug_assert!(!query.anon());
 
-    ensure_sufficient_stack(|| try_execute_query::<Q, _>(qcx, DUMMY_SP, key, Some(dep_node)));
+    ensure_sufficient_stack(|| try_execute_query(query, qcx, DUMMY_SP, key, Some(dep_node)));
 }