path: root/compiler/rustc_query_system
Diffstat (limited to 'compiler/rustc_query_system')
-rw-r--r--  compiler/rustc_query_system/Cargo.toml                  |   2
-rw-r--r--  compiler/rustc_query_system/messages.ftl                |  30
-rw-r--r--  compiler/rustc_query_system/src/dep_graph/graph.rs      | 106
-rw-r--r--  compiler/rustc_query_system/src/dep_graph/query.rs      |   7
-rw-r--r--  compiler/rustc_query_system/src/dep_graph/serialized.rs |  18
-rw-r--r--  compiler/rustc_query_system/src/ich/impls_syntax.rs     |   6
-rw-r--r--  compiler/rustc_query_system/src/lib.rs                  |   2
-rw-r--r--  compiler/rustc_query_system/src/query/caches.rs         |   2
-rw-r--r--  compiler/rustc_query_system/src/query/config.rs         |  13
-rw-r--r--  compiler/rustc_query_system/src/query/job.rs            |   6
-rw-r--r--  compiler/rustc_query_system/src/query/mod.rs            |   7
-rw-r--r--  compiler/rustc_query_system/src/query/plumbing.rs       | 164
12 files changed, 184 insertions, 179 deletions
diff --git a/compiler/rustc_query_system/Cargo.toml b/compiler/rustc_query_system/Cargo.toml
index 12b4a1143..e02cf38b6 100644
--- a/compiler/rustc_query_system/Cargo.toml
+++ b/compiler/rustc_query_system/Cargo.toml
@@ -7,11 +7,11 @@ edition = "2021"
[dependencies]
parking_lot = "0.11"
-rustc_arena = { path = "../rustc_arena" }
rustc_ast = { path = "../rustc_ast" }
rustc_data_structures = { path = "../rustc_data_structures" }
rustc_errors = { path = "../rustc_errors" }
rustc_feature = { path = "../rustc_feature" }
+rustc_fluent_macro = { path = "../rustc_fluent_macro" }
rustc_hir = { path = "../rustc_hir" }
rustc_index = { path = "../rustc_index" }
rustc_macros = { path = "../rustc_macros" }
diff --git a/compiler/rustc_query_system/messages.ftl b/compiler/rustc_query_system/messages.ftl
index 870e82403..49b423d1a 100644
--- a/compiler/rustc_query_system/messages.ftl
+++ b/compiler/rustc_query_system/messages.ftl
@@ -1,30 +1,30 @@
-query_system_reentrant = internal compiler error: re-entrant incremental verify failure, suppressing message
-
-query_system_increment_compilation = internal compiler error: encountered incremental compilation error with {$dep_node}
- .help = This is a known issue with the compiler. Run {$run_cmd} to allow your project to compile
-
-query_system_increment_compilation_note1 = Please follow the instructions below to create a bug report with the provided information
-query_system_increment_compilation_note2 = See <https://github.com/rust-lang/rust/issues/84970> for more information
-
query_system_cycle = cycle detected when {$stack_bottom}
-query_system_cycle_usage = cycle used when {$usage}
+query_system_cycle_recursive_trait_alias = trait aliases cannot be recursive
-query_system_cycle_stack_single = ...which immediately requires {$stack_bottom} again
+query_system_cycle_recursive_ty_alias = type aliases cannot be recursive
+query_system_cycle_recursive_ty_alias_help1 = consider using a struct, enum, or union instead to break the cycle
+query_system_cycle_recursive_ty_alias_help2 = see <https://doc.rust-lang.org/reference/types.html#recursive-types> for more information
query_system_cycle_stack_middle = ...which requires {$desc}...
query_system_cycle_stack_multiple = ...which again requires {$stack_bottom}, completing the cycle
-query_system_cycle_recursive_ty_alias = type aliases cannot be recursive
-query_system_cycle_recursive_ty_alias_help1 = consider using a struct, enum, or union instead to break the cycle
-query_system_cycle_recursive_ty_alias_help2 = see <https://doc.rust-lang.org/reference/types.html#recursive-types> for more information
+query_system_cycle_stack_single = ...which immediately requires {$stack_bottom} again
-query_system_cycle_recursive_trait_alias = trait aliases cannot be recursive
+query_system_cycle_usage = cycle used when {$usage}
query_system_cycle_which_requires = ...which requires {$desc}...
+query_system_increment_compilation = internal compiler error: encountered incremental compilation error with {$dep_node}
+ .help = This is a known issue with the compiler. Run {$run_cmd} to allow your project to compile
+
+query_system_increment_compilation_note1 = Please follow the instructions below to create a bug report with the provided information
+query_system_increment_compilation_note2 = See <https://github.com/rust-lang/rust/issues/84970> for more information
+
+query_system_layout_of_depth = query depth increased by {$depth} when {$desc}
+
query_system_query_overflow = queries overflow the depth limit!
.help = consider increasing the recursion limit by adding a `#![recursion_limit = "{$suggested_limit}"]` attribute to your crate (`{$crate_name}`)
-query_system_layout_of_depth = query depth increased by {$depth} when {$desc}
+query_system_reentrant = internal compiler error: reentrant incremental verify failure, suppressing message
diff --git a/compiler/rustc_query_system/src/dep_graph/graph.rs b/compiler/rustc_query_system/src/dep_graph/graph.rs
index a9a2e6dd0..c0d7386dd 100644
--- a/compiler/rustc_query_system/src/dep_graph/graph.rs
+++ b/compiler/rustc_query_system/src/dep_graph/graph.rs
@@ -6,7 +6,7 @@ use rustc_data_structures::sharded::{self, Sharded};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::steal::Steal;
use rustc_data_structures::sync::{AtomicU32, AtomicU64, Lock, Lrc, Ordering};
-use rustc_index::vec::IndexVec;
+use rustc_index::IndexVec;
use rustc_serialize::opaque::{FileEncodeResult, FileEncoder};
use smallvec::{smallvec, SmallVec};
use std::assert_matches::assert_matches;
@@ -249,7 +249,7 @@ impl<K: DepKind> DepGraph<K> {
/// get an ICE. Normally, we would have tried (and failed) to mark
/// some other query green (e.g. `item_children`) which was used
/// to obtain `C`, which would prevent us from ever trying to force
- /// a non-existent `D`.
+ /// a nonexistent `D`.
///
/// It might be possible to enforce that all `DepNode`s read during
/// deserialization already exist in the previous `DepGraph`. In
@@ -354,24 +354,20 @@ impl<K: DepKind> DepGraphData<K> {
- dep-node: {key:?}"
);
- let task_deps = if cx.dep_context().is_eval_always(key.kind) {
- None
+ let with_deps = |task_deps| K::with_deps(task_deps, || task(cx, arg));
+ let (result, edges) = if cx.dep_context().is_eval_always(key.kind) {
+ (with_deps(TaskDepsRef::EvalAlways), smallvec![])
} else {
- Some(Lock::new(TaskDeps {
+ let task_deps = Lock::new(TaskDeps {
#[cfg(debug_assertions)]
node: Some(key),
reads: SmallVec::new(),
read_set: Default::default(),
phantom_data: PhantomData,
- }))
+ });
+ (with_deps(TaskDepsRef::Allow(&task_deps)), task_deps.into_inner().reads)
};
- let task_deps_ref =
- task_deps.as_ref().map(TaskDepsRef::Allow).unwrap_or(TaskDepsRef::EvalAlways);
-
- let result = K::with_deps(task_deps_ref, || task(cx, arg));
- let edges = task_deps.map_or_else(|| smallvec![], |lock| lock.into_inner().reads);
-
let dcx = cx.dep_context();
let hashing_timer = dcx.profiler().incr_result_hashing();
let current_fingerprint =
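
[Editor's note] The hunk above folds the Option<Lock<TaskDeps>> plumbing into a single if/else that yields both the task result and the recorded edges. A minimal sketch of the same pattern, using simplified stand-in types (TaskDeps, TaskDepsRef, RefCell instead of Lock; not the real rustc definitions):

use std::cell::RefCell;

#[derive(Default)]
struct TaskDeps {
    reads: Vec<u32>,
}

enum TaskDepsRef<'a> {
    EvalAlways,                   // reads are never tracked
    Allow(&'a RefCell<TaskDeps>), // reads are pushed into the given buffer
}

// Run `task` with the chosen dependency-tracking mode.
fn with_deps<R>(deps: TaskDepsRef<'_>, task: impl FnOnce(&TaskDepsRef<'_>) -> R) -> R {
    task(&deps)
}

// The task records a "read" when tracking is allowed.
fn task(deps: &TaskDepsRef<'_>) -> String {
    if let TaskDepsRef::Allow(cell) = deps {
        cell.borrow_mut().reads.push(42);
    }
    "result".to_string()
}

// Mirror of the refactored code: one if/else produces both the task result
// and the recorded edges, instead of threading an Option through several steps.
fn run_task(eval_always: bool) -> (String, Vec<u32>) {
    if eval_always {
        (with_deps(TaskDepsRef::EvalAlways, task), Vec::new())
    } else {
        let task_deps = RefCell::new(TaskDeps::default());
        let result = with_deps(TaskDepsRef::Allow(&task_deps), task);
        (result, task_deps.into_inner().reads)
    }
}

fn main() {
    assert!(run_task(true).1.is_empty());
    assert_eq!(run_task(false).1, vec![42]);
}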
@@ -660,7 +656,7 @@ impl<K: DepKind> DepGraphData<K> {
/// current compilation session. Used in various assertions
#[inline]
pub fn is_index_green(&self, prev_index: SerializedDepNodeIndex) -> bool {
- self.colors.get(prev_index).map_or(false, |c| c.is_green())
+ self.colors.get(prev_index).is_some_and(|c| c.is_green())
}
#[inline]
@@ -681,7 +677,7 @@ impl<K: DepKind> DepGraphData<K> {
impl<K: DepKind> DepGraph<K> {
#[inline]
pub fn dep_node_exists(&self, dep_node: &DepNode<K>) -> bool {
- self.data.as_ref().map_or(false, |data| data.dep_node_exists(dep_node))
+ self.data.as_ref().is_some_and(|data| data.dep_node_exists(dep_node))
}
/// Checks whether a previous work product exists for `v` and, if
@@ -959,7 +955,7 @@ impl<K: DepKind> DepGraph<K> {
/// Returns true if the given node has been marked as green during the
/// current compilation session. Used in various assertions
pub fn is_green(&self, dep_node: &DepNode<K>) -> bool {
- self.node_color(dep_node).map_or(false, |c| c.is_green())
+ self.node_color(dep_node).is_some_and(|c| c.is_green())
}
/// This method loads all on-disk cacheable query results into memory, so
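
[Editor's note] The map_or(false, ..) to is_some_and(..) replacements in this file are behaviorally equivalent; is_some_and states the intent directly. A tiny check of the equivalence:

fn main() {
    let value: Option<i32> = Some(3);
    assert_eq!(value.map_or(false, |v| v > 0), value.is_some_and(|v| v > 0));
    assert!(!None::<i32>.is_some_and(|v| v > 0));
}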
@@ -1236,76 +1232,48 @@ impl<K: DepKind> CurrentDepGraph<K> {
self.node_intern_event_id.map(|eid| profiler.generic_activity_with_event_id(eid));
if let Some(prev_index) = prev_graph.node_to_index_opt(&key) {
+ let get_dep_node_index = |color, fingerprint| {
+ if print_status {
+ eprintln!("[task::{color:}] {key:?}");
+ }
+
+ let mut prev_index_to_index = self.prev_index_to_index.lock();
+
+ let dep_node_index = match prev_index_to_index[prev_index] {
+ Some(dep_node_index) => dep_node_index,
+ None => {
+ let dep_node_index =
+ self.encoder.borrow().send(profiler, key, fingerprint, edges);
+ prev_index_to_index[prev_index] = Some(dep_node_index);
+ dep_node_index
+ }
+ };
+
+ #[cfg(debug_assertions)]
+ self.record_edge(dep_node_index, key, fingerprint);
+
+ dep_node_index
+ };
+
// Determine the color and index of the new `DepNode`.
if let Some(fingerprint) = fingerprint {
if fingerprint == prev_graph.fingerprint_by_index(prev_index) {
- if print_status {
- eprintln!("[task::green] {key:?}");
- }
-
// This is a green node: it existed in the previous compilation,
// its query was re-executed, and it has the same result as before.
- let mut prev_index_to_index = self.prev_index_to_index.lock();
-
- let dep_node_index = match prev_index_to_index[prev_index] {
- Some(dep_node_index) => dep_node_index,
- None => {
- let dep_node_index =
- self.encoder.borrow().send(profiler, key, fingerprint, edges);
- prev_index_to_index[prev_index] = Some(dep_node_index);
- dep_node_index
- }
- };
-
- #[cfg(debug_assertions)]
- self.record_edge(dep_node_index, key, fingerprint);
+ let dep_node_index = get_dep_node_index("green", fingerprint);
(dep_node_index, Some((prev_index, DepNodeColor::Green(dep_node_index))))
} else {
- if print_status {
- eprintln!("[task::red] {key:?}");
- }
-
// This is a red node: it existed in the previous compilation, its query
// was re-executed, but it has a different result from before.
- let mut prev_index_to_index = self.prev_index_to_index.lock();
-
- let dep_node_index = match prev_index_to_index[prev_index] {
- Some(dep_node_index) => dep_node_index,
- None => {
- let dep_node_index =
- self.encoder.borrow().send(profiler, key, fingerprint, edges);
- prev_index_to_index[prev_index] = Some(dep_node_index);
- dep_node_index
- }
- };
-
- #[cfg(debug_assertions)]
- self.record_edge(dep_node_index, key, fingerprint);
+ let dep_node_index = get_dep_node_index("red", fingerprint);
(dep_node_index, Some((prev_index, DepNodeColor::Red)))
}
} else {
- if print_status {
- eprintln!("[task::unknown] {key:?}");
- }
-
// This is a red node, effectively: it existed in the previous compilation
// session, its query was re-executed, but it doesn't compute a result hash
// (i.e. it represents a `no_hash` query), so we have no way of determining
// whether or not the result was the same as before.
- let mut prev_index_to_index = self.prev_index_to_index.lock();
-
- let dep_node_index = match prev_index_to_index[prev_index] {
- Some(dep_node_index) => dep_node_index,
- None => {
- let dep_node_index =
- self.encoder.borrow().send(profiler, key, Fingerprint::ZERO, edges);
- prev_index_to_index[prev_index] = Some(dep_node_index);
- dep_node_index
- }
- };
-
- #[cfg(debug_assertions)]
- self.record_edge(dep_node_index, key, Fingerprint::ZERO);
+ let dep_node_index = get_dep_node_index("unknown", Fingerprint::ZERO);
(dep_node_index, Some((prev_index, DepNodeColor::Red)))
}
} else {
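
[Editor's note] The hunk above deduplicates the green/red/unknown branches: the shared "print status, then look up or allocate the dep node index" logic moves into one local closure, and each branch passes only its color label and fingerprint. A sketch of that pattern with simplified placeholder types (RefCell and usize indices stand in for the real lock and index types):

use std::cell::RefCell;

struct CurrentDepGraph {
    prev_index_to_index: RefCell<Vec<Option<usize>>>,
    next_index: RefCell<usize>,
}

impl CurrentDepGraph {
    fn intern_node(&self, prev_index: usize, key: &str, color: &str, print_status: bool) -> usize {
        // Shared lookup-or-allocate logic, parameterized only by the color label.
        let get_dep_node_index = |color: &str| {
            if print_status {
                eprintln!("[task::{color}] {key:?}");
            }
            let mut map = self.prev_index_to_index.borrow_mut();
            match map[prev_index] {
                Some(idx) => idx,
                None => {
                    let mut next = self.next_index.borrow_mut();
                    let idx = *next;
                    *next += 1;
                    map[prev_index] = Some(idx);
                    idx
                }
            }
        };
        get_dep_node_index(color)
    }
}

fn main() {
    let g = CurrentDepGraph {
        prev_index_to_index: RefCell::new(vec![None; 4]),
        next_index: RefCell::new(0),
    };
    assert_eq!(g.intern_node(2, "key", "green", false), 0);
    assert_eq!(g.intern_node(2, "key", "red", false), 0); // same index is reused
}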
diff --git a/compiler/rustc_query_system/src/dep_graph/query.rs b/compiler/rustc_query_system/src/dep_graph/query.rs
index 27b3b5e13..5cbc6bf8f 100644
--- a/compiler/rustc_query_system/src/dep_graph/query.rs
+++ b/compiler/rustc_query_system/src/dep_graph/query.rs
@@ -1,6 +1,6 @@
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::graph::implementation::{Direction, Graph, NodeIndex, INCOMING};
-use rustc_index::vec::IndexVec;
+use rustc_index::IndexVec;
use super::{DepKind, DepNode, DepNodeIndex};
@@ -24,10 +24,7 @@ impl<K: DepKind> DepGraphQuery<K> {
pub fn push(&mut self, index: DepNodeIndex, node: DepNode<K>, edges: &[DepNodeIndex]) {
let source = self.graph.add_node(node);
- if index.index() >= self.dep_index_to_index.len() {
- self.dep_index_to_index.resize(index.index() + 1, None);
- }
- self.dep_index_to_index[index] = Some(source);
+ self.dep_index_to_index.insert(index, source);
self.indices.insert(node, source);
for &target in edges.iter() {
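
[Editor's note] The removed lines grew the map with None up to the target index and then stored Some(value); the replacement call bundles the same grow-then-set behavior. A sketch of that pattern on a plain Vec<Option<T>> (illustrative; not the rustc_index implementation):

fn insert_at<T>(map: &mut Vec<Option<T>>, index: usize, value: T) {
    if index >= map.len() {
        map.resize_with(index + 1, || None);
    }
    map[index] = Some(value);
}

fn main() {
    let mut map: Vec<Option<&str>> = Vec::new();
    insert_at(&mut map, 3, "source");
    assert_eq!(map.len(), 4);
    assert_eq!(map[3], Some("source"));
}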
diff --git a/compiler/rustc_query_system/src/dep_graph/serialized.rs b/compiler/rustc_query_system/src/dep_graph/serialized.rs
index 3d19a8491..edddfda62 100644
--- a/compiler/rustc_query_system/src/dep_graph/serialized.rs
+++ b/compiler/rustc_query_system/src/dep_graph/serialized.rs
@@ -18,7 +18,7 @@ use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::fx::FxHashMap;
use rustc_data_structures::profiling::SelfProfilerRef;
use rustc_data_structures::sync::Lock;
-use rustc_index::vec::{Idx, IndexVec};
+use rustc_index::{Idx, IndexVec};
use rustc_serialize::opaque::{FileEncodeResult, FileEncoder, IntEncodedWithFixedSize, MemDecoder};
use rustc_serialize::{Decodable, Decoder, Encodable};
use smallvec::SmallVec;
@@ -94,21 +94,19 @@ impl<'a, K: DepKind + Decodable<MemDecoder<'a>>> Decodable<MemDecoder<'a>>
{
#[instrument(level = "debug", skip(d))]
fn decode(d: &mut MemDecoder<'a>) -> SerializedDepGraph<K> {
- let start_position = d.position();
-
// The last 16 bytes are the node count and edge count.
debug!("position: {:?}", d.position());
- d.set_position(d.data.len() - 2 * IntEncodedWithFixedSize::ENCODED_SIZE);
+ let (node_count, edge_count) =
+ d.with_position(d.len() - 2 * IntEncodedWithFixedSize::ENCODED_SIZE, |d| {
+ debug!("position: {:?}", d.position());
+ let node_count = IntEncodedWithFixedSize::decode(d).0 as usize;
+ let edge_count = IntEncodedWithFixedSize::decode(d).0 as usize;
+ (node_count, edge_count)
+ });
debug!("position: {:?}", d.position());
- let node_count = IntEncodedWithFixedSize::decode(d).0 as usize;
- let edge_count = IntEncodedWithFixedSize::decode(d).0 as usize;
debug!(?node_count, ?edge_count);
- debug!("position: {:?}", d.position());
- d.set_position(start_position);
- debug!("position: {:?}", d.position());
-
let mut nodes = IndexVec::with_capacity(node_count);
let mut fingerprints = IndexVec::with_capacity(node_count);
let mut edge_list_indices = IndexVec::with_capacity(node_count);
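
[Editor's note] The hunk above replaces hand-rolled save/set_position/restore bookkeeping with a with_position helper: decode at a temporary position, then restore the old one. A minimal sketch of that pattern, with Cursor as a hypothetical stand-in for MemDecoder:

struct Cursor {
    data: Vec<u8>,
    position: usize,
}

impl Cursor {
    // Run `f` at `pos`, then restore the previous position.
    fn with_position<T>(&mut self, pos: usize, f: impl FnOnce(&mut Self) -> T) -> T {
        let saved = self.position;
        self.position = pos;
        let result = f(self);
        self.position = saved;
        result
    }

    fn read_u8(&mut self) -> u8 {
        let byte = self.data[self.position];
        self.position += 1;
        byte
    }
}

fn main() {
    let mut d = Cursor { data: vec![1, 2, 3, 4], position: 0 };
    // Peek at the trailing byte without disturbing the main read position.
    let last = d.with_position(d.data.len() - 1, |d| d.read_u8());
    assert_eq!(last, 4);
    assert_eq!(d.position, 0);
}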
diff --git a/compiler/rustc_query_system/src/ich/impls_syntax.rs b/compiler/rustc_query_system/src/ich/impls_syntax.rs
index 0bc811eb0..e673d5b8c 100644
--- a/compiler/rustc_query_system/src/ich/impls_syntax.rs
+++ b/compiler/rustc_query_system/src/ich/impls_syntax.rs
@@ -24,7 +24,7 @@ impl<'a> HashStable<StableHashingContext<'a>> for [ast::Attribute] {
.iter()
.filter(|attr| {
!attr.is_doc_comment()
- && !attr.ident().map_or(false, |ident| hcx.is_ignored_attr(ident.name))
+ && !attr.ident().is_some_and(|ident| hcx.is_ignored_attr(ident.name))
})
.collect();
@@ -38,7 +38,7 @@ impl<'a> HashStable<StableHashingContext<'a>> for [ast::Attribute] {
impl<'ctx> rustc_ast::HashStableContext for StableHashingContext<'ctx> {
fn hash_attr(&mut self, attr: &ast::Attribute, hasher: &mut StableHasher) {
// Make sure that these have been filtered out.
- debug_assert!(!attr.ident().map_or(false, |ident| self.is_ignored_attr(ident.name)));
+ debug_assert!(!attr.ident().is_some_and(|ident| self.is_ignored_attr(ident.name)));
debug_assert!(!attr.is_doc_comment());
let ast::Attribute { kind, id: _, style, span } = attr;
@@ -75,7 +75,7 @@ impl<'a> HashStable<StableHashingContext<'a>> for SourceFile {
ref normalized_pos,
} = *self;
- (name_hash as u64).hash_stable(hcx, hasher);
+ name_hash.hash_stable(hcx, hasher);
src_hash.hash_stable(hcx, hasher);
diff --git a/compiler/rustc_query_system/src/lib.rs b/compiler/rustc_query_system/src/lib.rs
index bb812b006..8c9e9cfad 100644
--- a/compiler/rustc_query_system/src/lib.rs
+++ b/compiler/rustc_query_system/src/lib.rs
@@ -16,7 +16,7 @@ extern crate rustc_data_structures;
extern crate rustc_macros;
use rustc_errors::{DiagnosticMessage, SubdiagnosticMessage};
-use rustc_macros::fluent_messages;
+use rustc_fluent_macro::fluent_messages;
pub mod cache;
pub mod dep_graph;
diff --git a/compiler/rustc_query_system/src/query/caches.rs b/compiler/rustc_query_system/src/query/caches.rs
index 29f6a07e8..9a09f516e 100644
--- a/compiler/rustc_query_system/src/query/caches.rs
+++ b/compiler/rustc_query_system/src/query/caches.rs
@@ -5,7 +5,7 @@ use rustc_data_structures::sharded;
#[cfg(parallel_compiler)]
use rustc_data_structures::sharded::Sharded;
use rustc_data_structures::sync::Lock;
-use rustc_index::vec::{Idx, IndexVec};
+use rustc_index::{Idx, IndexVec};
use std::fmt::Debug;
use std::hash::Hash;
use std::marker::PhantomData;
diff --git a/compiler/rustc_query_system/src/query/config.rs b/compiler/rustc_query_system/src/query/config.rs
index c8d779385..7e47d7012 100644
--- a/compiler/rustc_query_system/src/query/config.rs
+++ b/compiler/rustc_query_system/src/query/config.rs
@@ -4,6 +4,7 @@ use crate::dep_graph::{DepNode, DepNodeParams, SerializedDepNodeIndex};
use crate::error::HandleCycleError;
use crate::ich::StableHashingContext;
use crate::query::caches::QueryCache;
+use crate::query::DepNodeIndex;
use crate::query::{QueryContext, QueryInfo, QueryState};
use rustc_data_structures::fingerprint::Fingerprint;
@@ -12,8 +13,6 @@ use std::hash::Hash;
pub type HashResult<V> = Option<fn(&mut StableHashingContext<'_>, &V) -> Fingerprint>;
-pub type TryLoadFromDisk<Qcx, V> = Option<fn(Qcx, SerializedDepNodeIndex) -> Option<V>>;
-
pub trait QueryConfig<Qcx: QueryContext>: Copy {
fn name(self) -> &'static str;
@@ -43,7 +42,13 @@ pub trait QueryConfig<Qcx: QueryContext>: Copy {
fn compute(self, tcx: Qcx, key: Self::Key) -> Self::Value;
- fn try_load_from_disk(self, qcx: Qcx, idx: &Self::Key) -> TryLoadFromDisk<Qcx, Self::Value>;
+ fn try_load_from_disk(
+ self,
+ tcx: Qcx,
+ key: &Self::Key,
+ prev_index: SerializedDepNodeIndex,
+ index: DepNodeIndex,
+ ) -> Option<Self::Value>;
fn loadable_from_disk(self, qcx: Qcx, key: &Self::Key, idx: SerializedDepNodeIndex) -> bool;
@@ -63,7 +68,7 @@ pub trait QueryConfig<Qcx: QueryContext>: Copy {
fn handle_cycle_error(self) -> HandleCycleError;
fn hash_result(self) -> HashResult<Self::Value>;
- // Just here for convernience and checking that the key matches the kind, don't override this.
+ // Just here for convenience and checking that the key matches the kind, don't override this.
fn construct_dep_node(self, tcx: Qcx::DepContext, key: &Self::Key) -> DepNode<Qcx::DepKind> {
DepNode::construct(tcx, self.dep_kind(), key)
}
diff --git a/compiler/rustc_query_system/src/query/job.rs b/compiler/rustc_query_system/src/query/job.rs
index a534b5407..f45f7ca5d 100644
--- a/compiler/rustc_query_system/src/query/job.rs
+++ b/compiler/rustc_query_system/src/query/job.rs
@@ -22,7 +22,7 @@ use {
rustc_data_structures::fx::FxHashSet,
rustc_data_structures::sync::Lock,
rustc_data_structures::sync::Lrc,
- rustc_data_structures::{jobserver, OnDrop},
+ rustc_data_structures::{defer, jobserver},
rustc_span::DUMMY_SP,
std::iter,
std::process,
@@ -530,7 +530,7 @@ fn remove_cycle<D: DepKind>(
/// all active queries for cycles before finally resuming all the waiters at once.
#[cfg(parallel_compiler)]
pub fn deadlock<D: DepKind>(query_map: QueryMap<D>, registry: &rayon_core::Registry) {
- let on_panic = OnDrop(|| {
+ let on_panic = defer(|| {
eprintln!("deadlock handler panicked, aborting process");
process::abort();
});
@@ -633,7 +633,7 @@ pub fn print_query_stack<Qcx: QueryContext>(
};
let mut diag = Diagnostic::new(
Level::FailureNote,
- &format!("#{} [{:?}] {}", i, query_info.query.dep_kind, query_info.query.description),
+ format!("#{} [{:?}] {}", i, query_info.query.dep_kind, query_info.query.description),
);
diag.span = query_info.job.span.into();
handler.force_print_diagnostic(diag);
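
[Editor's note] The OnDrop(..) to defer(..) change above keeps the same idea: a guard whose closure runs when it is dropped, e.g. while unwinding from a panic in the deadlock handler. A minimal illustrative sketch of such a guard (not rustc_data_structures' actual implementation):

struct Defer<F: FnOnce()>(Option<F>);

impl<F: FnOnce()> Drop for Defer<F> {
    fn drop(&mut self) {
        if let Some(f) = self.0.take() {
            f();
        }
    }
}

fn defer<F: FnOnce()>(f: F) -> Defer<F> {
    Defer(Some(f))
}

fn main() {
    let on_panic = defer(|| eprintln!("cleanup ran"));
    // ... do work that might panic ...
    drop(on_panic); // runs the closure; a real handler would disarm it on success
}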
diff --git a/compiler/rustc_query_system/src/query/mod.rs b/compiler/rustc_query_system/src/query/mod.rs
index 312b0e168..f7619d75b 100644
--- a/compiler/rustc_query_system/src/query/mod.rs
+++ b/compiler/rustc_query_system/src/query/mod.rs
@@ -12,10 +12,11 @@ pub use self::caches::{
};
mod config;
-pub use self::config::{HashResult, QueryConfig, TryLoadFromDisk};
+pub use self::config::{HashResult, QueryConfig};
use crate::dep_graph::DepKind;
use crate::dep_graph::{DepNodeIndex, HasDepContext, SerializedDepNodeIndex};
+use rustc_data_structures::stable_hasher::Hash64;
use rustc_data_structures::sync::Lock;
use rustc_errors::Diagnostic;
use rustc_hir::def::DefKind;
@@ -37,7 +38,7 @@ pub struct QueryStackFrame<D: DepKind> {
/// This hash is used to deterministically pick
/// a query to remove cycles in the parallel compiler.
#[cfg(parallel_compiler)]
- hash: u64,
+ hash: Hash64,
}
impl<D: DepKind> QueryStackFrame<D> {
@@ -49,7 +50,7 @@ impl<D: DepKind> QueryStackFrame<D> {
def_kind: Option<DefKind>,
dep_kind: D,
ty_adt_id: Option<DefId>,
- _hash: impl FnOnce() -> u64,
+ _hash: impl FnOnce() -> Hash64,
) -> Self {
Self {
description,
diff --git a/compiler/rustc_query_system/src/query/plumbing.rs b/compiler/rustc_query_system/src/query/plumbing.rs
index 20310483d..730e4c8d3 100644
--- a/compiler/rustc_query_system/src/query/plumbing.rs
+++ b/compiler/rustc_query_system/src/query/plumbing.rs
@@ -236,7 +236,7 @@ pub(crate) struct CycleError<D: DepKind> {
/// It returns the shard index and a lock guard to the shard,
/// which will be used if the query is not in the cache and we need
/// to compute it.
-#[inline]
+#[inline(always)]
pub fn try_get_cached<Tcx, C>(tcx: Tcx, cache: &C, key: &C::Key) -> Option<C::Value>
where
C: QueryCache,
@@ -312,7 +312,7 @@ where
}
#[inline(never)]
-fn try_execute_query<Q, Qcx>(
+fn try_execute_query<Q, Qcx, const INCR: bool>(
query: Q,
qcx: Qcx,
span: Span,
@@ -355,7 +355,7 @@ where
// Drop the lock before we start executing the query
drop(state_lock);
- execute_job(query, qcx, state, key, id, dep_node)
+ execute_job::<_, _, INCR>(query, qcx, state, key, id, dep_node)
}
Entry::Occupied(mut entry) => {
match entry.get_mut() {
@@ -383,7 +383,7 @@ where
}
#[inline(always)]
-fn execute_job<Q, Qcx>(
+fn execute_job<Q, Qcx, const INCR: bool>(
query: Q,
qcx: Qcx,
state: &QueryState<Q::Key, Qcx::DepKind>,
@@ -398,9 +398,19 @@ where
// Use `JobOwner` so the query will be poisoned if executing it panics.
let job_owner = JobOwner { state, key };
- let (result, dep_node_index) = match qcx.dep_context().dep_graph().data() {
- None => execute_job_non_incr(query, qcx, key, id),
- Some(data) => execute_job_incr(query, qcx, data, key, dep_node, id),
+ debug_assert_eq!(qcx.dep_context().dep_graph().is_fully_enabled(), INCR);
+
+ let (result, dep_node_index) = if INCR {
+ execute_job_incr(
+ query,
+ qcx,
+ qcx.dep_context().dep_graph().data().unwrap(),
+ key,
+ dep_node,
+ id,
+ )
+ } else {
+ execute_job_non_incr(query, qcx, key, id)
};
let cache = query.query_cache(qcx);
@@ -410,10 +420,35 @@ where
// as its feeding query had. So if the fed query is red, so is its feeder, which will
// get evaluated first, and re-feed the query.
if let Some((cached_result, _)) = cache.lookup(&key) {
- panic!(
- "fed query later has its value computed. The already cached value: {}",
- (query.format_value())(&cached_result)
- );
+ let Some(hasher) = query.hash_result() else {
+ panic!(
+ "no_hash fed query later has its value computed.\n\
+ Remove `no_hash` modifier to allow recomputation.\n\
+ The already cached value: {}",
+ (query.format_value())(&cached_result)
+ );
+ };
+
+ let (old_hash, new_hash) = qcx.dep_context().with_stable_hashing_context(|mut hcx| {
+ (hasher(&mut hcx, &cached_result), hasher(&mut hcx, &result))
+ });
+ let formatter = query.format_value();
+ if old_hash != new_hash {
+ // We have an inconsistency. This can happen if one of the two
+ // results is tainted by errors. In this case, delay a bug to
+ // ensure compilation is doomed.
+ qcx.dep_context().sess().delay_span_bug(
+ DUMMY_SP,
+ format!(
+ "Computed query value for {:?}({:?}) is inconsistent with fed value,\n\
+ computed={:#?}\nfed={:#?}",
+ query.dep_kind(),
+ key,
+ formatter(&result),
+ formatter(&cached_result),
+ ),
+ );
+ }
}
}
job_owner.complete(cache, result, dep_node_index);
@@ -545,59 +580,44 @@ where
// First we try to load the result from the on-disk cache.
// Some things are never cached on disk.
- if let Some(try_load_from_disk) = query.try_load_from_disk(qcx, &key) {
- let prof_timer = qcx.dep_context().profiler().incr_cache_loading();
-
- // The call to `with_query_deserialization` enforces that no new `DepNodes`
- // are created during deserialization. See the docs of that method for more
- // details.
- let result = qcx
- .dep_context()
- .dep_graph()
- .with_query_deserialization(|| try_load_from_disk(qcx, prev_dep_node_index));
-
- prof_timer.finish_with_query_invocation_id(dep_node_index.into());
-
- if let Some(result) = result {
- if std::intrinsics::unlikely(
- qcx.dep_context().sess().opts.unstable_opts.query_dep_graph,
- ) {
- dep_graph_data.mark_debug_loaded_from_disk(*dep_node)
- }
-
- let prev_fingerprint = dep_graph_data.prev_fingerprint_of(prev_dep_node_index);
- // If `-Zincremental-verify-ich` is specified, re-hash results from
- // the cache and make sure that they have the expected fingerprint.
- //
- // If not, we still seek to verify a subset of fingerprints loaded
- // from disk. Re-hashing results is fairly expensive, so we can't
- // currently afford to verify every hash. This subset should still
- // give us some coverage of potential bugs though.
- let try_verify = prev_fingerprint.as_value().1 % 32 == 0;
- if std::intrinsics::unlikely(
- try_verify || qcx.dep_context().sess().opts.unstable_opts.incremental_verify_ich,
- ) {
- incremental_verify_ich(
- *qcx.dep_context(),
- dep_graph_data,
- &result,
- prev_dep_node_index,
- query.hash_result(),
- query.format_value(),
- );
- }
+ if let Some(result) = query.try_load_from_disk(qcx, key, prev_dep_node_index, dep_node_index) {
+ if std::intrinsics::unlikely(qcx.dep_context().sess().opts.unstable_opts.query_dep_graph) {
+ dep_graph_data.mark_debug_loaded_from_disk(*dep_node)
+ }
- return Some((result, dep_node_index));
+ let prev_fingerprint = dep_graph_data.prev_fingerprint_of(prev_dep_node_index);
+ // If `-Zincremental-verify-ich` is specified, re-hash results from
+ // the cache and make sure that they have the expected fingerprint.
+ //
+ // If not, we still seek to verify a subset of fingerprints loaded
+ // from disk. Re-hashing results is fairly expensive, so we can't
+ // currently afford to verify every hash. This subset should still
+ // give us some coverage of potential bugs though.
+ let try_verify = prev_fingerprint.split().1.as_u64() % 32 == 0;
+ if std::intrinsics::unlikely(
+ try_verify || qcx.dep_context().sess().opts.unstable_opts.incremental_verify_ich,
+ ) {
+ incremental_verify_ich(
+ *qcx.dep_context(),
+ dep_graph_data,
+ &result,
+ prev_dep_node_index,
+ query.hash_result(),
+ query.format_value(),
+ );
}
- // We always expect to find a cached result for things that
- // can be forced from `DepNode`.
- debug_assert!(
- !qcx.dep_context().fingerprint_style(dep_node.kind).reconstructible(),
- "missing on-disk cache entry for reconstructible {dep_node:?}"
- );
+ return Some((result, dep_node_index));
}
+ // We always expect to find a cached result for things that
+ // can be forced from `DepNode`.
+ debug_assert!(
+ !query.cache_on_disk(*qcx.dep_context(), key)
+ || !qcx.dep_context().fingerprint_style(dep_node.kind).reconstructible(),
+ "missing on-disk cache entry for {dep_node:?}"
+ );
+
// Sanity check for the logic in `ensure`: if the node is green and the result loadable,
// we should actually be able to load it.
debug_assert!(
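
[Editor's note] The try_verify condition above samples roughly 1 in 32 cached results for re-hashing (those where one 64-bit half of the previous fingerprint is divisible by 32), and -Zincremental-verify-ich forces verification of all of them. A tiny restatement of that condition with illustrative names:

fn should_verify(prev_fingerprint_half: u64, always_verify: bool) -> bool {
    always_verify || prev_fingerprint_half % 32 == 0
}

fn main() {
    assert!(should_verify(64, false));
    assert!(!should_verify(65, false));
    assert!(should_verify(65, true));
}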
@@ -691,7 +711,7 @@ fn incremental_verify_ich_failed<Tcx>(
// which may result in another fingerprint mismatch while we're in the middle
// of processing this one. To avoid a double-panic (which kills the process
// before we can print out the query static), we print out a terse
- // but 'safe' message if we detect a re-entrant call to this method.
+ // but 'safe' message if we detect a reentrant call to this method.
thread_local! {
static INSIDE_VERIFY_PANIC: Cell<bool> = const { Cell::new(false) };
};
@@ -780,7 +800,18 @@ pub enum QueryMode {
}
#[inline(always)]
-pub fn get_query<Q, Qcx>(
+pub fn get_query_non_incr<Q, Qcx>(query: Q, qcx: Qcx, span: Span, key: Q::Key) -> Q::Value
+where
+ Q: QueryConfig<Qcx>,
+ Qcx: QueryContext,
+{
+ debug_assert!(!qcx.dep_context().dep_graph().is_fully_enabled());
+
+ ensure_sufficient_stack(|| try_execute_query::<Q, Qcx, false>(query, qcx, span, key, None).0)
+}
+
+#[inline(always)]
+pub fn get_query_incr<Q, Qcx>(
query: Q,
qcx: Qcx,
span: Span,
@@ -791,6 +822,8 @@ where
Q: QueryConfig<Qcx>,
Qcx: QueryContext,
{
+ debug_assert!(qcx.dep_context().dep_graph().is_fully_enabled());
+
let dep_node = if let QueryMode::Ensure { check_cache } = mode {
let (must_run, dep_node) = ensure_must_run(query, qcx, &key, check_cache);
if !must_run {
@@ -801,8 +834,9 @@ where
None
};
- let (result, dep_node_index) =
- ensure_sufficient_stack(|| try_execute_query(query, qcx, span, key, dep_node));
+ let (result, dep_node_index) = ensure_sufficient_stack(|| {
+ try_execute_query::<_, _, true>(query, qcx, span, key, dep_node)
+ });
if let Some(dep_node_index) = dep_node_index {
qcx.dep_context().dep_graph().read_index(dep_node_index)
}
@@ -827,5 +861,7 @@ pub fn force_query<Q, Qcx>(
debug_assert!(!query.anon());
- ensure_sufficient_stack(|| try_execute_query(query, qcx, DUMMY_SP, key, Some(dep_node)));
+ ensure_sufficient_stack(|| {
+ try_execute_query::<_, _, true>(query, qcx, DUMMY_SP, key, Some(dep_node))
+ });
}