author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:20:39 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:20:39 +0000
commit    1376c5a617be5c25655d0d7cb63e3beaa5a6e026 (patch)
tree      3bb8d61aee02bc7a15eab3f36e3b921afc2075d0 /compiler/rustc_query_system
parent    Releasing progress-linux version 1.69.0+dfsg1-1~progress7.99u1. (diff)
Merging upstream version 1.70.0+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_query_system')
-rw-r--r--  compiler/rustc_query_system/Cargo.toml                   |   2
-rw-r--r--  compiler/rustc_query_system/messages.ftl (renamed from compiler/rustc_query_system/locales/en-US.ftl) |   0
-rw-r--r--  compiler/rustc_query_system/src/cache.rs                 |   7
-rw-r--r--  compiler/rustc_query_system/src/dep_graph/graph.rs       | 456
-rw-r--r--  compiler/rustc_query_system/src/dep_graph/mod.rs         |  34
-rw-r--r--  compiler/rustc_query_system/src/dep_graph/serialized.rs  |   5
-rw-r--r--  compiler/rustc_query_system/src/lib.rs                   |   2
-rw-r--r--  compiler/rustc_query_system/src/query/caches.rs          |  35
-rw-r--r--  compiler/rustc_query_system/src/query/config.rs          |  54
-rw-r--r--  compiler/rustc_query_system/src/query/job.rs             |   4
-rw-r--r--  compiler/rustc_query_system/src/query/mod.rs             |   3
-rw-r--r--  compiler/rustc_query_system/src/query/plumbing.rs        | 619
12 files changed, 681 insertions, 540 deletions
diff --git a/compiler/rustc_query_system/Cargo.toml b/compiler/rustc_query_system/Cargo.toml
index 7d8f75e25..12b4a1143 100644
--- a/compiler/rustc_query_system/Cargo.toml
+++ b/compiler/rustc_query_system/Cargo.toml
@@ -15,7 +15,7 @@ rustc_feature = { path = "../rustc_feature" }
rustc_hir = { path = "../rustc_hir" }
rustc_index = { path = "../rustc_index" }
rustc_macros = { path = "../rustc_macros" }
-rustc-rayon-core = { version = "0.4.0", optional = true }
+rustc-rayon-core = { version = "0.5.0", optional = true }
rustc_serialize = { path = "../rustc_serialize" }
rustc_session = { path = "../rustc_session" }
rustc_span = { path = "../rustc_span" }
diff --git a/compiler/rustc_query_system/locales/en-US.ftl b/compiler/rustc_query_system/messages.ftl
index 870e82403..870e82403 100644
--- a/compiler/rustc_query_system/locales/en-US.ftl
+++ b/compiler/rustc_query_system/messages.ftl
diff --git a/compiler/rustc_query_system/src/cache.rs b/compiler/rustc_query_system/src/cache.rs
index 7cc885be2..6e862db0b 100644
--- a/compiler/rustc_query_system/src/cache.rs
+++ b/compiler/rustc_query_system/src/cache.rs
@@ -7,11 +7,16 @@ use rustc_data_structures::sync::Lock;
use std::hash::Hash;
-#[derive(Clone)]
pub struct Cache<Key, Value> {
hashmap: Lock<FxHashMap<Key, WithDepNode<Value>>>,
}
+impl<Key: Clone, Value: Clone> Clone for Cache<Key, Value> {
+ fn clone(&self) -> Self {
+ Self { hashmap: Lock::new(self.hashmap.borrow().clone()) }
+ }
+}
+
impl<Key, Value> Default for Cache<Key, Value> {
fn default() -> Self {
Self { hashmap: Default::default() }
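The cache.rs hunk above swaps `#[derive(Clone)]` on `Cache` for a hand-written impl that borrows the map under its lock and clones the contents into a fresh lock, presumably because the derive would require the lock wrapper itself to be `Clone`. A minimal sketch of the same pattern outside rustc; the `Shared` type and the `Mutex`/`HashMap` choices are illustrative stand-ins, not the compiler's `Lock` and `FxHashMap`:

    use std::collections::HashMap;
    use std::sync::Mutex;

    // Stand-in for `Cache<Key, Value>`: a map guarded by a lock.
    struct Shared<K, V> {
        map: Mutex<HashMap<K, V>>,
    }

    // `#[derive(Clone)]` cannot be used here because `Mutex<_>` is not `Clone`;
    // instead we lock the map and clone its contents into a new lock.
    impl<K: Clone, V: Clone> Clone for Shared<K, V> {
        fn clone(&self) -> Self {
            Self { map: Mutex::new(self.map.lock().unwrap().clone()) }
        }
    }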
diff --git a/compiler/rustc_query_system/src/dep_graph/graph.rs b/compiler/rustc_query_system/src/dep_graph/graph.rs
index 59e0c3597..a9a2e6dd0 100644
--- a/compiler/rustc_query_system/src/dep_graph/graph.rs
+++ b/compiler/rustc_query_system/src/dep_graph/graph.rs
@@ -6,7 +6,6 @@ use rustc_data_structures::sharded::{self, Sharded};
use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
use rustc_data_structures::steal::Steal;
use rustc_data_structures::sync::{AtomicU32, AtomicU64, Lock, Lrc, Ordering};
-use rustc_data_structures::OnDrop;
use rustc_index::vec::IndexVec;
use rustc_serialize::opaque::{FileEncodeResult, FileEncoder};
use smallvec::{smallvec, SmallVec};
@@ -54,6 +53,11 @@ impl From<DepNodeIndex> for QueryInvocationId {
}
}
+pub struct MarkFrame<'a> {
+ index: SerializedDepNodeIndex,
+ parent: Option<&'a MarkFrame<'a>>,
+}
+
#[derive(PartialEq)]
pub enum DepNodeColor {
Red,
@@ -70,7 +74,7 @@ impl DepNodeColor {
}
}
-struct DepGraphData<K: DepKind> {
+pub struct DepGraphData<K: DepKind> {
/// The new encoding of the dependency graph, optimized for red/green
/// tracking. The `current` field is the dependency graph of only the
/// current compilation session: We don't merge the previous dep-graph into
@@ -139,7 +143,7 @@ impl<K: DepKind> DepGraph<K> {
assert_eq!(_green_node_index, DepNodeIndex::SINGLETON_DEPENDENCYLESS_ANON_NODE);
// Instantiate a dependency-less red node only once for anonymous queries.
- let (_red_node_index, _prev_and_index) = current.intern_node(
+ let (red_node_index, red_node_prev_index_and_color) = current.intern_node(
profiler,
&prev_graph,
DepNode { kind: DepKind::RED, hash: Fingerprint::ZERO.into() },
@@ -147,8 +151,21 @@ impl<K: DepKind> DepGraph<K> {
None,
false,
);
- assert_eq!(_red_node_index, DepNodeIndex::FOREVER_RED_NODE);
- assert!(matches!(_prev_and_index, None | Some((_, DepNodeColor::Red))));
+ assert_eq!(red_node_index, DepNodeIndex::FOREVER_RED_NODE);
+ match red_node_prev_index_and_color {
+ None => {
+ // This is expected when we have no previous compilation session.
+ assert!(prev_graph_node_count == 0);
+ }
+ Some((prev_red_node_index, DepNodeColor::Red)) => {
+ assert_eq!(prev_red_node_index.as_usize(), red_node_index.as_usize());
+ colors.insert(prev_red_node_index, DepNodeColor::Red);
+ }
+ Some((_, DepNodeColor::Green(_))) => {
+ // There must be a logic error somewhere if we hit this branch.
+ panic!("DepNodeIndex::FOREVER_RED_NODE evaluated to DepNodeColor::Green")
+ }
+ }
DepGraph {
data: Some(Lrc::new(DepGraphData {
@@ -168,6 +185,11 @@ impl<K: DepKind> DepGraph<K> {
DepGraph { data: None, virtual_dep_node_index: Lrc::new(AtomicU32::new(0)) }
}
+ #[inline]
+ pub fn data(&self) -> Option<&DepGraphData<K>> {
+ self.data.as_deref()
+ }
+
/// Returns `true` if we are actually building the full dep-graph, and `false` otherwise.
#[inline]
pub fn is_fully_enabled(&self) -> bool {
@@ -252,6 +274,38 @@ impl<K: DepKind> DepGraph<K> {
K::with_deps(TaskDepsRef::Forbid, op)
}
+ #[inline(always)]
+ pub fn with_task<Ctxt: HasDepContext<DepKind = K>, A: Debug, R>(
+ &self,
+ key: DepNode<K>,
+ cx: Ctxt,
+ arg: A,
+ task: fn(Ctxt, A) -> R,
+ hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
+ ) -> (R, DepNodeIndex) {
+ match self.data() {
+ Some(data) => data.with_task(key, cx, arg, task, hash_result),
+ None => (task(cx, arg), self.next_virtual_depnode_index()),
+ }
+ }
+
+ pub fn with_anon_task<Tcx: DepContext<DepKind = K>, OP, R>(
+ &self,
+ cx: Tcx,
+ dep_kind: K,
+ op: OP,
+ ) -> (R, DepNodeIndex)
+ where
+ OP: FnOnce() -> R,
+ {
+ match self.data() {
+ Some(data) => data.with_anon_task(cx, dep_kind, op),
+ None => (op(), self.next_virtual_depnode_index()),
+ }
+ }
+}
+
+impl<K: DepKind> DepGraphData<K> {
/// Starts a new dep-graph task. Dep-graph tasks are specified
/// using a free function (`task`) and **not** a closure -- this
/// is intentional because we want to exercise tight control over
@@ -288,29 +342,6 @@ impl<K: DepKind> DepGraph<K> {
task: fn(Ctxt, A) -> R,
hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
) -> (R, DepNodeIndex) {
- if self.is_fully_enabled() {
- self.with_task_impl(key, cx, arg, task, hash_result)
- } else {
- // Incremental compilation is turned off. We just execute the task
- // without tracking. We still provide a dep-node index that uniquely
- // identifies the task so that we have a cheap way of referring to
- // the query for self-profiling.
- (task(cx, arg), self.next_virtual_depnode_index())
- }
- }
-
- #[inline(always)]
- fn with_task_impl<Ctxt: HasDepContext<DepKind = K>, A: Debug, R>(
- &self,
- key: DepNode<K>,
- cx: Ctxt,
- arg: A,
- task: fn(Ctxt, A) -> R,
- hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
- ) -> (R, DepNodeIndex) {
- // This function is only called when the graph is enabled.
- let data = self.data.as_ref().unwrap();
-
// If the following assertion triggers, it can have two reasons:
// 1. Something is wrong with DepNode creation, either here or
// in `DepGraph::try_mark_green()`.
@@ -335,10 +366,8 @@ impl<K: DepKind> DepGraph<K> {
}))
};
- let task_deps_ref = match &task_deps {
- Some(deps) => TaskDepsRef::Allow(deps),
- None => TaskDepsRef::Ignore,
- };
+ let task_deps_ref =
+ task_deps.as_ref().map(TaskDepsRef::Allow).unwrap_or(TaskDepsRef::EvalAlways);
let result = K::with_deps(task_deps_ref, || task(cx, arg));
let edges = task_deps.map_or_else(|| smallvec![], |lock| lock.into_inner().reads);
@@ -351,9 +380,9 @@ impl<K: DepKind> DepGraph<K> {
let print_status = cfg!(debug_assertions) && dcx.sess().opts.unstable_opts.dep_tasks;
// Intern the new `DepNode`.
- let (dep_node_index, prev_and_color) = data.current.intern_node(
+ let (dep_node_index, prev_and_color) = self.current.intern_node(
dcx.profiler(),
- &data.previous,
+ &self.previous,
key,
edges,
current_fingerprint,
@@ -364,12 +393,12 @@ impl<K: DepKind> DepGraph<K> {
if let Some((prev_index, color)) = prev_and_color {
debug_assert!(
- data.colors.get(prev_index).is_none(),
+ self.colors.get(prev_index).is_none(),
"DepGraph::with_task() - Duplicate DepNodeColor \
insertion for {key:?}"
);
- data.colors.insert(prev_index, color);
+ self.colors.insert(prev_index, color);
}
(result, dep_node_index)
@@ -388,63 +417,66 @@ impl<K: DepKind> DepGraph<K> {
{
debug_assert!(!cx.is_eval_always(dep_kind));
- if let Some(ref data) = self.data {
- let task_deps = Lock::new(TaskDeps::default());
- let result = K::with_deps(TaskDepsRef::Allow(&task_deps), op);
- let task_deps = task_deps.into_inner();
- let task_deps = task_deps.reads;
-
- let dep_node_index = match task_deps.len() {
- 0 => {
- // Because the dep-node id of anon nodes is computed from the sets of its
- // dependencies we already know what the ID of this dependency-less node is
- // going to be (i.e. equal to the precomputed
- // `SINGLETON_DEPENDENCYLESS_ANON_NODE`). As a consequence we can skip creating
- // a `StableHasher` and sending the node through interning.
- DepNodeIndex::SINGLETON_DEPENDENCYLESS_ANON_NODE
- }
- 1 => {
- // When there is only one dependency, don't bother creating a node.
- task_deps[0]
- }
- _ => {
- // The dep node indices are hashed here instead of hashing the dep nodes of the
- // dependencies. These indices may refer to different nodes per session, but this isn't
- // a problem here because we ensure that the final dep node hash is per session only by
- // combining it with the per session random number `anon_id_seed`. This hash only needs
- // to map the dependencies to a single value on a per session basis.
- let mut hasher = StableHasher::new();
- task_deps.hash(&mut hasher);
-
- let target_dep_node = DepNode {
- kind: dep_kind,
- // Fingerprint::combine() is faster than sending Fingerprint
- // through the StableHasher (at least as long as StableHasher
- // is so slow).
- hash: data.current.anon_id_seed.combine(hasher.finish()).into(),
- };
+ let task_deps = Lock::new(TaskDeps::default());
+ let result = K::with_deps(TaskDepsRef::Allow(&task_deps), op);
+ let task_deps = task_deps.into_inner();
+ let task_deps = task_deps.reads;
+
+ let dep_node_index = match task_deps.len() {
+ 0 => {
+ // Because the dep-node id of anon nodes is computed from the sets of its
+ // dependencies we already know what the ID of this dependency-less node is
+ // going to be (i.e. equal to the precomputed
+ // `SINGLETON_DEPENDENCYLESS_ANON_NODE`). As a consequence we can skip creating
+ // a `StableHasher` and sending the node through interning.
+ DepNodeIndex::SINGLETON_DEPENDENCYLESS_ANON_NODE
+ }
+ 1 => {
+ // When there is only one dependency, don't bother creating a node.
+ task_deps[0]
+ }
+ _ => {
+ // The dep node indices are hashed here instead of hashing the dep nodes of the
+ // dependencies. These indices may refer to different nodes per session, but this isn't
+ // a problem here because we ensure that the final dep node hash is per session only by
+ // combining it with the per session random number `anon_id_seed`. This hash only needs
+ // to map the dependencies to a single value on a per session basis.
+ let mut hasher = StableHasher::new();
+ task_deps.hash(&mut hasher);
+
+ let target_dep_node = DepNode {
+ kind: dep_kind,
+ // Fingerprint::combine() is faster than sending Fingerprint
+ // through the StableHasher (at least as long as StableHasher
+ // is so slow).
+ hash: self.current.anon_id_seed.combine(hasher.finish()).into(),
+ };
- data.current.intern_new_node(
- cx.profiler(),
- target_dep_node,
- task_deps,
- Fingerprint::ZERO,
- )
- }
- };
+ self.current.intern_new_node(
+ cx.profiler(),
+ target_dep_node,
+ task_deps,
+ Fingerprint::ZERO,
+ )
+ }
+ };
- (result, dep_node_index)
- } else {
- (op(), self.next_virtual_depnode_index())
- }
+ (result, dep_node_index)
}
+}
+impl<K: DepKind> DepGraph<K> {
#[inline]
pub fn read_index(&self, dep_node_index: DepNodeIndex) {
if let Some(ref data) = self.data {
K::read_deps(|task_deps| {
let mut task_deps = match task_deps {
TaskDepsRef::Allow(deps) => deps.lock(),
+ TaskDepsRef::EvalAlways => {
+ // We don't need to record dependencies of eval_always
+ // queries. They are re-evaluated unconditionally anyway.
+ return;
+ }
TaskDepsRef::Ignore => return,
TaskDepsRef::Forbid => {
panic!("Illegal read of: {dep_node_index:?}")
@@ -519,22 +551,38 @@ impl<K: DepKind> DepGraph<K> {
// value to an existing node.
//
// For sanity, we still check that the loaded stable hash and the new one match.
- if let Some(dep_node_index) = self.dep_node_index_of_opt(&node) {
- let _current_fingerprint =
- crate::query::incremental_verify_ich(cx, result, &node, hash_result);
+ if let Some(prev_index) = data.previous.node_to_index_opt(&node) {
+ let dep_node_index = data.current.prev_index_to_index.lock()[prev_index];
+ if let Some(dep_node_index) = dep_node_index {
+ crate::query::incremental_verify_ich(
+ cx,
+ data,
+ result,
+ prev_index,
+ hash_result,
+ |value| format!("{:?}", value),
+ );
- #[cfg(debug_assertions)]
- if hash_result.is_some() {
- data.current.record_edge(dep_node_index, node, _current_fingerprint);
- }
+ #[cfg(debug_assertions)]
+ if hash_result.is_some() {
+ data.current.record_edge(
+ dep_node_index,
+ node,
+ data.prev_fingerprint_of(prev_index),
+ );
+ }
- return dep_node_index;
+ return dep_node_index;
+ }
}
let mut edges = SmallVec::new();
K::read_deps(|task_deps| match task_deps {
TaskDepsRef::Allow(deps) => edges.extend(deps.lock().reads.iter().copied()),
- TaskDepsRef::Ignore => {} // During HIR lowering, we have no dependencies.
+ TaskDepsRef::EvalAlways => {
+ edges.push(DepNodeIndex::FOREVER_RED_NODE);
+ }
+ TaskDepsRef::Ignore => {}
TaskDepsRef::Forbid => {
panic!("Cannot summarize when dependencies are not recorded.")
}
@@ -577,32 +625,63 @@ impl<K: DepKind> DepGraph<K> {
self.next_virtual_depnode_index()
}
}
+}
+impl<K: DepKind> DepGraphData<K> {
#[inline]
- pub fn dep_node_index_of(&self, dep_node: &DepNode<K>) -> DepNodeIndex {
- self.dep_node_index_of_opt(dep_node).unwrap()
+ pub fn dep_node_index_of_opt(&self, dep_node: &DepNode<K>) -> Option<DepNodeIndex> {
+ if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
+ self.current.prev_index_to_index.lock()[prev_index]
+ } else {
+ self.current
+ .new_node_to_index
+ .get_shard_by_value(dep_node)
+ .lock()
+ .get(dep_node)
+ .copied()
+ }
}
#[inline]
- pub fn dep_node_index_of_opt(&self, dep_node: &DepNode<K>) -> Option<DepNodeIndex> {
- let data = self.data.as_ref().unwrap();
- let current = &data.current;
+ pub fn dep_node_exists(&self, dep_node: &DepNode<K>) -> bool {
+ self.dep_node_index_of_opt(dep_node).is_some()
+ }
- if let Some(prev_index) = data.previous.node_to_index_opt(dep_node) {
- current.prev_index_to_index.lock()[prev_index]
+ fn node_color(&self, dep_node: &DepNode<K>) -> Option<DepNodeColor> {
+ if let Some(prev_index) = self.previous.node_to_index_opt(dep_node) {
+ self.colors.get(prev_index)
} else {
- current.new_node_to_index.get_shard_by_value(dep_node).lock().get(dep_node).copied()
+ // This is a node that did not exist in the previous compilation session.
+ None
}
}
+ /// Returns true if the given node has been marked as green during the
+ /// current compilation session. Used in various assertions
#[inline]
- pub fn dep_node_exists(&self, dep_node: &DepNode<K>) -> bool {
- self.data.is_some() && self.dep_node_index_of_opt(dep_node).is_some()
+ pub fn is_index_green(&self, prev_index: SerializedDepNodeIndex) -> bool {
+ self.colors.get(prev_index).map_or(false, |c| c.is_green())
+ }
+
+ #[inline]
+ pub fn prev_fingerprint_of(&self, prev_index: SerializedDepNodeIndex) -> Fingerprint {
+ self.previous.fingerprint_by_index(prev_index)
+ }
+
+ #[inline]
+ pub fn prev_node_of(&self, prev_index: SerializedDepNodeIndex) -> DepNode<K> {
+ self.previous.index_to_node(prev_index)
+ }
+
+ pub fn mark_debug_loaded_from_disk(&self, dep_node: DepNode<K>) {
+ self.debug_loaded_from_disk.lock().insert(dep_node);
}
+}
+impl<K: DepKind> DepGraph<K> {
#[inline]
- pub fn prev_fingerprint_of(&self, dep_node: &DepNode<K>) -> Option<Fingerprint> {
- self.data.as_ref().unwrap().previous.fingerprint_of(dep_node)
+ pub fn dep_node_exists(&self, dep_node: &DepNode<K>) -> bool {
+ self.data.as_ref().map_or(false, |data| data.dep_node_exists(dep_node))
}
/// Checks whether a previous work product exists for `v` and, if
@@ -617,10 +696,6 @@ impl<K: DepKind> DepGraph<K> {
&self.data.as_ref().unwrap().previous_work_products
}
- pub fn mark_debug_loaded_from_disk(&self, dep_node: DepNode<K>) {
- self.data.as_ref().unwrap().debug_loaded_from_disk.lock().insert(dep_node);
- }
-
pub fn debug_was_loaded_from_disk(&self, dep_node: DepNode<K>) -> bool {
self.data.as_ref().unwrap().debug_loaded_from_disk.lock().contains(&dep_node)
}
@@ -645,17 +720,22 @@ impl<K: DepKind> DepGraph<K> {
fn node_color(&self, dep_node: &DepNode<K>) -> Option<DepNodeColor> {
if let Some(ref data) = self.data {
- if let Some(prev_index) = data.previous.node_to_index_opt(dep_node) {
- return data.colors.get(prev_index);
- } else {
- // This is a node that did not exist in the previous compilation session.
- return None;
- }
+ return data.node_color(dep_node);
}
None
}
+ pub fn try_mark_green<Qcx: QueryContext<DepKind = K>>(
+ &self,
+ qcx: Qcx,
+ dep_node: &DepNode<K>,
+ ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
+ self.data().and_then(|data| data.try_mark_green(qcx, dep_node))
+ }
+}
+
+impl<K: DepKind> DepGraphData<K> {
/// Try to mark a node index for the node dep_node.
///
/// A node will have an index, when it's already been marked green, or when we can mark it
@@ -668,43 +748,33 @@ impl<K: DepKind> DepGraph<K> {
) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
debug_assert!(!qcx.dep_context().is_eval_always(dep_node.kind));
- // Return None if the dep graph is disabled
- let data = self.data.as_ref()?;
-
// Return None if the dep node didn't exist in the previous session
- let prev_index = data.previous.node_to_index_opt(dep_node)?;
+ let prev_index = self.previous.node_to_index_opt(dep_node)?;
- match data.colors.get(prev_index) {
- Some(DepNodeColor::Green(dep_node_index)) => return Some((prev_index, dep_node_index)),
- Some(DepNodeColor::Red) => return None,
- None => {}
+ match self.colors.get(prev_index) {
+ Some(DepNodeColor::Green(dep_node_index)) => Some((prev_index, dep_node_index)),
+ Some(DepNodeColor::Red) => None,
+ None => {
+ // This DepNode and the corresponding query invocation existed
+ // in the previous compilation session too, so we can try to
+ // mark it as green by recursively marking all of its
+ // dependencies green.
+ self.try_mark_previous_green(qcx, prev_index, &dep_node, None)
+ .map(|dep_node_index| (prev_index, dep_node_index))
+ }
}
-
- let backtrace = backtrace_printer(qcx.dep_context().sess(), data, prev_index);
-
- // This DepNode and the corresponding query invocation existed
- // in the previous compilation session too, so we can try to
- // mark it as green by recursively marking all of its
- // dependencies green.
- let ret = self
- .try_mark_previous_green(qcx, data, prev_index, &dep_node)
- .map(|dep_node_index| (prev_index, dep_node_index));
-
- // We succeeded, no backtrace.
- backtrace.disable();
- return ret;
}
- #[instrument(skip(self, qcx, data, parent_dep_node_index), level = "debug")]
+ #[instrument(skip(self, qcx, parent_dep_node_index, frame), level = "debug")]
fn try_mark_parent_green<Qcx: QueryContext<DepKind = K>>(
&self,
qcx: Qcx,
- data: &DepGraphData<K>,
parent_dep_node_index: SerializedDepNodeIndex,
dep_node: &DepNode<K>,
+ frame: Option<&MarkFrame<'_>>,
) -> Option<()> {
- let dep_dep_node_color = data.colors.get(parent_dep_node_index);
- let dep_dep_node = &data.previous.index_to_node(parent_dep_node_index);
+ let dep_dep_node_color = self.colors.get(parent_dep_node_index);
+ let dep_dep_node = &self.previous.index_to_node(parent_dep_node_index);
match dep_dep_node_color {
Some(DepNodeColor::Green(_)) => {
@@ -734,7 +804,7 @@ impl<K: DepKind> DepGraph<K> {
);
let node_index =
- self.try_mark_previous_green(qcx, data, parent_dep_node_index, dep_dep_node);
+ self.try_mark_previous_green(qcx, parent_dep_node_index, dep_dep_node, frame);
if node_index.is_some() {
debug!("managed to MARK dependency {dep_dep_node:?} as green",);
@@ -744,13 +814,13 @@ impl<K: DepKind> DepGraph<K> {
// We failed to mark it green, so we try to force the query.
debug!("trying to force dependency {dep_dep_node:?}");
- if !qcx.dep_context().try_force_from_dep_node(*dep_dep_node) {
+ if !qcx.dep_context().try_force_from_dep_node(*dep_dep_node, frame) {
// The DepNode could not be forced.
debug!("dependency {dep_dep_node:?} could not be forced");
return None;
}
- let dep_dep_node_color = data.colors.get(parent_dep_node_index);
+ let dep_dep_node_color = self.colors.get(parent_dep_node_index);
match dep_dep_node_color {
Some(DepNodeColor::Green(_)) => {
@@ -783,32 +853,31 @@ impl<K: DepKind> DepGraph<K> {
}
/// Try to mark a dep-node which existed in the previous compilation session as green.
- #[instrument(skip(self, qcx, data, prev_dep_node_index), level = "debug")]
+ #[instrument(skip(self, qcx, prev_dep_node_index, frame), level = "debug")]
fn try_mark_previous_green<Qcx: QueryContext<DepKind = K>>(
&self,
qcx: Qcx,
- data: &DepGraphData<K>,
prev_dep_node_index: SerializedDepNodeIndex,
dep_node: &DepNode<K>,
+ frame: Option<&MarkFrame<'_>>,
) -> Option<DepNodeIndex> {
+ let frame = MarkFrame { index: prev_dep_node_index, parent: frame };
+
#[cfg(not(parallel_compiler))]
{
debug_assert!(!self.dep_node_exists(dep_node));
- debug_assert!(data.colors.get(prev_dep_node_index).is_none());
+ debug_assert!(self.colors.get(prev_dep_node_index).is_none());
}
// We never try to mark eval_always nodes as green
debug_assert!(!qcx.dep_context().is_eval_always(dep_node.kind));
- debug_assert_eq!(data.previous.index_to_node(prev_dep_node_index), *dep_node);
+ debug_assert_eq!(self.previous.index_to_node(prev_dep_node_index), *dep_node);
- let prev_deps = data.previous.edge_targets_from(prev_dep_node_index);
+ let prev_deps = self.previous.edge_targets_from(prev_dep_node_index);
for &dep_dep_node_index in prev_deps {
- let backtrace = backtrace_printer(qcx.dep_context().sess(), data, dep_dep_node_index);
- let success = self.try_mark_parent_green(qcx, data, dep_dep_node_index, dep_node);
- backtrace.disable();
- success?;
+ self.try_mark_parent_green(qcx, dep_dep_node_index, dep_node, Some(&frame))?;
}
// If we got here without hitting a `return` that means that all
@@ -819,9 +888,9 @@ impl<K: DepKind> DepGraph<K> {
// We allocate an entry for the node in the current dependency graph and
// adding all the appropriate edges imported from the previous graph
- let dep_node_index = data.current.promote_node_and_deps_to_current(
+ let dep_node_index = self.current.promote_node_and_deps_to_current(
qcx.dep_context().profiler(),
- &data.previous,
+ &self.previous,
prev_dep_node_index,
);
@@ -833,20 +902,20 @@ impl<K: DepKind> DepGraph<K> {
#[cfg(not(parallel_compiler))]
debug_assert!(
- data.colors.get(prev_dep_node_index).is_none(),
+ self.colors.get(prev_dep_node_index).is_none(),
"DepGraph::try_mark_previous_green() - Duplicate DepNodeColor \
insertion for {dep_node:?}"
);
if !side_effects.is_empty() {
- self.with_query_deserialization(|| {
- self.emit_side_effects(qcx, data, dep_node_index, side_effects)
+ qcx.dep_context().dep_graph().with_query_deserialization(|| {
+ self.emit_side_effects(qcx, dep_node_index, side_effects)
});
}
// ... and finally storing a "Green" entry in the color map.
// Multiple threads can all write the same color here
- data.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index));
+ self.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index));
debug!("successfully marked {dep_node:?} as green");
Some(dep_node_index)
@@ -859,11 +928,10 @@ impl<K: DepKind> DepGraph<K> {
fn emit_side_effects<Qcx: QueryContext<DepKind = K>>(
&self,
qcx: Qcx,
- data: &DepGraphData<K>,
dep_node_index: DepNodeIndex,
side_effects: QuerySideEffects,
) {
- let mut processed = data.processed_side_effects.lock();
+ let mut processed = self.processed_side_effects.lock();
if processed.insert(dep_node_index) {
// We were the first to insert the node in the set so this thread
@@ -879,7 +947,9 @@ impl<K: DepKind> DepGraph<K> {
}
}
}
+}
+impl<K: DepKind> DepGraph<K> {
/// Returns true if the given node has been marked as red during the
/// current compilation session. Used in various assertions
pub fn is_red(&self, dep_node: &DepNode<K>) -> bool {
@@ -937,6 +1007,7 @@ impl<K: DepKind> DepGraph<K> {
}
pub(crate) fn next_virtual_depnode_index(&self) -> DepNodeIndex {
+ debug_assert!(self.data.is_none());
let index = self.virtual_dep_node_index.fetch_add(1, Relaxed);
DepNodeIndex::from_u32(index)
}
@@ -1020,7 +1091,7 @@ pub(super) struct CurrentDepGraph<K: DepKind> {
/// This is used to verify that fingerprints do not change between the creation of a node
/// and its recomputation.
#[cfg(debug_assertions)]
- fingerprints: Lock<FxHashMap<DepNode<K>, Fingerprint>>,
+ fingerprints: Lock<IndexVec<DepNodeIndex, Option<Fingerprint>>>,
/// Used to trap when a specific edge is added to the graph.
/// This is used for debug purposes and is only active with `debug_assertions`.
@@ -1106,7 +1177,7 @@ impl<K: DepKind> CurrentDepGraph<K> {
#[cfg(debug_assertions)]
forbidden_edge,
#[cfg(debug_assertions)]
- fingerprints: Lock::new(Default::default()),
+ fingerprints: Lock::new(IndexVec::from_elem_n(None, new_node_count_estimate)),
total_read_count: AtomicU64::new(0),
total_duplicate_read_count: AtomicU64::new(0),
node_intern_event_id,
@@ -1118,14 +1189,8 @@ impl<K: DepKind> CurrentDepGraph<K> {
if let Some(forbidden_edge) = &self.forbidden_edge {
forbidden_edge.index_to_node.lock().insert(dep_node_index, key);
}
- match self.fingerprints.lock().entry(key) {
- Entry::Vacant(v) => {
- v.insert(fingerprint);
- }
- Entry::Occupied(o) => {
- assert_eq!(*o.get(), fingerprint, "Unstable fingerprints for {:?}", key);
- }
- }
+ let previous = *self.fingerprints.lock().get_or_insert_with(dep_node_index, || fingerprint);
+ assert_eq!(previous, fingerprint, "Unstable fingerprints for {:?}", key);
}
/// Writes the node to the current dep-graph and allocates a `DepNodeIndex` for it.
@@ -1310,10 +1375,13 @@ pub enum TaskDepsRef<'a, K: DepKind> {
/// `TaskDeps`. This is used when executing a 'normal' query
/// (no `eval_always` modifier)
Allow(&'a Lock<TaskDeps<K>>),
- /// New dependencies are ignored. This is used when
- /// executing an `eval_always` query, since there's no
+ /// This is used when executing an `eval_always` query. We don't
/// need to track dependencies for a query that's always
- /// re-executed. This is also used for `dep_graph.with_ignore`
+ /// re-executed -- but we need to know that this is an `eval_always`
+ /// query in order to emit dependencies to `DepNodeIndex::FOREVER_RED_NODE`
+ /// when directly feeding other queries.
+ EvalAlways,
+ /// New dependencies are ignored. This is also used for `dep_graph.with_ignore`.
Ignore,
/// Any attempt to add new dependencies will cause a panic.
/// This is used when decoding a query result from disk,
@@ -1381,25 +1449,25 @@ impl DepNodeColorMap {
}
}
-fn backtrace_printer<'a, K: DepKind>(
- sess: &'a rustc_session::Session,
- graph: &'a DepGraphData<K>,
- node: SerializedDepNodeIndex,
-) -> OnDrop<impl Fn() + 'a> {
- OnDrop(
- #[inline(never)]
- #[cold]
- move || {
- let node = graph.previous.index_to_node(node);
- // Do not try to rely on DepNode's Debug implementation, since it may panic.
- let diag = rustc_errors::Diagnostic::new(
- rustc_errors::Level::FailureNote,
- &format!(
- "encountered while trying to mark dependency green: {:?}({})",
- node.kind, node.hash
- ),
- );
- sess.diagnostic().force_print_diagnostic(diag);
- },
- )
+#[inline(never)]
+#[cold]
+pub(crate) fn print_markframe_trace<K: DepKind>(
+ graph: &DepGraph<K>,
+ frame: Option<&MarkFrame<'_>>,
+) {
+ let data = graph.data.as_ref().unwrap();
+
+ eprintln!("there was a panic while trying to force a dep node");
+ eprintln!("try_mark_green dep node stack:");
+
+ let mut i = 0;
+ let mut current = frame;
+ while let Some(frame) = current {
+ let node = data.previous.index_to_node(frame.index);
+ eprintln!("#{i} {:?}", node);
+ current = frame.parent;
+ i += 1;
+ }
+
+ eprintln!("end of try_mark_green dep node stack");
}
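The largest change in graph.rs above threads an explicit `MarkFrame` through `try_mark_previous_green`/`try_mark_parent_green`: each recursion level stacks a frame pointing at its parent, and `print_markframe_trace` walks that chain only if forcing a dependency panics, replacing the per-node `OnDrop` backtrace printer. A rough sketch of the parent-pointer frame idea, with illustrative names (`Frame`, `mark`) rather than the compiler's:

    // Each recursive call stacks a frame that borrows its caller's frame, so on
    // failure the whole chain can be replayed without heap allocation.
    struct Frame<'a> {
        index: usize,
        parent: Option<&'a Frame<'a>>,
    }

    fn mark(index: usize, parent: Option<&Frame<'_>>, depth_left: usize) {
        let frame = Frame { index, parent };
        if depth_left == 0 {
            // Stand-in for a panic while forcing a dep node: print the stack.
            let mut cur = Some(&frame);
            let mut i = 0;
            while let Some(f) = cur {
                eprintln!("#{i} node {}", f.index);
                cur = f.parent;
                i += 1;
            }
            return;
        }
        mark(index + 1, Some(&frame), depth_left - 1);
    }

    fn main() {
        mark(0, None, 3);
    }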
diff --git a/compiler/rustc_query_system/src/dep_graph/mod.rs b/compiler/rustc_query_system/src/dep_graph/mod.rs
index 6969f2dbe..40e713198 100644
--- a/compiler/rustc_query_system/src/dep_graph/mod.rs
+++ b/compiler/rustc_query_system/src/dep_graph/mod.rs
@@ -6,7 +6,8 @@ mod serialized;
pub use dep_node::{DepKindStruct, DepNode, DepNodeParams, WorkProductId};
pub use graph::{
- hash_result, DepGraph, DepNodeColor, DepNodeIndex, TaskDeps, TaskDepsRef, WorkProduct,
+ hash_result, DepGraph, DepGraphData, DepNodeColor, DepNodeIndex, TaskDeps, TaskDepsRef,
+ WorkProduct,
};
pub use query::DepGraphQuery;
pub use serialized::{SerializedDepGraph, SerializedDepNodeIndex};
@@ -16,8 +17,10 @@ use rustc_data_structures::profiling::SelfProfilerRef;
use rustc_serialize::{opaque::FileEncoder, Encodable};
use rustc_session::Session;
-use std::fmt;
use std::hash::Hash;
+use std::{fmt, panic};
+
+use self::graph::{print_markframe_trace, MarkFrame};
pub trait DepContext: Copy {
type DepKind: self::DepKind;
@@ -52,11 +55,23 @@ pub trait DepContext: Copy {
}
/// Try to force a dep node to execute and see if it's green.
- #[instrument(skip(self), level = "debug")]
- fn try_force_from_dep_node(self, dep_node: DepNode<Self::DepKind>) -> bool {
+ #[inline]
+ #[instrument(skip(self, frame), level = "debug")]
+ fn try_force_from_dep_node(
+ self,
+ dep_node: DepNode<Self::DepKind>,
+ frame: Option<&MarkFrame<'_>>,
+ ) -> bool {
let cb = self.dep_kind_info(dep_node.kind);
if let Some(f) = cb.force_from_dep_node {
- f(self, dep_node);
+ if let Err(value) = panic::catch_unwind(panic::AssertUnwindSafe(|| {
+ f(self, dep_node);
+ })) {
+ if !value.is::<rustc_errors::FatalErrorMarker>() {
+ print_markframe_trace(self.dep_graph(), frame);
+ }
+ panic::resume_unwind(value)
+ }
true
} else {
false
@@ -88,6 +103,15 @@ impl<T: DepContext> HasDepContext for T {
}
}
+impl<T: HasDepContext, Q: Copy> HasDepContext for (T, Q) {
+ type DepKind = T::DepKind;
+ type DepContext = T::DepContext;
+
+ fn dep_context(&self) -> &Self::DepContext {
+ self.0.dep_context()
+ }
+}
+
/// Describes the contents of the fingerprint generated by a given query.
#[derive(Debug, PartialEq, Eq, Copy, Clone)]
pub enum FingerprintStyle {
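In mod.rs above, `try_force_from_dep_node` now runs the force callback under `panic::catch_unwind` so a panic can be annotated with the `MarkFrame` stack (skipped for `FatalErrorMarker` panics) before being re-raised with `resume_unwind`. A minimal sketch of that catch-annotate-resume pattern; `run_with_context` and `extra_context` are hypothetical names, not rustc APIs:

    use std::panic;

    // Run `f`; if it panics, emit extra diagnostics and then let the panic
    // continue unwinding exactly as before.
    fn run_with_context<R>(f: impl FnOnce() -> R, extra_context: impl FnOnce()) -> R {
        match panic::catch_unwind(panic::AssertUnwindSafe(f)) {
            Ok(value) => value,
            Err(payload) => {
                extra_context();
                panic::resume_unwind(payload)
            }
        }
    }

    fn main() {
        let v = run_with_context(|| 2 + 2, || eprintln!("while computing 2 + 2"));
        assert_eq!(v, 4);
    }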
diff --git a/compiler/rustc_query_system/src/dep_graph/serialized.rs b/compiler/rustc_query_system/src/dep_graph/serialized.rs
index 29513df46..3d19a8491 100644
--- a/compiler/rustc_query_system/src/dep_graph/serialized.rs
+++ b/compiler/rustc_query_system/src/dep_graph/serialized.rs
@@ -80,11 +80,6 @@ impl<K: DepKind> SerializedDepGraph<K> {
}
#[inline]
- pub fn fingerprint_of(&self, dep_node: &DepNode<K>) -> Option<Fingerprint> {
- self.index.get(dep_node).map(|&node_index| self.fingerprints[node_index])
- }
-
- #[inline]
pub fn fingerprint_by_index(&self, dep_node_index: SerializedDepNodeIndex) -> Fingerprint {
self.fingerprints[dep_node_index]
}
diff --git a/compiler/rustc_query_system/src/lib.rs b/compiler/rustc_query_system/src/lib.rs
index 6cc4c9a7e..bb812b006 100644
--- a/compiler/rustc_query_system/src/lib.rs
+++ b/compiler/rustc_query_system/src/lib.rs
@@ -30,4 +30,4 @@ pub use error::LayoutOfDepth;
pub use error::QueryOverflow;
pub use values::Value;
-fluent_messages! { "../locales/en-US.ftl" }
+fluent_messages! { "../messages.ftl" }
diff --git a/compiler/rustc_query_system/src/query/caches.rs b/compiler/rustc_query_system/src/query/caches.rs
index 4b3cd16c2..29f6a07e8 100644
--- a/compiler/rustc_query_system/src/query/caches.rs
+++ b/compiler/rustc_query_system/src/query/caches.rs
@@ -16,17 +16,11 @@ pub trait CacheSelector<'tcx, V> {
V: Copy;
}
-pub trait QueryStorage {
- type Value: Copy;
-}
-
-pub trait QueryCache: QueryStorage + Sized {
+pub trait QueryCache: Sized {
type Key: Hash + Eq + Copy + Debug;
+ type Value: Copy;
/// Checks if the query is already computed and in the cache.
- /// It returns the shard index and a lock guard to the shard,
- /// which will be used if the query is not in the cache and we need
- /// to compute it.
fn lookup(&self, key: &Self::Key) -> Option<(Self::Value, DepNodeIndex)>;
fn complete(&self, key: Self::Key, value: Self::Value, index: DepNodeIndex);
@@ -55,16 +49,13 @@ impl<K, V> Default for DefaultCache<K, V> {
}
}
-impl<K: Eq + Hash, V: Copy + Debug> QueryStorage for DefaultCache<K, V> {
- type Value = V;
-}
-
impl<K, V> QueryCache for DefaultCache<K, V>
where
K: Eq + Hash + Copy + Debug,
- V: Copy + Debug,
+ V: Copy,
{
type Key = K;
+ type Value = V;
#[inline(always)]
fn lookup(&self, key: &K) -> Option<(V, DepNodeIndex)> {
@@ -127,15 +118,12 @@ impl<V> Default for SingleCache<V> {
}
}
-impl<V: Copy + Debug> QueryStorage for SingleCache<V> {
- type Value = V;
-}
-
impl<V> QueryCache for SingleCache<V>
where
- V: Copy + Debug,
+ V: Copy,
{
type Key = ();
+ type Value = V;
#[inline(always)]
fn lookup(&self, _key: &()) -> Option<(V, DepNodeIndex)> {
@@ -148,7 +136,9 @@ where
}
fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
- self.cache.lock().as_ref().map(|value| f(&(), &value.0, value.1));
+ if let Some(value) = self.cache.lock().as_ref() {
+ f(&(), &value.0, value.1)
+ }
}
}
@@ -173,16 +163,13 @@ impl<K: Idx, V> Default for VecCache<K, V> {
}
}
-impl<K: Eq + Idx, V: Copy + Debug> QueryStorage for VecCache<K, V> {
- type Value = V;
-}
-
impl<K, V> QueryCache for VecCache<K, V>
where
K: Eq + Idx + Copy + Debug,
- V: Copy + Debug,
+ V: Copy,
{
type Key = K;
+ type Value = V;
#[inline(always)]
fn lookup(&self, key: &K) -> Option<(V, DepNodeIndex)> {
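The caches.rs change above folds the old `QueryStorage` trait into `QueryCache`, so a single trait now carries the key type, the value type, `lookup`, and `complete`, and the `Debug` bound on cached values is dropped. A toy version of the merged trait shape; the `Index`, `QueryCacheSketch`, and `MapCache` names are illustrative, not rustc's:

    use std::collections::HashMap;
    use std::hash::Hash;
    use std::sync::Mutex;

    type Index = u32; // stand-in for DepNodeIndex

    // After the merge, one trait describes both the storage and its value type.
    trait QueryCacheSketch: Sized {
        type Key: Hash + Eq + Copy;
        type Value: Copy;

        fn lookup(&self, key: &Self::Key) -> Option<(Self::Value, Index)>;
        fn complete(&self, key: Self::Key, value: Self::Value, index: Index);
    }

    struct MapCache<K, V> {
        map: Mutex<HashMap<K, (V, Index)>>,
    }

    impl<K: Hash + Eq + Copy, V: Copy> QueryCacheSketch for MapCache<K, V> {
        type Key = K;
        type Value = V;

        fn lookup(&self, key: &K) -> Option<(V, Index)> {
            self.map.lock().unwrap().get(key).copied()
        }

        fn complete(&self, key: K, value: V, index: Index) {
            self.map.lock().unwrap().insert(key, (value, index));
        }
    }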
diff --git a/compiler/rustc_query_system/src/query/config.rs b/compiler/rustc_query_system/src/query/config.rs
index d56373873..c8d779385 100644
--- a/compiler/rustc_query_system/src/query/config.rs
+++ b/compiler/rustc_query_system/src/query/config.rs
@@ -4,59 +4,67 @@ use crate::dep_graph::{DepNode, DepNodeParams, SerializedDepNodeIndex};
use crate::error::HandleCycleError;
use crate::ich::StableHashingContext;
use crate::query::caches::QueryCache;
-use crate::query::{QueryContext, QueryState};
+use crate::query::{QueryContext, QueryInfo, QueryState};
use rustc_data_structures::fingerprint::Fingerprint;
use std::fmt::Debug;
use std::hash::Hash;
-pub type HashResult<Qcx, Q> =
- Option<fn(&mut StableHashingContext<'_>, &<Q as QueryConfig<Qcx>>::Value) -> Fingerprint>;
+pub type HashResult<V> = Option<fn(&mut StableHashingContext<'_>, &V) -> Fingerprint>;
-pub type TryLoadFromDisk<Qcx, Q> =
- Option<fn(Qcx, SerializedDepNodeIndex) -> Option<<Q as QueryConfig<Qcx>>::Value>>;
+pub type TryLoadFromDisk<Qcx, V> = Option<fn(Qcx, SerializedDepNodeIndex) -> Option<V>>;
-pub trait QueryConfig<Qcx: QueryContext> {
- const NAME: &'static str;
+pub trait QueryConfig<Qcx: QueryContext>: Copy {
+ fn name(self) -> &'static str;
// `Key` and `Value` are `Copy` instead of `Clone` to ensure copying them stays cheap,
// but it isn't necessary.
type Key: DepNodeParams<Qcx::DepContext> + Eq + Hash + Copy + Debug;
- type Value: Debug + Copy;
+ type Value: Copy;
type Cache: QueryCache<Key = Self::Key, Value = Self::Value>;
+ fn format_value(self) -> fn(&Self::Value) -> String;
+
// Don't use this method to access query results, instead use the methods on TyCtxt
- fn query_state<'a>(tcx: Qcx) -> &'a QueryState<Self::Key, Qcx::DepKind>
+ fn query_state<'a>(self, tcx: Qcx) -> &'a QueryState<Self::Key, Qcx::DepKind>
where
Qcx: 'a;
// Don't use this method to access query results, instead use the methods on TyCtxt
- fn query_cache<'a>(tcx: Qcx) -> &'a Self::Cache
+ fn query_cache<'a>(self, tcx: Qcx) -> &'a Self::Cache
where
Qcx: 'a;
- fn cache_on_disk(tcx: Qcx::DepContext, key: &Self::Key) -> bool;
+ fn cache_on_disk(self, tcx: Qcx::DepContext, key: &Self::Key) -> bool;
// Don't use this method to compute query results, instead use the methods on TyCtxt
- fn execute_query(tcx: Qcx::DepContext, k: Self::Key) -> Self::Value;
+ fn execute_query(self, tcx: Qcx::DepContext, k: Self::Key) -> Self::Value;
+
+ fn compute(self, tcx: Qcx, key: Self::Key) -> Self::Value;
- fn compute(tcx: Qcx, key: Self::Key) -> Self::Value;
+ fn try_load_from_disk(self, qcx: Qcx, idx: &Self::Key) -> TryLoadFromDisk<Qcx, Self::Value>;
- fn try_load_from_disk(qcx: Qcx, idx: &Self::Key) -> TryLoadFromDisk<Qcx, Self>;
+ fn loadable_from_disk(self, qcx: Qcx, key: &Self::Key, idx: SerializedDepNodeIndex) -> bool;
- const ANON: bool;
- const EVAL_ALWAYS: bool;
- const DEPTH_LIMIT: bool;
- const FEEDABLE: bool;
+ /// Synthesize an error value to let compilation continue after a cycle.
+ fn value_from_cycle_error(
+ self,
+ tcx: Qcx::DepContext,
+ cycle: &[QueryInfo<Qcx::DepKind>],
+ ) -> Self::Value;
- const DEP_KIND: Qcx::DepKind;
- const HANDLE_CYCLE_ERROR: HandleCycleError;
+ fn anon(self) -> bool;
+ fn eval_always(self) -> bool;
+ fn depth_limit(self) -> bool;
+ fn feedable(self) -> bool;
- const HASH_RESULT: HashResult<Qcx, Self>;
+ fn dep_kind(self) -> Qcx::DepKind;
+ fn handle_cycle_error(self) -> HandleCycleError;
+ fn hash_result(self) -> HashResult<Self::Value>;
// Just here for convenience and checking that the key matches the kind, don't override this.
- fn construct_dep_node(tcx: Qcx::DepContext, key: &Self::Key) -> DepNode<Qcx::DepKind> {
- DepNode::construct(tcx, Self::DEP_KIND, key)
+ fn construct_dep_node(self, tcx: Qcx::DepContext, key: &Self::Key) -> DepNode<Qcx::DepKind> {
+ DepNode::construct(tcx, self.dep_kind(), key)
}
}
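The config.rs hunk above turns `QueryConfig`'s associated constants (`ANON`, `EVAL_ALWAYS`, `DEP_KIND`, `HASH_RESULT`, ...) into methods on `self` and requires the trait to be `Copy`, so a query can be passed around as a value instead of only as a type parameter. A shortened sketch of that shape; the `QueryProps`, `TypeCheck`, and `execute` names are illustrative, not the real trait:

    // Properties that used to be `const ANON: bool` etc. become methods taking `self`.
    trait QueryProps: Copy {
        fn anon(self) -> bool;
        fn eval_always(self) -> bool;
    }

    #[derive(Clone, Copy)]
    struct TypeCheck;

    impl QueryProps for TypeCheck {
        fn anon(self) -> bool { false }
        fn eval_always(self) -> bool { true }
    }

    // Callers now thread the query value through; for a zero-sized marker type
    // this compiles down to the same constants as before.
    fn execute<Q: QueryProps>(query: Q) -> bool {
        !query.anon() && !query.eval_always()
    }

    fn main() {
        assert!(!execute(TypeCheck));
    }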
diff --git a/compiler/rustc_query_system/src/query/job.rs b/compiler/rustc_query_system/src/query/job.rs
index a5a2f0093..a534b5407 100644
--- a/compiler/rustc_query_system/src/query/job.rs
+++ b/compiler/rustc_query_system/src/query/job.rs
@@ -18,11 +18,11 @@ use std::num::NonZeroU64;
#[cfg(parallel_compiler)]
use {
parking_lot::{Condvar, Mutex},
+ rayon_core,
rustc_data_structures::fx::FxHashSet,
rustc_data_structures::sync::Lock,
rustc_data_structures::sync::Lrc,
rustc_data_structures::{jobserver, OnDrop},
- rustc_rayon_core as rayon_core,
rustc_span::DUMMY_SP,
std::iter,
std::process,
@@ -124,8 +124,6 @@ impl<D: DepKind> QueryJob<D> {
}
impl QueryJobId {
- #[cold]
- #[inline(never)]
#[cfg(not(parallel_compiler))]
pub(super) fn find_cycle_in_stack<D: DepKind>(
&self,
diff --git a/compiler/rustc_query_system/src/query/mod.rs b/compiler/rustc_query_system/src/query/mod.rs
index 383c63cd2..312b0e168 100644
--- a/compiler/rustc_query_system/src/query/mod.rs
+++ b/compiler/rustc_query_system/src/query/mod.rs
@@ -8,8 +8,7 @@ pub use self::job::{print_query_stack, QueryInfo, QueryJob, QueryJobId, QueryJob
mod caches;
pub use self::caches::{
- CacheSelector, DefaultCacheSelector, QueryCache, QueryStorage, SingleCacheSelector,
- VecCacheSelector,
+ CacheSelector, DefaultCacheSelector, QueryCache, SingleCacheSelector, VecCacheSelector,
};
mod config;
diff --git a/compiler/rustc_query_system/src/query/plumbing.rs b/compiler/rustc_query_system/src/query/plumbing.rs
index 5f003fa70..20310483d 100644
--- a/compiler/rustc_query_system/src/query/plumbing.rs
+++ b/compiler/rustc_query_system/src/query/plumbing.rs
@@ -3,22 +3,22 @@
//! manage the caches, and so forth.
use crate::dep_graph::{DepContext, DepKind, DepNode, DepNodeIndex, DepNodeParams};
+use crate::dep_graph::{DepGraphData, HasDepContext};
use crate::ich::StableHashingContext;
use crate::query::caches::QueryCache;
+#[cfg(parallel_compiler)]
+use crate::query::job::QueryLatch;
use crate::query::job::{report_cycle, QueryInfo, QueryJob, QueryJobId, QueryJobInfo};
+use crate::query::SerializedDepNodeIndex;
use crate::query::{QueryContext, QueryMap, QuerySideEffects, QueryStackFrame};
-use crate::values::Value;
use crate::HandleCycleError;
use rustc_data_structures::fingerprint::Fingerprint;
use rustc_data_structures::fx::FxHashMap;
-#[cfg(parallel_compiler)]
-use rustc_data_structures::profiling::TimingGuard;
-#[cfg(parallel_compiler)]
-use rustc_data_structures::sharded::Sharded;
use rustc_data_structures::stack::ensure_sufficient_stack;
use rustc_data_structures::sync::Lock;
+#[cfg(parallel_compiler)]
+use rustc_data_structures::{cold_path, sharded::Sharded};
use rustc_errors::{DiagnosticBuilder, ErrorGuaranteed, FatalError};
-use rustc_session::Session;
use rustc_span::{Span, DUMMY_SP};
use std::cell::Cell;
use std::collections::hash_map::Entry;
@@ -115,48 +115,49 @@ where
{
state: &'tcx QueryState<K, D>,
key: K,
- id: QueryJobId,
}
#[cold]
#[inline(never)]
-fn mk_cycle<Qcx, R, D: DepKind>(
+fn mk_cycle<Q, Qcx>(
+ query: Q,
qcx: Qcx,
- cycle_error: CycleError<D>,
+ cycle_error: CycleError<Qcx::DepKind>,
handler: HandleCycleError,
-) -> R
+) -> Q::Value
where
- Qcx: QueryContext + crate::query::HasDepContext<DepKind = D>,
- R: std::fmt::Debug + Value<Qcx::DepContext, Qcx::DepKind>,
+ Q: QueryConfig<Qcx>,
+ Qcx: QueryContext,
{
let error = report_cycle(qcx.dep_context().sess(), &cycle_error);
- handle_cycle_error(*qcx.dep_context(), &cycle_error, error, handler)
+ handle_cycle_error(query, qcx, &cycle_error, error, handler)
}
-fn handle_cycle_error<Tcx, V>(
- tcx: Tcx,
- cycle_error: &CycleError<Tcx::DepKind>,
+fn handle_cycle_error<Q, Qcx>(
+ query: Q,
+ qcx: Qcx,
+ cycle_error: &CycleError<Qcx::DepKind>,
mut error: DiagnosticBuilder<'_, ErrorGuaranteed>,
handler: HandleCycleError,
-) -> V
+) -> Q::Value
where
- Tcx: DepContext,
- V: Value<Tcx, Tcx::DepKind>,
+ Q: QueryConfig<Qcx>,
+ Qcx: QueryContext,
{
use HandleCycleError::*;
match handler {
Error => {
error.emit();
- Value::from_cycle_error(tcx, &cycle_error.cycle)
+ query.value_from_cycle_error(*qcx.dep_context(), &cycle_error.cycle)
}
Fatal => {
error.emit();
- tcx.sess().abort_if_errors();
+ qcx.dep_context().sess().abort_if_errors();
unreachable!()
}
DelayBug => {
error.delay_as_bug();
- Value::from_cycle_error(tcx, &cycle_error.cycle)
+ query.value_from_cycle_error(*qcx.dep_context(), &cycle_error.cycle)
}
}
}
@@ -165,84 +166,6 @@ impl<'tcx, K, D: DepKind> JobOwner<'tcx, K, D>
where
K: Eq + Hash + Copy,
{
- /// Either gets a `JobOwner` corresponding the query, allowing us to
- /// start executing the query, or returns with the result of the query.
- /// This function assumes that `try_get_cached` is already called and returned `lookup`.
- /// If the query is executing elsewhere, this will wait for it and return the result.
- /// If the query panicked, this will silently panic.
- ///
- /// This function is inlined because that results in a noticeable speed-up
- /// for some compile-time benchmarks.
- #[inline(always)]
- fn try_start<'b, Qcx>(
- qcx: &'b Qcx,
- state: &'b QueryState<K, Qcx::DepKind>,
- span: Span,
- key: K,
- ) -> TryGetJob<'b, K, D>
- where
- Qcx: QueryContext + crate::query::HasDepContext<DepKind = D>,
- {
- #[cfg(parallel_compiler)]
- let mut state_lock = state.active.get_shard_by_value(&key).lock();
- #[cfg(not(parallel_compiler))]
- let mut state_lock = state.active.lock();
- let lock = &mut *state_lock;
- let current_job_id = qcx.current_query_job();
-
- match lock.entry(key) {
- Entry::Vacant(entry) => {
- let id = qcx.next_job_id();
- let job = QueryJob::new(id, span, current_job_id);
-
- let key = *entry.key();
- entry.insert(QueryResult::Started(job));
-
- let owner = JobOwner { state, id, key };
- return TryGetJob::NotYetStarted(owner);
- }
- Entry::Occupied(mut entry) => {
- match entry.get_mut() {
- #[cfg(not(parallel_compiler))]
- QueryResult::Started(job) => {
- let id = job.id;
- drop(state_lock);
-
- // If we are single-threaded we know that we have cycle error,
- // so we just return the error.
- return TryGetJob::Cycle(id.find_cycle_in_stack(
- qcx.try_collect_active_jobs().unwrap(),
- &current_job_id,
- span,
- ));
- }
- #[cfg(parallel_compiler)]
- QueryResult::Started(job) => {
- // For parallel queries, we'll block and wait until the query running
- // in another thread has completed. Record how long we wait in the
- // self-profiler.
- let query_blocked_prof_timer = qcx.dep_context().profiler().query_blocked();
-
- // Get the latch out
- let latch = job.latch();
-
- drop(state_lock);
-
- // With parallel queries we might just have to wait on some other
- // thread.
- let result = latch.wait_on(current_job_id, span);
-
- match result {
- Ok(()) => TryGetJob::JobCompleted(query_blocked_prof_timer),
- Err(cycle) => TryGetJob::Cycle(cycle),
- }
- }
- QueryResult::Poisoned => FatalError.raise(),
- }
- }
- }
- }
-
/// Completes the query by updating the query cache with the `result`,
/// signals the waiter and forgets the JobOwner, so it won't poison the query
fn complete<C>(self, cache: &C, result: C::Value, dep_node_index: DepNodeIndex)
@@ -309,25 +232,6 @@ pub(crate) struct CycleError<D: DepKind> {
pub cycle: Vec<QueryInfo<D>>,
}
-/// The result of `try_start`.
-enum TryGetJob<'tcx, K, D>
-where
- K: Eq + Hash + Copy,
- D: DepKind,
-{
- /// The query is not yet started. Contains a guard to the cache eventually used to start it.
- NotYetStarted(JobOwner<'tcx, K, D>),
-
- /// The query was already completed.
- /// Returns the result of the query and its dep-node index
- /// if it succeeded or a cycle error if it failed.
- #[cfg(parallel_compiler)]
- JobCompleted(TimingGuard<'tcx>),
-
- /// Trying to execute the query resulted in a cycle.
- Cycle(CycleError<D>),
-}
-
/// Checks if the query is already computed and in the cache.
/// It returns the shard index and a lock guard to the shard,
/// which will be used if the query is not in the cache and we need
@@ -348,44 +252,54 @@ where
}
}
+#[cold]
#[inline(never)]
-fn try_execute_query<Q, Qcx>(
+#[cfg(not(parallel_compiler))]
+fn cycle_error<Q, Qcx>(
+ query: Q,
+ qcx: Qcx,
+ try_execute: QueryJobId,
+ span: Span,
+) -> (Q::Value, Option<DepNodeIndex>)
+where
+ Q: QueryConfig<Qcx>,
+ Qcx: QueryContext,
+{
+ let error = try_execute.find_cycle_in_stack(
+ qcx.try_collect_active_jobs().unwrap(),
+ &qcx.current_query_job(),
+ span,
+ );
+ (mk_cycle(query, qcx, error, query.handle_cycle_error()), None)
+}
+
+#[inline(always)]
+#[cfg(parallel_compiler)]
+fn wait_for_query<Q, Qcx>(
+ query: Q,
qcx: Qcx,
span: Span,
key: Q::Key,
- dep_node: Option<DepNode<Qcx::DepKind>>,
+ latch: QueryLatch<Qcx::DepKind>,
+ current: Option<QueryJobId>,
) -> (Q::Value, Option<DepNodeIndex>)
where
Q: QueryConfig<Qcx>,
Qcx: QueryContext,
{
- let state = Q::query_state(qcx);
- match JobOwner::<'_, Q::Key, Qcx::DepKind>::try_start(&qcx, state, span, key) {
- TryGetJob::NotYetStarted(job) => {
- let (result, dep_node_index) = execute_job::<Q, Qcx>(qcx, key, dep_node, job.id);
- let cache = Q::query_cache(qcx);
- if Q::FEEDABLE {
- // We should not compute queries that also got a value via feeding.
- // This can't happen, as query feeding adds the very dependencies to the fed query
- // as its feeding query had. So if the fed query is red, so is its feeder, which will
- // get evaluated first, and re-feed the query.
- if let Some((cached_result, _)) = cache.lookup(&key) {
- panic!(
- "fed query later has its value computed. The already cached value: {cached_result:?}"
- );
- }
- }
- job.complete(cache, result, dep_node_index);
- (result, Some(dep_node_index))
- }
- TryGetJob::Cycle(error) => {
- let result = mk_cycle(qcx, error, Q::HANDLE_CYCLE_ERROR);
- (result, None)
- }
- #[cfg(parallel_compiler)]
- TryGetJob::JobCompleted(query_blocked_prof_timer) => {
- let Some((v, index)) = Q::query_cache(qcx).lookup(&key) else {
- panic!("value must be in cache after waiting")
+ // For parallel queries, we'll block and wait until the query running
+ // in another thread has completed. Record how long we wait in the
+ // self-profiler.
+ let query_blocked_prof_timer = qcx.dep_context().profiler().query_blocked();
+
+ // With parallel queries we might just have to wait on some other
+ // thread.
+ let result = latch.wait_on(current, span);
+
+ match result {
+ Ok(()) => {
+ let Some((v, index)) = query.query_cache(qcx).lookup(&key) else {
+ cold_path(|| panic!("value must be in cache after waiting"))
};
qcx.dep_context().profiler().query_cache_hit(index.into());
@@ -393,57 +307,178 @@ where
(v, Some(index))
}
+ Err(cycle) => (mk_cycle(query, qcx, cycle, query.handle_cycle_error()), None),
+ }
+}
+
+#[inline(never)]
+fn try_execute_query<Q, Qcx>(
+ query: Q,
+ qcx: Qcx,
+ span: Span,
+ key: Q::Key,
+ dep_node: Option<DepNode<Qcx::DepKind>>,
+) -> (Q::Value, Option<DepNodeIndex>)
+where
+ Q: QueryConfig<Qcx>,
+ Qcx: QueryContext,
+{
+ let state = query.query_state(qcx);
+ #[cfg(parallel_compiler)]
+ let mut state_lock = state.active.get_shard_by_value(&key).lock();
+ #[cfg(not(parallel_compiler))]
+ let mut state_lock = state.active.lock();
+
+ // For the parallel compiler we need to check both the query cache and query state structures
+ // while holding the state lock to ensure that 1) the query has not yet completed and 2) the
+ // query is not still executing. Without checking the query cache here, we can end up
+ // re-executing the query since `try_start` only checks that the query is not currently
+ // executing, but another thread may have already completed the query and stored its result
+ // in the query cache.
+ if cfg!(parallel_compiler) && qcx.dep_context().sess().threads() > 1 {
+ if let Some((value, index)) = query.query_cache(qcx).lookup(&key) {
+ qcx.dep_context().profiler().query_cache_hit(index.into());
+ return (value, Some(index));
+ }
+ }
+
+ let current_job_id = qcx.current_query_job();
+
+ match state_lock.entry(key) {
+ Entry::Vacant(entry) => {
+ // Nothing has computed or is computing the query, so we start a new job and insert it in the
+ // state map.
+ let id = qcx.next_job_id();
+ let job = QueryJob::new(id, span, current_job_id);
+ entry.insert(QueryResult::Started(job));
+
+ // Drop the lock before we start executing the query
+ drop(state_lock);
+
+ execute_job(query, qcx, state, key, id, dep_node)
+ }
+ Entry::Occupied(mut entry) => {
+ match entry.get_mut() {
+ #[cfg(not(parallel_compiler))]
+ QueryResult::Started(job) => {
+ let id = job.id;
+ drop(state_lock);
+
+ // If we are single-threaded we know that we have cycle error,
+ // so we just return the error.
+ cycle_error(query, qcx, id, span)
+ }
+ #[cfg(parallel_compiler)]
+ QueryResult::Started(job) => {
+ // Get the latch out
+ let latch = job.latch();
+ drop(state_lock);
+
+ wait_for_query(query, qcx, span, key, latch, current_job_id)
+ }
+ QueryResult::Poisoned => FatalError.raise(),
+ }
+ }
}
}
#[inline(always)]
fn execute_job<Q, Qcx>(
+ query: Q,
qcx: Qcx,
+ state: &QueryState<Q::Key, Qcx::DepKind>,
key: Q::Key,
- mut dep_node_opt: Option<DepNode<Qcx::DepKind>>,
- job_id: QueryJobId,
-) -> (Q::Value, DepNodeIndex)
+ id: QueryJobId,
+ dep_node: Option<DepNode<Qcx::DepKind>>,
+) -> (Q::Value, Option<DepNodeIndex>)
where
Q: QueryConfig<Qcx>,
Qcx: QueryContext,
{
- let dep_graph = qcx.dep_context().dep_graph();
+ // Use `JobOwner` so the query will be poisoned if executing it panics.
+ let job_owner = JobOwner { state, key };
+
+ let (result, dep_node_index) = match qcx.dep_context().dep_graph().data() {
+ None => execute_job_non_incr(query, qcx, key, id),
+ Some(data) => execute_job_incr(query, qcx, data, key, dep_node, id),
+ };
- // Fast path for when incr. comp. is off.
- if !dep_graph.is_fully_enabled() {
- // Fingerprint the key, just to assert that it doesn't
- // have anything we don't consider hashable
- if cfg!(debug_assertions) {
- let _ = key.to_fingerprint(*qcx.dep_context());
+ let cache = query.query_cache(qcx);
+ if query.feedable() {
+ // We should not compute queries that also got a value via feeding.
+ // This can't happen, as query feeding adds the very dependencies to the fed query
+ // as its feeding query had. So if the fed query is red, so is its feeder, which will
+ // get evaluated first, and re-feed the query.
+ if let Some((cached_result, _)) = cache.lookup(&key) {
+ panic!(
+ "fed query later has its value computed. The already cached value: {}",
+ (query.format_value())(&cached_result)
+ );
}
+ }
+ job_owner.complete(cache, result, dep_node_index);
- let prof_timer = qcx.dep_context().profiler().query_provider();
- let result = qcx.start_query(job_id, Q::DEPTH_LIMIT, None, || Q::compute(qcx, key));
- let dep_node_index = dep_graph.next_virtual_depnode_index();
- prof_timer.finish_with_query_invocation_id(dep_node_index.into());
+ (result, Some(dep_node_index))
+}
- // Similarly, fingerprint the result to assert that
- // it doesn't have anything not considered hashable.
- if cfg!(debug_assertions)
- && let Some(hash_result) = Q::HASH_RESULT
- {
- qcx.dep_context().with_stable_hashing_context(|mut hcx| {
- hash_result(&mut hcx, &result);
- });
- }
+// Fast path for when incr. comp. is off.
+#[inline(always)]
+fn execute_job_non_incr<Q, Qcx>(
+ query: Q,
+ qcx: Qcx,
+ key: Q::Key,
+ job_id: QueryJobId,
+) -> (Q::Value, DepNodeIndex)
+where
+ Q: QueryConfig<Qcx>,
+ Qcx: QueryContext,
+{
+ debug_assert!(!qcx.dep_context().dep_graph().is_fully_enabled());
- return (result, dep_node_index);
+ // Fingerprint the key, just to assert that it doesn't
+ // have anything we don't consider hashable
+ if cfg!(debug_assertions) {
+ let _ = key.to_fingerprint(*qcx.dep_context());
}
- if !Q::ANON && !Q::EVAL_ALWAYS {
+ let prof_timer = qcx.dep_context().profiler().query_provider();
+ let result = qcx.start_query(job_id, query.depth_limit(), None, || query.compute(qcx, key));
+ let dep_node_index = qcx.dep_context().dep_graph().next_virtual_depnode_index();
+ prof_timer.finish_with_query_invocation_id(dep_node_index.into());
+
+ // Similarly, fingerprint the result to assert that
+ // it doesn't have anything not considered hashable.
+ if cfg!(debug_assertions) && let Some(hash_result) = query.hash_result() {
+ qcx.dep_context().with_stable_hashing_context(|mut hcx| {
+ hash_result(&mut hcx, &result);
+ });
+ }
+
+ (result, dep_node_index)
+}
+
+#[inline(always)]
+fn execute_job_incr<Q, Qcx>(
+ query: Q,
+ qcx: Qcx,
+ dep_graph_data: &DepGraphData<Qcx::DepKind>,
+ key: Q::Key,
+ mut dep_node_opt: Option<DepNode<Qcx::DepKind>>,
+ job_id: QueryJobId,
+) -> (Q::Value, DepNodeIndex)
+where
+ Q: QueryConfig<Qcx>,
+ Qcx: QueryContext,
+{
+ if !query.anon() && !query.eval_always() {
// `to_dep_node` is expensive for some `DepKind`s.
let dep_node =
- dep_node_opt.get_or_insert_with(|| Q::construct_dep_node(*qcx.dep_context(), &key));
+ dep_node_opt.get_or_insert_with(|| query.construct_dep_node(*qcx.dep_context(), &key));
// The diagnostics for this query will be promoted to the current session during
// `try_mark_green()`, so we can ignore them here.
if let Some(ret) = qcx.start_query(job_id, false, None, || {
- try_load_from_disk_and_cache_in_memory::<Q, Qcx>(qcx, &key, &dep_node)
+ try_load_from_disk_and_cache_in_memory(query, dep_graph_data, qcx, &key, &dep_node)
}) {
return ret;
}
@@ -453,17 +488,24 @@ where
let diagnostics = Lock::new(ThinVec::new());
let (result, dep_node_index) =
- qcx.start_query(job_id, Q::DEPTH_LIMIT, Some(&diagnostics), || {
- if Q::ANON {
- return dep_graph
- .with_anon_task(*qcx.dep_context(), Q::DEP_KIND, || Q::compute(qcx, key));
+ qcx.start_query(job_id, query.depth_limit(), Some(&diagnostics), || {
+ if query.anon() {
+ return dep_graph_data.with_anon_task(*qcx.dep_context(), query.dep_kind(), || {
+ query.compute(qcx, key)
+ });
}
// `to_dep_node` is expensive for some `DepKind`s.
let dep_node =
- dep_node_opt.unwrap_or_else(|| Q::construct_dep_node(*qcx.dep_context(), &key));
-
- dep_graph.with_task(dep_node, qcx, key, Q::compute, Q::HASH_RESULT)
+ dep_node_opt.unwrap_or_else(|| query.construct_dep_node(*qcx.dep_context(), &key));
+
+ dep_graph_data.with_task(
+ dep_node,
+ (qcx, query),
+ key,
+ |(qcx, query), key| query.compute(qcx, key),
+ query.hash_result(),
+ )
});
prof_timer.finish_with_query_invocation_id(dep_node_index.into());
@@ -472,7 +514,7 @@ where
let side_effects = QuerySideEffects { diagnostics };
if std::intrinsics::unlikely(!side_effects.is_empty()) {
- if Q::ANON {
+ if query.anon() {
qcx.store_side_effects_for_anon_node(dep_node_index, side_effects);
} else {
qcx.store_side_effects(dep_node_index, side_effects);
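
A pattern running through this whole hunk (and the rest of the file): properties that used to be associated consts and functions on the query type (`Q::ANON`, `Q::DEPTH_LIMIT`, `Q::compute`) are now read off a `query` value threaded through the plumbing. One plausible motivation is keeping the generic surface smaller, since the helpers are parameterized over a single lightweight instance rather than a pile of type-level items. A condensed sketch of that shape, with invented names standing in for the real trait:

// Illustrative only: a trait whose properties are instance methods.
trait QueryConfigLike<Ctx> {
    type Key;
    type Value;
    fn anon(self) -> bool;
    fn eval_always(self) -> bool;
    fn compute(self, cx: &Ctx, key: Self::Key) -> Self::Value;
}

#[derive(Clone, Copy)]
struct TypeOfQuery;

impl QueryConfigLike<()> for TypeOfQuery {
    type Key = u32;
    type Value = String;
    fn anon(self) -> bool { false }
    fn eval_always(self) -> bool { false }
    fn compute(self, _cx: &(), key: u32) -> String { format!("type_of({key})") }
}

// The plumbing takes the config by value instead of purely as a type parameter.
fn execute<Q: QueryConfigLike<()> + Copy>(query: Q, cx: &(), key: Q::Key) -> Q::Value {
    // Properties are now read off the instance rather than the type,
    // mirroring the `!query.anon() && !query.eval_always()` check above.
    let _needs_dep_node = !query.anon() && !query.eval_always();
    query.compute(cx, key)
}

fn main() {
    println!("{}", execute(TypeOfQuery, &(), 7));
}
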
@@ -484,6 +526,8 @@ where
#[inline(always)]
fn try_load_from_disk_and_cache_in_memory<Q, Qcx>(
+ query: Q,
+ dep_graph_data: &DepGraphData<Qcx::DepKind>,
qcx: Qcx,
key: &Q::Key,
dep_node: &DepNode<Qcx::DepKind>,
@@ -495,21 +539,22 @@ where
// Note this function can be called concurrently from the same query
// We must ensure that this is handled correctly.
- let dep_graph = qcx.dep_context().dep_graph();
- let (prev_dep_node_index, dep_node_index) = dep_graph.try_mark_green(qcx, &dep_node)?;
+ let (prev_dep_node_index, dep_node_index) = dep_graph_data.try_mark_green(qcx, &dep_node)?;
- debug_assert!(dep_graph.is_green(dep_node));
+ debug_assert!(dep_graph_data.is_index_green(prev_dep_node_index));
// First we try to load the result from the on-disk cache.
// Some things are never cached on disk.
- if let Some(try_load_from_disk) = Q::try_load_from_disk(qcx, &key) {
+ if let Some(try_load_from_disk) = query.try_load_from_disk(qcx, &key) {
let prof_timer = qcx.dep_context().profiler().incr_cache_loading();
// The call to `with_query_deserialization` enforces that no new `DepNodes`
// are created during deserialization. See the docs of that method for more
// details.
- let result =
- dep_graph.with_query_deserialization(|| try_load_from_disk(qcx, prev_dep_node_index));
+ let result = qcx
+ .dep_context()
+ .dep_graph()
+ .with_query_deserialization(|| try_load_from_disk(qcx, prev_dep_node_index));
prof_timer.finish_with_query_invocation_id(dep_node_index.into());
@@ -517,14 +562,10 @@ where
if std::intrinsics::unlikely(
qcx.dep_context().sess().opts.unstable_opts.query_dep_graph,
) {
- dep_graph.mark_debug_loaded_from_disk(*dep_node)
+ dep_graph_data.mark_debug_loaded_from_disk(*dep_node)
}
- let prev_fingerprint = qcx
- .dep_context()
- .dep_graph()
- .prev_fingerprint_of(dep_node)
- .unwrap_or(Fingerprint::ZERO);
+ let prev_fingerprint = dep_graph_data.prev_fingerprint_of(prev_dep_node_index);
// If `-Zincremental-verify-ich` is specified, re-hash results from
// the cache and make sure that they have the expected fingerprint.
//
@@ -536,7 +577,14 @@ where
if std::intrinsics::unlikely(
try_verify || qcx.dep_context().sess().opts.unstable_opts.incremental_verify_ich,
) {
- incremental_verify_ich(*qcx.dep_context(), &result, dep_node, Q::HASH_RESULT);
+ incremental_verify_ich(
+ *qcx.dep_context(),
+ dep_graph_data,
+ &result,
+ prev_dep_node_index,
+ query.hash_result(),
+ query.format_value(),
+ );
}
return Some((result, dep_node_index));
@@ -546,16 +594,23 @@ where
// can be forced from `DepNode`.
debug_assert!(
!qcx.dep_context().fingerprint_style(dep_node.kind).reconstructible(),
- "missing on-disk cache entry for {dep_node:?}"
+ "missing on-disk cache entry for reconstructible {dep_node:?}"
);
}
+ // Sanity check for the logic in `ensure`: if the node is green and the result loadable,
+ // we should actually be able to load it.
+ debug_assert!(
+ !query.loadable_from_disk(qcx, &key, prev_dep_node_index),
+ "missing on-disk cache entry for loadable {dep_node:?}"
+ );
+
// We could not load a result from the on-disk cache, so
// recompute.
let prof_timer = qcx.dep_context().profiler().query_provider();
// The dep-graph for this computation is already in-place.
- let result = dep_graph.with_ignore(|| Q::compute(qcx, *key));
+ let result = qcx.dep_context().dep_graph().with_ignore(|| query.compute(qcx, *key));
prof_timer.finish_with_query_invocation_id(dep_node_index.into());
@@ -568,87 +623,69 @@ where
//
// See issue #82920 for an example of a miscompilation that would get turned into
// an ICE by this check
- incremental_verify_ich(*qcx.dep_context(), &result, dep_node, Q::HASH_RESULT);
+ incremental_verify_ich(
+ *qcx.dep_context(),
+ dep_graph_data,
+ &result,
+ prev_dep_node_index,
+ query.hash_result(),
+ query.format_value(),
+ );
Some((result, dep_node_index))
}
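
The function above follows a fixed sequence: try to mark the node green, prefer deserializing the previous result from the on-disk cache, otherwise recompute with the dep graph ignored, and in either case optionally re-hash the value against the previous session's fingerprint. A toy sketch of that control flow, using plain HashMaps in place of the dep graph and the on-disk cache (all names invented):

use std::collections::HashMap;

struct PrevSession {
    green: HashMap<&'static str, u64>,         // node -> previous fingerprint
    disk_cache: HashMap<&'static str, String>, // node -> serialized result
}

fn load_or_recompute(
    prev: &PrevSession,
    node: &'static str,
    compute: impl FnOnce() -> String,
    fingerprint: impl Fn(&str) -> u64,
) -> String {
    // Only nodes that were marked green may reuse a previous result at all.
    let Some(&old_hash) = prev.green.get(node) else {
        return compute();
    };
    // Prefer deserializing the cached value; fall back to recomputing, since
    // the node's edges were already recorded in the previous session.
    let result = match prev.disk_cache.get(node) {
        Some(cached) => cached.clone(),
        None => compute(),
    };
    // `-Zincremental-verify-ich`-style check: the reloaded or recomputed value
    // must hash to the fingerprint recorded in the previous session.
    assert_eq!(fingerprint(&result), old_hash, "unstable fingerprint for {node}");
    result
}

fn main() {
    let fp = |s: &str| s.len() as u64; // toy fingerprint
    let mut prev = PrevSession { green: HashMap::new(), disk_cache: HashMap::new() };
    prev.green.insert("type_of(foo)", 3);
    prev.disk_cache.insert("type_of(foo)", "i32".to_string());

    // Green and cached on disk: reused without recomputing.
    let v = load_or_recompute(&prev, "type_of(foo)", || unreachable!(), fp);
    assert_eq!(v, "i32");

    // Not green: recomputed from scratch.
    let v = load_or_recompute(&prev, "type_of(bar)", || "u64".to_string(), fp);
    assert_eq!(v, "u64");
}
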
#[inline]
-#[instrument(skip(tcx, result, hash_result), level = "debug")]
-pub(crate) fn incremental_verify_ich<Tcx, V: Debug>(
+#[instrument(skip(tcx, dep_graph_data, result, hash_result, format_value), level = "debug")]
+pub(crate) fn incremental_verify_ich<Tcx, V>(
tcx: Tcx,
+ dep_graph_data: &DepGraphData<Tcx::DepKind>,
result: &V,
- dep_node: &DepNode<Tcx::DepKind>,
+ prev_index: SerializedDepNodeIndex,
hash_result: Option<fn(&mut StableHashingContext<'_>, &V) -> Fingerprint>,
-) -> Fingerprint
-where
+ format_value: fn(&V) -> String,
+) where
Tcx: DepContext,
{
- assert!(
- tcx.dep_graph().is_green(dep_node),
- "fingerprint for green query instance not loaded from cache: {dep_node:?}",
- );
+ if !dep_graph_data.is_index_green(prev_index) {
+ incremental_verify_ich_not_green(tcx, prev_index)
+ }
let new_hash = hash_result.map_or(Fingerprint::ZERO, |f| {
tcx.with_stable_hashing_context(|mut hcx| f(&mut hcx, result))
});
- let old_hash = tcx.dep_graph().prev_fingerprint_of(dep_node);
+ let old_hash = dep_graph_data.prev_fingerprint_of(prev_index);
- if Some(new_hash) != old_hash {
- incremental_verify_ich_failed(
- tcx.sess(),
- DebugArg::from(&dep_node),
- DebugArg::from(&result),
- );
+ if new_hash != old_hash {
+ incremental_verify_ich_failed(tcx, prev_index, &|| format_value(&result));
}
-
- new_hash
-}
-
-// This DebugArg business is largely a mirror of std::fmt::ArgumentV1, which is
-// currently not exposed publicly.
-//
-// The PR which added this attempted to use `&dyn Debug` instead, but that
-// showed statistically significant worse compiler performance. It's not
-// actually clear what the cause there was -- the code should be cold. If this
-// can be replaced with `&dyn Debug` with on perf impact, then it probably
-// should be.
-extern "C" {
- type Opaque;
-}
-
-struct DebugArg<'a> {
- value: &'a Opaque,
- fmt: fn(&Opaque, &mut std::fmt::Formatter<'_>) -> std::fmt::Result,
}
-impl<'a, T> From<&'a T> for DebugArg<'a>
+#[cold]
+#[inline(never)]
+fn incremental_verify_ich_not_green<Tcx>(tcx: Tcx, prev_index: SerializedDepNodeIndex)
where
- T: std::fmt::Debug,
+ Tcx: DepContext,
{
- fn from(value: &'a T) -> DebugArg<'a> {
- DebugArg {
- value: unsafe { std::mem::transmute(value) },
- fmt: unsafe {
- std::mem::transmute(<T as std::fmt::Debug>::fmt as fn(_, _) -> std::fmt::Result)
- },
- }
- }
+ panic!(
+ "fingerprint for green query instance not loaded from cache: {:?}",
+ tcx.dep_graph().data().unwrap().prev_node_of(prev_index)
+ )
}
-impl std::fmt::Debug for DebugArg<'_> {
- fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
- (self.fmt)(self.value, f)
- }
-}
-
-// Note that this is marked #[cold] and intentionally takes the equivalent of
-// `dyn Debug` for its arguments, as we want to avoid generating a bunch of
-// different implementations for LLVM to chew on (and filling up the final
-// binary, too).
+// Note that this is marked #[cold] and intentionally takes a type-erased
+// `&dyn Fn() -> String` for `result`, as we want to avoid generating a bunch of
+// different implementations for LLVM to chew on (and filling up the final binary, too).
#[cold]
-fn incremental_verify_ich_failed(sess: &Session, dep_node: DebugArg<'_>, result: DebugArg<'_>) {
+#[inline(never)]
+fn incremental_verify_ich_failed<Tcx>(
+ tcx: Tcx,
+ prev_index: SerializedDepNodeIndex,
+ result: &dyn Fn() -> String,
+) where
+ Tcx: DepContext,
+{
// When we emit an error message and panic, we try to debug-print the `DepNode`
// and query result. Unfortunately, this can cause us to run additional queries,
// which may result in another fingerprint mismatch while we're in the middle
@@ -662,19 +699,20 @@ fn incremental_verify_ich_failed(sess: &Session, dep_node: DebugArg<'_>, result:
let old_in_panic = INSIDE_VERIFY_PANIC.with(|in_panic| in_panic.replace(true));
if old_in_panic {
- sess.emit_err(crate::error::Reentrant);
+ tcx.sess().emit_err(crate::error::Reentrant);
} else {
- let run_cmd = if let Some(crate_name) = &sess.opts.crate_name {
+ let run_cmd = if let Some(crate_name) = &tcx.sess().opts.crate_name {
format!("`cargo clean -p {crate_name}` or `cargo clean`")
} else {
"`cargo clean`".to_string()
};
- sess.emit_err(crate::error::IncrementCompilation {
+ let dep_node = tcx.dep_graph().data().unwrap().prev_node_of(prev_index);
+ tcx.sess().emit_err(crate::error::IncrementCompilation {
run_cmd,
dep_node: format!("{dep_node:?}"),
});
- panic!("Found unstable fingerprints for {dep_node:?}: {result:?}");
+ panic!("Found unstable fingerprints for {dep_node:?}: {}", result());
}
INSIDE_VERIFY_PANIC.with(|in_panic| in_panic.set(old_in_panic));
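
Two patterns are worth calling out in this failure path: the result is rendered through a `&dyn Fn() -> String` closure so the hot path never pays for formatting (replacing the removed `DebugArg` machinery), and a thread-local flag guards against the rendering itself triggering another fingerprint mismatch. A small stand-alone sketch of both, with invented names:

use std::cell::Cell;

thread_local! {
    static INSIDE_VERIFY: Cell<bool> = Cell::new(false);
}

#[cold]
#[inline(never)]
fn verify_failed(node: &str, render_result: &dyn Fn() -> String) {
    // Rendering the result may itself run queries and fail again; the flag
    // keeps the second failure from recursing into another render.
    let old = INSIDE_VERIFY.with(|f| f.replace(true));
    if old {
        eprintln!("reentrant fingerprint failure for {node}");
    } else {
        eprintln!("unstable fingerprint for {node}: {}", render_result());
    }
    INSIDE_VERIFY.with(|f| f.set(old));
}

fn verify(node: &str, old_hash: u64, new_hash: u64, result: &impl std::fmt::Debug) {
    if new_hash != old_hash {
        // The closure is only invoked on the cold path, so the common case
        // never materializes the debug string.
        verify_failed(node, &|| format!("{result:?}"));
    }
}

fn main() {
    verify("type_of(foo)", 1, 1, &"i32"); // hashes match: nothing is formatted
    verify("type_of(bar)", 1, 2, &"u64"); // mismatch: rendered via the closure
}
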
@@ -689,22 +727,27 @@ fn incremental_verify_ich_failed(sess: &Session, dep_node: DebugArg<'_>, result:
///
/// Note: The optimization is only available during incr. comp.
#[inline(never)]
-fn ensure_must_run<Q, Qcx>(qcx: Qcx, key: &Q::Key) -> (bool, Option<DepNode<Qcx::DepKind>>)
+fn ensure_must_run<Q, Qcx>(
+ query: Q,
+ qcx: Qcx,
+ key: &Q::Key,
+ check_cache: bool,
+) -> (bool, Option<DepNode<Qcx::DepKind>>)
where
Q: QueryConfig<Qcx>,
Qcx: QueryContext,
{
- if Q::EVAL_ALWAYS {
+ if query.eval_always() {
return (true, None);
}
// Ensuring an anonymous query makes no sense
- assert!(!Q::ANON);
+ assert!(!query.anon());
- let dep_node = Q::construct_dep_node(*qcx.dep_context(), key);
+ let dep_node = query.construct_dep_node(*qcx.dep_context(), key);
let dep_graph = qcx.dep_context().dep_graph();
- match dep_graph.try_mark_green(qcx, &dep_node) {
+ let serialized_dep_node_index = match dep_graph.try_mark_green(qcx, &dep_node) {
None => {
// A None return from `try_mark_green` means that this is either
// a new dep node or that the dep node has already been marked red.
@@ -712,32 +755,44 @@ where
// DepNodeIndex. We must invoke the query itself. The performance cost
// this introduces should be negligible as we'll immediately hit the
// in-memory cache, or another query down the line will.
- (true, Some(dep_node))
+ return (true, Some(dep_node));
}
- Some((_, dep_node_index)) => {
+ Some((serialized_dep_node_index, dep_node_index)) => {
dep_graph.read_index(dep_node_index);
qcx.dep_context().profiler().query_cache_hit(dep_node_index.into());
- (false, None)
+ serialized_dep_node_index
}
+ };
+
+ // We do not need the value at all, so do not check the cache.
+ if !check_cache {
+ return (false, None);
}
+
+ let loadable = query.loadable_from_disk(qcx, key, serialized_dep_node_index);
+ (!loadable, Some(dep_node))
}
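
After this change, `ensure_must_run` can be read as a four-way decision: eval-always queries always run, nodes that cannot be marked green must run, green nodes whose value is not needed can be skipped, and green nodes whose value will be needed are skipped only if `loadable_from_disk` says the cached result is actually there. A compact model of that decision table (illustrative only, not the real signatures):

#[derive(Debug, PartialEq)]
enum EnsureOutcome {
    MustRun,
    CanSkip,
}

fn ensure_must_run(
    eval_always: bool,
    marked_green: bool,
    check_cache: bool,
    loadable_from_disk: bool,
) -> EnsureOutcome {
    if eval_always {
        return EnsureOutcome::MustRun;
    }
    if !marked_green {
        // New node or red node: the query has to execute to get an index.
        return EnsureOutcome::MustRun;
    }
    if !check_cache {
        // The caller only wanted the query's effects replayed, not its value.
        return EnsureOutcome::CanSkip;
    }
    // Green, but the caller will want the value later: only skip if the
    // value can actually be deserialized from the on-disk cache.
    if loadable_from_disk { EnsureOutcome::CanSkip } else { EnsureOutcome::MustRun }
}

fn main() {
    assert_eq!(ensure_must_run(false, true, false, false), EnsureOutcome::CanSkip);
    assert_eq!(ensure_must_run(false, true, true, false), EnsureOutcome::MustRun);
    assert_eq!(ensure_must_run(false, true, true, true), EnsureOutcome::CanSkip);
}
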
#[derive(Debug)]
pub enum QueryMode {
Get,
- Ensure,
+ Ensure { check_cache: bool },
}
#[inline(always)]
-pub fn get_query<Q, Qcx, D>(qcx: Qcx, span: Span, key: Q::Key, mode: QueryMode) -> Option<Q::Value>
+pub fn get_query<Q, Qcx>(
+ query: Q,
+ qcx: Qcx,
+ span: Span,
+ key: Q::Key,
+ mode: QueryMode,
+) -> Option<Q::Value>
where
- D: DepKind,
Q: QueryConfig<Qcx>,
- Q::Value: Value<Qcx::DepContext, D>,
Qcx: QueryContext,
{
- let dep_node = if let QueryMode::Ensure = mode {
- let (must_run, dep_node) = ensure_must_run::<Q, _>(qcx, &key);
+ let dep_node = if let QueryMode::Ensure { check_cache } = mode {
+ let (must_run, dep_node) = ensure_must_run(query, qcx, &key, check_cache);
if !must_run {
return None;
}
@@ -747,28 +802,30 @@ where
};
let (result, dep_node_index) =
- ensure_sufficient_stack(|| try_execute_query::<Q, Qcx>(qcx, span, key, dep_node));
+ ensure_sufficient_stack(|| try_execute_query(query, qcx, span, key, dep_node));
if let Some(dep_node_index) = dep_node_index {
qcx.dep_context().dep_graph().read_index(dep_node_index)
}
Some(result)
}
-pub fn force_query<Q, Qcx, D>(qcx: Qcx, key: Q::Key, dep_node: DepNode<Qcx::DepKind>)
-where
- D: DepKind,
+pub fn force_query<Q, Qcx>(
+ query: Q,
+ qcx: Qcx,
+ key: Q::Key,
+ dep_node: DepNode<<Qcx as HasDepContext>::DepKind>,
+) where
Q: QueryConfig<Qcx>,
- Q::Value: Value<Qcx::DepContext, D>,
Qcx: QueryContext,
{
// We may be concurrently trying both execute and force a query.
// Ensure that only one of them runs the query.
- if let Some((_, index)) = Q::query_cache(qcx).lookup(&key) {
+ if let Some((_, index)) = query.query_cache(qcx).lookup(&key) {
qcx.dep_context().profiler().query_cache_hit(index.into());
return;
}
- debug_assert!(!Q::ANON);
+ debug_assert!(!query.anon());
- ensure_sufficient_stack(|| try_execute_query::<Q, _>(qcx, DUMMY_SP, key, Some(dep_node)));
+ ensure_sufficient_stack(|| try_execute_query(query, qcx, DUMMY_SP, key, Some(dep_node)));
}
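
Finally, `force_query` above checks the in-memory query cache before doing any work, so that a concurrent or earlier execution of the same query counts as a cache hit instead of running the provider twice. A toy sketch of that guard, with a plain HashMap standing in for the query cache and invented counters for illustration:

use std::collections::HashMap;

struct QueryState {
    cache: HashMap<u32, String>,
    executions: u32,
    cache_hits: u32,
}

impl QueryState {
    fn force(&mut self, key: u32, compute: impl FnOnce(u32) -> String) {
        // Another thread (or an earlier `get`) may already have executed and
        // cached this query; forcing it again would duplicate the work.
        if self.cache.contains_key(&key) {
            self.cache_hits += 1;
            return;
        }
        let value = compute(key);
        self.executions += 1;
        self.cache.insert(key, value);
    }
}

fn main() {
    let mut state = QueryState { cache: HashMap::new(), executions: 0, cache_hits: 0 };
    state.force(1, |k| format!("value-{k}"));
    state.force(1, |_| unreachable!()); // the second force is a cache hit
    assert_eq!((state.executions, state.cache_hits), (1, 1));
}
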