summaryrefslogtreecommitdiffstats
path: root/compiler/rustc_query_system/src/dep_graph/graph.rs
diff options
context:
space:
mode:
Diffstat (limited to 'compiler/rustc_query_system/src/dep_graph/graph.rs')
-rw-r--r--compiler/rustc_query_system/src/dep_graph/graph.rs268
1 files changed, 174 insertions, 94 deletions
diff --git a/compiler/rustc_query_system/src/dep_graph/graph.rs b/compiler/rustc_query_system/src/dep_graph/graph.rs
index 8ff561327..38c7c6cce 100644
--- a/compiler/rustc_query_system/src/dep_graph/graph.rs
+++ b/compiler/rustc_query_system/src/dep_graph/graph.rs
@@ -377,9 +377,9 @@ impl<K: DepKind> DepGraph<K> {
/// Executes something within an "anonymous" task, that is, a task the
/// `DepNode` of which is determined by the list of inputs it read from.
- pub fn with_anon_task<Ctxt: DepContext<DepKind = K>, OP, R>(
+ pub fn with_anon_task<Tcx: DepContext<DepKind = K>, OP, R>(
&self,
- cx: Ctxt,
+ cx: Tcx,
dep_kind: K,
op: OP,
) -> (R, DepNodeIndex)
@@ -489,6 +489,95 @@ impl<K: DepKind> DepGraph<K> {
}
}
+ /// Create a node when we force-feed a value into the query cache.
+ /// This is used to remove cycles during type-checking const generic parameters.
+ ///
+ /// As usual in the query system, we consider that the current state of the calling query
+ /// depends only on the list of dependencies up to now. As a consequence, the value
+ /// that this query gives us can only depend on those dependencies too. Therefore,
+ /// it is sound to use the current dependency set for the created node.
+ ///
+ /// During replay, the order of the nodes is relevant in the dependency graph.
+ /// So an unchanged replay will mark the caller query before trying to mark this one.
+ /// If there is a change to report, the caller query will be re-executed before this one.
+ ///
+ /// FIXME: If the code is changed enough for this node to be marked before requiring the
+ /// caller's node, we suppose that those changes will be enough to mark this node red and
+ /// force a recomputation using the "normal" way.
+ pub fn with_feed_task<Ctxt: DepContext<DepKind = K>, A: Debug, R: Debug>(
+ &self,
+ node: DepNode<K>,
+ cx: Ctxt,
+ key: A,
+ result: &R,
+ hash_result: Option<fn(&mut StableHashingContext<'_>, &R) -> Fingerprint>,
+ ) -> DepNodeIndex {
+ if let Some(data) = self.data.as_ref() {
+ // The caller query has more dependencies than the node we are creating. We may
+ // encounter a case where this created node is marked as green, but the caller query is
+ // subsequently marked as red or recomputed. In this case, we will end up feeding a
+ // value to an existing node.
+ //
+ // For sanity, we still check that the loaded stable hash and the new one match.
+ if let Some(dep_node_index) = self.dep_node_index_of_opt(&node) {
+ let _current_fingerprint =
+ crate::query::incremental_verify_ich(cx, result, &node, hash_result);
+
+ #[cfg(debug_assertions)]
+ if hash_result.is_some() {
+ data.current.record_edge(dep_node_index, node, _current_fingerprint);
+ }
+
+ return dep_node_index;
+ }
+
+ let mut edges = SmallVec::new();
+ K::read_deps(|task_deps| match task_deps {
+ TaskDepsRef::Allow(deps) => edges.extend(deps.lock().reads.iter().copied()),
+ TaskDepsRef::Ignore => {} // During HIR lowering, we have no dependencies.
+ TaskDepsRef::Forbid => {
+ panic!("Cannot summarize when dependencies are not recorded.")
+ }
+ });
+
+ let hashing_timer = cx.profiler().incr_result_hashing();
+ let current_fingerprint = hash_result.map(|hash_result| {
+ cx.with_stable_hashing_context(|mut hcx| hash_result(&mut hcx, result))
+ });
+
+ let print_status = cfg!(debug_assertions) && cx.sess().opts.unstable_opts.dep_tasks;
+
+ // Intern the new `DepNode` with the dependencies up-to-now.
+ let (dep_node_index, prev_and_color) = data.current.intern_node(
+ cx.profiler(),
+ &data.previous,
+ node,
+ edges,
+ current_fingerprint,
+ print_status,
+ );
+
+ hashing_timer.finish_with_query_invocation_id(dep_node_index.into());
+
+ if let Some((prev_index, color)) = prev_and_color {
+ debug_assert!(
+ data.colors.get(prev_index).is_none(),
+ "DepGraph::with_task() - Duplicate DepNodeColor insertion for {key:?}",
+ );
+
+ data.colors.insert(prev_index, color);
+ }
+
+ dep_node_index
+ } else {
+ // Incremental compilation is turned off. We just execute the task
+ // without tracking. We still provide a dep-node index that uniquely
+ // identifies the task so that we have a cheap way of referring to
+ // the query for self-profiling.
+ self.next_virtual_depnode_index()
+ }
+ }
+
#[inline]
pub fn dep_node_index_of(&self, dep_node: &DepNode<K>) -> DepNodeIndex {
self.dep_node_index_of_opt(dep_node).unwrap()
@@ -571,12 +660,12 @@ impl<K: DepKind> DepGraph<K> {
/// A node will have an index, when it's already been marked green, or when we can mark it
/// green. This function will mark the current task as a reader of the specified node, when
/// a node index can be found for that node.
- pub fn try_mark_green<Ctxt: QueryContext<DepKind = K>>(
+ pub fn try_mark_green<Qcx: QueryContext<DepKind = K>>(
&self,
- tcx: Ctxt,
+ qcx: Qcx,
dep_node: &DepNode<K>,
) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> {
- debug_assert!(!tcx.dep_context().is_eval_always(dep_node.kind));
+ debug_assert!(!qcx.dep_context().is_eval_always(dep_node.kind));
// Return None if the dep graph is disabled
let data = self.data.as_ref()?;
@@ -592,15 +681,16 @@ impl<K: DepKind> DepGraph<K> {
// in the previous compilation session too, so we can try to
// mark it as green by recursively marking all of its
// dependencies green.
- self.try_mark_previous_green(tcx, data, prev_index, &dep_node)
+ self.try_mark_previous_green(qcx, data, prev_index, &dep_node)
.map(|dep_node_index| (prev_index, dep_node_index))
}
}
}
- fn try_mark_parent_green<Ctxt: QueryContext<DepKind = K>>(
+ #[instrument(skip(self, qcx, data, parent_dep_node_index), level = "debug")]
+ fn try_mark_parent_green<Qcx: QueryContext<DepKind = K>>(
&self,
- tcx: Ctxt,
+ qcx: Qcx,
data: &DepGraphData<K>,
parent_dep_node_index: SerializedDepNodeIndex,
dep_node: &DepNode<K>,
@@ -613,11 +703,7 @@ impl<K: DepKind> DepGraph<K> {
// This dependency has been marked as green before, we are
// still fine and can continue with checking the other
// dependencies.
- debug!(
- "try_mark_previous_green({:?}) --- found dependency {:?} to \
- be immediately green",
- dep_node, dep_dep_node,
- );
+ debug!("dependency {dep_dep_node:?} was immediately green");
return Some(());
}
Some(DepNodeColor::Red) => {
@@ -625,10 +711,7 @@ impl<K: DepKind> DepGraph<K> {
// compared to the previous compilation session. We cannot
// mark the DepNode as green and also don't need to bother
// with checking any of the other dependencies.
- debug!(
- "try_mark_previous_green({:?}) - END - dependency {:?} was immediately red",
- dep_node, dep_dep_node,
- );
+ debug!("dependency {dep_dep_node:?} was immediately red");
return None;
}
None => {}
@@ -636,35 +719,26 @@ impl<K: DepKind> DepGraph<K> {
// We don't know the state of this dependency. If it isn't
// an eval_always node, let's try to mark it green recursively.
- if !tcx.dep_context().is_eval_always(dep_dep_node.kind) {
+ if !qcx.dep_context().is_eval_always(dep_dep_node.kind) {
debug!(
- "try_mark_previous_green({:?}) --- state of dependency {:?} ({}) \
- is unknown, trying to mark it green",
- dep_node, dep_dep_node, dep_dep_node.hash,
+ "state of dependency {:?} ({}) is unknown, trying to mark it green",
+ dep_dep_node, dep_dep_node.hash,
);
let node_index =
- self.try_mark_previous_green(tcx, data, parent_dep_node_index, dep_dep_node);
+ self.try_mark_previous_green(qcx, data, parent_dep_node_index, dep_dep_node);
+
if node_index.is_some() {
- debug!(
- "try_mark_previous_green({:?}) --- managed to MARK dependency {:?} as green",
- dep_node, dep_dep_node
- );
+ debug!("managed to MARK dependency {dep_dep_node:?} as green",);
return Some(());
}
}
// We failed to mark it green, so we try to force the query.
- debug!(
- "try_mark_previous_green({:?}) --- trying to force dependency {:?}",
- dep_node, dep_dep_node
- );
- if !tcx.dep_context().try_force_from_dep_node(*dep_dep_node) {
+ debug!("trying to force dependency {dep_dep_node:?}");
+ if !qcx.dep_context().try_force_from_dep_node(*dep_dep_node) {
// The DepNode could not be forced.
- debug!(
- "try_mark_previous_green({:?}) - END - dependency {:?} could not be forced",
- dep_node, dep_dep_node
- );
+ debug!("dependency {dep_dep_node:?} could not be forced");
return None;
}
@@ -672,23 +746,17 @@ impl<K: DepKind> DepGraph<K> {
match dep_dep_node_color {
Some(DepNodeColor::Green(_)) => {
- debug!(
- "try_mark_previous_green({:?}) --- managed to FORCE dependency {:?} to green",
- dep_node, dep_dep_node
- );
+ debug!("managed to FORCE dependency {dep_dep_node:?} to green");
return Some(());
}
Some(DepNodeColor::Red) => {
- debug!(
- "try_mark_previous_green({:?}) - END - dependency {:?} was red after forcing",
- dep_node, dep_dep_node
- );
+ debug!("dependency {dep_dep_node:?} was red after forcing",);
return None;
}
None => {}
}
- if !tcx.dep_context().sess().has_errors_or_delayed_span_bugs() {
+ if let None = qcx.dep_context().sess().has_errors_or_delayed_span_bugs() {
panic!("try_mark_previous_green() - Forcing the DepNode should have set its color")
}
@@ -702,23 +770,19 @@ impl<K: DepKind> DepGraph<K> {
// invalid state will not be persisted to the
// incremental compilation cache because of
// compilation errors being present.
- debug!(
- "try_mark_previous_green({:?}) - END - dependency {:?} resulted in compilation error",
- dep_node, dep_dep_node
- );
+ debug!("dependency {dep_dep_node:?} resulted in compilation error",);
return None;
}
/// Try to mark a dep-node which existed in the previous compilation session as green.
- fn try_mark_previous_green<Ctxt: QueryContext<DepKind = K>>(
+ #[instrument(skip(self, qcx, data, prev_dep_node_index), level = "debug")]
+ fn try_mark_previous_green<Qcx: QueryContext<DepKind = K>>(
&self,
- tcx: Ctxt,
+ qcx: Qcx,
data: &DepGraphData<K>,
prev_dep_node_index: SerializedDepNodeIndex,
dep_node: &DepNode<K>,
) -> Option<DepNodeIndex> {
- debug!("try_mark_previous_green({:?}) - BEGIN", dep_node);
-
#[cfg(not(parallel_compiler))]
{
debug_assert!(!self.dep_node_exists(dep_node));
@@ -726,14 +790,14 @@ impl<K: DepKind> DepGraph<K> {
}
// We never try to mark eval_always nodes as green
- debug_assert!(!tcx.dep_context().is_eval_always(dep_node.kind));
+ debug_assert!(!qcx.dep_context().is_eval_always(dep_node.kind));
debug_assert_eq!(data.previous.index_to_node(prev_dep_node_index), *dep_node);
let prev_deps = data.previous.edge_targets_from(prev_dep_node_index);
for &dep_dep_node_index in prev_deps {
- self.try_mark_parent_green(tcx, data, dep_dep_node_index, dep_node)?
+ self.try_mark_parent_green(qcx, data, dep_dep_node_index, dep_node)?
}
// If we got here without hitting a `return` that means that all
@@ -745,7 +809,7 @@ impl<K: DepKind> DepGraph<K> {
// We allocating an entry for the node in the current dependency graph and
// adding all the appropriate edges imported from the previous graph
let dep_node_index = data.current.promote_node_and_deps_to_current(
- tcx.dep_context().profiler(),
+ qcx.dep_context().profiler(),
&data.previous,
prev_dep_node_index,
);
@@ -754,7 +818,7 @@ impl<K: DepKind> DepGraph<K> {
// FIXME: Store the fact that a node has diagnostics in a bit in the dep graph somewhere
// Maybe store a list on disk and encode this fact in the DepNodeState
- let side_effects = tcx.load_side_effects(prev_dep_node_index);
+ let side_effects = qcx.load_side_effects(prev_dep_node_index);
#[cfg(not(parallel_compiler))]
debug_assert!(
@@ -765,14 +829,14 @@ impl<K: DepKind> DepGraph<K> {
);
if !side_effects.is_empty() {
- self.emit_side_effects(tcx, data, dep_node_index, side_effects);
+ self.emit_side_effects(qcx, data, dep_node_index, side_effects);
}
// ... and finally storing a "Green" entry in the color map.
// Multiple threads can all write the same color here
data.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index));
- debug!("try_mark_previous_green({:?}) - END - successfully marked as green", dep_node);
+ debug!("successfully marked {dep_node:?} as green");
Some(dep_node_index)
}
@@ -780,9 +844,9 @@ impl<K: DepKind> DepGraph<K> {
/// This may be called concurrently on multiple threads for the same dep node.
#[cold]
#[inline(never)]
- fn emit_side_effects<Ctxt: QueryContext<DepKind = K>>(
+ fn emit_side_effects<Qcx: QueryContext<DepKind = K>>(
&self,
- tcx: Ctxt,
+ qcx: Qcx,
data: &DepGraphData<K>,
dep_node_index: DepNodeIndex,
side_effects: QuerySideEffects,
@@ -794,9 +858,9 @@ impl<K: DepKind> DepGraph<K> {
// must process side effects
// Promote the previous diagnostics to the current session.
- tcx.store_side_effects(dep_node_index, side_effects.clone());
+ qcx.store_side_effects(dep_node_index, side_effects.clone());
- let handle = tcx.dep_context().sess().diagnostic();
+ let handle = qcx.dep_context().sess().diagnostic();
for mut diagnostic in side_effects.diagnostics {
handle.emit_diagnostic(&mut diagnostic);
@@ -804,27 +868,27 @@ impl<K: DepKind> DepGraph<K> {
}
}
- // Returns true if the given node has been marked as red during the
- // current compilation session. Used in various assertions
+ /// Returns true if the given node has been marked as red during the
+ /// current compilation session. Used in various assertions.
pub fn is_red(&self, dep_node: &DepNode<K>) -> bool {
self.node_color(dep_node) == Some(DepNodeColor::Red)
}
- // Returns true if the given node has been marked as green during the
- // current compilation session. Used in various assertions
+ /// Returns true if the given node has been marked as green during the
+ /// current compilation session. Used in various assertions.
pub fn is_green(&self, dep_node: &DepNode<K>) -> bool {
self.node_color(dep_node).map_or(false, |c| c.is_green())
}
- // This method loads all on-disk cacheable query results into memory, so
- // they can be written out to the new cache file again. Most query results
- // will already be in memory but in the case where we marked something as
- // green but then did not need the value, that value will never have been
- // loaded from disk.
- //
- // This method will only load queries that will end up in the disk cache.
- // Other queries will not be executed.
- pub fn exec_cache_promotions<Ctxt: DepContext<DepKind = K>>(&self, tcx: Ctxt) {
+ /// This method loads all on-disk cacheable query results into memory, so
+ /// they can be written out to the new cache file again. Most query results
+ /// will already be in memory but in the case where we marked something as
+ /// green but then did not need the value, that value will never have been
+ /// loaded from disk.
+ ///
+ /// This method will only load queries that will end up in the disk cache.
+ /// Other queries will not be executed.
+ pub fn exec_cache_promotions<Tcx: DepContext<DepKind = K>>(&self, tcx: Tcx) {
let _prof_timer = tcx.profiler().generic_activity("incr_comp_query_cache_promotion");
let data = self.data.as_ref().unwrap();
@@ -941,6 +1005,11 @@ pub(super) struct CurrentDepGraph<K: DepKind> {
new_node_to_index: Sharded<FxHashMap<DepNode<K>, DepNodeIndex>>,
prev_index_to_index: Lock<IndexVec<SerializedDepNodeIndex, Option<DepNodeIndex>>>,
+ /// This is used to verify that fingerprints do not change between the creation of a node
+ /// and its recomputation.
+ #[cfg(debug_assertions)]
+ fingerprints: Lock<FxHashMap<DepNode<K>, Fingerprint>>,
+
/// Used to trap when a specific edge is added to the graph.
/// This is used for debug purposes and is only active with `debug_assertions`.
#[cfg(debug_assertions)]
@@ -1024,6 +1093,8 @@ impl<K: DepKind> CurrentDepGraph<K> {
anon_id_seed,
#[cfg(debug_assertions)]
forbidden_edge,
+ #[cfg(debug_assertions)]
+ fingerprints: Lock::new(Default::default()),
total_read_count: AtomicU64::new(0),
total_duplicate_read_count: AtomicU64::new(0),
node_intern_event_id,
@@ -1031,10 +1102,18 @@ impl<K: DepKind> CurrentDepGraph<K> {
}
#[cfg(debug_assertions)]
- fn record_edge(&self, dep_node_index: DepNodeIndex, key: DepNode<K>) {
+ fn record_edge(&self, dep_node_index: DepNodeIndex, key: DepNode<K>, fingerprint: Fingerprint) {
if let Some(forbidden_edge) = &self.forbidden_edge {
forbidden_edge.index_to_node.lock().insert(dep_node_index, key);
}
+ match self.fingerprints.lock().entry(key) {
+ Entry::Vacant(v) => {
+ v.insert(fingerprint);
+ }
+ Entry::Occupied(o) => {
+ assert_eq!(*o.get(), fingerprint, "Unstable fingerprints for {:?}", key);
+ }
+ }
}
/// Writes the node to the current dep-graph and allocates a `DepNodeIndex` for it.
@@ -1046,17 +1125,21 @@ impl<K: DepKind> CurrentDepGraph<K> {
edges: EdgesVec,
current_fingerprint: Fingerprint,
) -> DepNodeIndex {
- match self.new_node_to_index.get_shard_by_value(&key).lock().entry(key) {
+ let dep_node_index = match self.new_node_to_index.get_shard_by_value(&key).lock().entry(key)
+ {
Entry::Occupied(entry) => *entry.get(),
Entry::Vacant(entry) => {
let dep_node_index =
self.encoder.borrow().send(profiler, key, current_fingerprint, edges);
entry.insert(dep_node_index);
- #[cfg(debug_assertions)]
- self.record_edge(dep_node_index, key);
dep_node_index
}
- }
+ };
+
+ #[cfg(debug_assertions)]
+ self.record_edge(dep_node_index, key, current_fingerprint);
+
+ dep_node_index
}
fn intern_node(
@@ -1097,7 +1180,7 @@ impl<K: DepKind> CurrentDepGraph<K> {
};
#[cfg(debug_assertions)]
- self.record_edge(dep_node_index, key);
+ self.record_edge(dep_node_index, key, fingerprint);
(dep_node_index, Some((prev_index, DepNodeColor::Green(dep_node_index))))
} else {
if print_status {
@@ -1119,7 +1202,7 @@ impl<K: DepKind> CurrentDepGraph<K> {
};
#[cfg(debug_assertions)]
- self.record_edge(dep_node_index, key);
+ self.record_edge(dep_node_index, key, fingerprint);
(dep_node_index, Some((prev_index, DepNodeColor::Red)))
}
} else {
@@ -1144,7 +1227,7 @@ impl<K: DepKind> CurrentDepGraph<K> {
};
#[cfg(debug_assertions)]
- self.record_edge(dep_node_index, key);
+ self.record_edge(dep_node_index, key, Fingerprint::ZERO);
(dep_node_index, Some((prev_index, DepNodeColor::Red)))
}
} else {
@@ -1175,19 +1258,16 @@ impl<K: DepKind> CurrentDepGraph<K> {
Some(dep_node_index) => dep_node_index,
None => {
let key = prev_graph.index_to_node(prev_index);
- let dep_node_index = self.encoder.borrow().send(
- profiler,
- key,
- prev_graph.fingerprint_by_index(prev_index),
- prev_graph
- .edge_targets_from(prev_index)
- .iter()
- .map(|i| prev_index_to_index[*i].unwrap())
- .collect(),
- );
+ let edges = prev_graph
+ .edge_targets_from(prev_index)
+ .iter()
+ .map(|i| prev_index_to_index[*i].unwrap())
+ .collect();
+ let fingerprint = prev_graph.fingerprint_by_index(prev_index);
+ let dep_node_index = self.encoder.borrow().send(profiler, key, fingerprint, edges);
prev_index_to_index[prev_index] = Some(dep_node_index);
#[cfg(debug_assertions)]
- self.record_edge(dep_node_index, key);
+ self.record_edge(dep_node_index, key, fingerprint);
dep_node_index
}
}