From 4547b622d8d29df964fa2914213088b148c498fc Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Wed, 17 Apr 2024 14:18:32 +0200 Subject: Merging upstream version 1.67.1+dfsg1. Signed-off-by: Daniel Baumann --- compiler/rustc_query_system/src/cache.rs | 4 +- .../rustc_query_system/src/dep_graph/dep_node.rs | 36 +-- compiler/rustc_query_system/src/dep_graph/graph.rs | 268 +++++++++++++------- compiler/rustc_query_system/src/dep_graph/mod.rs | 3 +- compiler/rustc_query_system/src/ich/hcx.rs | 39 +-- compiler/rustc_query_system/src/query/caches.rs | 207 ++++++++++++++- compiler/rustc_query_system/src/query/config.rs | 65 +++-- compiler/rustc_query_system/src/query/job.rs | 6 +- compiler/rustc_query_system/src/query/mod.rs | 4 +- compiler/rustc_query_system/src/query/plumbing.rs | 280 +++++++++++---------- compiler/rustc_query_system/src/values.rs | 8 +- 11 files changed, 587 insertions(+), 333 deletions(-) (limited to 'compiler/rustc_query_system/src') diff --git a/compiler/rustc_query_system/src/cache.rs b/compiler/rustc_query_system/src/cache.rs index d592812f7..7cc885be2 100644 --- a/compiler/rustc_query_system/src/cache.rs +++ b/compiler/rustc_query_system/src/cache.rs @@ -26,7 +26,7 @@ impl Cache { } impl Cache { - pub fn get(&self, key: &Key, tcx: CTX) -> Option { + pub fn get(&self, key: &Key, tcx: Tcx) -> Option { Some(self.hashmap.borrow().get(key)?.get(tcx)) } @@ -46,7 +46,7 @@ impl WithDepNode { WithDepNode { dep_node, cached_value } } - pub fn get(&self, tcx: CTX) -> T { + pub fn get(&self, tcx: Tcx) -> T { tcx.dep_graph().read_index(self.dep_node); self.cached_value.clone() } diff --git a/compiler/rustc_query_system/src/dep_graph/dep_node.rs b/compiler/rustc_query_system/src/dep_graph/dep_node.rs index 5c6ce0556..d79c5816a 100644 --- a/compiler/rustc_query_system/src/dep_graph/dep_node.rs +++ b/compiler/rustc_query_system/src/dep_graph/dep_node.rs @@ -61,18 +61,18 @@ impl DepNode { /// Creates a new, parameterless DepNode. This method will assert /// that the DepNode corresponding to the given DepKind actually /// does not require any parameters. - pub fn new_no_params(tcx: Ctxt, kind: K) -> DepNode + pub fn new_no_params(tcx: Tcx, kind: K) -> DepNode where - Ctxt: super::DepContext, + Tcx: super::DepContext, { debug_assert_eq!(tcx.fingerprint_style(kind), FingerprintStyle::Unit); DepNode { kind, hash: Fingerprint::ZERO.into() } } - pub fn construct(tcx: Ctxt, kind: K, arg: &Key) -> DepNode + pub fn construct(tcx: Tcx, kind: K, arg: &Key) -> DepNode where - Ctxt: super::DepContext, - Key: DepNodeParams, + Tcx: super::DepContext, + Key: DepNodeParams, { let hash = arg.to_fingerprint(tcx); let dep_node = DepNode { kind, hash: hash.into() }; @@ -93,9 +93,9 @@ impl DepNode { /// Construct a DepNode from the given DepKind and DefPathHash. This /// method will assert that the given DepKind actually requires a /// single DefId/DefPathHash parameter. 
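// Sketch (illustrative only; simplified standalone types, not rustc's real
// `DepKind`/`Fingerprint`): a dep node is just a kind plus a stable hash of
// its parameter, which is what `new_no_params` and `construct` build here.
// Rustc uses a 128-bit `StableHasher`; `DefaultHasher` below is not stable
// across processes and only stands in for it.
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
struct SketchDepNode {
    kind: &'static str, // stands in for the `DepKind` enum
    hash: u64,          // stands in for `Fingerprint`
}

impl SketchDepNode {
    // Analogue of `new_no_params`: parameterless kinds get a fixed zero hash.
    fn new_no_params(kind: &'static str) -> Self {
        SketchDepNode { kind, hash: 0 }
    }

    // Analogue of `construct`: the key is hashed into an opaque fingerprint.
    fn construct<K: Hash>(kind: &'static str, key: &K) -> Self {
        let mut hasher = DefaultHasher::new();
        key.hash(&mut hasher);
        SketchDepNode { kind, hash: hasher.finish() }
    }
}

fn sketch_dep_node_demo() {
    // Equal keys yield equal nodes; that stable identity is what makes
    // replaying the dependency graph across compilation sessions possible.
    let a = SketchDepNode::construct("type_of", &42u32);
    let b = SketchDepNode::construct("type_of", &42u32);
    assert_eq!(a, b);
    assert_ne!(a, SketchDepNode::new_no_params("hir_crate"));
}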
- pub fn from_def_path_hash(tcx: Ctxt, def_path_hash: DefPathHash, kind: K) -> Self + pub fn from_def_path_hash(tcx: Tcx, def_path_hash: DefPathHash, kind: K) -> Self where - Ctxt: super::DepContext, + Tcx: super::DepContext, { debug_assert!(tcx.fingerprint_style(kind) == FingerprintStyle::DefPathHash); DepNode { kind, hash: def_path_hash.0.into() } @@ -108,18 +108,18 @@ impl fmt::Debug for DepNode { } } -pub trait DepNodeParams: fmt::Debug + Sized { +pub trait DepNodeParams: fmt::Debug + Sized { fn fingerprint_style() -> FingerprintStyle; /// This method turns the parameters of a DepNodeConstructor into an opaque /// Fingerprint to be used in DepNode. /// Not all DepNodeParams support being turned into a Fingerprint (they /// don't need to if the corresponding DepNode is anonymous). - fn to_fingerprint(&self, _: Ctxt) -> Fingerprint { + fn to_fingerprint(&self, _: Tcx) -> Fingerprint { panic!("Not implemented. Accidentally called on anonymous node?") } - fn to_debug_str(&self, _: Ctxt) -> String { + fn to_debug_str(&self, _: Tcx) -> String { format!("{:?}", self) } @@ -129,10 +129,10 @@ pub trait DepNodeParams: fmt::Debug + Sized { /// `fingerprint_style()` is not `FingerprintStyle::Opaque`. /// It is always valid to return `None` here, in which case incremental /// compilation will treat the query as having changed instead of forcing it. - fn recover(tcx: Ctxt, dep_node: &DepNode) -> Option; + fn recover(tcx: Tcx, dep_node: &DepNode) -> Option; } -impl DepNodeParams for T +impl DepNodeParams for T where T: for<'a> HashStable> + fmt::Debug, { @@ -142,7 +142,7 @@ where } #[inline(always)] - default fn to_fingerprint(&self, tcx: Ctxt) -> Fingerprint { + default fn to_fingerprint(&self, tcx: Tcx) -> Fingerprint { tcx.with_stable_hashing_context(|mut hcx| { let mut hasher = StableHasher::new(); self.hash_stable(&mut hcx, &mut hasher); @@ -151,12 +151,12 @@ where } #[inline(always)] - default fn to_debug_str(&self, _: Ctxt) -> String { + default fn to_debug_str(&self, _: Tcx) -> String { format!("{:?}", *self) } #[inline(always)] - default fn recover(_: Ctxt, _: &DepNode) -> Option { + default fn recover(_: Tcx, _: &DepNode) -> Option { None } } @@ -166,7 +166,7 @@ where /// Information is retrieved by indexing the `DEP_KINDS` array using the integer value /// of the `DepKind`. Overall, this allows to implement `DepContext` using this manual /// jump table instead of large matches. -pub struct DepKindStruct { +pub struct DepKindStruct { /// Anonymous queries cannot be replayed from one compiler invocation to the next. /// When their result is needed, it is recomputed. They are useful for fine-grained /// dependency tracking, and caching within one compiler invocation. @@ -216,10 +216,10 @@ pub struct DepKindStruct { /// with kind `MirValidated`, we know that the GUID/fingerprint of the `DepNode` /// is actually a `DefPathHash`, and can therefore just look up the corresponding /// `DefId` in `tcx.def_path_hash_to_def_id`. - pub force_from_dep_node: Option) -> bool>, + pub force_from_dep_node: Option) -> bool>, /// Invoke a query to put the on-disk cached value in memory. 
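// Sketch of the "manual jump table" described above (invented names, not the
// generated rustc table): per-kind behavior lives in an array of structs of
// function pointers indexed by the kind's integer value, instead of one
// large `match` over every `DepKind`.
struct KindInfo<Tcx> {
    is_anon: bool,
    force_from_dep_node: Option<fn(Tcx, u64) -> bool>,
}

#[derive(Clone, Copy)]
struct SketchTcx;

fn force_type_of(_tcx: SketchTcx, _hash: u64) -> bool {
    // A real entry would recover the `DefId` from the hash and re-run the query.
    true
}

fn kind_table() -> Vec<KindInfo<SketchTcx>> {
    vec![
        // kind 0: anonymous, never replayed across sessions, so not forceable
        KindInfo { is_anon: true, force_from_dep_node: None },
        // kind 1: reconstructible from its fingerprint, so forceable
        KindInfo { is_anon: false, force_from_dep_node: Some(force_type_of) },
    ]
}

fn try_force(kind: usize, tcx: SketchTcx, hash: u64) -> bool {
    match kind_table()[kind].force_from_dep_node {
        Some(force) => force(tcx, hash),
        None => false,
    }
}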
- pub try_load_from_on_disk_cache: Option)>, + pub try_load_from_on_disk_cache: Option)>, } /// A "work product" corresponds to a `.o` (or other) file that we diff --git a/compiler/rustc_query_system/src/dep_graph/graph.rs b/compiler/rustc_query_system/src/dep_graph/graph.rs index 8ff561327..38c7c6cce 100644 --- a/compiler/rustc_query_system/src/dep_graph/graph.rs +++ b/compiler/rustc_query_system/src/dep_graph/graph.rs @@ -377,9 +377,9 @@ impl DepGraph { /// Executes something within an "anonymous" task, that is, a task the /// `DepNode` of which is determined by the list of inputs it read from. - pub fn with_anon_task, OP, R>( + pub fn with_anon_task, OP, R>( &self, - cx: Ctxt, + cx: Tcx, dep_kind: K, op: OP, ) -> (R, DepNodeIndex) @@ -489,6 +489,95 @@ impl DepGraph { } } + /// Create a node when we force-feed a value into the query cache. + /// This is used to remove cycles during type-checking const generic parameters. + /// + /// As usual in the query system, we consider the current state of the calling query + /// only depends on the list of dependencies up to now. As a consequence, the value + /// that this query gives us can only depend on those dependencies too. Therefore, + /// it is sound to use the current dependency set for the created node. + /// + /// During replay, the order of the nodes is relevant in the dependency graph. + /// So the unchanged replay will mark the caller query before trying to mark this one. + /// If there is a change to report, the caller query will be re-executed before this one. + /// + /// FIXME: If the code is changed enough for this node to be marked before requiring the + /// caller's node, we suppose that those changes will be enough to mark this node red and + /// force a recomputation using the "normal" way. + pub fn with_feed_task, A: Debug, R: Debug>( + &self, + node: DepNode, + cx: Ctxt, + key: A, + result: &R, + hash_result: Option, &R) -> Fingerprint>, + ) -> DepNodeIndex { + if let Some(data) = self.data.as_ref() { + // The caller query has more dependencies than the node we are creating. We may + // encounter a case where this created node is marked as green, but the caller query is + // subsequently marked as red or recomputed. In this case, we will end up feeding a + // value to an existing node. + // + // For sanity, we still check that the loaded stable hash and the new one match. + if let Some(dep_node_index) = self.dep_node_index_of_opt(&node) { + let _current_fingerprint = + crate::query::incremental_verify_ich(cx, result, &node, hash_result); + + #[cfg(debug_assertions)] + if hash_result.is_some() { + data.current.record_edge(dep_node_index, node, _current_fingerprint); + } + + return dep_node_index; + } + + let mut edges = SmallVec::new(); + K::read_deps(|task_deps| match task_deps { + TaskDepsRef::Allow(deps) => edges.extend(deps.lock().reads.iter().copied()), + TaskDepsRef::Ignore => {} // During HIR lowering, we have no dependencies. + TaskDepsRef::Forbid => { + panic!("Cannot summarize when dependencies are not recorded.") + } + }); + + let hashing_timer = cx.profiler().incr_result_hashing(); + let current_fingerprint = hash_result.map(|hash_result| { + cx.with_stable_hashing_context(|mut hcx| hash_result(&mut hcx, result)) + }); + + let print_status = cfg!(debug_assertions) && cx.sess().opts.unstable_opts.dep_tasks; + + // Intern the new `DepNode` with the dependencies up-to-now. 
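// Sketch of the feeding rule stated in the doc comment above (single-threaded
// toy with invented names; the real code goes through `TaskDepsRef` and
// locks): the fed node's edges are exactly the dependencies the calling task
// has read so far, since the fed value cannot have depended on anything else.
use std::cell::RefCell;

#[derive(Default)]
struct SketchTaskDeps {
    reads: Vec<u32>, // dep node indices read so far by the current task
}

thread_local! {
    static CURRENT_DEPS: RefCell<Option<SketchTaskDeps>> = RefCell::new(None);
}

fn record_read(node: u32) {
    CURRENT_DEPS.with(|d| {
        if let Some(deps) = d.borrow_mut().as_mut() {
            deps.reads.push(node);
        }
    });
}

// Analogue of `with_feed_task`: snapshot the reads-so-far as the new node's edges.
fn feed_node_edges() -> Vec<u32> {
    CURRENT_DEPS.with(|d| d.borrow().as_ref().map(|t| t.reads.clone()).unwrap_or_default())
}

fn feed_demo() {
    CURRENT_DEPS.with(|d| *d.borrow_mut() = Some(SketchTaskDeps::default()));
    record_read(3);
    record_read(7);
    // The fed value can only depend on what the caller has read up to now.
    assert_eq!(feed_node_edges(), vec![3, 7]);
}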
+ let (dep_node_index, prev_and_color) = data.current.intern_node( + cx.profiler(), + &data.previous, + node, + edges, + current_fingerprint, + print_status, + ); + + hashing_timer.finish_with_query_invocation_id(dep_node_index.into()); + + if let Some((prev_index, color)) = prev_and_color { + debug_assert!( + data.colors.get(prev_index).is_none(), + "DepGraph::with_task() - Duplicate DepNodeColor insertion for {key:?}", + ); + + data.colors.insert(prev_index, color); + } + + dep_node_index + } else { + // Incremental compilation is turned off. We just execute the task + // without tracking. We still provide a dep-node index that uniquely + // identifies the task so that we have a cheap way of referring to + // the query for self-profiling. + self.next_virtual_depnode_index() + } + } + #[inline] pub fn dep_node_index_of(&self, dep_node: &DepNode) -> DepNodeIndex { self.dep_node_index_of_opt(dep_node).unwrap() @@ -571,12 +660,12 @@ impl DepGraph { /// A node will have an index, when it's already been marked green, or when we can mark it /// green. This function will mark the current task as a reader of the specified node, when /// a node index can be found for that node. - pub fn try_mark_green>( + pub fn try_mark_green>( &self, - tcx: Ctxt, + qcx: Qcx, dep_node: &DepNode, ) -> Option<(SerializedDepNodeIndex, DepNodeIndex)> { - debug_assert!(!tcx.dep_context().is_eval_always(dep_node.kind)); + debug_assert!(!qcx.dep_context().is_eval_always(dep_node.kind)); // Return None if the dep graph is disabled let data = self.data.as_ref()?; @@ -592,15 +681,16 @@ impl DepGraph { // in the previous compilation session too, so we can try to // mark it as green by recursively marking all of its // dependencies green. - self.try_mark_previous_green(tcx, data, prev_index, &dep_node) + self.try_mark_previous_green(qcx, data, prev_index, &dep_node) .map(|dep_node_index| (prev_index, dep_node_index)) } } } - fn try_mark_parent_green>( + #[instrument(skip(self, qcx, data, parent_dep_node_index), level = "debug")] + fn try_mark_parent_green>( &self, - tcx: Ctxt, + qcx: Qcx, data: &DepGraphData, parent_dep_node_index: SerializedDepNodeIndex, dep_node: &DepNode, @@ -613,11 +703,7 @@ impl DepGraph { // This dependency has been marked as green before, we are // still fine and can continue with checking the other // dependencies. - debug!( - "try_mark_previous_green({:?}) --- found dependency {:?} to \ - be immediately green", - dep_node, dep_dep_node, - ); + debug!("dependency {dep_dep_node:?} was immediately green"); return Some(()); } Some(DepNodeColor::Red) => { @@ -625,10 +711,7 @@ impl DepGraph { // compared to the previous compilation session. We cannot // mark the DepNode as green and also don't need to bother // with checking any of the other dependencies. - debug!( - "try_mark_previous_green({:?}) - END - dependency {:?} was immediately red", - dep_node, dep_dep_node, - ); + debug!("dependency {dep_dep_node:?} was immediately red"); return None; } None => {} @@ -636,35 +719,26 @@ impl DepGraph { // We don't know the state of this dependency. If it isn't // an eval_always node, let's try to mark it green recursively. 
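// Sketch of the green-marking algorithm implemented in the functions below
// (toy graph with invented names; forcing and eval_always handling are
// omitted): a node from the previous session may be marked green only if
// every dependency is recursively green, and one red dependency makes it red.
use std::collections::HashMap;

#[derive(Clone, Copy, PartialEq)]
enum SketchColor { Green, Red }

struct SketchPrevGraph {
    edges: HashMap<u32, Vec<u32>>,  // node -> its recorded dependencies
    input_changed: fn(u32) -> bool, // stands in for re-checking an input
}

fn sketch_try_mark_green(
    g: &SketchPrevGraph,
    colors: &mut HashMap<u32, SketchColor>,
    node: u32,
) -> bool {
    if let Some(&c) = colors.get(&node) {
        return c == SketchColor::Green; // already colored this session
    }
    let deps = g.edges.get(&node).cloned().unwrap_or_default();
    let color = if deps.is_empty() {
        // A leaf is an input: it must be re-checked directly (akin to forcing).
        if (g.input_changed)(node) { SketchColor::Red } else { SketchColor::Green }
    } else if deps.iter().all(|&d| sketch_try_mark_green(g, colors, d)) {
        // All inputs are unchanged, so the cached result is still valid.
        SketchColor::Green
    } else {
        SketchColor::Red
    };
    colors.insert(node, color);
    color == SketchColor::Green
}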
- if !tcx.dep_context().is_eval_always(dep_dep_node.kind) { + if !qcx.dep_context().is_eval_always(dep_dep_node.kind) { debug!( - "try_mark_previous_green({:?}) --- state of dependency {:?} ({}) \ - is unknown, trying to mark it green", - dep_node, dep_dep_node, dep_dep_node.hash, + "state of dependency {:?} ({}) is unknown, trying to mark it green", + dep_dep_node, dep_dep_node.hash, ); let node_index = - self.try_mark_previous_green(tcx, data, parent_dep_node_index, dep_dep_node); + self.try_mark_previous_green(qcx, data, parent_dep_node_index, dep_dep_node); + if node_index.is_some() { - debug!( - "try_mark_previous_green({:?}) --- managed to MARK dependency {:?} as green", - dep_node, dep_dep_node - ); + debug!("managed to MARK dependency {dep_dep_node:?} as green",); return Some(()); } } // We failed to mark it green, so we try to force the query. - debug!( - "try_mark_previous_green({:?}) --- trying to force dependency {:?}", - dep_node, dep_dep_node - ); - if !tcx.dep_context().try_force_from_dep_node(*dep_dep_node) { + debug!("trying to force dependency {dep_dep_node:?}"); + if !qcx.dep_context().try_force_from_dep_node(*dep_dep_node) { // The DepNode could not be forced. - debug!( - "try_mark_previous_green({:?}) - END - dependency {:?} could not be forced", - dep_node, dep_dep_node - ); + debug!("dependency {dep_dep_node:?} could not be forced"); return None; } @@ -672,23 +746,17 @@ impl DepGraph { match dep_dep_node_color { Some(DepNodeColor::Green(_)) => { - debug!( - "try_mark_previous_green({:?}) --- managed to FORCE dependency {:?} to green", - dep_node, dep_dep_node - ); + debug!("managed to FORCE dependency {dep_dep_node:?} to green"); return Some(()); } Some(DepNodeColor::Red) => { - debug!( - "try_mark_previous_green({:?}) - END - dependency {:?} was red after forcing", - dep_node, dep_dep_node - ); + debug!("dependency {dep_dep_node:?} was red after forcing",); return None; } None => {} } - if !tcx.dep_context().sess().has_errors_or_delayed_span_bugs() { + if let None = qcx.dep_context().sess().has_errors_or_delayed_span_bugs() { panic!("try_mark_previous_green() - Forcing the DepNode should have set its color") } @@ -702,23 +770,19 @@ impl DepGraph { // invalid state will not be persisted to the // incremental compilation cache because of // compilation errors being present. - debug!( - "try_mark_previous_green({:?}) - END - dependency {:?} resulted in compilation error", - dep_node, dep_dep_node - ); + debug!("dependency {dep_dep_node:?} resulted in compilation error",); return None; } /// Try to mark a dep-node which existed in the previous compilation session as green. - fn try_mark_previous_green>( + #[instrument(skip(self, qcx, data, prev_dep_node_index), level = "debug")] + fn try_mark_previous_green>( &self, - tcx: Ctxt, + qcx: Qcx, data: &DepGraphData, prev_dep_node_index: SerializedDepNodeIndex, dep_node: &DepNode, ) -> Option { - debug!("try_mark_previous_green({:?}) - BEGIN", dep_node); - #[cfg(not(parallel_compiler))] { debug_assert!(!self.dep_node_exists(dep_node)); @@ -726,14 +790,14 @@ impl DepGraph { } // We never try to mark eval_always nodes as green - debug_assert!(!tcx.dep_context().is_eval_always(dep_node.kind)); + debug_assert!(!qcx.dep_context().is_eval_always(dep_node.kind)); debug_assert_eq!(data.previous.index_to_node(prev_dep_node_index), *dep_node); let prev_deps = data.previous.edge_targets_from(prev_dep_node_index); for &dep_dep_node_index in prev_deps { - self.try_mark_parent_green(tcx, data, dep_dep_node_index, dep_node)? 
+ self.try_mark_parent_green(qcx, data, dep_dep_node_index, dep_node)? } // If we got here without hitting a `return` that means that all @@ -745,7 +809,7 @@ impl DepGraph { // We allocating an entry for the node in the current dependency graph and // adding all the appropriate edges imported from the previous graph let dep_node_index = data.current.promote_node_and_deps_to_current( - tcx.dep_context().profiler(), + qcx.dep_context().profiler(), &data.previous, prev_dep_node_index, ); @@ -754,7 +818,7 @@ impl DepGraph { // FIXME: Store the fact that a node has diagnostics in a bit in the dep graph somewhere // Maybe store a list on disk and encode this fact in the DepNodeState - let side_effects = tcx.load_side_effects(prev_dep_node_index); + let side_effects = qcx.load_side_effects(prev_dep_node_index); #[cfg(not(parallel_compiler))] debug_assert!( @@ -765,14 +829,14 @@ impl DepGraph { ); if !side_effects.is_empty() { - self.emit_side_effects(tcx, data, dep_node_index, side_effects); + self.emit_side_effects(qcx, data, dep_node_index, side_effects); } // ... and finally storing a "Green" entry in the color map. // Multiple threads can all write the same color here data.colors.insert(prev_dep_node_index, DepNodeColor::Green(dep_node_index)); - debug!("try_mark_previous_green({:?}) - END - successfully marked as green", dep_node); + debug!("successfully marked {dep_node:?} as green"); Some(dep_node_index) } @@ -780,9 +844,9 @@ impl DepGraph { /// This may be called concurrently on multiple threads for the same dep node. #[cold] #[inline(never)] - fn emit_side_effects>( + fn emit_side_effects>( &self, - tcx: Ctxt, + qcx: Qcx, data: &DepGraphData, dep_node_index: DepNodeIndex, side_effects: QuerySideEffects, @@ -794,9 +858,9 @@ impl DepGraph { // must process side effects // Promote the previous diagnostics to the current session. - tcx.store_side_effects(dep_node_index, side_effects.clone()); + qcx.store_side_effects(dep_node_index, side_effects.clone()); - let handle = tcx.dep_context().sess().diagnostic(); + let handle = qcx.dep_context().sess().diagnostic(); for mut diagnostic in side_effects.diagnostics { handle.emit_diagnostic(&mut diagnostic); @@ -804,27 +868,27 @@ impl DepGraph { } } - // Returns true if the given node has been marked as red during the - // current compilation session. Used in various assertions + /// Returns true if the given node has been marked as red during the + /// current compilation session. Used in various assertions pub fn is_red(&self, dep_node: &DepNode) -> bool { self.node_color(dep_node) == Some(DepNodeColor::Red) } - // Returns true if the given node has been marked as green during the - // current compilation session. Used in various assertions + /// Returns true if the given node has been marked as green during the + /// current compilation session. Used in various assertions pub fn is_green(&self, dep_node: &DepNode) -> bool { self.node_color(dep_node).map_or(false, |c| c.is_green()) } - // This method loads all on-disk cacheable query results into memory, so - // they can be written out to the new cache file again. Most query results - // will already be in memory but in the case where we marked something as - // green but then did not need the value, that value will never have been - // loaded from disk. - // - // This method will only load queries that will end up in the disk cache. - // Other queries will not be executed. 
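// Sketch of the promotion pass documented just below (invented, simplified
// state; the real code walks `SerializedDepNodeIndex`es and calls the
// query's `try_load_from_on_disk_cache` hook): green, disk-cacheable results
// that were never needed this session still get loaded into memory so they
// can be re-serialized into the next cache file.
#[derive(Clone, Copy, PartialEq)]
enum PromoColor { Green, Red }

struct SketchPrevSession {
    colors: Vec<Option<PromoColor>>, // indexed by previous-session node index
    cache_on_disk: Vec<bool>,
    loaded: Vec<bool>, // whether the value is currently in memory
}

fn sketch_exec_cache_promotions(prev: &mut SketchPrevSession) {
    for i in 0..prev.colors.len() {
        let green = prev.colors[i] == Some(PromoColor::Green);
        if green && prev.cache_on_disk[i] && !prev.loaded[i] {
            // Here rustc would invoke the query's on-disk loader.
            prev.loaded[i] = true;
        }
    }
}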
- pub fn exec_cache_promotions>(&self, tcx: Ctxt) { + /// This method loads all on-disk cacheable query results into memory, so + /// they can be written out to the new cache file again. Most query results + /// will already be in memory but in the case where we marked something as + /// green but then did not need the value, that value will never have been + /// loaded from disk. + /// + /// This method will only load queries that will end up in the disk cache. + /// Other queries will not be executed. + pub fn exec_cache_promotions>(&self, tcx: Tcx) { let _prof_timer = tcx.profiler().generic_activity("incr_comp_query_cache_promotion"); let data = self.data.as_ref().unwrap(); @@ -941,6 +1005,11 @@ pub(super) struct CurrentDepGraph { new_node_to_index: Sharded, DepNodeIndex>>, prev_index_to_index: Lock>>, + /// This is used to verify that fingerprints do not change between the creation of a node + /// and its recomputation. + #[cfg(debug_assertions)] + fingerprints: Lock, Fingerprint>>, + /// Used to trap when a specific edge is added to the graph. /// This is used for debug purposes and is only active with `debug_assertions`. #[cfg(debug_assertions)] @@ -1024,6 +1093,8 @@ impl CurrentDepGraph { anon_id_seed, #[cfg(debug_assertions)] forbidden_edge, + #[cfg(debug_assertions)] + fingerprints: Lock::new(Default::default()), total_read_count: AtomicU64::new(0), total_duplicate_read_count: AtomicU64::new(0), node_intern_event_id, @@ -1031,10 +1102,18 @@ impl CurrentDepGraph { } #[cfg(debug_assertions)] - fn record_edge(&self, dep_node_index: DepNodeIndex, key: DepNode) { + fn record_edge(&self, dep_node_index: DepNodeIndex, key: DepNode, fingerprint: Fingerprint) { if let Some(forbidden_edge) = &self.forbidden_edge { forbidden_edge.index_to_node.lock().insert(dep_node_index, key); } + match self.fingerprints.lock().entry(key) { + Entry::Vacant(v) => { + v.insert(fingerprint); + } + Entry::Occupied(o) => { + assert_eq!(*o.get(), fingerprint, "Unstable fingerprints for {:?}", key); + } + } } /// Writes the node to the current dep-graph and allocates a `DepNodeIndex` for it. 
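// Sketch of the new debug-assertions check in `record_edge` above (simplified
// key and hash types): the first time a node is recorded, its fingerprint is
// remembered; recording the same node again, e.g. when a fed value is later
// recomputed, must reproduce the identical hash.
use std::collections::hash_map::Entry as SketchEntry;
use std::collections::HashMap;

#[derive(Default)]
struct SketchFingerprintCheck {
    seen: HashMap<&'static str, u64>, // node key -> first observed fingerprint
}

impl SketchFingerprintCheck {
    fn record(&mut self, key: &'static str, fingerprint: u64) {
        match self.seen.entry(key) {
            SketchEntry::Vacant(v) => {
                v.insert(fingerprint);
            }
            SketchEntry::Occupied(o) => {
                assert_eq!(*o.get(), fingerprint, "Unstable fingerprints for {key:?}");
            }
        }
    }
}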
@@ -1046,17 +1125,21 @@ impl CurrentDepGraph { edges: EdgesVec, current_fingerprint: Fingerprint, ) -> DepNodeIndex { - match self.new_node_to_index.get_shard_by_value(&key).lock().entry(key) { + let dep_node_index = match self.new_node_to_index.get_shard_by_value(&key).lock().entry(key) + { Entry::Occupied(entry) => *entry.get(), Entry::Vacant(entry) => { let dep_node_index = self.encoder.borrow().send(profiler, key, current_fingerprint, edges); entry.insert(dep_node_index); - #[cfg(debug_assertions)] - self.record_edge(dep_node_index, key); dep_node_index } - } + }; + + #[cfg(debug_assertions)] + self.record_edge(dep_node_index, key, current_fingerprint); + + dep_node_index } fn intern_node( @@ -1097,7 +1180,7 @@ impl CurrentDepGraph { }; #[cfg(debug_assertions)] - self.record_edge(dep_node_index, key); + self.record_edge(dep_node_index, key, fingerprint); (dep_node_index, Some((prev_index, DepNodeColor::Green(dep_node_index)))) } else { if print_status { @@ -1119,7 +1202,7 @@ impl CurrentDepGraph { }; #[cfg(debug_assertions)] - self.record_edge(dep_node_index, key); + self.record_edge(dep_node_index, key, fingerprint); (dep_node_index, Some((prev_index, DepNodeColor::Red))) } } else { @@ -1144,7 +1227,7 @@ impl CurrentDepGraph { }; #[cfg(debug_assertions)] - self.record_edge(dep_node_index, key); + self.record_edge(dep_node_index, key, Fingerprint::ZERO); (dep_node_index, Some((prev_index, DepNodeColor::Red))) } } else { @@ -1175,19 +1258,16 @@ impl CurrentDepGraph { Some(dep_node_index) => dep_node_index, None => { let key = prev_graph.index_to_node(prev_index); - let dep_node_index = self.encoder.borrow().send( - profiler, - key, - prev_graph.fingerprint_by_index(prev_index), - prev_graph - .edge_targets_from(prev_index) - .iter() - .map(|i| prev_index_to_index[*i].unwrap()) - .collect(), - ); + let edges = prev_graph + .edge_targets_from(prev_index) + .iter() + .map(|i| prev_index_to_index[*i].unwrap()) + .collect(); + let fingerprint = prev_graph.fingerprint_by_index(prev_index); + let dep_node_index = self.encoder.borrow().send(profiler, key, fingerprint, edges); prev_index_to_index[prev_index] = Some(dep_node_index); #[cfg(debug_assertions)] - self.record_edge(dep_node_index, key); + self.record_edge(dep_node_index, key, fingerprint); dep_node_index } } diff --git a/compiler/rustc_query_system/src/dep_graph/mod.rs b/compiler/rustc_query_system/src/dep_graph/mod.rs index da2075fd5..e370c6990 100644 --- a/compiler/rustc_query_system/src/dep_graph/mod.rs +++ b/compiler/rustc_query_system/src/dep_graph/mod.rs @@ -52,9 +52,8 @@ pub trait DepContext: Copy { } /// Try to force a dep node to execute and see if it's green. 
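// Sketch of the logging convention this patch moves to, assuming the
// `tracing` crate as a dependency: instead of repeating the function name and
// arguments in every `debug!` string, `#[instrument]` opens a span that
// carries them, so the message bodies can shrink to just the event.
use tracing::{debug, instrument};

#[derive(Debug)]
struct SketchNode {
    kind: &'static str,
}

#[instrument(level = "debug")]
fn sketch_try_force(dep_node: &SketchNode) -> bool {
    // The enclosing span already names the function and records `dep_node`.
    debug!("trying to force");
    false
}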
+ #[instrument(skip(self), level = "debug")] fn try_force_from_dep_node(self, dep_node: DepNode) -> bool { - debug!("try_force_from_dep_node({:?}) --- trying to force", dep_node); - let cb = self.dep_kind_info(dep_node.kind); if let Some(f) = cb.force_from_dep_node { f(self, dep_node); diff --git a/compiler/rustc_query_system/src/ich/hcx.rs b/compiler/rustc_query_system/src/ich/hcx.rs index 148eabb38..6378ec108 100644 --- a/compiler/rustc_query_system/src/ich/hcx.rs +++ b/compiler/rustc_query_system/src/ich/hcx.rs @@ -49,15 +49,13 @@ pub(super) enum BodyResolver<'tcx> { impl<'a> StableHashingContext<'a> { #[inline] - fn new_with_or_without_spans( + pub fn new( sess: &'a Session, definitions: &'a Definitions, cstore: &'a dyn CrateStore, source_span: &'a IndexVec, - always_ignore_spans: bool, ) -> Self { - let hash_spans_initial = - !always_ignore_spans && !sess.opts.unstable_opts.incremental_ignore_spans; + let hash_spans_initial = !sess.opts.unstable_opts.incremental_ignore_spans; StableHashingContext { body_resolver: BodyResolver::Forbidden, @@ -71,33 +69,6 @@ impl<'a> StableHashingContext<'a> { } } - #[inline] - pub fn new( - sess: &'a Session, - definitions: &'a Definitions, - cstore: &'a dyn CrateStore, - source_span: &'a IndexVec, - ) -> Self { - Self::new_with_or_without_spans( - sess, - definitions, - cstore, - source_span, - /*always_ignore_spans=*/ false, - ) - } - - #[inline] - pub fn ignore_spans( - sess: &'a Session, - definitions: &'a Definitions, - cstore: &'a dyn CrateStore, - source_span: &'a IndexVec, - ) -> Self { - let always_ignore_spans = true; - Self::new_with_or_without_spans(sess, definitions, cstore, source_span, always_ignore_spans) - } - #[inline] pub fn without_hir_bodies(&mut self, f: impl FnOnce(&mut StableHashingContext<'_>)) { f(&mut StableHashingContext { body_resolver: BodyResolver::Ignore, ..self.clone() }); @@ -202,10 +173,4 @@ impl<'a> rustc_span::HashStableContext for StableHashingContext<'a> { } } -impl<'a> rustc_data_structures::intern::InternedHashingContext for StableHashingContext<'a> { - fn with_def_path_and_no_spans(&mut self, f: impl FnOnce(&mut Self)) { - self.while_hashing_spans(false, f); - } -} - impl<'a> rustc_session::HashStableContext for StableHashingContext<'a> {} diff --git a/compiler/rustc_query_system/src/query/caches.rs b/compiler/rustc_query_system/src/query/caches.rs index 85c5af72e..4c4680b5d 100644 --- a/compiler/rustc_query_system/src/query/caches.rs +++ b/compiler/rustc_query_system/src/query/caches.rs @@ -8,13 +8,17 @@ use rustc_data_structures::sharded::Sharded; #[cfg(not(parallel_compiler))] use rustc_data_structures::sync::Lock; use rustc_data_structures::sync::WorkerLocal; +use rustc_index::vec::{Idx, IndexVec}; use std::default::Default; use std::fmt::Debug; use std::hash::Hash; use std::marker::PhantomData; -pub trait CacheSelector { - type Cache; +pub trait CacheSelector<'tcx, V> { + type Cache + where + V: Clone; + type ArenaCache; } pub trait QueryStorage { @@ -47,10 +51,13 @@ pub trait QueryCache: QueryStorage + Sized { fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)); } -pub struct DefaultCacheSelector; +pub struct DefaultCacheSelector(PhantomData); -impl CacheSelector for DefaultCacheSelector { - type Cache = DefaultCache; +impl<'tcx, K: Eq + Hash, V: 'tcx> CacheSelector<'tcx, V> for DefaultCacheSelector { + type Cache = DefaultCache + where + V: Clone; + type ArenaCache = ArenaCache<'tcx, K, V>; } pub struct DefaultCache { @@ -110,6 +117,8 @@ where let mut lock = 
self.cache.get_shard_by_value(&key).lock(); #[cfg(not(parallel_compiler))] let mut lock = self.cache.lock(); + // We may be overwriting another value. This is all right, since the dep-graph + // will check that the fingerprint matches. lock.insert(key, (value.clone(), index)); value } @@ -134,12 +143,6 @@ where } } -pub struct ArenaCacheSelector<'tcx>(PhantomData<&'tcx ()>); - -impl<'tcx, K: Eq + Hash, V: 'tcx> CacheSelector for ArenaCacheSelector<'tcx> { - type Cache = ArenaCache<'tcx, K, V>; -} - pub struct ArenaCache<'tcx, K, V> { arena: WorkerLocal>, #[cfg(parallel_compiler)] @@ -201,6 +204,8 @@ where let mut lock = self.cache.get_shard_by_value(&key).lock(); #[cfg(not(parallel_compiler))] let mut lock = self.cache.lock(); + // We may be overwriting another value. This is all right, since the dep-graph + // will check that the fingerprint matches. lock.insert(key, value); &value.0 } @@ -224,3 +229,183 @@ where } } } + +pub struct VecCacheSelector(PhantomData); + +impl<'tcx, K: Idx, V: 'tcx> CacheSelector<'tcx, V> for VecCacheSelector { + type Cache = VecCache + where + V: Clone; + type ArenaCache = VecArenaCache<'tcx, K, V>; +} + +pub struct VecCache { + #[cfg(parallel_compiler)] + cache: Sharded>>, + #[cfg(not(parallel_compiler))] + cache: Lock>>, +} + +impl Default for VecCache { + fn default() -> Self { + VecCache { cache: Default::default() } + } +} + +impl QueryStorage for VecCache { + type Value = V; + type Stored = V; + + #[inline] + fn store_nocache(&self, value: Self::Value) -> Self::Stored { + // We have no dedicated storage + value + } +} + +impl QueryCache for VecCache +where + K: Eq + Idx + Clone + Debug, + V: Clone + Debug, +{ + type Key = K; + + #[inline(always)] + fn lookup(&self, key: &K, on_hit: OnHit) -> Result + where + OnHit: FnOnce(&V, DepNodeIndex) -> R, + { + #[cfg(parallel_compiler)] + let lock = self.cache.get_shard_by_hash(key.index() as u64).lock(); + #[cfg(not(parallel_compiler))] + let lock = self.cache.lock(); + if let Some(Some(value)) = lock.get(*key) { + let hit_result = on_hit(&value.0, value.1); + Ok(hit_result) + } else { + Err(()) + } + } + + #[inline] + fn complete(&self, key: K, value: V, index: DepNodeIndex) -> Self::Stored { + #[cfg(parallel_compiler)] + let mut lock = self.cache.get_shard_by_hash(key.index() as u64).lock(); + #[cfg(not(parallel_compiler))] + let mut lock = self.cache.lock(); + lock.insert(key, (value.clone(), index)); + value + } + + fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) { + #[cfg(parallel_compiler)] + { + let shards = self.cache.lock_shards(); + for shard in shards.iter() { + for (k, v) in shard.iter_enumerated() { + if let Some(v) = v { + f(&k, &v.0, v.1); + } + } + } + } + #[cfg(not(parallel_compiler))] + { + let map = self.cache.lock(); + for (k, v) in map.iter_enumerated() { + if let Some(v) = v { + f(&k, &v.0, v.1); + } + } + } + } +} + +pub struct VecArenaCache<'tcx, K: Idx, V> { + arena: WorkerLocal>, + #[cfg(parallel_compiler)] + cache: Sharded>>, + #[cfg(not(parallel_compiler))] + cache: Lock>>, +} + +impl<'tcx, K: Idx, V> Default for VecArenaCache<'tcx, K, V> { + fn default() -> Self { + VecArenaCache { + arena: WorkerLocal::new(|_| TypedArena::default()), + cache: Default::default(), + } + } +} + +impl<'tcx, K: Eq + Idx, V: Debug + 'tcx> QueryStorage for VecArenaCache<'tcx, K, V> { + type Value = V; + type Stored = &'tcx V; + + #[inline] + fn store_nocache(&self, value: Self::Value) -> Self::Stored { + let value = self.arena.alloc((value, DepNodeIndex::INVALID)); + let value = 
unsafe { &*(&value.0 as *const _) }; + &value + } +} + +impl<'tcx, K, V: 'tcx> QueryCache for VecArenaCache<'tcx, K, V> +where + K: Eq + Idx + Clone + Debug, + V: Debug, +{ + type Key = K; + + #[inline(always)] + fn lookup(&self, key: &K, on_hit: OnHit) -> Result + where + OnHit: FnOnce(&&'tcx V, DepNodeIndex) -> R, + { + #[cfg(parallel_compiler)] + let lock = self.cache.get_shard_by_hash(key.index() as u64).lock(); + #[cfg(not(parallel_compiler))] + let lock = self.cache.lock(); + if let Some(Some(value)) = lock.get(*key) { + let hit_result = on_hit(&&value.0, value.1); + Ok(hit_result) + } else { + Err(()) + } + } + + #[inline] + fn complete(&self, key: K, value: V, index: DepNodeIndex) -> Self::Stored { + let value = self.arena.alloc((value, index)); + let value = unsafe { &*(value as *const _) }; + #[cfg(parallel_compiler)] + let mut lock = self.cache.get_shard_by_hash(key.index() as u64).lock(); + #[cfg(not(parallel_compiler))] + let mut lock = self.cache.lock(); + lock.insert(key, value); + &value.0 + } + + fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) { + #[cfg(parallel_compiler)] + { + let shards = self.cache.lock_shards(); + for shard in shards.iter() { + for (k, v) in shard.iter_enumerated() { + if let Some(v) = v { + f(&k, &v.0, v.1); + } + } + } + } + #[cfg(not(parallel_compiler))] + { + let map = self.cache.lock(); + for (k, v) in map.iter_enumerated() { + if let Some(v) = v { + f(&k, &v.0, v.1); + } + } + } + } +} diff --git a/compiler/rustc_query_system/src/query/config.rs b/compiler/rustc_query_system/src/query/config.rs index 0a1cffa3b..7d1b62ab1 100644 --- a/compiler/rustc_query_system/src/query/config.rs +++ b/compiler/rustc_query_system/src/query/config.rs @@ -11,59 +11,58 @@ use rustc_data_structures::fingerprint::Fingerprint; use std::fmt::Debug; use std::hash::Hash; -pub trait QueryConfig { +pub trait QueryConfig { const NAME: &'static str; type Key: Eq + Hash + Clone + Debug; - type Value; - type Stored: Clone; + type Value: Debug; + type Stored: Debug + Clone + std::borrow::Borrow; + + type Cache: QueryCache; + + // Don't use this method to access query results, instead use the methods on TyCtxt + fn query_state<'a>(tcx: Qcx) -> &'a QueryState + where + Qcx: 'a; + + // Don't use this method to access query results, instead use the methods on TyCtxt + fn query_cache<'a>(tcx: Qcx) -> &'a Self::Cache + where + Qcx: 'a; + + // Don't use this method to compute query results, instead use the methods on TyCtxt + fn make_vtable(tcx: Qcx, key: &Self::Key) -> QueryVTable; + + fn cache_on_disk(tcx: Qcx::DepContext, key: &Self::Key) -> bool; + + // Don't use this method to compute query results, instead use the methods on TyCtxt + fn execute_query(tcx: Qcx::DepContext, k: Self::Key) -> Self::Stored; } #[derive(Copy, Clone)] -pub struct QueryVTable { +pub struct QueryVTable { pub anon: bool, - pub dep_kind: CTX::DepKind, + pub dep_kind: Qcx::DepKind, pub eval_always: bool, pub depth_limit: bool, + pub feedable: bool, - pub compute: fn(CTX::DepContext, K) -> V, + pub compute: fn(Qcx::DepContext, K) -> V, pub hash_result: Option, &V) -> Fingerprint>, pub handle_cycle_error: HandleCycleError, // NOTE: this is also `None` if `cache_on_disk()` returns false, not just if it's unsupported by the query - pub try_load_from_disk: Option Option>, + pub try_load_from_disk: Option Option>, } -impl QueryVTable { - pub(crate) fn to_dep_node(&self, tcx: CTX::DepContext, key: &K) -> DepNode +impl QueryVTable { + pub(crate) fn to_dep_node(&self, tcx: Qcx::DepContext, 
key: &K) -> DepNode where - K: crate::dep_graph::DepNodeParams, + K: crate::dep_graph::DepNodeParams, { DepNode::construct(tcx, self.dep_kind, key) } - pub(crate) fn compute(&self, tcx: CTX::DepContext, key: K) -> V { + pub(crate) fn compute(&self, tcx: Qcx::DepContext, key: K) -> V { (self.compute)(tcx, key) } } - -pub trait QueryDescription: QueryConfig { - type Cache: QueryCache; - - // Don't use this method to access query results, instead use the methods on TyCtxt - fn query_state<'a>(tcx: CTX) -> &'a QueryState - where - CTX: 'a; - - // Don't use this method to access query results, instead use the methods on TyCtxt - fn query_cache<'a>(tcx: CTX) -> &'a Self::Cache - where - CTX: 'a; - - // Don't use this method to compute query results, instead use the methods on TyCtxt - fn make_vtable(tcx: CTX, key: &Self::Key) -> QueryVTable; - - fn cache_on_disk(tcx: CTX::DepContext, key: &Self::Key) -> bool; - - // Don't use this method to compute query results, instead use the methods on TyCtxt - fn execute_query(tcx: CTX::DepContext, k: Self::Key) -> Self::Stored; -} diff --git a/compiler/rustc_query_system/src/query/job.rs b/compiler/rustc_query_system/src/query/job.rs index ed65393f5..49bbcf578 100644 --- a/compiler/rustc_query_system/src/query/job.rs +++ b/compiler/rustc_query_system/src/query/job.rs @@ -596,8 +596,8 @@ pub(crate) fn report_cycle<'a>( cycle_diag.into_diagnostic(&sess.parse_sess.span_diagnostic) } -pub fn print_query_stack( - tcx: CTX, +pub fn print_query_stack( + qcx: Qcx, mut current_query: Option, handler: &Handler, num_frames: Option, @@ -606,7 +606,7 @@ pub fn print_query_stack( // a panic hook, which means that the global `Handler` may be in a weird // state if it was responsible for triggering the panic. let mut i = 0; - let query_map = tcx.try_collect_active_jobs(); + let query_map = qcx.try_collect_active_jobs(); while let Some(query) = current_query { if Some(i) == num_frames { diff --git a/compiler/rustc_query_system/src/query/mod.rs b/compiler/rustc_query_system/src/query/mod.rs index 118703fc0..7f3dc50d2 100644 --- a/compiler/rustc_query_system/src/query/mod.rs +++ b/compiler/rustc_query_system/src/query/mod.rs @@ -8,11 +8,11 @@ pub use self::job::{print_query_stack, QueryInfo, QueryJob, QueryJobId, QueryJob mod caches; pub use self::caches::{ - ArenaCacheSelector, CacheSelector, DefaultCacheSelector, QueryCache, QueryStorage, + CacheSelector, DefaultCacheSelector, QueryCache, QueryStorage, VecCacheSelector, }; mod config; -pub use self::config::{QueryConfig, QueryDescription, QueryVTable}; +pub use self::config::{QueryConfig, QueryVTable}; use crate::dep_graph::{DepNodeIndex, HasDepContext, SerializedDepNodeIndex}; use rustc_data_structures::sync::Lock; diff --git a/compiler/rustc_query_system/src/query/plumbing.rs b/compiler/rustc_query_system/src/query/plumbing.rs index 15b89daa6..848fa67e3 100644 --- a/compiler/rustc_query_system/src/query/plumbing.rs +++ b/compiler/rustc_query_system/src/query/plumbing.rs @@ -3,8 +3,9 @@ //! manage the caches, and so forth. 
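// Sketch of the vtable shape used in config.rs above (simplified, invented
// types): each query's behavior is bundled into a plain struct of flags and
// function pointers, so the shared plumbing can stay generic over one value
// instead of threading many trait bounds around.
struct SketchVTable<Tcx, K, V> {
    anon: bool,
    eval_always: bool,
    feedable: bool,
    compute: fn(Tcx, K) -> V,
    hash_result: Option<fn(&V) -> u64>,
    try_load_from_disk: Option<fn(Tcx, u32) -> Option<V>>,
}

impl<Tcx, K, V> SketchVTable<Tcx, K, V> {
    fn compute(&self, tcx: Tcx, key: K) -> V {
        // Call through the stored function pointer, as the real
        // `QueryVTable::compute` does.
        (self.compute)(tcx, key)
    }
}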
use crate::dep_graph::{DepContext, DepNode, DepNodeIndex, DepNodeParams}; +use crate::ich::StableHashingContext; use crate::query::caches::QueryCache; -use crate::query::config::{QueryDescription, QueryVTable}; +use crate::query::config::QueryVTable; use crate::query::job::{report_cycle, QueryInfo, QueryJob, QueryJobId, QueryJobInfo}; use crate::query::{QueryContext, QueryMap, QuerySideEffects, QueryStackFrame}; use crate::values::Value; @@ -19,6 +20,7 @@ use rustc_data_structures::sync::Lock; use rustc_errors::{DiagnosticBuilder, ErrorGuaranteed, FatalError}; use rustc_session::Session; use rustc_span::{Span, DUMMY_SP}; +use std::borrow::Borrow; use std::cell::Cell; use std::collections::hash_map::Entry; use std::fmt::Debug; @@ -27,6 +29,8 @@ use std::mem; use std::ptr; use thin_vec::ThinVec; +use super::QueryConfig; + pub struct QueryState { #[cfg(parallel_compiler)] active: Sharded>, @@ -60,10 +64,10 @@ where } } - pub fn try_collect_active_jobs( + pub fn try_collect_active_jobs( &self, - tcx: CTX, - make_query: fn(CTX, K) -> QueryStackFrame, + qcx: Qcx, + make_query: fn(Qcx, K) -> QueryStackFrame, jobs: &mut QueryMap, ) -> Option<()> { #[cfg(parallel_compiler)] @@ -74,7 +78,7 @@ where for shard in shards.iter() { for (k, v) in shard.iter() { if let QueryResult::Started(ref job) = *v { - let query = make_query(tcx, k.clone()); + let query = make_query(qcx, k.clone()); jobs.insert(job.id, QueryJobInfo { query, job: job.clone() }); } } @@ -88,7 +92,7 @@ where // really hurt much.) for (k, v) in self.active.try_lock()?.iter() { if let QueryResult::Started(ref job) = *v { - let query = make_query(tcx, k.clone()); + let query = make_query(qcx, k.clone()); jobs.insert(job.id, QueryJobInfo { query, job: job.clone() }); } } @@ -117,31 +121,31 @@ where #[cold] #[inline(never)] -fn mk_cycle( - tcx: CTX, +fn mk_cycle( + qcx: Qcx, cycle_error: CycleError, handler: HandleCycleError, cache: &dyn crate::query::QueryStorage, ) -> R where - CTX: QueryContext, - V: std::fmt::Debug + Value, + Qcx: QueryContext, + V: std::fmt::Debug + Value, R: Clone, { - let error = report_cycle(tcx.dep_context().sess(), &cycle_error); - let value = handle_cycle_error(*tcx.dep_context(), &cycle_error, error, handler); + let error = report_cycle(qcx.dep_context().sess(), &cycle_error); + let value = handle_cycle_error(*qcx.dep_context(), &cycle_error, error, handler); cache.store_nocache(value) } -fn handle_cycle_error( - tcx: CTX, +fn handle_cycle_error( + tcx: Tcx, cycle_error: &CycleError, mut error: DiagnosticBuilder<'_, ErrorGuaranteed>, handler: HandleCycleError, ) -> V where - CTX: DepContext, - V: Value, + Tcx: DepContext, + V: Value, { use HandleCycleError::*; match handler { @@ -174,14 +178,14 @@ where /// This function is inlined because that results in a noticeable speed-up /// for some compile-time benchmarks. #[inline(always)] - fn try_start<'b, CTX>( - tcx: &'b CTX, + fn try_start<'b, Qcx>( + qcx: &'b Qcx, state: &'b QueryState, span: Span, key: K, ) -> TryGetJob<'b, K> where - CTX: QueryContext, + Qcx: QueryContext, { #[cfg(parallel_compiler)] let mut state_lock = state.active.get_shard_by_value(&key).lock(); @@ -191,8 +195,8 @@ where match lock.entry(key) { Entry::Vacant(entry) => { - let id = tcx.next_job_id(); - let job = tcx.current_query_job(); + let id = qcx.next_job_id(); + let job = qcx.current_query_job(); let job = QueryJob::new(id, span, job); let key = entry.key().clone(); @@ -211,8 +215,8 @@ where // If we are single-threaded we know that we have cycle error, // so we just return the error. 
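// Sketch of the single-threaded claim-or-cycle logic in `try_start` below
// (invented, simplified types; the parallel compiler instead blocks on the
// running job's latch): starting a query claims its key in the active map,
// and hitting an already-claimed key on the same thread can only be a cycle.
use std::collections::HashMap;

enum SketchClaim {
    Started { job_id: u64 },
}

fn sketch_try_start(
    active: &mut HashMap<String, SketchClaim>,
    key: &str,
    next_id: &mut u64,
) -> Result<u64, String> {
    if active.contains_key(key) {
        // Same thread re-entering the same key: report a query cycle.
        return Err(format!("cycle while computing `{key}`"));
    }
    *next_id += 1;
    active.insert(key.to_string(), SketchClaim::Started { job_id: *next_id });
    Ok(*next_id)
}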
return TryGetJob::Cycle(id.find_cycle_in_stack( - tcx.try_collect_active_jobs().unwrap(), - &tcx.current_query_job(), + qcx.try_collect_active_jobs().unwrap(), + &qcx.current_query_job(), span, )); } @@ -221,7 +225,7 @@ where // For parallel queries, we'll block and wait until the query running // in another thread has completed. Record how long we wait in the // self-profiler. - let query_blocked_prof_timer = tcx.dep_context().profiler().query_blocked(); + let query_blocked_prof_timer = qcx.dep_context().profiler().query_blocked(); // Get the latch out let latch = job.latch(); @@ -230,7 +234,7 @@ where // With parallel queries we might just have to wait on some other // thread. - let result = latch.wait_on(tcx.current_query_job(), span); + let result = latch.wait_on(qcx.current_query_job(), span); match result { Ok(()) => TryGetJob::JobCompleted(query_blocked_prof_timer), @@ -333,8 +337,8 @@ where /// which will be used if the query is not in the cache and we need /// to compute it. #[inline] -pub fn try_get_cached<'a, CTX, C, R, OnHit>( - tcx: CTX, +pub fn try_get_cached<'a, Tcx, C, R, OnHit>( + tcx: Tcx, cache: &'a C, key: &C::Key, // `on_hit` can be called while holding a lock to the query cache @@ -342,7 +346,7 @@ pub fn try_get_cached<'a, CTX, C, R, OnHit>( ) -> Result where C: QueryCache, - CTX: DepContext, + Tcx: DepContext, OnHit: FnOnce(&C::Stored) -> R, { cache.lookup(&key, |value, index| { @@ -354,29 +358,44 @@ where }) } -fn try_execute_query( - tcx: CTX, +fn try_execute_query( + qcx: Qcx, state: &QueryState, cache: &C, span: Span, key: C::Key, - dep_node: Option>, - query: &QueryVTable, + dep_node: Option>, + query: &QueryVTable, ) -> (C::Stored, Option) where C: QueryCache, - C::Key: Clone + DepNodeParams, - C::Value: Value, - CTX: QueryContext, + C::Key: Clone + DepNodeParams, + C::Value: Value, + C::Stored: Debug + std::borrow::Borrow, + Qcx: QueryContext, { - match JobOwner::<'_, C::Key>::try_start(&tcx, state, span, key.clone()) { + match JobOwner::<'_, C::Key>::try_start(&qcx, state, span, key.clone()) { TryGetJob::NotYetStarted(job) => { - let (result, dep_node_index) = execute_job(tcx, key, dep_node, query, job.id); + let (result, dep_node_index) = execute_job(qcx, key.clone(), dep_node, query, job.id); + if query.feedable { + // We may have put a value inside the cache from inside the execution. + // Verify that it has the same hash as what we have now, to ensure consistency. 
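// Sketch of the `feedable` consistency check performed just below (plain
// `u64` hashing stands in for the stable fingerprint): if a value was fed
// into the cache while this query was executing, the freshly computed result
// must hash to the same fingerprint as the fed one.
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

fn sketch_fingerprint<V: Hash>(v: &V) -> u64 {
    let mut h = DefaultHasher::new();
    v.hash(&mut h);
    h.finish()
}

fn sketch_check_fed<V: Hash + std::fmt::Debug>(fed: &V, computed: &V) {
    debug_assert_eq!(
        sketch_fingerprint(fed),
        sketch_fingerprint(computed),
        "computed query value is inconsistent with fed value,\ncomputed={computed:#?}\nfed={fed:#?}",
    );
}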
+ let _ = cache.lookup(&key, |cached_result, _| { + let hasher = query.hash_result.expect("feedable forbids no_hash"); + let old_hash = qcx.dep_context().with_stable_hashing_context(|mut hcx| hasher(&mut hcx, cached_result.borrow())); + let new_hash = qcx.dep_context().with_stable_hashing_context(|mut hcx| hasher(&mut hcx, &result)); + debug_assert_eq!( + old_hash, new_hash, + "Computed query value for {:?}({:?}) is inconsistent with fed value,\ncomputed={:#?}\nfed={:#?}", + query.dep_kind, key, result, cached_result, + ); + }); + } let result = job.complete(cache, result, dep_node_index); (result, Some(dep_node_index)) } TryGetJob::Cycle(error) => { - let result = mk_cycle(tcx, error, query.handle_cycle_error, cache); + let result = mk_cycle(qcx, error, query.handle_cycle_error, cache); (result, None) } #[cfg(parallel_compiler)] @@ -385,8 +404,8 @@ where .lookup(&key, |value, index| (value.clone(), index)) .unwrap_or_else(|_| panic!("value must be in cache after waiting")); - if std::intrinsics::unlikely(tcx.dep_context().profiler().enabled()) { - tcx.dep_context().profiler().query_cache_hit(index.into()); + if std::intrinsics::unlikely(qcx.dep_context().profiler().enabled()) { + qcx.dep_context().profiler().query_cache_hit(index.into()); } query_blocked_prof_timer.finish_with_query_invocation_id(index.into()); @@ -395,25 +414,25 @@ where } } -fn execute_job( - tcx: CTX, +fn execute_job( + qcx: Qcx, key: K, - mut dep_node_opt: Option>, - query: &QueryVTable, + mut dep_node_opt: Option>, + query: &QueryVTable, job_id: QueryJobId, ) -> (V, DepNodeIndex) where - K: Clone + DepNodeParams, + K: Clone + DepNodeParams, V: Debug, - CTX: QueryContext, + Qcx: QueryContext, { - let dep_graph = tcx.dep_context().dep_graph(); + let dep_graph = qcx.dep_context().dep_graph(); // Fast path for when incr. comp. is off. if !dep_graph.is_fully_enabled() { - let prof_timer = tcx.dep_context().profiler().query_provider(); - let result = tcx.start_query(job_id, query.depth_limit, None, || { - query.compute(*tcx.dep_context(), key) + let prof_timer = qcx.dep_context().profiler().query_provider(); + let result = qcx.start_query(job_id, query.depth_limit, None, || { + query.compute(*qcx.dep_context(), key) }); let dep_node_index = dep_graph.next_virtual_depnode_index(); prof_timer.finish_with_query_invocation_id(dep_node_index.into()); @@ -423,33 +442,33 @@ where if !query.anon && !query.eval_always { // `to_dep_node` is expensive for some `DepKind`s. let dep_node = - dep_node_opt.get_or_insert_with(|| query.to_dep_node(*tcx.dep_context(), &key)); + dep_node_opt.get_or_insert_with(|| query.to_dep_node(*qcx.dep_context(), &key)); // The diagnostics for this query will be promoted to the current session during // `try_mark_green()`, so we can ignore them here. 
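// Sketch of the overall decision order in `execute_job` (toy closures in
// place of the real query machinery): only non-anonymous, non-eval_always
// queries may try the green-marking/on-disk path attempted just below;
// everything else computes directly.
struct SketchFlags {
    anon: bool,
    eval_always: bool,
}

fn sketch_execute_job<V>(
    flags: &SketchFlags,
    try_green_path: impl FnOnce() -> Option<V>,
    compute: impl FnOnce() -> V,
) -> V {
    if !flags.anon && !flags.eval_always {
        // Try to reuse the previous session's result before recomputing.
        if let Some(v) = try_green_path() {
            return v;
        }
    }
    compute()
}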
- if let Some(ret) = tcx.start_query(job_id, false, None, || { - try_load_from_disk_and_cache_in_memory(tcx, &key, &dep_node, query) + if let Some(ret) = qcx.start_query(job_id, false, None, || { + try_load_from_disk_and_cache_in_memory(qcx, &key, &dep_node, query) }) { return ret; } } - let prof_timer = tcx.dep_context().profiler().query_provider(); + let prof_timer = qcx.dep_context().profiler().query_provider(); let diagnostics = Lock::new(ThinVec::new()); let (result, dep_node_index) = - tcx.start_query(job_id, query.depth_limit, Some(&diagnostics), || { + qcx.start_query(job_id, query.depth_limit, Some(&diagnostics), || { if query.anon { - return dep_graph.with_anon_task(*tcx.dep_context(), query.dep_kind, || { - query.compute(*tcx.dep_context(), key) + return dep_graph.with_anon_task(*qcx.dep_context(), query.dep_kind, || { + query.compute(*qcx.dep_context(), key) }); } // `to_dep_node` is expensive for some `DepKind`s. let dep_node = - dep_node_opt.unwrap_or_else(|| query.to_dep_node(*tcx.dep_context(), &key)); + dep_node_opt.unwrap_or_else(|| query.to_dep_node(*qcx.dep_context(), &key)); - dep_graph.with_task(dep_node, *tcx.dep_context(), key, query.compute, query.hash_result) + dep_graph.with_task(dep_node, *qcx.dep_context(), key, query.compute, query.hash_result) }); prof_timer.finish_with_query_invocation_id(dep_node_index.into()); @@ -459,55 +478,55 @@ where if std::intrinsics::unlikely(!side_effects.is_empty()) { if query.anon { - tcx.store_side_effects_for_anon_node(dep_node_index, side_effects); + qcx.store_side_effects_for_anon_node(dep_node_index, side_effects); } else { - tcx.store_side_effects(dep_node_index, side_effects); + qcx.store_side_effects(dep_node_index, side_effects); } } (result, dep_node_index) } -fn try_load_from_disk_and_cache_in_memory( - tcx: CTX, +fn try_load_from_disk_and_cache_in_memory( + qcx: Qcx, key: &K, - dep_node: &DepNode, - query: &QueryVTable, + dep_node: &DepNode, + query: &QueryVTable, ) -> Option<(V, DepNodeIndex)> where K: Clone, - CTX: QueryContext, + Qcx: QueryContext, V: Debug, { // Note this function can be called concurrently from the same query // We must ensure that this is handled correctly. - let dep_graph = tcx.dep_context().dep_graph(); - let (prev_dep_node_index, dep_node_index) = dep_graph.try_mark_green(tcx, &dep_node)?; + let dep_graph = qcx.dep_context().dep_graph(); + let (prev_dep_node_index, dep_node_index) = dep_graph.try_mark_green(qcx, &dep_node)?; debug_assert!(dep_graph.is_green(dep_node)); // First we try to load the result from the on-disk cache. // Some things are never cached on disk. if let Some(try_load_from_disk) = query.try_load_from_disk { - let prof_timer = tcx.dep_context().profiler().incr_cache_loading(); + let prof_timer = qcx.dep_context().profiler().incr_cache_loading(); // The call to `with_query_deserialization` enforces that no new `DepNodes` // are created during deserialization. See the docs of that method for more // details. 
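// Sketch of the load-or-recompute path below (invented helper closures, with
// `u64` in place of `Fingerprint`): once the node is green, prefer the
// on-disk value, verifying its hash on a 1-in-32 sample as the real code
// does; if deserialization yields nothing, recompute and always verify.
fn sketch_load_or_recompute<V>(
    prev_fingerprint: u64,
    try_load_from_disk: impl FnOnce() -> Option<V>,
    recompute: impl FnOnce() -> V,
    hash: impl Fn(&V) -> u64,
) -> V {
    if let Some(v) = try_load_from_disk() {
        if prev_fingerprint % 32 == 0 {
            // Sampled check: loading should reproduce the stored fingerprint.
            assert_eq!(hash(&v), prev_fingerprint, "found unstable fingerprints");
        }
        return v;
    }
    let v = recompute();
    // A recomputed green result must always match the old fingerprint.
    assert_eq!(hash(&v), prev_fingerprint, "found unstable fingerprints");
    v
}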
let result = - dep_graph.with_query_deserialization(|| try_load_from_disk(tcx, prev_dep_node_index)); + dep_graph.with_query_deserialization(|| try_load_from_disk(qcx, prev_dep_node_index)); prof_timer.finish_with_query_invocation_id(dep_node_index.into()); if let Some(result) = result { if std::intrinsics::unlikely( - tcx.dep_context().sess().opts.unstable_opts.query_dep_graph, + qcx.dep_context().sess().opts.unstable_opts.query_dep_graph, ) { dep_graph.mark_debug_loaded_from_disk(*dep_node) } - let prev_fingerprint = tcx + let prev_fingerprint = qcx .dep_context() .dep_graph() .prev_fingerprint_of(dep_node) @@ -521,9 +540,9 @@ where // give us some coverage of potential bugs though. let try_verify = prev_fingerprint.as_value().1 % 32 == 0; if std::intrinsics::unlikely( - try_verify || tcx.dep_context().sess().opts.unstable_opts.incremental_verify_ich, + try_verify || qcx.dep_context().sess().opts.unstable_opts.incremental_verify_ich, ) { - incremental_verify_ich(*tcx.dep_context(), &result, dep_node, query); + incremental_verify_ich(*qcx.dep_context(), &result, dep_node, query.hash_result); } return Some((result, dep_node_index)); @@ -532,7 +551,7 @@ where // We always expect to find a cached result for things that // can be forced from `DepNode`. debug_assert!( - !tcx.dep_context().fingerprint_style(dep_node.kind).reconstructible(), + !qcx.dep_context().fingerprint_style(dep_node.kind).reconstructible(), "missing on-disk cache entry for {:?}", dep_node ); @@ -540,10 +559,10 @@ where // We could not load a result from the on-disk cache, so // recompute. - let prof_timer = tcx.dep_context().profiler().query_provider(); + let prof_timer = qcx.dep_context().profiler().query_provider(); // The dep-graph for this computation is already in-place. - let result = dep_graph.with_ignore(|| query.compute(*tcx.dep_context(), key.clone())); + let result = dep_graph.with_ignore(|| query.compute(*qcx.dep_context(), key.clone())); prof_timer.finish_with_query_invocation_id(dep_node_index.into()); @@ -556,18 +575,20 @@ where // // See issue #82920 for an example of a miscompilation that would get turned into // an ICE by this check - incremental_verify_ich(*tcx.dep_context(), &result, dep_node, query); + incremental_verify_ich(*qcx.dep_context(), &result, dep_node, query.hash_result); Some((result, dep_node_index)) } -fn incremental_verify_ich( - tcx: CTX::DepContext, +#[instrument(skip(tcx, result, hash_result), level = "debug")] +pub(crate) fn incremental_verify_ich( + tcx: Tcx, result: &V, - dep_node: &DepNode, - query: &QueryVTable, -) where - CTX: QueryContext, + dep_node: &DepNode, + hash_result: Option, &V) -> Fingerprint>, +) -> Fingerprint +where + Tcx: DepContext, { assert!( tcx.dep_graph().is_green(dep_node), @@ -575,16 +596,21 @@ fn incremental_verify_ich( dep_node, ); - debug!("BEGIN verify_ich({:?})", dep_node); - let new_hash = query.hash_result.map_or(Fingerprint::ZERO, |f| { + let new_hash = hash_result.map_or(Fingerprint::ZERO, |f| { tcx.with_stable_hashing_context(|mut hcx| f(&mut hcx, result)) }); + let old_hash = tcx.dep_graph().prev_fingerprint_of(dep_node); - debug!("END verify_ich({:?})", dep_node); if Some(new_hash) != old_hash { - incremental_verify_ich_cold(tcx.sess(), DebugArg::from(&dep_node), DebugArg::from(&result)); + incremental_verify_ich_failed( + tcx.sess(), + DebugArg::from(&dep_node), + DebugArg::from(&result), + ); } + + new_hash } // This DebugArg business is largely a mirror of std::fmt::ArgumentV1, which is @@ -629,13 +655,7 @@ impl std::fmt::Debug for 
DebugArg<'_> { // different implementations for LLVM to chew on (and filling up the final // binary, too). #[cold] -fn incremental_verify_ich_cold(sess: &Session, dep_node: DebugArg<'_>, result: DebugArg<'_>) { - let run_cmd = if let Some(crate_name) = &sess.opts.crate_name { - format!("`cargo clean -p {}` or `cargo clean`", crate_name) - } else { - "`cargo clean`".to_string() - }; - +fn incremental_verify_ich_failed(sess: &Session, dep_node: DebugArg<'_>, result: DebugArg<'_>) { // When we emit an error message and panic, we try to debug-print the `DepNode` // and query result. Unfortunately, this can cause us to run additional queries, // which may result in another fingerprint mismatch while we're in the middle @@ -651,6 +671,12 @@ fn incremental_verify_ich_cold(sess: &Session, dep_node: DebugArg<'_>, result: D if old_in_panic { sess.emit_err(crate::error::Reentrant); } else { + let run_cmd = if let Some(crate_name) = &sess.opts.crate_name { + format!("`cargo clean -p {}` or `cargo clean`", crate_name) + } else { + "`cargo clean`".to_string() + }; + sess.emit_err(crate::error::IncrementCompilation { run_cmd, dep_node: format!("{:?}", dep_node), @@ -670,14 +696,14 @@ fn incremental_verify_ich_cold(sess: &Session, dep_node: DebugArg<'_>, result: D /// /// Note: The optimization is only available during incr. comp. #[inline(never)] -fn ensure_must_run( - tcx: CTX, +fn ensure_must_run( + qcx: Qcx, key: &K, - query: &QueryVTable, -) -> (bool, Option>) + query: &QueryVTable, +) -> (bool, Option>) where - K: crate::dep_graph::DepNodeParams, - CTX: QueryContext, + K: crate::dep_graph::DepNodeParams, + Qcx: QueryContext, { if query.eval_always { return (true, None); @@ -686,10 +712,10 @@ where // Ensuring an anonymous query makes no sense assert!(!query.anon); - let dep_node = query.to_dep_node(*tcx.dep_context(), key); + let dep_node = query.to_dep_node(*qcx.dep_context(), key); - let dep_graph = tcx.dep_context().dep_graph(); - match dep_graph.try_mark_green(tcx, &dep_node) { + let dep_graph = qcx.dep_context().dep_graph(); + match dep_graph.try_mark_green(qcx, &dep_node) { None => { // A None return from `try_mark_green` means that this is either // a new dep node or that the dep node has already been marked red. 
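// Sketch of the `ensure` optimization implemented around here (toy closure
// in place of the dep graph): an ensured query's value is never read, so if
// the node can be marked green the query is skipped entirely; only
// eval_always queries and new/red nodes must actually run.
fn sketch_ensure_must_run(
    eval_always: bool,
    try_mark_green: impl FnOnce() -> Option<u32>, // Some(index) when green
) -> bool {
    if eval_always {
        return true; // eval_always queries are unconditionally re-executed
    }
    match try_mark_green() {
        Some(_index) => false, // green: the cached result is already valid
        None => true,          // new or red node: the query has to run
    }
}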
@@ -701,7 +727,7 @@ where } Some((_, dep_node_index)) => { dep_graph.read_index(dep_node_index); - tcx.dep_context().profiler().query_cache_hit(dep_node_index.into()); + qcx.dep_context().profiler().query_cache_hit(dep_node_index.into()); (false, None) } } @@ -713,16 +739,16 @@ pub enum QueryMode { Ensure, } -pub fn get_query(tcx: CTX, span: Span, key: Q::Key, mode: QueryMode) -> Option +pub fn get_query(qcx: Qcx, span: Span, key: Q::Key, mode: QueryMode) -> Option where - Q: QueryDescription, - Q::Key: DepNodeParams, - Q::Value: Value, - CTX: QueryContext, + Q: QueryConfig, + Q::Key: DepNodeParams, + Q::Value: Value, + Qcx: QueryContext, { - let query = Q::make_vtable(tcx, &key); + let query = Q::make_vtable(qcx, &key); let dep_node = if let QueryMode::Ensure = mode { - let (must_run, dep_node) = ensure_must_run(tcx, &key, &query); + let (must_run, dep_node) = ensure_must_run(qcx, &key, &query); if !must_run { return None; } @@ -732,33 +758,33 @@ where }; let (result, dep_node_index) = try_execute_query( - tcx, - Q::query_state(tcx), - Q::query_cache(tcx), + qcx, + Q::query_state(qcx), + Q::query_cache(qcx), span, key, dep_node, &query, ); if let Some(dep_node_index) = dep_node_index { - tcx.dep_context().dep_graph().read_index(dep_node_index) + qcx.dep_context().dep_graph().read_index(dep_node_index) } Some(result) } -pub fn force_query(tcx: CTX, key: Q::Key, dep_node: DepNode) +pub fn force_query(qcx: Qcx, key: Q::Key, dep_node: DepNode) where - Q: QueryDescription, - Q::Key: DepNodeParams, - Q::Value: Value, - CTX: QueryContext, + Q: QueryConfig, + Q::Key: DepNodeParams, + Q::Value: Value, + Qcx: QueryContext, { // We may be concurrently trying both execute and force a query. // Ensure that only one of them runs the query. - let cache = Q::query_cache(tcx); + let cache = Q::query_cache(qcx); let cached = cache.lookup(&key, |_, index| { - if std::intrinsics::unlikely(tcx.dep_context().profiler().enabled()) { - tcx.dep_context().profiler().query_cache_hit(index.into()); + if std::intrinsics::unlikely(qcx.dep_context().profiler().enabled()) { + qcx.dep_context().profiler().query_cache_hit(index.into()); } }); @@ -767,9 +793,9 @@ where Err(()) => {} } - let query = Q::make_vtable(tcx, &key); - let state = Q::query_state(tcx); + let query = Q::make_vtable(qcx, &key); + let state = Q::query_state(qcx); debug_assert!(!query.anon); - try_execute_query(tcx, state, cache, DUMMY_SP, key, Some(dep_node), &query); + try_execute_query(qcx, state, cache, DUMMY_SP, key, Some(dep_node), &query); } diff --git a/compiler/rustc_query_system/src/values.rs b/compiler/rustc_query_system/src/values.rs index 67fbf14e6..214656abe 100644 --- a/compiler/rustc_query_system/src/values.rs +++ b/compiler/rustc_query_system/src/values.rs @@ -1,12 +1,12 @@ use crate::dep_graph::DepContext; use crate::query::QueryInfo; -pub trait Value: Sized { - fn from_cycle_error(tcx: CTX, cycle: &[QueryInfo]) -> Self; +pub trait Value: Sized { + fn from_cycle_error(tcx: Tcx, cycle: &[QueryInfo]) -> Self; } -impl Value for T { - default fn from_cycle_error(tcx: CTX, _: &[QueryInfo]) -> T { +impl Value for T { + default fn from_cycle_error(tcx: Tcx, _: &[QueryInfo]) -> T { tcx.sess().abort_if_errors(); // Ideally we would use `bug!` here. But bug! is only defined in rustc_middle, and it's // non-trivial to define it earlier. -- cgit v1.2.3
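// Closing sketch of the values.rs change (simplified, invented types): cycle
// recovery is now generic over the dep context (`Tcx`) rather than the full
// query context, since constructing an error value only needs the session.
trait SketchDepCtx: Copy {
    fn abort_if_errors(self);
}

trait SketchValue<Tcx: SketchDepCtx>: Sized {
    fn from_cycle_error(tcx: Tcx) -> Self;
}

impl<Tcx: SketchDepCtx, T> SketchValue<Tcx> for T {
    fn from_cycle_error(tcx: Tcx) -> T {
        // A cycle should already have reported an error; bail out on it.
        tcx.abort_if_errors();
        // Reaching this point without a prior error is a compiler bug.
        panic!("query cycle produced no error")
    }
}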