author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-30 03:59:35 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-30 03:59:35 +0000
commit     d1b2d29528b7794b41e66fc2136e395a02f8529b (patch)
tree       a4a17504b260206dec3cf55b2dca82929a348ac2 /compiler/rustc_query_system/src
parent     Releasing progress-linux version 1.72.1+dfsg1-1~progress7.99u1. (diff)
Merging upstream version 1.73.0+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_query_system/src')
-rw-r--r--  compiler/rustc_query_system/src/dep_graph/graph.rs  17
-rw-r--r--  compiler/rustc_query_system/src/dep_graph/mod.rs      2
-rw-r--r--  compiler/rustc_query_system/src/error.rs              2
-rw-r--r--  compiler/rustc_query_system/src/query/caches.rs      56
-rw-r--r--  compiler/rustc_query_system/src/query/job.rs         74
5 files changed, 68 insertions(+), 83 deletions(-)
diff --git a/compiler/rustc_query_system/src/dep_graph/graph.rs b/compiler/rustc_query_system/src/dep_graph/graph.rs
index c9e80a6d9..30422ea11 100644
--- a/compiler/rustc_query_system/src/dep_graph/graph.rs
+++ b/compiler/rustc_query_system/src/dep_graph/graph.rs
@@ -1,6 +1,5 @@
-use parking_lot::Mutex;
 use rustc_data_structures::fingerprint::Fingerprint;
-use rustc_data_structures::fx::{FxHashMap, FxHashSet, FxIndexMap};
+use rustc_data_structures::fx::{FxHashMap, FxHashSet};
 use rustc_data_structures::profiling::{EventId, QueryInvocationId, SelfProfilerRef};
 use rustc_data_structures::sharded::{self, Sharded};
 use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
@@ -88,13 +87,13 @@ pub struct DepGraphData<K: DepKind> {
     colors: DepNodeColorMap,
 
-    processed_side_effects: Mutex<FxHashSet<DepNodeIndex>>,
+    processed_side_effects: Lock<FxHashSet<DepNodeIndex>>,
 
     /// When we load, there may be `.o` files, cached MIR, or other such
     /// things available to us. If we find that they are not dirty, we
     /// load the path to the file storing those work-products here into
     /// this map. We can later look for and extract that data.
-    previous_work_products: FxIndexMap<WorkProductId, WorkProduct>,
+    previous_work_products: WorkProductMap,
 
     dep_node_debug: Lock<FxHashMap<DepNode<K>, String>>,
 
@@ -117,7 +116,7 @@ impl<K: DepKind> DepGraph<K> {
     pub fn new(
         profiler: &SelfProfilerRef,
         prev_graph: SerializedDepGraph<K>,
-        prev_work_products: FxIndexMap<WorkProductId, WorkProduct>,
+        prev_work_products: WorkProductMap,
         encoder: FileEncoder,
         record_graph: bool,
         record_stats: bool,
@@ -557,7 +556,7 @@ impl<K: DepKind> DepGraph<K> {
                 result,
                 prev_index,
                 hash_result,
-                |value| format!("{:?}", value),
+                |value| format!("{value:?}"),
             );
 
             #[cfg(debug_assertions)]
@@ -689,7 +688,7 @@ impl<K: DepKind> DepGraph<K> {
     /// Access the map of work-products created during the cached run. Only
     /// used during saving of the dep-graph.
-    pub fn previous_work_products(&self) -> &FxIndexMap<WorkProductId, WorkProduct> {
+    pub fn previous_work_products(&self) -> &WorkProductMap {
         &self.data.as_ref().unwrap().previous_work_products
     }
 
@@ -1052,6 +1051,8 @@ pub struct WorkProduct {
     pub saved_files: UnordMap<String, String>,
 }
 
+pub type WorkProductMap = UnordMap<WorkProductId, WorkProduct>;
+
 // Index type for `DepNodeData`'s edges.
 rustc_index::newtype_index! {
     struct EdgeIndex {}
 }
@@ -1433,7 +1434,7 @@ pub(crate) fn print_markframe_trace<K: DepKind>(
     let mut current = frame;
     while let Some(frame) = current {
         let node = data.previous.index_to_node(frame.index);
-        eprintln!("#{i} {:?}", node);
+        eprintln!("#{i} {node:?}");
         current = frame.parent;
         i += 1;
     }
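The notable refactor in graph.rs is the new `WorkProductMap` alias over `UnordMap`. A minimal, std-only sketch of why such an alias pays off; `WorkProductId`, `WorkProduct`, and `HashMap` here are illustrative stand-ins for rustc's interned types and `UnordMap`, not the real definitions:

use std::collections::HashMap;

// Stand-in for rustc's interned work-product identifier.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
struct WorkProductId(String);

// Stand-in for rustc's `WorkProduct { saved_files: UnordMap<String, String> }`.
#[derive(Clone, Debug)]
struct WorkProduct {
    saved_files: HashMap<String, String>,
}

// Single point of truth for the map type, mirroring
// `pub type WorkProductMap = UnordMap<WorkProductId, WorkProduct>;`.
type WorkProductMap = HashMap<WorkProductId, WorkProduct>;

// Callers only name the alias, so swapping the backing map (as the diff
// does, FxIndexMap -> UnordMap) changes no signatures.
fn count_work_products(map: &WorkProductMap) -> usize {
    map.len()
}

fn main() {
    let mut map = WorkProductMap::new();
    map.insert(
        WorkProductId("cgu-0".to_owned()),
        WorkProduct { saved_files: HashMap::new() },
    );
    assert_eq!(count_work_products(&map), 1);
}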
diff --git a/compiler/rustc_query_system/src/dep_graph/mod.rs b/compiler/rustc_query_system/src/dep_graph/mod.rs
index 40e713198..0fd9e35d6 100644
--- a/compiler/rustc_query_system/src/dep_graph/mod.rs
+++ b/compiler/rustc_query_system/src/dep_graph/mod.rs
@@ -7,7 +7,7 @@ mod serialized;
 pub use dep_node::{DepKindStruct, DepNode, DepNodeParams, WorkProductId};
 pub use graph::{
     hash_result, DepGraph, DepGraphData, DepNodeColor, DepNodeIndex, TaskDeps, TaskDepsRef,
-    WorkProduct,
+    WorkProduct, WorkProductMap,
 };
 pub use query::DepGraphQuery;
 pub use serialized::{SerializedDepGraph, SerializedDepNodeIndex};
diff --git a/compiler/rustc_query_system/src/error.rs b/compiler/rustc_query_system/src/error.rs
index cf2f04c74..e49e78cc7 100644
--- a/compiler/rustc_query_system/src/error.rs
+++ b/compiler/rustc_query_system/src/error.rs
@@ -57,6 +57,8 @@ pub struct Cycle {
     pub alias: Option<Alias>,
     #[subdiagnostic]
     pub cycle_usage: Option<CycleUsage>,
+    #[note]
+    pub note_span: (),
 }
 
 #[derive(Diagnostic)]
diff --git a/compiler/rustc_query_system/src/query/caches.rs b/compiler/rustc_query_system/src/query/caches.rs
index 9a09f516e..4ba9d53a9 100644
--- a/compiler/rustc_query_system/src/query/caches.rs
+++ b/compiler/rustc_query_system/src/query/caches.rs
@@ -1,9 +1,7 @@
 use crate::dep_graph::DepNodeIndex;
 
 use rustc_data_structures::fx::FxHashMap;
-use rustc_data_structures::sharded;
-#[cfg(parallel_compiler)]
-use rustc_data_structures::sharded::Sharded;
+use rustc_data_structures::sharded::{self, Sharded};
 use rustc_data_structures::sync::Lock;
 use rustc_index::{Idx, IndexVec};
 use std::fmt::Debug;
@@ -37,10 +35,7 @@ impl<'tcx, K: Eq + Hash, V: 'tcx> CacheSelector<'tcx, V> for DefaultCacheSelecto
 }
 
 pub struct DefaultCache<K, V> {
-    #[cfg(parallel_compiler)]
     cache: Sharded<FxHashMap<K, (V, DepNodeIndex)>>,
-    #[cfg(not(parallel_compiler))]
-    cache: Lock<FxHashMap<K, (V, DepNodeIndex)>>,
 }
 
 impl<K, V> Default for DefaultCache<K, V> {
@@ -60,10 +55,7 @@ where
     #[inline(always)]
     fn lookup(&self, key: &K) -> Option<(V, DepNodeIndex)> {
         let key_hash = sharded::make_hash(key);
-        #[cfg(parallel_compiler)]
         let lock = self.cache.get_shard_by_hash(key_hash).lock();
-        #[cfg(not(parallel_compiler))]
-        let lock = self.cache.lock();
         let result = lock.raw_entry().from_key_hashed_nocheck(key_hash, key);
 
         if let Some((_, value)) = result { Some(*value) } else { None }
@@ -71,29 +63,16 @@
     #[inline]
     fn complete(&self, key: K, value: V, index: DepNodeIndex) {
-        #[cfg(parallel_compiler)]
         let mut lock = self.cache.get_shard_by_value(&key).lock();
-        #[cfg(not(parallel_compiler))]
-        let mut lock = self.cache.lock();
         // We may be overwriting another value. This is all right, since the dep-graph
         // will check that the fingerprint matches.
         lock.insert(key, (value, index));
     }
 
     fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
-        #[cfg(parallel_compiler)]
-        {
-            let shards = self.cache.lock_shards();
-            for shard in shards.iter() {
-                for (k, v) in shard.iter() {
-                    f(k, &v.0, v.1);
-                }
-            }
-        }
-        #[cfg(not(parallel_compiler))]
-        {
-            let map = self.cache.lock();
-            for (k, v) in map.iter() {
+        let shards = self.cache.lock_shards();
+        for shard in shards.iter() {
+            for (k, v) in shard.iter() {
                 f(k, &v.0, v.1);
             }
         }
@@ -151,10 +130,7 @@ impl<'tcx, K: Idx, V: 'tcx> CacheSelector<'tcx, V> for VecCacheSelector<K> {
 }
 
 pub struct VecCache<K: Idx, V> {
-    #[cfg(parallel_compiler)]
     cache: Sharded<IndexVec<K, Option<(V, DepNodeIndex)>>>,
-    #[cfg(not(parallel_compiler))]
-    cache: Lock<IndexVec<K, Option<(V, DepNodeIndex)>>>,
 }
 
 impl<K: Idx, V> Default for VecCache<K, V> {
@@ -173,38 +149,20 @@ where
     #[inline(always)]
     fn lookup(&self, key: &K) -> Option<(V, DepNodeIndex)> {
-        #[cfg(parallel_compiler)]
         let lock = self.cache.get_shard_by_hash(key.index() as u64).lock();
-        #[cfg(not(parallel_compiler))]
-        let lock = self.cache.lock();
         if let Some(Some(value)) = lock.get(*key) { Some(*value) } else { None }
     }
 
     #[inline]
     fn complete(&self, key: K, value: V, index: DepNodeIndex) {
-        #[cfg(parallel_compiler)]
         let mut lock = self.cache.get_shard_by_hash(key.index() as u64).lock();
-        #[cfg(not(parallel_compiler))]
-        let mut lock = self.cache.lock();
         lock.insert(key, (value, index));
     }
 
     fn iter(&self, f: &mut dyn FnMut(&Self::Key, &Self::Value, DepNodeIndex)) {
-        #[cfg(parallel_compiler)]
-        {
-            let shards = self.cache.lock_shards();
-            for shard in shards.iter() {
-                for (k, v) in shard.iter_enumerated() {
-                    if let Some(v) = v {
-                        f(&k, &v.0, v.1);
-                    }
-                }
-            }
-        }
-        #[cfg(not(parallel_compiler))]
-        {
-            let map = self.cache.lock();
-            for (k, v) in map.iter_enumerated() {
+        let shards = self.cache.lock_shards();
+        for shard in shards.iter() {
+            for (k, v) in shard.iter_enumerated() {
                 if let Some(v) = v {
                     f(&k, &v.0, v.1);
                 }
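The caches.rs change drops the `#[cfg(parallel_compiler)]` split and uses `Sharded` unconditionally: hash the key once, pick a shard from the hash, and lock only that shard, so concurrent queries rarely contend. A minimal, std-only sketch of the same shard-by-hash pattern; the shard count, hasher, and all names are simplified stand-ins, not `rustc_data_structures::sharded`:

use std::collections::hash_map::DefaultHasher;
use std::collections::HashMap;
use std::hash::{Hash, Hasher};
use std::sync::Mutex;

const SHARDS: usize = 16; // rustc derives its shard count differently

struct ShardedCache<K, V> {
    shards: Vec<Mutex<HashMap<K, V>>>,
}

impl<K: Eq + Hash, V: Clone> ShardedCache<K, V> {
    fn new() -> Self {
        ShardedCache { shards: (0..SHARDS).map(|_| Mutex::new(HashMap::new())).collect() }
    }

    // Stand-in for `sharded::make_hash`.
    fn make_hash(key: &K) -> u64 {
        let mut h = DefaultHasher::new();
        key.hash(&mut h);
        h.finish()
    }

    // Mirrors `lookup`: one hash, one shard lock, no global contention.
    fn lookup(&self, key: &K) -> Option<V> {
        let hash = Self::make_hash(key);
        let shard = &self.shards[(hash as usize) % SHARDS];
        shard.lock().unwrap().get(key).cloned()
    }

    // Mirrors `complete`: overwriting an existing entry is acceptable here too.
    fn complete(&self, key: K, value: V) {
        let hash = Self::make_hash(&key);
        let shard = &self.shards[(hash as usize) % SHARDS];
        shard.lock().unwrap().insert(key, value);
    }
}

fn main() {
    let cache = ShardedCache::new();
    cache.complete("query-key", 42u32);
    assert_eq!(cache.lookup(&"query-key"), Some(42));
}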
diff --git a/compiler/rustc_query_system/src/query/job.rs b/compiler/rustc_query_system/src/query/job.rs
index f45f7ca5d..1b1248924 100644
--- a/compiler/rustc_query_system/src/query/job.rs
+++ b/compiler/rustc_query_system/src/query/job.rs
@@ -13,6 +13,7 @@ use rustc_session::Session;
 use rustc_span::Span;
 
 use std::hash::Hash;
+use std::io::Write;
 use std::num::NonZeroU64;
 
 #[cfg(parallel_compiler)]
@@ -20,12 +21,11 @@ use {
     parking_lot::{Condvar, Mutex},
     rayon_core,
     rustc_data_structures::fx::FxHashSet,
-    rustc_data_structures::sync::Lock,
-    rustc_data_structures::sync::Lrc,
     rustc_data_structures::{defer, jobserver},
     rustc_span::DUMMY_SP,
     std::iter,
     std::process,
+    std::sync::Arc,
 };
 
 /// Represents a span and a query key.
@@ -190,7 +190,7 @@ struct QueryWaiter<D: DepKind> {
     query: Option<QueryJobId>,
     condvar: Condvar,
     span: Span,
-    cycle: Lock<Option<CycleError<D>>>,
+    cycle: Mutex<Option<CycleError<D>>>,
 }
 
 #[cfg(parallel_compiler)]
@@ -204,20 +204,20 @@ impl<D: DepKind> QueryWaiter<D> {
 #[cfg(parallel_compiler)]
 struct QueryLatchInfo<D: DepKind> {
     complete: bool,
-    waiters: Vec<Lrc<QueryWaiter<D>>>,
+    waiters: Vec<Arc<QueryWaiter<D>>>,
 }
 
 #[cfg(parallel_compiler)]
 #[derive(Clone)]
 pub(super) struct QueryLatch<D: DepKind> {
-    info: Lrc<Mutex<QueryLatchInfo<D>>>,
+    info: Arc<Mutex<QueryLatchInfo<D>>>,
 }
 
 #[cfg(parallel_compiler)]
 impl<D: DepKind> QueryLatch<D> {
     fn new() -> Self {
         QueryLatch {
-            info: Lrc::new(Mutex::new(QueryLatchInfo { complete: false, waiters: Vec::new() })),
+            info: Arc::new(Mutex::new(QueryLatchInfo { complete: false, waiters: Vec::new() })),
         }
     }
 
@@ -228,11 +228,11 @@ impl<D: DepKind> QueryLatch<D> {
         span: Span,
     ) -> Result<(), CycleError<D>> {
         let waiter =
-            Lrc::new(QueryWaiter { query, span, cycle: Lock::new(None), condvar: Condvar::new() });
+            Arc::new(QueryWaiter { query, span, cycle: Mutex::new(None), condvar: Condvar::new() });
         self.wait_on_inner(&waiter);
         // FIXME: Get rid of this lock. We have ownership of the QueryWaiter
-        // although another thread may still have a Lrc reference so we cannot
-        // use Lrc::get_mut
+        // although another thread may still have a Arc reference so we cannot
+        // use Arc::get_mut
         let mut cycle = waiter.cycle.lock();
         match cycle.take() {
             None => Ok(()),
@@ -241,7 +241,7 @@ impl<D: DepKind> QueryLatch<D> {
     }
 
     /// Awaits the caller on this latch by blocking the current thread.
-    fn wait_on_inner(&self, waiter: &Lrc<QueryWaiter<D>>) {
+    fn wait_on_inner(&self, waiter: &Arc<QueryWaiter<D>>) {
         let mut info = self.info.lock();
         if !info.complete {
             // We push the waiter on to the `waiters` list. It can be accessed inside
@@ -275,7 +275,7 @@ impl<D: DepKind> QueryLatch<D> {
 
     /// Removes a single waiter from the list of waiters.
     /// This is used to break query cycles.
-    fn extract_waiter(&self, waiter: usize) -> Lrc<QueryWaiter<D>> {
+    fn extract_waiter(&self, waiter: usize) -> Arc<QueryWaiter<D>> {
         let mut info = self.info.lock();
         debug_assert!(!info.complete);
         // Remove the waiter from the list of waiters
@@ -427,7 +427,7 @@ where
 fn remove_cycle<D: DepKind>(
     query_map: &QueryMap<D>,
     jobs: &mut Vec<QueryJobId>,
-    wakelist: &mut Vec<Lrc<QueryWaiter<D>>>,
+    wakelist: &mut Vec<Arc<QueryWaiter<D>>>,
 ) -> bool {
     let mut visited = FxHashSet::default();
     let mut stack = Vec::new();
@@ -592,7 +592,10 @@ pub(crate) fn report_cycle<'a, D: DepKind>(
         });
     }
 
-    let alias = if stack.iter().all(|entry| entry.query.def_kind == Some(DefKind::TyAlias)) {
+    let alias = if stack
+        .iter()
+        .all(|entry| matches!(entry.query.def_kind, Some(DefKind::TyAlias { .. })))
+    {
         Some(crate::error::Alias::Ty)
     } else if stack.iter().all(|entry| entry.query.def_kind == Some(DefKind::TraitAlias)) {
         Some(crate::error::Alias::Trait)
@@ -607,6 +610,7 @@ pub(crate) fn report_cycle<'a, D: DepKind>(
         alias,
         cycle_usage: cycle_usage,
         stack_count,
+        note_span: (),
     };
 
     cycle_diag.into_diagnostic(&sess.parse_sess.span_diagnostic)
@@ -617,30 +621,50 @@ pub fn print_query_stack<Qcx: QueryContext>(
     mut current_query: Option<QueryJobId>,
     handler: &Handler,
     num_frames: Option<usize>,
+    mut file: Option<std::fs::File>,
 ) -> usize {
     // Be careful relying on global state here: this code is called from
     // a panic hook, which means that the global `Handler` may be in a weird
     // state if it was responsible for triggering the panic.
-    let mut i = 0;
+    let mut count_printed = 0;
+    let mut count_total = 0;
     let query_map = qcx.try_collect_active_jobs();
 
+    if let Some(ref mut file) = file {
+        let _ = writeln!(file, "\n\nquery stack during panic:");
+    }
     while let Some(query) = current_query {
-        if Some(i) == num_frames {
-            break;
-        }
         let Some(query_info) = query_map.as_ref().and_then(|map| map.get(&query)) else {
             break;
         };
-        let mut diag = Diagnostic::new(
-            Level::FailureNote,
-            format!("#{} [{:?}] {}", i, query_info.query.dep_kind, query_info.query.description),
-        );
-        diag.span = query_info.job.span.into();
-        handler.force_print_diagnostic(diag);
+        if Some(count_printed) < num_frames || num_frames.is_none() {
+            // Only print to stderr as many stack frames as `num_frames` when present.
+            let mut diag = Diagnostic::new(
+                Level::FailureNote,
+                format!(
+                    "#{} [{:?}] {}",
+                    count_printed, query_info.query.dep_kind, query_info.query.description
+                ),
+            );
+            diag.span = query_info.job.span.into();
+            handler.force_print_diagnostic(diag);
+            count_printed += 1;
+        }
+
+        if let Some(ref mut file) = file {
+            let _ = writeln!(
                file,
                "#{} [{:?}] {}",
                count_total, query_info.query.dep_kind, query_info.query.description
            );
+        }
 
         current_query = query_info.job.parent;
-        i += 1;
+        count_total += 1;
     }
 
-    i
+    if let Some(ref mut file) = file {
+        let _ = writeln!(file, "end of query stack");
+    }
+    count_printed
 }
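In job.rs, `Lrc` (rustc's reference-counting alias, which degrades to plain `Rc` in non-parallel builds) becomes a hard `Arc`, and the waiter's `cycle` slot becomes a `Mutex`, since the latch machinery only exists under `parallel_compiler` and is genuinely shared across threads. A std-only sketch of the latch idea the code implements, with waiters parked on a `Condvar` until completion; all names and the single-flag state are stand-ins, not rustc's `QueryLatch`:

use std::sync::{Arc, Condvar, Mutex};
use std::thread;

// (completed?, wakeup signal), shared by every waiter via Arc — the same
// reason the diff stores `Arc<QueryWaiter>` rather than `Lrc`.
struct Latch {
    inner: Arc<(Mutex<bool>, Condvar)>,
}

impl Latch {
    fn new() -> Self {
        Latch { inner: Arc::new((Mutex::new(false), Condvar::new())) }
    }

    // Roughly `wait_on_inner`: block the thread until the latch completes.
    fn wait(&self) {
        let (lock, cvar) = &*self.inner;
        let mut complete = lock.lock().unwrap();
        while !*complete {
            complete = cvar.wait(complete).unwrap();
        }
    }

    // Roughly `set`: mark complete and wake every parked waiter.
    fn set(&self) {
        let (lock, cvar) = &*self.inner;
        *lock.lock().unwrap() = true;
        cvar.notify_all();
    }
}

fn main() {
    let latch = Latch::new();
    let waiter = Latch { inner: Arc::clone(&latch.inner) };
    let t = thread::spawn(move || waiter.wait());
    latch.set();
    t.join().unwrap();
}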
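The `print_query_stack` rework threads an optional `File` through the panic hook: stderr still honors `num_frames`, while the file always receives the full stack, which is why the single counter `i` splits into `count_printed` and `count_total`. A sketch of that dual-sink counting logic in isolation; the function name and string frames are stand-ins, and only the counting mirrors the diff:

use std::fs::File;
use std::io::Write;

fn print_stack(frames: &[&str], num_frames: Option<usize>, mut file: Option<File>) -> usize {
    let mut count_printed = 0; // frames sent to stderr, capped by num_frames
    let mut count_total = 0; // frames sent to the file, never capped

    if let Some(ref mut file) = file {
        let _ = writeln!(file, "\n\nquery stack during panic:");
    }
    for frame in frames {
        // Equivalent to `Some(count_printed) < num_frames || num_frames.is_none()`.
        if num_frames.map_or(true, |n| count_printed < n) {
            eprintln!("#{count_printed} {frame}");
            count_printed += 1;
        }
        if let Some(ref mut file) = file {
            let _ = writeln!(file, "#{count_total} {frame}");
        }
        count_total += 1;
    }
    if let Some(ref mut file) = file {
        let _ = writeln!(file, "end of query stack");
    }
    count_printed
}

fn main() {
    // With no file and a two-frame limit, only two frames reach stderr,
    // and the return value reports how many were printed there.
    let printed = print_stack(&["typeck", "mir_built", "optimized_mir"], Some(2), None);
    assert_eq!(printed, 2);
}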