author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:19:50 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:19:50 +0000
commit     2e00214b3efbdfeefaa0fe9e8b8fd519de7adc35 (patch)
tree       d325add32978dbdc1db975a438b3a77d571b1ab8 /compiler/rustc_data_structures
parent     Releasing progress-linux version 1.68.2+dfsg1-1~progress7.99u1. (diff)
Merging upstream version 1.69.0+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_data_structures')
13 files changed, 215 insertions, 40 deletions
diff --git a/compiler/rustc_data_structures/Cargo.toml b/compiler/rustc_data_structures/Cargo.toml
index 0366fb0a1..29cb2c0a3 100644
--- a/compiler/rustc_data_structures/Cargo.toml
+++ b/compiler/rustc_data_structures/Cargo.toml
@@ -9,7 +9,7 @@ edition = "2021"
 arrayvec = { version = "0.7", default-features = false }
 bitflags = "1.2.1"
 cfg-if = "1.0"
-ena = "0.14"
+ena = "0.14.1"
 indexmap = { version = "1.9.1" }
 jobserver_crate = { version = "0.1.13", package = "jobserver" }
 libc = "0.2"
@@ -29,8 +29,9 @@ smallvec = { version = "1.8.1", features = [
 stable_deref_trait = "1.0.0"
 stacker = "0.1.15"
 tempfile = "3.2"
-thin-vec = "0.2.9"
+thin-vec = "0.2.12"
 tracing = "0.1"
+elsa = "1.8"
 
 [dependencies.parking_lot]
 version = "0.11"
diff --git a/compiler/rustc_data_structures/src/functor.rs b/compiler/rustc_data_structures/src/functor.rs
index 84cb417dd..28fcf80b3 100644
--- a/compiler/rustc_data_structures/src/functor.rs
+++ b/compiler/rustc_data_structures/src/functor.rs
@@ -1,5 +1,5 @@
 use rustc_index::vec::{Idx, IndexVec};
-use std::mem;
+use std::{mem, rc::Rc, sync::Arc};
 
 pub trait IdFunctor: Sized {
     type Inner;
@@ -65,3 +65,52 @@ impl<I: Idx, T> IdFunctor for IndexVec<I, T> {
         self.raw.try_map_id(f).map(IndexVec::from_raw)
     }
 }
+
+macro_rules! rc {
+    ($($rc:ident),+) => {$(
+        impl<T: Clone> IdFunctor for $rc<T> {
+            type Inner = T;
+
+            #[inline]
+            fn try_map_id<F, E>(mut self, mut f: F) -> Result<Self, E>
+            where
+                F: FnMut(Self::Inner) -> Result<Self::Inner, E>,
+            {
+                // We merely want to replace the contained `T`, if at all possible,
+                // so that we don't needlessly allocate a new `$rc` or indeed clone
+                // the contained type.
+                unsafe {
+                    // First step is to ensure that we have a unique reference to
+                    // the contained type, which `$rc::make_mut` will accomplish (by
+                    // allocating a new `$rc` and cloning the `T` only if required).
+                    // This is done *before* casting to `$rc<ManuallyDrop<T>>` so that
+                    // panicking during `make_mut` does not leak the `T`.
+                    $rc::make_mut(&mut self);
+
+                    // Casting to `$rc<ManuallyDrop<T>>` is safe because `ManuallyDrop`
+                    // is `repr(transparent)`.
+                    let ptr = $rc::into_raw(self).cast::<mem::ManuallyDrop<T>>();
+                    let mut unique = $rc::from_raw(ptr);
+
+                    // Call to `$rc::make_mut` above guarantees that `unique` is the
+                    // sole reference to the contained value, so we can avoid doing
+                    // a checked `get_mut` here.
+                    let slot = $rc::get_mut_unchecked(&mut unique);
+
+                    // Semantically move the contained type out from `unique`, fold
+                    // it, then move the folded value back into `unique`. Should
+                    // folding fail, `ManuallyDrop` ensures that the "moved-out"
+                    // value is not re-dropped.
+                    let owned = mem::ManuallyDrop::take(slot);
+                    let folded = f(owned)?;
+                    *slot = mem::ManuallyDrop::new(folded);
+
+                    // Cast back to `$rc<T>`.
+                    Ok($rc::from_raw($rc::into_raw(unique).cast()))
+                }
+            }
+        }
+    )+};
+}
+
+rc! { Rc, Arc }
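The `rc!` macro above avoids cloning the inner `T` by combining `make_mut` with a `ManuallyDrop` cast. For comparison, here is a safe sketch of the same in-place-map idea without the nightly `get_mut_unchecked` (`try_map_rc` is a hypothetical name; note the extra `Default` bound for a placeholder value, which is exactly what the `ManuallyDrop` trick removes):

    use std::rc::Rc;

    // Ensure unique ownership, move the value out, fold it, move it back.
    // `T: Default` supplies a cheap placeholder while the slot is empty;
    // the compiler's version uses `ManuallyDrop` instead to avoid that bound.
    fn try_map_rc<T: Clone + Default, E, F>(mut rc: Rc<T>, f: F) -> Result<Rc<T>, E>
    where
        F: FnOnce(T) -> Result<T, E>,
    {
        // Clones the inner value only if the Rc is shared.
        let slot = Rc::make_mut(&mut rc);
        let owned = std::mem::take(slot);
        // On error, the placeholder is simply dropped with `rc`; nothing leaks.
        *slot = f(owned)?;
        Ok(rc)
    }

    fn main() {
        let rc = try_map_rc(Rc::new(10), |n: i32| Ok::<_, ()>(n + 1)).unwrap();
        assert_eq!(*rc, 11);
    }
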
diff --git a/compiler/rustc_data_structures/src/graph/dominators/mod.rs b/compiler/rustc_data_structures/src/graph/dominators/mod.rs
index 471457f61..0a21a4249 100644
--- a/compiler/rustc_data_structures/src/graph/dominators/mod.rs
+++ b/compiler/rustc_data_structures/src/graph/dominators/mod.rs
@@ -135,7 +135,47 @@ pub fn dominators<G: ControlFlowGraph>(graph: G) -> Dominators<G::Node> {
         // This loop computes the semi[w] for w.
         semi[w] = w;
         for v in graph.predecessors(pre_order_to_real[w]) {
-            // Reachable vertices may have unreachable predecessors, so ignore any of them
+            // TL;DR: Reachable vertices may have unreachable predecessors, so ignore any of them.
+            //
+            // Ignore blocks which are not connected to the entry block.
+            //
+            // The algorithm that was used to traverse the graph and build the
+            // `pre_order_to_real` and `real_to_pre_order` vectors does so by
+            // starting from the entry block and following the successors.
+            // Therefore, any blocks not reachable from the entry block will be
+            // set to `None` in the `pre_order_to_real` vector.
+            //
+            // For example, in this graph, A and B should be skipped:
+            //
+            //           ┌─────┐
+            //           │     │
+            //           └──┬──┘
+            //              │
+            //           ┌──▼──┐              ┌─────┐
+            //           │     │              │  A  │
+            //           └──┬──┘              └──┬──┘
+            //              │                    │
+            //      ┌───────┴───────┐            │
+            //      │               │            │
+            //   ┌──▼──┐         ┌──▼──┐      ┌──▼──┐
+            //   │     │         │     │      │  B  │
+            //   └──┬──┘         └──┬──┘      └──┬──┘
+            //      │               └──────┬─────┘
+            //   ┌──▼──┐                   │
+            //   │     │                   │
+            //   └──┬──┘                ┌──▼──┐
+            //      │                   │     │
+            //      │                   └─────┘
+            //   ┌──▼──┐
+            //   │     │
+            //   └──┬──┘
+            //      │
+            //   ┌──▼──┐
+            //   │     │
+            //   └─────┘
+            //
+            // ...this may be the case if a MirPass modifies the CFG to remove
+            // or rearrange certain blocks/edges.
             let Some(v) = real_to_pre_order[v] else {
                 continue
             };
@@ -264,13 +304,18 @@ fn compress(
     }
 }
 
+/// Tracks the list of dominators for each node.
 #[derive(Clone, Debug)]
 pub struct Dominators<N: Idx> {
     post_order_rank: IndexVec<N, usize>,
+    // Even though we track only the immediate dominator of each node, it's
+    // possible to get its full list of dominators by looking up the dominator
+    // of each dominator. (See the `impl Iterator for Iter` definition).
     immediate_dominators: IndexVec<N, Option<N>>,
 }
 
 impl<Node: Idx> Dominators<Node> {
+    /// Whether the given Node has an immediate dominator.
     pub fn is_reachable(&self, node: Node) -> bool {
         self.immediate_dominators[node].is_some()
     }
@@ -280,12 +325,14 @@ impl<Node: Idx> Dominators<Node> {
         self.immediate_dominators[node].unwrap()
     }
 
+    /// Provides an iterator over each dominator up the CFG, for the given Node.
+    /// See the `impl Iterator for Iter` definition to understand how this works.
     pub fn dominators(&self, node: Node) -> Iter<'_, Node> {
         assert!(self.is_reachable(node), "node {node:?} is not reachable");
         Iter { dominators: self, node: Some(node) }
     }
 
-    pub fn is_dominated_by(&self, node: Node, dom: Node) -> bool {
+    pub fn dominates(&self, dom: Node, node: Node) -> bool {
        // FIXME -- could be optimized by using post-order-rank
         self.dominators(node).any(|n| n == dom)
     }
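The new comments refer to the `impl Iterator for Iter` definition. As a rough model of how that iterator works (simplified to plain `usize` indices, not the compiler's exact code), the full dominator set of a node is recovered by chasing immediate dominators until the entry block, which is its own immediate dominator, is reached; the renamed `dominates(dom, node)` is then just a membership test on that chain:

    // Simplified model: `idom[n]` is the immediate dominator of node `n`;
    // the entry block is its own immediate dominator, unreachable nodes are None.
    fn dominator_chain(idom: &[Option<usize>], node: usize) -> Vec<usize> {
        let mut chain = vec![node];
        let mut cur = node;
        while let Some(d) = idom[cur] {
            if d == cur {
                break; // reached the entry block
            }
            chain.push(d);
            cur = d;
        }
        chain
    }

    // `dominates(dom, node)` in the new argument order.
    fn dominates(idom: &[Option<usize>], dom: usize, node: usize) -> bool {
        dominator_chain(idom, node).contains(&dom)
    }

    fn main() {
        // Entry 0 -> 1 -> 2; node 3 is unreachable.
        let idom = [Some(0), Some(0), Some(1), None];
        assert_eq!(dominator_chain(&idom, 2), vec![2, 1, 0]);
        assert!(dominates(&idom, 1, 2));
        assert!(!dominates(&idom, 2, 1));
    }
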
diff --git a/compiler/rustc_data_structures/src/graph/scc/mod.rs b/compiler/rustc_data_structures/src/graph/scc/mod.rs
index c8e66eb67..c4b11951a 100644
--- a/compiler/rustc_data_structures/src/graph/scc/mod.rs
+++ b/compiler/rustc_data_structures/src/graph/scc/mod.rs
@@ -27,7 +27,7 @@ pub struct Sccs<N: Idx, S: Idx> {
     scc_data: SccData<S>,
 }
 
-struct SccData<S: Idx> {
+pub struct SccData<S: Idx> {
     /// For each SCC, the range of `all_successors` where its
     /// successors can be found.
     ranges: IndexVec<S, Range<usize>>,
@@ -43,6 +43,14 @@ impl<N: Idx, S: Idx + Ord> Sccs<N, S> {
         SccsConstruction::construct(graph)
     }
 
+    pub fn scc_indices(&self) -> &IndexVec<N, S> {
+        &self.scc_indices
+    }
+
+    pub fn scc_data(&self) -> &SccData<S> {
+        &self.scc_data
+    }
+
     /// Returns the number of SCCs in the graph.
     pub fn num_sccs(&self) -> usize {
         self.scc_data.len()
@@ -115,6 +123,14 @@ impl<S: Idx> SccData<S> {
         self.ranges.len()
     }
 
+    pub fn ranges(&self) -> &IndexVec<S, Range<usize>> {
+        &self.ranges
+    }
+
+    pub fn all_successors(&self) -> &Vec<S> {
+        &self.all_successors
+    }
+
     /// Returns the successors of the given SCC.
     fn successors(&self, scc: S) -> &[S] {
         // Annoyingly, `range` does not implement `Copy`, so we have
diff --git a/compiler/rustc_data_structures/src/lib.rs b/compiler/rustc_data_structures/src/lib.rs
index 954e84c30..a94e52fdf 100644
--- a/compiler/rustc_data_structures/src/lib.rs
+++ b/compiler/rustc_data_structures/src/lib.rs
@@ -11,6 +11,7 @@
 #![feature(associated_type_bounds)]
 #![feature(auto_traits)]
 #![feature(cell_leak)]
+#![feature(core_intrinsics)]
 #![feature(extend_one)]
 #![feature(hash_raw_entry)]
 #![feature(hasher_prefixfree_extras)]
@@ -25,6 +26,7 @@
 #![feature(test)]
 #![feature(thread_id_value)]
 #![feature(vec_into_raw_parts)]
+#![feature(get_mut_unchecked)]
 #![allow(rustc::default_hash_types)]
 #![allow(rustc::potential_query_instability)]
 #![deny(rustc::untranslatable_diagnostic)]
diff --git a/compiler/rustc_data_structures/src/obligation_forest/mod.rs b/compiler/rustc_data_structures/src/obligation_forest/mod.rs
index 10e673cd9..91abdaada 100644
--- a/compiler/rustc_data_structures/src/obligation_forest/mod.rs
+++ b/compiler/rustc_data_structures/src/obligation_forest/mod.rs
@@ -95,10 +95,7 @@ pub trait ForestObligation: Clone + Debug {
 pub trait ObligationProcessor {
     type Obligation: ForestObligation;
     type Error: Debug;
-    type OUT: OutcomeTrait<
-        Obligation = Self::Obligation,
-        Error = Error<Self::Obligation, Self::Error>,
-    >;
+    type OUT: OutcomeTrait<Obligation = Self::Obligation, Error = Error<Self::Obligation, Self::Error>>;
 
     fn needs_process_obligation(&self, obligation: &Self::Obligation) -> bool;
 
@@ -139,8 +136,7 @@ pub enum ProcessResult<O, E> {
 #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
 struct ObligationTreeId(usize);
 
-type ObligationTreeIdGenerator =
-    std::iter::Map<std::ops::RangeFrom<usize>, fn(usize) -> ObligationTreeId>;
+type ObligationTreeIdGenerator = impl Iterator<Item = ObligationTreeId>;
 
 pub struct ObligationForest<O: ForestObligation> {
     /// The list of obligations. In between calls to [Self::process_obligations],
@@ -430,6 +426,7 @@ impl<O: ForestObligation> ObligationForest<O> {
         // nodes. Therefore we use a `while` loop.
         let mut index = 0;
         while let Some(node) = self.nodes.get_mut(index) {
+            // This test is extremely hot.
             if node.state.get() != NodeState::Pending
                 || !processor.needs_process_obligation(&node.obligation)
             {
@@ -443,6+440,7 @@ impl<O: ForestObligation> ObligationForest<O> {
             // out of sync with `nodes`. It's not very common, but it does
             // happen, and code in `compress` has to allow for it.
 
+            // This code is much less hot.
             match processor.process_obligation(&mut node.obligation) {
                 ProcessResult::Unchanged => {
                     // No change in state.
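The newly public `scc_data()` accessor exposes the flattened successor storage described by `ranges` and `all_successors`. A minimal model of that layout (plain `usize` indices in place of rustc's `Idx` types, hypothetical `SccDataModel` name) shows how each SCC's successor list is a sub-slice of one shared vector:

    use std::ops::Range;

    // Each SCC's successors live in one flat vector, addressed by a
    // per-SCC range. This mirrors the storage the accessors expose.
    struct SccDataModel {
        ranges: Vec<Range<usize>>,
        all_successors: Vec<usize>,
    }

    impl SccDataModel {
        fn successors(&self, scc: usize) -> &[usize] {
            // As the source comment notes, `Range` is not `Copy`,
            // so clone it before indexing.
            let range = self.ranges[scc].clone();
            &self.all_successors[range]
        }
    }

    fn main() {
        // Two SCCs: SCC 0 has successor [1], SCC 1 has none.
        let data = SccDataModel { ranges: vec![0..1, 1..1], all_successors: vec![1] };
        assert_eq!(data.successors(0), &[1]);
        assert!(data.successors(1).is_empty());
    }
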
diff --git a/compiler/rustc_data_structures/src/profiling.rs b/compiler/rustc_data_structures/src/profiling.rs
index 393f17390..443316836 100644
--- a/compiler/rustc_data_structures/src/profiling.rs
+++ b/compiler/rustc_data_structures/src/profiling.rs
@@ -88,6 +88,7 @@ use std::borrow::Borrow;
 use std::collections::hash_map::Entry;
 use std::error::Error;
 use std::fs;
+use std::intrinsics::unlikely;
 use std::path::Path;
 use std::process;
 use std::sync::Arc;
@@ -206,8 +207,7 @@ impl SelfProfilerRef {
     /// a measureme event, "verbose" generic activities also print a timing entry to
     /// stderr if the compiler is invoked with -Ztime-passes.
     pub fn verbose_generic_activity(&self, event_label: &'static str) -> VerboseTimingGuard<'_> {
-        let message =
-            if self.print_verbose_generic_activities { Some(event_label.to_owned()) } else { None };
+        let message = self.print_verbose_generic_activities.then(|| event_label.to_owned());
 
         VerboseTimingGuard::start(message, self.generic_activity(event_label))
     }
@@ -221,11 +221,9 @@ impl SelfProfilerRef {
     where
         A: Borrow<str> + Into<String>,
     {
-        let message = if self.print_verbose_generic_activities {
-            Some(format!("{}({})", event_label, event_arg.borrow()))
-        } else {
-            None
-        };
+        let message = self
+            .print_verbose_generic_activities
+            .then(|| format!("{}({})", event_label, event_arg.borrow()));
 
         VerboseTimingGuard::start(message, self.generic_activity_with_arg(event_label, event_arg))
     }
@@ -395,11 +393,18 @@ impl SelfProfilerRef {
     /// Record a query in-memory cache hit.
     #[inline(always)]
     pub fn query_cache_hit(&self, query_invocation_id: QueryInvocationId) {
-        self.instant_query_event(
-            |profiler| profiler.query_cache_hit_event_kind,
-            query_invocation_id,
-            EventFilter::QUERY_CACHE_HITS,
-        );
+        #[inline(never)]
+        #[cold]
+        fn cold_call(profiler_ref: &SelfProfilerRef, query_invocation_id: QueryInvocationId) {
+            profiler_ref.instant_query_event(
+                |profiler| profiler.query_cache_hit_event_kind,
+                query_invocation_id,
+            );
+        }
+
+        if unlikely(self.event_filter_mask.contains(EventFilter::QUERY_CACHE_HITS)) {
+            cold_call(self, query_invocation_id);
+        }
     }
 
     /// Start profiling a query being blocked on a concurrent execution.
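The `query_cache_hit` rewrite is an instance of a common hot-path pattern: test the (almost always false) condition inline, and outline the rarely executed body into a `#[cold]`, `#[inline(never)]` function so the caller's fast path stays compact. A stable-Rust sketch of the pattern (the diff additionally wraps the test in the nightly `std::intrinsics::unlikely` hint, omitted here):

    // Hot/cold split: the branch condition is checked inline, while the
    // branch body is outlined into a function the optimizer keeps off
    // the fast path.
    #[cold]
    #[inline(never)]
    fn record_rare_event(count: &mut u64) {
        *count += 1;
    }

    #[inline(always)]
    fn hot_loop_step(enabled: bool, count: &mut u64) {
        if enabled {
            // Rare branch: one call instruction on the fast path
            // instead of the whole recording body.
            record_rare_event(count);
        }
    }

    fn main() {
        let mut count = 0;
        for i in 0..1_000 {
            hot_loop_step(i % 500 == 0, &mut count);
        }
        assert_eq!(count, 2);
    }
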
@@ -444,20 +449,15 @@ impl SelfProfilerRef {
         &self,
         event_kind: fn(&SelfProfiler) -> StringId,
         query_invocation_id: QueryInvocationId,
-        event_filter: EventFilter,
     ) {
-        drop(self.exec(event_filter, |profiler| {
-            let event_id = StringId::new_virtual(query_invocation_id.0);
-            let thread_id = get_thread_id();
-
-            profiler.profiler.record_instant_event(
-                event_kind(profiler),
-                EventId::from_virtual(event_id),
-                thread_id,
-            );
-
-            TimingGuard::none()
-        }));
+        let event_id = StringId::new_virtual(query_invocation_id.0);
+        let thread_id = get_thread_id();
+        let profiler = self.profiler.as_ref().unwrap();
+        profiler.profiler.record_instant_event(
+            event_kind(profiler),
+            EventId::from_virtual(event_id),
+            thread_id,
+        );
     }
 
     pub fn with_profiler(&self, f: impl FnOnce(&SelfProfiler)) {
diff --git a/compiler/rustc_data_structures/src/sorted_map/index_map.rs b/compiler/rustc_data_structures/src/sorted_map/index_map.rs
index 814e7c7fb..7d23ff519 100644
--- a/compiler/rustc_data_structures/src/sorted_map/index_map.rs
+++ b/compiler/rustc_data_structures/src/sorted_map/index_map.rs
@@ -100,6 +100,11 @@ impl<I: Idx, K: Ord, V> SortedIndexMultiMap<I, K, V> {
             (k == &key).then_some((i, v))
         })
     }
+
+    #[inline]
+    pub fn contains_key(&self, key: K) -> bool {
+        self.get_by_key(key).next().is_some()
+    }
 }
 
 impl<I: Idx, K: Eq, V: Eq> Eq for SortedIndexMultiMap<I, K, V> {}
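The new `contains_key` is defined entirely in terms of `get_by_key`: the key is present iff the lookup iterator yields at least one item. A small stand-in for `SortedIndexMultiMap` (hypothetical `MultiMap` type, linear scan instead of the real sorted lookup) illustrates the relationship:

    // `get_by_key` yields the values for a key in insertion order, and
    // `contains_key` is just "does that iterator yield anything?".
    struct MultiMap {
        items: Vec<(u32, &'static str)>, // (key, value) pairs
    }

    impl MultiMap {
        fn get_by_key(&self, key: u32) -> impl Iterator<Item = &&'static str> + '_ {
            self.items.iter().filter_map(move |(k, v)| (*k == key).then_some(v))
        }

        fn contains_key(&self, key: u32) -> bool {
            self.get_by_key(key).next().is_some()
        }
    }

    fn main() {
        let map = MultiMap { items: vec![(1, "a"), (2, "b"), (2, "c")] };
        assert_eq!(map.get_by_key(2).copied().collect::<Vec<_>>(), vec!["b", "c"]);
        assert!(map.contains_key(1));
        assert!(!map.contains_key(9));
    }
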
diff --git a/compiler/rustc_data_structures/src/sorted_map/tests.rs b/compiler/rustc_data_structures/src/sorted_map/tests.rs
index 3cc250862..def7a7112 100644
--- a/compiler/rustc_data_structures/src/sorted_map/tests.rs
+++ b/compiler/rustc_data_structures/src/sorted_map/tests.rs
@@ -17,6 +17,10 @@ fn test_sorted_index_multi_map() {
     assert_eq!(set.get_by_key(3).copied().collect::<Vec<_>>(), vec![0]);
     assert!(set.get_by_key(4).next().is_none());
 
+    // `contains_key` works
+    assert!(set.contains_key(3));
+    assert!(!set.contains_key(4));
+
     // `get_by_key` returns items in insertion order.
     let twos: Vec<_> = set.get_by_key_enumerated(2).collect();
     let idxs: Vec<usize> = twos.iter().map(|(i, _)| *i).collect();
diff --git a/compiler/rustc_data_structures/src/stable_hasher.rs b/compiler/rustc_data_structures/src/stable_hasher.rs
index ae4836645..e0d77cdae 100644
--- a/compiler/rustc_data_structures/src/stable_hasher.rs
+++ b/compiler/rustc_data_structures/src/stable_hasher.rs
@@ -486,6 +486,14 @@ impl<HCX> ToStableHashKey<HCX> for String {
     }
 }
 
+impl<HCX, T1: ToStableHashKey<HCX>, T2: ToStableHashKey<HCX>> ToStableHashKey<HCX> for (T1, T2) {
+    type KeyType = (T1::KeyType, T2::KeyType);
+    #[inline]
+    fn to_stable_hash_key(&self, hcx: &HCX) -> Self::KeyType {
+        (self.0.to_stable_hash_key(hcx), self.1.to_stable_hash_key(hcx))
+    }
+}
+
 impl<CTX> HashStable<CTX> for bool {
     #[inline]
     fn hash_stable(&self, ctx: &mut CTX, hasher: &mut StableHasher) {
diff --git a/compiler/rustc_data_structures/src/stable_hasher/tests.rs b/compiler/rustc_data_structures/src/stable_hasher/tests.rs
index b0d66c32a..724be5888 100644
--- a/compiler/rustc_data_structures/src/stable_hasher/tests.rs
+++ b/compiler/rustc_data_structures/src/stable_hasher/tests.rs
@@ -150,7 +150,7 @@ fn test_isize_compression() {
     let hash_b = hash(&(b as isize, a as isize));
     assert_ne!(
         hash_a, hash_b,
-        "The hash stayed the same when permuting values `{a}` and `{b}!",
+        "The hash stayed the same when permuting values `{a}` and `{b}`!",
     );
 }
 
diff --git a/compiler/rustc_data_structures/src/sync.rs b/compiler/rustc_data_structures/src/sync.rs
index ed5341c40..31323c21d 100644
--- a/compiler/rustc_data_structures/src/sync.rs
+++ b/compiler/rustc_data_structures/src/sync.rs
@@ -26,13 +26,17 @@ use std::panic::{catch_unwind, resume_unwind, AssertUnwindSafe};
 pub use std::sync::atomic::Ordering;
 pub use std::sync::atomic::Ordering::SeqCst;
 
+pub use vec::AppendOnlyVec;
+
+mod vec;
+
 cfg_if! {
     if #[cfg(not(parallel_compiler))] {
         pub auto trait Send {}
         pub auto trait Sync {}
 
-        impl<T: ?Sized> Send for T {}
-        impl<T: ?Sized> Sync for T {}
+        impl<T> Send for T {}
+        impl<T> Sync for T {}
 
         #[macro_export]
         macro_rules! rustc_erase_owner {
diff --git a/compiler/rustc_data_structures/src/sync/vec.rs b/compiler/rustc_data_structures/src/sync/vec.rs
new file mode 100644
index 000000000..cbea4f059
--- /dev/null
+++ b/compiler/rustc_data_structures/src/sync/vec.rs
@@ -0,0 +1,41 @@
+use std::marker::PhantomData;
+
+use rustc_index::vec::Idx;
+
+pub struct AppendOnlyVec<I: Idx, T: Copy> {
+    #[cfg(not(parallel_compiler))]
+    vec: elsa::vec::FrozenVec<T>,
+    #[cfg(parallel_compiler)]
+    vec: elsa::sync::LockFreeFrozenVec<T>,
+    _marker: PhantomData<fn(&I)>,
+}
+
+impl<I: Idx, T: Copy> AppendOnlyVec<I, T> {
+    pub fn new() -> Self {
+        Self {
+            #[cfg(not(parallel_compiler))]
+            vec: elsa::vec::FrozenVec::new(),
+            #[cfg(parallel_compiler)]
+            vec: elsa::sync::LockFreeFrozenVec::new(),
+            _marker: PhantomData,
+        }
+    }
+
+    pub fn push(&self, val: T) -> I {
+        #[cfg(not(parallel_compiler))]
+        let i = self.vec.len();
+        #[cfg(not(parallel_compiler))]
+        self.vec.push(val);
+        #[cfg(parallel_compiler)]
+        let i = self.vec.push(val);
+        I::new(i)
+    }
+
+    pub fn get(&self, i: I) -> Option<T> {
+        let i = i.index();
+        #[cfg(not(parallel_compiler))]
+        return self.vec.get_copy(i);
+        #[cfg(parallel_compiler)]
+        return self.vec.get(i);
+    }
+}
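Usage-wise, the point of `AppendOnlyVec` (and of the underlying `elsa` frozen vectors pulled in by the Cargo.toml change) is that `push` takes `&self`, so values can be appended through a shared reference without a lock in the non-parallel build. A sketch of the single-threaded flavour, using `elsa::vec::FrozenVec` directly as the new file does (assumes `elsa = "1.8"` as a dependency):

    // Appending through `&self`: FrozenVec never hands out references
    // that could be invalidated by a later push, so shared references
    // can push and read freely (single-threaded here; the parallel
    // build uses LockFreeFrozenVec instead).
    fn main() {
        let vec: elsa::vec::FrozenVec<u32> = elsa::vec::FrozenVec::new();

        let first = vec.len(); // index the next push will occupy
        vec.push(10);
        vec.push(20);

        assert_eq!(vec.get_copy(first), Some(10));
        assert_eq!(vec.get_copy(first + 1), Some(20));
        assert_eq!(vec.get_copy(99), None);
    }
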