author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-30 03:59:24 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-30 03:59:24 +0000
commit     023939b627b7dc93b01471f7d41fb8553ddb4ffa
tree       60fc59477c605c72b0a1051409062ddecc43f877 /compiler/rustc_data_structures
parent     Adding debian version 1.72.1+dfsg1-1.
Merging upstream version 1.73.0+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_data_structures')
-rw-r--r--   compiler/rustc_data_structures/Cargo.toml                     |  2
-rw-r--r--   compiler/rustc_data_structures/src/base_n.rs                  | 10
-rw-r--r--   compiler/rustc_data_structures/src/binary_search_util/mod.rs  | 38
-rw-r--r--   compiler/rustc_data_structures/src/graph/dominators/mod.rs    |  4
-rw-r--r--   compiler/rustc_data_structures/src/lib.rs                     |  1
-rw-r--r--   compiler/rustc_data_structures/src/sorted_map.rs              |  5
-rw-r--r--   compiler/rustc_data_structures/src/sso/map.rs                 |  6
-rw-r--r--   compiler/rustc_data_structures/src/sync/vec.rs                | 28
-rw-r--r--   compiler/rustc_data_structures/src/sync/worker_local.rs       |  2
-rw-r--r--   compiler/rustc_data_structures/src/unord.rs                   | 73
10 files changed, 78 insertions, 91 deletions
diff --git a/compiler/rustc_data_structures/Cargo.toml b/compiler/rustc_data_structures/Cargo.toml
index a5c3cb3f8..f77bd53e7 100644
--- a/compiler/rustc_data_structures/Cargo.toml
+++ b/compiler/rustc_data_structures/Cargo.toml
@@ -35,7 +35,7 @@ elsa = "=1.7.1"
 itertools = "0.10.1"
 
 [dependencies.parking_lot]
-version = "0.11"
+version = "0.12"
 
 [target.'cfg(windows)'.dependencies.windows]
 version = "0.48.0"
diff --git a/compiler/rustc_data_structures/src/base_n.rs b/compiler/rustc_data_structures/src/base_n.rs
index 4567759c0..a3eb2b9c4 100644
--- a/compiler/rustc_data_structures/src/base_n.rs
+++ b/compiler/rustc_data_structures/src/base_n.rs
@@ -16,22 +16,24 @@ const BASE_64: &[u8; MAX_BASE] =
 pub fn push_str(mut n: u128, base: usize, output: &mut String) {
     debug_assert!(base >= 2 && base <= MAX_BASE);
     let mut s = [0u8; 128];
-    let mut index = 0;
+    let mut index = s.len();
 
     let base = base as u128;
 
     loop {
+        index -= 1;
         s[index] = BASE_64[(n % base) as usize];
-        index += 1;
         n /= base;
 
         if n == 0 {
            break;
        }
    }
 
-    s[0..index].reverse();
-    output.push_str(str::from_utf8(&s[0..index]).unwrap());
+    output.push_str(unsafe {
+        // SAFETY: `s` is populated using only valid utf8 characters from `BASE_64`
+        str::from_utf8_unchecked(&s[index..])
+    });
 }
 
 #[inline]
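
[Editor's note, not part of the patch] The rewritten `push_str` above fills the digit buffer from the back and then slices off the used tail, so the old `reverse()` step disappears. A minimal standalone sketch of the same backward-fill idea, with made-up names (`DIGITS`, `encode`) and the safe `str::from_utf8` in place of the patch's `from_utf8_unchecked`:

// Illustrative sketch of the backward-fill encoding: digits are written from
// the end of a fixed buffer, so no reverse step is needed afterwards.
const DIGITS: &[u8; 16] = b"0123456789abcdef";

fn encode(mut n: u128, base: u128, output: &mut String) {
    assert!((2..=16).contains(&base));
    let mut buf = [0u8; 128];
    let mut index = buf.len();
    loop {
        index -= 1;
        buf[index] = DIGITS[(n % base) as usize];
        n /= base;
        if n == 0 {
            break;
        }
    }
    // Safe counterpart of the patch's `from_utf8_unchecked`: the digits are ASCII.
    output.push_str(std::str::from_utf8(&buf[index..]).unwrap());
}

fn main() {
    let mut s = String::new();
    encode(255, 16, &mut s);
    assert_eq!(s, "ff");
}

Writing digits from the end leaves them in most-significant-first order for free, which is what lets the patch drop both the reverse and the runtime UTF-8 check.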
diff --git a/compiler/rustc_data_structures/src/binary_search_util/mod.rs b/compiler/rustc_data_structures/src/binary_search_util/mod.rs
index d40172a2e..bc8a6b9ea 100644
--- a/compiler/rustc_data_structures/src/binary_search_util/mod.rs
+++ b/compiler/rustc_data_structures/src/binary_search_util/mod.rs
@@ -10,41 +10,17 @@ pub fn binary_search_slice<'d, E, K>(data: &'d [E], key_fn: impl Fn(&E) -> K, ke
 where
     K: Ord,
 {
-    let Ok(mid) = data.binary_search_by_key(key, &key_fn) else {
+    let size = data.len();
+    let start = data.partition_point(|x| key_fn(x) < *key);
+    // At this point `start` either points at the first entry with equal or
+    // greater key or is equal to `size` in case all elements have smaller keys
+    if start == size || key_fn(&data[start]) != *key {
         return &[];
     };
-    let size = data.len();
-
-    // We get back *some* element with the given key -- so do
-    // a galloping search backwards to find the *first* one.
-    let mut start = mid;
-    let mut previous = mid;
-    let mut step = 1;
-    loop {
-        start = start.saturating_sub(step);
-        if start == 0 || key_fn(&data[start]) != *key {
-            break;
-        }
-        previous = start;
-        step *= 2;
-    }
-    step = previous - start;
-    while step > 1 {
-        let half = step / 2;
-        let mid = start + half;
-        if key_fn(&data[mid]) != *key {
-            start = mid;
-        }
-        step -= half;
-    }
-    // adjust by one if we have overshot
-    if start < size && key_fn(&data[start]) != *key {
-        start += 1;
-    }
 
     // Now search forward to find the *last* one.
-    let mut end = mid;
-    let mut previous = mid;
+    let mut end = start;
+    let mut previous = start;
     let mut step = 1;
     loop {
         end = end.saturating_add(step).min(size);
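
[Editor's note, not part of the patch] The new `binary_search_slice` above finds the first matching element with `slice::partition_point` instead of the removed backwards galloping search; only the forward gallop for the last match remains hand-written. A small self-contained sketch of the `partition_point` lookup, using a made-up helper name (`equal_range_start`):

// Illustrative sketch only. It mirrors the new lookup: find the first element
// whose key is not less than `key`, then check whether it actually matches.
fn equal_range_start<E, K: Ord>(data: &[E], key_fn: impl Fn(&E) -> K, key: &K) -> Option<usize> {
    // `partition_point` returns the index of the first element for which the
    // predicate is false, i.e. the first element with key_fn(x) >= *key.
    let start = data.partition_point(|x| key_fn(x) < *key);
    if start == data.len() || key_fn(&data[start]) != *key { None } else { Some(start) }
}

fn main() {
    // The slice must already be sorted by the extracted key.
    let data = [(1, 'a'), (2, 'b'), (2, 'c'), (3, 'd')];
    assert_eq!(equal_range_start(&data, |e| e.0, &2), Some(1));
    assert_eq!(equal_range_start(&data, |e| e.0, &5), None);
}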
diff --git a/compiler/rustc_data_structures/src/graph/dominators/mod.rs b/compiler/rustc_data_structures/src/graph/dominators/mod.rs
index a5db14d91..85ef2de9b 100644
--- a/compiler/rustc_data_structures/src/graph/dominators/mod.rs
+++ b/compiler/rustc_data_structures/src/graph/dominators/mod.rs
@@ -176,9 +176,7 @@ pub fn dominators<G: ControlFlowGraph>(graph: &G) -> Dominators<G::Node> {
         //
         // ...this may be the case if a MirPass modifies the CFG to remove
         // or rearrange certain blocks/edges.
-        let Some(v) = real_to_pre_order[v] else {
-            continue
-        };
+        let Some(v) = real_to_pre_order[v] else { continue };
 
         // eval returns a vertex x from which semi[x] is minimum among
         // vertices semi[v] +> x *> v.
diff --git a/compiler/rustc_data_structures/src/lib.rs b/compiler/rustc_data_structures/src/lib.rs
index 3deb9c5c2..337720897 100644
--- a/compiler/rustc_data_structures/src/lib.rs
+++ b/compiler/rustc_data_structures/src/lib.rs
@@ -37,6 +37,7 @@
 #![allow(rustc::potential_query_instability)]
 #![deny(rustc::untranslatable_diagnostic)]
 #![deny(rustc::diagnostic_outside_of_impl)]
+#![cfg_attr(not(bootstrap), allow(internal_features))]
 #![deny(unsafe_op_in_unsafe_fn)]
 
 #[macro_use]
diff --git a/compiler/rustc_data_structures/src/sorted_map.rs b/compiler/rustc_data_structures/src/sorted_map.rs
index 9409057d4..60b343afb 100644
--- a/compiler/rustc_data_structures/src/sorted_map.rs
+++ b/compiler/rustc_data_structures/src/sorted_map.rs
@@ -49,12 +49,11 @@ impl<K: Ord, V> SortedMap<K, V> {
     }
 
     #[inline]
-    pub fn insert(&mut self, key: K, mut value: V) -> Option<V> {
+    pub fn insert(&mut self, key: K, value: V) -> Option<V> {
         match self.lookup_index_for(&key) {
             Ok(index) => {
                 let slot = unsafe { self.data.get_unchecked_mut(index) };
-                mem::swap(&mut slot.1, &mut value);
-                Some(value)
+                Some(mem::replace(&mut slot.1, value))
             }
             Err(index) => {
                 self.data.insert(index, (key, value));
diff --git a/compiler/rustc_data_structures/src/sso/map.rs b/compiler/rustc_data_structures/src/sso/map.rs
index 99581ed23..04e359a54 100644
--- a/compiler/rustc_data_structures/src/sso/map.rs
+++ b/compiler/rustc_data_structures/src/sso/map.rs
@@ -268,11 +268,7 @@ impl<K: Eq + Hash, V> SsoHashMap<K, V> {
     pub fn remove_entry(&mut self, key: &K) -> Option<(K, V)> {
         match self {
             SsoHashMap::Array(array) => {
-                if let Some(index) = array.iter().position(|(k, _v)| k == key) {
-                    Some(array.swap_remove(index))
-                } else {
-                    None
-                }
+                array.iter().position(|(k, _v)| k == key).map(|index| array.swap_remove(index))
             }
             SsoHashMap::Map(map) => map.remove_entry(key),
         }
diff --git a/compiler/rustc_data_structures/src/sync/vec.rs b/compiler/rustc_data_structures/src/sync/vec.rs
index e36dded9e..314496ce9 100644
--- a/compiler/rustc_data_structures/src/sync/vec.rs
+++ b/compiler/rustc_data_structures/src/sync/vec.rs
@@ -43,37 +43,23 @@ impl<I: Idx, T: Copy> AppendOnlyIndexVec<I, T> {
 
 #[derive(Default)]
 pub struct AppendOnlyVec<T: Copy> {
-    #[cfg(not(parallel_compiler))]
-    vec: elsa::vec::FrozenVec<T>,
-    #[cfg(parallel_compiler)]
-    vec: elsa::sync::LockFreeFrozenVec<T>,
+    vec: parking_lot::RwLock<Vec<T>>,
 }
 
 impl<T: Copy> AppendOnlyVec<T> {
     pub fn new() -> Self {
-        Self {
-            #[cfg(not(parallel_compiler))]
-            vec: elsa::vec::FrozenVec::new(),
-            #[cfg(parallel_compiler)]
-            vec: elsa::sync::LockFreeFrozenVec::new(),
-        }
+        Self { vec: Default::default() }
     }
 
     pub fn push(&self, val: T) -> usize {
-        #[cfg(not(parallel_compiler))]
-        let i = self.vec.len();
-        #[cfg(not(parallel_compiler))]
-        self.vec.push(val);
-        #[cfg(parallel_compiler)]
-        let i = self.vec.push(val);
-        i
+        let mut v = self.vec.write();
+        let n = v.len();
+        v.push(val);
+        n
     }
 
     pub fn get(&self, i: usize) -> Option<T> {
-        #[cfg(not(parallel_compiler))]
-        return self.vec.get_copy(i);
-        #[cfg(parallel_compiler)]
-        return self.vec.get(i);
+        self.vec.read().get(i).copied()
    }
 
    pub fn iter_enumerated(&self) -> impl Iterator<Item = (usize, T)> + '_ {
diff --git a/compiler/rustc_data_structures/src/sync/worker_local.rs b/compiler/rustc_data_structures/src/sync/worker_local.rs
index d61bb55be..8c84daf4f 100644
--- a/compiler/rustc_data_structures/src/sync/worker_local.rs
+++ b/compiler/rustc_data_structures/src/sync/worker_local.rs
@@ -116,7 +116,7 @@ pub struct WorkerLocal<T> {
 
 // This is safe because the `deref` call will return a reference to a `T` unique to each thread
 // or it will panic for threads without an associated local. So there isn't a need for `T` to do
-// it's own synchronization. The `verify` method on `RegistryId` has an issue where the the id
+// it's own synchronization. The `verify` method on `RegistryId` has an issue where the id
 // can be reused, but `WorkerLocal` has a reference to `Registry` which will prevent any reuse.
 #[cfg(parallel_compiler)]
 unsafe impl<T: Send> Sync for WorkerLocal<T> {}
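
[Editor's note, not part of the patch] In the sync/vec.rs hunks above, `AppendOnlyVec` drops the `elsa` frozen-vec backends and wraps a plain `Vec` in `parking_lot::RwLock`, so `push` can still take `&self`. A sketch of the same pattern using the standard library's `std::sync::RwLock` instead of `parking_lot` (the type name `AppendOnly` is made up):

use std::sync::RwLock;

// Illustrative sketch: an append-only vector where `push` takes `&self` by
// serializing writers behind an RwLock, and `get` returns a copy so no lock
// guard escapes the method.
struct AppendOnly<T: Copy> {
    vec: RwLock<Vec<T>>,
}

impl<T: Copy> AppendOnly<T> {
    fn new() -> Self {
        Self { vec: RwLock::new(Vec::new()) }
    }

    // Returns the index the value was pushed at, like the patched `push`.
    fn push(&self, val: T) -> usize {
        let mut v = self.vec.write().unwrap();
        let n = v.len();
        v.push(val);
        n
    }

    fn get(&self, i: usize) -> Option<T> {
        self.vec.read().unwrap().get(i).copied()
    }
}

fn main() {
    let v = AppendOnly::new();
    assert_eq!(v.push(10), 0);
    assert_eq!(v.push(20), 1);
    assert_eq!(v.get(1), Some(20));
    assert_eq!(v.get(2), None);
}

Because `get` hands back a copy (hence the `T: Copy` bound on the real type), no lock guard ever leaks out of the API.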
diff --git a/compiler/rustc_data_structures/src/unord.rs b/compiler/rustc_data_structures/src/unord.rs
index 2b21815b6..47c56eba7 100644
--- a/compiler/rustc_data_structures/src/unord.rs
+++ b/compiler/rustc_data_structures/src/unord.rs
@@ -31,6 +31,7 @@ use crate::{
 ///
 /// It's still possible to do the same thing with an `Fn` by using interior mutability,
 /// but the chance of doing it accidentally is reduced.
+#[derive(Clone)]
 pub struct UnordItems<T, I: Iterator<Item = T>>(I);
 
 impl<T, I: Iterator<Item = T>> UnordItems<T, I> {
@@ -167,6 +168,14 @@ impl<T: Ord, I: Iterator<Item = T>> UnordItems<T, I> {
     }
 }
 
+/// A marker trait specifying that `Self` can consume `UnordItems<_>` without
+/// exposing any internal ordering.
+///
+/// Note: right now this is just a marker trait. It could be extended to contain
+/// some useful, common methods though, like `len`, `clear`, or the various
+/// kinds of `to_sorted`.
+trait UnordCollection {}
+
 /// This is a set collection type that tries very hard to not expose
 /// any internal iteration. This is a useful property when trying to
 /// uphold the determinism invariants imposed by the query system.
@@ -181,6 +190,8 @@ pub struct UnordSet<V: Eq + Hash> {
     inner: FxHashSet<V>,
 }
 
+impl<V: Eq + Hash> UnordCollection for UnordSet<V> {}
+
 impl<V: Eq + Hash> Default for UnordSet<V> {
     #[inline]
     fn default() -> Self {
@@ -195,6 +206,11 @@ impl<V: Eq + Hash> UnordSet<V> {
     }
 
     #[inline]
+    pub fn with_capacity(capacity: usize) -> Self {
+        Self { inner: FxHashSet::with_capacity_and_hasher(capacity, Default::default()) }
+    }
+
+    #[inline]
     pub fn len(&self) -> usize {
         self.inner.len()
     }
@@ -258,9 +274,9 @@ impl<V: Eq + Hash> UnordSet<V> {
     #[inline]
     pub fn to_sorted_stable_ord(&self) -> Vec<V>
     where
-        V: Ord + StableOrd + Copy,
+        V: Ord + StableOrd + Clone,
     {
-        let mut items: Vec<V> = self.inner.iter().copied().collect();
+        let mut items: Vec<V> = self.inner.iter().cloned().collect();
         items.sort_unstable();
         items
     }
@@ -279,16 +295,28 @@ impl<V: Eq + Hash> UnordSet<V> {
         to_sorted_vec(hcx, self.inner.into_iter(), cache_sort_key, |x| x)
     }
 
-    // We can safely extend this UnordSet from a set of unordered values because that
-    // won't expose the internal ordering anywhere.
     #[inline]
-    pub fn extend_unord<I: Iterator<Item = V>>(&mut self, items: UnordItems<V, I>) {
-        self.inner.extend(items.0)
+    pub fn clear(&mut self) {
+        self.inner.clear();
     }
+}
+
+pub trait ExtendUnord<T> {
+    /// Extend this unord collection with the given `UnordItems`.
+    /// This method is called `extend_unord` instead of just `extend` so it
+    /// does not conflict with `Extend::extend`. Otherwise there would be many
+    /// places where the two methods would have to be explicitly disambiguated
+    /// via UFCS.
+    fn extend_unord<I: Iterator<Item = T>>(&mut self, items: UnordItems<T, I>);
+}
 
+// Note: it is important that `C` implements `UnordCollection` in addition to
+// `Extend`, otherwise this impl would leak the internal iteration order of
+// `items`, e.g. when calling `some_vec.extend_unord(some_unord_items)`.
+impl<C: Extend<T> + UnordCollection, T> ExtendUnord<T> for C {
     #[inline]
-    pub fn clear(&mut self) {
-        self.inner.clear();
+    fn extend_unord<I: Iterator<Item = T>>(&mut self, items: UnordItems<T, I>) {
+        self.extend(items.0)
     }
 }
@@ -312,6 +340,12 @@ impl<V: Hash + Eq> From<FxHashSet<V>> for UnordSet<V> {
     }
 }
 
+impl<V: Hash + Eq, I: Iterator<Item = V>> From<UnordItems<V, I>> for UnordSet<V> {
+    fn from(value: UnordItems<V, I>) -> Self {
+        UnordSet { inner: FxHashSet::from_iter(value.0) }
+    }
+}
+
 impl<HCX, V: Hash + Eq + HashStable<HCX>> HashStable<HCX> for UnordSet<V> {
     #[inline]
     fn hash_stable(&self, hcx: &mut HCX, hasher: &mut StableHasher) {
@@ -333,6 +367,8 @@ pub struct UnordMap<K: Eq + Hash, V> {
     inner: FxHashMap<K, V>,
 }
 
+impl<K: Eq + Hash, V> UnordCollection for UnordMap<K, V> {}
+
 impl<K: Eq + Hash, V> Default for UnordMap<K, V> {
     #[inline]
     fn default() -> Self {
@@ -363,6 +399,11 @@ impl<K: Hash + Eq, V, I: Iterator<Item = (K, V)>> From<UnordItems<(K, V), I>> fo
 
 impl<K: Eq + Hash, V> UnordMap<K, V> {
     #[inline]
+    pub fn with_capacity(capacity: usize) -> Self {
+        Self { inner: FxHashMap::with_capacity_and_hasher(capacity, Default::default()) }
+    }
+
+    #[inline]
     pub fn len(&self) -> usize {
         self.inner.len()
     }
@@ -428,13 +469,6 @@ impl<K: Eq + Hash, V> UnordMap<K, V> {
         UnordItems(self.inner.into_iter())
     }
 
-    // We can safely extend this UnordMap from a set of unordered values because that
-    // won't expose the internal ordering anywhere.
-    #[inline]
-    pub fn extend<I: Iterator<Item = (K, V)>>(&mut self, items: UnordItems<(K, V), I>) {
-        self.inner.extend(items.0)
-    }
-
     /// Returns the entries of this map in stable sort order (as defined by `ToStableHashKey`).
     ///
     /// The `cache_sort_key` parameter controls if [slice::sort_by_cached_key] or
@@ -554,15 +588,10 @@ impl<V> UnordBag<V> {
     pub fn into_items(self) -> UnordItems<V, impl Iterator<Item = V>> {
         UnordItems(self.inner.into_iter())
     }
-
-    // We can safely extend this UnordSet from a set of unordered values because that
-    // won't expose the internal ordering anywhere.
-    #[inline]
-    pub fn extend<I: Iterator<Item = V>>(&mut self, items: UnordItems<V, I>) {
-        self.inner.extend(items.0)
-    }
 }
 
+impl<T> UnordCollection for UnordBag<T> {}
+
 impl<T> Extend<T> for UnordBag<T> {
     fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
         self.inner.extend(iter)
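
[Editor's note, not part of the patch] The unord.rs changes above replace the per-type `extend_unord`/`extend` methods with one blanket impl of the new `ExtendUnord` trait, gated on the `UnordCollection` marker so that order-exposing collections such as `Vec` cannot use it. A simplified, self-contained sketch of that pattern with toy stand-ins (`UnordItems`, `ToySet`) rather than the compiler's types:

use std::collections::HashSet;
use std::hash::Hash;

// Simplified stand-ins for the compiler's types, for illustration only.
struct UnordItems<I>(I);

// Marker trait: implementing it asserts that extending the collection from
// unordered items cannot leak their iteration order back to callers.
trait UnordCollection {}

trait ExtendUnord<T> {
    fn extend_unord<I: Iterator<Item = T>>(&mut self, items: UnordItems<I>);
}

// Blanket impl: requires *both* `Extend` and the marker trait, so an ordinary
// `Vec<T>` (which is `Extend` but exposes insertion order) does not get it.
impl<T, C: Extend<T> + UnordCollection> ExtendUnord<T> for C {
    fn extend_unord<I: Iterator<Item = T>>(&mut self, items: UnordItems<I>) {
        self.extend(items.0)
    }
}

// A toy order-hiding set, standing in for `UnordSet`.
struct ToySet<V: Eq + Hash> {
    inner: HashSet<V>,
}

impl<V: Eq + Hash> Extend<V> for ToySet<V> {
    fn extend<I: IntoIterator<Item = V>>(&mut self, iter: I) {
        self.inner.extend(iter)
    }
}

impl<V: Eq + Hash> UnordCollection for ToySet<V> {}

fn main() {
    let mut set = ToySet { inner: HashSet::new() };
    set.extend_unord(UnordItems([3u32, 1, 2].into_iter()));
    assert_eq!(set.inner.len(), 3);
    // A `Vec<u32>` would not compile here: it implements `Extend` but not
    // the `UnordCollection` marker, so `extend_unord` is not available on it.
}

The marker bound is what keeps the blanket impl from leaking iteration order: `Extend` alone would let any ordered collection consume `UnordItems` as well.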