summaryrefslogtreecommitdiffstats
path: root/compiler/rustc_data_structures/src/sharded.rs
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-30 18:31:44 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-30 18:31:44 +0000
commitc23a457e72abe608715ac76f076f47dc42af07a5 (patch)
tree2772049aaf84b5c9d0ed12ec8d86812f7a7904b6 /compiler/rustc_data_structures/src/sharded.rs
parentReleasing progress-linux version 1.73.0+dfsg1-1~progress7.99u1. (diff)
downloadrustc-c23a457e72abe608715ac76f076f47dc42af07a5.tar.xz
rustc-c23a457e72abe608715ac76f076f47dc42af07a5.zip
Merging upstream version 1.74.1+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_data_structures/src/sharded.rs')
-rw-r--r--compiler/rustc_data_structures/src/sharded.rs159
1 files changed, 114 insertions, 45 deletions
diff --git a/compiler/rustc_data_structures/src/sharded.rs b/compiler/rustc_data_structures/src/sharded.rs
index 40cbf1495..29516fffd 100644
--- a/compiler/rustc_data_structures/src/sharded.rs
+++ b/compiler/rustc_data_structures/src/sharded.rs
@@ -1,31 +1,29 @@
use crate::fx::{FxHashMap, FxHasher};
#[cfg(parallel_compiler)]
-use crate::sync::is_dyn_thread_safe;
-use crate::sync::{CacheAligned, Lock, LockGuard};
+use crate::sync::{is_dyn_thread_safe, CacheAligned};
+use crate::sync::{Lock, LockGuard, Mode};
+#[cfg(parallel_compiler)]
+use itertools::Either;
use std::borrow::Borrow;
use std::collections::hash_map::RawEntryMut;
use std::hash::{Hash, Hasher};
+use std::iter;
use std::mem;
-#[cfg(parallel_compiler)]
// 32 shards is sufficient to reduce contention on an 8-core Ryzen 7 1700,
// but this should be tested on higher core count CPUs. How the `Sharded` type gets used
// may also affect the ideal number of shards.
const SHARD_BITS: usize = 5;
-#[cfg(not(parallel_compiler))]
-const SHARD_BITS: usize = 0;
-
-pub const SHARDS: usize = 1 << SHARD_BITS;
+#[cfg(parallel_compiler)]
+const SHARDS: usize = 1 << SHARD_BITS;
/// An array of cache-line aligned inner locked structures with convenience methods.
-pub struct Sharded<T> {
- /// This mask is used to ensure that accesses are inbounds of `shards`.
- /// When dynamic thread safety is off, this field is set to 0 causing only
- /// a single shard to be used for greater cache efficiency.
+/// A single field is used when the compiler uses only one thread.
+pub enum Sharded<T> {
+ Single(Lock<T>),
#[cfg(parallel_compiler)]
- mask: usize,
- shards: [CacheAligned<Lock<T>>; SHARDS],
+ Shards(Box<[CacheAligned<Lock<T>>; SHARDS]>),
}
impl<T: Default> Default for Sharded<T> {
@@ -38,62 +36,133 @@ impl<T: Default> Default for Sharded<T> {
impl<T> Sharded<T> {
#[inline]
pub fn new(mut value: impl FnMut() -> T) -> Self {
- Sharded {
- #[cfg(parallel_compiler)]
- mask: if is_dyn_thread_safe() { SHARDS - 1 } else { 0 },
- shards: [(); SHARDS].map(|()| CacheAligned(Lock::new(value()))),
+ #[cfg(parallel_compiler)]
+ if is_dyn_thread_safe() {
+ return Sharded::Shards(Box::new(
+ [(); SHARDS].map(|()| CacheAligned(Lock::new(value()))),
+ ));
}
+
+ Sharded::Single(Lock::new(value()))
}
- #[inline(always)]
- fn mask(&self) -> usize {
- #[cfg(parallel_compiler)]
- {
- if SHARDS == 1 { 0 } else { self.mask }
- }
- #[cfg(not(parallel_compiler))]
- {
- 0
+ /// The shard is selected by hashing `val` with `FxHasher`.
+ #[inline]
+ pub fn get_shard_by_value<K: Hash + ?Sized>(&self, _val: &K) -> &Lock<T> {
+ match self {
+ Self::Single(single) => &single,
+ #[cfg(parallel_compiler)]
+ Self::Shards(..) => self.get_shard_by_hash(make_hash(_val)),
}
}
- #[inline(always)]
- fn count(&self) -> usize {
- // `self.mask` is always one below the used shard count
- self.mask() + 1
+ #[inline]
+ pub fn get_shard_by_hash(&self, hash: u64) -> &Lock<T> {
+ self.get_shard_by_index(get_shard_hash(hash))
+ }
+
+ #[inline]
+ pub fn get_shard_by_index(&self, _i: usize) -> &Lock<T> {
+ match self {
+ Self::Single(single) => &single,
+ #[cfg(parallel_compiler)]
+ Self::Shards(shards) => {
+ // SAFETY: The index gets ANDed with the shard mask, ensuring it is always inbounds.
+ unsafe { &shards.get_unchecked(_i & (SHARDS - 1)).0 }
+ }
+ }
}
/// The shard is selected by hashing `val` with `FxHasher`.
#[inline]
- pub fn get_shard_by_value<K: Hash + ?Sized>(&self, val: &K) -> &Lock<T> {
- self.get_shard_by_hash(if SHARDS == 1 { 0 } else { make_hash(val) })
+ #[track_caller]
+ pub fn lock_shard_by_value<K: Hash + ?Sized>(&self, _val: &K) -> LockGuard<'_, T> {
+ match self {
+ Self::Single(single) => {
+                // Synchronization is disabled, so use `lock_assume(Mode::NoSync)`, which is
+                // optimized for that case.
+
+ // SAFETY: We know `is_dyn_thread_safe` was false when creating the lock thus
+ // `might_be_dyn_thread_safe` was also false.
+ unsafe { single.lock_assume(Mode::NoSync) }
+ }
+ #[cfg(parallel_compiler)]
+ Self::Shards(..) => self.lock_shard_by_hash(make_hash(_val)),
+ }
+ }
+
+ #[inline]
+ #[track_caller]
+ pub fn lock_shard_by_hash(&self, hash: u64) -> LockGuard<'_, T> {
+ self.lock_shard_by_index(get_shard_hash(hash))
}
#[inline]
- pub fn get_shard_by_hash(&self, hash: u64) -> &Lock<T> {
- self.get_shard_by_index(get_shard_hash(hash))
+ #[track_caller]
+ pub fn lock_shard_by_index(&self, _i: usize) -> LockGuard<'_, T> {
+ match self {
+ Self::Single(single) => {
+                // Synchronization is disabled, so use `lock_assume(Mode::NoSync)`, which is
+                // optimized for that case.
+
+ // SAFETY: We know `is_dyn_thread_safe` was false when creating the lock thus
+ // `might_be_dyn_thread_safe` was also false.
+ unsafe { single.lock_assume(Mode::NoSync) }
+ }
+ #[cfg(parallel_compiler)]
+ Self::Shards(shards) => {
+                // Synchronization is enabled, so use `lock_assume(Mode::Sync)`, which is
+                // optimized for that case.
+
+ // SAFETY (get_unchecked): The index gets ANDed with the shard mask, ensuring it is
+ // always inbounds.
+ // SAFETY (lock_assume_sync): We know `is_dyn_thread_safe` was true when creating
+ // the lock thus `might_be_dyn_thread_safe` was also true.
+ unsafe { shards.get_unchecked(_i & (SHARDS - 1)).0.lock_assume(Mode::Sync) }
+ }
+ }
}
#[inline]
- pub fn get_shard_by_index(&self, i: usize) -> &Lock<T> {
- // SAFETY: The index get ANDed with the mask, ensuring it is always inbounds.
- unsafe { &self.shards.get_unchecked(i & self.mask()).0 }
+ pub fn lock_shards(&self) -> impl Iterator<Item = LockGuard<'_, T>> {
+ match self {
+ #[cfg(not(parallel_compiler))]
+ Self::Single(single) => iter::once(single.lock()),
+ #[cfg(parallel_compiler)]
+ Self::Single(single) => Either::Left(iter::once(single.lock())),
+ #[cfg(parallel_compiler)]
+ Self::Shards(shards) => Either::Right(shards.iter().map(|shard| shard.0.lock())),
+ }
}
- pub fn lock_shards(&self) -> Vec<LockGuard<'_, T>> {
- (0..self.count()).map(|i| self.get_shard_by_index(i).lock()).collect()
+ #[inline]
+ pub fn try_lock_shards(&self) -> impl Iterator<Item = Option<LockGuard<'_, T>>> {
+ match self {
+ #[cfg(not(parallel_compiler))]
+ Self::Single(single) => iter::once(single.try_lock()),
+ #[cfg(parallel_compiler)]
+ Self::Single(single) => Either::Left(iter::once(single.try_lock())),
+ #[cfg(parallel_compiler)]
+ Self::Shards(shards) => Either::Right(shards.iter().map(|shard| shard.0.try_lock())),
+ }
}
+}
- pub fn try_lock_shards(&self) -> Option<Vec<LockGuard<'_, T>>> {
- (0..self.count()).map(|i| self.get_shard_by_index(i).try_lock()).collect()
+#[inline]
+pub fn shards() -> usize {
+ #[cfg(parallel_compiler)]
+ if is_dyn_thread_safe() {
+ return SHARDS;
}
+
+ 1
}
pub type ShardedHashMap<K, V> = Sharded<FxHashMap<K, V>>;
impl<K: Eq, V> ShardedHashMap<K, V> {
pub fn len(&self) -> usize {
- self.lock_shards().iter().map(|shard| shard.len()).sum()
+ self.lock_shards().map(|shard| shard.len()).sum()
}
}
@@ -105,7 +174,7 @@ impl<K: Eq + Hash + Copy> ShardedHashMap<K, ()> {
Q: Hash + Eq,
{
let hash = make_hash(value);
- let mut shard = self.get_shard_by_hash(hash).lock();
+ let mut shard = self.lock_shard_by_hash(hash);
let entry = shard.raw_entry_mut().from_key_hashed_nocheck(hash, value);
match entry {
@@ -125,7 +194,7 @@ impl<K: Eq + Hash + Copy> ShardedHashMap<K, ()> {
Q: Hash + Eq,
{
let hash = make_hash(&value);
- let mut shard = self.get_shard_by_hash(hash).lock();
+ let mut shard = self.lock_shard_by_hash(hash);
let entry = shard.raw_entry_mut().from_key_hashed_nocheck(hash, &value);
match entry {
@@ -147,7 +216,7 @@ pub trait IntoPointer {
impl<K: Eq + Hash + Copy + IntoPointer> ShardedHashMap<K, ()> {
pub fn contains_pointer_to<T: Hash + IntoPointer>(&self, value: &T) -> bool {
let hash = make_hash(&value);
- let shard = self.get_shard_by_hash(hash).lock();
+ let shard = self.lock_shard_by_hash(hash);
let value = value.into_pointer();
shard.raw_entry().from_hash(hash, |entry| entry.into_pointer() == value).is_some()
}