author    Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-17 12:11:28 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-17 12:11:28 +0000
commit    94a0819fe3a0d679c3042a77bfe6a2afc505daea (patch)
tree      2b827afe6a05f3538db3f7803a88c4587fe85648 /vendor/dashmap/src
parent    Adding upstream version 1.64.0+dfsg1. (diff)
Adding upstream version 1.66.0+dfsg1. (upstream/1.66.0+dfsg1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'vendor/dashmap/src')
-rw-r--r--  vendor/dashmap/src/lib.rs             | 59
-rw-r--r--  vendor/dashmap/src/rayon/map.rs       |  4
-rw-r--r--  vendor/dashmap/src/rayon/read_only.rs | 96
-rw-r--r--  vendor/dashmap/src/read_only.rs       |  2
-rw-r--r--  vendor/dashmap/src/table.rs           | 81
5 files changed, 155 insertions(+), 87 deletions(-)
diff --git a/vendor/dashmap/src/lib.rs b/vendor/dashmap/src/lib.rs
index 627b51381..bf112d570 100644
--- a/vendor/dashmap/src/lib.rs
+++ b/vendor/dashmap/src/lib.rs
@@ -16,6 +16,7 @@ mod util;
#[cfg(feature = "rayon")]
pub mod rayon {
pub mod map;
+ pub mod read_only;
pub mod set;
}
@@ -35,6 +36,7 @@ use iter::{Iter, IterMut, OwningIter};
use mapref::entry::{Entry, OccupiedEntry, VacantEntry};
use mapref::multiple::RefMulti;
use mapref::one::{Ref, RefMut};
+use once_cell::sync::OnceCell;
pub use read_only::ReadOnlyView;
pub use set::DashSet;
use std::collections::hash_map::RandomState;
@@ -51,8 +53,19 @@ cfg_if! {
pub(crate) type HashMap<K, V, S> = hashbrown::HashMap<K, SharedValue<V>, S>;
+// Temporary reimplementation of [`std::collections::TryReserveError`]
+// until [`std::collections::TryReserveError`] stabilises.
+// We cannot easily create the `std::collections` error type from the `hashbrown` error type
+// without access to the `TryReserveError::kind` method.
+#[non_exhaustive]
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub struct TryReserveError {}
+
fn default_shard_amount() -> usize {
- (std::thread::available_parallelism().map_or(1, usize::from) * 4).next_power_of_two()
+ static DEFAULT_SHARD_AMOUNT: OnceCell<usize> = OnceCell::new();
+ *DEFAULT_SHARD_AMOUNT.get_or_init(|| {
+ (std::thread::available_parallelism().map_or(1, usize::from) * 4).next_power_of_two()
+ })
}
fn ncb(shard_amount: usize) -> usize {
@@ -65,7 +78,7 @@ fn ncb(shard_amount: usize) -> usize {
/// with some slight changes to handle concurrency.
///
/// DashMap tries to be very simple to use and to be a direct replacement for `RwLock<HashMap<K, V, S>>`.
-/// To accomplish these all methods take `&self` instead modifying methods taking `&mut self`.
+/// To accomplish this, all methods take `&self` instead of modifying methods taking `&mut self`.
/// This allows you to put a DashMap in an `Arc<T>` and share it between threads while being able to modify it.
///
/// Documentation mentioning locking behaviour acts in the reference frame of the calling thread.
@@ -216,7 +229,7 @@ impl<'a, K: 'a + Eq + Hash, V: 'a, S: BuildHasher + Clone> DashMap<K, V, S> {
/// Creates a new DashMap with a specified hasher and shard amount
///
- /// shard_amount should greater than 0 and be a power of two.
+ /// shard_amount should be greater than 0 and a power of two.
/// If a shard_amount which is not a power of two is provided, the function will panic.
///
/// # Examples
@@ -797,6 +810,24 @@ impl<'a, K: 'a + Eq + Hash, V: 'a, S: BuildHasher + Clone> DashMap<K, V, S> {
pub fn try_entry(&'a self, key: K) -> Option<Entry<'a, K, V, S>> {
self._try_entry(key)
}
+
+ /// Tries to mimic `std::collections::HashMap::try_reserve`.
+ /// Tries to reserve capacity for at least `shard * additional`
+ /// and may reserve more space to avoid frequent reallocations.
+ ///
+ /// # Errors
+ ///
+ /// If the capacity overflows, or the allocator reports a failure, then an error is returned.
+ // TODO: return std::collections::TryReserveError once std::collections::TryReserveErrorKind stabilises.
+ pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
+ for shard in self.shards.iter() {
+ shard
+ .write()
+ .try_reserve(additional)
+ .map_err(|_| TryReserveError {})?;
+ }
+ Ok(())
+ }
}
impl<'a, K: 'a + Eq + Hash, V: 'a, S: 'a + BuildHasher + Clone> Map<'a, K, V, S>
@@ -1367,4 +1398,26 @@ mod tests {
assert!(result2.is_locked());
}
}
+
+ #[test]
+ fn test_try_reserve() {
+ let mut map: DashMap<i32, i32> = DashMap::new();
+ // DashMap is empty and doesn't allocate memory
+ assert_eq!(map.capacity(), 0);
+
+ map.try_reserve(10).unwrap();
+
+ // And now map can hold at least 10 elements
+ assert!(map.capacity() >= 10);
+ }
+
+ #[test]
+ fn test_try_reserve_errors() {
+ let mut map: DashMap<i32, i32> = DashMap::new();
+
+ match map.try_reserve(usize::MAX) {
+ Err(_) => {}
+ _ => panic!("should have raised CapacityOverflow error"),
+ }
+ }
}
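Note on the lib.rs changes above: `default_shard_amount` is now memoised in a `OnceCell`, and a fallible `try_reserve` is added together with a placeholder `TryReserveError`. A minimal caller-side sketch of the new API (illustrative only, not part of this diff; it assumes the `TryReserveError` type exported above):

use dashmap::{DashMap, TryReserveError};

fn preallocated_map(expected: usize) -> Result<DashMap<u64, String>, TryReserveError> {
    let mut map: DashMap<u64, String> = DashMap::new();
    // Each shard reserves `expected` additional slots; on capacity overflow or
    // allocator failure the error is returned instead of aborting the process.
    map.try_reserve(expected)?;
    Ok(map)
}

fn main() {
    match preallocated_map(1_000) {
        Ok(map) => println!("capacity reserved for at least {} entries", map.capacity()),
        Err(err) => eprintln!("reservation failed: {:?}", err),
    }
}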
diff --git a/vendor/dashmap/src/rayon/map.rs b/vendor/dashmap/src/rayon/map.rs
index c0947cf6b..4fc0c43aa 100644
--- a/vendor/dashmap/src/rayon/map.rs
+++ b/vendor/dashmap/src/rayon/map.rs
@@ -80,7 +80,7 @@ where
}
pub struct OwningIter<K, V, S = RandomState> {
- shards: Box<[RwLock<HashMap<K, V, S>>]>,
+ pub(super) shards: Box<[RwLock<HashMap<K, V, S>>]>,
}
impl<K, V, S> ParallelIterator for OwningIter<K, V, S>
@@ -125,7 +125,7 @@ where
}
pub struct Iter<'a, K, V, S = RandomState> {
- shards: &'a [RwLock<HashMap<K, V, S>>],
+ pub(super) shards: &'a [RwLock<HashMap<K, V, S>>],
}
impl<'a, K, V, S> ParallelIterator for Iter<'a, K, V, S>
diff --git a/vendor/dashmap/src/rayon/read_only.rs b/vendor/dashmap/src/rayon/read_only.rs
new file mode 100644
index 000000000..a11448cee
--- /dev/null
+++ b/vendor/dashmap/src/rayon/read_only.rs
@@ -0,0 +1,96 @@
+use crate::mapref::multiple::RefMulti;
+use crate::rayon::map::Iter;
+use crate::ReadOnlyView;
+use core::hash::{BuildHasher, Hash};
+use rayon::iter::IntoParallelIterator;
+
+impl<K, V, S> IntoParallelIterator for ReadOnlyView<K, V, S>
+where
+ K: Send + Eq + Hash,
+ V: Send,
+ S: Send + Clone + BuildHasher,
+{
+ type Iter = super::map::OwningIter<K, V, S>;
+ type Item = (K, V);
+
+ fn into_par_iter(self) -> Self::Iter {
+ super::map::OwningIter {
+ shards: self.map.shards,
+ }
+ }
+}
+
+// This impl also enables `IntoParallelRefIterator::par_iter`
+impl<'a, K, V, S> IntoParallelIterator for &'a ReadOnlyView<K, V, S>
+where
+ K: Send + Sync + Eq + Hash,
+ V: Send + Sync,
+ S: Send + Sync + Clone + BuildHasher,
+{
+ type Iter = Iter<'a, K, V, S>;
+ type Item = RefMulti<'a, K, V, S>;
+
+ fn into_par_iter(self) -> Self::Iter {
+ Iter {
+ shards: &self.map.shards,
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::DashMap;
+ use rayon::iter::{IntoParallelIterator, IntoParallelRefIterator, ParallelIterator};
+
+ fn construct_sample_map() -> DashMap<i32, String> {
+ let map = DashMap::new();
+
+ map.insert(1, "one".to_string());
+
+ map.insert(10, "ten".to_string());
+
+ map.insert(27, "twenty seven".to_string());
+
+ map.insert(45, "forty five".to_string());
+
+ map
+ }
+
+ #[test]
+ fn test_par_iter() {
+ let map = construct_sample_map();
+
+ let view = map.clone().into_read_only();
+
+ view.par_iter().for_each(|entry| {
+ let key = *entry.key();
+
+ assert!(view.contains_key(&key));
+
+ let map_entry = map.get(&key).unwrap();
+
+ assert_eq!(view.get(&key).unwrap(), map_entry.value());
+
+ let key_value: (&i32, &String) = view.get_key_value(&key).unwrap();
+
+ assert_eq!(key_value.0, map_entry.key());
+
+ assert_eq!(key_value.1, map_entry.value());
+ });
+ }
+
+ #[test]
+ fn test_into_par_iter() {
+ let map = construct_sample_map();
+
+ let view = map.clone().into_read_only();
+
+ view.into_par_iter().for_each(|(key, value)| {
+ let map_entry = map.get(&key).unwrap();
+
+ assert_eq!(&key, map_entry.key());
+
+ assert_eq!(&value, map_entry.value());
+ });
+ }
+}
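The new file above wires `ReadOnlyView` into rayon by reusing the shard-based iterators from rayon/map.rs. A short hedged usage sketch (not part of the diff; it assumes dashmap is built with the `rayon` feature and the rayon crate is available):

use dashmap::DashMap;
use rayon::iter::{IntoParallelRefIterator, ParallelIterator};

fn main() {
    let map: DashMap<u32, u64> = DashMap::new();
    for i in 0..1_000u32 {
        map.insert(i, u64::from(i) * 2);
    }

    // Freezing the map into a read-only view removes per-access locking; the
    // `IntoParallelIterator` impls added above let rayon split the shard slice
    // across worker threads.
    let view = map.into_read_only();
    let sum: u64 = view.par_iter().map(|entry| *entry.value()).sum();

    assert_eq!(sum, (0..1_000u64).map(|i| i * 2).sum::<u64>());
    println!("sum = {}", sum);
}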
diff --git a/vendor/dashmap/src/read_only.rs b/vendor/dashmap/src/read_only.rs
index c43947b2e..14582245b 100644
--- a/vendor/dashmap/src/read_only.rs
+++ b/vendor/dashmap/src/read_only.rs
@@ -7,7 +7,7 @@ use std::collections::hash_map::RandomState;
/// A read-only view into a `DashMap`. Allows obtaining raw references to the stored values.
pub struct ReadOnlyView<K, V, S = RandomState> {
- map: DashMap<K, V, S>,
+ pub(crate) map: DashMap<K, V, S>,
}
impl<K: Eq + Hash + Clone, V: Clone, S: Clone> Clone for ReadOnlyView<K, V, S> {
diff --git a/vendor/dashmap/src/table.rs b/vendor/dashmap/src/table.rs
deleted file mode 100644
index 03acd76e2..000000000
--- a/vendor/dashmap/src/table.rs
+++ /dev/null
@@ -1,81 +0,0 @@
-use super::u128::AtomicU128;
-use std::borrow::Borrow;
-use std::cell::UnsafeCell;
-use std::hash::{BuildHasher, Hash, Hasher};
-use std::mem::MaybeUninit;
-use std::sync::atomic::{AtomicU64, Ordering};
-use std::sync::{Arc, Mutex};
-
-const TOMBSTONE_BIT: u64 = 1 << 63;
-const ALLOCATED_BIT: u64 = 1 << 62;
-const POINTER_MASK: u64 = 0x3FFFFFFFFFFFFFFF;
-
-fn hash<S, K>(hasher: &S, key: &K) -> u64
-where
- S: BuildHasher,
- K: Hash,
-{
- let mut hasher = hasher.build_hasher();
- key.hash(&mut hasher);
- hasher.finish()
-}
-
-struct Slot<K, V> {
- data: AtomicU64,
- pair: UnsafeCell<MaybeUninit<(K, V)>>,
-}
-
-pub struct Table<K, V, S> {
- hash: Arc<S>,
- slots: Box<[Slot<K, V>]>,
- mask: usize,
-}
-
-impl<K, V, S> Table<K, V, S>
-where
- K: Eq + Hash,
- S: BuildHasher,
-{
- pub fn new(hasher: Arc<S>, capacity: usize) -> Self {
- debug_assert!(capacity.is_power_of_two());
- let slots = (0..capacity)
- .map(|_| Slot {
- data: AtomicU64::new(0),
- pair: UnsafeCell::new(MaybeUninit::uninit()),
- })
- .collect::<Vec<_>>();
-
- Table {
- hash: hasher,
- slots: slots.into_boxed_slice(),
- mask: capacity - 1,
- }
- }
-
- pub fn get<Q>(&self, key: &Q) -> Option<*mut (K, V)>
- where
- K: Borrow<Q>,
- Q: Eq + Hash,
- {
- let hash = hash(&*self.hash, key);
- let mut idx = hash as usize & self.mask;
- let mut i = 0;
-
- loop {
- let slot = &self.slots[idx];
- let data = slot.data.load(Ordering::Relaxed);
- let ptr = (data & POINTER_MASK) as *mut (K, V);
- if !ptr.is_null() {
- let stored = unsafe { (*ptr).0.borrow() };
- if stored == key {
- return Some(ptr);
- }
- } else if data & TOMBSTONE_BIT != TOMBSTONE_BIT || i > self.mask {
- return None;
- }
-
- idx = (idx + 1) & self.mask;
- i += 1;
- }
- }
-}
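For context on the deleted table.rs above: each slot packed a pair pointer and two state flags into a single 64-bit word held in an `AtomicU64`. A standalone sketch of that encoding (illustrative only, reusing the same bit constants; not part of the diff):

const TOMBSTONE_BIT: u64 = 1 << 63;
const ALLOCATED_BIT: u64 = 1 << 62;
const POINTER_MASK: u64 = 0x3FFF_FFFF_FFFF_FFFF;

// Pack a pair pointer plus the tombstone/allocated flags into one word, as the
// removed experimental table stored in each slot.
fn encode(ptr: *mut (u32, u32), tombstone: bool, allocated: bool) -> u64 {
    let mut word = ptr as u64 & POINTER_MASK;
    if tombstone {
        word |= TOMBSTONE_BIT;
    }
    if allocated {
        word |= ALLOCATED_BIT;
    }
    word
}

fn decode(word: u64) -> (*mut (u32, u32), bool, bool) {
    (
        (word & POINTER_MASK) as *mut (u32, u32),
        word & TOMBSTONE_BIT != 0,
        word & ALLOCATED_BIT != 0,
    )
}

fn main() {
    let pair = Box::into_raw(Box::new((1u32, 2u32)));
    let word = encode(pair, false, true);
    let (ptr, tombstone, allocated) = decode(word);
    assert_eq!(ptr, pair);
    assert!(!tombstone && allocated);
    // Free the test allocation.
    unsafe { drop(Box::from_raw(ptr)) };
}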