author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-30 03:57:31 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-30 03:57:31 +0000
commit     dc0db358abe19481e475e10c32149b53370f1a1c (patch)
tree       ab8ce99c4b255ce46f99ef402c27916055b899ee /vendor/hashbrown/src
parent     Releasing progress-linux version 1.71.1+dfsg1-2~progress7.99u1. (diff)
Merging upstream version 1.72.1+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'vendor/hashbrown/src')
-rw-r--r--  vendor/hashbrown/src/external_trait_impls/mod.rs            |    2
-rw-r--r--  vendor/hashbrown/src/external_trait_impls/rayon/map.rs      |    7
-rw-r--r--  vendor/hashbrown/src/external_trait_impls/rkyv/hash_map.rs  |  125
-rw-r--r--  vendor/hashbrown/src/external_trait_impls/rkyv/hash_set.rs  |  123
-rw-r--r--  vendor/hashbrown/src/external_trait_impls/rkyv/mod.rs       |    2
-rw-r--r--  vendor/hashbrown/src/lib.rs                                 |   22
-rw-r--r--  vendor/hashbrown/src/macros.rs                              |    2
-rw-r--r--  vendor/hashbrown/src/map.rs                                 |  284
-rw-r--r--  vendor/hashbrown/src/raw/alloc.rs                           |   57
-rw-r--r--  vendor/hashbrown/src/raw/bitmask.rs                         |   99
-rw-r--r--  vendor/hashbrown/src/raw/generic.rs                         |   59
-rw-r--r--  vendor/hashbrown/src/raw/mod.rs                             | 1246
-rw-r--r--  vendor/hashbrown/src/raw/neon.rs                            |  124
-rw-r--r--  vendor/hashbrown/src/raw/sse2.rs                            |   31
-rw-r--r--  vendor/hashbrown/src/rustc_entry.rs                         |    6
-rw-r--r--  vendor/hashbrown/src/set.rs                                 |   90
16 files changed, 1766 insertions, 513 deletions
diff --git a/vendor/hashbrown/src/external_trait_impls/mod.rs b/vendor/hashbrown/src/external_trait_impls/mod.rs
index ef497836c..01d386b04 100644
--- a/vendor/hashbrown/src/external_trait_impls/mod.rs
+++ b/vendor/hashbrown/src/external_trait_impls/mod.rs
@@ -1,4 +1,6 @@
#[cfg(feature = "rayon")]
pub(crate) mod rayon;
+#[cfg(feature = "rkyv")]
+mod rkyv;
#[cfg(feature = "serde")]
mod serde;
diff --git a/vendor/hashbrown/src/external_trait_impls/rayon/map.rs b/vendor/hashbrown/src/external_trait_impls/rayon/map.rs
index 14d91c220..1124bfd32 100644
--- a/vendor/hashbrown/src/external_trait_impls/rayon/map.rs
+++ b/vendor/hashbrown/src/external_trait_impls/rayon/map.rs
@@ -561,10 +561,7 @@ mod test_par_map {
assert_eq!(value.load(Ordering::Relaxed), 100);
// retain only half
- let _v: Vec<_> = hm
- .into_par_iter()
- .filter(|&(ref key, _)| key.k < 50)
- .collect();
+ let _v: Vec<_> = hm.into_par_iter().filter(|(key, _)| key.k < 50).collect();
assert_eq!(key.load(Ordering::Relaxed), 50);
assert_eq!(value.load(Ordering::Relaxed), 50);
@@ -611,7 +608,7 @@ mod test_par_map {
assert_eq!(value.load(Ordering::Relaxed), 100);
// retain only half
- let _v: Vec<_> = hm.drain().filter(|&(ref key, _)| key.k < 50).collect();
+ let _v: Vec<_> = hm.drain().filter(|(key, _)| key.k < 50).collect();
assert!(hm.is_empty());
assert_eq!(key.load(Ordering::Relaxed), 50);
diff --git a/vendor/hashbrown/src/external_trait_impls/rkyv/hash_map.rs b/vendor/hashbrown/src/external_trait_impls/rkyv/hash_map.rs
new file mode 100644
index 000000000..fae7f7676
--- /dev/null
+++ b/vendor/hashbrown/src/external_trait_impls/rkyv/hash_map.rs
@@ -0,0 +1,125 @@
+use crate::HashMap;
+use core::{
+ borrow::Borrow,
+ hash::{BuildHasher, Hash},
+};
+use rkyv::{
+ collections::hash_map::{ArchivedHashMap, HashMapResolver},
+ ser::{ScratchSpace, Serializer},
+ Archive, Deserialize, Fallible, Serialize,
+};
+
+impl<K: Archive + Hash + Eq, V: Archive, S> Archive for HashMap<K, V, S>
+where
+ K::Archived: Hash + Eq,
+{
+ type Archived = ArchivedHashMap<K::Archived, V::Archived>;
+ type Resolver = HashMapResolver;
+
+ #[inline]
+ unsafe fn resolve(&self, pos: usize, resolver: Self::Resolver, out: *mut Self::Archived) {
+ ArchivedHashMap::resolve_from_len(self.len(), pos, resolver, out);
+ }
+}
+
+impl<K, V, S, RandomState> Serialize<S> for HashMap<K, V, RandomState>
+where
+ K: Serialize<S> + Hash + Eq,
+ K::Archived: Hash + Eq,
+ V: Serialize<S>,
+ S: Serializer + ScratchSpace + ?Sized,
+{
+ #[inline]
+ fn serialize(&self, serializer: &mut S) -> Result<Self::Resolver, S::Error> {
+ unsafe { ArchivedHashMap::serialize_from_iter(self.iter(), serializer) }
+ }
+}
+
+impl<K: Archive + Hash + Eq, V: Archive, D: Fallible + ?Sized, S: Default + BuildHasher>
+ Deserialize<HashMap<K, V, S>, D> for ArchivedHashMap<K::Archived, V::Archived>
+where
+ K::Archived: Deserialize<K, D> + Hash + Eq,
+ V::Archived: Deserialize<V, D>,
+{
+ #[inline]
+ fn deserialize(&self, deserializer: &mut D) -> Result<HashMap<K, V, S>, D::Error> {
+ let mut result = HashMap::with_capacity_and_hasher(self.len(), S::default());
+ for (k, v) in self.iter() {
+ result.insert(k.deserialize(deserializer)?, v.deserialize(deserializer)?);
+ }
+ Ok(result)
+ }
+}
+
+impl<K: Hash + Eq + Borrow<AK>, V, AK: Hash + Eq, AV: PartialEq<V>, S: BuildHasher>
+ PartialEq<HashMap<K, V, S>> for ArchivedHashMap<AK, AV>
+{
+ #[inline]
+ fn eq(&self, other: &HashMap<K, V, S>) -> bool {
+ if self.len() != other.len() {
+ false
+ } else {
+ self.iter()
+ .all(|(key, value)| other.get(key).map_or(false, |v| value.eq(v)))
+ }
+ }
+}
+
+impl<K: Hash + Eq + Borrow<AK>, V, AK: Hash + Eq, AV: PartialEq<V>>
+ PartialEq<ArchivedHashMap<AK, AV>> for HashMap<K, V>
+{
+ #[inline]
+ fn eq(&self, other: &ArchivedHashMap<AK, AV>) -> bool {
+ other.eq(self)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::HashMap;
+ use alloc::string::String;
+ use rkyv::{
+ archived_root, check_archived_root,
+ ser::{serializers::AllocSerializer, Serializer},
+ Deserialize, Infallible,
+ };
+
+ #[test]
+ fn index_map() {
+ let mut value = HashMap::new();
+ value.insert(String::from("foo"), 10);
+ value.insert(String::from("bar"), 20);
+ value.insert(String::from("baz"), 40);
+ value.insert(String::from("bat"), 80);
+
+ let mut serializer = AllocSerializer::<4096>::default();
+ serializer.serialize_value(&value).unwrap();
+ let result = serializer.into_serializer().into_inner();
+ let archived = unsafe { archived_root::<HashMap<String, i32>>(result.as_ref()) };
+
+ assert_eq!(value.len(), archived.len());
+ for (k, v) in value.iter() {
+ let (ak, av) = archived.get_key_value(k.as_str()).unwrap();
+ assert_eq!(k, ak);
+ assert_eq!(v, av);
+ }
+
+ let deserialized: HashMap<String, i32> = archived.deserialize(&mut Infallible).unwrap();
+ assert_eq!(value, deserialized);
+ }
+
+ #[test]
+ fn validate_index_map() {
+ let mut value = HashMap::new();
+ value.insert(String::from("foo"), 10);
+ value.insert(String::from("bar"), 20);
+ value.insert(String::from("baz"), 40);
+ value.insert(String::from("bat"), 80);
+
+ let mut serializer = AllocSerializer::<4096>::default();
+ serializer.serialize_value(&value).unwrap();
+ let result = serializer.into_serializer().into_inner();
+ check_archived_root::<HashMap<String, i32>>(result.as_ref())
+ .expect("failed to validate archived index map");
+ }
+}
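The rkyv impls above give hashbrown's HashMap a full Archive/Serialize/Deserialize round trip. A minimal usage sketch, assuming the crate is built with the new `rkyv` feature and mirroring the serializer types used in the tests above (`AllocSerializer`, `archived_root`, `Infallible`):

    use hashbrown::HashMap;
    use rkyv::{
        archived_root,
        ser::{serializers::AllocSerializer, Serializer},
        Deserialize, Infallible,
    };

    fn round_trip() {
        let mut map = HashMap::new();
        map.insert(String::from("answer"), 42);

        // Serialize into an aligned byte buffer.
        let mut serializer = AllocSerializer::<4096>::default();
        serializer.serialize_value(&map).unwrap();
        let bytes = serializer.into_serializer().into_inner();

        // Reinterpret the buffer as an archived map (unsafe: the buffer must hold a valid archive).
        let archived = unsafe { archived_root::<HashMap<String, i32>>(bytes.as_ref()) };
        assert_eq!(archived.len(), map.len());

        // Deserialize back into an owned hashbrown::HashMap.
        let restored: HashMap<String, i32> = archived.deserialize(&mut Infallible).unwrap();
        assert_eq!(restored, map);
    }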
diff --git a/vendor/hashbrown/src/external_trait_impls/rkyv/hash_set.rs b/vendor/hashbrown/src/external_trait_impls/rkyv/hash_set.rs
new file mode 100644
index 000000000..c8a69cf4f
--- /dev/null
+++ b/vendor/hashbrown/src/external_trait_impls/rkyv/hash_set.rs
@@ -0,0 +1,123 @@
+use crate::HashSet;
+use core::{
+ borrow::Borrow,
+ hash::{BuildHasher, Hash},
+};
+use rkyv::{
+ collections::hash_set::{ArchivedHashSet, HashSetResolver},
+ ser::{ScratchSpace, Serializer},
+ Archive, Deserialize, Fallible, Serialize,
+};
+
+impl<K: Archive + Hash + Eq, S> Archive for HashSet<K, S>
+where
+ K::Archived: Hash + Eq,
+{
+ type Archived = ArchivedHashSet<K::Archived>;
+ type Resolver = HashSetResolver;
+
+ #[inline]
+ unsafe fn resolve(&self, pos: usize, resolver: Self::Resolver, out: *mut Self::Archived) {
+ ArchivedHashSet::<K::Archived>::resolve_from_len(self.len(), pos, resolver, out);
+ }
+}
+
+impl<K, S, RS> Serialize<S> for HashSet<K, RS>
+where
+ K::Archived: Hash + Eq,
+ K: Serialize<S> + Hash + Eq,
+ S: ScratchSpace + Serializer + ?Sized,
+{
+ #[inline]
+ fn serialize(&self, serializer: &mut S) -> Result<Self::Resolver, S::Error> {
+ unsafe { ArchivedHashSet::serialize_from_iter(self.iter(), serializer) }
+ }
+}
+
+impl<K, D, S> Deserialize<HashSet<K, S>, D> for ArchivedHashSet<K::Archived>
+where
+ K: Archive + Hash + Eq,
+ K::Archived: Deserialize<K, D> + Hash + Eq,
+ D: Fallible + ?Sized,
+ S: Default + BuildHasher,
+{
+ #[inline]
+ fn deserialize(&self, deserializer: &mut D) -> Result<HashSet<K, S>, D::Error> {
+ let mut result = HashSet::with_hasher(S::default());
+ for k in self.iter() {
+ result.insert(k.deserialize(deserializer)?);
+ }
+ Ok(result)
+ }
+}
+
+impl<K: Hash + Eq + Borrow<AK>, AK: Hash + Eq, S: BuildHasher> PartialEq<HashSet<K, S>>
+ for ArchivedHashSet<AK>
+{
+ #[inline]
+ fn eq(&self, other: &HashSet<K, S>) -> bool {
+ if self.len() != other.len() {
+ false
+ } else {
+ self.iter().all(|key| other.get(key).is_some())
+ }
+ }
+}
+
+impl<K: Hash + Eq + Borrow<AK>, AK: Hash + Eq, S: BuildHasher> PartialEq<ArchivedHashSet<AK>>
+ for HashSet<K, S>
+{
+ #[inline]
+ fn eq(&self, other: &ArchivedHashSet<AK>) -> bool {
+ other.eq(self)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::HashSet;
+ use alloc::string::String;
+ use rkyv::{
+ archived_root, check_archived_root,
+ ser::{serializers::AllocSerializer, Serializer},
+ Deserialize, Infallible,
+ };
+
+ #[test]
+ fn index_set() {
+ let mut value = HashSet::new();
+ value.insert(String::from("foo"));
+ value.insert(String::from("bar"));
+ value.insert(String::from("baz"));
+ value.insert(String::from("bat"));
+
+ let mut serializer = AllocSerializer::<4096>::default();
+ serializer.serialize_value(&value).unwrap();
+ let result = serializer.into_serializer().into_inner();
+ let archived = unsafe { archived_root::<HashSet<String>>(result.as_ref()) };
+
+ assert_eq!(value.len(), archived.len());
+ for k in value.iter() {
+ let ak = archived.get(k.as_str()).unwrap();
+ assert_eq!(k, ak);
+ }
+
+ let deserialized: HashSet<String> = archived.deserialize(&mut Infallible).unwrap();
+ assert_eq!(value, deserialized);
+ }
+
+ #[test]
+ fn validate_index_set() {
+ let mut value = HashSet::new();
+ value.insert(String::from("foo"));
+ value.insert(String::from("bar"));
+ value.insert(String::from("baz"));
+ value.insert(String::from("bat"));
+
+ let mut serializer = AllocSerializer::<4096>::default();
+ serializer.serialize_value(&value).unwrap();
+ let result = serializer.into_serializer().into_inner();
+ check_archived_root::<HashSet<String>>(result.as_ref())
+ .expect("failed to validate archived index set");
+ }
+}
diff --git a/vendor/hashbrown/src/external_trait_impls/rkyv/mod.rs b/vendor/hashbrown/src/external_trait_impls/rkyv/mod.rs
new file mode 100644
index 000000000..2bde6a065
--- /dev/null
+++ b/vendor/hashbrown/src/external_trait_impls/rkyv/mod.rs
@@ -0,0 +1,2 @@
+mod hash_map;
+mod hash_set;
diff --git a/vendor/hashbrown/src/lib.rs b/vendor/hashbrown/src/lib.rs
index e43165dd6..013a9ddd9 100644
--- a/vendor/hashbrown/src/lib.rs
+++ b/vendor/hashbrown/src/lib.rs
@@ -20,9 +20,8 @@
extend_one,
allocator_api,
slice_ptr_get,
- nonnull_slice_from_raw_parts,
maybe_uninit_array_assume_init,
- build_hasher_simple_hash_one
+ strict_provenance
)
)]
#![allow(
@@ -37,6 +36,7 @@
)]
#![warn(missing_docs)]
#![warn(rust_2018_idioms)]
+#![cfg_attr(feature = "nightly", warn(fuzzy_provenance_casts))]
#[cfg(test)]
#[macro_use]
@@ -163,21 +163,3 @@ pub enum TryReserveError {
layout: alloc::alloc::Layout,
},
}
-
-/// Wrapper around `Bump` which allows it to be used as an allocator for
-/// `HashMap`, `HashSet` and `RawTable`.
-///
-/// `Bump` can be used directly without this wrapper on nightly if you enable
-/// the `allocator-api` feature of the `bumpalo` crate.
-#[cfg(feature = "bumpalo")]
-#[derive(Clone, Copy, Debug)]
-pub struct BumpWrapper<'a>(pub &'a bumpalo::Bump);
-
-#[cfg(feature = "bumpalo")]
-#[test]
-fn test_bumpalo() {
- use bumpalo::Bump;
- let bump = Bump::new();
- let mut map = HashMap::new_in(BumpWrapper(&bump));
- map.insert(0, 1);
-}
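With `BumpWrapper` and its test removed, the replacement pattern (shown by the updated doc examples later in this patch) is to hand a bumpalo `Bump` to `new_in` directly. A minimal sketch, assuming bumpalo is compiled with allocator-API support so that `&Bump` satisfies hashbrown's `Allocator` bound:

    use bumpalo::Bump;
    use hashbrown::HashMap;

    fn bump_allocated_map() {
        let bump = Bump::new();
        // `&Bump` is used as the allocator directly; no wrapper type is needed any more.
        let mut map = HashMap::new_in(&bump);
        map.insert(0, 1);
        assert_eq!(map.get(&0), Some(&1));
    }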
diff --git a/vendor/hashbrown/src/macros.rs b/vendor/hashbrown/src/macros.rs
index f8ef917b1..eaba6bed1 100644
--- a/vendor/hashbrown/src/macros.rs
+++ b/vendor/hashbrown/src/macros.rs
@@ -37,7 +37,7 @@ macro_rules! cfg_if {
// semicolon is all the remaining items
(@__items ($($not:meta,)*) ; ) => {};
(@__items ($($not:meta,)*) ; ( ($($m:meta),*) ($($it:item)*) ), $($rest:tt)*) => {
- // Emit all items within one block, applying an approprate #[cfg]. The
+ // Emit all items within one block, applying an appropriate #[cfg]. The
// #[cfg] will require all `$m` matchers specified and must also negate
// all previous matchers.
cfg_if! { @__apply cfg(all($($m,)* not(any($($not),*)))), $($it)* }
diff --git a/vendor/hashbrown/src/map.rs b/vendor/hashbrown/src/map.rs
index e238bf66b..548ca0f9e 100644
--- a/vendor/hashbrown/src/map.rs
+++ b/vendor/hashbrown/src/map.rs
@@ -260,29 +260,6 @@ where
hash_builder.hash_one(val)
}
-#[cfg(not(feature = "nightly"))]
-#[cfg_attr(feature = "inline-more", inline)]
-pub(crate) fn make_insert_hash<K, S>(hash_builder: &S, val: &K) -> u64
-where
- K: Hash,
- S: BuildHasher,
-{
- use core::hash::Hasher;
- let mut state = hash_builder.build_hasher();
- val.hash(&mut state);
- state.finish()
-}
-
-#[cfg(feature = "nightly")]
-#[cfg_attr(feature = "inline-more", inline)]
-pub(crate) fn make_insert_hash<K, S>(hash_builder: &S, val: &K) -> u64
-where
- K: Hash,
- S: BuildHasher,
-{
- hash_builder.hash_one(val)
-}
-
#[cfg(feature = "ahash")]
impl<K, V> HashMap<K, V, DefaultHashBuilder> {
/// Creates an empty `HashMap`.
@@ -368,13 +345,11 @@ impl<K, V, A: Allocator + Clone> HashMap<K, V, DefaultHashBuilder, A> {
/// # Examples
///
/// ```
- /// # #[cfg(feature = "bumpalo")]
- /// # fn test() {
- /// use hashbrown::{HashMap, BumpWrapper};
+ /// use hashbrown::HashMap;
/// use bumpalo::Bump;
///
/// let bump = Bump::new();
- /// let mut map = HashMap::new_in(BumpWrapper(&bump));
+ /// let mut map = HashMap::new_in(&bump);
///
/// // The created HashMap holds none elements
/// assert_eq!(map.len(), 0);
@@ -388,11 +363,6 @@ impl<K, V, A: Allocator + Clone> HashMap<K, V, DefaultHashBuilder, A> {
/// assert_eq!(map.len(), 1);
/// // And it also allocates some capacity
/// assert!(map.capacity() > 1);
- /// # }
- /// # fn main() {
- /// # #[cfg(feature = "bumpalo")]
- /// # test()
- /// # }
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn new_in(alloc: A) -> Self {
@@ -419,13 +389,11 @@ impl<K, V, A: Allocator + Clone> HashMap<K, V, DefaultHashBuilder, A> {
/// # Examples
///
/// ```
- /// # #[cfg(feature = "bumpalo")]
- /// # fn test() {
- /// use hashbrown::{HashMap, BumpWrapper};
+ /// use hashbrown::HashMap;
/// use bumpalo::Bump;
///
/// let bump = Bump::new();
- /// let mut map = HashMap::with_capacity_in(5, BumpWrapper(&bump));
+ /// let mut map = HashMap::with_capacity_in(5, &bump);
///
/// // The created HashMap holds none elements
/// assert_eq!(map.len(), 0);
@@ -444,11 +412,6 @@ impl<K, V, A: Allocator + Clone> HashMap<K, V, DefaultHashBuilder, A> {
/// assert_eq!(map.len(), 5);
/// // But its capacity isn't changed
/// assert_eq!(map.capacity(), empty_map_capacity)
- /// # }
- /// # fn main() {
- /// # #[cfg(feature = "bumpalo")]
- /// # test()
- /// # }
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
@@ -972,15 +935,12 @@ impl<K, V, S, A: Allocator + Clone> HashMap<K, V, S, A> {
/// In other words, move all pairs `(k, v)` such that `f(&k, &mut v)` returns `true` out
/// into another iterator.
///
- /// Note that `drain_filter` lets you mutate every value in the filter closure, regardless of
+ /// Note that `extract_if` lets you mutate every value in the filter closure, regardless of
/// whether you choose to keep or remove it.
///
- /// When the returned DrainedFilter is dropped, any remaining elements that satisfy
- /// the predicate are dropped from the table.
- ///
- /// It is unspecified how many more elements will be subjected to the closure
- /// if a panic occurs in the closure, or a panic occurs while dropping an element,
- /// or if the `DrainFilter` value is leaked.
+ /// If the returned `ExtractIf` is not exhausted, e.g. because it is dropped without iterating
+ /// or the iteration short-circuits, then the remaining elements will be retained.
+ /// Use [`retain()`] with a negated predicate if you do not need the returned iterator.
///
/// Keeps the allocated memory for reuse.
///
@@ -991,7 +951,7 @@ impl<K, V, S, A: Allocator + Clone> HashMap<K, V, S, A> {
///
/// let mut map: HashMap<i32, i32> = (0..8).map(|x| (x, x)).collect();
///
- /// let drained: HashMap<i32, i32> = map.drain_filter(|k, _v| k % 2 == 0).collect();
+ /// let drained: HashMap<i32, i32> = map.extract_if(|k, _v| k % 2 == 0).collect();
///
/// let mut evens = drained.keys().cloned().collect::<Vec<_>>();
/// let mut odds = map.keys().cloned().collect::<Vec<_>>();
@@ -1004,21 +964,20 @@ impl<K, V, S, A: Allocator + Clone> HashMap<K, V, S, A> {
/// let mut map: HashMap<i32, i32> = (0..8).map(|x| (x, x)).collect();
///
/// { // Iterator is dropped without being consumed.
- /// let d = map.drain_filter(|k, _v| k % 2 != 0);
+ /// let d = map.extract_if(|k, _v| k % 2 != 0);
/// }
///
- /// // But the map lens have been reduced by half
- /// // even if we do not use DrainFilter iterator.
- /// assert_eq!(map.len(), 4);
+ /// // ExtractIf was not exhausted, therefore no elements were drained.
+ /// assert_eq!(map.len(), 8);
/// ```
#[cfg_attr(feature = "inline-more", inline)]
- pub fn drain_filter<F>(&mut self, f: F) -> DrainFilter<'_, K, V, F, A>
+ pub fn extract_if<F>(&mut self, f: F) -> ExtractIf<'_, K, V, F, A>
where
F: FnMut(&K, &mut V) -> bool,
{
- DrainFilter {
+ ExtractIf {
f,
- inner: DrainFilterInner {
+ inner: ExtractIfInner {
iter: unsafe { self.table.iter() },
table: &mut self.table,
},
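The rename also changes behaviour: unlike `DrainFilter`, an `ExtractIf` that is dropped early leaves the unvisited entries in the map. A short sketch of both outcomes, following the doc comment and test changes in this patch:

    use hashbrown::HashMap;

    fn extract_if_semantics() {
        // Dropping the iterator without consuming it removes nothing.
        let mut map: HashMap<i32, i32> = (0..8).map(|x| (x, x)).collect();
        drop(map.extract_if(|k, _v| k % 2 == 0));
        assert_eq!(map.len(), 8);

        // Consuming it removes exactly the matching entries.
        let mut map: HashMap<i32, i32> = (0..8).map(|x| (x, x)).collect();
        map.extract_if(|k, _v| k % 2 == 0).for_each(drop);
        assert_eq!(map.len(), 4);
    }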
@@ -1266,7 +1225,7 @@ where
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn entry(&mut self, key: K) -> Entry<'_, K, V, S, A> {
- let hash = make_insert_hash::<K, S>(&self.hash_builder, &key);
+ let hash = make_hash::<K, S>(&self.hash_builder, &key);
if let Some(elem) = self.table.find(hash, equivalent_key(&key)) {
Entry::Occupied(OccupiedEntry {
hash,
@@ -1348,7 +1307,7 @@ where
{
// Avoid `Option::map` because it bloats LLVM IR.
match self.get_inner(k) {
- Some(&(_, ref v)) => Some(v),
+ Some((_, v)) => Some(v),
None => None,
}
}
@@ -1379,7 +1338,7 @@ where
{
// Avoid `Option::map` because it bloats LLVM IR.
match self.get_inner(k) {
- Some(&(ref key, ref value)) => Some((key, value)),
+ Some((key, value)) => Some((key, value)),
None => None,
}
}
@@ -1786,13 +1745,19 @@ where
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn insert(&mut self, k: K, v: V) -> Option<V> {
- let hash = make_insert_hash::<K, S>(&self.hash_builder, &k);
- if let Some((_, item)) = self.table.get_mut(hash, equivalent_key(&k)) {
- Some(mem::replace(item, v))
- } else {
- self.table
- .insert(hash, (k, v), make_hasher::<_, V, S>(&self.hash_builder));
- None
+ let hash = make_hash::<K, S>(&self.hash_builder, &k);
+ let hasher = make_hasher::<_, V, S>(&self.hash_builder);
+ match self
+ .table
+ .find_or_find_insert_slot(hash, equivalent_key(&k), hasher)
+ {
+ Ok(bucket) => Some(mem::replace(unsafe { &mut bucket.as_mut().1 }, v)),
+ Err(slot) => {
+ unsafe {
+ self.table.insert_in_slot(hash, slot, (k, v));
+ }
+ None
+ }
}
}
@@ -1847,7 +1812,7 @@ where
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn insert_unique_unchecked(&mut self, k: K, v: V) -> (&K, &mut V) {
- let hash = make_insert_hash::<K, S>(&self.hash_builder, &k);
+ let hash = make_hash::<K, S>(&self.hash_builder, &k);
let bucket = self
.table
.insert(hash, (k, v), make_hasher::<_, V, S>(&self.hash_builder));
@@ -2123,6 +2088,18 @@ impl<K, V, S, A: Allocator + Clone> HashMap<K, V, S, A> {
RawEntryBuilder { map: self }
}
+ /// Returns a reference to the [`RawTable`] used underneath [`HashMap`].
+ /// This function is only available if the `raw` feature of the crate is enabled.
+ ///
+ /// See [`raw_table_mut`] for more.
+ ///
+ /// [`raw_table_mut`]: Self::raw_table_mut
+ #[cfg(feature = "raw")]
+ #[cfg_attr(feature = "inline-more", inline)]
+ pub fn raw_table(&self) -> &RawTable<(K, V), A> {
+ &self.table
+ }
+
/// Returns a mutable reference to the [`RawTable`] used underneath [`HashMap`].
/// This function is only available if the `raw` feature of the crate is enabled.
///
@@ -2159,9 +2136,9 @@ impl<K, V, S, A: Allocator + Clone> HashMap<K, V, S, A> {
/// where
/// F: Fn(&(K, V)) -> bool,
/// {
- /// let raw_table = map.raw_table();
+ /// let raw_table = map.raw_table_mut();
/// match raw_table.find(hash, is_match) {
- /// Some(bucket) => Some(unsafe { raw_table.remove(bucket) }),
+ /// Some(bucket) => Some(unsafe { raw_table.remove(bucket).0 }),
/// None => None,
/// }
/// }
@@ -2180,7 +2157,7 @@ impl<K, V, S, A: Allocator + Clone> HashMap<K, V, S, A> {
/// ```
#[cfg(feature = "raw")]
#[cfg_attr(feature = "inline-more", inline)]
- pub fn raw_table(&mut self) -> &mut RawTable<(K, V), A> {
+ pub fn raw_table_mut(&mut self) -> &mut RawTable<(K, V), A> {
&mut self.table
}
}
@@ -2711,10 +2688,10 @@ impl<K, V, A: Allocator + Clone> Drain<'_, K, V, A> {
/// A draining iterator over entries of a `HashMap` which don't satisfy the predicate
/// `f(&k, &mut v)` in arbitrary order. The iterator element type is `(K, V)`.
///
-/// This `struct` is created by the [`drain_filter`] method on [`HashMap`]. See its
+/// This `struct` is created by the [`extract_if`] method on [`HashMap`]. See its
/// documentation for more.
///
-/// [`drain_filter`]: struct.HashMap.html#method.drain_filter
+/// [`extract_if`]: struct.HashMap.html#method.extract_if
/// [`HashMap`]: struct.HashMap.html
///
/// # Examples
@@ -2724,54 +2701,31 @@ impl<K, V, A: Allocator + Clone> Drain<'_, K, V, A> {
///
/// let mut map: HashMap<i32, &str> = [(1, "a"), (2, "b"), (3, "c")].into();
///
-/// let mut drain_filter = map.drain_filter(|k, _v| k % 2 != 0);
-/// let mut vec = vec![drain_filter.next(), drain_filter.next()];
+/// let mut extract_if = map.extract_if(|k, _v| k % 2 != 0);
+/// let mut vec = vec![extract_if.next(), extract_if.next()];
///
-/// // The `DrainFilter` iterator produces items in arbitrary order, so the
+/// // The `ExtractIf` iterator produces items in arbitrary order, so the
/// // items must be sorted to test them against a sorted array.
/// vec.sort_unstable();
/// assert_eq!(vec, [Some((1, "a")),Some((3, "c"))]);
///
/// // It is fused iterator
-/// assert_eq!(drain_filter.next(), None);
-/// assert_eq!(drain_filter.next(), None);
-/// drop(drain_filter);
+/// assert_eq!(extract_if.next(), None);
+/// assert_eq!(extract_if.next(), None);
+/// drop(extract_if);
///
/// assert_eq!(map.len(), 1);
/// ```
-pub struct DrainFilter<'a, K, V, F, A: Allocator + Clone = Global>
+#[must_use = "Iterators are lazy unless consumed"]
+pub struct ExtractIf<'a, K, V, F, A: Allocator + Clone = Global>
where
F: FnMut(&K, &mut V) -> bool,
{
f: F,
- inner: DrainFilterInner<'a, K, V, A>,
-}
-
-impl<'a, K, V, F, A> Drop for DrainFilter<'a, K, V, F, A>
-where
- F: FnMut(&K, &mut V) -> bool,
- A: Allocator + Clone,
-{
- #[cfg_attr(feature = "inline-more", inline)]
- fn drop(&mut self) {
- while let Some(item) = self.next() {
- let guard = ConsumeAllOnDrop(self);
- drop(item);
- mem::forget(guard);
- }
- }
+ inner: ExtractIfInner<'a, K, V, A>,
}
-pub(super) struct ConsumeAllOnDrop<'a, T: Iterator>(pub &'a mut T);
-
-impl<T: Iterator> Drop for ConsumeAllOnDrop<'_, T> {
- #[cfg_attr(feature = "inline-more", inline)]
- fn drop(&mut self) {
- self.0.for_each(drop);
- }
-}
-
-impl<K, V, F, A> Iterator for DrainFilter<'_, K, V, F, A>
+impl<K, V, F, A> Iterator for ExtractIf<'_, K, V, F, A>
where
F: FnMut(&K, &mut V) -> bool,
A: Allocator + Clone,
@@ -2789,15 +2743,15 @@ where
}
}
-impl<K, V, F> FusedIterator for DrainFilter<'_, K, V, F> where F: FnMut(&K, &mut V) -> bool {}
+impl<K, V, F> FusedIterator for ExtractIf<'_, K, V, F> where F: FnMut(&K, &mut V) -> bool {}
-/// Portions of `DrainFilter` shared with `set::DrainFilter`
-pub(super) struct DrainFilterInner<'a, K, V, A: Allocator + Clone> {
+/// Portions of `ExtractIf` shared with `set::ExtractIf`
+pub(super) struct ExtractIfInner<'a, K, V, A: Allocator + Clone> {
pub iter: RawIter<(K, V)>,
pub table: &'a mut RawTable<(K, V), A>,
}
-impl<K, V, A: Allocator + Clone> DrainFilterInner<'_, K, V, A> {
+impl<K, V, A: Allocator + Clone> ExtractIfInner<'_, K, V, A> {
#[cfg_attr(feature = "inline-more", inline)]
pub(super) fn next<F>(&mut self, f: &mut F) -> Option<(K, V)>
where
@@ -2807,7 +2761,7 @@ impl<K, V, A: Allocator + Clone> DrainFilterInner<'_, K, V, A> {
for item in &mut self.iter {
let &mut (ref key, ref mut value) = item.as_mut();
if f(key, value) {
- return Some(self.table.remove(item));
+ return Some(self.table.remove(item).0);
}
}
}
@@ -3360,7 +3314,7 @@ impl<'a, K, V, S, A: Allocator + Clone> RawEntryBuilder<'a, K, V, S, A> {
F: FnMut(&K) -> bool,
{
match self.map.table.get(hash, |(k, _)| is_match(k)) {
- Some(&(ref key, ref value)) => Some((key, value)),
+ Some((key, value)) => Some((key, value)),
None => None,
}
}
@@ -3756,7 +3710,7 @@ impl<'a, K, V, S, A: Allocator + Clone> RawOccupiedEntryMut<'a, K, V, S, A> {
#[cfg_attr(feature = "inline-more", inline)]
pub fn get_key_value(&self) -> (&K, &V) {
unsafe {
- let &(ref key, ref value) = self.elem.as_ref();
+ let (key, value) = self.elem.as_ref();
(key, value)
}
}
@@ -3928,7 +3882,7 @@ impl<'a, K, V, S, A: Allocator + Clone> RawOccupiedEntryMut<'a, K, V, S, A> {
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn remove_entry(self) -> (K, V) {
- unsafe { self.table.remove(self.elem) }
+ unsafe { self.table.remove(self.elem).0 }
}
/// Provides shared access to the key and owned access to the value of
@@ -4012,7 +3966,7 @@ impl<'a, K, V, S, A: Allocator + Clone> RawVacantEntryMut<'a, K, V, S, A> {
K: Hash,
S: BuildHasher,
{
- let hash = make_insert_hash::<K, S>(self.hash_builder, &key);
+ let hash = make_hash::<K, S>(self.hash_builder, &key);
self.insert_hashed_nocheck(hash, key, value)
}
@@ -4120,7 +4074,7 @@ impl<'a, K, V, S, A: Allocator + Clone> RawVacantEntryMut<'a, K, V, S, A> {
K: Hash,
S: BuildHasher,
{
- let hash = make_insert_hash::<K, S>(self.hash_builder, &key);
+ let hash = make_hash::<K, S>(self.hash_builder, &key);
let elem = self.table.insert(
hash,
(key, value),
@@ -4297,7 +4251,7 @@ impl<K: Debug, V: Debug, S, A: Allocator + Clone> Debug for Entry<'_, K, V, S, A
/// assert_eq!(map.get(&"c"), None);
/// assert_eq!(map.len(), 2);
/// ```
-pub struct OccupiedEntry<'a, K, V, S, A: Allocator + Clone = Global> {
+pub struct OccupiedEntry<'a, K, V, S = DefaultHashBuilder, A: Allocator + Clone = Global> {
hash: u64,
key: Option<K>,
elem: Bucket<(K, V)>,
@@ -4360,7 +4314,7 @@ impl<K: Debug, V: Debug, S, A: Allocator + Clone> Debug for OccupiedEntry<'_, K,
/// }
/// assert!(map[&"b"] == 20 && map.len() == 2);
/// ```
-pub struct VacantEntry<'a, K, V, S, A: Allocator + Clone = Global> {
+pub struct VacantEntry<'a, K, V, S = DefaultHashBuilder, A: Allocator + Clone = Global> {
hash: u64,
key: K,
table: &'a mut HashMap<K, V, S, A>,
@@ -4568,7 +4522,7 @@ impl<K: Borrow<Q>, Q: ?Sized + Debug, V: Debug, S, A: Allocator + Clone> Debug
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("OccupiedEntryRef")
- .field("key", &self.key())
+ .field("key", &self.key().borrow())
.field("value", &self.get())
.finish()
}
@@ -5301,7 +5255,7 @@ impl<'a, K, V, S, A: Allocator + Clone> OccupiedEntry<'a, K, V, S, A> {
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn remove_entry(self) -> (K, V) {
- unsafe { self.table.table.remove(self.elem) }
+ unsafe { self.table.table.remove(self.elem).0 }
}
/// Gets a reference to the value in the entry.
@@ -5838,7 +5792,7 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> EntryRef<'a, 'b, K, Q, V,
K: Borrow<Q>,
{
match *self {
- EntryRef::Occupied(ref entry) => entry.key(),
+ EntryRef::Occupied(ref entry) => entry.key().borrow(),
EntryRef::Vacant(ref entry) => entry.key(),
}
}
@@ -5934,8 +5888,7 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> EntryRef<'a, 'b, K, Q, V,
#[cfg_attr(feature = "inline-more", inline)]
pub fn and_replace_entry_with<F>(self, f: F) -> Self
where
- F: FnOnce(&Q, V) -> Option<V>,
- K: Borrow<Q>,
+ F: FnOnce(&K, V) -> Option<V>,
{
match self {
EntryRef::Occupied(entry) => entry.replace_entry_with(f),
@@ -5994,11 +5947,8 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> OccupiedEntryRef<'a, 'b,
/// }
/// ```
#[cfg_attr(feature = "inline-more", inline)]
- pub fn key(&self) -> &Q
- where
- K: Borrow<Q>,
- {
- unsafe { &self.elem.as_ref().0 }.borrow()
+ pub fn key(&self) -> &K {
+ unsafe { &self.elem.as_ref().0 }
}
/// Take the ownership of the key and value from the map.
@@ -6027,7 +5977,7 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> OccupiedEntryRef<'a, 'b,
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn remove_entry(self) -> (K, V) {
- unsafe { self.table.table.remove(self.elem) }
+ unsafe { self.table.table.remove(self.elem).0 }
}
/// Gets a reference to the value in the entry.
@@ -6303,8 +6253,7 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> OccupiedEntryRef<'a, 'b,
#[cfg_attr(feature = "inline-more", inline)]
pub fn replace_entry_with<F>(self, f: F) -> EntryRef<'a, 'b, K, Q, V, S, A>
where
- F: FnOnce(&Q, V) -> Option<V>,
- K: Borrow<Q>,
+ F: FnOnce(&K, V) -> Option<V>,
{
unsafe {
let mut spare_key = None;
@@ -6312,7 +6261,7 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> OccupiedEntryRef<'a, 'b,
self.table
.table
.replace_bucket_with(self.elem.clone(), |(key, value)| {
- if let Some(new_value) = f(key.borrow(), value) {
+ if let Some(new_value) = f(&key, value) {
Some((key, new_value))
} else {
spare_key = Some(KeyOrRef::Owned(key));
@@ -6926,7 +6875,6 @@ mod test_map {
}
});
- #[allow(clippy::let_underscore_drop)] // kind-of a false positive
for _ in half.by_ref() {}
DROP_VECTOR.with(|v| {
@@ -7254,10 +7202,10 @@ mod test_map {
map.insert(1, 2);
map.insert(3, 4);
- let map_str = format!("{:?}", map);
+ let map_str = format!("{map:?}");
assert!(map_str == "{1: 2, 3: 4}" || map_str == "{3: 4, 1: 2}");
- assert_eq!(format!("{:?}", empty), "{}");
+ assert_eq!(format!("{empty:?}"), "{}");
}
#[test]
@@ -7573,7 +7521,7 @@ mod test_map {
// Test for #19292
fn check(m: &HashMap<i32, ()>) {
for k in m.keys() {
- assert!(m.contains_key(k), "{} is in keys() but not in the map?", k);
+ assert!(m.contains_key(k), "{k} is in keys() but not in the map?");
}
}
@@ -7609,7 +7557,7 @@ mod test_map {
// Test for #19292
fn check(m: &HashMap<std::string::String, ()>) {
for k in m.keys() {
- assert!(m.contains_key(k), "{} is in keys() but not in the map?", k);
+ assert!(m.contains_key(k), "{k} is in keys() but not in the map?");
}
}
@@ -7658,6 +7606,7 @@ mod test_map {
}
#[test]
+ #[allow(clippy::needless_borrow)]
fn test_extend_ref_kv_tuple() {
use std::ops::AddAssign;
let mut a = HashMap::new();
@@ -8080,7 +8029,7 @@ mod test_map {
// Test for #19292
fn check(m: &HashMap<i32, ()>) {
for k in m.keys() {
- assert!(m.contains_key(k), "{} is in keys() but not in the map?", k);
+ assert!(m.contains_key(k), "{k} is in keys() but not in the map?");
}
}
@@ -8110,7 +8059,7 @@ mod test_map {
// Test for #19292
fn check(m: &HashMap<std::string::String, ()>) {
for k in m.keys() {
- assert!(m.contains_key(k), "{} is in keys() but not in the map?", k);
+ assert!(m.contains_key(k), "{k} is in keys() but not in the map?");
}
}
@@ -8148,10 +8097,10 @@ mod test_map {
}
#[test]
- fn test_drain_filter() {
+ fn test_extract_if() {
{
let mut map: HashMap<i32, i32> = (0..8).map(|x| (x, x * 10)).collect();
- let drained = map.drain_filter(|&k, _| k % 2 == 0);
+ let drained = map.extract_if(|&k, _| k % 2 == 0);
let mut out = drained.collect::<Vec<_>>();
out.sort_unstable();
assert_eq!(vec![(0, 0), (2, 20), (4, 40), (6, 60)], out);
@@ -8159,7 +8108,7 @@ mod test_map {
}
{
let mut map: HashMap<i32, i32> = (0..8).map(|x| (x, x * 10)).collect();
- drop(map.drain_filter(|&k, _| k % 2 == 0));
+ map.extract_if(|&k, _| k % 2 == 0).for_each(drop);
assert_eq!(map.len(), 4);
}
}
@@ -8208,7 +8157,7 @@ mod test_map {
let mut map: HashMap<_, _> = xs.iter().copied().collect();
let compute_hash = |map: &HashMap<i32, i32>, k: i32| -> u64 {
- super::make_insert_hash::<i32, _>(map.hasher(), &k)
+ super::make_hash::<i32, _>(map.hasher(), &k)
};
// Existing key (insert)
@@ -8370,17 +8319,17 @@ mod test_map {
loop {
// occasionally remove some elements
if i < n && rng.gen_bool(0.1) {
- let hash_value = super::make_insert_hash(&hash_builder, &i);
+ let hash_value = super::make_hash(&hash_builder, &i);
unsafe {
let e = map.table.find(hash_value, |q| q.0.eq(&i));
if let Some(e) = e {
it.reflect_remove(&e);
- let t = map.table.remove(e);
+ let t = map.table.remove(e).0;
removed.push(t);
left -= 1;
} else {
- assert!(removed.contains(&(i, 2 * i)), "{} not in {:?}", i, removed);
+ assert!(removed.contains(&(i, 2 * i)), "{i} not in {removed:?}");
let e = map.table.insert(
hash_value,
(i, 2 * i),
@@ -8509,4 +8458,49 @@ mod test_map {
map2.clone_from(&map1);
}
+
+ #[test]
+ #[should_panic = "panic in clone"]
+ fn test_clone_from_memory_leaks() {
+ use ::alloc::vec::Vec;
+
+ struct CheckedClone {
+ panic_in_clone: bool,
+ need_drop: Vec<i32>,
+ }
+ impl Clone for CheckedClone {
+ fn clone(&self) -> Self {
+ if self.panic_in_clone {
+ panic!("panic in clone")
+ }
+ Self {
+ panic_in_clone: self.panic_in_clone,
+ need_drop: self.need_drop.clone(),
+ }
+ }
+ }
+ let mut map1 = HashMap::new();
+ map1.insert(
+ 1,
+ CheckedClone {
+ panic_in_clone: false,
+ need_drop: vec![0, 1, 2],
+ },
+ );
+ map1.insert(
+ 2,
+ CheckedClone {
+ panic_in_clone: false,
+ need_drop: vec![3, 4, 5],
+ },
+ );
+ map1.insert(
+ 3,
+ CheckedClone {
+ panic_in_clone: true,
+ need_drop: vec![6, 7, 8],
+ },
+ );
+ let _map2 = map1.clone();
+ }
}
diff --git a/vendor/hashbrown/src/raw/alloc.rs b/vendor/hashbrown/src/raw/alloc.rs
index ba09ea9de..15299e7b0 100644
--- a/vendor/hashbrown/src/raw/alloc.rs
+++ b/vendor/hashbrown/src/raw/alloc.rs
@@ -1,5 +1,9 @@
pub(crate) use self::inner::{do_alloc, Allocator, Global};
+// Nightly-case.
+// Use unstable `allocator_api` feature.
+// This is compatible with `allocator-api2` which can be enabled or not.
+// This is used when building for `std`.
#[cfg(feature = "nightly")]
mod inner {
use crate::alloc::alloc::Layout;
@@ -7,28 +11,44 @@ mod inner {
use core::ptr::NonNull;
#[allow(clippy::map_err_ignore)]
- pub fn do_alloc<A: Allocator>(alloc: &A, layout: Layout) -> Result<NonNull<u8>, ()> {
+ pub(crate) fn do_alloc<A: Allocator>(alloc: &A, layout: Layout) -> Result<NonNull<u8>, ()> {
match alloc.allocate(layout) {
Ok(ptr) => Ok(ptr.as_non_null_ptr()),
Err(_) => Err(()),
}
}
+}
- #[cfg(feature = "bumpalo")]
- unsafe impl Allocator for crate::BumpWrapper<'_> {
- #[inline]
- fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, core::alloc::AllocError> {
- match self.0.try_alloc_layout(layout) {
- Ok(ptr) => Ok(NonNull::slice_from_raw_parts(ptr, layout.size())),
- Err(_) => Err(core::alloc::AllocError),
- }
+// Basic non-nightly case.
+// This uses `allocator-api2` enabled by default.
+// If any crate enables "nightly" in `allocator-api2`,
+// this will be equivalent to the nightly case,
+// since `allocator_api2::alloc::Allocator` would be re-export of
+// `core::alloc::Allocator`.
+#[cfg(all(not(feature = "nightly"), feature = "allocator-api2"))]
+mod inner {
+ use crate::alloc::alloc::Layout;
+ pub use allocator_api2::alloc::{Allocator, Global};
+ use core::ptr::NonNull;
+
+ #[allow(clippy::map_err_ignore)]
+ pub(crate) fn do_alloc<A: Allocator>(alloc: &A, layout: Layout) -> Result<NonNull<u8>, ()> {
+ match alloc.allocate(layout) {
+ Ok(ptr) => Ok(ptr.cast()),
+ Err(_) => Err(()),
}
- #[inline]
- unsafe fn deallocate(&self, _ptr: NonNull<u8>, _layout: Layout) {}
}
}
-#[cfg(not(feature = "nightly"))]
+// No-defaults case.
+// When building with default-features turned off and
+// neither `nightly` nor `allocator-api2` is enabled,
+// this will be used.
+// Making it impossible to use any custom allocator with collections defined
+// in this crate.
+// Any crate in build-tree can enable `allocator-api2`,
+// or `nightly` without disturbing users that don't want to use it.
+#[cfg(not(any(feature = "nightly", feature = "allocator-api2")))]
mod inner {
use crate::alloc::alloc::{alloc, dealloc, Layout};
use core::ptr::NonNull;
@@ -41,6 +61,7 @@ mod inner {
#[derive(Copy, Clone)]
pub struct Global;
+
unsafe impl Allocator for Global {
#[inline]
fn allocate(&self, layout: Layout) -> Result<NonNull<u8>, ()> {
@@ -51,6 +72,7 @@ mod inner {
dealloc(ptr.as_ptr(), layout);
}
}
+
impl Default for Global {
#[inline]
fn default() -> Self {
@@ -58,16 +80,7 @@ mod inner {
}
}
- pub fn do_alloc<A: Allocator>(alloc: &A, layout: Layout) -> Result<NonNull<u8>, ()> {
+ pub(crate) fn do_alloc<A: Allocator>(alloc: &A, layout: Layout) -> Result<NonNull<u8>, ()> {
alloc.allocate(layout)
}
-
- #[cfg(feature = "bumpalo")]
- unsafe impl Allocator for crate::BumpWrapper<'_> {
- #[allow(clippy::map_err_ignore)]
- fn allocate(&self, layout: Layout) -> Result<NonNull<u8>, ()> {
- self.0.try_alloc_layout(layout).map_err(|_| ())
- }
- unsafe fn deallocate(&self, _ptr: NonNull<u8>, _layout: Layout) {}
- }
}
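With the non-nightly default now routed through `allocator-api2`, any type implementing that crate's `Allocator` trait can back a hashbrown collection. A rough sketch of a counting allocator that delegates to the global allocator; the `AllocError` type and the method signatures are assumed to mirror the std allocator API, since only the `Allocator` and `Global` re-exports are visible in this hunk:

    use core::alloc::Layout;
    use core::ptr::NonNull;
    use core::sync::atomic::{AtomicUsize, Ordering};

    use allocator_api2::alloc::{AllocError, Allocator, Global};

    /// Counts allocations while delegating the real work to `Global`.
    #[derive(Clone, Copy)]
    struct CountingAlloc<'a> {
        count: &'a AtomicUsize,
    }

    unsafe impl Allocator for CountingAlloc<'_> {
        fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
            self.count.fetch_add(1, Ordering::Relaxed);
            Global.allocate(layout)
        }

        unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
            Global.deallocate(ptr, layout);
        }
    }

    fn counted_map() {
        let count = AtomicUsize::new(0);
        let mut map = hashbrown::HashMap::with_capacity_in(8, CountingAlloc { count: &count });
        map.insert("k", 1);
        assert!(count.load(Ordering::Relaxed) >= 1);
    }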
diff --git a/vendor/hashbrown/src/raw/bitmask.rs b/vendor/hashbrown/src/raw/bitmask.rs
index 7d4f9fc38..6576b3c5c 100644
--- a/vendor/hashbrown/src/raw/bitmask.rs
+++ b/vendor/hashbrown/src/raw/bitmask.rs
@@ -1,6 +1,6 @@
-use super::imp::{BitMaskWord, BITMASK_MASK, BITMASK_STRIDE};
-#[cfg(feature = "nightly")]
-use core::intrinsics;
+use super::imp::{
+ BitMaskWord, NonZeroBitMaskWord, BITMASK_ITER_MASK, BITMASK_MASK, BITMASK_STRIDE,
+};
/// A bit mask which contains the result of a `Match` operation on a `Group` and
/// allows iterating through them.
@@ -8,75 +8,55 @@ use core::intrinsics;
/// The bit mask is arranged so that low-order bits represent lower memory
/// addresses for group match results.
///
-/// For implementation reasons, the bits in the set may be sparsely packed, so
-/// that there is only one bit-per-byte used (the high bit, 7). If this is the
+/// For implementation reasons, the bits in the set may be sparsely packed with
+/// groups of 8 bits representing one element. If any of these bits are non-zero
+/// then this element is considered to be true in the mask. If this is the
/// case, `BITMASK_STRIDE` will be 8 to indicate a divide-by-8 should be
/// performed on counts/indices to normalize this difference. `BITMASK_MASK` is
/// similarly a mask of all the actually-used bits.
+///
+/// To iterate over a bit mask, it must be converted to a form where only 1 bit
+/// is set per element. This is done by applying `BITMASK_ITER_MASK` on the
+/// mask bits.
#[derive(Copy, Clone)]
-pub struct BitMask(pub BitMaskWord);
+pub(crate) struct BitMask(pub(crate) BitMaskWord);
#[allow(clippy::use_self)]
impl BitMask {
/// Returns a new `BitMask` with all bits inverted.
#[inline]
#[must_use]
- pub fn invert(self) -> Self {
+ #[allow(dead_code)]
+ pub(crate) fn invert(self) -> Self {
BitMask(self.0 ^ BITMASK_MASK)
}
- /// Flip the bit in the mask for the entry at the given index.
- ///
- /// Returns the bit's previous state.
- #[inline]
- #[allow(clippy::cast_ptr_alignment)]
- #[cfg(feature = "raw")]
- pub unsafe fn flip(&mut self, index: usize) -> bool {
- // NOTE: The + BITMASK_STRIDE - 1 is to set the high bit.
- let mask = 1 << (index * BITMASK_STRIDE + BITMASK_STRIDE - 1);
- self.0 ^= mask;
- // The bit was set if the bit is now 0.
- self.0 & mask == 0
- }
-
/// Returns a new `BitMask` with the lowest bit removed.
#[inline]
#[must_use]
- pub fn remove_lowest_bit(self) -> Self {
+ fn remove_lowest_bit(self) -> Self {
BitMask(self.0 & (self.0 - 1))
}
+
/// Returns whether the `BitMask` has at least one set bit.
#[inline]
- pub fn any_bit_set(self) -> bool {
+ pub(crate) fn any_bit_set(self) -> bool {
self.0 != 0
}
/// Returns the first set bit in the `BitMask`, if there is one.
#[inline]
- pub fn lowest_set_bit(self) -> Option<usize> {
- if self.0 == 0 {
- None
+ pub(crate) fn lowest_set_bit(self) -> Option<usize> {
+ if let Some(nonzero) = NonZeroBitMaskWord::new(self.0) {
+ Some(Self::nonzero_trailing_zeros(nonzero))
} else {
- Some(unsafe { self.lowest_set_bit_nonzero() })
+ None
}
}
- /// Returns the first set bit in the `BitMask`, if there is one. The
- /// bitmask must not be empty.
- #[inline]
- #[cfg(feature = "nightly")]
- pub unsafe fn lowest_set_bit_nonzero(self) -> usize {
- intrinsics::cttz_nonzero(self.0) as usize / BITMASK_STRIDE
- }
- #[inline]
- #[cfg(not(feature = "nightly"))]
- pub unsafe fn lowest_set_bit_nonzero(self) -> usize {
- self.trailing_zeros()
- }
-
/// Returns the number of trailing zeroes in the `BitMask`.
#[inline]
- pub fn trailing_zeros(self) -> usize {
+ pub(crate) fn trailing_zeros(self) -> usize {
// ARM doesn't have a trailing_zeroes instruction, and instead uses
// reverse_bits (RBIT) + leading_zeroes (CLZ). However older ARM
// versions (pre-ARMv7) don't have RBIT and need to emulate it
@@ -89,9 +69,21 @@ impl BitMask {
}
}
+ /// Same as above but takes a `NonZeroBitMaskWord`.
+ #[inline]
+ fn nonzero_trailing_zeros(nonzero: NonZeroBitMaskWord) -> usize {
+ if cfg!(target_arch = "arm") && BITMASK_STRIDE % 8 == 0 {
+ // SAFETY: A byte-swapped non-zero value is still non-zero.
+ let swapped = unsafe { NonZeroBitMaskWord::new_unchecked(nonzero.get().swap_bytes()) };
+ swapped.leading_zeros() as usize / BITMASK_STRIDE
+ } else {
+ nonzero.trailing_zeros() as usize / BITMASK_STRIDE
+ }
+ }
+
/// Returns the number of leading zeroes in the `BitMask`.
#[inline]
- pub fn leading_zeros(self) -> usize {
+ pub(crate) fn leading_zeros(self) -> usize {
self.0.leading_zeros() as usize / BITMASK_STRIDE
}
}
@@ -102,13 +94,32 @@ impl IntoIterator for BitMask {
#[inline]
fn into_iter(self) -> BitMaskIter {
- BitMaskIter(self)
+ // A BitMask only requires each element (group of bits) to be non-zero.
+ // However for iteration we need each element to only contain 1 bit.
+ BitMaskIter(BitMask(self.0 & BITMASK_ITER_MASK))
}
}
/// Iterator over the contents of a `BitMask`, returning the indices of set
/// bits.
-pub struct BitMaskIter(BitMask);
+#[derive(Copy, Clone)]
+pub(crate) struct BitMaskIter(pub(crate) BitMask);
+
+impl BitMaskIter {
+ /// Flip the bit in the mask for the entry at the given index.
+ ///
+ /// Returns the bit's previous state.
+ #[inline]
+ #[allow(clippy::cast_ptr_alignment)]
+ #[cfg(feature = "raw")]
+ pub(crate) unsafe fn flip(&mut self, index: usize) -> bool {
+ // NOTE: The + BITMASK_STRIDE - 1 is to set the high bit.
+ let mask = 1 << (index * BITMASK_STRIDE + BITMASK_STRIDE - 1);
+ self.0 .0 ^= mask;
+ // The bit was set if the bit is now 0.
+ self.0 .0 & mask == 0
+ }
+}
impl Iterator for BitMaskIter {
type Item = usize;
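As a concrete illustration of the stride-8 scheme described in the `BitMask` doc comment above, the following stand-alone sketch uses the 64-bit generic-word constants from this patch (`BITMASK_STRIDE = 8`, `BITMASK_MASK = 0x8080...8080`, `BITMASK_ITER_MASK = !0`); it is not the crate's internal API:

    const BITMASK_STRIDE: usize = 8;
    const BITMASK_MASK: u64 = 0x8080_8080_8080_8080;
    const BITMASK_ITER_MASK: u64 = !0; // generic impl: already one bit per element

    fn bitmask_demo() {
        // Element 2 matched: in the generic SWAR form only its high bit (bit 23) is set.
        let mask: u64 = 0x0000_0000_0080_0000;

        // Recover the element index by dividing the bit position by the stride.
        assert_eq!((mask & BITMASK_ITER_MASK).trailing_zeros() as usize / BITMASK_STRIDE, 2);

        // `invert()` flips the "used" bit of every element, so element 2 is now the only
        // clear one and the lowest set element is element 0.
        let inverted = mask ^ BITMASK_MASK;
        assert_eq!(inverted.trailing_zeros() as usize / BITMASK_STRIDE, 0);
    }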
diff --git a/vendor/hashbrown/src/raw/generic.rs b/vendor/hashbrown/src/raw/generic.rs
index 52955a45b..c668b0642 100644
--- a/vendor/hashbrown/src/raw/generic.rs
+++ b/vendor/hashbrown/src/raw/generic.rs
@@ -5,26 +5,29 @@ use core::{mem, ptr};
// Use the native word size as the group size. Using a 64-bit group size on
// a 32-bit architecture will just end up being more expensive because
// shifts and multiplies will need to be emulated.
-#[cfg(any(
- target_pointer_width = "64",
- target_arch = "aarch64",
- target_arch = "x86_64",
- target_arch = "wasm32",
-))]
-type GroupWord = u64;
-#[cfg(all(
- any(target_pointer_width = "32", target_pointer_width = "16"),
- not(target_arch = "aarch64"),
- not(target_arch = "x86_64"),
- not(target_arch = "wasm32"),
-))]
-type GroupWord = u32;
-pub type BitMaskWord = GroupWord;
-pub const BITMASK_STRIDE: usize = 8;
+cfg_if! {
+ if #[cfg(any(
+ target_pointer_width = "64",
+ target_arch = "aarch64",
+ target_arch = "x86_64",
+ target_arch = "wasm32",
+ ))] {
+ type GroupWord = u64;
+ type NonZeroGroupWord = core::num::NonZeroU64;
+ } else {
+ type GroupWord = u32;
+ type NonZeroGroupWord = core::num::NonZeroU32;
+ }
+}
+
+pub(crate) type BitMaskWord = GroupWord;
+pub(crate) type NonZeroBitMaskWord = NonZeroGroupWord;
+pub(crate) const BITMASK_STRIDE: usize = 8;
// We only care about the highest bit of each byte for the mask.
#[allow(clippy::cast_possible_truncation, clippy::unnecessary_cast)]
-pub const BITMASK_MASK: BitMaskWord = 0x8080_8080_8080_8080_u64 as GroupWord;
+pub(crate) const BITMASK_MASK: BitMaskWord = 0x8080_8080_8080_8080_u64 as GroupWord;
+pub(crate) const BITMASK_ITER_MASK: BitMaskWord = !0;
/// Helper function to replicate a byte across a `GroupWord`.
#[inline]
@@ -37,7 +40,7 @@ fn repeat(byte: u8) -> GroupWord {
///
/// This implementation uses a word-sized integer.
#[derive(Copy, Clone)]
-pub struct Group(GroupWord);
+pub(crate) struct Group(GroupWord);
// We perform all operations in the native endianness, and convert to
// little-endian just before creating a BitMask. The can potentially
@@ -46,14 +49,14 @@ pub struct Group(GroupWord);
#[allow(clippy::use_self)]
impl Group {
/// Number of bytes in the group.
- pub const WIDTH: usize = mem::size_of::<Self>();
+ pub(crate) const WIDTH: usize = mem::size_of::<Self>();
/// Returns a full group of empty bytes, suitable for use as the initial
/// value for an empty hash table.
///
/// This is guaranteed to be aligned to the group size.
#[inline]
- pub const fn static_empty() -> &'static [u8; Group::WIDTH] {
+ pub(crate) const fn static_empty() -> &'static [u8; Group::WIDTH] {
#[repr(C)]
struct AlignedBytes {
_align: [Group; 0],
@@ -69,7 +72,7 @@ impl Group {
/// Loads a group of bytes starting at the given address.
#[inline]
#[allow(clippy::cast_ptr_alignment)] // unaligned load
- pub unsafe fn load(ptr: *const u8) -> Self {
+ pub(crate) unsafe fn load(ptr: *const u8) -> Self {
Group(ptr::read_unaligned(ptr.cast()))
}
@@ -77,7 +80,7 @@ impl Group {
/// aligned to `mem::align_of::<Group>()`.
#[inline]
#[allow(clippy::cast_ptr_alignment)]
- pub unsafe fn load_aligned(ptr: *const u8) -> Self {
+ pub(crate) unsafe fn load_aligned(ptr: *const u8) -> Self {
// FIXME: use align_offset once it stabilizes
debug_assert_eq!(ptr as usize & (mem::align_of::<Self>() - 1), 0);
Group(ptr::read(ptr.cast()))
@@ -87,7 +90,7 @@ impl Group {
/// aligned to `mem::align_of::<Group>()`.
#[inline]
#[allow(clippy::cast_ptr_alignment)]
- pub unsafe fn store_aligned(self, ptr: *mut u8) {
+ pub(crate) unsafe fn store_aligned(self, ptr: *mut u8) {
// FIXME: use align_offset once it stabilizes
debug_assert_eq!(ptr as usize & (mem::align_of::<Self>() - 1), 0);
ptr::write(ptr.cast(), self.0);
@@ -104,7 +107,7 @@ impl Group {
/// - This only happens if there is at least 1 true match.
/// - The chance of this happening is very low (< 1% chance per byte).
#[inline]
- pub fn match_byte(self, byte: u8) -> BitMask {
+ pub(crate) fn match_byte(self, byte: u8) -> BitMask {
// This algorithm is derived from
// https://graphics.stanford.edu/~seander/bithacks.html##ValueInWord
let cmp = self.0 ^ repeat(byte);
@@ -114,7 +117,7 @@ impl Group {
/// Returns a `BitMask` indicating all bytes in the group which are
/// `EMPTY`.
#[inline]
- pub fn match_empty(self) -> BitMask {
+ pub(crate) fn match_empty(self) -> BitMask {
// If the high bit is set, then the byte must be either:
// 1111_1111 (EMPTY) or 1000_0000 (DELETED).
// So we can just check if the top two bits are 1 by ANDing them.
@@ -124,14 +127,14 @@ impl Group {
/// Returns a `BitMask` indicating all bytes in the group which are
/// `EMPTY` or `DELETED`.
#[inline]
- pub fn match_empty_or_deleted(self) -> BitMask {
+ pub(crate) fn match_empty_or_deleted(self) -> BitMask {
// A byte is EMPTY or DELETED iff the high bit is set
BitMask((self.0 & repeat(0x80)).to_le())
}
/// Returns a `BitMask` indicating all bytes in the group which are full.
#[inline]
- pub fn match_full(self) -> BitMask {
+ pub(crate) fn match_full(self) -> BitMask {
self.match_empty_or_deleted().invert()
}
@@ -140,7 +143,7 @@ impl Group {
/// - `DELETED => EMPTY`
/// - `FULL => DELETED`
#[inline]
- pub fn convert_special_to_empty_and_full_to_deleted(self) -> Self {
+ pub(crate) fn convert_special_to_empty_and_full_to_deleted(self) -> Self {
// Map high_bit = 1 (EMPTY or DELETED) to 1111_1111
// and high_bit = 0 (FULL) to 1000_0000
//
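The `match_byte` comment above refers to the SWAR zero-byte trick from the Stanford bit-hacks page. Its body is not shown in this hunk, so the following is only an illustrative re-derivation of that trick (including the rare false positive the doc comment warns about), not the crate's exact code:

    /// Replicate a byte across a 64-bit word.
    fn repeat(byte: u8) -> u64 {
        u64::from(byte) * 0x0101_0101_0101_0101
    }

    /// Illustrative SWAR byte search: bytes of `group` equal to `byte` get their high bit set.
    fn match_byte_demo(group: u64, byte: u8) -> u64 {
        let cmp = group ^ repeat(byte); // equal bytes become 0x00
        // Classic zero-byte detection; as noted above, a borrow from a true match can
        // occasionally flag a neighbouring byte that differs only in its lowest bit.
        cmp.wrapping_sub(repeat(0x01)) & !cmp & repeat(0x80)
    }

    fn demo() {
        // Byte 0x2a sits at index 1 (bytes given in little-endian order).
        let group = u64::from_le_bytes([0x10, 0x2a, 0x7f, 0x3c, 0x3c, 0x3c, 0x3c, 0x3c]);
        let mask = match_byte_demo(group, 0x2a);
        assert_eq!(mask.trailing_zeros() / 8, 1);
    }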
diff --git a/vendor/hashbrown/src/raw/mod.rs b/vendor/hashbrown/src/raw/mod.rs
index 0e96306ef..1a6dced4b 100644
--- a/vendor/hashbrown/src/raw/mod.rs
+++ b/vendor/hashbrown/src/raw/mod.rs
@@ -25,8 +25,10 @@ cfg_if! {
))] {
mod sse2;
use sse2 as imp;
+ } else if #[cfg(all(target_arch = "aarch64", target_feature = "neon"))] {
+ mod neon;
+ use neon as imp;
} else {
- #[path = "generic.rs"]
mod generic;
use generic as imp;
}
@@ -37,36 +39,26 @@ pub(crate) use self::alloc::{do_alloc, Allocator, Global};
mod bitmask;
-use self::bitmask::{BitMask, BitMaskIter};
+use self::bitmask::BitMaskIter;
use self::imp::Group;
// Branch prediction hint. This is currently only available on nightly but it
// consistently improves performance by 10-15%.
+#[cfg(not(feature = "nightly"))]
+use core::convert::identity as likely;
+#[cfg(not(feature = "nightly"))]
+use core::convert::identity as unlikely;
#[cfg(feature = "nightly")]
use core::intrinsics::{likely, unlikely};
-// On stable we can use #[cold] to get a equivalent effect: this attributes
-// suggests that the function is unlikely to be called
-#[cfg(not(feature = "nightly"))]
-#[inline]
-#[cold]
-fn cold() {}
-
-#[cfg(not(feature = "nightly"))]
-#[inline]
-fn likely(b: bool) -> bool {
- if !b {
- cold();
- }
- b
-}
+// Use strict provenance functions if available.
+#[cfg(feature = "nightly")]
+use core::ptr::invalid_mut;
+// Implement it with a cast otherwise.
#[cfg(not(feature = "nightly"))]
-#[inline]
-fn unlikely(b: bool) -> bool {
- if b {
- cold();
- }
- b
+#[inline(always)]
+fn invalid_mut<T>(addr: usize) -> *mut T {
+ addr as *mut T
}
#[inline]
@@ -272,6 +264,11 @@ impl TableLayout {
}
}
+/// A reference to an empty bucket into which an element can be inserted.
+pub struct InsertSlot {
+ index: usize,
+}
+
/// A reference to a hash table bucket containing a `T`.
///
/// This is usually just a pointer to the element itself. However if the element
@@ -299,11 +296,79 @@ impl<T> Clone for Bucket<T> {
impl<T> Bucket<T> {
const IS_ZERO_SIZED_TYPE: bool = mem::size_of::<T>() == 0;
+ /// Creates a [`Bucket`] that contain pointer to the data.
+ /// The pointer calculation is performed by calculating the
+ /// offset from given `base` pointer (convenience for
+ /// `base.as_ptr().sub(index)`).
+ ///
+ /// `index` is in units of `T`; e.g., an `index` of 3 represents a pointer
+ /// offset of `3 * size_of::<T>()` bytes.
+ ///
+ /// If the `T` is a ZST, then we instead track the index of the element
+ /// in the table so that `erase` works properly (return
+ /// `NonNull::new_unchecked((index + 1) as *mut T)`)
+ ///
+ /// # Safety
+ ///
+ /// If `mem::size_of::<T>() != 0`, then the safety rules are directly derived
+ /// from the safety rules for [`<*mut T>::sub`] method of `*mut T` and the safety
+ /// rules of [`NonNull::new_unchecked`] function.
+ ///
+ /// Thus, in order to uphold the safety contracts for the [`<*mut T>::sub`] method
+ /// and [`NonNull::new_unchecked`] function, as well as for the correct
+ /// logic of the work of this crate, the following rules are necessary and
+ /// sufficient:
+ ///
+ /// * the `base` pointer must not be `dangling` and must point to the
+ /// end of the first `value element` from the `data part` of the table, i.e.
+ /// must be the pointer that returned by [`RawTable::data_end`] or by
+ /// [`RawTableInner::data_end<T>`];
+ ///
+ /// * `index` must not be greater than `RawTableInner.bucket_mask`, i.e.
+ /// `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)`
+ /// must be no greater than the number returned by the function
+ /// [`RawTable::buckets`] or [`RawTableInner::buckets`].
+ ///
+ /// If `mem::size_of::<T>() == 0`, then the only requirement is that the
+ /// `index` must not be greater than `RawTableInner.bucket_mask`, i.e.
+ /// `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)`
+ /// must be no greater than the number returned by the function
+ /// [`RawTable::buckets`] or [`RawTableInner::buckets`].
+ ///
+ /// [`Bucket`]: crate::raw::Bucket
+ /// [`<*mut T>::sub`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.sub-1
+ /// [`NonNull::new_unchecked`]: https://doc.rust-lang.org/stable/std/ptr/struct.NonNull.html#method.new_unchecked
+ /// [`RawTable::data_end`]: crate::raw::RawTable::data_end
+ /// [`RawTableInner::data_end<T>`]: RawTableInner::data_end<T>
+ /// [`RawTable::buckets`]: crate::raw::RawTable::buckets
+ /// [`RawTableInner::buckets`]: RawTableInner::buckets
#[inline]
unsafe fn from_base_index(base: NonNull<T>, index: usize) -> Self {
+ // If mem::size_of::<T>() != 0 then return a pointer to an `element` in
+ // the data part of the table (we start counting from "0", so that
+ // in the expression T[last], the "last" index is actually one less than the
+ // "buckets" number in the table, i.e. "last = RawTableInner.bucket_mask"):
+ //
+ // `from_base_index(base, 1).as_ptr()` returns a pointer that
+ // points here in the data part of the table
+ // (to the start of T1)
+ // |
+ // | `base: NonNull<T>` must point here
+ // | (to the end of T0 or to the start of C0)
+ // v v
+ // [Padding], Tlast, ..., |T1|, T0, |C0, C1, ..., Clast
+ // ^
+ // `from_base_index(base, 1)` returns a pointer
+ // that points here in the data part of the table
+ // (to the end of T1)
+ //
+ // where: T0...Tlast - our stored data; C0...Clast - control bytes
+ // or metadata for data.
let ptr = if Self::IS_ZERO_SIZED_TYPE {
- // won't overflow because index must be less than length
- (index + 1) as *mut T
+ // won't overflow because index must be less than length (bucket_mask)
+ // and bucket_mask is guaranteed to be less than `isize::MAX`
+ // (see TableLayout::calculate_layout_for method)
+ invalid_mut(index + 1)
} else {
base.as_ptr().sub(index)
};
@@ -311,27 +376,183 @@ impl<T> Bucket<T> {
ptr: NonNull::new_unchecked(ptr),
}
}
+
+ /// Calculates the index of a [`Bucket`] as distance between two pointers
+ /// (convenience for `base.as_ptr().offset_from(self.ptr.as_ptr()) as usize`).
+ /// The returned value is in units of T: the distance in bytes divided by
+ /// [`core::mem::size_of::<T>()`].
+ ///
+ /// If the `T` is a ZST, then we return the index of the element in
+ /// the table so that `erase` works properly (return `self.ptr.as_ptr() as usize - 1`).
+ ///
+ /// This function is the inverse of [`from_base_index`].
+ ///
+ /// # Safety
+ ///
+ /// If `mem::size_of::<T>() != 0`, then the safety rules are directly derived
+ /// from the safety rules for [`<*const T>::offset_from`] method of `*const T`.
+ ///
+ /// Thus, in order to uphold the safety contracts for [`<*const T>::offset_from`]
+ /// method, as well as for the correct logic of the work of this crate, the
+ /// following rules are necessary and sufficient:
+ ///
+ /// * `base` contained pointer must not be `dangling` and must point to the
+ /// end of the first `element` from the `data part` of the table, i.e.
+ /// must be a pointer returned by [`RawTable::data_end`] or by
+ /// [`RawTableInner::data_end<T>`];
+ ///
+ /// * `self` also must not contain dangling pointer;
+ ///
+ /// * both `self` and `base` must be created from the same [`RawTable`]
+ /// (or [`RawTableInner`]).
+ ///
+ /// If `mem::size_of::<T>() == 0`, this function is always safe.
+ ///
+ /// [`Bucket`]: crate::raw::Bucket
+ /// [`from_base_index`]: crate::raw::Bucket::from_base_index
+ /// [`RawTable::data_end`]: crate::raw::RawTable::data_end
+ /// [`RawTableInner::data_end<T>`]: RawTableInner::data_end<T>
+ /// [`RawTable`]: crate::raw::RawTable
+ /// [`RawTableInner`]: RawTableInner
+ /// [`<*const T>::offset_from`]: https://doc.rust-lang.org/nightly/core/primitive.pointer.html#method.offset_from
#[inline]
unsafe fn to_base_index(&self, base: NonNull<T>) -> usize {
+ // If mem::size_of::<T>() != 0 then return an index under which we used to store the
+ // `element` in the data part of the table (we start counting from "0", so
+ // that in the expression T[last], the "last" index actually is one less than the
+ // "buckets" number in the table, i.e. "last = RawTableInner.bucket_mask").
+ // For example for 5th element in table calculation is performed like this:
+ //
+ // mem::size_of::<T>()
+ // |
+ // | `self = from_base_index(base, 5)` that returns pointer
+ // | that points here in the data part of the table
+ // | (to the end of T5)
+ // | | `base: NonNull<T>` must point here
+ // v | (to the end of T0 or to the start of C0)
+ // /???\ v v
+ // [Padding], Tlast, ..., |T10|, ..., T5|, T4, T3, T2, T1, T0, |C0, C1, C2, C3, C4, C5, ..., C10, ..., Clast
+ // \__________ __________/
+ // \/
+ // `bucket.to_base_index(base)` = 5
+ // (base.as_ptr() as usize - self.ptr.as_ptr() as usize) / mem::size_of::<T>()
+ //
+ // where: T0...Tlast - our stored data; C0...Clast - control bytes or metadata for data.
if Self::IS_ZERO_SIZED_TYPE {
+ // this cannot be UB
self.ptr.as_ptr() as usize - 1
} else {
offset_from(base.as_ptr(), self.ptr.as_ptr())
}
}
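
To make the pointer arithmetic in `from_base_index` / `to_base_index` concrete, here is a minimal standalone sketch (editor's illustration, not the crate's internal code): a bucket pointer is `base.sub(index)`, and the index is recovered with `offset_from`, measured in units of `T`.

```rust
use core::ptr::NonNull;

// Recover a bucket index from the data-end pointer, as `to_base_index` does.
unsafe fn to_index<T>(base: NonNull<T>, bucket: NonNull<T>) -> usize {
    base.as_ptr().offset_from(bucket.as_ptr()) as usize
}

fn main() {
    // Pretend this array is the data part of a table; `base` is one past its end,
    // like the pointer returned by `RawTable::data_end`.
    let data = [0u64; 8];
    let base = unsafe { NonNull::new_unchecked(data.as_ptr().add(data.len()) as *mut u64) };
    // `from_base_index(base, 3)` stores `base.sub(3)` as the bucket pointer.
    let bucket = unsafe { NonNull::new_unchecked(base.as_ptr().sub(3)) };
    assert_eq!(unsafe { to_index(base, bucket) }, 3);
}
```

Both pointers come from the same allocation, so `offset_from` is valid and yields the distance in elements rather than bytes.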
+
+ /// Acquires the underlying raw pointer `*mut T` to `data`.
+ ///
+ /// # Note
+ ///
+ /// If `T` is not [`Copy`], do not use `*mut T` methods that can cause calling the
+ /// destructor of `T` (for example the [`<*mut T>::drop_in_place`] method), because
+ /// for properly dropping the data we also need to clear the `data` control bytes. If we
+ /// drop the data but do not clear the `data` control byte, it leads to a double drop when
+ /// [`RawTable`] goes out of scope.
+ ///
+ /// If you modify an already initialized `value`, then [`Hash`] and [`Eq`] on the new
+ /// `T` value and its borrowed form *must* match those for the old `T` value, as the map
+ /// will not re-evaluate where the new value should go, meaning the value may become
+ /// "lost" if its location does not reflect its state.
+ ///
+ /// [`RawTable`]: crate::raw::RawTable
+ /// [`<*mut T>::drop_in_place`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.drop_in_place
+ /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html
+ /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "raw")]
+ /// # fn test() {
+ /// use core::hash::{BuildHasher, Hash};
+ /// use hashbrown::raw::{Bucket, RawTable};
+ ///
+ /// type NewHashBuilder = core::hash::BuildHasherDefault<ahash::AHasher>;
+ ///
+ /// fn make_hash<K: Hash + ?Sized, S: BuildHasher>(hash_builder: &S, key: &K) -> u64 {
+ /// use core::hash::Hasher;
+ /// let mut state = hash_builder.build_hasher();
+ /// key.hash(&mut state);
+ /// state.finish()
+ /// }
+ ///
+ /// let hash_builder = NewHashBuilder::default();
+ /// let mut table = RawTable::new();
+ ///
+ /// let value = ("a", 100);
+ /// let hash = make_hash(&hash_builder, &value.0);
+ ///
+ /// table.insert(hash, value.clone(), |val| make_hash(&hash_builder, &val.0));
+ ///
+ /// let bucket: Bucket<(&str, i32)> = table.find(hash, |(k1, _)| k1 == &value.0).unwrap();
+ ///
+ /// assert_eq!(unsafe { &*bucket.as_ptr() }, &("a", 100));
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "raw")]
+ /// # test()
+ /// # }
+ /// ```
#[inline]
pub fn as_ptr(&self) -> *mut T {
if Self::IS_ZERO_SIZED_TYPE {
// Just return an arbitrary ZST pointer which is properly aligned
- mem::align_of::<T>() as *mut T
+ // invalid pointer is good enough for ZST
+ invalid_mut(mem::align_of::<T>())
} else {
unsafe { self.ptr.as_ptr().sub(1) }
}
}
+
+ /// Creates a new [`Bucket`] that is offset from `self` by the given
+ /// `offset`. The new pointer is calculated by subtracting `offset` from
+ /// the `self` pointer (convenience for `self.ptr.as_ptr().sub(offset)`).
+ /// This function is used for iterators.
+ ///
+ /// `offset` is in units of `T`; e.g., an `offset` of 3 represents a pointer
+ /// offset of `3 * size_of::<T>()` bytes.
+ ///
+ /// # Safety
+ ///
+ /// If `mem::size_of::<T>() != 0`, then the safety rules are directly derived
+ /// from the safety rules for [`<*mut T>::sub`] method of `*mut T` and safety
+ /// rules of [`NonNull::new_unchecked`] function.
+ ///
+ /// Thus, in order to uphold the safety contracts for [`<*mut T>::sub`] method
+ /// and [`NonNull::new_unchecked`] function, as well as for the correct
+ /// logic of the work of this crate, the following rules are necessary and
+ /// sufficient:
+ ///
+ /// * the pointer contained in `self` must not be dangling;
+ ///
+ /// * `self.to_base_index() + offset` must not be greater than `RawTableInner.bucket_mask`,
+ /// i.e. `(self.to_base_index() + offset) <= RawTableInner.bucket_mask` or, in other
+ /// words, `self.to_base_index() + offset + 1` must be no greater than the number returned
+ /// by the function [`RawTable::buckets`] or [`RawTableInner::buckets`].
+ ///
+ /// If `mem::size_of::<T>() == 0`, then the only requirement is that
+ /// `self.to_base_index() + offset` must not be greater than `RawTableInner.bucket_mask`,
+ /// i.e. `(self.to_base_index() + offset) <= RawTableInner.bucket_mask` or, in other words,
+ /// `self.to_base_index() + offset + 1` must be no greater than the number returned by the
+ /// function [`RawTable::buckets`] or [`RawTableInner::buckets`].
+ ///
+ /// [`Bucket`]: crate::raw::Bucket
+ /// [`<*mut T>::sub`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.sub-1
+ /// [`NonNull::new_unchecked`]: https://doc.rust-lang.org/stable/std/ptr/struct.NonNull.html#method.new_unchecked
+ /// [`RawTable::buckets`]: crate::raw::RawTable::buckets
+ /// [`RawTableInner::buckets`]: RawTableInner::buckets
#[inline]
unsafe fn next_n(&self, offset: usize) -> Self {
let ptr = if Self::IS_ZERO_SIZED_TYPE {
- (self.ptr.as_ptr() as usize + offset) as *mut T
+ // invalid pointer is good enough for ZST
+ invalid_mut(self.ptr.as_ptr() as usize + offset)
} else {
self.ptr.as_ptr().sub(offset)
};
@@ -339,26 +560,212 @@ impl<T> Bucket<T> {
ptr: NonNull::new_unchecked(ptr),
}
}
+
+ /// Executes the destructor (if any) of the pointed-to `data`.
+ ///
+ /// # Safety
+ ///
+ /// See [`ptr::drop_in_place`] for safety concerns.
+ ///
+ /// You should use [`RawTable::erase`] instead of this function,
+ /// or be careful with calling this function directly, because for
+ /// properly dropping the data we also need to clear the `data` control bytes.
+ /// If we drop the data but do not erase the `data` control byte, it leads to
+ /// a double drop when [`RawTable`] goes out of scope.
+ ///
+ /// [`ptr::drop_in_place`]: https://doc.rust-lang.org/core/ptr/fn.drop_in_place.html
+ /// [`RawTable`]: crate::raw::RawTable
+ /// [`RawTable::erase`]: crate::raw::RawTable::erase
#[cfg_attr(feature = "inline-more", inline)]
pub(crate) unsafe fn drop(&self) {
self.as_ptr().drop_in_place();
}
+
+ /// Reads the `value` from `self` without moving it. This leaves the
+ /// memory in `self` unchanged.
+ ///
+ /// # Safety
+ ///
+ /// See [`ptr::read`] for safety concerns.
+ ///
+ /// You should use [`RawTable::remove`] instead of this function,
+ /// or be careful with calling this function directly, because the compiler
+ /// calls the destructor of `T` when the read `value` goes out of scope. This
+ /// can cause a double drop when [`RawTable`] goes out of scope,
+ /// because the `data` control byte has not been erased.
+ ///
+ /// [`ptr::read`]: https://doc.rust-lang.org/core/ptr/fn.read.html
+ /// [`RawTable`]: crate::raw::RawTable
+ /// [`RawTable::remove`]: crate::raw::RawTable::remove
#[inline]
pub(crate) unsafe fn read(&self) -> T {
self.as_ptr().read()
}
+
+ /// Overwrites a memory location with the given `value` without reading
+ /// or dropping the old value (like [`ptr::write`] function).
+ ///
+ /// # Safety
+ ///
+ /// See [`ptr::write`] for safety concerns.
+ ///
+ /// # Note
+ ///
+ /// [`Hash`] and [`Eq`] on the new `T` value and its borrowed form *must* match
+ /// those for the old `T` value, as the map will not re-evaluate where the new
+ /// value should go, meaning the value may become "lost" if their location
+ /// does not reflect their state.
+ ///
+ /// [`ptr::write`]: https://doc.rust-lang.org/core/ptr/fn.write.html
+ /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html
+ /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html
#[inline]
pub(crate) unsafe fn write(&self, val: T) {
self.as_ptr().write(val);
}
+
+ /// Returns a shared immutable reference to the `value`.
+ ///
+ /// # Safety
+ ///
+ /// See [`NonNull::as_ref`] for safety concerns.
+ ///
+ /// [`NonNull::as_ref`]: https://doc.rust-lang.org/core/ptr/struct.NonNull.html#method.as_ref
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "raw")]
+ /// # fn test() {
+ /// use core::hash::{BuildHasher, Hash};
+ /// use hashbrown::raw::{Bucket, RawTable};
+ ///
+ /// type NewHashBuilder = core::hash::BuildHasherDefault<ahash::AHasher>;
+ ///
+ /// fn make_hash<K: Hash + ?Sized, S: BuildHasher>(hash_builder: &S, key: &K) -> u64 {
+ /// use core::hash::Hasher;
+ /// let mut state = hash_builder.build_hasher();
+ /// key.hash(&mut state);
+ /// state.finish()
+ /// }
+ ///
+ /// let hash_builder = NewHashBuilder::default();
+ /// let mut table = RawTable::new();
+ ///
+ /// let value: (&str, String) = ("A pony", "is a small horse".to_owned());
+ /// let hash = make_hash(&hash_builder, &value.0);
+ ///
+ /// table.insert(hash, value.clone(), |val| make_hash(&hash_builder, &val.0));
+ ///
+ /// let bucket: Bucket<(&str, String)> = table.find(hash, |(k, _)| k == &value.0).unwrap();
+ ///
+ /// assert_eq!(
+ /// unsafe { bucket.as_ref() },
+ /// &("A pony", "is a small horse".to_owned())
+ /// );
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "raw")]
+ /// # test()
+ /// # }
+ /// ```
#[inline]
pub unsafe fn as_ref<'a>(&self) -> &'a T {
&*self.as_ptr()
}
+
+ /// Returns a unique mutable reference to the `value`.
+ ///
+ /// # Safety
+ ///
+ /// See [`NonNull::as_mut`] for safety concerns.
+ ///
+ /// # Note
+ ///
+ /// [`Hash`] and [`Eq`] on the new `T` value and its borrowed form *must* match
+ /// those for the old `T` value, as the map will not re-evaluate where the new
+ /// value should go, meaning the value may become "lost" if their location
+ /// does not reflect their state.
+ ///
+ /// [`NonNull::as_mut`]: https://doc.rust-lang.org/core/ptr/struct.NonNull.html#method.as_mut
+ /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html
+ /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "raw")]
+ /// # fn test() {
+ /// use core::hash::{BuildHasher, Hash};
+ /// use hashbrown::raw::{Bucket, RawTable};
+ ///
+ /// type NewHashBuilder = core::hash::BuildHasherDefault<ahash::AHasher>;
+ ///
+ /// fn make_hash<K: Hash + ?Sized, S: BuildHasher>(hash_builder: &S, key: &K) -> u64 {
+ /// use core::hash::Hasher;
+ /// let mut state = hash_builder.build_hasher();
+ /// key.hash(&mut state);
+ /// state.finish()
+ /// }
+ ///
+ /// let hash_builder = NewHashBuilder::default();
+ /// let mut table = RawTable::new();
+ ///
+ /// let value: (&str, String) = ("A pony", "is a small horse".to_owned());
+ /// let hash = make_hash(&hash_builder, &value.0);
+ ///
+ /// table.insert(hash, value.clone(), |val| make_hash(&hash_builder, &val.0));
+ ///
+ /// let bucket: Bucket<(&str, String)> = table.find(hash, |(k, _)| k == &value.0).unwrap();
+ ///
+ /// unsafe {
+ /// bucket
+ /// .as_mut()
+ /// .1
+ /// .push_str(" less than 147 cm at the withers")
+ /// };
+ /// assert_eq!(
+ /// unsafe { bucket.as_ref() },
+ /// &(
+ /// "A pony",
+ /// "is a small horse less than 147 cm at the withers".to_owned()
+ /// )
+ /// );
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "raw")]
+ /// # test()
+ /// # }
+ /// ```
#[inline]
pub unsafe fn as_mut<'a>(&self) -> &'a mut T {
&mut *self.as_ptr()
}
+
+ /// Copies `size_of::<T>()` bytes from `other` to `self`. The source
+ /// and destination may *not* overlap.
+ ///
+ /// # Safety
+ ///
+ /// See [`ptr::copy_nonoverlapping`] for safety concerns.
+ ///
+ /// Like [`read`], `copy_nonoverlapping` creates a bitwise copy of `T`, regardless of
+ /// whether `T` is [`Copy`]. If `T` is not [`Copy`], using *both* the values
+ /// in the region beginning at `*self` and the region beginning at `*other` can
+ /// [violate memory safety].
+ ///
+ /// # Note
+ ///
+ /// [`Hash`] and [`Eq`] on the new `T` value and its borrowed form *must* match
+ /// those for the old `T` value, as the map will not re-evaluate where the new
+ /// value should go, meaning the value may become "lost" if their location
+ /// does not reflect their state.
+ ///
+ /// [`ptr::copy_nonoverlapping`]: https://doc.rust-lang.org/core/ptr/fn.copy_nonoverlapping.html
+ /// [`read`]: https://doc.rust-lang.org/core/ptr/fn.read.html
+ /// [violate memory safety]: https://doc.rust-lang.org/std/ptr/fn.read.html#ownership-of-the-returned-value
+ /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html
+ /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html
#[cfg(feature = "raw")]
#[inline]
pub unsafe fn copy_from_nonoverlapping(&self, other: &Self) {
@@ -516,9 +923,9 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
/// Returns pointer to start of data table.
#[inline]
- #[cfg(feature = "nightly")]
- pub unsafe fn data_start(&self) -> *mut T {
- self.data_end().as_ptr().wrapping_sub(self.buckets())
+ #[cfg(any(feature = "raw", feature = "nightly"))]
+ pub unsafe fn data_start(&self) -> NonNull<T> {
+ NonNull::new_unchecked(self.data_end().as_ptr().wrapping_sub(self.buckets()))
}
/// Return the information about memory allocated by the table.
@@ -581,11 +988,18 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
}
/// Removes an element from the table, returning it.
+ ///
+ /// This also returns an `InsertSlot` pointing to the newly free bucket.
#[cfg_attr(feature = "inline-more", inline)]
#[allow(clippy::needless_pass_by_value)]
- pub unsafe fn remove(&mut self, item: Bucket<T>) -> T {
+ pub unsafe fn remove(&mut self, item: Bucket<T>) -> (T, InsertSlot) {
self.erase_no_drop(&item);
- item.read()
+ (
+ item.read(),
+ InsertSlot {
+ index: self.bucket_index(&item),
+ },
+ )
}
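
A minimal usage sketch of the changed `remove` signature shown above (editor's illustration, not part of the crate docs; it assumes the `raw` cargo feature and reuses the `ahash`-based `make_hash` helper from the surrounding doc examples). The second element of the returned pair is the `InsertSlot` for the bucket that was just freed.

```rust
use core::hash::{BuildHasher, BuildHasherDefault, Hash, Hasher};
use hashbrown::raw::RawTable;

fn make_hash<K: Hash + ?Sized, S: BuildHasher>(hash_builder: &S, key: &K) -> u64 {
    let mut state = hash_builder.build_hasher();
    key.hash(&mut state);
    state.finish()
}

fn main() {
    let hash_builder = BuildHasherDefault::<ahash::AHasher>::default();
    let mut table: RawTable<(&str, i32)> = RawTable::new();

    let value = ("a", 100);
    let hash = make_hash(&hash_builder, &value.0);
    table.insert(hash, value, |val| make_hash(&hash_builder, &val.0));

    if let Some(bucket) = table.find(hash, |(k, _)| k == &"a") {
        // SAFETY: `bucket` was just returned by `find` on this table.
        let (removed, _slot) = unsafe { table.remove(bucket) };
        assert_eq!(removed, ("a", 100));
        assert_eq!(table.len(), 0);
    }
}
```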
/// Finds and removes an element from the table, returning it.
@@ -593,7 +1007,7 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
pub fn remove_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<T> {
// Avoid `Option::map` because it bloats LLVM IR.
match self.find(hash, eq) {
- Some(bucket) => Some(unsafe { self.remove(bucket) }),
+ Some(bucket) => Some(unsafe { self.remove(bucket).0 }),
None => None,
}
}
@@ -607,6 +1021,10 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
/// Removes all elements from the table without freeing the backing memory.
#[cfg_attr(feature = "inline-more", inline)]
pub fn clear(&mut self) {
+ if self.is_empty() {
+ // Special case empty table to avoid surprising O(capacity) time.
+ return;
+ }
// Ensure that the table is reset even if one of the drops panic
let mut self_ = guard(self, |self_| self_.clear_no_drop());
unsafe {
@@ -663,7 +1081,7 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
/// without reallocation.
#[cfg_attr(feature = "inline-more", inline)]
pub fn reserve(&mut self, additional: usize, hasher: impl Fn(&T) -> u64) {
- if additional > self.table.growth_left {
+ if unlikely(additional > self.table.growth_left) {
// Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
if self
.reserve_rehash(additional, hasher, Fallibility::Infallible)
@@ -737,22 +1155,18 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
#[cfg_attr(feature = "inline-more", inline)]
pub fn insert(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> Bucket<T> {
unsafe {
- let mut index = self.table.find_insert_slot(hash);
+ let mut slot = self.table.find_insert_slot(hash);
// We can avoid growing the table once we have reached our load
// factor if we are replacing a tombstone. This works since the
// number of EMPTY slots does not change in this case.
- let old_ctrl = *self.table.ctrl(index);
+ let old_ctrl = *self.table.ctrl(slot.index);
if unlikely(self.table.growth_left == 0 && special_is_empty(old_ctrl)) {
self.reserve(1, hasher);
- index = self.table.find_insert_slot(hash);
+ slot = self.table.find_insert_slot(hash);
}
- self.table.record_item_insert_at(index, old_ctrl, hash);
-
- let bucket = self.bucket(index);
- bucket.write(value);
- bucket
+ self.insert_in_slot(hash, slot, value)
}
}
@@ -820,7 +1234,7 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
let old_ctrl = *self.table.ctrl(index);
debug_assert!(self.is_bucket_full(index));
let old_growth_left = self.table.growth_left;
- let item = self.remove(bucket);
+ let item = self.remove(bucket).0;
if let Some(new_item) = f(item) {
self.table.growth_left = old_growth_left;
self.table.set_ctrl(index, old_ctrl);
@@ -832,6 +1246,49 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
}
}
+ /// Searches for an element in the table. If the element is not found,
+ /// returns `Err` with the position of a slot where an element with the
+ /// same hash could be inserted.
+ ///
+ /// This function may resize the table if additional space is required for
+ /// inserting an element.
+ #[inline]
+ pub fn find_or_find_insert_slot(
+ &mut self,
+ hash: u64,
+ mut eq: impl FnMut(&T) -> bool,
+ hasher: impl Fn(&T) -> u64,
+ ) -> Result<Bucket<T>, InsertSlot> {
+ self.reserve(1, hasher);
+
+ match self
+ .table
+ .find_or_find_insert_slot_inner(hash, &mut |index| unsafe {
+ eq(self.bucket(index).as_ref())
+ }) {
+ Ok(index) => Ok(unsafe { self.bucket(index) }),
+ Err(slot) => Err(slot),
+ }
+ }
+
+ /// Inserts a new element into the table in the given slot, and returns its
+ /// raw bucket.
+ ///
+ /// # Safety
+ ///
+ /// `slot` must point to a slot previously returned by
+ /// `find_or_find_insert_slot`, and no mutation of the table must have
+ /// occurred since that call.
+ #[inline]
+ pub unsafe fn insert_in_slot(&mut self, hash: u64, slot: InsertSlot, value: T) -> Bucket<T> {
+ let old_ctrl = *self.table.ctrl(slot.index);
+ self.table.record_item_insert_at(slot.index, old_ctrl, hash);
+
+ let bucket = self.bucket(slot.index);
+ bucket.write(value);
+ bucket
+ }
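
Taken together, the two methods above form an entry-like API: look up the key once, then either update the existing bucket or insert into the slot that the failed lookup already found. Below is a minimal upsert sketch (editor's illustration under the `raw` cargo feature, using the same `ahash`-based `make_hash` helper as the other doc examples), counting occurrences of string keys.

```rust
use core::hash::{BuildHasher, BuildHasherDefault, Hash, Hasher};
use hashbrown::raw::RawTable;

fn make_hash<K: Hash + ?Sized, S: BuildHasher>(hash_builder: &S, key: &K) -> u64 {
    let mut state = hash_builder.build_hasher();
    key.hash(&mut state);
    state.finish()
}

fn main() {
    let hash_builder = BuildHasherDefault::<ahash::AHasher>::default();
    let mut table: RawTable<(&str, u64)> = RawTable::new();

    for key in ["a", "b", "a"] {
        let hash = make_hash(&hash_builder, key);
        match table.find_or_find_insert_slot(
            hash,
            |(k, _)| *k == key,
            |(k, _)| make_hash(&hash_builder, k),
        ) {
            // Key already present: bump the counter in place.
            Ok(bucket) => unsafe { bucket.as_mut().1 += 1 },
            // Key absent: insert into the slot found by the failed lookup.
            // SAFETY: the table has not been mutated since `find_or_find_insert_slot`.
            Err(slot) => unsafe {
                table.insert_in_slot(hash, slot, (key, 1));
            },
        }
    }

    let hash = make_hash(&hash_builder, "a");
    let bucket = table.find(hash, |(k, _)| *k == "a").unwrap();
    assert_eq!(unsafe { bucket.as_ref() }, &("a", 2));
}
```

Note that `find_or_find_insert_slot` may resize the table, which is why the returned `InsertSlot` must be used before any further mutation.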
+
/// Searches for an element in the table.
#[inline]
pub fn find(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> Option<Bucket<T>> {
@@ -984,7 +1441,7 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
/// `RawIterHash` struct, we have to make the `iter_hash` method unsafe.
#[cfg_attr(feature = "inline-more", inline)]
#[cfg(feature = "raw")]
- pub unsafe fn iter_hash(&self, hash: u64) -> RawIterHash<'_, T, A> {
+ pub unsafe fn iter_hash(&self, hash: u64) -> RawIterHash<T> {
RawIterHash::new(self, hash)
}
@@ -1072,6 +1529,11 @@ where
}
impl<A> RawTableInner<A> {
+ /// Creates a new empty hash table without allocating any memory.
+ ///
+ /// In effect this returns a table with exactly 1 bucket. However we can
+ /// leave the data pointer dangling since that bucket is never accessed
+ /// due to our load factor forcing us to always have at least 1 free bucket.
#[inline]
const fn new_in(alloc: A) -> Self {
Self {
@@ -1086,6 +1548,18 @@ impl<A> RawTableInner<A> {
}
impl<A: Allocator + Clone> RawTableInner<A> {
+ /// Allocates a new [`RawTableInner`] with the given number of buckets.
+ /// The control bytes and buckets are left uninitialized.
+ ///
+ /// # Safety
+ ///
+ /// The caller of this function must ensure that `buckets` is a power of two
+ /// and must also initialize all of the `self.bucket_mask + 1 +
+ /// Group::WIDTH` control bytes with the [`EMPTY`] byte.
+ ///
+ /// See also [`Allocator`] API for other safety concerns.
+ ///
+ /// [`Allocator`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html
#[cfg_attr(feature = "inline-more", inline)]
unsafe fn new_uninitialized(
alloc: A,
@@ -1106,6 +1580,7 @@ impl<A: Allocator + Clone> RawTableInner<A> {
Err(_) => return Err(fallibility.alloc_err(layout)),
};
+ // SAFETY: a null pointer would have been caught by the allocation check above
let ctrl = NonNull::new_unchecked(ptr.as_ptr().add(ctrl_offset));
Ok(Self {
ctrl,
@@ -1116,6 +1591,10 @@ impl<A: Allocator + Clone> RawTableInner<A> {
})
}
+ /// Attempts to allocate a new [`RawTableInner`] with at least enough
+ /// capacity for inserting the given number of elements without reallocating.
+ ///
+ /// All the control bytes are initialized with the [`EMPTY`] byte.
#[inline]
fn fallible_with_capacity(
alloc: A,
@@ -1126,11 +1605,16 @@ impl<A: Allocator + Clone> RawTableInner<A> {
if capacity == 0 {
Ok(Self::new_in(alloc))
} else {
+ // SAFETY: We checked that we could successfully allocate the new table, and then
+ // initialized all control bytes with the constant `EMPTY` byte.
unsafe {
let buckets =
capacity_to_buckets(capacity).ok_or_else(|| fallibility.capacity_overflow())?;
let result = Self::new_uninitialized(alloc, table_layout, buckets, fallibility)?;
+ // SAFETY: We checked that the table is allocated and therefore the table already has
+ // `self.bucket_mask + 1 + Group::WIDTH` number of control bytes (see TableLayout::calculate_layout_for)
+ // so writing `self.num_ctrl_bytes() == bucket_mask + 1 + Group::WIDTH` bytes is safe.
result.ctrl(0).write_bytes(EMPTY, result.num_ctrl_bytes());
Ok(result)
@@ -1138,66 +1622,200 @@ impl<A: Allocator + Clone> RawTableInner<A> {
}
}
+ /// Fixes up an insertion slot due to false positives for groups smaller than the group width.
+ /// This must only be used on insertion slots found by `find_insert_slot_in_group`.
+ #[inline]
+ unsafe fn fix_insert_slot(&self, mut index: usize) -> InsertSlot {
+ // In tables smaller than the group width
+ // (self.buckets() < Group::WIDTH), trailing control
+ // bytes outside the range of the table are filled with
+ // EMPTY entries. These will unfortunately trigger a
+ // match, but once masked may point to a full bucket that
+ // is already occupied. We detect this situation here and
+ // perform a second scan starting at the beginning of the
+ // table. This second scan is guaranteed to find an empty
+ // slot (due to the load factor) before hitting the trailing
+ // control bytes (containing EMPTY).
+ if unlikely(self.is_bucket_full(index)) {
+ debug_assert!(self.bucket_mask < Group::WIDTH);
+ // SAFETY:
+ //
+ // * We are in range and `ptr = self.ctrl(0)` are valid for reads
+ // and properly aligned, because the table is already allocated
+ // (see `TableLayout::calculate_layout_for` and `ptr::read`);
+ //
+ // * For tables larger than the group width (self.buckets() >= Group::WIDTH),
+ // we will never end up in the given branch, since
+ // `(probe_seq.pos + bit) & self.bucket_mask` in `find_insert_slot_in_group` cannot
+ // return a full bucket index. For tables smaller than the group width, calling the
+ // `unwrap_unchecked` function is also
+ // safe, as the trailing control bytes outside the range of the table are filled
+ // with EMPTY bytes, so this second scan either finds an empty slot (due to the
+ // load factor) or hits the trailing control bytes (containing EMPTY).
+ index = Group::load_aligned(self.ctrl(0))
+ .match_empty_or_deleted()
+ .lowest_set_bit()
+ .unwrap_unchecked();
+ }
+ InsertSlot { index }
+ }
+
+ /// Finds the position to insert something in a group.
+ /// This may have false positives and must be fixed up with `fix_insert_slot` before it's used.
+ #[inline]
+ fn find_insert_slot_in_group(&self, group: &Group, probe_seq: &ProbeSeq) -> Option<usize> {
+ let bit = group.match_empty_or_deleted().lowest_set_bit();
+
+ if likely(bit.is_some()) {
+ Some((probe_seq.pos + bit.unwrap()) & self.bucket_mask)
+ } else {
+ None
+ }
+ }
+
+ /// Searches for an element in the table, or a potential slot where that element could be
+ /// inserted.
+ ///
+ /// This uses dynamic dispatch to reduce the amount of code generated, but that is
+ /// eliminated by LLVM optimizations.
+ #[inline]
+ fn find_or_find_insert_slot_inner(
+ &self,
+ hash: u64,
+ eq: &mut dyn FnMut(usize) -> bool,
+ ) -> Result<usize, InsertSlot> {
+ let mut insert_slot = None;
+
+ let h2_hash = h2(hash);
+ let mut probe_seq = self.probe_seq(hash);
+
+ loop {
+ let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) };
+
+ for bit in group.match_byte(h2_hash) {
+ let index = (probe_seq.pos + bit) & self.bucket_mask;
+
+ if likely(eq(index)) {
+ return Ok(index);
+ }
+ }
+
+ // We didn't find the element we were looking for in the group, try to get an
+ // insertion slot from the group if we don't have one yet.
+ if likely(insert_slot.is_none()) {
+ insert_slot = self.find_insert_slot_in_group(&group, &probe_seq);
+ }
+
+ // Only stop the search if the group contains at least one empty element.
+ // Otherwise, the element that we are looking for might be in a following group.
+ if likely(group.match_empty().any_bit_set()) {
+ // We must have found an insert slot by now, since the current group contains at
+ // least one. For tables smaller than the group width, there will still be an
+ // empty element in the current (and only) group due to the load factor.
+ unsafe {
+ return Err(self.fix_insert_slot(insert_slot.unwrap_unchecked()));
+ }
+ }
+
+ probe_seq.move_next(self.bucket_mask);
+ }
+ }
+
/// Searches for an empty or deleted bucket which is suitable for inserting
/// a new element and sets the hash for that slot.
///
/// There must be at least 1 empty bucket in the table.
#[inline]
unsafe fn prepare_insert_slot(&self, hash: u64) -> (usize, u8) {
- let index = self.find_insert_slot(hash);
+ let index = self.find_insert_slot(hash).index;
let old_ctrl = *self.ctrl(index);
self.set_ctrl_h2(index, hash);
(index, old_ctrl)
}
/// Searches for an empty or deleted bucket which is suitable for inserting
- /// a new element.
+ /// a new element, returning the `index` for the new [`Bucket`].
///
- /// There must be at least 1 empty bucket in the table.
+ /// This function does not make any changes to the `data` part of the table, or any
+ /// changes to the `items` or `growth_left` field of the table.
+ ///
+ /// The table must have at least 1 empty or deleted `bucket`, otherwise this function
+ /// will never return (will go into an infinite loop) for tables larger than the group
+ /// width, or will return an index outside of the table's index range if the table is
+ /// smaller than the group width.
+ ///
+ /// # Note
+ ///
+ /// Calling this function is always safe, but attempting to write data at
+ /// the returned index will cause immediate [`undefined behavior`] if the table is
+ /// smaller than the group width and did not contain at least one empty bucket.
+ /// This is because, in that case, the function will return
+ /// `self.bucket_mask + 1` as an index due to the trailing EMPTY control bytes outside
+ /// the table range.
+ ///
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
#[inline]
- fn find_insert_slot(&self, hash: u64) -> usize {
+ fn find_insert_slot(&self, hash: u64) -> InsertSlot {
let mut probe_seq = self.probe_seq(hash);
loop {
+ // SAFETY:
+ // * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1`
+ // of the table due to masking with `self.bucket_mask` and also because the number of
+ // buckets is a power of two (see comment for masking below).
+ //
+ // * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to
+ // call `Group::load` due to the extended control bytes range, which is
+ // `self.bucket_mask + 1 + Group::WIDTH` (in fact, this means that the last control
+ // byte will never be read for the allocated table);
+ //
+ // * Also, even if `RawTableInner` is not already allocated, `ProbeSeq.pos` will
+ // always return "0" (zero), so Group::load will read unaligned `Group::static_empty()`
+ // bytes, which is safe (see RawTableInner::new_in).
unsafe {
let group = Group::load(self.ctrl(probe_seq.pos));
- if let Some(bit) = group.match_empty_or_deleted().lowest_set_bit() {
- let result = (probe_seq.pos + bit) & self.bucket_mask;
-
- // In tables smaller than the group width, trailing control
- // bytes outside the range of the table are filled with
- // EMPTY entries. These will unfortunately trigger a
- // match, but once masked may point to a full bucket that
- // is already occupied. We detect this situation here and
- // perform a second scan starting at the beginning of the
- // table. This second scan is guaranteed to find an empty
- // slot (due to the load factor) before hitting the trailing
- // control bytes (containing EMPTY).
- if unlikely(self.is_bucket_full(result)) {
- debug_assert!(self.bucket_mask < Group::WIDTH);
- debug_assert_ne!(probe_seq.pos, 0);
- return Group::load_aligned(self.ctrl(0))
- .match_empty_or_deleted()
- .lowest_set_bit_nonzero();
- }
+ let index = self.find_insert_slot_in_group(&group, &probe_seq);
- return result;
+ if likely(index.is_some()) {
+ return self.fix_insert_slot(index.unwrap_unchecked());
}
}
probe_seq.move_next(self.bucket_mask);
}
}
- /// Searches for an element in the table. This uses dynamic dispatch to reduce the amount of
- /// code generated, but it is eliminated by LLVM optimizations.
+ /// Searches for an element in a table, returning the `index` of the found element.
+ /// This uses dynamic dispatch to reduce the amount of code generated, but it is
+ /// eliminated by LLVM optimizations.
+ ///
+ /// This function does not make any changes to the `data` part of the table, or any
+ /// changes to the `items` or `growth_left` field of the table.
+ ///
+ /// The table must have at least 1 empty `bucket`, otherwise, if the
+ /// `eq: &mut dyn FnMut(usize) -> bool` function does not return `true`,
+ /// this function will also never return (will go into an infinite loop).
#[inline(always)]
fn find_inner(&self, hash: u64, eq: &mut dyn FnMut(usize) -> bool) -> Option<usize> {
let h2_hash = h2(hash);
let mut probe_seq = self.probe_seq(hash);
loop {
+ // SAFETY:
+ // * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1`
+ // of the table due to masking with `self.bucket_mask`.
+ //
+ // * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to
+ // call `Group::load` due to the extended control bytes range, which is
+ // `self.bucket_mask + 1 + Group::WIDTH` (in fact, this means that the last control
+ // byte will never be read for the allocated table);
+ //
+ // * Also, even if `RawTableInner` is not already allocated, `ProbeSeq.pos` will
+ // always return "0" (zero), so Group::load will read unaligned `Group::static_empty()`
+ // bytes, which is safe (see RawTableInner::new_in).
let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) };
for bit in group.match_byte(h2_hash) {
+ // This is the same as `(probe_seq.pos + bit) % self.buckets()` because the number
+ // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
let index = (probe_seq.pos + bit) & self.bucket_mask;
if likely(eq(index)) {
@@ -1213,12 +1831,49 @@ impl<A: Allocator + Clone> RawTableInner<A> {
}
}
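
As an aside on the masking trick used throughout these probe loops: because the number of buckets is always a power of two, `& self.bucket_mask` is just a cheap modulo. A tiny standalone demonstration (editor's illustration with made-up values, not crate code):

```rust
fn main() {
    let buckets = 16usize;         // bucket counts are always powers of two
    let bucket_mask = buckets - 1; // 0b1111
    for pos in [0usize, 5, 15, 16, 37, 1023] {
        // Masking with `buckets - 1` keeps only the low bits, which is
        // exactly `pos % buckets` when `buckets` is a power of two.
        assert_eq!(pos & bucket_mask, pos % buckets);
    }
}
```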
+ /// Prepares for rehashing data in place (that is, without allocating new memory).
+ /// Converts all full `control bytes` to `DELETED` and all `DELETED` control
+ /// bytes to `EMPTY`, i.e. performs the following conversion:
+ ///
+ /// - `EMPTY` control bytes -> `EMPTY`;
+ /// - `DELETED` control bytes -> `EMPTY`;
+ /// - `FULL` control bytes -> `DELETED`.
+ ///
+ /// This function does not make any changes to the `data` parts of the table,
+ /// or any changes to the `items` or `growth_left` field of the table.
+ ///
+ /// # Safety
+ ///
+ /// You must observe the following safety rules when calling this function:
+ ///
+ /// * The [`RawTableInner`] has already been allocated;
+ ///
+ /// * The caller of this function must convert the `DELETED` bytes back to `FULL`
+ /// bytes when re-inserting them into their ideal position (which was impossible
+ /// to do during the first insert due to tombstones). If the caller does not do
+ /// this, then calling this function may result in a memory leak.
+ ///
+ /// Calling this function on a table that has not been allocated results in
+ /// [`undefined behavior`].
+ ///
+ /// See also the [`Bucket::as_ptr`] method for more information about properly removing
+ /// or saving a `data element` from / into the [`RawTable`] / [`RawTableInner`].
+ ///
+ /// [`Bucket::as_ptr`]: Bucket::as_ptr
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
#[allow(clippy::mut_mut)]
#[inline]
unsafe fn prepare_rehash_in_place(&mut self) {
- // Bulk convert all full control bytes to DELETED, and all DELETED
- // control bytes to EMPTY. This effectively frees up all buckets
- // containing a DELETED entry.
+ // Bulk convert all full control bytes to DELETED, and all DELETED control bytes to EMPTY.
+ // This effectively frees up all buckets containing a DELETED entry.
+ //
+ // SAFETY:
+ // 1. `i` is guaranteed to be within bounds since we are iterating from zero to `buckets - 1`;
+ // 2. Even if `i == self.bucket_mask`, it is safe to call `Group::load_aligned`
+ // due to the extended control bytes range, which is `self.bucket_mask + 1 + Group::WIDTH`;
+ // 3. The caller of this function guarantees that [`RawTableInner`] has already been allocated;
+ // 4. We can use `Group::load_aligned` and `Group::store_aligned` here since we start from 0
+ // and go to the end with a step equal to `Group::WIDTH` (see TableLayout::calculate_layout_for).
for i in (0..self.buckets()).step_by(Group::WIDTH) {
let group = Group::load_aligned(self.ctrl(i));
let group = group.convert_special_to_empty_and_full_to_deleted();
@@ -1227,10 +1882,19 @@ impl<A: Allocator + Clone> RawTableInner<A> {
// Fix up the trailing control bytes. See the comments in set_ctrl
// for the handling of tables smaller than the group width.
- if self.buckets() < Group::WIDTH {
+ //
+ // SAFETY: The caller of this function guarantees that [`RawTableInner`]
+ // has already been allocated
+ if unlikely(self.buckets() < Group::WIDTH) {
+ // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of control bytes,
+ // so copying `self.buckets() == self.bucket_mask + 1` bytes with offset equal to
+ // `Group::WIDTH` is safe
self.ctrl(0)
.copy_to(self.ctrl(Group::WIDTH), self.buckets());
} else {
+ // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of
+ // control bytes, so copying `Group::WIDTH` bytes with offset equal
+ // to `self.buckets() == self.bucket_mask + 1` is safe
self.ctrl(0)
.copy_to(self.ctrl(self.buckets()), Group::WIDTH);
}
@@ -1274,7 +1938,7 @@ impl<A: Allocator + Clone> RawTableInner<A> {
#[cfg(feature = "raw")]
#[inline]
unsafe fn prepare_insert_no_grow(&mut self, hash: u64) -> Result<usize, ()> {
- let index = self.find_insert_slot(hash);
+ let index = self.find_insert_slot(hash).index;
let old_ctrl = *self.ctrl(index);
if unlikely(self.growth_left == 0 && special_is_empty(old_ctrl)) {
Err(())
@@ -1301,13 +1965,68 @@ impl<A: Allocator + Clone> RawTableInner<A> {
/// Sets a control byte to the hash, and possibly also the replicated control byte at
/// the end of the array.
+ ///
+ /// This function does not make any changes to the `data` parts of the table,
+ /// or any changes to the `items` or `growth_left` field of the table.
+ ///
+ /// # Safety
+ ///
+ /// The safety rules are directly derived from the safety rules for [`RawTableInner::set_ctrl`]
+ /// method. Thus, in order to uphold the safety contracts for the method, you must observe the
+ /// following rules when calling this function:
+ ///
+ /// * The [`RawTableInner`] has already been allocated;
+ ///
+ /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e.
+ /// `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must
+ /// be no greater than the number returned by the function [`RawTableInner::buckets`].
+ ///
+ /// Calling this function on a table that has not been allocated results in [`undefined behavior`].
+ ///
+ /// See also the [`Bucket::as_ptr`] method for more information about properly removing
+ /// or saving a `data element` from / into the [`RawTable`] / [`RawTableInner`].
+ ///
+ /// [`RawTableInner::set_ctrl`]: RawTableInner::set_ctrl
+ /// [`RawTableInner::buckets`]: RawTableInner::buckets
+ /// [`Bucket::as_ptr`]: Bucket::as_ptr
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
#[inline]
unsafe fn set_ctrl_h2(&self, index: usize, hash: u64) {
+ // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::set_ctrl_h2`]
self.set_ctrl(index, h2(hash));
}
+ /// Replaces the hash in the control byte at the given index with the provided one,
+ /// and possibly also replicates the new control byte at the end of the array of control
+ /// bytes, returning the old control byte.
+ ///
+ /// This function does not make any changes to the `data` parts of the table,
+ /// or any changes to the `items` or `growth_left` field of the table.
+ ///
+ /// # Safety
+ ///
+ /// The safety rules are directly derived from the safety rules for [`RawTableInner::set_ctrl_h2`]
+ /// and [`RawTableInner::ctrl`] methods. Thus, in order to uphold the safety contracts for both
+ /// methods, you must observe the following rules when calling this function:
+ ///
+ /// * The [`RawTableInner`] has already been allocated;
+ ///
+ /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e.
+ /// `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must
+ /// be no greater than the number returned by the function [`RawTableInner::buckets`].
+ ///
+ /// Calling this function on a table that has not been allocated results in [`undefined behavior`].
+ ///
+ /// See also the [`Bucket::as_ptr`] method for more information about properly removing
+ /// or saving a `data element` from / into the [`RawTable`] / [`RawTableInner`].
+ ///
+ /// [`RawTableInner::set_ctrl_h2`]: RawTableInner::set_ctrl_h2
+ /// [`RawTableInner::buckets`]: RawTableInner::buckets
+ /// [`Bucket::as_ptr`]: Bucket::as_ptr
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
#[inline]
unsafe fn replace_ctrl_h2(&self, index: usize, hash: u64) -> u8 {
+ // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::replace_ctrl_h2`]
let prev_ctrl = *self.ctrl(index);
self.set_ctrl_h2(index, hash);
prev_ctrl
@@ -1315,6 +2034,28 @@ impl<A: Allocator + Clone> RawTableInner<A> {
/// Sets a control byte, and possibly also the replicated control byte at
/// the end of the array.
+ ///
+ /// This function does not make any changes to the `data` parts of the table,
+ /// or any changes to the `items` or `growth_left` field of the table.
+ ///
+ /// # Safety
+ ///
+ /// You must observe the following safety rules when calling this function:
+ ///
+ /// * The [`RawTableInner`] has already been allocated;
+ ///
+ /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e.
+ /// `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must
+ /// be no greater than the number returned by the function [`RawTableInner::buckets`].
+ ///
+ /// Calling this function on a table that has not been allocated results in [`undefined behavior`].
+ ///
+ /// See also the [`Bucket::as_ptr`] method for more information about properly removing
+ /// or saving a `data element` from / into the [`RawTable`] / [`RawTableInner`].
+ ///
+ /// [`RawTableInner::buckets`]: RawTableInner::buckets
+ /// [`Bucket::as_ptr`]: Bucket::as_ptr
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
#[inline]
unsafe fn set_ctrl(&self, index: usize, ctrl: u8) {
// Replicate the first Group::WIDTH control bytes at the end of
@@ -1335,16 +2076,43 @@ impl<A: Allocator + Clone> RawTableInner<A> {
// ---------------------------------------------
// | [A] | [B] | [EMPTY] | [EMPTY] | [A] | [B] |
// ---------------------------------------------
+
+ // This is the same as `(index.wrapping_sub(Group::WIDTH)) % self.buckets() + Group::WIDTH`
+ // because the number of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
let index2 = ((index.wrapping_sub(Group::WIDTH)) & self.bucket_mask) + Group::WIDTH;
+ // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::set_ctrl`]
*self.ctrl(index) = ctrl;
*self.ctrl(index2) = ctrl;
}
/// Returns a pointer to a control byte.
+ ///
+ /// # Safety
+ ///
+ /// For an allocated [`RawTableInner`], the result is [`Undefined Behavior`]
+ /// if the `index` is greater than `self.bucket_mask + 1 + Group::WIDTH`.
+ /// Calling this function with `index == self.bucket_mask + 1 + Group::WIDTH`
+ /// will return a pointer to the end of the allocated table, which is useless on its own.
+ ///
+ /// Calling this function with `index >= self.bucket_mask + 1 + Group::WIDTH` on a
+ /// table that has not been allocated results in [`Undefined Behavior`].
+ ///
+ /// So to satisfy both requirements you should always follow the rule that
+ /// `index < self.bucket_mask + 1 + Group::WIDTH`.
+ ///
+ /// Calling this function on a [`RawTableInner`] that has not been allocated is safe
+ /// for read-only purposes.
+ ///
+ /// See also the [`Bucket::as_ptr()`] method for more information about properly removing
+ /// or saving a `data element` from / into the [`RawTable`] / [`RawTableInner`].
+ ///
+ /// [`Bucket::as_ptr()`]: Bucket::as_ptr()
+ /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
#[inline]
unsafe fn ctrl(&self, index: usize) -> *mut u8 {
debug_assert!(index < self.num_ctrl_bytes());
+ // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::ctrl`]
self.ctrl.as_ptr().add(index)
}
@@ -1541,7 +2309,7 @@ impl<A: Allocator + Clone> RawTableInner<A> {
let hash = hasher(*guard, i);
// Search for a suitable place to put it
- let new_i = guard.find_insert_slot(hash);
+ let new_i = guard.find_insert_slot(hash).index;
// Probing works by scanning through all of the control
// bytes in groups, which may not be aligned to the group
@@ -1626,27 +2394,95 @@ impl<A: Allocator + Clone> RawTableInner<A> {
self.growth_left = bucket_mask_to_capacity(self.bucket_mask);
}
+ /// Erases the [`Bucket`]'s control byte at the given index so that it is no longer
+ /// reported as full, decreases the `items` count of the table and, if possible,
+ /// increases `self.growth_left`.
+ ///
+ /// This function does not actually erase / drop the [`Bucket`] itself, i.e. it
+ /// does not make any changes to the `data` parts of the table. The caller of this
+ /// function must take care to properly drop the `data`, otherwise calling this
+ /// function may result in a memory leak.
+ ///
+ /// # Safety
+ ///
+ /// You must observe the following safety rules when calling this function:
+ ///
+ /// * The [`RawTableInner`] has already been allocated;
+ ///
+ /// * The control byte at the given position must be full;
+ ///
+ /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e.
+ /// `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must
+ /// be no greater than the number returned by the function [`RawTableInner::buckets`].
+ ///
+ /// Calling this function on a table that has not been allocated results in [`undefined behavior`].
+ ///
+ /// Calling this function on a table with no elements is unspecified, but calling subsequent
+ /// functions is likely to result in [`undefined behavior`] due to the subtraction overflowing
+ /// (`self.items -= 1` overflows when `self.items == 0`).
+ ///
+ /// See also the [`Bucket::as_ptr`] method for more information about properly removing
+ /// or saving a `data element` from / into the [`RawTable`] / [`RawTableInner`].
+ ///
+ /// [`RawTableInner::buckets`]: RawTableInner::buckets
+ /// [`Bucket::as_ptr`]: Bucket::as_ptr
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
#[inline]
unsafe fn erase(&mut self, index: usize) {
debug_assert!(self.is_bucket_full(index));
+
+ // This is the same as `index.wrapping_sub(Group::WIDTH) % self.buckets()` because
+ // the number of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
let index_before = index.wrapping_sub(Group::WIDTH) & self.bucket_mask;
+ // SAFETY:
+ // - The caller must uphold the safety contract for `erase` method;
+ // - `index_before` is guaranteed to be in range due to masking with `self.bucket_mask`
let empty_before = Group::load(self.ctrl(index_before)).match_empty();
let empty_after = Group::load(self.ctrl(index)).match_empty();
- // If we are inside a continuous block of Group::WIDTH full or deleted
- // cells then a probe window may have seen a full block when trying to
- // insert. We therefore need to keep that block non-empty so that
- // lookups will continue searching to the next probe window.
+ // Inserting and searching in the map is performed by two key functions:
+ //
+ // - The `find_insert_slot` function that looks up the index of any `EMPTY` or `DELETED`
+ // slot in a group to be able to insert. If it doesn't find an `EMPTY` or `DELETED`
+ // slot immediately in the first group, it jumps to the next `Group` looking for it,
+ // and so on until it has gone through all the groups in the control bytes.
+ //
+ // - The `find_inner` function that looks for the index of the desired element by looking
+ // at all the `FULL` bytes in the group. If it did not find the element right away, and
+ // there is no `EMPTY` byte in the group, then this means that the `find_insert_slot`
+ // function may have found a suitable slot in the next group. Therefore, `find_inner`
+ // jumps further, and if it does not find the desired element and again there is no `EMPTY`
+ // byte, then it jumps further, and so on. The search stops only if `find_inner` function
+ // finds the desired element or hits an `EMPTY` slot/byte.
+ //
+ // Accordingly, this leads to two consequences:
//
- // Note that in this context `leading_zeros` refers to the bytes at the
- // end of a group, while `trailing_zeros` refers to the bytes at the
- // beginning of a group.
+ // - The map must have `EMPTY` slots (bytes);
+ //
+ // - You can't just mark the byte to be erased as `EMPTY`, because otherwise the `find_inner`
+ // function may stumble upon an `EMPTY` byte before finding the desired element and stop
+ // searching.
+ //
+ // Thus it is necessary to check all bytes after and before the erased element. If we are in
+ // a contiguous `Group` of `FULL` or `DELETED` bytes (the number of `FULL` or `DELETED` bytes
+ // before and after is greater than or equal to `Group::WIDTH`), then we must mark our byte as
+ // `DELETED` in order for the `find_inner` function to go further. On the other hand, if there
+ // is at least one `EMPTY` slot in the `Group`, then the `find_inner` function will still stumble
+ // upon an `EMPTY` byte, so we can safely mark our erased byte as `EMPTY` as well.
+ //
+ // Finally, since `index_before == (index.wrapping_sub(Group::WIDTH) & self.bucket_mask) == index`
+ // and given all of the above, tables smaller than the group width (self.buckets() < Group::WIDTH)
+ // cannot have `DELETED` bytes.
+ //
+ // Note that in this context `leading_zeros` refers to the bytes at the end of a group, while
+ // `trailing_zeros` refers to the bytes at the beginning of a group.
let ctrl = if empty_before.leading_zeros() + empty_after.trailing_zeros() >= Group::WIDTH {
DELETED
} else {
self.growth_left += 1;
EMPTY
};
+ // SAFETY: the caller must uphold the safety contract for `erase` method.
self.set_ctrl(index, ctrl);
self.items -= 1;
}
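
The tombstone rule described in the comment above boils down to: if the erased slot sits inside a run of at least `Group::WIDTH` consecutive non-EMPTY control bytes, a probe window may have skipped over it during insertion, so it must become `DELETED` rather than `EMPTY`. Below is a simplified scalar sketch of that decision (editor's model only: it ignores the mirrored trailing control bytes and tables smaller than the group width, and `GROUP_WIDTH` is an assumed constant, not the crate's SIMD code).

```rust
const GROUP_WIDTH: usize = 8; // assumed width for the sketch

/// `empty[i]` is true when control byte `i` is EMPTY; `index` is the FULL slot being erased.
fn erased_ctrl(empty: &[bool], index: usize) -> &'static str {
    let mask = empty.len() - 1; // bucket count is a power of two

    // Consecutive non-EMPTY bytes immediately before `index`
    // (what `empty_before.leading_zeros()` measures on the group load).
    let mut before = 0;
    while before < GROUP_WIDTH && !empty[index.wrapping_sub(before + 1) & mask] {
        before += 1;
    }
    // Consecutive non-EMPTY bytes starting at `index` itself
    // (what `empty_after.trailing_zeros()` measures).
    let mut after = 0;
    while after < GROUP_WIDTH && !empty[(index + after) & mask] {
        after += 1;
    }

    // A full probe window covering `index` saw no EMPTY byte, so a lookup may
    // have continued past it: keep the slot as a DELETED tombstone.
    if before + after >= GROUP_WIDTH { "DELETED" } else { "EMPTY" }
}

fn main() {
    // 16 buckets; slots 1..=11 are occupied, so slot 4 lies inside a long run.
    let mut empty = [true; 16];
    for i in 1..=11 { empty[i] = false; }
    assert_eq!(erased_ctrl(&empty, 4), "DELETED");

    // With only a short occupied run around slot 4, EMPTY is safe.
    let mut empty = [true; 16];
    for i in 3..=5 { empty[i] = false; }
    assert_eq!(erased_ctrl(&empty, 4), "EMPTY");
}
```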
@@ -1752,7 +2588,8 @@ impl<T: Copy, A: Allocator + Clone> RawTableClone for RawTable<T, A> {
.copy_to_nonoverlapping(self.table.ctrl(0), self.table.num_ctrl_bytes());
source
.data_start()
- .copy_to_nonoverlapping(self.data_start(), self.table.buckets());
+ .as_ptr()
+ .copy_to_nonoverlapping(self.data_start().as_ptr(), self.table.buckets());
self.table.items = source.table.items;
self.table.growth_left = source.table.growth_left;
@@ -1776,7 +2613,7 @@ impl<T: Clone, A: Allocator + Clone> RawTable<T, A> {
// to make sure we drop only the elements that have been
// cloned so far.
let mut guard = guard((0, &mut *self), |(index, self_)| {
- if Self::DATA_NEEDS_DROP && !self_.is_empty() {
+ if Self::DATA_NEEDS_DROP {
for i in 0..=*index {
if self_.is_bucket_full(i) {
self_.bucket(i).drop();
@@ -1896,7 +2733,7 @@ impl<T, A: Allocator + Clone> IntoIterator for RawTable<T, A> {
pub(crate) struct RawIterRange<T> {
// Mask of full buckets in the current group. Bits are cleared from this
// mask as each element is processed.
- current_group: BitMask,
+ current_group: BitMaskIter,
// Pointer to the buckets for the current group.
data: Bucket<T>,
@@ -1924,7 +2761,7 @@ impl<T> RawIterRange<T> {
let next_ctrl = ctrl.add(Group::WIDTH);
Self {
- current_group,
+ current_group: current_group.into_iter(),
data,
next_ctrl,
end,
@@ -1981,8 +2818,7 @@ impl<T> RawIterRange<T> {
#[cfg_attr(feature = "inline-more", inline)]
unsafe fn next_impl<const DO_CHECK_PTR_RANGE: bool>(&mut self) -> Option<Bucket<T>> {
loop {
- if let Some(index) = self.current_group.lowest_set_bit() {
- self.current_group = self.current_group.remove_lowest_bit();
+ if let Some(index) = self.current_group.next() {
return Some(self.data.next_n(index));
}
@@ -1995,7 +2831,7 @@ impl<T> RawIterRange<T> {
// than the group size where the trailing control bytes are all
// EMPTY. On larger tables self.end is guaranteed to be aligned
// to the group size (since tables are power-of-two sized).
- self.current_group = Group::load_aligned(self.next_ctrl).match_full();
+ self.current_group = Group::load_aligned(self.next_ctrl).match_full().into_iter();
self.data = self.data.next_n(Group::WIDTH);
self.next_ctrl = self.next_ctrl.add(Group::WIDTH);
}
@@ -2074,7 +2910,7 @@ impl<T> RawIter<T> {
/// This method should be called _before_ the removal is made. It is not necessary to call this
/// method if you are removing an item that this iterator yielded in the past.
#[cfg(feature = "raw")]
- pub fn reflect_remove(&mut self, b: &Bucket<T>) {
+ pub unsafe fn reflect_remove(&mut self, b: &Bucket<T>) {
self.reflect_toggle_full(b, false);
}
@@ -2088,36 +2924,76 @@ impl<T> RawIter<T> {
///
/// This method should be called _after_ the given insert is made.
#[cfg(feature = "raw")]
- pub fn reflect_insert(&mut self, b: &Bucket<T>) {
+ pub unsafe fn reflect_insert(&mut self, b: &Bucket<T>) {
self.reflect_toggle_full(b, true);
}
/// Refresh the iterator so that it reflects a change to the state of the given bucket.
#[cfg(feature = "raw")]
- fn reflect_toggle_full(&mut self, b: &Bucket<T>, is_insert: bool) {
- unsafe {
- if b.as_ptr() > self.iter.data.as_ptr() {
- // The iterator has already passed the bucket's group.
- // So the toggle isn't relevant to this iterator.
- return;
+ unsafe fn reflect_toggle_full(&mut self, b: &Bucket<T>, is_insert: bool) {
+ if b.as_ptr() > self.iter.data.as_ptr() {
+ // The iterator has already passed the bucket's group.
+ // So the toggle isn't relevant to this iterator.
+ return;
+ }
+
+ if self.iter.next_ctrl < self.iter.end
+ && b.as_ptr() <= self.iter.data.next_n(Group::WIDTH).as_ptr()
+ {
+ // The iterator has not yet reached the bucket's group.
+ // We don't need to reload anything, but we do need to adjust the item count.
+
+ if cfg!(debug_assertions) {
+ // Double-check that the user isn't lying to us by checking the bucket state.
+ // To do that, we need to find its control byte. We know that self.iter.data is
+ // at self.iter.next_ctrl - Group::WIDTH, so we work from there:
+ let offset = offset_from(self.iter.data.as_ptr(), b.as_ptr());
+ let ctrl = self.iter.next_ctrl.sub(Group::WIDTH).add(offset);
+ // This method should be called _before_ a removal, or _after_ an insert,
+ // so in both cases the ctrl byte should indicate that the bucket is full.
+ assert!(is_full(*ctrl));
}
- if self.iter.next_ctrl < self.iter.end
- && b.as_ptr() <= self.iter.data.next_n(Group::WIDTH).as_ptr()
- {
- // The iterator has not yet reached the bucket's group.
- // We don't need to reload anything, but we do need to adjust the item count.
+ if is_insert {
+ self.items += 1;
+ } else {
+ self.items -= 1;
+ }
- if cfg!(debug_assertions) {
- // Double-check that the user isn't lying to us by checking the bucket state.
- // To do that, we need to find its control byte. We know that self.iter.data is
- // at self.iter.next_ctrl - Group::WIDTH, so we work from there:
- let offset = offset_from(self.iter.data.as_ptr(), b.as_ptr());
- let ctrl = self.iter.next_ctrl.sub(Group::WIDTH).add(offset);
- // This method should be called _before_ a removal, or _after_ an insert,
- // so in both cases the ctrl byte should indicate that the bucket is full.
- assert!(is_full(*ctrl));
- }
+ return;
+ }
+
+ // The iterator is at the bucket group that the toggled bucket is in.
+ // We need to do two things:
+ //
+ // - Determine if the iterator already yielded the toggled bucket.
+ // If it did, we're done.
+ // - Otherwise, update the iterator cached group so that it won't
+ // yield a to-be-removed bucket, or _will_ yield a to-be-added bucket.
+ // We'll also need to update the item count accordingly.
+ if let Some(index) = self.iter.current_group.0.lowest_set_bit() {
+ let next_bucket = self.iter.data.next_n(index);
+ if b.as_ptr() > next_bucket.as_ptr() {
+ // The toggled bucket is "before" the bucket the iterator would yield next. We
+ // therefore don't need to do anything --- the iterator has already passed the
+ // bucket in question.
+ //
+ // The item count must already be correct, since a removal or insert "prior" to
+ // the iterator's position wouldn't affect the item count.
+ } else {
+ // The removed bucket is an upcoming bucket. We need to make sure it does _not_
+ // get yielded, and also that it's no longer included in the item count.
+ //
+ // NOTE: We can't just reload the group here, both since that might reflect
+ // inserts we've already passed, and because that might inadvertently unset the
+ // bits for _other_ removals. If we do that, we'd have to also decrement the
+ // item count for those other bits that we unset. But the presumably subsequent
+ // call to reflect for those buckets might _also_ decrement the item count.
+ // Instead, we _just_ flip the bit for the particular bucket the caller asked
+ // us to reflect.
+ let our_bit = offset_from(self.iter.data.as_ptr(), b.as_ptr());
+ let was_full = self.iter.current_group.flip(our_bit);
+ debug_assert_ne!(was_full, is_insert);
if is_insert {
self.items += 1;
@@ -2125,60 +3001,18 @@ impl<T> RawIter<T> {
self.items -= 1;
}
- return;
- }
-
- // The iterator is at the bucket group that the toggled bucket is in.
- // We need to do two things:
- //
- // - Determine if the iterator already yielded the toggled bucket.
- // If it did, we're done.
- // - Otherwise, update the iterator cached group so that it won't
- // yield a to-be-removed bucket, or _will_ yield a to-be-added bucket.
- // We'll also need to update the item count accordingly.
- if let Some(index) = self.iter.current_group.lowest_set_bit() {
- let next_bucket = self.iter.data.next_n(index);
- if b.as_ptr() > next_bucket.as_ptr() {
- // The toggled bucket is "before" the bucket the iterator would yield next. We
- // therefore don't need to do anything --- the iterator has already passed the
- // bucket in question.
- //
- // The item count must already be correct, since a removal or insert "prior" to
- // the iterator's position wouldn't affect the item count.
- } else {
- // The removed bucket is an upcoming bucket. We need to make sure it does _not_
- // get yielded, and also that it's no longer included in the item count.
- //
- // NOTE: We can't just reload the group here, both since that might reflect
- // inserts we've already passed, and because that might inadvertently unset the
- // bits for _other_ removals. If we do that, we'd have to also decrement the
- // item count for those other bits that we unset. But the presumably subsequent
- // call to reflect for those buckets might _also_ decrement the item count.
- // Instead, we _just_ flip the bit for the particular bucket the caller asked
- // us to reflect.
- let our_bit = offset_from(self.iter.data.as_ptr(), b.as_ptr());
- let was_full = self.iter.current_group.flip(our_bit);
- debug_assert_ne!(was_full, is_insert);
-
- if is_insert {
- self.items += 1;
+ if cfg!(debug_assertions) {
+ if b.as_ptr() == next_bucket.as_ptr() {
+ // The removed bucket should no longer be next
+ debug_assert_ne!(self.iter.current_group.0.lowest_set_bit(), Some(index));
} else {
- self.items -= 1;
- }
-
- if cfg!(debug_assertions) {
- if b.as_ptr() == next_bucket.as_ptr() {
- // The removed bucket should no longer be next
- debug_assert_ne!(self.iter.current_group.lowest_set_bit(), Some(index));
- } else {
- // We should not have changed what bucket comes next.
- debug_assert_eq!(self.iter.current_group.lowest_set_bit(), Some(index));
- }
+ // We should not have changed what bucket comes next.
+ debug_assert_eq!(self.iter.current_group.0.lowest_set_bit(), Some(index));
}
}
- } else {
- // We must have already iterated past the removed item.
}
+ } else {
+ // We must have already iterated past the removed item.
}
}
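
The comments above explain why `reflect_toggle_full` flips only the one bit the caller asked about instead of reloading the whole group: a reload could pick up unrelated inserts and removals and leave the cached item count out of sync. A deliberately tiny scalar model of that bookkeeping follows (all names and the `u8` group mask are invented for illustration; the real code decides "already yielded" by comparing bucket pointers, not mask bits):

```rust
// Conceptual model only: one bit per *upcoming* full bucket in the current
// group, plus the number of items the iterator still expects to yield.
struct MiniIter {
    current_group: u8,
    items: usize,
}

impl MiniIter {
    fn reflect_remove(&mut self, bit_in_group: u8) {
        let mask = 1u8 << bit_in_group;
        if self.current_group & mask != 0 {
            // Upcoming bucket: flip just this bit so it is never yielded,
            // and shrink the remaining-item count exactly once.
            self.current_group ^= mask;
            self.items -= 1;
        }
        // A bucket the iterator already passed needs no adjustment.
    }
}

fn main() {
    let mut it = MiniIter { current_group: 0b0110, items: 5 };
    it.reflect_remove(2); // upcoming bucket: bit cleared, count decremented
    assert_eq!((it.current_group, it.items), (0b0010, 4));
    it.reflect_remove(3); // already passed: nothing changes
    assert_eq!((it.current_group, it.items), (0b0010, 4));
}
```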
@@ -2217,9 +3051,8 @@ impl<T> Iterator for RawIter<T> {
self.iter.next_impl::<false>()
};
- if nxt.is_some() {
- self.items -= 1;
- }
+ debug_assert!(nxt.is_some());
+ self.items -= 1;
nxt
}
@@ -2388,13 +3221,28 @@ impl<T, A: Allocator + Clone> FusedIterator for RawDrain<'_, T, A> {}
/// `RawTable` only stores 7 bits of the hash value, so this iterator may return
/// items that have a hash value different than the one provided. You should
/// always validate the returned values before using them.
-pub struct RawIterHash<'a, T, A: Allocator + Clone = Global> {
- inner: RawIterHashInner<'a, A>,
+///
+/// For maximum flexibility this iterator is not bound by a lifetime, but you
+/// must observe several rules when using it:
+/// - You must not free the hash table while iterating (including via growing/shrinking).
+/// - It is fine to erase a bucket that has been yielded by the iterator.
+/// - Erasing a bucket that has not yet been yielded by the iterator may still
+/// result in the iterator yielding that bucket.
+/// - It is unspecified whether an element inserted after the iterator was
+/// created will be yielded by that iterator.
+/// - The order in which the iterator yields buckets is unspecified and may
+/// change in the future.
+pub struct RawIterHash<T> {
+ inner: RawIterHashInner,
_marker: PhantomData<T>,
}
-struct RawIterHashInner<'a, A: Allocator + Clone> {
- table: &'a RawTableInner<A>,
+struct RawIterHashInner {
+ // See `RawTableInner`'s corresponding fields for details.
+ // We can't store a `*const RawTableInner` as it would get
+ // invalidated by the user calling `&mut` methods on `RawTable`.
+ bucket_mask: usize,
+ ctrl: NonNull<u8>,
// The top 7 bits of the hash.
h2_hash: u8,
@@ -2408,65 +3256,77 @@ struct RawIterHashInner<'a, A: Allocator + Clone> {
bitmask: BitMaskIter,
}
-impl<'a, T, A: Allocator + Clone> RawIterHash<'a, T, A> {
+impl<T> RawIterHash<T> {
#[cfg_attr(feature = "inline-more", inline)]
#[cfg(feature = "raw")]
- fn new(table: &'a RawTable<T, A>, hash: u64) -> Self {
+ unsafe fn new<A: Allocator + Clone>(table: &RawTable<T, A>, hash: u64) -> Self {
RawIterHash {
inner: RawIterHashInner::new(&table.table, hash),
_marker: PhantomData,
}
}
}
-impl<'a, A: Allocator + Clone> RawIterHashInner<'a, A> {
+impl RawIterHashInner {
#[cfg_attr(feature = "inline-more", inline)]
#[cfg(feature = "raw")]
- fn new(table: &'a RawTableInner<A>, hash: u64) -> Self {
- unsafe {
- let h2_hash = h2(hash);
- let probe_seq = table.probe_seq(hash);
- let group = Group::load(table.ctrl(probe_seq.pos));
- let bitmask = group.match_byte(h2_hash).into_iter();
-
- RawIterHashInner {
- table,
- h2_hash,
- probe_seq,
- group,
- bitmask,
- }
+ unsafe fn new<A: Allocator + Clone>(table: &RawTableInner<A>, hash: u64) -> Self {
+ let h2_hash = h2(hash);
+ let probe_seq = table.probe_seq(hash);
+ let group = Group::load(table.ctrl(probe_seq.pos));
+ let bitmask = group.match_byte(h2_hash).into_iter();
+
+ RawIterHashInner {
+ bucket_mask: table.bucket_mask,
+ ctrl: table.ctrl,
+ h2_hash,
+ probe_seq,
+ group,
+ bitmask,
}
}
}
-impl<'a, T, A: Allocator + Clone> Iterator for RawIterHash<'a, T, A> {
+impl<T> Iterator for RawIterHash<T> {
type Item = Bucket<T>;
fn next(&mut self) -> Option<Bucket<T>> {
unsafe {
match self.inner.next() {
- Some(index) => Some(self.inner.table.bucket(index)),
+ Some(index) => {
+ // Can't use `RawTable::bucket` here as we don't have
+ // an actual `RawTable` reference to use.
+ debug_assert!(index <= self.inner.bucket_mask);
+ let bucket = Bucket::from_base_index(self.inner.ctrl.cast(), index);
+ Some(bucket)
+ }
None => None,
}
}
}
}
-impl<'a, A: Allocator + Clone> Iterator for RawIterHashInner<'a, A> {
+impl Iterator for RawIterHashInner {
type Item = usize;
fn next(&mut self) -> Option<Self::Item> {
unsafe {
loop {
if let Some(bit) = self.bitmask.next() {
- let index = (self.probe_seq.pos + bit) & self.table.bucket_mask;
+ let index = (self.probe_seq.pos + bit) & self.bucket_mask;
return Some(index);
}
if likely(self.group.match_empty().any_bit_set()) {
return None;
}
- self.probe_seq.move_next(self.table.bucket_mask);
- self.group = Group::load(self.table.ctrl(self.probe_seq.pos));
+ self.probe_seq.move_next(self.bucket_mask);
+
+ // Can't use `RawTableInner::ctrl` here as we don't have
+ // an actual `RawTableInner` reference to use.
+ let index = self.probe_seq.pos;
+ debug_assert!(index < self.bucket_mask + 1 + Group::WIDTH);
+ let group_ctrl = self.ctrl.as_ptr().add(index);
+
+ self.group = Group::load(group_ctrl);
self.bitmask = self.group.match_byte(self.h2_hash).into_iter();
}
}
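
The hunks above untie `RawIterHash` from the table's lifetime by caching `bucket_mask` and `ctrl` directly, while the probing itself is unchanged. As a reading aid, here is a scalar model of the loop in `RawIterHashInner::next` (the group width, helper names, and plain slice of control bytes are simplifications for exposition; the real table loads SIMD groups from a mirrored control array):

```rust
// Illustrative model of SwissTable probing: start at the position derived
// from the hash, scan one group of control bytes at a time, report every
// index whose byte equals the 7-bit `h2` tag, and stop at the first group
// containing an EMPTY byte.
const GROUP_WIDTH: usize = 8;
const EMPTY: u8 = 0b1111_1111;

fn probe_matches(ctrl: &[u8], bucket_mask: usize, hash: u64, h2: u8) -> Vec<usize> {
    let buckets = bucket_mask + 1;
    let mut pos = (hash as usize) & bucket_mask;
    let mut stride = 0;
    let mut found = Vec::new();
    loop {
        let mut group_has_empty = false;
        for bit in 0..GROUP_WIDTH {
            let index = (pos + bit) & bucket_mask;
            let byte = ctrl[(pos + bit) % buckets]; // simplified: wrap instead of mirroring
            if byte == h2 {
                found.push(index);
            }
            group_has_empty |= byte == EMPTY;
        }
        if group_has_empty {
            return found; // an EMPTY byte ends the probe sequence
        }
        // Triangular probing, as in `ProbeSeq::move_next`.
        stride += GROUP_WIDTH;
        pos = (pos + stride) & bucket_mask;
    }
}

fn main() {
    // 8 buckets: tag 0x21 stored at indices 1 and 5, the rest EMPTY or other tags.
    let ctrl = [EMPTY, 0x21, 0x07, EMPTY, EMPTY, 0x21, EMPTY, EMPTY];
    let hits = probe_matches(&ctrl, ctrl.len() - 1, 0, 0x21);
    assert_eq!(hits, vec![1, 5]);
}
```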
diff --git a/vendor/hashbrown/src/raw/neon.rs b/vendor/hashbrown/src/raw/neon.rs
new file mode 100644
index 000000000..44e82d57d
--- /dev/null
+++ b/vendor/hashbrown/src/raw/neon.rs
@@ -0,0 +1,124 @@
+use super::bitmask::BitMask;
+use super::EMPTY;
+use core::arch::aarch64 as neon;
+use core::mem;
+use core::num::NonZeroU64;
+
+pub(crate) type BitMaskWord = u64;
+pub(crate) type NonZeroBitMaskWord = NonZeroU64;
+pub(crate) const BITMASK_STRIDE: usize = 8;
+pub(crate) const BITMASK_MASK: BitMaskWord = !0;
+pub(crate) const BITMASK_ITER_MASK: BitMaskWord = 0x8080_8080_8080_8080;
+
+/// Abstraction over a group of control bytes which can be scanned in
+/// parallel.
+///
+/// This implementation uses a 64-bit NEON value.
+#[derive(Copy, Clone)]
+pub(crate) struct Group(neon::uint8x8_t);
+
+#[allow(clippy::use_self)]
+impl Group {
+ /// Number of bytes in the group.
+ pub(crate) const WIDTH: usize = mem::size_of::<Self>();
+
+ /// Returns a full group of empty bytes, suitable for use as the initial
+ /// value for an empty hash table.
+ ///
+ /// This is guaranteed to be aligned to the group size.
+ #[inline]
+ pub(crate) const fn static_empty() -> &'static [u8; Group::WIDTH] {
+ #[repr(C)]
+ struct AlignedBytes {
+ _align: [Group; 0],
+ bytes: [u8; Group::WIDTH],
+ }
+ const ALIGNED_BYTES: AlignedBytes = AlignedBytes {
+ _align: [],
+ bytes: [EMPTY; Group::WIDTH],
+ };
+ &ALIGNED_BYTES.bytes
+ }
+
+ /// Loads a group of bytes starting at the given address.
+ #[inline]
+ #[allow(clippy::cast_ptr_alignment)] // unaligned load
+ pub(crate) unsafe fn load(ptr: *const u8) -> Self {
+ Group(neon::vld1_u8(ptr))
+ }
+
+ /// Loads a group of bytes starting at the given address, which must be
+ /// aligned to `mem::align_of::<Group>()`.
+ #[inline]
+ #[allow(clippy::cast_ptr_alignment)]
+ pub(crate) unsafe fn load_aligned(ptr: *const u8) -> Self {
+ // FIXME: use align_offset once it stabilizes
+ debug_assert_eq!(ptr as usize & (mem::align_of::<Self>() - 1), 0);
+ Group(neon::vld1_u8(ptr))
+ }
+
+ /// Stores the group of bytes to the given address, which must be
+ /// aligned to `mem::align_of::<Group>()`.
+ #[inline]
+ #[allow(clippy::cast_ptr_alignment)]
+ pub(crate) unsafe fn store_aligned(self, ptr: *mut u8) {
+ // FIXME: use align_offset once it stabilizes
+ debug_assert_eq!(ptr as usize & (mem::align_of::<Self>() - 1), 0);
+ neon::vst1_u8(ptr, self.0);
+ }
+
+ /// Returns a `BitMask` indicating all bytes in the group which *may*
+ /// have the given value.
+ #[inline]
+ pub(crate) fn match_byte(self, byte: u8) -> BitMask {
+ unsafe {
+ let cmp = neon::vceq_u8(self.0, neon::vdup_n_u8(byte));
+ BitMask(neon::vget_lane_u64(neon::vreinterpret_u64_u8(cmp), 0))
+ }
+ }
+
+ /// Returns a `BitMask` indicating all bytes in the group which are
+ /// `EMPTY`.
+ #[inline]
+ pub(crate) fn match_empty(self) -> BitMask {
+ self.match_byte(EMPTY)
+ }
+
+ /// Returns a `BitMask` indicating all bytes in the group which are
+ /// `EMPTY` or `DELETED`.
+ #[inline]
+ pub(crate) fn match_empty_or_deleted(self) -> BitMask {
+ unsafe {
+ let cmp = neon::vcltz_s8(neon::vreinterpret_s8_u8(self.0));
+ BitMask(neon::vget_lane_u64(neon::vreinterpret_u64_u8(cmp), 0))
+ }
+ }
+
+ /// Returns a `BitMask` indicating all bytes in the group which are full.
+ #[inline]
+ pub(crate) fn match_full(self) -> BitMask {
+ unsafe {
+ let cmp = neon::vcgez_s8(neon::vreinterpret_s8_u8(self.0));
+ BitMask(neon::vget_lane_u64(neon::vreinterpret_u64_u8(cmp), 0))
+ }
+ }
+
+ /// Performs the following transformation on all bytes in the group:
+ /// - `EMPTY => EMPTY`
+ /// - `DELETED => EMPTY`
+ /// - `FULL => DELETED`
+ #[inline]
+ pub(crate) fn convert_special_to_empty_and_full_to_deleted(self) -> Self {
+ // Map high_bit = 1 (EMPTY or DELETED) to 1111_1111
+ // and high_bit = 0 (FULL) to 1000_0000
+ //
+ // Here's this logic expanded to concrete values:
+ // let special = 0 > byte = 1111_1111 (true) or 0000_0000 (false)
+ // 1111_1111 | 1000_0000 = 1111_1111
+ // 0000_0000 | 1000_0000 = 1000_0000
+ unsafe {
+ let special = neon::vcltz_s8(neon::vreinterpret_s8_u8(self.0));
+ Group(neon::vorr_u8(special, neon::vdup_n_u8(0x80)))
+ }
+ }
+}
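
In the new NEON backend a group is eight control bytes in a `uint8x8_t`: `vceq_u8` turns every matching byte into an all-ones lane, and the resulting `u64` is consumed with `BITMASK_STRIDE = 8` and `BITMASK_ITER_MASK = 0x8080_8080_8080_8080`. The scalar model below reproduces that mask shape without intrinsics (illustrative only, not the crate's code):

```rust
// Each matching control byte becomes an all-ones (0xFF) lane in a u64,
// mirroring what `match_byte` above produces via vceq_u8 + vreinterpret.
fn match_byte_scalar(group: [u8; 8], byte: u8) -> u64 {
    let mut mask = 0u64;
    for (i, &b) in group.iter().enumerate() {
        if b == byte {
            mask |= 0xFFu64 << (8 * i);
        }
    }
    mask
}

// Keep one bit per lane (BITMASK_ITER_MASK), then recover lane indices by
// dividing the bit position by the stride of 8.
fn lane_indices(mut mask: u64) -> Vec<usize> {
    mask &= 0x8080_8080_8080_8080;
    let mut out = Vec::new();
    while mask != 0 {
        out.push((mask.trailing_zeros() / 8) as usize);
        mask &= mask - 1; // clear the lowest set bit
    }
    out
}

fn main() {
    let group = [0x11, 0x7f, 0x11, 0xff, 0x11, 0x00, 0x23, 0x11];
    let mask = match_byte_scalar(group, 0x11);
    assert_eq!(lane_indices(mask), vec![0, 2, 4, 7]);
}
```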
diff --git a/vendor/hashbrown/src/raw/sse2.rs b/vendor/hashbrown/src/raw/sse2.rs
index a0bf6da80..956ba5d26 100644
--- a/vendor/hashbrown/src/raw/sse2.rs
+++ b/vendor/hashbrown/src/raw/sse2.rs
@@ -1,28 +1,31 @@
use super::bitmask::BitMask;
use super::EMPTY;
use core::mem;
+use core::num::NonZeroU16;
#[cfg(target_arch = "x86")]
use core::arch::x86;
#[cfg(target_arch = "x86_64")]
use core::arch::x86_64 as x86;
-pub type BitMaskWord = u16;
-pub const BITMASK_STRIDE: usize = 1;
-pub const BITMASK_MASK: BitMaskWord = 0xffff;
+pub(crate) type BitMaskWord = u16;
+pub(crate) type NonZeroBitMaskWord = NonZeroU16;
+pub(crate) const BITMASK_STRIDE: usize = 1;
+pub(crate) const BITMASK_MASK: BitMaskWord = 0xffff;
+pub(crate) const BITMASK_ITER_MASK: BitMaskWord = !0;
/// Abstraction over a group of control bytes which can be scanned in
/// parallel.
///
/// This implementation uses a 128-bit SSE value.
#[derive(Copy, Clone)]
-pub struct Group(x86::__m128i);
+pub(crate) struct Group(x86::__m128i);
// FIXME: https://github.com/rust-lang/rust-clippy/issues/3859
#[allow(clippy::use_self)]
impl Group {
/// Number of bytes in the group.
- pub const WIDTH: usize = mem::size_of::<Self>();
+ pub(crate) const WIDTH: usize = mem::size_of::<Self>();
/// Returns a full group of empty bytes, suitable for use as the initial
/// value for an empty hash table.
@@ -30,7 +33,7 @@ impl Group {
/// This is guaranteed to be aligned to the group size.
#[inline]
#[allow(clippy::items_after_statements)]
- pub const fn static_empty() -> &'static [u8; Group::WIDTH] {
+ pub(crate) const fn static_empty() -> &'static [u8; Group::WIDTH] {
#[repr(C)]
struct AlignedBytes {
_align: [Group; 0],
@@ -46,7 +49,7 @@ impl Group {
/// Loads a group of bytes starting at the given address.
#[inline]
#[allow(clippy::cast_ptr_alignment)] // unaligned load
- pub unsafe fn load(ptr: *const u8) -> Self {
+ pub(crate) unsafe fn load(ptr: *const u8) -> Self {
Group(x86::_mm_loadu_si128(ptr.cast()))
}
@@ -54,7 +57,7 @@ impl Group {
/// aligned to `mem::align_of::<Group>()`.
#[inline]
#[allow(clippy::cast_ptr_alignment)]
- pub unsafe fn load_aligned(ptr: *const u8) -> Self {
+ pub(crate) unsafe fn load_aligned(ptr: *const u8) -> Self {
// FIXME: use align_offset once it stabilizes
debug_assert_eq!(ptr as usize & (mem::align_of::<Self>() - 1), 0);
Group(x86::_mm_load_si128(ptr.cast()))
@@ -64,7 +67,7 @@ impl Group {
/// aligned to `mem::align_of::<Group>()`.
#[inline]
#[allow(clippy::cast_ptr_alignment)]
- pub unsafe fn store_aligned(self, ptr: *mut u8) {
+ pub(crate) unsafe fn store_aligned(self, ptr: *mut u8) {
// FIXME: use align_offset once it stabilizes
debug_assert_eq!(ptr as usize & (mem::align_of::<Self>() - 1), 0);
x86::_mm_store_si128(ptr.cast(), self.0);
@@ -73,7 +76,7 @@ impl Group {
/// Returns a `BitMask` indicating all bytes in the group which have
/// the given value.
#[inline]
- pub fn match_byte(self, byte: u8) -> BitMask {
+ pub(crate) fn match_byte(self, byte: u8) -> BitMask {
#[allow(
clippy::cast_possible_wrap, // byte: u8 as i8
// byte: i32 as u16
@@ -91,14 +94,14 @@ impl Group {
/// Returns a `BitMask` indicating all bytes in the group which are
/// `EMPTY`.
#[inline]
- pub fn match_empty(self) -> BitMask {
+ pub(crate) fn match_empty(self) -> BitMask {
self.match_byte(EMPTY)
}
/// Returns a `BitMask` indicating all bytes in the group which are
/// `EMPTY` or `DELETED`.
#[inline]
- pub fn match_empty_or_deleted(self) -> BitMask {
+ pub(crate) fn match_empty_or_deleted(self) -> BitMask {
#[allow(
// byte: i32 as u16
// note: _mm_movemask_epi8 returns a 16-bit mask in a i32, the
@@ -114,7 +117,7 @@ impl Group {
/// Returns a `BitMask` indicating all bytes in the group which are full.
#[inline]
- pub fn match_full(&self) -> BitMask {
+ pub(crate) fn match_full(&self) -> BitMask {
self.match_empty_or_deleted().invert()
}
@@ -123,7 +126,7 @@ impl Group {
/// - `DELETED => EMPTY`
/// - `FULL => DELETED`
#[inline]
- pub fn convert_special_to_empty_and_full_to_deleted(self) -> Self {
+ pub(crate) fn convert_special_to_empty_and_full_to_deleted(self) -> Self {
// Map high_bit = 1 (EMPTY or DELETED) to 1111_1111
// and high_bit = 0 (FULL) to 1000_0000
//
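
The mapping spelled out in the comment above is the same for every backend: special bytes (high bit set) become `EMPTY`, full bytes become `DELETED`. A per-byte scalar version, using hashbrown's control-byte values `EMPTY = 0xFF` and `DELETED = 0x80` (stated from the crate's conventions rather than this hunk):

```rust
const EMPTY: u8 = 0b1111_1111;
const DELETED: u8 = 0b1000_0000;

// EMPTY => EMPTY, DELETED => EMPTY, FULL => DELETED.
fn convert_byte(ctrl: u8) -> u8 {
    if (ctrl & 0x80) != 0 { EMPTY } else { DELETED }
}

fn main() {
    assert_eq!(convert_byte(EMPTY), EMPTY);
    assert_eq!(convert_byte(DELETED), EMPTY);
    assert_eq!(convert_byte(0x3a), DELETED); // a FULL byte carrying an h2 tag

    // Equivalently, as the SIMD code computes it: special-mask OR 0x80.
    let special: u8 = if (0x3au8 & 0x80) != 0 { 0xFF } else { 0x00 };
    assert_eq!(special | 0x80, DELETED);
}
```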
diff --git a/vendor/hashbrown/src/rustc_entry.rs b/vendor/hashbrown/src/rustc_entry.rs
index 2e8459526..89447d27d 100644
--- a/vendor/hashbrown/src/rustc_entry.rs
+++ b/vendor/hashbrown/src/rustc_entry.rs
@@ -1,5 +1,5 @@
use self::RustcEntry::*;
-use crate::map::{make_insert_hash, Drain, HashMap, IntoIter, Iter, IterMut};
+use crate::map::{make_hash, Drain, HashMap, IntoIter, Iter, IterMut};
use crate::raw::{Allocator, Bucket, Global, RawTable};
use core::fmt::{self, Debug};
use core::hash::{BuildHasher, Hash};
@@ -32,7 +32,7 @@ where
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn rustc_entry(&mut self, key: K) -> RustcEntry<'_, K, V, A> {
- let hash = make_insert_hash(&self.hash_builder, &key);
+ let hash = make_hash(&self.hash_builder, &key);
if let Some(elem) = self.table.find(hash, |q| q.0.eq(&key)) {
RustcEntry::Occupied(RustcOccupiedEntry {
key: Some(key),
@@ -330,7 +330,7 @@ impl<'a, K, V, A: Allocator + Clone> RustcOccupiedEntry<'a, K, V, A> {
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn remove_entry(self) -> (K, V) {
- unsafe { self.table.remove(self.elem) }
+ unsafe { self.table.remove(self.elem).0 }
}
/// Gets a reference to the value in the entry.
diff --git a/vendor/hashbrown/src/set.rs b/vendor/hashbrown/src/set.rs
index a8f24de80..52f6fdaf2 100644
--- a/vendor/hashbrown/src/set.rs
+++ b/vendor/hashbrown/src/set.rs
@@ -5,10 +5,9 @@ use alloc::borrow::ToOwned;
use core::fmt;
use core::hash::{BuildHasher, Hash};
use core::iter::{Chain, FromIterator, FusedIterator};
-use core::mem;
use core::ops::{BitAnd, BitOr, BitXor, Sub};
-use super::map::{self, ConsumeAllOnDrop, DefaultHashBuilder, DrainFilterInner, HashMap, Keys};
+use super::map::{self, DefaultHashBuilder, ExtractIfInner, HashMap, Keys};
use crate::raw::{Allocator, Global};
// Future Optimization (FIXME!)
@@ -380,8 +379,9 @@ impl<T, S, A: Allocator + Clone> HashSet<T, S, A> {
/// In other words, move all elements `e` such that `f(&e)` returns `true` out
/// into another iterator.
///
- /// When the returned DrainedFilter is dropped, any remaining elements that satisfy
- /// the predicate are dropped from the set.
+ /// If the returned `ExtractIf` is not exhausted, e.g. because it is dropped without iterating
+ /// or the iteration short-circuits, then the remaining elements will be retained.
+ /// Use [`retain()`] with a negated predicate if you do not need the returned iterator.
///
/// # Examples
///
@@ -389,7 +389,7 @@ impl<T, S, A: Allocator + Clone> HashSet<T, S, A> {
/// use hashbrown::HashSet;
///
/// let mut set: HashSet<i32> = (0..8).collect();
- /// let drained: HashSet<i32> = set.drain_filter(|v| v % 2 == 0).collect();
+ /// let drained: HashSet<i32> = set.extract_if(|v| v % 2 == 0).collect();
///
/// let mut evens = drained.into_iter().collect::<Vec<_>>();
/// let mut odds = set.into_iter().collect::<Vec<_>>();
@@ -400,13 +400,13 @@ impl<T, S, A: Allocator + Clone> HashSet<T, S, A> {
/// assert_eq!(odds, vec![1, 3, 5, 7]);
/// ```
#[cfg_attr(feature = "inline-more", inline)]
- pub fn drain_filter<F>(&mut self, f: F) -> DrainFilter<'_, T, F, A>
+ pub fn extract_if<F>(&mut self, f: F) -> ExtractIf<'_, T, F, A>
where
F: FnMut(&T) -> bool,
{
- DrainFilter {
+ ExtractIf {
f,
- inner: DrainFilterInner {
+ inner: ExtractIfInner {
iter: unsafe { self.map.table.iter() },
table: &mut self.map.table,
},
@@ -1221,8 +1221,10 @@ where
None => None,
}
}
+}
- /// Returns a mutable reference to the [`RawTable`] used underneath [`HashSet`].
+impl<T, S, A: Allocator + Clone> HashSet<T, S, A> {
+ /// Returns a reference to the [`RawTable`] used underneath [`HashSet`].
/// This function is only available if the `raw` feature of the crate is enabled.
///
/// # Note
@@ -1238,9 +1240,29 @@ where
/// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
#[cfg(feature = "raw")]
#[cfg_attr(feature = "inline-more", inline)]
- pub fn raw_table(&mut self) -> &mut RawTable<(T, ()), A> {
+ pub fn raw_table(&self) -> &RawTable<(T, ()), A> {
self.map.raw_table()
}
+
+ /// Returns a mutable reference to the [`RawTable`] used underneath [`HashSet`].
+ /// This function is only available if the `raw` feature of the crate is enabled.
+ ///
+ /// # Note
+ ///
+ /// Calling this function is safe, but using the raw hash table API may require
+ /// unsafe functions or blocks.
+ ///
+ /// `RawTable` API gives the lowest level of control under the set that can be useful
+ /// for extending the HashSet's API, but may lead to *[undefined behavior]*.
+ ///
+ /// [`HashSet`]: struct.HashSet.html
+ /// [`RawTable`]: crate::raw::RawTable
+ /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ #[cfg(feature = "raw")]
+ #[cfg_attr(feature = "inline-more", inline)]
+ pub fn raw_table_mut(&mut self) -> &mut RawTable<(T, ()), A> {
+ self.map.raw_table_mut()
+ }
}
impl<T, S, A> PartialEq for HashSet<T, S, A>
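
With the split above, read-only access to the underlying table no longer requires `&mut self`. A minimal usage sketch, assuming the crate is built with the optional `raw` feature that gates both methods:

```rust
use hashbrown::HashSet;

fn main() {
    let mut set: HashSet<i32> = (1..=3).collect();

    // Shared access: inspect the underlying RawTable through `&self`.
    assert_eq!(set.raw_table().len(), 3);

    // Mutable access goes through the new `raw_table_mut`, e.g. to clear
    // the table directly.
    set.raw_table_mut().clear();
    assert!(set.is_empty());
}
```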
@@ -1547,17 +1569,18 @@ pub struct Drain<'a, K, A: Allocator + Clone = Global> {
/// A draining iterator over entries of a `HashSet` which don't satisfy the predicate `f`.
///
-/// This `struct` is created by the [`drain_filter`] method on [`HashSet`]. See its
+/// This `struct` is created by the [`extract_if`] method on [`HashSet`]. See its
/// documentation for more.
///
-/// [`drain_filter`]: struct.HashSet.html#method.drain_filter
+/// [`extract_if`]: struct.HashSet.html#method.extract_if
/// [`HashSet`]: struct.HashSet.html
-pub struct DrainFilter<'a, K, F, A: Allocator + Clone = Global>
+#[must_use = "Iterators are lazy unless consumed"]
+pub struct ExtractIf<'a, K, F, A: Allocator + Clone = Global>
where
F: FnMut(&K) -> bool,
{
f: F,
- inner: DrainFilterInner<'a, K, (), A>,
+ inner: ExtractIfInner<'a, K, (), A>,
}
/// A lazy iterator producing elements in the intersection of `HashSet`s.
@@ -1748,21 +1771,7 @@ impl<K: fmt::Debug, A: Allocator + Clone> fmt::Debug for Drain<'_, K, A> {
}
}
-impl<'a, K, F, A: Allocator + Clone> Drop for DrainFilter<'a, K, F, A>
-where
- F: FnMut(&K) -> bool,
-{
- #[cfg_attr(feature = "inline-more", inline)]
- fn drop(&mut self) {
- while let Some(item) = self.next() {
- let guard = ConsumeAllOnDrop(self);
- drop(item);
- mem::forget(guard);
- }
- }
-}
-
-impl<K, F, A: Allocator + Clone> Iterator for DrainFilter<'_, K, F, A>
+impl<K, F, A: Allocator + Clone> Iterator for ExtractIf<'_, K, F, A>
where
F: FnMut(&K) -> bool,
{
@@ -1781,10 +1790,7 @@ where
}
}
-impl<K, F, A: Allocator + Clone> FusedIterator for DrainFilter<'_, K, F, A> where
- F: FnMut(&K) -> bool
-{
-}
+impl<K, F, A: Allocator + Clone> FusedIterator for ExtractIf<'_, K, F, A> where F: FnMut(&K) -> bool {}
impl<T, S, A: Allocator + Clone> Clone for Intersection<'_, T, S, A> {
#[cfg_attr(feature = "inline-more", inline)]
@@ -2712,10 +2718,10 @@ mod test_set {
set.insert(1);
set.insert(2);
- let set_str = format!("{:?}", set);
+ let set_str = format!("{set:?}");
assert!(set_str == "{1, 2}" || set_str == "{2, 1}");
- assert_eq!(format!("{:?}", empty), "{}");
+ assert_eq!(format!("{empty:?}"), "{}");
}
#[test]
@@ -2790,6 +2796,7 @@ mod test_set {
}
#[test]
+ #[allow(clippy::needless_borrow)]
fn test_extend_ref() {
let mut a = HashSet::new();
a.insert(1);
@@ -2829,10 +2836,10 @@ mod test_set {
}
#[test]
- fn test_drain_filter() {
+ fn test_extract_if() {
{
let mut set: HashSet<i32> = (0..8).collect();
- let drained = set.drain_filter(|&k| k % 2 == 0);
+ let drained = set.extract_if(|&k| k % 2 == 0);
let mut out = drained.collect::<Vec<_>>();
out.sort_unstable();
assert_eq!(vec![0, 2, 4, 6], out);
@@ -2840,7 +2847,7 @@ mod test_set {
}
{
let mut set: HashSet<i32> = (0..8).collect();
- drop(set.drain_filter(|&k| k % 2 == 0));
+ set.extract_if(|&k| k % 2 == 0).for_each(drop);
assert_eq!(set.len(), 4, "Removes non-matching items on drop");
}
}
@@ -2886,4 +2893,11 @@ mod test_set {
set.insert(i);
}
}
+
+ #[test]
+ fn collect() {
+ // At the time of writing, this hits the ZST case in from_base_index
+ // (and without the `map`, it does not).
+ let mut _set: HashSet<_> = (0..3).map(|_| ()).collect();
+ }
}
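
For reference, the observable behavior behind the new `collect` test is simply that a set of zero-sized values collapses to a single element; reaching the ZST branch of `Bucket::from_base_index` is an internal detail. A minimal sketch:

```rust
use hashbrown::HashSet;

fn main() {
    // `()` is zero-sized: all three mapped items are the same value, so the
    // set ends up with exactly one element and no element storage is needed.
    let set: HashSet<()> = (0..3).map(|_| ()).collect();
    assert_eq!(set.len(), 1);
}
```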