author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:19:03 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:19:03 +0000
commit     64d98f8ee037282c35007b64c2649055c56af1db (patch)
tree       5492bcf97fce41ee1c0b1cc2add283f3e66cdab0 /vendor/hashbrown/src
parent     Adding debian version 1.67.1+dfsg1-1. (diff)
Merging upstream version 1.68.2+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'vendor/hashbrown/src')
-rw-r--r--  vendor/hashbrown/src/lib.rs          33
-rw-r--r--  vendor/hashbrown/src/map.rs         396
-rw-r--r--  vendor/hashbrown/src/raw/generic.rs   2
-rw-r--r--  vendor/hashbrown/src/raw/mod.rs     160
-rw-r--r--  vendor/hashbrown/src/scopeguard.rs   14
-rw-r--r--  vendor/hashbrown/src/set.rs         179
6 files changed, 531 insertions(+), 253 deletions(-)
diff --git a/vendor/hashbrown/src/lib.rs b/vendor/hashbrown/src/lib.rs
index bc1c97130..e43165dd6 100644
--- a/vendor/hashbrown/src/lib.rs
+++ b/vendor/hashbrown/src/lib.rs
@@ -117,6 +117,39 @@ pub mod hash_set {
pub use crate::map::HashMap;
pub use crate::set::HashSet;
+/// Key equivalence trait.
+///
+/// This trait defines the function used to compare the input value with the
+/// map keys (or set values) during a lookup operation such as [`HashMap::get`]
+/// or [`HashSet::contains`].
+/// It is provided with a blanket implementation based on the
+/// [`Borrow`](core::borrow::Borrow) trait.
+///
+/// # Correctness
+///
+/// Equivalent values must hash to the same value.
+pub trait Equivalent<K: ?Sized> {
+ /// Checks if this value is equivalent to the given key.
+ ///
+ /// Returns `true` if both values are equivalent, and `false` otherwise.
+ ///
+ /// # Correctness
+ ///
+ /// When this function returns `true`, both `self` and `key` must hash to
+ /// the same value.
+ fn equivalent(&self, key: &K) -> bool;
+}
+
+impl<Q: ?Sized, K: ?Sized> Equivalent<K> for Q
+where
+ Q: Eq,
+ K: core::borrow::Borrow<Q>,
+{
+ fn equivalent(&self, key: &K) -> bool {
+ self == key.borrow()
+ }
+}
+
/// The error type for `try_reserve` methods.
#[derive(Clone, PartialEq, Eq, Debug)]
pub enum TryReserveError {
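A minimal usage sketch of the new trait (hypothetical `OwnedPair`/`PairRef` types, not part of this diff): a borrowed query type can implement `Hash` plus `Equivalent<K>` directly, so lookups need neither an owned key nor a `Borrow` impl.

use core::hash::{Hash, Hasher};
use hashbrown::{Equivalent, HashMap};

#[derive(PartialEq, Eq, Hash)]
struct OwnedPair(String, String);

// Borrowed view used only for lookups. It deliberately has no `Eq` impl, so
// the blanket `Borrow`-based `Equivalent` impl cannot overlap with ours.
struct PairRef<'a>(&'a str, &'a str);

impl Hash for PairRef<'_> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        // Must hash exactly like `OwnedPair`: field by field, as str.
        self.0.hash(state);
        self.1.hash(state);
    }
}

impl Equivalent<OwnedPair> for PairRef<'_> {
    fn equivalent(&self, key: &OwnedPair) -> bool {
        self.0 == key.0 && self.1 == key.1
    }
}

fn lookup_without_allocating() {
    let mut map: HashMap<OwnedPair, u32> = HashMap::new();
    map.insert(OwnedPair("a".into(), "b".into()), 1);
    // `get` now only requires `Q: Hash + Equivalent<K>`.
    assert_eq!(map.get(&PairRef("a", "b")), Some(&1));
}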
diff --git a/vendor/hashbrown/src/map.rs b/vendor/hashbrown/src/map.rs
index a5d3ccb97..5049aa2b5 100644
--- a/vendor/hashbrown/src/map.rs
+++ b/vendor/hashbrown/src/map.rs
@@ -1,5 +1,5 @@
use crate::raw::{Allocator, Bucket, Global, RawDrain, RawIntoIter, RawIter, RawTable};
-use crate::TryReserveError;
+use crate::{Equivalent, TryReserveError};
use core::borrow::Borrow;
use core::fmt::{self, Debug};
use core::hash::{BuildHasher, Hash};
@@ -10,7 +10,7 @@ use core::ops::Index;
/// Default hasher for `HashMap`.
#[cfg(feature = "ahash")]
-pub type DefaultHashBuilder = ahash::RandomState;
+pub type DefaultHashBuilder = core::hash::BuildHasherDefault<ahash::AHasher>;
/// Dummy default hasher for `HashMap`.
#[cfg(not(feature = "ahash"))]
@@ -209,13 +209,12 @@ impl<K: Clone, V: Clone, S: Clone, A: Allocator + Clone> Clone for HashMap<K, V,
/// Ensures that a single closure type is used across uses of this, which, in turn prevents multiple
/// instances of any functions like RawTable::reserve from being generated.
#[cfg_attr(feature = "inline-more", inline)]
-pub(crate) fn make_hasher<K, Q, V, S>(hash_builder: &S) -> impl Fn(&(Q, V)) -> u64 + '_
+pub(crate) fn make_hasher<Q, V, S>(hash_builder: &S) -> impl Fn(&(Q, V)) -> u64 + '_
where
- K: Borrow<Q>,
Q: Hash,
S: BuildHasher,
{
- move |val| make_hash::<K, Q, S>(hash_builder, &val.0)
+ move |val| make_hash::<Q, S>(hash_builder, &val.0)
}
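A standalone sketch of the pattern described above (illustrative names, only `core::hash` assumed): funneling every call site through one hasher-building helper hands the same closure type to generic code such as `RawTable::reserve`, so that code is monomorphized once per key/hasher pair instead of once per call site.

use core::hash::{BuildHasher, Hash, Hasher};

// One helper builds the hashing closure, so all call sites share its type.
fn make_hasher<T: Hash, S: BuildHasher>(hash_builder: &S) -> impl Fn(&T) -> u64 + '_ {
    move |val| {
        let mut state = hash_builder.build_hasher();
        val.hash(&mut state);
        state.finish()
    }
}

// Stand-in for a generic consumer like `RawTable::reserve`: it is
// instantiated once per closure type, not once per caller.
fn sum_of_hashes<T>(items: &[T], hasher: impl Fn(&T) -> u64) -> u64 {
    items.iter().map(|item| hasher(item)).fold(0, u64::wrapping_add)
}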
/// Ensures that a single closure type is used across uses of this, which, in turn prevents multiple
@@ -223,10 +222,9 @@ where
#[cfg_attr(feature = "inline-more", inline)]
fn equivalent_key<Q, K, V>(k: &Q) -> impl Fn(&(K, V)) -> bool + '_
where
- K: Borrow<Q>,
- Q: ?Sized + Eq,
+ Q: ?Sized + Equivalent<K>,
{
- move |x| k.eq(x.0.borrow())
+ move |x| k.equivalent(&x.0)
}
/// Ensures that a single closure type is used across uses of this, which, in turn prevents multiple
@@ -234,17 +232,15 @@ where
#[cfg_attr(feature = "inline-more", inline)]
fn equivalent<Q, K>(k: &Q) -> impl Fn(&K) -> bool + '_
where
- K: Borrow<Q>,
- Q: ?Sized + Eq,
+ Q: ?Sized + Equivalent<K>,
{
- move |x| k.eq(x.borrow())
+ move |x| k.equivalent(x)
}
#[cfg(not(feature = "nightly"))]
#[cfg_attr(feature = "inline-more", inline)]
-pub(crate) fn make_hash<K, Q, S>(hash_builder: &S, val: &Q) -> u64
+pub(crate) fn make_hash<Q, S>(hash_builder: &S, val: &Q) -> u64
where
- K: Borrow<Q>,
Q: Hash + ?Sized,
S: BuildHasher,
{
@@ -256,9 +252,8 @@ where
#[cfg(feature = "nightly")]
#[cfg_attr(feature = "inline-more", inline)]
-pub(crate) fn make_hash<K, Q, S>(hash_builder: &S, val: &Q) -> u64
+pub(crate) fn make_hash<Q, S>(hash_builder: &S, val: &Q) -> u64
where
- K: Borrow<Q>,
Q: Hash + ?Sized,
S: BuildHasher,
{
@@ -295,6 +290,18 @@ impl<K, V> HashMap<K, V, DefaultHashBuilder> {
/// The hash map is initially created with a capacity of 0, so it will not allocate until it
/// is first inserted into.
///
+ /// # HashDoS resistance
+ ///
+ /// The `hash_builder` normally uses a fixed key by default, which does
+ /// not allow the `HashMap` to be protected against attacks such as [`HashDoS`].
+ /// Users who require HashDoS resistance should explicitly use
+ /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`]
+ /// as the hasher when creating a [`HashMap`], for example with
+ /// the [`with_hasher`](HashMap::with_hasher) method.
+ ///
+ /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack
+ /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html
+ ///
/// # Examples
///
/// ```
@@ -313,6 +320,18 @@ impl<K, V> HashMap<K, V, DefaultHashBuilder> {
/// The hash map will be able to hold at least `capacity` elements without
/// reallocating. If `capacity` is 0, the hash map will not allocate.
///
+ /// # HashDoS resistance
+ ///
+ /// The `hash_builder` normally uses a fixed key by default, which does
+ /// not allow the `HashMap` to be protected against attacks such as [`HashDoS`].
+ /// Users who require HashDoS resistance should explicitly use
+ /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`]
+ /// as the hasher when creating a [`HashMap`], for example with
+ /// the [`with_capacity_and_hasher`](HashMap::with_capacity_and_hasher) method.
+ ///
+ /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack
+ /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html
+ ///
/// # Examples
///
/// ```
@@ -333,6 +352,48 @@ impl<K, V, A: Allocator + Clone> HashMap<K, V, DefaultHashBuilder, A> {
///
/// The hash map is initially created with a capacity of 0, so it will not allocate until it
/// is first inserted into.
+ ///
+ /// # HashDoS resistance
+ ///
+ /// The `hash_builder` normally uses a fixed key by default, which does
+ /// not allow the `HashMap` to be protected against attacks such as [`HashDoS`].
+ /// Users who require HashDoS resistance should explicitly use
+ /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`]
+ /// as the hasher when creating a [`HashMap`], for example with
+ /// the [`with_hasher_in`](HashMap::with_hasher_in) method.
+ ///
+ /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack
+ /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "bumpalo")]
+ /// # fn test() {
+ /// use hashbrown::{HashMap, BumpWrapper};
+ /// use bumpalo::Bump;
+ ///
+ /// let bump = Bump::new();
+ /// let mut map = HashMap::new_in(BumpWrapper(&bump));
+ ///
+ /// // The created HashMap holds no elements
+ /// assert_eq!(map.len(), 0);
+ ///
+ /// // The created HashMap also doesn't allocate memory
+ /// assert_eq!(map.capacity(), 0);
+ ///
+ /// // Now we insert an element into the created HashMap
+ /// map.insert("One", 1);
+ /// // We can see that the HashMap holds 1 element
+ /// assert_eq!(map.len(), 1);
+ /// // And it also allocates some capacity
+ /// assert!(map.capacity() > 1);
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "bumpalo")]
+ /// # test()
+ /// # }
+ /// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn new_in(alloc: A) -> Self {
Self::with_hasher_in(DefaultHashBuilder::default(), alloc)
@@ -342,6 +403,53 @@ impl<K, V, A: Allocator + Clone> HashMap<K, V, DefaultHashBuilder, A> {
///
/// The hash map will be able to hold at least `capacity` elements without
/// reallocating. If `capacity` is 0, the hash map will not allocate.
+ ///
+ /// # HashDoS resistance
+ ///
+ /// The `hash_builder` normally uses a fixed key by default, which does
+ /// not allow the `HashMap` to be protected against attacks such as [`HashDoS`].
+ /// Users who require HashDoS resistance should explicitly use
+ /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`]
+ /// as the hasher when creating a [`HashMap`], for example with
+ /// the [`with_capacity_and_hasher_in`](HashMap::with_capacity_and_hasher_in) method.
+ ///
+ /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack
+ /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "bumpalo")]
+ /// # fn test() {
+ /// use hashbrown::{HashMap, BumpWrapper};
+ /// use bumpalo::Bump;
+ ///
+ /// let bump = Bump::new();
+ /// let mut map = HashMap::with_capacity_in(5, BumpWrapper(&bump));
+ ///
+ /// // The created HashMap holds no elements
+ /// assert_eq!(map.len(), 0);
+ /// // But it can hold at least 5 elements without reallocating
+ /// let empty_map_capacity = map.capacity();
+ /// assert!(empty_map_capacity >= 5);
+ ///
+ /// // Now we insert 5 elements into the created HashMap
+ /// map.insert("One", 1);
+ /// map.insert("Two", 2);
+ /// map.insert("Three", 3);
+ /// map.insert("Four", 4);
+ /// map.insert("Five", 5);
+ ///
+ /// // We can see that the HashMap holds 5 elements
+ /// assert_eq!(map.len(), 5);
+ /// // But its capacity isn't changed
+ /// assert_eq!(map.capacity(), empty_map_capacity)
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "bumpalo")]
+ /// # test()
+ /// # }
+ /// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
Self::with_capacity_and_hasher_in(capacity, DefaultHashBuilder::default(), alloc)
@@ -355,14 +463,21 @@ impl<K, V, S> HashMap<K, V, S> {
/// The hash map is initially created with a capacity of 0, so it will not
/// allocate until it is first inserted into.
///
- /// Warning: `hash_builder` is normally randomly generated, and
- /// is designed to allow HashMaps to be resistant to attacks that
- /// cause many collisions and very poor performance. Setting it
- /// manually using this function can expose a DoS attack vector.
+ /// # HashDoS resistance
+ ///
+ /// The `hash_builder` normally uses a fixed key by default, which does
+ /// not allow the `HashMap` to be protected against attacks such as [`HashDoS`].
+ /// Users who require HashDoS resistance should explicitly use
+ /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`]
+ /// as the hasher when creating a [`HashMap`].
///
/// The `hash_builder` passed should implement the [`BuildHasher`] trait for
/// the HashMap to be useful, see its documentation for details.
///
+ /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack
+ /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html
+ /// [`BuildHasher`]: https://doc.rust-lang.org/std/hash/trait.BuildHasher.html
+ ///
/// # Examples
///
/// ```
@@ -376,8 +491,6 @@ impl<K, V, S> HashMap<K, V, S> {
///
/// map.insert(1, 2);
/// ```
- ///
- /// [`BuildHasher`]: https://doc.rust-lang.org/std/hash/trait.BuildHasher.html
#[cfg_attr(feature = "inline-more", inline)]
pub const fn with_hasher(hash_builder: S) -> Self {
Self {
@@ -392,14 +505,21 @@ impl<K, V, S> HashMap<K, V, S> {
/// The hash map will be able to hold at least `capacity` elements without
/// reallocating. If `capacity` is 0, the hash map will not allocate.
///
- /// Warning: `hash_builder` is normally randomly generated, and
- /// is designed to allow HashMaps to be resistant to attacks that
- /// cause many collisions and very poor performance. Setting it
- /// manually using this function can expose a DoS attack vector.
+ /// # HashDoS resistance
+ ///
+ /// The `hash_builder` normally uses a fixed key by default, which does
+ /// not allow the `HashMap` to be protected against attacks such as [`HashDoS`].
+ /// Users who require HashDoS resistance should explicitly use
+ /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`]
+ /// as the hasher when creating a [`HashMap`].
///
/// The `hash_builder` passed should implement the [`BuildHasher`] trait for
/// the HashMap to be useful, see its documentation for details.
///
+ /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack
+ /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html
+ /// [`BuildHasher`]: https://doc.rust-lang.org/std/hash/trait.BuildHasher.html
+ ///
/// # Examples
///
/// ```
@@ -413,8 +533,6 @@ impl<K, V, S> HashMap<K, V, S> {
///
/// map.insert(1, 2);
/// ```
- ///
- /// [`BuildHasher`]: https://doc.rust-lang.org/std/hash/trait.BuildHasher.html
#[cfg_attr(feature = "inline-more", inline)]
pub fn with_capacity_and_hasher(capacity: usize, hash_builder: S) -> Self {
Self {
@@ -434,12 +552,19 @@ impl<K, V, S, A: Allocator + Clone> HashMap<K, V, S, A> {
/// Creates an empty `HashMap` which will use the given hash builder to hash
/// keys. It will be allocated with the given allocator.
///
- /// The created map has the default initial capacity.
+ /// The hash map is initially created with a capacity of 0, so it will not allocate until it
+ /// is first inserted into.
+ ///
+ /// # HashDoS resistance
+ ///
+ /// The `hash_builder` normally uses a fixed key by default, which does
+ /// not allow the `HashMap` to be protected against attacks such as [`HashDoS`].
+ /// Users who require HashDoS resistance should explicitly use
+ /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`]
+ /// as the hasher when creating a [`HashMap`].
///
- /// Warning: `hash_builder` is normally randomly generated, and
- /// is designed to allow HashMaps to be resistant to attacks that
- /// cause many collisions and very poor performance. Setting it
- /// manually using this function can expose a DoS attack vector.
+ /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack
+ /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html
///
/// # Examples
///
@@ -452,7 +577,7 @@ impl<K, V, S, A: Allocator + Clone> HashMap<K, V, S, A> {
/// map.insert(1, 2);
/// ```
#[cfg_attr(feature = "inline-more", inline)]
- pub fn with_hasher_in(hash_builder: S, alloc: A) -> Self {
+ pub const fn with_hasher_in(hash_builder: S, alloc: A) -> Self {
Self {
hash_builder,
table: RawTable::new_in(alloc),
@@ -465,10 +590,16 @@ impl<K, V, S, A: Allocator + Clone> HashMap<K, V, S, A> {
/// The hash map will be able to hold at least `capacity` elements without
/// reallocating. If `capacity` is 0, the hash map will not allocate.
///
- /// Warning: `hash_builder` is normally randomly generated, and
- /// is designed to allow HashMaps to be resistant to attacks that
- /// cause many collisions and very poor performance. Setting it
- /// manually using this function can expose a DoS attack vector.
+ /// # HashDoS resistance
+ ///
+ /// The `hash_builder` normally uses a fixed key by default, which does
+ /// not allow the `HashMap` to be protected against attacks such as [`HashDoS`].
+ /// Users who require HashDoS resistance should explicitly use
+ /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`]
+ /// as the hasher when creating a [`HashMap`].
+ ///
+ /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack
+ /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html
///
/// # Examples
///
@@ -810,14 +941,11 @@ impl<K, V, S, A: Allocator + Clone> HashMap<K, V, S, A> {
///
/// let mut map: HashMap<i32, i32> = (0..8).map(|x|(x, x*10)).collect();
/// assert_eq!(map.len(), 8);
- /// let capacity_before_retain = map.capacity();
///
/// map.retain(|&k, _| k % 2 == 0);
///
/// // We can see, that the number of elements inside map is changed.
/// assert_eq!(map.len(), 4);
- /// // But map capacity is equal to old one.
- /// assert_eq!(map.capacity(), capacity_before_retain);
///
/// let mut vec: Vec<(i32, i32)> = map.iter().map(|(&k, &v)| (k, v)).collect();
/// vec.sort_unstable();
@@ -862,7 +990,7 @@ impl<K, V, S, A: Allocator + Clone> HashMap<K, V, S, A> {
/// use hashbrown::HashMap;
///
/// let mut map: HashMap<i32, i32> = (0..8).map(|x| (x, x)).collect();
- /// let capacity_before_drain_filter = map.capacity();
+ ///
/// let drained: HashMap<i32, i32> = map.drain_filter(|k, _v| k % 2 == 0).collect();
///
/// let mut evens = drained.keys().cloned().collect::<Vec<_>>();
@@ -872,8 +1000,6 @@ impl<K, V, S, A: Allocator + Clone> HashMap<K, V, S, A> {
///
/// assert_eq!(evens, vec![0, 2, 4, 6]);
/// assert_eq!(odds, vec![1, 3, 5, 7]);
- /// // Map capacity is equal to old one.
- /// assert_eq!(map.capacity(), capacity_before_drain_filter);
///
/// let mut map: HashMap<i32, i32> = (0..8).map(|x| (x, x)).collect();
///
@@ -992,9 +1118,12 @@ where
///
/// # Panics
///
- /// Panics if the new allocation size overflows [`usize`].
+ /// Panics if the new capacity exceeds [`isize::MAX`] bytes, and [`abort`]s the program
+ /// on allocation error. Use [`try_reserve`](HashMap::try_reserve) instead
+ /// if you want to handle memory allocation failure.
///
- /// [`usize`]: https://doc.rust-lang.org/std/primitive.usize.html
+ /// [`isize::MAX`]: https://doc.rust-lang.org/std/primitive.isize.html
+ /// [`abort`]: https://doc.rust-lang.org/alloc/alloc/fn.handle_alloc_error.html
///
/// # Examples
///
@@ -1012,7 +1141,7 @@ where
#[cfg_attr(feature = "inline-more", inline)]
pub fn reserve(&mut self, additional: usize) {
self.table
- .reserve(additional, make_hasher::<K, _, V, S>(&self.hash_builder));
+ .reserve(additional, make_hasher::<_, V, S>(&self.hash_builder));
}
/// Tries to reserve capacity for at least `additional` more elements to be inserted
@@ -1062,7 +1191,7 @@ where
#[cfg_attr(feature = "inline-more", inline)]
pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
self.table
- .try_reserve(additional, make_hasher::<K, _, V, S>(&self.hash_builder))
+ .try_reserve(additional, make_hasher::<_, V, S>(&self.hash_builder))
}
/// Shrinks the capacity of the map as much as possible. It will drop
@@ -1084,7 +1213,7 @@ where
#[cfg_attr(feature = "inline-more", inline)]
pub fn shrink_to_fit(&mut self) {
self.table
- .shrink_to(0, make_hasher::<K, _, V, S>(&self.hash_builder));
+ .shrink_to(0, make_hasher::<_, V, S>(&self.hash_builder));
}
/// Shrinks the capacity of the map with a lower limit. It will drop
@@ -1113,7 +1242,7 @@ where
#[cfg_attr(feature = "inline-more", inline)]
pub fn shrink_to(&mut self, min_capacity: usize) {
self.table
- .shrink_to(min_capacity, make_hasher::<K, _, V, S>(&self.hash_builder));
+ .shrink_to(min_capacity, make_hasher::<_, V, S>(&self.hash_builder));
}
/// Gets the given key's corresponding entry in the map for in-place manipulation.
@@ -1174,10 +1303,9 @@ where
#[cfg_attr(feature = "inline-more", inline)]
pub fn entry_ref<'a, 'b, Q: ?Sized>(&'a mut self, key: &'b Q) -> EntryRef<'a, 'b, K, Q, V, S, A>
where
- K: Borrow<Q>,
- Q: Hash + Eq,
+ Q: Hash + Equivalent<K>,
{
- let hash = make_hash::<K, Q, S>(&self.hash_builder, key);
+ let hash = make_hash::<Q, S>(&self.hash_builder, key);
if let Some(elem) = self.table.find(hash, equivalent_key(key)) {
EntryRef::Occupied(OccupiedEntryRef {
hash,
@@ -1216,8 +1344,7 @@ where
#[inline]
pub fn get<Q: ?Sized>(&self, k: &Q) -> Option<&V>
where
- K: Borrow<Q>,
- Q: Hash + Eq,
+ Q: Hash + Equivalent<K>,
{
// Avoid `Option::map` because it bloats LLVM IR.
match self.get_inner(k) {
@@ -1248,8 +1375,7 @@ where
#[inline]
pub fn get_key_value<Q: ?Sized>(&self, k: &Q) -> Option<(&K, &V)>
where
- K: Borrow<Q>,
- Q: Hash + Eq,
+ Q: Hash + Equivalent<K>,
{
// Avoid `Option::map` because it bloats LLVM IR.
match self.get_inner(k) {
@@ -1261,13 +1387,12 @@ where
#[inline]
fn get_inner<Q: ?Sized>(&self, k: &Q) -> Option<&(K, V)>
where
- K: Borrow<Q>,
- Q: Hash + Eq,
+ Q: Hash + Equivalent<K>,
{
if self.table.is_empty() {
None
} else {
- let hash = make_hash::<K, Q, S>(&self.hash_builder, k);
+ let hash = make_hash::<Q, S>(&self.hash_builder, k);
self.table.get(hash, equivalent_key(k))
}
}
@@ -1298,8 +1423,7 @@ where
#[inline]
pub fn get_key_value_mut<Q: ?Sized>(&mut self, k: &Q) -> Option<(&K, &mut V)>
where
- K: Borrow<Q>,
- Q: Hash + Eq,
+ Q: Hash + Equivalent<K>,
{
// Avoid `Option::map` because it bloats LLVM IR.
match self.get_inner_mut(k) {
@@ -1330,8 +1454,7 @@ where
#[cfg_attr(feature = "inline-more", inline)]
pub fn contains_key<Q: ?Sized>(&self, k: &Q) -> bool
where
- K: Borrow<Q>,
- Q: Hash + Eq,
+ Q: Hash + Equivalent<K>,
{
self.get_inner(k).is_some()
}
@@ -1362,8 +1485,7 @@ where
#[cfg_attr(feature = "inline-more", inline)]
pub fn get_mut<Q: ?Sized>(&mut self, k: &Q) -> Option<&mut V>
where
- K: Borrow<Q>,
- Q: Hash + Eq,
+ Q: Hash + Equivalent<K>,
{
// Avoid `Option::map` because it bloats LLVM IR.
match self.get_inner_mut(k) {
@@ -1375,13 +1497,12 @@ where
#[inline]
fn get_inner_mut<Q: ?Sized>(&mut self, k: &Q) -> Option<&mut (K, V)>
where
- K: Borrow<Q>,
- Q: Hash + Eq,
+ Q: Hash + Equivalent<K>,
{
if self.table.is_empty() {
None
} else {
- let hash = make_hash::<K, Q, S>(&self.hash_builder, k);
+ let hash = make_hash::<Q, S>(&self.hash_builder, k);
self.table.get_mut(hash, equivalent_key(k))
}
}
@@ -1431,8 +1552,7 @@ where
/// ```
pub fn get_many_mut<Q: ?Sized, const N: usize>(&mut self, ks: [&Q; N]) -> Option<[&'_ mut V; N]>
where
- K: Borrow<Q>,
- Q: Hash + Eq,
+ Q: Hash + Equivalent<K>,
{
self.get_many_mut_inner(ks).map(|res| res.map(|(_, v)| v))
}
@@ -1487,8 +1607,7 @@ where
ks: [&Q; N],
) -> Option<[&'_ mut V; N]>
where
- K: Borrow<Q>,
- Q: Hash + Eq,
+ Q: Hash + Equivalent<K>,
{
self.get_many_unchecked_mut_inner(ks)
.map(|res| res.map(|(_, v)| v))
@@ -1543,8 +1662,7 @@ where
ks: [&Q; N],
) -> Option<[(&'_ K, &'_ mut V); N]>
where
- K: Borrow<Q>,
- Q: Hash + Eq,
+ Q: Hash + Equivalent<K>,
{
self.get_many_mut_inner(ks)
.map(|res| res.map(|(k, v)| (&*k, v)))
@@ -1599,8 +1717,7 @@ where
ks: [&Q; N],
) -> Option<[(&'_ K, &'_ mut V); N]>
where
- K: Borrow<Q>,
- Q: Hash + Eq,
+ Q: Hash + Equivalent<K>,
{
self.get_many_unchecked_mut_inner(ks)
.map(|res| res.map(|(k, v)| (&*k, v)))
@@ -1611,12 +1728,11 @@ where
ks: [&Q; N],
) -> Option<[&'_ mut (K, V); N]>
where
- K: Borrow<Q>,
- Q: Hash + Eq,
+ Q: Hash + Equivalent<K>,
{
let hashes = self.build_hashes_inner(ks);
self.table
- .get_many_mut(hashes, |i, (k, _)| ks[i].eq(k.borrow()))
+ .get_many_mut(hashes, |i, (k, _)| ks[i].equivalent(k))
}
unsafe fn get_many_unchecked_mut_inner<Q: ?Sized, const N: usize>(
@@ -1624,22 +1740,20 @@ where
ks: [&Q; N],
) -> Option<[&'_ mut (K, V); N]>
where
- K: Borrow<Q>,
- Q: Hash + Eq,
+ Q: Hash + Equivalent<K>,
{
let hashes = self.build_hashes_inner(ks);
self.table
- .get_many_unchecked_mut(hashes, |i, (k, _)| ks[i].eq(k.borrow()))
+ .get_many_unchecked_mut(hashes, |i, (k, _)| ks[i].equivalent(k))
}
fn build_hashes_inner<Q: ?Sized, const N: usize>(&self, ks: [&Q; N]) -> [u64; N]
where
- K: Borrow<Q>,
- Q: Hash + Eq,
+ Q: Hash + Equivalent<K>,
{
let mut hashes = [0_u64; N];
for i in 0..N {
- hashes[i] = make_hash::<K, Q, S>(&self.hash_builder, ks[i]);
+ hashes[i] = make_hash::<Q, S>(&self.hash_builder, ks[i]);
}
hashes
}
@@ -1677,7 +1791,7 @@ where
Some(mem::replace(item, v))
} else {
self.table
- .insert(hash, (k, v), make_hasher::<K, _, V, S>(&self.hash_builder));
+ .insert(hash, (k, v), make_hasher::<_, V, S>(&self.hash_builder));
None
}
}
@@ -1736,7 +1850,7 @@ where
let hash = make_insert_hash::<K, S>(&self.hash_builder, &k);
let bucket = self
.table
- .insert(hash, (k, v), make_hasher::<K, _, V, S>(&self.hash_builder));
+ .insert(hash, (k, v), make_hasher::<_, V, S>(&self.hash_builder));
let (k_ref, v_ref) = unsafe { bucket.as_mut() };
(k_ref, v_ref)
}
@@ -1801,19 +1915,17 @@ where
/// assert!(map.is_empty() && map.capacity() == 0);
///
/// map.insert(1, "a");
- /// let capacity_before_remove = map.capacity();
///
/// assert_eq!(map.remove(&1), Some("a"));
/// assert_eq!(map.remove(&1), None);
///
- /// // Now map holds none elements but capacity is equal to the old one
- /// assert!(map.len() == 0 && map.capacity() == capacity_before_remove);
+ /// // Now the map holds no elements
+ /// assert!(map.is_empty());
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn remove<Q: ?Sized>(&mut self, k: &Q) -> Option<V>
where
- K: Borrow<Q>,
- Q: Hash + Eq,
+ Q: Hash + Equivalent<K>,
{
// Avoid `Option::map` because it bloats LLVM IR.
match self.remove_entry(k) {
@@ -1842,21 +1954,19 @@ where
/// assert!(map.is_empty() && map.capacity() == 0);
///
/// map.insert(1, "a");
- /// let capacity_before_remove = map.capacity();
///
/// assert_eq!(map.remove_entry(&1), Some((1, "a")));
/// assert_eq!(map.remove(&1), None);
///
- /// // Now map hold none elements but capacity is equal to the old one
- /// assert!(map.len() == 0 && map.capacity() == capacity_before_remove);
+ /// // Now the map holds no elements
+ /// assert!(map.is_empty());
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn remove_entry<Q: ?Sized>(&mut self, k: &Q) -> Option<(K, V)>
where
- K: Borrow<Q>,
- Q: Hash + Eq,
+ Q: Hash + Equivalent<K>,
{
- let hash = make_hash::<K, Q, S>(&self.hash_builder, k);
+ let hash = make_hash::<Q, S>(&self.hash_builder, k);
self.table.remove_entry(hash, equivalent_key(k))
}
}
@@ -2018,14 +2128,14 @@ impl<K, V, S, A: Allocator + Clone> HashMap<K, V, S, A> {
///
/// # Note
///
- /// Calling the function safe, but using raw hash table API's may require
+ /// Calling this function is safe, but using the raw hash table API may require
/// unsafe functions or blocks.
///
/// `RawTable` API gives the lowest level of control under the map that can be useful
/// for extending the HashMap's API, but may lead to *[undefined behavior]*.
///
/// [`HashMap`]: struct.HashMap.html
- /// [`RawTable`]: raw/struct.RawTable.html
+ /// [`RawTable`]: crate::raw::RawTable
/// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
///
/// # Examples
@@ -2140,8 +2250,8 @@ where
impl<K, Q: ?Sized, V, S, A> Index<&Q> for HashMap<K, V, S, A>
where
- K: Eq + Hash + Borrow<Q>,
- Q: Eq + Hash,
+ K: Eq + Hash,
+ Q: Hash + Equivalent<K>,
S: BuildHasher,
A: Allocator + Clone,
{
@@ -3103,10 +3213,9 @@ impl<'a, K, V, S, A: Allocator + Clone> RawEntryBuilderMut<'a, K, V, S, A> {
pub fn from_key<Q: ?Sized>(self, k: &Q) -> RawEntryMut<'a, K, V, S, A>
where
S: BuildHasher,
- K: Borrow<Q>,
- Q: Hash + Eq,
+ Q: Hash + Equivalent<K>,
{
- let hash = make_hash::<K, Q, S>(&self.map.hash_builder, k);
+ let hash = make_hash::<Q, S>(&self.map.hash_builder, k);
self.from_key_hashed_nocheck(hash, k)
}
@@ -3136,8 +3245,7 @@ impl<'a, K, V, S, A: Allocator + Clone> RawEntryBuilderMut<'a, K, V, S, A> {
#[allow(clippy::wrong_self_convention)]
pub fn from_key_hashed_nocheck<Q: ?Sized>(self, hash: u64, k: &Q) -> RawEntryMut<'a, K, V, S, A>
where
- K: Borrow<Q>,
- Q: Eq,
+ Q: Equivalent<K>,
{
self.from_hash(hash, equivalent(k))
}
@@ -3211,10 +3319,9 @@ impl<'a, K, V, S, A: Allocator + Clone> RawEntryBuilder<'a, K, V, S, A> {
pub fn from_key<Q: ?Sized>(self, k: &Q) -> Option<(&'a K, &'a V)>
where
S: BuildHasher,
- K: Borrow<Q>,
- Q: Hash + Eq,
+ Q: Hash + Equivalent<K>,
{
- let hash = make_hash::<K, Q, S>(&self.map.hash_builder, k);
+ let hash = make_hash::<Q, S>(&self.map.hash_builder, k);
self.from_key_hashed_nocheck(hash, k)
}
@@ -3242,8 +3349,7 @@ impl<'a, K, V, S, A: Allocator + Clone> RawEntryBuilder<'a, K, V, S, A> {
#[allow(clippy::wrong_self_convention)]
pub fn from_key_hashed_nocheck<Q: ?Sized>(self, hash: u64, k: &Q) -> Option<(&'a K, &'a V)>
where
- K: Borrow<Q>,
- Q: Eq,
+ Q: Equivalent<K>,
{
self.from_hash(hash, equivalent(k))
}
@@ -3950,7 +4056,7 @@ impl<'a, K, V, S, A: Allocator + Clone> RawVacantEntryMut<'a, K, V, S, A> {
let &mut (ref mut k, ref mut v) = self.table.insert_entry(
hash,
(key, value),
- make_hasher::<K, _, V, S>(self.hash_builder),
+ make_hasher::<_, V, S>(self.hash_builder),
);
(k, v)
}
@@ -4018,7 +4124,7 @@ impl<'a, K, V, S, A: Allocator + Clone> RawVacantEntryMut<'a, K, V, S, A> {
let elem = self.table.insert(
hash,
(key, value),
- make_hasher::<K, _, V, S>(self.hash_builder),
+ make_hasher::<_, V, S>(self.hash_builder),
);
RawOccupiedEntryMut {
elem,
@@ -5183,7 +5289,6 @@ impl<'a, K, V, S, A: Allocator + Clone> OccupiedEntry<'a, K, V, S, A> {
/// assert!(map.is_empty() && map.capacity() == 0);
///
/// map.entry("poneyland").or_insert(12);
- /// let capacity_before_remove = map.capacity();
///
/// if let Entry::Occupied(o) = map.entry("poneyland") {
/// // We delete the entry from the map.
@@ -5191,8 +5296,8 @@ impl<'a, K, V, S, A: Allocator + Clone> OccupiedEntry<'a, K, V, S, A> {
/// }
///
/// assert_eq!(map.contains_key("poneyland"), false);
- /// // Now map hold none elements but capacity is equal to the old one
- /// assert!(map.len() == 0 && map.capacity() == capacity_before_remove);
+ /// // Now the map holds no elements
+ /// assert!(map.is_empty());
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn remove_entry(self) -> (K, V) {
@@ -5319,15 +5424,14 @@ impl<'a, K, V, S, A: Allocator + Clone> OccupiedEntry<'a, K, V, S, A> {
/// assert!(map.is_empty() && map.capacity() == 0);
///
/// map.entry("poneyland").or_insert(12);
- /// let capacity_before_remove = map.capacity();
///
/// if let Entry::Occupied(o) = map.entry("poneyland") {
/// assert_eq!(o.remove(), 12);
/// }
///
/// assert_eq!(map.contains_key("poneyland"), false);
- /// // Now map hold none elements but capacity is equal to the old one
- /// assert!(map.len() == 0 && map.capacity() == capacity_before_remove);
+ /// // Now the map holds no elements
+ /// assert!(map.is_empty());
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn remove(self) -> V {
@@ -5567,7 +5671,7 @@ impl<'a, K, V, S, A: Allocator + Clone> VacantEntry<'a, K, V, S, A> {
let entry = table.insert_entry(
self.hash,
(self.key, value),
- make_hasher::<K, _, V, S>(&self.table.hash_builder),
+ make_hasher::<_, V, S>(&self.table.hash_builder),
);
&mut entry.1
}
@@ -5581,7 +5685,7 @@ impl<'a, K, V, S, A: Allocator + Clone> VacantEntry<'a, K, V, S, A> {
let elem = self.table.table.insert(
self.hash,
(self.key, value),
- make_hasher::<K, _, V, S>(&self.table.hash_builder),
+ make_hasher::<_, V, S>(&self.table.hash_builder),
);
OccupiedEntry {
hash: self.hash,
@@ -5682,10 +5786,7 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> EntryRef<'a, 'b, K, Q, V,
/// Ensures a value is in the entry by inserting, if empty, the result of the default function.
/// This method allows for generating key-derived values for insertion by providing the default
- /// function a reference to the key that was moved during the `.entry_ref(key)` method call.
- ///
- /// The reference to the moved key is provided so that cloning or copying the key is
- /// unnecessary, unlike with `.or_insert_with(|| ... )`.
+ /// function access to the borrowed form of the key.
///
/// # Examples
///
@@ -5914,7 +6015,6 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> OccupiedEntryRef<'a, 'b,
/// assert!(map.is_empty() && map.capacity() == 0);
///
/// map.entry_ref("poneyland").or_insert(12);
- /// let capacity_before_remove = map.capacity();
///
/// if let EntryRef::Occupied(o) = map.entry_ref("poneyland") {
/// // We delete the entry from the map.
@@ -5923,7 +6023,7 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> OccupiedEntryRef<'a, 'b,
///
/// assert_eq!(map.contains_key("poneyland"), false);
/// // Now the map holds no elements
- /// assert!(map.len() == 0 && map.capacity() == capacity_before_remove);
+ /// assert!(map.is_empty());
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn remove_entry(self) -> (K, V) {
@@ -6048,7 +6148,6 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> OccupiedEntryRef<'a, 'b,
/// assert!(map.is_empty() && map.capacity() == 0);
///
/// map.entry_ref("poneyland").or_insert(12);
- /// let capacity_before_remove = map.capacity();
///
/// if let EntryRef::Occupied(o) = map.entry_ref("poneyland") {
/// assert_eq!(o.remove(), 12);
@@ -6056,7 +6155,7 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> OccupiedEntryRef<'a, 'b,
///
/// assert_eq!(map.contains_key("poneyland"), false);
/// // Now the map holds no elements
- /// assert!(map.len() == 0 && map.capacity() == capacity_before_remove);
+ /// assert!(map.is_empty());
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn remove(self) -> V {
@@ -6068,7 +6167,7 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> OccupiedEntryRef<'a, 'b,
///
/// # Panics
///
- /// Will panic if this OccupiedEntry was created through [`EntryRef::insert`].
+ /// Will panic if this OccupiedEntryRef was created through [`EntryRef::insert`].
///
/// # Examples
///
@@ -6110,7 +6209,7 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> OccupiedEntryRef<'a, 'b,
///
/// # Panics
///
- /// Will panic if this OccupiedEntry was created through [`Entry::insert`].
+ /// Will panic if this OccupiedEntryRef was created through [`EntryRef::insert`].
///
/// # Examples
///
@@ -6138,7 +6237,7 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> OccupiedEntryRef<'a, 'b,
/// fn reclaim_memory(map: &mut HashMap<Rc<str>, usize>, keys: &[Rc<str>]) {
/// for key in keys {
/// if let EntryRef::Occupied(entry) = map.entry_ref(key.as_ref()) {
- /// /// Replaces the entry's key with our version of it in `keys`.
+ /// // Replaces the entry's key with our version of it in `keys`.
/// entry.replace_key();
/// }
/// }
@@ -6305,7 +6404,7 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> VacantEntryRef<'a, 'b, K,
let entry = table.insert_entry(
self.hash,
(self.key.into_owned(), value),
- make_hasher::<K, _, V, S>(&self.table.hash_builder),
+ make_hasher::<_, V, S>(&self.table.hash_builder),
);
&mut entry.1
}
@@ -6319,7 +6418,7 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> VacantEntryRef<'a, 'b, K,
let elem = self.table.table.insert(
self.hash,
(self.key.into_owned(), value),
- make_hasher::<K, _, V, S>(&self.table.hash_builder),
+ make_hasher::<_, V, S>(&self.table.hash_builder),
);
OccupiedEntryRef {
hash: self.hash,
@@ -6455,17 +6554,17 @@ where
/// map.insert(1, 100);
///
/// let arr = [(1, 1), (2, 2)];
- /// let some_iter = arr.iter().map(|&(k, v)| (k, v));
+ /// let some_iter = arr.iter().map(|(k, v)| (k, v));
/// map.extend(some_iter);
/// // Replace values with existing keys with new values returned from the iterator.
/// // So that the map.get(&1) doesn't return Some(&100).
/// assert_eq!(map.get(&1), Some(&1));
///
/// let some_vec: Vec<_> = vec![(3, 3), (4, 4)];
- /// map.extend(some_vec.iter().map(|&(k, v)| (k, v)));
+ /// map.extend(some_vec.iter().map(|(k, v)| (k, v)));
///
/// let some_arr = [(5, 5), (6, 6)];
- /// map.extend(some_arr.iter().map(|&(k, v)| (k, v)));
+ /// map.extend(some_arr.iter().map(|(k, v)| (k, v)));
///
/// // You can also extend from another HashMap
/// let mut new_map = HashMap::new();
@@ -8070,27 +8169,32 @@ mod test_map {
fn test_try_reserve() {
use crate::TryReserveError::{AllocError, CapacityOverflow};
- const MAX_USIZE: usize = usize::MAX;
+ const MAX_ISIZE: usize = isize::MAX as usize;
let mut empty_bytes: HashMap<u8, u8> = HashMap::new();
- if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_USIZE) {
+ if let Err(CapacityOverflow) = empty_bytes.try_reserve(usize::MAX) {
} else {
panic!("usize::MAX should trigger an overflow!");
}
- if let Err(AllocError { .. }) = empty_bytes.try_reserve(MAX_USIZE / 16) {
+ if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_ISIZE) {
+ } else {
+ panic!("isize::MAX should trigger an overflow!");
+ }
+
+ if let Err(AllocError { .. }) = empty_bytes.try_reserve(MAX_ISIZE / 5) {
} else {
// This may succeed if there is enough free memory. Attempt to
// allocate a few more hashmaps to ensure the allocation will fail.
let mut empty_bytes2: HashMap<u8, u8> = HashMap::new();
- let _ = empty_bytes2.try_reserve(MAX_USIZE / 16);
+ let _ = empty_bytes2.try_reserve(MAX_ISIZE / 5);
let mut empty_bytes3: HashMap<u8, u8> = HashMap::new();
- let _ = empty_bytes3.try_reserve(MAX_USIZE / 16);
+ let _ = empty_bytes3.try_reserve(MAX_ISIZE / 5);
let mut empty_bytes4: HashMap<u8, u8> = HashMap::new();
- if let Err(AllocError { .. }) = empty_bytes4.try_reserve(MAX_USIZE / 16) {
+ if let Err(AllocError { .. }) = empty_bytes4.try_reserve(MAX_ISIZE / 5) {
} else {
- panic!("usize::MAX / 8 should trigger an OOM!");
+ panic!("isize::MAX / 5 should trigger an OOM!");
}
}
}
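The distinction exercised by the updated test can also be sketched at the API level (a minimal example; the outcomes assume a 64-bit target, as in the test): a request the table can never address reports `CapacityOverflow`, while a plausible-but-enormous request surfaces the allocator's failure as `AllocError`.

use hashbrown::{HashMap, TryReserveError};

fn classify(additional: usize) -> &'static str {
    let mut map: HashMap<u8, u8> = HashMap::new();
    match map.try_reserve(additional) {
        Ok(()) => "reserved",
        Err(TryReserveError::CapacityOverflow) => "capacity overflow",
        // Expected for requests around isize::MAX / 5, as in the test above.
        Err(TryReserveError::AllocError { .. }) => "allocation failure",
    }
}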
@@ -8280,7 +8384,7 @@ mod test_map {
let e = map.table.insert(
hash_value,
(i, 2 * i),
- super::make_hasher::<usize, _, usize, _>(&hash_builder),
+ super::make_hasher::<_, usize, _>(&hash_builder),
);
it.reflect_insert(&e);
if let Some(p) = removed.iter().position(|e| e == &(i, 2 * i)) {
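To summarize the `HashDoS resistance` notes added throughout this file, a short sketch of the recommended opt-in (using std's `RandomState` here; `ahash::RandomState` is used the same way):

use hashbrown::HashMap;
use std::collections::hash_map::RandomState;

fn dos_resistant_map() -> HashMap<String, u32, RandomState> {
    // RandomState::new() draws a fresh random seed, unlike the fixed-key
    // default build hasher mentioned in the docs above.
    HashMap::with_hasher(RandomState::new())
}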
diff --git a/vendor/hashbrown/src/raw/generic.rs b/vendor/hashbrown/src/raw/generic.rs
index b4d31e62c..52955a45b 100644
--- a/vendor/hashbrown/src/raw/generic.rs
+++ b/vendor/hashbrown/src/raw/generic.rs
@@ -13,7 +13,7 @@ use core::{mem, ptr};
))]
type GroupWord = u64;
#[cfg(all(
- target_pointer_width = "32",
+ any(target_pointer_width = "32", target_pointer_width = "16"),
not(target_arch = "aarch64"),
not(target_arch = "x86_64"),
not(target_arch = "wasm32"),
diff --git a/vendor/hashbrown/src/raw/mod.rs b/vendor/hashbrown/src/raw/mod.rs
index 211b818a5..625ca1d71 100644
--- a/vendor/hashbrown/src/raw/mod.rs
+++ b/vendor/hashbrown/src/raw/mod.rs
@@ -134,6 +134,13 @@ fn h1(hash: u64) -> usize {
hash as usize
}
+// Constant used by the h2 function to grab the top 7 bits of the hash.
+const MIN_HASH_LEN: usize = if mem::size_of::<usize>() < mem::size_of::<u64>() {
+ mem::size_of::<usize>()
+} else {
+ mem::size_of::<u64>()
+};
+
/// Secondary hash function, saved in the low 7 bits of the control byte.
#[inline]
#[allow(clippy::cast_possible_truncation)]
@@ -141,8 +148,8 @@ fn h2(hash: u64) -> u8 {
// Grab the top 7 bits of the hash. While the hash is normally a full 64-bit
// value, some hash functions (such as FxHash) produce a usize result
// instead, which means that the top 32 bits are 0 on 32-bit platforms.
- let hash_len = usize::min(mem::size_of::<usize>(), mem::size_of::<u64>());
- let top7 = hash >> (hash_len * 8 - 7);
+ // So we use the MIN_HASH_LEN constant to handle this.
+ let top7 = hash >> (MIN_HASH_LEN * 8 - 7);
(top7 & 0x7f) as u8 // truncation
}
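A small worked example of the shift above (assuming a 64-bit target, where MIN_HASH_LEN is 8):

fn h2_example() {
    let hash: u64 = 0xABCD_EF01_2345_6789;
    // The top 7 bits (0b101_0101 == 0x55) become the control-byte tag,
    // while h1 (the full hash as usize) drives the probe sequence.
    let top7 = hash >> (8 * 8 - 7);
    assert_eq!((top7 & 0x7f) as u8, 0x55);
}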
@@ -230,11 +237,15 @@ struct TableLayout {
impl TableLayout {
#[inline]
- fn new<T>() -> Self {
+ const fn new<T>() -> Self {
let layout = Layout::new::<T>();
Self {
size: layout.size(),
- ctrl_align: usize::max(layout.align(), Group::WIDTH),
+ ctrl_align: if layout.align() > Group::WIDTH {
+ layout.align()
+ } else {
+ Group::WIDTH
+ },
}
}
@@ -248,6 +259,12 @@ impl TableLayout {
size.checked_mul(buckets)?.checked_add(ctrl_align - 1)? & !(ctrl_align - 1);
let len = ctrl_offset.checked_add(buckets + Group::WIDTH)?;
+ // We need an additional check to ensure that the allocation doesn't
+ // exceed `isize::MAX` (https://github.com/rust-lang/rust/pull/95295).
+ if len > isize::MAX as usize - (ctrl_align - 1) {
+ return None;
+ }
+
Some((
unsafe { Layout::from_size_align_unchecked(len, ctrl_align) },
ctrl_offset,
@@ -255,16 +272,6 @@ impl TableLayout {
}
}
-/// Returns a Layout which describes the allocation required for a hash table,
-/// and the offset of the control bytes in the allocation.
-/// (the offset is also one past last element of buckets)
-///
-/// Returns `None` if an overflow occurs.
-#[cfg_attr(feature = "inline-more", inline)]
-fn calculate_layout<T>(buckets: usize) -> Option<(Layout, usize)> {
- TableLayout::new::<T>().calculate_layout_for(buckets)
-}
-
/// A reference to a hash table bucket containing a `T`.
///
/// This is usually just a pointer to the element itself. However if the element
@@ -290,9 +297,11 @@ impl<T> Clone for Bucket<T> {
}
impl<T> Bucket<T> {
+ const IS_ZERO_SIZED_TYPE: bool = mem::size_of::<T>() == 0;
+
#[inline]
unsafe fn from_base_index(base: NonNull<T>, index: usize) -> Self {
- let ptr = if mem::size_of::<T>() == 0 {
+ let ptr = if Self::IS_ZERO_SIZED_TYPE {
// won't overflow because index must be less than length
(index + 1) as *mut T
} else {
@@ -304,7 +313,7 @@ impl<T> Bucket<T> {
}
#[inline]
unsafe fn to_base_index(&self, base: NonNull<T>) -> usize {
- if mem::size_of::<T>() == 0 {
+ if Self::IS_ZERO_SIZED_TYPE {
self.ptr.as_ptr() as usize - 1
} else {
offset_from(base.as_ptr(), self.ptr.as_ptr())
@@ -312,7 +321,7 @@ impl<T> Bucket<T> {
}
#[inline]
pub fn as_ptr(&self) -> *mut T {
- if mem::size_of::<T>() == 0 {
+ if Self::IS_ZERO_SIZED_TYPE {
// Just return an arbitrary ZST pointer which is properly aligned
mem::align_of::<T>() as *mut T
} else {
@@ -321,7 +330,7 @@ impl<T> Bucket<T> {
}
#[inline]
unsafe fn next_n(&self, offset: usize) -> Self {
- let ptr = if mem::size_of::<T>() == 0 {
+ let ptr = if Self::IS_ZERO_SIZED_TYPE {
(self.ptr.as_ptr() as usize + offset) as *mut T
} else {
self.ptr.as_ptr().sub(offset)
@@ -331,15 +340,15 @@ impl<T> Bucket<T> {
}
}
#[cfg_attr(feature = "inline-more", inline)]
- pub unsafe fn drop(&self) {
+ pub(crate) unsafe fn drop(&self) {
self.as_ptr().drop_in_place();
}
#[inline]
- pub unsafe fn read(&self) -> T {
+ pub(crate) unsafe fn read(&self) -> T {
self.as_ptr().read()
}
#[inline]
- pub unsafe fn write(&self, val: T) {
+ pub(crate) unsafe fn write(&self, val: T) {
self.as_ptr().write(val);
}
#[inline]
@@ -413,6 +422,9 @@ impl<T> RawTable<T, Global> {
}
impl<T, A: Allocator + Clone> RawTable<T, A> {
+ const TABLE_LAYOUT: TableLayout = TableLayout::new::<T>();
+ const DATA_NEEDS_DROP: bool = mem::needs_drop::<T>();
+
/// Creates a new empty hash table without allocating any memory, using the
/// given allocator.
///
@@ -420,7 +432,7 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
/// leave the data pointer dangling since that bucket is never written to
/// due to our load factor forcing us to always have at least 1 free bucket.
#[inline]
- pub fn new_in(alloc: A) -> Self {
+ pub const fn new_in(alloc: A) -> Self {
Self {
table: RawTableInner::new_in(alloc),
marker: PhantomData,
@@ -441,7 +453,7 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
Ok(Self {
table: RawTableInner::new_uninitialized(
alloc,
- TableLayout::new::<T>(),
+ Self::TABLE_LAYOUT,
buckets,
fallibility,
)?,
@@ -459,7 +471,7 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
Ok(Self {
table: RawTableInner::fallible_with_capacity(
alloc,
- TableLayout::new::<T>(),
+ Self::TABLE_LAYOUT,
capacity,
fallibility,
)?,
@@ -493,7 +505,7 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
/// Deallocates the table without dropping any entries.
#[cfg_attr(feature = "inline-more", inline)]
unsafe fn free_buckets(&mut self) {
- self.table.free_buckets(TableLayout::new::<T>());
+ self.table.free_buckets(Self::TABLE_LAYOUT);
}
/// Returns pointer to one past last element of data table.
@@ -509,6 +521,19 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
self.data_end().as_ptr().wrapping_sub(self.buckets())
}
+ /// Returns information about the memory allocated by the table.
+ ///
+ /// `RawTable` allocates a single memory block to store both data and metadata.
+ /// This function returns the start of that allocation along with its size and alignment.
+ /// These are the arguments that will be passed to `dealloc` when the table is dropped.
+ ///
+ /// This function might be useful for memory profiling.
+ #[inline]
+ #[cfg(feature = "raw")]
+ pub fn allocation_info(&self) -> (NonNull<u8>, Layout) {
+ self.table.allocation_info(Self::TABLE_LAYOUT)
+ }
+
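A hedged usage sketch for the new accessor (requires hashbrown's `raw` feature; shown on a table that has actually allocated):

use hashbrown::raw::RawTable;

fn backing_allocation_bytes() -> usize {
    // with_capacity allocates, so allocation_info describes a real block.
    let table: RawTable<(u64, u64)> = RawTable::with_capacity(64);
    let (_start, layout) = table.allocation_info();
    layout.size()
}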
/// Returns the index of a bucket from a `Bucket`.
#[inline]
pub unsafe fn bucket_index(&self, bucket: &Bucket<T>) -> usize {
@@ -525,8 +550,7 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
/// Erases an element from the table without dropping it.
#[cfg_attr(feature = "inline-more", inline)]
- #[deprecated(since = "0.8.1", note = "use erase or remove instead")]
- pub unsafe fn erase_no_drop(&mut self, item: &Bucket<T>) {
+ unsafe fn erase_no_drop(&mut self, item: &Bucket<T>) {
let index = self.bucket_index(item);
self.table.erase(index);
}
@@ -534,7 +558,6 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
/// Erases an element from the table, dropping it in place.
#[cfg_attr(feature = "inline-more", inline)]
#[allow(clippy::needless_pass_by_value)]
- #[allow(deprecated)]
pub unsafe fn erase(&mut self, item: Bucket<T>) {
// Erase the element from the table first since drop might panic.
self.erase_no_drop(&item);
@@ -560,7 +583,6 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
/// Removes an element from the table, returning it.
#[cfg_attr(feature = "inline-more", inline)]
#[allow(clippy::needless_pass_by_value)]
- #[allow(deprecated)]
pub unsafe fn remove(&mut self, item: Bucket<T>) -> T {
self.erase_no_drop(&item);
item.read()
@@ -593,7 +615,7 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
}
unsafe fn drop_elements(&mut self) {
- if mem::needs_drop::<T>() && !self.is_empty() {
+ if Self::DATA_NEEDS_DROP && !self.is_empty() {
for item in self.iter() {
item.drop();
}
@@ -681,8 +703,8 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
additional,
&|table, index| hasher(table.bucket::<T>(index).as_ref()),
fallibility,
- TableLayout::new::<T>(),
- if mem::needs_drop::<T>() {
+ Self::TABLE_LAYOUT,
+ if Self::DATA_NEEDS_DROP {
Some(mem::transmute(ptr::drop_in_place::<T> as unsafe fn(*mut T)))
} else {
None
@@ -704,7 +726,7 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
capacity,
&|table, index| hasher(table.bucket::<T>(index).as_ref()),
fallibility,
- TableLayout::new::<T>(),
+ Self::TABLE_LAYOUT,
)
}
}
@@ -796,7 +818,7 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
{
let index = self.bucket_index(&bucket);
let old_ctrl = *self.table.ctrl(index);
- debug_assert!(is_full(old_ctrl));
+ debug_assert!(self.is_bucket_full(index));
let old_growth_left = self.table.growth_left;
let item = self.remove(bucket);
if let Some(new_item) = f(item) {
@@ -928,6 +950,16 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
self.table.bucket_mask + 1
}
+ /// Checks whether the bucket at `index` is full.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure `index` is less than the number of buckets.
+ #[inline]
+ pub unsafe fn is_bucket_full(&self, index: usize) -> bool {
+ self.table.is_bucket_full(index)
+ }
+
/// Returns an iterator over every element in the table. It is up to
/// the caller to ensure that the `RawTable` outlives the `RawIter`.
/// Because we cannot make the `next` method unsafe on the `RawIter`
@@ -1011,10 +1043,11 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
None
} else {
// Avoid `Option::unwrap_or_else` because it bloats LLVM IR.
- let (layout, ctrl_offset) = match calculate_layout::<T>(self.table.buckets()) {
- Some(lco) => lco,
- None => unsafe { hint::unreachable_unchecked() },
- };
+ let (layout, ctrl_offset) =
+ match Self::TABLE_LAYOUT.calculate_layout_for(self.table.buckets()) {
+ Some(lco) => lco,
+ None => unsafe { hint::unreachable_unchecked() },
+ };
Some((
unsafe { NonNull::new_unchecked(self.table.ctrl.as_ptr().sub(ctrl_offset)) },
layout,
@@ -1068,15 +1101,6 @@ impl<A: Allocator + Clone> RawTableInner<A> {
None => return Err(fallibility.capacity_overflow()),
};
- // We need an additional check to ensure that the allocation doesn't
- // exceed `isize::MAX`. We can skip this check on 64-bit systems since
- // such allocations will never succeed anyways.
- //
- // This mirrors what Vec does in the standard library.
- if mem::size_of::<usize>() < 8 && layout.size() > isize::MAX as usize {
- return Err(fallibility.capacity_overflow());
- }
-
let ptr: NonNull<u8> = match do_alloc(&alloc, layout) {
Ok(block) => block.cast(),
Err(_) => return Err(fallibility.alloc_err(layout)),
@@ -1148,7 +1172,7 @@ impl<A: Allocator + Clone> RawTableInner<A> {
// table. This second scan is guaranteed to find an empty
// slot (due to the load factor) before hitting the trailing
// control bytes (containing EMPTY).
- if unlikely(is_full(*self.ctrl(result))) {
+ if unlikely(self.is_bucket_full(result)) {
debug_assert!(self.bucket_mask < Group::WIDTH);
debug_assert_ne!(probe_seq.pos, 0);
return Group::load_aligned(self.ctrl(0))
@@ -1329,6 +1353,17 @@ impl<A: Allocator + Clone> RawTableInner<A> {
self.bucket_mask + 1
}
+ /// Checks whether the bucket at `index` is full.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure `index` is less than the number of buckets.
+ #[inline]
+ unsafe fn is_bucket_full(&self, index: usize) -> bool {
+ debug_assert!(index < self.buckets());
+ is_full(*self.ctrl(index))
+ }
+
#[inline]
fn num_ctrl_bytes(&self) -> usize {
self.bucket_mask + 1 + Group::WIDTH
@@ -1427,7 +1462,7 @@ impl<A: Allocator + Clone> RawTableInner<A> {
// Copy all elements to the new table.
for i in 0..self.buckets() {
- if !is_full(*self.ctrl(i)) {
+ if !self.is_bucket_full(i) {
continue;
}
@@ -1507,7 +1542,6 @@ impl<A: Allocator + Clone> RawTableInner<A> {
// Search for a suitable place to put it
let new_i = guard.find_insert_slot(hash);
- let new_i_p = guard.bucket_ptr(new_i, size_of);
// Probing works by scanning through all of the control
// bytes in groups, which may not be aligned to the group
@@ -1519,6 +1553,8 @@ impl<A: Allocator + Clone> RawTableInner<A> {
continue 'outer;
}
+ let new_i_p = guard.bucket_ptr(new_i, size_of);
+
// We are moving the current item to a new position. Write
// our H2 to the control byte of the new position.
let prev_ctrl = guard.replace_ctrl_h2(new_i, hash);
@@ -1547,15 +1583,21 @@ impl<A: Allocator + Clone> RawTableInner<A> {
#[inline]
unsafe fn free_buckets(&mut self, table_layout: TableLayout) {
+ let (ptr, layout) = self.allocation_info(table_layout);
+ self.alloc.deallocate(ptr, layout);
+ }
+
+ #[inline]
+ fn allocation_info(&self, table_layout: TableLayout) -> (NonNull<u8>, Layout) {
// Avoid `Option::unwrap_or_else` because it bloats LLVM IR.
let (layout, ctrl_offset) = match table_layout.calculate_layout_for(self.buckets()) {
Some(lco) => lco,
- None => hint::unreachable_unchecked(),
+ None => unsafe { hint::unreachable_unchecked() },
};
- self.alloc.deallocate(
- NonNull::new_unchecked(self.ctrl.as_ptr().sub(ctrl_offset)),
+ (
+ unsafe { NonNull::new_unchecked(self.ctrl.as_ptr().sub(ctrl_offset)) },
layout,
- );
+ )
}
/// Marks all table buckets as empty without dropping their contents.
@@ -1572,7 +1614,7 @@ impl<A: Allocator + Clone> RawTableInner<A> {
#[inline]
unsafe fn erase(&mut self, index: usize) {
- debug_assert!(is_full(*self.ctrl(index)));
+ debug_assert!(self.is_bucket_full(index));
let index_before = index.wrapping_sub(Group::WIDTH) & self.bucket_mask;
let empty_before = Group::load(self.ctrl(index_before)).match_empty();
let empty_after = Group::load(self.ctrl(index)).match_empty();
@@ -1720,9 +1762,9 @@ impl<T: Clone, A: Allocator + Clone> RawTable<T, A> {
// to make sure we drop only the elements that have been
// cloned so far.
let mut guard = guard((0, &mut *self), |(index, self_)| {
- if mem::needs_drop::<T>() && !self_.is_empty() {
+ if Self::DATA_NEEDS_DROP && !self_.is_empty() {
for i in 0..=*index {
- if is_full(*self_.table.ctrl(i)) {
+ if self_.is_bucket_full(i) {
self_.bucket(i).drop();
}
}
@@ -2008,6 +2050,8 @@ pub struct RawIter<T> {
}
impl<T> RawIter<T> {
+ const DATA_NEEDS_DROP: bool = mem::needs_drop::<T>();
+
/// Refresh the iterator so that it reflects a removal from the given bucket.
///
/// For the iterator to remain valid, this method must be called once
@@ -2125,7 +2169,7 @@ impl<T> RawIter<T> {
}
unsafe fn drop_elements(&mut self) {
- if mem::needs_drop::<T>() && self.len() != 0 {
+ if Self::DATA_NEEDS_DROP && self.len() != 0 {
for item in self {
item.drop();
}
diff --git a/vendor/hashbrown/src/scopeguard.rs b/vendor/hashbrown/src/scopeguard.rs
index f85e6ab0e..382d06043 100644
--- a/vendor/hashbrown/src/scopeguard.rs
+++ b/vendor/hashbrown/src/scopeguard.rs
@@ -1,6 +1,6 @@
// Extracted from the scopeguard crate
use core::{
- mem,
+ mem::ManuallyDrop,
ops::{Deref, DerefMut},
ptr,
};
@@ -28,15 +28,13 @@ where
#[inline]
pub fn into_inner(guard: Self) -> T {
// Cannot move out of Drop-implementing types, so
- // ptr::read the value and forget the guard.
+ // ptr::read the value out of a ManuallyDrop<Self>
+ // Don't use mem::forget, as that might invalidate the value
+ let guard = ManuallyDrop::new(guard);
unsafe {
let value = ptr::read(&guard.value);
- // read the closure so that it is dropped, and assign it to a local
- // variable to ensure that it is only dropped after the guard has
- // been forgotten. (In case the Drop impl of the closure, or that
- // of any consumed captured variable, panics).
- let _dropfn = ptr::read(&guard.dropfn);
- mem::forget(guard);
+ // read the closure so that it is dropped
+ let _ = ptr::read(&guard.dropfn);
value
}
}
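The same `ManuallyDrop` pattern in isolation (a minimal sketch with illustrative names, not the crate's exact guard type): wrapping the guard in `ManuallyDrop` disables its `Drop` impl, after which the fields are moved out exactly once with `ptr::read`.

use core::{mem::ManuallyDrop, ptr};

struct Guard<T, F: FnMut(&mut T)> {
    value: T,
    dropfn: F,
}

impl<T, F: FnMut(&mut T)> Guard<T, F> {
    fn into_inner(guard: Self) -> T {
        // Disable the Drop impl, then read each field out once.
        let guard = ManuallyDrop::new(guard);
        unsafe {
            let value = ptr::read(&guard.value);
            // Read (and immediately drop) the closure as well.
            let _ = ptr::read(&guard.dropfn);
            value
        }
    }
}

impl<T, F: FnMut(&mut T)> Drop for Guard<T, F> {
    fn drop(&mut self) {
        (self.dropfn)(&mut self.value);
    }
}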
diff --git a/vendor/hashbrown/src/set.rs b/vendor/hashbrown/src/set.rs
index 2a4dcea52..60c1b1cd1 100644
--- a/vendor/hashbrown/src/set.rs
+++ b/vendor/hashbrown/src/set.rs
@@ -1,6 +1,7 @@
-use crate::TryReserveError;
+#[cfg(feature = "raw")]
+use crate::raw::RawTable;
+use crate::{Equivalent, TryReserveError};
use alloc::borrow::ToOwned;
-use core::borrow::Borrow;
use core::fmt;
use core::hash::{BuildHasher, Hash};
use core::iter::{Chain, FromIterator, FusedIterator};
@@ -135,6 +136,18 @@ impl<T> HashSet<T, DefaultHashBuilder> {
/// The hash set is initially created with a capacity of 0, so it will not allocate until it
/// is first inserted into.
///
+ /// # HashDoS resistance
+ ///
+ /// The `hash_builder` normally uses a fixed key by default, which does
+ /// not allow the `HashSet` to be protected against attacks such as [`HashDoS`].
+ /// Users who require HashDoS resistance should explicitly use
+ /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`]
+ /// as the hasher when creating a [`HashSet`], for example with
+ /// the [`with_hasher`](HashSet::with_hasher) method.
+ ///
+ /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack
+ /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html
+ ///
/// # Examples
///
/// ```
@@ -153,6 +166,18 @@ impl<T> HashSet<T, DefaultHashBuilder> {
/// The hash set will be able to hold at least `capacity` elements without
/// reallocating. If `capacity` is 0, the hash set will not allocate.
///
+ /// # HashDoS resistance
+ ///
+ /// The `hash_builder` normally uses a fixed key by default, which does
+ /// not allow the `HashSet` to be protected against attacks such as [`HashDoS`].
+ /// Users who require HashDoS resistance should explicitly use
+ /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`]
+ /// as the hasher when creating a [`HashSet`], for example with
+ /// the [`with_capacity_and_hasher`](HashSet::with_capacity_and_hasher) method.
+ ///
+ /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack
+ /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html
+ ///
/// # Examples
///
/// ```
@@ -175,6 +200,18 @@ impl<T: Hash + Eq, A: Allocator + Clone> HashSet<T, DefaultHashBuilder, A> {
/// The hash set is initially created with a capacity of 0, so it will not allocate until it
/// is first inserted into.
///
+ /// # HashDoS resistance
+ ///
+ /// The `hash_builder` normally uses a fixed key by default, which does
+ /// not allow the `HashSet` to be protected against attacks such as [`HashDoS`].
+ /// Users who require HashDoS resistance should explicitly use
+ /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`]
+ /// as the hasher when creating a [`HashSet`], for example with
+ /// the [`with_hasher_in`](HashSet::with_hasher_in) method.
+ ///
+ /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack
+ /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html
+ ///
/// # Examples
///
/// ```
@@ -193,6 +230,18 @@ impl<T: Hash + Eq, A: Allocator + Clone> HashSet<T, DefaultHashBuilder, A> {
/// The hash set will be able to hold at least `capacity` elements without
/// reallocating. If `capacity` is 0, the hash set will not allocate.
///
+ /// # HashDoS resistance
+ ///
+ /// The `hash_builder` normally uses a fixed key by default, which does
+ /// not protect the `HashSet` against attacks such as [`HashDoS`].
+ /// Users who require HashDoS resistance should explicitly use
+ /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`]
+ /// as the hasher when creating a [`HashSet`], for example with the
+ /// [`with_capacity_and_hasher_in`](HashSet::with_capacity_and_hasher_in) method.
+ ///
+ /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack
+ /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html
+ ///
/// # Examples
///
/// ```
@@ -386,16 +435,23 @@ impl<T, S> HashSet<T, S, Global> {
/// Creates a new empty hash set which will use the given hasher to hash
/// keys.
///
- /// The hash set is also created with the default initial capacity.
+ /// The hash set is initially created with a capacity of 0, so it will not
+ /// allocate until it is first inserted into.
+ ///
+ /// # HashDoS resistance
///
- /// Warning: `hasher` is normally randomly generated, and
- /// is designed to allow `HashSet`s to be resistant to attacks that
- /// cause many collisions and very poor performance. Setting it
- /// manually using this function can expose a DoS attack vector.
+ /// The `hash_builder` normally uses a fixed key by default, which does
+ /// not protect the `HashSet` against attacks such as [`HashDoS`].
+ /// Users who require HashDoS resistance should explicitly use
+ /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`]
+ /// as the hasher when creating a [`HashSet`].
///
/// The `hash_builder` passed should implement the [`BuildHasher`] trait for
- /// the HashMap to be useful, see its documentation for details.
+ /// the HashSet to be useful; see its documentation for details.
///
+ /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack
+ /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html
+ /// [`BuildHasher`]: https://doc.rust-lang.org/std/hash/trait.BuildHasher.html
///
/// # Examples
///
@@ -407,8 +463,6 @@ impl<T, S> HashSet<T, S, Global> {
/// let mut set = HashSet::with_hasher(s);
/// set.insert(2);
/// ```
- ///
- /// [`BuildHasher`]: ../../std/hash/trait.BuildHasher.html
#[cfg_attr(feature = "inline-more", inline)]
pub const fn with_hasher(hasher: S) -> Self {
Self {
@@ -422,13 +476,20 @@ impl<T, S> HashSet<T, S, Global> {
/// The hash set will be able to hold at least `capacity` elements without
/// reallocating. If `capacity` is 0, the hash set will not allocate.
///
- /// Warning: `hasher` is normally randomly generated, and
- /// is designed to allow `HashSet`s to be resistant to attacks that
- /// cause many collisions and very poor performance. Setting it
- /// manually using this function can expose a DoS attack vector.
+ /// # HashDoS resistance
+ ///
+ /// The `hash_builder` normally uses a fixed key by default, which does
+ /// not protect the `HashSet` against attacks such as [`HashDoS`].
+ /// Users who require HashDoS resistance should explicitly use
+ /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`]
+ /// as the hasher when creating a [`HashSet`].
///
/// The `hash_builder` passed should implement the [`BuildHasher`] trait for
- /// the HashMap to be useful, see its documentation for details.
+ /// the HashSet to be useful; see its documentation for details.
+ ///
+ /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack
+ /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html
+ /// [`BuildHasher`]: https://doc.rust-lang.org/std/hash/trait.BuildHasher.html
///
/// # Examples
///
@@ -440,8 +501,6 @@ impl<T, S> HashSet<T, S, Global> {
/// let mut set = HashSet::with_capacity_and_hasher(10, s);
/// set.insert(1);
/// ```
- ///
- /// [`BuildHasher`]: ../../std/hash/trait.BuildHasher.html
#[cfg_attr(feature = "inline-more", inline)]
pub fn with_capacity_and_hasher(capacity: usize, hasher: S) -> Self {
Self {
@@ -463,12 +522,23 @@ where
/// Creates a new empty hash set which will use the given hasher to hash
/// keys.
///
- /// The hash set is also created with the default initial capacity.
+ /// The hash set is initially created with a capacity of 0, so it will not
+ /// allocate until it is first inserted into.
///
- /// Warning: `hasher` is normally randomly generated, and
- /// is designed to allow `HashSet`s to be resistant to attacks that
- /// cause many collisions and very poor performance. Setting it
- /// manually using this function can expose a DoS attack vector.
+ /// # HashDoS resistance
+ ///
+ /// The `hash_builder` normally uses a fixed key by default, which does
+ /// not protect the `HashSet` against attacks such as [`HashDoS`].
+ /// Users who require HashDoS resistance should explicitly use
+ /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`]
+ /// as the hasher when creating a [`HashSet`].
+ ///
+ /// The `hash_builder` passed should implement the [`BuildHasher`] trait for
+ /// the HashSet to be useful; see its documentation for details.
+ ///
+ /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack
+ /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html
+ /// [`BuildHasher`]: https://doc.rust-lang.org/std/hash/trait.BuildHasher.html
///
/// # Examples
///
@@ -481,7 +551,7 @@ where
/// set.insert(2);
/// ```
#[cfg_attr(feature = "inline-more", inline)]
- pub fn with_hasher_in(hasher: S, alloc: A) -> Self {
+ pub const fn with_hasher_in(hasher: S, alloc: A) -> Self {
Self {
map: HashMap::with_hasher_in(hasher, alloc),
}
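
As a usage sketch of the HashDoS advice added in the doc comments above (not part of the patch): a randomly seeded hasher such as `std::collections::hash_map::RandomState` can be supplied through `with_hasher` so that each set instance gets its own seed.

```
use hashbrown::HashSet;
use std::collections::hash_map::RandomState;

// RandomState is seeded per instance, mitigating HashDoS-style collision attacks.
let mut set: HashSet<i32, RandomState> = HashSet::with_hasher(RandomState::new());
set.insert(2);
assert!(set.contains(&2));
```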
@@ -493,10 +563,20 @@ where
/// The hash set will be able to hold at least `capacity` elements without
/// reallocating. If `capacity` is 0, the hash set will not allocate.
///
- /// Warning: `hasher` is normally randomly generated, and
- /// is designed to allow `HashSet`s to be resistant to attacks that
- /// cause many collisions and very poor performance. Setting it
- /// manually using this function can expose a DoS attack vector.
+ /// # HashDoS resistance
+ ///
+ /// The `hash_builder` normally uses a fixed key by default, which does
+ /// not protect the `HashSet` against attacks such as [`HashDoS`].
+ /// Users who require HashDoS resistance should explicitly use
+ /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`]
+ /// as the hasher when creating a [`HashSet`].
+ ///
+ /// The `hash_builder` passed should implement the [`BuildHasher`] trait for
+ /// the HashSet to be useful; see its documentation for details.
+ ///
+ /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack
+ /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html
+ /// [`BuildHasher`]: https://doc.rust-lang.org/std/hash/trait.BuildHasher.html
///
/// # Examples
///
@@ -547,7 +627,12 @@ where
///
/// # Panics
///
+ /// Panics if the new capacity exceeds [`isize::MAX`] bytes, and [`abort`]s the program
+ /// in case of allocation error. Use [`try_reserve`](HashSet::try_reserve) instead
+ /// in case of allocation error. Use [`try_reserve`](HashSet::try_reserve) instead
+ /// if you want to handle memory allocation failure.
+ ///
+ /// [`isize::MAX`]: https://doc.rust-lang.org/std/primitive.isize.html
+ /// [`abort`]: https://doc.rust-lang.org/alloc/alloc/fn.handle_alloc_error.html
///
/// # Examples
///
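
A brief sketch of the `try_reserve` alternative mentioned in the revised `reserve` docs, for callers that prefer to handle allocation failure instead of aborting (illustrative only, assuming the crate's default `ahash`-based hasher is enabled):

```
use hashbrown::HashSet;

let mut set: HashSet<u64> = HashSet::new();
// try_reserve reports allocation failure as an Err instead of aborting the process.
match set.try_reserve(1_000) {
    Ok(()) => { /* capacity for 1_000 more elements is now available */ }
    Err(e) => eprintln!("could not reserve capacity: {e:?}"),
}
```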
@@ -773,8 +858,7 @@ where
#[cfg_attr(feature = "inline-more", inline)]
pub fn contains<Q: ?Sized>(&self, value: &Q) -> bool
where
- T: Borrow<Q>,
- Q: Hash + Eq,
+ Q: Hash + Equivalent<T>,
{
self.map.contains_key(value)
}
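
The switch from `T: Borrow<Q>` to `Q: Hash + Equivalent<T>` in these lookup bounds allows queries with key types that have no `Borrow` relationship to `T`. A hypothetical sketch (the `Pair` type below is illustrative, not part of hashbrown):

```
use core::hash::{Hash, Hasher};
use hashbrown::{Equivalent, HashSet};

// Borrowed view of a (String, String) element; (String, String) does not
// implement Borrow<Pair<'_>>, so the Equivalent trait is what makes this work.
struct Pair<'a>(&'a str, &'a str);

impl Hash for Pair<'_> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        // Must hash exactly like (String, String): field by field.
        self.0.hash(state);
        self.1.hash(state);
    }
}

impl Equivalent<(String, String)> for Pair<'_> {
    fn equivalent(&self, key: &(String, String)) -> bool {
        self.0 == key.0 && self.1 == key.1
    }
}

let mut set: HashSet<(String, String)> = HashSet::new();
set.insert(("foo".to_string(), "bar".to_string()));
// Look up without allocating two Strings.
assert!(set.contains(&Pair("foo", "bar")));
```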
@@ -800,8 +884,7 @@ where
#[cfg_attr(feature = "inline-more", inline)]
pub fn get<Q: ?Sized>(&self, value: &Q) -> Option<&T>
where
- T: Borrow<Q>,
- Q: Hash + Eq,
+ Q: Hash + Equivalent<T>,
{
// Avoid `Option::map` because it bloats LLVM IR.
match self.map.get_key_value(value) {
@@ -856,8 +939,7 @@ where
#[inline]
pub fn get_or_insert_owned<Q: ?Sized>(&mut self, value: &Q) -> &T
where
- T: Borrow<Q>,
- Q: Hash + Eq + ToOwned<Owned = T>,
+ Q: Hash + Equivalent<T> + ToOwned<Owned = T>,
{
// Although the raw entry gives us `&mut T`, we only return `&T` to be consistent with
// `get`. Key mutation is "raw" because you're not supposed to affect `Eq` or `Hash`.
@@ -889,8 +971,7 @@ where
#[cfg_attr(feature = "inline-more", inline)]
pub fn get_or_insert_with<Q: ?Sized, F>(&mut self, value: &Q, f: F) -> &T
where
- T: Borrow<Q>,
- Q: Hash + Eq,
+ Q: Hash + Equivalent<T>,
F: FnOnce(&Q) -> T,
{
// Although the raw entry gives us `&mut T`, we only return `&T` to be consistent with
@@ -1106,8 +1187,7 @@ where
#[cfg_attr(feature = "inline-more", inline)]
pub fn remove<Q: ?Sized>(&mut self, value: &Q) -> bool
where
- T: Borrow<Q>,
- Q: Hash + Eq,
+ Q: Hash + Equivalent<T>,
{
self.map.remove(value).is_some()
}
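
Thanks to the blanket `Equivalent` implementation over `Borrow`, the existing borrowed-key lookups keep working unchanged; for instance, a `HashSet<String>` can still be queried, populated, and pruned through `&str` (illustrative sketch):

```
use hashbrown::HashSet;

let mut set: HashSet<String> = HashSet::new();
// Look up by &str; only allocate a String when the value is missing.
let v = set.get_or_insert_with("alpha", str::to_owned);
assert_eq!(v, "alpha");
// remove() also accepts the borrowed form.
assert!(set.remove("alpha"));
```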
@@ -1133,8 +1213,7 @@ where
#[cfg_attr(feature = "inline-more", inline)]
pub fn take<Q: ?Sized>(&mut self, value: &Q) -> Option<T>
where
- T: Borrow<Q>,
- Q: Hash + Eq,
+ Q: Hash + Equivalent<T>,
{
// Avoid `Option::map` because it bloats LLVM IR.
match self.map.remove_entry(value) {
@@ -1142,6 +1221,26 @@ where
None => None,
}
}
+
+ /// Returns a mutable reference to the [`RawTable`] underlying this [`HashSet`].
+ /// This function is only available if the `raw` feature of the crate is enabled.
+ ///
+ /// # Note
+ ///
+ /// Calling this function is safe, but using the raw hash table API may require
+ /// unsafe functions or blocks.
+ ///
+ /// The `RawTable` API gives the lowest level of control over the set, which can be
+ /// useful for extending the HashSet's API, but may lead to *[undefined behavior]*.
+ ///
+ /// [`HashSet`]: struct.HashSet.html
+ /// [`RawTable`]: crate::raw::RawTable
+ /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ #[cfg(feature = "raw")]
+ #[cfg_attr(feature = "inline-more", inline)]
+ pub fn raw_table(&mut self) -> &mut RawTable<(T, ()), A> {
+ self.map.raw_table()
+ }
}
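
A minimal sketch of the new accessor, assuming hashbrown is built with the `raw` feature; only safe `RawTable` methods are shown here, and anything deeper (buckets, manual insertion or erasure) requires `unsafe` blocks:

```
use hashbrown::HashSet;

let mut set: HashSet<u32> = (0..4).collect();
// The raw table stores (T, ()) pairs; this only inspects it through safe methods.
let table = set.raw_table();
assert_eq!(table.len(), 4);
assert!(table.capacity() >= 4);
```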
impl<T, S, A> PartialEq for HashSet<T, S, A>