Diffstat (limited to 'vendor/hashbrown')
 vendor/hashbrown/.cargo-checksum.json                    |    2
 vendor/hashbrown/CHANGELOG.md                            |   34
 vendor/hashbrown/Cargo.toml                              |   10
 vendor/hashbrown/README.md                               |    2
 vendor/hashbrown/src/external_trait_impls/rayon/map.rs   |   40
 vendor/hashbrown/src/external_trait_impls/rayon/mod.rs   |    1
 vendor/hashbrown/src/external_trait_impls/rayon/raw.rs   |   23
 vendor/hashbrown/src/external_trait_impls/rayon/set.rs   |   34
 vendor/hashbrown/src/external_trait_impls/rayon/table.rs |  252
 vendor/hashbrown/src/external_trait_impls/serde.rs       |   63
 vendor/hashbrown/src/lib.rs                              |   22
 vendor/hashbrown/src/map.rs                              |  614
 vendor/hashbrown/src/raw/mod.rs                          | 1942
 vendor/hashbrown/src/rustc_entry.rs                      |   26
 vendor/hashbrown/src/set.rs                              |  154
 vendor/hashbrown/src/table.rs                            | 2030
 16 files changed, 4665 insertions(+), 584 deletions(-)
diff --git a/vendor/hashbrown/.cargo-checksum.json b/vendor/hashbrown/.cargo-checksum.json index aed788fc5..6bb5ac55f 100644 --- a/vendor/hashbrown/.cargo-checksum.json +++ b/vendor/hashbrown/.cargo-checksum.json @@ -1 +1 @@ -{"files":{"CHANGELOG.md":"f4769b4c6f44e09c379f55694f89a189620a78586a030a6c6c8d183265c31c52","Cargo.toml":"7a3568541b22e0e7dd0a8c2227699c84474673870f5e46dbb7db61ac2eac391f","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"ff8f68cb076caf8cefe7a6430d4ac086ce6af2ca8ce2c4e5a2004d4552ef52a2","README.md":"00e45d59f6f8537aa0f8e3dec17e24e9838b52f35aa9c1815c71ab1e8f63888e","benches/bench.rs":"ef7bc025922f077d307c565640c005d056e3d6c1713448a95aae92d3c22c1005","benches/insert_unique_unchecked.rs":"cb84275f22d5f95a5ac995ac6b2df74ffcf342765b401d27c95f2955c7b7cb9f","clippy.toml":"7535949f908c6d9aea4f9a9f3a7625552c93fc29e963d059d40f4def9d77ea7b","src/external_trait_impls/mod.rs":"0625e6a5e3b8ecc8901a12aeeea54393fd84617fb3a14d98a34d2d2bddb8d257","src/external_trait_impls/rayon/helpers.rs":"ba105bf0853ebc45157f22116ad0f55d3bdab75e721d8e7a677c7b912d0c0c6d","src/external_trait_impls/rayon/map.rs":"c0f50c8c6f2f70c70994a3243d92de3bbda5e78519c906c4f81f207ed63e5cc3","src/external_trait_impls/rayon/mod.rs":"156de9c1ad0123334ea3b7e5a17444faf1b8bf971aa88a1f23e2f2d1c3021141","src/external_trait_impls/rayon/raw.rs":"e62c5f3ca5fffea47357e64b6f8c34cec94af62d9bd28a2b87934da46c22b66e","src/external_trait_impls/rayon/set.rs":"c4c44d44e56c2f59e9e1355662e29d8744ac96645ca4414127a359fb46cb0fbf","src/external_trait_impls/rkyv/hash_map.rs":"7abe24318143b776016052b05840656afc858b1ba5252f3d418d61972477f53d","src/external_trait_impls/rkyv/hash_set.rs":"38d969125d17d606492ec4ec9fc06b7e7118eb903240dacf40de21b9b06fa5c8","src/external_trait_impls/rkyv/mod.rs":"54399ce5574fd1d84b7b0cb4238fa3e898575e89a6724299be009d2172bda02e","src/external_trait_impls/serde.rs":"0bc1a1f218d1ae7a5262557a5e3737b9334caf7d50c136dbdc75ff75680c223b","src/lib.rs":"662765875308544b71a46e20f18782e7a3246fddb11496206e296a24b78b56a5","src/macros.rs":"98a26b908fc0fbe6a58d008a317e550013d615eb3cc17a5054a573c62c1d74cb","src/map.rs":"4f4bdc2a2eb3c4395d655f5ce12d217b82f6edeef3579efcb6b18d08345f1d52","src/raw/alloc.rs":"902f8588d0fdee3e5c3dc02410f41d4b38ac88843727387f929f3186b3a2d322","src/raw/bitmask.rs":"3b3dce8d6a48856ada19085abf43908f124ab3419fcf434b9ca64d7bff243f67","src/raw/generic.rs":"efc5e603be3e9a17935aef1836a38ce01c78a0093b2af0671548eb5459b37921","src/raw/mod.rs":"cecbe517b36042094818887d41226bdafd78c6f67657540a8c1ea020cd5d302f","src/raw/neon.rs":"9907d8ebc36fc3df562dde478ea9b72213fda65288a304718d8647f0029dc9ad","src/raw/sse2.rs":"39038e3344e49f4638e211bcdbf56565ac53e90dce56172cc3b526fea911c2af","src/rustc_entry.rs":"19d3346843bc62c7c0165e8824d26355ab2666086f3088b1150a8b3f59376a76","src/scopeguard.rs":"1a246e08a63c06cd8ad934bd7da229421bf804f991ae93cd7e242da27ca6c601","src/set.rs":"349a1523656a8a3f364b5313d98a969444717461a7ab7133c8e5f215ac2c329d","tests/equivalent_trait.rs":"84faa3fe9d67c375d03fec81f0f1412c47862477d42e84e7d235258236338d5b","tests/hasher.rs":"9a8fdf67e4415618e16729969c386eefe71408cded5d46cf7b67d969276a3452","tests/raw.rs":"43ed2f98877533a0905611d9a30f26b183dd3e103e3856eeab80e7b8ac7894d3","tests/rayon.rs":"39cb24ab45fce8087bb54948715c8b6973ebfba1a325292b5b3cd9aab50b5fd2","tests/serde.rs":"6bac8054db722dd049901b37a6e006535bac30f425eb5cd91af19b5bc1dfe78e","tests/set.rs":"9f8011c29d1059aadb54b6dd4623521d5178b4278b4a56021ef2cee4bbb19fd9"},"package":"2c6201b9ff9fd90a5a3bac2e56a830d0c
aa509576f0e503818ee82c181b3437a"}
\ No newline at end of file +{"files":{"CHANGELOG.md":"9cff035ecd949ca041cae2ab20be5c642360b369a499286ea830d4a48bf3b284","Cargo.toml":"a23bc72f1aed8ac540796975437fb8e158e7b4a186c1d646711717f57c4473ce","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"ff8f68cb076caf8cefe7a6430d4ac086ce6af2ca8ce2c4e5a2004d4552ef52a2","README.md":"84c222ce49510535419d338b7532a72a2bf22b7466e44de78d92d25b6c7d636b","benches/bench.rs":"ef7bc025922f077d307c565640c005d056e3d6c1713448a95aae92d3c22c1005","benches/insert_unique_unchecked.rs":"cb84275f22d5f95a5ac995ac6b2df74ffcf342765b401d27c95f2955c7b7cb9f","clippy.toml":"7535949f908c6d9aea4f9a9f3a7625552c93fc29e963d059d40f4def9d77ea7b","src/external_trait_impls/mod.rs":"0625e6a5e3b8ecc8901a12aeeea54393fd84617fb3a14d98a34d2d2bddb8d257","src/external_trait_impls/rayon/helpers.rs":"ba105bf0853ebc45157f22116ad0f55d3bdab75e721d8e7a677c7b912d0c0c6d","src/external_trait_impls/rayon/map.rs":"96fdf39b3f601f77152d7ce84541b8f51f32b9274b7da9c294862892e721a5d8","src/external_trait_impls/rayon/mod.rs":"126edc882501dddd25e442d9236508b5b386eb8c0a9f5d654f2dd081086c1616","src/external_trait_impls/rayon/raw.rs":"04012fb2e99648819b4bc0044107ed3cb94013e242b7865075c5bd9ebf1b6865","src/external_trait_impls/rayon/set.rs":"7539348ff7bc6e3cce6b3c019d62dc401eea0138c578fef729c2593e8ead1cfa","src/external_trait_impls/rayon/table.rs":"8778d29509c68b5b7cb66859db025d3939ce22e7cf370b20ff3dea4fe4b29fd0","src/external_trait_impls/rkyv/hash_map.rs":"7abe24318143b776016052b05840656afc858b1ba5252f3d418d61972477f53d","src/external_trait_impls/rkyv/hash_set.rs":"38d969125d17d606492ec4ec9fc06b7e7118eb903240dacf40de21b9b06fa5c8","src/external_trait_impls/rkyv/mod.rs":"54399ce5574fd1d84b7b0cb4238fa3e898575e89a6724299be009d2172bda02e","src/external_trait_impls/serde.rs":"6dbe104dee16b453b6b048b541c6e02c6d067d970dfafd243fc4360288b0168c","src/lib.rs":"fbc05970d6458046590e9c4a33fc9a6fdc94ef725b9b00354fa609e207e6ae50","src/macros.rs":"98a26b908fc0fbe6a58d008a317e550013d615eb3cc17a5054a573c62c1d74cb","src/map.rs":"688f2ccecd38f32c66c7fc905703f363dd88511fc29c99bc260bb6973db66430","src/raw/alloc.rs":"902f8588d0fdee3e5c3dc02410f41d4b38ac88843727387f929f3186b3a2d322","src/raw/bitmask.rs":"3b3dce8d6a48856ada19085abf43908f124ab3419fcf434b9ca64d7bff243f67","src/raw/generic.rs":"efc5e603be3e9a17935aef1836a38ce01c78a0093b2af0671548eb5459b37921","src/raw/mod.rs":"73038e430bd54d56c484b6798e67dece4d67b3cf86031639a819629e8376d673","src/raw/neon.rs":"9907d8ebc36fc3df562dde478ea9b72213fda65288a304718d8647f0029dc9ad","src/raw/sse2.rs":"39038e3344e49f4638e211bcdbf56565ac53e90dce56172cc3b526fea911c2af","src/rustc_entry.rs":"8142ed89b50155602ef8c1628382bd62d3ee903920fe49d403d4100a278c6ba4","src/scopeguard.rs":"1a246e08a63c06cd8ad934bd7da229421bf804f991ae93cd7e242da27ca6c601","src/set.rs":"4069da81fc978f6d3b9605d8cf349c2b1b8c7766ab6bf3fec83b6442718fdce7","src/table.rs":"b64e4c4910b911175ae0eb72e744986ce695d3ecc0b52b70d916e3adefdd1908","tests/equivalent_trait.rs":"84faa3fe9d67c375d03fec81f0f1412c47862477d42e84e7d235258236338d5b","tests/hasher.rs":"9a8fdf67e4415618e16729969c386eefe71408cded5d46cf7b67d969276a3452","tests/raw.rs":"43ed2f98877533a0905611d9a30f26b183dd3e103e3856eeab80e7b8ac7894d3","tests/rayon.rs":"39cb24ab45fce8087bb54948715c8b6973ebfba1a325292b5b3cd9aab50b5fd2","tests/serde.rs":"6bac8054db722dd049901b37a6e006535bac30f425eb5cd91af19b5bc1dfe78e","tests/set.rs":"9f8011c29d1059aadb54b6dd4623521d5178b4278b4a56021ef2cee4bbb19fd9"},"package":"f93e7192158dbcda357bdec5fb5788eebf8bbac
027f3f33e719d29135ae84156"}
\ No newline at end of file diff --git a/vendor/hashbrown/CHANGELOG.md b/vendor/hashbrown/CHANGELOG.md index 8837c51ca..0e13b230c 100644 --- a/vendor/hashbrown/CHANGELOG.md +++ b/vendor/hashbrown/CHANGELOG.md @@ -7,6 +7,34 @@ and this project adheres to [Semantic Versioning](https://semver.org/). ## [Unreleased] +## [v0.14.2] - 2023-10-19 + +### Added + +- `HashTable` type which provides a low-level but safe API with explicit hashing. (#466) + +### Fixed + +- Disabled the use of NEON instructions on big-endian ARM. (#475) +- Disabled the use of NEON instructions on Miri. (#476) + +## [v0.14.1] - 2023-09-28 + +### Added + +- Allow serializing `HashMap`s that use a custom allocator. (#449) + +### Changed + +- Use the `Equivalent` trait from the `equivalent` crate. (#442) +- Slightly improved performance of table resizing. (#451) +- Relaxed MSRV to 1.63.0. (#457) +- Removed `Clone` requirement from custom allocators. (#468) + +### Fixed + +- Fixed custom allocators being leaked in some situations. (#439, #465) + ## [v0.14.0] - 2023-06-01 ### Added @@ -22,7 +50,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/). ### Changed - Optimized insertion to only perform a single lookup. (#277) -- `DrainFilter` has been renamed to `ExtractIf` and no longer drops remaining +- `DrainFilter` (`drain_filter`) has been renamed to `ExtractIf` and no longer drops remaining elements when the iterator is dropped. #(374) - Bumped MSRV to 1.64.0. (#431) - `{Map,Set}::raw_table` now returns an immutable reference. (#404) @@ -433,7 +461,9 @@ This release was _yanked_ due to a breaking change for users of `no-default-feat - Initial release -[Unreleased]: https://github.com/rust-lang/hashbrown/compare/v0.14.0...HEAD +[Unreleased]: https://github.com/rust-lang/hashbrown/compare/v0.14.2...HEAD +[v0.14.2]: https://github.com/rust-lang/hashbrown/compare/v0.14.1...v0.14.2 +[v0.14.1]: https://github.com/rust-lang/hashbrown/compare/v0.14.0...v0.14.1 [v0.14.0]: https://github.com/rust-lang/hashbrown/compare/v0.13.2...v0.14.0 [v0.13.2]: https://github.com/rust-lang/hashbrown/compare/v0.13.1...v0.13.2 [v0.13.1]: https://github.com/rust-lang/hashbrown/compare/v0.12.3...v0.13.1 diff --git a/vendor/hashbrown/Cargo.toml b/vendor/hashbrown/Cargo.toml index 27e3f9b4f..2f374f446 100644 --- a/vendor/hashbrown/Cargo.toml +++ b/vendor/hashbrown/Cargo.toml @@ -11,9 +11,9 @@ [package] edition = "2021" -rust-version = "1.64.0" +rust-version = "1.63.0" name = "hashbrown" -version = "0.14.0" +version = "0.14.2" authors = ["Amanieu d'Antras <amanieu@gmail.com>"] exclude = [ ".github", @@ -41,6 +41,7 @@ features = [ "serde", "raw", ] +rustdoc-args = ["--generate-link-to-definition"] [dependencies.ahash] version = "0.8.0" @@ -67,6 +68,11 @@ version = "1.0.0" optional = true package = "rustc-std-workspace-core" +[dependencies.equivalent] +version = "1.0" +optional = true +default-features = false + [dependencies.rayon] version = "1.0" optional = true diff --git a/vendor/hashbrown/README.md b/vendor/hashbrown/README.md index d0c4261e9..5eaef8bd0 100644 --- a/vendor/hashbrown/README.md +++ b/vendor/hashbrown/README.md @@ -4,7 +4,7 @@ hashbrown [![Build Status](https://github.com/rust-lang/hashbrown/actions/workflows/rust.yml/badge.svg)](https://github.com/rust-lang/hashbrown/actions) [![Crates.io](https://img.shields.io/crates/v/hashbrown.svg)](https://crates.io/crates/hashbrown) [![Documentation](https://docs.rs/hashbrown/badge.svg)](https://docs.rs/hashbrown) 
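The v0.14.1 notes above switch key lookups to the `Equivalent` trait from the `equivalent` crate, now an optional dependency in Cargo.toml (re-exported when the feature is enabled, with a local fallback otherwise). A minimal sketch of what the trait buys, assuming hashbrown 0.14 with default features; `OwnedKey` and `KeyRef` are illustrative types, not part of the crate:

```rust
use core::hash::{Hash, Hasher};
use hashbrown::{Equivalent, HashMap};

#[derive(PartialEq, Eq, Hash)]
struct OwnedKey(String, u32);

// A borrowed view of the key. It must hash exactly like `OwnedKey`
// (same fields, same order) so lookups probe the same buckets.
struct KeyRef<'a>(&'a str, u32);

impl Hash for KeyRef<'_> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        self.0.hash(state);
        self.1.hash(state);
    }
}

impl Equivalent<OwnedKey> for KeyRef<'_> {
    fn equivalent(&self, key: &OwnedKey) -> bool {
        self.0 == key.0 && self.1 == key.1
    }
}

fn lookup_without_allocating() {
    let mut map = HashMap::new();
    map.insert(OwnedKey("a".into(), 1), 10);
    // `get` accepts any `Q: Hash + Equivalent<K>`, so no `OwnedKey` is built here.
    assert_eq!(map.get(&KeyRef("a", 1)), Some(&10));
}
```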
-[![Rust](https://img.shields.io/badge/rust-1.64.0%2B-blue.svg?maxAge=3600)](https://github.com/rust-lang/hashbrown) +[![Rust](https://img.shields.io/badge/rust-1.63.0%2B-blue.svg?maxAge=3600)](https://github.com/rust-lang/hashbrown) This crate is a Rust port of Google's high-performance [SwissTable] hash map, adapted to make it a drop-in replacement for Rust's standard `HashMap` diff --git a/vendor/hashbrown/src/external_trait_impls/rayon/map.rs b/vendor/hashbrown/src/external_trait_impls/rayon/map.rs index 1124bfd32..2534dc9b2 100644 --- a/vendor/hashbrown/src/external_trait_impls/rayon/map.rs +++ b/vendor/hashbrown/src/external_trait_impls/rayon/map.rs @@ -232,11 +232,11 @@ impl<K: Eq + Hash, V: fmt::Debug> fmt::Debug for ParValuesMut<'_, K, V> { /// [`into_par_iter`]: /hashbrown/struct.HashMap.html#method.into_par_iter /// [`HashMap`]: /hashbrown/struct.HashMap.html /// [`IntoParallelIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelIterator.html -pub struct IntoParIter<K, V, A: Allocator + Clone = Global> { +pub struct IntoParIter<K, V, A: Allocator = Global> { inner: RawIntoParIter<(K, V), A>, } -impl<K: Send, V: Send, A: Allocator + Clone + Send> ParallelIterator for IntoParIter<K, V, A> { +impl<K: Send, V: Send, A: Allocator + Send> ParallelIterator for IntoParIter<K, V, A> { type Item = (K, V); #[cfg_attr(feature = "inline-more", inline)] @@ -248,9 +248,7 @@ impl<K: Send, V: Send, A: Allocator + Clone + Send> ParallelIterator for IntoPar } } -impl<K: fmt::Debug + Eq + Hash, V: fmt::Debug, A: Allocator + Clone> fmt::Debug - for IntoParIter<K, V, A> -{ +impl<K: fmt::Debug + Eq + Hash, V: fmt::Debug, A: Allocator> fmt::Debug for IntoParIter<K, V, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { ParIter { inner: unsafe { self.inner.par_iter() }, @@ -267,11 +265,11 @@ impl<K: fmt::Debug + Eq + Hash, V: fmt::Debug, A: Allocator + Clone> fmt::Debug /// /// [`par_drain`]: /hashbrown/struct.HashMap.html#method.par_drain /// [`HashMap`]: /hashbrown/struct.HashMap.html -pub struct ParDrain<'a, K, V, A: Allocator + Clone = Global> { +pub struct ParDrain<'a, K, V, A: Allocator = Global> { inner: RawParDrain<'a, (K, V), A>, } -impl<K: Send, V: Send, A: Allocator + Clone + Sync> ParallelIterator for ParDrain<'_, K, V, A> { +impl<K: Send, V: Send, A: Allocator + Sync> ParallelIterator for ParDrain<'_, K, V, A> { type Item = (K, V); #[cfg_attr(feature = "inline-more", inline)] @@ -283,9 +281,7 @@ impl<K: Send, V: Send, A: Allocator + Clone + Sync> ParallelIterator for ParDrai } } -impl<K: fmt::Debug + Eq + Hash, V: fmt::Debug, A: Allocator + Clone> fmt::Debug - for ParDrain<'_, K, V, A> -{ +impl<K: fmt::Debug + Eq + Hash, V: fmt::Debug, A: Allocator> fmt::Debug for ParDrain<'_, K, V, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { ParIter { inner: unsafe { self.inner.par_iter() }, @@ -295,7 +291,7 @@ impl<K: fmt::Debug + Eq + Hash, V: fmt::Debug, A: Allocator + Clone> fmt::Debug } } -impl<K: Sync, V: Sync, S, A: Allocator + Clone> HashMap<K, V, S, A> { +impl<K: Sync, V: Sync, S, A: Allocator> HashMap<K, V, S, A> { /// Visits (potentially in parallel) immutably borrowed keys in an arbitrary order. 
#[cfg_attr(feature = "inline-more", inline)] pub fn par_keys(&self) -> ParKeys<'_, K, V> { @@ -315,7 +311,7 @@ impl<K: Sync, V: Sync, S, A: Allocator + Clone> HashMap<K, V, S, A> { } } -impl<K: Send, V: Send, S, A: Allocator + Clone> HashMap<K, V, S, A> { +impl<K: Send, V: Send, S, A: Allocator> HashMap<K, V, S, A> { /// Visits (potentially in parallel) mutably borrowed values in an arbitrary order. #[cfg_attr(feature = "inline-more", inline)] pub fn par_values_mut(&mut self) -> ParValuesMut<'_, K, V> { @@ -340,7 +336,7 @@ where K: Eq + Hash + Sync, V: PartialEq + Sync, S: BuildHasher + Sync, - A: Allocator + Clone + Sync, + A: Allocator + Sync, { /// Returns `true` if the map is equal to another, /// i.e. both maps contain the same keys mapped to the same values. @@ -354,9 +350,7 @@ where } } -impl<K: Send, V: Send, S, A: Allocator + Clone + Send> IntoParallelIterator - for HashMap<K, V, S, A> -{ +impl<K: Send, V: Send, S, A: Allocator + Send> IntoParallelIterator for HashMap<K, V, S, A> { type Item = (K, V); type Iter = IntoParIter<K, V, A>; @@ -368,9 +362,7 @@ impl<K: Send, V: Send, S, A: Allocator + Clone + Send> IntoParallelIterator } } -impl<'a, K: Sync, V: Sync, S, A: Allocator + Clone> IntoParallelIterator - for &'a HashMap<K, V, S, A> -{ +impl<'a, K: Sync, V: Sync, S, A: Allocator> IntoParallelIterator for &'a HashMap<K, V, S, A> { type Item = (&'a K, &'a V); type Iter = ParIter<'a, K, V>; @@ -383,9 +375,7 @@ impl<'a, K: Sync, V: Sync, S, A: Allocator + Clone> IntoParallelIterator } } -impl<'a, K: Sync, V: Send, S, A: Allocator + Clone> IntoParallelIterator - for &'a mut HashMap<K, V, S, A> -{ +impl<'a, K: Sync, V: Send, S, A: Allocator> IntoParallelIterator for &'a mut HashMap<K, V, S, A> { type Item = (&'a K, &'a mut V); type Iter = ParIterMut<'a, K, V>; @@ -424,7 +414,7 @@ where K: Eq + Hash + Send, V: Send, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { fn par_extend<I>(&mut self, par_iter: I) where @@ -440,7 +430,7 @@ where K: Copy + Eq + Hash + Sync, V: Copy + Sync, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { fn par_extend<I>(&mut self, par_iter: I) where @@ -456,7 +446,7 @@ where K: Eq + Hash, S: BuildHasher, I: IntoParallelIterator, - A: Allocator + Clone, + A: Allocator, HashMap<K, V, S, A>: Extend<I::Item>, { let (list, len) = super::helpers::collect(par_iter); diff --git a/vendor/hashbrown/src/external_trait_impls/rayon/mod.rs b/vendor/hashbrown/src/external_trait_impls/rayon/mod.rs index 99337a1ce..61ca69b61 100644 --- a/vendor/hashbrown/src/external_trait_impls/rayon/mod.rs +++ b/vendor/hashbrown/src/external_trait_impls/rayon/mod.rs @@ -2,3 +2,4 @@ mod helpers; pub(crate) mod map; pub(crate) mod raw; pub(crate) mod set; +pub(crate) mod table; diff --git a/vendor/hashbrown/src/external_trait_impls/rayon/raw.rs b/vendor/hashbrown/src/external_trait_impls/rayon/raw.rs index 883303e27..612be47a5 100644 --- a/vendor/hashbrown/src/external_trait_impls/rayon/raw.rs +++ b/vendor/hashbrown/src/external_trait_impls/rayon/raw.rs @@ -1,7 +1,6 @@ use crate::raw::Bucket; use crate::raw::{Allocator, Global, RawIter, RawIterRange, RawTable}; use crate::scopeguard::guard; -use alloc::alloc::dealloc; use core::marker::PhantomData; use core::mem; use core::ptr::NonNull; @@ -76,18 +75,18 @@ impl<T> UnindexedProducer for ParIterProducer<T> { } /// Parallel iterator which consumes a table and returns elements. 
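The rayon map changes above are almost entirely the removal of the `Clone` bound on custom allocators; the parallel-iteration API itself (`par_keys`, `par_values`, `into_par_iter`) keeps its shape. A usage sketch, assuming hashbrown is built with the `rayon` feature:

```rust
use hashbrown::HashMap;
use rayon::prelude::*;

// Sum the values, visiting entries in arbitrary order and potentially in parallel.
fn total(map: &HashMap<String, u64>) -> u64 {
    map.par_values().copied().sum()
}

// Consume the map in parallel via IntoParallelIterator.
fn into_pairs(map: HashMap<String, u64>) -> Vec<(String, u64)> {
    map.into_par_iter().collect()
}
```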
-pub struct RawIntoParIter<T, A: Allocator + Clone = Global> { +pub struct RawIntoParIter<T, A: Allocator = Global> { table: RawTable<T, A>, } -impl<T, A: Allocator + Clone> RawIntoParIter<T, A> { +impl<T, A: Allocator> RawIntoParIter<T, A> { #[cfg_attr(feature = "inline-more", inline)] pub(super) unsafe fn par_iter(&self) -> RawParIter<T> { self.table.par_iter() } } -impl<T: Send, A: Allocator + Clone + Send> ParallelIterator for RawIntoParIter<T, A> { +impl<T: Send, A: Allocator + Send> ParallelIterator for RawIntoParIter<T, A> { type Item = T; #[cfg_attr(feature = "inline-more", inline)] @@ -97,9 +96,9 @@ impl<T: Send, A: Allocator + Clone + Send> ParallelIterator for RawIntoParIter<T { let iter = unsafe { self.table.iter().iter }; let _guard = guard(self.table.into_allocation(), |alloc| { - if let Some((ptr, layout)) = *alloc { + if let Some((ptr, layout, ref alloc)) = *alloc { unsafe { - dealloc(ptr.as_ptr(), layout); + alloc.deallocate(ptr, layout); } } }); @@ -109,23 +108,23 @@ impl<T: Send, A: Allocator + Clone + Send> ParallelIterator for RawIntoParIter<T } /// Parallel iterator which consumes elements without freeing the table storage. -pub struct RawParDrain<'a, T, A: Allocator + Clone = Global> { +pub struct RawParDrain<'a, T, A: Allocator = Global> { // We don't use a &'a mut RawTable<T> because we want RawParDrain to be // covariant over T. table: NonNull<RawTable<T, A>>, marker: PhantomData<&'a RawTable<T, A>>, } -unsafe impl<T: Send, A: Allocator + Clone> Send for RawParDrain<'_, T, A> {} +unsafe impl<T: Send, A: Allocator> Send for RawParDrain<'_, T, A> {} -impl<T, A: Allocator + Clone> RawParDrain<'_, T, A> { +impl<T, A: Allocator> RawParDrain<'_, T, A> { #[cfg_attr(feature = "inline-more", inline)] pub(super) unsafe fn par_iter(&self) -> RawParIter<T> { self.table.as_ref().par_iter() } } -impl<T: Send, A: Allocator + Clone> ParallelIterator for RawParDrain<'_, T, A> { +impl<T: Send, A: Allocator> ParallelIterator for RawParDrain<'_, T, A> { type Item = T; #[cfg_attr(feature = "inline-more", inline)] @@ -143,7 +142,7 @@ impl<T: Send, A: Allocator + Clone> ParallelIterator for RawParDrain<'_, T, A> { } } -impl<T, A: Allocator + Clone> Drop for RawParDrain<'_, T, A> { +impl<T, A: Allocator> Drop for RawParDrain<'_, T, A> { fn drop(&mut self) { // If drive_unindexed is not called then simply clear the table. unsafe { @@ -204,7 +203,7 @@ impl<T> Drop for ParDrainProducer<T> { } } -impl<T, A: Allocator + Clone> RawTable<T, A> { +impl<T, A: Allocator> RawTable<T, A> { /// Returns a parallel iterator over the elements in a `RawTable`. 
#[cfg_attr(feature = "inline-more", inline)] pub unsafe fn par_iter(&self) -> RawParIter<T> { diff --git a/vendor/hashbrown/src/external_trait_impls/rayon/set.rs b/vendor/hashbrown/src/external_trait_impls/rayon/set.rs index ee4f6e669..3de98fccb 100644 --- a/vendor/hashbrown/src/external_trait_impls/rayon/set.rs +++ b/vendor/hashbrown/src/external_trait_impls/rayon/set.rs @@ -16,11 +16,11 @@ use rayon::iter::{FromParallelIterator, IntoParallelIterator, ParallelExtend, Pa /// [`into_par_iter`]: /hashbrown/struct.HashSet.html#method.into_par_iter /// [`HashSet`]: /hashbrown/struct.HashSet.html /// [`IntoParallelIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelIterator.html -pub struct IntoParIter<T, A: Allocator + Clone = Global> { +pub struct IntoParIter<T, A: Allocator = Global> { inner: map::IntoParIter<T, (), A>, } -impl<T: Send, A: Allocator + Clone + Send> ParallelIterator for IntoParIter<T, A> { +impl<T: Send, A: Allocator + Send> ParallelIterator for IntoParIter<T, A> { type Item = T; fn drive_unindexed<C>(self, consumer: C) -> C::Result @@ -38,11 +38,11 @@ impl<T: Send, A: Allocator + Clone + Send> ParallelIterator for IntoParIter<T, A /// /// [`par_drain`]: /hashbrown/struct.HashSet.html#method.par_drain /// [`HashSet`]: /hashbrown/struct.HashSet.html -pub struct ParDrain<'a, T, A: Allocator + Clone = Global> { +pub struct ParDrain<'a, T, A: Allocator = Global> { inner: map::ParDrain<'a, T, (), A>, } -impl<T: Send, A: Allocator + Clone + Send + Sync> ParallelIterator for ParDrain<'_, T, A> { +impl<T: Send, A: Allocator + Send + Sync> ParallelIterator for ParDrain<'_, T, A> { type Item = T; fn drive_unindexed<C>(self, consumer: C) -> C::Result @@ -85,7 +85,7 @@ impl<'a, T: Sync> ParallelIterator for ParIter<'a, T> { /// /// [`par_difference`]: /hashbrown/struct.HashSet.html#method.par_difference /// [`HashSet`]: /hashbrown/struct.HashSet.html -pub struct ParDifference<'a, T, S, A: Allocator + Clone = Global> { +pub struct ParDifference<'a, T, S, A: Allocator = Global> { a: &'a HashSet<T, S, A>, b: &'a HashSet<T, S, A>, } @@ -94,7 +94,7 @@ impl<'a, T, S, A> ParallelIterator for ParDifference<'a, T, S, A> where T: Eq + Hash + Sync, S: BuildHasher + Sync, - A: Allocator + Clone + Sync, + A: Allocator + Sync, { type Item = &'a T; @@ -118,7 +118,7 @@ where /// /// [`par_symmetric_difference`]: /hashbrown/struct.HashSet.html#method.par_symmetric_difference /// [`HashSet`]: /hashbrown/struct.HashSet.html -pub struct ParSymmetricDifference<'a, T, S, A: Allocator + Clone = Global> { +pub struct ParSymmetricDifference<'a, T, S, A: Allocator = Global> { a: &'a HashSet<T, S, A>, b: &'a HashSet<T, S, A>, } @@ -127,7 +127,7 @@ impl<'a, T, S, A> ParallelIterator for ParSymmetricDifference<'a, T, S, A> where T: Eq + Hash + Sync, S: BuildHasher + Sync, - A: Allocator + Clone + Sync, + A: Allocator + Sync, { type Item = &'a T; @@ -150,7 +150,7 @@ where /// /// [`par_intersection`]: /hashbrown/struct.HashSet.html#method.par_intersection /// [`HashSet`]: /hashbrown/struct.HashSet.html -pub struct ParIntersection<'a, T, S, A: Allocator + Clone = Global> { +pub struct ParIntersection<'a, T, S, A: Allocator = Global> { a: &'a HashSet<T, S, A>, b: &'a HashSet<T, S, A>, } @@ -159,7 +159,7 @@ impl<'a, T, S, A> ParallelIterator for ParIntersection<'a, T, S, A> where T: Eq + Hash + Sync, S: BuildHasher + Sync, - A: Allocator + Clone + Sync, + A: Allocator + Sync, { type Item = &'a T; @@ -181,7 +181,7 @@ where /// /// [`par_union`]: /hashbrown/struct.HashSet.html#method.par_union /// 
[`HashSet`]: /hashbrown/struct.HashSet.html -pub struct ParUnion<'a, T, S, A: Allocator + Clone = Global> { +pub struct ParUnion<'a, T, S, A: Allocator = Global> { a: &'a HashSet<T, S, A>, b: &'a HashSet<T, S, A>, } @@ -190,7 +190,7 @@ impl<'a, T, S, A> ParallelIterator for ParUnion<'a, T, S, A> where T: Eq + Hash + Sync, S: BuildHasher + Sync, - A: Allocator + Clone + Sync, + A: Allocator + Sync, { type Item = &'a T; @@ -216,7 +216,7 @@ impl<T, S, A> HashSet<T, S, A> where T: Eq + Hash + Sync, S: BuildHasher + Sync, - A: Allocator + Clone + Sync, + A: Allocator + Sync, { /// Visits (potentially in parallel) the values representing the union, /// i.e. all the values in `self` or `other`, without duplicates. @@ -289,7 +289,7 @@ where impl<T, S, A> HashSet<T, S, A> where T: Eq + Hash + Send, - A: Allocator + Clone + Send, + A: Allocator + Send, { /// Consumes (potentially in parallel) all values in an arbitrary order, /// while preserving the set's allocated memory for reuse. @@ -301,7 +301,7 @@ where } } -impl<T: Send, S, A: Allocator + Clone + Send> IntoParallelIterator for HashSet<T, S, A> { +impl<T: Send, S, A: Allocator + Send> IntoParallelIterator for HashSet<T, S, A> { type Item = T; type Iter = IntoParIter<T, A>; @@ -313,7 +313,7 @@ impl<T: Send, S, A: Allocator + Clone + Send> IntoParallelIterator for HashSet<T } } -impl<'a, T: Sync, S, A: Allocator + Clone> IntoParallelIterator for &'a HashSet<T, S, A> { +impl<'a, T: Sync, S, A: Allocator> IntoParallelIterator for &'a HashSet<T, S, A> { type Item = &'a T; type Iter = ParIter<'a, T>; @@ -374,7 +374,7 @@ fn extend<T, S, I, A>(set: &mut HashSet<T, S, A>, par_iter: I) where T: Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, I: IntoParallelIterator, HashSet<T, S, A>: Extend<I::Item>, { diff --git a/vendor/hashbrown/src/external_trait_impls/rayon/table.rs b/vendor/hashbrown/src/external_trait_impls/rayon/table.rs new file mode 100644 index 000000000..e8e50944a --- /dev/null +++ b/vendor/hashbrown/src/external_trait_impls/rayon/table.rs @@ -0,0 +1,252 @@ +//! Rayon extensions for `HashTable`. + +use super::raw::{RawIntoParIter, RawParDrain, RawParIter}; +use crate::hash_table::HashTable; +use crate::raw::{Allocator, Global}; +use core::fmt; +use core::marker::PhantomData; +use rayon::iter::plumbing::UnindexedConsumer; +use rayon::iter::{IntoParallelIterator, ParallelIterator}; + +/// Parallel iterator over shared references to entries in a map. +/// +/// This iterator is created by the [`par_iter`] method on [`HashTable`] +/// (provided by the [`IntoParallelRefIterator`] trait). +/// See its documentation for more. 
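The set side gets the same bound relaxation, and the parallel set operations are unchanged in shape. A sketch, again assuming the `rayon` feature:

```rust
use hashbrown::HashSet;
use rayon::prelude::*;

// Count elements present in both sets; par_intersection yields &T items
// in arbitrary order across rayon worker threads.
fn shared(a: &HashSet<u32>, b: &HashSet<u32>) -> usize {
    a.par_intersection(b).count()
}

// Union without duplicates, collected back into an owned set.
fn merged(a: &HashSet<u32>, b: &HashSet<u32>) -> HashSet<u32> {
    a.par_union(b).copied().collect()
}
```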
+/// +/// [`par_iter`]: /hashbrown/struct.HashTable.html#method.par_iter +/// [`HashTable`]: /hashbrown/struct.HashTable.html +/// [`IntoParallelRefIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelRefIterator.html +pub struct ParIter<'a, T> { + inner: RawParIter<T>, + marker: PhantomData<&'a T>, +} + +impl<'a, T: Sync> ParallelIterator for ParIter<'a, T> { + type Item = &'a T; + + #[cfg_attr(feature = "inline-more", inline)] + fn drive_unindexed<C>(self, consumer: C) -> C::Result + where + C: UnindexedConsumer<Self::Item>, + { + self.inner + .map(|x| unsafe { x.as_ref() }) + .drive_unindexed(consumer) + } +} + +impl<T> Clone for ParIter<'_, T> { + #[cfg_attr(feature = "inline-more", inline)] + fn clone(&self) -> Self { + Self { + inner: self.inner.clone(), + marker: PhantomData, + } + } +} + +impl<T: fmt::Debug> fmt::Debug for ParIter<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let iter = unsafe { self.inner.iter() }.map(|x| unsafe { x.as_ref() }); + f.debug_list().entries(iter).finish() + } +} + +/// Parallel iterator over mutable references to entries in a map. +/// +/// This iterator is created by the [`par_iter_mut`] method on [`HashTable`] +/// (provided by the [`IntoParallelRefMutIterator`] trait). +/// See its documentation for more. +/// +/// [`par_iter_mut`]: /hashbrown/struct.HashTable.html#method.par_iter_mut +/// [`HashTable`]: /hashbrown/struct.HashTable.html +/// [`IntoParallelRefMutIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelRefMutIterator.html +pub struct ParIterMut<'a, T> { + inner: RawParIter<T>, + marker: PhantomData<&'a mut T>, +} + +impl<'a, T: Send> ParallelIterator for ParIterMut<'a, T> { + type Item = &'a mut T; + + #[cfg_attr(feature = "inline-more", inline)] + fn drive_unindexed<C>(self, consumer: C) -> C::Result + where + C: UnindexedConsumer<Self::Item>, + { + self.inner + .map(|x| unsafe { x.as_mut() }) + .drive_unindexed(consumer) + } +} + +impl<T: fmt::Debug> fmt::Debug for ParIterMut<'_, T> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + ParIter { + inner: self.inner.clone(), + marker: PhantomData, + } + .fmt(f) + } +} + +/// Parallel iterator over entries of a consumed map. +/// +/// This iterator is created by the [`into_par_iter`] method on [`HashTable`] +/// (provided by the [`IntoParallelIterator`] trait). +/// See its documentation for more. +/// +/// [`into_par_iter`]: /hashbrown/struct.HashTable.html#method.into_par_iter +/// [`HashTable`]: /hashbrown/struct.HashTable.html +/// [`IntoParallelIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelIterator.html +pub struct IntoParIter<T, A: Allocator = Global> { + inner: RawIntoParIter<T, A>, +} + +impl<T: Send, A: Allocator + Send> ParallelIterator for IntoParIter<T, A> { + type Item = T; + + #[cfg_attr(feature = "inline-more", inline)] + fn drive_unindexed<C>(self, consumer: C) -> C::Result + where + C: UnindexedConsumer<Self::Item>, + { + self.inner.drive_unindexed(consumer) + } +} + +impl<T: fmt::Debug, A: Allocator> fmt::Debug for IntoParIter<T, A> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + ParIter { + inner: unsafe { self.inner.par_iter() }, + marker: PhantomData, + } + .fmt(f) + } +} + +/// Parallel draining iterator over entries of a map. +/// +/// This iterator is created by the [`par_drain`] method on [`HashTable`]. +/// See its documentation for more. 
+/// +/// [`par_drain`]: /hashbrown/struct.HashTable.html#method.par_drain +/// [`HashTable`]: /hashbrown/struct.HashTable.html +pub struct ParDrain<'a, T, A: Allocator = Global> { + inner: RawParDrain<'a, T, A>, +} + +impl<T: Send, A: Allocator + Sync> ParallelIterator for ParDrain<'_, T, A> { + type Item = T; + + #[cfg_attr(feature = "inline-more", inline)] + fn drive_unindexed<C>(self, consumer: C) -> C::Result + where + C: UnindexedConsumer<Self::Item>, + { + self.inner.drive_unindexed(consumer) + } +} + +impl<T: fmt::Debug, A: Allocator> fmt::Debug for ParDrain<'_, T, A> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + ParIter { + inner: unsafe { self.inner.par_iter() }, + marker: PhantomData, + } + .fmt(f) + } +} + +impl<T: Send, A: Allocator> HashTable<T, A> { + /// Consumes (potentially in parallel) all values in an arbitrary order, + /// while preserving the map's allocated memory for reuse. + #[cfg_attr(feature = "inline-more", inline)] + pub fn par_drain(&mut self) -> ParDrain<'_, T, A> { + ParDrain { + inner: self.raw.par_drain(), + } + } +} + +impl<T: Send, A: Allocator + Send> IntoParallelIterator for HashTable<T, A> { + type Item = T; + type Iter = IntoParIter<T, A>; + + #[cfg_attr(feature = "inline-more", inline)] + fn into_par_iter(self) -> Self::Iter { + IntoParIter { + inner: self.raw.into_par_iter(), + } + } +} + +impl<'a, T: Sync, A: Allocator> IntoParallelIterator for &'a HashTable<T, A> { + type Item = &'a T; + type Iter = ParIter<'a, T>; + + #[cfg_attr(feature = "inline-more", inline)] + fn into_par_iter(self) -> Self::Iter { + ParIter { + inner: unsafe { self.raw.par_iter() }, + marker: PhantomData, + } + } +} + +impl<'a, T: Send, A: Allocator> IntoParallelIterator for &'a mut HashTable<T, A> { + type Item = &'a mut T; + type Iter = ParIterMut<'a, T>; + + #[cfg_attr(feature = "inline-more", inline)] + fn into_par_iter(self) -> Self::Iter { + ParIterMut { + inner: unsafe { self.raw.par_iter() }, + marker: PhantomData, + } + } +} + +#[cfg(test)] +mod test_par_table { + use alloc::vec::Vec; + use core::sync::atomic::{AtomicUsize, Ordering}; + + use rayon::prelude::*; + + use crate::{ + hash_map::{make_hash, DefaultHashBuilder}, + hash_table::HashTable, + }; + + #[test] + fn test_iterate() { + let hasher = DefaultHashBuilder::default(); + let mut a = HashTable::new(); + for i in 0..32 { + a.insert_unique(make_hash(&hasher, &i), i, |x| make_hash(&hasher, x)); + } + let observed = AtomicUsize::new(0); + a.par_iter().for_each(|k| { + observed.fetch_or(1 << *k, Ordering::Relaxed); + }); + assert_eq!(observed.into_inner(), 0xFFFF_FFFF); + } + + #[test] + fn test_move_iter() { + let hasher = DefaultHashBuilder::default(); + let hs = { + let mut hs = HashTable::new(); + + hs.insert_unique(make_hash(&hasher, &'a'), 'a', |x| make_hash(&hasher, x)); + hs.insert_unique(make_hash(&hasher, &'b'), 'b', |x| make_hash(&hasher, x)); + + hs + }; + + let v = hs.into_par_iter().collect::<Vec<char>>(); + assert!(v == ['a', 'b'] || v == ['b', 'a']); + } +} diff --git a/vendor/hashbrown/src/external_trait_impls/serde.rs b/vendor/hashbrown/src/external_trait_impls/serde.rs index 4d62deeb7..0a76dbec2 100644 --- a/vendor/hashbrown/src/external_trait_impls/serde.rs +++ b/vendor/hashbrown/src/external_trait_impls/serde.rs @@ -11,6 +11,7 @@ mod size_hint { } mod map { + use crate::raw::Allocator; use core::fmt; use core::hash::{BuildHasher, Hash}; use core::marker::PhantomData; @@ -21,11 +22,12 @@ mod map { use super::size_hint; - impl<K, V, H> Serialize for HashMap<K, V, H> + 
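The new `rayon/table.rs` above wires rayon up to `HashTable`, the v0.14.2 type in which hashing is explicit. A sketch of the basic (non-parallel) usage pattern that the `test_iterate` test also follows; `hash_key` is a local stand-in for the internal `make_hash` helper the test uses:

```rust
use hashbrown::HashTable;
use std::collections::hash_map::RandomState;
use std::hash::{BuildHasher, Hash, Hasher};

// Illustrative helper: hash a key with the given BuildHasher.
fn hash_key<S: BuildHasher, K: Hash>(s: &S, k: &K) -> u64 {
    let mut h = s.build_hasher();
    k.hash(&mut h);
    h.finish()
}

fn demo() {
    let s = RandomState::new();
    let mut table: HashTable<(u32, &str)> = HashTable::new();

    // The caller supplies the hash on insert, plus a closure used to
    // re-hash stored entries when the table grows.
    table.insert_unique(hash_key(&s, &7u32), (7, "seven"), |&(k, _)| hash_key(&s, &k));

    // Lookups take a hash and an equality predicate over stored entries.
    assert_eq!(
        table.find(hash_key(&s, &7u32), |&(k, _)| k == 7),
        Some(&(7, "seven"))
    );
}
```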
impl<K, V, H, A> Serialize for HashMap<K, V, H, A> where K: Serialize + Eq + Hash, V: Serialize, H: BuildHasher, + A: Allocator, { #[cfg_attr(feature = "inline-more", inline)] fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> @@ -36,40 +38,46 @@ mod map { } } - impl<'de, K, V, S> Deserialize<'de> for HashMap<K, V, S> + impl<'de, K, V, S, A> Deserialize<'de> for HashMap<K, V, S, A> where K: Deserialize<'de> + Eq + Hash, V: Deserialize<'de>, S: BuildHasher + Default, + A: Allocator + Default, { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de>, { - struct MapVisitor<K, V, S> { - marker: PhantomData<HashMap<K, V, S>>, + struct MapVisitor<K, V, S, A> + where + A: Allocator, + { + marker: PhantomData<HashMap<K, V, S, A>>, } - impl<'de, K, V, S> Visitor<'de> for MapVisitor<K, V, S> + impl<'de, K, V, S, A> Visitor<'de> for MapVisitor<K, V, S, A> where K: Deserialize<'de> + Eq + Hash, V: Deserialize<'de>, S: BuildHasher + Default, + A: Allocator + Default, { - type Value = HashMap<K, V, S>; + type Value = HashMap<K, V, S, A>; fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { formatter.write_str("a map") } #[cfg_attr(feature = "inline-more", inline)] - fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> + fn visit_map<M>(self, mut map: M) -> Result<Self::Value, M::Error> where - A: MapAccess<'de>, + M: MapAccess<'de>, { - let mut values = HashMap::with_capacity_and_hasher( + let mut values = HashMap::with_capacity_and_hasher_in( size_hint::cautious(map.size_hint()), S::default(), + A::default(), ); while let Some((key, value)) = map.next_entry()? { @@ -89,6 +97,7 @@ mod map { } mod set { + use crate::raw::Allocator; use core::fmt; use core::hash::{BuildHasher, Hash}; use core::marker::PhantomData; @@ -99,10 +108,11 @@ mod set { use super::size_hint; - impl<T, H> Serialize for HashSet<T, H> + impl<T, H, A> Serialize for HashSet<T, H, A> where T: Serialize + Eq + Hash, H: BuildHasher, + A: Allocator, { #[cfg_attr(feature = "inline-more", inline)] fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error> @@ -113,38 +123,44 @@ mod set { } } - impl<'de, T, S> Deserialize<'de> for HashSet<T, S> + impl<'de, T, S, A> Deserialize<'de> for HashSet<T, S, A> where T: Deserialize<'de> + Eq + Hash, S: BuildHasher + Default, + A: Allocator + Default, { fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> where D: Deserializer<'de>, { - struct SeqVisitor<T, S> { - marker: PhantomData<HashSet<T, S>>, + struct SeqVisitor<T, S, A> + where + A: Allocator, + { + marker: PhantomData<HashSet<T, S, A>>, } - impl<'de, T, S> Visitor<'de> for SeqVisitor<T, S> + impl<'de, T, S, A> Visitor<'de> for SeqVisitor<T, S, A> where T: Deserialize<'de> + Eq + Hash, S: BuildHasher + Default, + A: Allocator + Default, { - type Value = HashSet<T, S>; + type Value = HashSet<T, S, A>; fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result { formatter.write_str("a sequence") } #[cfg_attr(feature = "inline-more", inline)] - fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error> + fn visit_seq<M>(self, mut seq: M) -> Result<Self::Value, M::Error> where - A: SeqAccess<'de>, + M: SeqAccess<'de>, { - let mut values = HashSet::with_capacity_and_hasher( + let mut values = HashSet::with_capacity_and_hasher_in( size_hint::cautious(seq.size_hint()), S::default(), + A::default(), ); while let Some(value) = seq.next_element()? 
{ @@ -166,12 +182,15 @@ mod set { where D: Deserializer<'de>, { - struct SeqInPlaceVisitor<'a, T, S>(&'a mut HashSet<T, S>); + struct SeqInPlaceVisitor<'a, T, S, A>(&'a mut HashSet<T, S, A>) + where + A: Allocator; - impl<'a, 'de, T, S> Visitor<'de> for SeqInPlaceVisitor<'a, T, S> + impl<'a, 'de, T, S, A> Visitor<'de> for SeqInPlaceVisitor<'a, T, S, A> where T: Deserialize<'de> + Eq + Hash, S: BuildHasher + Default, + A: Allocator, { type Value = (); @@ -180,9 +199,9 @@ mod set { } #[cfg_attr(feature = "inline-more", inline)] - fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error> + fn visit_seq<M>(self, mut seq: M) -> Result<Self::Value, M::Error> where - A: SeqAccess<'de>, + M: SeqAccess<'de>, { self.0.clear(); self.0.reserve(size_hint::cautious(seq.size_hint())); diff --git a/vendor/hashbrown/src/lib.rs b/vendor/hashbrown/src/lib.rs index 013a9ddd9..6e9592abe 100644 --- a/vendor/hashbrown/src/lib.rs +++ b/vendor/hashbrown/src/lib.rs @@ -81,6 +81,7 @@ mod map; mod rustc_entry; mod scopeguard; mod set; +mod table; pub mod hash_map { //! A hash map implemented with quadratic probing and SIMD lookup. @@ -113,10 +114,30 @@ pub mod hash_set { pub use crate::external_trait_impls::rayon::set::*; } } +pub mod hash_table { + //! A hash table implemented with quadratic probing and SIMD lookup. + pub use crate::table::*; + + #[cfg(feature = "rayon")] + /// [rayon]-based parallel iterator types for hash tables. + /// You will rarely need to interact with it directly unless you have need + /// to name one of the iterator types. + /// + /// [rayon]: https://docs.rs/rayon/1.0/rayon + pub mod rayon { + pub use crate::external_trait_impls::rayon::table::*; + } +} pub use crate::map::HashMap; pub use crate::set::HashSet; +pub use crate::table::HashTable; + +#[cfg(feature = "equivalent")] +pub use equivalent::Equivalent; +// This is only used as a fallback when building as part of `std`. +#[cfg(not(feature = "equivalent"))] /// Key equivalence trait. /// /// This trait defines the function used to compare the input value with the @@ -140,6 +161,7 @@ pub trait Equivalent<K: ?Sized> { fn equivalent(&self, key: &K) -> bool; } +#[cfg(not(feature = "equivalent"))] impl<Q: ?Sized, K: ?Sized> Equivalent<K> for Q where Q: Eq, diff --git a/vendor/hashbrown/src/map.rs b/vendor/hashbrown/src/map.rs index 548ca0f9e..b5e657bc6 100644 --- a/vendor/hashbrown/src/map.rs +++ b/vendor/hashbrown/src/map.rs @@ -1,4 +1,6 @@ -use crate::raw::{Allocator, Bucket, Global, RawDrain, RawIntoIter, RawIter, RawTable}; +use crate::raw::{ + Allocator, Bucket, Global, RawDrain, RawExtractIf, RawIntoIter, RawIter, RawTable, +}; use crate::{Equivalent, TryReserveError}; use core::borrow::Borrow; use core::fmt::{self, Debug}; @@ -185,7 +187,7 @@ pub enum DefaultHashBuilder {} /// .iter().cloned().collect(); /// // use the values stored in map /// ``` -pub struct HashMap<K, V, S = DefaultHashBuilder, A: Allocator + Clone = Global> { +pub struct HashMap<K, V, S = DefaultHashBuilder, A: Allocator = Global> { pub(crate) hash_builder: S, pub(crate) table: RawTable<(K, V), A>, } @@ -324,7 +326,7 @@ impl<K, V> HashMap<K, V, DefaultHashBuilder> { } #[cfg(feature = "ahash")] -impl<K, V, A: Allocator + Clone> HashMap<K, V, DefaultHashBuilder, A> { +impl<K, V, A: Allocator> HashMap<K, V, DefaultHashBuilder, A> { /// Creates an empty `HashMap` using the given allocator. 
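With the serde impls above now generic over the allocator `A` (deserialization builds the map via `with_capacity_and_hasher_in` with `A::default()`), maps using a custom `Default` allocator can round-trip too; with the default `Global` allocator nothing changes at the call site. A sketch assuming hashbrown's `serde` feature, using `serde_json` purely for illustration:

```rust
use hashbrown::HashMap;

fn round_trip() -> Result<(), serde_json::Error> {
    let mut m: HashMap<String, u32> = HashMap::new();
    m.insert("answer".to_string(), 42);

    let json = serde_json::to_string(&m)?; // Serialize impl
    let back: HashMap<String, u32> = serde_json::from_str(&json)?; // Deserialize impl
    assert_eq!(back.get("answer"), Some(&42));
    Ok(())
}
```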
/// /// The hash map is initially created with a capacity of 0, so it will not allocate until it @@ -505,7 +507,7 @@ impl<K, V, S> HashMap<K, V, S> { } } -impl<K, V, S, A: Allocator + Clone> HashMap<K, V, S, A> { +impl<K, V, S, A: Allocator> HashMap<K, V, S, A> { /// Returns a reference to the underlying allocator. #[inline] pub fn allocator(&self) -> &A { @@ -944,6 +946,8 @@ impl<K, V, S, A: Allocator + Clone> HashMap<K, V, S, A> { /// /// Keeps the allocated memory for reuse. /// + /// [`retain()`]: HashMap::retain + /// /// # Examples /// /// ``` @@ -977,7 +981,7 @@ impl<K, V, S, A: Allocator + Clone> HashMap<K, V, S, A> { { ExtractIf { f, - inner: ExtractIfInner { + inner: RawExtractIf { iter: unsafe { self.table.iter() }, table: &mut self.table, }, @@ -1069,7 +1073,7 @@ impl<K, V, S, A> HashMap<K, V, S, A> where K: Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { /// Reserves capacity for at least `additional` more elements to be inserted /// in the `HashMap`. The collection may reserve more space to avoid @@ -1936,7 +1940,7 @@ where } } -impl<K, V, S, A: Allocator + Clone> HashMap<K, V, S, A> { +impl<K, V, S, A: Allocator> HashMap<K, V, S, A> { /// Creates a raw entry builder for the HashMap. /// /// Raw entries provide the lowest level of control for searching and @@ -2167,7 +2171,7 @@ where K: Eq + Hash, V: PartialEq, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { fn eq(&self, other: &Self) -> bool { if self.len() != other.len() { @@ -2184,7 +2188,7 @@ where K: Eq + Hash, V: Eq, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { } @@ -2192,7 +2196,7 @@ impl<K, V, S, A> Debug for HashMap<K, V, S, A> where K: Debug, V: Debug, - A: Allocator + Clone, + A: Allocator, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_map().entries(self.iter()).finish() @@ -2202,7 +2206,7 @@ where impl<K, V, S, A> Default for HashMap<K, V, S, A> where S: Default, - A: Default + Allocator + Clone, + A: Default + Allocator, { /// Creates an empty `HashMap<K, V, S, A>`, with the `Default` value for the hasher and allocator. /// @@ -2230,7 +2234,7 @@ where K: Eq + Hash, Q: Hash + Equivalent<K>, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { type Output = V; @@ -2261,7 +2265,7 @@ where impl<K, V, A, const N: usize> From<[(K, V); N]> for HashMap<K, V, DefaultHashBuilder, A> where K: Eq + Hash, - A: Default + Allocator + Clone, + A: Default + Allocator, { /// # Examples /// @@ -2406,11 +2410,11 @@ impl<K, V> IterMut<'_, K, V> { /// assert_eq!(iter.next(), None); /// assert_eq!(iter.next(), None); /// ``` -pub struct IntoIter<K, V, A: Allocator + Clone = Global> { +pub struct IntoIter<K, V, A: Allocator = Global> { inner: RawIntoIter<(K, V), A>, } -impl<K, V, A: Allocator + Clone> IntoIter<K, V, A> { +impl<K, V, A: Allocator> IntoIter<K, V, A> { /// Returns a iterator of references over the remaining items. 
#[cfg_attr(feature = "inline-more", inline)] pub(super) fn iter(&self) -> Iter<'_, K, V> { @@ -2450,11 +2454,11 @@ impl<K, V, A: Allocator + Clone> IntoIter<K, V, A> { /// assert_eq!(keys.next(), None); /// assert_eq!(keys.next(), None); /// ``` -pub struct IntoKeys<K, V, A: Allocator + Clone = Global> { +pub struct IntoKeys<K, V, A: Allocator = Global> { inner: IntoIter<K, V, A>, } -impl<K, V, A: Allocator + Clone> Iterator for IntoKeys<K, V, A> { +impl<K, V, A: Allocator> Iterator for IntoKeys<K, V, A> { type Item = K; #[inline] @@ -2467,16 +2471,16 @@ impl<K, V, A: Allocator + Clone> Iterator for IntoKeys<K, V, A> { } } -impl<K, V, A: Allocator + Clone> ExactSizeIterator for IntoKeys<K, V, A> { +impl<K, V, A: Allocator> ExactSizeIterator for IntoKeys<K, V, A> { #[inline] fn len(&self) -> usize { self.inner.len() } } -impl<K, V, A: Allocator + Clone> FusedIterator for IntoKeys<K, V, A> {} +impl<K, V, A: Allocator> FusedIterator for IntoKeys<K, V, A> {} -impl<K: Debug, V: Debug, A: Allocator + Clone> fmt::Debug for IntoKeys<K, V, A> { +impl<K: Debug, V: Debug, A: Allocator> fmt::Debug for IntoKeys<K, V, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list() .entries(self.inner.iter().map(|(k, _)| k)) @@ -2512,11 +2516,11 @@ impl<K: Debug, V: Debug, A: Allocator + Clone> fmt::Debug for IntoKeys<K, V, A> /// assert_eq!(values.next(), None); /// assert_eq!(values.next(), None); /// ``` -pub struct IntoValues<K, V, A: Allocator + Clone = Global> { +pub struct IntoValues<K, V, A: Allocator = Global> { inner: IntoIter<K, V, A>, } -impl<K, V, A: Allocator + Clone> Iterator for IntoValues<K, V, A> { +impl<K, V, A: Allocator> Iterator for IntoValues<K, V, A> { type Item = V; #[inline] @@ -2529,16 +2533,16 @@ impl<K, V, A: Allocator + Clone> Iterator for IntoValues<K, V, A> { } } -impl<K, V, A: Allocator + Clone> ExactSizeIterator for IntoValues<K, V, A> { +impl<K, V, A: Allocator> ExactSizeIterator for IntoValues<K, V, A> { #[inline] fn len(&self) -> usize { self.inner.len() } } -impl<K, V, A: Allocator + Clone> FusedIterator for IntoValues<K, V, A> {} +impl<K, V, A: Allocator> FusedIterator for IntoValues<K, V, A> {} -impl<K, V: Debug, A: Allocator + Clone> fmt::Debug for IntoValues<K, V, A> { +impl<K, V: Debug, A: Allocator> fmt::Debug for IntoValues<K, V, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list() .entries(self.inner.iter().map(|(_, v)| v)) @@ -2670,11 +2674,11 @@ impl<K, V: Debug> fmt::Debug for Values<'_, K, V> { /// assert_eq!(drain_iter.next(), None); /// assert_eq!(drain_iter.next(), None); /// ``` -pub struct Drain<'a, K, V, A: Allocator + Clone = Global> { +pub struct Drain<'a, K, V, A: Allocator = Global> { inner: RawDrain<'a, (K, V), A>, } -impl<K, V, A: Allocator + Clone> Drain<'_, K, V, A> { +impl<K, V, A: Allocator> Drain<'_, K, V, A> { /// Returns a iterator of references over the remaining items. 
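`IntoKeys` and `IntoValues` above are the consuming counterparts of `keys()` and `values()`; only their allocator bounds change in this diff. A brief usage sketch:

```rust
use hashbrown::HashMap;

fn demo() {
    let map: HashMap<&str, u32> = [("a", 1), ("b", 2)].into_iter().collect();

    // into_keys / into_values take the map by value.
    let mut keys: Vec<_> = map.clone().into_keys().collect();
    keys.sort_unstable();
    assert_eq!(keys, ["a", "b"]);

    let total: u32 = map.into_values().sum();
    assert_eq!(total, 3);
}
```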
#[cfg_attr(feature = "inline-more", inline)] pub(super) fn iter(&self) -> Iter<'_, K, V> { @@ -2717,24 +2721,24 @@ impl<K, V, A: Allocator + Clone> Drain<'_, K, V, A> { /// assert_eq!(map.len(), 1); /// ``` #[must_use = "Iterators are lazy unless consumed"] -pub struct ExtractIf<'a, K, V, F, A: Allocator + Clone = Global> +pub struct ExtractIf<'a, K, V, F, A: Allocator = Global> where F: FnMut(&K, &mut V) -> bool, { f: F, - inner: ExtractIfInner<'a, K, V, A>, + inner: RawExtractIf<'a, (K, V), A>, } impl<K, V, F, A> Iterator for ExtractIf<'_, K, V, F, A> where F: FnMut(&K, &mut V) -> bool, - A: Allocator + Clone, + A: Allocator, { type Item = (K, V); #[cfg_attr(feature = "inline-more", inline)] fn next(&mut self) -> Option<Self::Item> { - self.inner.next(&mut self.f) + self.inner.next(|&mut (ref k, ref mut v)| (self.f)(k, v)) } #[inline] @@ -2745,30 +2749,6 @@ where impl<K, V, F> FusedIterator for ExtractIf<'_, K, V, F> where F: FnMut(&K, &mut V) -> bool {} -/// Portions of `ExtractIf` shared with `set::ExtractIf` -pub(super) struct ExtractIfInner<'a, K, V, A: Allocator + Clone> { - pub iter: RawIter<(K, V)>, - pub table: &'a mut RawTable<(K, V), A>, -} - -impl<K, V, A: Allocator + Clone> ExtractIfInner<'_, K, V, A> { - #[cfg_attr(feature = "inline-more", inline)] - pub(super) fn next<F>(&mut self, f: &mut F) -> Option<(K, V)> - where - F: FnMut(&K, &mut V) -> bool, - { - unsafe { - for item in &mut self.iter { - let &mut (ref key, ref mut value) = item.as_mut(); - if f(key, value) { - return Some(self.table.remove(item).0); - } - } - } - None - } -} - /// A mutable iterator over the values of a `HashMap` in arbitrary order. /// The iterator element type is `&'a mut V`. /// @@ -2855,7 +2835,7 @@ pub struct ValuesMut<'a, K, V> { /// /// assert_eq!(map.len(), 6); /// ``` -pub struct RawEntryBuilderMut<'a, K, V, S, A: Allocator + Clone = Global> { +pub struct RawEntryBuilderMut<'a, K, V, S, A: Allocator = Global> { map: &'a mut HashMap<K, V, S, A>, } @@ -2943,7 +2923,7 @@ pub struct RawEntryBuilderMut<'a, K, V, S, A: Allocator + Clone = Global> { /// vec.sort_unstable(); /// assert_eq!(vec, [('a', 10), ('b', 20), ('c', 30), ('d', 40), ('e', 50), ('f', 60)]); /// ``` -pub enum RawEntryMut<'a, K, V, S, A: Allocator + Clone = Global> { +pub enum RawEntryMut<'a, K, V, S, A: Allocator = Global> { /// An occupied entry. 
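`ExtractIf` above (the renamed `DrainFilter`) now delegates to the shared `RawExtractIf`, replacing the old `ExtractIfInner`. Behaviourally, entries the predicate rejects stay in the map, and dropping the iterator early no longer removes the rest. A sketch of both points:

```rust
use hashbrown::HashMap;

fn demo() {
    let mut map: HashMap<i32, i32> = (0..8).map(|x| (x, x)).collect();

    // Pull out the even keys; odd keys remain in the map.
    let evens: Vec<_> = map.extract_if(|k, _v| k % 2 == 0).collect();
    assert_eq!(evens.len(), 4);
    assert_eq!(map.len(), 4);

    // Dropping the iterator early leaves the unvisited entries in place.
    let mut it = map.extract_if(|_k, _v| true);
    let _ = it.next();
    drop(it);
    assert_eq!(map.len(), 3);
}
```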
/// /// # Examples @@ -3034,7 +3014,7 @@ pub enum RawEntryMut<'a, K, V, S, A: Allocator + Clone = Global> { /// assert_eq!(map.get(&"b"), None); /// assert_eq!(map.len(), 1); /// ``` -pub struct RawOccupiedEntryMut<'a, K, V, S, A: Allocator + Clone = Global> { +pub struct RawOccupiedEntryMut<'a, K, V, S, A: Allocator = Global> { elem: Bucket<(K, V)>, table: &'a mut RawTable<(K, V), A>, hash_builder: &'a S, @@ -3045,7 +3025,7 @@ where K: Send, V: Send, S: Send, - A: Send + Allocator + Clone, + A: Send + Allocator, { } unsafe impl<K, V, S, A> Sync for RawOccupiedEntryMut<'_, K, V, S, A> @@ -3053,7 +3033,7 @@ where K: Sync, V: Sync, S: Sync, - A: Sync + Allocator + Clone, + A: Sync + Allocator, { } @@ -3105,7 +3085,7 @@ where /// } /// assert!(map[&"c"] == 30 && map.len() == 3); /// ``` -pub struct RawVacantEntryMut<'a, K, V, S, A: Allocator + Clone = Global> { +pub struct RawVacantEntryMut<'a, K, V, S, A: Allocator = Global> { table: &'a mut RawTable<(K, V), A>, hash_builder: &'a S, } @@ -3144,11 +3124,11 @@ pub struct RawVacantEntryMut<'a, K, V, S, A: Allocator + Clone = Global> { /// assert_eq!(map.raw_entry().from_key_hashed_nocheck(hash, &k), kv); /// } /// ``` -pub struct RawEntryBuilder<'a, K, V, S, A: Allocator + Clone = Global> { +pub struct RawEntryBuilder<'a, K, V, S, A: Allocator = Global> { map: &'a HashMap<K, V, S, A>, } -impl<'a, K, V, S, A: Allocator + Clone> RawEntryBuilderMut<'a, K, V, S, A> { +impl<'a, K, V, S, A: Allocator> RawEntryBuilderMut<'a, K, V, S, A> { /// Creates a `RawEntryMut` from the given key. /// /// # Examples @@ -3205,7 +3185,7 @@ impl<'a, K, V, S, A: Allocator + Clone> RawEntryBuilderMut<'a, K, V, S, A> { } } -impl<'a, K, V, S, A: Allocator + Clone> RawEntryBuilderMut<'a, K, V, S, A> { +impl<'a, K, V, S, A: Allocator> RawEntryBuilderMut<'a, K, V, S, A> { /// Creates a `RawEntryMut` from the given hash and matching function. /// /// # Examples @@ -3256,7 +3236,7 @@ impl<'a, K, V, S, A: Allocator + Clone> RawEntryBuilderMut<'a, K, V, S, A> { } } -impl<'a, K, V, S, A: Allocator + Clone> RawEntryBuilder<'a, K, V, S, A> { +impl<'a, K, V, S, A: Allocator> RawEntryBuilder<'a, K, V, S, A> { /// Access an immutable entry by key. /// /// # Examples @@ -3349,7 +3329,7 @@ impl<'a, K, V, S, A: Allocator + Clone> RawEntryBuilder<'a, K, V, S, A> { } } -impl<'a, K, V, S, A: Allocator + Clone> RawEntryMut<'a, K, V, S, A> { +impl<'a, K, V, S, A: Allocator> RawEntryMut<'a, K, V, S, A> { /// Sets the value of the entry, and returns a RawOccupiedEntryMut. /// /// # Examples @@ -3543,7 +3523,7 @@ impl<'a, K, V, S, A: Allocator + Clone> RawEntryMut<'a, K, V, S, A> { } } -impl<'a, K, V, S, A: Allocator + Clone> RawOccupiedEntryMut<'a, K, V, S, A> { +impl<'a, K, V, S, A: Allocator> RawOccupiedEntryMut<'a, K, V, S, A> { /// Gets a reference to the key in the entry. /// /// # Examples @@ -3942,7 +3922,7 @@ impl<'a, K, V, S, A: Allocator + Clone> RawOccupiedEntryMut<'a, K, V, S, A> { } } -impl<'a, K, V, S, A: Allocator + Clone> RawVacantEntryMut<'a, K, V, S, A> { +impl<'a, K, V, S, A: Allocator> RawVacantEntryMut<'a, K, V, S, A> { /// Sets the value of the entry with the VacantEntry's key, /// and returns a mutable reference to it. 
/// @@ -4088,13 +4068,13 @@ impl<'a, K, V, S, A: Allocator + Clone> RawVacantEntryMut<'a, K, V, S, A> { } } -impl<K, V, S, A: Allocator + Clone> Debug for RawEntryBuilderMut<'_, K, V, S, A> { +impl<K, V, S, A: Allocator> Debug for RawEntryBuilderMut<'_, K, V, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("RawEntryBuilder").finish() } } -impl<K: Debug, V: Debug, S, A: Allocator + Clone> Debug for RawEntryMut<'_, K, V, S, A> { +impl<K: Debug, V: Debug, S, A: Allocator> Debug for RawEntryMut<'_, K, V, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { RawEntryMut::Vacant(ref v) => f.debug_tuple("RawEntry").field(v).finish(), @@ -4103,7 +4083,7 @@ impl<K: Debug, V: Debug, S, A: Allocator + Clone> Debug for RawEntryMut<'_, K, V } } -impl<K: Debug, V: Debug, S, A: Allocator + Clone> Debug for RawOccupiedEntryMut<'_, K, V, S, A> { +impl<K: Debug, V: Debug, S, A: Allocator> Debug for RawOccupiedEntryMut<'_, K, V, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("RawOccupiedEntryMut") .field("key", self.key()) @@ -4112,13 +4092,13 @@ impl<K: Debug, V: Debug, S, A: Allocator + Clone> Debug for RawOccupiedEntryMut< } } -impl<K, V, S, A: Allocator + Clone> Debug for RawVacantEntryMut<'_, K, V, S, A> { +impl<K, V, S, A: Allocator> Debug for RawVacantEntryMut<'_, K, V, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("RawVacantEntryMut").finish() } } -impl<K, V, S, A: Allocator + Clone> Debug for RawEntryBuilder<'_, K, V, S, A> { +impl<K, V, S, A: Allocator> Debug for RawEntryBuilder<'_, K, V, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("RawEntryBuilder").finish() } @@ -4169,7 +4149,7 @@ impl<K, V, S, A: Allocator + Clone> Debug for RawEntryBuilder<'_, K, V, S, A> { /// ``` pub enum Entry<'a, K, V, S, A = Global> where - A: Allocator + Clone, + A: Allocator, { /// An occupied entry. 
/// @@ -4202,7 +4182,7 @@ where Vacant(VacantEntry<'a, K, V, S, A>), } -impl<K: Debug, V: Debug, S, A: Allocator + Clone> Debug for Entry<'_, K, V, S, A> { +impl<K: Debug, V: Debug, S, A: Allocator> Debug for Entry<'_, K, V, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { Entry::Vacant(ref v) => f.debug_tuple("Entry").field(v).finish(), @@ -4251,7 +4231,7 @@ impl<K: Debug, V: Debug, S, A: Allocator + Clone> Debug for Entry<'_, K, V, S, A /// assert_eq!(map.get(&"c"), None); /// assert_eq!(map.len(), 2); /// ``` -pub struct OccupiedEntry<'a, K, V, S = DefaultHashBuilder, A: Allocator + Clone = Global> { +pub struct OccupiedEntry<'a, K, V, S = DefaultHashBuilder, A: Allocator = Global> { hash: u64, key: Option<K>, elem: Bucket<(K, V)>, @@ -4263,7 +4243,7 @@ where K: Send, V: Send, S: Send, - A: Send + Allocator + Clone, + A: Send + Allocator, { } unsafe impl<K, V, S, A> Sync for OccupiedEntry<'_, K, V, S, A> @@ -4271,11 +4251,11 @@ where K: Sync, V: Sync, S: Sync, - A: Sync + Allocator + Clone, + A: Sync + Allocator, { } -impl<K: Debug, V: Debug, S, A: Allocator + Clone> Debug for OccupiedEntry<'_, K, V, S, A> { +impl<K: Debug, V: Debug, S, A: Allocator> Debug for OccupiedEntry<'_, K, V, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("OccupiedEntry") .field("key", self.key()) @@ -4314,13 +4294,13 @@ impl<K: Debug, V: Debug, S, A: Allocator + Clone> Debug for OccupiedEntry<'_, K, /// } /// assert!(map[&"b"] == 20 && map.len() == 2); /// ``` -pub struct VacantEntry<'a, K, V, S = DefaultHashBuilder, A: Allocator + Clone = Global> { +pub struct VacantEntry<'a, K, V, S = DefaultHashBuilder, A: Allocator = Global> { hash: u64, key: K, table: &'a mut HashMap<K, V, S, A>, } -impl<K: Debug, V, S, A: Allocator + Clone> Debug for VacantEntry<'_, K, V, S, A> { +impl<K: Debug, V, S, A: Allocator> Debug for VacantEntry<'_, K, V, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_tuple("VacantEntry").field(self.key()).finish() } @@ -4380,7 +4360,7 @@ impl<K: Debug, V, S, A: Allocator + Clone> Debug for VacantEntry<'_, K, V, S, A> /// ``` pub enum EntryRef<'a, 'b, K, Q: ?Sized, V, S, A = Global> where - A: Allocator + Clone, + A: Allocator, { /// An occupied entry. 
/// @@ -4413,7 +4393,7 @@ where Vacant(VacantEntryRef<'a, 'b, K, Q, V, S, A>), } -impl<K: Borrow<Q>, Q: ?Sized + Debug, V: Debug, S, A: Allocator + Clone> Debug +impl<K: Borrow<Q>, Q: ?Sized + Debug, V: Debug, S, A: Allocator> Debug for EntryRef<'_, '_, K, Q, V, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { @@ -4491,7 +4471,7 @@ impl<'a, K: Borrow<Q>, Q: ?Sized> AsRef<Q> for KeyOrRef<'a, K, Q> { /// assert_eq!(map.get("c"), None); /// assert_eq!(map.len(), 2); /// ``` -pub struct OccupiedEntryRef<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone = Global> { +pub struct OccupiedEntryRef<'a, 'b, K, Q: ?Sized, V, S, A: Allocator = Global> { hash: u64, key: Option<KeyOrRef<'b, K, Q>>, elem: Bucket<(K, V)>, @@ -4504,7 +4484,7 @@ where Q: Sync + ?Sized, V: Send, S: Send, - A: Send + Allocator + Clone, + A: Send + Allocator, { } unsafe impl<'a, 'b, K, Q, V, S, A> Sync for OccupiedEntryRef<'a, 'b, K, Q, V, S, A> @@ -4513,11 +4493,11 @@ where Q: Sync + ?Sized, V: Sync, S: Sync, - A: Sync + Allocator + Clone, + A: Sync + Allocator, { } -impl<K: Borrow<Q>, Q: ?Sized + Debug, V: Debug, S, A: Allocator + Clone> Debug +impl<K: Borrow<Q>, Q: ?Sized + Debug, V: Debug, S, A: Allocator> Debug for OccupiedEntryRef<'_, '_, K, Q, V, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { @@ -4558,13 +4538,13 @@ impl<K: Borrow<Q>, Q: ?Sized + Debug, V: Debug, S, A: Allocator + Clone> Debug /// } /// assert!(map["b"] == 20 && map.len() == 2); /// ``` -pub struct VacantEntryRef<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone = Global> { +pub struct VacantEntryRef<'a, 'b, K, Q: ?Sized, V, S, A: Allocator = Global> { hash: u64, key: KeyOrRef<'b, K, Q>, table: &'a mut HashMap<K, V, S, A>, } -impl<K: Borrow<Q>, Q: ?Sized + Debug, V, S, A: Allocator + Clone> Debug +impl<K: Borrow<Q>, Q: ?Sized + Debug, V, S, A: Allocator> Debug for VacantEntryRef<'_, '_, K, Q, V, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { @@ -4596,14 +4576,14 @@ impl<K: Borrow<Q>, Q: ?Sized + Debug, V, S, A: Allocator + Clone> Debug /// } /// assert_eq!(map[&"a"], 100); /// ``` -pub struct OccupiedError<'a, K, V, S, A: Allocator + Clone = Global> { +pub struct OccupiedError<'a, K, V, S, A: Allocator = Global> { /// The entry in the map that was already occupied. pub entry: OccupiedEntry<'a, K, V, S, A>, /// The value which was not inserted, because the entry was already occupied. 
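`OccupiedError` above is the error half of `try_insert`, which refuses to overwrite an existing entry. A short sketch:

```rust
use hashbrown::HashMap;

fn demo() {
    let mut map = HashMap::new();
    map.try_insert("a", 1).unwrap();

    match map.try_insert("a", 2) {
        Ok(_) => unreachable!("the key is already present"),
        Err(err) => {
            // Both the rejected value and the occupied entry are available.
            assert_eq!(err.value, 2);
            assert_eq!(*err.entry.get(), 1);
        }
    }
}
```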
pub value: V, } -impl<K: Debug, V: Debug, S, A: Allocator + Clone> Debug for OccupiedError<'_, K, V, S, A> { +impl<K: Debug, V: Debug, S, A: Allocator> Debug for OccupiedError<'_, K, V, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("OccupiedError") .field("key", self.entry.key()) @@ -4613,9 +4593,7 @@ impl<K: Debug, V: Debug, S, A: Allocator + Clone> Debug for OccupiedError<'_, K, } } -impl<'a, K: Debug, V: Debug, S, A: Allocator + Clone> fmt::Display - for OccupiedError<'a, K, V, S, A> -{ +impl<'a, K: Debug, V: Debug, S, A: Allocator> fmt::Display for OccupiedError<'a, K, V, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!( f, @@ -4627,7 +4605,7 @@ impl<'a, K: Debug, V: Debug, S, A: Allocator + Clone> fmt::Display } } -impl<'a, K, V, S, A: Allocator + Clone> IntoIterator for &'a HashMap<K, V, S, A> { +impl<'a, K, V, S, A: Allocator> IntoIterator for &'a HashMap<K, V, S, A> { type Item = (&'a K, &'a V); type IntoIter = Iter<'a, K, V>; @@ -4659,7 +4637,7 @@ impl<'a, K, V, S, A: Allocator + Clone> IntoIterator for &'a HashMap<K, V, S, A> } } -impl<'a, K, V, S, A: Allocator + Clone> IntoIterator for &'a mut HashMap<K, V, S, A> { +impl<'a, K, V, S, A: Allocator> IntoIterator for &'a mut HashMap<K, V, S, A> { type Item = (&'a K, &'a mut V); type IntoIter = IterMut<'a, K, V>; @@ -4696,7 +4674,7 @@ impl<'a, K, V, S, A: Allocator + Clone> IntoIterator for &'a mut HashMap<K, V, S } } -impl<K, V, S, A: Allocator + Clone> IntoIterator for HashMap<K, V, S, A> { +impl<K, V, S, A: Allocator> IntoIterator for HashMap<K, V, S, A> { type Item = (K, V); type IntoIter = IntoIter<K, V, A>; @@ -4791,7 +4769,7 @@ where } } -impl<K, V, A: Allocator + Clone> Iterator for IntoIter<K, V, A> { +impl<K, V, A: Allocator> Iterator for IntoIter<K, V, A> { type Item = (K, V); #[cfg_attr(feature = "inline-more", inline)] @@ -4803,15 +4781,15 @@ impl<K, V, A: Allocator + Clone> Iterator for IntoIter<K, V, A> { self.inner.size_hint() } } -impl<K, V, A: Allocator + Clone> ExactSizeIterator for IntoIter<K, V, A> { +impl<K, V, A: Allocator> ExactSizeIterator for IntoIter<K, V, A> { #[cfg_attr(feature = "inline-more", inline)] fn len(&self) -> usize { self.inner.len() } } -impl<K, V, A: Allocator + Clone> FusedIterator for IntoIter<K, V, A> {} +impl<K, V, A: Allocator> FusedIterator for IntoIter<K, V, A> {} -impl<K: Debug, V: Debug, A: Allocator + Clone> fmt::Debug for IntoIter<K, V, A> { +impl<K: Debug, V: Debug, A: Allocator> fmt::Debug for IntoIter<K, V, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.iter()).finish() } @@ -4897,7 +4875,7 @@ impl<K, V: Debug> fmt::Debug for ValuesMut<'_, K, V> { } } -impl<'a, K, V, A: Allocator + Clone> Iterator for Drain<'a, K, V, A> { +impl<'a, K, V, A: Allocator> Iterator for Drain<'a, K, V, A> { type Item = (K, V); #[cfg_attr(feature = "inline-more", inline)] @@ -4909,26 +4887,26 @@ impl<'a, K, V, A: Allocator + Clone> Iterator for Drain<'a, K, V, A> { self.inner.size_hint() } } -impl<K, V, A: Allocator + Clone> ExactSizeIterator for Drain<'_, K, V, A> { +impl<K, V, A: Allocator> ExactSizeIterator for Drain<'_, K, V, A> { #[cfg_attr(feature = "inline-more", inline)] fn len(&self) -> usize { self.inner.len() } } -impl<K, V, A: Allocator + Clone> FusedIterator for Drain<'_, K, V, A> {} +impl<K, V, A: Allocator> FusedIterator for Drain<'_, K, V, A> {} impl<K, V, A> fmt::Debug for Drain<'_, K, V, A> where K: fmt::Debug, V: fmt::Debug, - A: Allocator + Clone, + A: Allocator, { fn 
fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.iter()).finish() } } -impl<'a, K, V, S, A: Allocator + Clone> Entry<'a, K, V, S, A> { +impl<'a, K, V, S, A: Allocator> Entry<'a, K, V, S, A> { /// Sets the value of the entry, and returns an OccupiedEntry. /// /// # Examples @@ -5175,7 +5153,7 @@ impl<'a, K, V, S, A: Allocator + Clone> Entry<'a, K, V, S, A> { } } -impl<'a, K, V: Default, S, A: Allocator + Clone> Entry<'a, K, V, S, A> { +impl<'a, K, V: Default, S, A: Allocator> Entry<'a, K, V, S, A> { /// Ensures a value is in the entry by inserting the default value if empty, /// and returns a mutable reference to the value in the entry. /// @@ -5208,7 +5186,7 @@ impl<'a, K, V: Default, S, A: Allocator + Clone> Entry<'a, K, V, S, A> { } } -impl<'a, K, V, S, A: Allocator + Clone> OccupiedEntry<'a, K, V, S, A> { +impl<'a, K, V, S, A: Allocator> OccupiedEntry<'a, K, V, S, A> { /// Gets a reference to the key in the entry. /// /// # Examples @@ -5563,7 +5541,7 @@ impl<'a, K, V, S, A: Allocator + Clone> OccupiedEntry<'a, K, V, S, A> { } } -impl<'a, K, V, S, A: Allocator + Clone> VacantEntry<'a, K, V, S, A> { +impl<'a, K, V, S, A: Allocator> VacantEntry<'a, K, V, S, A> { /// Gets a reference to the key that would be used when inserting a value /// through the `VacantEntry`. /// @@ -5650,7 +5628,7 @@ impl<'a, K, V, S, A: Allocator + Clone> VacantEntry<'a, K, V, S, A> { } } -impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> EntryRef<'a, 'b, K, Q, V, S, A> { +impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator> EntryRef<'a, 'b, K, Q, V, S, A> { /// Sets the value of the entry, and returns an OccupiedEntryRef. /// /// # Examples @@ -5897,7 +5875,7 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> EntryRef<'a, 'b, K, Q, V, } } -impl<'a, 'b, K, Q: ?Sized, V: Default, S, A: Allocator + Clone> EntryRef<'a, 'b, K, Q, V, S, A> { +impl<'a, 'b, K, Q: ?Sized, V: Default, S, A: Allocator> EntryRef<'a, 'b, K, Q, V, S, A> { /// Ensures a value is in the entry by inserting the default value if empty, /// and returns a mutable reference to the value in the entry. /// @@ -5930,7 +5908,7 @@ impl<'a, 'b, K, Q: ?Sized, V: Default, S, A: Allocator + Clone> EntryRef<'a, 'b, } } -impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> OccupiedEntryRef<'a, 'b, K, Q, V, S, A> { +impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator> OccupiedEntryRef<'a, 'b, K, Q, V, S, A> { /// Gets a reference to the key in the entry. /// /// # Examples @@ -6282,7 +6260,7 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> OccupiedEntryRef<'a, 'b, } } -impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> VacantEntryRef<'a, 'b, K, Q, V, S, A> { +impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator> VacantEntryRef<'a, 'b, K, Q, V, S, A> { /// Gets a reference to the key that would be used when inserting a value /// through the `VacantEntryRef`. /// @@ -6382,7 +6360,7 @@ impl<K, V, S, A> FromIterator<(K, V)> for HashMap<K, V, S, A> where K: Eq + Hash, S: BuildHasher + Default, - A: Default + Allocator + Clone, + A: Default + Allocator, { #[cfg_attr(feature = "inline-more", inline)] fn from_iter<T: IntoIterator<Item = (K, V)>>(iter: T) -> Self { @@ -6402,7 +6380,7 @@ impl<K, V, S, A> Extend<(K, V)> for HashMap<K, V, S, A> where K: Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { /// Inserts all new key-values from the iterator to existing `HashMap<K, V, S, A>`. /// Replace values with existing keys with new values returned from the iterator. 
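// Editorial sketch of what the relaxed bounds above buy (not part of the patch; the
// `DelegatingAlloc` type is hypothetical and mirrors the `MyAlloc` test helper that
// appears further down in this diff): with `A: Allocator` no longer requiring `Clone`,
// a map can be backed by an allocator type that is not clonable.
use allocator_api2::alloc::{AllocError, Allocator, Global};
use core::alloc::Layout;
use core::ptr::NonNull;
use hashbrown::HashMap;

struct DelegatingAlloc; // deliberately not `Clone`

unsafe impl Allocator for DelegatingAlloc {
    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        // Forward every request to the global allocator.
        Global.allocate(layout)
    }

    unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
        Global.deallocate(ptr, layout)
    }
}

fn allocator_usage() {
    // `with_capacity_in` hands ownership of the (non-Clone) allocator to the map.
    let mut map = HashMap::with_capacity_in(4, DelegatingAlloc);
    map.insert(1u32, "one");
    *map.entry(2).or_insert("two") = "two";
    assert_eq!(map.get(&1), Some(&"one"));
}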
@@ -6486,7 +6464,7 @@ where K: Eq + Hash + Copy, V: Copy, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { /// Inserts all new key-values from the iterator to existing `HashMap<K, V, S, A>`. /// Replace values with existing keys with new values returned from the iterator. @@ -6551,7 +6529,7 @@ where K: Eq + Hash + Copy, V: Copy, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { /// Inserts all new key-values from the iterator to existing `HashMap<K, V, S, A>`. /// Replace values with existing keys with new values returned from the iterator. @@ -6618,12 +6596,12 @@ fn assert_covariance() { fn iter_val<'a, 'new>(v: Iter<'a, u8, &'static str>) -> Iter<'a, u8, &'new str> { v } - fn into_iter_key<'new, A: Allocator + Clone>( + fn into_iter_key<'new, A: Allocator>( v: IntoIter<&'static str, u8, A>, ) -> IntoIter<&'new str, u8, A> { v } - fn into_iter_val<'new, A: Allocator + Clone>( + fn into_iter_val<'new, A: Allocator>( v: IntoIter<u8, &'static str, A>, ) -> IntoIter<u8, &'new str, A> { v @@ -6653,6 +6631,12 @@ mod test_map { use super::Entry::{Occupied, Vacant}; use super::EntryRef; use super::{HashMap, RawEntryMut}; + use alloc::string::{String, ToString}; + use alloc::sync::Arc; + use allocator_api2::alloc::{AllocError, Allocator, Global}; + use core::alloc::Layout; + use core::ptr::NonNull; + use core::sync::atomic::{AtomicI8, Ordering}; use rand::{rngs::SmallRng, Rng, SeedableRng}; use std::borrow::ToOwned; use std::cell::RefCell; @@ -8503,4 +8487,396 @@ mod test_map { ); let _map2 = map1.clone(); } + + struct MyAllocInner { + drop_count: Arc<AtomicI8>, + } + + #[derive(Clone)] + struct MyAlloc { + _inner: Arc<MyAllocInner>, + } + + impl MyAlloc { + fn new(drop_count: Arc<AtomicI8>) -> Self { + MyAlloc { + _inner: Arc::new(MyAllocInner { drop_count }), + } + } + } + + impl Drop for MyAllocInner { + fn drop(&mut self) { + println!("MyAlloc freed."); + self.drop_count.fetch_sub(1, Ordering::SeqCst); + } + } + + unsafe impl Allocator for MyAlloc { + fn allocate(&self, layout: Layout) -> std::result::Result<NonNull<[u8]>, AllocError> { + let g = Global; + g.allocate(layout) + } + + unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) { + let g = Global; + g.deallocate(ptr, layout) + } + } + + #[test] + fn test_hashmap_into_iter_bug() { + let dropped: Arc<AtomicI8> = Arc::new(AtomicI8::new(1)); + + { + let mut map = HashMap::with_capacity_in(10, MyAlloc::new(dropped.clone())); + for i in 0..10 { + map.entry(i).or_insert_with(|| "i".to_string()); + } + + for (k, v) in map { + println!("{}, {}", k, v); + } + } + + // All allocator clones should already be dropped. 
+ assert_eq!(dropped.load(Ordering::SeqCst), 0); + } + + #[derive(Debug)] + struct CheckedCloneDrop<T> { + panic_in_clone: bool, + panic_in_drop: bool, + dropped: bool, + data: T, + } + + impl<T> CheckedCloneDrop<T> { + fn new(panic_in_clone: bool, panic_in_drop: bool, data: T) -> Self { + CheckedCloneDrop { + panic_in_clone, + panic_in_drop, + dropped: false, + data, + } + } + } + + impl<T: Clone> Clone for CheckedCloneDrop<T> { + fn clone(&self) -> Self { + if self.panic_in_clone { + panic!("panic in clone") + } + Self { + panic_in_clone: self.panic_in_clone, + panic_in_drop: self.panic_in_drop, + dropped: self.dropped, + data: self.data.clone(), + } + } + } + + impl<T> Drop for CheckedCloneDrop<T> { + fn drop(&mut self) { + if self.panic_in_drop { + self.dropped = true; + panic!("panic in drop"); + } + if self.dropped { + panic!("double drop"); + } + self.dropped = true; + } + } + + /// Return hashmap with predefined distribution of elements. + /// All elements will be located in the same order as elements + /// returned by iterator. + /// + /// This function does not panic, but returns an error as a `String` + /// to distinguish between a test panic and an error in the input data. + fn get_test_map<I, T, A>( + iter: I, + mut fun: impl FnMut(u64) -> T, + alloc: A, + ) -> Result<HashMap<u64, CheckedCloneDrop<T>, DefaultHashBuilder, A>, String> + where + I: Iterator<Item = (bool, bool)> + Clone + ExactSizeIterator, + A: Allocator, + T: PartialEq + core::fmt::Debug, + { + use crate::scopeguard::guard; + + let mut map: HashMap<u64, CheckedCloneDrop<T>, _, A> = + HashMap::with_capacity_in(iter.size_hint().0, alloc); + { + let mut guard = guard(&mut map, |map| { + for (_, value) in map.iter_mut() { + value.panic_in_drop = false + } + }); + + let mut count = 0; + // Hash and Key must be equal to each other for controlling the elements placement. 
+ for (panic_in_clone, panic_in_drop) in iter.clone() { + if core::mem::needs_drop::<T>() && panic_in_drop { + return Err(String::from( + "panic_in_drop can be set with a type that doesn't need to be dropped", + )); + } + guard.table.insert( + count, + ( + count, + CheckedCloneDrop::new(panic_in_clone, panic_in_drop, fun(count)), + ), + |(k, _)| *k, + ); + count += 1; + } + + // Let's check that all elements are located as we wanted + let mut check_count = 0; + for ((key, value), (panic_in_clone, panic_in_drop)) in guard.iter().zip(iter) { + if *key != check_count { + return Err(format!( + "key != check_count,\nkey: `{}`,\ncheck_count: `{}`", + key, check_count + )); + } + if value.dropped + || value.panic_in_clone != panic_in_clone + || value.panic_in_drop != panic_in_drop + || value.data != fun(check_count) + { + return Err(format!( + "Value is not equal to expected,\nvalue: `{:?}`,\nexpected: \ + `CheckedCloneDrop {{ panic_in_clone: {}, panic_in_drop: {}, dropped: {}, data: {:?} }}`", + value, panic_in_clone, panic_in_drop, false, fun(check_count) + )); + } + check_count += 1; + } + + if guard.len() != check_count as usize { + return Err(format!( + "map.len() != check_count,\nmap.len(): `{}`,\ncheck_count: `{}`", + guard.len(), + check_count + )); + } + + if count != check_count { + return Err(format!( + "count != check_count,\ncount: `{}`,\ncheck_count: `{}`", + count, check_count + )); + } + core::mem::forget(guard); + } + Ok(map) + } + + const DISARMED: bool = false; + const ARMED: bool = true; + + const ARMED_FLAGS: [bool; 8] = [ + DISARMED, DISARMED, DISARMED, ARMED, DISARMED, DISARMED, DISARMED, DISARMED, + ]; + + const DISARMED_FLAGS: [bool; 8] = [ + DISARMED, DISARMED, DISARMED, DISARMED, DISARMED, DISARMED, DISARMED, DISARMED, + ]; + + #[test] + #[should_panic = "panic in clone"] + fn test_clone_memory_leaks_and_double_drop_one() { + let dropped: Arc<AtomicI8> = Arc::new(AtomicI8::new(2)); + + { + assert_eq!(ARMED_FLAGS.len(), DISARMED_FLAGS.len()); + + let map: HashMap<u64, CheckedCloneDrop<Vec<u64>>, DefaultHashBuilder, MyAlloc> = + match get_test_map( + ARMED_FLAGS.into_iter().zip(DISARMED_FLAGS), + |n| vec![n], + MyAlloc::new(dropped.clone()), + ) { + Ok(map) => map, + Err(msg) => panic!("{msg}"), + }; + + // Clone should normally clone a few elements, and then (when the + // clone function panics), deallocate both its own memory, memory + // of `dropped: Arc<AtomicI8>` and the memory of already cloned + // elements (Vec<i32> memory inside CheckedCloneDrop). + let _map2 = map.clone(); + } + } + + #[test] + #[should_panic = "panic in drop"] + fn test_clone_memory_leaks_and_double_drop_two() { + let dropped: Arc<AtomicI8> = Arc::new(AtomicI8::new(2)); + + { + assert_eq!(ARMED_FLAGS.len(), DISARMED_FLAGS.len()); + + let map: HashMap<u64, CheckedCloneDrop<u64>, DefaultHashBuilder, _> = match get_test_map( + DISARMED_FLAGS.into_iter().zip(DISARMED_FLAGS), + |n| n, + MyAlloc::new(dropped.clone()), + ) { + Ok(map) => map, + Err(msg) => panic!("{msg}"), + }; + + let mut map2 = match get_test_map( + DISARMED_FLAGS.into_iter().zip(ARMED_FLAGS), + |n| n, + MyAlloc::new(dropped.clone()), + ) { + Ok(map) => map, + Err(msg) => panic!("{msg}"), + }; + + // The `clone_from` should try to drop the elements of `map2` without + // double drop and leaking the allocator. Elements that have not been + // dropped leak their memory. 
+ map2.clone_from(&map); + } + } + + /// We check that we have a working table if the clone operation from another + /// thread ended in a panic (when buckets of maps are equal to each other). + #[test] + fn test_catch_panic_clone_from_when_len_is_equal() { + use std::thread; + + let dropped: Arc<AtomicI8> = Arc::new(AtomicI8::new(2)); + + { + assert_eq!(ARMED_FLAGS.len(), DISARMED_FLAGS.len()); + + let mut map = match get_test_map( + DISARMED_FLAGS.into_iter().zip(DISARMED_FLAGS), + |n| vec![n], + MyAlloc::new(dropped.clone()), + ) { + Ok(map) => map, + Err(msg) => panic!("{msg}"), + }; + + thread::scope(|s| { + let result: thread::ScopedJoinHandle<'_, String> = s.spawn(|| { + let scope_map = + match get_test_map(ARMED_FLAGS.into_iter().zip(DISARMED_FLAGS), |n| vec![n * 2], MyAlloc::new(dropped.clone())) { + Ok(map) => map, + Err(msg) => return msg, + }; + if map.table.buckets() != scope_map.table.buckets() { + return format!( + "map.table.buckets() != scope_map.table.buckets(),\nleft: `{}`,\nright: `{}`", + map.table.buckets(), scope_map.table.buckets() + ); + } + map.clone_from(&scope_map); + "We must fail the cloning!!!".to_owned() + }); + if let Ok(msg) = result.join() { + panic!("{msg}") + } + }); + + // Let's check that all iterators work fine and do not return elements + // (especially `RawIterRange`, which does not depend on the number of + // elements in the table, but looks directly at the control bytes) + // + // SAFETY: We know for sure that `RawTable` will outlive + // the returned `RawIter / RawIterRange` iterator. + assert_eq!(map.len(), 0); + assert_eq!(map.iter().count(), 0); + assert_eq!(unsafe { map.table.iter().count() }, 0); + assert_eq!(unsafe { map.table.iter().iter.count() }, 0); + + for idx in 0..map.table.buckets() { + let idx = idx as u64; + assert!( + map.table.find(idx, |(k, _)| *k == idx).is_none(), + "Index: {idx}" + ); + } + } + + // All allocator clones should already be dropped. + assert_eq!(dropped.load(Ordering::SeqCst), 0); + } + + /// We check that we have a working table if the clone operation from another + /// thread ended in a panic (when buckets of maps are not equal to each other). + #[test] + fn test_catch_panic_clone_from_when_len_is_not_equal() { + use std::thread; + + let dropped: Arc<AtomicI8> = Arc::new(AtomicI8::new(2)); + + { + assert_eq!(ARMED_FLAGS.len(), DISARMED_FLAGS.len()); + + let mut map = match get_test_map( + [DISARMED].into_iter().zip([DISARMED]), + |n| vec![n], + MyAlloc::new(dropped.clone()), + ) { + Ok(map) => map, + Err(msg) => panic!("{msg}"), + }; + + thread::scope(|s| { + let result: thread::ScopedJoinHandle<'_, String> = s.spawn(|| { + let scope_map = match get_test_map( + ARMED_FLAGS.into_iter().zip(DISARMED_FLAGS), + |n| vec![n * 2], + MyAlloc::new(dropped.clone()), + ) { + Ok(map) => map, + Err(msg) => return msg, + }; + if map.table.buckets() == scope_map.table.buckets() { + return format!( + "map.table.buckets() == scope_map.table.buckets(): `{}`", + map.table.buckets() + ); + } + map.clone_from(&scope_map); + "We must fail the cloning!!!".to_owned() + }); + if let Ok(msg) = result.join() { + panic!("{msg}") + } + }); + + // Let's check that all iterators work fine and do not return elements + // (especially `RawIterRange`, which does not depend on the number of + // elements in the table, but looks directly at the control bytes) + // + // SAFETY: We know for sure that `RawTable` will outlive + // the returned `RawIter / RawIterRange` iterator. 
+ assert_eq!(map.len(), 0); + assert_eq!(map.iter().count(), 0); + assert_eq!(unsafe { map.table.iter().count() }, 0); + assert_eq!(unsafe { map.table.iter().iter.count() }, 0); + + for idx in 0..map.table.buckets() { + let idx = idx as u64; + assert!( + map.table.find(idx, |(k, _)| *k == idx).is_none(), + "Index: {idx}" + ); + } + } + + // All allocator clones should already be dropped. + assert_eq!(dropped.load(Ordering::SeqCst), 0); + } } diff --git a/vendor/hashbrown/src/raw/mod.rs b/vendor/hashbrown/src/raw/mod.rs index 1a6dced4b..25c5d1c4d 100644 --- a/vendor/hashbrown/src/raw/mod.rs +++ b/vendor/hashbrown/src/raw/mod.rs @@ -4,7 +4,6 @@ use crate::TryReserveError; use core::iter::FusedIterator; use core::marker::PhantomData; use core::mem; -use core::mem::ManuallyDrop; use core::mem::MaybeUninit; use core::ptr::NonNull; use core::{hint, ptr}; @@ -21,11 +20,18 @@ cfg_if! { if #[cfg(all( target_feature = "sse2", any(target_arch = "x86", target_arch = "x86_64"), - not(miri) + not(miri), ))] { mod sse2; use sse2 as imp; - } else if #[cfg(all(target_arch = "aarch64", target_feature = "neon"))] { + } else if #[cfg(all( + target_arch = "aarch64", + target_feature = "neon", + // NEON intrinsics are currently broken on big-endian targets. + // See https://github.com/rust-lang/stdarch/issues/1484. + target_endian = "little", + not(miri), + ))] { mod neon; use neon as imp; } else { @@ -93,6 +99,13 @@ impl Fallibility { } } +trait SizedTypeProperties: Sized { + const IS_ZERO_SIZED: bool = mem::size_of::<Self>() == 0; + const NEEDS_DROP: bool = mem::needs_drop::<Self>(); +} + +impl<T> SizedTypeProperties for T {} + /// Control byte value for an empty bucket. const EMPTY: u8 = 0b1111_1111; @@ -294,8 +307,6 @@ impl<T> Clone for Bucket<T> { } impl<T> Bucket<T> { - const IS_ZERO_SIZED_TYPE: bool = mem::size_of::<T>() == 0; - /// Creates a [`Bucket`] that contain pointer to the data. /// The pointer calculation is performed by calculating the /// offset from given `base` pointer (convenience for @@ -364,7 +375,7 @@ impl<T> Bucket<T> { // // where: T0...Tlast - our stored data; C0...Clast - control bytes // or metadata for data. - let ptr = if Self::IS_ZERO_SIZED_TYPE { + let ptr = if T::IS_ZERO_SIZED { // won't overflow because index must be less than length (bucket_mask) // and bucket_mask is guaranteed to be less than `isize::MAX` // (see TableLayout::calculate_layout_for method) @@ -438,7 +449,7 @@ impl<T> Bucket<T> { // (base.as_ptr() as usize - self.ptr.as_ptr() as usize) / mem::size_of::<T>() // // where: T0...Tlast - our stored data; C0...Clast - control bytes or metadata for data. - if Self::IS_ZERO_SIZED_TYPE { + if T::IS_ZERO_SIZED { // this can not be UB self.ptr.as_ptr() as usize - 1 } else { @@ -502,7 +513,7 @@ impl<T> Bucket<T> { /// ``` #[inline] pub fn as_ptr(&self) -> *mut T { - if Self::IS_ZERO_SIZED_TYPE { + if T::IS_ZERO_SIZED { // Just return an arbitrary ZST pointer which is properly aligned // invalid pointer is good enough for ZST invalid_mut(mem::align_of::<T>()) @@ -550,7 +561,7 @@ impl<T> Bucket<T> { /// [`RawTableInner::buckets`]: RawTableInner::buckets #[inline] unsafe fn next_n(&self, offset: usize) -> Self { - let ptr = if Self::IS_ZERO_SIZED_TYPE { + let ptr = if T::IS_ZERO_SIZED { // invalid pointer is good enough for ZST invalid_mut(self.ptr.as_ptr() as usize + offset) } else { @@ -774,15 +785,16 @@ impl<T> Bucket<T> { } /// A raw hash table with an unsafe API. 
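// Editorial illustration (restates the helper added in the hunk above, plus a small
// hypothetical demo): `SizedTypeProperties` hangs two associated constants off every
// `Sized` type, so the raw-table code can write `T::IS_ZERO_SIZED` / `T::NEEDS_DROP`
// instead of repeating `mem::size_of::<T>() == 0` and `mem::needs_drop::<T>()` at each
// call site.
use core::mem;

trait SizedTypeProperties: Sized {
    const IS_ZERO_SIZED: bool = mem::size_of::<Self>() == 0;
    const NEEDS_DROP: bool = mem::needs_drop::<Self>();
}

impl<T> SizedTypeProperties for T {}

fn size_props_demo() {
    // Zero-sized types take no storage in the data part of the table.
    assert!(<() as SizedTypeProperties>::IS_ZERO_SIZED);
    assert!(!<u64 as SizedTypeProperties>::IS_ZERO_SIZED);
    // Types with a Drop impl require the drop-elements path; plain integers do not.
    assert!(<String as SizedTypeProperties>::NEEDS_DROP);
    assert!(!<u64 as SizedTypeProperties>::NEEDS_DROP);
}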
-pub struct RawTable<T, A: Allocator + Clone = Global> { - table: RawTableInner<A>, +pub struct RawTable<T, A: Allocator = Global> { + table: RawTableInner, + alloc: A, // Tell dropck that we own instances of T. marker: PhantomData<T>, } /// Non-generic part of `RawTable` which allows functions to be instantiated only once regardless /// of how many different key-value types are used. -struct RawTableInner<A> { +struct RawTableInner { // Mask to get an index from a hash value. The value is one less than the // number of buckets in the table. bucket_mask: usize, @@ -796,8 +808,6 @@ struct RawTableInner<A> { // Number of elements in the table, only really used by len() items: usize, - - alloc: A, } impl<T> RawTable<T, Global> { @@ -809,7 +819,8 @@ impl<T> RawTable<T, Global> { #[inline] pub const fn new() -> Self { Self { - table: RawTableInner::new_in(Global), + table: RawTableInner::NEW, + alloc: Global, marker: PhantomData, } } @@ -828,9 +839,8 @@ impl<T> RawTable<T, Global> { } } -impl<T, A: Allocator + Clone> RawTable<T, A> { +impl<T, A: Allocator> RawTable<T, A> { const TABLE_LAYOUT: TableLayout = TableLayout::new::<T>(); - const DATA_NEEDS_DROP: bool = mem::needs_drop::<T>(); /// Creates a new empty hash table without allocating any memory, using the /// given allocator. @@ -841,7 +851,8 @@ impl<T, A: Allocator + Clone> RawTable<T, A> { #[inline] pub const fn new_in(alloc: A) -> Self { Self { - table: RawTableInner::new_in(alloc), + table: RawTableInner::NEW, + alloc, marker: PhantomData, } } @@ -859,66 +870,77 @@ impl<T, A: Allocator + Clone> RawTable<T, A> { Ok(Self { table: RawTableInner::new_uninitialized( - alloc, + &alloc, Self::TABLE_LAYOUT, buckets, fallibility, )?, + alloc, marker: PhantomData, }) } - /// Attempts to allocate a new hash table with at least enough capacity - /// for inserting the given number of elements without reallocating. - fn fallible_with_capacity( - alloc: A, - capacity: usize, - fallibility: Fallibility, - ) -> Result<Self, TryReserveError> { + /// Attempts to allocate a new hash table using the given allocator, with at least enough + /// capacity for inserting the given number of elements without reallocating. + #[cfg(feature = "raw")] + pub fn try_with_capacity_in(capacity: usize, alloc: A) -> Result<Self, TryReserveError> { Ok(Self { table: RawTableInner::fallible_with_capacity( - alloc, + &alloc, Self::TABLE_LAYOUT, capacity, - fallibility, + Fallibility::Fallible, )?, + alloc, marker: PhantomData, }) } - /// Attempts to allocate a new hash table using the given allocator, with at least enough - /// capacity for inserting the given number of elements without reallocating. - #[cfg(feature = "raw")] - pub fn try_with_capacity_in(capacity: usize, alloc: A) -> Result<Self, TryReserveError> { - Self::fallible_with_capacity(alloc, capacity, Fallibility::Fallible) - } - /// Allocates a new hash table using the given allocator, with at least enough capacity for /// inserting the given number of elements without reallocating. pub fn with_capacity_in(capacity: usize, alloc: A) -> Self { - // Avoid `Result::unwrap_or_else` because it bloats LLVM IR. - match Self::fallible_with_capacity(alloc, capacity, Fallibility::Infallible) { - Ok(capacity) => capacity, - Err(_) => unsafe { hint::unreachable_unchecked() }, + Self { + table: RawTableInner::with_capacity(&alloc, Self::TABLE_LAYOUT, capacity), + alloc, + marker: PhantomData, } } /// Returns a reference to the underlying allocator. 
#[inline] pub fn allocator(&self) -> &A { - &self.table.alloc - } - - /// Deallocates the table without dropping any entries. - #[cfg_attr(feature = "inline-more", inline)] - unsafe fn free_buckets(&mut self) { - self.table.free_buckets(Self::TABLE_LAYOUT); + &self.alloc } - /// Returns pointer to one past last element of data table. + /// Returns pointer to one past last `data` element in the the table as viewed from + /// the start point of the allocation. + /// + /// The caller must ensure that the `RawTable` outlives the returned [`NonNull<T>`], + /// otherwise using it may result in [`undefined behavior`]. + /// + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html #[inline] - pub unsafe fn data_end(&self) -> NonNull<T> { - NonNull::new_unchecked(self.table.ctrl.as_ptr().cast()) + pub fn data_end(&self) -> NonNull<T> { + // SAFETY: `self.table.ctrl` is `NonNull`, so casting it is safe + // + // `self.table.ctrl.as_ptr().cast()` returns pointer that + // points here (to the end of `T0`) + // ∨ + // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, CTa_0, CTa_1, ..., CTa_m + // \________ ________/ + // \/ + // `n = buckets - 1`, i.e. `RawTable::buckets() - 1` + // + // where: T0...T_n - our stored data; + // CT0...CT_n - control bytes or metadata for `data`. + // CTa_0...CTa_m - additional control bytes, where `m = Group::WIDTH - 1` (so that the search + // with loading `Group` bytes from the heap works properly, even if the result + // of `h1(hash) & self.bucket_mask` is equal to `self.bucket_mask`). See also + // `RawTableInner::set_ctrl` function. + // + // P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number + // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`. + unsafe { NonNull::new_unchecked(self.table.ctrl.as_ptr().cast()) } } /// Returns pointer to start of data table. @@ -938,7 +960,9 @@ impl<T, A: Allocator + Clone> RawTable<T, A> { #[inline] #[cfg(feature = "raw")] pub fn allocation_info(&self) -> (NonNull<u8>, Layout) { - self.table.allocation_info_or_zero(Self::TABLE_LAYOUT) + // SAFETY: We use the same `table_layout` that was used to allocate + // this table. + unsafe { self.table.allocation_info_or_zero(Self::TABLE_LAYOUT) } } /// Returns the index of a bucket from a `Bucket`. @@ -948,8 +972,55 @@ impl<T, A: Allocator + Clone> RawTable<T, A> { } /// Returns a pointer to an element in the table. + /// + /// The caller must ensure that the `RawTable` outlives the returned [`Bucket<T>`], + /// otherwise using it may result in [`undefined behavior`]. + /// + /// # Safety + /// + /// If `mem::size_of::<T>() != 0`, then the caller of this function must observe the + /// following safety rules: + /// + /// * The table must already be allocated; + /// + /// * The `index` must not be greater than the number returned by the [`RawTable::buckets`] + /// function, i.e. `(index + 1) <= self.buckets()`. + /// + /// It is safe to call this function with index of zero (`index == 0`) on a table that has + /// not been allocated, but using the returned [`Bucket`] results in [`undefined behavior`]. + /// + /// If `mem::size_of::<T>() == 0`, then the only requirement is that the `index` must + /// not be greater than the number returned by the [`RawTable::buckets`] function, i.e. + /// `(index + 1) <= self.buckets()`. 
+ /// + /// [`RawTable::buckets`]: RawTable::buckets + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html #[inline] pub unsafe fn bucket(&self, index: usize) -> Bucket<T> { + // If mem::size_of::<T>() != 0 then return a pointer to the `element` in the `data part` of the table + // (we start counting from "0", so that in the expression T[n], the "n" index actually one less than + // the "buckets" number of our `RawTable`, i.e. "n = RawTable::buckets() - 1"): + // + // `table.bucket(3).as_ptr()` returns a pointer that points here in the `data` + // part of the `RawTable`, i.e. to the start of T3 (see `Bucket::as_ptr`) + // | + // | `base = self.data_end()` points here + // | (to the start of CT0 or to the end of T0) + // v v + // [Pad], T_n, ..., |T3|, T2, T1, T0, |CT0, CT1, CT2, CT3, ..., CT_n, CTa_0, CTa_1, ..., CTa_m + // ^ \__________ __________/ + // `table.bucket(3)` returns a pointer that points \/ + // here in the `data` part of the `RawTable` (to additional control bytes + // the end of T3) `m = Group::WIDTH - 1` + // + // where: T0...T_n - our stored data; + // CT0...CT_n - control bytes or metadata for `data`; + // CTa_0...CTa_m - additional control bytes (so that the search with loading `Group` bytes from + // the heap works properly, even if the result of `h1(hash) & self.table.bucket_mask` + // is equal to `self.table.bucket_mask`). See also `RawTableInner::set_ctrl` function. + // + // P.S. `h1(hash) & self.table.bucket_mask` is the same as `hash as usize % self.buckets()` because the number + // of buckets is a power of two, and `self.table.bucket_mask = self.buckets() - 1`. debug_assert_ne!(self.table.bucket_mask, 0); debug_assert!(index < self.buckets()); Bucket::from_base_index(self.data_end(), index) @@ -1028,15 +1099,10 @@ impl<T, A: Allocator + Clone> RawTable<T, A> { // Ensure that the table is reset even if one of the drops panic let mut self_ = guard(self, |self_| self_.clear_no_drop()); unsafe { - self_.drop_elements(); - } - } - - unsafe fn drop_elements(&mut self) { - if Self::DATA_NEEDS_DROP && !self.is_empty() { - for item in self.iter() { - item.drop(); - } + // SAFETY: ScopeGuard sets to zero the `items` field of the table + // even in case of panic during the dropping of the elements so + // that there will be no double drop of the elements. + self_.table.drop_elements::<T>(); } } @@ -1047,7 +1113,16 @@ impl<T, A: Allocator + Clone> RawTable<T, A> { // space for. let min_size = usize::max(self.table.items, min_size); if min_size == 0 { - *self = Self::new_in(self.table.alloc.clone()); + let mut old_inner = mem::replace(&mut self.table, RawTableInner::NEW); + unsafe { + // SAFETY: + // 1. We call the function only once; + // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`] + // and [`TableLayout`] that were used to allocate this table. + // 3. If any elements' drop function panics, then there will only be a memory leak, + // because we have replaced the inner table with a new one. + old_inner.drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT); + } return; } @@ -1064,14 +1139,33 @@ impl<T, A: Allocator + Clone> RawTable<T, A> { if min_buckets < self.buckets() { // Fast path if the table is empty if self.table.items == 0 { - *self = Self::with_capacity_in(min_size, self.table.alloc.clone()); + let new_inner = + RawTableInner::with_capacity(&self.alloc, Self::TABLE_LAYOUT, min_size); + let mut old_inner = mem::replace(&mut self.table, new_inner); + unsafe { + // SAFETY: + // 1. 
We call the function only once; + // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`] + // and [`TableLayout`] that were used to allocate this table. + // 3. If any elements' drop function panics, then there will only be a memory leak, + // because we have replaced the inner table with a new one. + old_inner.drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT); + } } else { // Avoid `Result::unwrap_or_else` because it bloats LLVM IR. - if self - .resize(min_size, hasher, Fallibility::Infallible) - .is_err() - { - unsafe { hint::unreachable_unchecked() } + unsafe { + // SAFETY: + // 1. We know for sure that `min_size >= self.table.items`. + // 2. The [`RawTableInner`] must already have properly initialized control bytes since + // we will never expose RawTable::new_uninitialized in a public API. + if self + .resize(min_size, hasher, Fallibility::Infallible) + .is_err() + { + // SAFETY: The result of calling the `resize` function cannot be an error + // because `fallibility == Fallibility::Infallible. + hint::unreachable_unchecked() + } } } } @@ -1083,11 +1177,16 @@ impl<T, A: Allocator + Clone> RawTable<T, A> { pub fn reserve(&mut self, additional: usize, hasher: impl Fn(&T) -> u64) { if unlikely(additional > self.table.growth_left) { // Avoid `Result::unwrap_or_else` because it bloats LLVM IR. - if self - .reserve_rehash(additional, hasher, Fallibility::Infallible) - .is_err() - { - unsafe { hint::unreachable_unchecked() } + unsafe { + // SAFETY: The [`RawTableInner`] must already have properly initialized control + // bytes since we will never expose RawTable::new_uninitialized in a public API. + if self + .reserve_rehash(additional, hasher, Fallibility::Infallible) + .is_err() + { + // SAFETY: All allocation errors will be caught inside `RawTableInner::reserve_rehash`. + hint::unreachable_unchecked() + } } } } @@ -1101,28 +1200,45 @@ impl<T, A: Allocator + Clone> RawTable<T, A> { hasher: impl Fn(&T) -> u64, ) -> Result<(), TryReserveError> { if additional > self.table.growth_left { - self.reserve_rehash(additional, hasher, Fallibility::Fallible) + // SAFETY: The [`RawTableInner`] must already have properly initialized control + // bytes since we will never expose RawTable::new_uninitialized in a public API. + unsafe { self.reserve_rehash(additional, hasher, Fallibility::Fallible) } } else { Ok(()) } } /// Out-of-line slow path for `reserve` and `try_reserve`. + /// + /// # Safety + /// + /// The [`RawTableInner`] must have properly initialized control bytes, + /// otherwise calling this function results in [`undefined behavior`] + /// + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html #[cold] #[inline(never)] - fn reserve_rehash( + unsafe fn reserve_rehash( &mut self, additional: usize, hasher: impl Fn(&T) -> u64, fallibility: Fallibility, ) -> Result<(), TryReserveError> { unsafe { + // SAFETY: + // 1. We know for sure that `alloc` and `layout` matches the [`Allocator`] and + // [`TableLayout`] that were used to allocate this table. + // 2. The `drop` function is the actual drop function of the elements stored in + // the table. + // 3. The caller ensures that the control bytes of the `RawTableInner` + // are already initialized. 
self.table.reserve_rehash_inner( + &self.alloc, additional, &|table, index| hasher(table.bucket::<T>(index).as_ref()), fallibility, Self::TABLE_LAYOUT, - if Self::DATA_NEEDS_DROP { + if T::NEEDS_DROP { Some(mem::transmute(ptr::drop_in_place::<T> as unsafe fn(*mut T))) } else { None @@ -1133,20 +1249,50 @@ impl<T, A: Allocator + Clone> RawTable<T, A> { /// Allocates a new table of a different size and moves the contents of the /// current table into it. - fn resize( + /// + /// # Safety + /// + /// The [`RawTableInner`] must have properly initialized control bytes, + /// otherwise calling this function results in [`undefined behavior`] + /// + /// The caller of this function must ensure that `capacity >= self.table.items` + /// otherwise: + /// + /// * If `self.table.items != 0`, calling of this function with `capacity` + /// equal to 0 (`capacity == 0`) results in [`undefined behavior`]. + /// + /// * If `capacity_to_buckets(capacity) < Group::WIDTH` and + /// `self.table.items > capacity_to_buckets(capacity)` + /// calling this function results in [`undefined behavior`]. + /// + /// * If `capacity_to_buckets(capacity) >= Group::WIDTH` and + /// `self.table.items > capacity_to_buckets(capacity)` + /// calling this function are never return (will go into an + /// infinite loop). + /// + /// See [`RawTableInner::find_insert_slot`] for more information. + /// + /// [`RawTableInner::find_insert_slot`]: RawTableInner::find_insert_slot + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + unsafe fn resize( &mut self, capacity: usize, hasher: impl Fn(&T) -> u64, fallibility: Fallibility, ) -> Result<(), TryReserveError> { - unsafe { - self.table.resize_inner( - capacity, - &|table, index| hasher(table.bucket::<T>(index).as_ref()), - fallibility, - Self::TABLE_LAYOUT, - ) - } + // SAFETY: + // 1. The caller of this function guarantees that `capacity >= self.table.items`. + // 2. We know for sure that `alloc` and `layout` matches the [`Allocator`] and + // [`TableLayout`] that were used to allocate this table. + // 3. The caller ensures that the control bytes of the `RawTableInner` + // are already initialized. + self.table.resize_inner( + &self.alloc, + capacity, + &|table, index| hasher(table.bucket::<T>(index).as_ref()), + fallibility, + Self::TABLE_LAYOUT, + ) } /// Inserts a new element into the table, and returns its raw bucket. @@ -1155,14 +1301,23 @@ impl<T, A: Allocator + Clone> RawTable<T, A> { #[cfg_attr(feature = "inline-more", inline)] pub fn insert(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> Bucket<T> { unsafe { + // SAFETY: + // 1. The [`RawTableInner`] must already have properly initialized control bytes since + // we will never expose `RawTable::new_uninitialized` in a public API. + // + // 2. We reserve additional space (if necessary) right after calling this function. let mut slot = self.table.find_insert_slot(hash); - // We can avoid growing the table once we have reached our load - // factor if we are replacing a tombstone. This works since the - // number of EMPTY slots does not change in this case. + // We can avoid growing the table once we have reached our load factor if we are replacing + // a tombstone. This works since the number of EMPTY slots does not change in this case. + // + // SAFETY: The function is guaranteed to return [`InsertSlot`] that contains an index + // in the range `0..=self.buckets()`. 
let old_ctrl = *self.table.ctrl(slot.index); if unlikely(self.table.growth_left == 0 && special_is_empty(old_ctrl)) { self.reserve(1, hasher); + // SAFETY: We know for sure that `RawTableInner` has control bytes + // initialized and that there is extra space in the table. slot = self.table.find_insert_slot(hash); } @@ -1261,13 +1416,22 @@ impl<T, A: Allocator + Clone> RawTable<T, A> { ) -> Result<Bucket<T>, InsertSlot> { self.reserve(1, hasher); - match self - .table - .find_or_find_insert_slot_inner(hash, &mut |index| unsafe { - eq(self.bucket(index).as_ref()) - }) { - Ok(index) => Ok(unsafe { self.bucket(index) }), - Err(slot) => Err(slot), + unsafe { + // SAFETY: + // 1. We know for sure that there is at least one empty `bucket` in the table. + // 2. The [`RawTableInner`] must already have properly initialized control bytes since we will + // never expose `RawTable::new_uninitialized` in a public API. + // 3. The `find_or_find_insert_slot_inner` function returns the `index` of only the full bucket, + // which is in the range `0..self.buckets()` (since there is at least one empty `bucket` in + // the table), so calling `self.bucket(index)` and `Bucket::as_ref` is safe. + match self + .table + .find_or_find_insert_slot_inner(hash, &mut |index| eq(self.bucket(index).as_ref())) + { + // SAFETY: See explanation above. + Ok(index) => Ok(self.bucket(index)), + Err(slot) => Err(slot), + } } } @@ -1292,14 +1456,23 @@ impl<T, A: Allocator + Clone> RawTable<T, A> { /// Searches for an element in the table. #[inline] pub fn find(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> Option<Bucket<T>> { - let result = self.table.find_inner(hash, &mut |index| unsafe { - eq(self.bucket(index).as_ref()) - }); - - // Avoid `Option::map` because it bloats LLVM IR. - match result { - Some(index) => Some(unsafe { self.bucket(index) }), - None => None, + unsafe { + // SAFETY: + // 1. The [`RawTableInner`] must already have properly initialized control bytes since we + // will never expose `RawTable::new_uninitialized` in a public API. + // 1. The `find_inner` function returns the `index` of only the full bucket, which is in + // the range `0..self.buckets()`, so calling `self.bucket(index)` and `Bucket::as_ref` + // is safe. + let result = self + .table + .find_inner(hash, &mut |index| eq(self.bucket(index).as_ref())); + + // Avoid `Option::map` because it bloats LLVM IR. + match result { + // SAFETY: See explanation above. + Some(index) => Some(self.bucket(index)), + None => None, + } } } @@ -1423,11 +1596,11 @@ impl<T, A: Allocator + Clone> RawTable<T, A> { /// struct, we have to make the `iter` method unsafe. #[inline] pub unsafe fn iter(&self) -> RawIter<T> { - let data = Bucket::from_base_index(self.data_end(), 0); - RawIter { - iter: RawIterRange::new(self.table.ctrl.as_ptr(), data, self.table.buckets()), - items: self.table.items, - } + // SAFETY: + // 1. The caller must uphold the safety contract for `iter` method. + // 2. The [`RawTableInner`] must already have properly initialized control bytes since + // we will never expose RawTable::new_uninitialized in a public API. + self.table.iter() } /// Returns an iterator over occupied buckets that could match a given hash. 
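// Editorial sketch of the public raw-table API that the hunks above annotate (assumes
// hashbrown's optional `raw` cargo feature; the `hash_key` helper is an assumption, any
// `u64` hash derived consistently from the key works).
use core::hash::{BuildHasher, Hash, Hasher};
use std::collections::hash_map::RandomState;

use hashbrown::raw::RawTable;

fn hash_key<T: Hash>(state: &RandomState, key: &T) -> u64 {
    let mut hasher = state.build_hasher();
    key.hash(&mut hasher);
    hasher.finish()
}

fn raw_table_usage() {
    let state = RandomState::new();
    let mut table: RawTable<(u32, &str)> = RawTable::new();

    // `insert` takes the precomputed hash plus a re-hash closure used if the table grows.
    let hash = hash_key(&state, &1u32);
    table.insert(hash, (1, "one"), |&(key, _)| hash_key(&state, &key));

    // `find` probes the buckets whose control bytes match the hash and applies `eq`.
    let bucket = table.find(hash, |&(key, _)| key == 1);
    assert!(bucket.is_some());
}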
@@ -1467,8 +1640,8 @@ impl<T, A: Allocator + Clone> RawTable<T, A> { debug_assert_eq!(iter.len(), self.len()); RawDrain { iter, - table: ManuallyDrop::new(mem::replace(self, Self::new_in(self.table.alloc.clone()))), - orig_table: NonNull::from(self), + table: mem::replace(&mut self.table, RawTableInner::NEW), + orig_table: NonNull::from(&mut self.table), marker: PhantomData, } } @@ -1482,20 +1655,18 @@ impl<T, A: Allocator + Clone> RawTable<T, A> { pub unsafe fn into_iter_from(self, iter: RawIter<T>) -> RawIntoIter<T, A> { debug_assert_eq!(iter.len(), self.len()); - let alloc = self.table.alloc.clone(); let allocation = self.into_allocation(); RawIntoIter { iter, allocation, marker: PhantomData, - alloc, } } /// Converts the table into a raw allocation. The contents of the table /// should be dropped using a `RawIter` before freeing the allocation. #[cfg_attr(feature = "inline-more", inline)] - pub(crate) fn into_allocation(self) -> Option<(NonNull<u8>, Layout)> { + pub(crate) fn into_allocation(self) -> Option<(NonNull<u8>, Layout, A)> { let alloc = if self.table.is_empty_singleton() { None } else { @@ -1508,6 +1679,7 @@ impl<T, A: Allocator + Clone> RawTable<T, A> { Some(( unsafe { NonNull::new_unchecked(self.table.ctrl.as_ptr().sub(ctrl_offset)) }, layout, + unsafe { ptr::read(&self.alloc) }, )) }; mem::forget(self); @@ -1515,39 +1687,40 @@ impl<T, A: Allocator + Clone> RawTable<T, A> { } } -unsafe impl<T, A: Allocator + Clone> Send for RawTable<T, A> +unsafe impl<T, A: Allocator> Send for RawTable<T, A> where T: Send, A: Send, { } -unsafe impl<T, A: Allocator + Clone> Sync for RawTable<T, A> +unsafe impl<T, A: Allocator> Sync for RawTable<T, A> where T: Sync, A: Sync, { } -impl<A> RawTableInner<A> { +impl RawTableInner { + const NEW: Self = RawTableInner::new(); + /// Creates a new empty hash table without allocating any memory. /// /// In effect this returns a table with exactly 1 bucket. However we can /// leave the data pointer dangling since that bucket is never accessed /// due to our load factor forcing us to always have at least 1 free bucket. #[inline] - const fn new_in(alloc: A) -> Self { + const fn new() -> Self { Self { // Be careful to cast the entire slice to a raw pointer. ctrl: unsafe { NonNull::new_unchecked(Group::static_empty() as *const _ as *mut u8) }, bucket_mask: 0, items: 0, growth_left: 0, - alloc, } } } -impl<A: Allocator + Clone> RawTableInner<A> { +impl RawTableInner { /// Allocates a new [`RawTableInner`] with the given number of buckets. /// The control bytes and buckets are left uninitialized. /// @@ -1561,12 +1734,15 @@ impl<A: Allocator + Clone> RawTableInner<A> { /// /// [`Allocator`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html #[cfg_attr(feature = "inline-more", inline)] - unsafe fn new_uninitialized( - alloc: A, + unsafe fn new_uninitialized<A>( + alloc: &A, table_layout: TableLayout, buckets: usize, fallibility: Fallibility, - ) -> Result<Self, TryReserveError> { + ) -> Result<Self, TryReserveError> + where + A: Allocator, + { debug_assert!(buckets.is_power_of_two()); // Avoid `Option::ok_or_else` because it bloats LLVM IR. 
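// Editorial sketch (not part of the patch): the `Fallibility::Fallible` plumbing shown
// in the constructors and in `try_reserve` above is what lets callers observe allocation
// failure as a `TryReserveError` instead of an abort; a minimal map-level example.
use hashbrown::{HashMap, TryReserveError};

fn grow_or_report(map: &mut HashMap<u64, u64>, additional: usize) -> Result<(), TryReserveError> {
    // Reports CapacityOverflow or AllocError rather than panicking/aborting on failure.
    map.try_reserve(additional)?;
    Ok(())
}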
@@ -1575,7 +1751,7 @@ impl<A: Allocator + Clone> RawTableInner<A> { None => return Err(fallibility.capacity_overflow()), }; - let ptr: NonNull<u8> = match do_alloc(&alloc, layout) { + let ptr: NonNull<u8> = match do_alloc(alloc, layout) { Ok(block) => block.cast(), Err(_) => return Err(fallibility.alloc_err(layout)), }; @@ -1587,7 +1763,6 @@ impl<A: Allocator + Clone> RawTableInner<A> { bucket_mask: buckets - 1, items: 0, growth_left: bucket_mask_to_capacity(buckets - 1), - alloc, }) } @@ -1596,14 +1771,17 @@ impl<A: Allocator + Clone> RawTableInner<A> { /// /// All the control bytes are initialized with the [`EMPTY`] bytes. #[inline] - fn fallible_with_capacity( - alloc: A, + fn fallible_with_capacity<A>( + alloc: &A, table_layout: TableLayout, capacity: usize, fallibility: Fallibility, - ) -> Result<Self, TryReserveError> { + ) -> Result<Self, TryReserveError> + where + A: Allocator, + { if capacity == 0 { - Ok(Self::new_in(alloc)) + Ok(Self::NEW) } else { // SAFETY: We checked that we could successfully allocate the new table, and then // initialized all control bytes with the constant `EMPTY` byte. @@ -1622,36 +1800,95 @@ impl<A: Allocator + Clone> RawTableInner<A> { } } - /// Fixes up an insertion slot due to false positives for groups smaller than the group width. - /// This must only be used on insertion slots found by `find_insert_slot_in_group`. + /// Allocates a new [`RawTableInner`] with at least enough capacity for inserting + /// the given number of elements without reallocating. + /// + /// Panics if the new capacity exceeds [`isize::MAX`] bytes and [`abort`] the program + /// in case of allocation error. Use [`fallible_with_capacity`] instead if you want to + /// handle memory allocation failure. + /// + /// All the control bytes are initialized with the [`EMPTY`] bytes. + /// + /// [`fallible_with_capacity`]: RawTableInner::fallible_with_capacity + /// [`abort`]: https://doc.rust-lang.org/alloc/alloc/fn.handle_alloc_error.html + fn with_capacity<A>(alloc: &A, table_layout: TableLayout, capacity: usize) -> Self + where + A: Allocator, + { + // Avoid `Result::unwrap_or_else` because it bloats LLVM IR. + match Self::fallible_with_capacity(alloc, table_layout, capacity, Fallibility::Infallible) { + Ok(table_inner) => table_inner, + // SAFETY: All allocation errors will be caught inside `RawTableInner::new_uninitialized`. + Err(_) => unsafe { hint::unreachable_unchecked() }, + } + } + + /// Fixes up an insertion slot returned by the [`RawTableInner::find_insert_slot_in_group`] method. + /// + /// In tables smaller than the group width (`self.buckets() < Group::WIDTH`), trailing control + /// bytes outside the range of the table are filled with [`EMPTY`] entries. These will unfortunately + /// trigger a match of [`RawTableInner::find_insert_slot_in_group`] function. This is because + /// the `Some(bit)` returned by `group.match_empty_or_deleted().lowest_set_bit()` after masking + /// (`(probe_seq.pos + bit) & self.bucket_mask`) may point to a full bucket that is already occupied. + /// We detect this situation here and perform a second scan starting at the beginning of the table. + /// This second scan is guaranteed to find an empty slot (due to the load factor) before hitting the + /// trailing control bytes (containing [`EMPTY`] bytes). + /// + /// If this function is called correctly, it is guaranteed to return [`InsertSlot`] with an + /// index of an empty or deleted bucket in the range `0..self.buckets()` (see `Warning` and + /// `Safety`). 
+ /// + /// # Warning + /// + /// The table must have at least 1 empty or deleted `bucket`, otherwise if the table is less than + /// the group width (`self.buckets() < Group::WIDTH`) this function returns an index outside of the + /// table indices range `0..self.buckets()` (`0..=self.bucket_mask`). Attempt to write data at that + /// index will cause immediate [`undefined behavior`]. + /// + /// # Safety + /// + /// The safety rules are directly derived from the safety rules for [`RawTableInner::ctrl`] method. + /// Thus, in order to uphold those safety contracts, as well as for the correct logic of the work + /// of this crate, the following rules are necessary and sufficient: + /// + /// * The [`RawTableInner`] must have properly initialized control bytes otherwise calling this + /// function results in [`undefined behavior`]. + /// + /// * This function must only be used on insertion slots found by [`RawTableInner::find_insert_slot_in_group`] + /// (after the `find_insert_slot_in_group` function, but before insertion into the table). + /// + /// * The `index` must not be greater than the `self.bucket_mask`, i.e. `(index + 1) <= self.buckets()` + /// (this one is provided by the [`RawTableInner::find_insert_slot_in_group`] function). + /// + /// Calling this function with an index not provided by [`RawTableInner::find_insert_slot_in_group`] + /// may result in [`undefined behavior`] even if the index satisfies the safety rules of the + /// [`RawTableInner::ctrl`] function (`index < self.bucket_mask + 1 + Group::WIDTH`). + /// + /// [`RawTableInner::ctrl`]: RawTableInner::ctrl + /// [`RawTableInner::find_insert_slot_in_group`]: RawTableInner::find_insert_slot_in_group + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html #[inline] unsafe fn fix_insert_slot(&self, mut index: usize) -> InsertSlot { - // In tables smaller than the group width - // (self.buckets() < Group::WIDTH), trailing control - // bytes outside the range of the table are filled with - // EMPTY entries. These will unfortunately trigger a - // match, but once masked may point to a full bucket that - // is already occupied. We detect this situation here and - // perform a second scan starting at the beginning of the - // table. This second scan is guaranteed to find an empty - // slot (due to the load factor) before hitting the trailing - // control bytes (containing EMPTY). + // SAFETY: The caller of this function ensures that `index` is in the range `0..=self.bucket_mask`. if unlikely(self.is_bucket_full(index)) { debug_assert!(self.bucket_mask < Group::WIDTH); // SAFETY: // - // * We are in range and `ptr = self.ctrl(0)` are valid for reads - // and properly aligned, because the table is already allocated - // (see `TableLayout::calculate_layout_for` and `ptr::read`); + // * Since the caller of this function ensures that the control bytes are properly + // initialized and `ptr = self.ctrl(0)` points to the start of the array of control + // bytes, therefore: `ctrl` is valid for reads, properly aligned to `Group::WIDTH` + // and points to the properly initialized control bytes (see also + // `TableLayout::calculate_layout_for` and `ptr::read`); // - // * For tables larger than the group width (self.buckets() >= Group::WIDTH), - // we will never end up in the given branch, since - // `(probe_seq.pos + bit) & self.bucket_mask` in `find_insert_slot_in_group` cannot - // return a full bucket index. 
For tables smaller than the group width, calling the - // `unwrap_unchecked` function is also - // safe, as the trailing control bytes outside the range of the table are filled - // with EMPTY bytes, so this second scan either finds an empty slot (due to the - // load factor) or hits the trailing control bytes (containing EMPTY). + // * Because the caller of this function ensures that the index was provided by the + // `self.find_insert_slot_in_group()` function, so for for tables larger than the + // group width (self.buckets() >= Group::WIDTH), we will never end up in the given + // branch, since `(probe_seq.pos + bit) & self.bucket_mask` in `find_insert_slot_in_group` + // cannot return a full bucket index. For tables smaller than the group width, calling + // the `unwrap_unchecked` function is also safe, as the trailing control bytes outside + // the range of the table are filled with EMPTY bytes (and we know for sure that there + // is at least one FULL bucket), so this second scan either finds an empty slot (due to + // the load factor) or hits the trailing control bytes (containing EMPTY). index = Group::load_aligned(self.ctrl(0)) .match_empty_or_deleted() .lowest_set_bit() @@ -1661,25 +1898,62 @@ impl<A: Allocator + Clone> RawTableInner<A> { } /// Finds the position to insert something in a group. - /// This may have false positives and must be fixed up with `fix_insert_slot` before it's used. + /// + /// **This may have false positives and must be fixed up with `fix_insert_slot` + /// before it's used.** + /// + /// The function is guaranteed to return the index of an empty or deleted [`Bucket`] + /// in the range `0..self.buckets()` (`0..=self.bucket_mask`). #[inline] fn find_insert_slot_in_group(&self, group: &Group, probe_seq: &ProbeSeq) -> Option<usize> { let bit = group.match_empty_or_deleted().lowest_set_bit(); if likely(bit.is_some()) { + // This is the same as `(probe_seq.pos + bit) % self.buckets()` because the number + // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`. Some((probe_seq.pos + bit.unwrap()) & self.bucket_mask) } else { None } } - /// Searches for an element in the table, or a potential slot where that element could be - /// inserted. + /// Searches for an element in the table, or a potential slot where that element could + /// be inserted (an empty or deleted [`Bucket`] index). /// /// This uses dynamic dispatch to reduce the amount of code generated, but that is /// eliminated by LLVM optimizations. + /// + /// This function does not make any changes to the `data` part of the table, or any + /// changes to the `items` or `growth_left` field of the table. + /// + /// The table must have at least 1 empty or deleted `bucket`, otherwise, if the + /// `eq: &mut dyn FnMut(usize) -> bool` function does not return `true`, this function + /// will never return (will go into an infinite loop) for tables larger than the group + /// width, or return an index outside of the table indices range if the table is less + /// than the group width. + /// + /// This function is guaranteed to provide the `eq: &mut dyn FnMut(usize) -> bool` + /// function with only `FULL` buckets' indices and return the `index` of the found + /// element (as `Ok(index)`). 
If the element is not found and there is at least 1 + /// empty or deleted [`Bucket`] in the table, the function is guaranteed to return + /// [InsertSlot] with an index in the range `0..self.buckets()`, but in any case, + /// if this function returns [`InsertSlot`], it will contain an index in the range + /// `0..=self.buckets()`. + /// + /// # Safety + /// + /// The [`RawTableInner`] must have properly initialized control bytes otherwise calling + /// this function results in [`undefined behavior`]. + /// + /// Attempt to write data at the [`InsertSlot`] returned by this function when the table is + /// less than the group width and if there was not at least one empty or deleted bucket in + /// the table will cause immediate [`undefined behavior`]. This is because in this case the + /// function will return `self.bucket_mask + 1` as an index due to the trailing [`EMPTY] + /// control bytes outside the table range. + /// + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html #[inline] - fn find_or_find_insert_slot_inner( + unsafe fn find_or_find_insert_slot_inner( &self, hash: u64, eq: &mut dyn FnMut(usize) -> bool, @@ -1690,6 +1964,21 @@ impl<A: Allocator + Clone> RawTableInner<A> { let mut probe_seq = self.probe_seq(hash); loop { + // SAFETY: + // * Caller of this function ensures that the control bytes are properly initialized. + // + // * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1` + // of the table due to masking with `self.bucket_mask` and also because mumber of + // buckets is a power of two (see `self.probe_seq` function). + // + // * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to + // call `Group::load` due to the extended control bytes range, which is + // `self.bucket_mask + 1 + Group::WIDTH` (in fact, this means that the last control + // byte will never be read for the allocated table); + // + // * Also, even if `RawTableInner` is not already allocated, `ProbeSeq.pos` will + // always return "0" (zero), so Group::load will read unaligned `Group::static_empty()` + // bytes, which is safe (see RawTableInner::new). let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) }; for bit in group.match_byte(h2_hash) { @@ -1713,6 +2002,10 @@ impl<A: Allocator + Clone> RawTableInner<A> { // least one. For tables smaller than the group width, there will still be an // empty element in the current (and only) group due to the load factor. unsafe { + // SAFETY: + // * Caller of this function ensures that the control bytes are properly initialized. + // + // * We use this function with the slot / index found by `self.find_insert_slot_in_group` return Err(self.fix_insert_slot(insert_slot.unwrap_unchecked())); } } @@ -1721,13 +2014,68 @@ impl<A: Allocator + Clone> RawTableInner<A> { } } - /// Searches for an empty or deleted bucket which is suitable for inserting - /// a new element and sets the hash for that slot. + /// Searches for an empty or deleted bucket which is suitable for inserting a new + /// element and sets the hash for that slot. Returns an index of that slot and the + /// old control byte stored in the found index. + /// + /// This function does not check if the given element exists in the table. Also, + /// this function does not check if there is enough space in the table to insert + /// a new element. 
Caller of the function must ensure that the table has at + /// least 1 empty or deleted `bucket`, otherwise this function will never return + /// (will go into an infinite loop) for tables larger than the group width, or + /// return an index outside of the table indices range if the table is less than + /// the group width. /// - /// There must be at least 1 empty bucket in the table. + /// If there is at least 1 empty or deleted `bucket` in the table, the function is + /// guaranteed to return an `index` in the range `0..self.buckets()`, but in any case, + /// if this function returns an `index` it will be in the range `0..=self.buckets()`. + /// + /// This function does not make any changes to the `data` parts of the table, + /// or any changes to the `items` or `growth_left` field of the table. + /// + /// # Safety + /// + /// The safety rules are directly derived from the safety rules for the + /// [`RawTableInner::set_ctrl_h2`] and [`RawTableInner::find_insert_slot`] methods. + /// Thus, in order to uphold the safety contracts for those methods, as well as for + /// the correct logic of this crate, you must observe the following rules + /// when calling this function: + /// + /// * The [`RawTableInner`] has already been allocated and has properly initialized + /// control bytes, otherwise calling this function results in [`undefined behavior`]. + /// + /// * The caller of this function must ensure that the "data" parts of the table + /// will have an entry in the returned index (matching the given hash) right + /// after calling this function. + /// + /// Attempt to write data at the `index` returned by this function when the table is + /// less than the group width and if there was not at least one empty or deleted bucket in + /// the table will cause immediate [`undefined behavior`]. This is because in this case the + /// function will return `self.bucket_mask + 1` as an index due to the trailing [`EMPTY`] + /// control bytes outside the table range. + /// + /// The caller must independently increase the `items` field of the table, and also, + /// if the old control byte was [`EMPTY`], then decrease the table's `growth_left` + /// field, and do not change it if the old control byte was [`DELETED`]. + /// + /// See also the [`Bucket::as_ptr`] method for more information about properly removing + /// or saving `element` from / into the [`RawTable`] / [`RawTableInner`]. + /// + /// [`Bucket::as_ptr`]: Bucket::as_ptr + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + /// [`RawTableInner::ctrl`]: RawTableInner::ctrl + /// [`RawTableInner::set_ctrl_h2`]: RawTableInner::set_ctrl_h2 + /// [`RawTableInner::find_insert_slot`]: RawTableInner::find_insert_slot #[inline] - unsafe fn prepare_insert_slot(&self, hash: u64) -> (usize, u8) { - let index = self.find_insert_slot(hash).index; + unsafe fn prepare_insert_slot(&mut self, hash: u64) -> (usize, u8) { + // SAFETY: Caller of this function ensures that the control bytes are properly initialized. + let index: usize = self.find_insert_slot(hash).index; + // SAFETY: + // 1. The `find_insert_slot` function either returns an `index` less than or + // equal to `self.buckets() = self.bucket_mask + 1` of the table, or never + // returns if it cannot find an empty or deleted slot. + // 2.
The caller of this function guarantees that the table has already been + // allocated let old_ctrl = *self.ctrl(index); self.set_ctrl_h2(index, hash); (index, old_ctrl) @@ -1744,24 +2092,33 @@ impl<A: Allocator + Clone> RawTableInner<A> { /// width, or return an index outside of the table indices range if the table is less /// than the group width. /// - /// # Note + /// If there is at least 1 empty or deleted `bucket` in the table, the function is + /// guaranteed to return [`InsertSlot`] with an index in the range `0..self.buckets()`, + /// but in any case, if this function returns [`InsertSlot`], it will contain an index + /// in the range `0..=self.buckets()`. /// - /// Calling this function is always safe, but attempting to write data at - /// the index returned by this function when the table is less than the group width - /// and if there was not at least one empty bucket in the table will cause immediate - /// [`undefined behavior`]. This is because in this case the function will return - /// `self.bucket_mask + 1` as an index due to the trailing EMPTY control bytes outside - /// the table range. + /// # Safety + /// + /// The [`RawTableInner`] must have properly initialized control bytes otherwise calling + /// this function results in [`undefined behavior`]. + /// + /// Attempt to write data at the [`InsertSlot`] returned by this function when the table is + /// less than the group width and if there was not at least one empty or deleted bucket in + /// the table will cause immediate [`undefined behavior`]. This is because in this case the + /// function will return `self.bucket_mask + 1` as an index due to the trailing [`EMPTY] + /// control bytes outside the table range. /// /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html #[inline] - fn find_insert_slot(&self, hash: u64) -> InsertSlot { + unsafe fn find_insert_slot(&self, hash: u64) -> InsertSlot { let mut probe_seq = self.probe_seq(hash); loop { // SAFETY: + // * Caller of this function ensures that the control bytes are properly initialized. + // // * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1` // of the table due to masking with `self.bucket_mask` and also because mumber of - // buckets is a power of two (see comment for masking below). + // buckets is a power of two (see `self.probe_seq` function). // // * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to // call `Group::load` due to the extended control bytes range, which is @@ -1770,12 +2127,16 @@ impl<A: Allocator + Clone> RawTableInner<A> { // // * Also, even if `RawTableInner` is not already allocated, `ProbeSeq.pos` will // always return "0" (zero), so Group::load will read unaligned `Group::static_empty()` - // bytes, which is safe (see RawTableInner::new_in). - unsafe { - let group = Group::load(self.ctrl(probe_seq.pos)); - let index = self.find_insert_slot_in_group(&group, &probe_seq); + // bytes, which is safe (see RawTableInner::new). + let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) }; - if likely(index.is_some()) { + let index = self.find_insert_slot_in_group(&group, &probe_seq); + if likely(index.is_some()) { + // SAFETY: + // * Caller of this function ensures that the control bytes are properly initialized. 
+ // + // * We use this function with the slot / index found by `self.find_insert_slot_in_group` + unsafe { return self.fix_insert_slot(index.unwrap_unchecked()); } } @@ -1793,13 +2154,27 @@ impl<A: Allocator + Clone> RawTableInner<A> { /// The table must have at least 1 empty `bucket`, otherwise, if the /// `eq: &mut dyn FnMut(usize) -> bool` function does not return `true`, /// this function will also never return (will go into an infinite loop). + /// + /// This function is guaranteed to provide the `eq: &mut dyn FnMut(usize) -> bool` + /// function with only `FULL` buckets' indices and return the `index` of the found + /// element as `Some(index)`, so the index will always be in the range + /// `0..self.buckets()`. + /// + /// # Safety + /// + /// The [`RawTableInner`] must have properly initialized control bytes otherwise calling + /// this function results in [`undefined behavior`]. + /// + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html #[inline(always)] - fn find_inner(&self, hash: u64, eq: &mut dyn FnMut(usize) -> bool) -> Option<usize> { + unsafe fn find_inner(&self, hash: u64, eq: &mut dyn FnMut(usize) -> bool) -> Option<usize> { let h2_hash = h2(hash); let mut probe_seq = self.probe_seq(hash); loop { // SAFETY: + // * Caller of this function ensures that the control bytes are properly initialized. + // // * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1` // of the table due to masking with `self.bucket_mask`. // @@ -1853,6 +2228,9 @@ impl<A: Allocator + Clone> RawTableInner<A> { /// to do during the first insert due to tombstones). If the caller does not do /// this, then calling this function may result in a memory leak. /// + /// * The [`RawTableInner`] must have properly initialized control bytes otherwise + /// calling this function results in [`undefined behavior`]. + /// /// Calling this function on a table that has not been allocated results in /// [`undefined behavior`]. /// @@ -1900,6 +2278,227 @@ impl<A: Allocator + Clone> RawTableInner<A> { } } + /// Returns an iterator over every element in the table. + /// + /// # Safety + /// + /// If any of the following conditions are violated, the result + /// is [`undefined behavior`]: + /// + /// * The caller has to ensure that the `RawTableInner` outlives the + /// `RawIter`. Because we cannot make the `next` method unsafe on + /// the `RawIter` struct, we have to make the `iter` method unsafe. + /// + /// * The [`RawTableInner`] must have properly initialized control bytes. + /// + /// The type `T` must be the actual type of the elements stored in the table, + /// otherwise using the returned [`RawIter`] results in [`undefined behavior`]. + /// + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + #[inline] + unsafe fn iter<T>(&self) -> RawIter<T> { + // SAFETY: + // 1. Since the caller of this function ensures that the control bytes + // are properly initialized and `self.data_end()` points to the start + // of the array of control bytes, therefore: `ctrl` is valid for reads, + // properly aligned to `Group::WIDTH` and points to the properly initialized + // control bytes. + // 2. `data` bucket index in the table is equal to the `ctrl` index (i.e. + // equal to zero). + // 3. We pass the exact value of buckets of the table to the function. 
+ // + // `ctrl` points here (to the start + // of the first control byte `CT0`) + // ∨ + // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, CTa_0, CTa_1, ..., CTa_m + // \________ ________/ + // \/ + // `n = buckets - 1`, i.e. `RawTableInner::buckets() - 1` + // + // where: T0...T_n - our stored data; + // CT0...CT_n - control bytes or metadata for `data`. + // CTa_0...CTa_m - additional control bytes, where `m = Group::WIDTH - 1` (so that the search + // with loading `Group` bytes from the heap works properly, even if the result + // of `h1(hash) & self.bucket_mask` is equal to `self.bucket_mask`). See also + // `RawTableInner::set_ctrl` function. + // + // P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number + // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`. + let data = Bucket::from_base_index(self.data_end(), 0); + RawIter { + // SAFETY: See explanation above + iter: RawIterRange::new(self.ctrl.as_ptr(), data, self.buckets()), + items: self.items, + } + } + + /// Executes the destructors (if any) of the values stored in the table. + /// + /// # Note + /// + /// This function does not erase the control bytes of the table and does + /// not make any changes to the `items` or `growth_left` fields of the + /// table. If necessary, the caller of this function must manually set + /// up these table fields, for example using the [`clear_no_drop`] function. + /// + /// Be careful during calling this function, because drop function of + /// the elements can panic, and this can leave table in an inconsistent + /// state. + /// + /// # Safety + /// + /// The type `T` must be the actual type of the elements stored in the table, + /// otherwise calling this function may result in [`undefined behavior`]. + /// + /// If `T` is a type that should be dropped and **the table is not empty**, + /// calling this function more than once results in [`undefined behavior`]. + /// + /// If `T` is not [`Copy`], attempting to use values stored in the table after + /// calling this function may result in [`undefined behavior`]. + /// + /// It is safe to call this function on a table that has not been allocated, + /// on a table with uninitialized control bytes, and on a table with no actual + /// data but with `Full` control bytes if `self.items == 0`. + /// + /// See also [`Bucket::drop`] / [`Bucket::as_ptr`] methods, for more information + /// about of properly removing or saving `element` from / into the [`RawTable`] / + /// [`RawTableInner`]. + /// + /// [`Bucket::drop`]: Bucket::drop + /// [`Bucket::as_ptr`]: Bucket::as_ptr + /// [`clear_no_drop`]: RawTableInner::clear_no_drop + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + unsafe fn drop_elements<T>(&mut self) { + // Check that `self.items != 0`. Protects against the possibility + // of creating an iterator on an table with uninitialized control bytes. + if T::NEEDS_DROP && self.items != 0 { + // SAFETY: We know for sure that RawTableInner will outlive the + // returned `RawIter` iterator, and the caller of this function + // must uphold the safety contract for `drop_elements` method. + for item in self.iter::<T>() { + // SAFETY: The caller must uphold the safety contract for + // `drop_elements` method. + item.drop(); + } + } + } + + /// Executes the destructors (if any) of the values stored in the table and than + /// deallocates the table. 
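The `T::NEEDS_DROP && self.items != 0` guard in `drop_elements` above is the standard `mem::needs_drop` optimization: if the element type has no destructor there is nothing to run, so the iteration is skipped entirely. A small self-contained sketch of the same pattern, using a hypothetical `Slots` container rather than anything from this crate:

use std::mem;

/// Hypothetical container used only for this sketch.
struct Slots<T> {
    items: Vec<Option<T>>,
}

impl<T> Slots<T> {
    /// Run destructors for all stored values, mirroring the
    /// "skip the loop if nothing needs dropping" pattern above.
    fn drop_elements(&mut self) {
        if mem::needs_drop::<T>() && !self.items.is_empty() {
            for slot in &mut self.items {
                // `take()` replaces the slot with `None` and drops the old value.
                drop(slot.take());
            }
        }
    }
}

fn main() {
    let mut s = Slots { items: vec![Some(String::from("a")), None, Some(String::from("b"))] };
    s.drop_elements(); // runs the `String` destructors
    let mut t = Slots { items: vec![Some(1u32), Some(2)] };
    t.drop_elements(); // `u32` needs no drop, so the loop is skipped entirely
}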
+ /// + /// # Note + /// + /// Calling this function automatically makes invalid (dangling) all instances of + /// buckets ([`Bucket`]) and makes invalid (dangling) the `ctrl` field of the table. + /// + /// This function does not make any changes to the `bucket_mask`, `items` or `growth_left` + /// fields of the table. If necessary, the caller of this function must manually set + /// up these table fields. + /// + /// # Safety + /// + /// If any of the following conditions are violated, the result is [`undefined behavior`]: + /// + /// * Calling this function more than once; + /// + /// * The type `T` must be the actual type of the elements stored in the table. + /// + /// * The `alloc` must be the same [`Allocator`] as the `Allocator` that was used + /// to allocate this table. + /// + /// * The `table_layout` must be the same [`TableLayout`] as the `TableLayout` that + /// was used to allocate this table. + /// + /// The caller of this function should pay attention to the possibility of the + /// elements' drop function panicking, because this: + /// + /// * May leave the table in an inconsistent state; + /// + /// * Memory is never deallocated, so a memory leak may occur. + /// + /// Attempt to use the `ctrl` field of the table (dereference) after calling this + /// function results in [`undefined behavior`]. + /// + /// It is safe to call this function on a table that has not been allocated, + /// on a table with uninitialized control bytes, and on a table with no actual + /// data but with `Full` control bytes if `self.items == 0`. + /// + /// See also [`RawTableInner::drop_elements`] or [`RawTableInner::free_buckets`] + /// for more information. + /// + /// [`RawTableInner::drop_elements`]: RawTableInner::drop_elements + /// [`RawTableInner::free_buckets`]: RawTableInner::free_buckets + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + unsafe fn drop_inner_table<T, A: Allocator>(&mut self, alloc: &A, table_layout: TableLayout) { + if !self.is_empty_singleton() { + unsafe { + // SAFETY: The caller must uphold the safety contract for `drop_inner_table` method. + self.drop_elements::<T>(); + // SAFETY: + // 1. We have checked that our table is allocated. + // 2. The caller must uphold the safety contract for `drop_inner_table` method. + self.free_buckets(alloc, table_layout); + } + } + } + + /// Returns a pointer to an element in the table (convenience for + /// `Bucket::from_base_index(self.data_end::<T>(), index)`). + /// + /// The caller must ensure that the `RawTableInner` outlives the returned [`Bucket<T>`], + /// otherwise using it may result in [`undefined behavior`]. + /// + /// # Safety + /// + /// If `mem::size_of::<T>() != 0`, then the safety rules are directly derived from the + /// safety rules of the [`Bucket::from_base_index`] function. Therefore, when calling + /// this function, the following safety rules must be observed: + /// + /// * The table must already be allocated; + /// + /// * The `index` must not be greater than the number returned by the [`RawTableInner::buckets`] + /// function, i.e. `(index + 1) <= self.buckets()`. + /// + /// * The type `T` must be the actual type of the elements stored in the table, otherwise + /// using the returned [`Bucket`] may result in [`undefined behavior`]. + /// + /// It is safe to call this function with index of zero (`index == 0`) on a table that has + /// not been allocated, but using the returned [`Bucket`] results in [`undefined behavior`]. 
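The ordering that `drop_inner_table` documents above (first run the element destructors, then hand the allocation back to the allocator) is the usual two-phase teardown for manually managed storage. A standalone sketch of that ordering with a hypothetical `RawBuf` type and the global allocator; it is not the crate's code and only handles the non-empty case:

use std::alloc::{alloc, dealloc, handle_alloc_error, Layout};
use std::ptr;

/// Hypothetical owner of `len` manually managed `String`s, for this sketch only.
struct RawBuf {
    ptr: *mut String,
    len: usize,
}

impl RawBuf {
    fn new(values: Vec<String>) -> Self {
        let len = values.len();
        assert!(len > 0, "this sketch only handles the non-empty case");
        let layout = Layout::array::<String>(len).unwrap();
        let ptr = unsafe { alloc(layout) as *mut String };
        if ptr.is_null() {
            handle_alloc_error(layout);
        }
        for (i, v) in values.into_iter().enumerate() {
            unsafe { ptr.add(i).write(v) };
        }
        Self { ptr, len }
    }
}

impl Drop for RawBuf {
    fn drop(&mut self) {
        unsafe {
            // Phase 1: run the destructors of the stored values.
            for i in 0..self.len {
                ptr::drop_in_place(self.ptr.add(i));
            }
            // Phase 2: give the memory back to the allocator.
            dealloc(self.ptr as *mut u8, Layout::array::<String>(self.len).unwrap());
        }
    }
}

fn main() {
    let _buf = RawBuf::new(vec!["a".into(), "b".into()]);
    // dropped here: destructors first, deallocation second
}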
+ /// + /// If `mem::size_of::<T>() == 0`, then the only requirement is that the `index` must + /// not be greater than the number returned by the [`RawTable::buckets`] function, i.e. + /// `(index + 1) <= self.buckets()`. + /// + /// ```none + /// If mem::size_of::<T>() != 0 then return a pointer to the `element` in the `data part` of the table + /// (we start counting from "0", so that in the expression T[n], the "n" index actually one less than + /// the "buckets" number of our `RawTableInner`, i.e. "n = RawTableInner::buckets() - 1"): + /// + /// `table.bucket(3).as_ptr()` returns a pointer that points here in the `data` + /// part of the `RawTableInner`, i.e. to the start of T3 (see [`Bucket::as_ptr`]) + /// | + /// | `base = table.data_end::<T>()` points here + /// | (to the start of CT0 or to the end of T0) + /// v v + /// [Pad], T_n, ..., |T3|, T2, T1, T0, |CT0, CT1, CT2, CT3, ..., CT_n, CTa_0, CTa_1, ..., CTa_m + /// ^ \__________ __________/ + /// `table.bucket(3)` returns a pointer that points \/ + /// here in the `data` part of the `RawTableInner` additional control bytes + /// (to the end of T3) `m = Group::WIDTH - 1` + /// + /// where: T0...T_n - our stored data; + /// CT0...CT_n - control bytes or metadata for `data`; + /// CTa_0...CTa_m - additional control bytes (so that the search with loading `Group` bytes from + /// the heap works properly, even if the result of `h1(hash) & self.bucket_mask` + /// is equal to `self.bucket_mask`). See also `RawTableInner::set_ctrl` function. + /// + /// P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number + /// of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`. + /// ``` + /// + /// [`Bucket::from_base_index`]: Bucket::from_base_index + /// [`RawTableInner::buckets`]: RawTableInner::buckets + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html #[inline] unsafe fn bucket<T>(&self, index: usize) -> Bucket<T> { debug_assert_ne!(self.bucket_mask, 0); @@ -1907,6 +2506,52 @@ impl<A: Allocator + Clone> RawTableInner<A> { Bucket::from_base_index(self.data_end(), index) } + /// Returns a raw `*mut u8` pointer to the start of the `data` element in the table + /// (convenience for `self.data_end::<u8>().as_ptr().sub((index + 1) * size_of)`). + /// + /// The caller must ensure that the `RawTableInner` outlives the returned `*mut u8`, + /// otherwise using it may result in [`undefined behavior`]. + /// + /// # Safety + /// + /// If any of the following conditions are violated, the result is [`undefined behavior`]: + /// + /// * The table must already be allocated; + /// + /// * The `index` must not be greater than the number returned by the [`RawTableInner::buckets`] + /// function, i.e. `(index + 1) <= self.buckets()`; + /// + /// * The `size_of` must be equal to the size of the elements stored in the table; + /// + /// ```none + /// If mem::size_of::<T>() != 0 then return a pointer to the `element` in the `data part` of the table + /// (we start counting from "0", so that in the expression T[n], the "n" index actually one less than + /// the "buckets" number of our `RawTableInner`, i.e. "n = RawTableInner::buckets() - 1"): + /// + /// `table.bucket_ptr(3, mem::size_of::<T>())` returns a pointer that points here in the + /// `data` part of the `RawTableInner`, i.e. 
to the start of T3 + /// | + /// | `base = table.data_end::<u8>()` points here + /// | (to the start of CT0 or to the end of T0) + /// v v + /// [Pad], T_n, ..., |T3|, T2, T1, T0, |CT0, CT1, CT2, CT3, ..., CT_n, CTa_0, CTa_1, ..., CTa_m + /// \__________ __________/ + /// \/ + /// additional control bytes + /// `m = Group::WIDTH - 1` + /// + /// where: T0...T_n - our stored data; + /// CT0...CT_n - control bytes or metadata for `data`; + /// CTa_0...CTa_m - additional control bytes (so that the search with loading `Group` bytes from + /// the heap works properly, even if the result of `h1(hash) & self.bucket_mask` + /// is equal to `self.bucket_mask`). See also `RawTableInner::set_ctrl` function. + /// + /// P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number + /// of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`. + /// ``` + /// + /// [`RawTableInner::buckets`]: RawTableInner::buckets + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html #[inline] unsafe fn bucket_ptr(&self, index: usize, size_of: usize) -> *mut u8 { debug_assert_ne!(self.bucket_mask, 0); @@ -1915,9 +2560,47 @@ impl<A: Allocator + Clone> RawTableInner<A> { base.sub((index + 1) * size_of) } + /// Returns pointer to one past last `data` element in the the table as viewed from + /// the start point of the allocation (convenience for `self.ctrl.cast()`). + /// + /// This function actually returns a pointer to the end of the `data element` at + /// index "0" (zero). + /// + /// The caller must ensure that the `RawTableInner` outlives the returned [`NonNull<T>`], + /// otherwise using it may result in [`undefined behavior`]. + /// + /// # Note + /// + /// The type `T` must be the actual type of the elements stored in the table, otherwise + /// using the returned [`NonNull<T>`] may result in [`undefined behavior`]. + /// + /// ```none + /// `table.data_end::<T>()` returns pointer that points here + /// (to the end of `T0`) + /// ∨ + /// [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, CTa_0, CTa_1, ..., CTa_m + /// \________ ________/ + /// \/ + /// `n = buckets - 1`, i.e. `RawTableInner::buckets() - 1` + /// + /// where: T0...T_n - our stored data; + /// CT0...CT_n - control bytes or metadata for `data`. + /// CTa_0...CTa_m - additional control bytes, where `m = Group::WIDTH - 1` (so that the search + /// with loading `Group` bytes from the heap works properly, even if the result + /// of `h1(hash) & self.bucket_mask` is equal to `self.bucket_mask`). See also + /// `RawTableInner::set_ctrl` function. + /// + /// P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number + /// of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`. + /// ``` + /// + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html #[inline] - unsafe fn data_end<T>(&self) -> NonNull<T> { - NonNull::new_unchecked(self.ctrl.as_ptr().cast()) + fn data_end<T>(&self) -> NonNull<T> { + unsafe { + // SAFETY: `self.ctrl` is `NonNull`, so casting it is safe + NonNull::new_unchecked(self.ctrl.as_ptr().cast()) + } } /// Returns an iterator-like object for a probe sequence on the table. 
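Since the `data` part grows downwards and ends at `data_end`, the pointer to bucket `index` is always `data_end - (index + 1) * size_of::<T>()`, as the `bucket_ptr` docs above state. A tiny sketch of just that arithmetic, using an ordinary array in place of the real allocation:

fn main() {
    // Pretend these four u64 values are buckets T3, T2, T1, T0 laid out so
    // that T0 sits immediately before the (imaginary) control bytes.
    let storage: [u64; 4] = [33, 22, 11, 0]; // == [T3, T2, T1, T0]
    // `data_end` plays the role of RawTableInner::data_end::<u64>():
    // one past the last element, i.e. the end of T0.
    let data_end = storage.as_ptr().wrapping_add(storage.len());

    for index in 0..storage.len() {
        // bucket(index) points to the start of T{index}.
        let bucket = data_end.wrapping_sub(index + 1);
        let value = unsafe { *bucket };
        assert_eq!(value, (index as u64) * 11);
    }
}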
@@ -1928,6 +2611,8 @@ impl<A: Allocator + Clone> RawTableInner<A> { #[inline] fn probe_seq(&self, hash: u64) -> ProbeSeq { ProbeSeq { + // This is the same as `hash as usize % self.buckets()` because the number + // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`. pos: h1(hash) & self.bucket_mask, stride: 0, } @@ -1991,7 +2676,7 @@ impl<A: Allocator + Clone> RawTableInner<A> { /// [`Bucket::as_ptr`]: Bucket::as_ptr /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html #[inline] - unsafe fn set_ctrl_h2(&self, index: usize, hash: u64) { + unsafe fn set_ctrl_h2(&mut self, index: usize, hash: u64) { // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::set_ctrl_h2`] self.set_ctrl(index, h2(hash)); } @@ -2025,7 +2710,7 @@ impl<A: Allocator + Clone> RawTableInner<A> { /// [`Bucket::as_ptr`]: Bucket::as_ptr /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html #[inline] - unsafe fn replace_ctrl_h2(&self, index: usize, hash: u64) -> u8 { + unsafe fn replace_ctrl_h2(&mut self, index: usize, hash: u64) -> u8 { // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::replace_ctrl_h2`] let prev_ctrl = *self.ctrl(index); self.set_ctrl_h2(index, hash); @@ -2057,9 +2742,12 @@ impl<A: Allocator + Clone> RawTableInner<A> { /// [`Bucket::as_ptr`]: Bucket::as_ptr /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html #[inline] - unsafe fn set_ctrl(&self, index: usize, ctrl: u8) { + unsafe fn set_ctrl(&mut self, index: usize, ctrl: u8) { // Replicate the first Group::WIDTH control bytes at the end of - // the array without using a branch: + // the array without using a branch. If the tables smaller than + // the group width (self.buckets() < Group::WIDTH), + // `index2 = Group::WIDTH + index`, otherwise `index2` is: + // // - If index >= Group::WIDTH then index == index2. // - Otherwise index2 == self.bucket_mask + 1 + index. // @@ -2142,25 +2830,45 @@ impl<A: Allocator + Clone> RawTableInner<A> { self.bucket_mask == 0 } + /// Attempts to allocate a new hash table with at least enough capacity + /// for inserting the given number of elements without reallocating, + /// and return it inside ScopeGuard to protect against panic in the hash + /// function. + /// + /// # Note + /// + /// It is recommended (but not required): + /// + /// * That the new table's `capacity` be greater than or equal to `self.items`. + /// + /// * The `alloc` is the same [`Allocator`] as the `Allocator` used + /// to allocate this table. + /// + /// * The `table_layout` is the same [`TableLayout`] as the `TableLayout` used + /// to allocate this table. + /// + /// If `table_layout` does not match the `TableLayout` that was used to allocate + /// this table, then using `mem::swap` with the `self` and the new table returned + /// by this function results in [`undefined behavior`]. 
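The two properties listed for `index2` in `set_ctrl` above (`index2 == index` when `index >= Group::WIDTH`, otherwise `index2 == self.bucket_mask + 1 + index`) can be obtained without a branch by stepping the index back one group and masking. A standalone sketch that checks both properties for a table larger than the group width; the helper name and the 16-byte group width are assumptions for illustration, not the crate's definitions:

const GROUP_WIDTH: usize = 16;

/// Branch-free computation of the mirrored control-byte index.
fn index2(index: usize, bucket_mask: usize) -> usize {
    (index.wrapping_sub(GROUP_WIDTH) & bucket_mask) + GROUP_WIDTH
}

fn main() {
    // A table larger than the group width: buckets = 64.
    let bucket_mask = 63;
    for index in 0..=bucket_mask {
        let i2 = index2(index, bucket_mask);
        if index >= GROUP_WIDTH {
            assert_eq!(i2, index); // the replicated byte coincides with the original
        } else {
            assert_eq!(i2, bucket_mask + 1 + index); // lands in the mirrored tail
        }
    }
}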
+ /// + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html #[allow(clippy::mut_mut)] #[inline] - unsafe fn prepare_resize( + fn prepare_resize<'a, A>( &self, + alloc: &'a A, table_layout: TableLayout, capacity: usize, fallibility: Fallibility, - ) -> Result<crate::scopeguard::ScopeGuard<Self, impl FnMut(&mut Self)>, TryReserveError> { + ) -> Result<crate::scopeguard::ScopeGuard<Self, impl FnMut(&mut Self) + 'a>, TryReserveError> + where + A: Allocator, + { debug_assert!(self.items <= capacity); // Allocate and initialize the new table. - let mut new_table = RawTableInner::fallible_with_capacity( - self.alloc.clone(), - table_layout, - capacity, - fallibility, - )?; - new_table.growth_left -= self.items; - new_table.items = self.items; + let new_table = + RawTableInner::fallible_with_capacity(alloc, table_layout, capacity, fallibility)?; // The hash function may panic, in which case we simply free the new // table without dropping any elements that may have been copied into @@ -2170,7 +2878,11 @@ impl<A: Allocator + Clone> RawTableInner<A> { // the comment at the bottom of this function. Ok(guard(new_table, move |self_| { if !self_.is_empty_singleton() { - self_.free_buckets(table_layout); + // SAFETY: + // 1. We have checked that our table is allocated. + // 2. We know for sure that the `alloc` and `table_layout` matches the + // [`Allocator`] and [`TableLayout`] used to allocate this table. + unsafe { self_.free_buckets(alloc, table_layout) }; } })) } @@ -2179,16 +2891,38 @@ impl<A: Allocator + Clone> RawTableInner<A> { /// /// This uses dynamic dispatch to reduce the amount of /// code generated, but it is eliminated by LLVM optimizations when inlined. + /// + /// # Safety + /// + /// If any of the following conditions are violated, the result is + /// [`undefined behavior`]: + /// + /// * The `alloc` must be the same [`Allocator`] as the `Allocator` used + /// to allocate this table. + /// + /// * The `layout` must be the same [`TableLayout`] as the `TableLayout` + /// used to allocate this table. + /// + /// * The `drop` function (`fn(*mut u8)`) must be the actual drop function of + /// the elements stored in the table. + /// + /// * The [`RawTableInner`] must have properly initialized control bytes. + /// + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html #[allow(clippy::inline_always)] #[inline(always)] - unsafe fn reserve_rehash_inner( + unsafe fn reserve_rehash_inner<A>( &mut self, + alloc: &A, additional: usize, hasher: &dyn Fn(&mut Self, usize) -> u64, fallibility: Fallibility, layout: TableLayout, drop: Option<fn(*mut u8)>, - ) -> Result<(), TryReserveError> { + ) -> Result<(), TryReserveError> + where + A: Allocator, + { // Avoid `Option::ok_or_else` because it bloats LLVM IR. let new_items = match self.items.checked_add(additional) { Some(new_items) => new_items, @@ -2198,12 +2932,30 @@ impl<A: Allocator + Clone> RawTableInner<A> { if new_items <= full_capacity / 2 { // Rehash in-place without re-allocating if we have plenty of spare // capacity that is locked up due to DELETED entries. + + // SAFETY: + // 1. We know for sure that `[`RawTableInner`]` has already been allocated + // (since new_items <= full_capacity / 2); + // 2. The caller ensures that `drop` function is the actual drop function of + // the elements stored in the table. + // 3. The caller ensures that `layout` matches the [`TableLayout`] that was + // used to allocate this table. + // 4. 
The caller ensures that the control bytes of the `RawTableInner` + // are already initialized. self.rehash_in_place(hasher, layout.size, drop); Ok(()) } else { // Otherwise, conservatively resize to at least the next size up // to avoid churning deletes into frequent rehashes. + // + // SAFETY: + // 1. We know for sure that `capacity >= self.items`. + // 2. The caller ensures that `alloc` and `layout` matches the [`Allocator`] and + // [`TableLayout`] that were used to allocate this table. + // 3. The caller ensures that the control bytes of the `RawTableInner` + // are already initialized. self.resize_inner( + alloc, usize::max(new_items, full_capacity + 1), hasher, fallibility, @@ -2212,48 +2964,160 @@ impl<A: Allocator + Clone> RawTableInner<A> { } } + /// Returns an iterator over full buckets indices in the table. + /// + /// # Safety + /// + /// Behavior is undefined if any of the following conditions are violated: + /// + /// * The caller has to ensure that the `RawTableInner` outlives the + /// `FullBucketsIndices`. Because we cannot make the `next` method + /// unsafe on the `FullBucketsIndices` struct, we have to make the + /// `full_buckets_indices` method unsafe. + /// + /// * The [`RawTableInner`] must have properly initialized control bytes. + #[inline(always)] + unsafe fn full_buckets_indices(&self) -> FullBucketsIndices { + // SAFETY: + // 1. Since the caller of this function ensures that the control bytes + // are properly initialized and `self.ctrl(0)` points to the start + // of the array of control bytes, therefore: `ctrl` is valid for reads, + // properly aligned to `Group::WIDTH` and points to the properly initialized + // control bytes. + // 2. The value of `items` is equal to the amount of data (values) added + // to the table. + // + // `ctrl` points here (to the start + // of the first control byte `CT0`) + // ∨ + // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, Group::WIDTH + // \________ ________/ + // \/ + // `n = buckets - 1`, i.e. `RawTableInner::buckets() - 1` + // + // where: T0...T_n - our stored data; + // CT0...CT_n - control bytes or metadata for `data`. + let ctrl = NonNull::new_unchecked(self.ctrl(0)); + + FullBucketsIndices { + // Load the first group + // SAFETY: See explanation above. + current_group: Group::load_aligned(ctrl.as_ptr()).match_full().into_iter(), + group_first_index: 0, + ctrl, + items: self.items, + } + } + /// Allocates a new table of a different size and moves the contents of the /// current table into it. /// /// This uses dynamic dispatch to reduce the amount of /// code generated, but it is eliminated by LLVM optimizations when inlined. + /// + /// # Safety + /// + /// If any of the following conditions are violated, the result is + /// [`undefined behavior`]: + /// + /// * The `alloc` must be the same [`Allocator`] as the `Allocator` used + /// to allocate this table; + /// + /// * The `layout` must be the same [`TableLayout`] as the `TableLayout` + /// used to allocate this table; + /// + /// * The [`RawTableInner`] must have properly initialized control bytes. + /// + /// The caller of this function must ensure that `capacity >= self.items` + /// otherwise: + /// + /// * If `self.items != 0`, calling of this function with `capacity == 0` + /// results in [`undefined behavior`]. + /// + /// * If `capacity_to_buckets(capacity) < Group::WIDTH` and + /// `self.items > capacity_to_buckets(capacity)` calling this function + /// results in [`undefined behavior`]. 
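`full_buckets_indices` above walks the control bytes one group at a time and then drains the set bits of each group's `match_full()` mask, adding `group_first_index` to each bit position. The bit-draining half can be shown with a plain integer mask; a minimal sketch using a `u16` in place of the real `BitMaskIter`:

/// Yield the indices of the set bits of `mask`, lowest first,
/// the way a per-group "full buckets" mask is consumed.
fn set_bit_indices(mut mask: u16) -> impl Iterator<Item = usize> {
    std::iter::from_fn(move || {
        if mask == 0 {
            return None;
        }
        let bit = mask.trailing_zeros() as usize;
        mask &= mask - 1; // clear the lowest set bit
        Some(bit)
    })
}

fn main() {
    // Bits 0, 3 and 9 set: three full buckets in this group.
    let full: u16 = 0b10_0000_1001;
    let indices: Vec<usize> = set_bit_indices(full).collect();
    assert_eq!(indices, [0, 3, 9]);

    // A caller would add the group's first bucket index to each of these,
    // i.e. group_first_index + bit, to get absolute bucket indices.
}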
+ /// + /// * If `capacity_to_buckets(capacity) >= Group::WIDTH` and + /// `self.items > capacity_to_buckets(capacity)` calling this function + /// are never return (will go into an infinite loop). + /// + /// Note: It is recommended (but not required) that the new table's `capacity` + /// be greater than or equal to `self.items`. In case if `capacity <= self.items` + /// this function can never return. See [`RawTableInner::find_insert_slot`] for + /// more information. + /// + /// [`RawTableInner::find_insert_slot`]: RawTableInner::find_insert_slot + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html #[allow(clippy::inline_always)] #[inline(always)] - unsafe fn resize_inner( + unsafe fn resize_inner<A>( &mut self, + alloc: &A, capacity: usize, hasher: &dyn Fn(&mut Self, usize) -> u64, fallibility: Fallibility, layout: TableLayout, - ) -> Result<(), TryReserveError> { - let mut new_table = self.prepare_resize(layout, capacity, fallibility)?; - - // Copy all elements to the new table. - for i in 0..self.buckets() { - if !self.is_bucket_full(i) { - continue; - } - + ) -> Result<(), TryReserveError> + where + A: Allocator, + { + // SAFETY: We know for sure that `alloc` and `layout` matches the [`Allocator`] and [`TableLayout`] + // that were used to allocate this table. + let mut new_table = self.prepare_resize(alloc, layout, capacity, fallibility)?; + + // SAFETY: We know for sure that RawTableInner will outlive the + // returned `FullBucketsIndices` iterator, and the caller of this + // function ensures that the control bytes are properly initialized. + for full_byte_index in self.full_buckets_indices() { // This may panic. - let hash = hasher(self, i); + let hash = hasher(self, full_byte_index); + // SAFETY: // We can use a simpler version of insert() here since: - // - there are no DELETED entries. - // - we know there is enough space in the table. - // - all elements are unique. - let (index, _) = new_table.prepare_insert_slot(hash); + // 1. There are no DELETED entries. + // 2. We know there is enough space in the table. + // 3. All elements are unique. + // 4. The caller of this function guarantees that `capacity > 0` + // so `new_table` must already have some allocated memory. + // 5. We set `growth_left` and `items` fields of the new table + // after the loop. + // 6. We insert into the table, at the returned index, the data + // matching the given hash immediately after calling this function. + let (new_index, _) = new_table.prepare_insert_slot(hash); + // SAFETY: + // + // * `src` is valid for reads of `layout.size` bytes, since the + // table is alive and the `full_byte_index` is guaranteed to be + // within bounds (see `FullBucketsIndices::next_impl`); + // + // * `dst` is valid for writes of `layout.size` bytes, since the + // caller ensures that `table_layout` matches the [`TableLayout`] + // that was used to allocate old table and we have the `new_index` + // returned by `prepare_insert_slot`. + // + // * Both `src` and `dst` are properly aligned. + // + // * Both `src` and `dst` point to different region of memory. ptr::copy_nonoverlapping( - self.bucket_ptr(i, layout.size), - new_table.bucket_ptr(index, layout.size), + self.bucket_ptr(full_byte_index, layout.size), + new_table.bucket_ptr(new_index, layout.size), layout.size, ); } + // The hash function didn't panic, so we can safely set the + // `growth_left` and `items` fields of the new table. 
+ new_table.growth_left -= self.items; + new_table.items = self.items; + // We successfully copied all elements without panicking. Now replace // self with the new table. The old table will have its memory freed but // the items will not be dropped (since they have been moved into the // new table). + // SAFETY: The caller ensures that `table_layout` matches the [`TableLayout`] + // that was used to allocate this table. mem::swap(self, &mut new_table); Ok(()) @@ -2266,6 +3130,21 @@ impl<A: Allocator + Clone> RawTableInner<A> { /// /// This uses dynamic dispatch to reduce the amount of /// code generated, but it is eliminated by LLVM optimizations when inlined. + /// + /// # Safety + /// + /// If any of the following conditions are violated, the result is [`undefined behavior`]: + /// + /// * The `size_of` must be equal to the size of the elements stored in the table; + /// + /// * The `drop` function (`fn(*mut u8)`) must be the actual drop function of + /// the elements stored in the table. + /// + /// * The [`RawTableInner`] has already been allocated; + /// + /// * The [`RawTableInner`] must have properly initialized control bytes. + /// + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html #[allow(clippy::inline_always)] #[cfg_attr(feature = "inline-more", inline(always))] #[cfg_attr(not(feature = "inline-more"), inline)] @@ -2309,6 +3188,9 @@ impl<A: Allocator + Clone> RawTableInner<A> { let hash = hasher(*guard, i); // Search for a suitable place to put it + // + // SAFETY: Caller of this function ensures that the control bytes + // are properly initialized. let new_i = guard.find_insert_slot(hash).index; // Probing works by scanning through all of the control @@ -2349,14 +3231,64 @@ impl<A: Allocator + Clone> RawTableInner<A> { mem::forget(guard); } + /// Deallocates the table without dropping any entries. + /// + /// # Note + /// + /// This function must be called only after [`drop_elements`](RawTableInner::drop_elements), + /// else it can lead to leaking of memory. Also calling this function automatically + /// makes invalid (dangling) all instances of buckets ([`Bucket`]) and makes invalid + /// (dangling) the `ctrl` field of the table. + /// + /// # Safety + /// + /// If any of the following conditions are violated, the result is [`Undefined Behavior`]: + /// + /// * The [`RawTableInner`] has already been allocated; + /// + /// * The `alloc` must be the same [`Allocator`] as the `Allocator` that was used + /// to allocate this table. + /// + /// * The `table_layout` must be the same [`TableLayout`] as the `TableLayout` that was used + /// to allocate this table. + /// + /// See also [`GlobalAlloc::dealloc`] or [`Allocator::deallocate`] for more information. + /// + /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + /// [`GlobalAlloc::dealloc`]: https://doc.rust-lang.org/alloc/alloc/trait.GlobalAlloc.html#tymethod.dealloc + /// [`Allocator::deallocate`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html#tymethod.deallocate #[inline] - unsafe fn free_buckets(&mut self, table_layout: TableLayout) { + unsafe fn free_buckets<A>(&mut self, alloc: &A, table_layout: TableLayout) + where + A: Allocator, + { + // SAFETY: The caller must uphold the safety contract for `free_buckets` + // method. 
let (ptr, layout) = self.allocation_info(table_layout); - self.alloc.deallocate(ptr, layout); + alloc.deallocate(ptr, layout); } + /// Returns a pointer to the allocated memory and the layout that was used to + /// allocate the table. + /// + /// # Safety + /// + /// Caller of this function must observe the following safety rules: + /// + /// * The [`RawTableInner`] has already been allocated, otherwise + /// calling this function results in [`undefined behavior`] + /// + /// * The `table_layout` must be the same [`TableLayout`] as the `TableLayout` + /// that was used to allocate this table. Failure to comply with this condition + /// may result in [`undefined behavior`]. + /// + /// See also [`GlobalAlloc::dealloc`] or [`Allocator::deallocate`] for more information. + /// + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + /// [`GlobalAlloc::dealloc`]: https://doc.rust-lang.org/alloc/alloc/trait.GlobalAlloc.html#tymethod.dealloc + /// [`Allocator::deallocate`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html#tymethod.deallocate #[inline] - fn allocation_info(&self, table_layout: TableLayout) -> (NonNull<u8>, Layout) { + unsafe fn allocation_info(&self, table_layout: TableLayout) -> (NonNull<u8>, Layout) { debug_assert!( !self.is_empty_singleton(), "this function can only be called on non-empty tables" @@ -2368,17 +3300,37 @@ impl<A: Allocator + Clone> RawTableInner<A> { None => unsafe { hint::unreachable_unchecked() }, }; ( + // SAFETY: The caller must uphold the safety contract for `allocation_info` method. unsafe { NonNull::new_unchecked(self.ctrl.as_ptr().sub(ctrl_offset)) }, layout, ) } + /// Returns a pointer to the allocated memory and the layout that was used to + /// allocate the table. If [`RawTableInner`] has not been allocated, this + /// function return `dangling` pointer and `()` (unit) layout. + /// + /// # Safety + /// + /// The `table_layout` must be the same [`TableLayout`] as the `TableLayout` + /// that was used to allocate this table. Failure to comply with this condition + /// may result in [`undefined behavior`]. + /// + /// See also [`GlobalAlloc::dealloc`] or [`Allocator::deallocate`] for more information. + /// + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + /// [`GlobalAlloc::dealloc`]: https://doc.rust-lang.org/alloc/alloc/trait.GlobalAlloc.html#tymethod.dealloc + /// [`Allocator::deallocate`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html#tymethod.deallocate #[cfg(feature = "raw")] - fn allocation_info_or_zero(&self, table_layout: TableLayout) -> (NonNull<u8>, Layout) { + unsafe fn allocation_info_or_zero(&self, table_layout: TableLayout) -> (NonNull<u8>, Layout) { if self.is_empty_singleton() { (NonNull::dangling(), Layout::new::<()>()) } else { - self.allocation_info(table_layout) + // SAFETY: + // 1. We have checked that our table is allocated. + // 2. The caller ensures that `table_layout` matches the [`TableLayout`] + // that was used to allocate this table. + unsafe { self.allocation_info(table_layout) } } } @@ -2491,12 +3443,16 @@ impl<A: Allocator + Clone> RawTableInner<A> { impl<T: Clone, A: Allocator + Clone> Clone for RawTable<T, A> { fn clone(&self) -> Self { if self.table.is_empty_singleton() { - Self::new_in(self.table.alloc.clone()) + Self::new_in(self.alloc.clone()) } else { unsafe { // Avoid `Result::ok_or_else` because it bloats LLVM IR. 
- let new_table = match Self::new_uninitialized( - self.table.alloc.clone(), + // + // SAFETY: This is safe as we are taking the size of an already allocated table + // and therefore capacity overflow cannot occur, `self.table.buckets()` is a power + // of two and all allocator errors will be caught inside `RawTableInner::new_uninitialized`. + let mut new_table = match Self::new_uninitialized( + self.alloc.clone(), self.table.buckets(), Fallibility::Infallible, ) { @@ -2504,24 +3460,32 @@ impl<T: Clone, A: Allocator + Clone> Clone for RawTable<T, A> { Err(_) => hint::unreachable_unchecked(), }; - // If cloning fails then we need to free the allocation for the - // new table. However we don't run its drop since its control - // bytes are not initialized yet. - let mut guard = guard(ManuallyDrop::new(new_table), |new_table| { - new_table.free_buckets(); - }); - - guard.clone_from_spec(self); - - // Disarm the scope guard and return the newly created table. - ManuallyDrop::into_inner(ScopeGuard::into_inner(guard)) + // Cloning elements may fail (the clone function may panic). But we don't + // need to worry about uninitialized control bits, since: + // 1. The number of items (elements) in the table is zero, which means that + // the control bits will not be read by the Drop function. + // 2. The `clone_from_spec` method will first copy all control bits from + // `self` (thus initializing them). But this will not affect the `Drop` + // function, since the `clone_from_spec` function sets `items` only after + // successfully cloning all elements. + new_table.clone_from_spec(self); + new_table } } } fn clone_from(&mut self, source: &Self) { if source.table.is_empty_singleton() { - *self = Self::new_in(self.table.alloc.clone()); + let mut old_inner = mem::replace(&mut self.table, RawTableInner::NEW); + unsafe { + // SAFETY: + // 1. We call the function only once; + // 2. We know for sure that `alloc` and `table_layout` match the [`Allocator`] + // and [`TableLayout`] that were used to allocate this table. + // 3. If any elements' drop function panics, then there will only be a memory leak, + // because we have replaced the inner table with a new one. + old_inner.drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT); + } } else { unsafe { // Make sure that if any panics occurs, we clear the table and @@ -2536,27 +3500,38 @@ impl<T: Clone, A: Allocator + Clone> Clone for RawTable<T, A> { // // This leak is unavoidable: we can't try dropping more elements // since this could lead to another panic and abort the process. - self_.drop_elements(); + // + // SAFETY: If something goes wrong we clear our table right after + // dropping the elements, so there is no double drop, since `items` + // will be equal to zero. + self_.table.drop_elements::<T>(); // If necessary, resize our table to match the source. if self_.buckets() != source.buckets() { - // Skip our drop by using ptr::write. - if !self_.table.is_empty_singleton() { - self_.free_buckets(); + let new_inner = match RawTableInner::new_uninitialized( + &self_.alloc, + Self::TABLE_LAYOUT, + source.buckets(), + Fallibility::Infallible, + ) { + Ok(table) => table, + Err(_) => hint::unreachable_unchecked(), + }; + // Replace the old inner with the new uninitialized one. It's ok, since if something goes + // wrong the `ScopeGuard` will initialize all control bytes and leave an empty table. + let mut old_inner = mem::replace(&mut self_.table, new_inner); + if !old_inner.is_empty_singleton() { + // SAFETY: + // 1. We have checked that our table is allocated.
+ // 2. We know for sure that `alloc` and `table_layout` matches + // the [`Allocator`] and [`TableLayout`] that were used to allocate this table. + old_inner.free_buckets(&self_.alloc, Self::TABLE_LAYOUT); } - (&mut **self_ as *mut Self).write( - // Avoid `Result::unwrap_or_else` because it bloats LLVM IR. - match Self::new_uninitialized( - self_.table.alloc.clone(), - source.buckets(), - Fallibility::Infallible, - ) { - Ok(table) => table, - Err(_) => hint::unreachable_unchecked(), - }, - ); } + // Cloning elements may fail (the clone function may panic), but the `ScopeGuard` + // inside the `clone_from_impl` function will take care of that, dropping all + // cloned elements if necessary. Our `ScopeGuard` will clear the table. self_.clone_from_spec(source); // Disarm the scope guard if cloning was successful. @@ -2613,7 +3588,7 @@ impl<T: Clone, A: Allocator + Clone> RawTable<T, A> { // to make sure we drop only the elements that have been // cloned so far. let mut guard = guard((0, &mut *self), |(index, self_)| { - if Self::DATA_NEEDS_DROP { + if T::NEEDS_DROP { for i in 0..=*index { if self_.is_bucket_full(i) { self_.bucket(i).drop(); @@ -2650,7 +3625,7 @@ impl<T: Clone, A: Allocator + Clone> RawTable<T, A> { { self.clear(); - let guard_self = guard(&mut *self, |self_| { + let mut guard_self = guard(&mut *self, |self_| { // Clear the partially copied table if a panic occurs, otherwise // items and growth_left will be out of sync with the contents // of the table. @@ -2683,7 +3658,7 @@ impl<T: Clone, A: Allocator + Clone> RawTable<T, A> { } } -impl<T, A: Allocator + Clone + Default> Default for RawTable<T, A> { +impl<T, A: Allocator + Default> Default for RawTable<T, A> { #[inline] fn default() -> Self { Self::new_in(Default::default()) @@ -2691,31 +3666,41 @@ impl<T, A: Allocator + Clone + Default> Default for RawTable<T, A> { } #[cfg(feature = "nightly")] -unsafe impl<#[may_dangle] T, A: Allocator + Clone> Drop for RawTable<T, A> { +unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawTable<T, A> { #[cfg_attr(feature = "inline-more", inline)] fn drop(&mut self) { - if !self.table.is_empty_singleton() { - unsafe { - self.drop_elements(); - self.free_buckets(); - } + unsafe { + // SAFETY: + // 1. We call the function only once; + // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`] + // and [`TableLayout`] that were used to allocate this table. + // 3. If the drop function of any elements fails, then only a memory leak will occur, + // and we don't care because we are inside the `Drop` function of the `RawTable`, + // so there won't be any table left in an inconsistent state. + self.table + .drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT); } } } #[cfg(not(feature = "nightly"))] -impl<T, A: Allocator + Clone> Drop for RawTable<T, A> { +impl<T, A: Allocator> Drop for RawTable<T, A> { #[cfg_attr(feature = "inline-more", inline)] fn drop(&mut self) { - if !self.table.is_empty_singleton() { - unsafe { - self.drop_elements(); - self.free_buckets(); - } + unsafe { + // SAFETY: + // 1. We call the function only once; + // 2. We know for sure that `alloc` and `table_layout` matches the [`Allocator`] + // and [`TableLayout`] that were used to allocate this table. + // 3. If the drop function of any elements fails, then only a memory leak will occur, + // and we don't care because we are inside the `Drop` function of the `RawTable`, + // so there won't be any table left in an inconsistent state. 
+ self.table + .drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT); } } } -impl<T, A: Allocator + Clone> IntoIterator for RawTable<T, A> { +impl<T, A: Allocator> IntoIterator for RawTable<T, A> { type Item = T; type IntoIter = RawIntoIter<T, A>; @@ -2749,14 +3734,39 @@ pub(crate) struct RawIterRange<T> { impl<T> RawIterRange<T> { /// Returns a `RawIterRange` covering a subset of a table. /// - /// The control byte address must be aligned to the group size. + /// # Safety + /// + /// If any of the following conditions are violated, the result is + /// [`undefined behavior`]: + /// + /// * `ctrl` must be [valid] for reads, i.e. table outlives the `RawIterRange`; + /// + /// * `ctrl` must be properly aligned to the group size (Group::WIDTH); + /// + /// * `ctrl` must point to the array of properly initialized control bytes; + /// + /// * `data` must be the [`Bucket`] at the `ctrl` index in the table; + /// + /// * the value of `len` must be less than or equal to the number of table buckets, + /// and the returned value of `ctrl.as_ptr().add(len).offset_from(ctrl.as_ptr())` + /// must be positive. + /// + /// * The `ctrl.add(len)` pointer must be either in bounds or one + /// byte past the end of the same [allocated table]. + /// + /// * The `len` must be a power of two. + /// + /// [valid]: https://doc.rust-lang.org/std/ptr/index.html#safety + /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html #[cfg_attr(feature = "inline-more", inline)] unsafe fn new(ctrl: *const u8, data: Bucket<T>, len: usize) -> Self { debug_assert_ne!(len, 0); debug_assert_eq!(ctrl as usize % Group::WIDTH, 0); + // SAFETY: The caller must uphold the safety rules for the [`RawIterRange::new`] let end = ctrl.add(len); // Load the first group and advance ctrl to point to the next group + // SAFETY: The caller must uphold the safety rules for the [`RawIterRange::new`] let current_group = Group::load_aligned(ctrl).match_full(); let next_ctrl = ctrl.add(Group::WIDTH); @@ -2900,8 +3910,6 @@ pub struct RawIter<T> { } impl<T> RawIter<T> { - const DATA_NEEDS_DROP: bool = mem::needs_drop::<T>(); - /// Refresh the iterator so that it reflects a removal from the given bucket. /// /// For the iterator to remain valid, this method must be called once @@ -3017,7 +4025,7 @@ impl<T> RawIter<T> { } unsafe fn drop_elements(&mut self) { - if Self::DATA_NEEDS_DROP && self.len() != 0 { + if T::NEEDS_DROP && self.items != 0 { for item in self { item.drop(); } @@ -3066,28 +4074,146 @@ impl<T> Iterator for RawIter<T> { impl<T> ExactSizeIterator for RawIter<T> {} impl<T> FusedIterator for RawIter<T> {} +/// Iterator which returns an index of every full bucket in the table. +/// +/// For maximum flexibility this iterator is not bound by a lifetime, but you +/// must observe several rules when using it: +/// - You must not free the hash table while iterating (including via growing/shrinking). +/// - It is fine to erase a bucket that has been yielded by the iterator. +/// - Erasing a bucket that has not yet been yielded by the iterator may still +/// result in the iterator yielding index of that bucket. +/// - It is unspecified whether an element inserted after the iterator was +/// created will be yielded by that iterator. +/// - The order in which the iterator yields indices of the buckets is unspecified +/// and may change in the future. +pub(crate) struct FullBucketsIndices { + // Mask of full buckets in the current group. Bits are cleared from this + // mask as each element is processed. 
+ current_group: BitMaskIter, + + // Initial value of the bytes' indices of the current group (relative + // to the start of the control bytes). + group_first_index: usize, + + // Pointer to the current group of control bytes; + // must be aligned to the group size (Group::WIDTH). + ctrl: NonNull<u8>, + + // Number of elements in the table. + items: usize, +} + +impl FullBucketsIndices { + /// Advances the iterator and returns the next value. + /// + /// # Safety + /// + /// If any of the following conditions are violated, the result is + /// [`Undefined Behavior`]: + /// + /// * The [`RawTableInner`] / [`RawTable`] must be alive and not moved, + /// i.e. the table outlives the `FullBucketsIndices`; + /// + /// * It never tries to iterate after getting all elements. + /// + /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + #[inline(always)] + unsafe fn next_impl(&mut self) -> Option<usize> { + loop { + if let Some(index) = self.current_group.next() { + // The returned `self.group_first_index + index` will always + // be in the range `0..self.buckets()`. See explanation below. + return Some(self.group_first_index + index); + } + + // SAFETY: The caller of this function ensures that: + // + // 1. It never tries to iterate after getting all the elements; + // 2. The table is alive and has not moved; + // 3. The first `self.ctrl` pointed to the start of the array of control bytes. + // + // Taking the above into account, we always stay within the bounds, because: + // + // 1. For tables smaller than the group width (self.buckets() <= Group::WIDTH), + // we will never end up in the given branch, since we should have already + // yielded all the elements of the table. + // + // 2. For tables larger than the group width: the number of buckets is a + // power of two (2 ^ n), and Group::WIDTH is also a power of two (2 ^ k). Since + // `(2 ^ n) > (2 ^ k)`, then `(2 ^ n) % (2 ^ k) = 0`. As we start from + // the start of the array of control bytes, and never try to iterate after + // getting all the elements, the last `self.ctrl` will be equal to + // `self.buckets() - Group::WIDTH`, so `self.current_group.next()` + // will always contain indices within the range `0..Group::WIDTH`, + // and subsequent `self.group_first_index + index` will always return a + // number less than `self.buckets()`. + self.ctrl = NonNull::new_unchecked(self.ctrl.as_ptr().add(Group::WIDTH)); + + // SAFETY: See explanation above. + self.current_group = Group::load_aligned(self.ctrl.as_ptr()) + .match_full() + .into_iter(); + self.group_first_index += Group::WIDTH; + } + } +} + +impl Iterator for FullBucketsIndices { + type Item = usize; + + /// Advances the iterator and returns the next value. It is up to + /// the caller to ensure that the `RawTable` outlives the `FullBucketsIndices`, + /// because we cannot make the `next` method unsafe. + #[inline(always)] + fn next(&mut self) -> Option<usize> { + // Return if we already yielded all items. + if self.items == 0 { + return None; + } + + let nxt = unsafe { + // SAFETY: + // 1. We check the number of items to yield using the `items` field. + // 2. The caller ensures that the table is alive and has not moved.
+ self.next_impl() + }; + + debug_assert!(nxt.is_some()); + self.items -= 1; + + nxt + } + + #[inline(always)] + fn size_hint(&self) -> (usize, Option<usize>) { + (self.items, Some(self.items)) + } +} + +impl ExactSizeIterator for FullBucketsIndices {} +impl FusedIterator for FullBucketsIndices {} + /// Iterator which consumes a table and returns elements. -pub struct RawIntoIter<T, A: Allocator + Clone = Global> { +pub struct RawIntoIter<T, A: Allocator = Global> { iter: RawIter<T>, - allocation: Option<(NonNull<u8>, Layout)>, + allocation: Option<(NonNull<u8>, Layout, A)>, marker: PhantomData<T>, - alloc: A, } -impl<T, A: Allocator + Clone> RawIntoIter<T, A> { +impl<T, A: Allocator> RawIntoIter<T, A> { #[cfg_attr(feature = "inline-more", inline)] pub fn iter(&self) -> RawIter<T> { self.iter.clone() } } -unsafe impl<T, A: Allocator + Clone> Send for RawIntoIter<T, A> +unsafe impl<T, A: Allocator> Send for RawIntoIter<T, A> where T: Send, A: Send, { } -unsafe impl<T, A: Allocator + Clone> Sync for RawIntoIter<T, A> +unsafe impl<T, A: Allocator> Sync for RawIntoIter<T, A> where T: Sync, A: Sync, @@ -3095,7 +4221,7 @@ where } #[cfg(feature = "nightly")] -unsafe impl<#[may_dangle] T, A: Allocator + Clone> Drop for RawIntoIter<T, A> { +unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawIntoIter<T, A> { #[cfg_attr(feature = "inline-more", inline)] fn drop(&mut self) { unsafe { @@ -3103,14 +4229,14 @@ unsafe impl<#[may_dangle] T, A: Allocator + Clone> Drop for RawIntoIter<T, A> { self.iter.drop_elements(); // Free the table - if let Some((ptr, layout)) = self.allocation { - self.alloc.deallocate(ptr, layout); + if let Some((ptr, layout, ref alloc)) = self.allocation { + alloc.deallocate(ptr, layout); } } } } #[cfg(not(feature = "nightly"))] -impl<T, A: Allocator + Clone> Drop for RawIntoIter<T, A> { +impl<T, A: Allocator> Drop for RawIntoIter<T, A> { #[cfg_attr(feature = "inline-more", inline)] fn drop(&mut self) { unsafe { @@ -3118,14 +4244,14 @@ impl<T, A: Allocator + Clone> Drop for RawIntoIter<T, A> { self.iter.drop_elements(); // Free the table - if let Some((ptr, layout)) = self.allocation { - self.alloc.deallocate(ptr, layout); + if let Some((ptr, layout, ref alloc)) = self.allocation { + alloc.deallocate(ptr, layout); } } } } -impl<T, A: Allocator + Clone> Iterator for RawIntoIter<T, A> { +impl<T, A: Allocator> Iterator for RawIntoIter<T, A> { type Item = T; #[cfg_attr(feature = "inline-more", inline)] @@ -3139,45 +4265,45 @@ impl<T, A: Allocator + Clone> Iterator for RawIntoIter<T, A> { } } -impl<T, A: Allocator + Clone> ExactSizeIterator for RawIntoIter<T, A> {} -impl<T, A: Allocator + Clone> FusedIterator for RawIntoIter<T, A> {} +impl<T, A: Allocator> ExactSizeIterator for RawIntoIter<T, A> {} +impl<T, A: Allocator> FusedIterator for RawIntoIter<T, A> {} /// Iterator which consumes elements without freeing the table storage. -pub struct RawDrain<'a, T, A: Allocator + Clone = Global> { +pub struct RawDrain<'a, T, A: Allocator = Global> { iter: RawIter<T>, // The table is moved into the iterator for the duration of the drain. This // ensures that an empty table is left if the drain iterator is leaked // without dropping. - table: ManuallyDrop<RawTable<T, A>>, - orig_table: NonNull<RawTable<T, A>>, + table: RawTableInner, + orig_table: NonNull<RawTableInner>, // We don't use a &'a mut RawTable<T> because we want RawDrain to be // covariant over T. 
marker: PhantomData<&'a RawTable<T, A>>, } -impl<T, A: Allocator + Clone> RawDrain<'_, T, A> { +impl<T, A: Allocator> RawDrain<'_, T, A> { #[cfg_attr(feature = "inline-more", inline)] pub fn iter(&self) -> RawIter<T> { self.iter.clone() } } -unsafe impl<T, A: Allocator + Copy> Send for RawDrain<'_, T, A> +unsafe impl<T, A: Allocator> Send for RawDrain<'_, T, A> where T: Send, A: Send, { } -unsafe impl<T, A: Allocator + Copy> Sync for RawDrain<'_, T, A> +unsafe impl<T, A: Allocator> Sync for RawDrain<'_, T, A> where T: Sync, A: Sync, { } -impl<T, A: Allocator + Clone> Drop for RawDrain<'_, T, A> { +impl<T, A: Allocator> Drop for RawDrain<'_, T, A> { #[cfg_attr(feature = "inline-more", inline)] fn drop(&mut self) { unsafe { @@ -3191,12 +4317,12 @@ impl<T, A: Allocator + Clone> Drop for RawDrain<'_, T, A> { // Move the now empty table back to its original location. self.orig_table .as_ptr() - .copy_from_nonoverlapping(&*self.table, 1); + .copy_from_nonoverlapping(&self.table, 1); } } } -impl<T, A: Allocator + Clone> Iterator for RawDrain<'_, T, A> { +impl<T, A: Allocator> Iterator for RawDrain<'_, T, A> { type Item = T; #[cfg_attr(feature = "inline-more", inline)] @@ -3213,8 +4339,8 @@ impl<T, A: Allocator + Clone> Iterator for RawDrain<'_, T, A> { } } -impl<T, A: Allocator + Clone> ExactSizeIterator for RawDrain<'_, T, A> {} -impl<T, A: Allocator + Clone> FusedIterator for RawDrain<'_, T, A> {} +impl<T, A: Allocator> ExactSizeIterator for RawDrain<'_, T, A> {} +impl<T, A: Allocator> FusedIterator for RawDrain<'_, T, A> {} /// Iterator over occupied buckets that could match a given hash. /// @@ -3259,7 +4385,7 @@ struct RawIterHashInner { impl<T> RawIterHash<T> { #[cfg_attr(feature = "inline-more", inline)] #[cfg(feature = "raw")] - unsafe fn new<A: Allocator + Clone>(table: &RawTable<T, A>, hash: u64) -> Self { + unsafe fn new<A: Allocator>(table: &RawTable<T, A>, hash: u64) -> Self { RawIterHash { inner: RawIterHashInner::new(&table.table, hash), _marker: PhantomData, @@ -3269,7 +4395,7 @@ impl<T> RawIterHash<T> { impl RawIterHashInner { #[cfg_attr(feature = "inline-more", inline)] #[cfg(feature = "raw")] - unsafe fn new<A: Allocator + Clone>(table: &RawTableInner<A>, hash: u64) -> Self { + unsafe fn new(table: &RawTableInner, hash: u64) -> Self { let h2_hash = h2(hash); let probe_seq = table.probe_seq(hash); let group = Group::load(table.ctrl(probe_seq.pos)); @@ -3333,6 +4459,28 @@ impl Iterator for RawIterHashInner { } } +pub(crate) struct RawExtractIf<'a, T, A: Allocator> { + pub iter: RawIter<T>, + pub table: &'a mut RawTable<T, A>, +} + +impl<T, A: Allocator> RawExtractIf<'_, T, A> { + #[cfg_attr(feature = "inline-more", inline)] + pub(crate) fn next<F>(&mut self, mut f: F) -> Option<T> + where + F: FnMut(&mut T) -> bool, + { + unsafe { + for item in &mut self.iter { + if f(item.as_mut()) { + return Some(self.table.remove(item).0); + } + } + } + None + } +} + #[cfg(test)] mod test_map { use super::*; @@ -3375,4 +4523,214 @@ mod test_map { assert!(table.find(i + 100, |x| *x == i + 100).is_none()); } } + + /// CHECKING THAT WE ARE NOT TRYING TO READ THE MEMORY OF + /// AN UNINITIALIZED TABLE DURING THE DROP + #[test] + fn test_drop_uninitialized() { + use ::alloc::vec::Vec; + + let table = unsafe { + // SAFETY: The `buckets` is power of two and we're not + // trying to actually use the returned RawTable. 
+ RawTable::<(u64, Vec<i32>)>::new_uninitialized(Global, 8, Fallibility::Infallible) + .unwrap() + }; + drop(table); + } + + /// CHECKING THAT WE DON'T TRY TO DROP DATA IF THE `ITEMS` + /// ARE ZERO, EVEN IF WE HAVE `FULL` CONTROL BYTES. + #[test] + fn test_drop_zero_items() { + use ::alloc::vec::Vec; + unsafe { + // SAFETY: The `buckets` is power of two and we're not + // trying to actually use the returned RawTable. + let table = + RawTable::<(u64, Vec<i32>)>::new_uninitialized(Global, 8, Fallibility::Infallible) + .unwrap(); + + // WE SIMULATE, AS IT WERE, A FULL TABLE. + + // SAFETY: We checked that the table is allocated and therefore the table already has + // `self.bucket_mask + 1 + Group::WIDTH` number of control bytes (see TableLayout::calculate_layout_for) + // so writing `table.table.num_ctrl_bytes() == bucket_mask + 1 + Group::WIDTH` bytes is safe. + table + .table + .ctrl(0) + .write_bytes(EMPTY, table.table.num_ctrl_bytes()); + + // SAFETY: table.capacity() is guaranteed to be smaller than table.buckets() + table.table.ctrl(0).write_bytes(0, table.capacity()); + + // Fix up the trailing control bytes. See the comments in set_ctrl + // for the handling of tables smaller than the group width. + if table.buckets() < Group::WIDTH { + // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of control bytes, + // so copying `self.buckets() == self.bucket_mask + 1` bytes with offset equal to + // `Group::WIDTH` is safe + table + .table + .ctrl(0) + .copy_to(table.table.ctrl(Group::WIDTH), table.table.buckets()); + } else { + // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of + // control bytes,so copying `Group::WIDTH` bytes with offset equal + // to `self.buckets() == self.bucket_mask + 1` is safe + table + .table + .ctrl(0) + .copy_to(table.table.ctrl(table.table.buckets()), Group::WIDTH); + } + drop(table); + } + } + + /// CHECKING THAT WE DON'T TRY TO DROP DATA IF THE `ITEMS` + /// ARE ZERO, EVEN IF WE HAVE `FULL` CONTROL BYTES. 
+ #[test] + fn test_catch_panic_clone_from() { + use ::alloc::sync::Arc; + use ::alloc::vec::Vec; + use allocator_api2::alloc::{AllocError, Allocator, Global}; + use core::sync::atomic::{AtomicI8, Ordering}; + use std::thread; + + struct MyAllocInner { + drop_count: Arc<AtomicI8>, + } + + #[derive(Clone)] + struct MyAlloc { + _inner: Arc<MyAllocInner>, + } + + impl Drop for MyAllocInner { + fn drop(&mut self) { + println!("MyAlloc freed."); + self.drop_count.fetch_sub(1, Ordering::SeqCst); + } + } + + unsafe impl Allocator for MyAlloc { + fn allocate(&self, layout: Layout) -> std::result::Result<NonNull<[u8]>, AllocError> { + let g = Global; + g.allocate(layout) + } + + unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) { + let g = Global; + g.deallocate(ptr, layout) + } + } + + const DISARMED: bool = false; + const ARMED: bool = true; + + struct CheckedCloneDrop { + panic_in_clone: bool, + dropped: bool, + need_drop: Vec<u64>, + } + + impl Clone for CheckedCloneDrop { + fn clone(&self) -> Self { + if self.panic_in_clone { + panic!("panic in clone") + } + Self { + panic_in_clone: self.panic_in_clone, + dropped: self.dropped, + need_drop: self.need_drop.clone(), + } + } + } + + impl Drop for CheckedCloneDrop { + fn drop(&mut self) { + if self.dropped { + panic!("double drop"); + } + self.dropped = true; + } + } + + let dropped: Arc<AtomicI8> = Arc::new(AtomicI8::new(2)); + + let mut table = RawTable::new_in(MyAlloc { + _inner: Arc::new(MyAllocInner { + drop_count: dropped.clone(), + }), + }); + + for (idx, panic_in_clone) in core::iter::repeat(DISARMED).take(7).enumerate() { + let idx = idx as u64; + table.insert( + idx, + ( + idx, + CheckedCloneDrop { + panic_in_clone, + dropped: false, + need_drop: vec![idx], + }, + ), + |(k, _)| *k, + ); + } + + assert_eq!(table.len(), 7); + + thread::scope(|s| { + let result = s.spawn(|| { + let armed_flags = [ + DISARMED, DISARMED, ARMED, DISARMED, DISARMED, DISARMED, DISARMED, + ]; + let mut scope_table = RawTable::new_in(MyAlloc { + _inner: Arc::new(MyAllocInner { + drop_count: dropped.clone(), + }), + }); + for (idx, &panic_in_clone) in armed_flags.iter().enumerate() { + let idx = idx as u64; + scope_table.insert( + idx, + ( + idx, + CheckedCloneDrop { + panic_in_clone, + dropped: false, + need_drop: vec![idx + 100], + }, + ), + |(k, _)| *k, + ); + } + table.clone_from(&scope_table); + }); + assert!(result.join().is_err()); + }); + + // Let's check that all iterators work fine and do not return elements + // (especially `RawIterRange`, which does not depend on the number of + // elements in the table, but looks directly at the control bytes) + // + // SAFETY: We know for sure that `RawTable` will outlive + // the returned `RawIter / RawIterRange` iterator. + assert_eq!(table.len(), 0); + assert_eq!(unsafe { table.iter().count() }, 0); + assert_eq!(unsafe { table.iter().iter.count() }, 0); + + for idx in 0..table.buckets() { + let idx = idx as u64; + assert!( + table.find(idx, |(k, _)| *k == idx).is_none(), + "Index: {idx}" + ); + } + + // All allocator clones should already be dropped. 
+ assert_eq!(dropped.load(Ordering::SeqCst), 1); + } } diff --git a/vendor/hashbrown/src/rustc_entry.rs b/vendor/hashbrown/src/rustc_entry.rs index 89447d27d..defbd4bb8 100644 --- a/vendor/hashbrown/src/rustc_entry.rs +++ b/vendor/hashbrown/src/rustc_entry.rs @@ -9,7 +9,7 @@ impl<K, V, S, A> HashMap<K, V, S, A> where K: Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { /// Gets the given key's corresponding entry in the map for in-place manipulation. /// @@ -62,7 +62,7 @@ where /// [`rustc_entry`]: struct.HashMap.html#method.rustc_entry pub enum RustcEntry<'a, K, V, A = Global> where - A: Allocator + Clone, + A: Allocator, { /// An occupied entry. Occupied(RustcOccupiedEntry<'a, K, V, A>), @@ -71,7 +71,7 @@ where Vacant(RustcVacantEntry<'a, K, V, A>), } -impl<K: Debug, V: Debug, A: Allocator + Clone> Debug for RustcEntry<'_, K, V, A> { +impl<K: Debug, V: Debug, A: Allocator> Debug for RustcEntry<'_, K, V, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { Vacant(ref v) => f.debug_tuple("Entry").field(v).finish(), @@ -86,7 +86,7 @@ impl<K: Debug, V: Debug, A: Allocator + Clone> Debug for RustcEntry<'_, K, V, A> /// [`RustcEntry`]: enum.RustcEntry.html pub struct RustcOccupiedEntry<'a, K, V, A = Global> where - A: Allocator + Clone, + A: Allocator, { key: Option<K>, elem: Bucket<(K, V)>, @@ -97,18 +97,18 @@ unsafe impl<K, V, A> Send for RustcOccupiedEntry<'_, K, V, A> where K: Send, V: Send, - A: Allocator + Clone + Send, + A: Allocator + Send, { } unsafe impl<K, V, A> Sync for RustcOccupiedEntry<'_, K, V, A> where K: Sync, V: Sync, - A: Allocator + Clone + Sync, + A: Allocator + Sync, { } -impl<K: Debug, V: Debug, A: Allocator + Clone> Debug for RustcOccupiedEntry<'_, K, V, A> { +impl<K: Debug, V: Debug, A: Allocator> Debug for RustcOccupiedEntry<'_, K, V, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("OccupiedEntry") .field("key", self.key()) @@ -123,20 +123,20 @@ impl<K: Debug, V: Debug, A: Allocator + Clone> Debug for RustcOccupiedEntry<'_, /// [`RustcEntry`]: enum.RustcEntry.html pub struct RustcVacantEntry<'a, K, V, A = Global> where - A: Allocator + Clone, + A: Allocator, { hash: u64, key: K, table: &'a mut RawTable<(K, V), A>, } -impl<K: Debug, V, A: Allocator + Clone> Debug for RustcVacantEntry<'_, K, V, A> { +impl<K: Debug, V, A: Allocator> Debug for RustcVacantEntry<'_, K, V, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_tuple("VacantEntry").field(self.key()).finish() } } -impl<'a, K, V, A: Allocator + Clone> RustcEntry<'a, K, V, A> { +impl<'a, K, V, A: Allocator> RustcEntry<'a, K, V, A> { /// Sets the value of the entry, and returns a RustcOccupiedEntry. /// /// # Examples @@ -265,7 +265,7 @@ impl<'a, K, V, A: Allocator + Clone> RustcEntry<'a, K, V, A> { } } -impl<'a, K, V: Default, A: Allocator + Clone> RustcEntry<'a, K, V, A> { +impl<'a, K, V: Default, A: Allocator> RustcEntry<'a, K, V, A> { /// Ensures a value is in the entry by inserting the default value if empty, /// and returns a mutable reference to the value in the entry. /// @@ -293,7 +293,7 @@ impl<'a, K, V: Default, A: Allocator + Clone> RustcEntry<'a, K, V, A> { } } -impl<'a, K, V, A: Allocator + Clone> RustcOccupiedEntry<'a, K, V, A> { +impl<'a, K, V, A: Allocator> RustcOccupiedEntry<'a, K, V, A> { /// Gets a reference to the key in the entry. 
/// /// # Examples @@ -518,7 +518,7 @@ impl<'a, K, V, A: Allocator + Clone> RustcOccupiedEntry<'a, K, V, A> { } } -impl<'a, K, V, A: Allocator + Clone> RustcVacantEntry<'a, K, V, A> { +impl<'a, K, V, A: Allocator> RustcVacantEntry<'a, K, V, A> { /// Gets a reference to the key that would be used when inserting a value /// through the `RustcVacantEntry`. /// diff --git a/vendor/hashbrown/src/set.rs b/vendor/hashbrown/src/set.rs index 52f6fdaf2..09b45fd9f 100644 --- a/vendor/hashbrown/src/set.rs +++ b/vendor/hashbrown/src/set.rs @@ -7,8 +7,8 @@ use core::hash::{BuildHasher, Hash}; use core::iter::{Chain, FromIterator, FusedIterator}; use core::ops::{BitAnd, BitOr, BitXor, Sub}; -use super::map::{self, DefaultHashBuilder, ExtractIfInner, HashMap, Keys}; -use crate::raw::{Allocator, Global}; +use super::map::{self, DefaultHashBuilder, HashMap, Keys}; +use crate::raw::{Allocator, Global, RawExtractIf}; // Future Optimization (FIXME!) // ============================= @@ -112,7 +112,7 @@ use crate::raw::{Allocator, Global}; /// [`HashMap`]: struct.HashMap.html /// [`PartialEq`]: https://doc.rust-lang.org/std/cmp/trait.PartialEq.html /// [`RefCell`]: https://doc.rust-lang.org/std/cell/struct.RefCell.html -pub struct HashSet<T, S = DefaultHashBuilder, A: Allocator + Clone = Global> { +pub struct HashSet<T, S = DefaultHashBuilder, A: Allocator = Global> { pub(crate) map: HashMap<T, (), S, A>, } @@ -193,7 +193,7 @@ impl<T> HashSet<T, DefaultHashBuilder> { } #[cfg(feature = "ahash")] -impl<T: Hash + Eq, A: Allocator + Clone> HashSet<T, DefaultHashBuilder, A> { +impl<T: Hash + Eq, A: Allocator> HashSet<T, DefaultHashBuilder, A> { /// Creates an empty `HashSet`. /// /// The hash set is initially created with a capacity of 0, so it will not allocate until it @@ -256,7 +256,7 @@ impl<T: Hash + Eq, A: Allocator + Clone> HashSet<T, DefaultHashBuilder, A> { } } -impl<T, S, A: Allocator + Clone> HashSet<T, S, A> { +impl<T, S, A: Allocator> HashSet<T, S, A> { /// Returns the number of elements the set can hold without reallocating. /// /// # Examples @@ -383,6 +383,8 @@ impl<T, S, A: Allocator + Clone> HashSet<T, S, A> { /// or the iteration short-circuits, then the remaining elements will be retained. /// Use [`retain()`] with a negated predicate if you do not need the returned iterator. /// + /// [`retain()`]: HashSet::retain + /// /// # Examples /// /// ``` @@ -406,7 +408,7 @@ impl<T, S, A: Allocator + Clone> HashSet<T, S, A> { { ExtractIf { f, - inner: ExtractIfInner { + inner: RawExtractIf { iter: unsafe { self.map.table.iter() }, table: &mut self.map.table, }, @@ -511,7 +513,7 @@ impl<T, S> HashSet<T, S, Global> { impl<T, S, A> HashSet<T, S, A> where - A: Allocator + Clone, + A: Allocator, { /// Returns a reference to the underlying allocator. #[inline] @@ -619,7 +621,7 @@ impl<T, S, A> HashSet<T, S, A> where T: Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { /// Reserves capacity for at least `additional` more elements to be inserted /// in the `HashSet`. The collection may reserve more space to avoid @@ -1223,7 +1225,7 @@ where } } -impl<T, S, A: Allocator + Clone> HashSet<T, S, A> { +impl<T, S, A: Allocator> HashSet<T, S, A> { /// Returns a reference to the [`RawTable`] used underneath [`HashSet`]. /// This function is only available if the `raw` feature of the crate is enabled. 
/// @@ -1269,7 +1271,7 @@ impl<T, S, A> PartialEq for HashSet<T, S, A> where T: Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { fn eq(&self, other: &Self) -> bool { if self.len() != other.len() { @@ -1284,14 +1286,14 @@ impl<T, S, A> Eq for HashSet<T, S, A> where T: Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { } impl<T, S, A> fmt::Debug for HashSet<T, S, A> where T: fmt::Debug, - A: Allocator + Clone, + A: Allocator, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_set().entries(self.iter()).finish() @@ -1300,7 +1302,7 @@ where impl<T, S, A> From<HashMap<T, (), S, A>> for HashSet<T, S, A> where - A: Allocator + Clone, + A: Allocator, { fn from(map: HashMap<T, (), S, A>) -> Self { Self { map } @@ -1311,7 +1313,7 @@ impl<T, S, A> FromIterator<T> for HashSet<T, S, A> where T: Eq + Hash, S: BuildHasher + Default, - A: Default + Allocator + Clone, + A: Default + Allocator, { #[cfg_attr(feature = "inline-more", inline)] fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self { @@ -1326,7 +1328,7 @@ where impl<T, A, const N: usize> From<[T; N]> for HashSet<T, DefaultHashBuilder, A> where T: Eq + Hash, - A: Default + Allocator + Clone, + A: Default + Allocator, { /// # Examples /// @@ -1346,7 +1348,7 @@ impl<T, S, A> Extend<T> for HashSet<T, S, A> where T: Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { #[cfg_attr(feature = "inline-more", inline)] fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) { @@ -1370,7 +1372,7 @@ impl<'a, T, S, A> Extend<&'a T> for HashSet<T, S, A> where T: 'a + Eq + Hash + Copy, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { #[cfg_attr(feature = "inline-more", inline)] fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) { @@ -1393,7 +1395,7 @@ where impl<T, S, A> Default for HashSet<T, S, A> where S: Default, - A: Default + Allocator + Clone, + A: Default + Allocator, { /// Creates an empty `HashSet<T, S>` with the `Default` value for the hasher. 
#[cfg_attr(feature = "inline-more", inline)] @@ -1408,7 +1410,7 @@ impl<T, S, A> BitOr<&HashSet<T, S, A>> for &HashSet<T, S, A> where T: Eq + Hash + Clone, S: BuildHasher + Default, - A: Allocator + Clone, + A: Allocator, { type Output = HashSet<T, S>; @@ -1441,7 +1443,7 @@ impl<T, S, A> BitAnd<&HashSet<T, S, A>> for &HashSet<T, S, A> where T: Eq + Hash + Clone, S: BuildHasher + Default, - A: Allocator + Clone, + A: Allocator, { type Output = HashSet<T, S>; @@ -1552,7 +1554,7 @@ pub struct Iter<'a, K> { /// /// [`HashSet`]: struct.HashSet.html /// [`into_iter`]: struct.HashSet.html#method.into_iter -pub struct IntoIter<K, A: Allocator + Clone = Global> { +pub struct IntoIter<K, A: Allocator = Global> { iter: map::IntoIter<K, (), A>, } @@ -1563,7 +1565,7 @@ pub struct IntoIter<K, A: Allocator + Clone = Global> { /// /// [`HashSet`]: struct.HashSet.html /// [`drain`]: struct.HashSet.html#method.drain -pub struct Drain<'a, K, A: Allocator + Clone = Global> { +pub struct Drain<'a, K, A: Allocator = Global> { iter: map::Drain<'a, K, (), A>, } @@ -1575,12 +1577,12 @@ pub struct Drain<'a, K, A: Allocator + Clone = Global> { /// [`extract_if`]: struct.HashSet.html#method.extract_if /// [`HashSet`]: struct.HashSet.html #[must_use = "Iterators are lazy unless consumed"] -pub struct ExtractIf<'a, K, F, A: Allocator + Clone = Global> +pub struct ExtractIf<'a, K, F, A: Allocator = Global> where F: FnMut(&K) -> bool, { f: F, - inner: ExtractIfInner<'a, K, (), A>, + inner: RawExtractIf<'a, (K, ()), A>, } /// A lazy iterator producing elements in the intersection of `HashSet`s. @@ -1590,7 +1592,7 @@ where /// /// [`HashSet`]: struct.HashSet.html /// [`intersection`]: struct.HashSet.html#method.intersection -pub struct Intersection<'a, T, S, A: Allocator + Clone = Global> { +pub struct Intersection<'a, T, S, A: Allocator = Global> { // iterator of the first set iter: Iter<'a, T>, // the second set @@ -1604,7 +1606,7 @@ pub struct Intersection<'a, T, S, A: Allocator + Clone = Global> { /// /// [`HashSet`]: struct.HashSet.html /// [`difference`]: struct.HashSet.html#method.difference -pub struct Difference<'a, T, S, A: Allocator + Clone = Global> { +pub struct Difference<'a, T, S, A: Allocator = Global> { // iterator of the first set iter: Iter<'a, T>, // the second set @@ -1618,7 +1620,7 @@ pub struct Difference<'a, T, S, A: Allocator + Clone = Global> { /// /// [`HashSet`]: struct.HashSet.html /// [`symmetric_difference`]: struct.HashSet.html#method.symmetric_difference -pub struct SymmetricDifference<'a, T, S, A: Allocator + Clone = Global> { +pub struct SymmetricDifference<'a, T, S, A: Allocator = Global> { iter: Chain<Difference<'a, T, S, A>, Difference<'a, T, S, A>>, } @@ -1629,11 +1631,11 @@ pub struct SymmetricDifference<'a, T, S, A: Allocator + Clone = Global> { /// /// [`HashSet`]: struct.HashSet.html /// [`union`]: struct.HashSet.html#method.union -pub struct Union<'a, T, S, A: Allocator + Clone = Global> { +pub struct Union<'a, T, S, A: Allocator = Global> { iter: Chain<Iter<'a, T>, Difference<'a, T, S, A>>, } -impl<'a, T, S, A: Allocator + Clone> IntoIterator for &'a HashSet<T, S, A> { +impl<'a, T, S, A: Allocator> IntoIterator for &'a HashSet<T, S, A> { type Item = &'a T; type IntoIter = Iter<'a, T>; @@ -1643,7 +1645,7 @@ impl<'a, T, S, A: Allocator + Clone> IntoIterator for &'a HashSet<T, S, A> { } } -impl<T, S, A: Allocator + Clone> IntoIterator for HashSet<T, S, A> { +impl<T, S, A: Allocator> IntoIterator for HashSet<T, S, A> { type Item = T; type IntoIter = IntoIter<T, A>; @@ -1709,7 
+1711,7 @@ impl<K: fmt::Debug> fmt::Debug for Iter<'_, K> { } } -impl<K, A: Allocator + Clone> Iterator for IntoIter<K, A> { +impl<K, A: Allocator> Iterator for IntoIter<K, A> { type Item = K; #[cfg_attr(feature = "inline-more", inline)] @@ -1725,22 +1727,22 @@ impl<K, A: Allocator + Clone> Iterator for IntoIter<K, A> { self.iter.size_hint() } } -impl<K, A: Allocator + Clone> ExactSizeIterator for IntoIter<K, A> { +impl<K, A: Allocator> ExactSizeIterator for IntoIter<K, A> { #[cfg_attr(feature = "inline-more", inline)] fn len(&self) -> usize { self.iter.len() } } -impl<K, A: Allocator + Clone> FusedIterator for IntoIter<K, A> {} +impl<K, A: Allocator> FusedIterator for IntoIter<K, A> {} -impl<K: fmt::Debug, A: Allocator + Clone> fmt::Debug for IntoIter<K, A> { +impl<K: fmt::Debug, A: Allocator> fmt::Debug for IntoIter<K, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let entries_iter = self.iter.iter().map(|(k, _)| k); f.debug_list().entries(entries_iter).finish() } } -impl<K, A: Allocator + Clone> Iterator for Drain<'_, K, A> { +impl<K, A: Allocator> Iterator for Drain<'_, K, A> { type Item = K; #[cfg_attr(feature = "inline-more", inline)] @@ -1756,22 +1758,22 @@ impl<K, A: Allocator + Clone> Iterator for Drain<'_, K, A> { self.iter.size_hint() } } -impl<K, A: Allocator + Clone> ExactSizeIterator for Drain<'_, K, A> { +impl<K, A: Allocator> ExactSizeIterator for Drain<'_, K, A> { #[cfg_attr(feature = "inline-more", inline)] fn len(&self) -> usize { self.iter.len() } } -impl<K, A: Allocator + Clone> FusedIterator for Drain<'_, K, A> {} +impl<K, A: Allocator> FusedIterator for Drain<'_, K, A> {} -impl<K: fmt::Debug, A: Allocator + Clone> fmt::Debug for Drain<'_, K, A> { +impl<K: fmt::Debug, A: Allocator> fmt::Debug for Drain<'_, K, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let entries_iter = self.iter.iter().map(|(k, _)| k); f.debug_list().entries(entries_iter).finish() } } -impl<K, F, A: Allocator + Clone> Iterator for ExtractIf<'_, K, F, A> +impl<K, F, A: Allocator> Iterator for ExtractIf<'_, K, F, A> where F: FnMut(&K) -> bool, { @@ -1779,9 +1781,9 @@ where #[cfg_attr(feature = "inline-more", inline)] fn next(&mut self) -> Option<Self::Item> { - let f = &mut self.f; - let (k, _) = self.inner.next(&mut |k, _| f(k))?; - Some(k) + self.inner + .next(|&mut (ref k, ())| (self.f)(k)) + .map(|(k, ())| k) } #[inline] @@ -1790,9 +1792,9 @@ where } } -impl<K, F, A: Allocator + Clone> FusedIterator for ExtractIf<'_, K, F, A> where F: FnMut(&K) -> bool {} +impl<K, F, A: Allocator> FusedIterator for ExtractIf<'_, K, F, A> where F: FnMut(&K) -> bool {} -impl<T, S, A: Allocator + Clone> Clone for Intersection<'_, T, S, A> { +impl<T, S, A: Allocator> Clone for Intersection<'_, T, S, A> { #[cfg_attr(feature = "inline-more", inline)] fn clone(&self) -> Self { Intersection { @@ -1806,7 +1808,7 @@ impl<'a, T, S, A> Iterator for Intersection<'a, T, S, A> where T: Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { type Item = &'a T; @@ -1831,7 +1833,7 @@ impl<T, S, A> fmt::Debug for Intersection<'_, T, S, A> where T: fmt::Debug + Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.clone()).finish() @@ -1842,11 +1844,11 @@ impl<T, S, A> FusedIterator for Intersection<'_, T, S, A> where T: Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { } -impl<T, S, A: Allocator + Clone> Clone for Difference<'_, T, S, A> { +impl<T, S, A: Allocator> 
Clone for Difference<'_, T, S, A> { #[cfg_attr(feature = "inline-more", inline)] fn clone(&self) -> Self { Difference { @@ -1860,7 +1862,7 @@ impl<'a, T, S, A> Iterator for Difference<'a, T, S, A> where T: Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { type Item = &'a T; @@ -1885,7 +1887,7 @@ impl<T, S, A> FusedIterator for Difference<'_, T, S, A> where T: Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { } @@ -1893,14 +1895,14 @@ impl<T, S, A> fmt::Debug for Difference<'_, T, S, A> where T: fmt::Debug + Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.clone()).finish() } } -impl<T, S, A: Allocator + Clone> Clone for SymmetricDifference<'_, T, S, A> { +impl<T, S, A: Allocator> Clone for SymmetricDifference<'_, T, S, A> { #[cfg_attr(feature = "inline-more", inline)] fn clone(&self) -> Self { SymmetricDifference { @@ -1913,7 +1915,7 @@ impl<'a, T, S, A> Iterator for SymmetricDifference<'a, T, S, A> where T: Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { type Item = &'a T; @@ -1931,7 +1933,7 @@ impl<T, S, A> FusedIterator for SymmetricDifference<'_, T, S, A> where T: Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { } @@ -1939,14 +1941,14 @@ impl<T, S, A> fmt::Debug for SymmetricDifference<'_, T, S, A> where T: fmt::Debug + Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.clone()).finish() } } -impl<T, S, A: Allocator + Clone> Clone for Union<'_, T, S, A> { +impl<T, S, A: Allocator> Clone for Union<'_, T, S, A> { #[cfg_attr(feature = "inline-more", inline)] fn clone(&self) -> Self { Union { @@ -1959,7 +1961,7 @@ impl<T, S, A> FusedIterator for Union<'_, T, S, A> where T: Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { } @@ -1967,7 +1969,7 @@ impl<T, S, A> fmt::Debug for Union<'_, T, S, A> where T: fmt::Debug + Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_list().entries(self.clone()).finish() @@ -1978,7 +1980,7 @@ impl<'a, T, S, A> Iterator for Union<'a, T, S, A> where T: Eq + Hash, S: BuildHasher, - A: Allocator + Clone, + A: Allocator, { type Item = &'a T; @@ -2030,7 +2032,7 @@ where /// ``` pub enum Entry<'a, T, S, A = Global> where - A: Allocator + Clone, + A: Allocator, { /// An occupied entry. 
/// @@ -2063,7 +2065,7 @@ where Vacant(VacantEntry<'a, T, S, A>), } -impl<T: fmt::Debug, S, A: Allocator + Clone> fmt::Debug for Entry<'_, T, S, A> { +impl<T: fmt::Debug, S, A: Allocator> fmt::Debug for Entry<'_, T, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match *self { Entry::Vacant(ref v) => f.debug_tuple("Entry").field(v).finish(), @@ -2108,11 +2110,11 @@ impl<T: fmt::Debug, S, A: Allocator + Clone> fmt::Debug for Entry<'_, T, S, A> { /// assert_eq!(set.get(&"c"), None); /// assert_eq!(set.len(), 2); /// ``` -pub struct OccupiedEntry<'a, T, S, A: Allocator + Clone = Global> { +pub struct OccupiedEntry<'a, T, S, A: Allocator = Global> { inner: map::OccupiedEntry<'a, T, (), S, A>, } -impl<T: fmt::Debug, S, A: Allocator + Clone> fmt::Debug for OccupiedEntry<'_, T, S, A> { +impl<T: fmt::Debug, S, A: Allocator> fmt::Debug for OccupiedEntry<'_, T, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("OccupiedEntry") .field("value", self.get()) @@ -2146,17 +2148,17 @@ impl<T: fmt::Debug, S, A: Allocator + Clone> fmt::Debug for OccupiedEntry<'_, T, /// } /// assert!(set.contains("b") && set.len() == 2); /// ``` -pub struct VacantEntry<'a, T, S, A: Allocator + Clone = Global> { +pub struct VacantEntry<'a, T, S, A: Allocator = Global> { inner: map::VacantEntry<'a, T, (), S, A>, } -impl<T: fmt::Debug, S, A: Allocator + Clone> fmt::Debug for VacantEntry<'_, T, S, A> { +impl<T: fmt::Debug, S, A: Allocator> fmt::Debug for VacantEntry<'_, T, S, A> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_tuple("VacantEntry").field(self.get()).finish() } } -impl<'a, T, S, A: Allocator + Clone> Entry<'a, T, S, A> { +impl<'a, T, S, A: Allocator> Entry<'a, T, S, A> { /// Sets the value of the entry, and returns an OccupiedEntry. /// /// # Examples @@ -2233,7 +2235,7 @@ impl<'a, T, S, A: Allocator + Clone> Entry<'a, T, S, A> { } } -impl<T, S, A: Allocator + Clone> OccupiedEntry<'_, T, S, A> { +impl<T, S, A: Allocator> OccupiedEntry<'_, T, S, A> { /// Gets a reference to the value in the entry. /// /// # Examples @@ -2320,7 +2322,7 @@ impl<T, S, A: Allocator + Clone> OccupiedEntry<'_, T, S, A> { } } -impl<'a, T, S, A: Allocator + Clone> VacantEntry<'a, T, S, A> { +impl<'a, T, S, A: Allocator> VacantEntry<'a, T, S, A> { /// Gets a reference to the value that would be used when inserting /// through the `VacantEntry`. 
/// @@ -2400,34 +2402,30 @@ fn assert_covariance() { fn iter<'a, 'new>(v: Iter<'a, &'static str>) -> Iter<'a, &'new str> { v } - fn into_iter<'new, A: Allocator + Clone>( - v: IntoIter<&'static str, A>, - ) -> IntoIter<&'new str, A> { + fn into_iter<'new, A: Allocator>(v: IntoIter<&'static str, A>) -> IntoIter<&'new str, A> { v } - fn difference<'a, 'new, A: Allocator + Clone>( + fn difference<'a, 'new, A: Allocator>( v: Difference<'a, &'static str, DefaultHashBuilder, A>, ) -> Difference<'a, &'new str, DefaultHashBuilder, A> { v } - fn symmetric_difference<'a, 'new, A: Allocator + Clone>( + fn symmetric_difference<'a, 'new, A: Allocator>( v: SymmetricDifference<'a, &'static str, DefaultHashBuilder, A>, ) -> SymmetricDifference<'a, &'new str, DefaultHashBuilder, A> { v } - fn intersection<'a, 'new, A: Allocator + Clone>( + fn intersection<'a, 'new, A: Allocator>( v: Intersection<'a, &'static str, DefaultHashBuilder, A>, ) -> Intersection<'a, &'new str, DefaultHashBuilder, A> { v } - fn union<'a, 'new, A: Allocator + Clone>( + fn union<'a, 'new, A: Allocator>( v: Union<'a, &'static str, DefaultHashBuilder, A>, ) -> Union<'a, &'new str, DefaultHashBuilder, A> { v } - fn drain<'new, A: Allocator + Clone>( - d: Drain<'static, &'static str, A>, - ) -> Drain<'new, &'new str, A> { + fn drain<'new, A: Allocator>(d: Drain<'static, &'static str, A>) -> Drain<'new, &'new str, A> { d } } diff --git a/vendor/hashbrown/src/table.rs b/vendor/hashbrown/src/table.rs new file mode 100644 index 000000000..bfb5dd989 --- /dev/null +++ b/vendor/hashbrown/src/table.rs @@ -0,0 +1,2030 @@ +use core::{fmt, iter::FusedIterator, marker::PhantomData}; + +use crate::{ + raw::{ + Allocator, Bucket, Global, InsertSlot, RawDrain, RawExtractIf, RawIntoIter, RawIter, + RawTable, + }, + TryReserveError, +}; + +/// Low-level hash table with explicit hashing. +/// +/// The primary use case for this type over [`HashMap`] or [`HashSet`] is to +/// support types that do not implement the [`Hash`] and [`Eq`] traits, but +/// instead require additional data not contained in the key itself to compute a +/// hash and compare two elements for equality. +/// +/// Examples of when this can be useful include: +/// - An `IndexMap` implementation where indices into a `Vec` are stored as +/// elements in a `HashTable<usize>`. Hashing and comparing the elements +/// requires indexing the associated `Vec` to get the actual value referred to +/// by the index. +/// - Avoiding re-computing a hash when it is already known. +/// - Mutating the key of an element in a way that doesn't affect its hash. +/// +/// To achieve this, `HashTable` methods that search for an element in the table +/// require a hash value and equality function to be explicitly passed in as +/// arguments. The method will then iterate over the elements with the given +/// hash and call the equality function on each of them, until a match is found. +/// +/// In most cases, a `HashTable` will not be exposed directly in an API. It will +/// instead be wrapped in a helper type which handles the work of calculating +/// hash values and comparing elements. +/// +/// Due to its low-level nature, this type provides fewer guarantees than +/// [`HashMap`] and [`HashSet`]. Specifically, the API allows you to shoot +/// yourself in the foot by having multiple elements with identical keys in the +/// table. The table itself will still function correctly and lookups will +/// arbitrarily return one of the matching elements. 
However you should avoid +/// doing this because it changes the runtime of hash table operations from +/// `O(1)` to `O(k)` where `k` is the number of duplicate entries. +/// +/// [`HashMap`]: super::HashMap +/// [`HashSet`]: super::HashSet +pub struct HashTable<T, A = Global> +where + A: Allocator, +{ + pub(crate) raw: RawTable<T, A>, +} + +impl<T> HashTable<T, Global> { + /// Creates an empty `HashTable`. + /// + /// The hash table is initially created with a capacity of 0, so it will not allocate until it + /// is first inserted into. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashTable; + /// let mut table: HashTable<&str> = HashTable::new(); + /// assert_eq!(table.len(), 0); + /// assert_eq!(table.capacity(), 0); + /// ``` + pub const fn new() -> Self { + Self { + raw: RawTable::new(), + } + } + + /// Creates an empty `HashTable` with the specified capacity. + /// + /// The hash table will be able to hold at least `capacity` elements without + /// reallocating. If `capacity` is 0, the hash table will not allocate. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashTable; + /// let mut table: HashTable<&str> = HashTable::with_capacity(10); + /// assert_eq!(table.len(), 0); + /// assert!(table.capacity() >= 10); + /// ``` + pub fn with_capacity(capacity: usize) -> Self { + Self { + raw: RawTable::with_capacity(capacity), + } + } +} + +impl<T, A> HashTable<T, A> +where + A: Allocator, +{ + /// Creates an empty `HashTable` using the given allocator. + /// + /// The hash table is initially created with a capacity of 0, so it will not allocate until it + /// is first inserted into. + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use ahash::AHasher; + /// use bumpalo::Bump; + /// use hashbrown::HashTable; + /// use std::hash::{BuildHasher, BuildHasherDefault}; + /// + /// let bump = Bump::new(); + /// let mut table = HashTable::new_in(&bump); + /// let hasher = BuildHasherDefault::<AHasher>::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// + /// // The created HashTable holds none elements + /// assert_eq!(table.len(), 0); + /// + /// // The created HashTable also doesn't allocate memory + /// assert_eq!(table.capacity(), 0); + /// + /// // Now we insert element inside created HashTable + /// table.insert_unique(hasher(&"One"), "One", hasher); + /// // We can see that the HashTable holds 1 element + /// assert_eq!(table.len(), 1); + /// // And it also allocates some capacity + /// assert!(table.capacity() > 1); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub const fn new_in(alloc: A) -> Self { + Self { + raw: RawTable::new_in(alloc), + } + } + + /// Creates an empty `HashTable` with the specified capacity using the given allocator. + /// + /// The hash table will be able to hold at least `capacity` elements without + /// reallocating. If `capacity` is 0, the hash table will not allocate. 
+ /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use ahash::AHasher; + /// use bumpalo::Bump; + /// use hashbrown::HashTable; + /// use std::hash::{BuildHasher, BuildHasherDefault}; + /// + /// let bump = Bump::new(); + /// let mut table = HashTable::with_capacity_in(5, &bump); + /// let hasher = BuildHasherDefault::<AHasher>::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// + /// // The created HashTable holds none elements + /// assert_eq!(table.len(), 0); + /// // But it can hold at least 5 elements without reallocating + /// let empty_map_capacity = table.capacity(); + /// assert!(empty_map_capacity >= 5); + /// + /// // Now we insert some 5 elements inside created HashTable + /// table.insert_unique(hasher(&"One"), "One", hasher); + /// table.insert_unique(hasher(&"Two"), "Two", hasher); + /// table.insert_unique(hasher(&"Three"), "Three", hasher); + /// table.insert_unique(hasher(&"Four"), "Four", hasher); + /// table.insert_unique(hasher(&"Five"), "Five", hasher); + /// + /// // We can see that the HashTable holds 5 elements + /// assert_eq!(table.len(), 5); + /// // But its capacity isn't changed + /// assert_eq!(table.capacity(), empty_map_capacity) + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn with_capacity_in(capacity: usize, alloc: A) -> Self { + Self { + raw: RawTable::with_capacity_in(capacity, alloc), + } + } + + /// Returns a reference to the underlying allocator. + pub fn allocator(&self) -> &A { + self.raw.allocator() + } + + /// Returns a reference to an entry in the table with the given hash and + /// which satisfies the equality function passed. + /// + /// This method will call `eq` for all entries with the given hash, but may + /// also call it for entries with a different hash. `eq` should only return + /// true for the desired entry, at which point the search is stopped. + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use ahash::AHasher; + /// use hashbrown::HashTable; + /// use std::hash::{BuildHasher, BuildHasherDefault}; + /// + /// let mut table = HashTable::new(); + /// let hasher = BuildHasherDefault::<AHasher>::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// table.insert_unique(hasher(&1), 1, hasher); + /// table.insert_unique(hasher(&2), 2, hasher); + /// table.insert_unique(hasher(&3), 3, hasher); + /// assert_eq!(table.find(hasher(&2), |&val| val == 2), Some(&2)); + /// assert_eq!(table.find(hasher(&4), |&val| val == 4), None); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn find(&self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&T> { + self.raw.get(hash, eq) + } + + /// Returns a mutable reference to an entry in the table with the given hash + /// and which satisfies the equality function passed. + /// + /// This method will call `eq` for all entries with the given hash, but may + /// also call it for entries with a different hash. `eq` should only return + /// true for the desired entry, at which point the search is stopped. + /// + /// When mutating an entry, you should ensure that it still retains the same + /// hash value as when it was inserted, otherwise lookups of that entry may + /// fail to find it. 
+ /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use ahash::AHasher; + /// use hashbrown::HashTable; + /// use std::hash::{BuildHasher, BuildHasherDefault}; + /// + /// let mut table = HashTable::new(); + /// let hasher = BuildHasherDefault::<AHasher>::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// table.insert_unique(hasher(&1), (1, "a"), |val| hasher(&val.0)); + /// if let Some(val) = table.find_mut(hasher(&1), |val| val.0 == 1) { + /// val.1 = "b"; + /// } + /// assert_eq!(table.find(hasher(&1), |val| val.0 == 1), Some(&(1, "b"))); + /// assert_eq!(table.find(hasher(&2), |val| val.0 == 2), None); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn find_mut(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&mut T> { + self.raw.get_mut(hash, eq) + } + + /// Returns an `OccupiedEntry` for an entry in the table with the given hash + /// and which satisfies the equality function passed. + /// + /// This can be used to remove the entry from the table. Call + /// [`HashTable::entry`] instead if you wish to insert an entry if the + /// lookup fails. + /// + /// This method will call `eq` for all entries with the given hash, but may + /// also call it for entries with a different hash. `eq` should only return + /// true for the desired entry, at which point the search is stopped. + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use ahash::AHasher; + /// use hashbrown::HashTable; + /// use std::hash::{BuildHasher, BuildHasherDefault}; + /// + /// let mut table = HashTable::new(); + /// let hasher = BuildHasherDefault::<AHasher>::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// table.insert_unique(hasher(&1), (1, "a"), |val| hasher(&val.0)); + /// if let Ok(entry) = table.find_entry(hasher(&1), |val| val.0 == 1) { + /// entry.remove(); + /// } + /// assert_eq!(table.find(hasher(&1), |val| val.0 == 1), None); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn find_entry( + &mut self, + hash: u64, + eq: impl FnMut(&T) -> bool, + ) -> Result<OccupiedEntry<'_, T, A>, AbsentEntry<'_, T, A>> { + match self.raw.find(hash, eq) { + Some(bucket) => Ok(OccupiedEntry { + hash, + bucket, + table: self, + }), + None => Err(AbsentEntry { table: self }), + } + } + + /// Returns an `Entry` for an entry in the table with the given hash + /// and which satisfies the equality function passed. + /// + /// This can be used to remove the entry from the table, or insert a new + /// entry with the given hash if one doesn't already exist. + /// + /// This method will call `eq` for all entries with the given hash, but may + /// also call it for entries with a different hash. `eq` should only return + /// true for the desired entry, at which point the search is stopped. + /// + /// This method may grow the table in preparation for an insertion. Call + /// [`HashTable::find_entry`] if this is undesirable. + /// + /// `hasher` is called if entries need to be moved or copied to a new table. + /// This must return the same hash value that each entry was inserted with. 
+ /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use ahash::AHasher; + /// use hashbrown::hash_table::Entry; + /// use hashbrown::HashTable; + /// use std::hash::{BuildHasher, BuildHasherDefault}; + /// + /// let mut table = HashTable::new(); + /// let hasher = BuildHasherDefault::<AHasher>::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// table.insert_unique(hasher(&1), (1, "a"), |val| hasher(&val.0)); + /// if let Entry::Occupied(entry) = table.entry(hasher(&1), |val| val.0 == 1, |val| hasher(&val.0)) + /// { + /// entry.remove(); + /// } + /// if let Entry::Vacant(entry) = table.entry(hasher(&2), |val| val.0 == 2, |val| hasher(&val.0)) { + /// entry.insert((2, "b")); + /// } + /// assert_eq!(table.find(hasher(&1), |val| val.0 == 1), None); + /// assert_eq!(table.find(hasher(&2), |val| val.0 == 2), Some(&(2, "b"))); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn entry( + &mut self, + hash: u64, + eq: impl FnMut(&T) -> bool, + hasher: impl Fn(&T) -> u64, + ) -> Entry<'_, T, A> { + match self.raw.find_or_find_insert_slot(hash, eq, hasher) { + Ok(bucket) => Entry::Occupied(OccupiedEntry { + hash, + bucket, + table: self, + }), + Err(insert_slot) => Entry::Vacant(VacantEntry { + hash, + insert_slot, + table: self, + }), + } + } + + /// Inserts an element into the `HashTable` with the given hash value, but + /// without checking whether an equivalent element already exists within the + /// table. + /// + /// `hasher` is called if entries need to be moved or copied to a new table. + /// This must return the same hash value that each entry was inserted with. + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use ahash::AHasher; + /// use hashbrown::HashTable; + /// use std::hash::{BuildHasher, BuildHasherDefault}; + /// + /// let mut v = HashTable::new(); + /// let hasher = BuildHasherDefault::<AHasher>::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// v.insert_unique(hasher(&1), 1, hasher); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn insert_unique( + &mut self, + hash: u64, + value: T, + hasher: impl Fn(&T) -> u64, + ) -> OccupiedEntry<'_, T, A> { + let bucket = self.raw.insert(hash, value, hasher); + OccupiedEntry { + hash, + bucket, + table: self, + } + } + + /// Clears the table, removing all values. + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use ahash::AHasher; + /// use hashbrown::HashTable; + /// use std::hash::{BuildHasher, BuildHasherDefault}; + /// + /// let mut v = HashTable::new(); + /// let hasher = BuildHasherDefault::<AHasher>::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// v.insert_unique(hasher(&1), 1, hasher); + /// v.clear(); + /// assert!(v.is_empty()); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn clear(&mut self) { + self.raw.clear(); + } + + /// Shrinks the capacity of the table as much as possible. It will drop + /// down as much as possible while maintaining the internal rules + /// and possibly leaving some space in accordance with the resize policy. + /// + /// `hasher` is called if entries need to be moved or copied to a new table. + /// This must return the same hash value that each entry was inserted with. 
+ /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use ahash::AHasher; + /// use hashbrown::HashTable; + /// use std::hash::{BuildHasher, BuildHasherDefault}; + /// + /// let mut table = HashTable::with_capacity(100); + /// let hasher = BuildHasherDefault::<AHasher>::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// table.insert_unique(hasher(&1), 1, hasher); + /// table.insert_unique(hasher(&2), 2, hasher); + /// assert!(table.capacity() >= 100); + /// table.shrink_to_fit(hasher); + /// assert!(table.capacity() >= 2); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn shrink_to_fit(&mut self, hasher: impl Fn(&T) -> u64) { + self.raw.shrink_to(self.len(), hasher) + } + + /// Shrinks the capacity of the table with a lower limit. It will drop + /// down no lower than the supplied limit while maintaining the internal rules + /// and possibly leaving some space in accordance with the resize policy. + /// + /// `hasher` is called if entries need to be moved or copied to a new table. + /// This must return the same hash value that each entry was inserted with. + /// + /// Panics if the current capacity is smaller than the supplied + /// minimum capacity. + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use ahash::AHasher; + /// use hashbrown::HashTable; + /// use std::hash::{BuildHasher, BuildHasherDefault}; + /// + /// let mut table = HashTable::with_capacity(100); + /// let hasher = BuildHasherDefault::<AHasher>::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// table.insert_unique(hasher(&1), 1, hasher); + /// table.insert_unique(hasher(&2), 2, hasher); + /// assert!(table.capacity() >= 100); + /// table.shrink_to(10, hasher); + /// assert!(table.capacity() >= 10); + /// table.shrink_to(0, hasher); + /// assert!(table.capacity() >= 2); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn shrink_to(&mut self, min_capacity: usize, hasher: impl Fn(&T) -> u64) { + self.raw.shrink_to(min_capacity, hasher); + } + + /// Reserves capacity for at least `additional` more elements to be inserted + /// in the `HashTable`. The collection may reserve more space to avoid + /// frequent reallocations. + /// + /// `hasher` is called if entries need to be moved or copied to a new table. + /// This must return the same hash value that each entry was inserted with. + /// + /// # Panics + /// + /// Panics if the new capacity exceeds [`isize::MAX`] bytes and [`abort`] the program + /// in case of allocation error. Use [`try_reserve`](HashTable::try_reserve) instead + /// if you want to handle memory allocation failure. 
+ /// + /// [`isize::MAX`]: https://doc.rust-lang.org/std/primitive.isize.html + /// [`abort`]: https://doc.rust-lang.org/alloc/alloc/fn.handle_alloc_error.html + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use ahash::AHasher; + /// use hashbrown::HashTable; + /// use std::hash::{BuildHasher, BuildHasherDefault}; + /// + /// let mut table: HashTable<i32> = HashTable::new(); + /// let hasher = BuildHasherDefault::<AHasher>::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// table.reserve(10, hasher); + /// assert!(table.capacity() >= 10); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn reserve(&mut self, additional: usize, hasher: impl Fn(&T) -> u64) { + self.raw.reserve(additional, hasher) + } + + /// Tries to reserve capacity for at least `additional` more elements to be inserted + /// in the given `HashTable`. The collection may reserve more space to avoid + /// frequent reallocations. + /// + /// `hasher` is called if entries need to be moved or copied to a new table. + /// This must return the same hash value that each entry was inserted with. + /// + /// # Errors + /// + /// If the capacity overflows, or the allocator reports a failure, then an error + /// is returned. + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use ahash::AHasher; + /// use hashbrown::HashTable; + /// use std::hash::{BuildHasher, BuildHasherDefault}; + /// + /// let mut table: HashTable<i32> = HashTable::new(); + /// let hasher = BuildHasherDefault::<AHasher>::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// table + /// .try_reserve(10, hasher) + /// .expect("why is the test harness OOMing on 10 bytes?"); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn try_reserve( + &mut self, + additional: usize, + hasher: impl Fn(&T) -> u64, + ) -> Result<(), TryReserveError> { + self.raw.try_reserve(additional, hasher) + } + + /// Returns the number of elements the table can hold without reallocating. + /// + /// # Examples + /// + /// ``` + /// use hashbrown::HashTable; + /// let table: HashTable<i32> = HashTable::with_capacity(100); + /// assert!(table.capacity() >= 100); + /// ``` + pub fn capacity(&self) -> usize { + self.raw.capacity() + } + + /// Returns the number of elements in the table. + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use ahash::AHasher; + /// use hashbrown::HashTable; + /// use std::hash::{BuildHasher, BuildHasherDefault}; + /// + /// let hasher = BuildHasherDefault::<AHasher>::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// let mut v = HashTable::new(); + /// assert_eq!(v.len(), 0); + /// v.insert_unique(hasher(&1), 1, hasher); + /// assert_eq!(v.len(), 1); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn len(&self) -> usize { + self.raw.len() + } + + /// Returns `true` if the set contains no elements. 
+ /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use ahash::AHasher; + /// use hashbrown::HashTable; + /// use std::hash::{BuildHasher, BuildHasherDefault}; + /// + /// let hasher = BuildHasherDefault::<AHasher>::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// let mut v = HashTable::new(); + /// assert!(v.is_empty()); + /// v.insert_unique(hasher(&1), 1, hasher); + /// assert!(!v.is_empty()); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn is_empty(&self) -> bool { + self.raw.is_empty() + } + + /// An iterator visiting all elements in arbitrary order. + /// The iterator element type is `&'a T`. + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use ahash::AHasher; + /// use hashbrown::HashTable; + /// use std::hash::{BuildHasher, BuildHasherDefault}; + /// + /// let mut table = HashTable::new(); + /// let hasher = BuildHasherDefault::<AHasher>::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// table.insert_unique(hasher(&"a"), "b", hasher); + /// table.insert_unique(hasher(&"b"), "b", hasher); + /// + /// // Will print in an arbitrary order. + /// for x in table.iter() { + /// println!("{}", x); + /// } + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn iter(&self) -> Iter<'_, T> { + Iter { + inner: unsafe { self.raw.iter() }, + marker: PhantomData, + } + } + + /// An iterator visiting all elements in arbitrary order, + /// with mutable references to the elements. + /// The iterator element type is `&'a mut T`. + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use ahash::AHasher; + /// use hashbrown::HashTable; + /// use std::hash::{BuildHasher, BuildHasherDefault}; + /// + /// let mut table = HashTable::new(); + /// let hasher = BuildHasherDefault::<AHasher>::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// table.insert_unique(hasher(&1), 1, hasher); + /// table.insert_unique(hasher(&2), 2, hasher); + /// table.insert_unique(hasher(&3), 3, hasher); + /// + /// // Update all values + /// for val in table.iter_mut() { + /// *val *= 2; + /// } + /// + /// assert_eq!(table.len(), 3); + /// let mut vec: Vec<i32> = Vec::new(); + /// + /// for val in &table { + /// println!("val: {}", val); + /// vec.push(*val); + /// } + /// + /// // The `Iter` iterator produces items in arbitrary order, so the + /// // items must be sorted to test them against a sorted array. + /// vec.sort_unstable(); + /// assert_eq!(vec, [2, 4, 6]); + /// + /// assert_eq!(table.len(), 3); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn iter_mut(&mut self) -> IterMut<'_, T> { + IterMut { + inner: unsafe { self.raw.iter() }, + marker: PhantomData, + } + } + + /// Retains only the elements specified by the predicate. + /// + /// In other words, remove all elements `e` such that `f(&e)` returns `false`. 
+ /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use ahash::AHasher; + /// use hashbrown::HashTable; + /// use std::hash::{BuildHasher, BuildHasherDefault}; + /// + /// let mut table = HashTable::new(); + /// let hasher = BuildHasherDefault::<AHasher>::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// for x in 1..=6 { + /// table.insert_unique(hasher(&x), x, hasher); + /// } + /// table.retain(|&mut x| x % 2 == 0); + /// assert_eq!(table.len(), 3); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn retain(&mut self, mut f: impl FnMut(&mut T) -> bool) { + // Here we only use `iter` as a temporary, preventing use-after-free + unsafe { + for item in self.raw.iter() { + if !f(item.as_mut()) { + self.raw.erase(item); + } + } + } + } + + /// Clears the set, returning all elements in an iterator. + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use ahash::AHasher; + /// use hashbrown::HashTable; + /// use std::hash::{BuildHasher, BuildHasherDefault}; + /// + /// let mut table = HashTable::new(); + /// let hasher = BuildHasherDefault::<AHasher>::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// for x in 1..=3 { + /// table.insert_unique(hasher(&x), x, hasher); + /// } + /// assert!(!table.is_empty()); + /// + /// // print 1, 2, 3 in an arbitrary order + /// for i in table.drain() { + /// println!("{}", i); + /// } + /// + /// assert!(table.is_empty()); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn drain(&mut self) -> Drain<'_, T, A> { + Drain { + inner: self.raw.drain(), + } + } + + /// Drains elements which are true under the given predicate, + /// and returns an iterator over the removed items. + /// + /// In other words, move all elements `e` such that `f(&e)` returns `true` out + /// into another iterator. + /// + /// If the returned `ExtractIf` is not exhausted, e.g. because it is dropped without iterating + /// or the iteration short-circuits, then the remaining elements will be retained. + /// Use [`retain()`] with a negated predicate if you do not need the returned iterator. + /// + /// [`retain()`]: HashTable::retain + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use ahash::AHasher; + /// use hashbrown::HashTable; + /// use std::hash::{BuildHasher, BuildHasherDefault}; + /// + /// let mut table = HashTable::new(); + /// let hasher = BuildHasherDefault::<AHasher>::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// for x in 0..8 { + /// table.insert_unique(hasher(&x), x, hasher); + /// } + /// let drained: Vec<i32> = table.extract_if(|&mut v| v % 2 == 0).collect(); + /// + /// let mut evens = drained.into_iter().collect::<Vec<_>>(); + /// let mut odds = table.into_iter().collect::<Vec<_>>(); + /// evens.sort(); + /// odds.sort(); + /// + /// assert_eq!(evens, vec![0, 2, 4, 6]); + /// assert_eq!(odds, vec![1, 3, 5, 7]); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn extract_if<F>(&mut self, f: F) -> ExtractIf<'_, T, F, A> + where + F: FnMut(&mut T) -> bool, + { + ExtractIf { + f, + inner: RawExtractIf { + iter: unsafe { self.raw.iter() }, + table: &mut self.raw, + }, + } + } + + /// Attempts to get mutable references to `N` values in the map at once. 
+    ///
+    /// The `eq` argument should be a closure such that `eq(i, k)` returns true if `k` is equal to
+    /// the `i`th key to be looked up.
+    ///
+    /// Returns an array of length `N` with the results of each query. For soundness, at most one
+    /// mutable reference will be returned to any value. `None` will be returned if any of the
+    /// keys are duplicates or missing.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # #[cfg(feature = "nightly")]
+    /// # fn test() {
+    /// use ahash::AHasher;
+    /// use hashbrown::hash_table::Entry;
+    /// use hashbrown::HashTable;
+    /// use std::hash::{BuildHasher, BuildHasherDefault};
+    ///
+    /// let mut libraries: HashTable<(&str, u32)> = HashTable::new();
+    /// let hasher = BuildHasherDefault::<AHasher>::default();
+    /// let hasher = |val: &_| hasher.hash_one(val);
+    /// for (k, v) in [
+    ///     ("Bodleian Library", 1602),
+    ///     ("Athenæum", 1807),
+    ///     ("Herzogin-Anna-Amalia-Bibliothek", 1691),
+    ///     ("Library of Congress", 1800),
+    /// ] {
+    ///     libraries.insert_unique(hasher(&k), (k, v), |(k, _)| hasher(&k));
+    /// }
+    ///
+    /// let keys = ["Athenæum", "Library of Congress"];
+    /// let got = libraries.get_many_mut(keys.map(|k| hasher(&k)), |i, val| keys[i] == val.0);
+    /// assert_eq!(
+    ///     got,
+    ///     Some([&mut ("Athenæum", 1807), &mut ("Library of Congress", 1800),]),
+    /// );
+    ///
+    /// // Missing keys result in None
+    /// let keys = ["Athenæum", "New York Public Library"];
+    /// let got = libraries.get_many_mut(keys.map(|k| hasher(&k)), |i, val| keys[i] == val.0);
+    /// assert_eq!(got, None);
+    ///
+    /// // Duplicate keys result in None
+    /// let keys = ["Athenæum", "Athenæum"];
+    /// let got = libraries.get_many_mut(keys.map(|k| hasher(&k)), |i, val| keys[i] == val.0);
+    /// assert_eq!(got, None);
+    /// # }
+    /// # fn main() {
+    /// #     #[cfg(feature = "nightly")]
+    /// #     test()
+    /// # }
+    /// ```
+    pub fn get_many_mut<const N: usize>(
+        &mut self,
+        hashes: [u64; N],
+        eq: impl FnMut(usize, &T) -> bool,
+    ) -> Option<[&'_ mut T; N]> {
+        self.raw.get_many_mut(hashes, eq)
+    }
+
+    /// Attempts to get mutable references to `N` values in the table at once, without validating that
+    /// the values are unique.
+    ///
+    /// The `eq` argument should be a closure such that `eq(i, k)` returns true if `k` is equal to
+    /// the `i`th key to be looked up.
+    ///
+    /// Returns an array of length `N` with the results of each query. `None` will be returned if
+    /// any of the keys are missing.
+    ///
+    /// For a safe alternative see [`get_many_mut`](`HashTable::get_many_mut`).
+    ///
+    /// # Safety
+    ///
+    /// Calling this method with overlapping keys is *[undefined behavior]* even if the resulting
+    /// references are not used.
+ /// + /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use ahash::AHasher; + /// use hashbrown::hash_table::Entry; + /// use hashbrown::HashTable; + /// use std::hash::{BuildHasher, BuildHasherDefault}; + /// + /// let mut libraries: HashTable<(&str, u32)> = HashTable::new(); + /// let hasher = BuildHasherDefault::<AHasher>::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// for (k, v) in [ + /// ("Bodleian Library", 1602), + /// ("Athenæum", 1807), + /// ("Herzogin-Anna-Amalia-Bibliothek", 1691), + /// ("Library of Congress", 1800), + /// ] { + /// libraries.insert_unique(hasher(&k), (k, v), |(k, _)| hasher(&k)); + /// } + /// + /// let keys = ["Athenæum", "Library of Congress"]; + /// let got = libraries.get_many_mut(keys.map(|k| hasher(&k)), |i, val| keys[i] == val.0); + /// assert_eq!( + /// got, + /// Some([&mut ("Athenæum", 1807), &mut ("Library of Congress", 1800),]), + /// ); + /// + /// // Missing keys result in None + /// let keys = ["Athenæum", "New York Public Library"]; + /// let got = libraries.get_many_mut(keys.map(|k| hasher(&k)), |i, val| keys[i] == val.0); + /// assert_eq!(got, None); + /// + /// // Duplicate keys result in None + /// let keys = ["Athenæum", "Athenæum"]; + /// let got = libraries.get_many_mut(keys.map(|k| hasher(&k)), |i, val| keys[i] == val.0); + /// assert_eq!(got, None); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub unsafe fn get_many_unchecked_mut<const N: usize>( + &mut self, + hashes: [u64; N], + eq: impl FnMut(usize, &T) -> bool, + ) -> Option<[&'_ mut T; N]> { + self.raw.get_many_unchecked_mut(hashes, eq) + } +} + +impl<T, A> IntoIterator for HashTable<T, A> +where + A: Allocator, +{ + type Item = T; + type IntoIter = IntoIter<T, A>; + + fn into_iter(self) -> IntoIter<T, A> { + IntoIter { + inner: self.raw.into_iter(), + } + } +} + +impl<'a, T, A> IntoIterator for &'a HashTable<T, A> +where + A: Allocator, +{ + type Item = &'a T; + type IntoIter = Iter<'a, T>; + + fn into_iter(self) -> Iter<'a, T> { + self.iter() + } +} + +impl<'a, T, A> IntoIterator for &'a mut HashTable<T, A> +where + A: Allocator, +{ + type Item = &'a mut T; + type IntoIter = IterMut<'a, T>; + + fn into_iter(self) -> IterMut<'a, T> { + self.iter_mut() + } +} + +impl<T, A> Default for HashTable<T, A> +where + A: Allocator + Default, +{ + fn default() -> Self { + Self { + raw: Default::default(), + } + } +} + +impl<T, A> Clone for HashTable<T, A> +where + T: Clone, + A: Allocator + Clone, +{ + fn clone(&self) -> Self { + Self { + raw: self.raw.clone(), + } + } +} + +impl<T, A> fmt::Debug for HashTable<T, A> +where + T: fmt::Debug, + A: Allocator, +{ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_set().entries(self.iter()).finish() + } +} + +/// A view into a single entry in a table, which may either be vacant or occupied. +/// +/// This `enum` is constructed from the [`entry`] method on [`HashTable`]. 
+/// +/// [`HashTable`]: struct.HashTable.html +/// [`entry`]: struct.HashTable.html#method.entry +/// +/// # Examples +/// +/// ``` +/// # #[cfg(feature = "nightly")] +/// # fn test() { +/// use ahash::AHasher; +/// use hashbrown::hash_table::{Entry, HashTable, OccupiedEntry}; +/// use std::hash::{BuildHasher, BuildHasherDefault}; +/// +/// let mut table = HashTable::new(); +/// let hasher = BuildHasherDefault::<AHasher>::default(); +/// let hasher = |val: &_| hasher.hash_one(val); +/// for x in ["a", "b", "c"] { +/// table.insert_unique(hasher(&x), x, hasher); +/// } +/// assert_eq!(table.len(), 3); +/// +/// // Existing value (insert) +/// let entry: Entry<_> = table.entry(hasher(&"a"), |&x| x == "a", hasher); +/// let _raw_o: OccupiedEntry<_, _> = entry.insert("a"); +/// assert_eq!(table.len(), 3); +/// // Nonexistent value (insert) +/// table.entry(hasher(&"d"), |&x| x == "d", hasher).insert("d"); +/// +/// // Existing value (or_insert) +/// table +/// .entry(hasher(&"b"), |&x| x == "b", hasher) +/// .or_insert("b"); +/// // Nonexistent value (or_insert) +/// table +/// .entry(hasher(&"e"), |&x| x == "e", hasher) +/// .or_insert("e"); +/// +/// println!("Our HashTable: {:?}", table); +/// +/// let mut vec: Vec<_> = table.iter().copied().collect(); +/// // The `Iter` iterator produces items in arbitrary order, so the +/// // items must be sorted to test them against a sorted array. +/// vec.sort_unstable(); +/// assert_eq!(vec, ["a", "b", "c", "d", "e"]); +/// # } +/// # fn main() { +/// # #[cfg(feature = "nightly")] +/// # test() +/// # } +/// ``` +pub enum Entry<'a, T, A = Global> +where + A: Allocator, +{ + /// An occupied entry. + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use ahash::AHasher; + /// use hashbrown::hash_table::{Entry, HashTable, OccupiedEntry}; + /// use std::hash::{BuildHasher, BuildHasherDefault}; + /// + /// let mut table = HashTable::new(); + /// let hasher = BuildHasherDefault::<AHasher>::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// for x in ["a", "b"] { + /// table.insert_unique(hasher(&x), x, hasher); + /// } + /// + /// match table.entry(hasher(&"a"), |&x| x == "a", hasher) { + /// Entry::Vacant(_) => unreachable!(), + /// Entry::Occupied(_) => {} + /// } + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + Occupied(OccupiedEntry<'a, T, A>), + + /// A vacant entry. 
+ /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use ahash::AHasher; + /// use hashbrown::hash_table::{Entry, HashTable, OccupiedEntry}; + /// use std::hash::{BuildHasher, BuildHasherDefault}; + /// + /// let mut table = HashTable::<&str>::new(); + /// let hasher = BuildHasherDefault::<AHasher>::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// + /// match table.entry(hasher(&"a"), |&x| x == "a", hasher) { + /// Entry::Vacant(_) => {} + /// Entry::Occupied(_) => unreachable!(), + /// } + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + Vacant(VacantEntry<'a, T, A>), +} + +impl<T: fmt::Debug, A: Allocator> fmt::Debug for Entry<'_, T, A> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match *self { + Entry::Vacant(ref v) => f.debug_tuple("Entry").field(v).finish(), + Entry::Occupied(ref o) => f.debug_tuple("Entry").field(o).finish(), + } + } +} + +impl<'a, T, A> Entry<'a, T, A> +where + A: Allocator, +{ + /// Sets the value of the entry, replacing any existing value if there is + /// one, and returns an [`OccupiedEntry`]. + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use ahash::AHasher; + /// use hashbrown::HashTable; + /// use std::hash::{BuildHasher, BuildHasherDefault}; + /// + /// let mut table: HashTable<&str> = HashTable::new(); + /// let hasher = BuildHasherDefault::<AHasher>::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// + /// let entry = table + /// .entry(hasher(&"horseyland"), |&x| x == "horseyland", hasher) + /// .insert("horseyland"); + /// + /// assert_eq!(entry.get(), &"horseyland"); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn insert(self, value: T) -> OccupiedEntry<'a, T, A> { + match self { + Entry::Occupied(mut entry) => { + *entry.get_mut() = value; + entry + } + Entry::Vacant(entry) => entry.insert(value), + } + } + + /// Ensures a value is in the entry by inserting if it was vacant. + /// + /// Returns an [`OccupiedEntry`] pointing to the now-occupied entry. + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use ahash::AHasher; + /// use hashbrown::HashTable; + /// use std::hash::{BuildHasher, BuildHasherDefault}; + /// + /// let mut table: HashTable<&str> = HashTable::new(); + /// let hasher = BuildHasherDefault::<AHasher>::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// + /// // nonexistent key + /// table + /// .entry(hasher(&"poneyland"), |&x| x == "poneyland", hasher) + /// .or_insert("poneyland"); + /// assert!(table + /// .find(hasher(&"poneyland"), |&x| x == "poneyland") + /// .is_some()); + /// + /// // existing key + /// table + /// .entry(hasher(&"poneyland"), |&x| x == "poneyland", hasher) + /// .or_insert("poneyland"); + /// assert!(table + /// .find(hasher(&"poneyland"), |&x| x == "poneyland") + /// .is_some()); + /// assert_eq!(table.len(), 1); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn or_insert(self, default: T) -> OccupiedEntry<'a, T, A> { + match self { + Entry::Occupied(entry) => entry, + Entry::Vacant(entry) => entry.insert(default), + } + } + + /// Ensures a value is in the entry by inserting the result of the default function if empty.. + /// + /// Returns an [`OccupiedEntry`] pointing to the now-occupied entry. 
+ /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use ahash::AHasher; + /// use hashbrown::HashTable; + /// use std::hash::{BuildHasher, BuildHasherDefault}; + /// + /// let mut table: HashTable<String> = HashTable::new(); + /// let hasher = BuildHasherDefault::<AHasher>::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// + /// table + /// .entry(hasher("poneyland"), |x| x == "poneyland", |val| hasher(val)) + /// .or_insert_with(|| "poneyland".to_string()); + /// + /// assert!(table + /// .find(hasher(&"poneyland"), |x| x == "poneyland") + /// .is_some()); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn or_insert_with(self, default: impl FnOnce() -> T) -> OccupiedEntry<'a, T, A> { + match self { + Entry::Occupied(entry) => entry, + Entry::Vacant(entry) => entry.insert(default()), + } + } + + /// Provides in-place mutable access to an occupied entry before any + /// potential inserts into the table. + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use ahash::AHasher; + /// use hashbrown::HashTable; + /// use std::hash::{BuildHasher, BuildHasherDefault}; + /// + /// let mut table: HashTable<(&str, u32)> = HashTable::new(); + /// let hasher = BuildHasherDefault::<AHasher>::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// + /// table + /// .entry( + /// hasher(&"poneyland"), + /// |&(x, _)| x == "poneyland", + /// |(k, _)| hasher(&k), + /// ) + /// .and_modify(|(_, v)| *v += 1) + /// .or_insert(("poneyland", 42)); + /// assert_eq!( + /// table.find(hasher(&"poneyland"), |&(k, _)| k == "poneyland"), + /// Some(&("poneyland", 42)) + /// ); + /// + /// table + /// .entry( + /// hasher(&"poneyland"), + /// |&(x, _)| x == "poneyland", + /// |(k, _)| hasher(&k), + /// ) + /// .and_modify(|(_, v)| *v += 1) + /// .or_insert(("poneyland", 42)); + /// assert_eq!( + /// table.find(hasher(&"poneyland"), |&(k, _)| k == "poneyland"), + /// Some(&("poneyland", 43)) + /// ); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn and_modify(self, f: impl FnOnce(&mut T)) -> Self { + match self { + Entry::Occupied(mut entry) => { + f(entry.get_mut()); + Entry::Occupied(entry) + } + Entry::Vacant(entry) => Entry::Vacant(entry), + } + } +} + +/// A view into an occupied entry in a `HashTable`. +/// It is part of the [`Entry`] enum. 
+/// +/// [`Entry`]: enum.Entry.html +/// +/// # Examples +/// +/// ``` +/// # #[cfg(feature = "nightly")] +/// # fn test() { +/// use ahash::AHasher; +/// use hashbrown::hash_table::{Entry, HashTable, OccupiedEntry}; +/// use std::hash::{BuildHasher, BuildHasherDefault}; +/// +/// let mut table = HashTable::new(); +/// let hasher = BuildHasherDefault::<AHasher>::default(); +/// let hasher = |val: &_| hasher.hash_one(val); +/// for x in ["a", "b", "c"] { +/// table.insert_unique(hasher(&x), x, hasher); +/// } +/// assert_eq!(table.len(), 3); +/// +/// let _entry_o: OccupiedEntry<_, _> = table.find_entry(hasher(&"a"), |&x| x == "a").unwrap(); +/// assert_eq!(table.len(), 3); +/// +/// // Existing key +/// match table.entry(hasher(&"a"), |&x| x == "a", hasher) { +/// Entry::Vacant(_) => unreachable!(), +/// Entry::Occupied(view) => { +/// assert_eq!(view.get(), &"a"); +/// } +/// } +/// +/// assert_eq!(table.len(), 3); +/// +/// // Existing key (take) +/// match table.entry(hasher(&"c"), |&x| x == "c", hasher) { +/// Entry::Vacant(_) => unreachable!(), +/// Entry::Occupied(view) => { +/// assert_eq!(view.remove().0, "c"); +/// } +/// } +/// assert_eq!(table.find(hasher(&"c"), |&x| x == "c"), None); +/// assert_eq!(table.len(), 2); +/// # } +/// # fn main() { +/// # #[cfg(feature = "nightly")] +/// # test() +/// # } +/// ``` +pub struct OccupiedEntry<'a, T, A = Global> +where + A: Allocator, +{ + hash: u64, + bucket: Bucket<T>, + table: &'a mut HashTable<T, A>, +} + +unsafe impl<T, A> Send for OccupiedEntry<'_, T, A> +where + T: Send, + A: Send + Allocator, +{ +} +unsafe impl<T, A> Sync for OccupiedEntry<'_, T, A> +where + T: Sync, + A: Sync + Allocator, +{ +} + +impl<T: fmt::Debug, A: Allocator> fmt::Debug for OccupiedEntry<'_, T, A> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("OccupiedEntry") + .field("value", self.get()) + .finish() + } +} + +impl<'a, T, A> OccupiedEntry<'a, T, A> +where + A: Allocator, +{ + /// Takes the value out of the entry, and returns it along with a + /// `VacantEntry` that can be used to insert another value with the same + /// hash as the one that was just removed. 
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # #[cfg(feature = "nightly")]
+    /// # fn test() {
+    /// use ahash::AHasher;
+    /// use hashbrown::hash_table::Entry;
+    /// use hashbrown::HashTable;
+    /// use std::hash::{BuildHasher, BuildHasherDefault};
+    ///
+    /// let mut table: HashTable<&str> = HashTable::new();
+    /// let hasher = BuildHasherDefault::<AHasher>::default();
+    /// let hasher = |val: &_| hasher.hash_one(val);
+    /// // The table is empty
+    /// assert!(table.is_empty() && table.capacity() == 0);
+    ///
+    /// table.insert_unique(hasher(&"poneyland"), "poneyland", hasher);
+    /// let capacity_before_remove = table.capacity();
+    ///
+    /// if let Entry::Occupied(o) = table.entry(hasher(&"poneyland"), |&x| x == "poneyland", hasher) {
+    ///     assert_eq!(o.remove().0, "poneyland");
+    /// }
+    ///
+    /// assert!(table
+    ///     .find(hasher(&"poneyland"), |&x| x == "poneyland")
+    ///     .is_none());
+    /// // Now the table holds no elements, but its capacity is equal to the old one
+    /// assert!(table.len() == 0 && table.capacity() == capacity_before_remove);
+    /// # }
+    /// # fn main() {
+    /// #     #[cfg(feature = "nightly")]
+    /// #     test()
+    /// # }
+    /// ```
+    pub fn remove(self) -> (T, VacantEntry<'a, T, A>) {
+        let (val, slot) = unsafe { self.table.raw.remove(self.bucket) };
+        (
+            val,
+            VacantEntry {
+                hash: self.hash,
+                insert_slot: slot,
+                table: self.table,
+            },
+        )
+    }
+
+    /// Gets a reference to the value in the entry.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # #[cfg(feature = "nightly")]
+    /// # fn test() {
+    /// use ahash::AHasher;
+    /// use hashbrown::hash_table::Entry;
+    /// use hashbrown::HashTable;
+    /// use std::hash::{BuildHasher, BuildHasherDefault};
+    ///
+    /// let mut table: HashTable<&str> = HashTable::new();
+    /// let hasher = BuildHasherDefault::<AHasher>::default();
+    /// let hasher = |val: &_| hasher.hash_one(val);
+    /// table.insert_unique(hasher(&"poneyland"), "poneyland", hasher);
+    ///
+    /// match table.entry(hasher(&"poneyland"), |&x| x == "poneyland", hasher) {
+    ///     Entry::Vacant(_) => panic!(),
+    ///     Entry::Occupied(entry) => assert_eq!(entry.get(), &"poneyland"),
+    /// }
+    /// # }
+    /// # fn main() {
+    /// #     #[cfg(feature = "nightly")]
+    /// #     test()
+    /// # }
+    /// ```
+    pub fn get(&self) -> &T {
+        unsafe { self.bucket.as_ref() }
+    }
+
+    /// Gets a mutable reference to the value in the entry.
+    ///
+    /// If you need a reference to the `OccupiedEntry` which may outlive the
+    /// destruction of the `Entry` value, see [`into_mut`].
+    ///
+    /// [`into_mut`]: #method.into_mut
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// # #[cfg(feature = "nightly")]
+    /// # fn test() {
+    /// use ahash::AHasher;
+    /// use hashbrown::hash_table::Entry;
+    /// use hashbrown::HashTable;
+    /// use std::hash::{BuildHasher, BuildHasherDefault};
+    ///
+    /// let mut table: HashTable<(&str, u32)> = HashTable::new();
+    /// let hasher = BuildHasherDefault::<AHasher>::default();
+    /// let hasher = |val: &_| hasher.hash_one(val);
+    /// table.insert_unique(hasher(&"poneyland"), ("poneyland", 12), |(k, _)| hasher(&k));
+    ///
+    /// assert_eq!(
+    ///     table.find(hasher(&"poneyland"), |&(x, _)| x == "poneyland",),
+    ///     Some(&("poneyland", 12))
+    /// );
+    ///
+    /// if let Entry::Occupied(mut o) = table.entry(
+    ///     hasher(&"poneyland"),
+    ///     |&(x, _)| x == "poneyland",
+    ///     |(k, _)| hasher(&k),
+    /// ) {
+    ///     o.get_mut().1 += 10;
+    ///     assert_eq!(o.get().1, 22);
+    ///
+    ///     // We can use the same Entry multiple times.
+ /// o.get_mut().1 += 2; + /// } + /// + /// assert_eq!( + /// table.find(hasher(&"poneyland"), |&(x, _)| x == "poneyland",), + /// Some(&("poneyland", 24)) + /// ); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn get_mut(&mut self) -> &mut T { + unsafe { self.bucket.as_mut() } + } + + /// Converts the OccupiedEntry into a mutable reference to the value in the entry + /// with a lifetime bound to the table itself. + /// + /// If you need multiple references to the `OccupiedEntry`, see [`get_mut`]. + /// + /// [`get_mut`]: #method.get_mut + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use ahash::AHasher; + /// use hashbrown::hash_table::Entry; + /// use hashbrown::HashTable; + /// use std::hash::{BuildHasher, BuildHasherDefault}; + /// + /// let mut table: HashTable<(&str, u32)> = HashTable::new(); + /// let hasher = BuildHasherDefault::<AHasher>::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// table.insert_unique(hasher(&"poneyland"), ("poneyland", 12), |(k, _)| hasher(&k)); + /// + /// assert_eq!( + /// table.find(hasher(&"poneyland"), |&(x, _)| x == "poneyland",), + /// Some(&("poneyland", 12)) + /// ); + /// + /// let value: &mut (&str, u32); + /// match table.entry( + /// hasher(&"poneyland"), + /// |&(x, _)| x == "poneyland", + /// |(k, _)| hasher(&k), + /// ) { + /// Entry::Occupied(entry) => value = entry.into_mut(), + /// Entry::Vacant(_) => panic!(), + /// } + /// value.1 += 10; + /// + /// assert_eq!( + /// table.find(hasher(&"poneyland"), |&(x, _)| x == "poneyland",), + /// Some(&("poneyland", 22)) + /// ); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn into_mut(self) -> &'a mut T { + unsafe { self.bucket.as_mut() } + } + + /// Converts the OccupiedEntry into a mutable reference to the underlying + /// table. + pub fn into_table(self) -> &'a mut HashTable<T, A> { + self.table + } +} + +/// A view into a vacant entry in a `HashTable`. +/// It is part of the [`Entry`] enum. 
+/// +/// [`Entry`]: enum.Entry.html +/// +/// # Examples +/// +/// ``` +/// # #[cfg(feature = "nightly")] +/// # fn test() { +/// use ahash::AHasher; +/// use hashbrown::hash_table::{Entry, HashTable, VacantEntry}; +/// use std::hash::{BuildHasher, BuildHasherDefault}; +/// +/// let mut table: HashTable<&str> = HashTable::new(); +/// let hasher = BuildHasherDefault::<AHasher>::default(); +/// let hasher = |val: &_| hasher.hash_one(val); +/// +/// let entry_v: VacantEntry<_, _> = match table.entry(hasher(&"a"), |&x| x == "a", hasher) { +/// Entry::Vacant(view) => view, +/// Entry::Occupied(_) => unreachable!(), +/// }; +/// entry_v.insert("a"); +/// assert!(table.find(hasher(&"a"), |&x| x == "a").is_some() && table.len() == 1); +/// +/// // Nonexistent key (insert) +/// match table.entry(hasher(&"b"), |&x| x == "b", hasher) { +/// Entry::Vacant(view) => { +/// view.insert("b"); +/// } +/// Entry::Occupied(_) => unreachable!(), +/// } +/// assert!(table.find(hasher(&"b"), |&x| x == "b").is_some() && table.len() == 2); +/// # } +/// # fn main() { +/// # #[cfg(feature = "nightly")] +/// # test() +/// # } +/// ``` +pub struct VacantEntry<'a, T, A = Global> +where + A: Allocator, +{ + hash: u64, + insert_slot: InsertSlot, + table: &'a mut HashTable<T, A>, +} + +impl<T: fmt::Debug, A: Allocator> fmt::Debug for VacantEntry<'_, T, A> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("VacantEntry") + } +} + +impl<'a, T, A> VacantEntry<'a, T, A> +where + A: Allocator, +{ + /// Inserts a new element into the table with the hash that was used to + /// obtain the `VacantEntry`. + /// + /// An `OccupiedEntry` is returned for the newly inserted element. + /// + /// # Examples + /// + /// ``` + /// # #[cfg(feature = "nightly")] + /// # fn test() { + /// use ahash::AHasher; + /// use hashbrown::hash_table::Entry; + /// use hashbrown::HashTable; + /// use std::hash::{BuildHasher, BuildHasherDefault}; + /// + /// let mut table: HashTable<&str> = HashTable::new(); + /// let hasher = BuildHasherDefault::<AHasher>::default(); + /// let hasher = |val: &_| hasher.hash_one(val); + /// + /// if let Entry::Vacant(o) = table.entry(hasher(&"poneyland"), |&x| x == "poneyland", hasher) { + /// o.insert("poneyland"); + /// } + /// assert_eq!( + /// table.find(hasher(&"poneyland"), |&x| x == "poneyland"), + /// Some(&"poneyland") + /// ); + /// # } + /// # fn main() { + /// # #[cfg(feature = "nightly")] + /// # test() + /// # } + /// ``` + pub fn insert(self, value: T) -> OccupiedEntry<'a, T, A> { + let bucket = unsafe { + self.table + .raw + .insert_in_slot(self.hash, self.insert_slot, value) + }; + OccupiedEntry { + hash: self.hash, + bucket, + table: self.table, + } + } + + /// Converts the VacantEntry into a mutable reference to the underlying + /// table. + pub fn into_table(self) -> &'a mut HashTable<T, A> { + self.table + } +} + +/// Type representing the absence of an entry, as returned by [`HashTable::find_entry`]. +/// +/// This type only exists due to [limitations] in Rust's NLL borrow checker. In +/// the future, `find_entry` will return an `Option<OccupiedEntry>` and this +/// type will be removed. 
+/// +/// [limitations]: https://smallcultfollowing.com/babysteps/blog/2018/06/15/mir-based-borrow-check-nll-status-update/#polonius +/// +/// # Examples +/// +/// ``` +/// # #[cfg(feature = "nightly")] +/// # fn test() { +/// use ahash::AHasher; +/// use hashbrown::hash_table::{AbsentEntry, Entry, HashTable}; +/// use std::hash::{BuildHasher, BuildHasherDefault}; +/// +/// let mut table: HashTable<&str> = HashTable::new(); +/// let hasher = BuildHasherDefault::<AHasher>::default(); +/// let hasher = |val: &_| hasher.hash_one(val); +/// +/// let entry_v: AbsentEntry<_, _> = table.find_entry(hasher(&"a"), |&x| x == "a").unwrap_err(); +/// entry_v +/// .into_table() +/// .insert_unique(hasher(&"a"), "a", hasher); +/// assert!(table.find(hasher(&"a"), |&x| x == "a").is_some() && table.len() == 1); +/// +/// // Nonexistent key (insert) +/// match table.entry(hasher(&"b"), |&x| x == "b", hasher) { +/// Entry::Vacant(view) => { +/// view.insert("b"); +/// } +/// Entry::Occupied(_) => unreachable!(), +/// } +/// assert!(table.find(hasher(&"b"), |&x| x == "b").is_some() && table.len() == 2); +/// # } +/// # fn main() { +/// # #[cfg(feature = "nightly")] +/// # test() +/// # } +/// ``` +pub struct AbsentEntry<'a, T, A = Global> +where + A: Allocator, +{ + table: &'a mut HashTable<T, A>, +} + +impl<T: fmt::Debug, A: Allocator> fmt::Debug for AbsentEntry<'_, T, A> { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.write_str("AbsentEntry") + } +} + +impl<'a, T, A> AbsentEntry<'a, T, A> +where + A: Allocator, +{ + /// Converts the AbsentEntry into a mutable reference to the underlying + /// table. + pub fn into_table(self) -> &'a mut HashTable<T, A> { + self.table + } +} + +/// An iterator over the entries of a `HashTable` in arbitrary order. +/// The iterator element type is `&'a T`. +/// +/// This `struct` is created by the [`iter`] method on [`HashTable`]. See its +/// documentation for more. +/// +/// [`iter`]: struct.HashTable.html#method.iter +/// [`HashTable`]: struct.HashTable.html +pub struct Iter<'a, T> { + inner: RawIter<T>, + marker: PhantomData<&'a T>, +} + +impl<'a, T> Iterator for Iter<'a, T> { + type Item = &'a T; + + fn next(&mut self) -> Option<Self::Item> { + self.inner.next().map(|bucket| unsafe { bucket.as_ref() }) + } + + fn size_hint(&self) -> (usize, Option<usize>) { + self.inner.size_hint() + } +} + +impl<T> ExactSizeIterator for Iter<'_, T> { + fn len(&self) -> usize { + self.inner.len() + } +} + +impl<T> FusedIterator for Iter<'_, T> {} + +/// A mutable iterator over the entries of a `HashTable` in arbitrary order. +/// The iterator element type is `&'a mut T`. +/// +/// This `struct` is created by the [`iter_mut`] method on [`HashTable`]. See its +/// documentation for more. +/// +/// [`iter_mut`]: struct.HashTable.html#method.iter_mut +/// [`HashTable`]: struct.HashTable.html +pub struct IterMut<'a, T> { + inner: RawIter<T>, + marker: PhantomData<&'a mut T>, +} + +impl<'a, T> Iterator for IterMut<'a, T> { + type Item = &'a mut T; + + fn next(&mut self) -> Option<Self::Item> { + self.inner.next().map(|bucket| unsafe { bucket.as_mut() }) + } + + fn size_hint(&self) -> (usize, Option<usize>) { + self.inner.size_hint() + } +} + +impl<T> ExactSizeIterator for IterMut<'_, T> { + fn len(&self) -> usize { + self.inner.len() + } +} + +impl<T> FusedIterator for IterMut<'_, T> {} + +/// An owning iterator over the entries of a `HashTable` in arbitrary order. +/// The iterator element type is `T`. 
+///
+/// This `struct` is created by the [`into_iter`] method on [`HashTable`]
+/// (provided by the [`IntoIterator`] trait). See its documentation for more.
+/// The table cannot be used after calling that method.
+///
+/// [`into_iter`]: struct.HashTable.html#method.into_iter
+/// [`HashTable`]: struct.HashTable.html
+/// [`IntoIterator`]: https://doc.rust-lang.org/core/iter/trait.IntoIterator.html
+pub struct IntoIter<T, A = Global>
+where
+    A: Allocator,
+{
+    inner: RawIntoIter<T, A>,
+}
+
+impl<T, A> Iterator for IntoIter<T, A>
+where
+    A: Allocator,
+{
+    type Item = T;
+
+    fn next(&mut self) -> Option<Self::Item> {
+        self.inner.next()
+    }
+
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.inner.size_hint()
+    }
+}
+
+impl<T, A> ExactSizeIterator for IntoIter<T, A>
+where
+    A: Allocator,
+{
+    fn len(&self) -> usize {
+        self.inner.len()
+    }
+}
+
+impl<T, A> FusedIterator for IntoIter<T, A> where A: Allocator {}
+
+/// A draining iterator over the items of a `HashTable`.
+///
+/// This `struct` is created by the [`drain`] method on [`HashTable`].
+/// See its documentation for more.
+///
+/// [`HashTable`]: struct.HashTable.html
+/// [`drain`]: struct.HashTable.html#method.drain
+pub struct Drain<'a, T, A: Allocator = Global> {
+    inner: RawDrain<'a, T, A>,
+}
+
+impl<T, A: Allocator> Drain<'_, T, A> {
+    /// Returns an iterator of references over the remaining items.
+    fn iter(&self) -> Iter<'_, T> {
+        Iter {
+            inner: self.inner.iter(),
+            marker: PhantomData,
+        }
+    }
+}
+
+impl<T, A: Allocator> Iterator for Drain<'_, T, A> {
+    type Item = T;
+
+    fn next(&mut self) -> Option<T> {
+        self.inner.next()
+    }
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        self.inner.size_hint()
+    }
+}
+impl<T, A: Allocator> ExactSizeIterator for Drain<'_, T, A> {
+    fn len(&self) -> usize {
+        self.inner.len()
+    }
+}
+impl<T, A: Allocator> FusedIterator for Drain<'_, T, A> {}
+
+impl<T: fmt::Debug, A: Allocator> fmt::Debug for Drain<'_, T, A> {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_list().entries(self.iter()).finish()
+    }
+}
+
+/// A draining iterator over the entries of a `HashTable` which satisfy the predicate `f`.
+///
+/// This `struct` is created by [`HashTable::extract_if`]. See its
+/// documentation for more.
+#[must_use = "Iterators are lazy unless consumed"]
+pub struct ExtractIf<'a, T, F, A: Allocator = Global>
+where
+    F: FnMut(&mut T) -> bool,
+{
+    f: F,
+    inner: RawExtractIf<'a, T, A>,
+}
+
+impl<T, F, A: Allocator> Iterator for ExtractIf<'_, T, F, A>
+where
+    F: FnMut(&mut T) -> bool,
+{
+    type Item = T;
+
+    #[inline]
+    fn next(&mut self) -> Option<Self::Item> {
+        self.inner.next(|val| (self.f)(val))
+    }
+
+    #[inline]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        (0, self.inner.iter.size_hint().1)
+    }
+}
+
+impl<T, F, A: Allocator> FusedIterator for ExtractIf<'_, T, F, A> where F: FnMut(&mut T) -> bool {}
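+
+// --- Editorial sketch (not part of upstream hashbrown; illustrative only) ---
+// A minimal, hedged example tying together the `HashTable` API documented above:
+// `extract_if` moves out exactly the elements for which the predicate returns
+// `true`, `retain` keeps them, and `entry`/`and_modify`/`or_insert` give
+// insert-or-update access. The trivial hash closures below are assumptions made
+// purely for brevity, not a recommendation for real hashing.
+#[cfg(test)]
+mod table_doc_sketch {
+    use super::HashTable;
+
+    #[test]
+    fn extract_if_and_retain_split_by_predicate() {
+        // A trivial hash function is sufficient for this sketch.
+        let hasher = |val: &i32| *val as u64;
+
+        let mut table = HashTable::new();
+        for x in 0..8 {
+            table.insert_unique(hasher(&x), x, hasher);
+        }
+
+        // Elements satisfying the predicate are drained into the iterator...
+        let drained_sum: i32 = table.extract_if(|&mut v| v % 2 == 0).sum();
+        assert_eq!(drained_sum, 12); // 0 + 2 + 4 + 6
+
+        // ...while the remaining (odd) elements stay in the table.
+        assert_eq!(table.len(), 4);
+        table.retain(|&mut v| v > 4);
+        assert_eq!(table.len(), 2); // only 5 and 7 are kept
+    }
+
+    #[test]
+    fn entry_or_insert_then_and_modify() {
+        // Hash only the "key" half of the tuple so lookups stay consistent.
+        let hasher = |val: &(i32, i32)| val.0 as u64;
+
+        let mut table: HashTable<(i32, i32)> = HashTable::new();
+        // First pass inserts the entry; second pass only modifies it in place.
+        for _ in 0..2 {
+            table
+                .entry(hasher(&(1, 0)), |&(k, _)| k == 1, hasher)
+                .and_modify(|(_, v)| *v += 1)
+                .or_insert((1, 10));
+        }
+        assert_eq!(table.find(hasher(&(1, 0)), |&(k, _)| k == 1), Some(&(1, 11)));
+    }
+}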