author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-12 05:43:14 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-12 05:43:14 +0000
commit    8dd16259287f58f9273002717ec4d27e97127719 (patch)
tree      3863e62a53829a84037444beab3abd4ed9dfc7d0 /third_party/rust/hashbrown
parent    Releasing progress-linux version 126.0.1-1~progress7.99u1. (diff)
download  firefox-8dd16259287f58f9273002717ec4d27e97127719.tar.xz
          firefox-8dd16259287f58f9273002717ec4d27e97127719.zip
Merging upstream version 127.0.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/rust/hashbrown')
-rw-r--r--  third_party/rust/hashbrown/.cargo-checksum.json                      |    2
-rw-r--r--  third_party/rust/hashbrown/CHANGELOG.md                              |  166
-rw-r--r--  third_party/rust/hashbrown/Cargo.toml                                |   40
-rw-r--r--  third_party/rust/hashbrown/README.md                                 |   77
-rw-r--r--  third_party/rust/hashbrown/benches/bench.rs                          |    2
-rw-r--r--  third_party/rust/hashbrown/src/external_trait_impls/mod.rs           |    2
-rw-r--r--  third_party/rust/hashbrown/src/external_trait_impls/rayon/map.rs     |   47
-rw-r--r--  third_party/rust/hashbrown/src/external_trait_impls/rayon/mod.rs     |    1
-rw-r--r--  third_party/rust/hashbrown/src/external_trait_impls/rayon/raw.rs     |   23
-rw-r--r--  third_party/rust/hashbrown/src/external_trait_impls/rayon/set.rs     |   34
-rw-r--r--  third_party/rust/hashbrown/src/external_trait_impls/rayon/table.rs   |  252
-rw-r--r--  third_party/rust/hashbrown/src/external_trait_impls/rkyv/hash_map.rs |  125
-rw-r--r--  third_party/rust/hashbrown/src/external_trait_impls/rkyv/hash_set.rs |  123
-rw-r--r--  third_party/rust/hashbrown/src/external_trait_impls/rkyv/mod.rs      |    2
-rw-r--r--  third_party/rust/hashbrown/src/external_trait_impls/serde.rs         |   63
-rw-r--r--  third_party/rust/hashbrown/src/lib.rs                                |   78
-rw-r--r--  third_party/rust/hashbrown/src/macros.rs                             |    2
-rw-r--r--  third_party/rust/hashbrown/src/map.rs                                | 1324
-rw-r--r--  third_party/rust/hashbrown/src/raw/alloc.rs                          |   57
-rw-r--r--  third_party/rust/hashbrown/src/raw/bitmask.rs                        |   99
-rw-r--r--  third_party/rust/hashbrown/src/raw/generic.rs                        |   59
-rw-r--r--  third_party/rust/hashbrown/src/raw/mod.rs                            | 3301
-rw-r--r--  third_party/rust/hashbrown/src/raw/neon.rs                           |  124
-rw-r--r--  third_party/rust/hashbrown/src/raw/sse2.rs                           |   31
-rw-r--r--  third_party/rust/hashbrown/src/rustc_entry.rs                        |   32
-rw-r--r--  third_party/rust/hashbrown/src/scopeguard.rs                         |   14
-rw-r--r--  third_party/rust/hashbrown/src/set.rs                                |  516
-rw-r--r--  third_party/rust/hashbrown/src/table.rs                              | 2070
-rw-r--r--  third_party/rust/hashbrown/tests/equivalent_trait.rs                 |   53
-rw-r--r--  third_party/rust/hashbrown/tests/raw.rs                              |   11
-rw-r--r--  third_party/rust/hashbrown/tests/rayon.rs                            |    4
-rw-r--r--  third_party/rust/hashbrown/tests/set.rs                              |    2
32 files changed, 7407 insertions, 1329 deletions
diff --git a/third_party/rust/hashbrown/.cargo-checksum.json b/third_party/rust/hashbrown/.cargo-checksum.json
index 5561cde80d..0c5744048e 100644
--- a/third_party/rust/hashbrown/.cargo-checksum.json
+++ b/third_party/rust/hashbrown/.cargo-checksum.json
@@ -1 +1 @@
-{"files":{"CHANGELOG.md":"ade49a29d368e16ce508aee91b477ecbad7e2e52eb6fee7b4c1fc86199963f0e","Cargo.toml":"421b3a71d97faf0a7e52c3b2bfbe0f1c036b9dbf6232b4e5b41221bb54358f5a","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"ff8f68cb076caf8cefe7a6430d4ac086ce6af2ca8ce2c4e5a2004d4552ef52a2","README.md":"a536b3bb3f3521e59836080f05a4783150fa8484f759a31468ce3b6dba1f33eb","benches/bench.rs":"aadc39d815eadf094ed9357d946319df2d93194203bbccb7c33cea6951d654df","benches/insert_unique_unchecked.rs":"cb84275f22d5f95a5ac995ac6b2df74ffcf342765b401d27c95f2955c7b7cb9f","clippy.toml":"7535949f908c6d9aea4f9a9f3a7625552c93fc29e963d059d40f4def9d77ea7b","src/external_trait_impls/mod.rs":"d69528827794524cfd9acbeacc1ac4f6131e3c7574311e6d919f818f65fbff07","src/external_trait_impls/rayon/helpers.rs":"ba105bf0853ebc45157f22116ad0f55d3bdab75e721d8e7a677c7b912d0c0c6d","src/external_trait_impls/rayon/map.rs":"2809e2a0071db8101c38789deb955f3830c5c3455eb1794ff64a0cf2ceb53fc7","src/external_trait_impls/rayon/mod.rs":"156de9c1ad0123334ea3b7e5a17444faf1b8bf971aa88a1f23e2f2d1c3021141","src/external_trait_impls/rayon/raw.rs":"e62c5f3ca5fffea47357e64b6f8c34cec94af62d9bd28a2b87934da46c22b66e","src/external_trait_impls/rayon/set.rs":"c4c44d44e56c2f59e9e1355662e29d8744ac96645ca4414127a359fb46cb0fbf","src/external_trait_impls/serde.rs":"0bc1a1f218d1ae7a5262557a5e3737b9334caf7d50c136dbdc75ff75680c223b","src/lib.rs":"c82fbee9684bfff40ef55d5f0c9f855c11f71f9fd1720fb084ef8331bdbc41d8","src/macros.rs":"36fe532656879c80f7753d13354b889f5b45caef451a1bb3a27dbc32d74c9878","src/map.rs":"df39edae67c569378dea9a4d928685cb4d06569712c6ac36a54df76fb5d87fe3","src/raw/alloc.rs":"184a0345bc2c7544b65c28724063be26b1f2b28dbaaa028a0b01192ccac25557","src/raw/bitmask.rs":"820d90b19b7e3433a1048ace008c9526331cd53a576cb0cfc1ff9960b6fe52f8","src/raw/generic.rs":"f5013a50d6d82d5cc8bad8b8c26c24d00fa810197f9f123256c58ac92e0d98f9","src/raw/mod.rs":"fa38247c6b3bd70636be50400debb9966a3446d49ee13e4f4e2dfe4ceed1b201","src/raw/sse2.rs":"838cfdb1daa1e70951ed25f985283b8b7ab4b46fa130f92eda152047ce6086f6","src/rustc_entry.rs":"cdd70972cba5b79ca1cad79869cb5e184d6dc4798ef90822e966ef89679ba011","src/scopeguard.rs":"d13de1b12897add7fe1c3eba6f906c9cc09d86509b6cfe06b95d63803fe9265c","src/set.rs":"6877d4a42eeadd681e3b8881528e4b20f14cfedbc11e9318bfcf425ef96d1546","tests/hasher.rs":"9a8fdf67e4415618e16729969c386eefe71408cded5d46cf7b67d969276a3452","tests/rayon.rs":"83d5289771542203f539a41cccb889fbe7ce70f5adf5b903ac9f051e3ba13cfa","tests/serde.rs":"6bac8054db722dd049901b37a6e006535bac30f425eb5cd91af19b5bc1dfe78e","tests/set.rs":"01cf39efb04646ef4c63a809ebb96dfa63cfec472bf8bdb6c121f6526d40c40e"},"package":"8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888"} \ No newline at end of file
+{"files":{"CHANGELOG.md":"1a844fe3b7466b41ca1d5914af197d5aeed7cb14f30ebe4be351367d7ca905d2","Cargo.toml":"c011f10385da722056537329f3fcf8c9b93af742e79e38885c0152a0105fc227","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"ff8f68cb076caf8cefe7a6430d4ac086ce6af2ca8ce2c4e5a2004d4552ef52a2","README.md":"84c222ce49510535419d338b7532a72a2bf22b7466e44de78d92d25b6c7d636b","benches/bench.rs":"ef7bc025922f077d307c565640c005d056e3d6c1713448a95aae92d3c22c1005","benches/insert_unique_unchecked.rs":"cb84275f22d5f95a5ac995ac6b2df74ffcf342765b401d27c95f2955c7b7cb9f","clippy.toml":"7535949f908c6d9aea4f9a9f3a7625552c93fc29e963d059d40f4def9d77ea7b","src/external_trait_impls/mod.rs":"0625e6a5e3b8ecc8901a12aeeea54393fd84617fb3a14d98a34d2d2bddb8d257","src/external_trait_impls/rayon/helpers.rs":"ba105bf0853ebc45157f22116ad0f55d3bdab75e721d8e7a677c7b912d0c0c6d","src/external_trait_impls/rayon/map.rs":"96fdf39b3f601f77152d7ce84541b8f51f32b9274b7da9c294862892e721a5d8","src/external_trait_impls/rayon/mod.rs":"126edc882501dddd25e442d9236508b5b386eb8c0a9f5d654f2dd081086c1616","src/external_trait_impls/rayon/raw.rs":"04012fb2e99648819b4bc0044107ed3cb94013e242b7865075c5bd9ebf1b6865","src/external_trait_impls/rayon/set.rs":"7539348ff7bc6e3cce6b3c019d62dc401eea0138c578fef729c2593e8ead1cfa","src/external_trait_impls/rayon/table.rs":"8778d29509c68b5b7cb66859db025d3939ce22e7cf370b20ff3dea4fe4b29fd0","src/external_trait_impls/rkyv/hash_map.rs":"7abe24318143b776016052b05840656afc858b1ba5252f3d418d61972477f53d","src/external_trait_impls/rkyv/hash_set.rs":"38d969125d17d606492ec4ec9fc06b7e7118eb903240dacf40de21b9b06fa5c8","src/external_trait_impls/rkyv/mod.rs":"54399ce5574fd1d84b7b0cb4238fa3e898575e89a6724299be009d2172bda02e","src/external_trait_impls/serde.rs":"6dbe104dee16b453b6b048b541c6e02c6d067d970dfafd243fc4360288b0168c","src/lib.rs":"74e250c18e55994a4a902eaa06aca034559d6de53501ed4bf9010fabc67e88a2","src/macros.rs":"98a26b908fc0fbe6a58d008a317e550013d615eb3cc17a5054a573c62c1d74cb","src/map.rs":"d484f2f81e5b4acf4b615f187241e34c3016aaaca53a5e71019cceb993c4ebd7","src/raw/alloc.rs":"902f8588d0fdee3e5c3dc02410f41d4b38ac88843727387f929f3186b3a2d322","src/raw/bitmask.rs":"3b3dce8d6a48856ada19085abf43908f124ab3419fcf434b9ca64d7bff243f67","src/raw/generic.rs":"efc5e603be3e9a17935aef1836a38ce01c78a0093b2af0671548eb5459b37921","src/raw/mod.rs":"16bbabf42dde9f3fb17c4f7e768aef47752d839bf624b81d24a48af3d418b3a2","src/raw/neon.rs":"9907d8ebc36fc3df562dde478ea9b72213fda65288a304718d8647f0029dc9ad","src/raw/sse2.rs":"39038e3344e49f4638e211bcdbf56565ac53e90dce56172cc3b526fea911c2af","src/rustc_entry.rs":"8142ed89b50155602ef8c1628382bd62d3ee903920fe49d403d4100a278c6ba4","src/scopeguard.rs":"1a246e08a63c06cd8ad934bd7da229421bf804f991ae93cd7e242da27ca6c601","src/set.rs":"a620ed68bd1610b76c4c1890615d71b2c04928bf5b345133a0588a065bce06fa","src/table.rs":"7b7174099d2e3cade0caeddd73e29b7395f3b9f4f1f21013f885b52cd93438cb","tests/equivalent_trait.rs":"84faa3fe9d67c375d03fec81f0f1412c47862477d42e84e7d235258236338d5b","tests/hasher.rs":"9a8fdf67e4415618e16729969c386eefe71408cded5d46cf7b67d969276a3452","tests/raw.rs":"43ed2f98877533a0905611d9a30f26b183dd3e103e3856eeab80e7b8ac7894d3","tests/rayon.rs":"39cb24ab45fce8087bb54948715c8b6973ebfba1a325292b5b3cd9aab50b5fd2","tests/serde.rs":"6bac8054db722dd049901b37a6e006535bac30f425eb5cd91af19b5bc1dfe78e","tests/set.rs":"9f8011c29d1059aadb54b6dd4623521d5178b4278b4a56021ef2cee4bbb19fd9"},"package":"e5274423e17b7c9fc20b6e7e208532f9b19825d82dfd615708b70edd83df41f1"} 
\ No newline at end of file
diff --git a/third_party/rust/hashbrown/CHANGELOG.md b/third_party/rust/hashbrown/CHANGELOG.md
index 3354b54bb3..8c4068089a 100644
--- a/third_party/rust/hashbrown/CHANGELOG.md
+++ b/third_party/rust/hashbrown/CHANGELOG.md
@@ -7,35 +7,157 @@ and this project adheres to [Semantic Versioning](https://semver.org/).
## [Unreleased]
+### Changed
+
+- Changed `hash_set::{Entry, VacantEntry}::insert` to return `OccupiedEntry`. (#495)
+
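
A minimal sketch of the entry flow this change affects (hedged: on 0.14.x `insert` returns `()`; after #495 it hands back an `OccupiedEntry` instead):

```rust
use hashbrown::hash_set::Entry;
use hashbrown::HashSet;

fn main() {
    let mut set: HashSet<&str> = HashSet::new();

    match set.entry("poneyland") {
        // After #495, `insert` returns an `OccupiedEntry` so the caller
        // can keep working with the slot; on 0.14.x it returns `()`.
        Entry::Vacant(entry) => {
            entry.insert();
        }
        Entry::Occupied(_) => {}
    }
    assert!(set.contains("poneyland"));
}
```
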
+## [v0.14.5] - 2024-04-28
+
+### Fixed
+
+- Fixed index calculation in panic guard of `clone_from_impl`. (#511)
+
+## ~~[v0.14.4] - 2024-03-19~~
+
+This release was _yanked_ due to a breaking change.
+
+## [v0.14.3] - 2023-11-26
+
+### Added
+
+- Specialized `fold` implementation of iterators. (#480)
+
+### Fixed
+
+- Avoid using unstable `ptr::invalid_mut` on nightly. (#481)
+
+## [v0.14.2] - 2023-10-19
+
+### Added
+
+- `HashTable` type which provides a low-level but safe API with explicit hashing. (#466)
+
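
For orientation, a small sketch of the `HashTable` API, using `std`'s `RandomState` for the explicit hashing; none of this code is part of the diff:

```rust
use hashbrown::HashTable;
use std::collections::hash_map::RandomState;
use std::hash::BuildHasher;

fn main() {
    // `HashTable` never hashes for you: every operation takes a precomputed
    // hash, plus a re-hash closure where the table may need to grow.
    let hasher = RandomState::new();
    let mut table: HashTable<u32> = HashTable::new();

    for i in 0..8u32 {
        table.insert_unique(hasher.hash_one(i), i, |v| hasher.hash_one(*v));
    }

    // Lookups take the hash and an equality predicate instead of a key.
    assert_eq!(table.find(hasher.hash_one(3u32), |v| *v == 3), Some(&3));
}
```
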
+### Fixed
+
+- Disabled the use of NEON instructions on big-endian ARM. (#475)
+- Disabled the use of NEON instructions on Miri. (#476)
+
+## [v0.14.1] - 2023-09-28
+
+### Added
+
+- Allow serializing `HashMap`s that use a custom allocator. (#449)
+
+### Changed
+
+- Use the `Equivalent` trait from the `equivalent` crate. (#442)
+- Slightly improved performance of table resizing. (#451)
+- Relaxed MSRV to 1.63.0. (#457)
+- Removed `Clone` requirement from custom allocators. (#468)
+
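
For illustration, the kind of lookup `Equivalent` enables that `Borrow` cannot express; the `Pair` type here is a hypothetical borrowed analogue of an owned composite key:

```rust
use hashbrown::{Equivalent, HashMap};

// Derived `Hash` matches the tuple's: both hash the two string fields in
// order, so the borrowed key hashes the same as the owned one.
#[derive(Hash)]
struct Pair<'a>(&'a str, &'a str);

impl Equivalent<(String, String)> for Pair<'_> {
    fn equivalent(&self, key: &(String, String)) -> bool {
        self.0 == key.0 && self.1 == key.1
    }
}

fn main() {
    let mut map: HashMap<(String, String), u32> = HashMap::new();
    map.insert(("a".to_owned(), "b".to_owned()), 1);

    // No owned `String`s are allocated for the lookup.
    assert_eq!(map.get(&Pair("a", "b")), Some(&1));
}
```
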
+### Fixed
+
+- Fixed custom allocators being leaked in some situations. (#439, #465)
+
+## [v0.14.0] - 2023-06-01
+
+### Added
+
+- Support for `allocator-api2` crate
+ for interfacing with custom allocators on stable. (#417)
+- Optimized implementation for ARM using NEON instructions. (#430)
+- Support for rkyv serialization. (#432)
+- `Equivalent` trait to look up values without `Borrow`. (#345)
+- `Hash{Map,Set}::raw_table_mut` is added, which returns a mutable reference. (#404)
+- Fast path for `clear` on empty tables. (#428)
+
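
A sketch of what the stable allocator support looks like in practice, assuming `bumpalo` is built with its `allocator-api2` feature (as the dev-dependencies later in this diff do):

```rust
// Assumed Cargo.toml:
//   hashbrown = "0.14"   # `allocator-api2` is a default feature
//   bumpalo = { version = "3", features = ["allocator-api2"] }
use bumpalo::Bump;
use hashbrown::HashMap;

fn main() {
    let bump = Bump::new();

    // Every allocation the table makes comes from the arena.
    let mut map: HashMap<u32, &str, _, &Bump> = HashMap::new_in(&bump);
    map.insert(1, "one");
    assert_eq!(map.get(&1), Some(&"one"));
}
```
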
+### Changed
+
+- Optimized insertion to only perform a single lookup. (#277)
+- `DrainFilter` (`drain_filter`) has been renamed to `ExtractIf` and no longer drops remaining
+ elements when the iterator is dropped. (#374)
+- Bumped MSRV to 1.64.0. (#431)
+- `{Map,Set}::raw_table` now returns an immutable reference. (#404)
+- `VacantEntry` and `OccupiedEntry` now use the default hasher if none is
+ specified in generics. (#389)
+- `RawTable::data_start` now returns a `NonNull` to match `RawTable::data_end`. (#387)
+- `RawIter::{reflect_insert, reflect_remove}` are now unsafe. (#429)
+- `RawTable::find_potential` is renamed to `find_or_find_insert_slot` and returns an `InsertSlot`. (#429)
+- `RawTable::remove` now also returns an `InsertSlot`. (#429)
+- `InsertSlot` can be used to insert an element with `RawTable::insert_in_slot`. (#429)
+- `RawIterHash` no longer has a lifetime tied to that of the `RawTable`. (#427)
+- The trait bounds of `HashSet::raw_table` have been relaxed to not require `Eq + Hash`. (#423)
+- `EntryRef::and_replace_entry_with` and `OccupiedEntryRef::replace_entry_with`
+ were changed to give a `&K` instead of a `&Q` to the closure.
+
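
A short sketch of the renamed iterator's semantics:

```rust
use hashbrown::HashMap;

fn main() {
    let mut map: HashMap<u32, u32> = (0..8).map(|i| (i, i)).collect();

    // Removed pairs are yielded lazily; unlike the old `DrainFilter`,
    // dropping the iterator early now keeps the unvisited elements.
    let evens: Vec<(u32, u32)> = map.extract_if(|k, _v| k % 2 == 0).collect();

    assert_eq!(evens.len(), 4);
    assert_eq!(map.len(), 4);
}
```
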
+### Removed
+
+- Support for `bumpalo` as an allocator with custom wrapper.
+ Use `allocator-api2` feature in `bumpalo` to use it as an allocator
+ for `hashbrown` collections. (#417)
+
+## [v0.13.2] - 2023-01-12
+
+### Fixed
+
+- Added `#[inline(always)]` to `find_inner`. (#375)
+- Fixed `RawTable::allocation_info` for empty tables. (#376)
+
+## [v0.13.1] - 2022-11-10
+
+### Added
+
+- Added `Equivalent` trait to customize key lookups. (#350)
+- Added support for 16-bit targets. (#368)
+- Added `RawTable::allocation_info` which provides information about the memory
+ usage of a table. (#371)
+
+### Changed
+
+- Bumped MSRV to 1.61.0.
+- Upgraded to `ahash` 0.8. (#357)
+- Make `with_hasher_in` const. (#355)
+- The following methods have been removed from the `RawTable` API in favor of
+ safer alternatives:
+ - `RawTable::erase_no_drop` => Use `RawTable::erase` or `RawTable::remove` instead.
+ - `Bucket::read` => Use `RawTable::remove` instead.
+ - `Bucket::drop` => Use `RawTable::erase` instead.
+ - `Bucket::write` => Use `Bucket::as_mut` instead.
+
+### Fixed
+
+- Ensure that `HashMap` allocations don't exceed `isize::MAX`. (#362)
+- Fixed issue with field retagging in scopeguard. (#359)
+
## [v0.12.3] - 2022-07-17
-## Fixed
+### Fixed
- Fixed double-drop in `RawTable::clone_from`. (#348)
## [v0.12.2] - 2022-07-09
-## Added
+### Added
- Added `Entry` API for `HashSet`. (#342)
- Added `Extend<&'a (K, V)> for HashMap<K, V, S, A>`. (#340)
- Added length-based short-circuiting for hash table iteration. (#338)
- Added a function to access the `RawTable` of a `HashMap`. (#335)
-## Changed
+### Changed
- Edited `do_alloc` to reduce LLVM IR generated. (#341)
## [v0.12.1] - 2022-05-02
-## Fixed
+### Fixed
- Fixed underflow in `RawIterRange::size_hint`. (#325)
- Fixed the implementation of `Debug` for `ValuesMut` and `IntoValues`. (#325)
## [v0.12.0] - 2022-01-17
-## Added
+### Added
- Added `From<[T; N]>` and `From<[(K, V); N]>` for `HashSet` and `HashMap` respectively. (#297)
- Added an `allocator()` getter to HashMap and HashSet. (#257)
@@ -44,7 +166,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/).
- Implement `From<array>` on `HashSet` and `HashMap`. (#298)
- Added `entry_ref` API to `HashMap`. (#201)
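
The array conversions added above (#297, #298) mirror `std`'s; a quick sketch:

```rust
use hashbrown::{HashMap, HashSet};

fn main() {
    let set = HashSet::from([1, 2, 3]);
    let map = HashMap::from([("a", 1), ("b", 2)]);

    assert!(set.contains(&2));
    assert_eq!(map["b"], 2);
}
```
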
-## Changed
+### Changed
- Bumped minimum Rust version to 1.56.1 and edition to 2021.
- Use u64 for the GroupWord on WebAssembly. (#271)
@@ -56,7 +178,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/).
- Rename `get_each_mut` to `get_many_mut` and align API with the stdlib. (#291)
- Don't hash the key when searching in an empty table. (#305)
-## Fixed
+### Fixed
- Guard against allocations exceeding isize::MAX. (#268)
- Made `RawTable::insert_no_grow` unsafe. (#254)
@@ -65,19 +187,19 @@ and this project adheres to [Semantic Versioning](https://semver.org/).
## [v0.11.2] - 2021-03-25
-## Fixed
+### Fixed
- Added missing allocator type parameter to `HashMap`'s and `HashSet`'s `Clone` impls. (#252)
## [v0.11.1] - 2021-03-20
-## Fixed
+### Fixed
- Added missing `pub` modifier to `BumpWrapper`. (#251)
## [v0.11.0] - 2021-03-14
-## Added
+### Added
- Added safe `try_insert_no_grow` method to `RawTable`. (#229)
- Added support for `bumpalo` as an allocator without the `nightly` feature. (#231)
- Implemented `Default` for `RawTable`. (#237)
@@ -86,22 +208,22 @@ and this project adheres to [Semantic Versioning](https://semver.org/).
- Added `From<HashMap<T, ()>>` for `HashSet<T>`. (#235)
- Added `try_insert` method to `HashMap`. (#247)
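
A sketch of `try_insert`'s contract; the error type mirrors `std`'s unstable `OccupiedError`, exposing the occupied entry and the rejected value:

```rust
use hashbrown::HashMap;

fn main() {
    let mut map = HashMap::new();

    // First insertion succeeds and yields the stored value.
    assert_eq!(*map.try_insert(37, "a").unwrap(), "a");

    // A second insertion with the same key is refused; nothing is
    // overwritten, and the error carries both the entry and the value.
    let err = map.try_insert(37, "b").unwrap_err();
    assert_eq!(err.entry.get(), &"a");
    assert_eq!(err.value, "b");
}
```
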
-## Changed
+### Changed
- The minimum Rust version has been bumped to 1.49.0. (#230)
- Significantly improved compilation times by reducing the amount of generated IR. (#205)
-## Removed
+### Removed
- We no longer re-export the unstable allocator items from the standard library, nor the stable shims approximating the same. (#227)
- Removed hasher specialization support from `aHash`, which was resulting in inconsistent hashes being generated for a key. (#248)
-## Fixed
+### Fixed
- Fixed union length comparison. (#228)
## ~~[v0.10.0] - 2021-01-16~~
This release was _yanked_ due to inconsistent hashes being generated with the `nightly` feature. (#248)
-## Changed
+### Changed
- Parametrized `RawTable`, `HashSet` and `HashMap` over an allocator. (#133)
- Improved branch prediction hints on stable. (#209)
- Optimized hashing of primitive types with AHash using specialization. (#207)
@@ -109,7 +231,7 @@ This release was _yanked_ due to inconsistent hashes being generated with the `n
## [v0.9.1] - 2020-09-28
-## Added
+### Added
- Added safe methods to `RawTable` (#202):
- `get`: `find` and `as_ref`
- `get_mut`: `find` and `as_mut`
@@ -117,7 +239,7 @@ This release was _yanked_ due to inconsistent hashes being generated with the `n
- `remove_entry`: `find` and `remove`
- `erase_entry`: `find` and `erase`
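
For illustration, how the safe composition reads in practice (this needs the `raw` cargo feature; `RandomState` stands in for whatever hasher the caller uses):

```rust
use hashbrown::raw::RawTable;
use std::collections::hash_map::RandomState;
use std::hash::BuildHasher;

fn main() {
    let hasher = RandomState::new();
    let mut table: RawTable<(u32, &str)> = RawTable::new();

    let hash = hasher.hash_one(1u32);
    table.insert(hash, (1, "one"), |&(k, _)| hasher.hash_one(k));

    // `get` composes `find` + `as_ref` behind a safe interface.
    assert_eq!(table.get(hash, |&(k, _)| k == 1), Some(&(1, "one")));
}
```
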
-## Changed
+### Changed
- Removed `from_key_hashed_nocheck`'s `Q: Hash`. (#200)
- Made `RawTable::drain` safe. (#201)
@@ -215,7 +337,7 @@ This release was _yanked_ due to inconsistent hashes being generated with the `n
## [v0.6.2] - 2019-10-23
### Added
-- Added an `inline-more` feature (enabled by default) which allows choosing a tradeoff between
+- Added an `inline-more` feature (enabled by default) which allows choosing a tradeoff between
runtime performance and compilation time. (#119)
## [v0.6.1] - 2019-10-04
@@ -363,7 +485,15 @@ This release was _yanked_ due to a breaking change for users of `no-default-feat
- Initial release
-[Unreleased]: https://github.com/rust-lang/hashbrown/compare/v0.12.3...HEAD
+[Unreleased]: https://github.com/rust-lang/hashbrown/compare/v0.14.5...HEAD
+[v0.14.5]: https://github.com/rust-lang/hashbrown/compare/v0.14.4...v0.14.5
+[v0.14.4]: https://github.com/rust-lang/hashbrown/compare/v0.14.3...v0.14.4
+[v0.14.3]: https://github.com/rust-lang/hashbrown/compare/v0.14.2...v0.14.3
+[v0.14.2]: https://github.com/rust-lang/hashbrown/compare/v0.14.1...v0.14.2
+[v0.14.1]: https://github.com/rust-lang/hashbrown/compare/v0.14.0...v0.14.1
+[v0.14.0]: https://github.com/rust-lang/hashbrown/compare/v0.13.2...v0.14.0
+[v0.13.2]: https://github.com/rust-lang/hashbrown/compare/v0.13.1...v0.13.2
+[v0.13.1]: https://github.com/rust-lang/hashbrown/compare/v0.12.3...v0.13.1
[v0.12.3]: https://github.com/rust-lang/hashbrown/compare/v0.12.2...v0.12.3
[v0.12.2]: https://github.com/rust-lang/hashbrown/compare/v0.12.1...v0.12.2
[v0.12.1]: https://github.com/rust-lang/hashbrown/compare/v0.12.0...v0.12.1
diff --git a/third_party/rust/hashbrown/Cargo.toml b/third_party/rust/hashbrown/Cargo.toml
index fb130d24d2..0a5434e494 100644
--- a/third_party/rust/hashbrown/Cargo.toml
+++ b/third_party/rust/hashbrown/Cargo.toml
@@ -11,9 +11,9 @@
[package]
edition = "2021"
-rust-version = "1.56.0"
+rust-version = "1.63.0"
name = "hashbrown"
-version = "0.12.3"
+version = "0.14.5"
authors = ["Amanieu d'Antras <amanieu@gmail.com>"]
exclude = [
".github",
@@ -33,7 +33,6 @@ categories = [
]
license = "MIT OR Apache-2.0"
repository = "https://github.com/rust-lang/hashbrown"
-resolver = "2"
[package.metadata.docs.rs]
features = [
@@ -42,9 +41,10 @@ features = [
"serde",
"raw",
]
+rustdoc-args = ["--generate-link-to-definition"]
[dependencies.ahash]
-version = "0.7.0"
+version = "0.8.7"
optional = true
default-features = false
@@ -53,9 +53,11 @@ version = "1.0.0"
optional = true
package = "rustc-std-workspace-alloc"
-[dependencies.bumpalo]
-version = "3.5.0"
+[dependencies.allocator-api2]
+version = "0.2.9"
+features = ["alloc"]
optional = true
+default-features = false
[dependencies.compiler_builtins]
version = "0.1.2"
@@ -66,15 +68,30 @@ version = "1.0.0"
optional = true
package = "rustc-std-workspace-core"
+[dependencies.equivalent]
+version = "1.0"
+optional = true
+default-features = false
+
[dependencies.rayon]
version = "1.0"
optional = true
+[dependencies.rkyv]
+version = "0.7.42"
+features = ["alloc"]
+optional = true
+default-features = false
+
[dependencies.serde]
version = "1.0.25"
optional = true
default-features = false
+[dev-dependencies.bumpalo]
+version = "3.13.0"
+features = ["allocator-api2"]
+
[dev-dependencies.doc-comment]
version = "0.3.1"
@@ -91,17 +108,24 @@ features = ["small_rng"]
[dev-dependencies.rayon]
version = "1.0"
+[dev-dependencies.rkyv]
+version = "0.7.42"
+features = ["validation"]
+
[dev-dependencies.serde_test]
version = "1.0"
[features]
-ahash-compile-time-rng = ["ahash/compile-time-rng"]
default = [
"ahash",
"inline-more",
+ "allocator-api2",
]
inline-more = []
-nightly = []
+nightly = [
+ "allocator-api2?/nightly",
+ "bumpalo/allocator_api",
+]
raw = []
rustc-dep-of-std = [
"nightly",
diff --git a/third_party/rust/hashbrown/README.md b/third_party/rust/hashbrown/README.md
index 2eddcf3e29..5eaef8bd01 100644
--- a/third_party/rust/hashbrown/README.md
+++ b/third_party/rust/hashbrown/README.md
@@ -4,7 +4,7 @@ hashbrown
[![Build Status](https://github.com/rust-lang/hashbrown/actions/workflows/rust.yml/badge.svg)](https://github.com/rust-lang/hashbrown/actions)
[![Crates.io](https://img.shields.io/crates/v/hashbrown.svg)](https://crates.io/crates/hashbrown)
[![Documentation](https://docs.rs/hashbrown/badge.svg)](https://docs.rs/hashbrown)
-[![Rust](https://img.shields.io/badge/rust-1.56.1%2B-blue.svg?maxAge=3600)](https://github.com/rust-lang/hashbrown)
+[![Rust](https://img.shields.io/badge/rust-1.63.0%2B-blue.svg?maxAge=3600)](https://github.com/rust-lang/hashbrown)
This crate is a Rust port of Google's high-performance [SwissTable] hash
map, adapted to make it a drop-in replacement for Rust's standard `HashMap`
@@ -40,44 +40,44 @@ Compared to the previous implementation of `std::collections::HashMap` (Rust 1.3
With the hashbrown default AHash hasher:
-| name | oldstdhash ns/iter | hashbrown ns/iter | diff ns/iter | diff % | speedup |
-|:------------------------|:-------------------:|------------------:|:------------:|---------:|---------|
-| insert_ahash_highbits | 18,865 | 8,020 | -10,845 | -57.49% | x 2.35 |
-| insert_ahash_random | 19,711 | 8,019 | -11,692 | -59.32% | x 2.46 |
-| insert_ahash_serial | 19,365 | 6,463 | -12,902 | -66.63% | x 3.00 |
-| insert_erase_ahash_highbits | 51,136 | 17,916 | -33,220 | -64.96% | x 2.85 |
-| insert_erase_ahash_random | 51,157 | 17,688 | -33,469 | -65.42% | x 2.89 |
-| insert_erase_ahash_serial | 45,479 | 14,895 | -30,584 | -67.25% | x 3.05 |
-| iter_ahash_highbits | 1,399 | 1,092 | -307 | -21.94% | x 1.28 |
-| iter_ahash_random | 1,586 | 1,059 | -527 | -33.23% | x 1.50 |
-| iter_ahash_serial | 3,168 | 1,079 | -2,089 | -65.94% | x 2.94 |
-| lookup_ahash_highbits | 32,351 | 4,792 | -27,559 | -85.19% | x 6.75 |
-| lookup_ahash_random | 17,419 | 4,817 | -12,602 | -72.35% | x 3.62 |
-| lookup_ahash_serial | 15,254 | 3,606 | -11,648 | -76.36% | x 4.23 |
-| lookup_fail_ahash_highbits | 21,187 | 4,369 | -16,818 | -79.38% | x 4.85 |
-| lookup_fail_ahash_random | 21,550 | 4,395 | -17,155 | -79.61% | x 4.90 |
-| lookup_fail_ahash_serial | 19,450 | 3,176 | -16,274 | -83.67% | x 6.12 |
+| name | oldstdhash ns/iter | hashbrown ns/iter | diff ns/iter | diff % | speedup |
+| :-------------------------- | :----------------: | ----------------: | :----------: | ------: | ------- |
+| insert_ahash_highbits | 18,865 | 8,020 | -10,845 | -57.49% | x 2.35 |
+| insert_ahash_random | 19,711 | 8,019 | -11,692 | -59.32% | x 2.46 |
+| insert_ahash_serial | 19,365 | 6,463 | -12,902 | -66.63% | x 3.00 |
+| insert_erase_ahash_highbits | 51,136 | 17,916 | -33,220 | -64.96% | x 2.85 |
+| insert_erase_ahash_random | 51,157 | 17,688 | -33,469 | -65.42% | x 2.89 |
+| insert_erase_ahash_serial | 45,479 | 14,895 | -30,584 | -67.25% | x 3.05 |
+| iter_ahash_highbits | 1,399 | 1,092 | -307 | -21.94% | x 1.28 |
+| iter_ahash_random | 1,586 | 1,059 | -527 | -33.23% | x 1.50 |
+| iter_ahash_serial | 3,168 | 1,079 | -2,089 | -65.94% | x 2.94 |
+| lookup_ahash_highbits | 32,351 | 4,792 | -27,559 | -85.19% | x 6.75 |
+| lookup_ahash_random | 17,419 | 4,817 | -12,602 | -72.35% | x 3.62 |
+| lookup_ahash_serial | 15,254 | 3,606 | -11,648 | -76.36% | x 4.23 |
+| lookup_fail_ahash_highbits | 21,187 | 4,369 | -16,818 | -79.38% | x 4.85 |
+| lookup_fail_ahash_random | 21,550 | 4,395 | -17,155 | -79.61% | x 4.90 |
+| lookup_fail_ahash_serial | 19,450 | 3,176 | -16,274 | -83.67% | x 6.12 |
With the libstd default SipHash hasher:
-|name | oldstdhash ns/iter | hashbrown ns/iter | diff ns/iter | diff % | speedup |
-|:------------------------|:-------------------:|------------------:|:------------:|---------:|---------|
-|insert_std_highbits |19,216 |16,885 | -2,331 | -12.13% | x 1.14 |
-|insert_std_random |19,179 |17,034 | -2,145 | -11.18% | x 1.13 |
-|insert_std_serial |19,462 |17,493 | -1,969 | -10.12% | x 1.11 |
-|insert_erase_std_highbits |50,825 |35,847 | -14,978 | -29.47% | x 1.42 |
-|insert_erase_std_random |51,448 |35,392 | -16,056 | -31.21% | x 1.45 |
-|insert_erase_std_serial |87,711 |38,091 | -49,620 | -56.57% | x 2.30 |
-|iter_std_highbits |1,378 |1,159 | -219 | -15.89% | x 1.19 |
-|iter_std_random |1,395 |1,132 | -263 | -18.85% | x 1.23 |
-|iter_std_serial |1,704 |1,105 | -599 | -35.15% | x 1.54 |
-|lookup_std_highbits |17,195 |13,642 | -3,553 | -20.66% | x 1.26 |
-|lookup_std_random |17,181 |13,773 | -3,408 | -19.84% | x 1.25 |
-|lookup_std_serial |15,483 |13,651 | -1,832 | -11.83% | x 1.13 |
-|lookup_fail_std_highbits |20,926 |13,474 | -7,452 | -35.61% | x 1.55 |
-|lookup_fail_std_random |21,766 |13,505 | -8,261 | -37.95% | x 1.61 |
-|lookup_fail_std_serial |19,336 |13,519 | -5,817 | -30.08% | x 1.43 |
+| name | oldstdhash ns/iter | hashbrown ns/iter | diff ns/iter | diff % | speedup |
+| :------------------------ | :----------------: | ----------------: | :----------: | ------: | ------- |
+| insert_std_highbits | 19,216 | 16,885 | -2,331 | -12.13% | x 1.14 |
+| insert_std_random | 19,179 | 17,034 | -2,145 | -11.18% | x 1.13 |
+| insert_std_serial | 19,462 | 17,493 | -1,969 | -10.12% | x 1.11 |
+| insert_erase_std_highbits | 50,825 | 35,847 | -14,978 | -29.47% | x 1.42 |
+| insert_erase_std_random | 51,448 | 35,392 | -16,056 | -31.21% | x 1.45 |
+| insert_erase_std_serial | 87,711 | 38,091 | -49,620 | -56.57% | x 2.30 |
+| iter_std_highbits | 1,378 | 1,159 | -219 | -15.89% | x 1.19 |
+| iter_std_random | 1,395 | 1,132 | -263 | -18.85% | x 1.23 |
+| iter_std_serial | 1,704 | 1,105 | -599 | -35.15% | x 1.54 |
+| lookup_std_highbits | 17,195 | 13,642 | -3,553 | -20.66% | x 1.26 |
+| lookup_std_random | 17,181 | 13,773 | -3,408 | -19.84% | x 1.25 |
+| lookup_std_serial | 15,483 | 13,651 | -1,832 | -11.83% | x 1.13 |
+| lookup_fail_std_highbits | 20,926 | 13,474 | -7,452 | -35.61% | x 1.55 |
+| lookup_fail_std_random | 21,766 | 13,505 | -8,261 | -37.95% | x 1.61 |
+| lookup_fail_std_serial | 19,336 | 13,519 | -5,817 | -30.08% | x 1.43 |
## Usage
@@ -85,7 +85,7 @@ Add this to your `Cargo.toml`:
```toml
[dependencies]
-hashbrown = "0.12"
+hashbrown = "0.14"
```
Then:
@@ -101,14 +101,13 @@ This crate has the following Cargo features:
- `nightly`: Enables nightly-only features including: `#[may_dangle]`.
- `serde`: Enables serde serialization support.
+- `rkyv`: Enables rkyv serialization support.
- `rayon`: Enables rayon parallel iterator support.
- `raw`: Enables access to the experimental and unsafe `RawTable` API.
- `inline-more`: Adds inline hints to most functions, improving run-time performance at the cost
of compilation time. (enabled by default)
-- `bumpalo`: Provides a `BumpWrapper` type which allows `bumpalo` to be used for memory allocation.
- `ahash`: Compiles with ahash as default hasher. (enabled by default)
-- `ahash-compile-time-rng`: Activates the `compile-time-rng` feature of ahash. For targets with no random number generator
-this pre-generates seeds at compile time and embeds them as constants. See [aHash's documentation](https://github.com/tkaitchuck/aHash#flags) (disabled by default)
+- `allocator-api2`: Enables support for allocators that support `allocator-api2`. (enabled by default)
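
For instance, a downstream `Cargo.toml` might pick a subset of these features (a sketch, not taken from this diff):

```toml
[dependencies]
hashbrown = { version = "0.14", default-features = false, features = [
    "ahash",        # default hasher
    "inline-more",  # aggressive inlining
    "serde",        # serde serialization support
] }
```
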
## License
diff --git a/third_party/rust/hashbrown/benches/bench.rs b/third_party/rust/hashbrown/benches/bench.rs
index c393b9a706..346bd7ef89 100644
--- a/third_party/rust/hashbrown/benches/bench.rs
+++ b/third_party/rust/hashbrown/benches/bench.rs
@@ -311,7 +311,7 @@ fn rehash_in_place(b: &mut Bencher) {
// Each loop triggers one rehash
for _ in 0..10 {
- for i in 0..224 {
+ for i in 0..223 {
set.insert(i);
}
diff --git a/third_party/rust/hashbrown/src/external_trait_impls/mod.rs b/third_party/rust/hashbrown/src/external_trait_impls/mod.rs
index ef497836cb..01d386b046 100644
--- a/third_party/rust/hashbrown/src/external_trait_impls/mod.rs
+++ b/third_party/rust/hashbrown/src/external_trait_impls/mod.rs
@@ -1,4 +1,6 @@
#[cfg(feature = "rayon")]
pub(crate) mod rayon;
+#[cfg(feature = "rkyv")]
+mod rkyv;
#[cfg(feature = "serde")]
mod serde;
diff --git a/third_party/rust/hashbrown/src/external_trait_impls/rayon/map.rs b/third_party/rust/hashbrown/src/external_trait_impls/rayon/map.rs
index 14d91c220c..2534dc9b2b 100644
--- a/third_party/rust/hashbrown/src/external_trait_impls/rayon/map.rs
+++ b/third_party/rust/hashbrown/src/external_trait_impls/rayon/map.rs
@@ -232,11 +232,11 @@ impl<K: Eq + Hash, V: fmt::Debug> fmt::Debug for ParValuesMut<'_, K, V> {
/// [`into_par_iter`]: /hashbrown/struct.HashMap.html#method.into_par_iter
/// [`HashMap`]: /hashbrown/struct.HashMap.html
/// [`IntoParallelIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelIterator.html
-pub struct IntoParIter<K, V, A: Allocator + Clone = Global> {
+pub struct IntoParIter<K, V, A: Allocator = Global> {
inner: RawIntoParIter<(K, V), A>,
}
-impl<K: Send, V: Send, A: Allocator + Clone + Send> ParallelIterator for IntoParIter<K, V, A> {
+impl<K: Send, V: Send, A: Allocator + Send> ParallelIterator for IntoParIter<K, V, A> {
type Item = (K, V);
#[cfg_attr(feature = "inline-more", inline)]
@@ -248,9 +248,7 @@ impl<K: Send, V: Send, A: Allocator + Clone + Send> ParallelIterator for IntoPar
}
}
-impl<K: fmt::Debug + Eq + Hash, V: fmt::Debug, A: Allocator + Clone> fmt::Debug
- for IntoParIter<K, V, A>
-{
+impl<K: fmt::Debug + Eq + Hash, V: fmt::Debug, A: Allocator> fmt::Debug for IntoParIter<K, V, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
ParIter {
inner: unsafe { self.inner.par_iter() },
@@ -267,11 +265,11 @@ impl<K: fmt::Debug + Eq + Hash, V: fmt::Debug, A: Allocator + Clone> fmt::Debug
///
/// [`par_drain`]: /hashbrown/struct.HashMap.html#method.par_drain
/// [`HashMap`]: /hashbrown/struct.HashMap.html
-pub struct ParDrain<'a, K, V, A: Allocator + Clone = Global> {
+pub struct ParDrain<'a, K, V, A: Allocator = Global> {
inner: RawParDrain<'a, (K, V), A>,
}
-impl<K: Send, V: Send, A: Allocator + Clone + Sync> ParallelIterator for ParDrain<'_, K, V, A> {
+impl<K: Send, V: Send, A: Allocator + Sync> ParallelIterator for ParDrain<'_, K, V, A> {
type Item = (K, V);
#[cfg_attr(feature = "inline-more", inline)]
@@ -283,9 +281,7 @@ impl<K: Send, V: Send, A: Allocator + Clone + Sync> ParallelIterator for ParDrai
}
}
-impl<K: fmt::Debug + Eq + Hash, V: fmt::Debug, A: Allocator + Clone> fmt::Debug
- for ParDrain<'_, K, V, A>
-{
+impl<K: fmt::Debug + Eq + Hash, V: fmt::Debug, A: Allocator> fmt::Debug for ParDrain<'_, K, V, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
ParIter {
inner: unsafe { self.inner.par_iter() },
@@ -295,7 +291,7 @@ impl<K: fmt::Debug + Eq + Hash, V: fmt::Debug, A: Allocator + Clone> fmt::Debug
}
}
-impl<K: Sync, V: Sync, S, A: Allocator + Clone> HashMap<K, V, S, A> {
+impl<K: Sync, V: Sync, S, A: Allocator> HashMap<K, V, S, A> {
/// Visits (potentially in parallel) immutably borrowed keys in an arbitrary order.
#[cfg_attr(feature = "inline-more", inline)]
pub fn par_keys(&self) -> ParKeys<'_, K, V> {
@@ -315,7 +311,7 @@ impl<K: Sync, V: Sync, S, A: Allocator + Clone> HashMap<K, V, S, A> {
}
}
-impl<K: Send, V: Send, S, A: Allocator + Clone> HashMap<K, V, S, A> {
+impl<K: Send, V: Send, S, A: Allocator> HashMap<K, V, S, A> {
/// Visits (potentially in parallel) mutably borrowed values in an arbitrary order.
#[cfg_attr(feature = "inline-more", inline)]
pub fn par_values_mut(&mut self) -> ParValuesMut<'_, K, V> {
@@ -340,7 +336,7 @@ where
K: Eq + Hash + Sync,
V: PartialEq + Sync,
S: BuildHasher + Sync,
- A: Allocator + Clone + Sync,
+ A: Allocator + Sync,
{
/// Returns `true` if the map is equal to another,
/// i.e. both maps contain the same keys mapped to the same values.
@@ -354,9 +350,7 @@ where
}
}
-impl<K: Send, V: Send, S, A: Allocator + Clone + Send> IntoParallelIterator
- for HashMap<K, V, S, A>
-{
+impl<K: Send, V: Send, S, A: Allocator + Send> IntoParallelIterator for HashMap<K, V, S, A> {
type Item = (K, V);
type Iter = IntoParIter<K, V, A>;
@@ -368,9 +362,7 @@ impl<K: Send, V: Send, S, A: Allocator + Clone + Send> IntoParallelIterator
}
}
-impl<'a, K: Sync, V: Sync, S, A: Allocator + Clone> IntoParallelIterator
- for &'a HashMap<K, V, S, A>
-{
+impl<'a, K: Sync, V: Sync, S, A: Allocator> IntoParallelIterator for &'a HashMap<K, V, S, A> {
type Item = (&'a K, &'a V);
type Iter = ParIter<'a, K, V>;
@@ -383,9 +375,7 @@ impl<'a, K: Sync, V: Sync, S, A: Allocator + Clone> IntoParallelIterator
}
}
-impl<'a, K: Sync, V: Send, S, A: Allocator + Clone> IntoParallelIterator
- for &'a mut HashMap<K, V, S, A>
-{
+impl<'a, K: Sync, V: Send, S, A: Allocator> IntoParallelIterator for &'a mut HashMap<K, V, S, A> {
type Item = (&'a K, &'a mut V);
type Iter = ParIterMut<'a, K, V>;
@@ -424,7 +414,7 @@ where
K: Eq + Hash + Send,
V: Send,
S: BuildHasher,
- A: Allocator + Clone,
+ A: Allocator,
{
fn par_extend<I>(&mut self, par_iter: I)
where
@@ -440,7 +430,7 @@ where
K: Copy + Eq + Hash + Sync,
V: Copy + Sync,
S: BuildHasher,
- A: Allocator + Clone,
+ A: Allocator,
{
fn par_extend<I>(&mut self, par_iter: I)
where
@@ -456,7 +446,7 @@ where
K: Eq + Hash,
S: BuildHasher,
I: IntoParallelIterator,
- A: Allocator + Clone,
+ A: Allocator,
HashMap<K, V, S, A>: Extend<I::Item>,
{
let (list, len) = super::helpers::collect(par_iter);
@@ -561,10 +551,7 @@ mod test_par_map {
assert_eq!(value.load(Ordering::Relaxed), 100);
// retain only half
- let _v: Vec<_> = hm
- .into_par_iter()
- .filter(|&(ref key, _)| key.k < 50)
- .collect();
+ let _v: Vec<_> = hm.into_par_iter().filter(|(key, _)| key.k < 50).collect();
assert_eq!(key.load(Ordering::Relaxed), 50);
assert_eq!(value.load(Ordering::Relaxed), 50);
@@ -611,7 +598,7 @@ mod test_par_map {
assert_eq!(value.load(Ordering::Relaxed), 100);
// retain only half
- let _v: Vec<_> = hm.drain().filter(|&(ref key, _)| key.k < 50).collect();
+ let _v: Vec<_> = hm.drain().filter(|(key, _)| key.k < 50).collect();
assert!(hm.is_empty());
assert_eq!(key.load(Ordering::Relaxed), 50);
diff --git a/third_party/rust/hashbrown/src/external_trait_impls/rayon/mod.rs b/third_party/rust/hashbrown/src/external_trait_impls/rayon/mod.rs
index 99337a1ce3..61ca69b61d 100644
--- a/third_party/rust/hashbrown/src/external_trait_impls/rayon/mod.rs
+++ b/third_party/rust/hashbrown/src/external_trait_impls/rayon/mod.rs
@@ -2,3 +2,4 @@ mod helpers;
pub(crate) mod map;
pub(crate) mod raw;
pub(crate) mod set;
+pub(crate) mod table;
diff --git a/third_party/rust/hashbrown/src/external_trait_impls/rayon/raw.rs b/third_party/rust/hashbrown/src/external_trait_impls/rayon/raw.rs
index 883303e278..612be47a55 100644
--- a/third_party/rust/hashbrown/src/external_trait_impls/rayon/raw.rs
+++ b/third_party/rust/hashbrown/src/external_trait_impls/rayon/raw.rs
@@ -1,7 +1,6 @@
use crate::raw::Bucket;
use crate::raw::{Allocator, Global, RawIter, RawIterRange, RawTable};
use crate::scopeguard::guard;
-use alloc::alloc::dealloc;
use core::marker::PhantomData;
use core::mem;
use core::ptr::NonNull;
@@ -76,18 +75,18 @@ impl<T> UnindexedProducer for ParIterProducer<T> {
}
/// Parallel iterator which consumes a table and returns elements.
-pub struct RawIntoParIter<T, A: Allocator + Clone = Global> {
+pub struct RawIntoParIter<T, A: Allocator = Global> {
table: RawTable<T, A>,
}
-impl<T, A: Allocator + Clone> RawIntoParIter<T, A> {
+impl<T, A: Allocator> RawIntoParIter<T, A> {
#[cfg_attr(feature = "inline-more", inline)]
pub(super) unsafe fn par_iter(&self) -> RawParIter<T> {
self.table.par_iter()
}
}
-impl<T: Send, A: Allocator + Clone + Send> ParallelIterator for RawIntoParIter<T, A> {
+impl<T: Send, A: Allocator + Send> ParallelIterator for RawIntoParIter<T, A> {
type Item = T;
#[cfg_attr(feature = "inline-more", inline)]
@@ -97,9 +96,9 @@ impl<T: Send, A: Allocator + Clone + Send> ParallelIterator for RawIntoParIter<T
{
let iter = unsafe { self.table.iter().iter };
let _guard = guard(self.table.into_allocation(), |alloc| {
- if let Some((ptr, layout)) = *alloc {
+ if let Some((ptr, layout, ref alloc)) = *alloc {
unsafe {
- dealloc(ptr.as_ptr(), layout);
+ alloc.deallocate(ptr, layout);
}
}
});
@@ -109,23 +108,23 @@ impl<T: Send, A: Allocator + Clone + Send> ParallelIterator for RawIntoParIter<T
}
/// Parallel iterator which consumes elements without freeing the table storage.
-pub struct RawParDrain<'a, T, A: Allocator + Clone = Global> {
+pub struct RawParDrain<'a, T, A: Allocator = Global> {
// We don't use a &'a mut RawTable<T> because we want RawParDrain to be
// covariant over T.
table: NonNull<RawTable<T, A>>,
marker: PhantomData<&'a RawTable<T, A>>,
}
-unsafe impl<T: Send, A: Allocator + Clone> Send for RawParDrain<'_, T, A> {}
+unsafe impl<T: Send, A: Allocator> Send for RawParDrain<'_, T, A> {}
-impl<T, A: Allocator + Clone> RawParDrain<'_, T, A> {
+impl<T, A: Allocator> RawParDrain<'_, T, A> {
#[cfg_attr(feature = "inline-more", inline)]
pub(super) unsafe fn par_iter(&self) -> RawParIter<T> {
self.table.as_ref().par_iter()
}
}
-impl<T: Send, A: Allocator + Clone> ParallelIterator for RawParDrain<'_, T, A> {
+impl<T: Send, A: Allocator> ParallelIterator for RawParDrain<'_, T, A> {
type Item = T;
#[cfg_attr(feature = "inline-more", inline)]
@@ -143,7 +142,7 @@ impl<T: Send, A: Allocator + Clone> ParallelIterator for RawParDrain<'_, T, A> {
}
}
-impl<T, A: Allocator + Clone> Drop for RawParDrain<'_, T, A> {
+impl<T, A: Allocator> Drop for RawParDrain<'_, T, A> {
fn drop(&mut self) {
// If drive_unindexed is not called then simply clear the table.
unsafe {
@@ -204,7 +203,7 @@ impl<T> Drop for ParDrainProducer<T> {
}
}
-impl<T, A: Allocator + Clone> RawTable<T, A> {
+impl<T, A: Allocator> RawTable<T, A> {
/// Returns a parallel iterator over the elements in a `RawTable`.
#[cfg_attr(feature = "inline-more", inline)]
pub unsafe fn par_iter(&self) -> RawParIter<T> {
diff --git a/third_party/rust/hashbrown/src/external_trait_impls/rayon/set.rs b/third_party/rust/hashbrown/src/external_trait_impls/rayon/set.rs
index ee4f6e6693..3de98fccb8 100644
--- a/third_party/rust/hashbrown/src/external_trait_impls/rayon/set.rs
+++ b/third_party/rust/hashbrown/src/external_trait_impls/rayon/set.rs
@@ -16,11 +16,11 @@ use rayon::iter::{FromParallelIterator, IntoParallelIterator, ParallelExtend, Pa
/// [`into_par_iter`]: /hashbrown/struct.HashSet.html#method.into_par_iter
/// [`HashSet`]: /hashbrown/struct.HashSet.html
/// [`IntoParallelIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelIterator.html
-pub struct IntoParIter<T, A: Allocator + Clone = Global> {
+pub struct IntoParIter<T, A: Allocator = Global> {
inner: map::IntoParIter<T, (), A>,
}
-impl<T: Send, A: Allocator + Clone + Send> ParallelIterator for IntoParIter<T, A> {
+impl<T: Send, A: Allocator + Send> ParallelIterator for IntoParIter<T, A> {
type Item = T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
@@ -38,11 +38,11 @@ impl<T: Send, A: Allocator + Clone + Send> ParallelIterator for IntoParIter<T, A
///
/// [`par_drain`]: /hashbrown/struct.HashSet.html#method.par_drain
/// [`HashSet`]: /hashbrown/struct.HashSet.html
-pub struct ParDrain<'a, T, A: Allocator + Clone = Global> {
+pub struct ParDrain<'a, T, A: Allocator = Global> {
inner: map::ParDrain<'a, T, (), A>,
}
-impl<T: Send, A: Allocator + Clone + Send + Sync> ParallelIterator for ParDrain<'_, T, A> {
+impl<T: Send, A: Allocator + Send + Sync> ParallelIterator for ParDrain<'_, T, A> {
type Item = T;
fn drive_unindexed<C>(self, consumer: C) -> C::Result
@@ -85,7 +85,7 @@ impl<'a, T: Sync> ParallelIterator for ParIter<'a, T> {
///
/// [`par_difference`]: /hashbrown/struct.HashSet.html#method.par_difference
/// [`HashSet`]: /hashbrown/struct.HashSet.html
-pub struct ParDifference<'a, T, S, A: Allocator + Clone = Global> {
+pub struct ParDifference<'a, T, S, A: Allocator = Global> {
a: &'a HashSet<T, S, A>,
b: &'a HashSet<T, S, A>,
}
@@ -94,7 +94,7 @@ impl<'a, T, S, A> ParallelIterator for ParDifference<'a, T, S, A>
where
T: Eq + Hash + Sync,
S: BuildHasher + Sync,
- A: Allocator + Clone + Sync,
+ A: Allocator + Sync,
{
type Item = &'a T;
@@ -118,7 +118,7 @@ where
///
/// [`par_symmetric_difference`]: /hashbrown/struct.HashSet.html#method.par_symmetric_difference
/// [`HashSet`]: /hashbrown/struct.HashSet.html
-pub struct ParSymmetricDifference<'a, T, S, A: Allocator + Clone = Global> {
+pub struct ParSymmetricDifference<'a, T, S, A: Allocator = Global> {
a: &'a HashSet<T, S, A>,
b: &'a HashSet<T, S, A>,
}
@@ -127,7 +127,7 @@ impl<'a, T, S, A> ParallelIterator for ParSymmetricDifference<'a, T, S, A>
where
T: Eq + Hash + Sync,
S: BuildHasher + Sync,
- A: Allocator + Clone + Sync,
+ A: Allocator + Sync,
{
type Item = &'a T;
@@ -150,7 +150,7 @@ where
///
/// [`par_intersection`]: /hashbrown/struct.HashSet.html#method.par_intersection
/// [`HashSet`]: /hashbrown/struct.HashSet.html
-pub struct ParIntersection<'a, T, S, A: Allocator + Clone = Global> {
+pub struct ParIntersection<'a, T, S, A: Allocator = Global> {
a: &'a HashSet<T, S, A>,
b: &'a HashSet<T, S, A>,
}
@@ -159,7 +159,7 @@ impl<'a, T, S, A> ParallelIterator for ParIntersection<'a, T, S, A>
where
T: Eq + Hash + Sync,
S: BuildHasher + Sync,
- A: Allocator + Clone + Sync,
+ A: Allocator + Sync,
{
type Item = &'a T;
@@ -181,7 +181,7 @@ where
///
/// [`par_union`]: /hashbrown/struct.HashSet.html#method.par_union
/// [`HashSet`]: /hashbrown/struct.HashSet.html
-pub struct ParUnion<'a, T, S, A: Allocator + Clone = Global> {
+pub struct ParUnion<'a, T, S, A: Allocator = Global> {
a: &'a HashSet<T, S, A>,
b: &'a HashSet<T, S, A>,
}
@@ -190,7 +190,7 @@ impl<'a, T, S, A> ParallelIterator for ParUnion<'a, T, S, A>
where
T: Eq + Hash + Sync,
S: BuildHasher + Sync,
- A: Allocator + Clone + Sync,
+ A: Allocator + Sync,
{
type Item = &'a T;
@@ -216,7 +216,7 @@ impl<T, S, A> HashSet<T, S, A>
where
T: Eq + Hash + Sync,
S: BuildHasher + Sync,
- A: Allocator + Clone + Sync,
+ A: Allocator + Sync,
{
/// Visits (potentially in parallel) the values representing the union,
/// i.e. all the values in `self` or `other`, without duplicates.
@@ -289,7 +289,7 @@ where
impl<T, S, A> HashSet<T, S, A>
where
T: Eq + Hash + Send,
- A: Allocator + Clone + Send,
+ A: Allocator + Send,
{
/// Consumes (potentially in parallel) all values in an arbitrary order,
/// while preserving the set's allocated memory for reuse.
@@ -301,7 +301,7 @@ where
}
}
-impl<T: Send, S, A: Allocator + Clone + Send> IntoParallelIterator for HashSet<T, S, A> {
+impl<T: Send, S, A: Allocator + Send> IntoParallelIterator for HashSet<T, S, A> {
type Item = T;
type Iter = IntoParIter<T, A>;
@@ -313,7 +313,7 @@ impl<T: Send, S, A: Allocator + Clone + Send> IntoParallelIterator for HashSet<T
}
}
-impl<'a, T: Sync, S, A: Allocator + Clone> IntoParallelIterator for &'a HashSet<T, S, A> {
+impl<'a, T: Sync, S, A: Allocator> IntoParallelIterator for &'a HashSet<T, S, A> {
type Item = &'a T;
type Iter = ParIter<'a, T>;
@@ -374,7 +374,7 @@ fn extend<T, S, I, A>(set: &mut HashSet<T, S, A>, par_iter: I)
where
T: Eq + Hash,
S: BuildHasher,
- A: Allocator + Clone,
+ A: Allocator,
I: IntoParallelIterator,
HashSet<T, S, A>: Extend<I::Item>,
{
diff --git a/third_party/rust/hashbrown/src/external_trait_impls/rayon/table.rs b/third_party/rust/hashbrown/src/external_trait_impls/rayon/table.rs
new file mode 100644
index 0000000000..e8e50944ad
--- /dev/null
+++ b/third_party/rust/hashbrown/src/external_trait_impls/rayon/table.rs
@@ -0,0 +1,252 @@
+//! Rayon extensions for `HashTable`.
+
+use super::raw::{RawIntoParIter, RawParDrain, RawParIter};
+use crate::hash_table::HashTable;
+use crate::raw::{Allocator, Global};
+use core::fmt;
+use core::marker::PhantomData;
+use rayon::iter::plumbing::UnindexedConsumer;
+use rayon::iter::{IntoParallelIterator, ParallelIterator};
+
+/// Parallel iterator over shared references to entries in a map.
+///
+/// This iterator is created by the [`par_iter`] method on [`HashTable`]
+/// (provided by the [`IntoParallelRefIterator`] trait).
+/// See its documentation for more.
+///
+/// [`par_iter`]: /hashbrown/struct.HashTable.html#method.par_iter
+/// [`HashTable`]: /hashbrown/struct.HashTable.html
+/// [`IntoParallelRefIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelRefIterator.html
+pub struct ParIter<'a, T> {
+ inner: RawParIter<T>,
+ marker: PhantomData<&'a T>,
+}
+
+impl<'a, T: Sync> ParallelIterator for ParIter<'a, T> {
+ type Item = &'a T;
+
+ #[cfg_attr(feature = "inline-more", inline)]
+ fn drive_unindexed<C>(self, consumer: C) -> C::Result
+ where
+ C: UnindexedConsumer<Self::Item>,
+ {
+ self.inner
+ .map(|x| unsafe { x.as_ref() })
+ .drive_unindexed(consumer)
+ }
+}
+
+impl<T> Clone for ParIter<'_, T> {
+ #[cfg_attr(feature = "inline-more", inline)]
+ fn clone(&self) -> Self {
+ Self {
+ inner: self.inner.clone(),
+ marker: PhantomData,
+ }
+ }
+}
+
+impl<T: fmt::Debug> fmt::Debug for ParIter<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let iter = unsafe { self.inner.iter() }.map(|x| unsafe { x.as_ref() });
+ f.debug_list().entries(iter).finish()
+ }
+}
+
+/// Parallel iterator over mutable references to entries in a map.
+///
+/// This iterator is created by the [`par_iter_mut`] method on [`HashTable`]
+/// (provided by the [`IntoParallelRefMutIterator`] trait).
+/// See its documentation for more.
+///
+/// [`par_iter_mut`]: /hashbrown/struct.HashTable.html#method.par_iter_mut
+/// [`HashTable`]: /hashbrown/struct.HashTable.html
+/// [`IntoParallelRefMutIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelRefMutIterator.html
+pub struct ParIterMut<'a, T> {
+ inner: RawParIter<T>,
+ marker: PhantomData<&'a mut T>,
+}
+
+impl<'a, T: Send> ParallelIterator for ParIterMut<'a, T> {
+ type Item = &'a mut T;
+
+ #[cfg_attr(feature = "inline-more", inline)]
+ fn drive_unindexed<C>(self, consumer: C) -> C::Result
+ where
+ C: UnindexedConsumer<Self::Item>,
+ {
+ self.inner
+ .map(|x| unsafe { x.as_mut() })
+ .drive_unindexed(consumer)
+ }
+}
+
+impl<T: fmt::Debug> fmt::Debug for ParIterMut<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ ParIter {
+ inner: self.inner.clone(),
+ marker: PhantomData,
+ }
+ .fmt(f)
+ }
+}
+
+/// Parallel iterator over entries of a consumed map.
+///
+/// This iterator is created by the [`into_par_iter`] method on [`HashTable`]
+/// (provided by the [`IntoParallelIterator`] trait).
+/// See its documentation for more.
+///
+/// [`into_par_iter`]: /hashbrown/struct.HashTable.html#method.into_par_iter
+/// [`HashTable`]: /hashbrown/struct.HashTable.html
+/// [`IntoParallelIterator`]: https://docs.rs/rayon/1.0/rayon/iter/trait.IntoParallelIterator.html
+pub struct IntoParIter<T, A: Allocator = Global> {
+ inner: RawIntoParIter<T, A>,
+}
+
+impl<T: Send, A: Allocator + Send> ParallelIterator for IntoParIter<T, A> {
+ type Item = T;
+
+ #[cfg_attr(feature = "inline-more", inline)]
+ fn drive_unindexed<C>(self, consumer: C) -> C::Result
+ where
+ C: UnindexedConsumer<Self::Item>,
+ {
+ self.inner.drive_unindexed(consumer)
+ }
+}
+
+impl<T: fmt::Debug, A: Allocator> fmt::Debug for IntoParIter<T, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ ParIter {
+ inner: unsafe { self.inner.par_iter() },
+ marker: PhantomData,
+ }
+ .fmt(f)
+ }
+}
+
+/// Parallel draining iterator over entries of a map.
+///
+/// This iterator is created by the [`par_drain`] method on [`HashTable`].
+/// See its documentation for more.
+///
+/// [`par_drain`]: /hashbrown/struct.HashTable.html#method.par_drain
+/// [`HashTable`]: /hashbrown/struct.HashTable.html
+pub struct ParDrain<'a, T, A: Allocator = Global> {
+ inner: RawParDrain<'a, T, A>,
+}
+
+impl<T: Send, A: Allocator + Sync> ParallelIterator for ParDrain<'_, T, A> {
+ type Item = T;
+
+ #[cfg_attr(feature = "inline-more", inline)]
+ fn drive_unindexed<C>(self, consumer: C) -> C::Result
+ where
+ C: UnindexedConsumer<Self::Item>,
+ {
+ self.inner.drive_unindexed(consumer)
+ }
+}
+
+impl<T: fmt::Debug, A: Allocator> fmt::Debug for ParDrain<'_, T, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ ParIter {
+ inner: unsafe { self.inner.par_iter() },
+ marker: PhantomData,
+ }
+ .fmt(f)
+ }
+}
+
+impl<T: Send, A: Allocator> HashTable<T, A> {
+ /// Consumes (potentially in parallel) all values in an arbitrary order,
+ /// while preserving the map's allocated memory for reuse.
+ #[cfg_attr(feature = "inline-more", inline)]
+ pub fn par_drain(&mut self) -> ParDrain<'_, T, A> {
+ ParDrain {
+ inner: self.raw.par_drain(),
+ }
+ }
+}
+
+impl<T: Send, A: Allocator + Send> IntoParallelIterator for HashTable<T, A> {
+ type Item = T;
+ type Iter = IntoParIter<T, A>;
+
+ #[cfg_attr(feature = "inline-more", inline)]
+ fn into_par_iter(self) -> Self::Iter {
+ IntoParIter {
+ inner: self.raw.into_par_iter(),
+ }
+ }
+}
+
+impl<'a, T: Sync, A: Allocator> IntoParallelIterator for &'a HashTable<T, A> {
+ type Item = &'a T;
+ type Iter = ParIter<'a, T>;
+
+ #[cfg_attr(feature = "inline-more", inline)]
+ fn into_par_iter(self) -> Self::Iter {
+ ParIter {
+ inner: unsafe { self.raw.par_iter() },
+ marker: PhantomData,
+ }
+ }
+}
+
+impl<'a, T: Send, A: Allocator> IntoParallelIterator for &'a mut HashTable<T, A> {
+ type Item = &'a mut T;
+ type Iter = ParIterMut<'a, T>;
+
+ #[cfg_attr(feature = "inline-more", inline)]
+ fn into_par_iter(self) -> Self::Iter {
+ ParIterMut {
+ inner: unsafe { self.raw.par_iter() },
+ marker: PhantomData,
+ }
+ }
+}
+
+#[cfg(test)]
+mod test_par_table {
+ use alloc::vec::Vec;
+ use core::sync::atomic::{AtomicUsize, Ordering};
+
+ use rayon::prelude::*;
+
+ use crate::{
+ hash_map::{make_hash, DefaultHashBuilder},
+ hash_table::HashTable,
+ };
+
+ #[test]
+ fn test_iterate() {
+ let hasher = DefaultHashBuilder::default();
+ let mut a = HashTable::new();
+ for i in 0..32 {
+ a.insert_unique(make_hash(&hasher, &i), i, |x| make_hash(&hasher, x));
+ }
+ let observed = AtomicUsize::new(0);
+ a.par_iter().for_each(|k| {
+ observed.fetch_or(1 << *k, Ordering::Relaxed);
+ });
+ assert_eq!(observed.into_inner(), 0xFFFF_FFFF);
+ }
+
+ #[test]
+ fn test_move_iter() {
+ let hasher = DefaultHashBuilder::default();
+ let hs = {
+ let mut hs = HashTable::new();
+
+ hs.insert_unique(make_hash(&hasher, &'a'), 'a', |x| make_hash(&hasher, x));
+ hs.insert_unique(make_hash(&hasher, &'b'), 'b', |x| make_hash(&hasher, x));
+
+ hs
+ };
+
+ let v = hs.into_par_iter().collect::<Vec<char>>();
+ assert!(v == ['a', 'b'] || v == ['b', 'a']);
+ }
+}
diff --git a/third_party/rust/hashbrown/src/external_trait_impls/rkyv/hash_map.rs b/third_party/rust/hashbrown/src/external_trait_impls/rkyv/hash_map.rs
new file mode 100644
index 0000000000..fae7f76763
--- /dev/null
+++ b/third_party/rust/hashbrown/src/external_trait_impls/rkyv/hash_map.rs
@@ -0,0 +1,125 @@
+use crate::HashMap;
+use core::{
+ borrow::Borrow,
+ hash::{BuildHasher, Hash},
+};
+use rkyv::{
+ collections::hash_map::{ArchivedHashMap, HashMapResolver},
+ ser::{ScratchSpace, Serializer},
+ Archive, Deserialize, Fallible, Serialize,
+};
+
+impl<K: Archive + Hash + Eq, V: Archive, S> Archive for HashMap<K, V, S>
+where
+ K::Archived: Hash + Eq,
+{
+ type Archived = ArchivedHashMap<K::Archived, V::Archived>;
+ type Resolver = HashMapResolver;
+
+ #[inline]
+ unsafe fn resolve(&self, pos: usize, resolver: Self::Resolver, out: *mut Self::Archived) {
+ ArchivedHashMap::resolve_from_len(self.len(), pos, resolver, out);
+ }
+}
+
+impl<K, V, S, RandomState> Serialize<S> for HashMap<K, V, RandomState>
+where
+ K: Serialize<S> + Hash + Eq,
+ K::Archived: Hash + Eq,
+ V: Serialize<S>,
+ S: Serializer + ScratchSpace + ?Sized,
+{
+ #[inline]
+ fn serialize(&self, serializer: &mut S) -> Result<Self::Resolver, S::Error> {
+ unsafe { ArchivedHashMap::serialize_from_iter(self.iter(), serializer) }
+ }
+}
+
+impl<K: Archive + Hash + Eq, V: Archive, D: Fallible + ?Sized, S: Default + BuildHasher>
+ Deserialize<HashMap<K, V, S>, D> for ArchivedHashMap<K::Archived, V::Archived>
+where
+ K::Archived: Deserialize<K, D> + Hash + Eq,
+ V::Archived: Deserialize<V, D>,
+{
+ #[inline]
+ fn deserialize(&self, deserializer: &mut D) -> Result<HashMap<K, V, S>, D::Error> {
+ let mut result = HashMap::with_capacity_and_hasher(self.len(), S::default());
+ for (k, v) in self.iter() {
+ result.insert(k.deserialize(deserializer)?, v.deserialize(deserializer)?);
+ }
+ Ok(result)
+ }
+}
+
+impl<K: Hash + Eq + Borrow<AK>, V, AK: Hash + Eq, AV: PartialEq<V>, S: BuildHasher>
+ PartialEq<HashMap<K, V, S>> for ArchivedHashMap<AK, AV>
+{
+ #[inline]
+ fn eq(&self, other: &HashMap<K, V, S>) -> bool {
+ if self.len() != other.len() {
+ false
+ } else {
+ self.iter()
+ .all(|(key, value)| other.get(key).map_or(false, |v| value.eq(v)))
+ }
+ }
+}
+
+impl<K: Hash + Eq + Borrow<AK>, V, AK: Hash + Eq, AV: PartialEq<V>>
+ PartialEq<ArchivedHashMap<AK, AV>> for HashMap<K, V>
+{
+ #[inline]
+ fn eq(&self, other: &ArchivedHashMap<AK, AV>) -> bool {
+ other.eq(self)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::HashMap;
+ use alloc::string::String;
+ use rkyv::{
+ archived_root, check_archived_root,
+ ser::{serializers::AllocSerializer, Serializer},
+ Deserialize, Infallible,
+ };
+
+ #[test]
+ fn index_map() {
+ let mut value = HashMap::new();
+ value.insert(String::from("foo"), 10);
+ value.insert(String::from("bar"), 20);
+ value.insert(String::from("baz"), 40);
+ value.insert(String::from("bat"), 80);
+
+ let mut serializer = AllocSerializer::<4096>::default();
+ serializer.serialize_value(&value).unwrap();
+ let result = serializer.into_serializer().into_inner();
+ let archived = unsafe { archived_root::<HashMap<String, i32>>(result.as_ref()) };
+
+ assert_eq!(value.len(), archived.len());
+ for (k, v) in value.iter() {
+ let (ak, av) = archived.get_key_value(k.as_str()).unwrap();
+ assert_eq!(k, ak);
+ assert_eq!(v, av);
+ }
+
+ let deserialized: HashMap<String, i32> = archived.deserialize(&mut Infallible).unwrap();
+ assert_eq!(value, deserialized);
+ }
+
+ #[test]
+ fn validate_index_map() {
+ let mut value = HashMap::new();
+ value.insert(String::from("foo"), 10);
+ value.insert(String::from("bar"), 20);
+ value.insert(String::from("baz"), 40);
+ value.insert(String::from("bat"), 80);
+
+ let mut serializer = AllocSerializer::<4096>::default();
+ serializer.serialize_value(&value).unwrap();
+ let result = serializer.into_serializer().into_inner();
+ check_archived_root::<HashMap<String, i32>>(result.as_ref())
+ .expect("failed to validate archived index map");
+ }
+}
diff --git a/third_party/rust/hashbrown/src/external_trait_impls/rkyv/hash_set.rs b/third_party/rust/hashbrown/src/external_trait_impls/rkyv/hash_set.rs
new file mode 100644
index 0000000000..c8a69cf4fc
--- /dev/null
+++ b/third_party/rust/hashbrown/src/external_trait_impls/rkyv/hash_set.rs
@@ -0,0 +1,123 @@
+use crate::HashSet;
+use core::{
+ borrow::Borrow,
+ hash::{BuildHasher, Hash},
+};
+use rkyv::{
+ collections::hash_set::{ArchivedHashSet, HashSetResolver},
+ ser::{ScratchSpace, Serializer},
+ Archive, Deserialize, Fallible, Serialize,
+};
+
+impl<K: Archive + Hash + Eq, S> Archive for HashSet<K, S>
+where
+ K::Archived: Hash + Eq,
+{
+ type Archived = ArchivedHashSet<K::Archived>;
+ type Resolver = HashSetResolver;
+
+ #[inline]
+ unsafe fn resolve(&self, pos: usize, resolver: Self::Resolver, out: *mut Self::Archived) {
+ ArchivedHashSet::<K::Archived>::resolve_from_len(self.len(), pos, resolver, out);
+ }
+}
+
+impl<K, S, RS> Serialize<S> for HashSet<K, RS>
+where
+ K::Archived: Hash + Eq,
+ K: Serialize<S> + Hash + Eq,
+ S: ScratchSpace + Serializer + ?Sized,
+{
+ #[inline]
+ fn serialize(&self, serializer: &mut S) -> Result<Self::Resolver, S::Error> {
+ unsafe { ArchivedHashSet::serialize_from_iter(self.iter(), serializer) }
+ }
+}
+
+impl<K, D, S> Deserialize<HashSet<K, S>, D> for ArchivedHashSet<K::Archived>
+where
+ K: Archive + Hash + Eq,
+ K::Archived: Deserialize<K, D> + Hash + Eq,
+ D: Fallible + ?Sized,
+ S: Default + BuildHasher,
+{
+ #[inline]
+ fn deserialize(&self, deserializer: &mut D) -> Result<HashSet<K, S>, D::Error> {
+ let mut result = HashSet::with_hasher(S::default());
+ for k in self.iter() {
+ result.insert(k.deserialize(deserializer)?);
+ }
+ Ok(result)
+ }
+}
+
+impl<K: Hash + Eq + Borrow<AK>, AK: Hash + Eq, S: BuildHasher> PartialEq<HashSet<K, S>>
+ for ArchivedHashSet<AK>
+{
+ #[inline]
+ fn eq(&self, other: &HashSet<K, S>) -> bool {
+ if self.len() != other.len() {
+ false
+ } else {
+ self.iter().all(|key| other.get(key).is_some())
+ }
+ }
+}
+
+impl<K: Hash + Eq + Borrow<AK>, AK: Hash + Eq, S: BuildHasher> PartialEq<ArchivedHashSet<AK>>
+ for HashSet<K, S>
+{
+ #[inline]
+ fn eq(&self, other: &ArchivedHashSet<AK>) -> bool {
+ other.eq(self)
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use crate::HashSet;
+ use alloc::string::String;
+ use rkyv::{
+ archived_root, check_archived_root,
+ ser::{serializers::AllocSerializer, Serializer},
+ Deserialize, Infallible,
+ };
+
+ #[test]
+ fn hash_set() {
+ let mut value = HashSet::new();
+ value.insert(String::from("foo"));
+ value.insert(String::from("bar"));
+ value.insert(String::from("baz"));
+ value.insert(String::from("bat"));
+
+ let mut serializer = AllocSerializer::<4096>::default();
+ serializer.serialize_value(&value).unwrap();
+ let result = serializer.into_serializer().into_inner();
+ let archived = unsafe { archived_root::<HashSet<String>>(result.as_ref()) };
+
+ assert_eq!(value.len(), archived.len());
+ for k in value.iter() {
+ let ak = archived.get(k.as_str()).unwrap();
+ assert_eq!(k, ak);
+ }
+
+ let deserialized: HashSet<String> = archived.deserialize(&mut Infallible).unwrap();
+ assert_eq!(value, deserialized);
+ }
+
+ #[test]
+ fn validate_hash_set() {
+ let mut value = HashSet::new();
+ value.insert(String::from("foo"));
+ value.insert(String::from("bar"));
+ value.insert(String::from("baz"));
+ value.insert(String::from("bat"));
+
+ let mut serializer = AllocSerializer::<4096>::default();
+ serializer.serialize_value(&value).unwrap();
+ let result = serializer.into_serializer().into_inner();
+ check_archived_root::<HashSet<String>>(result.as_ref())
+ .expect("failed to validate archived index set");
+ }
+}
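The set impls mirror the map ones; the extra piece worth a sketch is validating untrusted bytes with `check_archived_root` and then comparing archived and live sets directly through the `PartialEq` impls above. This assumes rkyv's `validation` feature and uses `i32` keys, which archive to themselves so the `K: Borrow<AK>` bound is satisfied:

```rust
use hashbrown::HashSet;
use rkyv::{
    check_archived_root,
    ser::{serializers::AllocSerializer, Serializer},
};

fn main() {
    let set: HashSet<i32> = [1, 2, 3].into_iter().collect();

    let mut serializer = AllocSerializer::<256>::default();
    serializer.serialize_value(&set).unwrap();
    let bytes = serializer.into_serializer().into_inner();

    // Checks the byte layout before handing out a reference, so this is
    // appropriate for input that did not come from this process.
    let archived = check_archived_root::<HashSet<i32>>(bytes.as_ref())
        .expect("invalid archive");

    // The PartialEq impls above allow a direct comparison.
    assert!(archived == &set);
}
```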
diff --git a/third_party/rust/hashbrown/src/external_trait_impls/rkyv/mod.rs b/third_party/rust/hashbrown/src/external_trait_impls/rkyv/mod.rs
new file mode 100644
index 0000000000..2bde6a0653
--- /dev/null
+++ b/third_party/rust/hashbrown/src/external_trait_impls/rkyv/mod.rs
@@ -0,0 +1,2 @@
+mod hash_map;
+mod hash_set;
diff --git a/third_party/rust/hashbrown/src/external_trait_impls/serde.rs b/third_party/rust/hashbrown/src/external_trait_impls/serde.rs
index 4d62deeb7a..0a76dbec25 100644
--- a/third_party/rust/hashbrown/src/external_trait_impls/serde.rs
+++ b/third_party/rust/hashbrown/src/external_trait_impls/serde.rs
@@ -11,6 +11,7 @@ mod size_hint {
}
mod map {
+ use crate::raw::Allocator;
use core::fmt;
use core::hash::{BuildHasher, Hash};
use core::marker::PhantomData;
@@ -21,11 +22,12 @@ mod map {
use super::size_hint;
- impl<K, V, H> Serialize for HashMap<K, V, H>
+ impl<K, V, H, A> Serialize for HashMap<K, V, H, A>
where
K: Serialize + Eq + Hash,
V: Serialize,
H: BuildHasher,
+ A: Allocator,
{
#[cfg_attr(feature = "inline-more", inline)]
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
@@ -36,40 +38,46 @@ mod map {
}
}
- impl<'de, K, V, S> Deserialize<'de> for HashMap<K, V, S>
+ impl<'de, K, V, S, A> Deserialize<'de> for HashMap<K, V, S, A>
where
K: Deserialize<'de> + Eq + Hash,
V: Deserialize<'de>,
S: BuildHasher + Default,
+ A: Allocator + Default,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
- struct MapVisitor<K, V, S> {
- marker: PhantomData<HashMap<K, V, S>>,
+ struct MapVisitor<K, V, S, A>
+ where
+ A: Allocator,
+ {
+ marker: PhantomData<HashMap<K, V, S, A>>,
}
- impl<'de, K, V, S> Visitor<'de> for MapVisitor<K, V, S>
+ impl<'de, K, V, S, A> Visitor<'de> for MapVisitor<K, V, S, A>
where
K: Deserialize<'de> + Eq + Hash,
V: Deserialize<'de>,
S: BuildHasher + Default,
+ A: Allocator + Default,
{
- type Value = HashMap<K, V, S>;
+ type Value = HashMap<K, V, S, A>;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a map")
}
#[cfg_attr(feature = "inline-more", inline)]
- fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error>
+ fn visit_map<M>(self, mut map: M) -> Result<Self::Value, M::Error>
where
- A: MapAccess<'de>,
+ M: MapAccess<'de>,
{
- let mut values = HashMap::with_capacity_and_hasher(
+ let mut values = HashMap::with_capacity_and_hasher_in(
size_hint::cautious(map.size_hint()),
S::default(),
+ A::default(),
);
while let Some((key, value)) = map.next_entry()? {
@@ -89,6 +97,7 @@ mod map {
}
mod set {
+ use crate::raw::Allocator;
use core::fmt;
use core::hash::{BuildHasher, Hash};
use core::marker::PhantomData;
@@ -99,10 +108,11 @@ mod set {
use super::size_hint;
- impl<T, H> Serialize for HashSet<T, H>
+ impl<T, H, A> Serialize for HashSet<T, H, A>
where
T: Serialize + Eq + Hash,
H: BuildHasher,
+ A: Allocator,
{
#[cfg_attr(feature = "inline-more", inline)]
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
@@ -113,38 +123,44 @@ mod set {
}
}
- impl<'de, T, S> Deserialize<'de> for HashSet<T, S>
+ impl<'de, T, S, A> Deserialize<'de> for HashSet<T, S, A>
where
T: Deserialize<'de> + Eq + Hash,
S: BuildHasher + Default,
+ A: Allocator + Default,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
- struct SeqVisitor<T, S> {
- marker: PhantomData<HashSet<T, S>>,
+ struct SeqVisitor<T, S, A>
+ where
+ A: Allocator,
+ {
+ marker: PhantomData<HashSet<T, S, A>>,
}
- impl<'de, T, S> Visitor<'de> for SeqVisitor<T, S>
+ impl<'de, T, S, A> Visitor<'de> for SeqVisitor<T, S, A>
where
T: Deserialize<'de> + Eq + Hash,
S: BuildHasher + Default,
+ A: Allocator + Default,
{
- type Value = HashSet<T, S>;
+ type Value = HashSet<T, S, A>;
fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
formatter.write_str("a sequence")
}
#[cfg_attr(feature = "inline-more", inline)]
- fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
+ fn visit_seq<M>(self, mut seq: M) -> Result<Self::Value, M::Error>
where
- A: SeqAccess<'de>,
+ M: SeqAccess<'de>,
{
- let mut values = HashSet::with_capacity_and_hasher(
+ let mut values = HashSet::with_capacity_and_hasher_in(
size_hint::cautious(seq.size_hint()),
S::default(),
+ A::default(),
);
while let Some(value) = seq.next_element()? {
@@ -166,12 +182,15 @@ mod set {
where
D: Deserializer<'de>,
{
- struct SeqInPlaceVisitor<'a, T, S>(&'a mut HashSet<T, S>);
+ struct SeqInPlaceVisitor<'a, T, S, A>(&'a mut HashSet<T, S, A>)
+ where
+ A: Allocator;
- impl<'a, 'de, T, S> Visitor<'de> for SeqInPlaceVisitor<'a, T, S>
+ impl<'a, 'de, T, S, A> Visitor<'de> for SeqInPlaceVisitor<'a, T, S, A>
where
T: Deserialize<'de> + Eq + Hash,
S: BuildHasher + Default,
+ A: Allocator,
{
type Value = ();
@@ -180,9 +199,9 @@ mod set {
}
#[cfg_attr(feature = "inline-more", inline)]
- fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
+ fn visit_seq<M>(self, mut seq: M) -> Result<Self::Value, M::Error>
where
- A: SeqAccess<'de>,
+ M: SeqAccess<'de>,
{
self.0.clear();
self.0.reserve(size_hint::cautious(seq.size_hint()));
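With the changes above, the serde impls are generic over the allocator: deserialization now builds the collection with `with_capacity_and_hasher_in(.., S::default(), A::default())` instead of the `Global`-only constructor. With the default `Global` allocator the round trip is unchanged; a hedged sketch (serde_json is just an assumed example format, any serde data format works):

```rust
use hashbrown::HashMap;

fn main() -> Result<(), serde_json::Error> {
    let mut map: HashMap<String, u32> = HashMap::new();
    map.insert(String::from("answer"), 42);

    // Serialize through the impl above, then rebuild the map; the visitor
    // pre-reserves using the cautious size hint before inserting entries.
    let json = serde_json::to_string(&map)?;
    let back: HashMap<String, u32> = serde_json::from_str(&json)?;

    assert_eq!(map, back);
    Ok(())
}
```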
diff --git a/third_party/rust/hashbrown/src/lib.rs b/third_party/rust/hashbrown/src/lib.rs
index bc1c971303..f03ddb6ad9 100644
--- a/third_party/rust/hashbrown/src/lib.rs
+++ b/third_party/rust/hashbrown/src/lib.rs
@@ -20,9 +20,8 @@
extend_one,
allocator_api,
slice_ptr_get,
- nonnull_slice_from_raw_parts,
maybe_uninit_array_assume_init,
- build_hasher_simple_hash_one
+ strict_provenance
)
)]
#![allow(
@@ -37,6 +36,8 @@
)]
#![warn(missing_docs)]
#![warn(rust_2018_idioms)]
+#![cfg_attr(feature = "nightly", warn(fuzzy_provenance_casts))]
+#![cfg_attr(feature = "nightly", allow(internal_features))]
#[cfg(test)]
#[macro_use]
@@ -81,6 +82,7 @@ mod map;
mod rustc_entry;
mod scopeguard;
mod set;
+mod table;
pub mod hash_map {
//! A hash map implemented with quadratic probing and SIMD lookup.
@@ -113,9 +115,63 @@ pub mod hash_set {
pub use crate::external_trait_impls::rayon::set::*;
}
}
+pub mod hash_table {
+ //! A hash table implemented with quadratic probing and SIMD lookup.
+ pub use crate::table::*;
+
+ #[cfg(feature = "rayon")]
+ /// [rayon]-based parallel iterator types for hash tables.
+ /// You will rarely need to interact with it directly unless you need to
+ /// name one of the iterator types.
+ ///
+ /// [rayon]: https://docs.rs/rayon/1.0/rayon
+ pub mod rayon {
+ pub use crate::external_trait_impls::rayon::table::*;
+ }
+}
pub use crate::map::HashMap;
pub use crate::set::HashSet;
+pub use crate::table::HashTable;
+
+#[cfg(feature = "equivalent")]
+pub use equivalent::Equivalent;
+
+// This is only used as a fallback when building as part of `std`.
+#[cfg(not(feature = "equivalent"))]
+/// Key equivalence trait.
+///
+/// This trait defines the function used to compare the input value with the
+/// map keys (or set values) during a lookup operation such as [`HashMap::get`]
+/// or [`HashSet::contains`].
+/// It is provided with a blanket implementation based on the
+/// [`Borrow`](core::borrow::Borrow) trait.
+///
+/// # Correctness
+///
+/// Equivalent values must hash to the same value.
+pub trait Equivalent<K: ?Sized> {
+ /// Checks if this value is equivalent to the given key.
+ ///
+ /// Returns `true` if both values are equivalent, and `false` otherwise.
+ ///
+ /// # Correctness
+ ///
+ /// When this function returns `true`, both `self` and `key` must hash to
+ /// the same value.
+ fn equivalent(&self, key: &K) -> bool;
+}
+
+#[cfg(not(feature = "equivalent"))]
+impl<Q: ?Sized, K: ?Sized> Equivalent<K> for Q
+where
+ Q: Eq,
+ K: core::borrow::Borrow<Q>,
+{
+ fn equivalent(&self, key: &K) -> bool {
+ self == key.borrow()
+ }
+}
/// The error type for `try_reserve` methods.
#[derive(Clone, PartialEq, Eq, Debug)]
@@ -130,21 +186,3 @@ pub enum TryReserveError {
layout: alloc::alloc::Layout,
},
}
-
-/// Wrapper around `Bump` which allows it to be used as an allocator for
-/// `HashMap`, `HashSet` and `RawTable`.
-///
-/// `Bump` can be used directly without this wrapper on nightly if you enable
-/// the `allocator-api` feature of the `bumpalo` crate.
-#[cfg(feature = "bumpalo")]
-#[derive(Clone, Copy, Debug)]
-pub struct BumpWrapper<'a>(pub &'a bumpalo::Bump);
-
-#[cfg(feature = "bumpalo")]
-#[test]
-fn test_bumpalo() {
- use bumpalo::Bump;
- let bump = Bump::new();
- let mut map = HashMap::new_in(BumpWrapper(&bump));
- map.insert(0, 1);
-}
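The `Equivalent` trait introduced above replaces the old `K: Borrow<Q>` bounds used throughout `map.rs` and `set.rs` below, so a lookup only needs a key type that hashes like `K` and can answer equality against it. A sketch of what that enables, with a hypothetical `PairKey` type (an illustration, not part of the diff; it satisfies the stated correctness rule because `&str` and `String` hash identically, so the derived hash of the borrowed pair matches the owned tuple's hash):

```rust
use hashbrown::{Equivalent, HashMap};

// Borrowed view of an owned (String, String) key. Deriving Hash makes it
// hash field by field, exactly as the owned tuple does.
#[derive(Hash)]
struct PairKey<'a>(&'a str, &'a str);

impl Equivalent<(String, String)> for PairKey<'_> {
    fn equivalent(&self, key: &(String, String)) -> bool {
        self.0 == key.0 && self.1 == key.1
    }
}

fn main() {
    let mut map: HashMap<(String, String), i32> = HashMap::new();
    map.insert((String::from("a"), String::from("b")), 1);

    // Look up by a borrowed view, without allocating owned Strings.
    assert_eq!(map.get(&PairKey("a", "b")), Some(&1));
}
```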
diff --git a/third_party/rust/hashbrown/src/macros.rs b/third_party/rust/hashbrown/src/macros.rs
index f8ef917b14..eaba6bed1f 100644
--- a/third_party/rust/hashbrown/src/macros.rs
+++ b/third_party/rust/hashbrown/src/macros.rs
@@ -37,7 +37,7 @@ macro_rules! cfg_if {
// semicolon is all the remaining items
(@__items ($($not:meta,)*) ; ) => {};
(@__items ($($not:meta,)*) ; ( ($($m:meta),*) ($($it:item)*) ), $($rest:tt)*) => {
- // Emit all items within one block, applying an approprate #[cfg]. The
+ // Emit all items within one block, applying an appropriate #[cfg]. The
// #[cfg] will require all `$m` matchers specified and must also negate
// all previous matchers.
cfg_if! { @__apply cfg(all($($m,)* not(any($($not),*)))), $($it)* }
diff --git a/third_party/rust/hashbrown/src/map.rs b/third_party/rust/hashbrown/src/map.rs
index a5d3ccb97e..88a826582b 100644
--- a/third_party/rust/hashbrown/src/map.rs
+++ b/third_party/rust/hashbrown/src/map.rs
@@ -1,16 +1,18 @@
-use crate::raw::{Allocator, Bucket, Global, RawDrain, RawIntoIter, RawIter, RawTable};
-use crate::TryReserveError;
+use crate::raw::{
+ Allocator, Bucket, Global, RawDrain, RawExtractIf, RawIntoIter, RawIter, RawTable,
+};
+use crate::{Equivalent, TryReserveError};
use core::borrow::Borrow;
use core::fmt::{self, Debug};
use core::hash::{BuildHasher, Hash};
-use core::iter::{FromIterator, FusedIterator};
+use core::iter::FusedIterator;
use core::marker::PhantomData;
use core::mem;
use core::ops::Index;
/// Default hasher for `HashMap`.
#[cfg(feature = "ahash")]
-pub type DefaultHashBuilder = ahash::RandomState;
+pub type DefaultHashBuilder = core::hash::BuildHasherDefault<ahash::AHasher>;
/// Dummy default hasher for `HashMap`.
#[cfg(not(feature = "ahash"))]
@@ -182,10 +184,10 @@ pub enum DefaultHashBuilder {}
/// use hashbrown::HashMap;
///
/// let timber_resources: HashMap<&str, i32> = [("Norway", 100), ("Denmark", 50), ("Iceland", 10)]
-/// .iter().cloned().collect();
+/// .into_iter().collect();
/// // use the values stored in map
/// ```
-pub struct HashMap<K, V, S = DefaultHashBuilder, A: Allocator + Clone = Global> {
+pub struct HashMap<K, V, S = DefaultHashBuilder, A: Allocator = Global> {
pub(crate) hash_builder: S,
pub(crate) table: RawTable<(K, V), A>,
}
@@ -209,13 +211,12 @@ impl<K: Clone, V: Clone, S: Clone, A: Allocator + Clone> Clone for HashMap<K, V,
/// Ensures that a single closure type is used across uses of this, which in turn
/// prevents multiple instances of any functions like RawTable::reserve from being generated.
#[cfg_attr(feature = "inline-more", inline)]
-pub(crate) fn make_hasher<K, Q, V, S>(hash_builder: &S) -> impl Fn(&(Q, V)) -> u64 + '_
+pub(crate) fn make_hasher<Q, V, S>(hash_builder: &S) -> impl Fn(&(Q, V)) -> u64 + '_
where
- K: Borrow<Q>,
Q: Hash,
S: BuildHasher,
{
- move |val| make_hash::<K, Q, S>(hash_builder, &val.0)
+ move |val| make_hash::<Q, S>(hash_builder, &val.0)
}
/// Ensures that a single closure type is used across uses of this, which in turn prevents multiple
@@ -223,10 +224,9 @@ where
#[cfg_attr(feature = "inline-more", inline)]
fn equivalent_key<Q, K, V>(k: &Q) -> impl Fn(&(K, V)) -> bool + '_
where
- K: Borrow<Q>,
- Q: ?Sized + Eq,
+ Q: ?Sized + Equivalent<K>,
{
- move |x| k.eq(x.0.borrow())
+ move |x| k.equivalent(&x.0)
}
/// Ensures that a single closure type is used across uses of this, which in turn prevents multiple
@@ -234,17 +234,15 @@ where
#[cfg_attr(feature = "inline-more", inline)]
fn equivalent<Q, K>(k: &Q) -> impl Fn(&K) -> bool + '_
where
- K: Borrow<Q>,
- Q: ?Sized + Eq,
+ Q: ?Sized + Equivalent<K>,
{
- move |x| k.eq(x.borrow())
+ move |x| k.equivalent(x)
}
#[cfg(not(feature = "nightly"))]
#[cfg_attr(feature = "inline-more", inline)]
-pub(crate) fn make_hash<K, Q, S>(hash_builder: &S, val: &Q) -> u64
+pub(crate) fn make_hash<Q, S>(hash_builder: &S, val: &Q) -> u64
where
- K: Borrow<Q>,
Q: Hash + ?Sized,
S: BuildHasher,
{
@@ -256,38 +254,14 @@ where
#[cfg(feature = "nightly")]
#[cfg_attr(feature = "inline-more", inline)]
-pub(crate) fn make_hash<K, Q, S>(hash_builder: &S, val: &Q) -> u64
+pub(crate) fn make_hash<Q, S>(hash_builder: &S, val: &Q) -> u64
where
- K: Borrow<Q>,
Q: Hash + ?Sized,
S: BuildHasher,
{
hash_builder.hash_one(val)
}
-#[cfg(not(feature = "nightly"))]
-#[cfg_attr(feature = "inline-more", inline)]
-pub(crate) fn make_insert_hash<K, S>(hash_builder: &S, val: &K) -> u64
-where
- K: Hash,
- S: BuildHasher,
-{
- use core::hash::Hasher;
- let mut state = hash_builder.build_hasher();
- val.hash(&mut state);
- state.finish()
-}
-
-#[cfg(feature = "nightly")]
-#[cfg_attr(feature = "inline-more", inline)]
-pub(crate) fn make_insert_hash<K, S>(hash_builder: &S, val: &K) -> u64
-where
- K: Hash,
- S: BuildHasher,
-{
- hash_builder.hash_one(val)
-}
-
#[cfg(feature = "ahash")]
impl<K, V> HashMap<K, V, DefaultHashBuilder> {
/// Creates an empty `HashMap`.
@@ -295,6 +269,18 @@ impl<K, V> HashMap<K, V, DefaultHashBuilder> {
/// The hash map is initially created with a capacity of 0, so it will not allocate until it
/// is first inserted into.
///
+ /// # HashDoS resistance
+ ///
+ /// The `hash_builder` normally uses a fixed key by default, which does not
+ /// protect the `HashMap` against attacks such as [`HashDoS`].
+ /// Users who require HashDoS resistance should explicitly use
+ /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`]
+ /// as the hasher when creating a [`HashMap`], for example with the
+ /// [`with_hasher`](HashMap::with_hasher) method.
+ ///
+ /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack
+ /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html
+ ///
/// # Examples
///
/// ```
@@ -313,6 +299,18 @@ impl<K, V> HashMap<K, V, DefaultHashBuilder> {
/// The hash map will be able to hold at least `capacity` elements without
/// reallocating. If `capacity` is 0, the hash map will not allocate.
///
+ /// # HashDoS resistance
+ ///
+ /// The `hash_builder` normally uses a fixed key by default, which does not
+ /// protect the `HashMap` against attacks such as [`HashDoS`].
+ /// Users who require HashDoS resistance should explicitly use
+ /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`]
+ /// as the hasher when creating a [`HashMap`], for example with the
+ /// [`with_capacity_and_hasher`](HashMap::with_capacity_and_hasher) method.
+ ///
+ /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack
+ /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html
+ ///
/// # Examples
///
/// ```
@@ -328,11 +326,46 @@ impl<K, V> HashMap<K, V, DefaultHashBuilder> {
}
#[cfg(feature = "ahash")]
-impl<K, V, A: Allocator + Clone> HashMap<K, V, DefaultHashBuilder, A> {
+impl<K, V, A: Allocator> HashMap<K, V, DefaultHashBuilder, A> {
/// Creates an empty `HashMap` using the given allocator.
///
/// The hash map is initially created with a capacity of 0, so it will not allocate until it
/// is first inserted into.
+ ///
+ /// # HashDoS resistance
+ ///
+ /// The `hash_builder` normally uses a fixed key by default, which does not
+ /// protect the `HashMap` against attacks such as [`HashDoS`].
+ /// Users who require HashDoS resistance should explicitly use
+ /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`]
+ /// as the hasher when creating a [`HashMap`], for example with the
+ /// [`with_hasher_in`](HashMap::with_hasher_in) method.
+ ///
+ /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack
+ /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use hashbrown::HashMap;
+ /// use bumpalo::Bump;
+ ///
+ /// let bump = Bump::new();
+ /// let mut map = HashMap::new_in(&bump);
+ ///
+ /// // The created HashMap holds no elements
+ /// assert_eq!(map.len(), 0);
+ ///
+ /// // The created HashMap also doesn't allocate memory
+ /// assert_eq!(map.capacity(), 0);
+ ///
+ /// // Now we insert an element into the created HashMap
+ /// map.insert("One", 1);
+ /// // We can see that the HashMap holds 1 element
+ /// assert_eq!(map.len(), 1);
+ /// // And it also allocates some capacity
+ /// assert!(map.capacity() > 1);
+ /// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn new_in(alloc: A) -> Self {
Self::with_hasher_in(DefaultHashBuilder::default(), alloc)
@@ -342,6 +375,46 @@ impl<K, V, A: Allocator + Clone> HashMap<K, V, DefaultHashBuilder, A> {
///
/// The hash map will be able to hold at least `capacity` elements without
/// reallocating. If `capacity` is 0, the hash map will not allocate.
+ ///
+ /// # HashDoS resistance
+ ///
+ /// The `hash_builder` normally uses a fixed key by default, which does not
+ /// protect the `HashMap` against attacks such as [`HashDoS`].
+ /// Users who require HashDoS resistance should explicitly use
+ /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`]
+ /// as the hasher when creating a [`HashMap`], for example with the
+ /// [`with_capacity_and_hasher_in`](HashMap::with_capacity_and_hasher_in) method.
+ ///
+ /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack
+ /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use hashbrown::HashMap;
+ /// use bumpalo::Bump;
+ ///
+ /// let bump = Bump::new();
+ /// let mut map = HashMap::with_capacity_in(5, &bump);
+ ///
+ /// // The created HashMap holds no elements
+ /// assert_eq!(map.len(), 0);
+ /// // But it can hold at least 5 elements without reallocating
+ /// let empty_map_capacity = map.capacity();
+ /// assert!(empty_map_capacity >= 5);
+ ///
+ /// // Now we insert 5 elements into the created HashMap
+ /// map.insert("One", 1);
+ /// map.insert("Two", 2);
+ /// map.insert("Three", 3);
+ /// map.insert("Four", 4);
+ /// map.insert("Five", 5);
+ ///
+ /// // We can see that the HashMap holds 5 elements
+ /// assert_eq!(map.len(), 5);
+ /// // But its capacity isn't changed
+ /// assert_eq!(map.capacity(), empty_map_capacity)
+ /// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
Self::with_capacity_and_hasher_in(capacity, DefaultHashBuilder::default(), alloc)
@@ -355,14 +428,21 @@ impl<K, V, S> HashMap<K, V, S> {
/// The hash map is initially created with a capacity of 0, so it will not
/// allocate until it is first inserted into.
///
- /// Warning: `hash_builder` is normally randomly generated, and
- /// is designed to allow HashMaps to be resistant to attacks that
- /// cause many collisions and very poor performance. Setting it
- /// manually using this function can expose a DoS attack vector.
+ /// # HashDoS resistance
+ ///
+ /// The `hash_builder` normally uses a fixed key by default, which does not
+ /// protect the `HashMap` against attacks such as [`HashDoS`].
+ /// Users who require HashDoS resistance should explicitly use
+ /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`]
+ /// as the hasher when creating a [`HashMap`].
///
/// The `hash_builder` passed should implement the [`BuildHasher`] trait for
/// the HashMap to be useful; see its documentation for details.
///
+ /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack
+ /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html
+ /// [`BuildHasher`]: https://doc.rust-lang.org/std/hash/trait.BuildHasher.html
+ ///
/// # Examples
///
/// ```
@@ -376,8 +456,6 @@ impl<K, V, S> HashMap<K, V, S> {
///
/// map.insert(1, 2);
/// ```
- ///
- /// [`BuildHasher`]: https://doc.rust-lang.org/std/hash/trait.BuildHasher.html
#[cfg_attr(feature = "inline-more", inline)]
pub const fn with_hasher(hash_builder: S) -> Self {
Self {
@@ -392,14 +470,21 @@ impl<K, V, S> HashMap<K, V, S> {
/// The hash map will be able to hold at least `capacity` elements without
/// reallocating. If `capacity` is 0, the hash map will not allocate.
///
- /// Warning: `hash_builder` is normally randomly generated, and
- /// is designed to allow HashMaps to be resistant to attacks that
- /// cause many collisions and very poor performance. Setting it
- /// manually using this function can expose a DoS attack vector.
+ /// # HashDoS resistance
+ ///
+ /// The `hash_builder` normally uses a fixed key by default, which does not
+ /// protect the `HashMap` against attacks such as [`HashDoS`].
+ /// Users who require HashDoS resistance should explicitly use
+ /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`]
+ /// as the hasher when creating a [`HashMap`].
///
/// The `hash_builder` passed should implement the [`BuildHasher`] trait for
/// the HashMap to be useful; see its documentation for details.
///
+ /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack
+ /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html
+ /// [`BuildHasher`]: https://doc.rust-lang.org/std/hash/trait.BuildHasher.html
+ ///
/// # Examples
///
/// ```
@@ -413,8 +498,6 @@ impl<K, V, S> HashMap<K, V, S> {
///
/// map.insert(1, 2);
/// ```
- ///
- /// [`BuildHasher`]: https://doc.rust-lang.org/std/hash/trait.BuildHasher.html
#[cfg_attr(feature = "inline-more", inline)]
pub fn with_capacity_and_hasher(capacity: usize, hash_builder: S) -> Self {
Self {
@@ -424,7 +507,7 @@ impl<K, V, S> HashMap<K, V, S> {
}
}
-impl<K, V, S, A: Allocator + Clone> HashMap<K, V, S, A> {
+impl<K, V, S, A: Allocator> HashMap<K, V, S, A> {
/// Returns a reference to the underlying allocator.
#[inline]
pub fn allocator(&self) -> &A {
@@ -434,12 +517,19 @@ impl<K, V, S, A: Allocator + Clone> HashMap<K, V, S, A> {
/// Creates an empty `HashMap` which will use the given hash builder to hash
/// keys. It will be allocated with the given allocator.
///
- /// The created map has the default initial capacity.
+ /// The hash map is initially created with a capacity of 0, so it will not allocate until it
+ /// is first inserted into.
+ ///
+ /// # HashDoS resistance
+ ///
+ /// The `hash_builder` normally uses a fixed key by default, which does not
+ /// protect the `HashMap` against attacks such as [`HashDoS`].
+ /// Users who require HashDoS resistance should explicitly use
+ /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`]
+ /// as the hasher when creating a [`HashMap`].
///
- /// Warning: `hash_builder` is normally randomly generated, and
- /// is designed to allow HashMaps to be resistant to attacks that
- /// cause many collisions and very poor performance. Setting it
- /// manually using this function can expose a DoS attack vector.
+ /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack
+ /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html
///
/// # Examples
///
@@ -452,7 +542,7 @@ impl<K, V, S, A: Allocator + Clone> HashMap<K, V, S, A> {
/// map.insert(1, 2);
/// ```
#[cfg_attr(feature = "inline-more", inline)]
- pub fn with_hasher_in(hash_builder: S, alloc: A) -> Self {
+ pub const fn with_hasher_in(hash_builder: S, alloc: A) -> Self {
Self {
hash_builder,
table: RawTable::new_in(alloc),
@@ -465,10 +555,16 @@ impl<K, V, S, A: Allocator + Clone> HashMap<K, V, S, A> {
/// The hash map will be able to hold at least `capacity` elements without
/// reallocating. If `capacity` is 0, the hash map will not allocate.
///
- /// Warning: `hash_builder` is normally randomly generated, and
- /// is designed to allow HashMaps to be resistant to attacks that
- /// cause many collisions and very poor performance. Setting it
- /// manually using this function can expose a DoS attack vector.
+ /// # HashDoS resistance
+ ///
+ /// The `hash_builder` normally uses a fixed key by default, which does not
+ /// protect the `HashMap` against attacks such as [`HashDoS`].
+ /// Users who require HashDoS resistance should explicitly use
+ /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`]
+ /// as the hasher when creating a [`HashMap`].
+ ///
+ /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack
+ /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html
///
/// # Examples
///
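A short sketch of the HashDoS guidance repeated in these new doc sections: pass a randomly seeded hasher explicitly instead of relying on the fixed-key default (`ahash::RandomState` is used here; `std::collections::hash_map::RandomState` works the same way):

```rust
use hashbrown::HashMap;

fn main() {
    // RandomState::new() seeds itself from entropy, so hash values differ
    // per process and collision-heavy inputs cannot be precomputed.
    let mut map: HashMap<&str, i32, ahash::RandomState> =
        HashMap::with_hasher(ahash::RandomState::new());

    map.insert("one", 1);
    assert_eq!(map.get("one"), Some(&1));
}
```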
@@ -810,14 +906,11 @@ impl<K, V, S, A: Allocator + Clone> HashMap<K, V, S, A> {
///
/// let mut map: HashMap<i32, i32> = (0..8).map(|x|(x, x*10)).collect();
/// assert_eq!(map.len(), 8);
- /// let capacity_before_retain = map.capacity();
///
/// map.retain(|&k, _| k % 2 == 0);
///
/// // We can see that the number of elements inside the map has changed.
/// assert_eq!(map.len(), 4);
- /// // But map capacity is equal to old one.
- /// assert_eq!(map.capacity(), capacity_before_retain);
///
/// let mut vec: Vec<(i32, i32)> = map.iter().map(|(&k, &v)| (k, v)).collect();
/// vec.sort_unstable();
@@ -844,26 +937,25 @@ impl<K, V, S, A: Allocator + Clone> HashMap<K, V, S, A> {
/// In other words, move all pairs `(k, v)` such that `f(&k, &mut v)` returns `true` out
/// into another iterator.
///
- /// Note that `drain_filter` lets you mutate every value in the filter closure, regardless of
+ /// Note that `extract_if` lets you mutate every value in the filter closure, regardless of
/// whether you choose to keep or remove it.
///
- /// When the returned DrainedFilter is dropped, any remaining elements that satisfy
- /// the predicate are dropped from the table.
- ///
- /// It is unspecified how many more elements will be subjected to the closure
- /// if a panic occurs in the closure, or a panic occurs while dropping an element,
- /// or if the `DrainFilter` value is leaked.
+ /// If the returned `ExtractIf` is not exhausted, e.g. because it is dropped without iterating
+ /// or the iteration short-circuits, then the remaining elements will be retained.
+ /// Use [`retain()`] with a negated predicate if you do not need the returned iterator.
///
/// Keeps the allocated memory for reuse.
///
+ /// [`retain()`]: HashMap::retain
+ ///
/// # Examples
///
/// ```
/// use hashbrown::HashMap;
///
/// let mut map: HashMap<i32, i32> = (0..8).map(|x| (x, x)).collect();
- /// let capacity_before_drain_filter = map.capacity();
- /// let drained: HashMap<i32, i32> = map.drain_filter(|k, _v| k % 2 == 0).collect();
+ ///
+ /// let drained: HashMap<i32, i32> = map.extract_if(|k, _v| k % 2 == 0).collect();
///
/// let mut evens = drained.keys().cloned().collect::<Vec<_>>();
/// let mut odds = map.keys().cloned().collect::<Vec<_>>();
@@ -872,27 +964,24 @@ impl<K, V, S, A: Allocator + Clone> HashMap<K, V, S, A> {
///
/// assert_eq!(evens, vec![0, 2, 4, 6]);
/// assert_eq!(odds, vec![1, 3, 5, 7]);
- /// // Map capacity is equal to old one.
- /// assert_eq!(map.capacity(), capacity_before_drain_filter);
///
/// let mut map: HashMap<i32, i32> = (0..8).map(|x| (x, x)).collect();
///
/// { // Iterator is dropped without being consumed.
- /// let d = map.drain_filter(|k, _v| k % 2 != 0);
+ /// let d = map.extract_if(|k, _v| k % 2 != 0);
/// }
///
- /// // But the map lens have been reduced by half
- /// // even if we do not use DrainFilter iterator.
- /// assert_eq!(map.len(), 4);
+ /// // ExtractIf was not exhausted, therefore no elements were drained.
+ /// assert_eq!(map.len(), 8);
/// ```
#[cfg_attr(feature = "inline-more", inline)]
- pub fn drain_filter<F>(&mut self, f: F) -> DrainFilter<'_, K, V, F, A>
+ pub fn extract_if<F>(&mut self, f: F) -> ExtractIf<'_, K, V, F, A>
where
F: FnMut(&K, &mut V) -> bool,
{
- DrainFilter {
+ ExtractIf {
f,
- inner: DrainFilterInner {
+ inner: RawExtractIf {
iter: unsafe { self.table.iter() },
table: &mut self.table,
},
@@ -984,7 +1073,7 @@ impl<K, V, S, A> HashMap<K, V, S, A>
where
K: Eq + Hash,
S: BuildHasher,
- A: Allocator + Clone,
+ A: Allocator,
{
/// Reserves capacity for at least `additional` more elements to be inserted
/// in the `HashMap`. The collection may reserve more space to avoid
@@ -992,9 +1081,12 @@ where
///
/// # Panics
///
- /// Panics if the new allocation size overflows [`usize`].
+ /// Panics if the new capacity exceeds [`isize::MAX`] bytes, and [`abort`]s the
+ /// program in case of an allocation error. Use [`try_reserve`](HashMap::try_reserve)
+ /// instead if you want to handle memory allocation failure.
///
- /// [`usize`]: https://doc.rust-lang.org/std/primitive.usize.html
+ /// [`isize::MAX`]: https://doc.rust-lang.org/std/primitive.isize.html
+ /// [`abort`]: https://doc.rust-lang.org/alloc/alloc/fn.handle_alloc_error.html
///
/// # Examples
///
@@ -1012,7 +1104,7 @@ where
#[cfg_attr(feature = "inline-more", inline)]
pub fn reserve(&mut self, additional: usize) {
self.table
- .reserve(additional, make_hasher::<K, _, V, S>(&self.hash_builder));
+ .reserve(additional, make_hasher::<_, V, S>(&self.hash_builder));
}
/// Tries to reserve capacity for at least `additional` more elements to be inserted
@@ -1062,7 +1154,7 @@ where
#[cfg_attr(feature = "inline-more", inline)]
pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
self.table
- .try_reserve(additional, make_hasher::<K, _, V, S>(&self.hash_builder))
+ .try_reserve(additional, make_hasher::<_, V, S>(&self.hash_builder))
}
/// Shrinks the capacity of the map as much as possible. It will drop
@@ -1084,7 +1176,7 @@ where
#[cfg_attr(feature = "inline-more", inline)]
pub fn shrink_to_fit(&mut self) {
self.table
- .shrink_to(0, make_hasher::<K, _, V, S>(&self.hash_builder));
+ .shrink_to(0, make_hasher::<_, V, S>(&self.hash_builder));
}
/// Shrinks the capacity of the map with a lower limit. It will drop
@@ -1113,7 +1205,7 @@ where
#[cfg_attr(feature = "inline-more", inline)]
pub fn shrink_to(&mut self, min_capacity: usize) {
self.table
- .shrink_to(min_capacity, make_hasher::<K, _, V, S>(&self.hash_builder));
+ .shrink_to(min_capacity, make_hasher::<_, V, S>(&self.hash_builder));
}
/// Gets the given key's corresponding entry in the map for in-place manipulation.
@@ -1137,7 +1229,7 @@ where
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn entry(&mut self, key: K) -> Entry<'_, K, V, S, A> {
- let hash = make_insert_hash::<K, S>(&self.hash_builder, &key);
+ let hash = make_hash::<K, S>(&self.hash_builder, &key);
if let Some(elem) = self.table.find(hash, equivalent_key(&key)) {
Entry::Occupied(OccupiedEntry {
hash,
@@ -1174,10 +1266,9 @@ where
#[cfg_attr(feature = "inline-more", inline)]
pub fn entry_ref<'a, 'b, Q: ?Sized>(&'a mut self, key: &'b Q) -> EntryRef<'a, 'b, K, Q, V, S, A>
where
- K: Borrow<Q>,
- Q: Hash + Eq,
+ Q: Hash + Equivalent<K>,
{
- let hash = make_hash::<K, Q, S>(&self.hash_builder, key);
+ let hash = make_hash::<Q, S>(&self.hash_builder, key);
if let Some(elem) = self.table.find(hash, equivalent_key(key)) {
EntryRef::Occupied(OccupiedEntryRef {
hash,
@@ -1216,12 +1307,11 @@ where
#[inline]
pub fn get<Q: ?Sized>(&self, k: &Q) -> Option<&V>
where
- K: Borrow<Q>,
- Q: Hash + Eq,
+ Q: Hash + Equivalent<K>,
{
// Avoid `Option::map` because it bloats LLVM IR.
match self.get_inner(k) {
- Some(&(_, ref v)) => Some(v),
+ Some((_, v)) => Some(v),
None => None,
}
}
@@ -1248,12 +1338,11 @@ where
#[inline]
pub fn get_key_value<Q: ?Sized>(&self, k: &Q) -> Option<(&K, &V)>
where
- K: Borrow<Q>,
- Q: Hash + Eq,
+ Q: Hash + Equivalent<K>,
{
// Avoid `Option::map` because it bloats LLVM IR.
match self.get_inner(k) {
- Some(&(ref key, ref value)) => Some((key, value)),
+ Some((key, value)) => Some((key, value)),
None => None,
}
}
@@ -1261,13 +1350,12 @@ where
#[inline]
fn get_inner<Q: ?Sized>(&self, k: &Q) -> Option<&(K, V)>
where
- K: Borrow<Q>,
- Q: Hash + Eq,
+ Q: Hash + Equivalent<K>,
{
if self.table.is_empty() {
None
} else {
- let hash = make_hash::<K, Q, S>(&self.hash_builder, k);
+ let hash = make_hash::<Q, S>(&self.hash_builder, k);
self.table.get(hash, equivalent_key(k))
}
}
@@ -1298,8 +1386,7 @@ where
#[inline]
pub fn get_key_value_mut<Q: ?Sized>(&mut self, k: &Q) -> Option<(&K, &mut V)>
where
- K: Borrow<Q>,
- Q: Hash + Eq,
+ Q: Hash + Equivalent<K>,
{
// Avoid `Option::map` because it bloats LLVM IR.
match self.get_inner_mut(k) {
@@ -1330,8 +1417,7 @@ where
#[cfg_attr(feature = "inline-more", inline)]
pub fn contains_key<Q: ?Sized>(&self, k: &Q) -> bool
where
- K: Borrow<Q>,
- Q: Hash + Eq,
+ Q: Hash + Equivalent<K>,
{
self.get_inner(k).is_some()
}
@@ -1362,8 +1448,7 @@ where
#[cfg_attr(feature = "inline-more", inline)]
pub fn get_mut<Q: ?Sized>(&mut self, k: &Q) -> Option<&mut V>
where
- K: Borrow<Q>,
- Q: Hash + Eq,
+ Q: Hash + Equivalent<K>,
{
// Avoid `Option::map` because it bloats LLVM IR.
match self.get_inner_mut(k) {
@@ -1375,13 +1460,12 @@ where
#[inline]
fn get_inner_mut<Q: ?Sized>(&mut self, k: &Q) -> Option<&mut (K, V)>
where
- K: Borrow<Q>,
- Q: Hash + Eq,
+ Q: Hash + Equivalent<K>,
{
if self.table.is_empty() {
None
} else {
- let hash = make_hash::<K, Q, S>(&self.hash_builder, k);
+ let hash = make_hash::<Q, S>(&self.hash_builder, k);
self.table.get_mut(hash, equivalent_key(k))
}
}
@@ -1431,8 +1515,7 @@ where
/// ```
pub fn get_many_mut<Q: ?Sized, const N: usize>(&mut self, ks: [&Q; N]) -> Option<[&'_ mut V; N]>
where
- K: Borrow<Q>,
- Q: Hash + Eq,
+ Q: Hash + Equivalent<K>,
{
self.get_many_mut_inner(ks).map(|res| res.map(|(_, v)| v))
}
@@ -1487,8 +1570,7 @@ where
ks: [&Q; N],
) -> Option<[&'_ mut V; N]>
where
- K: Borrow<Q>,
- Q: Hash + Eq,
+ Q: Hash + Equivalent<K>,
{
self.get_many_unchecked_mut_inner(ks)
.map(|res| res.map(|(_, v)| v))
@@ -1543,8 +1625,7 @@ where
ks: [&Q; N],
) -> Option<[(&'_ K, &'_ mut V); N]>
where
- K: Borrow<Q>,
- Q: Hash + Eq,
+ Q: Hash + Equivalent<K>,
{
self.get_many_mut_inner(ks)
.map(|res| res.map(|(k, v)| (&*k, v)))
@@ -1599,8 +1680,7 @@ where
ks: [&Q; N],
) -> Option<[(&'_ K, &'_ mut V); N]>
where
- K: Borrow<Q>,
- Q: Hash + Eq,
+ Q: Hash + Equivalent<K>,
{
self.get_many_unchecked_mut_inner(ks)
.map(|res| res.map(|(k, v)| (&*k, v)))
@@ -1611,12 +1691,11 @@ where
ks: [&Q; N],
) -> Option<[&'_ mut (K, V); N]>
where
- K: Borrow<Q>,
- Q: Hash + Eq,
+ Q: Hash + Equivalent<K>,
{
let hashes = self.build_hashes_inner(ks);
self.table
- .get_many_mut(hashes, |i, (k, _)| ks[i].eq(k.borrow()))
+ .get_many_mut(hashes, |i, (k, _)| ks[i].equivalent(k))
}
unsafe fn get_many_unchecked_mut_inner<Q: ?Sized, const N: usize>(
@@ -1624,22 +1703,20 @@ where
ks: [&Q; N],
) -> Option<[&'_ mut (K, V); N]>
where
- K: Borrow<Q>,
- Q: Hash + Eq,
+ Q: Hash + Equivalent<K>,
{
let hashes = self.build_hashes_inner(ks);
self.table
- .get_many_unchecked_mut(hashes, |i, (k, _)| ks[i].eq(k.borrow()))
+ .get_many_unchecked_mut(hashes, |i, (k, _)| ks[i].equivalent(k))
}
fn build_hashes_inner<Q: ?Sized, const N: usize>(&self, ks: [&Q; N]) -> [u64; N]
where
- K: Borrow<Q>,
- Q: Hash + Eq,
+ Q: Hash + Equivalent<K>,
{
let mut hashes = [0_u64; N];
for i in 0..N {
- hashes[i] = make_hash::<K, Q, S>(&self.hash_builder, ks[i]);
+ hashes[i] = make_hash::<Q, S>(&self.hash_builder, ks[i]);
}
hashes
}
@@ -1672,13 +1749,19 @@ where
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn insert(&mut self, k: K, v: V) -> Option<V> {
- let hash = make_insert_hash::<K, S>(&self.hash_builder, &k);
- if let Some((_, item)) = self.table.get_mut(hash, equivalent_key(&k)) {
- Some(mem::replace(item, v))
- } else {
- self.table
- .insert(hash, (k, v), make_hasher::<K, _, V, S>(&self.hash_builder));
- None
+ let hash = make_hash::<K, S>(&self.hash_builder, &k);
+ let hasher = make_hasher::<_, V, S>(&self.hash_builder);
+ match self
+ .table
+ .find_or_find_insert_slot(hash, equivalent_key(&k), hasher)
+ {
+ Ok(bucket) => Some(mem::replace(unsafe { &mut bucket.as_mut().1 }, v)),
+ Err(slot) => {
+ unsafe {
+ self.table.insert_in_slot(hash, slot, (k, v));
+ }
+ None
+ }
}
}
@@ -1733,10 +1816,10 @@ where
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn insert_unique_unchecked(&mut self, k: K, v: V) -> (&K, &mut V) {
- let hash = make_insert_hash::<K, S>(&self.hash_builder, &k);
+ let hash = make_hash::<K, S>(&self.hash_builder, &k);
let bucket = self
.table
- .insert(hash, (k, v), make_hasher::<K, _, V, S>(&self.hash_builder));
+ .insert(hash, (k, v), make_hasher::<_, V, S>(&self.hash_builder));
let (k_ref, v_ref) = unsafe { bucket.as_mut() };
(k_ref, v_ref)
}
@@ -1801,19 +1884,17 @@ where
/// assert!(map.is_empty() && map.capacity() == 0);
///
/// map.insert(1, "a");
- /// let capacity_before_remove = map.capacity();
///
/// assert_eq!(map.remove(&1), Some("a"));
/// assert_eq!(map.remove(&1), None);
///
- /// // Now map holds none elements but capacity is equal to the old one
- /// assert!(map.len() == 0 && map.capacity() == capacity_before_remove);
+ /// // Now the map holds no elements
+ /// assert!(map.is_empty());
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn remove<Q: ?Sized>(&mut self, k: &Q) -> Option<V>
where
- K: Borrow<Q>,
- Q: Hash + Eq,
+ Q: Hash + Equivalent<K>,
{
// Avoid `Option::map` because it bloats LLVM IR.
match self.remove_entry(k) {
@@ -1842,26 +1923,24 @@ where
/// assert!(map.is_empty() && map.capacity() == 0);
///
/// map.insert(1, "a");
- /// let capacity_before_remove = map.capacity();
///
/// assert_eq!(map.remove_entry(&1), Some((1, "a")));
/// assert_eq!(map.remove(&1), None);
///
- /// // Now map hold none elements but capacity is equal to the old one
- /// assert!(map.len() == 0 && map.capacity() == capacity_before_remove);
+ /// // Now the map holds no elements
+ /// assert!(map.is_empty());
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn remove_entry<Q: ?Sized>(&mut self, k: &Q) -> Option<(K, V)>
where
- K: Borrow<Q>,
- Q: Hash + Eq,
+ Q: Hash + Equivalent<K>,
{
- let hash = make_hash::<K, Q, S>(&self.hash_builder, k);
+ let hash = make_hash::<Q, S>(&self.hash_builder, k);
self.table.remove_entry(hash, equivalent_key(k))
}
}
-impl<K, V, S, A: Allocator + Clone> HashMap<K, V, S, A> {
+impl<K, V, S, A: Allocator> HashMap<K, V, S, A> {
/// Creates a raw entry builder for the HashMap.
///
/// Raw entries provide the lowest level of control for searching and
@@ -2013,19 +2092,31 @@ impl<K, V, S, A: Allocator + Clone> HashMap<K, V, S, A> {
RawEntryBuilder { map: self }
}
+ /// Returns a reference to the [`RawTable`] used underneath [`HashMap`].
+ /// This function is only available if the `raw` feature of the crate is enabled.
+ ///
+ /// See [`raw_table_mut`] for more.
+ ///
+ /// [`raw_table_mut`]: Self::raw_table_mut
+ #[cfg(feature = "raw")]
+ #[cfg_attr(feature = "inline-more", inline)]
+ pub fn raw_table(&self) -> &RawTable<(K, V), A> {
+ &self.table
+ }
+
/// Returns a mutable reference to the [`RawTable`] used underneath [`HashMap`].
/// This function is only available if the `raw` feature of the crate is enabled.
///
/// # Note
///
- /// Calling the function safe, but using raw hash table API's may require
+ /// Calling this function is safe, but using the raw hash table API may require
/// unsafe functions or blocks.
///
/// The `RawTable` API gives the lowest level of control over the map, which can be
/// useful for extending the HashMap's API, but may lead to *[undefined behavior]*.
///
/// [`HashMap`]: struct.HashMap.html
- /// [`RawTable`]: raw/struct.RawTable.html
+ /// [`RawTable`]: crate::raw::RawTable
/// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
///
/// # Examples
@@ -2049,9 +2140,9 @@ impl<K, V, S, A: Allocator + Clone> HashMap<K, V, S, A> {
/// where
/// F: Fn(&(K, V)) -> bool,
/// {
- /// let raw_table = map.raw_table();
+ /// let raw_table = map.raw_table_mut();
/// match raw_table.find(hash, is_match) {
- /// Some(bucket) => Some(unsafe { raw_table.remove(bucket) }),
+ /// Some(bucket) => Some(unsafe { raw_table.remove(bucket).0 }),
/// None => None,
/// }
/// }
@@ -2070,7 +2161,7 @@ impl<K, V, S, A: Allocator + Clone> HashMap<K, V, S, A> {
/// ```
#[cfg(feature = "raw")]
#[cfg_attr(feature = "inline-more", inline)]
- pub fn raw_table(&mut self) -> &mut RawTable<(K, V), A> {
+ pub fn raw_table_mut(&mut self) -> &mut RawTable<(K, V), A> {
&mut self.table
}
}
@@ -2080,7 +2171,7 @@ where
K: Eq + Hash,
V: PartialEq,
S: BuildHasher,
- A: Allocator + Clone,
+ A: Allocator,
{
fn eq(&self, other: &Self) -> bool {
if self.len() != other.len() {
@@ -2097,7 +2188,7 @@ where
K: Eq + Hash,
V: Eq,
S: BuildHasher,
- A: Allocator + Clone,
+ A: Allocator,
{
}
@@ -2105,7 +2196,7 @@ impl<K, V, S, A> Debug for HashMap<K, V, S, A>
where
K: Debug,
V: Debug,
- A: Allocator + Clone,
+ A: Allocator,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_map().entries(self.iter()).finish()
@@ -2115,7 +2206,7 @@ where
impl<K, V, S, A> Default for HashMap<K, V, S, A>
where
S: Default,
- A: Default + Allocator + Clone,
+ A: Default + Allocator,
{
/// Creates an empty `HashMap<K, V, S, A>`, with the `Default` value for the hasher and allocator.
///
@@ -2140,10 +2231,10 @@ where
impl<K, Q: ?Sized, V, S, A> Index<&Q> for HashMap<K, V, S, A>
where
- K: Eq + Hash + Borrow<Q>,
- Q: Eq + Hash,
+ K: Eq + Hash,
+ Q: Hash + Equivalent<K>,
S: BuildHasher,
- A: Allocator + Clone,
+ A: Allocator,
{
type Output = V;
@@ -2174,7 +2265,7 @@ where
impl<K, V, A, const N: usize> From<[(K, V); N]> for HashMap<K, V, DefaultHashBuilder, A>
where
K: Eq + Hash,
- A: Default + Allocator + Clone,
+ A: Default + Allocator,
{
/// # Examples
///
@@ -2319,11 +2410,11 @@ impl<K, V> IterMut<'_, K, V> {
/// assert_eq!(iter.next(), None);
/// assert_eq!(iter.next(), None);
/// ```
-pub struct IntoIter<K, V, A: Allocator + Clone = Global> {
+pub struct IntoIter<K, V, A: Allocator = Global> {
inner: RawIntoIter<(K, V), A>,
}
-impl<K, V, A: Allocator + Clone> IntoIter<K, V, A> {
+impl<K, V, A: Allocator> IntoIter<K, V, A> {
/// Returns an iterator of references over the remaining items.
#[cfg_attr(feature = "inline-more", inline)]
pub(super) fn iter(&self) -> Iter<'_, K, V> {
@@ -2363,11 +2454,11 @@ impl<K, V, A: Allocator + Clone> IntoIter<K, V, A> {
/// assert_eq!(keys.next(), None);
/// assert_eq!(keys.next(), None);
/// ```
-pub struct IntoKeys<K, V, A: Allocator + Clone = Global> {
+pub struct IntoKeys<K, V, A: Allocator = Global> {
inner: IntoIter<K, V, A>,
}
-impl<K, V, A: Allocator + Clone> Iterator for IntoKeys<K, V, A> {
+impl<K, V, A: Allocator> Iterator for IntoKeys<K, V, A> {
type Item = K;
#[inline]
@@ -2378,18 +2469,26 @@ impl<K, V, A: Allocator + Clone> Iterator for IntoKeys<K, V, A> {
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
+ #[inline]
+ fn fold<B, F>(self, init: B, mut f: F) -> B
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> B,
+ {
+ self.inner.fold(init, |acc, (k, _)| f(acc, k))
+ }
}
-impl<K, V, A: Allocator + Clone> ExactSizeIterator for IntoKeys<K, V, A> {
+impl<K, V, A: Allocator> ExactSizeIterator for IntoKeys<K, V, A> {
#[inline]
fn len(&self) -> usize {
self.inner.len()
}
}
-impl<K, V, A: Allocator + Clone> FusedIterator for IntoKeys<K, V, A> {}
+impl<K, V, A: Allocator> FusedIterator for IntoKeys<K, V, A> {}
-impl<K: Debug, V: Debug, A: Allocator + Clone> fmt::Debug for IntoKeys<K, V, A> {
+impl<K: Debug, V: Debug, A: Allocator> fmt::Debug for IntoKeys<K, V, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_list()
.entries(self.inner.iter().map(|(k, _)| k))
@@ -2425,11 +2524,11 @@ impl<K: Debug, V: Debug, A: Allocator + Clone> fmt::Debug for IntoKeys<K, V, A>
/// assert_eq!(values.next(), None);
/// assert_eq!(values.next(), None);
/// ```
-pub struct IntoValues<K, V, A: Allocator + Clone = Global> {
+pub struct IntoValues<K, V, A: Allocator = Global> {
inner: IntoIter<K, V, A>,
}
-impl<K, V, A: Allocator + Clone> Iterator for IntoValues<K, V, A> {
+impl<K, V, A: Allocator> Iterator for IntoValues<K, V, A> {
type Item = V;
#[inline]
@@ -2440,18 +2539,26 @@ impl<K, V, A: Allocator + Clone> Iterator for IntoValues<K, V, A> {
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
+ #[inline]
+ fn fold<B, F>(self, init: B, mut f: F) -> B
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> B,
+ {
+ self.inner.fold(init, |acc, (_, v)| f(acc, v))
+ }
}
-impl<K, V, A: Allocator + Clone> ExactSizeIterator for IntoValues<K, V, A> {
+impl<K, V, A: Allocator> ExactSizeIterator for IntoValues<K, V, A> {
#[inline]
fn len(&self) -> usize {
self.inner.len()
}
}
-impl<K, V, A: Allocator + Clone> FusedIterator for IntoValues<K, V, A> {}
+impl<K, V, A: Allocator> FusedIterator for IntoValues<K, V, A> {}
-impl<K, V: Debug, A: Allocator + Clone> fmt::Debug for IntoValues<K, V, A> {
+impl<K, V: Debug, A: Allocator> fmt::Debug for IntoValues<K, V, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_list()
.entries(self.inner.iter().map(|(_, v)| v))
@@ -2583,11 +2690,11 @@ impl<K, V: Debug> fmt::Debug for Values<'_, K, V> {
/// assert_eq!(drain_iter.next(), None);
/// assert_eq!(drain_iter.next(), None);
/// ```
-pub struct Drain<'a, K, V, A: Allocator + Clone = Global> {
+pub struct Drain<'a, K, V, A: Allocator = Global> {
inner: RawDrain<'a, (K, V), A>,
}
-impl<K, V, A: Allocator + Clone> Drain<'_, K, V, A> {
+impl<K, V, A: Allocator> Drain<'_, K, V, A> {
/// Returns an iterator of references over the remaining items.
#[cfg_attr(feature = "inline-more", inline)]
pub(super) fn iter(&self) -> Iter<'_, K, V> {
@@ -2601,10 +2708,10 @@ impl<K, V, A: Allocator + Clone> Drain<'_, K, V, A> {
/// A draining iterator over entries of a `HashMap` which don't satisfy the predicate
/// `f(&k, &mut v)` in arbitrary order. The iterator element type is `(K, V)`.
///
-/// This `struct` is created by the [`drain_filter`] method on [`HashMap`]. See its
+/// This `struct` is created by the [`extract_if`] method on [`HashMap`]. See its
/// documentation for more.
///
-/// [`drain_filter`]: struct.HashMap.html#method.drain_filter
+/// [`extract_if`]: struct.HashMap.html#method.extract_if
/// [`HashMap`]: struct.HashMap.html
///
/// # Examples
@@ -2614,63 +2721,40 @@ impl<K, V, A: Allocator + Clone> Drain<'_, K, V, A> {
///
/// let mut map: HashMap<i32, &str> = [(1, "a"), (2, "b"), (3, "c")].into();
///
-/// let mut drain_filter = map.drain_filter(|k, _v| k % 2 != 0);
-/// let mut vec = vec![drain_filter.next(), drain_filter.next()];
+/// let mut extract_if = map.extract_if(|k, _v| k % 2 != 0);
+/// let mut vec = vec![extract_if.next(), extract_if.next()];
///
-/// // The `DrainFilter` iterator produces items in arbitrary order, so the
+/// // The `ExtractIf` iterator produces items in arbitrary order, so the
/// // items must be sorted to test them against a sorted array.
/// vec.sort_unstable();
/// assert_eq!(vec, [Some((1, "a")),Some((3, "c"))]);
///
/// // It is a fused iterator
-/// assert_eq!(drain_filter.next(), None);
-/// assert_eq!(drain_filter.next(), None);
-/// drop(drain_filter);
+/// assert_eq!(extract_if.next(), None);
+/// assert_eq!(extract_if.next(), None);
+/// drop(extract_if);
///
/// assert_eq!(map.len(), 1);
/// ```
-pub struct DrainFilter<'a, K, V, F, A: Allocator + Clone = Global>
+#[must_use = "Iterators are lazy unless consumed"]
+pub struct ExtractIf<'a, K, V, F, A: Allocator = Global>
where
F: FnMut(&K, &mut V) -> bool,
{
f: F,
- inner: DrainFilterInner<'a, K, V, A>,
-}
-
-impl<'a, K, V, F, A> Drop for DrainFilter<'a, K, V, F, A>
-where
- F: FnMut(&K, &mut V) -> bool,
- A: Allocator + Clone,
-{
- #[cfg_attr(feature = "inline-more", inline)]
- fn drop(&mut self) {
- while let Some(item) = self.next() {
- let guard = ConsumeAllOnDrop(self);
- drop(item);
- mem::forget(guard);
- }
- }
-}
-
-pub(super) struct ConsumeAllOnDrop<'a, T: Iterator>(pub &'a mut T);
-
-impl<T: Iterator> Drop for ConsumeAllOnDrop<'_, T> {
- #[cfg_attr(feature = "inline-more", inline)]
- fn drop(&mut self) {
- self.0.for_each(drop);
- }
+ inner: RawExtractIf<'a, (K, V), A>,
}
-impl<K, V, F, A> Iterator for DrainFilter<'_, K, V, F, A>
+impl<K, V, F, A> Iterator for ExtractIf<'_, K, V, F, A>
where
F: FnMut(&K, &mut V) -> bool,
- A: Allocator + Clone,
+ A: Allocator,
{
type Item = (K, V);
#[cfg_attr(feature = "inline-more", inline)]
fn next(&mut self) -> Option<Self::Item> {
- self.inner.next(&mut self.f)
+ self.inner.next(|&mut (ref k, ref mut v)| (self.f)(k, v))
}
#[inline]
@@ -2679,31 +2763,7 @@ where
}
}
-impl<K, V, F> FusedIterator for DrainFilter<'_, K, V, F> where F: FnMut(&K, &mut V) -> bool {}
-
-/// Portions of `DrainFilter` shared with `set::DrainFilter`
-pub(super) struct DrainFilterInner<'a, K, V, A: Allocator + Clone> {
- pub iter: RawIter<(K, V)>,
- pub table: &'a mut RawTable<(K, V), A>,
-}
-
-impl<K, V, A: Allocator + Clone> DrainFilterInner<'_, K, V, A> {
- #[cfg_attr(feature = "inline-more", inline)]
- pub(super) fn next<F>(&mut self, f: &mut F) -> Option<(K, V)>
- where
- F: FnMut(&K, &mut V) -> bool,
- {
- unsafe {
- for item in &mut self.iter {
- let &mut (ref key, ref mut value) = item.as_mut();
- if f(key, value) {
- return Some(self.table.remove(item));
- }
- }
- }
- None
- }
-}
+impl<K, V, F> FusedIterator for ExtractIf<'_, K, V, F> where F: FnMut(&K, &mut V) -> bool {}
/// A mutable iterator over the values of a `HashMap` in arbitrary order.
/// The iterator element type is `&'a mut V`.
@@ -2791,7 +2851,7 @@ pub struct ValuesMut<'a, K, V> {
///
/// assert_eq!(map.len(), 6);
/// ```
-pub struct RawEntryBuilderMut<'a, K, V, S, A: Allocator + Clone = Global> {
+pub struct RawEntryBuilderMut<'a, K, V, S, A: Allocator = Global> {
map: &'a mut HashMap<K, V, S, A>,
}
@@ -2879,7 +2939,7 @@ pub struct RawEntryBuilderMut<'a, K, V, S, A: Allocator + Clone = Global> {
/// vec.sort_unstable();
/// assert_eq!(vec, [('a', 10), ('b', 20), ('c', 30), ('d', 40), ('e', 50), ('f', 60)]);
/// ```
-pub enum RawEntryMut<'a, K, V, S, A: Allocator + Clone = Global> {
+pub enum RawEntryMut<'a, K, V, S, A: Allocator = Global> {
/// An occupied entry.
///
/// # Examples
@@ -2970,7 +3030,7 @@ pub enum RawEntryMut<'a, K, V, S, A: Allocator + Clone = Global> {
/// assert_eq!(map.get(&"b"), None);
/// assert_eq!(map.len(), 1);
/// ```
-pub struct RawOccupiedEntryMut<'a, K, V, S, A: Allocator + Clone = Global> {
+pub struct RawOccupiedEntryMut<'a, K, V, S, A: Allocator = Global> {
elem: Bucket<(K, V)>,
table: &'a mut RawTable<(K, V), A>,
hash_builder: &'a S,
@@ -2981,7 +3041,7 @@ where
K: Send,
V: Send,
S: Send,
- A: Send + Allocator + Clone,
+ A: Send + Allocator,
{
}
unsafe impl<K, V, S, A> Sync for RawOccupiedEntryMut<'_, K, V, S, A>
@@ -2989,7 +3049,7 @@ where
K: Sync,
V: Sync,
S: Sync,
- A: Sync + Allocator + Clone,
+ A: Sync + Allocator,
{
}
@@ -3041,7 +3101,7 @@ where
/// }
/// assert!(map[&"c"] == 30 && map.len() == 3);
/// ```
-pub struct RawVacantEntryMut<'a, K, V, S, A: Allocator + Clone = Global> {
+pub struct RawVacantEntryMut<'a, K, V, S, A: Allocator = Global> {
table: &'a mut RawTable<(K, V), A>,
hash_builder: &'a S,
}
@@ -3080,11 +3140,11 @@ pub struct RawVacantEntryMut<'a, K, V, S, A: Allocator + Clone = Global> {
/// assert_eq!(map.raw_entry().from_key_hashed_nocheck(hash, &k), kv);
/// }
/// ```
-pub struct RawEntryBuilder<'a, K, V, S, A: Allocator + Clone = Global> {
+pub struct RawEntryBuilder<'a, K, V, S, A: Allocator = Global> {
map: &'a HashMap<K, V, S, A>,
}
-impl<'a, K, V, S, A: Allocator + Clone> RawEntryBuilderMut<'a, K, V, S, A> {
+impl<'a, K, V, S, A: Allocator> RawEntryBuilderMut<'a, K, V, S, A> {
/// Creates a `RawEntryMut` from the given key.
///
/// # Examples
@@ -3103,10 +3163,9 @@ impl<'a, K, V, S, A: Allocator + Clone> RawEntryBuilderMut<'a, K, V, S, A> {
pub fn from_key<Q: ?Sized>(self, k: &Q) -> RawEntryMut<'a, K, V, S, A>
where
S: BuildHasher,
- K: Borrow<Q>,
- Q: Hash + Eq,
+ Q: Hash + Equivalent<K>,
{
- let hash = make_hash::<K, Q, S>(&self.map.hash_builder, k);
+ let hash = make_hash::<Q, S>(&self.map.hash_builder, k);
self.from_key_hashed_nocheck(hash, k)
}
@@ -3136,14 +3195,13 @@ impl<'a, K, V, S, A: Allocator + Clone> RawEntryBuilderMut<'a, K, V, S, A> {
#[allow(clippy::wrong_self_convention)]
pub fn from_key_hashed_nocheck<Q: ?Sized>(self, hash: u64, k: &Q) -> RawEntryMut<'a, K, V, S, A>
where
- K: Borrow<Q>,
- Q: Eq,
+ Q: Equivalent<K>,
{
self.from_hash(hash, equivalent(k))
}
}
-impl<'a, K, V, S, A: Allocator + Clone> RawEntryBuilderMut<'a, K, V, S, A> {
+impl<'a, K, V, S, A: Allocator> RawEntryBuilderMut<'a, K, V, S, A> {
/// Creates a `RawEntryMut` from the given hash and matching function.
///
/// # Examples
@@ -3194,7 +3252,7 @@ impl<'a, K, V, S, A: Allocator + Clone> RawEntryBuilderMut<'a, K, V, S, A> {
}
}
-impl<'a, K, V, S, A: Allocator + Clone> RawEntryBuilder<'a, K, V, S, A> {
+impl<'a, K, V, S, A: Allocator> RawEntryBuilder<'a, K, V, S, A> {
/// Access an immutable entry by key.
///
/// # Examples
@@ -3211,10 +3269,9 @@ impl<'a, K, V, S, A: Allocator + Clone> RawEntryBuilder<'a, K, V, S, A> {
pub fn from_key<Q: ?Sized>(self, k: &Q) -> Option<(&'a K, &'a V)>
where
S: BuildHasher,
- K: Borrow<Q>,
- Q: Hash + Eq,
+ Q: Hash + Equivalent<K>,
{
- let hash = make_hash::<K, Q, S>(&self.map.hash_builder, k);
+ let hash = make_hash::<Q, S>(&self.map.hash_builder, k);
self.from_key_hashed_nocheck(hash, k)
}
@@ -3242,8 +3299,7 @@ impl<'a, K, V, S, A: Allocator + Clone> RawEntryBuilder<'a, K, V, S, A> {
#[allow(clippy::wrong_self_convention)]
pub fn from_key_hashed_nocheck<Q: ?Sized>(self, hash: u64, k: &Q) -> Option<(&'a K, &'a V)>
where
- K: Borrow<Q>,
- Q: Eq,
+ Q: Equivalent<K>,
{
self.from_hash(hash, equivalent(k))
}
@@ -3254,7 +3310,7 @@ impl<'a, K, V, S, A: Allocator + Clone> RawEntryBuilder<'a, K, V, S, A> {
F: FnMut(&K) -> bool,
{
match self.map.table.get(hash, |(k, _)| is_match(k)) {
- Some(&(ref key, ref value)) => Some((key, value)),
+ Some((key, value)) => Some((key, value)),
None => None,
}
}
@@ -3289,7 +3345,7 @@ impl<'a, K, V, S, A: Allocator + Clone> RawEntryBuilder<'a, K, V, S, A> {
}
}
-impl<'a, K, V, S, A: Allocator + Clone> RawEntryMut<'a, K, V, S, A> {
+impl<'a, K, V, S, A: Allocator> RawEntryMut<'a, K, V, S, A> {
/// Sets the value of the entry, and returns a RawOccupiedEntryMut.
///
/// # Examples
@@ -3483,7 +3539,7 @@ impl<'a, K, V, S, A: Allocator + Clone> RawEntryMut<'a, K, V, S, A> {
}
}
-impl<'a, K, V, S, A: Allocator + Clone> RawOccupiedEntryMut<'a, K, V, S, A> {
+impl<'a, K, V, S, A: Allocator> RawOccupiedEntryMut<'a, K, V, S, A> {
/// Gets a reference to the key in the entry.
///
/// # Examples
@@ -3650,7 +3706,7 @@ impl<'a, K, V, S, A: Allocator + Clone> RawOccupiedEntryMut<'a, K, V, S, A> {
#[cfg_attr(feature = "inline-more", inline)]
pub fn get_key_value(&self) -> (&K, &V) {
unsafe {
- let &(ref key, ref value) = self.elem.as_ref();
+ let (key, value) = self.elem.as_ref();
(key, value)
}
}
@@ -3822,7 +3878,7 @@ impl<'a, K, V, S, A: Allocator + Clone> RawOccupiedEntryMut<'a, K, V, S, A> {
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn remove_entry(self) -> (K, V) {
- unsafe { self.table.remove(self.elem) }
+ unsafe { self.table.remove(self.elem).0 }
}
/// Provides shared access to the key and owned access to the value of
@@ -3882,7 +3938,7 @@ impl<'a, K, V, S, A: Allocator + Clone> RawOccupiedEntryMut<'a, K, V, S, A> {
}
}
-impl<'a, K, V, S, A: Allocator + Clone> RawVacantEntryMut<'a, K, V, S, A> {
+impl<'a, K, V, S, A: Allocator> RawVacantEntryMut<'a, K, V, S, A> {
/// Sets the value of the entry with the VacantEntry's key,
/// and returns a mutable reference to it.
///
@@ -3906,7 +3962,7 @@ impl<'a, K, V, S, A: Allocator + Clone> RawVacantEntryMut<'a, K, V, S, A> {
K: Hash,
S: BuildHasher,
{
- let hash = make_insert_hash::<K, S>(self.hash_builder, &key);
+ let hash = make_hash::<K, S>(self.hash_builder, &key);
self.insert_hashed_nocheck(hash, key, value)
}
@@ -3950,7 +4006,7 @@ impl<'a, K, V, S, A: Allocator + Clone> RawVacantEntryMut<'a, K, V, S, A> {
let &mut (ref mut k, ref mut v) = self.table.insert_entry(
hash,
(key, value),
- make_hasher::<K, _, V, S>(self.hash_builder),
+ make_hasher::<_, V, S>(self.hash_builder),
);
(k, v)
}
@@ -4014,11 +4070,11 @@ impl<'a, K, V, S, A: Allocator + Clone> RawVacantEntryMut<'a, K, V, S, A> {
K: Hash,
S: BuildHasher,
{
- let hash = make_insert_hash::<K, S>(self.hash_builder, &key);
+ let hash = make_hash::<K, S>(self.hash_builder, &key);
let elem = self.table.insert(
hash,
(key, value),
- make_hasher::<K, _, V, S>(self.hash_builder),
+ make_hasher::<_, V, S>(self.hash_builder),
);
RawOccupiedEntryMut {
elem,
@@ -4028,13 +4084,13 @@ impl<'a, K, V, S, A: Allocator + Clone> RawVacantEntryMut<'a, K, V, S, A> {
}
}
-impl<K, V, S, A: Allocator + Clone> Debug for RawEntryBuilderMut<'_, K, V, S, A> {
+impl<K, V, S, A: Allocator> Debug for RawEntryBuilderMut<'_, K, V, S, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("RawEntryBuilder").finish()
}
}
-impl<K: Debug, V: Debug, S, A: Allocator + Clone> Debug for RawEntryMut<'_, K, V, S, A> {
+impl<K: Debug, V: Debug, S, A: Allocator> Debug for RawEntryMut<'_, K, V, S, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
RawEntryMut::Vacant(ref v) => f.debug_tuple("RawEntry").field(v).finish(),
@@ -4043,7 +4099,7 @@ impl<K: Debug, V: Debug, S, A: Allocator + Clone> Debug for RawEntryMut<'_, K, V
}
}
-impl<K: Debug, V: Debug, S, A: Allocator + Clone> Debug for RawOccupiedEntryMut<'_, K, V, S, A> {
+impl<K: Debug, V: Debug, S, A: Allocator> Debug for RawOccupiedEntryMut<'_, K, V, S, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("RawOccupiedEntryMut")
.field("key", self.key())
@@ -4052,13 +4108,13 @@ impl<K: Debug, V: Debug, S, A: Allocator + Clone> Debug for RawOccupiedEntryMut<
}
}
-impl<K, V, S, A: Allocator + Clone> Debug for RawVacantEntryMut<'_, K, V, S, A> {
+impl<K, V, S, A: Allocator> Debug for RawVacantEntryMut<'_, K, V, S, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("RawVacantEntryMut").finish()
}
}
-impl<K, V, S, A: Allocator + Clone> Debug for RawEntryBuilder<'_, K, V, S, A> {
+impl<K, V, S, A: Allocator> Debug for RawEntryBuilder<'_, K, V, S, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("RawEntryBuilder").finish()
}
@@ -4109,7 +4165,7 @@ impl<K, V, S, A: Allocator + Clone> Debug for RawEntryBuilder<'_, K, V, S, A> {
/// ```
pub enum Entry<'a, K, V, S, A = Global>
where
- A: Allocator + Clone,
+ A: Allocator,
{
/// An occupied entry.
///
@@ -4142,7 +4198,7 @@ where
Vacant(VacantEntry<'a, K, V, S, A>),
}
-impl<K: Debug, V: Debug, S, A: Allocator + Clone> Debug for Entry<'_, K, V, S, A> {
+impl<K: Debug, V: Debug, S, A: Allocator> Debug for Entry<'_, K, V, S, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
Entry::Vacant(ref v) => f.debug_tuple("Entry").field(v).finish(),
@@ -4191,7 +4247,7 @@ impl<K: Debug, V: Debug, S, A: Allocator + Clone> Debug for Entry<'_, K, V, S, A
/// assert_eq!(map.get(&"c"), None);
/// assert_eq!(map.len(), 2);
/// ```
-pub struct OccupiedEntry<'a, K, V, S, A: Allocator + Clone = Global> {
+pub struct OccupiedEntry<'a, K, V, S = DefaultHashBuilder, A: Allocator = Global> {
hash: u64,
key: Option<K>,
elem: Bucket<(K, V)>,
@@ -4203,7 +4259,7 @@ where
K: Send,
V: Send,
S: Send,
- A: Send + Allocator + Clone,
+ A: Send + Allocator,
{
}
unsafe impl<K, V, S, A> Sync for OccupiedEntry<'_, K, V, S, A>
@@ -4211,11 +4267,11 @@ where
K: Sync,
V: Sync,
S: Sync,
- A: Sync + Allocator + Clone,
+ A: Sync + Allocator,
{
}
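
The new `S = DefaultHashBuilder` default parameter (also added to `VacantEntry` below) means entry types can be named without spelling out the hasher. A small sketch, assuming the default hasher feature is enabled:

```
use hashbrown::hash_map::{Entry, HashMap, OccupiedEntry};

fn occupied<'a>(
    map: &'a mut HashMap<&'static str, i32>,
) -> Option<OccupiedEntry<'a, &'static str, i32>> {
    // `S` and `A` fall back to `DefaultHashBuilder` and `Global` in the
    // return type instead of being written out.
    match map.entry("key") {
        Entry::Occupied(o) => Some(o),
        Entry::Vacant(_) => None,
    }
}
```
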
-impl<K: Debug, V: Debug, S, A: Allocator + Clone> Debug for OccupiedEntry<'_, K, V, S, A> {
+impl<K: Debug, V: Debug, S, A: Allocator> Debug for OccupiedEntry<'_, K, V, S, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("OccupiedEntry")
.field("key", self.key())
@@ -4254,13 +4310,13 @@ impl<K: Debug, V: Debug, S, A: Allocator + Clone> Debug for OccupiedEntry<'_, K,
/// }
/// assert!(map[&"b"] == 20 && map.len() == 2);
/// ```
-pub struct VacantEntry<'a, K, V, S, A: Allocator + Clone = Global> {
+pub struct VacantEntry<'a, K, V, S = DefaultHashBuilder, A: Allocator = Global> {
hash: u64,
key: K,
table: &'a mut HashMap<K, V, S, A>,
}
-impl<K: Debug, V, S, A: Allocator + Clone> Debug for VacantEntry<'_, K, V, S, A> {
+impl<K: Debug, V, S, A: Allocator> Debug for VacantEntry<'_, K, V, S, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("VacantEntry").field(self.key()).finish()
}
@@ -4320,7 +4376,7 @@ impl<K: Debug, V, S, A: Allocator + Clone> Debug for VacantEntry<'_, K, V, S, A>
/// ```
pub enum EntryRef<'a, 'b, K, Q: ?Sized, V, S, A = Global>
where
- A: Allocator + Clone,
+ A: Allocator,
{
/// An occupied entry.
///
@@ -4353,7 +4409,7 @@ where
Vacant(VacantEntryRef<'a, 'b, K, Q, V, S, A>),
}
-impl<K: Borrow<Q>, Q: ?Sized + Debug, V: Debug, S, A: Allocator + Clone> Debug
+impl<K: Borrow<Q>, Q: ?Sized + Debug, V: Debug, S, A: Allocator> Debug
for EntryRef<'_, '_, K, Q, V, S, A>
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
@@ -4431,7 +4487,7 @@ impl<'a, K: Borrow<Q>, Q: ?Sized> AsRef<Q> for KeyOrRef<'a, K, Q> {
/// assert_eq!(map.get("c"), None);
/// assert_eq!(map.len(), 2);
/// ```
-pub struct OccupiedEntryRef<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone = Global> {
+pub struct OccupiedEntryRef<'a, 'b, K, Q: ?Sized, V, S, A: Allocator = Global> {
hash: u64,
key: Option<KeyOrRef<'b, K, Q>>,
elem: Bucket<(K, V)>,
@@ -4444,7 +4500,7 @@ where
Q: Sync + ?Sized,
V: Send,
S: Send,
- A: Send + Allocator + Clone,
+ A: Send + Allocator,
{
}
unsafe impl<'a, 'b, K, Q, V, S, A> Sync for OccupiedEntryRef<'a, 'b, K, Q, V, S, A>
@@ -4453,16 +4509,16 @@ where
Q: Sync + ?Sized,
V: Sync,
S: Sync,
- A: Sync + Allocator + Clone,
+ A: Sync + Allocator,
{
}
-impl<K: Borrow<Q>, Q: ?Sized + Debug, V: Debug, S, A: Allocator + Clone> Debug
+impl<K: Borrow<Q>, Q: ?Sized + Debug, V: Debug, S, A: Allocator> Debug
for OccupiedEntryRef<'_, '_, K, Q, V, S, A>
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("OccupiedEntryRef")
- .field("key", &self.key())
+ .field("key", &self.key().borrow())
.field("value", &self.get())
.finish()
}
@@ -4498,13 +4554,13 @@ impl<K: Borrow<Q>, Q: ?Sized + Debug, V: Debug, S, A: Allocator + Clone> Debug
/// }
/// assert!(map["b"] == 20 && map.len() == 2);
/// ```
-pub struct VacantEntryRef<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone = Global> {
+pub struct VacantEntryRef<'a, 'b, K, Q: ?Sized, V, S, A: Allocator = Global> {
hash: u64,
key: KeyOrRef<'b, K, Q>,
table: &'a mut HashMap<K, V, S, A>,
}
-impl<K: Borrow<Q>, Q: ?Sized + Debug, V, S, A: Allocator + Clone> Debug
+impl<K: Borrow<Q>, Q: ?Sized + Debug, V, S, A: Allocator> Debug
for VacantEntryRef<'_, '_, K, Q, V, S, A>
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
@@ -4536,14 +4592,14 @@ impl<K: Borrow<Q>, Q: ?Sized + Debug, V, S, A: Allocator + Clone> Debug
/// }
/// assert_eq!(map[&"a"], 100);
/// ```
-pub struct OccupiedError<'a, K, V, S, A: Allocator + Clone = Global> {
+pub struct OccupiedError<'a, K, V, S, A: Allocator = Global> {
/// The entry in the map that was already occupied.
pub entry: OccupiedEntry<'a, K, V, S, A>,
/// The value which was not inserted, because the entry was already occupied.
pub value: V,
}
-impl<K: Debug, V: Debug, S, A: Allocator + Clone> Debug for OccupiedError<'_, K, V, S, A> {
+impl<K: Debug, V: Debug, S, A: Allocator> Debug for OccupiedError<'_, K, V, S, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("OccupiedError")
.field("key", self.entry.key())
@@ -4553,9 +4609,7 @@ impl<K: Debug, V: Debug, S, A: Allocator + Clone> Debug for OccupiedError<'_, K,
}
}
-impl<'a, K: Debug, V: Debug, S, A: Allocator + Clone> fmt::Display
- for OccupiedError<'a, K, V, S, A>
-{
+impl<'a, K: Debug, V: Debug, S, A: Allocator> fmt::Display for OccupiedError<'a, K, V, S, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
@@ -4567,7 +4621,7 @@ impl<'a, K: Debug, V: Debug, S, A: Allocator + Clone> fmt::Display
}
}
-impl<'a, K, V, S, A: Allocator + Clone> IntoIterator for &'a HashMap<K, V, S, A> {
+impl<'a, K, V, S, A: Allocator> IntoIterator for &'a HashMap<K, V, S, A> {
type Item = (&'a K, &'a V);
type IntoIter = Iter<'a, K, V>;
@@ -4599,7 +4653,7 @@ impl<'a, K, V, S, A: Allocator + Clone> IntoIterator for &'a HashMap<K, V, S, A>
}
}
-impl<'a, K, V, S, A: Allocator + Clone> IntoIterator for &'a mut HashMap<K, V, S, A> {
+impl<'a, K, V, S, A: Allocator> IntoIterator for &'a mut HashMap<K, V, S, A> {
type Item = (&'a K, &'a mut V);
type IntoIter = IterMut<'a, K, V>;
@@ -4636,7 +4690,7 @@ impl<'a, K, V, S, A: Allocator + Clone> IntoIterator for &'a mut HashMap<K, V, S
}
}
-impl<K, V, S, A: Allocator + Clone> IntoIterator for HashMap<K, V, S, A> {
+impl<K, V, S, A: Allocator> IntoIterator for HashMap<K, V, S, A> {
type Item = (K, V);
type IntoIter = IntoIter<K, V, A>;
@@ -4684,6 +4738,17 @@ impl<'a, K, V> Iterator for Iter<'a, K, V> {
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
+ #[cfg_attr(feature = "inline-more", inline)]
+ fn fold<B, F>(self, init: B, mut f: F) -> B
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> B,
+ {
+ self.inner.fold(init, |acc, x| unsafe {
+ let (k, v) = x.as_ref();
+ f(acc, (k, v))
+ })
+ }
}
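
This manual `fold` forwards to the raw-table iterator, so fold-based consumers skip the per-item `next` machinery; the same specialization is added to the other iterators below. A minimal sketch of a caller that benefits, with no new API assumed:

```
use hashbrown::HashMap;

fn total(map: &HashMap<&str, u64>) -> u64 {
    // `fold` runs the specialized loop added above; adapters built on
    // top of it (`sum`, `for_each`, ...) benefit the same way.
    map.iter().fold(0, |acc, (_k, v)| acc + v)
}
```
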
impl<K, V> ExactSizeIterator for Iter<'_, K, V> {
#[cfg_attr(feature = "inline-more", inline)]
@@ -4712,6 +4777,17 @@ impl<'a, K, V> Iterator for IterMut<'a, K, V> {
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
+ #[cfg_attr(feature = "inline-more", inline)]
+ fn fold<B, F>(self, init: B, mut f: F) -> B
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> B,
+ {
+ self.inner.fold(init, |acc, x| unsafe {
+ let (k, v) = x.as_mut();
+ f(acc, (k, v))
+ })
+ }
}
impl<K, V> ExactSizeIterator for IterMut<'_, K, V> {
#[cfg_attr(feature = "inline-more", inline)]
@@ -4731,7 +4807,7 @@ where
}
}
-impl<K, V, A: Allocator + Clone> Iterator for IntoIter<K, V, A> {
+impl<K, V, A: Allocator> Iterator for IntoIter<K, V, A> {
type Item = (K, V);
#[cfg_attr(feature = "inline-more", inline)]
@@ -4742,16 +4818,24 @@ impl<K, V, A: Allocator + Clone> Iterator for IntoIter<K, V, A> {
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
+ #[cfg_attr(feature = "inline-more", inline)]
+ fn fold<B, F>(self, init: B, f: F) -> B
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> B,
+ {
+ self.inner.fold(init, f)
+ }
}
-impl<K, V, A: Allocator + Clone> ExactSizeIterator for IntoIter<K, V, A> {
+impl<K, V, A: Allocator> ExactSizeIterator for IntoIter<K, V, A> {
#[cfg_attr(feature = "inline-more", inline)]
fn len(&self) -> usize {
self.inner.len()
}
}
-impl<K, V, A: Allocator + Clone> FusedIterator for IntoIter<K, V, A> {}
+impl<K, V, A: Allocator> FusedIterator for IntoIter<K, V, A> {}
-impl<K: Debug, V: Debug, A: Allocator + Clone> fmt::Debug for IntoIter<K, V, A> {
+impl<K: Debug, V: Debug, A: Allocator> fmt::Debug for IntoIter<K, V, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_list().entries(self.iter()).finish()
}
@@ -4772,6 +4856,14 @@ impl<'a, K, V> Iterator for Keys<'a, K, V> {
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
+ #[cfg_attr(feature = "inline-more", inline)]
+ fn fold<B, F>(self, init: B, mut f: F) -> B
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> B,
+ {
+ self.inner.fold(init, |acc, (k, _)| f(acc, k))
+ }
}
impl<K, V> ExactSizeIterator for Keys<'_, K, V> {
#[cfg_attr(feature = "inline-more", inline)]
@@ -4796,6 +4888,14 @@ impl<'a, K, V> Iterator for Values<'a, K, V> {
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
+ #[cfg_attr(feature = "inline-more", inline)]
+ fn fold<B, F>(self, init: B, mut f: F) -> B
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> B,
+ {
+ self.inner.fold(init, |acc, (_, v)| f(acc, v))
+ }
}
impl<K, V> ExactSizeIterator for Values<'_, K, V> {
#[cfg_attr(feature = "inline-more", inline)]
@@ -4820,6 +4920,14 @@ impl<'a, K, V> Iterator for ValuesMut<'a, K, V> {
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
+ #[cfg_attr(feature = "inline-more", inline)]
+ fn fold<B, F>(self, init: B, mut f: F) -> B
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> B,
+ {
+ self.inner.fold(init, |acc, (_, v)| f(acc, v))
+ }
}
impl<K, V> ExactSizeIterator for ValuesMut<'_, K, V> {
#[cfg_attr(feature = "inline-more", inline)]
@@ -4837,7 +4945,7 @@ impl<K, V: Debug> fmt::Debug for ValuesMut<'_, K, V> {
}
}
-impl<'a, K, V, A: Allocator + Clone> Iterator for Drain<'a, K, V, A> {
+impl<'a, K, V, A: Allocator> Iterator for Drain<'a, K, V, A> {
type Item = (K, V);
#[cfg_attr(feature = "inline-more", inline)]
@@ -4848,27 +4956,35 @@ impl<'a, K, V, A: Allocator + Clone> Iterator for Drain<'a, K, V, A> {
fn size_hint(&self) -> (usize, Option<usize>) {
self.inner.size_hint()
}
+ #[cfg_attr(feature = "inline-more", inline)]
+ fn fold<B, F>(self, init: B, f: F) -> B
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> B,
+ {
+ self.inner.fold(init, f)
+ }
}
-impl<K, V, A: Allocator + Clone> ExactSizeIterator for Drain<'_, K, V, A> {
+impl<K, V, A: Allocator> ExactSizeIterator for Drain<'_, K, V, A> {
#[cfg_attr(feature = "inline-more", inline)]
fn len(&self) -> usize {
self.inner.len()
}
}
-impl<K, V, A: Allocator + Clone> FusedIterator for Drain<'_, K, V, A> {}
+impl<K, V, A: Allocator> FusedIterator for Drain<'_, K, V, A> {}
impl<K, V, A> fmt::Debug for Drain<'_, K, V, A>
where
K: fmt::Debug,
V: fmt::Debug,
- A: Allocator + Clone,
+ A: Allocator,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_list().entries(self.iter()).finish()
}
}
-impl<'a, K, V, S, A: Allocator + Clone> Entry<'a, K, V, S, A> {
+impl<'a, K, V, S, A: Allocator> Entry<'a, K, V, S, A> {
/// Sets the value of the entry, and returns an OccupiedEntry.
///
/// # Examples
@@ -5115,7 +5231,7 @@ impl<'a, K, V, S, A: Allocator + Clone> Entry<'a, K, V, S, A> {
}
}
-impl<'a, K, V: Default, S, A: Allocator + Clone> Entry<'a, K, V, S, A> {
+impl<'a, K, V: Default, S, A: Allocator> Entry<'a, K, V, S, A> {
/// Ensures a value is in the entry by inserting the default value if empty,
/// and returns a mutable reference to the value in the entry.
///
@@ -5148,7 +5264,7 @@ impl<'a, K, V: Default, S, A: Allocator + Clone> Entry<'a, K, V, S, A> {
}
}
-impl<'a, K, V, S, A: Allocator + Clone> OccupiedEntry<'a, K, V, S, A> {
+impl<'a, K, V, S, A: Allocator> OccupiedEntry<'a, K, V, S, A> {
/// Gets a reference to the key in the entry.
///
/// # Examples
@@ -5183,7 +5299,6 @@ impl<'a, K, V, S, A: Allocator + Clone> OccupiedEntry<'a, K, V, S, A> {
/// assert!(map.is_empty() && map.capacity() == 0);
///
/// map.entry("poneyland").or_insert(12);
- /// let capacity_before_remove = map.capacity();
///
/// if let Entry::Occupied(o) = map.entry("poneyland") {
/// // We delete the entry from the map.
@@ -5191,12 +5306,12 @@ impl<'a, K, V, S, A: Allocator + Clone> OccupiedEntry<'a, K, V, S, A> {
/// }
///
/// assert_eq!(map.contains_key("poneyland"), false);
- /// // Now map hold none elements but capacity is equal to the old one
- /// assert!(map.len() == 0 && map.capacity() == capacity_before_remove);
+ /// // Now the map holds no elements
+ /// assert!(map.is_empty());
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn remove_entry(self) -> (K, V) {
- unsafe { self.table.table.remove(self.elem) }
+ unsafe { self.table.table.remove(self.elem).0 }
}
/// Gets a reference to the value in the entry.
@@ -5319,15 +5434,14 @@ impl<'a, K, V, S, A: Allocator + Clone> OccupiedEntry<'a, K, V, S, A> {
/// assert!(map.is_empty() && map.capacity() == 0);
///
/// map.entry("poneyland").or_insert(12);
- /// let capacity_before_remove = map.capacity();
///
/// if let Entry::Occupied(o) = map.entry("poneyland") {
/// assert_eq!(o.remove(), 12);
/// }
///
/// assert_eq!(map.contains_key("poneyland"), false);
- /// // Now map hold none elements but capacity is equal to the old one
- /// assert!(map.len() == 0 && map.capacity() == capacity_before_remove);
+ /// // Now the map holds no elements
+ /// assert!(map.is_empty());
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn remove(self) -> V {
@@ -5505,7 +5619,7 @@ impl<'a, K, V, S, A: Allocator + Clone> OccupiedEntry<'a, K, V, S, A> {
}
}
-impl<'a, K, V, S, A: Allocator + Clone> VacantEntry<'a, K, V, S, A> {
+impl<'a, K, V, S, A: Allocator> VacantEntry<'a, K, V, S, A> {
/// Gets a reference to the key that would be used when inserting a value
/// through the `VacantEntry`.
///
@@ -5567,7 +5681,7 @@ impl<'a, K, V, S, A: Allocator + Clone> VacantEntry<'a, K, V, S, A> {
let entry = table.insert_entry(
self.hash,
(self.key, value),
- make_hasher::<K, _, V, S>(&self.table.hash_builder),
+ make_hasher::<_, V, S>(&self.table.hash_builder),
);
&mut entry.1
}
@@ -5581,7 +5695,7 @@ impl<'a, K, V, S, A: Allocator + Clone> VacantEntry<'a, K, V, S, A> {
let elem = self.table.table.insert(
self.hash,
(self.key, value),
- make_hasher::<K, _, V, S>(&self.table.hash_builder),
+ make_hasher::<_, V, S>(&self.table.hash_builder),
);
OccupiedEntry {
hash: self.hash,
@@ -5592,7 +5706,7 @@ impl<'a, K, V, S, A: Allocator + Clone> VacantEntry<'a, K, V, S, A> {
}
}
-impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> EntryRef<'a, 'b, K, Q, V, S, A> {
+impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator> EntryRef<'a, 'b, K, Q, V, S, A> {
/// Sets the value of the entry, and returns an OccupiedEntryRef.
///
/// # Examples
@@ -5682,10 +5796,7 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> EntryRef<'a, 'b, K, Q, V,
/// Ensures a value is in the entry by inserting, if empty, the result of the default function.
/// This method allows for generating key-derived values for insertion by providing the default
- /// function a reference to the key that was moved during the `.entry_ref(key)` method call.
- ///
- /// The reference to the moved key is provided so that cloning or copying the key is
- /// unnecessary, unlike with `.or_insert_with(|| ... )`.
+ /// function access to the borrowed form of the key.
///
/// # Examples
///
@@ -5737,7 +5848,7 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> EntryRef<'a, 'b, K, Q, V,
K: Borrow<Q>,
{
match *self {
- EntryRef::Occupied(ref entry) => entry.key(),
+ EntryRef::Occupied(ref entry) => entry.key().borrow(),
EntryRef::Vacant(ref entry) => entry.key(),
}
}
@@ -5833,8 +5944,7 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> EntryRef<'a, 'b, K, Q, V,
#[cfg_attr(feature = "inline-more", inline)]
pub fn and_replace_entry_with<F>(self, f: F) -> Self
where
- F: FnOnce(&Q, V) -> Option<V>,
- K: Borrow<Q>,
+ F: FnOnce(&K, V) -> Option<V>,
{
match self {
EntryRef::Occupied(entry) => entry.replace_entry_with(f),
@@ -5843,7 +5953,7 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> EntryRef<'a, 'b, K, Q, V,
}
}
-impl<'a, 'b, K, Q: ?Sized, V: Default, S, A: Allocator + Clone> EntryRef<'a, 'b, K, Q, V, S, A> {
+impl<'a, 'b, K, Q: ?Sized, V: Default, S, A: Allocator> EntryRef<'a, 'b, K, Q, V, S, A> {
/// Ensures a value is in the entry by inserting the default value if empty,
/// and returns a mutable reference to the value in the entry.
///
@@ -5876,7 +5986,7 @@ impl<'a, 'b, K, Q: ?Sized, V: Default, S, A: Allocator + Clone> EntryRef<'a, 'b,
}
}
-impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> OccupiedEntryRef<'a, 'b, K, Q, V, S, A> {
+impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator> OccupiedEntryRef<'a, 'b, K, Q, V, S, A> {
/// Gets a reference to the key in the entry.
///
/// # Examples
@@ -5893,11 +6003,8 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> OccupiedEntryRef<'a, 'b,
/// }
/// ```
#[cfg_attr(feature = "inline-more", inline)]
- pub fn key(&self) -> &Q
- where
- K: Borrow<Q>,
- {
- unsafe { &self.elem.as_ref().0 }.borrow()
+ pub fn key(&self) -> &K {
+ unsafe { &self.elem.as_ref().0 }
}
/// Take the ownership of the key and value from the map.
@@ -5914,7 +6021,6 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> OccupiedEntryRef<'a, 'b,
/// assert!(map.is_empty() && map.capacity() == 0);
///
/// map.entry_ref("poneyland").or_insert(12);
- /// let capacity_before_remove = map.capacity();
///
/// if let EntryRef::Occupied(o) = map.entry_ref("poneyland") {
/// // We delete the entry from the map.
@@ -5923,11 +6029,11 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> OccupiedEntryRef<'a, 'b,
///
/// assert_eq!(map.contains_key("poneyland"), false);
/// // Now the map holds no elements
- /// assert!(map.len() == 0 && map.capacity() == capacity_before_remove);
+ /// assert!(map.is_empty());
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn remove_entry(self) -> (K, V) {
- unsafe { self.table.table.remove(self.elem) }
+ unsafe { self.table.table.remove(self.elem).0 }
}
/// Gets a reference to the value in the entry.
@@ -6048,7 +6154,6 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> OccupiedEntryRef<'a, 'b,
/// assert!(map.is_empty() && map.capacity() == 0);
///
/// map.entry_ref("poneyland").or_insert(12);
- /// let capacity_before_remove = map.capacity();
///
/// if let EntryRef::Occupied(o) = map.entry_ref("poneyland") {
/// assert_eq!(o.remove(), 12);
@@ -6056,7 +6161,7 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> OccupiedEntryRef<'a, 'b,
///
/// assert_eq!(map.contains_key("poneyland"), false);
/// // Now the map holds no elements
- /// assert!(map.len() == 0 && map.capacity() == capacity_before_remove);
+ /// assert!(map.is_empty());
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn remove(self) -> V {
@@ -6068,7 +6173,7 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> OccupiedEntryRef<'a, 'b,
///
/// # Panics
///
- /// Will panic if this OccupiedEntry was created through [`EntryRef::insert`].
+ /// Will panic if this OccupiedEntryRef was created through [`EntryRef::insert`].
///
/// # Examples
///
@@ -6110,7 +6215,7 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> OccupiedEntryRef<'a, 'b,
///
/// # Panics
///
- /// Will panic if this OccupiedEntry was created through [`Entry::insert`].
+ /// Will panic if this OccupiedEntryRef was created through [`EntryRef::insert`].
///
/// # Examples
///
@@ -6138,7 +6243,7 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> OccupiedEntryRef<'a, 'b,
/// fn reclaim_memory(map: &mut HashMap<Rc<str>, usize>, keys: &[Rc<str>]) {
/// for key in keys {
/// if let EntryRef::Occupied(entry) = map.entry_ref(key.as_ref()) {
- /// /// Replaces the entry's key with our version of it in `keys`.
+ /// // Replaces the entry's key with our version of it in `keys`.
/// entry.replace_key();
/// }
/// }
@@ -6204,8 +6309,7 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> OccupiedEntryRef<'a, 'b,
#[cfg_attr(feature = "inline-more", inline)]
pub fn replace_entry_with<F>(self, f: F) -> EntryRef<'a, 'b, K, Q, V, S, A>
where
- F: FnOnce(&Q, V) -> Option<V>,
- K: Borrow<Q>,
+ F: FnOnce(&K, V) -> Option<V>,
{
unsafe {
let mut spare_key = None;
@@ -6213,7 +6317,7 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> OccupiedEntryRef<'a, 'b,
self.table
.table
.replace_bucket_with(self.elem.clone(), |(key, value)| {
- if let Some(new_value) = f(key.borrow(), value) {
+ if let Some(new_value) = f(&key, value) {
Some((key, new_value))
} else {
spare_key = Some(KeyOrRef::Owned(key));
@@ -6234,7 +6338,7 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> OccupiedEntryRef<'a, 'b,
}
}
-impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> VacantEntryRef<'a, 'b, K, Q, V, S, A> {
+impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator> VacantEntryRef<'a, 'b, K, Q, V, S, A> {
/// Gets a reference to the key that would be used when inserting a value
/// through the `VacantEntryRef`.
///
@@ -6305,7 +6409,7 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> VacantEntryRef<'a, 'b, K,
let entry = table.insert_entry(
self.hash,
(self.key.into_owned(), value),
- make_hasher::<K, _, V, S>(&self.table.hash_builder),
+ make_hasher::<_, V, S>(&self.table.hash_builder),
);
&mut entry.1
}
@@ -6319,7 +6423,7 @@ impl<'a, 'b, K, Q: ?Sized, V, S, A: Allocator + Clone> VacantEntryRef<'a, 'b, K,
let elem = self.table.table.insert(
self.hash,
(self.key.into_owned(), value),
- make_hasher::<K, _, V, S>(&self.table.hash_builder),
+ make_hasher::<_, V, S>(&self.table.hash_builder),
);
OccupiedEntryRef {
hash: self.hash,
@@ -6334,7 +6438,7 @@ impl<K, V, S, A> FromIterator<(K, V)> for HashMap<K, V, S, A>
where
K: Eq + Hash,
S: BuildHasher + Default,
- A: Default + Allocator + Clone,
+ A: Default + Allocator,
{
#[cfg_attr(feature = "inline-more", inline)]
fn from_iter<T: IntoIterator<Item = (K, V)>>(iter: T) -> Self {
@@ -6354,7 +6458,7 @@ impl<K, V, S, A> Extend<(K, V)> for HashMap<K, V, S, A>
where
K: Eq + Hash,
S: BuildHasher,
- A: Allocator + Clone,
+ A: Allocator,
{
/// Inserts all new key-value pairs from the iterator into the existing `HashMap<K, V, S, A>`.
/// Values for keys already present are replaced with the new values returned from the iterator.
@@ -6438,7 +6542,7 @@ where
K: Eq + Hash + Copy,
V: Copy,
S: BuildHasher,
- A: Allocator + Clone,
+ A: Allocator,
{
/// Inserts all new key-value pairs from the iterator into the existing `HashMap<K, V, S, A>`.
/// Values for keys already present are replaced with the new values returned from the iterator.
@@ -6455,17 +6559,17 @@ where
/// map.insert(1, 100);
///
/// let arr = [(1, 1), (2, 2)];
- /// let some_iter = arr.iter().map(|&(k, v)| (k, v));
+ /// let some_iter = arr.iter().map(|(k, v)| (k, v));
/// map.extend(some_iter);
/// // Values for existing keys are replaced with the new values from the iterator,
/// // so map.get(&1) no longer returns Some(&100).
/// assert_eq!(map.get(&1), Some(&1));
///
/// let some_vec: Vec<_> = vec![(3, 3), (4, 4)];
- /// map.extend(some_vec.iter().map(|&(k, v)| (k, v)));
+ /// map.extend(some_vec.iter().map(|(k, v)| (k, v)));
///
/// let some_arr = [(5, 5), (6, 6)];
- /// map.extend(some_arr.iter().map(|&(k, v)| (k, v)));
+ /// map.extend(some_arr.iter().map(|(k, v)| (k, v)));
///
/// // You can also extend from another HashMap
/// let mut new_map = HashMap::new();
@@ -6503,7 +6607,7 @@ where
K: Eq + Hash + Copy,
V: Copy,
S: BuildHasher,
- A: Allocator + Clone,
+ A: Allocator,
{
/// Inserts all new key-value pairs from the iterator into the existing `HashMap<K, V, S, A>`.
/// Values for keys already present are replaced with the new values returned from the iterator.
@@ -6570,12 +6674,12 @@ fn assert_covariance() {
fn iter_val<'a, 'new>(v: Iter<'a, u8, &'static str>) -> Iter<'a, u8, &'new str> {
v
}
- fn into_iter_key<'new, A: Allocator + Clone>(
+ fn into_iter_key<'new, A: Allocator>(
v: IntoIter<&'static str, u8, A>,
) -> IntoIter<&'new str, u8, A> {
v
}
- fn into_iter_val<'new, A: Allocator + Clone>(
+ fn into_iter_val<'new, A: Allocator>(
v: IntoIter<u8, &'static str, A>,
) -> IntoIter<u8, &'new str, A> {
v
@@ -6605,6 +6709,12 @@ mod test_map {
use super::Entry::{Occupied, Vacant};
use super::EntryRef;
use super::{HashMap, RawEntryMut};
+ use alloc::string::{String, ToString};
+ use alloc::sync::Arc;
+ use allocator_api2::alloc::{AllocError, Allocator, Global};
+ use core::alloc::Layout;
+ use core::ptr::NonNull;
+ use core::sync::atomic::{AtomicI8, Ordering};
use rand::{rngs::SmallRng, Rng, SeedableRng};
use std::borrow::ToOwned;
use std::cell::RefCell;
@@ -6695,7 +6805,7 @@ mod test_map {
assert_eq!(m2.len(), 2);
}
- thread_local! { static DROP_VECTOR: RefCell<Vec<i32>> = RefCell::new(Vec::new()) }
+ thread_local! { static DROP_VECTOR: RefCell<Vec<i32>> = const { RefCell::new(Vec::new()) } }
#[derive(Hash, PartialEq, Eq)]
struct Droppable {
@@ -6827,7 +6937,6 @@ mod test_map {
}
});
- #[allow(clippy::let_underscore_drop)] // kind-of a false positive
for _ in half.by_ref() {}
DROP_VECTOR.with(|v| {
@@ -7155,10 +7264,10 @@ mod test_map {
map.insert(1, 2);
map.insert(3, 4);
- let map_str = format!("{:?}", map);
+ let map_str = format!("{map:?}");
assert!(map_str == "{1: 2, 3: 4}" || map_str == "{3: 4, 1: 2}");
- assert_eq!(format!("{:?}", empty), "{}");
+ assert_eq!(format!("{empty:?}"), "{}");
}
#[test]
@@ -7474,7 +7583,7 @@ mod test_map {
// Test for #19292
fn check(m: &HashMap<i32, ()>) {
for k in m.keys() {
- assert!(m.contains_key(k), "{} is in keys() but not in the map?", k);
+ assert!(m.contains_key(k), "{k} is in keys() but not in the map?");
}
}
@@ -7510,7 +7619,7 @@ mod test_map {
// Test for #19292
fn check(m: &HashMap<std::string::String, ()>) {
for k in m.keys() {
- assert!(m.contains_key(k), "{} is in keys() but not in the map?", k);
+ assert!(m.contains_key(k), "{k} is in keys() but not in the map?");
}
}
@@ -7559,6 +7668,7 @@ mod test_map {
}
#[test]
+ #[allow(clippy::needless_borrow)]
fn test_extend_ref_kv_tuple() {
use std::ops::AddAssign;
let mut a = HashMap::new();
@@ -7580,7 +7690,7 @@ mod test_map {
let vec: Vec<_> = (100..200).map(|i| (i, i)).collect();
a.extend(iter);
a.extend(&vec);
- a.extend(&create_arr::<i32, 100>(200, 1));
+ a.extend(create_arr::<i32, 100>(200, 1));
assert_eq!(a.len(), 300);
@@ -7981,7 +8091,7 @@ mod test_map {
// Test for #19292
fn check(m: &HashMap<i32, ()>) {
for k in m.keys() {
- assert!(m.contains_key(k), "{} is in keys() but not in the map?", k);
+ assert!(m.contains_key(k), "{k} is in keys() but not in the map?");
}
}
@@ -8011,7 +8121,7 @@ mod test_map {
// Test for #19292
fn check(m: &HashMap<std::string::String, ()>) {
for k in m.keys() {
- assert!(m.contains_key(k), "{} is in keys() but not in the map?", k);
+ assert!(m.contains_key(k), "{k} is in keys() but not in the map?");
}
}
@@ -8049,10 +8159,10 @@ mod test_map {
}
#[test]
- fn test_drain_filter() {
+ fn test_extract_if() {
{
let mut map: HashMap<i32, i32> = (0..8).map(|x| (x, x * 10)).collect();
- let drained = map.drain_filter(|&k, _| k % 2 == 0);
+ let drained = map.extract_if(|&k, _| k % 2 == 0);
let mut out = drained.collect::<Vec<_>>();
out.sort_unstable();
assert_eq!(vec![(0, 0), (2, 20), (4, 40), (6, 60)], out);
@@ -8060,7 +8170,7 @@ mod test_map {
}
{
let mut map: HashMap<i32, i32> = (0..8).map(|x| (x, x * 10)).collect();
- drop(map.drain_filter(|&k, _| k % 2 == 0));
+ map.extract_if(|&k, _| k % 2 == 0).for_each(drop);
assert_eq!(map.len(), 4);
}
}
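
For reference, a minimal sketch of the renamed API as the rewritten test uses it. Unlike the old `drain_filter`, an `extract_if` iterator that is dropped without being driven removes nothing, which is why the test drives it with `for_each(drop)`:

```
use hashbrown::HashMap;

fn remove_even(map: &mut HashMap<i32, i32>) -> Vec<(i32, i32)> {
    // Matching entries are removed only as the iterator is consumed.
    map.extract_if(|k, _v| k % 2 == 0).collect()
}
```
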
@@ -8070,27 +8180,32 @@ mod test_map {
fn test_try_reserve() {
use crate::TryReserveError::{AllocError, CapacityOverflow};
- const MAX_USIZE: usize = usize::MAX;
+ const MAX_ISIZE: usize = isize::MAX as usize;
let mut empty_bytes: HashMap<u8, u8> = HashMap::new();
- if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_USIZE) {
+ if let Err(CapacityOverflow) = empty_bytes.try_reserve(usize::MAX) {
} else {
panic!("usize::MAX should trigger an overflow!");
}
- if let Err(AllocError { .. }) = empty_bytes.try_reserve(MAX_USIZE / 16) {
+ if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_ISIZE) {
+ } else {
+ panic!("isize::MAX should trigger an overflow!");
+ }
+
+ if let Err(AllocError { .. }) = empty_bytes.try_reserve(MAX_ISIZE / 5) {
} else {
// This may succeed if there is enough free memory. Attempt to
// allocate a few more hashmaps to ensure the allocation will fail.
let mut empty_bytes2: HashMap<u8, u8> = HashMap::new();
- let _ = empty_bytes2.try_reserve(MAX_USIZE / 16);
+ let _ = empty_bytes2.try_reserve(MAX_ISIZE / 5);
let mut empty_bytes3: HashMap<u8, u8> = HashMap::new();
- let _ = empty_bytes3.try_reserve(MAX_USIZE / 16);
+ let _ = empty_bytes3.try_reserve(MAX_ISIZE / 5);
let mut empty_bytes4: HashMap<u8, u8> = HashMap::new();
- if let Err(AllocError { .. }) = empty_bytes4.try_reserve(MAX_USIZE / 16) {
+ if let Err(AllocError { .. }) = empty_bytes4.try_reserve(MAX_ISIZE / 5) {
} else {
- panic!("usize::MAX / 8 should trigger an OOM!");
+ panic!("isize::MAX / 5 should trigger an OOM!");
}
}
}
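
The reworked test separates the two public failure modes: requests whose table size cannot fit in `isize::MAX` fail eagerly with `CapacityOverflow`, while plausible but unsatisfiable requests surface the allocator's `AllocError`. A sketch of handling both:

```
use hashbrown::{HashMap, TryReserveError};

fn try_grow(map: &mut HashMap<u8, u8>, extra: usize) -> bool {
    match map.try_reserve(extra) {
        Ok(()) => true,
        // The request overflowed the maximum table size; retrying is futile.
        Err(TryReserveError::CapacityOverflow) => false,
        // The allocator refused; a smaller request might still succeed.
        Err(TryReserveError::AllocError { .. }) => false,
    }
}
```
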
@@ -8104,7 +8219,7 @@ mod test_map {
let mut map: HashMap<_, _> = xs.iter().copied().collect();
let compute_hash = |map: &HashMap<i32, i32>, k: i32| -> u64 {
- super::make_insert_hash::<i32, _>(map.hasher(), &k)
+ super::make_hash::<i32, _>(map.hasher(), &k)
};
// Existing key (insert)
@@ -8266,21 +8381,21 @@ mod test_map {
loop {
// occasionally remove some elements
if i < n && rng.gen_bool(0.1) {
- let hash_value = super::make_insert_hash(&hash_builder, &i);
+ let hash_value = super::make_hash(&hash_builder, &i);
unsafe {
let e = map.table.find(hash_value, |q| q.0.eq(&i));
if let Some(e) = e {
it.reflect_remove(&e);
- let t = map.table.remove(e);
+ let t = map.table.remove(e).0;
removed.push(t);
left -= 1;
} else {
- assert!(removed.contains(&(i, 2 * i)), "{} not in {:?}", i, removed);
+ assert!(removed.contains(&(i, 2 * i)), "{i} not in {removed:?}");
let e = map.table.insert(
hash_value,
(i, 2 * i),
- super::make_hasher::<usize, _, usize, _>(&hash_builder),
+ super::make_hasher::<_, usize, _>(&hash_builder),
);
it.reflect_insert(&e);
if let Some(p) = removed.iter().position(|e| e == &(i, 2 * i)) {
@@ -8405,4 +8520,441 @@ mod test_map {
map2.clone_from(&map1);
}
+
+ #[test]
+ #[should_panic = "panic in clone"]
+ fn test_clone_from_memory_leaks() {
+ use alloc::vec::Vec;
+
+ struct CheckedClone {
+ panic_in_clone: bool,
+ need_drop: Vec<i32>,
+ }
+ impl Clone for CheckedClone {
+ fn clone(&self) -> Self {
+ if self.panic_in_clone {
+ panic!("panic in clone")
+ }
+ Self {
+ panic_in_clone: self.panic_in_clone,
+ need_drop: self.need_drop.clone(),
+ }
+ }
+ }
+ let mut map1 = HashMap::new();
+ map1.insert(
+ 1,
+ CheckedClone {
+ panic_in_clone: false,
+ need_drop: vec![0, 1, 2],
+ },
+ );
+ map1.insert(
+ 2,
+ CheckedClone {
+ panic_in_clone: false,
+ need_drop: vec![3, 4, 5],
+ },
+ );
+ map1.insert(
+ 3,
+ CheckedClone {
+ panic_in_clone: true,
+ need_drop: vec![6, 7, 8],
+ },
+ );
+ let _map2 = map1.clone();
+ }
+
+ struct MyAllocInner {
+ drop_count: Arc<AtomicI8>,
+ }
+
+ #[derive(Clone)]
+ struct MyAlloc {
+ _inner: Arc<MyAllocInner>,
+ }
+
+ impl MyAlloc {
+ fn new(drop_count: Arc<AtomicI8>) -> Self {
+ MyAlloc {
+ _inner: Arc::new(MyAllocInner { drop_count }),
+ }
+ }
+ }
+
+ impl Drop for MyAllocInner {
+ fn drop(&mut self) {
+ println!("MyAlloc freed.");
+ self.drop_count.fetch_sub(1, Ordering::SeqCst);
+ }
+ }
+
+ unsafe impl Allocator for MyAlloc {
+ fn allocate(&self, layout: Layout) -> std::result::Result<NonNull<[u8]>, AllocError> {
+ let g = Global;
+ g.allocate(layout)
+ }
+
+ unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
+ let g = Global;
+ g.deallocate(ptr, layout)
+ }
+ }
+
+ #[test]
+ fn test_hashmap_into_iter_bug() {
+ let dropped: Arc<AtomicI8> = Arc::new(AtomicI8::new(1));
+
+ {
+ let mut map = HashMap::with_capacity_in(10, MyAlloc::new(dropped.clone()));
+ for i in 0..10 {
+ map.entry(i).or_insert_with(|| "i".to_string());
+ }
+
+ for (k, v) in map {
+ println!("{}, {}", k, v);
+ }
+ }
+
+ // All allocator clones should already be dropped.
+ assert_eq!(dropped.load(Ordering::SeqCst), 0);
+ }
+
+ #[derive(Debug)]
+ struct CheckedCloneDrop<T> {
+ panic_in_clone: bool,
+ panic_in_drop: bool,
+ dropped: bool,
+ data: T,
+ }
+
+ impl<T> CheckedCloneDrop<T> {
+ fn new(panic_in_clone: bool, panic_in_drop: bool, data: T) -> Self {
+ CheckedCloneDrop {
+ panic_in_clone,
+ panic_in_drop,
+ dropped: false,
+ data,
+ }
+ }
+ }
+
+ impl<T: Clone> Clone for CheckedCloneDrop<T> {
+ fn clone(&self) -> Self {
+ if self.panic_in_clone {
+ panic!("panic in clone")
+ }
+ Self {
+ panic_in_clone: self.panic_in_clone,
+ panic_in_drop: self.panic_in_drop,
+ dropped: self.dropped,
+ data: self.data.clone(),
+ }
+ }
+ }
+
+ impl<T> Drop for CheckedCloneDrop<T> {
+ fn drop(&mut self) {
+ if self.panic_in_drop {
+ self.dropped = true;
+ panic!("panic in drop");
+ }
+ if self.dropped {
+ panic!("double drop");
+ }
+ self.dropped = true;
+ }
+ }
+
+ /// Returns a hashmap with a predefined distribution of elements,
+ /// located in the same order as the elements returned by the
+ /// iterator.
+ ///
+ /// This function does not panic, but returns an error as a `String`
+ /// to distinguish between a test panic and an error in the input data.
+ fn get_test_map<I, T, A>(
+ iter: I,
+ mut fun: impl FnMut(u64) -> T,
+ alloc: A,
+ ) -> Result<HashMap<u64, CheckedCloneDrop<T>, DefaultHashBuilder, A>, String>
+ where
+ I: Iterator<Item = (bool, bool)> + Clone + ExactSizeIterator,
+ A: Allocator,
+ T: PartialEq + core::fmt::Debug,
+ {
+ use crate::scopeguard::guard;
+
+ let mut map: HashMap<u64, CheckedCloneDrop<T>, _, A> =
+ HashMap::with_capacity_in(iter.size_hint().0, alloc);
+ {
+ let mut guard = guard(&mut map, |map| {
+ for (_, value) in map.iter_mut() {
+ value.panic_in_drop = false
+ }
+ });
+
+ let mut count = 0;
+ // Hash and key must be equal to each other to control element placement.
+ for (panic_in_clone, panic_in_drop) in iter.clone() {
+ if core::mem::needs_drop::<T>() && panic_in_drop {
+ return Err(String::from(
+ "panic_in_drop can be set with a type that doesn't need to be dropped",
+ ));
+ }
+ guard.table.insert(
+ count,
+ (
+ count,
+ CheckedCloneDrop::new(panic_in_clone, panic_in_drop, fun(count)),
+ ),
+ |(k, _)| *k,
+ );
+ count += 1;
+ }
+
+ // Let's check that all elements are located as we wanted
+ let mut check_count = 0;
+ for ((key, value), (panic_in_clone, panic_in_drop)) in guard.iter().zip(iter) {
+ if *key != check_count {
+ return Err(format!(
+ "key != check_count,\nkey: `{}`,\ncheck_count: `{}`",
+ key, check_count
+ ));
+ }
+ if value.dropped
+ || value.panic_in_clone != panic_in_clone
+ || value.panic_in_drop != panic_in_drop
+ || value.data != fun(check_count)
+ {
+ return Err(format!(
+ "Value is not equal to expected,\nvalue: `{:?}`,\nexpected: \
+ `CheckedCloneDrop {{ panic_in_clone: {}, panic_in_drop: {}, dropped: {}, data: {:?} }}`",
+ value, panic_in_clone, panic_in_drop, false, fun(check_count)
+ ));
+ }
+ check_count += 1;
+ }
+
+ if guard.len() != check_count as usize {
+ return Err(format!(
+ "map.len() != check_count,\nmap.len(): `{}`,\ncheck_count: `{}`",
+ guard.len(),
+ check_count
+ ));
+ }
+
+ if count != check_count {
+ return Err(format!(
+ "count != check_count,\ncount: `{}`,\ncheck_count: `{}`",
+ count, check_count
+ ));
+ }
+ core::mem::forget(guard);
+ }
+ Ok(map)
+ }
+
+ const DISARMED: bool = false;
+ const ARMED: bool = true;
+
+ const ARMED_FLAGS: [bool; 8] = [
+ DISARMED, DISARMED, DISARMED, ARMED, DISARMED, DISARMED, DISARMED, DISARMED,
+ ];
+
+ const DISARMED_FLAGS: [bool; 8] = [
+ DISARMED, DISARMED, DISARMED, DISARMED, DISARMED, DISARMED, DISARMED, DISARMED,
+ ];
+
+ #[test]
+ #[should_panic = "panic in clone"]
+ fn test_clone_memory_leaks_and_double_drop_one() {
+ let dropped: Arc<AtomicI8> = Arc::new(AtomicI8::new(2));
+
+ {
+ assert_eq!(ARMED_FLAGS.len(), DISARMED_FLAGS.len());
+
+ let map: HashMap<u64, CheckedCloneDrop<Vec<u64>>, DefaultHashBuilder, MyAlloc> =
+ match get_test_map(
+ ARMED_FLAGS.into_iter().zip(DISARMED_FLAGS),
+ |n| vec![n],
+ MyAlloc::new(dropped.clone()),
+ ) {
+ Ok(map) => map,
+ Err(msg) => panic!("{msg}"),
+ };
+
+ // Clone should normally clone a few elements and then, when the
+ // clone function panics, deallocate its own memory, the memory of
+ // `dropped: Arc<AtomicI8>`, and the memory of the already cloned
+ // elements (the Vec<i32> memory inside CheckedCloneDrop).
+ let _map2 = map.clone();
+ }
+ }
+
+ #[test]
+ #[should_panic = "panic in drop"]
+ fn test_clone_memory_leaks_and_double_drop_two() {
+ let dropped: Arc<AtomicI8> = Arc::new(AtomicI8::new(2));
+
+ {
+ assert_eq!(ARMED_FLAGS.len(), DISARMED_FLAGS.len());
+
+ let map: HashMap<u64, CheckedCloneDrop<u64>, DefaultHashBuilder, _> = match get_test_map(
+ DISARMED_FLAGS.into_iter().zip(DISARMED_FLAGS),
+ |n| n,
+ MyAlloc::new(dropped.clone()),
+ ) {
+ Ok(map) => map,
+ Err(msg) => panic!("{msg}"),
+ };
+
+ let mut map2 = match get_test_map(
+ DISARMED_FLAGS.into_iter().zip(ARMED_FLAGS),
+ |n| n,
+ MyAlloc::new(dropped.clone()),
+ ) {
+ Ok(map) => map,
+ Err(msg) => panic!("{msg}"),
+ };
+
+ // The `clone_from` should try to drop the elements of `map2` without
+ // double-dropping them or leaking the allocator. Elements that are not
+ // dropped leak their memory.
+ map2.clone_from(&map);
+ }
+ }
+
+ /// We check that we still have a working table if a clone operation from another
+ /// thread ended in a panic (when the buckets of the two maps are equal).
+ #[test]
+ fn test_catch_panic_clone_from_when_len_is_equal() {
+ use std::thread;
+
+ let dropped: Arc<AtomicI8> = Arc::new(AtomicI8::new(2));
+
+ {
+ assert_eq!(ARMED_FLAGS.len(), DISARMED_FLAGS.len());
+
+ let mut map = match get_test_map(
+ DISARMED_FLAGS.into_iter().zip(DISARMED_FLAGS),
+ |n| vec![n],
+ MyAlloc::new(dropped.clone()),
+ ) {
+ Ok(map) => map,
+ Err(msg) => panic!("{msg}"),
+ };
+
+ thread::scope(|s| {
+ let result: thread::ScopedJoinHandle<'_, String> = s.spawn(|| {
+ let scope_map = match get_test_map(
+ ARMED_FLAGS.into_iter().zip(DISARMED_FLAGS),
+ |n| vec![n * 2],
+ MyAlloc::new(dropped.clone()),
+ ) {
+ Ok(map) => map,
+ Err(msg) => return msg,
+ };
+ if map.table.buckets() != scope_map.table.buckets() {
+ return format!(
+ "map.table.buckets() != scope_map.table.buckets(),\nleft: `{}`,\nright: `{}`",
+ map.table.buckets(), scope_map.table.buckets()
+ );
+ }
+ map.clone_from(&scope_map);
+ "We must fail the cloning!!!".to_owned()
+ });
+ if let Ok(msg) = result.join() {
+ panic!("{msg}")
+ }
+ });
+
+ // Let's check that all iterators work fine and do not return elements
+ // (especially `RawIterRange`, which does not depend on the number of
+ // elements in the table, but looks directly at the control bytes)
+ //
+ // SAFETY: We know for sure that `RawTable` will outlive
+ // the returned `RawIter / RawIterRange` iterator.
+ assert_eq!(map.len(), 0);
+ assert_eq!(map.iter().count(), 0);
+ assert_eq!(unsafe { map.table.iter().count() }, 0);
+ assert_eq!(unsafe { map.table.iter().iter.count() }, 0);
+
+ for idx in 0..map.table.buckets() {
+ let idx = idx as u64;
+ assert!(
+ map.table.find(idx, |(k, _)| *k == idx).is_none(),
+ "Index: {idx}"
+ );
+ }
+ }
+
+ // All allocator clones should already be dropped.
+ assert_eq!(dropped.load(Ordering::SeqCst), 0);
+ }
+
+ /// We check that we still have a working table if a clone operation from another
+ /// thread ended in a panic (when the buckets of the two maps are not equal).
+ #[test]
+ fn test_catch_panic_clone_from_when_len_is_not_equal() {
+ use std::thread;
+
+ let dropped: Arc<AtomicI8> = Arc::new(AtomicI8::new(2));
+
+ {
+ assert_eq!(ARMED_FLAGS.len(), DISARMED_FLAGS.len());
+
+ let mut map = match get_test_map(
+ [DISARMED].into_iter().zip([DISARMED]),
+ |n| vec![n],
+ MyAlloc::new(dropped.clone()),
+ ) {
+ Ok(map) => map,
+ Err(msg) => panic!("{msg}"),
+ };
+
+ thread::scope(|s| {
+ let result: thread::ScopedJoinHandle<'_, String> = s.spawn(|| {
+ let scope_map = match get_test_map(
+ ARMED_FLAGS.into_iter().zip(DISARMED_FLAGS),
+ |n| vec![n * 2],
+ MyAlloc::new(dropped.clone()),
+ ) {
+ Ok(map) => map,
+ Err(msg) => return msg,
+ };
+ if map.table.buckets() == scope_map.table.buckets() {
+ return format!(
+ "map.table.buckets() == scope_map.table.buckets(): `{}`",
+ map.table.buckets()
+ );
+ }
+ map.clone_from(&scope_map);
+ "We must fail the cloning!!!".to_owned()
+ });
+ if let Ok(msg) = result.join() {
+ panic!("{msg}")
+ }
+ });
+
+ // Let's check that all iterators work fine and do not return elements
+ // (especially `RawIterRange`, which does not depend on the number of
+ // elements in the table, but looks directly at the control bytes)
+ //
+ // SAFETY: We know for sure that `RawTable` will outlive
+ // the returned `RawIter / RawIterRange` iterator.
+ assert_eq!(map.len(), 0);
+ assert_eq!(map.iter().count(), 0);
+ assert_eq!(unsafe { map.table.iter().count() }, 0);
+ assert_eq!(unsafe { map.table.iter().iter.count() }, 0);
+
+ for idx in 0..map.table.buckets() {
+ let idx = idx as u64;
+ assert!(
+ map.table.find(idx, |(k, _)| *k == idx).is_none(),
+ "Index: {idx}"
+ );
+ }
+ }
+
+ // All allocator clones should already be dropped.
+ assert_eq!(dropped.load(Ordering::SeqCst), 0);
+ }
}
diff --git a/third_party/rust/hashbrown/src/raw/alloc.rs b/third_party/rust/hashbrown/src/raw/alloc.rs
index ba09ea9de7..15299e7b09 100644
--- a/third_party/rust/hashbrown/src/raw/alloc.rs
+++ b/third_party/rust/hashbrown/src/raw/alloc.rs
@@ -1,5 +1,9 @@
pub(crate) use self::inner::{do_alloc, Allocator, Global};
+// Nightly case.
+// Uses the unstable `allocator_api` feature.
+// This is compatible with `allocator-api2` which can be enabled or not.
+// This is used when building for `std`.
#[cfg(feature = "nightly")]
mod inner {
use crate::alloc::alloc::Layout;
@@ -7,28 +11,44 @@ mod inner {
use core::ptr::NonNull;
#[allow(clippy::map_err_ignore)]
- pub fn do_alloc<A: Allocator>(alloc: &A, layout: Layout) -> Result<NonNull<u8>, ()> {
+ pub(crate) fn do_alloc<A: Allocator>(alloc: &A, layout: Layout) -> Result<NonNull<u8>, ()> {
match alloc.allocate(layout) {
Ok(ptr) => Ok(ptr.as_non_null_ptr()),
Err(_) => Err(()),
}
}
+}
- #[cfg(feature = "bumpalo")]
- unsafe impl Allocator for crate::BumpWrapper<'_> {
- #[inline]
- fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, core::alloc::AllocError> {
- match self.0.try_alloc_layout(layout) {
- Ok(ptr) => Ok(NonNull::slice_from_raw_parts(ptr, layout.size())),
- Err(_) => Err(core::alloc::AllocError),
- }
+// Basic non-nightly case.
+// This uses `allocator-api2` enabled by default.
+// If any crate enables "nightly" in `allocator-api2`,
+// this will be equivalent to the nightly case,
+// since `allocator_api2::alloc::Allocator` would be a re-export of
+// `core::alloc::Allocator`.
+#[cfg(all(not(feature = "nightly"), feature = "allocator-api2"))]
+mod inner {
+ use crate::alloc::alloc::Layout;
+ pub use allocator_api2::alloc::{Allocator, Global};
+ use core::ptr::NonNull;
+
+ #[allow(clippy::map_err_ignore)]
+ pub(crate) fn do_alloc<A: Allocator>(alloc: &A, layout: Layout) -> Result<NonNull<u8>, ()> {
+ match alloc.allocate(layout) {
+ Ok(ptr) => Ok(ptr.cast()),
+ Err(_) => Err(()),
}
- #[inline]
- unsafe fn deallocate(&self, _ptr: NonNull<u8>, _layout: Layout) {}
}
}
-#[cfg(not(feature = "nightly"))]
+// No-defaults case.
+// When building with default-features turned off and
+// neither `nightly` nor `allocator-api2` is enabled,
+// this will be used, making it impossible to use any custom allocator
+// with the collections defined in this crate.
+// Any crate in the build tree can enable `allocator-api2` or `nightly`
+// without disturbing users that don't want to use them.
+#[cfg(not(any(feature = "nightly", feature = "allocator-api2")))]
mod inner {
use crate::alloc::alloc::{alloc, dealloc, Layout};
use core::ptr::NonNull;
@@ -41,6 +61,7 @@ mod inner {
#[derive(Copy, Clone)]
pub struct Global;
+
unsafe impl Allocator for Global {
#[inline]
fn allocate(&self, layout: Layout) -> Result<NonNull<u8>, ()> {
@@ -51,6 +72,7 @@ mod inner {
dealloc(ptr.as_ptr(), layout);
}
}
+
impl Default for Global {
#[inline]
fn default() -> Self {
@@ -58,16 +80,7 @@ mod inner {
}
}
- pub fn do_alloc<A: Allocator>(alloc: &A, layout: Layout) -> Result<NonNull<u8>, ()> {
+ pub(crate) fn do_alloc<A: Allocator>(alloc: &A, layout: Layout) -> Result<NonNull<u8>, ()> {
alloc.allocate(layout)
}
-
- #[cfg(feature = "bumpalo")]
- unsafe impl Allocator for crate::BumpWrapper<'_> {
- #[allow(clippy::map_err_ignore)]
- fn allocate(&self, layout: Layout) -> Result<NonNull<u8>, ()> {
- self.0.try_alloc_layout(layout).map_err(|_| ())
- }
- unsafe fn deallocate(&self, _ptr: NonNull<u8>, _layout: Layout) {}
- }
}
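
With this split, stable builds get `Allocator` and `Global` from `allocator-api2` by default, so custom allocators no longer require nightly. A minimal sketch under that assumption, mirroring the imports used by the new tests above; the `Logging` allocator is hypothetical:

```
use allocator_api2::alloc::{AllocError, Allocator, Global};
use core::alloc::Layout;
use core::ptr::NonNull;
use hashbrown::hash_map::{DefaultHashBuilder, HashMap};

// Hypothetical allocator that forwards to `Global` while logging sizes.
struct Logging;

unsafe impl Allocator for Logging {
    fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
        eprintln!("allocating {} bytes", layout.size());
        Global.allocate(layout)
    }
    unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
        Global.deallocate(ptr, layout)
    }
}

// Note that `Logging` no longer needs to be `Clone`.
fn build() -> HashMap<u32, u32, DefaultHashBuilder, Logging> {
    HashMap::new_in(Logging)
}
```
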
diff --git a/third_party/rust/hashbrown/src/raw/bitmask.rs b/third_party/rust/hashbrown/src/raw/bitmask.rs
index 7d4f9fc387..6576b3c5c0 100644
--- a/third_party/rust/hashbrown/src/raw/bitmask.rs
+++ b/third_party/rust/hashbrown/src/raw/bitmask.rs
@@ -1,6 +1,6 @@
-use super::imp::{BitMaskWord, BITMASK_MASK, BITMASK_STRIDE};
-#[cfg(feature = "nightly")]
-use core::intrinsics;
+use super::imp::{
+ BitMaskWord, NonZeroBitMaskWord, BITMASK_ITER_MASK, BITMASK_MASK, BITMASK_STRIDE,
+};
/// A bit mask which contains the result of a `Match` operation on a `Group` and
/// allows iterating through them.
@@ -8,75 +8,55 @@ use core::intrinsics;
/// The bit mask is arranged so that low-order bits represent lower memory
/// addresses for group match results.
///
-/// For implementation reasons, the bits in the set may be sparsely packed, so
-/// that there is only one bit-per-byte used (the high bit, 7). If this is the
+/// For implementation reasons, the bits in the set may be sparsely packed with
+/// groups of 8 bits representing one element. If any of these bits are non-zero
+/// then this element is considered true in the mask. If this is the
/// case, `BITMASK_STRIDE` will be 8 to indicate a divide-by-8 should be
/// performed on counts/indices to normalize this difference. `BITMASK_MASK` is
/// similarly a mask of all the actually-used bits.
+///
+/// To iterate over a bit mask, it must be converted to a form where only 1 bit
+/// is set per element. This is done by applying `BITMASK_ITER_MASK` on the
+/// mask bits.
#[derive(Copy, Clone)]
-pub struct BitMask(pub BitMaskWord);
+pub(crate) struct BitMask(pub(crate) BitMaskWord);
#[allow(clippy::use_self)]
impl BitMask {
/// Returns a new `BitMask` with all bits inverted.
#[inline]
#[must_use]
- pub fn invert(self) -> Self {
+ #[allow(dead_code)]
+ pub(crate) fn invert(self) -> Self {
BitMask(self.0 ^ BITMASK_MASK)
}
- /// Flip the bit in the mask for the entry at the given index.
- ///
- /// Returns the bit's previous state.
- #[inline]
- #[allow(clippy::cast_ptr_alignment)]
- #[cfg(feature = "raw")]
- pub unsafe fn flip(&mut self, index: usize) -> bool {
- // NOTE: The + BITMASK_STRIDE - 1 is to set the high bit.
- let mask = 1 << (index * BITMASK_STRIDE + BITMASK_STRIDE - 1);
- self.0 ^= mask;
- // The bit was set if the bit is now 0.
- self.0 & mask == 0
- }
-
/// Returns a new `BitMask` with the lowest bit removed.
#[inline]
#[must_use]
- pub fn remove_lowest_bit(self) -> Self {
+ fn remove_lowest_bit(self) -> Self {
BitMask(self.0 & (self.0 - 1))
}
+
/// Returns whether the `BitMask` has at least one set bit.
#[inline]
- pub fn any_bit_set(self) -> bool {
+ pub(crate) fn any_bit_set(self) -> bool {
self.0 != 0
}
/// Returns the first set bit in the `BitMask`, if there is one.
#[inline]
- pub fn lowest_set_bit(self) -> Option<usize> {
- if self.0 == 0 {
- None
+ pub(crate) fn lowest_set_bit(self) -> Option<usize> {
+ if let Some(nonzero) = NonZeroBitMaskWord::new(self.0) {
+ Some(Self::nonzero_trailing_zeros(nonzero))
} else {
- Some(unsafe { self.lowest_set_bit_nonzero() })
+ None
}
}
- /// Returns the first set bit in the `BitMask`, if there is one. The
- /// bitmask must not be empty.
- #[inline]
- #[cfg(feature = "nightly")]
- pub unsafe fn lowest_set_bit_nonzero(self) -> usize {
- intrinsics::cttz_nonzero(self.0) as usize / BITMASK_STRIDE
- }
- #[inline]
- #[cfg(not(feature = "nightly"))]
- pub unsafe fn lowest_set_bit_nonzero(self) -> usize {
- self.trailing_zeros()
- }
-
/// Returns the number of trailing zeroes in the `BitMask`.
#[inline]
- pub fn trailing_zeros(self) -> usize {
+ pub(crate) fn trailing_zeros(self) -> usize {
// ARM doesn't have a trailing_zeroes instruction, and instead uses
// reverse_bits (RBIT) + leading_zeroes (CLZ). However older ARM
// versions (pre-ARMv7) don't have RBIT and need to emulate it
@@ -89,9 +69,21 @@ impl BitMask {
}
}
+ /// Same as above but takes a `NonZeroBitMaskWord`.
+ #[inline]
+ fn nonzero_trailing_zeros(nonzero: NonZeroBitMaskWord) -> usize {
+ if cfg!(target_arch = "arm") && BITMASK_STRIDE % 8 == 0 {
+ // SAFETY: A byte-swapped non-zero value is still non-zero.
+ let swapped = unsafe { NonZeroBitMaskWord::new_unchecked(nonzero.get().swap_bytes()) };
+ swapped.leading_zeros() as usize / BITMASK_STRIDE
+ } else {
+ nonzero.trailing_zeros() as usize / BITMASK_STRIDE
+ }
+ }
+
/// Returns the number of leading zeroes in the `BitMask`.
#[inline]
- pub fn leading_zeros(self) -> usize {
+ pub(crate) fn leading_zeros(self) -> usize {
self.0.leading_zeros() as usize / BITMASK_STRIDE
}
}
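
A worked sketch of the stride arithmetic on the generic (SWAR) backend, where each element occupies one byte: a set bit at position 23 identifies element 23 / BITMASK_STRIDE = 2. Standalone, assuming the 64-bit group word:

```
use core::num::NonZeroU64;

const BITMASK_STRIDE: usize = 8; // one byte per element, as above

fn lowest_set_element(mask: u64) -> Option<usize> {
    NonZeroU64::new(mask).map(|m| m.trailing_zeros() as usize / BITMASK_STRIDE)
}

// lowest_set_element(0x0000_0000_0080_0000) == Some(2): bit 23 is the
// high bit of byte 2.
```
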
@@ -102,13 +94,32 @@ impl IntoIterator for BitMask {
#[inline]
fn into_iter(self) -> BitMaskIter {
- BitMaskIter(self)
+ // A BitMask only requires each element (group of bits) to be non-zero.
+ // However for iteration we need each element to only contain 1 bit.
+ BitMaskIter(BitMask(self.0 & BITMASK_ITER_MASK))
}
}
/// Iterator over the contents of a `BitMask`, returning the indices of set
/// bits.
-pub struct BitMaskIter(BitMask);
+#[derive(Copy, Clone)]
+pub(crate) struct BitMaskIter(pub(crate) BitMask);
+
+impl BitMaskIter {
+ /// Flip the bit in the mask for the entry at the given index.
+ ///
+ /// Returns the bit's previous state.
+ #[inline]
+ #[allow(clippy::cast_ptr_alignment)]
+ #[cfg(feature = "raw")]
+ pub(crate) unsafe fn flip(&mut self, index: usize) -> bool {
+ // NOTE: The + BITMASK_STRIDE - 1 is to set the high bit.
+ let mask = 1 << (index * BITMASK_STRIDE + BITMASK_STRIDE - 1);
+ self.0 .0 ^= mask;
+ // The bit was set if the bit is now 0.
+ self.0 .0 & mask == 0
+ }
+}
impl Iterator for BitMaskIter {
type Item = usize;
diff --git a/third_party/rust/hashbrown/src/raw/generic.rs b/third_party/rust/hashbrown/src/raw/generic.rs
index b4d31e62c2..c668b0642a 100644
--- a/third_party/rust/hashbrown/src/raw/generic.rs
+++ b/third_party/rust/hashbrown/src/raw/generic.rs
@@ -5,26 +5,29 @@ use core::{mem, ptr};
// Use the native word size as the group size. Using a 64-bit group size on
// a 32-bit architecture will just end up being more expensive because
// shifts and multiplies will need to be emulated.
-#[cfg(any(
- target_pointer_width = "64",
- target_arch = "aarch64",
- target_arch = "x86_64",
- target_arch = "wasm32",
-))]
-type GroupWord = u64;
-#[cfg(all(
- target_pointer_width = "32",
- not(target_arch = "aarch64"),
- not(target_arch = "x86_64"),
- not(target_arch = "wasm32"),
-))]
-type GroupWord = u32;
-pub type BitMaskWord = GroupWord;
-pub const BITMASK_STRIDE: usize = 8;
+cfg_if! {
+ if #[cfg(any(
+ target_pointer_width = "64",
+ target_arch = "aarch64",
+ target_arch = "x86_64",
+ target_arch = "wasm32",
+ ))] {
+ type GroupWord = u64;
+ type NonZeroGroupWord = core::num::NonZeroU64;
+ } else {
+ type GroupWord = u32;
+ type NonZeroGroupWord = core::num::NonZeroU32;
+ }
+}
+
+pub(crate) type BitMaskWord = GroupWord;
+pub(crate) type NonZeroBitMaskWord = NonZeroGroupWord;
+pub(crate) const BITMASK_STRIDE: usize = 8;
// We only care about the highest bit of each byte for the mask.
#[allow(clippy::cast_possible_truncation, clippy::unnecessary_cast)]
-pub const BITMASK_MASK: BitMaskWord = 0x8080_8080_8080_8080_u64 as GroupWord;
+pub(crate) const BITMASK_MASK: BitMaskWord = 0x8080_8080_8080_8080_u64 as GroupWord;
+pub(crate) const BITMASK_ITER_MASK: BitMaskWord = !0;
/// Helper function to replicate a byte across a `GroupWord`.
#[inline]
@@ -37,7 +40,7 @@ fn repeat(byte: u8) -> GroupWord {
///
/// This implementation uses a word-sized integer.
#[derive(Copy, Clone)]
-pub struct Group(GroupWord);
+pub(crate) struct Group(GroupWord);
// We perform all operations in the native endianness, and convert to
// little-endian just before creating a BitMask. This can potentially
@@ -46,14 +49,14 @@ pub struct Group(GroupWord);
#[allow(clippy::use_self)]
impl Group {
/// Number of bytes in the group.
- pub const WIDTH: usize = mem::size_of::<Self>();
+ pub(crate) const WIDTH: usize = mem::size_of::<Self>();
/// Returns a full group of empty bytes, suitable for use as the initial
/// value for an empty hash table.
///
/// This is guaranteed to be aligned to the group size.
#[inline]
- pub const fn static_empty() -> &'static [u8; Group::WIDTH] {
+ pub(crate) const fn static_empty() -> &'static [u8; Group::WIDTH] {
#[repr(C)]
struct AlignedBytes {
_align: [Group; 0],
@@ -69,7 +72,7 @@ impl Group {
/// Loads a group of bytes starting at the given address.
#[inline]
#[allow(clippy::cast_ptr_alignment)] // unaligned load
- pub unsafe fn load(ptr: *const u8) -> Self {
+ pub(crate) unsafe fn load(ptr: *const u8) -> Self {
Group(ptr::read_unaligned(ptr.cast()))
}
@@ -77,7 +80,7 @@ impl Group {
/// aligned to `mem::align_of::<Group>()`.
#[inline]
#[allow(clippy::cast_ptr_alignment)]
- pub unsafe fn load_aligned(ptr: *const u8) -> Self {
+ pub(crate) unsafe fn load_aligned(ptr: *const u8) -> Self {
// FIXME: use align_offset once it stabilizes
debug_assert_eq!(ptr as usize & (mem::align_of::<Self>() - 1), 0);
Group(ptr::read(ptr.cast()))
@@ -87,7 +90,7 @@ impl Group {
/// aligned to `mem::align_of::<Group>()`.
#[inline]
#[allow(clippy::cast_ptr_alignment)]
- pub unsafe fn store_aligned(self, ptr: *mut u8) {
+ pub(crate) unsafe fn store_aligned(self, ptr: *mut u8) {
// FIXME: use align_offset once it stabilizes
debug_assert_eq!(ptr as usize & (mem::align_of::<Self>() - 1), 0);
ptr::write(ptr.cast(), self.0);
@@ -104,7 +107,7 @@ impl Group {
/// - This only happens if there is at least 1 true match.
/// - The chance of this happening is very low (< 1% chance per byte).
#[inline]
- pub fn match_byte(self, byte: u8) -> BitMask {
+ pub(crate) fn match_byte(self, byte: u8) -> BitMask {
// This algorithm is derived from
// https://graphics.stanford.edu/~seander/bithacks.html##ValueInWord
let cmp = self.0 ^ repeat(byte);
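The remainder of the expression falls outside this hunk; as a hedged, self-contained sketch, the cited bit hack sets the high bit of every byte of `cmp` that is zero, i.e. every byte of the group equal to `byte`:

```rust
fn repeat(byte: u8) -> u64 {
    u64::from_ne_bytes([byte; 8])
}

// High bit of each byte is set iff that byte of `group` equals `byte`
// (up to the false-positive caveat described in the doc comment above).
fn match_byte(group: u64, byte: u8) -> u64 {
    let cmp = group ^ repeat(byte); // matching bytes become zero
    cmp.wrapping_sub(repeat(0x01)) & !cmp & repeat(0x80)
}

fn main() {
    let group = u64::from_le_bytes([0x12, 0x34, 0x12, 0x56, 0x78, 0x12, 0x9a, 0xbc]);
    // Bytes 0, 2 and 5 match, so exactly their high bits are set.
    assert_eq!(match_byte(group, 0x12), 0x0000_8000_0080_0080);
}
```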
@@ -114,7 +117,7 @@ impl Group {
/// Returns a `BitMask` indicating all bytes in the group which are
/// `EMPTY`.
#[inline]
- pub fn match_empty(self) -> BitMask {
+ pub(crate) fn match_empty(self) -> BitMask {
// If the high bit is set, then the byte must be either:
// 1111_1111 (EMPTY) or 1000_0000 (DELETED).
// So we can just check if the top two bits are 1 by ANDing them.
@@ -124,14 +127,14 @@ impl Group {
/// Returns a `BitMask` indicating all bytes in the group which are
/// `EMPTY` or `DELETED`.
#[inline]
- pub fn match_empty_or_deleted(self) -> BitMask {
+ pub(crate) fn match_empty_or_deleted(self) -> BitMask {
// A byte is EMPTY or DELETED iff the high bit is set
BitMask((self.0 & repeat(0x80)).to_le())
}
/// Returns a `BitMask` indicating all bytes in the group which are full.
#[inline]
- pub fn match_full(self) -> BitMask {
+ pub(crate) fn match_full(self) -> BitMask {
self.match_empty_or_deleted().invert()
}
@@ -140,7 +143,7 @@ impl Group {
/// - `DELETED => EMPTY`
/// - `FULL => DELETED`
#[inline]
- pub fn convert_special_to_empty_and_full_to_deleted(self) -> Self {
+ pub(crate) fn convert_special_to_empty_and_full_to_deleted(self) -> Self {
// Map high_bit = 1 (EMPTY or DELETED) to 1111_1111
// and high_bit = 0 (FULL) to 1000_0000
//
diff --git a/third_party/rust/hashbrown/src/raw/mod.rs b/third_party/rust/hashbrown/src/raw/mod.rs
index 211b818a5f..c8e8e29122 100644
--- a/third_party/rust/hashbrown/src/raw/mod.rs
+++ b/third_party/rust/hashbrown/src/raw/mod.rs
@@ -4,7 +4,6 @@ use crate::TryReserveError;
use core::iter::FusedIterator;
use core::marker::PhantomData;
use core::mem;
-use core::mem::ManuallyDrop;
use core::mem::MaybeUninit;
use core::ptr::NonNull;
use core::{hint, ptr};
@@ -21,12 +20,21 @@ cfg_if! {
if #[cfg(all(
target_feature = "sse2",
any(target_arch = "x86", target_arch = "x86_64"),
- not(miri)
+ not(miri),
))] {
mod sse2;
use sse2 as imp;
+ } else if #[cfg(all(
+ target_arch = "aarch64",
+ target_feature = "neon",
+ // NEON intrinsics are currently broken on big-endian targets.
+ // See https://github.com/rust-lang/stdarch/issues/1484.
+ target_endian = "little",
+ not(miri),
+ ))] {
+ mod neon;
+ use neon as imp;
} else {
- #[path = "generic.rs"]
mod generic;
use generic as imp;
}
@@ -37,36 +45,24 @@ pub(crate) use self::alloc::{do_alloc, Allocator, Global};
mod bitmask;
-use self::bitmask::{BitMask, BitMaskIter};
+use self::bitmask::BitMaskIter;
use self::imp::Group;
// Branch prediction hint. This is currently only available on nightly but it
// consistently improves performance by 10-15%.
+#[cfg(not(feature = "nightly"))]
+use core::convert::identity as likely;
+#[cfg(not(feature = "nightly"))]
+use core::convert::identity as unlikely;
#[cfg(feature = "nightly")]
use core::intrinsics::{likely, unlikely};
-// On stable we can use #[cold] to get a equivalent effect: this attributes
-// suggests that the function is unlikely to be called
-#[cfg(not(feature = "nightly"))]
-#[inline]
-#[cold]
-fn cold() {}
-
-#[cfg(not(feature = "nightly"))]
-#[inline]
-fn likely(b: bool) -> bool {
- if !b {
- cold();
- }
- b
-}
-#[cfg(not(feature = "nightly"))]
-#[inline]
-fn unlikely(b: bool) -> bool {
- if b {
- cold();
- }
- b
+// FIXME: use strict provenance functions once they are stable.
+// Implement it with a transmute for now.
+#[inline(always)]
+#[allow(clippy::useless_transmute)] // clippy is wrong, cast and transmute are different here
+fn invalid_mut<T>(addr: usize) -> *mut T {
+ unsafe { core::mem::transmute(addr) }
}
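A small sketch of why an invalid pointer suffices for zero-sized types: the pointer only encodes `index + 1` and is never dereferenced, so the index can be recovered later. A plain cast is used here for illustration; the crate uses the transmute above pending stable strict-provenance APIs.

```rust
fn invalid_mut<T>(addr: usize) -> *mut T {
    addr as *mut T // illustration only; never dereferenced
}

fn main() {
    let index = 5usize;
    // ZST bucket "pointer": the +1 keeps it non-null for NonNull.
    let ptr: *mut () = invalid_mut(index + 1);
    // What `to_base_index` does for ZSTs to get the index back.
    let recovered = ptr as usize - 1;
    assert_eq!(recovered, index);
}
```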
#[inline]
@@ -101,6 +97,13 @@ impl Fallibility {
}
}
+trait SizedTypeProperties: Sized {
+ const IS_ZERO_SIZED: bool = mem::size_of::<Self>() == 0;
+ const NEEDS_DROP: bool = mem::needs_drop::<Self>();
+}
+
+impl<T> SizedTypeProperties for T {}
+
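The blanket impl makes both constants available on every sized type at compile time; a quick self-contained check:

```rust
use core::mem;

trait SizedTypeProperties: Sized {
    const IS_ZERO_SIZED: bool = mem::size_of::<Self>() == 0;
    const NEEDS_DROP: bool = mem::needs_drop::<Self>();
}
impl<T> SizedTypeProperties for T {}

fn main() {
    assert!(<() as SizedTypeProperties>::IS_ZERO_SIZED);
    assert!(!u32::IS_ZERO_SIZED && !u32::NEEDS_DROP);
    assert!(String::NEEDS_DROP); // owns heap memory, so it must be dropped
}
```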
/// Control byte value for an empty bucket.
const EMPTY: u8 = 0b1111_1111;
@@ -134,6 +137,13 @@ fn h1(hash: u64) -> usize {
hash as usize
}
+// Constant for the h2 function that grabs the top 7 bits of the hash.
+const MIN_HASH_LEN: usize = if mem::size_of::<usize>() < mem::size_of::<u64>() {
+ mem::size_of::<usize>()
+} else {
+ mem::size_of::<u64>()
+};
+
/// Secondary hash function, saved in the low 7 bits of the control byte.
#[inline]
#[allow(clippy::cast_possible_truncation)]
@@ -141,8 +151,8 @@ fn h2(hash: u64) -> u8 {
// Grab the top 7 bits of the hash. While the hash is normally a full 64-bit
// value, some hash functions (such as FxHash) produce a usize result
// instead, which means that the top 32 bits are 0 on 32-bit platforms.
- let hash_len = usize::min(mem::size_of::<usize>(), mem::size_of::<u64>());
- let top7 = hash >> (hash_len * 8 - 7);
+ // So we use MIN_HASH_LEN constant to handle this.
+ let top7 = hash >> (MIN_HASH_LEN * 8 - 7);
(top7 & 0x7f) as u8 // truncation
}
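A worked example of `h2`, assuming a 64-bit platform so that `MIN_HASH_LEN == 8` and the shift is by `8 * 8 - 7 = 57` bits:

```rust
fn h2(hash: u64) -> u8 {
    let top7 = hash >> (8 * 8 - 7); // keep only the top 7 bits
    (top7 & 0x7f) as u8
}

fn main() {
    assert_eq!(h2(0xFE00_0000_0000_0000), 0x7F); // top seven bits all set
    assert_eq!(h2(0x0200_0000_0000_0000), 0x01); // only bit 57 set
    assert_eq!(h2(0x01FF_FFFF_FFFF_FFFF), 0x00); // nothing above bit 56
}
```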
@@ -230,11 +240,15 @@ struct TableLayout {
impl TableLayout {
#[inline]
- fn new<T>() -> Self {
+ const fn new<T>() -> Self {
let layout = Layout::new::<T>();
Self {
size: layout.size(),
- ctrl_align: usize::max(layout.align(), Group::WIDTH),
+ ctrl_align: if layout.align() > Group::WIDTH {
+ layout.align()
+ } else {
+ Group::WIDTH
+ },
}
}
@@ -248,6 +262,12 @@ impl TableLayout {
size.checked_mul(buckets)?.checked_add(ctrl_align - 1)? & !(ctrl_align - 1);
let len = ctrl_offset.checked_add(buckets + Group::WIDTH)?;
+ // We need an additional check to ensure that the allocation doesn't
+ // exceed `isize::MAX` (https://github.com/rust-lang/rust/pull/95295).
+ if len > isize::MAX as usize - (ctrl_align - 1) {
+ return None;
+ }
+
Some((
unsafe { Layout::from_size_align_unchecked(len, ctrl_align) },
ctrl_offset,
@@ -255,14 +275,9 @@ impl TableLayout {
}
}
-/// Returns a Layout which describes the allocation required for a hash table,
-/// and the offset of the control bytes in the allocation.
-/// (the offset is also one past last element of buckets)
-///
-/// Returns `None` if an overflow occurs.
-#[cfg_attr(feature = "inline-more", inline)]
-fn calculate_layout<T>(buckets: usize) -> Option<(Layout, usize)> {
- TableLayout::new::<T>().calculate_layout_for(buckets)
+/// A reference to an empty bucket into which an element can be inserted.
+pub struct InsertSlot {
+ index: usize,
}
/// A reference to a hash table bucket containing a `T`.
@@ -290,11 +305,79 @@ impl<T> Clone for Bucket<T> {
}
impl<T> Bucket<T> {
+ /// Creates a [`Bucket`] that contains a pointer to the data.
+ /// The pointer calculation is performed by calculating the
+ /// offset from the given `base` pointer (convenience for
+ /// `base.as_ptr().sub(index)`).
+ ///
+ /// `index` is in units of `T`; e.g., an `index` of 3 represents a pointer
+ /// offset of `3 * size_of::<T>()` bytes.
+ ///
+ /// If `T` is a ZST, then we instead track the index of the element
+ /// in the table so that `erase` works properly (returning
+ /// `NonNull::new_unchecked((index + 1) as *mut T)`).
+ ///
+ /// # Safety
+ ///
+ /// If `mem::size_of::<T>() != 0`, then the safety rules are directly derived
+ /// from the safety rules for [`<*mut T>::sub`] method of `*mut T` and the safety
+ /// rules of [`NonNull::new_unchecked`] function.
+ ///
+ /// Thus, in order to uphold the safety contracts for the [`<*mut T>::sub`] method
+ /// and [`NonNull::new_unchecked`] function, as well as for the correct
+ /// logic of the work of this crate, the following rules are necessary and
+ /// sufficient:
+ ///
+ /// * the `base` pointer must not be `dangling` and must point to the
+ /// end of the first `value element` from the `data part` of the table, i.e.
+ /// must be the pointer that returned by [`RawTable::data_end`] or by
+ /// [`RawTableInner::data_end<T>`];
+ ///
+ /// * `index` must not be greater than `RawTableInner.bucket_mask`, i.e.
+ /// `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)`
+ /// must be no greater than the number returned by the function
+ /// [`RawTable::buckets`] or [`RawTableInner::buckets`].
+ ///
+ /// If `mem::size_of::<T>() == 0`, then the only requirement is that the
+ /// `index` must not be greater than `RawTableInner.bucket_mask`, i.e.
+ /// `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)`
+ /// must be no greater than the number returned by the function
+ /// [`RawTable::buckets`] or [`RawTableInner::buckets`].
+ ///
+ /// [`Bucket`]: crate::raw::Bucket
+ /// [`<*mut T>::sub`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.sub-1
+ /// [`NonNull::new_unchecked`]: https://doc.rust-lang.org/stable/std/ptr/struct.NonNull.html#method.new_unchecked
+ /// [`RawTable::data_end`]: crate::raw::RawTable::data_end
+ /// [`RawTableInner::data_end<T>`]: RawTableInner::data_end<T>
+ /// [`RawTable::buckets`]: crate::raw::RawTable::buckets
+ /// [`RawTableInner::buckets`]: RawTableInner::buckets
#[inline]
unsafe fn from_base_index(base: NonNull<T>, index: usize) -> Self {
- let ptr = if mem::size_of::<T>() == 0 {
- // won't overflow because index must be less than length
- (index + 1) as *mut T
+ // If mem::size_of::<T>() != 0 then return a pointer to an `element` in
+ // the data part of the table (we start counting from "0", so that
+ // in the expression T[last], the "last" index is actually one less than the
+ // "buckets" number in the table, i.e. "last = RawTableInner.bucket_mask"):
+ //
+ // `from_base_index(base, 1).as_ptr()` returns a pointer that
+ // points here in the data part of the table
+ // (to the start of T1)
+ // |
+ // | `base: NonNull<T>` must point here
+ // | (to the end of T0 or to the start of C0)
+ // v v
+ // [Padding], Tlast, ..., |T1|, T0, |C0, C1, ..., Clast
+ // ^
+ // `from_base_index(base, 1)` returns a pointer
+ // that points here in the data part of the table
+ // (to the end of T1)
+ //
+ // where: T0...Tlast - our stored data; C0...Clast - control bytes
+ // or metadata for data.
+ let ptr = if T::IS_ZERO_SIZED {
+ // won't overflow because index must be less than length (bucket_mask)
+ // and bucket_mask is guaranteed to be less than `isize::MAX`
+ // (see TableLayout::calculate_layout_for method)
+ invalid_mut(index + 1)
} else {
base.as_ptr().sub(index)
};
@@ -302,27 +385,183 @@ impl<T> Bucket<T> {
ptr: NonNull::new_unchecked(ptr),
}
}
+
+ /// Calculates the index of a [`Bucket`] as the distance between two pointers
+ /// (convenience for `base.as_ptr().offset_from(self.ptr.as_ptr()) as usize`).
+ /// The returned value is in units of T: the distance in bytes divided by
+ /// [`core::mem::size_of::<T>()`].
+ ///
+ /// If `T` is a ZST, then we return the index of the element in
+ /// the table so that `erase` works properly (returning `self.ptr.as_ptr() as usize - 1`).
+ ///
+ /// This function is the inverse of [`from_base_index`].
+ ///
+ /// # Safety
+ ///
+ /// If `mem::size_of::<T>() != 0`, then the safety rules are directly derived
+ /// from the safety rules for [`<*const T>::offset_from`] method of `*const T`.
+ ///
+ /// Thus, in order to uphold the safety contracts for [`<*const T>::offset_from`]
+ /// method, as well as for the correct logic of the work of this crate, the
+ /// following rules are necessary and sufficient:
+ ///
+ /// * the pointer contained in `base` must not be `dangling` and must point to the
+ /// end of the first `element` from the `data part` of the table, i.e.
+ /// must be a pointer returned by [`RawTable::data_end`] or by
+ /// [`RawTableInner::data_end<T>`];
+ ///
+ /// * `self` must also not contain a dangling pointer;
+ ///
+ /// * both `self` and `base` must be created from the same [`RawTable`]
+ /// (or [`RawTableInner`]).
+ ///
+ /// If `mem::size_of::<T>() == 0`, this function is always safe.
+ ///
+ /// [`Bucket`]: crate::raw::Bucket
+ /// [`from_base_index`]: crate::raw::Bucket::from_base_index
+ /// [`RawTable::data_end`]: crate::raw::RawTable::data_end
+ /// [`RawTableInner::data_end<T>`]: RawTableInner::data_end<T>
+ /// [`RawTable`]: crate::raw::RawTable
+ /// [`RawTableInner`]: RawTableInner
+ /// [`<*const T>::offset_from`]: https://doc.rust-lang.org/nightly/core/primitive.pointer.html#method.offset_from
#[inline]
unsafe fn to_base_index(&self, base: NonNull<T>) -> usize {
- if mem::size_of::<T>() == 0 {
+ // If mem::size_of::<T>() != 0 then return an index under which we used to store the
+ // `element` in the data part of the table (we start counting from "0", so
+ // that in the expression T[last], the "last" index actually is one less than the
+ // "buckets" number in the table, i.e. "last = RawTableInner.bucket_mask").
+ // For example, for the 5th element in the table the calculation is performed like this:
+ //
+ // mem::size_of::<T>()
+ // |
+ // | `self = from_base_index(base, 5)` that returns a pointer
+ // | that points here in the data part of the table
+ // | (to the end of T5)
+ // | | `base: NonNull<T>` must point here
+ // v | (to the end of T0 or to the start of C0)
+ // /???\ v v
+ // [Padding], Tlast, ..., |T10|, ..., T5|, T4, T3, T2, T1, T0, |C0, C1, C2, C3, C4, C5, ..., C10, ..., Clast
+ // \__________ __________/
+ // \/
+ // `bucket.to_base_index(base)` = 5
+ // (base.as_ptr() as usize - self.ptr.as_ptr() as usize) / mem::size_of::<T>()
+ //
+ // where: T0...Tlast - our stored data; C0...Clast - control bytes or metadata for data.
+ if T::IS_ZERO_SIZED {
+ // this can not be UB
self.ptr.as_ptr() as usize - 1
} else {
offset_from(base.as_ptr(), self.ptr.as_ptr())
}
}
+
+ /// Acquires the underlying raw pointer `*mut T` to `data`.
+ ///
+ /// # Note
+ ///
+ /// If `T` is not [`Copy`], do not use `*mut T` methods that can cause calling the
+ /// destructor of `T` (for example the [`<*mut T>::drop_in_place`] method), because
+ /// for properly dropping the data we also need to clear the `data` control bytes. If we
+ /// drop the data but do not clear the `data` control byte, it leads to a double drop when
+ /// [`RawTable`] goes out of scope.
+ ///
+ /// If you modify an already initialized `value`, then [`Hash`] and [`Eq`] on the new
+ /// `T` value and its borrowed form *must* match those for the old `T` value, as the map
+ /// will not re-evaluate where the new value should go, meaning the value may become
+ /// "lost" if their location does not reflect their state.
+ ///
+ /// [`RawTable`]: crate::raw::RawTable
+ /// [`<*mut T>::drop_in_place`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.drop_in_place
+ /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html
+ /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "raw")]
+ /// # fn test() {
+ /// use core::hash::{BuildHasher, Hash};
+ /// use hashbrown::raw::{Bucket, RawTable};
+ ///
+ /// type NewHashBuilder = core::hash::BuildHasherDefault<ahash::AHasher>;
+ ///
+ /// fn make_hash<K: Hash + ?Sized, S: BuildHasher>(hash_builder: &S, key: &K) -> u64 {
+ /// use core::hash::Hasher;
+ /// let mut state = hash_builder.build_hasher();
+ /// key.hash(&mut state);
+ /// state.finish()
+ /// }
+ ///
+ /// let hash_builder = NewHashBuilder::default();
+ /// let mut table = RawTable::new();
+ ///
+ /// let value = ("a", 100);
+ /// let hash = make_hash(&hash_builder, &value.0);
+ ///
+ /// table.insert(hash, value.clone(), |val| make_hash(&hash_builder, &val.0));
+ ///
+ /// let bucket: Bucket<(&str, i32)> = table.find(hash, |(k1, _)| k1 == &value.0).unwrap();
+ ///
+ /// assert_eq!(unsafe { &*bucket.as_ptr() }, &("a", 100));
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "raw")]
+ /// # test()
+ /// # }
+ /// ```
#[inline]
pub fn as_ptr(&self) -> *mut T {
- if mem::size_of::<T>() == 0 {
+ if T::IS_ZERO_SIZED {
// Just return an arbitrary ZST pointer which is properly aligned
- mem::align_of::<T>() as *mut T
+ // invalid pointer is good enough for ZST
+ invalid_mut(mem::align_of::<T>())
} else {
unsafe { self.ptr.as_ptr().sub(1) }
}
}
+
+ /// Create a new [`Bucket`] that is offset from the `self` by the given
+ /// `offset`. The pointer calculation is performed by calculating the
+ /// offset from `self` pointer (convenience for `self.ptr.as_ptr().sub(offset)`).
+ /// This function is used for iterators.
+ ///
+ /// `offset` is in units of `T`; e.g., an `offset` of 3 represents a pointer
+ /// offset of `3 * size_of::<T>()` bytes.
+ ///
+ /// # Safety
+ ///
+ /// If `mem::size_of::<T>() != 0`, then the safety rules are directly derived
+ /// from the safety rules for [`<*mut T>::sub`] method of `*mut T` and safety
+ /// rules of [`NonNull::new_unchecked`] function.
+ ///
+ /// Thus, in order to uphold the safety contracts for [`<*mut T>::sub`] method
+ /// and [`NonNull::new_unchecked`] function, as well as for the correct
+ /// logic of the work of this crate, the following rules are necessary and
+ /// sufficient:
+ ///
+ /// * the pointer contained in `self` must not be `dangling`;
+ ///
+ /// * `self.to_base_index() + offset` must not be greater than `RawTableInner.bucket_mask`,
+ /// i.e. `(self.to_base_index() + offset) <= RawTableInner.bucket_mask` or, in other
+ /// words, `self.to_base_index() + offset + 1` must be no greater than the number returned
+ /// by the function [`RawTable::buckets`] or [`RawTableInner::buckets`].
+ ///
+ /// If `mem::size_of::<T>() == 0`, then the only requirement is that
+ /// `self.to_base_index() + offset` must not be greater than `RawTableInner.bucket_mask`,
+ /// i.e. `(self.to_base_index() + offset) <= RawTableInner.bucket_mask` or, in other words,
+ /// `self.to_base_index() + offset + 1` must be no greater than the number returned by the
+ /// function [`RawTable::buckets`] or [`RawTableInner::buckets`].
+ ///
+ /// [`Bucket`]: crate::raw::Bucket
+ /// [`<*mut T>::sub`]: https://doc.rust-lang.org/core/primitive.pointer.html#method.sub-1
+ /// [`NonNull::new_unchecked`]: https://doc.rust-lang.org/stable/std/ptr/struct.NonNull.html#method.new_unchecked
+ /// [`RawTable::buckets`]: crate::raw::RawTable::buckets
+ /// [`RawTableInner::buckets`]: RawTableInner::buckets
#[inline]
unsafe fn next_n(&self, offset: usize) -> Self {
- let ptr = if mem::size_of::<T>() == 0 {
- (self.ptr.as_ptr() as usize + offset) as *mut T
+ let ptr = if T::IS_ZERO_SIZED {
+ // invalid pointer is good enough for ZST
+ invalid_mut(self.ptr.as_ptr() as usize + offset)
} else {
self.ptr.as_ptr().sub(offset)
};
@@ -330,26 +569,212 @@ impl<T> Bucket<T> {
ptr: NonNull::new_unchecked(ptr),
}
}
+
+ /// Executes the destructor (if any) of the pointed-to `data`.
+ ///
+ /// # Safety
+ ///
+ /// See [`ptr::drop_in_place`] for safety concerns.
+ ///
+ /// You should use [`RawTable::erase`] instead of this function,
+ /// or be careful with calling this function directly, because for
+ /// properly dropping the data we also need to clear the `data` control bytes.
+ /// If we drop the data but do not erase the `data` control byte, it leads to
+ /// a double drop when [`RawTable`] goes out of scope.
+ ///
+ /// [`ptr::drop_in_place`]: https://doc.rust-lang.org/core/ptr/fn.drop_in_place.html
+ /// [`RawTable`]: crate::raw::RawTable
+ /// [`RawTable::erase`]: crate::raw::RawTable::erase
#[cfg_attr(feature = "inline-more", inline)]
- pub unsafe fn drop(&self) {
+ pub(crate) unsafe fn drop(&self) {
self.as_ptr().drop_in_place();
}
+
+ /// Reads the `value` from `self` without moving it. This leaves the
+ /// memory in `self` unchanged.
+ ///
+ /// # Safety
+ ///
+ /// See [`ptr::read`] for safety concerns.
+ ///
+ /// You should use [`RawTable::remove`] instead of this function,
+ /// or be careful with calling this function directly, because the compiler
+ /// calls the destructor when the read `value` goes out of scope. This
+ /// can cause a double drop when [`RawTable`] goes out of scope,
+ /// because the `data` control byte is not erased.
+ ///
+ /// [`ptr::read`]: https://doc.rust-lang.org/core/ptr/fn.read.html
+ /// [`RawTable`]: crate::raw::RawTable
+ /// [`RawTable::remove`]: crate::raw::RawTable::remove
#[inline]
- pub unsafe fn read(&self) -> T {
+ pub(crate) unsafe fn read(&self) -> T {
self.as_ptr().read()
}
+
+ /// Overwrites a memory location with the given `value` without reading
+ /// or dropping the old value (like [`ptr::write`] function).
+ ///
+ /// # Safety
+ ///
+ /// See [`ptr::write`] for safety concerns.
+ ///
+ /// # Note
+ ///
+ /// [`Hash`] and [`Eq`] on the new `T` value and its borrowed form *must* match
+ /// those for the old `T` value, as the map will not re-evaluate where the new
+ /// value should go, meaning the value may become "lost" if their location
+ /// does not reflect their state.
+ ///
+ /// [`ptr::write`]: https://doc.rust-lang.org/core/ptr/fn.write.html
+ /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html
+ /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html
#[inline]
- pub unsafe fn write(&self, val: T) {
+ pub(crate) unsafe fn write(&self, val: T) {
self.as_ptr().write(val);
}
+
+ /// Returns a shared immutable reference to the `value`.
+ ///
+ /// # Safety
+ ///
+ /// See [`NonNull::as_ref`] for safety concerns.
+ ///
+ /// [`NonNull::as_ref`]: https://doc.rust-lang.org/core/ptr/struct.NonNull.html#method.as_ref
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "raw")]
+ /// # fn test() {
+ /// use core::hash::{BuildHasher, Hash};
+ /// use hashbrown::raw::{Bucket, RawTable};
+ ///
+ /// type NewHashBuilder = core::hash::BuildHasherDefault<ahash::AHasher>;
+ ///
+ /// fn make_hash<K: Hash + ?Sized, S: BuildHasher>(hash_builder: &S, key: &K) -> u64 {
+ /// use core::hash::Hasher;
+ /// let mut state = hash_builder.build_hasher();
+ /// key.hash(&mut state);
+ /// state.finish()
+ /// }
+ ///
+ /// let hash_builder = NewHashBuilder::default();
+ /// let mut table = RawTable::new();
+ ///
+ /// let value: (&str, String) = ("A pony", "is a small horse".to_owned());
+ /// let hash = make_hash(&hash_builder, &value.0);
+ ///
+ /// table.insert(hash, value.clone(), |val| make_hash(&hash_builder, &val.0));
+ ///
+ /// let bucket: Bucket<(&str, String)> = table.find(hash, |(k, _)| k == &value.0).unwrap();
+ ///
+ /// assert_eq!(
+ /// unsafe { bucket.as_ref() },
+ /// &("A pony", "is a small horse".to_owned())
+ /// );
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "raw")]
+ /// # test()
+ /// # }
+ /// ```
#[inline]
pub unsafe fn as_ref<'a>(&self) -> &'a T {
&*self.as_ptr()
}
+
+ /// Returns a unique mutable reference to the `value`.
+ ///
+ /// # Safety
+ ///
+ /// See [`NonNull::as_mut`] for safety concerns.
+ ///
+ /// # Note
+ ///
+ /// [`Hash`] and [`Eq`] on the new `T` value and its borrowed form *must* match
+ /// those for the old `T` value, as the map will not re-evaluate where the new
+ /// value should go, meaning the value may become "lost" if their location
+ /// does not reflect their state.
+ ///
+ /// [`NonNull::as_mut`]: https://doc.rust-lang.org/core/ptr/struct.NonNull.html#method.as_mut
+ /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html
+ /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "raw")]
+ /// # fn test() {
+ /// use core::hash::{BuildHasher, Hash};
+ /// use hashbrown::raw::{Bucket, RawTable};
+ ///
+ /// type NewHashBuilder = core::hash::BuildHasherDefault<ahash::AHasher>;
+ ///
+ /// fn make_hash<K: Hash + ?Sized, S: BuildHasher>(hash_builder: &S, key: &K) -> u64 {
+ /// use core::hash::Hasher;
+ /// let mut state = hash_builder.build_hasher();
+ /// key.hash(&mut state);
+ /// state.finish()
+ /// }
+ ///
+ /// let hash_builder = NewHashBuilder::default();
+ /// let mut table = RawTable::new();
+ ///
+ /// let value: (&str, String) = ("A pony", "is a small horse".to_owned());
+ /// let hash = make_hash(&hash_builder, &value.0);
+ ///
+ /// table.insert(hash, value.clone(), |val| make_hash(&hash_builder, &val.0));
+ ///
+ /// let bucket: Bucket<(&str, String)> = table.find(hash, |(k, _)| k == &value.0).unwrap();
+ ///
+ /// unsafe {
+ /// bucket
+ /// .as_mut()
+ /// .1
+ /// .push_str(" less than 147 cm at the withers")
+ /// };
+ /// assert_eq!(
+ /// unsafe { bucket.as_ref() },
+ /// &(
+ /// "A pony",
+ /// "is a small horse less than 147 cm at the withers".to_owned()
+ /// )
+ /// );
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "raw")]
+ /// # test()
+ /// # }
+ /// ```
#[inline]
pub unsafe fn as_mut<'a>(&self) -> &'a mut T {
&mut *self.as_ptr()
}
+
+ /// Copies `size_of::<T>()` bytes from `other` to `self`. The source
+ /// and destination may *not* overlap.
+ ///
+ /// # Safety
+ ///
+ /// See [`ptr::copy_nonoverlapping`] for safety concerns.
+ ///
+ /// Like [`read`], `copy_nonoverlapping` creates a bitwise copy of `T`, regardless of
+ /// whether `T` is [`Copy`]. If `T` is not [`Copy`], using *both* the values
+ /// in the region beginning at `*self` and the region beginning at `*other` can
+ /// [violate memory safety].
+ ///
+ /// # Note
+ ///
+ /// [`Hash`] and [`Eq`] on the new `T` value and its borrowed form *must* match
+ /// those for the old `T` value, as the map will not re-evaluate where the new
+ /// value should go, meaning the value may become "lost" if their location
+ /// does not reflect their state.
+ ///
+ /// [`ptr::copy_nonoverlapping`]: https://doc.rust-lang.org/core/ptr/fn.copy_nonoverlapping.html
+ /// [`read`]: https://doc.rust-lang.org/core/ptr/fn.read.html
+ /// [violate memory safety]: https://doc.rust-lang.org/std/ptr/fn.read.html#ownership-of-the-returned-value
+ /// [`Hash`]: https://doc.rust-lang.org/core/hash/trait.Hash.html
+ /// [`Eq`]: https://doc.rust-lang.org/core/cmp/trait.Eq.html
#[cfg(feature = "raw")]
#[inline]
pub unsafe fn copy_from_nonoverlapping(&self, other: &Self) {
@@ -358,15 +783,16 @@ impl<T> Bucket<T> {
}
/// A raw hash table with an unsafe API.
-pub struct RawTable<T, A: Allocator + Clone = Global> {
- table: RawTableInner<A>,
+pub struct RawTable<T, A: Allocator = Global> {
+ table: RawTableInner,
+ alloc: A,
// Tell dropck that we own instances of T.
marker: PhantomData<T>,
}
/// Non-generic part of `RawTable` which allows functions to be instantiated only once regardless
/// of how many different key-value types are used.
-struct RawTableInner<A> {
+struct RawTableInner {
// Mask to get an index from a hash value. The value is one less than the
// number of buckets in the table.
bucket_mask: usize,
@@ -380,8 +806,6 @@ struct RawTableInner<A> {
// Number of elements in the table, only really used by len()
items: usize,
-
- alloc: A,
}
impl<T> RawTable<T, Global> {
@@ -393,7 +817,8 @@ impl<T> RawTable<T, Global> {
#[inline]
pub const fn new() -> Self {
Self {
- table: RawTableInner::new_in(Global),
+ table: RawTableInner::NEW,
+ alloc: Global,
marker: PhantomData,
}
}
@@ -412,7 +837,9 @@ impl<T> RawTable<T, Global> {
}
}
-impl<T, A: Allocator + Clone> RawTable<T, A> {
+impl<T, A: Allocator> RawTable<T, A> {
+ const TABLE_LAYOUT: TableLayout = TableLayout::new::<T>();
+
/// Creates a new empty hash table without allocating any memory, using the
/// given allocator.
///
@@ -420,9 +847,10 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
/// leave the data pointer dangling since that bucket is never written to
/// due to our load factor forcing us to always have at least 1 free bucket.
#[inline]
- pub fn new_in(alloc: A) -> Self {
+ pub const fn new_in(alloc: A) -> Self {
Self {
- table: RawTableInner::new_in(alloc),
+ table: RawTableInner::NEW,
+ alloc,
marker: PhantomData,
}
}
@@ -440,73 +868,97 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
Ok(Self {
table: RawTableInner::new_uninitialized(
- alloc,
- TableLayout::new::<T>(),
+ &alloc,
+ Self::TABLE_LAYOUT,
buckets,
fallibility,
)?,
+ alloc,
marker: PhantomData,
})
}
- /// Attempts to allocate a new hash table with at least enough capacity
- /// for inserting the given number of elements without reallocating.
- fn fallible_with_capacity(
- alloc: A,
- capacity: usize,
- fallibility: Fallibility,
- ) -> Result<Self, TryReserveError> {
+ /// Attempts to allocate a new hash table using the given allocator, with at least enough
+ /// capacity for inserting the given number of elements without reallocating.
+ #[cfg(feature = "raw")]
+ pub fn try_with_capacity_in(capacity: usize, alloc: A) -> Result<Self, TryReserveError> {
Ok(Self {
table: RawTableInner::fallible_with_capacity(
- alloc,
- TableLayout::new::<T>(),
+ &alloc,
+ Self::TABLE_LAYOUT,
capacity,
- fallibility,
+ Fallibility::Fallible,
)?,
+ alloc,
marker: PhantomData,
})
}
- /// Attempts to allocate a new hash table using the given allocator, with at least enough
- /// capacity for inserting the given number of elements without reallocating.
- #[cfg(feature = "raw")]
- pub fn try_with_capacity_in(capacity: usize, alloc: A) -> Result<Self, TryReserveError> {
- Self::fallible_with_capacity(alloc, capacity, Fallibility::Fallible)
- }
-
/// Allocates a new hash table using the given allocator, with at least enough capacity for
/// inserting the given number of elements without reallocating.
pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
- // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
- match Self::fallible_with_capacity(alloc, capacity, Fallibility::Infallible) {
- Ok(capacity) => capacity,
- Err(_) => unsafe { hint::unreachable_unchecked() },
+ Self {
+ table: RawTableInner::with_capacity(&alloc, Self::TABLE_LAYOUT, capacity),
+ alloc,
+ marker: PhantomData,
}
}
/// Returns a reference to the underlying allocator.
#[inline]
pub fn allocator(&self) -> &A {
- &self.table.alloc
+ &self.alloc
}
- /// Deallocates the table without dropping any entries.
- #[cfg_attr(feature = "inline-more", inline)]
- unsafe fn free_buckets(&mut self) {
- self.table.free_buckets(TableLayout::new::<T>());
+ /// Returns a pointer to one past the last `data` element in the table, as viewed
+ /// from the start of the allocation.
+ ///
+ /// The caller must ensure that the `RawTable` outlives the returned [`NonNull<T>`],
+ /// otherwise using it may result in [`undefined behavior`].
+ ///
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ #[inline]
+ pub fn data_end(&self) -> NonNull<T> {
+ // `self.table.ctrl.cast()` returns pointer that
+ // points here (to the end of `T0`)
+ // ∨
+ // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, CTa_0, CTa_1, ..., CTa_m
+ // \________ ________/
+ // \/
+ // `n = buckets - 1`, i.e. `RawTable::buckets() - 1`
+ //
+ // where: T0...T_n - our stored data;
+ // CT0...CT_n - control bytes or metadata for `data`.
+ // CTa_0...CTa_m - additional control bytes, where `m = Group::WIDTH - 1` (so that the search
+ // with loading `Group` bytes from the heap works properly, even if the result
+ // of `h1(hash) & self.bucket_mask` is equal to `self.bucket_mask`). See also
+ // `RawTableInner::set_ctrl` function.
+ //
+ // P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
+ // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
+ self.table.ctrl.cast()
}
- /// Returns pointer to one past last element of data table.
+ /// Returns pointer to start of data table.
#[inline]
- pub unsafe fn data_end(&self) -> NonNull<T> {
- NonNull::new_unchecked(self.table.ctrl.as_ptr().cast())
+ #[cfg(any(feature = "raw", feature = "nightly"))]
+ pub unsafe fn data_start(&self) -> NonNull<T> {
+ NonNull::new_unchecked(self.data_end().as_ptr().wrapping_sub(self.buckets()))
}
- /// Returns pointer to start of data table.
+ /// Returns information about the memory allocated by the table.
+ ///
+ /// `RawTable` allocates a single memory block to store both data and metadata.
+ /// This function returns the allocation size and alignment and a pointer to the
+ /// beginning of the area. These are the arguments that will be passed to `dealloc`
+ /// when the table is dropped.
+ ///
+ /// This function might be useful for memory profiling.
#[inline]
- #[cfg(feature = "nightly")]
- pub unsafe fn data_start(&self) -> *mut T {
- self.data_end().as_ptr().wrapping_sub(self.buckets())
+ #[cfg(feature = "raw")]
+ pub fn allocation_info(&self) -> (NonNull<u8>, Layout) {
+ // SAFETY: We use the same `table_layout` that was used to allocate
+ // this table.
+ unsafe { self.table.allocation_info_or_zero(Self::TABLE_LAYOUT) }
}
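A possible profiling use, assuming the `raw` cargo feature is enabled; the exact size depends on the group width and load factor, so only the shape is asserted:

```rust
use hashbrown::raw::RawTable;

fn main() {
    let table: RawTable<u64> = RawTable::with_capacity(3);
    let (_start, layout) = table.allocation_info();
    // A single block holds the data part followed by the control bytes.
    assert!(layout.size() > 0);
    println!("{} bytes, align {}", layout.size(), layout.align());
}
```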
/// Returns the index of a bucket from a `Bucket`.
@@ -516,8 +968,55 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
}
/// Returns a pointer to an element in the table.
+ ///
+ /// The caller must ensure that the `RawTable` outlives the returned [`Bucket<T>`],
+ /// otherwise using it may result in [`undefined behavior`].
+ ///
+ /// # Safety
+ ///
+ /// If `mem::size_of::<T>() != 0`, then the caller of this function must observe the
+ /// following safety rules:
+ ///
+ /// * The table must already be allocated;
+ ///
+ /// * The `index` must not be greater than the number returned by the [`RawTable::buckets`]
+ /// function, i.e. `(index + 1) <= self.buckets()`.
+ ///
+ /// It is safe to call this function with an index of zero (`index == 0`) on a table that has
+ /// not been allocated, but using the returned [`Bucket`] results in [`undefined behavior`].
+ ///
+ /// If `mem::size_of::<T>() == 0`, then the only requirement is that the `index` must
+ /// not be greater than the number returned by the [`RawTable::buckets`] function, i.e.
+ /// `(index + 1) <= self.buckets()`.
+ ///
+ /// [`RawTable::buckets`]: RawTable::buckets
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
#[inline]
pub unsafe fn bucket(&self, index: usize) -> Bucket<T> {
+ // If mem::size_of::<T>() != 0 then return a pointer to the `element` in the `data part` of the table
+ // (we start counting from "0", so that in the expression T[n], the "n" index actually one less than
+ // the "buckets" number of our `RawTable`, i.e. "n = RawTable::buckets() - 1"):
+ //
+ // `table.bucket(3).as_ptr()` returns a pointer that points here in the `data`
+ // part of the `RawTable`, i.e. to the start of T3 (see `Bucket::as_ptr`)
+ // |
+ // | `base = self.data_end()` points here
+ // | (to the start of CT0 or to the end of T0)
+ // v v
+ // [Pad], T_n, ..., |T3|, T2, T1, T0, |CT0, CT1, CT2, CT3, ..., CT_n, CTa_0, CTa_1, ..., CTa_m
+ // ^ \__________ __________/
+ // `table.bucket(3)` returns a pointer that points \/
+ // here in the `data` part of the `RawTable` (to additional control bytes
+ // the end of T3) `m = Group::WIDTH - 1`
+ //
+ // where: T0...T_n - our stored data;
+ // CT0...CT_n - control bytes or metadata for `data`;
+ // CTa_0...CTa_m - additional control bytes (so that the search with loading `Group` bytes from
+ // the heap works properly, even if the result of `h1(hash) & self.table.bucket_mask`
+ // is equal to `self.table.bucket_mask`). See also `RawTableInner::set_ctrl` function.
+ //
+ // P.S. `h1(hash) & self.table.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
+ // of buckets is a power of two, and `self.table.bucket_mask = self.buckets() - 1`.
debug_assert_ne!(self.table.bucket_mask, 0);
debug_assert!(index < self.buckets());
Bucket::from_base_index(self.data_end(), index)
@@ -525,8 +1024,7 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
/// Erases an element from the table without dropping it.
#[cfg_attr(feature = "inline-more", inline)]
- #[deprecated(since = "0.8.1", note = "use erase or remove instead")]
- pub unsafe fn erase_no_drop(&mut self, item: &Bucket<T>) {
+ unsafe fn erase_no_drop(&mut self, item: &Bucket<T>) {
let index = self.bucket_index(item);
self.table.erase(index);
}
@@ -534,7 +1032,6 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
/// Erases an element from the table, dropping it in place.
#[cfg_attr(feature = "inline-more", inline)]
#[allow(clippy::needless_pass_by_value)]
- #[allow(deprecated)]
pub unsafe fn erase(&mut self, item: Bucket<T>) {
// Erase the element from the table first since drop might panic.
self.erase_no_drop(&item);
@@ -558,12 +1055,18 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
}
/// Removes an element from the table, returning it.
+ ///
+ /// This also returns an `InsertSlot` pointing to the newly freed bucket.
#[cfg_attr(feature = "inline-more", inline)]
#[allow(clippy::needless_pass_by_value)]
- #[allow(deprecated)]
- pub unsafe fn remove(&mut self, item: Bucket<T>) -> T {
+ pub unsafe fn remove(&mut self, item: Bucket<T>) -> (T, InsertSlot) {
self.erase_no_drop(&item);
- item.read()
+ (
+ item.read(),
+ InsertSlot {
+ index: self.bucket_index(&item),
+ },
+ )
}
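A hedged sketch of the new return shape, assuming the `raw` feature and, as in the doc examples above, an `ahash`-based hasher:

```rust
use core::hash::{BuildHasher, BuildHasherDefault, Hash, Hasher};
use hashbrown::raw::RawTable;

fn make_hash<K: Hash, S: BuildHasher>(s: &S, k: &K) -> u64 {
    let mut state = s.build_hasher();
    k.hash(&mut state);
    state.finish()
}

fn main() {
    let hasher = BuildHasherDefault::<ahash::AHasher>::default();
    let mut table: RawTable<(u32, u32)> = RawTable::new();
    let hash = make_hash(&hasher, &1u32);
    table.insert(hash, (1, 10), |&(k, _)| make_hash(&hasher, &k));

    let bucket = table.find(hash, |&(k, _)| k == 1).unwrap();
    // The tuple now carries both the removed value and the freed slot.
    let ((key, value), _slot) = unsafe { table.remove(bucket) };
    assert_eq!((key, value), (1, 10));
    assert!(table.is_empty());
}
```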
/// Finds and removes an element from the table, returning it.
@@ -571,7 +1074,7 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
pub fn remove_entry(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<T> {
// Avoid `Option::map` because it bloats LLVM IR.
match self.find(hash, eq) {
- Some(bucket) => Some(unsafe { self.remove(bucket) }),
+ Some(bucket) => Some(unsafe { self.remove(bucket).0 }),
None => None,
}
}
@@ -585,18 +1088,17 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
/// Removes all elements from the table without freeing the backing memory.
#[cfg_attr(feature = "inline-more", inline)]
pub fn clear(&mut self) {
+ if self.is_empty() {
+ // Special case empty table to avoid surprising O(capacity) time.
+ return;
+ }
// Ensure that the table is reset even if one of the drops panic
let mut self_ = guard(self, |self_| self_.clear_no_drop());
unsafe {
- self_.drop_elements();
- }
- }
-
- unsafe fn drop_elements(&mut self) {
- if mem::needs_drop::<T>() && !self.is_empty() {
- for item in self.iter() {
- item.drop();
- }
+ // SAFETY: ScopeGuard sets to zero the `items` field of the table
+ // even in case of panic during the dropping of the elements so
+ // that there will be no double drop of the elements.
+ self_.table.drop_elements::<T>();
}
}
@@ -607,7 +1109,16 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
// space for.
let min_size = usize::max(self.table.items, min_size);
if min_size == 0 {
- *self = Self::new_in(self.table.alloc.clone());
+ let mut old_inner = mem::replace(&mut self.table, RawTableInner::NEW);
+ unsafe {
+ // SAFETY:
+ // 1. We call the function only once;
+ // 2. We know for sure that `alloc` and `table_layout` match the [`Allocator`]
+ // and [`TableLayout`] that were used to allocate this table.
+ // 3. If any elements' drop function panics, then there will only be a memory leak,
+ // because we have replaced the inner table with a new one.
+ old_inner.drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
+ }
return;
}
@@ -624,14 +1135,33 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
if min_buckets < self.buckets() {
// Fast path if the table is empty
if self.table.items == 0 {
- *self = Self::with_capacity_in(min_size, self.table.alloc.clone());
+ let new_inner =
+ RawTableInner::with_capacity(&self.alloc, Self::TABLE_LAYOUT, min_size);
+ let mut old_inner = mem::replace(&mut self.table, new_inner);
+ unsafe {
+ // SAFETY:
+ // 1. We call the function only once;
+ // 2. We know for sure that `alloc` and `table_layout` match the [`Allocator`]
+ // and [`TableLayout`] that were used to allocate this table.
+ // 3. If any elements' drop function panics, then there will only be a memory leak,
+ // because we have replaced the inner table with a new one.
+ old_inner.drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
+ }
} else {
// Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
- if self
- .resize(min_size, hasher, Fallibility::Infallible)
- .is_err()
- {
- unsafe { hint::unreachable_unchecked() }
+ unsafe {
+ // SAFETY:
+ // 1. We know for sure that `min_size >= self.table.items`.
+ // 2. The [`RawTableInner`] must already have properly initialized control bytes since
+ // we will never expose RawTable::new_uninitialized in a public API.
+ if self
+ .resize(min_size, hasher, Fallibility::Infallible)
+ .is_err()
+ {
+ // SAFETY: The result of calling the `resize` function cannot be an error
+ // because `fallibility == Fallibility::Infallible`.
+ hint::unreachable_unchecked()
+ }
}
}
}
@@ -641,13 +1171,18 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
/// without reallocation.
#[cfg_attr(feature = "inline-more", inline)]
pub fn reserve(&mut self, additional: usize, hasher: impl Fn(&T) -> u64) {
- if additional > self.table.growth_left {
+ if unlikely(additional > self.table.growth_left) {
// Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
- if self
- .reserve_rehash(additional, hasher, Fallibility::Infallible)
- .is_err()
- {
- unsafe { hint::unreachable_unchecked() }
+ unsafe {
+ // SAFETY: The [`RawTableInner`] must already have properly initialized control
+ // bytes since we will never expose RawTable::new_uninitialized in a public API.
+ if self
+ .reserve_rehash(additional, hasher, Fallibility::Infallible)
+ .is_err()
+ {
+ // SAFETY: All allocation errors will be caught inside `RawTableInner::reserve_rehash`.
+ hint::unreachable_unchecked()
+ }
}
}
}
@@ -661,28 +1196,45 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
hasher: impl Fn(&T) -> u64,
) -> Result<(), TryReserveError> {
if additional > self.table.growth_left {
- self.reserve_rehash(additional, hasher, Fallibility::Fallible)
+ // SAFETY: The [`RawTableInner`] must already have properly initialized control
+ // bytes since we will never expose RawTable::new_uninitialized in a public API.
+ unsafe { self.reserve_rehash(additional, hasher, Fallibility::Fallible) }
} else {
Ok(())
}
}
/// Out-of-line slow path for `reserve` and `try_reserve`.
+ ///
+ /// # Safety
+ ///
+ /// The [`RawTableInner`] must have properly initialized control bytes,
+ /// otherwise calling this function results in [`undefined behavior`]
+ ///
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
#[cold]
#[inline(never)]
- fn reserve_rehash(
+ unsafe fn reserve_rehash(
&mut self,
additional: usize,
hasher: impl Fn(&T) -> u64,
fallibility: Fallibility,
) -> Result<(), TryReserveError> {
unsafe {
+ // SAFETY:
+ // 1. We know for sure that `alloc` and `layout` match the [`Allocator`] and
+ // [`TableLayout`] that were used to allocate this table.
+ // 2. The `drop` function is the actual drop function of the elements stored in
+ // the table.
+ // 3. The caller ensures that the control bytes of the `RawTableInner`
+ // are already initialized.
self.table.reserve_rehash_inner(
+ &self.alloc,
additional,
&|table, index| hasher(table.bucket::<T>(index).as_ref()),
fallibility,
- TableLayout::new::<T>(),
- if mem::needs_drop::<T>() {
+ Self::TABLE_LAYOUT,
+ if T::NEEDS_DROP {
Some(mem::transmute(ptr::drop_in_place::<T> as unsafe fn(*mut T)))
} else {
None
@@ -693,20 +1245,50 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
/// Allocates a new table of a different size and moves the contents of the
/// current table into it.
- fn resize(
+ ///
+ /// # Safety
+ ///
+ /// The [`RawTableInner`] must have properly initialized control bytes,
+ /// otherwise calling this function results in [`undefined behavior`]
+ ///
+ /// The caller of this function must ensure that `capacity >= self.table.items`
+ /// otherwise:
+ ///
+ /// * If `self.table.items != 0`, calling this function with `capacity`
+ /// equal to 0 (`capacity == 0`) results in [`undefined behavior`].
+ ///
+ /// * If `capacity_to_buckets(capacity) < Group::WIDTH` and
+ /// `self.table.items > capacity_to_buckets(capacity)`
+ /// calling this function results in [`undefined behavior`].
+ ///
+ /// * If `capacity_to_buckets(capacity) >= Group::WIDTH` and
+ /// `self.table.items > capacity_to_buckets(capacity)`
+ /// calling this function will never return (it will go into an
+ /// infinite loop).
+ ///
+ /// See [`RawTableInner::find_insert_slot`] for more information.
+ ///
+ /// [`RawTableInner::find_insert_slot`]: RawTableInner::find_insert_slot
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ unsafe fn resize(
&mut self,
capacity: usize,
hasher: impl Fn(&T) -> u64,
fallibility: Fallibility,
) -> Result<(), TryReserveError> {
- unsafe {
- self.table.resize_inner(
- capacity,
- &|table, index| hasher(table.bucket::<T>(index).as_ref()),
- fallibility,
- TableLayout::new::<T>(),
- )
- }
+ // SAFETY:
+ // 1. The caller of this function guarantees that `capacity >= self.table.items`.
+ // 2. We know for sure that `alloc` and `layout` match the [`Allocator`] and
+ // [`TableLayout`] that were used to allocate this table.
+ // 3. The caller ensures that the control bytes of the `RawTableInner`
+ // are already initialized.
+ self.table.resize_inner(
+ &self.alloc,
+ capacity,
+ &|table, index| hasher(table.bucket::<T>(index).as_ref()),
+ fallibility,
+ Self::TABLE_LAYOUT,
+ )
}
/// Inserts a new element into the table, and returns its raw bucket.
@@ -715,22 +1297,27 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
#[cfg_attr(feature = "inline-more", inline)]
pub fn insert(&mut self, hash: u64, value: T, hasher: impl Fn(&T) -> u64) -> Bucket<T> {
unsafe {
- let mut index = self.table.find_insert_slot(hash);
+ // SAFETY:
+ // 1. The [`RawTableInner`] must already have properly initialized control bytes since
+ // we will never expose `RawTable::new_uninitialized` in a public API.
+ //
+ // 2. We reserve additional space (if necessary) right after calling this function.
+ let mut slot = self.table.find_insert_slot(hash);
- // We can avoid growing the table once we have reached our load
- // factor if we are replacing a tombstone. This works since the
- // number of EMPTY slots does not change in this case.
- let old_ctrl = *self.table.ctrl(index);
+ // We can avoid growing the table once we have reached our load factor if we are replacing
+ // a tombstone. This works since the number of EMPTY slots does not change in this case.
+ //
+ // SAFETY: The function is guaranteed to return an [`InsertSlot`] that contains an index
+ // in the range `0..=self.buckets()`.
+ let old_ctrl = *self.table.ctrl(slot.index);
if unlikely(self.table.growth_left == 0 && special_is_empty(old_ctrl)) {
self.reserve(1, hasher);
- index = self.table.find_insert_slot(hash);
+ // SAFETY: We know for sure that `RawTableInner` has control bytes
+ // initialized and that there is extra space in the table.
+ slot = self.table.find_insert_slot(hash);
}
- self.table.record_item_insert_at(index, old_ctrl, hash);
-
- let bucket = self.bucket(index);
- bucket.write(value);
- bucket
+ self.insert_in_slot(hash, slot, value)
}
}
@@ -796,9 +1383,9 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
{
let index = self.bucket_index(&bucket);
let old_ctrl = *self.table.ctrl(index);
- debug_assert!(is_full(old_ctrl));
+ debug_assert!(self.is_bucket_full(index));
let old_growth_left = self.table.growth_left;
- let item = self.remove(bucket);
+ let item = self.remove(bucket).0;
if let Some(new_item) = f(item) {
self.table.growth_left = old_growth_left;
self.table.set_ctrl(index, old_ctrl);
@@ -810,17 +1397,78 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
}
}
+ /// Searches for an element in the table. If the element is not found,
+ /// returns `Err` with the position of a slot where an element with the
+ /// same hash could be inserted.
+ ///
+ /// This function may resize the table if additional space is required for
+ /// inserting an element.
+ #[inline]
+ pub fn find_or_find_insert_slot(
+ &mut self,
+ hash: u64,
+ mut eq: impl FnMut(&T) -> bool,
+ hasher: impl Fn(&T) -> u64,
+ ) -> Result<Bucket<T>, InsertSlot> {
+ self.reserve(1, hasher);
+
+ unsafe {
+ // SAFETY:
+ // 1. We know for sure that there is at least one empty `bucket` in the table.
+ // 2. The [`RawTableInner`] must already have properly initialized control bytes since we will
+ // never expose `RawTable::new_uninitialized` in a public API.
+ // 3. The `find_or_find_insert_slot_inner` function returns the `index` of only the full bucket,
+ // which is in the range `0..self.buckets()` (since there is at least one empty `bucket` in
+ // the table), so calling `self.bucket(index)` and `Bucket::as_ref` is safe.
+ match self
+ .table
+ .find_or_find_insert_slot_inner(hash, &mut |index| eq(self.bucket(index).as_ref()))
+ {
+ // SAFETY: See explanation above.
+ Ok(index) => Ok(self.bucket(index)),
+ Err(slot) => Err(slot),
+ }
+ }
+ }
+
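A minimal upsert sketch built on this pairing, assuming the `raw` feature and the same `ahash`-based helper as the doc examples; the slot from the `Err` arm is used immediately, which satisfies the no-intervening-mutation contract of `insert_in_slot` below:

```rust
use core::hash::{BuildHasher, BuildHasherDefault, Hash, Hasher};
use hashbrown::raw::RawTable;

fn make_hash<K: Hash + ?Sized, S: BuildHasher>(s: &S, k: &K) -> u64 {
    let mut state = s.build_hasher();
    k.hash(&mut state);
    state.finish()
}

fn main() {
    let hasher = BuildHasherDefault::<ahash::AHasher>::default();
    let mut counts: RawTable<(&str, u32)> = RawTable::new();
    for word in ["a", "b", "a"] {
        let hash = make_hash(&hasher, word);
        match counts.find_or_find_insert_slot(
            hash,
            |&(k, _)| k == word,
            |&(k, _)| make_hash(&hasher, k),
        ) {
            // Key already present: bump the count in place.
            Ok(bucket) => unsafe { bucket.as_mut().1 += 1 },
            // Key absent: insert into the slot we were just handed.
            Err(slot) => unsafe {
                counts.insert_in_slot(hash, slot, (word, 1));
            },
        }
    }
    let hash = make_hash(&hasher, "a");
    let count = counts.find(hash, |&(k, _)| k == "a").map(|b| unsafe { b.as_ref().1 });
    assert_eq!(count, Some(2));
}
```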
+ /// Inserts a new element into the table in the given slot, and returns its
+ /// raw bucket.
+ ///
+ /// # Safety
+ ///
+ /// `slot` must point to a slot previously returned by
+ /// `find_or_find_insert_slot`, and no mutation of the table must have
+ /// occurred since that call.
+ #[inline]
+ pub unsafe fn insert_in_slot(&mut self, hash: u64, slot: InsertSlot, value: T) -> Bucket<T> {
+ let old_ctrl = *self.table.ctrl(slot.index);
+ self.table.record_item_insert_at(slot.index, old_ctrl, hash);
+
+ let bucket = self.bucket(slot.index);
+ bucket.write(value);
+ bucket
+ }
+
/// Searches for an element in the table.
#[inline]
pub fn find(&self, hash: u64, mut eq: impl FnMut(&T) -> bool) -> Option<Bucket<T>> {
- let result = self.table.find_inner(hash, &mut |index| unsafe {
- eq(self.bucket(index).as_ref())
- });
-
- // Avoid `Option::map` because it bloats LLVM IR.
- match result {
- Some(index) => Some(unsafe { self.bucket(index) }),
- None => None,
+ unsafe {
+ // SAFETY:
+ // 1. The [`RawTableInner`] must already have properly initialized control bytes since we
+ // will never expose `RawTable::new_uninitialized` in a public API.
+ // 2. The `find_inner` function returns the `index` of only the full bucket, which is in
+ // the range `0..self.buckets()`, so calling `self.bucket(index)` and `Bucket::as_ref`
+ // is safe.
+ let result = self
+ .table
+ .find_inner(hash, &mut |index| eq(self.bucket(index).as_ref()));
+
+ // Avoid `Option::map` because it bloats LLVM IR.
+ match result {
+ // SAFETY: See explanation above.
+ Some(index) => Some(self.bucket(index)),
+ None => None,
+ }
}
}
@@ -928,17 +1576,27 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
self.table.bucket_mask + 1
}
+ /// Checks whether the bucket at `index` is full.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure `index` is less than the number of buckets.
+ #[inline]
+ pub unsafe fn is_bucket_full(&self, index: usize) -> bool {
+ self.table.is_bucket_full(index)
+ }
+
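A tiny check of the new accessor, assuming the `raw` feature: after a single insert, exactly one bucket index reports full.

```rust
use hashbrown::raw::RawTable;

fn main() {
    let mut table: RawTable<u32> = RawTable::with_capacity(4);
    table.insert(u64::from(7u32), 7, |&v| u64::from(v));
    let full = (0..table.buckets())
        .filter(|&i| unsafe { table.is_bucket_full(i) })
        .count();
    assert_eq!(full, 1);
}
```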
/// Returns an iterator over every element in the table. It is up to
/// the caller to ensure that the `RawTable` outlives the `RawIter`.
/// Because we cannot make the `next` method unsafe on the `RawIter`
/// struct, we have to make the `iter` method unsafe.
#[inline]
pub unsafe fn iter(&self) -> RawIter<T> {
- let data = Bucket::from_base_index(self.data_end(), 0);
- RawIter {
- iter: RawIterRange::new(self.table.ctrl.as_ptr(), data, self.table.buckets()),
- items: self.table.items,
- }
+ // SAFETY:
+ // 1. The caller must uphold the safety contract for `iter` method.
+ // 2. The [`RawTableInner`] must already have properly initialized control bytes since
+ // we will never expose RawTable::new_uninitialized in a public API.
+ self.table.iter()
}
/// Returns an iterator over occupied buckets that could match a given hash.
@@ -952,7 +1610,7 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
/// `RawIterHash` struct, we have to make the `iter_hash` method unsafe.
#[cfg_attr(feature = "inline-more", inline)]
#[cfg(feature = "raw")]
- pub unsafe fn iter_hash(&self, hash: u64) -> RawIterHash<'_, T, A> {
+ pub unsafe fn iter_hash(&self, hash: u64) -> RawIterHash<T> {
RawIterHash::new(self, hash)
}
@@ -978,8 +1636,8 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
debug_assert_eq!(iter.len(), self.len());
RawDrain {
iter,
- table: ManuallyDrop::new(mem::replace(self, Self::new_in(self.table.alloc.clone()))),
- orig_table: NonNull::from(self),
+ table: mem::replace(&mut self.table, RawTableInner::NEW),
+ orig_table: NonNull::from(&mut self.table),
marker: PhantomData,
}
}
@@ -993,31 +1651,31 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
pub unsafe fn into_iter_from(self, iter: RawIter<T>) -> RawIntoIter<T, A> {
debug_assert_eq!(iter.len(), self.len());
- let alloc = self.table.alloc.clone();
let allocation = self.into_allocation();
RawIntoIter {
iter,
allocation,
marker: PhantomData,
- alloc,
}
}
/// Converts the table into a raw allocation. The contents of the table
/// should be dropped using a `RawIter` before freeing the allocation.
#[cfg_attr(feature = "inline-more", inline)]
- pub(crate) fn into_allocation(self) -> Option<(NonNull<u8>, Layout)> {
+ pub(crate) fn into_allocation(self) -> Option<(NonNull<u8>, Layout, A)> {
let alloc = if self.table.is_empty_singleton() {
None
} else {
// Avoid `Option::unwrap_or_else` because it bloats LLVM IR.
- let (layout, ctrl_offset) = match calculate_layout::<T>(self.table.buckets()) {
- Some(lco) => lco,
- None => unsafe { hint::unreachable_unchecked() },
- };
+ let (layout, ctrl_offset) =
+ match Self::TABLE_LAYOUT.calculate_layout_for(self.table.buckets()) {
+ Some(lco) => lco,
+ None => unsafe { hint::unreachable_unchecked() },
+ };
Some((
unsafe { NonNull::new_unchecked(self.table.ctrl.as_ptr().sub(ctrl_offset)) },
layout,
+ unsafe { ptr::read(&self.alloc) },
))
};
mem::forget(self);
@@ -1025,41 +1683,62 @@ impl<T, A: Allocator + Clone> RawTable<T, A> {
}
}
-unsafe impl<T, A: Allocator + Clone> Send for RawTable<T, A>
+unsafe impl<T, A: Allocator> Send for RawTable<T, A>
where
T: Send,
A: Send,
{
}
-unsafe impl<T, A: Allocator + Clone> Sync for RawTable<T, A>
+unsafe impl<T, A: Allocator> Sync for RawTable<T, A>
where
T: Sync,
A: Sync,
{
}
-impl<A> RawTableInner<A> {
+impl RawTableInner {
+ const NEW: Self = RawTableInner::new();
+
+ /// Creates a new empty hash table without allocating any memory.
+ ///
+ /// In effect this returns a table with exactly 1 bucket. However we can
+ /// leave the data pointer dangling since that bucket is never accessed
+ /// due to our load factor forcing us to always have at least 1 free bucket.
#[inline]
- const fn new_in(alloc: A) -> Self {
+ const fn new() -> Self {
Self {
// Be careful to cast the entire slice to a raw pointer.
ctrl: unsafe { NonNull::new_unchecked(Group::static_empty() as *const _ as *mut u8) },
bucket_mask: 0,
items: 0,
growth_left: 0,
- alloc,
}
}
}
-impl<A: Allocator + Clone> RawTableInner<A> {
+impl RawTableInner {
+ /// Allocates a new [`RawTableInner`] with the given number of buckets.
+ /// The control bytes and buckets are left uninitialized.
+ ///
+ /// # Safety
+ ///
+ /// The caller of this function must ensure that `buckets` is a power of two
+ /// and must also initialize all control bytes (of length `self.bucket_mask + 1 +
+ /// Group::WIDTH`) with the [`EMPTY`] byte.
+ ///
+ /// See also [`Allocator`] API for other safety concerns.
+ ///
+ /// [`Allocator`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html
#[cfg_attr(feature = "inline-more", inline)]
- unsafe fn new_uninitialized(
- alloc: A,
+ unsafe fn new_uninitialized<A>(
+ alloc: &A,
table_layout: TableLayout,
buckets: usize,
fallibility: Fallibility,
- ) -> Result<Self, TryReserveError> {
+ ) -> Result<Self, TryReserveError>
+ where
+ A: Allocator,
+ {
debug_assert!(buckets.is_power_of_two());
// Avoid `Option::ok_or_else` because it bloats LLVM IR.
@@ -1068,45 +1747,48 @@ impl<A: Allocator + Clone> RawTableInner<A> {
None => return Err(fallibility.capacity_overflow()),
};
- // We need an additional check to ensure that the allocation doesn't
- // exceed `isize::MAX`. We can skip this check on 64-bit systems since
- // such allocations will never succeed anyways.
- //
- // This mirrors what Vec does in the standard library.
- if mem::size_of::<usize>() < 8 && layout.size() > isize::MAX as usize {
- return Err(fallibility.capacity_overflow());
- }
-
- let ptr: NonNull<u8> = match do_alloc(&alloc, layout) {
+ let ptr: NonNull<u8> = match do_alloc(alloc, layout) {
Ok(block) => block.cast(),
Err(_) => return Err(fallibility.alloc_err(layout)),
};
+ // SAFETY: a failed (null) allocation was already rejected by the `do_alloc` match above
let ctrl = NonNull::new_unchecked(ptr.as_ptr().add(ctrl_offset));
Ok(Self {
ctrl,
bucket_mask: buckets - 1,
items: 0,
growth_left: bucket_mask_to_capacity(buckets - 1),
- alloc,
})
}
+ /// Attempts to allocate a new [`RawTableInner`] with at least enough
+ /// capacity for inserting the given number of elements without reallocating.
+ ///
+ /// All the control bytes are initialized with the [`EMPTY`] bytes.
#[inline]
- fn fallible_with_capacity(
- alloc: A,
+ fn fallible_with_capacity<A>(
+ alloc: &A,
table_layout: TableLayout,
capacity: usize,
fallibility: Fallibility,
- ) -> Result<Self, TryReserveError> {
+ ) -> Result<Self, TryReserveError>
+ where
+ A: Allocator,
+ {
if capacity == 0 {
- Ok(Self::new_in(alloc))
+ Ok(Self::NEW)
} else {
+ // SAFETY: We checked that we could successfully allocate the new table, and then
+ // initialized all control bytes with the constant `EMPTY` byte.
unsafe {
let buckets =
capacity_to_buckets(capacity).ok_or_else(|| fallibility.capacity_overflow())?;
let result = Self::new_uninitialized(alloc, table_layout, buckets, fallibility)?;
+ // SAFETY: We checked that the table is allocated and therefore the table already has
+ // `self.bucket_mask + 1 + Group::WIDTH` number of control bytes (see TableLayout::calculate_layout_for)
+ // so writing `self.num_ctrl_bytes() == bucket_mask + 1 + Group::WIDTH` bytes is safe.
result.ctrl(0).write_bytes(EMPTY, result.num_ctrl_bytes());
Ok(result)
@@ -1114,66 +1796,397 @@ impl<A: Allocator + Clone> RawTableInner<A> {
}
}
- /// Searches for an empty or deleted bucket which is suitable for inserting
- /// a new element and sets the hash for that slot.
+ /// Allocates a new [`RawTableInner`] with at least enough capacity for inserting
+ /// the given number of elements without reallocating.
+ ///
+ /// Panics if the new capacity exceeds [`isize::MAX`] bytes, and [`abort`]s the program
+ /// in case of an allocation error. Use [`fallible_with_capacity`] instead if you want to
+ /// handle memory allocation failures.
+ ///
+ /// All the control bytes are initialized with the [`EMPTY`] bytes.
+ ///
+ /// [`fallible_with_capacity`]: RawTableInner::fallible_with_capacity
+ /// [`abort`]: https://doc.rust-lang.org/alloc/alloc/fn.handle_alloc_error.html
+ fn with_capacity<A>(alloc: &A, table_layout: TableLayout, capacity: usize) -> Self
+ where
+ A: Allocator,
+ {
+ // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
+ match Self::fallible_with_capacity(alloc, table_layout, capacity, Fallibility::Infallible) {
+ Ok(table_inner) => table_inner,
+ // SAFETY: All allocation errors will be caught inside `RawTableInner::new_uninitialized`.
+ Err(_) => unsafe { hint::unreachable_unchecked() },
+ }
+ }
+
+ /// Fixes up an insertion slot returned by the [`RawTableInner::find_insert_slot_in_group`] method.
+ ///
+ /// In tables smaller than the group width (`self.buckets() < Group::WIDTH`), trailing control
+ /// bytes outside the range of the table are filled with [`EMPTY`] entries. These will unfortunately
+ /// trigger a match in [`RawTableInner::find_insert_slot_in_group`]. This is because
+ /// the `Some(bit)` returned by `group.match_empty_or_deleted().lowest_set_bit()`, after masking
+ /// (`(probe_seq.pos + bit) & self.bucket_mask`), may point to a bucket that is already occupied.
+ /// We detect this situation here and perform a second scan starting at the beginning of the table.
+ /// This second scan is guaranteed to find an empty slot (due to the load factor) before hitting the
+ /// trailing control bytes (containing [`EMPTY`] bytes).
+ ///
+ /// If this function is called correctly, it is guaranteed to return [`InsertSlot`] with an
+ /// index of an empty or deleted bucket in the range `0..self.buckets()` (see `Warning` and
+ /// `Safety`).
+ ///
+ /// # Warning
+ ///
+ /// The table must have at least 1 empty or deleted `bucket`, otherwise if the table is less than
+ /// the group width (`self.buckets() < Group::WIDTH`) this function returns an index outside of the
+ /// table indices range `0..self.buckets()` (`0..=self.bucket_mask`). An attempt to write data at that
+ /// index will cause immediate [`undefined behavior`].
+ ///
+ /// # Safety
+ ///
+ /// The safety rules are directly derived from the safety rules for [`RawTableInner::ctrl`] method.
+ /// Thus, in order to uphold those safety contracts, as well as the correct logic
+ /// of this crate, the following rules are necessary and sufficient:
///
- /// There must be at least 1 empty bucket in the table.
+ /// * The [`RawTableInner`] must have properly initialized control bytes otherwise calling this
+ /// function results in [`undefined behavior`].
+ ///
+ /// * This function must only be used on insertion slots found by [`RawTableInner::find_insert_slot_in_group`]
+ /// (after the `find_insert_slot_in_group` function, but before insertion into the table).
+ ///
+ /// * The `index` must not be greater than the `self.bucket_mask`, i.e. `(index + 1) <= self.buckets()`
+ /// (this one is provided by the [`RawTableInner::find_insert_slot_in_group`] function).
+ ///
+ /// Calling this function with an index not provided by [`RawTableInner::find_insert_slot_in_group`]
+ /// may result in [`undefined behavior`] even if the index satisfies the safety rules of the
+ /// [`RawTableInner::ctrl`] function (`index < self.bucket_mask + 1 + Group::WIDTH`).
+ ///
+ /// [`RawTableInner::ctrl`]: RawTableInner::ctrl
+ /// [`RawTableInner::find_insert_slot_in_group`]: RawTableInner::find_insert_slot_in_group
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
#[inline]
- unsafe fn prepare_insert_slot(&self, hash: u64) -> (usize, u8) {
- let index = self.find_insert_slot(hash);
+ unsafe fn fix_insert_slot(&self, mut index: usize) -> InsertSlot {
+ // SAFETY: The caller of this function ensures that `index` is in the range `0..=self.bucket_mask`.
+ if unlikely(self.is_bucket_full(index)) {
+ debug_assert!(self.bucket_mask < Group::WIDTH);
+ // SAFETY:
+ //
+ // * Since the caller of this function ensures that the control bytes are properly
+ // initialized and `ptr = self.ctrl(0)` points to the start of the array of control
+ // bytes, therefore: `ctrl` is valid for reads, properly aligned to `Group::WIDTH`
+ // and points to the properly initialized control bytes (see also
+ // `TableLayout::calculate_layout_for` and `ptr::read`);
+ //
+ // * Because the caller of this function ensures that the index was provided by the
+ //   `self.find_insert_slot_in_group()` function, so for tables larger than the
+ // group width (self.buckets() >= Group::WIDTH), we will never end up in the given
+ // branch, since `(probe_seq.pos + bit) & self.bucket_mask` in `find_insert_slot_in_group`
+ // cannot return a full bucket index. For tables smaller than the group width, calling
+ // the `unwrap_unchecked` function is also safe, as the trailing control bytes outside
+ // the range of the table are filled with EMPTY bytes (and we know for sure that there
+ // is at least one FULL bucket), so this second scan either finds an empty slot (due to
+ // the load factor) or hits the trailing control bytes (containing EMPTY).
+ index = Group::load_aligned(self.ctrl(0))
+ .match_empty_or_deleted()
+ .lowest_set_bit()
+ .unwrap_unchecked();
+ }
+ InsertSlot { index }
+ }
+
+ /// Finds the position to insert something in a group.
+ ///
+ /// **This may have false positives and must be fixed up with `fix_insert_slot`
+ /// before it's used.**
+ ///
+ /// When it returns `Some`, the index is in the range `0..self.buckets()`
+ /// (`0..=self.bucket_mask`), though, per the warning above, it may refer to a
+ /// bucket that is actually full.
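+ ///
+ /// The masking step relies on the bucket count being a power of two; a
+ /// small self-contained check of that equivalence (editorial sketch):
+ ///
+ /// ```ignore
+ /// let buckets = 8usize; // always a power of two
+ /// let bucket_mask = buckets - 1;
+ /// for pos_plus_bit in 0..4 * buckets {
+ ///     assert_eq!(pos_plus_bit & bucket_mask, pos_plus_bit % buckets);
+ /// }
+ /// ```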
+ #[inline]
+ fn find_insert_slot_in_group(&self, group: &Group, probe_seq: &ProbeSeq) -> Option<usize> {
+ let bit = group.match_empty_or_deleted().lowest_set_bit();
+
+ if likely(bit.is_some()) {
+ // This is the same as `(probe_seq.pos + bit) % self.buckets()` because the number
+ // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
+ Some((probe_seq.pos + bit.unwrap()) & self.bucket_mask)
+ } else {
+ None
+ }
+ }
+
+ /// Searches for an element in the table, or a potential slot where that element could
+ /// be inserted (an empty or deleted [`Bucket`] index).
+ ///
+ /// This uses dynamic dispatch to reduce the amount of code generated, but that is
+ /// eliminated by LLVM optimizations.
+ ///
+ /// This function does not make any changes to the `data` part of the table, or any
+ /// changes to the `items` or `growth_left` field of the table.
+ ///
+ /// The table must have at least 1 empty or deleted `bucket`, otherwise, if the
+ /// `eq: &mut dyn FnMut(usize) -> bool` function does not return `true`, this function
+ /// will never return (will go into an infinite loop) for tables larger than the group
+ /// width, or return an index outside of the table indices range if the table is less
+ /// than the group width.
+ ///
+ /// This function is guaranteed to provide the `eq: &mut dyn FnMut(usize) -> bool`
+ /// function with only `FULL` buckets' indices and return the `index` of the found
+ /// element (as `Ok(index)`). If the element is not found and there is at least 1
+ /// empty or deleted [`Bucket`] in the table, the function is guaranteed to return
+ /// [`InsertSlot`] with an index in the range `0..self.buckets()`, but in any case,
+ /// if this function returns [`InsertSlot`], it will contain an index in the range
+ /// `0..=self.buckets()`.
+ ///
+ /// # Safety
+ ///
+ /// The [`RawTableInner`] must have properly initialized control bytes otherwise calling
+ /// this function results in [`undefined behavior`].
+ ///
+ /// An attempt to write data at the [`InsertSlot`] returned by this function, when the table is
+ /// less than the group width and there is not at least one empty or deleted bucket in
+ /// the table, will cause immediate [`undefined behavior`]. This is because in this case the
+ /// function will return `self.bucket_mask + 1` as an index due to the trailing [`EMPTY`]
+ /// control bytes outside the table range.
+ ///
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
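+ ///
+ /// The `eq` callback is only consulted for buckets whose control byte matches
+ /// the 7-bit fingerprint `h2(hash)`, while `h1(hash)` picks the starting
+ /// probe position. A simplified 64-bit-only sketch of that split (editorial;
+ /// the crate's `h1`/`h2` helpers differ in detail):
+ ///
+ /// ```ignore
+ /// fn h1(hash: u64) -> usize { hash as usize }
+ /// fn h2(hash: u64) -> u8 { (hash >> 57) as u8 } // top 7 bits
+ /// let hash = 0x1234_5678_9ABC_DEF0u64;
+ /// let bucket_mask = 31usize; // 32 buckets
+ /// let start = h1(hash) & bucket_mask; // first group to probe
+ /// let fingerprint = h2(hash); // byte compared within each group
+ /// assert!(fingerprint < 0x80); // FULL bytes keep the high bit clear
+ /// ```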
+ #[inline]
+ unsafe fn find_or_find_insert_slot_inner(
+ &self,
+ hash: u64,
+ eq: &mut dyn FnMut(usize) -> bool,
+ ) -> Result<usize, InsertSlot> {
+ let mut insert_slot = None;
+
+ let h2_hash = h2(hash);
+ let mut probe_seq = self.probe_seq(hash);
+
+ loop {
+ // SAFETY:
+ // * Caller of this function ensures that the control bytes are properly initialized.
+ //
+ // * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1`
+ //   of the table due to masking with `self.bucket_mask` and also because the number of
+ // buckets is a power of two (see `self.probe_seq` function).
+ //
+ // * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to
+ // call `Group::load` due to the extended control bytes range, which is
+ // `self.bucket_mask + 1 + Group::WIDTH` (in fact, this means that the last control
+ // byte will never be read for the allocated table);
+ //
+ // * Also, even if `RawTableInner` is not already allocated, `ProbeSeq.pos` will
+ // always return "0" (zero), so Group::load will read unaligned `Group::static_empty()`
+ // bytes, which is safe (see RawTableInner::new).
+ let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) };
+
+ for bit in group.match_byte(h2_hash) {
+ let index = (probe_seq.pos + bit) & self.bucket_mask;
+
+ if likely(eq(index)) {
+ return Ok(index);
+ }
+ }
+
+ // We didn't find the element we were looking for in the group, try to get an
+ // insertion slot from the group if we don't have one yet.
+ if likely(insert_slot.is_none()) {
+ insert_slot = self.find_insert_slot_in_group(&group, &probe_seq);
+ }
+
+ // Only stop the search if the group contains at least one empty element.
+ // Otherwise, the element that we are looking for might be in a following group.
+ if likely(group.match_empty().any_bit_set()) {
+ // We must have found an insert slot by now, since the current group contains at
+ // least one. For tables smaller than the group width, there will still be an
+ // empty element in the current (and only) group due to the load factor.
+ unsafe {
+ // SAFETY:
+ // * Caller of this function ensures that the control bytes are properly initialized.
+ //
+ // * We use this function with the slot / index found by `self.find_insert_slot_in_group`
+ return Err(self.fix_insert_slot(insert_slot.unwrap_unchecked()));
+ }
+ }
+
+ probe_seq.move_next(self.bucket_mask);
+ }
+ }
+
+ /// Searches for an empty or deleted bucket which is suitable for inserting a new
+ /// element and sets the hash for that slot. Returns an index of that slot and the
+ /// old control byte stored in the found index.
+ ///
+ /// This function does not check if the given element exists in the table. Also,
+ /// this function does not check if there is enough space in the table to insert
+ /// a new element. The caller of the function must ensure that the table has at
+ /// least 1 empty or deleted `bucket`, otherwise this function will never return
+ /// (will go into an infinite loop) for tables larger than the group width, or
+ /// return an index outside of the table indices range if the table is less than
+ /// the group width.
+ ///
+ /// If there is at least 1 empty or deleted `bucket` in the table, the function is
+ /// guaranteed to return an `index` in the range `0..self.buckets()`, but in any case,
+ /// if this function returns an `index` it will be in the range `0..=self.buckets()`.
+ ///
+ /// This function does not make any changes to the `data` parts of the table,
+ /// or any changes to the `items` or `growth_left` field of the table.
+ ///
+ /// # Safety
+ ///
+ /// The safety rules are directly derived from the safety rules for the
+ /// [`RawTableInner::set_ctrl_h2`] and [`RawTableInner::find_insert_slot`] methods.
+ /// Thus, in order to uphold the safety contracts for those methods, as well as for
+ /// the correct logic of this crate, you must observe the following rules
+ /// when calling this function:
+ ///
+ /// * The [`RawTableInner`] has already been allocated and has properly initialized
+ /// control bytes otherwise calling this function results in [`undefined behavior`].
+ ///
+ /// * The caller of this function must ensure that the "data" parts of the table
+ /// will have an entry in the returned index (matching the given hash) right
+ /// after calling this function.
+ ///
+ /// An attempt to write data at the `index` returned by this function, when the table is
+ /// less than the group width and there is not at least one empty or deleted bucket in
+ /// the table, will cause immediate [`undefined behavior`]. This is because in this case the
+ /// function will return `self.bucket_mask + 1` as an index due to the trailing [`EMPTY`]
+ /// control bytes outside the table range.
+ ///
+ /// The caller must independently increase the `items` field of the table, and also,
+ /// if the old control byte was [`EMPTY`], then decrease the table's `growth_left`
+ /// field, and do not change it if the old control byte was [`DELETED`].
+ ///
+ /// See also the [`Bucket::as_ptr`] method for more information about properly removing
+ /// or saving `element` from / into the [`RawTable`] / [`RawTableInner`].
+ ///
+ /// [`Bucket::as_ptr`]: Bucket::as_ptr
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ /// [`RawTableInner::ctrl`]: RawTableInner::ctrl
+ /// [`RawTableInner::set_ctrl_h2`]: RawTableInner::set_ctrl_h2
+ /// [`RawTableInner::find_insert_slot`]: RawTableInner::find_insert_slot
+ #[inline]
+ unsafe fn prepare_insert_slot(&mut self, hash: u64) -> (usize, u8) {
+ // SAFETY: Caller of this function ensures that the control bytes are properly initialized.
+ let index: usize = self.find_insert_slot(hash).index;
+ // SAFETY:
+ // 1. The `find_insert_slot` function either returns an `index` less than or
+ // equal to `self.buckets() = self.bucket_mask + 1` of the table, or never
+ // returns if it cannot find an empty or deleted slot.
+ // 2. The caller of this function guarantees that the table has already been
+ // allocated
let old_ctrl = *self.ctrl(index);
self.set_ctrl_h2(index, hash);
(index, old_ctrl)
}
/// Searches for an empty or deleted bucket which is suitable for inserting
- /// a new element.
+ /// a new element, returning the `index` for the new [`Bucket`].
+ ///
+ /// This function does not make any changes to the `data` part of the table, or any
+ /// changes to the `items` or `growth_left` field of the table.
+ ///
+ /// The table must have at least 1 empty or deleted `bucket`, otherwise this function
+ /// will never return (will go into an infinite loop) for tables larger than the group
+ /// width, or return an index outside of the table indices range if the table is less
+ /// than the group width.
+ ///
+ /// If there is at least 1 empty or deleted `bucket` in the table, the function is
+ /// guaranteed to return [`InsertSlot`] with an index in the range `0..self.buckets()`,
+ /// but in any case, if this function returns [`InsertSlot`], it will contain an index
+ /// in the range `0..=self.buckets()`.
///
- /// There must be at least 1 empty bucket in the table.
+ /// # Safety
+ ///
+ /// The [`RawTableInner`] must have properly initialized control bytes otherwise calling
+ /// this function results in [`undefined behavior`].
+ ///
+ /// An attempt to write data at the [`InsertSlot`] returned by this function, when the table is
+ /// less than the group width and there is not at least one empty or deleted bucket in
+ /// the table, will cause immediate [`undefined behavior`]. This is because in this case the
+ /// function will return `self.bucket_mask + 1` as an index due to the trailing [`EMPTY`]
+ /// control bytes outside the table range.
+ ///
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
#[inline]
- fn find_insert_slot(&self, hash: u64) -> usize {
+ unsafe fn find_insert_slot(&self, hash: u64) -> InsertSlot {
let mut probe_seq = self.probe_seq(hash);
loop {
- unsafe {
- let group = Group::load(self.ctrl(probe_seq.pos));
- if let Some(bit) = group.match_empty_or_deleted().lowest_set_bit() {
- let result = (probe_seq.pos + bit) & self.bucket_mask;
-
- // In tables smaller than the group width, trailing control
- // bytes outside the range of the table are filled with
- // EMPTY entries. These will unfortunately trigger a
- // match, but once masked may point to a full bucket that
- // is already occupied. We detect this situation here and
- // perform a second scan starting at the beginning of the
- // table. This second scan is guaranteed to find an empty
- // slot (due to the load factor) before hitting the trailing
- // control bytes (containing EMPTY).
- if unlikely(is_full(*self.ctrl(result))) {
- debug_assert!(self.bucket_mask < Group::WIDTH);
- debug_assert_ne!(probe_seq.pos, 0);
- return Group::load_aligned(self.ctrl(0))
- .match_empty_or_deleted()
- .lowest_set_bit_nonzero();
- }
+ // SAFETY:
+ // * Caller of this function ensures that the control bytes are properly initialized.
+ //
+ // * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1`
+ //   of the table due to masking with `self.bucket_mask` and also because the number of
+ // buckets is a power of two (see `self.probe_seq` function).
+ //
+ // * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to
+ // call `Group::load` due to the extended control bytes range, which is
+ // `self.bucket_mask + 1 + Group::WIDTH` (in fact, this means that the last control
+ // byte will never be read for the allocated table);
+ //
+ // * Also, even if `RawTableInner` is not already allocated, `ProbeSeq.pos` will
+ // always return "0" (zero), so Group::load will read unaligned `Group::static_empty()`
+ // bytes, which is safe (see RawTableInner::new).
+ let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) };
- return result;
+ let index = self.find_insert_slot_in_group(&group, &probe_seq);
+ if likely(index.is_some()) {
+ // SAFETY:
+ // * Caller of this function ensures that the control bytes are properly initialized.
+ //
+ // * We use this function with the slot / index found by `self.find_insert_slot_in_group`
+ unsafe {
+ return self.fix_insert_slot(index.unwrap_unchecked());
}
}
probe_seq.move_next(self.bucket_mask);
}
}
- /// Searches for an element in the table. This uses dynamic dispatch to reduce the amount of
- /// code generated, but it is eliminated by LLVM optimizations.
- #[inline]
- fn find_inner(&self, hash: u64, eq: &mut dyn FnMut(usize) -> bool) -> Option<usize> {
+ /// Searches for an element in a table, returning the `index` of the found element.
+ /// This uses dynamic dispatch to reduce the amount of code generated, but it is
+ /// eliminated by LLVM optimizations.
+ ///
+ /// This function does not make any changes to the `data` part of the table, or any
+ /// changes to the `items` or `growth_left` field of the table.
+ ///
+ /// The table must have at least 1 empty `bucket`, otherwise, if the
+ /// `eq: &mut dyn FnMut(usize) -> bool` function does not return `true`,
+ /// this function will also never return (will go into an infinite loop).
+ ///
+ /// This function is guaranteed to provide the `eq: &mut dyn FnMut(usize) -> bool`
+ /// function with only `FULL` buckets' indices and return the `index` of the found
+ /// element as `Some(index)`, so the index will always be in the range
+ /// `0..self.buckets()`.
+ ///
+ /// # Safety
+ ///
+ /// The [`RawTableInner`] must have properly initialized control bytes otherwise calling
+ /// this function results in [`undefined behavior`].
+ ///
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ #[inline(always)]
+ unsafe fn find_inner(&self, hash: u64, eq: &mut dyn FnMut(usize) -> bool) -> Option<usize> {
let h2_hash = h2(hash);
let mut probe_seq = self.probe_seq(hash);
loop {
+ // SAFETY:
+ // * Caller of this function ensures that the control bytes are properly initialized.
+ //
+ // * `ProbeSeq.pos` cannot be greater than `self.bucket_mask = self.buckets() - 1`
+ // of the table due to masking with `self.bucket_mask`.
+ //
+ // * Even if `ProbeSeq.pos` returns `position == self.bucket_mask`, it is safe to
+ // call `Group::load` due to the extended control bytes range, which is
+ // `self.bucket_mask + 1 + Group::WIDTH` (in fact, this means that the last control
+ // byte will never be read for the allocated table);
+ //
+ // * Also, even if `RawTableInner` is not already allocated, `ProbeSeq.pos` will
+ // always return "0" (zero), so Group::load will read unaligned `Group::static_empty()`
+ //   bytes, which is safe (see RawTableInner::new).
let group = unsafe { Group::load(self.ctrl(probe_seq.pos)) };
for bit in group.match_byte(h2_hash) {
+ // This is the same as `(probe_seq.pos + bit) % self.buckets()` because the number
+ // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
let index = (probe_seq.pos + bit) & self.bucket_mask;
if likely(eq(index)) {
@@ -1189,12 +2202,52 @@ impl<A: Allocator + Clone> RawTableInner<A> {
}
}
+ /// Prepares for rehashing data in place (that is, without allocating new memory).
+ /// Converts all full index `control bytes` to `DELETED` and all `DELETED` control
+ /// bytes to `EMPTY`, i.e. performs the following conversion:
+ ///
+ /// - `EMPTY` control bytes -> `EMPTY`;
+ /// - `DELETED` control bytes -> `EMPTY`;
+ /// - `FULL` control bytes -> `DELETED`.
+ ///
+ /// This function does not make any changes to the `data` parts of the table,
+ /// or any changes to the `items` or `growth_left` field of the table.
+ ///
+ /// # Safety
+ ///
+ /// You must observe the following safety rules when calling this function:
+ ///
+ /// * The [`RawTableInner`] has already been allocated;
+ ///
+ /// * The caller of this function must convert the `DELETED` bytes back to `FULL`
+ /// bytes when re-inserting them into their ideal position (which was impossible
+ /// to do during the first insert due to tombstones). If the caller does not do
+ /// this, then calling this function may result in a memory leak.
+ ///
+ /// * The [`RawTableInner`] must have properly initialized control bytes otherwise
+ /// calling this function results in [`undefined behavior`].
+ ///
+ /// Calling this function on a table that has not been allocated results in
+ /// [`undefined behavior`].
+ ///
+ /// See also the [`Bucket::as_ptr`] method for more information about properly removing
+ /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`].
+ ///
+ /// [`Bucket::as_ptr`]: Bucket::as_ptr
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
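+ ///
+ /// A scalar sketch of the per-byte conversion that
+ /// `convert_special_to_empty_and_full_to_deleted` performs group-wide
+ /// (editorial; EMPTY and DELETED are the crate's control-byte constants,
+ /// both with the high bit set):
+ ///
+ /// ```ignore
+ /// const EMPTY: u8 = 0b1111_1111;
+ /// const DELETED: u8 = 0b1000_0000;
+ /// fn convert(byte: u8) -> u8 {
+ ///     if byte & 0x80 != 0 { EMPTY } else { DELETED }
+ /// }
+ /// assert_eq!(convert(EMPTY), EMPTY); // EMPTY stays EMPTY
+ /// assert_eq!(convert(DELETED), EMPTY); // tombstones are freed
+ /// assert_eq!(convert(0x2A), DELETED); // FULL (a stored h2) is tombstoned
+ /// ```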
#[allow(clippy::mut_mut)]
#[inline]
unsafe fn prepare_rehash_in_place(&mut self) {
- // Bulk convert all full control bytes to DELETED, and all DELETED
- // control bytes to EMPTY. This effectively frees up all buckets
- // containing a DELETED entry.
+ // Bulk convert all full control bytes to DELETED, and all DELETED control bytes to EMPTY.
+ // This effectively frees up all buckets containing a DELETED entry.
+ //
+ // SAFETY:
+ // 1. `i` is guaranteed to be within bounds since we are iterating from zero to `buckets - 1`;
+ // 2. Even if `i` will be `i == self.bucket_mask`, it is safe to call `Group::load_aligned`
+ // due to the extended control bytes range, which is `self.bucket_mask + 1 + Group::WIDTH`;
+ // 3. The caller of this function guarantees that [`RawTableInner`] has already been allocated;
+ // 4. We can use `Group::load_aligned` and `Group::store_aligned` here since we start from 0
+ // and go to the end with a step equal to `Group::WIDTH` (see TableLayout::calculate_layout_for).
for i in (0..self.buckets()).step_by(Group::WIDTH) {
let group = Group::load_aligned(self.ctrl(i));
let group = group.convert_special_to_empty_and_full_to_deleted();
@@ -1203,15 +2256,245 @@ impl<A: Allocator + Clone> RawTableInner<A> {
// Fix up the trailing control bytes. See the comments in set_ctrl
// for the handling of tables smaller than the group width.
- if self.buckets() < Group::WIDTH {
+ //
+ // SAFETY: The caller of this function guarantees that [`RawTableInner`]
+ // has already been allocated
+ if unlikely(self.buckets() < Group::WIDTH) {
+ // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of control bytes,
+ // so copying `self.buckets() == self.bucket_mask + 1` bytes with offset equal to
+ // `Group::WIDTH` is safe
self.ctrl(0)
.copy_to(self.ctrl(Group::WIDTH), self.buckets());
} else {
+ // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of
+ // control bytes, so copying `Group::WIDTH` bytes with offset equal
+ // to `self.buckets() == self.bucket_mask + 1` is safe
self.ctrl(0)
.copy_to(self.ctrl(self.buckets()), Group::WIDTH);
}
}
+ /// Returns an iterator over every element in the table.
+ ///
+ /// # Safety
+ ///
+ /// If any of the following conditions are violated, the result
+ /// is [`undefined behavior`]:
+ ///
+ /// * The caller has to ensure that the `RawTableInner` outlives the
+ /// `RawIter`. Because we cannot make the `next` method unsafe on
+ /// the `RawIter` struct, we have to make the `iter` method unsafe.
+ ///
+ /// * The [`RawTableInner`] must have properly initialized control bytes.
+ ///
+ /// The type `T` must be the actual type of the elements stored in the table,
+ /// otherwise using the returned [`RawIter`] results in [`undefined behavior`].
+ ///
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ #[inline]
+ unsafe fn iter<T>(&self) -> RawIter<T> {
+ // SAFETY:
+ // 1. Since the caller of this function ensures that the control bytes
+ // are properly initialized and `self.data_end()` points to the start
+ // of the array of control bytes, therefore: `ctrl` is valid for reads,
+ // properly aligned to `Group::WIDTH` and points to the properly initialized
+ // control bytes.
+ // 2. `data` bucket index in the table is equal to the `ctrl` index (i.e.
+ // equal to zero).
+ // 3. We pass the exact value of buckets of the table to the function.
+ //
+ // `ctrl` points here (to the start
+ // of the first control byte `CT0`)
+ // ∨
+ // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, CTa_0, CTa_1, ..., CTa_m
+ // \________ ________/
+ // \/
+ // `n = buckets - 1`, i.e. `RawTableInner::buckets() - 1`
+ //
+ // where: T0...T_n - our stored data;
+ // CT0...CT_n - control bytes or metadata for `data`.
+ // CTa_0...CTa_m - additional control bytes, where `m = Group::WIDTH - 1` (so that the search
+ // with loading `Group` bytes from the heap works properly, even if the result
+ // of `h1(hash) & self.bucket_mask` is equal to `self.bucket_mask`). See also
+ // `RawTableInner::set_ctrl` function.
+ //
+ // P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
+ // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
+ let data = Bucket::from_base_index(self.data_end(), 0);
+ RawIter {
+ // SAFETY: See explanation above
+ iter: RawIterRange::new(self.ctrl.as_ptr(), data, self.buckets()),
+ items: self.items,
+ }
+ }
+
+ /// Executes the destructors (if any) of the values stored in the table.
+ ///
+ /// # Note
+ ///
+ /// This function does not erase the control bytes of the table and does
+ /// not make any changes to the `items` or `growth_left` fields of the
+ /// table. If necessary, the caller of this function must manually set
+ /// up these table fields, for example using the [`clear_no_drop`] function.
+ ///
+ /// Be careful when calling this function: the drop function of the
+ /// elements can panic, and this can leave the table in an inconsistent
+ /// state.
+ ///
+ /// # Safety
+ ///
+ /// The type `T` must be the actual type of the elements stored in the table,
+ /// otherwise calling this function may result in [`undefined behavior`].
+ ///
+ /// If `T` is a type that should be dropped and **the table is not empty**,
+ /// calling this function more than once results in [`undefined behavior`].
+ ///
+ /// If `T` is not [`Copy`], attempting to use values stored in the table after
+ /// calling this function may result in [`undefined behavior`].
+ ///
+ /// It is safe to call this function on a table that has not been allocated,
+ /// on a table with uninitialized control bytes, and on a table with no actual
+ /// data but with `Full` control bytes if `self.items == 0`.
+ ///
+ /// See also the [`Bucket::drop`] / [`Bucket::as_ptr`] methods for more information
+ /// about properly removing or saving `element` from / into the [`RawTable`] /
+ /// [`RawTableInner`].
+ ///
+ /// [`Bucket::drop`]: Bucket::drop
+ /// [`Bucket::as_ptr`]: Bucket::as_ptr
+ /// [`clear_no_drop`]: RawTableInner::clear_no_drop
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
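+ ///
+ /// The `T::NEEDS_DROP` guard below plays the same role as the standard
+ /// library's `mem::needs_drop` (editorial sketch): for types without drop
+ /// glue the whole loop compiles away.
+ ///
+ /// ```ignore
+ /// use core::mem;
+ /// assert!(mem::needs_drop::<String>()); // destructors must run
+ /// assert!(!mem::needs_drop::<u64>()); // nothing to do; the loop is elided
+ /// ```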
+ unsafe fn drop_elements<T>(&mut self) {
+ // Check that `self.items != 0`. Protects against the possibility
+ // of creating an iterator on a table with uninitialized control bytes.
+ if T::NEEDS_DROP && self.items != 0 {
+ // SAFETY: We know for sure that RawTableInner will outlive the
+ // returned `RawIter` iterator, and the caller of this function
+ // must uphold the safety contract for `drop_elements` method.
+ for item in self.iter::<T>() {
+ // SAFETY: The caller must uphold the safety contract for
+ // `drop_elements` method.
+ item.drop();
+ }
+ }
+ }
+
+ /// Executes the destructors (if any) of the values stored in the table and then
+ /// deallocates the table.
+ ///
+ /// # Note
+ ///
+ /// Calling this function automatically invalidates (leaves dangling) all
+ /// instances of buckets ([`Bucket`]) as well as the `ctrl` field of the table.
+ ///
+ /// This function does not make any changes to the `bucket_mask`, `items` or `growth_left`
+ /// fields of the table. If necessary, the caller of this function must manually set
+ /// up these table fields.
+ ///
+ /// # Safety
+ ///
+ /// If any of the following conditions are violated, the result is [`undefined behavior`]:
+ ///
+ /// * Calling this function more than once;
+ ///
+ /// * The type `T` must be the actual type of the elements stored in the table.
+ ///
+ /// * The `alloc` must be the same [`Allocator`] as the `Allocator` that was used
+ /// to allocate this table.
+ ///
+ /// * The `table_layout` must be the same [`TableLayout`] as the `TableLayout` that
+ /// was used to allocate this table.
+ ///
+ /// The caller of this function should pay attention to the possibility of the
+ /// elements' drop function panicking, because in that case:
+ ///
+ /// * The table may be left in an inconsistent state;
+ ///
+ /// * Memory is never deallocated, and a memory leak may occur.
+ ///
+ /// Attempt to use the `ctrl` field of the table (dereference) after calling this
+ /// function results in [`undefined behavior`].
+ ///
+ /// It is safe to call this function on a table that has not been allocated,
+ /// on a table with uninitialized control bytes, and on a table with no actual
+ /// data but with `Full` control bytes if `self.items == 0`.
+ ///
+ /// See also [`RawTableInner::drop_elements`] or [`RawTableInner::free_buckets`]
+ /// for more information.
+ ///
+ /// [`RawTableInner::drop_elements`]: RawTableInner::drop_elements
+ /// [`RawTableInner::free_buckets`]: RawTableInner::free_buckets
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ unsafe fn drop_inner_table<T, A: Allocator>(&mut self, alloc: &A, table_layout: TableLayout) {
+ if !self.is_empty_singleton() {
+ unsafe {
+ // SAFETY: The caller must uphold the safety contract for `drop_inner_table` method.
+ self.drop_elements::<T>();
+ // SAFETY:
+ // 1. We have checked that our table is allocated.
+ // 2. The caller must uphold the safety contract for `drop_inner_table` method.
+ self.free_buckets(alloc, table_layout);
+ }
+ }
+ }
+
+ /// Returns a pointer to an element in the table (convenience for
+ /// `Bucket::from_base_index(self.data_end::<T>(), index)`).
+ ///
+ /// The caller must ensure that the `RawTableInner` outlives the returned [`Bucket<T>`],
+ /// otherwise using it may result in [`undefined behavior`].
+ ///
+ /// # Safety
+ ///
+ /// If `mem::size_of::<T>() != 0`, then the safety rules are directly derived from the
+ /// safety rules of the [`Bucket::from_base_index`] function. Therefore, when calling
+ /// this function, the following safety rules must be observed:
+ ///
+ /// * The table must already be allocated;
+ ///
+ /// * The `index` must not be greater than the number returned by the [`RawTableInner::buckets`]
+ /// function, i.e. `(index + 1) <= self.buckets()`.
+ ///
+ /// * The type `T` must be the actual type of the elements stored in the table, otherwise
+ /// using the returned [`Bucket`] may result in [`undefined behavior`].
+ ///
+ /// It is safe to call this function with an index of zero (`index == 0`) on a table that has
+ /// not been allocated, but using the returned [`Bucket`] results in [`undefined behavior`].
+ ///
+ /// If `mem::size_of::<T>() == 0`, then the only requirement is that the `index` must
+ /// not be greater than the number returned by the [`RawTable::buckets`] function, i.e.
+ /// `(index + 1) <= self.buckets()`.
+ ///
+ /// ```none
+ /// If mem::size_of::<T>() != 0 then return a pointer to the `element` in the `data part` of the table
+ /// (we start counting from "0", so that in the expression T[n], the "n" index actually one less than
+ /// the "buckets" number of our `RawTableInner`, i.e. "n = RawTableInner::buckets() - 1"):
+ ///
+ /// `table.bucket(3).as_ptr()` returns a pointer that points here in the `data`
+ /// part of the `RawTableInner`, i.e. to the start of T3 (see [`Bucket::as_ptr`])
+ /// |
+ /// | `base = table.data_end::<T>()` points here
+ /// | (to the start of CT0 or to the end of T0)
+ /// v v
+ /// [Pad], T_n, ..., |T3|, T2, T1, T0, |CT0, CT1, CT2, CT3, ..., CT_n, CTa_0, CTa_1, ..., CTa_m
+ /// ^ \__________ __________/
+ /// `table.bucket(3)` returns a pointer that points \/
+ /// here in the `data` part of the `RawTableInner` additional control bytes
+ /// (to the end of T3) `m = Group::WIDTH - 1`
+ ///
+ /// where: T0...T_n - our stored data;
+ /// CT0...CT_n - control bytes or metadata for `data`;
+ /// CTa_0...CTa_m - additional control bytes (so that the search with loading `Group` bytes from
+ /// the heap works properly, even if the result of `h1(hash) & self.bucket_mask`
+ /// is equal to `self.bucket_mask`). See also `RawTableInner::set_ctrl` function.
+ ///
+ /// P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
+ /// of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
+ /// ```
+ ///
+ /// [`Bucket::from_base_index`]: Bucket::from_base_index
+ /// [`RawTableInner::buckets`]: RawTableInner::buckets
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
#[inline]
unsafe fn bucket<T>(&self, index: usize) -> Bucket<T> {
debug_assert_ne!(self.bucket_mask, 0);
@@ -1219,6 +2502,52 @@ impl<A: Allocator + Clone> RawTableInner<A> {
Bucket::from_base_index(self.data_end(), index)
}
+ /// Returns a raw `*mut u8` pointer to the start of the `data` element in the table
+ /// (convenience for `self.data_end::<u8>().as_ptr().sub((index + 1) * size_of)`).
+ ///
+ /// The caller must ensure that the `RawTableInner` outlives the returned `*mut u8`,
+ /// otherwise using it may result in [`undefined behavior`].
+ ///
+ /// # Safety
+ ///
+ /// If any of the following conditions are violated, the result is [`undefined behavior`]:
+ ///
+ /// * The table must already be allocated;
+ ///
+ /// * The `index` must not be greater than the number returned by the [`RawTableInner::buckets`]
+ /// function, i.e. `(index + 1) <= self.buckets()`;
+ ///
+ /// * The `size_of` must be equal to the size of the elements stored in the table;
+ ///
+ /// ```none
+ /// If mem::size_of::<T>() != 0 then return a pointer to the `element` in the `data part` of the table
+ /// (we start counting from "0", so that in the expression T[n], the "n" index actually one less than
+ /// the "buckets" number of our `RawTableInner`, i.e. "n = RawTableInner::buckets() - 1"):
+ ///
+ /// `table.bucket_ptr(3, mem::size_of::<T>())` returns a pointer that points here in the
+ /// `data` part of the `RawTableInner`, i.e. to the start of T3
+ /// |
+ /// | `base = table.data_end::<u8>()` points here
+ /// | (to the start of CT0 or to the end of T0)
+ /// v v
+ /// [Pad], T_n, ..., |T3|, T2, T1, T0, |CT0, CT1, CT2, CT3, ..., CT_n, CTa_0, CTa_1, ..., CTa_m
+ /// \__________ __________/
+ /// \/
+ /// additional control bytes
+ /// `m = Group::WIDTH - 1`
+ ///
+ /// where: T0...T_n - our stored data;
+ /// CT0...CT_n - control bytes or metadata for `data`;
+ /// CTa_0...CTa_m - additional control bytes (so that the search with loading `Group` bytes from
+ /// the heap works properly, even if the result of `h1(hash) & self.bucket_mask`
+ /// is equal to `self.bucket_mask`). See also `RawTableInner::set_ctrl` function.
+ ///
+ /// P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
+ /// of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
+ /// ```
+ ///
+ /// [`RawTableInner::buckets`]: RawTableInner::buckets
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
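+ ///
+ /// The reverse layout means element `index` starts `(index + 1) * size_of`
+ /// bytes below the data end; a quick arithmetic sketch with a hypothetical
+ /// address (editorial):
+ ///
+ /// ```ignore
+ /// let size_of = 8usize; // e.g. a table of u64 values
+ /// let data_end = 0x1000usize; // hypothetical address of the data end
+ /// let addr = |index: usize| data_end - (index + 1) * size_of;
+ /// assert_eq!(addr(0), 0x0FF8); // T0 sits just below the control bytes
+ /// assert_eq!(addr(3), 0x0FE0); // T3 lies further toward the allocation start
+ /// ```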
#[inline]
unsafe fn bucket_ptr(&self, index: usize, size_of: usize) -> *mut u8 {
debug_assert_ne!(self.bucket_mask, 0);
@@ -1227,9 +2556,44 @@ impl<A: Allocator + Clone> RawTableInner<A> {
base.sub((index + 1) * size_of)
}
+ /// Returns a pointer to one past the last `data` element in the table, as viewed from
+ /// the start point of the allocation (convenience for `self.ctrl.cast()`).
+ ///
+ /// This function actually returns a pointer to the end of the `data element` at
+ /// index "0" (zero).
+ ///
+ /// The caller must ensure that the `RawTableInner` outlives the returned [`NonNull<T>`],
+ /// otherwise using it may result in [`undefined behavior`].
+ ///
+ /// # Note
+ ///
+ /// The type `T` must be the actual type of the elements stored in the table, otherwise
+ /// using the returned [`NonNull<T>`] may result in [`undefined behavior`].
+ ///
+ /// ```none
+ /// `table.data_end::<T>()` returns pointer that points here
+ /// (to the end of `T0`)
+ /// ∨
+ /// [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, CTa_0, CTa_1, ..., CTa_m
+ /// \________ ________/
+ /// \/
+ /// `n = buckets - 1`, i.e. `RawTableInner::buckets() - 1`
+ ///
+ /// where: T0...T_n - our stored data;
+ /// CT0...CT_n - control bytes or metadata for `data`.
+ /// CTa_0...CTa_m - additional control bytes, where `m = Group::WIDTH - 1` (so that the search
+ /// with loading `Group` bytes from the heap works properly, even if the result
+ /// of `h1(hash) & self.bucket_mask` is equal to `self.bucket_mask`). See also
+ /// `RawTableInner::set_ctrl` function.
+ ///
+ /// P.S. `h1(hash) & self.bucket_mask` is the same as `hash as usize % self.buckets()` because the number
+ /// of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
+ /// ```
+ ///
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
#[inline]
- unsafe fn data_end<T>(&self) -> NonNull<T> {
- NonNull::new_unchecked(self.ctrl.as_ptr().cast())
+ fn data_end<T>(&self) -> NonNull<T> {
+ self.ctrl.cast()
}
/// Returns an iterator-like object for a probe sequence on the table.
@@ -1240,6 +2604,8 @@ impl<A: Allocator + Clone> RawTableInner<A> {
#[inline]
fn probe_seq(&self, hash: u64) -> ProbeSeq {
ProbeSeq {
+ // This is the same as `hash as usize % self.buckets()` because the number
+ // of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
pos: h1(hash) & self.bucket_mask,
stride: 0,
}
@@ -1250,7 +2616,7 @@ impl<A: Allocator + Clone> RawTableInner<A> {
#[cfg(feature = "raw")]
#[inline]
unsafe fn prepare_insert_no_grow(&mut self, hash: u64) -> Result<usize, ()> {
- let index = self.find_insert_slot(hash);
+ let index = self.find_insert_slot(hash).index;
let old_ctrl = *self.ctrl(index);
if unlikely(self.growth_left == 0 && special_is_empty(old_ctrl)) {
Err(())
@@ -1277,13 +2643,68 @@ impl<A: Allocator + Clone> RawTableInner<A> {
/// Sets a control byte to the hash, and possibly also the replicated control byte at
/// the end of the array.
+ ///
+ /// This function does not make any changes to the `data` parts of the table,
+ /// or any changes to the `items` or `growth_left` field of the table.
+ ///
+ /// # Safety
+ ///
+ /// The safety rules are directly derived from the safety rules for [`RawTableInner::set_ctrl`]
+ /// method. Thus, in order to uphold the safety contracts for the method, you must observe the
+ /// following rules when calling this function:
+ ///
+ /// * The [`RawTableInner`] has already been allocated;
+ ///
+ /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e.
+ /// `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must
+ /// be no greater than the number returned by the function [`RawTableInner::buckets`].
+ ///
+ /// Calling this function on a table that has not been allocated results in [`undefined behavior`].
+ ///
+ /// See also the [`Bucket::as_ptr`] method for more information about properly removing
+ /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`].
+ ///
+ /// [`RawTableInner::set_ctrl`]: RawTableInner::set_ctrl
+ /// [`RawTableInner::buckets`]: RawTableInner::buckets
+ /// [`Bucket::as_ptr`]: Bucket::as_ptr
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
#[inline]
- unsafe fn set_ctrl_h2(&self, index: usize, hash: u64) {
+ unsafe fn set_ctrl_h2(&mut self, index: usize, hash: u64) {
+ // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::set_ctrl_h2`] method.
self.set_ctrl(index, h2(hash));
}
+ /// Replaces the hash in the control byte at the given index with the provided one,
+ /// and possibly also replicates the new control byte at the end of the array of control
+ /// bytes, returning the old control byte.
+ ///
+ /// This function does not make any changes to the `data` parts of the table,
+ /// or any changes to the `items` or `growth_left` field of the table.
+ ///
+ /// # Safety
+ ///
+ /// The safety rules are directly derived from the safety rules for [`RawTableInner::set_ctrl_h2`]
+ /// and [`RawTableInner::ctrl`] methods. Thus, in order to uphold the safety contracts for both
+ /// methods, you must observe the following rules when calling this function:
+ ///
+ /// * The [`RawTableInner`] has already been allocated;
+ ///
+ /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e.
+ /// `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must
+ /// be no greater than the number returned by the function [`RawTableInner::buckets`].
+ ///
+ /// Calling this function on a table that has not been allocated results in [`undefined behavior`].
+ ///
+ /// See also the [`Bucket::as_ptr`] method for more information about properly removing
+ /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`].
+ ///
+ /// [`RawTableInner::set_ctrl_h2`]: RawTableInner::set_ctrl_h2
+ /// [`RawTableInner::buckets`]: RawTableInner::buckets
+ /// [`Bucket::as_ptr`]: Bucket::as_ptr
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
#[inline]
- unsafe fn replace_ctrl_h2(&self, index: usize, hash: u64) -> u8 {
+ unsafe fn replace_ctrl_h2(&mut self, index: usize, hash: u64) -> u8 {
+ // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::replace_ctrl_h2`] method.
let prev_ctrl = *self.ctrl(index);
self.set_ctrl_h2(index, hash);
prev_ctrl
@@ -1291,10 +2712,35 @@ impl<A: Allocator + Clone> RawTableInner<A> {
/// Sets a control byte, and possibly also the replicated control byte at
/// the end of the array.
+ ///
+ /// This function does not make any changes to the `data` parts of the table,
+ /// or any changes to the `items` or `growth_left` field of the table.
+ ///
+ /// # Safety
+ ///
+ /// You must observe the following safety rules when calling this function:
+ ///
+ /// * The [`RawTableInner`] has already been allocated;
+ ///
+ /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e.
+ /// `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must
+ /// be no greater than the number returned by the function [`RawTableInner::buckets`].
+ ///
+ /// Calling this function on a table that has not been allocated results in [`undefined behavior`].
+ ///
+ /// See also the [`Bucket::as_ptr`] method for more information about properly removing
+ /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`].
+ ///
+ /// [`RawTableInner::buckets`]: RawTableInner::buckets
+ /// [`Bucket::as_ptr`]: Bucket::as_ptr
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
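+ ///
+ /// A sketch of the branch-free mirror index computed below (editorial;
+ /// `WIDTH` stands in for `Group::WIDTH`):
+ ///
+ /// ```ignore
+ /// const WIDTH: usize = 8;
+ /// fn index2(index: usize, bucket_mask: usize) -> usize {
+ ///     (index.wrapping_sub(WIDTH) & bucket_mask) + WIDTH
+ /// }
+ /// // 64 buckets: only the first WIDTH bytes get a distinct trailing copy.
+ /// assert_eq!(index2(10, 63), 10); // index >= WIDTH: harmless self-write
+ /// assert_eq!(index2(1, 63), 64 + 1); // index < WIDTH: replicated at the end
+ /// // 4 buckets (< WIDTH): every byte is replicated at index + WIDTH.
+ /// assert_eq!(index2(1, 3), 1 + WIDTH);
+ /// ```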
#[inline]
- unsafe fn set_ctrl(&self, index: usize, ctrl: u8) {
+ unsafe fn set_ctrl(&mut self, index: usize, ctrl: u8) {
// Replicate the first Group::WIDTH control bytes at the end of
- // the array without using a branch:
+ // the array without using a branch. If the table is smaller than
+ // the group width (self.buckets() < Group::WIDTH), then
+ // `index2 = Group::WIDTH + index`; otherwise `index2` is:
+ //
// - If index >= Group::WIDTH then index == index2.
// - Otherwise index2 == self.bucket_mask + 1 + index.
//
@@ -1311,16 +2757,43 @@ impl<A: Allocator + Clone> RawTableInner<A> {
// ---------------------------------------------
// | [A] | [B] | [EMPTY] | [EMPTY] | [A] | [B] |
// ---------------------------------------------
+
+ // This is the same as `(index.wrapping_sub(Group::WIDTH)) % self.buckets() + Group::WIDTH`
+ // because the number of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
let index2 = ((index.wrapping_sub(Group::WIDTH)) & self.bucket_mask) + Group::WIDTH;
+ // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::set_ctrl`] method.
*self.ctrl(index) = ctrl;
*self.ctrl(index2) = ctrl;
}
/// Returns a pointer to a control byte.
+ ///
+ /// # Safety
+ ///
+ /// For an allocated [`RawTableInner`], the result is [`Undefined Behavior`]
+ /// if the `index` is greater than `self.bucket_mask + 1 + Group::WIDTH`.
+ /// Calling this function with `index == self.bucket_mask + 1 + Group::WIDTH`
+ /// returns a pointer to the end of the allocated table, which is useless on its own.
+ ///
+ /// Calling this function with `index >= self.bucket_mask + 1 + Group::WIDTH` on a
+ /// table that has not been allocated results in [`Undefined Behavior`].
+ ///
+ /// So, to satisfy both requirements, you should always follow the rule that
+ /// `index < self.bucket_mask + 1 + Group::WIDTH`.
+ ///
+ /// Calling this function on a [`RawTableInner`] that has not been allocated is safe
+ /// for read-only purposes.
+ ///
+ /// See also the [`Bucket::as_ptr()`] method for more information about properly removing
+ /// or saving `data element` from / into the [`RawTable`] / [`RawTableInner`].
+ ///
+ /// [`Bucket::as_ptr()`]: Bucket::as_ptr()
+ /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
#[inline]
unsafe fn ctrl(&self, index: usize) -> *mut u8 {
debug_assert!(index < self.num_ctrl_bytes());
+ // SAFETY: The caller must uphold the safety rules for the [`RawTableInner::ctrl`] method.
self.ctrl.as_ptr().add(index)
}
@@ -1329,6 +2802,17 @@ impl<A: Allocator + Clone> RawTableInner<A> {
self.bucket_mask + 1
}
+ /// Checks whether the bucket at `index` is full.
+ ///
+ /// # Safety
+ ///
+ /// The caller must ensure `index` is less than the number of buckets.
+ #[inline]
+ unsafe fn is_bucket_full(&self, index: usize) -> bool {
+ debug_assert!(index < self.buckets());
+ is_full(*self.ctrl(index))
+ }
+
#[inline]
fn num_ctrl_bytes(&self) -> usize {
self.bucket_mask + 1 + Group::WIDTH
@@ -1339,25 +2823,45 @@ impl<A: Allocator + Clone> RawTableInner<A> {
self.bucket_mask == 0
}
+ /// Attempts to allocate a new hash table with at least enough capacity
+ /// for inserting the given number of elements without reallocating,
+ /// and returns it inside a `ScopeGuard` to protect against panics in the
+ /// hash function.
+ ///
+ /// # Note
+ ///
+ /// It is recommended (but not required):
+ ///
+ /// * That the new table's `capacity` be greater than or equal to `self.items`;
+ ///
+ /// * That the `alloc` be the same [`Allocator`] as the `Allocator` used
+ ///   to allocate this table;
+ ///
+ /// * That the `table_layout` be the same [`TableLayout`] as the `TableLayout` used
+ ///   to allocate this table.
+ ///
+ /// If `table_layout` does not match the `TableLayout` that was used to allocate
+ /// this table, then using `mem::swap` with the `self` and the new table returned
+ /// by this function results in [`undefined behavior`].
+ ///
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
#[allow(clippy::mut_mut)]
#[inline]
- unsafe fn prepare_resize(
+ fn prepare_resize<'a, A>(
&self,
+ alloc: &'a A,
table_layout: TableLayout,
capacity: usize,
fallibility: Fallibility,
- ) -> Result<crate::scopeguard::ScopeGuard<Self, impl FnMut(&mut Self)>, TryReserveError> {
+ ) -> Result<crate::scopeguard::ScopeGuard<Self, impl FnMut(&mut Self) + 'a>, TryReserveError>
+ where
+ A: Allocator,
+ {
debug_assert!(self.items <= capacity);
// Allocate and initialize the new table.
- let mut new_table = RawTableInner::fallible_with_capacity(
- self.alloc.clone(),
- table_layout,
- capacity,
- fallibility,
- )?;
- new_table.growth_left -= self.items;
- new_table.items = self.items;
+ let new_table =
+ RawTableInner::fallible_with_capacity(alloc, table_layout, capacity, fallibility)?;
// The hash function may panic, in which case we simply free the new
// table without dropping any elements that may have been copied into
@@ -1367,7 +2871,11 @@ impl<A: Allocator + Clone> RawTableInner<A> {
// the comment at the bottom of this function.
Ok(guard(new_table, move |self_| {
if !self_.is_empty_singleton() {
- self_.free_buckets(table_layout);
+ // SAFETY:
+ // 1. We have checked that our table is allocated.
+ // 2. We know for sure that the `alloc` and `table_layout` matches the
+ // [`Allocator`] and [`TableLayout`] used to allocate this table.
+ unsafe { self_.free_buckets(alloc, table_layout) };
}
}))
}
@@ -1376,16 +2884,38 @@ impl<A: Allocator + Clone> RawTableInner<A> {
///
/// This uses dynamic dispatch to reduce the amount of
/// code generated, but it is eliminated by LLVM optimizations when inlined.
+ ///
+ /// # Safety
+ ///
+ /// If any of the following conditions are violated, the result is
+ /// [`undefined behavior`]:
+ ///
+ /// * The `alloc` must be the same [`Allocator`] as the `Allocator` used
+ /// to allocate this table.
+ ///
+ /// * The `layout` must be the same [`TableLayout`] as the `TableLayout`
+ /// used to allocate this table.
+ ///
+ /// * The `drop` function (`fn(*mut u8)`) must be the actual drop function of
+ /// the elements stored in the table.
+ ///
+ /// * The [`RawTableInner`] must have properly initialized control bytes.
+ ///
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
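+ ///
+ /// The in-place vs. resize decision below hinges on how much capacity is
+ /// locked up by tombstones. Illustrative numbers assuming the crate's 7/8
+ /// load factor (editorial sketch):
+ ///
+ /// ```ignore
+ /// let buckets = 64usize;
+ /// let full_capacity = buckets / 8 * 7; // 56, per bucket_mask_to_capacity
+ /// let (items, additional) = (20usize, 5usize);
+ /// let new_items = items + additional; // 25
+ /// // 25 <= 28: plenty of DELETED slots to reclaim, so rehash in place.
+ /// assert!(new_items <= full_capacity / 2);
+ /// ```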
#[allow(clippy::inline_always)]
#[inline(always)]
- unsafe fn reserve_rehash_inner(
+ unsafe fn reserve_rehash_inner<A>(
&mut self,
+ alloc: &A,
additional: usize,
hasher: &dyn Fn(&mut Self, usize) -> u64,
fallibility: Fallibility,
layout: TableLayout,
drop: Option<fn(*mut u8)>,
- ) -> Result<(), TryReserveError> {
+ ) -> Result<(), TryReserveError>
+ where
+ A: Allocator,
+ {
// Avoid `Option::ok_or_else` because it bloats LLVM IR.
let new_items = match self.items.checked_add(additional) {
Some(new_items) => new_items,
@@ -1395,12 +2925,30 @@ impl<A: Allocator + Clone> RawTableInner<A> {
if new_items <= full_capacity / 2 {
// Rehash in-place without re-allocating if we have plenty of spare
// capacity that is locked up due to DELETED entries.
+
+ // SAFETY:
+ // 1. We know for sure that [`RawTableInner`] has already been allocated
+ // (since new_items <= full_capacity / 2);
+ // 2. The caller ensures that `drop` function is the actual drop function of
+ // the elements stored in the table.
+ // 3. The caller ensures that `layout` matches the [`TableLayout`] that was
+ // used to allocate this table.
+ // 4. The caller ensures that the control bytes of the `RawTableInner`
+ // are already initialized.
self.rehash_in_place(hasher, layout.size, drop);
Ok(())
} else {
// Otherwise, conservatively resize to at least the next size up
// to avoid churning deletes into frequent rehashes.
+ //
+ // SAFETY:
+ // 1. We know for sure that `capacity >= self.items`.
+ // 2. The caller ensures that `alloc` and `layout` matches the [`Allocator`] and
+ // [`TableLayout`] that were used to allocate this table.
+ // 3. The caller ensures that the control bytes of the `RawTableInner`
+ // are already initialized.
self.resize_inner(
+ alloc,
usize::max(new_items, full_capacity + 1),
hasher,
fallibility,
@@ -1409,48 +2957,160 @@ impl<A: Allocator + Clone> RawTableInner<A> {
}
}
+ /// Returns an iterator over full buckets indices in the table.
+ ///
+ /// # Safety
+ ///
+ /// Behavior is undefined if any of the following conditions are violated:
+ ///
+ /// * The caller has to ensure that the `RawTableInner` outlives the
+ /// `FullBucketsIndices`. Because we cannot make the `next` method
+ /// unsafe on the `FullBucketsIndices` struct, we have to make the
+ /// `full_buckets_indices` method unsafe.
+ ///
+ /// * The [`RawTableInner`] must have properly initialized control bytes.
+ #[inline(always)]
+ unsafe fn full_buckets_indices(&self) -> FullBucketsIndices {
+ // SAFETY:
+ // 1. Since the caller of this function ensures that the control bytes
+ // are properly initialized and `self.ctrl(0)` points to the start
+ // of the array of control bytes, therefore: `ctrl` is valid for reads,
+ // properly aligned to `Group::WIDTH` and points to the properly initialized
+ // control bytes.
+ // 2. The value of `items` is equal to the amount of data (values) added
+ // to the table.
+ //
+ // `ctrl` points here (to the start
+ // of the first control byte `CT0`)
+ // ∨
+ // [Pad], T_n, ..., T1, T0, |CT0, CT1, ..., CT_n|, Group::WIDTH
+ // \________ ________/
+ // \/
+ // `n = buckets - 1`, i.e. `RawTableInner::buckets() - 1`
+ //
+ // where: T0...T_n - our stored data;
+ // CT0...CT_n - control bytes or metadata for `data`.
+ let ctrl = NonNull::new_unchecked(self.ctrl(0));
+
+ FullBucketsIndices {
+ // Load the first group
+ // SAFETY: See explanation above.
+ current_group: Group::load_aligned(ctrl.as_ptr()).match_full().into_iter(),
+ group_first_index: 0,
+ ctrl,
+ items: self.items,
+ }
+ }
+
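// --- Illustrative model (editor's addition, not part of the patch) ---
// How `full_buckets_indices` walks the control bytes above, modeled in
// plain scalar code instead of SIMD groups. `WIDTH` and the byte values
// mirror hashbrown's convention: FULL bytes have the top bit clear,
// EMPTY is 0xFF and DELETED is 0x80.
const WIDTH: usize = 8;
const EMPTY: u8 = 0xFF;
const DELETED: u8 = 0x80;

fn full_indices(ctrl: &[u8]) -> Vec<usize> {
    let mut out = Vec::new();
    for (g, group) in ctrl.chunks(WIDTH).enumerate() {
        // `match_full` analogue: keep positions whose top bit is clear.
        for (i, &byte) in group.iter().enumerate() {
            if byte & 0x80 == 0 {
                out.push(g * WIDTH + i);
            }
        }
    }
    out
}

fn main() {
    let mut ctrl = [EMPTY; 16];
    ctrl[1] = 0x23; // h2 tag of some hash: FULL
    ctrl[9] = 0x51; // FULL
    ctrl[4] = DELETED; // tombstone, not yielded
    assert_eq!(full_indices(&ctrl), [1, 9]);
}
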
/// Allocates a new table of a different size and moves the contents of the
/// current table into it.
///
/// This uses dynamic dispatch to reduce the amount of
/// code generated, but it is eliminated by LLVM optimizations when inlined.
+ ///
+ /// # Safety
+ ///
+ /// If any of the following conditions are violated, the result is
+ /// [`undefined behavior`]:
+ ///
+ /// * The `alloc` must be the same [`Allocator`] as the `Allocator` used
+ /// to allocate this table;
+ ///
+ /// * The `layout` must be the same [`TableLayout`] as the `TableLayout`
+ /// used to allocate this table;
+ ///
+ /// * The [`RawTableInner`] must have properly initialized control bytes.
+ ///
+    /// The caller of this function must ensure that `capacity >= self.items`,
+    /// otherwise:
+ ///
+ /// * If `self.items != 0`, calling of this function with `capacity == 0`
+ /// results in [`undefined behavior`].
+ ///
+    /// * If `capacity_to_buckets(capacity) < Group::WIDTH` and
+    ///   `self.items > capacity_to_buckets(capacity)`, calling this function
+    ///   results in [`undefined behavior`].
+    ///
+    /// * If `capacity_to_buckets(capacity) >= Group::WIDTH` and
+    ///   `self.items > capacity_to_buckets(capacity)`, calling this function
+    ///   never returns (it will go into an infinite loop).
+ ///
+    /// Note: It is recommended (but not required) that the new table's `capacity`
+    /// be greater than or equal to `self.items`. If `capacity <= self.items`,
+    /// this function may never return. See [`RawTableInner::find_insert_slot`] for
+    /// more information.
+ ///
+ /// [`RawTableInner::find_insert_slot`]: RawTableInner::find_insert_slot
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
#[allow(clippy::inline_always)]
#[inline(always)]
- unsafe fn resize_inner(
+ unsafe fn resize_inner<A>(
&mut self,
+ alloc: &A,
capacity: usize,
hasher: &dyn Fn(&mut Self, usize) -> u64,
fallibility: Fallibility,
layout: TableLayout,
- ) -> Result<(), TryReserveError> {
- let mut new_table = self.prepare_resize(layout, capacity, fallibility)?;
-
- // Copy all elements to the new table.
- for i in 0..self.buckets() {
- if !is_full(*self.ctrl(i)) {
- continue;
- }
-
+ ) -> Result<(), TryReserveError>
+ where
+ A: Allocator,
+ {
+        // SAFETY: We know for sure that `alloc` and `layout` match the [`Allocator`] and [`TableLayout`]
+ // that were used to allocate this table.
+ let mut new_table = self.prepare_resize(alloc, layout, capacity, fallibility)?;
+
+ // SAFETY: We know for sure that RawTableInner will outlive the
+ // returned `FullBucketsIndices` iterator, and the caller of this
+ // function ensures that the control bytes are properly initialized.
+ for full_byte_index in self.full_buckets_indices() {
// This may panic.
- let hash = hasher(self, i);
+ let hash = hasher(self, full_byte_index);
+ // SAFETY:
// We can use a simpler version of insert() here since:
- // - there are no DELETED entries.
- // - we know there is enough space in the table.
- // - all elements are unique.
- let (index, _) = new_table.prepare_insert_slot(hash);
-
+ // 1. There are no DELETED entries.
+ // 2. We know there is enough space in the table.
+ // 3. All elements are unique.
+ // 4. The caller of this function guarantees that `capacity > 0`
+ // so `new_table` must already have some allocated memory.
+ // 5. We set `growth_left` and `items` fields of the new table
+ // after the loop.
+ // 6. We insert into the table, at the returned index, the data
+ // matching the given hash immediately after calling this function.
+ let (new_index, _) = new_table.prepare_insert_slot(hash);
+
+ // SAFETY:
+ //
+ // * `src` is valid for reads of `layout.size` bytes, since the
+ // table is alive and the `full_byte_index` is guaranteed to be
+ // within bounds (see `FullBucketsIndices::next_impl`);
+ //
+ // * `dst` is valid for writes of `layout.size` bytes, since the
+ // caller ensures that `table_layout` matches the [`TableLayout`]
+ // that was used to allocate old table and we have the `new_index`
+ // returned by `prepare_insert_slot`.
+ //
+ // * Both `src` and `dst` are properly aligned.
+ //
+        //   * Both `src` and `dst` point to different regions of memory.
ptr::copy_nonoverlapping(
- self.bucket_ptr(i, layout.size),
- new_table.bucket_ptr(index, layout.size),
+ self.bucket_ptr(full_byte_index, layout.size),
+ new_table.bucket_ptr(new_index, layout.size),
layout.size,
);
}
+ // The hash function didn't panic, so we can safely set the
+ // `growth_left` and `items` fields of the new table.
+ new_table.growth_left -= self.items;
+ new_table.items = self.items;
+
// We successfully copied all elements without panicking. Now replace
// self with the new table. The old table will have its memory freed but
// the items will not be dropped (since they have been moved into the
// new table).
+ // SAFETY: The caller ensures that `table_layout` matches the [`TableLayout`]
+ // that was used to allocate this table.
mem::swap(self, &mut new_table);
Ok(())
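
// --- Illustrative sketch (editor's addition, not part of the patch) ---
// The resize strategy above on a toy open-addressing table: allocate a
// larger power-of-two table, re-insert every full slot by its hash, then
// swap. The toy uses linear probing and treats the key as its own hash;
// hashbrown probes in SIMD groups and stores data/ctrl in one allocation.
fn resize(old: Vec<Option<u64>>, new_buckets: usize) -> Vec<Option<u64>> {
    debug_assert!(new_buckets.is_power_of_two());
    let mask = new_buckets - 1;
    let mut new = vec![None; new_buckets];
    for key in old.into_iter().flatten() {
        // Probe from h1(hash) == key & mask until a free slot is found.
        let mut i = (key as usize) & mask;
        while new[i].is_some() {
            i = (i + 1) & mask;
        }
        new[i] = Some(key);
    }
    new
}

fn main() {
    let old = vec![Some(3), None, Some(10), Some(7)];
    let new = resize(old, 8);
    // All three items survive the move; none are dropped or duplicated.
    assert_eq!(new.iter().flatten().count(), 3);
}
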
@@ -1463,6 +3123,21 @@ impl<A: Allocator + Clone> RawTableInner<A> {
///
/// This uses dynamic dispatch to reduce the amount of
/// code generated, but it is eliminated by LLVM optimizations when inlined.
+ ///
+ /// # Safety
+ ///
+ /// If any of the following conditions are violated, the result is [`undefined behavior`]:
+ ///
+ /// * The `size_of` must be equal to the size of the elements stored in the table;
+ ///
+ /// * The `drop` function (`fn(*mut u8)`) must be the actual drop function of
+ /// the elements stored in the table.
+ ///
+ /// * The [`RawTableInner`] has already been allocated;
+ ///
+ /// * The [`RawTableInner`] must have properly initialized control bytes.
+ ///
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
#[allow(clippy::inline_always)]
#[cfg_attr(feature = "inline-more", inline(always))]
#[cfg_attr(not(feature = "inline-more"), inline)]
@@ -1506,8 +3181,10 @@ impl<A: Allocator + Clone> RawTableInner<A> {
let hash = hasher(*guard, i);
// Search for a suitable place to put it
- let new_i = guard.find_insert_slot(hash);
- let new_i_p = guard.bucket_ptr(new_i, size_of);
+ //
+ // SAFETY: Caller of this function ensures that the control bytes
+ // are properly initialized.
+ let new_i = guard.find_insert_slot(hash).index;
// Probing works by scanning through all of the control
// bytes in groups, which may not be aligned to the group
@@ -1519,6 +3196,8 @@ impl<A: Allocator + Clone> RawTableInner<A> {
continue 'outer;
}
+ let new_i_p = guard.bucket_ptr(new_i, size_of);
+
// We are moving the current item to a new position. Write
// our H2 to the control byte of the new position.
let prev_ctrl = guard.replace_ctrl_h2(new_i, hash);
@@ -1545,17 +3224,107 @@ impl<A: Allocator + Clone> RawTableInner<A> {
mem::forget(guard);
}
+ /// Deallocates the table without dropping any entries.
+ ///
+ /// # Note
+ ///
+    /// This function must be called only after [`drop_elements`](RawTableInner::drop_elements),
+    /// otherwise the elements may be leaked. Calling this function also invalidates
+    /// (leaves dangling) all [`Bucket`] instances and the `ctrl` field of the table.
+ ///
+ /// # Safety
+ ///
+ /// If any of the following conditions are violated, the result is [`Undefined Behavior`]:
+ ///
+ /// * The [`RawTableInner`] has already been allocated;
+ ///
+ /// * The `alloc` must be the same [`Allocator`] as the `Allocator` that was used
+ /// to allocate this table.
+ ///
+ /// * The `table_layout` must be the same [`TableLayout`] as the `TableLayout` that was used
+ /// to allocate this table.
+ ///
+ /// See also [`GlobalAlloc::dealloc`] or [`Allocator::deallocate`] for more information.
+ ///
+ /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ /// [`GlobalAlloc::dealloc`]: https://doc.rust-lang.org/alloc/alloc/trait.GlobalAlloc.html#tymethod.dealloc
+ /// [`Allocator::deallocate`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html#tymethod.deallocate
+ #[inline]
+ unsafe fn free_buckets<A>(&mut self, alloc: &A, table_layout: TableLayout)
+ where
+ A: Allocator,
+ {
+ // SAFETY: The caller must uphold the safety contract for `free_buckets`
+ // method.
+ let (ptr, layout) = self.allocation_info(table_layout);
+ alloc.deallocate(ptr, layout);
+ }
+
+ /// Returns a pointer to the allocated memory and the layout that was used to
+ /// allocate the table.
+ ///
+ /// # Safety
+ ///
+    /// The caller of this function must observe the following safety rules:
+ ///
+ /// * The [`RawTableInner`] has already been allocated, otherwise
+ /// calling this function results in [`undefined behavior`]
+ ///
+ /// * The `table_layout` must be the same [`TableLayout`] as the `TableLayout`
+ /// that was used to allocate this table. Failure to comply with this condition
+ /// may result in [`undefined behavior`].
+ ///
+ /// See also [`GlobalAlloc::dealloc`] or [`Allocator::deallocate`] for more information.
+ ///
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ /// [`GlobalAlloc::dealloc`]: https://doc.rust-lang.org/alloc/alloc/trait.GlobalAlloc.html#tymethod.dealloc
+ /// [`Allocator::deallocate`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html#tymethod.deallocate
#[inline]
- unsafe fn free_buckets(&mut self, table_layout: TableLayout) {
+ unsafe fn allocation_info(&self, table_layout: TableLayout) -> (NonNull<u8>, Layout) {
+ debug_assert!(
+ !self.is_empty_singleton(),
+ "this function can only be called on non-empty tables"
+ );
+
// Avoid `Option::unwrap_or_else` because it bloats LLVM IR.
let (layout, ctrl_offset) = match table_layout.calculate_layout_for(self.buckets()) {
Some(lco) => lco,
- None => hint::unreachable_unchecked(),
+ None => unsafe { hint::unreachable_unchecked() },
};
- self.alloc.deallocate(
- NonNull::new_unchecked(self.ctrl.as_ptr().sub(ctrl_offset)),
+ (
+ // SAFETY: The caller must uphold the safety contract for `allocation_info` method.
+ unsafe { NonNull::new_unchecked(self.ctrl.as_ptr().sub(ctrl_offset)) },
layout,
- );
+ )
+ }
+
+ /// Returns a pointer to the allocated memory and the layout that was used to
+ /// allocate the table. If [`RawTableInner`] has not been allocated, this
+    /// function returns a dangling pointer and a `()` (unit) layout.
+ ///
+ /// # Safety
+ ///
+ /// The `table_layout` must be the same [`TableLayout`] as the `TableLayout`
+ /// that was used to allocate this table. Failure to comply with this condition
+ /// may result in [`undefined behavior`].
+ ///
+ /// See also [`GlobalAlloc::dealloc`] or [`Allocator::deallocate`] for more information.
+ ///
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ /// [`GlobalAlloc::dealloc`]: https://doc.rust-lang.org/alloc/alloc/trait.GlobalAlloc.html#tymethod.dealloc
+ /// [`Allocator::deallocate`]: https://doc.rust-lang.org/alloc/alloc/trait.Allocator.html#tymethod.deallocate
+ #[cfg(feature = "raw")]
+ unsafe fn allocation_info_or_zero(&self, table_layout: TableLayout) -> (NonNull<u8>, Layout) {
+ if self.is_empty_singleton() {
+ (NonNull::dangling(), Layout::new::<()>())
+ } else {
+ // SAFETY:
+ // 1. We have checked that our table is allocated.
+ // 2. The caller ensures that `table_layout` matches the [`TableLayout`]
+ // that was used to allocate this table.
+ unsafe { self.allocation_info(table_layout) }
+ }
}
/// Marks all table buckets as empty without dropping their contents.
@@ -1570,27 +3339,95 @@ impl<A: Allocator + Clone> RawTableInner<A> {
self.growth_left = bucket_mask_to_capacity(self.bucket_mask);
}
+    /// Erases the [`Bucket`]'s control byte at the given index so that the bucket
+    /// is no longer reported as full, decreases the `items` count of the table and,
+    /// if possible, increases `self.growth_left`.
+ ///
+ /// This function does not actually erase / drop the [`Bucket`] itself, i.e. it
+ /// does not make any changes to the `data` parts of the table. The caller of this
+ /// function must take care to properly drop the `data`, otherwise calling this
+ /// function may result in a memory leak.
+ ///
+ /// # Safety
+ ///
+ /// You must observe the following safety rules when calling this function:
+ ///
+ /// * The [`RawTableInner`] has already been allocated;
+ ///
+    /// * The control byte at the given `index` must be full;
+ ///
+ /// * The `index` must not be greater than the `RawTableInner.bucket_mask`, i.e.
+ /// `index <= RawTableInner.bucket_mask` or, in other words, `(index + 1)` must
+ /// be no greater than the number returned by the function [`RawTableInner::buckets`].
+ ///
+ /// Calling this function on a table that has not been allocated results in [`undefined behavior`].
+ ///
+    /// Calling this function on a table with no elements is unspecified, but calling subsequent
+    /// functions is likely to result in [`undefined behavior`] due to a subtraction overflow
+    /// (`self.items -= 1` overflows when `self.items == 0`).
+ ///
+    /// See also the [`Bucket::as_ptr`] method, for more information about properly removing
+    /// or saving a data element from / into the [`RawTable`] / [`RawTableInner`].
+ ///
+ /// [`RawTableInner::buckets`]: RawTableInner::buckets
+ /// [`Bucket::as_ptr`]: Bucket::as_ptr
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
#[inline]
unsafe fn erase(&mut self, index: usize) {
- debug_assert!(is_full(*self.ctrl(index)));
+ debug_assert!(self.is_bucket_full(index));
+
+ // This is the same as `index.wrapping_sub(Group::WIDTH) % self.buckets()` because
+ // the number of buckets is a power of two, and `self.bucket_mask = self.buckets() - 1`.
let index_before = index.wrapping_sub(Group::WIDTH) & self.bucket_mask;
+ // SAFETY:
+ // - The caller must uphold the safety contract for `erase` method;
+ // - `index_before` is guaranteed to be in range due to masking with `self.bucket_mask`
let empty_before = Group::load(self.ctrl(index_before)).match_empty();
let empty_after = Group::load(self.ctrl(index)).match_empty();
- // If we are inside a continuous block of Group::WIDTH full or deleted
- // cells then a probe window may have seen a full block when trying to
- // insert. We therefore need to keep that block non-empty so that
- // lookups will continue searching to the next probe window.
+ // Inserting and searching in the map is performed by two key functions:
+ //
+ // - The `find_insert_slot` function that looks up the index of any `EMPTY` or `DELETED`
+ // slot in a group to be able to insert. If it doesn't find an `EMPTY` or `DELETED`
+ // slot immediately in the first group, it jumps to the next `Group` looking for it,
+ // and so on until it has gone through all the groups in the control bytes.
+ //
+ // - The `find_inner` function that looks for the index of the desired element by looking
+ // at all the `FULL` bytes in the group. If it did not find the element right away, and
+ // there is no `EMPTY` byte in the group, then this means that the `find_insert_slot`
+ // function may have found a suitable slot in the next group. Therefore, `find_inner`
+ // jumps further, and if it does not find the desired element and again there is no `EMPTY`
+    //   byte, then it jumps further, and so on. The search stops only if the `find_inner`
+    //   function finds the desired element or hits an `EMPTY` slot/byte.
+ //
+ // Accordingly, this leads to two consequences:
//
- // Note that in this context `leading_zeros` refers to the bytes at the
- // end of a group, while `trailing_zeros` refers to the bytes at the
- // beginning of a group.
+ // - The map must have `EMPTY` slots (bytes);
+ //
+ // - You can't just mark the byte to be erased as `EMPTY`, because otherwise the `find_inner`
+ // function may stumble upon an `EMPTY` byte before finding the desired element and stop
+ // searching.
+ //
+ // Thus it is necessary to check all bytes after and before the erased element. If we are in
+ // a contiguous `Group` of `FULL` or `DELETED` bytes (the number of `FULL` or `DELETED` bytes
+ // before and after is greater than or equal to `Group::WIDTH`), then we must mark our byte as
+ // `DELETED` in order for the `find_inner` function to go further. On the other hand, if there
+ // is at least one `EMPTY` slot in the `Group`, then the `find_inner` function will still stumble
+ // upon an `EMPTY` byte, so we can safely mark our erased byte as `EMPTY` as well.
+ //
+    // Finally, for tables smaller than the group width (self.buckets() < Group::WIDTH) we have
+    // `index_before == (index.wrapping_sub(Group::WIDTH) & self.bucket_mask) == index`, so,
+    // given all of the above, such tables cannot have `DELETED` bytes.
+ //
+ // Note that in this context `leading_zeros` refers to the bytes at the end of a group, while
+ // `trailing_zeros` refers to the bytes at the beginning of a group.
let ctrl = if empty_before.leading_zeros() + empty_after.trailing_zeros() >= Group::WIDTH {
DELETED
} else {
self.growth_left += 1;
EMPTY
};
+ // SAFETY: the caller must uphold the safety contract for `erase` method.
self.set_ctrl(index, ctrl);
self.items -= 1;
}
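
// --- Illustrative model (editor's addition, not part of the patch) ---
// The DELETED-vs-EMPTY decision from `erase` above, on plain bitmasks.
// Assumptions for this sketch: 8-byte groups, and bit `i` of a mask set
// means "the i-th control byte of that group is EMPTY".
const WIDTH: u32 = 8;

fn tombstone_needed(empty_before: u8, empty_after: u8) -> bool {
    // `leading_zeros` counts non-EMPTY bytes at the end of the group
    // before the slot; `trailing_zeros` counts non-EMPTY bytes at the
    // start of the group beginning at the slot. If together they cover
    // a whole probe window, a probe may have skipped past this slot,
    // so it must become DELETED rather than EMPTY.
    empty_before.leading_zeros().min(WIDTH) + empty_after.trailing_zeros().min(WIDTH) >= WIDTH
}

fn main() {
    // No EMPTY byte anywhere near the slot: a full window is covered,
    // so the erased byte must become a DELETED tombstone.
    assert!(tombstone_needed(0b0000_0000, 0b0000_0000));
    // EMPTY bytes close by on both sides: probes would have stopped
    // here anyway, so the byte can safely become EMPTY again.
    assert!(!tombstone_needed(0b1000_0000, 0b0000_0010));
}
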
@@ -1599,12 +3436,16 @@ impl<A: Allocator + Clone> RawTableInner<A> {
impl<T: Clone, A: Allocator + Clone> Clone for RawTable<T, A> {
fn clone(&self) -> Self {
if self.table.is_empty_singleton() {
- Self::new_in(self.table.alloc.clone())
+ Self::new_in(self.alloc.clone())
} else {
unsafe {
// Avoid `Result::ok_or_else` because it bloats LLVM IR.
- let new_table = match Self::new_uninitialized(
- self.table.alloc.clone(),
+ //
+ // SAFETY: This is safe as we are taking the size of an already allocated table
+            // and therefore capacity overflow cannot occur, `self.table.buckets()` is a power
+            // of two and all allocator errors will be caught inside `RawTableInner::new_uninitialized`.
+ let mut new_table = match Self::new_uninitialized(
+ self.alloc.clone(),
self.table.buckets(),
Fallibility::Infallible,
) {
@@ -1612,24 +3453,32 @@ impl<T: Clone, A: Allocator + Clone> Clone for RawTable<T, A> {
Err(_) => hint::unreachable_unchecked(),
};
- // If cloning fails then we need to free the allocation for the
- // new table. However we don't run its drop since its control
- // bytes are not initialized yet.
- let mut guard = guard(ManuallyDrop::new(new_table), |new_table| {
- new_table.free_buckets();
- });
-
- guard.clone_from_spec(self);
-
- // Disarm the scope guard and return the newly created table.
- ManuallyDrop::into_inner(ScopeGuard::into_inner(guard))
+ // Cloning elements may fail (the clone function may panic). But we don't
+ // need to worry about uninitialized control bits, since:
+ // 1. The number of items (elements) in the table is zero, which means that
+            //    the control bits will not be read by the `Drop` function.
+ // 2. The `clone_from_spec` method will first copy all control bits from
+ // `self` (thus initializing them). But this will not affect the `Drop`
+ // function, since the `clone_from_spec` function sets `items` only after
+            //    successfully cloning all elements.
+ new_table.clone_from_spec(self);
+ new_table
}
}
}
fn clone_from(&mut self, source: &Self) {
if source.table.is_empty_singleton() {
- *self = Self::new_in(self.table.alloc.clone());
+ let mut old_inner = mem::replace(&mut self.table, RawTableInner::NEW);
+ unsafe {
+ // SAFETY:
+ // 1. We call the function only once;
+            // 2. We know for sure that `alloc` and `table_layout` match the [`Allocator`]
+ // and [`TableLayout`] that were used to allocate this table.
+ // 3. If any elements' drop function panics, then there will only be a memory leak,
+ // because we have replaced the inner table with a new one.
+ old_inner.drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
+ }
} else {
unsafe {
// Make sure that if any panics occurs, we clear the table and
@@ -1644,27 +3493,38 @@ impl<T: Clone, A: Allocator + Clone> Clone for RawTable<T, A> {
//
// This leak is unavoidable: we can't try dropping more elements
// since this could lead to another panic and abort the process.
- self_.drop_elements();
+ //
+            // SAFETY: If something goes wrong, we clear our table right after
+ // dropping the elements, so there is no double drop, since `items`
+ // will be equal to zero.
+ self_.table.drop_elements::<T>();
// If necessary, resize our table to match the source.
if self_.buckets() != source.buckets() {
- // Skip our drop by using ptr::write.
- if !self_.table.is_empty_singleton() {
- self_.free_buckets();
+ let new_inner = match RawTableInner::new_uninitialized(
+ &self_.alloc,
+ Self::TABLE_LAYOUT,
+ source.buckets(),
+ Fallibility::Infallible,
+ ) {
+ Ok(table) => table,
+ Err(_) => hint::unreachable_unchecked(),
+ };
+                    // Replace the old inner with the new uninitialized one. This is ok, since if
+                    // something goes wrong the `ScopeGuard` will initialize all control bytes and
+                    // leave an empty table.
+ let mut old_inner = mem::replace(&mut self_.table, new_inner);
+ if !old_inner.is_empty_singleton() {
+ // SAFETY:
+ // 1. We have checked that our table is allocated.
+                        // 2. We know for sure that `alloc` and `table_layout` match
+ // the [`Allocator`] and [`TableLayout`] that were used to allocate this table.
+ old_inner.free_buckets(&self_.alloc, Self::TABLE_LAYOUT);
}
- (&mut **self_ as *mut Self).write(
- // Avoid `Result::unwrap_or_else` because it bloats LLVM IR.
- match Self::new_uninitialized(
- self_.table.alloc.clone(),
- source.buckets(),
- Fallibility::Infallible,
- ) {
- Ok(table) => table,
- Err(_) => hint::unreachable_unchecked(),
- },
- );
}
+ // Cloning elements may fail (the clone function may panic), but the `ScopeGuard`
+ // inside the `clone_from_impl` function will take care of that, dropping all
+ // cloned elements if necessary. Our `ScopeGuard` will clear the table.
self_.clone_from_spec(source);
// Disarm the scope guard if cloning was successful.
@@ -1696,7 +3556,8 @@ impl<T: Copy, A: Allocator + Clone> RawTableClone for RawTable<T, A> {
.copy_to_nonoverlapping(self.table.ctrl(0), self.table.num_ctrl_bytes());
source
.data_start()
- .copy_to_nonoverlapping(self.data_start(), self.table.buckets());
+ .as_ptr()
+ .copy_to_nonoverlapping(self.data_start().as_ptr(), self.table.buckets());
self.table.items = source.table.items;
self.table.growth_left = source.table.growth_left;
@@ -1720,9 +3581,9 @@ impl<T: Clone, A: Allocator + Clone> RawTable<T, A> {
// to make sure we drop only the elements that have been
// cloned so far.
let mut guard = guard((0, &mut *self), |(index, self_)| {
- if mem::needs_drop::<T>() && !self_.is_empty() {
- for i in 0..=*index {
- if is_full(*self_.table.ctrl(i)) {
+ if T::NEEDS_DROP {
+ for i in 0..*index {
+ if self_.is_bucket_full(i) {
self_.bucket(i).drop();
}
}
@@ -1735,7 +3596,7 @@ impl<T: Clone, A: Allocator + Clone> RawTable<T, A> {
to.write(from.as_ref().clone());
// Update the index in case we need to unwind.
- guard.0 = index;
+ guard.0 = index + 1;
}
// Successfully cloned all items, no need to clean up.
@@ -1757,7 +3618,7 @@ impl<T: Clone, A: Allocator + Clone> RawTable<T, A> {
{
self.clear();
- let guard_self = guard(&mut *self, |self_| {
+ let mut guard_self = guard(&mut *self, |self_| {
// Clear the partially copied table if a panic occurs, otherwise
// items and growth_left will be out of sync with the contents
// of the table.
@@ -1790,7 +3651,7 @@ impl<T: Clone, A: Allocator + Clone> RawTable<T, A> {
}
}
-impl<T, A: Allocator + Clone + Default> Default for RawTable<T, A> {
+impl<T, A: Allocator + Default> Default for RawTable<T, A> {
#[inline]
fn default() -> Self {
Self::new_in(Default::default())
@@ -1798,31 +3659,41 @@ impl<T, A: Allocator + Clone + Default> Default for RawTable<T, A> {
}
#[cfg(feature = "nightly")]
-unsafe impl<#[may_dangle] T, A: Allocator + Clone> Drop for RawTable<T, A> {
+unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawTable<T, A> {
#[cfg_attr(feature = "inline-more", inline)]
fn drop(&mut self) {
- if !self.table.is_empty_singleton() {
- unsafe {
- self.drop_elements();
- self.free_buckets();
- }
+ unsafe {
+ // SAFETY:
+ // 1. We call the function only once;
+            // 2. We know for sure that `alloc` and `table_layout` match the [`Allocator`]
+ // and [`TableLayout`] that were used to allocate this table.
+ // 3. If the drop function of any elements fails, then only a memory leak will occur,
+ // and we don't care because we are inside the `Drop` function of the `RawTable`,
+ // so there won't be any table left in an inconsistent state.
+ self.table
+ .drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
}
}
}
#[cfg(not(feature = "nightly"))]
-impl<T, A: Allocator + Clone> Drop for RawTable<T, A> {
+impl<T, A: Allocator> Drop for RawTable<T, A> {
#[cfg_attr(feature = "inline-more", inline)]
fn drop(&mut self) {
- if !self.table.is_empty_singleton() {
- unsafe {
- self.drop_elements();
- self.free_buckets();
- }
+ unsafe {
+ // SAFETY:
+ // 1. We call the function only once;
+            // 2. We know for sure that `alloc` and `table_layout` match the [`Allocator`]
+ // and [`TableLayout`] that were used to allocate this table.
+ // 3. If the drop function of any elements fails, then only a memory leak will occur,
+ // and we don't care because we are inside the `Drop` function of the `RawTable`,
+ // so there won't be any table left in an inconsistent state.
+ self.table
+ .drop_inner_table::<T, _>(&self.alloc, Self::TABLE_LAYOUT);
}
}
}
-impl<T, A: Allocator + Clone> IntoIterator for RawTable<T, A> {
+impl<T, A: Allocator> IntoIterator for RawTable<T, A> {
type Item = T;
type IntoIter = RawIntoIter<T, A>;
@@ -1840,7 +3711,7 @@ impl<T, A: Allocator + Clone> IntoIterator for RawTable<T, A> {
pub(crate) struct RawIterRange<T> {
// Mask of full buckets in the current group. Bits are cleared from this
// mask as each element is processed.
- current_group: BitMask,
+ current_group: BitMaskIter,
// Pointer to the buckets for the current group.
data: Bucket<T>,
@@ -1856,19 +3727,44 @@ pub(crate) struct RawIterRange<T> {
impl<T> RawIterRange<T> {
/// Returns a `RawIterRange` covering a subset of a table.
///
- /// The control byte address must be aligned to the group size.
+ /// # Safety
+ ///
+ /// If any of the following conditions are violated, the result is
+ /// [`undefined behavior`]:
+ ///
+    /// * `ctrl` must be [valid] for reads, i.e. the table outlives the `RawIterRange`;
+ ///
+ /// * `ctrl` must be properly aligned to the group size (Group::WIDTH);
+ ///
+ /// * `ctrl` must point to the array of properly initialized control bytes;
+ ///
+ /// * `data` must be the [`Bucket`] at the `ctrl` index in the table;
+ ///
+ /// * the value of `len` must be less than or equal to the number of table buckets,
+ /// and the returned value of `ctrl.as_ptr().add(len).offset_from(ctrl.as_ptr())`
+ /// must be positive.
+ ///
+ /// * The `ctrl.add(len)` pointer must be either in bounds or one
+ /// byte past the end of the same [allocated table].
+ ///
+ /// * The `len` must be a power of two.
+ ///
+ /// [valid]: https://doc.rust-lang.org/std/ptr/index.html#safety
+ /// [`undefined behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
#[cfg_attr(feature = "inline-more", inline)]
unsafe fn new(ctrl: *const u8, data: Bucket<T>, len: usize) -> Self {
debug_assert_ne!(len, 0);
debug_assert_eq!(ctrl as usize % Group::WIDTH, 0);
+ // SAFETY: The caller must uphold the safety rules for the [`RawIterRange::new`]
let end = ctrl.add(len);
// Load the first group and advance ctrl to point to the next group
+ // SAFETY: The caller must uphold the safety rules for the [`RawIterRange::new`]
let current_group = Group::load_aligned(ctrl).match_full();
let next_ctrl = ctrl.add(Group::WIDTH);
Self {
- current_group,
+ current_group: current_group.into_iter(),
data,
next_ctrl,
end,
@@ -1925,8 +3821,7 @@ impl<T> RawIterRange<T> {
#[cfg_attr(feature = "inline-more", inline)]
unsafe fn next_impl<const DO_CHECK_PTR_RANGE: bool>(&mut self) -> Option<Bucket<T>> {
loop {
- if let Some(index) = self.current_group.lowest_set_bit() {
- self.current_group = self.current_group.remove_lowest_bit();
+ if let Some(index) = self.current_group.next() {
return Some(self.data.next_n(index));
}
@@ -1939,7 +3834,86 @@ impl<T> RawIterRange<T> {
// than the group size where the trailing control bytes are all
// EMPTY. On larger tables self.end is guaranteed to be aligned
// to the group size (since tables are power-of-two sized).
- self.current_group = Group::load_aligned(self.next_ctrl).match_full();
+ self.current_group = Group::load_aligned(self.next_ctrl).match_full().into_iter();
+ self.data = self.data.next_n(Group::WIDTH);
+ self.next_ctrl = self.next_ctrl.add(Group::WIDTH);
+ }
+ }
+
+ /// Folds every element into an accumulator by applying an operation,
+ /// returning the final result.
+ ///
+ /// `fold_impl()` takes three arguments: the number of items remaining in
+ /// the iterator, an initial value, and a closure with two arguments: an
+    /// 'accumulator' and an element. The closure returns the value that the
+ /// accumulator should have for the next iteration.
+ ///
+ /// The initial value is the value the accumulator will have on the first call.
+ ///
+ /// After applying this closure to every element of the iterator, `fold_impl()`
+ /// returns the accumulator.
+ ///
+ /// # Safety
+ ///
+ /// If any of the following conditions are violated, the result is
+ /// [`Undefined Behavior`]:
+ ///
+ /// * The [`RawTableInner`] / [`RawTable`] must be alive and not moved,
+    ///   i.e. the table outlives the `RawIterRange`;
+ ///
+ /// * The provided `n` value must match the actual number of items
+ /// in the table.
+ ///
+ /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ #[allow(clippy::while_let_on_iterator)]
+ #[cfg_attr(feature = "inline-more", inline)]
+ unsafe fn fold_impl<F, B>(mut self, mut n: usize, mut acc: B, mut f: F) -> B
+ where
+ F: FnMut(B, Bucket<T>) -> B,
+ {
+ loop {
+ while let Some(index) = self.current_group.next() {
+ // The returned `index` will always be in the range `0..Group::WIDTH`,
+ // so that calling `self.data.next_n(index)` is safe (see detailed explanation below).
+ debug_assert!(n != 0);
+ let bucket = self.data.next_n(index);
+ acc = f(acc, bucket);
+ n -= 1;
+ }
+
+ if n == 0 {
+ return acc;
+ }
+
+ // SAFETY: The caller of this function ensures that:
+ //
+ // 1. The provided `n` value matches the actual number of items in the table;
+            // 2. The table is alive and has not been moved.
+ //
+ // Taking the above into account, we always stay within the bounds, because:
+ //
+ // 1. For tables smaller than the group width (self.buckets() <= Group::WIDTH),
+ // we will never end up in the given branch, since we should have already
+ // yielded all the elements of the table.
+ //
+            // 2. For tables larger than the group width: the number of buckets is a
+            //    power of two (2 ^ n), and Group::WIDTH is also a power of two (2 ^ k). Since
+            //    `(2 ^ n) > (2 ^ k)`, we have `(2 ^ n) % (2 ^ k) = 0`. As we start from the
+ // start of the array of control bytes, and never try to iterate after
+ // getting all the elements, the last `self.current_group` will read bytes
+ // from the `self.buckets() - Group::WIDTH` index. We know also that
+            //    `self.current_group.next()` will always return indices within the range
+ // `0..Group::WIDTH`.
+ //
+ // Knowing all of the above and taking into account that we are synchronizing
+ // the `self.data` index with the index we used to read the `self.current_group`,
+ // the subsequent `self.data.next_n(index)` will always return a bucket with
+ // an index number less than `self.buckets()`.
+ //
+ // The last `self.next_ctrl`, whose index would be `self.buckets()`, will never
+ // actually be read, since we should have already yielded all the elements of
+ // the table.
+ self.current_group = Group::load_aligned(self.next_ctrl).match_full().into_iter();
self.data = self.data.next_n(Group::WIDTH);
self.next_ctrl = self.next_ctrl.add(Group::WIDTH);
}
@@ -2016,7 +3990,7 @@ impl<T> RawIter<T> {
/// This method should be called _before_ the removal is made. It is not necessary to call this
/// method if you are removing an item that this iterator yielded in the past.
#[cfg(feature = "raw")]
- pub fn reflect_remove(&mut self, b: &Bucket<T>) {
+ pub unsafe fn reflect_remove(&mut self, b: &Bucket<T>) {
self.reflect_toggle_full(b, false);
}
@@ -2030,36 +4004,76 @@ impl<T> RawIter<T> {
///
/// This method should be called _after_ the given insert is made.
#[cfg(feature = "raw")]
- pub fn reflect_insert(&mut self, b: &Bucket<T>) {
+ pub unsafe fn reflect_insert(&mut self, b: &Bucket<T>) {
self.reflect_toggle_full(b, true);
}
/// Refresh the iterator so that it reflects a change to the state of the given bucket.
#[cfg(feature = "raw")]
- fn reflect_toggle_full(&mut self, b: &Bucket<T>, is_insert: bool) {
- unsafe {
- if b.as_ptr() > self.iter.data.as_ptr() {
- // The iterator has already passed the bucket's group.
- // So the toggle isn't relevant to this iterator.
- return;
+ unsafe fn reflect_toggle_full(&mut self, b: &Bucket<T>, is_insert: bool) {
+ if b.as_ptr() > self.iter.data.as_ptr() {
+ // The iterator has already passed the bucket's group.
+ // So the toggle isn't relevant to this iterator.
+ return;
+ }
+
+ if self.iter.next_ctrl < self.iter.end
+ && b.as_ptr() <= self.iter.data.next_n(Group::WIDTH).as_ptr()
+ {
+ // The iterator has not yet reached the bucket's group.
+ // We don't need to reload anything, but we do need to adjust the item count.
+
+ if cfg!(debug_assertions) {
+ // Double-check that the user isn't lying to us by checking the bucket state.
+ // To do that, we need to find its control byte. We know that self.iter.data is
+ // at self.iter.next_ctrl - Group::WIDTH, so we work from there:
+ let offset = offset_from(self.iter.data.as_ptr(), b.as_ptr());
+ let ctrl = self.iter.next_ctrl.sub(Group::WIDTH).add(offset);
+ // This method should be called _before_ a removal, or _after_ an insert,
+ // so in both cases the ctrl byte should indicate that the bucket is full.
+ assert!(is_full(*ctrl));
}
- if self.iter.next_ctrl < self.iter.end
- && b.as_ptr() <= self.iter.data.next_n(Group::WIDTH).as_ptr()
- {
- // The iterator has not yet reached the bucket's group.
- // We don't need to reload anything, but we do need to adjust the item count.
+ if is_insert {
+ self.items += 1;
+ } else {
+ self.items -= 1;
+ }
- if cfg!(debug_assertions) {
- // Double-check that the user isn't lying to us by checking the bucket state.
- // To do that, we need to find its control byte. We know that self.iter.data is
- // at self.iter.next_ctrl - Group::WIDTH, so we work from there:
- let offset = offset_from(self.iter.data.as_ptr(), b.as_ptr());
- let ctrl = self.iter.next_ctrl.sub(Group::WIDTH).add(offset);
- // This method should be called _before_ a removal, or _after_ an insert,
- // so in both cases the ctrl byte should indicate that the bucket is full.
- assert!(is_full(*ctrl));
- }
+ return;
+ }
+
+ // The iterator is at the bucket group that the toggled bucket is in.
+ // We need to do two things:
+ //
+ // - Determine if the iterator already yielded the toggled bucket.
+ // If it did, we're done.
+ // - Otherwise, update the iterator cached group so that it won't
+ // yield a to-be-removed bucket, or _will_ yield a to-be-added bucket.
+ // We'll also need to update the item count accordingly.
+ if let Some(index) = self.iter.current_group.0.lowest_set_bit() {
+ let next_bucket = self.iter.data.next_n(index);
+ if b.as_ptr() > next_bucket.as_ptr() {
+ // The toggled bucket is "before" the bucket the iterator would yield next. We
+ // therefore don't need to do anything --- the iterator has already passed the
+ // bucket in question.
+ //
+ // The item count must already be correct, since a removal or insert "prior" to
+ // the iterator's position wouldn't affect the item count.
+ } else {
+ // The removed bucket is an upcoming bucket. We need to make sure it does _not_
+ // get yielded, and also that it's no longer included in the item count.
+ //
+ // NOTE: We can't just reload the group here, both since that might reflect
+ // inserts we've already passed, and because that might inadvertently unset the
+ // bits for _other_ removals. If we do that, we'd have to also decrement the
+ // item count for those other bits that we unset. But the presumably subsequent
+ // call to reflect for those buckets might _also_ decrement the item count.
+ // Instead, we _just_ flip the bit for the particular bucket the caller asked
+ // us to reflect.
+ let our_bit = offset_from(self.iter.data.as_ptr(), b.as_ptr());
+ let was_full = self.iter.current_group.flip(our_bit);
+ debug_assert_ne!(was_full, is_insert);
if is_insert {
self.items += 1;
@@ -2067,65 +4081,23 @@ impl<T> RawIter<T> {
self.items -= 1;
}
- return;
- }
-
- // The iterator is at the bucket group that the toggled bucket is in.
- // We need to do two things:
- //
- // - Determine if the iterator already yielded the toggled bucket.
- // If it did, we're done.
- // - Otherwise, update the iterator cached group so that it won't
- // yield a to-be-removed bucket, or _will_ yield a to-be-added bucket.
- // We'll also need to update the item count accordingly.
- if let Some(index) = self.iter.current_group.lowest_set_bit() {
- let next_bucket = self.iter.data.next_n(index);
- if b.as_ptr() > next_bucket.as_ptr() {
- // The toggled bucket is "before" the bucket the iterator would yield next. We
- // therefore don't need to do anything --- the iterator has already passed the
- // bucket in question.
- //
- // The item count must already be correct, since a removal or insert "prior" to
- // the iterator's position wouldn't affect the item count.
- } else {
- // The removed bucket is an upcoming bucket. We need to make sure it does _not_
- // get yielded, and also that it's no longer included in the item count.
- //
- // NOTE: We can't just reload the group here, both since that might reflect
- // inserts we've already passed, and because that might inadvertently unset the
- // bits for _other_ removals. If we do that, we'd have to also decrement the
- // item count for those other bits that we unset. But the presumably subsequent
- // call to reflect for those buckets might _also_ decrement the item count.
- // Instead, we _just_ flip the bit for the particular bucket the caller asked
- // us to reflect.
- let our_bit = offset_from(self.iter.data.as_ptr(), b.as_ptr());
- let was_full = self.iter.current_group.flip(our_bit);
- debug_assert_ne!(was_full, is_insert);
-
- if is_insert {
- self.items += 1;
+ if cfg!(debug_assertions) {
+ if b.as_ptr() == next_bucket.as_ptr() {
+ // The removed bucket should no longer be next
+ debug_assert_ne!(self.iter.current_group.0.lowest_set_bit(), Some(index));
} else {
- self.items -= 1;
- }
-
- if cfg!(debug_assertions) {
- if b.as_ptr() == next_bucket.as_ptr() {
- // The removed bucket should no longer be next
- debug_assert_ne!(self.iter.current_group.lowest_set_bit(), Some(index));
- } else {
- // We should not have changed what bucket comes next.
- debug_assert_eq!(self.iter.current_group.lowest_set_bit(), Some(index));
- }
+ // We should not have changed what bucket comes next.
+ debug_assert_eq!(self.iter.current_group.0.lowest_set_bit(), Some(index));
}
}
- } else {
- // We must have already iterated past the removed item.
}
+ } else {
+ // We must have already iterated past the removed item.
}
}
unsafe fn drop_elements(&mut self) {
- if mem::needs_drop::<T>() && self.len() != 0 {
+ if T::NEEDS_DROP && self.items != 0 {
for item in self {
item.drop();
}
@@ -2159,9 +4131,8 @@ impl<T> Iterator for RawIter<T> {
self.iter.next_impl::<false>()
};
- if nxt.is_some() {
- self.items -= 1;
- }
+ debug_assert!(nxt.is_some());
+ self.items -= 1;
nxt
}
@@ -2170,33 +4141,160 @@ impl<T> Iterator for RawIter<T> {
fn size_hint(&self) -> (usize, Option<usize>) {
(self.items, Some(self.items))
}
+
+ #[inline]
+ fn fold<B, F>(self, init: B, f: F) -> B
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> B,
+ {
+ unsafe { self.iter.fold_impl(self.items, init, f) }
+ }
}
impl<T> ExactSizeIterator for RawIter<T> {}
impl<T> FusedIterator for RawIter<T> {}
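
// --- Illustrative sketch (editor's addition, not part of the patch) ---
// Why `RawIter` overrides `fold` above: internal iteration lets the hot
// loop consume the underlying state directly instead of re-entering
// `next` for every element. A safe toy iterator showing the shape:
struct Pair(Option<u32>, Option<u32>);

impl Iterator for Pair {
    type Item = u32;

    fn next(&mut self) -> Option<u32> {
        self.0.take().or_else(|| self.1.take())
    }

    // The custom `fold` drains both slots in a straight line, the way
    // `RawIter::fold` forwards into `RawIterRange::fold_impl`.
    fn fold<B, F>(self, init: B, mut f: F) -> B
    where
        Self: Sized,
        F: FnMut(B, u32) -> B,
    {
        let mut acc = init;
        if let Some(a) = self.0 {
            acc = f(acc, a);
        }
        if let Some(b) = self.1 {
            acc = f(acc, b);
        }
        acc
    }
}

fn main() {
    assert_eq!(Pair(Some(2), Some(3)).fold(0, |acc, x| acc + x), 5);
}
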
+/// Iterator which returns an index of every full bucket in the table.
+///
+/// For maximum flexibility this iterator is not bound by a lifetime, but you
+/// must observe several rules when using it:
+/// - You must not free the hash table while iterating (including via growing/shrinking).
+/// - It is fine to erase a bucket that has been yielded by the iterator.
+/// - Erasing a bucket that has not yet been yielded by the iterator may still
+///   result in the iterator yielding the index of that bucket.
+/// - It is unspecified whether an element inserted after the iterator was
+/// created will be yielded by that iterator.
+/// - The order in which the iterator yields indices of the buckets is unspecified
+/// and may change in the future.
+pub(crate) struct FullBucketsIndices {
+ // Mask of full buckets in the current group. Bits are cleared from this
+ // mask as each element is processed.
+ current_group: BitMaskIter,
+
+ // Initial value of the bytes' indices of the current group (relative
+ // to the start of the control bytes).
+ group_first_index: usize,
+
+ // Pointer to the current group of control bytes,
+ // Must be aligned to the group size (Group::WIDTH).
+ ctrl: NonNull<u8>,
+
+ // Number of elements in the table.
+ items: usize,
+}
+
+impl FullBucketsIndices {
+ /// Advances the iterator and returns the next value.
+ ///
+ /// # Safety
+ ///
+ /// If any of the following conditions are violated, the result is
+ /// [`Undefined Behavior`]:
+ ///
+ /// * The [`RawTableInner`] / [`RawTable`] must be alive and not moved,
+    ///   i.e. the table outlives the `FullBucketsIndices`;
+ ///
+    /// * It must never be called again after all the elements have been yielded.
+ ///
+ /// [`Undefined Behavior`]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ #[inline(always)]
+ unsafe fn next_impl(&mut self) -> Option<usize> {
+ loop {
+ if let Some(index) = self.current_group.next() {
+ // The returned `self.group_first_index + index` will always
+ // be in the range `0..self.buckets()`. See explanation below.
+ return Some(self.group_first_index + index);
+ }
+
+ // SAFETY: The caller of this function ensures that:
+ //
+ // 1. It never tries to iterate after getting all the elements;
+            // 2. The table is alive and has not been moved;
+ // 3. The first `self.ctrl` pointed to the start of the array of control bytes.
+ //
+ // Taking the above into account, we always stay within the bounds, because:
+ //
+ // 1. For tables smaller than the group width (self.buckets() <= Group::WIDTH),
+ // we will never end up in the given branch, since we should have already
+ // yielded all the elements of the table.
+ //
+            // 2. For tables larger than the group width: the number of buckets is a
+            //    power of two (2 ^ n), and Group::WIDTH is also a power of two (2 ^ k). Since
+            //    `(2 ^ n) > (2 ^ k)`, we have `(2 ^ n) % (2 ^ k) = 0`. As we start from
+            //    the start of the array of control bytes, and never try to iterate after
+ // getting all the elements, the last `self.ctrl` will be equal to
+ // the `self.buckets() - Group::WIDTH`, so `self.current_group.next()`
+            //    will always contain indices within the range `0..Group::WIDTH`,
+ // and subsequent `self.group_first_index + index` will always return a
+ // number less than `self.buckets()`.
+ self.ctrl = NonNull::new_unchecked(self.ctrl.as_ptr().add(Group::WIDTH));
+
+ // SAFETY: See explanation above.
+ self.current_group = Group::load_aligned(self.ctrl.as_ptr())
+ .match_full()
+ .into_iter();
+ self.group_first_index += Group::WIDTH;
+ }
+ }
+}
+
+impl Iterator for FullBucketsIndices {
+ type Item = usize;
+
+ /// Advances the iterator and returns the next value. It is up to
+ /// the caller to ensure that the `RawTable` outlives the `FullBucketsIndices`,
+ /// because we cannot make the `next` method unsafe.
+ #[inline(always)]
+ fn next(&mut self) -> Option<usize> {
+ // Return if we already yielded all items.
+ if self.items == 0 {
+ return None;
+ }
+
+ let nxt = unsafe {
+ // SAFETY:
+ // 1. We check number of items to yield using `items` field.
+ // 2. The caller ensures that the table is alive and has not moved.
+ self.next_impl()
+ };
+
+ debug_assert!(nxt.is_some());
+ self.items -= 1;
+
+ nxt
+ }
+
+ #[inline(always)]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (self.items, Some(self.items))
+ }
+}
+
+impl ExactSizeIterator for FullBucketsIndices {}
+impl FusedIterator for FullBucketsIndices {}
+
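// --- Illustrative sketch (editor's addition, not part of the patch) ---
// The termination pattern `FullBucketsIndices::next` uses above: the
// unsafe inner step never bounds-checks; an exact `items` counter gates
// it, so `next` returns `None` precisely when the count runs out. A safe
// toy version of that gating:
struct CountGated<I> {
    inner: I,
    items: usize,
}

impl<I: Iterator> Iterator for CountGated<I> {
    type Item = I::Item;

    fn next(&mut self) -> Option<I::Item> {
        // Return if we already yielded all items.
        if self.items == 0 {
            return None;
        }
        let nxt = self.inner.next();
        debug_assert!(nxt.is_some());
        self.items -= 1;
        nxt
    }

    fn size_hint(&self) -> (usize, Option<usize>) {
        // Exact, which is what makes `ExactSizeIterator` sound.
        (self.items, Some(self.items))
    }
}

fn main() {
    let it = CountGated { inner: 0u32.., items: 3 };
    assert_eq!(it.collect::<Vec<_>>(), [0, 1, 2]);
}
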
/// Iterator which consumes a table and returns elements.
-pub struct RawIntoIter<T, A: Allocator + Clone = Global> {
+pub struct RawIntoIter<T, A: Allocator = Global> {
iter: RawIter<T>,
- allocation: Option<(NonNull<u8>, Layout)>,
+ allocation: Option<(NonNull<u8>, Layout, A)>,
marker: PhantomData<T>,
- alloc: A,
}
-impl<T, A: Allocator + Clone> RawIntoIter<T, A> {
+impl<T, A: Allocator> RawIntoIter<T, A> {
#[cfg_attr(feature = "inline-more", inline)]
pub fn iter(&self) -> RawIter<T> {
self.iter.clone()
}
}
-unsafe impl<T, A: Allocator + Clone> Send for RawIntoIter<T, A>
+unsafe impl<T, A: Allocator> Send for RawIntoIter<T, A>
where
T: Send,
A: Send,
{
}
-unsafe impl<T, A: Allocator + Clone> Sync for RawIntoIter<T, A>
+unsafe impl<T, A: Allocator> Sync for RawIntoIter<T, A>
where
T: Sync,
A: Sync,
@@ -2204,7 +4302,7 @@ where
}
#[cfg(feature = "nightly")]
-unsafe impl<#[may_dangle] T, A: Allocator + Clone> Drop for RawIntoIter<T, A> {
+unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawIntoIter<T, A> {
#[cfg_attr(feature = "inline-more", inline)]
fn drop(&mut self) {
unsafe {
@@ -2212,14 +4310,14 @@ unsafe impl<#[may_dangle] T, A: Allocator + Clone> Drop for RawIntoIter<T, A> {
self.iter.drop_elements();
// Free the table
- if let Some((ptr, layout)) = self.allocation {
- self.alloc.deallocate(ptr, layout);
+ if let Some((ptr, layout, ref alloc)) = self.allocation {
+ alloc.deallocate(ptr, layout);
}
}
}
}
#[cfg(not(feature = "nightly"))]
-impl<T, A: Allocator + Clone> Drop for RawIntoIter<T, A> {
+impl<T, A: Allocator> Drop for RawIntoIter<T, A> {
#[cfg_attr(feature = "inline-more", inline)]
fn drop(&mut self) {
unsafe {
@@ -2227,14 +4325,14 @@ impl<T, A: Allocator + Clone> Drop for RawIntoIter<T, A> {
self.iter.drop_elements();
// Free the table
- if let Some((ptr, layout)) = self.allocation {
- self.alloc.deallocate(ptr, layout);
+ if let Some((ptr, layout, ref alloc)) = self.allocation {
+ alloc.deallocate(ptr, layout);
}
}
}
}
-impl<T, A: Allocator + Clone> Iterator for RawIntoIter<T, A> {
+impl<T, A: Allocator> Iterator for RawIntoIter<T, A> {
type Item = T;
#[cfg_attr(feature = "inline-more", inline)]
@@ -2248,45 +4346,45 @@ impl<T, A: Allocator + Clone> Iterator for RawIntoIter<T, A> {
}
}
-impl<T, A: Allocator + Clone> ExactSizeIterator for RawIntoIter<T, A> {}
-impl<T, A: Allocator + Clone> FusedIterator for RawIntoIter<T, A> {}
+impl<T, A: Allocator> ExactSizeIterator for RawIntoIter<T, A> {}
+impl<T, A: Allocator> FusedIterator for RawIntoIter<T, A> {}
/// Iterator which consumes elements without freeing the table storage.
-pub struct RawDrain<'a, T, A: Allocator + Clone = Global> {
+pub struct RawDrain<'a, T, A: Allocator = Global> {
iter: RawIter<T>,
// The table is moved into the iterator for the duration of the drain. This
// ensures that an empty table is left if the drain iterator is leaked
// without dropping.
- table: ManuallyDrop<RawTable<T, A>>,
- orig_table: NonNull<RawTable<T, A>>,
+ table: RawTableInner,
+ orig_table: NonNull<RawTableInner>,
// We don't use a &'a mut RawTable<T> because we want RawDrain to be
// covariant over T.
marker: PhantomData<&'a RawTable<T, A>>,
}
-impl<T, A: Allocator + Clone> RawDrain<'_, T, A> {
+impl<T, A: Allocator> RawDrain<'_, T, A> {
#[cfg_attr(feature = "inline-more", inline)]
pub fn iter(&self) -> RawIter<T> {
self.iter.clone()
}
}
-unsafe impl<T, A: Allocator + Copy> Send for RawDrain<'_, T, A>
+unsafe impl<T, A: Allocator> Send for RawDrain<'_, T, A>
where
T: Send,
A: Send,
{
}
-unsafe impl<T, A: Allocator + Copy> Sync for RawDrain<'_, T, A>
+unsafe impl<T, A: Allocator> Sync for RawDrain<'_, T, A>
where
T: Sync,
A: Sync,
{
}
-impl<T, A: Allocator + Clone> Drop for RawDrain<'_, T, A> {
+impl<T, A: Allocator> Drop for RawDrain<'_, T, A> {
#[cfg_attr(feature = "inline-more", inline)]
fn drop(&mut self) {
unsafe {
@@ -2300,12 +4398,12 @@ impl<T, A: Allocator + Clone> Drop for RawDrain<'_, T, A> {
// Move the now empty table back to its original location.
self.orig_table
.as_ptr()
- .copy_from_nonoverlapping(&*self.table, 1);
+ .copy_from_nonoverlapping(&self.table, 1);
}
}
}
-impl<T, A: Allocator + Clone> Iterator for RawDrain<'_, T, A> {
+impl<T, A: Allocator> Iterator for RawDrain<'_, T, A> {
type Item = T;
#[cfg_attr(feature = "inline-more", inline)]
@@ -2322,21 +4420,36 @@ impl<T, A: Allocator + Clone> Iterator for RawDrain<'_, T, A> {
}
}
-impl<T, A: Allocator + Clone> ExactSizeIterator for RawDrain<'_, T, A> {}
-impl<T, A: Allocator + Clone> FusedIterator for RawDrain<'_, T, A> {}
+impl<T, A: Allocator> ExactSizeIterator for RawDrain<'_, T, A> {}
+impl<T, A: Allocator> FusedIterator for RawDrain<'_, T, A> {}
/// Iterator over occupied buckets that could match a given hash.
///
/// `RawTable` only stores 7 bits of the hash value, so this iterator may return
/// items that have a hash value different than the one provided. You should
/// always validate the returned values before using them.
-pub struct RawIterHash<'a, T, A: Allocator + Clone = Global> {
- inner: RawIterHashInner<'a, A>,
+///
+/// For maximum flexibility this iterator is not bound by a lifetime, but you
+/// must observe several rules when using it:
+/// - You must not free the hash table while iterating (including via growing/shrinking).
+/// - It is fine to erase a bucket that has been yielded by the iterator.
+/// - Erasing a bucket that has not yet been yielded by the iterator may still
+/// result in the iterator yielding that bucket.
+/// - It is unspecified whether an element inserted after the iterator was
+/// created will be yielded by that iterator.
+/// - The order in which the iterator yields buckets is unspecified and may
+/// change in the future.
+pub struct RawIterHash<T> {
+ inner: RawIterHashInner,
_marker: PhantomData<T>,
}
-struct RawIterHashInner<'a, A: Allocator + Clone> {
- table: &'a RawTableInner<A>,
+struct RawIterHashInner {
+ // See `RawTableInner`'s corresponding fields for details.
+ // We can't store a `*const RawTableInner` as it would get
+ // invalidated by the user calling `&mut` methods on `RawTable`.
+ bucket_mask: usize,
+ ctrl: NonNull<u8>,
// The top 7 bits of the hash.
h2_hash: u8,
@@ -2350,71 +4463,105 @@ struct RawIterHashInner<'a, A: Allocator + Clone> {
bitmask: BitMaskIter,
}
-impl<'a, T, A: Allocator + Clone> RawIterHash<'a, T, A> {
+impl<T> RawIterHash<T> {
#[cfg_attr(feature = "inline-more", inline)]
#[cfg(feature = "raw")]
- fn new(table: &'a RawTable<T, A>, hash: u64) -> Self {
+ unsafe fn new<A: Allocator>(table: &RawTable<T, A>, hash: u64) -> Self {
RawIterHash {
inner: RawIterHashInner::new(&table.table, hash),
_marker: PhantomData,
}
}
}
-impl<'a, A: Allocator + Clone> RawIterHashInner<'a, A> {
+impl RawIterHashInner {
#[cfg_attr(feature = "inline-more", inline)]
#[cfg(feature = "raw")]
- fn new(table: &'a RawTableInner<A>, hash: u64) -> Self {
- unsafe {
- let h2_hash = h2(hash);
- let probe_seq = table.probe_seq(hash);
- let group = Group::load(table.ctrl(probe_seq.pos));
- let bitmask = group.match_byte(h2_hash).into_iter();
-
- RawIterHashInner {
- table,
- h2_hash,
- probe_seq,
- group,
- bitmask,
- }
+ unsafe fn new(table: &RawTableInner, hash: u64) -> Self {
+ let h2_hash = h2(hash);
+ let probe_seq = table.probe_seq(hash);
+ let group = Group::load(table.ctrl(probe_seq.pos));
+ let bitmask = group.match_byte(h2_hash).into_iter();
+
+ RawIterHashInner {
+ bucket_mask: table.bucket_mask,
+ ctrl: table.ctrl,
+ h2_hash,
+ probe_seq,
+ group,
+ bitmask,
}
}
}
-impl<'a, T, A: Allocator + Clone> Iterator for RawIterHash<'a, T, A> {
+impl<T> Iterator for RawIterHash<T> {
type Item = Bucket<T>;
fn next(&mut self) -> Option<Bucket<T>> {
unsafe {
match self.inner.next() {
- Some(index) => Some(self.inner.table.bucket(index)),
+ Some(index) => {
+ // Can't use `RawTable::bucket` here as we don't have
+ // an actual `RawTable` reference to use.
+ debug_assert!(index <= self.inner.bucket_mask);
+ let bucket = Bucket::from_base_index(self.inner.ctrl.cast(), index);
+ Some(bucket)
+ }
None => None,
}
}
}
}
-impl<'a, A: Allocator + Clone> Iterator for RawIterHashInner<'a, A> {
+impl Iterator for RawIterHashInner {
type Item = usize;
fn next(&mut self) -> Option<Self::Item> {
unsafe {
loop {
if let Some(bit) = self.bitmask.next() {
- let index = (self.probe_seq.pos + bit) & self.table.bucket_mask;
+ let index = (self.probe_seq.pos + bit) & self.bucket_mask;
return Some(index);
}
if likely(self.group.match_empty().any_bit_set()) {
return None;
}
- self.probe_seq.move_next(self.table.bucket_mask);
- self.group = Group::load(self.table.ctrl(self.probe_seq.pos));
+ self.probe_seq.move_next(self.bucket_mask);
+
+ // Can't use `RawTableInner::ctrl` here as we don't have
+ // an actual `RawTableInner` reference to use.
+ let index = self.probe_seq.pos;
+ debug_assert!(index < self.bucket_mask + 1 + Group::WIDTH);
+ let group_ctrl = self.ctrl.as_ptr().add(index);
+
+ self.group = Group::load(group_ctrl);
self.bitmask = self.group.match_byte(self.h2_hash).into_iter();
}
}
}
}
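
// --- Illustrative model (editor's addition, not part of the patch) ---
// The probe loop of `RawIterHashInner` above in scalar form: yield every
// position whose stored 7-bit tag matches `h2`, and stop at the first
// group containing an EMPTY byte. The toy probes from index 0 with a
// fixed group width; the real code starts at the hash's probe position
// and wraps with `bucket_mask`. Candidates still need full-key checks,
// since only 7 bits of the hash are stored.
const EMPTY: u8 = 0xFF;

fn candidates(ctrl: &[u8], h2: u8, width: usize) -> Vec<usize> {
    let mut out = Vec::new();
    for (g, group) in ctrl.chunks(width).enumerate() {
        for (i, &byte) in group.iter().enumerate() {
            if byte == h2 {
                out.push(g * width + i);
            }
        }
        if group.contains(&EMPTY) {
            break; // an EMPTY byte ends the probe sequence
        }
    }
    out
}

fn main() {
    let ctrl = [0x12, 0x34, 0x12, EMPTY, 0x12, 0x00, 0x00, 0x00];
    // The probe stops at the first group containing EMPTY, so the
    // matching tag at index 4 is never reached.
    assert_eq!(candidates(&ctrl, 0x12, 4), [0, 2]);
}
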
+pub(crate) struct RawExtractIf<'a, T, A: Allocator> {
+ pub iter: RawIter<T>,
+ pub table: &'a mut RawTable<T, A>,
+}
+
+impl<T, A: Allocator> RawExtractIf<'_, T, A> {
+ #[cfg_attr(feature = "inline-more", inline)]
+ pub(crate) fn next<F>(&mut self, mut f: F) -> Option<T>
+ where
+ F: FnMut(&mut T) -> bool,
+ {
+ unsafe {
+ for item in &mut self.iter {
+ if f(item.as_mut()) {
+ return Some(self.table.remove(item).0);
+ }
+ }
+ }
+ None
+ }
+}
+
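// --- Illustrative sketch (editor's addition, not part of the patch) ---
// The `RawExtractIf` driver above in safe code: walk the collection,
// remove and yield entries matching the predicate, keep the rest. This
// mirrors the `Vec::extract_if` shape; hashbrown does the same walk over
// raw buckets via `RawIter` and `RawTable::remove`.
fn extract_if<T, F: FnMut(&mut T) -> bool>(v: &mut Vec<T>, mut f: F) -> Vec<T> {
    let mut out = Vec::new();
    let mut i = 0;
    while i < v.len() {
        if f(&mut v[i]) {
            // `swap_remove` keeps the walk O(n); like the raw iterator,
            // it makes no promise about the order of surviving items.
            out.push(v.swap_remove(i));
        } else {
            i += 1;
        }
    }
    out
}

fn main() {
    let mut v = vec![1, 2, 3, 4, 5];
    let even = extract_if(&mut v, |x| *x % 2 == 0);
    assert_eq!(even, [2, 4]);
    assert_eq!(v.len(), 3);
}
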
#[cfg(test)]
mod test_map {
use super::*;
@@ -2457,4 +4604,214 @@ mod test_map {
assert!(table.find(i + 100, |x| *x == i + 100).is_none());
}
}
+
+ /// CHECKING THAT WE ARE NOT TRYING TO READ THE MEMORY OF
+ /// AN UNINITIALIZED TABLE DURING THE DROP
+ #[test]
+ fn test_drop_uninitialized() {
+ use ::alloc::vec::Vec;
+
+ let table = unsafe {
+ // SAFETY: The `buckets` is power of two and we're not
+ // trying to actually use the returned RawTable.
+ RawTable::<(u64, Vec<i32>)>::new_uninitialized(Global, 8, Fallibility::Infallible)
+ .unwrap()
+ };
+ drop(table);
+ }
+
+ /// CHECKING THAT WE DON'T TRY TO DROP DATA IF THE `ITEMS`
+ /// ARE ZERO, EVEN IF WE HAVE `FULL` CONTROL BYTES.
+ #[test]
+ fn test_drop_zero_items() {
+ use ::alloc::vec::Vec;
+ unsafe {
+ // SAFETY: The `buckets` is power of two and we're not
+ // trying to actually use the returned RawTable.
+ let table =
+ RawTable::<(u64, Vec<i32>)>::new_uninitialized(Global, 8, Fallibility::Infallible)
+ .unwrap();
+
+ // WE SIMULATE, AS IT WERE, A FULL TABLE.
+
+ // SAFETY: We checked that the table is allocated and therefore the table already has
+ // `self.bucket_mask + 1 + Group::WIDTH` number of control bytes (see TableLayout::calculate_layout_for)
+ // so writing `table.table.num_ctrl_bytes() == bucket_mask + 1 + Group::WIDTH` bytes is safe.
+ table
+ .table
+ .ctrl(0)
+ .write_bytes(EMPTY, table.table.num_ctrl_bytes());
+
+ // SAFETY: table.capacity() is guaranteed to be smaller than table.buckets()
+ table.table.ctrl(0).write_bytes(0, table.capacity());
+
+ // Fix up the trailing control bytes. See the comments in set_ctrl
+ // for the handling of tables smaller than the group width.
+ if table.buckets() < Group::WIDTH {
+ // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of control bytes,
+ // so copying `self.buckets() == self.bucket_mask + 1` bytes with offset equal to
+ // `Group::WIDTH` is safe
+ table
+ .table
+ .ctrl(0)
+ .copy_to(table.table.ctrl(Group::WIDTH), table.table.buckets());
+ } else {
+ // SAFETY: We have `self.bucket_mask + 1 + Group::WIDTH` number of
+            // control bytes, so copying `Group::WIDTH` bytes with offset equal
+ // to `self.buckets() == self.bucket_mask + 1` is safe
+ table
+ .table
+ .ctrl(0)
+ .copy_to(table.table.ctrl(table.table.buckets()), Group::WIDTH);
+ }
+ drop(table);
+ }
+ }
+
+ /// CHECKING THAT WE DON'T TRY TO DROP DATA IF THE `ITEMS`
+ /// ARE ZERO, EVEN IF WE HAVE `FULL` CONTROL BYTES.
+ #[test]
+ fn test_catch_panic_clone_from() {
+ use ::alloc::sync::Arc;
+ use ::alloc::vec::Vec;
+ use allocator_api2::alloc::{AllocError, Allocator, Global};
+ use core::sync::atomic::{AtomicI8, Ordering};
+ use std::thread;
+
+ struct MyAllocInner {
+ drop_count: Arc<AtomicI8>,
+ }
+
+ #[derive(Clone)]
+ struct MyAlloc {
+ _inner: Arc<MyAllocInner>,
+ }
+
+ impl Drop for MyAllocInner {
+ fn drop(&mut self) {
+ println!("MyAlloc freed.");
+ self.drop_count.fetch_sub(1, Ordering::SeqCst);
+ }
+ }
+
+ unsafe impl Allocator for MyAlloc {
+ fn allocate(&self, layout: Layout) -> std::result::Result<NonNull<[u8]>, AllocError> {
+ let g = Global;
+ g.allocate(layout)
+ }
+
+ unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
+ let g = Global;
+ g.deallocate(ptr, layout)
+ }
+ }
+
+ const DISARMED: bool = false;
+ const ARMED: bool = true;
+
+ struct CheckedCloneDrop {
+ panic_in_clone: bool,
+ dropped: bool,
+ need_drop: Vec<u64>,
+ }
+
+ impl Clone for CheckedCloneDrop {
+ fn clone(&self) -> Self {
+ if self.panic_in_clone {
+ panic!("panic in clone")
+ }
+ Self {
+ panic_in_clone: self.panic_in_clone,
+ dropped: self.dropped,
+ need_drop: self.need_drop.clone(),
+ }
+ }
+ }
+
+ impl Drop for CheckedCloneDrop {
+ fn drop(&mut self) {
+ if self.dropped {
+ panic!("double drop");
+ }
+ self.dropped = true;
+ }
+ }
+
+ let dropped: Arc<AtomicI8> = Arc::new(AtomicI8::new(2));
+
+ let mut table = RawTable::new_in(MyAlloc {
+ _inner: Arc::new(MyAllocInner {
+ drop_count: dropped.clone(),
+ }),
+ });
+
+ for (idx, panic_in_clone) in core::iter::repeat(DISARMED).take(7).enumerate() {
+ let idx = idx as u64;
+ table.insert(
+ idx,
+ (
+ idx,
+ CheckedCloneDrop {
+ panic_in_clone,
+ dropped: false,
+ need_drop: vec![idx],
+ },
+ ),
+ |(k, _)| *k,
+ );
+ }
+
+ assert_eq!(table.len(), 7);
+
+ thread::scope(|s| {
+ let result = s.spawn(|| {
+ let armed_flags = [
+ DISARMED, DISARMED, ARMED, DISARMED, DISARMED, DISARMED, DISARMED,
+ ];
+ let mut scope_table = RawTable::new_in(MyAlloc {
+ _inner: Arc::new(MyAllocInner {
+ drop_count: dropped.clone(),
+ }),
+ });
+ for (idx, &panic_in_clone) in armed_flags.iter().enumerate() {
+ let idx = idx as u64;
+ scope_table.insert(
+ idx,
+ (
+ idx,
+ CheckedCloneDrop {
+ panic_in_clone,
+ dropped: false,
+ need_drop: vec![idx + 100],
+ },
+ ),
+ |(k, _)| *k,
+ );
+ }
+ table.clone_from(&scope_table);
+ });
+ assert!(result.join().is_err());
+ });
+
+ // Let's check that all iterators work fine and do not return elements
+ // (especially `RawIterRange`, which does not depend on the number of
+ // elements in the table, but looks directly at the control bytes)
+ //
+ // SAFETY: We know for sure that `RawTable` will outlive
+ // the returned `RawIter / RawIterRange` iterator.
+ assert_eq!(table.len(), 0);
+ assert_eq!(unsafe { table.iter().count() }, 0);
+ assert_eq!(unsafe { table.iter().iter.count() }, 0);
+
+ for idx in 0..table.buckets() {
+ let idx = idx as u64;
+ assert!(
+ table.find(idx, |(k, _)| *k == idx).is_none(),
+ "Index: {idx}"
+ );
+ }
+
+ // All allocator clones should already be dropped.
+ assert_eq!(dropped.load(Ordering::SeqCst), 1);
+ }
}
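The reworked `RawIterHash` above no longer holds a table reference; it carries only `bucket_mask` and the `ctrl` pointer and recomputes slot indices inline. A standalone sketch of that wrap-around index arithmetic, assuming the power-of-two bucket counts hashbrown guarantees:

```rust
/// For a power-of-two table, `bucket_mask` is `buckets - 1`, so masking
/// is equivalent to `% buckets` but needs no division.
fn slot_index(probe_pos: usize, bit: usize, bucket_mask: usize) -> usize {
    (probe_pos + bit) & bucket_mask
}

fn main() {
    let bucket_mask = 16 - 1; // a 16-bucket table
    // A group loaded at position 14 with a match at byte offset 3 wraps
    // around to slot 1, just like `(pos + bit) & bucket_mask` in
    // `RawIterHashInner::next`.
    assert_eq!(slot_index(14, 3, bucket_mask), 1);
}
```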
diff --git a/third_party/rust/hashbrown/src/raw/neon.rs b/third_party/rust/hashbrown/src/raw/neon.rs
new file mode 100644
index 0000000000..44e82d57d5
--- /dev/null
+++ b/third_party/rust/hashbrown/src/raw/neon.rs
@@ -0,0 +1,124 @@
+use super::bitmask::BitMask;
+use super::EMPTY;
+use core::arch::aarch64 as neon;
+use core::mem;
+use core::num::NonZeroU64;
+
+pub(crate) type BitMaskWord = u64;
+pub(crate) type NonZeroBitMaskWord = NonZeroU64;
+pub(crate) const BITMASK_STRIDE: usize = 8;
+pub(crate) const BITMASK_MASK: BitMaskWord = !0;
+pub(crate) const BITMASK_ITER_MASK: BitMaskWord = 0x8080_8080_8080_8080;
+
+/// Abstraction over a group of control bytes which can be scanned in
+/// parallel.
+///
+/// This implementation uses a 64-bit NEON value.
+#[derive(Copy, Clone)]
+pub(crate) struct Group(neon::uint8x8_t);
+
+#[allow(clippy::use_self)]
+impl Group {
+ /// Number of bytes in the group.
+ pub(crate) const WIDTH: usize = mem::size_of::<Self>();
+
+ /// Returns a full group of empty bytes, suitable for use as the initial
+ /// value for an empty hash table.
+ ///
+ /// This is guaranteed to be aligned to the group size.
+ #[inline]
+ pub(crate) const fn static_empty() -> &'static [u8; Group::WIDTH] {
+ #[repr(C)]
+ struct AlignedBytes {
+ _align: [Group; 0],
+ bytes: [u8; Group::WIDTH],
+ }
+ const ALIGNED_BYTES: AlignedBytes = AlignedBytes {
+ _align: [],
+ bytes: [EMPTY; Group::WIDTH],
+ };
+ &ALIGNED_BYTES.bytes
+ }
+
+ /// Loads a group of bytes starting at the given address.
+ #[inline]
+ #[allow(clippy::cast_ptr_alignment)] // unaligned load
+ pub(crate) unsafe fn load(ptr: *const u8) -> Self {
+ Group(neon::vld1_u8(ptr))
+ }
+
+ /// Loads a group of bytes starting at the given address, which must be
+ /// aligned to `mem::align_of::<Group>()`.
+ #[inline]
+ #[allow(clippy::cast_ptr_alignment)]
+ pub(crate) unsafe fn load_aligned(ptr: *const u8) -> Self {
+ // FIXME: use align_offset once it stabilizes
+ debug_assert_eq!(ptr as usize & (mem::align_of::<Self>() - 1), 0);
+ Group(neon::vld1_u8(ptr))
+ }
+
+ /// Stores the group of bytes to the given address, which must be
+ /// aligned to `mem::align_of::<Group>()`.
+ #[inline]
+ #[allow(clippy::cast_ptr_alignment)]
+ pub(crate) unsafe fn store_aligned(self, ptr: *mut u8) {
+ // FIXME: use align_offset once it stabilizes
+ debug_assert_eq!(ptr as usize & (mem::align_of::<Self>() - 1), 0);
+ neon::vst1_u8(ptr, self.0);
+ }
+
+ /// Returns a `BitMask` indicating all bytes in the group which *may*
+ /// have the given value.
+ #[inline]
+ pub(crate) fn match_byte(self, byte: u8) -> BitMask {
+ unsafe {
+ let cmp = neon::vceq_u8(self.0, neon::vdup_n_u8(byte));
+ BitMask(neon::vget_lane_u64(neon::vreinterpret_u64_u8(cmp), 0))
+ }
+ }
+
+ /// Returns a `BitMask` indicating all bytes in the group which are
+ /// `EMPTY`.
+ #[inline]
+ pub(crate) fn match_empty(self) -> BitMask {
+ self.match_byte(EMPTY)
+ }
+
+ /// Returns a `BitMask` indicating all bytes in the group which are
+ /// `EMPTY` or `DELETED`.
+ #[inline]
+ pub(crate) fn match_empty_or_deleted(self) -> BitMask {
+ unsafe {
+ let cmp = neon::vcltz_s8(neon::vreinterpret_s8_u8(self.0));
+ BitMask(neon::vget_lane_u64(neon::vreinterpret_u64_u8(cmp), 0))
+ }
+ }
+
+ /// Returns a `BitMask` indicating all bytes in the group which are full.
+ #[inline]
+ pub(crate) fn match_full(self) -> BitMask {
+ unsafe {
+ let cmp = neon::vcgez_s8(neon::vreinterpret_s8_u8(self.0));
+ BitMask(neon::vget_lane_u64(neon::vreinterpret_u64_u8(cmp), 0))
+ }
+ }
+
+ /// Performs the following transformation on all bytes in the group:
+ /// - `EMPTY => EMPTY`
+ /// - `DELETED => EMPTY`
+ /// - `FULL => DELETED`
+ #[inline]
+ pub(crate) fn convert_special_to_empty_and_full_to_deleted(self) -> Self {
+ // Map high_bit = 1 (EMPTY or DELETED) to 1111_1111
+ // and high_bit = 0 (FULL) to 1000_0000
+ //
+ // Here's this logic expanded to concrete values:
+ // let special = 0 > byte = 1111_1111 (true) or 0000_0000 (false)
+ // 1111_1111 | 1000_0000 = 1111_1111
+ // 0000_0000 | 1000_0000 = 1000_0000
+ unsafe {
+ let special = neon::vcltz_s8(neon::vreinterpret_s8_u8(self.0));
+ Group(neon::vorr_u8(special, neon::vdup_n_u8(0x80)))
+ }
+ }
+}
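The new NEON backend above uses `BITMASK_STRIDE = 8`: a matching control byte becomes a full `0xFF` lane rather than a single bit. A scalar model of `match_byte` (illustrative only, not the vendored code) shows how a slot index falls out of `trailing_zeros`:

```rust
/// Scalar model of the 64-bit NEON `match_byte`: each byte lane that
/// equals `byte` becomes 0xFF; every other lane becomes 0x00.
fn match_byte_scalar(group: [u8; 8], byte: u8) -> u64 {
    let mut mask = 0u64;
    for (i, &b) in group.iter().enumerate() {
        if b == byte {
            mask |= 0xFFu64 << (i * 8);
        }
    }
    mask
}

fn main() {
    let group = [0x80, 0x12, 0x34, 0x12, 0x80, 0x80, 0x56, 0x78];
    let mask = match_byte_scalar(group, 0x12);
    // With BITMASK_STRIDE == 8, a matching control byte occupies a full
    // 8-bit lane, so the slot index is trailing_zeros / 8.
    assert_eq!(mask.trailing_zeros() / 8, 1);
}
```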
diff --git a/third_party/rust/hashbrown/src/raw/sse2.rs b/third_party/rust/hashbrown/src/raw/sse2.rs
index a0bf6da804..956ba5d265 100644
--- a/third_party/rust/hashbrown/src/raw/sse2.rs
+++ b/third_party/rust/hashbrown/src/raw/sse2.rs
@@ -1,28 +1,31 @@
use super::bitmask::BitMask;
use super::EMPTY;
use core::mem;
+use core::num::NonZeroU16;
#[cfg(target_arch = "x86")]
use core::arch::x86;
#[cfg(target_arch = "x86_64")]
use core::arch::x86_64 as x86;
-pub type BitMaskWord = u16;
-pub const BITMASK_STRIDE: usize = 1;
-pub const BITMASK_MASK: BitMaskWord = 0xffff;
+pub(crate) type BitMaskWord = u16;
+pub(crate) type NonZeroBitMaskWord = NonZeroU16;
+pub(crate) const BITMASK_STRIDE: usize = 1;
+pub(crate) const BITMASK_MASK: BitMaskWord = 0xffff;
+pub(crate) const BITMASK_ITER_MASK: BitMaskWord = !0;
/// Abstraction over a group of control bytes which can be scanned in
/// parallel.
///
/// This implementation uses a 128-bit SSE value.
#[derive(Copy, Clone)]
-pub struct Group(x86::__m128i);
+pub(crate) struct Group(x86::__m128i);
// FIXME: https://github.com/rust-lang/rust-clippy/issues/3859
#[allow(clippy::use_self)]
impl Group {
/// Number of bytes in the group.
- pub const WIDTH: usize = mem::size_of::<Self>();
+ pub(crate) const WIDTH: usize = mem::size_of::<Self>();
/// Returns a full group of empty bytes, suitable for use as the initial
/// value for an empty hash table.
@@ -30,7 +33,7 @@ impl Group {
/// This is guaranteed to be aligned to the group size.
#[inline]
#[allow(clippy::items_after_statements)]
- pub const fn static_empty() -> &'static [u8; Group::WIDTH] {
+ pub(crate) const fn static_empty() -> &'static [u8; Group::WIDTH] {
#[repr(C)]
struct AlignedBytes {
_align: [Group; 0],
@@ -46,7 +49,7 @@ impl Group {
/// Loads a group of bytes starting at the given address.
#[inline]
#[allow(clippy::cast_ptr_alignment)] // unaligned load
- pub unsafe fn load(ptr: *const u8) -> Self {
+ pub(crate) unsafe fn load(ptr: *const u8) -> Self {
Group(x86::_mm_loadu_si128(ptr.cast()))
}
@@ -54,7 +57,7 @@ impl Group {
/// aligned to `mem::align_of::<Group>()`.
#[inline]
#[allow(clippy::cast_ptr_alignment)]
- pub unsafe fn load_aligned(ptr: *const u8) -> Self {
+ pub(crate) unsafe fn load_aligned(ptr: *const u8) -> Self {
// FIXME: use align_offset once it stabilizes
debug_assert_eq!(ptr as usize & (mem::align_of::<Self>() - 1), 0);
Group(x86::_mm_load_si128(ptr.cast()))
@@ -64,7 +67,7 @@ impl Group {
/// aligned to `mem::align_of::<Group>()`.
#[inline]
#[allow(clippy::cast_ptr_alignment)]
- pub unsafe fn store_aligned(self, ptr: *mut u8) {
+ pub(crate) unsafe fn store_aligned(self, ptr: *mut u8) {
// FIXME: use align_offset once it stabilizes
debug_assert_eq!(ptr as usize & (mem::align_of::<Self>() - 1), 0);
x86::_mm_store_si128(ptr.cast(), self.0);
@@ -73,7 +76,7 @@ impl Group {
/// Returns a `BitMask` indicating all bytes in the group which have
/// the given value.
#[inline]
- pub fn match_byte(self, byte: u8) -> BitMask {
+ pub(crate) fn match_byte(self, byte: u8) -> BitMask {
#[allow(
clippy::cast_possible_wrap, // byte: u8 as i8
// byte: i32 as u16
@@ -91,14 +94,14 @@ impl Group {
/// Returns a `BitMask` indicating all bytes in the group which are
/// `EMPTY`.
#[inline]
- pub fn match_empty(self) -> BitMask {
+ pub(crate) fn match_empty(self) -> BitMask {
self.match_byte(EMPTY)
}
/// Returns a `BitMask` indicating all bytes in the group which are
/// `EMPTY` or `DELETED`.
#[inline]
- pub fn match_empty_or_deleted(self) -> BitMask {
+ pub(crate) fn match_empty_or_deleted(self) -> BitMask {
#[allow(
// byte: i32 as u16
// note: _mm_movemask_epi8 returns a 16-bit mask in a i32, the
@@ -114,7 +117,7 @@ impl Group {
/// Returns a `BitMask` indicating all bytes in the group which are full.
#[inline]
- pub fn match_full(&self) -> BitMask {
+ pub(crate) fn match_full(&self) -> BitMask {
self.match_empty_or_deleted().invert()
}
@@ -123,7 +126,7 @@ impl Group {
/// - `DELETED => EMPTY`
/// - `FULL => DELETED`
#[inline]
- pub fn convert_special_to_empty_and_full_to_deleted(self) -> Self {
+ pub(crate) fn convert_special_to_empty_and_full_to_deleted(self) -> Self {
// Map high_bit = 1 (EMPTY or DELETED) to 1111_1111
// and high_bit = 0 (FULL) to 1000_0000
//
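The `NonZeroBitMaskWord` alias added above exists so bit iteration can call `trailing_zeros` on a word the compiler knows is non-zero, removing a branch. A small sketch of the underlying pattern (the function name is hypothetical):

```rust
use core::num::NonZeroU16;

/// Index of the lowest set bit, or `None` for an empty mask. Routing
/// through `NonZeroU16` lets the compiler drop the zero check inside
/// `trailing_zeros`.
fn lowest_set_bit(mask: u16) -> Option<usize> {
    NonZeroU16::new(mask).map(|m| m.trailing_zeros() as usize)
}

fn main() {
    assert_eq!(lowest_set_bit(0b0000), None);
    assert_eq!(lowest_set_bit(0b0110), Some(1));
}
```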
diff --git a/third_party/rust/hashbrown/src/rustc_entry.rs b/third_party/rust/hashbrown/src/rustc_entry.rs
index 2e84595269..defbd4bb88 100644
--- a/third_party/rust/hashbrown/src/rustc_entry.rs
+++ b/third_party/rust/hashbrown/src/rustc_entry.rs
@@ -1,5 +1,5 @@
use self::RustcEntry::*;
-use crate::map::{make_insert_hash, Drain, HashMap, IntoIter, Iter, IterMut};
+use crate::map::{make_hash, Drain, HashMap, IntoIter, Iter, IterMut};
use crate::raw::{Allocator, Bucket, Global, RawTable};
use core::fmt::{self, Debug};
use core::hash::{BuildHasher, Hash};
@@ -9,7 +9,7 @@ impl<K, V, S, A> HashMap<K, V, S, A>
where
K: Eq + Hash,
S: BuildHasher,
- A: Allocator + Clone,
+ A: Allocator,
{
/// Gets the given key's corresponding entry in the map for in-place manipulation.
///
@@ -32,7 +32,7 @@ where
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn rustc_entry(&mut self, key: K) -> RustcEntry<'_, K, V, A> {
- let hash = make_insert_hash(&self.hash_builder, &key);
+ let hash = make_hash(&self.hash_builder, &key);
if let Some(elem) = self.table.find(hash, |q| q.0.eq(&key)) {
RustcEntry::Occupied(RustcOccupiedEntry {
key: Some(key),
@@ -62,7 +62,7 @@ where
/// [`rustc_entry`]: struct.HashMap.html#method.rustc_entry
pub enum RustcEntry<'a, K, V, A = Global>
where
- A: Allocator + Clone,
+ A: Allocator,
{
/// An occupied entry.
Occupied(RustcOccupiedEntry<'a, K, V, A>),
@@ -71,7 +71,7 @@ where
Vacant(RustcVacantEntry<'a, K, V, A>),
}
-impl<K: Debug, V: Debug, A: Allocator + Clone> Debug for RustcEntry<'_, K, V, A> {
+impl<K: Debug, V: Debug, A: Allocator> Debug for RustcEntry<'_, K, V, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
Vacant(ref v) => f.debug_tuple("Entry").field(v).finish(),
@@ -86,7 +86,7 @@ impl<K: Debug, V: Debug, A: Allocator + Clone> Debug for RustcEntry<'_, K, V, A>
/// [`RustcEntry`]: enum.RustcEntry.html
pub struct RustcOccupiedEntry<'a, K, V, A = Global>
where
- A: Allocator + Clone,
+ A: Allocator,
{
key: Option<K>,
elem: Bucket<(K, V)>,
@@ -97,18 +97,18 @@ unsafe impl<K, V, A> Send for RustcOccupiedEntry<'_, K, V, A>
where
K: Send,
V: Send,
- A: Allocator + Clone + Send,
+ A: Allocator + Send,
{
}
unsafe impl<K, V, A> Sync for RustcOccupiedEntry<'_, K, V, A>
where
K: Sync,
V: Sync,
- A: Allocator + Clone + Sync,
+ A: Allocator + Sync,
{
}
-impl<K: Debug, V: Debug, A: Allocator + Clone> Debug for RustcOccupiedEntry<'_, K, V, A> {
+impl<K: Debug, V: Debug, A: Allocator> Debug for RustcOccupiedEntry<'_, K, V, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("OccupiedEntry")
.field("key", self.key())
@@ -123,20 +123,20 @@ impl<K: Debug, V: Debug, A: Allocator + Clone> Debug for RustcOccupiedEntry<'_,
/// [`RustcEntry`]: enum.RustcEntry.html
pub struct RustcVacantEntry<'a, K, V, A = Global>
where
- A: Allocator + Clone,
+ A: Allocator,
{
hash: u64,
key: K,
table: &'a mut RawTable<(K, V), A>,
}
-impl<K: Debug, V, A: Allocator + Clone> Debug for RustcVacantEntry<'_, K, V, A> {
+impl<K: Debug, V, A: Allocator> Debug for RustcVacantEntry<'_, K, V, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("VacantEntry").field(self.key()).finish()
}
}
-impl<'a, K, V, A: Allocator + Clone> RustcEntry<'a, K, V, A> {
+impl<'a, K, V, A: Allocator> RustcEntry<'a, K, V, A> {
/// Sets the value of the entry, and returns a RustcOccupiedEntry.
///
/// # Examples
@@ -265,7 +265,7 @@ impl<'a, K, V, A: Allocator + Clone> RustcEntry<'a, K, V, A> {
}
}
-impl<'a, K, V: Default, A: Allocator + Clone> RustcEntry<'a, K, V, A> {
+impl<'a, K, V: Default, A: Allocator> RustcEntry<'a, K, V, A> {
/// Ensures a value is in the entry by inserting the default value if empty,
/// and returns a mutable reference to the value in the entry.
///
@@ -293,7 +293,7 @@ impl<'a, K, V: Default, A: Allocator + Clone> RustcEntry<'a, K, V, A> {
}
}
-impl<'a, K, V, A: Allocator + Clone> RustcOccupiedEntry<'a, K, V, A> {
+impl<'a, K, V, A: Allocator> RustcOccupiedEntry<'a, K, V, A> {
/// Gets a reference to the key in the entry.
///
/// # Examples
@@ -330,7 +330,7 @@ impl<'a, K, V, A: Allocator + Clone> RustcOccupiedEntry<'a, K, V, A> {
/// ```
#[cfg_attr(feature = "inline-more", inline)]
pub fn remove_entry(self) -> (K, V) {
- unsafe { self.table.remove(self.elem) }
+ unsafe { self.table.remove(self.elem).0 }
}
/// Gets a reference to the value in the entry.
@@ -518,7 +518,7 @@ impl<'a, K, V, A: Allocator + Clone> RustcOccupiedEntry<'a, K, V, A> {
}
}
-impl<'a, K, V, A: Allocator + Clone> RustcVacantEntry<'a, K, V, A> {
+impl<'a, K, V, A: Allocator> RustcVacantEntry<'a, K, V, A> {
/// Gets a reference to the key that would be used when inserting a value
/// through the `RustcVacantEntry`.
///
diff --git a/third_party/rust/hashbrown/src/scopeguard.rs b/third_party/rust/hashbrown/src/scopeguard.rs
index f85e6ab0ed..382d06043e 100644
--- a/third_party/rust/hashbrown/src/scopeguard.rs
+++ b/third_party/rust/hashbrown/src/scopeguard.rs
@@ -1,6 +1,6 @@
// Extracted from the scopeguard crate
use core::{
- mem,
+ mem::ManuallyDrop,
ops::{Deref, DerefMut},
ptr,
};
@@ -28,15 +28,13 @@ where
#[inline]
pub fn into_inner(guard: Self) -> T {
// Cannot move out of Drop-implementing types, so
- // ptr::read the value and forget the guard.
+ // ptr::read the value out of a ManuallyDrop<Self>
+ // Don't use mem::forget as that might invalidate value
+ let guard = ManuallyDrop::new(guard);
unsafe {
let value = ptr::read(&guard.value);
- // read the closure so that it is dropped, and assign it to a local
- // variable to ensure that it is only dropped after the guard has
- // been forgotten. (In case the Drop impl of the closure, or that
- // of any consumed captured variable, panics).
- let _dropfn = ptr::read(&guard.dropfn);
- mem::forget(guard);
+ // read the closure so that it is dropped
+ let _ = ptr::read(&guard.dropfn);
value
}
}
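The scopeguard change above replaces `mem::forget` with `ManuallyDrop`, so the guard's memory is never treated as live after its fields are read out. A self-contained sketch of the same pattern, using a simplified guard type:

```rust
use core::{mem::ManuallyDrop, ptr};

struct Guard<T, F: FnMut(&mut T)> {
    value: T,
    dropfn: F,
}

impl<T, F: FnMut(&mut T)> Guard<T, F> {
    /// Extracts the value without running the drop handler.
    fn into_inner(guard: Self) -> T {
        // ManuallyDrop disables `Guard::drop`; each field is then moved
        // out by a raw read exactly once, so nothing is duplicated,
        // dropped twice, or leaked.
        let guard = ManuallyDrop::new(guard);
        unsafe {
            let value = ptr::read(&guard.value);
            let _ = ptr::read(&guard.dropfn); // drop the closure normally
            value
        }
    }
}

impl<T, F: FnMut(&mut T)> Drop for Guard<T, F> {
    fn drop(&mut self) {
        (self.dropfn)(&mut self.value);
    }
}

fn main() {
    let guard = Guard {
        value: vec![1, 2, 3],
        dropfn: |v: &mut Vec<i32>| v.clear(),
    };
    let v = Guard::into_inner(guard); // the clearing handler never runs
    assert_eq!(v, vec![1, 2, 3]);
}
```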
diff --git a/third_party/rust/hashbrown/src/set.rs b/third_party/rust/hashbrown/src/set.rs
index 2a4dcea52c..2125a7ac81 100644
--- a/third_party/rust/hashbrown/src/set.rs
+++ b/third_party/rust/hashbrown/src/set.rs
@@ -1,14 +1,14 @@
-use crate::TryReserveError;
+#[cfg(feature = "raw")]
+use crate::raw::RawTable;
+use crate::{Equivalent, TryReserveError};
use alloc::borrow::ToOwned;
-use core::borrow::Borrow;
use core::fmt;
use core::hash::{BuildHasher, Hash};
-use core::iter::{Chain, FromIterator, FusedIterator};
-use core::mem;
+use core::iter::{Chain, FusedIterator};
use core::ops::{BitAnd, BitOr, BitXor, Sub};
-use super::map::{self, ConsumeAllOnDrop, DefaultHashBuilder, DrainFilterInner, HashMap, Keys};
-use crate::raw::{Allocator, Global};
+use super::map::{self, DefaultHashBuilder, HashMap, Keys};
+use crate::raw::{Allocator, Global, RawExtractIf};
// Future Optimization (FIXME!)
// =============================
@@ -102,7 +102,7 @@ use crate::raw::{Allocator, Global};
/// use hashbrown::HashSet;
///
/// let viking_names: HashSet<&'static str> =
-/// [ "Einar", "Olaf", "Harald" ].iter().cloned().collect();
+/// [ "Einar", "Olaf", "Harald" ].into_iter().collect();
/// // use the values stored in the set
/// ```
///
@@ -112,7 +112,7 @@ use crate::raw::{Allocator, Global};
/// [`HashMap`]: struct.HashMap.html
/// [`PartialEq`]: https://doc.rust-lang.org/std/cmp/trait.PartialEq.html
/// [`RefCell`]: https://doc.rust-lang.org/std/cell/struct.RefCell.html
-pub struct HashSet<T, S = DefaultHashBuilder, A: Allocator + Clone = Global> {
+pub struct HashSet<T, S = DefaultHashBuilder, A: Allocator = Global> {
pub(crate) map: HashMap<T, (), S, A>,
}
@@ -135,6 +135,18 @@ impl<T> HashSet<T, DefaultHashBuilder> {
/// The hash set is initially created with a capacity of 0, so it will not allocate until it
/// is first inserted into.
///
+ /// # HashDoS resistance
+ ///
+    /// The `hash_builder` normally uses a fixed key by default, which does not
+    /// protect the `HashSet` against attacks such as [`HashDoS`].
+    /// Users who require HashDoS resistance should explicitly use
+    /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`]
+    /// as the hasher when creating a [`HashSet`], for example with
+    /// the [`with_hasher`](HashSet::with_hasher) method.
+ ///
+ /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack
+ /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html
+ ///
/// # Examples
///
/// ```
@@ -153,6 +165,18 @@ impl<T> HashSet<T, DefaultHashBuilder> {
/// The hash set will be able to hold at least `capacity` elements without
/// reallocating. If `capacity` is 0, the hash set will not allocate.
///
+ /// # HashDoS resistance
+ ///
+    /// The `hash_builder` normally uses a fixed key by default, which does not
+    /// protect the `HashSet` against attacks such as [`HashDoS`].
+    /// Users who require HashDoS resistance should explicitly use
+    /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`]
+    /// as the hasher when creating a [`HashSet`], for example with
+    /// the [`with_capacity_and_hasher`](HashSet::with_capacity_and_hasher) method.
+ ///
+ /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack
+ /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html
+ ///
/// # Examples
///
/// ```
@@ -169,12 +193,24 @@ impl<T> HashSet<T, DefaultHashBuilder> {
}
#[cfg(feature = "ahash")]
-impl<T: Hash + Eq, A: Allocator + Clone> HashSet<T, DefaultHashBuilder, A> {
+impl<T: Hash + Eq, A: Allocator> HashSet<T, DefaultHashBuilder, A> {
/// Creates an empty `HashSet`.
///
/// The hash set is initially created with a capacity of 0, so it will not allocate until it
/// is first inserted into.
///
+ /// # HashDoS resistance
+ ///
+    /// The `hash_builder` normally uses a fixed key by default, which does not
+    /// protect the `HashSet` against attacks such as [`HashDoS`].
+    /// Users who require HashDoS resistance should explicitly use
+    /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`]
+    /// as the hasher when creating a [`HashSet`], for example with
+    /// the [`with_hasher_in`](HashSet::with_hasher_in) method.
+ ///
+ /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack
+ /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html
+ ///
/// # Examples
///
/// ```
@@ -193,6 +229,18 @@ impl<T: Hash + Eq, A: Allocator + Clone> HashSet<T, DefaultHashBuilder, A> {
/// The hash set will be able to hold at least `capacity` elements without
/// reallocating. If `capacity` is 0, the hash set will not allocate.
///
+ /// # HashDoS resistance
+ ///
+    /// The `hash_builder` normally uses a fixed key by default, which does not
+    /// protect the `HashSet` against attacks such as [`HashDoS`].
+    /// Users who require HashDoS resistance should explicitly use
+    /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`]
+    /// as the hasher when creating a [`HashSet`], for example with
+    /// the [`with_capacity_and_hasher_in`](HashSet::with_capacity_and_hasher_in) method.
+ ///
+ /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack
+ /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html
+ ///
/// # Examples
///
/// ```
@@ -208,7 +256,7 @@ impl<T: Hash + Eq, A: Allocator + Clone> HashSet<T, DefaultHashBuilder, A> {
}
}
-impl<T, S, A: Allocator + Clone> HashSet<T, S, A> {
+impl<T, S, A: Allocator> HashSet<T, S, A> {
/// Returns the number of elements the set can hold without reallocating.
///
/// # Examples
@@ -287,7 +335,7 @@ impl<T, S, A: Allocator + Clone> HashSet<T, S, A> {
/// ```
/// use hashbrown::HashSet;
///
- /// let mut set: HashSet<_> = [1, 2, 3].iter().cloned().collect();
+ /// let mut set: HashSet<_> = [1, 2, 3].into_iter().collect();
/// assert!(!set.is_empty());
///
/// // print 1, 2, 3 in an arbitrary order
@@ -314,7 +362,7 @@ impl<T, S, A: Allocator + Clone> HashSet<T, S, A> {
/// use hashbrown::HashSet;
///
/// let xs = [1,2,3,4,5,6];
- /// let mut set: HashSet<i32> = xs.iter().cloned().collect();
+ /// let mut set: HashSet<i32> = xs.into_iter().collect();
/// set.retain(|&k| k % 2 == 0);
/// assert_eq!(set.len(), 3);
/// ```
@@ -331,8 +379,11 @@ impl<T, S, A: Allocator + Clone> HashSet<T, S, A> {
/// In other words, move all elements `e` such that `f(&e)` returns `true` out
/// into another iterator.
///
- /// When the returned DrainedFilter is dropped, any remaining elements that satisfy
- /// the predicate are dropped from the set.
+ /// If the returned `ExtractIf` is not exhausted, e.g. because it is dropped without iterating
+ /// or the iteration short-circuits, then the remaining elements will be retained.
+ /// Use [`retain()`] with a negated predicate if you do not need the returned iterator.
+ ///
+ /// [`retain()`]: HashSet::retain
///
/// # Examples
///
@@ -340,7 +391,7 @@ impl<T, S, A: Allocator + Clone> HashSet<T, S, A> {
/// use hashbrown::HashSet;
///
/// let mut set: HashSet<i32> = (0..8).collect();
- /// let drained: HashSet<i32> = set.drain_filter(|v| v % 2 == 0).collect();
+ /// let drained: HashSet<i32> = set.extract_if(|v| v % 2 == 0).collect();
///
/// let mut evens = drained.into_iter().collect::<Vec<_>>();
/// let mut odds = set.into_iter().collect::<Vec<_>>();
@@ -351,13 +402,13 @@ impl<T, S, A: Allocator + Clone> HashSet<T, S, A> {
/// assert_eq!(odds, vec![1, 3, 5, 7]);
/// ```
#[cfg_attr(feature = "inline-more", inline)]
- pub fn drain_filter<F>(&mut self, f: F) -> DrainFilter<'_, T, F, A>
+ pub fn extract_if<F>(&mut self, f: F) -> ExtractIf<'_, T, F, A>
where
F: FnMut(&T) -> bool,
{
- DrainFilter {
+ ExtractIf {
f,
- inner: DrainFilterInner {
+ inner: RawExtractIf {
iter: unsafe { self.map.table.iter() },
table: &mut self.map.table,
},
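One behavioral consequence of the `drain_filter` → `extract_if` rename shown above: dropping the iterator early now retains the unvisited elements instead of removing them. An illustrative check (values are examples):

```rust
use hashbrown::HashSet;

fn main() {
    let mut set: HashSet<i32> = (0..8).collect();

    // Yield at most two even values, then drop the iterator early.
    let taken: Vec<i32> = set.extract_if(|v| v % 2 == 0).take(2).collect();
    assert_eq!(taken.len(), 2);

    // The old `drain_filter` would have removed the remaining matches on
    // drop; `extract_if` retains them, so only the two yielded elements
    // are gone.
    assert_eq!(set.len(), 6);
}
```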
@@ -386,16 +437,23 @@ impl<T, S> HashSet<T, S, Global> {
/// Creates a new empty hash set which will use the given hasher to hash
/// keys.
///
- /// The hash set is also created with the default initial capacity.
+ /// The hash set is initially created with a capacity of 0, so it will not
+ /// allocate until it is first inserted into.
+ ///
+ /// # HashDoS resistance
///
- /// Warning: `hasher` is normally randomly generated, and
- /// is designed to allow `HashSet`s to be resistant to attacks that
- /// cause many collisions and very poor performance. Setting it
- /// manually using this function can expose a DoS attack vector.
+    /// The `hash_builder` normally uses a fixed key by default, which does not
+    /// protect the `HashSet` against attacks such as [`HashDoS`].
+ /// Users who require HashDoS resistance should explicitly use
+ /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`]
+ /// as the hasher when creating a [`HashSet`].
///
/// The `hash_builder` passed should implement the [`BuildHasher`] trait for
- /// the HashMap to be useful, see its documentation for details.
+ /// the HashSet to be useful, see its documentation for details.
///
+ /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack
+ /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html
+ /// [`BuildHasher`]: https://doc.rust-lang.org/std/hash/trait.BuildHasher.html
///
/// # Examples
///
@@ -407,8 +465,6 @@ impl<T, S> HashSet<T, S, Global> {
/// let mut set = HashSet::with_hasher(s);
/// set.insert(2);
/// ```
- ///
- /// [`BuildHasher`]: ../../std/hash/trait.BuildHasher.html
#[cfg_attr(feature = "inline-more", inline)]
pub const fn with_hasher(hasher: S) -> Self {
Self {
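As the updated docs say, HashDoS resistance comes from supplying a randomly seeded hasher yourself. For example, with the std `RandomState` the docs mention:

```rust
use hashbrown::HashSet;
use std::collections::hash_map::RandomState;

fn main() {
    // RandomState seeds each hasher randomly, restoring HashDoS
    // resistance at the cost of per-instance setup.
    let mut set: HashSet<i32, RandomState> = HashSet::with_hasher(RandomState::new());
    set.insert(2);
    assert!(set.contains(&2));
}
```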
@@ -422,13 +478,20 @@ impl<T, S> HashSet<T, S, Global> {
/// The hash set will be able to hold at least `capacity` elements without
/// reallocating. If `capacity` is 0, the hash set will not allocate.
///
- /// Warning: `hasher` is normally randomly generated, and
- /// is designed to allow `HashSet`s to be resistant to attacks that
- /// cause many collisions and very poor performance. Setting it
- /// manually using this function can expose a DoS attack vector.
+ /// # HashDoS resistance
+ ///
+    /// The `hash_builder` normally uses a fixed key by default, which does not
+    /// protect the `HashSet` against attacks such as [`HashDoS`].
+ /// Users who require HashDoS resistance should explicitly use
+ /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`]
+ /// as the hasher when creating a [`HashSet`].
///
/// The `hash_builder` passed should implement the [`BuildHasher`] trait for
- /// the HashMap to be useful, see its documentation for details.
+ /// the HashSet to be useful, see its documentation for details.
+ ///
+ /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack
+ /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html
+ /// [`BuildHasher`]: https://doc.rust-lang.org/std/hash/trait.BuildHasher.html
///
/// # Examples
///
@@ -440,8 +503,6 @@ impl<T, S> HashSet<T, S, Global> {
/// let mut set = HashSet::with_capacity_and_hasher(10, s);
/// set.insert(1);
/// ```
- ///
- /// [`BuildHasher`]: ../../std/hash/trait.BuildHasher.html
#[cfg_attr(feature = "inline-more", inline)]
pub fn with_capacity_and_hasher(capacity: usize, hasher: S) -> Self {
Self {
@@ -452,7 +513,7 @@ impl<T, S> HashSet<T, S, Global> {
impl<T, S, A> HashSet<T, S, A>
where
- A: Allocator + Clone,
+ A: Allocator,
{
/// Returns a reference to the underlying allocator.
#[inline]
@@ -463,12 +524,23 @@ where
/// Creates a new empty hash set which will use the given hasher to hash
/// keys.
///
- /// The hash set is also created with the default initial capacity.
+ /// The hash set is initially created with a capacity of 0, so it will not
+ /// allocate until it is first inserted into.
+ ///
+ /// # HashDoS resistance
+ ///
+    /// The `hash_builder` normally uses a fixed key by default, which does not
+    /// protect the `HashSet` against attacks such as [`HashDoS`].
+ /// Users who require HashDoS resistance should explicitly use
+ /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`]
+ /// as the hasher when creating a [`HashSet`].
+ ///
+ /// The `hash_builder` passed should implement the [`BuildHasher`] trait for
+ /// the HashSet to be useful, see its documentation for details.
///
- /// Warning: `hasher` is normally randomly generated, and
- /// is designed to allow `HashSet`s to be resistant to attacks that
- /// cause many collisions and very poor performance. Setting it
- /// manually using this function can expose a DoS attack vector.
+ /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack
+ /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html
+ /// [`BuildHasher`]: https://doc.rust-lang.org/std/hash/trait.BuildHasher.html
///
/// # Examples
///
@@ -481,7 +553,7 @@ where
/// set.insert(2);
/// ```
#[cfg_attr(feature = "inline-more", inline)]
- pub fn with_hasher_in(hasher: S, alloc: A) -> Self {
+ pub const fn with_hasher_in(hasher: S, alloc: A) -> Self {
Self {
map: HashMap::with_hasher_in(hasher, alloc),
}
@@ -493,10 +565,20 @@ where
/// The hash set will be able to hold at least `capacity` elements without
/// reallocating. If `capacity` is 0, the hash set will not allocate.
///
- /// Warning: `hasher` is normally randomly generated, and
- /// is designed to allow `HashSet`s to be resistant to attacks that
- /// cause many collisions and very poor performance. Setting it
- /// manually using this function can expose a DoS attack vector.
+ /// # HashDoS resistance
+ ///
+    /// The `hash_builder` normally uses a fixed key by default, which does not
+    /// protect the `HashSet` against attacks such as [`HashDoS`].
+ /// Users who require HashDoS resistance should explicitly use
+ /// [`ahash::RandomState`] or [`std::collections::hash_map::RandomState`]
+ /// as the hasher when creating a [`HashSet`].
+ ///
+ /// The `hash_builder` passed should implement the [`BuildHasher`] trait for
+ /// the HashSet to be useful, see its documentation for details.
+ ///
+ /// [`HashDoS`]: https://en.wikipedia.org/wiki/Collision_attack
+ /// [`std::collections::hash_map::RandomState`]: https://doc.rust-lang.org/std/collections/hash_map/struct.RandomState.html
+ /// [`BuildHasher`]: https://doc.rust-lang.org/std/hash/trait.BuildHasher.html
///
/// # Examples
///
@@ -539,7 +621,7 @@ impl<T, S, A> HashSet<T, S, A>
where
T: Eq + Hash,
S: BuildHasher,
- A: Allocator + Clone,
+ A: Allocator,
{
/// Reserves capacity for at least `additional` more elements to be inserted
/// in the `HashSet`. The collection may reserve more space to avoid
@@ -547,7 +629,12 @@ where
///
/// # Panics
///
- /// Panics if the new allocation size overflows `usize`.
+    /// Panics if the new capacity exceeds [`isize::MAX`] bytes, and [`abort`]s the
+    /// program in case of an allocation error. Use [`try_reserve`](HashSet::try_reserve)
+    /// instead if you want to handle memory allocation failure.
+ ///
+ /// [`isize::MAX`]: https://doc.rust-lang.org/std/primitive.isize.html
+ /// [`abort`]: https://doc.rust-lang.org/alloc/alloc/fn.handle_alloc_error.html
///
/// # Examples
///
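A short illustration of the fallible path the new panic docs point to (the capacity value is arbitrary):

```rust
use hashbrown::HashSet;

fn main() {
    let mut set: HashSet<u64> = HashSet::new();

    // Unlike `reserve`, `try_reserve` surfaces allocation failure as an
    // `Err` instead of aborting the process.
    match set.try_reserve(1_000) {
        Ok(()) => assert!(set.capacity() >= 1_000),
        Err(e) => eprintln!("allocation failed: {e:?}"),
    }
}
```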
@@ -637,8 +724,8 @@ where
///
/// ```
/// use hashbrown::HashSet;
- /// let a: HashSet<_> = [1, 2, 3].iter().cloned().collect();
- /// let b: HashSet<_> = [4, 2, 3, 4].iter().cloned().collect();
+ /// let a: HashSet<_> = [1, 2, 3].into_iter().collect();
+ /// let b: HashSet<_> = [4, 2, 3, 4].into_iter().collect();
///
/// // Can be seen as `a - b`.
/// for x in a.difference(&b) {
@@ -668,8 +755,8 @@ where
///
/// ```
/// use hashbrown::HashSet;
- /// let a: HashSet<_> = [1, 2, 3].iter().cloned().collect();
- /// let b: HashSet<_> = [4, 2, 3, 4].iter().cloned().collect();
+ /// let a: HashSet<_> = [1, 2, 3].into_iter().collect();
+ /// let b: HashSet<_> = [4, 2, 3, 4].into_iter().collect();
///
/// // Print 1, 4 in arbitrary order.
/// for x in a.symmetric_difference(&b) {
@@ -696,8 +783,8 @@ where
///
/// ```
/// use hashbrown::HashSet;
- /// let a: HashSet<_> = [1, 2, 3].iter().cloned().collect();
- /// let b: HashSet<_> = [4, 2, 3, 4].iter().cloned().collect();
+ /// let a: HashSet<_> = [1, 2, 3].into_iter().collect();
+ /// let b: HashSet<_> = [4, 2, 3, 4].into_iter().collect();
///
/// // Print 2, 3 in arbitrary order.
/// for x in a.intersection(&b) {
@@ -727,8 +814,8 @@ where
///
/// ```
/// use hashbrown::HashSet;
- /// let a: HashSet<_> = [1, 2, 3].iter().cloned().collect();
- /// let b: HashSet<_> = [4, 2, 3, 4].iter().cloned().collect();
+ /// let a: HashSet<_> = [1, 2, 3].into_iter().collect();
+ /// let b: HashSet<_> = [4, 2, 3, 4].into_iter().collect();
///
/// // Print 1, 2, 3, 4 in arbitrary order.
/// for x in a.union(&b) {
@@ -763,7 +850,7 @@ where
/// ```
/// use hashbrown::HashSet;
///
- /// let set: HashSet<_> = [1, 2, 3].iter().cloned().collect();
+ /// let set: HashSet<_> = [1, 2, 3].into_iter().collect();
/// assert_eq!(set.contains(&1), true);
/// assert_eq!(set.contains(&4), false);
/// ```
@@ -773,8 +860,7 @@ where
#[cfg_attr(feature = "inline-more", inline)]
pub fn contains<Q: ?Sized>(&self, value: &Q) -> bool
where
- T: Borrow<Q>,
- Q: Hash + Eq,
+ Q: Hash + Equivalent<T>,
{
self.map.contains_key(value)
}
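The switch from `T: Borrow<Q>` to `Q: Hash + Equivalent<T>` above allows lookups with key types that have no `Borrow` relationship to the stored key, provided the hashes agree. A sketch with a hypothetical borrowed-pair key, mirroring the crate's `equivalent_trait` tests:

```rust
use hashbrown::{Equivalent, HashSet};
use std::hash::{Hash, Hasher};

/// A borrowed view of a `(String, String)` key. No `Borrow` impl ties
/// it to the stored type, so only the `Equivalent` bound makes this
/// lookup possible.
struct Pair<'a>(&'a str, &'a str);

impl Hash for Pair<'_> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        // Must hash exactly like the stored `(String, String)` tuple.
        self.0.hash(state);
        self.1.hash(state);
    }
}

impl Equivalent<(String, String)> for Pair<'_> {
    fn equivalent(&self, key: &(String, String)) -> bool {
        self.0 == key.0 && self.1 == key.1
    }
}

fn main() {
    let mut set: HashSet<(String, String)> = HashSet::new();
    set.insert(("foo".to_string(), "bar".to_string()));
    assert!(set.contains(&Pair("foo", "bar")));
}
```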
@@ -790,7 +876,7 @@ where
/// ```
/// use hashbrown::HashSet;
///
- /// let set: HashSet<_> = [1, 2, 3].iter().cloned().collect();
+ /// let set: HashSet<_> = [1, 2, 3].into_iter().collect();
/// assert_eq!(set.get(&2), Some(&2));
/// assert_eq!(set.get(&4), None);
/// ```
@@ -800,8 +886,7 @@ where
#[cfg_attr(feature = "inline-more", inline)]
pub fn get<Q: ?Sized>(&self, value: &Q) -> Option<&T>
where
- T: Borrow<Q>,
- Q: Hash + Eq,
+ Q: Hash + Equivalent<T>,
{
// Avoid `Option::map` because it bloats LLVM IR.
match self.map.get_key_value(value) {
@@ -818,7 +903,7 @@ where
/// ```
/// use hashbrown::HashSet;
///
- /// let mut set: HashSet<_> = [1, 2, 3].iter().cloned().collect();
+ /// let mut set: HashSet<_> = [1, 2, 3].into_iter().collect();
/// assert_eq!(set.len(), 3);
/// assert_eq!(set.get_or_insert(2), &2);
/// assert_eq!(set.get_or_insert(100), &100);
@@ -856,8 +941,7 @@ where
#[inline]
pub fn get_or_insert_owned<Q: ?Sized>(&mut self, value: &Q) -> &T
where
- T: Borrow<Q>,
- Q: Hash + Eq + ToOwned<Owned = T>,
+ Q: Hash + Equivalent<T> + ToOwned<Owned = T>,
{
// Although the raw entry gives us `&mut T`, we only return `&T` to be consistent with
// `get`. Key mutation is "raw" because you're not supposed to affect `Eq` or `Hash`.
@@ -889,8 +973,7 @@ where
#[cfg_attr(feature = "inline-more", inline)]
pub fn get_or_insert_with<Q: ?Sized, F>(&mut self, value: &Q, f: F) -> &T
where
- T: Borrow<Q>,
- Q: Hash + Eq,
+ Q: Hash + Equivalent<T>,
F: FnOnce(&Q) -> T,
{
// Although the raw entry gives us `&mut T`, we only return `&T` to be consistent with
@@ -951,7 +1034,7 @@ where
/// ```
/// use hashbrown::HashSet;
///
- /// let a: HashSet<_> = [1, 2, 3].iter().cloned().collect();
+ /// let a: HashSet<_> = [1, 2, 3].into_iter().collect();
/// let mut b = HashSet::new();
///
/// assert_eq!(a.is_disjoint(&b), true);
@@ -972,7 +1055,7 @@ where
/// ```
/// use hashbrown::HashSet;
///
- /// let sup: HashSet<_> = [1, 2, 3].iter().cloned().collect();
+ /// let sup: HashSet<_> = [1, 2, 3].into_iter().collect();
/// let mut set = HashSet::new();
///
/// assert_eq!(set.is_subset(&sup), true);
@@ -993,7 +1076,7 @@ where
/// ```
/// use hashbrown::HashSet;
///
- /// let sub: HashSet<_> = [1, 2].iter().cloned().collect();
+ /// let sub: HashSet<_> = [1, 2].into_iter().collect();
/// let mut set = HashSet::new();
///
/// assert_eq!(set.is_superset(&sub), false);
@@ -1106,8 +1189,7 @@ where
#[cfg_attr(feature = "inline-more", inline)]
pub fn remove<Q: ?Sized>(&mut self, value: &Q) -> bool
where
- T: Borrow<Q>,
- Q: Hash + Eq,
+ Q: Hash + Equivalent<T>,
{
self.map.remove(value).is_some()
}
@@ -1123,7 +1205,7 @@ where
/// ```
/// use hashbrown::HashSet;
///
- /// let mut set: HashSet<_> = [1, 2, 3].iter().cloned().collect();
+ /// let mut set: HashSet<_> = [1, 2, 3].into_iter().collect();
/// assert_eq!(set.take(&2), Some(2));
/// assert_eq!(set.take(&2), None);
/// ```
@@ -1133,8 +1215,7 @@ where
#[cfg_attr(feature = "inline-more", inline)]
pub fn take<Q: ?Sized>(&mut self, value: &Q) -> Option<T>
where
- T: Borrow<Q>,
- Q: Hash + Eq,
+ Q: Hash + Equivalent<T>,
{
// Avoid `Option::map` because it bloats LLVM IR.
match self.map.remove_entry(value) {
@@ -1144,11 +1225,53 @@ where
}
}
+impl<T, S, A: Allocator> HashSet<T, S, A> {
+ /// Returns a reference to the [`RawTable`] used underneath [`HashSet`].
+ /// This function is only available if the `raw` feature of the crate is enabled.
+ ///
+ /// # Note
+ ///
+ /// Calling this function is safe, but using the raw hash table API may require
+ /// unsafe functions or blocks.
+ ///
+    /// The `RawTable` API gives the lowest level of control over the set, which can be
+    /// useful for extending the HashSet's API, but misuse may lead to *[undefined behavior]*.
+ ///
+ /// [`HashSet`]: struct.HashSet.html
+ /// [`RawTable`]: crate::raw::RawTable
+ /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ #[cfg(feature = "raw")]
+ #[cfg_attr(feature = "inline-more", inline)]
+ pub fn raw_table(&self) -> &RawTable<(T, ()), A> {
+ self.map.raw_table()
+ }
+
+ /// Returns a mutable reference to the [`RawTable`] used underneath [`HashSet`].
+ /// This function is only available if the `raw` feature of the crate is enabled.
+ ///
+ /// # Note
+ ///
+ /// Calling this function is safe, but using the raw hash table API may require
+ /// unsafe functions or blocks.
+ ///
+    /// The `RawTable` API gives the lowest level of control over the set, which can be
+    /// useful for extending the HashSet's API, but misuse may lead to *[undefined behavior]*.
+ ///
+ /// [`HashSet`]: struct.HashSet.html
+ /// [`RawTable`]: crate::raw::RawTable
+ /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ #[cfg(feature = "raw")]
+ #[cfg_attr(feature = "inline-more", inline)]
+ pub fn raw_table_mut(&mut self) -> &mut RawTable<(T, ()), A> {
+ self.map.raw_table_mut()
+ }
+}
+
impl<T, S, A> PartialEq for HashSet<T, S, A>
where
T: Eq + Hash,
S: BuildHasher,
- A: Allocator + Clone,
+ A: Allocator,
{
fn eq(&self, other: &Self) -> bool {
if self.len() != other.len() {
@@ -1163,14 +1286,14 @@ impl<T, S, A> Eq for HashSet<T, S, A>
where
T: Eq + Hash,
S: BuildHasher,
- A: Allocator + Clone,
+ A: Allocator,
{
}
impl<T, S, A> fmt::Debug for HashSet<T, S, A>
where
T: fmt::Debug,
- A: Allocator + Clone,
+ A: Allocator,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_set().entries(self.iter()).finish()
@@ -1179,7 +1302,7 @@ where
impl<T, S, A> From<HashMap<T, (), S, A>> for HashSet<T, S, A>
where
- A: Allocator + Clone,
+ A: Allocator,
{
fn from(map: HashMap<T, (), S, A>) -> Self {
Self { map }
@@ -1190,7 +1313,7 @@ impl<T, S, A> FromIterator<T> for HashSet<T, S, A>
where
T: Eq + Hash,
S: BuildHasher + Default,
- A: Default + Allocator + Clone,
+ A: Default + Allocator,
{
#[cfg_attr(feature = "inline-more", inline)]
fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
@@ -1205,7 +1328,7 @@ where
impl<T, A, const N: usize> From<[T; N]> for HashSet<T, DefaultHashBuilder, A>
where
T: Eq + Hash,
- A: Default + Allocator + Clone,
+ A: Default + Allocator,
{
/// # Examples
///
@@ -1225,7 +1348,7 @@ impl<T, S, A> Extend<T> for HashSet<T, S, A>
where
T: Eq + Hash,
S: BuildHasher,
- A: Allocator + Clone,
+ A: Allocator,
{
#[cfg_attr(feature = "inline-more", inline)]
fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
@@ -1249,7 +1372,7 @@ impl<'a, T, S, A> Extend<&'a T> for HashSet<T, S, A>
where
T: 'a + Eq + Hash + Copy,
S: BuildHasher,
- A: Allocator + Clone,
+ A: Allocator,
{
#[cfg_attr(feature = "inline-more", inline)]
fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
@@ -1272,7 +1395,7 @@ where
impl<T, S, A> Default for HashSet<T, S, A>
where
S: Default,
- A: Default + Allocator + Clone,
+ A: Default + Allocator,
{
/// Creates an empty `HashSet<T, S>` with the `Default` value for the hasher.
#[cfg_attr(feature = "inline-more", inline)]
@@ -1287,7 +1410,7 @@ impl<T, S, A> BitOr<&HashSet<T, S, A>> for &HashSet<T, S, A>
where
T: Eq + Hash + Clone,
S: BuildHasher + Default,
- A: Allocator + Clone,
+ A: Allocator,
{
type Output = HashSet<T, S>;
@@ -1320,7 +1443,7 @@ impl<T, S, A> BitAnd<&HashSet<T, S, A>> for &HashSet<T, S, A>
where
T: Eq + Hash + Clone,
S: BuildHasher + Default,
- A: Allocator + Clone,
+ A: Allocator,
{
type Output = HashSet<T, S>;
@@ -1431,7 +1554,7 @@ pub struct Iter<'a, K> {
///
/// [`HashSet`]: struct.HashSet.html
/// [`into_iter`]: struct.HashSet.html#method.into_iter
-pub struct IntoIter<K, A: Allocator + Clone = Global> {
+pub struct IntoIter<K, A: Allocator = Global> {
iter: map::IntoIter<K, (), A>,
}
@@ -1442,23 +1565,24 @@ pub struct IntoIter<K, A: Allocator + Clone = Global> {
///
/// [`HashSet`]: struct.HashSet.html
/// [`drain`]: struct.HashSet.html#method.drain
-pub struct Drain<'a, K, A: Allocator + Clone = Global> {
+pub struct Drain<'a, K, A: Allocator = Global> {
iter: map::Drain<'a, K, (), A>,
}
/// A draining iterator over entries of a `HashSet` which don't satisfy the predicate `f`.
///
-/// This `struct` is created by the [`drain_filter`] method on [`HashSet`]. See its
+/// This `struct` is created by the [`extract_if`] method on [`HashSet`]. See its
/// documentation for more.
///
-/// [`drain_filter`]: struct.HashSet.html#method.drain_filter
+/// [`extract_if`]: struct.HashSet.html#method.extract_if
/// [`HashSet`]: struct.HashSet.html
-pub struct DrainFilter<'a, K, F, A: Allocator + Clone = Global>
+#[must_use = "Iterators are lazy unless consumed"]
+pub struct ExtractIf<'a, K, F, A: Allocator = Global>
where
F: FnMut(&K) -> bool,
{
f: F,
- inner: DrainFilterInner<'a, K, (), A>,
+ inner: RawExtractIf<'a, (K, ()), A>,
}
/// A lazy iterator producing elements in the intersection of `HashSet`s.
@@ -1468,7 +1592,7 @@ where
///
/// [`HashSet`]: struct.HashSet.html
/// [`intersection`]: struct.HashSet.html#method.intersection
-pub struct Intersection<'a, T, S, A: Allocator + Clone = Global> {
+pub struct Intersection<'a, T, S, A: Allocator = Global> {
// iterator of the first set
iter: Iter<'a, T>,
// the second set
@@ -1482,7 +1606,7 @@ pub struct Intersection<'a, T, S, A: Allocator + Clone = Global> {
///
/// [`HashSet`]: struct.HashSet.html
/// [`difference`]: struct.HashSet.html#method.difference
-pub struct Difference<'a, T, S, A: Allocator + Clone = Global> {
+pub struct Difference<'a, T, S, A: Allocator = Global> {
// iterator of the first set
iter: Iter<'a, T>,
// the second set
@@ -1496,7 +1620,7 @@ pub struct Difference<'a, T, S, A: Allocator + Clone = Global> {
///
/// [`HashSet`]: struct.HashSet.html
/// [`symmetric_difference`]: struct.HashSet.html#method.symmetric_difference
-pub struct SymmetricDifference<'a, T, S, A: Allocator + Clone = Global> {
+pub struct SymmetricDifference<'a, T, S, A: Allocator = Global> {
iter: Chain<Difference<'a, T, S, A>, Difference<'a, T, S, A>>,
}
@@ -1507,11 +1631,11 @@ pub struct SymmetricDifference<'a, T, S, A: Allocator + Clone = Global> {
///
/// [`HashSet`]: struct.HashSet.html
/// [`union`]: struct.HashSet.html#method.union
-pub struct Union<'a, T, S, A: Allocator + Clone = Global> {
+pub struct Union<'a, T, S, A: Allocator = Global> {
iter: Chain<Iter<'a, T>, Difference<'a, T, S, A>>,
}
-impl<'a, T, S, A: Allocator + Clone> IntoIterator for &'a HashSet<T, S, A> {
+impl<'a, T, S, A: Allocator> IntoIterator for &'a HashSet<T, S, A> {
type Item = &'a T;
type IntoIter = Iter<'a, T>;
@@ -1521,7 +1645,7 @@ impl<'a, T, S, A: Allocator + Clone> IntoIterator for &'a HashSet<T, S, A> {
}
}
-impl<T, S, A: Allocator + Clone> IntoIterator for HashSet<T, S, A> {
+impl<T, S, A: Allocator> IntoIterator for HashSet<T, S, A> {
type Item = T;
type IntoIter = IntoIter<T, A>;
@@ -1572,6 +1696,14 @@ impl<'a, K> Iterator for Iter<'a, K> {
fn size_hint(&self) -> (usize, Option<usize>) {
self.iter.size_hint()
}
+ #[cfg_attr(feature = "inline-more", inline)]
+ fn fold<B, F>(self, init: B, f: F) -> B
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> B,
+ {
+ self.iter.fold(init, f)
+ }
}
impl<'a, K> ExactSizeIterator for Iter<'a, K> {
#[cfg_attr(feature = "inline-more", inline)]
@@ -1587,7 +1719,7 @@ impl<K: fmt::Debug> fmt::Debug for Iter<'_, K> {
}
}
-impl<K, A: Allocator + Clone> Iterator for IntoIter<K, A> {
+impl<K, A: Allocator> Iterator for IntoIter<K, A> {
type Item = K;
#[cfg_attr(feature = "inline-more", inline)]
@@ -1602,23 +1734,31 @@ impl<K, A: Allocator + Clone> Iterator for IntoIter<K, A> {
fn size_hint(&self) -> (usize, Option<usize>) {
self.iter.size_hint()
}
+ #[cfg_attr(feature = "inline-more", inline)]
+ fn fold<B, F>(self, init: B, mut f: F) -> B
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> B,
+ {
+ self.iter.fold(init, |acc, (k, ())| f(acc, k))
+ }
}
-impl<K, A: Allocator + Clone> ExactSizeIterator for IntoIter<K, A> {
+impl<K, A: Allocator> ExactSizeIterator for IntoIter<K, A> {
#[cfg_attr(feature = "inline-more", inline)]
fn len(&self) -> usize {
self.iter.len()
}
}
-impl<K, A: Allocator + Clone> FusedIterator for IntoIter<K, A> {}
+impl<K, A: Allocator> FusedIterator for IntoIter<K, A> {}
-impl<K: fmt::Debug, A: Allocator + Clone> fmt::Debug for IntoIter<K, A> {
+impl<K: fmt::Debug, A: Allocator> fmt::Debug for IntoIter<K, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let entries_iter = self.iter.iter().map(|(k, _)| k);
f.debug_list().entries(entries_iter).finish()
}
}
-impl<K, A: Allocator + Clone> Iterator for Drain<'_, K, A> {
+impl<K, A: Allocator> Iterator for Drain<'_, K, A> {
type Item = K;
#[cfg_attr(feature = "inline-more", inline)]
@@ -1633,37 +1773,31 @@ impl<K, A: Allocator + Clone> Iterator for Drain<'_, K, A> {
fn size_hint(&self) -> (usize, Option<usize>) {
self.iter.size_hint()
}
+ #[cfg_attr(feature = "inline-more", inline)]
+ fn fold<B, F>(self, init: B, mut f: F) -> B
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> B,
+ {
+ self.iter.fold(init, |acc, (k, ())| f(acc, k))
+ }
}
-impl<K, A: Allocator + Clone> ExactSizeIterator for Drain<'_, K, A> {
+impl<K, A: Allocator> ExactSizeIterator for Drain<'_, K, A> {
#[cfg_attr(feature = "inline-more", inline)]
fn len(&self) -> usize {
self.iter.len()
}
}
-impl<K, A: Allocator + Clone> FusedIterator for Drain<'_, K, A> {}
+impl<K, A: Allocator> FusedIterator for Drain<'_, K, A> {}
-impl<K: fmt::Debug, A: Allocator + Clone> fmt::Debug for Drain<'_, K, A> {
+impl<K: fmt::Debug, A: Allocator> fmt::Debug for Drain<'_, K, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let entries_iter = self.iter.iter().map(|(k, _)| k);
f.debug_list().entries(entries_iter).finish()
}
}
-impl<'a, K, F, A: Allocator + Clone> Drop for DrainFilter<'a, K, F, A>
-where
- F: FnMut(&K) -> bool,
-{
- #[cfg_attr(feature = "inline-more", inline)]
- fn drop(&mut self) {
- while let Some(item) = self.next() {
- let guard = ConsumeAllOnDrop(self);
- drop(item);
- mem::forget(guard);
- }
- }
-}
-
-impl<K, F, A: Allocator + Clone> Iterator for DrainFilter<'_, K, F, A>
+impl<K, F, A: Allocator> Iterator for ExtractIf<'_, K, F, A>
where
F: FnMut(&K) -> bool,
{
@@ -1671,9 +1805,9 @@ where
#[cfg_attr(feature = "inline-more", inline)]
fn next(&mut self) -> Option<Self::Item> {
- let f = &mut self.f;
- let (k, _) = self.inner.next(&mut |k, _| f(k))?;
- Some(k)
+ self.inner
+ .next(|&mut (ref k, ())| (self.f)(k))
+ .map(|(k, ())| k)
}
#[inline]
@@ -1682,12 +1816,9 @@ where
}
}
-impl<K, F, A: Allocator + Clone> FusedIterator for DrainFilter<'_, K, F, A> where
- F: FnMut(&K) -> bool
-{
-}
+impl<K, F, A: Allocator> FusedIterator for ExtractIf<'_, K, F, A> where F: FnMut(&K) -> bool {}
-impl<T, S, A: Allocator + Clone> Clone for Intersection<'_, T, S, A> {
+impl<T, S, A: Allocator> Clone for Intersection<'_, T, S, A> {
#[cfg_attr(feature = "inline-more", inline)]
fn clone(&self) -> Self {
Intersection {
@@ -1701,7 +1832,7 @@ impl<'a, T, S, A> Iterator for Intersection<'a, T, S, A>
where
T: Eq + Hash,
S: BuildHasher,
- A: Allocator + Clone,
+ A: Allocator,
{
type Item = &'a T;
@@ -1720,13 +1851,27 @@ where
let (_, upper) = self.iter.size_hint();
(0, upper)
}
+ #[cfg_attr(feature = "inline-more", inline)]
+ fn fold<B, F>(self, init: B, mut f: F) -> B
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> B,
+ {
+ self.iter.fold(init, |acc, elt| {
+ if self.other.contains(elt) {
+ f(acc, elt)
+ } else {
+ acc
+ }
+ })
+ }
}
impl<T, S, A> fmt::Debug for Intersection<'_, T, S, A>
where
T: fmt::Debug + Eq + Hash,
S: BuildHasher,
- A: Allocator + Clone,
+ A: Allocator,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_list().entries(self.clone()).finish()
@@ -1737,11 +1882,11 @@ impl<T, S, A> FusedIterator for Intersection<'_, T, S, A>
where
T: Eq + Hash,
S: BuildHasher,
- A: Allocator + Clone,
+ A: Allocator,
{
}
-impl<T, S, A: Allocator + Clone> Clone for Difference<'_, T, S, A> {
+impl<T, S, A: Allocator> Clone for Difference<'_, T, S, A> {
#[cfg_attr(feature = "inline-more", inline)]
fn clone(&self) -> Self {
Difference {
@@ -1755,7 +1900,7 @@ impl<'a, T, S, A> Iterator for Difference<'a, T, S, A>
where
T: Eq + Hash,
S: BuildHasher,
- A: Allocator + Clone,
+ A: Allocator,
{
type Item = &'a T;
@@ -1774,13 +1919,27 @@ where
let (_, upper) = self.iter.size_hint();
(0, upper)
}
+ #[cfg_attr(feature = "inline-more", inline)]
+ fn fold<B, F>(self, init: B, mut f: F) -> B
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> B,
+ {
+ self.iter.fold(init, |acc, elt| {
+ if self.other.contains(elt) {
+ acc
+ } else {
+ f(acc, elt)
+ }
+ })
+ }
}
impl<T, S, A> FusedIterator for Difference<'_, T, S, A>
where
T: Eq + Hash,
S: BuildHasher,
- A: Allocator + Clone,
+ A: Allocator,
{
}
@@ -1788,14 +1947,14 @@ impl<T, S, A> fmt::Debug for Difference<'_, T, S, A>
where
T: fmt::Debug + Eq + Hash,
S: BuildHasher,
- A: Allocator + Clone,
+ A: Allocator,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_list().entries(self.clone()).finish()
}
}
-impl<T, S, A: Allocator + Clone> Clone for SymmetricDifference<'_, T, S, A> {
+impl<T, S, A: Allocator> Clone for SymmetricDifference<'_, T, S, A> {
#[cfg_attr(feature = "inline-more", inline)]
fn clone(&self) -> Self {
SymmetricDifference {
@@ -1808,7 +1967,7 @@ impl<'a, T, S, A> Iterator for SymmetricDifference<'a, T, S, A>
where
T: Eq + Hash,
S: BuildHasher,
- A: Allocator + Clone,
+ A: Allocator,
{
type Item = &'a T;
@@ -1820,13 +1979,21 @@ where
fn size_hint(&self) -> (usize, Option<usize>) {
self.iter.size_hint()
}
+ #[cfg_attr(feature = "inline-more", inline)]
+ fn fold<B, F>(self, init: B, f: F) -> B
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> B,
+ {
+ self.iter.fold(init, f)
+ }
}
impl<T, S, A> FusedIterator for SymmetricDifference<'_, T, S, A>
where
T: Eq + Hash,
S: BuildHasher,
- A: Allocator + Clone,
+ A: Allocator,
{
}
@@ -1834,14 +2001,14 @@ impl<T, S, A> fmt::Debug for SymmetricDifference<'_, T, S, A>
where
T: fmt::Debug + Eq + Hash,
S: BuildHasher,
- A: Allocator + Clone,
+ A: Allocator,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_list().entries(self.clone()).finish()
}
}
-impl<T, S, A: Allocator + Clone> Clone for Union<'_, T, S, A> {
+impl<T, S, A: Allocator> Clone for Union<'_, T, S, A> {
#[cfg_attr(feature = "inline-more", inline)]
fn clone(&self) -> Self {
Union {
@@ -1854,7 +2021,7 @@ impl<T, S, A> FusedIterator for Union<'_, T, S, A>
where
T: Eq + Hash,
S: BuildHasher,
- A: Allocator + Clone,
+ A: Allocator,
{
}
@@ -1862,7 +2029,7 @@ impl<T, S, A> fmt::Debug for Union<'_, T, S, A>
where
T: fmt::Debug + Eq + Hash,
S: BuildHasher,
- A: Allocator + Clone,
+ A: Allocator,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_list().entries(self.clone()).finish()
@@ -1873,7 +2040,7 @@ impl<'a, T, S, A> Iterator for Union<'a, T, S, A>
where
T: Eq + Hash,
S: BuildHasher,
- A: Allocator + Clone,
+ A: Allocator,
{
type Item = &'a T;
@@ -1885,6 +2052,14 @@ where
fn size_hint(&self) -> (usize, Option<usize>) {
self.iter.size_hint()
}
+ #[cfg_attr(feature = "inline-more", inline)]
+ fn fold<B, F>(self, init: B, f: F) -> B
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> B,
+ {
+ self.iter.fold(init, f)
+ }
}
/// A view into a single entry in a set, which may either be vacant or occupied.
@@ -1925,7 +2100,7 @@ where
/// ```
pub enum Entry<'a, T, S, A = Global>
where
- A: Allocator + Clone,
+ A: Allocator,
{
/// An occupied entry.
///
@@ -1958,7 +2133,7 @@ where
Vacant(VacantEntry<'a, T, S, A>),
}
-impl<T: fmt::Debug, S, A: Allocator + Clone> fmt::Debug for Entry<'_, T, S, A> {
+impl<T: fmt::Debug, S, A: Allocator> fmt::Debug for Entry<'_, T, S, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
Entry::Vacant(ref v) => f.debug_tuple("Entry").field(v).finish(),
@@ -2003,11 +2178,11 @@ impl<T: fmt::Debug, S, A: Allocator + Clone> fmt::Debug for Entry<'_, T, S, A> {
/// assert_eq!(set.get(&"c"), None);
/// assert_eq!(set.len(), 2);
/// ```
-pub struct OccupiedEntry<'a, T, S, A: Allocator + Clone = Global> {
+pub struct OccupiedEntry<'a, T, S, A: Allocator = Global> {
inner: map::OccupiedEntry<'a, T, (), S, A>,
}
-impl<T: fmt::Debug, S, A: Allocator + Clone> fmt::Debug for OccupiedEntry<'_, T, S, A> {
+impl<T: fmt::Debug, S, A: Allocator> fmt::Debug for OccupiedEntry<'_, T, S, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("OccupiedEntry")
.field("value", self.get())
@@ -2041,17 +2216,17 @@ impl<T: fmt::Debug, S, A: Allocator + Clone> fmt::Debug for OccupiedEntry<'_, T,
/// }
/// assert!(set.contains("b") && set.len() == 2);
/// ```
-pub struct VacantEntry<'a, T, S, A: Allocator + Clone = Global> {
+pub struct VacantEntry<'a, T, S, A: Allocator = Global> {
inner: map::VacantEntry<'a, T, (), S, A>,
}
-impl<T: fmt::Debug, S, A: Allocator + Clone> fmt::Debug for VacantEntry<'_, T, S, A> {
+impl<T: fmt::Debug, S, A: Allocator> fmt::Debug for VacantEntry<'_, T, S, A> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_tuple("VacantEntry").field(self.get()).finish()
}
}
-impl<'a, T, S, A: Allocator + Clone> Entry<'a, T, S, A> {
+impl<'a, T, S, A: Allocator> Entry<'a, T, S, A> {
/// Sets the value of the entry, and returns an OccupiedEntry.
///
/// # Examples
@@ -2128,7 +2303,7 @@ impl<'a, T, S, A: Allocator + Clone> Entry<'a, T, S, A> {
}
}
-impl<T, S, A: Allocator + Clone> OccupiedEntry<'_, T, S, A> {
+impl<T, S, A: Allocator> OccupiedEntry<'_, T, S, A> {
/// Gets a reference to the value in the entry.
///
/// # Examples
@@ -2215,7 +2390,7 @@ impl<T, S, A: Allocator + Clone> OccupiedEntry<'_, T, S, A> {
}
}
-impl<'a, T, S, A: Allocator + Clone> VacantEntry<'a, T, S, A> {
+impl<'a, T, S, A: Allocator> VacantEntry<'a, T, S, A> {
/// Gets a reference to the value that would be used when inserting
/// through the `VacantEntry`.
///
@@ -2295,34 +2470,30 @@ fn assert_covariance() {
fn iter<'a, 'new>(v: Iter<'a, &'static str>) -> Iter<'a, &'new str> {
v
}
- fn into_iter<'new, A: Allocator + Clone>(
- v: IntoIter<&'static str, A>,
- ) -> IntoIter<&'new str, A> {
+ fn into_iter<'new, A: Allocator>(v: IntoIter<&'static str, A>) -> IntoIter<&'new str, A> {
v
}
- fn difference<'a, 'new, A: Allocator + Clone>(
+ fn difference<'a, 'new, A: Allocator>(
v: Difference<'a, &'static str, DefaultHashBuilder, A>,
) -> Difference<'a, &'new str, DefaultHashBuilder, A> {
v
}
- fn symmetric_difference<'a, 'new, A: Allocator + Clone>(
+ fn symmetric_difference<'a, 'new, A: Allocator>(
v: SymmetricDifference<'a, &'static str, DefaultHashBuilder, A>,
) -> SymmetricDifference<'a, &'new str, DefaultHashBuilder, A> {
v
}
- fn intersection<'a, 'new, A: Allocator + Clone>(
+ fn intersection<'a, 'new, A: Allocator>(
v: Intersection<'a, &'static str, DefaultHashBuilder, A>,
) -> Intersection<'a, &'new str, DefaultHashBuilder, A> {
v
}
- fn union<'a, 'new, A: Allocator + Clone>(
+ fn union<'a, 'new, A: Allocator>(
v: Union<'a, &'static str, DefaultHashBuilder, A>,
) -> Union<'a, &'new str, DefaultHashBuilder, A> {
v
}
- fn drain<'new, A: Allocator + Clone>(
- d: Drain<'static, &'static str, A>,
- ) -> Drain<'new, &'new str, A> {
+ fn drain<'new, A: Allocator>(d: Drain<'static, &'static str, A>) -> Drain<'new, &'new str, A> {
d
}
}
@@ -2613,10 +2784,10 @@ mod test_set {
set.insert(1);
set.insert(2);
- let set_str = format!("{:?}", set);
+ let set_str = format!("{set:?}");
assert!(set_str == "{1, 2}" || set_str == "{2, 1}");
- assert_eq!(format!("{:?}", empty), "{}");
+ assert_eq!(format!("{empty:?}"), "{}");
}
#[test]
@@ -2649,7 +2820,7 @@ mod test_set {
assert_eq!(last_i, 49);
}
- for _ in &s {
+ if !s.is_empty() {
panic!("s should be empty!");
}
@@ -2663,6 +2834,7 @@ mod test_set {
use core::hash;
#[derive(Debug)]
+ #[allow(dead_code)]
struct Foo(&'static str, i32);
impl PartialEq for Foo {
@@ -2691,11 +2863,12 @@ mod test_set {
}
#[test]
+ #[allow(clippy::needless_borrow)]
fn test_extend_ref() {
let mut a = HashSet::new();
a.insert(1);
- a.extend(&[2, 3, 4]);
+ a.extend([2, 3, 4]);
assert_eq!(a.len(), 4);
assert!(a.contains(&1));
@@ -2730,10 +2903,10 @@ mod test_set {
}
#[test]
- fn test_drain_filter() {
+ fn test_extract_if() {
{
let mut set: HashSet<i32> = (0..8).collect();
- let drained = set.drain_filter(|&k| k % 2 == 0);
+ let drained = set.extract_if(|&k| k % 2 == 0);
let mut out = drained.collect::<Vec<_>>();
out.sort_unstable();
assert_eq!(vec![0, 2, 4, 6], out);
@@ -2741,7 +2914,7 @@ mod test_set {
}
{
let mut set: HashSet<i32> = (0..8).collect();
- drop(set.drain_filter(|&k| k % 2 == 0));
+ set.extract_if(|&k| k % 2 == 0).for_each(drop);
assert_eq!(set.len(), 4, "Removes non-matching items on drop");
}
}
@@ -2787,4 +2960,11 @@ mod test_set {
set.insert(i);
}
}
+
+ #[test]
+ fn collect() {
+ // At the time of writing, this hits the ZST case in from_base_index
+ // (and without the `map`, it does not).
+ let mut _set: HashSet<_> = (0..3).map(|_| ()).collect();
+ }
}
diff --git a/third_party/rust/hashbrown/src/table.rs b/third_party/rust/hashbrown/src/table.rs
new file mode 100644
index 0000000000..faf8a6330f
--- /dev/null
+++ b/third_party/rust/hashbrown/src/table.rs
@@ -0,0 +1,2070 @@
+use core::{fmt, iter::FusedIterator, marker::PhantomData};
+
+use crate::{
+ raw::{
+ Allocator, Bucket, Global, InsertSlot, RawDrain, RawExtractIf, RawIntoIter, RawIter,
+ RawTable,
+ },
+ TryReserveError,
+};
+
+/// Low-level hash table with explicit hashing.
+///
+/// The primary use case for this type over [`HashMap`] or [`HashSet`] is to
+/// support types that do not implement the [`Hash`] and [`Eq`] traits, but
+/// instead require additional data not contained in the key itself to compute a
+/// hash and compare two elements for equality.
+///
+/// Examples of when this can be useful include:
+/// - An `IndexMap` implementation where indices into a `Vec` are stored as
+/// elements in a `HashTable<usize>`. Hashing and comparing the elements
+/// requires indexing the associated `Vec` to get the actual value referred to
+/// by the index.
+/// - Avoiding re-computing a hash when it is already known.
+/// - Mutating the key of an element in a way that doesn't affect its hash.
+///
+/// To achieve this, `HashTable` methods that search for an element in the table
+/// require a hash value and equality function to be explicitly passed in as
+/// arguments. The method will then iterate over the elements with the given
+/// hash and call the equality function on each of them, until a match is found.
+///
+/// In most cases, a `HashTable` will not be exposed directly in an API. It will
+/// instead be wrapped in a helper type which handles the work of calculating
+/// hash values and comparing elements.
+///
+/// Due to its low-level nature, this type provides fewer guarantees than
+/// [`HashMap`] and [`HashSet`]. Specifically, the API allows you to shoot
+/// yourself in the foot by having multiple elements with identical keys in the
+/// table. The table itself will still function correctly and lookups will
+/// arbitrarily return one of the matching elements. However, you should avoid
+/// doing this because it changes the runtime of hash table operations from
+/// `O(1)` to `O(k)` where `k` is the number of duplicate entries.
+///
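+/// # Examples
+///
+/// A minimal sketch of the explicit-hashing workflow, mirroring the
+/// per-method examples below (which assume the `nightly` feature and the
+/// `ahash` dev-dependency): every lookup and insertion takes the element's
+/// hash, plus an equality closure, as explicit arguments.
+///
+/// ```
+/// # #[cfg(feature = "nightly")]
+/// # fn test() {
+/// use ahash::AHasher;
+/// use hashbrown::HashTable;
+/// use std::hash::{BuildHasher, BuildHasherDefault};
+///
+/// let mut table = HashTable::new();
+/// let hasher = BuildHasherDefault::<AHasher>::default();
+/// let hasher = |val: &_| hasher.hash_one(val);
+///
+/// // Insert with a precomputed hash; `hasher` is re-used if the table grows.
+/// table.insert_unique(hasher(&7), 7, hasher);
+/// // Look up by hash plus an equality check rather than by a key's `Eq` impl.
+/// assert_eq!(table.find(hasher(&7), |&v| v == 7), Some(&7));
+/// assert_eq!(table.find(hasher(&8), |&v| v == 8), None);
+/// # }
+/// # fn main() {
+/// # #[cfg(feature = "nightly")]
+/// # test()
+/// # }
+/// ```
+///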
+/// [`HashMap`]: super::HashMap
+/// [`HashSet`]: super::HashSet
+pub struct HashTable<T, A = Global>
+where
+ A: Allocator,
+{
+ pub(crate) raw: RawTable<T, A>,
+}
+
+impl<T> HashTable<T, Global> {
+ /// Creates an empty `HashTable`.
+ ///
+ /// The hash table is initially created with a capacity of 0, so it will not allocate until it
+ /// is first inserted into.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use hashbrown::HashTable;
+ /// let mut table: HashTable<&str> = HashTable::new();
+ /// assert_eq!(table.len(), 0);
+ /// assert_eq!(table.capacity(), 0);
+ /// ```
+ pub const fn new() -> Self {
+ Self {
+ raw: RawTable::new(),
+ }
+ }
+
+ /// Creates an empty `HashTable` with the specified capacity.
+ ///
+ /// The hash table will be able to hold at least `capacity` elements without
+ /// reallocating. If `capacity` is 0, the hash table will not allocate.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use hashbrown::HashTable;
+ /// let mut table: HashTable<&str> = HashTable::with_capacity(10);
+ /// assert_eq!(table.len(), 0);
+ /// assert!(table.capacity() >= 10);
+ /// ```
+ pub fn with_capacity(capacity: usize) -> Self {
+ Self {
+ raw: RawTable::with_capacity(capacity),
+ }
+ }
+}
+
+impl<T, A> HashTable<T, A>
+where
+ A: Allocator,
+{
+ /// Creates an empty `HashTable` using the given allocator.
+ ///
+ /// The hash table is initially created with a capacity of 0, so it will not allocate until it
+ /// is first inserted into.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use bumpalo::Bump;
+ /// use hashbrown::HashTable;
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let bump = Bump::new();
+ /// let mut table = HashTable::new_in(&bump);
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ ///
+    /// // The created HashTable holds no elements
+ /// assert_eq!(table.len(), 0);
+ ///
+ /// // The created HashTable also doesn't allocate memory
+ /// assert_eq!(table.capacity(), 0);
+ ///
+    /// // Now we insert an element into the created HashTable
+ /// table.insert_unique(hasher(&"One"), "One", hasher);
+ /// // We can see that the HashTable holds 1 element
+ /// assert_eq!(table.len(), 1);
+ /// // And it also allocates some capacity
+ /// assert!(table.capacity() > 1);
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```
+ pub const fn new_in(alloc: A) -> Self {
+ Self {
+ raw: RawTable::new_in(alloc),
+ }
+ }
+
+ /// Creates an empty `HashTable` with the specified capacity using the given allocator.
+ ///
+ /// The hash table will be able to hold at least `capacity` elements without
+ /// reallocating. If `capacity` is 0, the hash table will not allocate.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use bumpalo::Bump;
+ /// use hashbrown::HashTable;
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let bump = Bump::new();
+ /// let mut table = HashTable::with_capacity_in(5, &bump);
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ ///
+    /// // The created HashTable holds no elements
+ /// assert_eq!(table.len(), 0);
+ /// // But it can hold at least 5 elements without reallocating
+ /// let empty_map_capacity = table.capacity();
+ /// assert!(empty_map_capacity >= 5);
+ ///
+    /// // Now we insert 5 elements into the created HashTable
+ /// table.insert_unique(hasher(&"One"), "One", hasher);
+ /// table.insert_unique(hasher(&"Two"), "Two", hasher);
+ /// table.insert_unique(hasher(&"Three"), "Three", hasher);
+ /// table.insert_unique(hasher(&"Four"), "Four", hasher);
+ /// table.insert_unique(hasher(&"Five"), "Five", hasher);
+ ///
+ /// // We can see that the HashTable holds 5 elements
+ /// assert_eq!(table.len(), 5);
+ /// // But its capacity isn't changed
+ /// assert_eq!(table.capacity(), empty_map_capacity)
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```
+ pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
+ Self {
+ raw: RawTable::with_capacity_in(capacity, alloc),
+ }
+ }
+
+ /// Returns a reference to the underlying allocator.
+ pub fn allocator(&self) -> &A {
+ self.raw.allocator()
+ }
+
+ /// Returns a reference to an entry in the table with the given hash and
+ /// which satisfies the equality function passed.
+ ///
+ /// This method will call `eq` for all entries with the given hash, but may
+ /// also call it for entries with a different hash. `eq` should only return
+ /// true for the desired entry, at which point the search is stopped.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use hashbrown::HashTable;
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let mut table = HashTable::new();
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ /// table.insert_unique(hasher(&1), 1, hasher);
+ /// table.insert_unique(hasher(&2), 2, hasher);
+ /// table.insert_unique(hasher(&3), 3, hasher);
+ /// assert_eq!(table.find(hasher(&2), |&val| val == 2), Some(&2));
+ /// assert_eq!(table.find(hasher(&4), |&val| val == 4), None);
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```
+ pub fn find(&self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&T> {
+ self.raw.get(hash, eq)
+ }
+
+ /// Returns a mutable reference to an entry in the table with the given hash
+ /// and which satisfies the equality function passed.
+ ///
+ /// This method will call `eq` for all entries with the given hash, but may
+ /// also call it for entries with a different hash. `eq` should only return
+ /// true for the desired entry, at which point the search is stopped.
+ ///
+ /// When mutating an entry, you should ensure that it still retains the same
+ /// hash value as when it was inserted, otherwise lookups of that entry may
+ /// fail to find it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use hashbrown::HashTable;
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let mut table = HashTable::new();
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ /// table.insert_unique(hasher(&1), (1, "a"), |val| hasher(&val.0));
+ /// if let Some(val) = table.find_mut(hasher(&1), |val| val.0 == 1) {
+ /// val.1 = "b";
+ /// }
+ /// assert_eq!(table.find(hasher(&1), |val| val.0 == 1), Some(&(1, "b")));
+ /// assert_eq!(table.find(hasher(&2), |val| val.0 == 2), None);
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```
+ pub fn find_mut(&mut self, hash: u64, eq: impl FnMut(&T) -> bool) -> Option<&mut T> {
+ self.raw.get_mut(hash, eq)
+ }
+
+ /// Returns an `OccupiedEntry` for an entry in the table with the given hash
+ /// and which satisfies the equality function passed.
+ ///
+ /// This can be used to remove the entry from the table. Call
+ /// [`HashTable::entry`] instead if you wish to insert an entry if the
+ /// lookup fails.
+ ///
+ /// This method will call `eq` for all entries with the given hash, but may
+ /// also call it for entries with a different hash. `eq` should only return
+ /// true for the desired entry, at which point the search is stopped.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use hashbrown::HashTable;
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let mut table = HashTable::new();
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ /// table.insert_unique(hasher(&1), (1, "a"), |val| hasher(&val.0));
+ /// if let Ok(entry) = table.find_entry(hasher(&1), |val| val.0 == 1) {
+ /// entry.remove();
+ /// }
+ /// assert_eq!(table.find(hasher(&1), |val| val.0 == 1), None);
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```
+ #[cfg_attr(feature = "inline-more", inline)]
+ pub fn find_entry(
+ &mut self,
+ hash: u64,
+ eq: impl FnMut(&T) -> bool,
+ ) -> Result<OccupiedEntry<'_, T, A>, AbsentEntry<'_, T, A>> {
+ match self.raw.find(hash, eq) {
+ Some(bucket) => Ok(OccupiedEntry {
+ hash,
+ bucket,
+ table: self,
+ }),
+ None => Err(AbsentEntry { table: self }),
+ }
+ }
+
+ /// Returns an `Entry` for an entry in the table with the given hash
+ /// and which satisfies the equality function passed.
+ ///
+ /// This can be used to remove the entry from the table, or insert a new
+ /// entry with the given hash if one doesn't already exist.
+ ///
+ /// This method will call `eq` for all entries with the given hash, but may
+ /// also call it for entries with a different hash. `eq` should only return
+ /// true for the desired entry, at which point the search is stopped.
+ ///
+ /// This method may grow the table in preparation for an insertion. Call
+ /// [`HashTable::find_entry`] if this is undesirable.
+ ///
+ /// `hasher` is called if entries need to be moved or copied to a new table.
+ /// This must return the same hash value that each entry was inserted with.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use hashbrown::hash_table::Entry;
+ /// use hashbrown::HashTable;
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let mut table = HashTable::new();
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ /// table.insert_unique(hasher(&1), (1, "a"), |val| hasher(&val.0));
+ /// if let Entry::Occupied(entry) = table.entry(hasher(&1), |val| val.0 == 1, |val| hasher(&val.0))
+ /// {
+ /// entry.remove();
+ /// }
+ /// if let Entry::Vacant(entry) = table.entry(hasher(&2), |val| val.0 == 2, |val| hasher(&val.0)) {
+ /// entry.insert((2, "b"));
+ /// }
+ /// assert_eq!(table.find(hasher(&1), |val| val.0 == 1), None);
+ /// assert_eq!(table.find(hasher(&2), |val| val.0 == 2), Some(&(2, "b")));
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```
+ #[cfg_attr(feature = "inline-more", inline)]
+ pub fn entry(
+ &mut self,
+ hash: u64,
+ eq: impl FnMut(&T) -> bool,
+ hasher: impl Fn(&T) -> u64,
+ ) -> Entry<'_, T, A> {
+ match self.raw.find_or_find_insert_slot(hash, eq, hasher) {
+ Ok(bucket) => Entry::Occupied(OccupiedEntry {
+ hash,
+ bucket,
+ table: self,
+ }),
+ Err(insert_slot) => Entry::Vacant(VacantEntry {
+ hash,
+ insert_slot,
+ table: self,
+ }),
+ }
+ }
+
+ /// Inserts an element into the `HashTable` with the given hash value, but
+ /// without checking whether an equivalent element already exists within the
+ /// table.
+ ///
+ /// `hasher` is called if entries need to be moved or copied to a new table.
+ /// This must return the same hash value that each entry was inserted with.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use hashbrown::HashTable;
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let mut v = HashTable::new();
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ /// v.insert_unique(hasher(&1), 1, hasher);
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```
+ pub fn insert_unique(
+ &mut self,
+ hash: u64,
+ value: T,
+ hasher: impl Fn(&T) -> u64,
+ ) -> OccupiedEntry<'_, T, A> {
+ let bucket = self.raw.insert(hash, value, hasher);
+ OccupiedEntry {
+ hash,
+ bucket,
+ table: self,
+ }
+ }
+
+ /// Clears the table, removing all values.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use hashbrown::HashTable;
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let mut v = HashTable::new();
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ /// v.insert_unique(hasher(&1), 1, hasher);
+ /// v.clear();
+ /// assert!(v.is_empty());
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```
+ pub fn clear(&mut self) {
+ self.raw.clear();
+ }
+
+ /// Shrinks the capacity of the table as much as possible. It will drop
+ /// down as much as possible while maintaining the internal rules
+ /// and possibly leaving some space in accordance with the resize policy.
+ ///
+ /// `hasher` is called if entries need to be moved or copied to a new table.
+ /// This must return the same hash value that each entry was inserted with.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use hashbrown::HashTable;
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let mut table = HashTable::with_capacity(100);
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ /// table.insert_unique(hasher(&1), 1, hasher);
+ /// table.insert_unique(hasher(&2), 2, hasher);
+ /// assert!(table.capacity() >= 100);
+ /// table.shrink_to_fit(hasher);
+ /// assert!(table.capacity() >= 2);
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```
+ pub fn shrink_to_fit(&mut self, hasher: impl Fn(&T) -> u64) {
+ self.raw.shrink_to(self.len(), hasher)
+ }
+
+ /// Shrinks the capacity of the table with a lower limit. It will drop
+ /// down no lower than the supplied limit while maintaining the internal rules
+ /// and possibly leaving some space in accordance with the resize policy.
+ ///
+ /// `hasher` is called if entries need to be moved or copied to a new table.
+ /// This must return the same hash value that each entry was inserted with.
+ ///
+ /// Panics if the current capacity is smaller than the supplied
+ /// minimum capacity.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use hashbrown::HashTable;
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let mut table = HashTable::with_capacity(100);
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ /// table.insert_unique(hasher(&1), 1, hasher);
+ /// table.insert_unique(hasher(&2), 2, hasher);
+ /// assert!(table.capacity() >= 100);
+ /// table.shrink_to(10, hasher);
+ /// assert!(table.capacity() >= 10);
+ /// table.shrink_to(0, hasher);
+ /// assert!(table.capacity() >= 2);
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```
+ pub fn shrink_to(&mut self, min_capacity: usize, hasher: impl Fn(&T) -> u64) {
+ self.raw.shrink_to(min_capacity, hasher);
+ }
+
+ /// Reserves capacity for at least `additional` more elements to be inserted
+ /// in the `HashTable`. The collection may reserve more space to avoid
+ /// frequent reallocations.
+ ///
+ /// `hasher` is called if entries need to be moved or copied to a new table.
+ /// This must return the same hash value that each entry was inserted with.
+ ///
+ /// # Panics
+ ///
+    /// Panics if the new capacity exceeds [`isize::MAX`] bytes, and [`abort`]s
+    /// the program in case of allocation error. Use [`try_reserve`](HashTable::try_reserve) instead
+ /// if you want to handle memory allocation failure.
+ ///
+ /// [`isize::MAX`]: https://doc.rust-lang.org/std/primitive.isize.html
+ /// [`abort`]: https://doc.rust-lang.org/alloc/alloc/fn.handle_alloc_error.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use hashbrown::HashTable;
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let mut table: HashTable<i32> = HashTable::new();
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ /// table.reserve(10, hasher);
+ /// assert!(table.capacity() >= 10);
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```
+ pub fn reserve(&mut self, additional: usize, hasher: impl Fn(&T) -> u64) {
+ self.raw.reserve(additional, hasher)
+ }
+
+ /// Tries to reserve capacity for at least `additional` more elements to be inserted
+ /// in the given `HashTable`. The collection may reserve more space to avoid
+ /// frequent reallocations.
+ ///
+ /// `hasher` is called if entries need to be moved or copied to a new table.
+ /// This must return the same hash value that each entry was inserted with.
+ ///
+ /// # Errors
+ ///
+ /// If the capacity overflows, or the allocator reports a failure, then an error
+ /// is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use hashbrown::HashTable;
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let mut table: HashTable<i32> = HashTable::new();
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ /// table
+ /// .try_reserve(10, hasher)
+ /// .expect("why is the test harness OOMing on 10 bytes?");
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```
+ pub fn try_reserve(
+ &mut self,
+ additional: usize,
+ hasher: impl Fn(&T) -> u64,
+ ) -> Result<(), TryReserveError> {
+ self.raw.try_reserve(additional, hasher)
+ }
+
+ /// Returns the number of elements the table can hold without reallocating.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use hashbrown::HashTable;
+ /// let table: HashTable<i32> = HashTable::with_capacity(100);
+ /// assert!(table.capacity() >= 100);
+ /// ```
+ pub fn capacity(&self) -> usize {
+ self.raw.capacity()
+ }
+
+ /// Returns the number of elements in the table.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use hashbrown::HashTable;
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ /// let mut v = HashTable::new();
+ /// assert_eq!(v.len(), 0);
+ /// v.insert_unique(hasher(&1), 1, hasher);
+ /// assert_eq!(v.len(), 1);
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```
+ pub fn len(&self) -> usize {
+ self.raw.len()
+ }
+
+    /// Returns `true` if the table contains no elements.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use hashbrown::HashTable;
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ /// let mut v = HashTable::new();
+ /// assert!(v.is_empty());
+ /// v.insert_unique(hasher(&1), 1, hasher);
+ /// assert!(!v.is_empty());
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```
+ pub fn is_empty(&self) -> bool {
+ self.raw.is_empty()
+ }
+
+ /// An iterator visiting all elements in arbitrary order.
+ /// The iterator element type is `&'a T`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use hashbrown::HashTable;
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let mut table = HashTable::new();
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ /// table.insert_unique(hasher(&"a"), "b", hasher);
+ /// table.insert_unique(hasher(&"b"), "b", hasher);
+ ///
+ /// // Will print in an arbitrary order.
+ /// for x in table.iter() {
+ /// println!("{}", x);
+ /// }
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```
+ pub fn iter(&self) -> Iter<'_, T> {
+ Iter {
+ inner: unsafe { self.raw.iter() },
+ marker: PhantomData,
+ }
+ }
+
+ /// An iterator visiting all elements in arbitrary order,
+ /// with mutable references to the elements.
+ /// The iterator element type is `&'a mut T`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use hashbrown::HashTable;
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let mut table = HashTable::new();
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ /// table.insert_unique(hasher(&1), 1, hasher);
+ /// table.insert_unique(hasher(&2), 2, hasher);
+ /// table.insert_unique(hasher(&3), 3, hasher);
+ ///
+ /// // Update all values
+ /// for val in table.iter_mut() {
+ /// *val *= 2;
+ /// }
+ ///
+ /// assert_eq!(table.len(), 3);
+ /// let mut vec: Vec<i32> = Vec::new();
+ ///
+ /// for val in &table {
+ /// println!("val: {}", val);
+ /// vec.push(*val);
+ /// }
+ ///
+ /// // The `Iter` iterator produces items in arbitrary order, so the
+ /// // items must be sorted to test them against a sorted array.
+ /// vec.sort_unstable();
+ /// assert_eq!(vec, [2, 4, 6]);
+ ///
+ /// assert_eq!(table.len(), 3);
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```
+ pub fn iter_mut(&mut self) -> IterMut<'_, T> {
+ IterMut {
+ inner: unsafe { self.raw.iter() },
+ marker: PhantomData,
+ }
+ }
+
+ /// Retains only the elements specified by the predicate.
+ ///
+ /// In other words, remove all elements `e` such that `f(&e)` returns `false`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use hashbrown::HashTable;
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let mut table = HashTable::new();
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ /// for x in 1..=6 {
+ /// table.insert_unique(hasher(&x), x, hasher);
+ /// }
+ /// table.retain(|&mut x| x % 2 == 0);
+ /// assert_eq!(table.len(), 3);
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```
+ pub fn retain(&mut self, mut f: impl FnMut(&mut T) -> bool) {
+ // Here we only use `iter` as a temporary, preventing use-after-free
+ unsafe {
+ for item in self.raw.iter() {
+ if !f(item.as_mut()) {
+ self.raw.erase(item);
+ }
+ }
+ }
+ }
+
+    /// Clears the table, returning all elements in an iterator.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use hashbrown::HashTable;
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let mut table = HashTable::new();
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ /// for x in 1..=3 {
+ /// table.insert_unique(hasher(&x), x, hasher);
+ /// }
+ /// assert!(!table.is_empty());
+ ///
+ /// // print 1, 2, 3 in an arbitrary order
+ /// for i in table.drain() {
+ /// println!("{}", i);
+ /// }
+ ///
+ /// assert!(table.is_empty());
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```
+ pub fn drain(&mut self) -> Drain<'_, T, A> {
+ Drain {
+ inner: self.raw.drain(),
+ }
+ }
+
+ /// Drains elements which are true under the given predicate,
+ /// and returns an iterator over the removed items.
+ ///
+ /// In other words, move all elements `e` such that `f(&e)` returns `true` out
+ /// into another iterator.
+ ///
+ /// If the returned `ExtractIf` is not exhausted, e.g. because it is dropped without iterating
+ /// or the iteration short-circuits, then the remaining elements will be retained.
+ /// Use [`retain()`] with a negated predicate if you do not need the returned iterator.
+ ///
+ /// [`retain()`]: HashTable::retain
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use hashbrown::HashTable;
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let mut table = HashTable::new();
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ /// for x in 0..8 {
+ /// table.insert_unique(hasher(&x), x, hasher);
+ /// }
+ /// let drained: Vec<i32> = table.extract_if(|&mut v| v % 2 == 0).collect();
+ ///
+ /// let mut evens = drained.into_iter().collect::<Vec<_>>();
+ /// let mut odds = table.into_iter().collect::<Vec<_>>();
+ /// evens.sort();
+ /// odds.sort();
+ ///
+ /// assert_eq!(evens, vec![0, 2, 4, 6]);
+ /// assert_eq!(odds, vec![1, 3, 5, 7]);
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```
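+    ///
+    /// A sketch of the retention guarantee described above: dropping the
+    /// iterator before it is exhausted leaves the unvisited elements in the
+    /// table.
+    ///
+    /// ```
+    /// # #[cfg(feature = "nightly")]
+    /// # fn test() {
+    /// use ahash::AHasher;
+    /// use hashbrown::HashTable;
+    /// use std::hash::{BuildHasher, BuildHasherDefault};
+    ///
+    /// let mut table = HashTable::new();
+    /// let hasher = BuildHasherDefault::<AHasher>::default();
+    /// let hasher = |val: &_| hasher.hash_one(val);
+    /// for x in 0..8 {
+    ///     table.insert_unique(hasher(&x), x, hasher);
+    /// }
+    /// let mut evens = table.extract_if(|&mut v| v % 2 == 0);
+    /// // Take a single matching element, then drop the iterator early.
+    /// assert!(evens.next().is_some());
+    /// drop(evens);
+    /// // Exactly one element was removed; the other seven were retained.
+    /// assert_eq!(table.len(), 7);
+    /// # }
+    /// # fn main() {
+    /// # #[cfg(feature = "nightly")]
+    /// # test()
+    /// # }
+    /// ```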
+ pub fn extract_if<F>(&mut self, f: F) -> ExtractIf<'_, T, F, A>
+ where
+ F: FnMut(&mut T) -> bool,
+ {
+ ExtractIf {
+ f,
+ inner: RawExtractIf {
+ iter: unsafe { self.raw.iter() },
+ table: &mut self.raw,
+ },
+ }
+ }
+
+    /// Attempts to get mutable references to `N` values in the table at once.
+ ///
+ /// The `eq` argument should be a closure such that `eq(i, k)` returns true if `k` is equal to
+ /// the `i`th key to be looked up.
+ ///
+ /// Returns an array of length `N` with the results of each query. For soundness, at most one
+ /// mutable reference will be returned to any value. `None` will be returned if any of the
+ /// keys are duplicates or missing.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use hashbrown::hash_table::Entry;
+ /// use hashbrown::HashTable;
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let mut libraries: HashTable<(&str, u32)> = HashTable::new();
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ /// for (k, v) in [
+ /// ("Bodleian Library", 1602),
+ /// ("Athenæum", 1807),
+ /// ("Herzogin-Anna-Amalia-Bibliothek", 1691),
+ /// ("Library of Congress", 1800),
+ /// ] {
+ /// libraries.insert_unique(hasher(&k), (k, v), |(k, _)| hasher(&k));
+ /// }
+ ///
+ /// let keys = ["Athenæum", "Library of Congress"];
+ /// let got = libraries.get_many_mut(keys.map(|k| hasher(&k)), |i, val| keys[i] == val.0);
+ /// assert_eq!(
+ /// got,
+ /// Some([&mut ("Athenæum", 1807), &mut ("Library of Congress", 1800),]),
+ /// );
+ ///
+ /// // Missing keys result in None
+ /// let keys = ["Athenæum", "New York Public Library"];
+ /// let got = libraries.get_many_mut(keys.map(|k| hasher(&k)), |i, val| keys[i] == val.0);
+ /// assert_eq!(got, None);
+ ///
+ /// // Duplicate keys result in None
+ /// let keys = ["Athenæum", "Athenæum"];
+ /// let got = libraries.get_many_mut(keys.map(|k| hasher(&k)), |i, val| keys[i] == val.0);
+ /// assert_eq!(got, None);
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```
+ pub fn get_many_mut<const N: usize>(
+ &mut self,
+ hashes: [u64; N],
+ eq: impl FnMut(usize, &T) -> bool,
+ ) -> Option<[&'_ mut T; N]> {
+ self.raw.get_many_mut(hashes, eq)
+ }
+
+    /// Attempts to get mutable references to `N` values in the table at once, without validating that
+ /// the values are unique.
+ ///
+ /// The `eq` argument should be a closure such that `eq(i, k)` returns true if `k` is equal to
+ /// the `i`th key to be looked up.
+ ///
+ /// Returns an array of length `N` with the results of each query. `None` will be returned if
+ /// any of the keys are missing.
+ ///
+ /// For a safe alternative see [`get_many_mut`](`HashTable::get_many_mut`).
+ ///
+ /// # Safety
+ ///
+ /// Calling this method with overlapping keys is *[undefined behavior]* even if the resulting
+ /// references are not used.
+ ///
+ /// [undefined behavior]: https://doc.rust-lang.org/reference/behavior-considered-undefined.html
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use hashbrown::hash_table::Entry;
+ /// use hashbrown::HashTable;
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let mut libraries: HashTable<(&str, u32)> = HashTable::new();
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ /// for (k, v) in [
+ /// ("Bodleian Library", 1602),
+ /// ("Athenæum", 1807),
+ /// ("Herzogin-Anna-Amalia-Bibliothek", 1691),
+ /// ("Library of Congress", 1800),
+ /// ] {
+ /// libraries.insert_unique(hasher(&k), (k, v), |(k, _)| hasher(&k));
+ /// }
+ ///
+ /// let keys = ["Athenæum", "Library of Congress"];
+ /// let got = libraries.get_many_mut(keys.map(|k| hasher(&k)), |i, val| keys[i] == val.0);
+ /// assert_eq!(
+ /// got,
+ /// Some([&mut ("Athenæum", 1807), &mut ("Library of Congress", 1800),]),
+ /// );
+ ///
+ /// // Missing keys result in None
+ /// let keys = ["Athenæum", "New York Public Library"];
+ /// let got = libraries.get_many_mut(keys.map(|k| hasher(&k)), |i, val| keys[i] == val.0);
+ /// assert_eq!(got, None);
+ ///
+ /// // Duplicate keys result in None
+ /// let keys = ["Athenæum", "Athenæum"];
+ /// let got = libraries.get_many_mut(keys.map(|k| hasher(&k)), |i, val| keys[i] == val.0);
+ /// assert_eq!(got, None);
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```
+ pub unsafe fn get_many_unchecked_mut<const N: usize>(
+ &mut self,
+ hashes: [u64; N],
+ eq: impl FnMut(usize, &T) -> bool,
+ ) -> Option<[&'_ mut T; N]> {
+ self.raw.get_many_unchecked_mut(hashes, eq)
+ }
+}
+
+impl<T, A> IntoIterator for HashTable<T, A>
+where
+ A: Allocator,
+{
+ type Item = T;
+ type IntoIter = IntoIter<T, A>;
+
+ fn into_iter(self) -> IntoIter<T, A> {
+ IntoIter {
+ inner: self.raw.into_iter(),
+ }
+ }
+}
+
+impl<'a, T, A> IntoIterator for &'a HashTable<T, A>
+where
+ A: Allocator,
+{
+ type Item = &'a T;
+ type IntoIter = Iter<'a, T>;
+
+ fn into_iter(self) -> Iter<'a, T> {
+ self.iter()
+ }
+}
+
+impl<'a, T, A> IntoIterator for &'a mut HashTable<T, A>
+where
+ A: Allocator,
+{
+ type Item = &'a mut T;
+ type IntoIter = IterMut<'a, T>;
+
+ fn into_iter(self) -> IterMut<'a, T> {
+ self.iter_mut()
+ }
+}
+
+impl<T, A> Default for HashTable<T, A>
+where
+ A: Allocator + Default,
+{
+ fn default() -> Self {
+ Self {
+ raw: Default::default(),
+ }
+ }
+}
+
+impl<T, A> Clone for HashTable<T, A>
+where
+ T: Clone,
+ A: Allocator + Clone,
+{
+ fn clone(&self) -> Self {
+ Self {
+ raw: self.raw.clone(),
+ }
+ }
+}
+
+impl<T, A> fmt::Debug for HashTable<T, A>
+where
+ T: fmt::Debug,
+ A: Allocator,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_set().entries(self.iter()).finish()
+ }
+}
+
+/// A view into a single entry in a table, which may either be vacant or occupied.
+///
+/// This `enum` is constructed from the [`entry`] method on [`HashTable`].
+///
+/// [`HashTable`]: struct.HashTable.html
+/// [`entry`]: struct.HashTable.html#method.entry
+///
+/// # Examples
+///
+/// ```
+/// # #[cfg(feature = "nightly")]
+/// # fn test() {
+/// use ahash::AHasher;
+/// use hashbrown::hash_table::{Entry, HashTable, OccupiedEntry};
+/// use std::hash::{BuildHasher, BuildHasherDefault};
+///
+/// let mut table = HashTable::new();
+/// let hasher = BuildHasherDefault::<AHasher>::default();
+/// let hasher = |val: &_| hasher.hash_one(val);
+/// for x in ["a", "b", "c"] {
+/// table.insert_unique(hasher(&x), x, hasher);
+/// }
+/// assert_eq!(table.len(), 3);
+///
+/// // Existing value (insert)
+/// let entry: Entry<_> = table.entry(hasher(&"a"), |&x| x == "a", hasher);
+/// let _raw_o: OccupiedEntry<_, _> = entry.insert("a");
+/// assert_eq!(table.len(), 3);
+/// // Nonexistent value (insert)
+/// table.entry(hasher(&"d"), |&x| x == "d", hasher).insert("d");
+///
+/// // Existing value (or_insert)
+/// table
+/// .entry(hasher(&"b"), |&x| x == "b", hasher)
+/// .or_insert("b");
+/// // Nonexistent value (or_insert)
+/// table
+/// .entry(hasher(&"e"), |&x| x == "e", hasher)
+/// .or_insert("e");
+///
+/// println!("Our HashTable: {:?}", table);
+///
+/// let mut vec: Vec<_> = table.iter().copied().collect();
+/// // The `Iter` iterator produces items in arbitrary order, so the
+/// // items must be sorted to test them against a sorted array.
+/// vec.sort_unstable();
+/// assert_eq!(vec, ["a", "b", "c", "d", "e"]);
+/// # }
+/// # fn main() {
+/// # #[cfg(feature = "nightly")]
+/// # test()
+/// # }
+/// ```
+pub enum Entry<'a, T, A = Global>
+where
+ A: Allocator,
+{
+ /// An occupied entry.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use hashbrown::hash_table::{Entry, HashTable, OccupiedEntry};
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let mut table = HashTable::new();
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ /// for x in ["a", "b"] {
+ /// table.insert_unique(hasher(&x), x, hasher);
+ /// }
+ ///
+ /// match table.entry(hasher(&"a"), |&x| x == "a", hasher) {
+ /// Entry::Vacant(_) => unreachable!(),
+ /// Entry::Occupied(_) => {}
+ /// }
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```
+ Occupied(OccupiedEntry<'a, T, A>),
+
+ /// A vacant entry.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use hashbrown::hash_table::{Entry, HashTable, OccupiedEntry};
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let mut table = HashTable::<&str>::new();
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ ///
+ /// match table.entry(hasher(&"a"), |&x| x == "a", hasher) {
+ /// Entry::Vacant(_) => {}
+ /// Entry::Occupied(_) => unreachable!(),
+ /// }
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```
+ Vacant(VacantEntry<'a, T, A>),
+}
+
+impl<T: fmt::Debug, A: Allocator> fmt::Debug for Entry<'_, T, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ Entry::Vacant(ref v) => f.debug_tuple("Entry").field(v).finish(),
+ Entry::Occupied(ref o) => f.debug_tuple("Entry").field(o).finish(),
+ }
+ }
+}
+
+impl<'a, T, A> Entry<'a, T, A>
+where
+ A: Allocator,
+{
+ /// Sets the value of the entry, replacing any existing value if there is
+ /// one, and returns an [`OccupiedEntry`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use hashbrown::HashTable;
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let mut table: HashTable<&str> = HashTable::new();
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ ///
+ /// let entry = table
+ /// .entry(hasher(&"horseyland"), |&x| x == "horseyland", hasher)
+ /// .insert("horseyland");
+ ///
+ /// assert_eq!(entry.get(), &"horseyland");
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```
+ pub fn insert(self, value: T) -> OccupiedEntry<'a, T, A> {
+ match self {
+ Entry::Occupied(mut entry) => {
+ *entry.get_mut() = value;
+ entry
+ }
+ Entry::Vacant(entry) => entry.insert(value),
+ }
+ }
+
+ /// Ensures a value is in the entry by inserting if it was vacant.
+ ///
+ /// Returns an [`OccupiedEntry`] pointing to the now-occupied entry.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use hashbrown::HashTable;
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let mut table: HashTable<&str> = HashTable::new();
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ ///
+ /// // nonexistent key
+ /// table
+ /// .entry(hasher(&"poneyland"), |&x| x == "poneyland", hasher)
+ /// .or_insert("poneyland");
+ /// assert!(table
+ /// .find(hasher(&"poneyland"), |&x| x == "poneyland")
+ /// .is_some());
+ ///
+ /// // existing key
+ /// table
+ /// .entry(hasher(&"poneyland"), |&x| x == "poneyland", hasher)
+ /// .or_insert("poneyland");
+ /// assert!(table
+ /// .find(hasher(&"poneyland"), |&x| x == "poneyland")
+ /// .is_some());
+ /// assert_eq!(table.len(), 1);
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```
+ pub fn or_insert(self, default: T) -> OccupiedEntry<'a, T, A> {
+ match self {
+ Entry::Occupied(entry) => entry,
+ Entry::Vacant(entry) => entry.insert(default),
+ }
+ }
+
+    /// Ensures a value is in the entry by inserting the result of the default function if empty.
+ ///
+ /// Returns an [`OccupiedEntry`] pointing to the now-occupied entry.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use hashbrown::HashTable;
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let mut table: HashTable<String> = HashTable::new();
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ ///
+ /// table
+ /// .entry(hasher("poneyland"), |x| x == "poneyland", |val| hasher(val))
+ /// .or_insert_with(|| "poneyland".to_string());
+ ///
+ /// assert!(table
+ /// .find(hasher(&"poneyland"), |x| x == "poneyland")
+ /// .is_some());
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```
+ pub fn or_insert_with(self, default: impl FnOnce() -> T) -> OccupiedEntry<'a, T, A> {
+ match self {
+ Entry::Occupied(entry) => entry,
+ Entry::Vacant(entry) => entry.insert(default()),
+ }
+ }
+
+ /// Provides in-place mutable access to an occupied entry before any
+ /// potential inserts into the table.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use hashbrown::HashTable;
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let mut table: HashTable<(&str, u32)> = HashTable::new();
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ ///
+ /// table
+ /// .entry(
+ /// hasher(&"poneyland"),
+ /// |&(x, _)| x == "poneyland",
+ /// |(k, _)| hasher(&k),
+ /// )
+ /// .and_modify(|(_, v)| *v += 1)
+ /// .or_insert(("poneyland", 42));
+ /// assert_eq!(
+ /// table.find(hasher(&"poneyland"), |&(k, _)| k == "poneyland"),
+ /// Some(&("poneyland", 42))
+ /// );
+ ///
+ /// table
+ /// .entry(
+ /// hasher(&"poneyland"),
+ /// |&(x, _)| x == "poneyland",
+ /// |(k, _)| hasher(&k),
+ /// )
+ /// .and_modify(|(_, v)| *v += 1)
+ /// .or_insert(("poneyland", 42));
+ /// assert_eq!(
+ /// table.find(hasher(&"poneyland"), |&(k, _)| k == "poneyland"),
+ /// Some(&("poneyland", 43))
+ /// );
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```
+ pub fn and_modify(self, f: impl FnOnce(&mut T)) -> Self {
+ match self {
+ Entry::Occupied(mut entry) => {
+ f(entry.get_mut());
+ Entry::Occupied(entry)
+ }
+ Entry::Vacant(entry) => Entry::Vacant(entry),
+ }
+ }
+}
+
+/// A view into an occupied entry in a `HashTable`.
+/// It is part of the [`Entry`] enum.
+///
+/// [`Entry`]: enum.Entry.html
+///
+/// # Examples
+///
+/// ```
+/// # #[cfg(feature = "nightly")]
+/// # fn test() {
+/// use ahash::AHasher;
+/// use hashbrown::hash_table::{Entry, HashTable, OccupiedEntry};
+/// use std::hash::{BuildHasher, BuildHasherDefault};
+///
+/// let mut table = HashTable::new();
+/// let hasher = BuildHasherDefault::<AHasher>::default();
+/// let hasher = |val: &_| hasher.hash_one(val);
+/// for x in ["a", "b", "c"] {
+/// table.insert_unique(hasher(&x), x, hasher);
+/// }
+/// assert_eq!(table.len(), 3);
+///
+/// let _entry_o: OccupiedEntry<_, _> = table.find_entry(hasher(&"a"), |&x| x == "a").unwrap();
+/// assert_eq!(table.len(), 3);
+///
+/// // Existing key
+/// match table.entry(hasher(&"a"), |&x| x == "a", hasher) {
+/// Entry::Vacant(_) => unreachable!(),
+/// Entry::Occupied(view) => {
+/// assert_eq!(view.get(), &"a");
+/// }
+/// }
+///
+/// assert_eq!(table.len(), 3);
+///
+/// // Existing key (take)
+/// match table.entry(hasher(&"c"), |&x| x == "c", hasher) {
+/// Entry::Vacant(_) => unreachable!(),
+/// Entry::Occupied(view) => {
+/// assert_eq!(view.remove().0, "c");
+/// }
+/// }
+/// assert_eq!(table.find(hasher(&"c"), |&x| x == "c"), None);
+/// assert_eq!(table.len(), 2);
+/// # }
+/// # fn main() {
+/// # #[cfg(feature = "nightly")]
+/// # test()
+/// # }
+/// ```
+pub struct OccupiedEntry<'a, T, A = Global>
+where
+ A: Allocator,
+{
+ hash: u64,
+ bucket: Bucket<T>,
+ table: &'a mut HashTable<T, A>,
+}
+
+unsafe impl<T, A> Send for OccupiedEntry<'_, T, A>
+where
+ T: Send,
+ A: Send + Allocator,
+{
+}
+unsafe impl<T, A> Sync for OccupiedEntry<'_, T, A>
+where
+ T: Sync,
+ A: Sync + Allocator,
+{
+}
+
+impl<T: fmt::Debug, A: Allocator> fmt::Debug for OccupiedEntry<'_, T, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("OccupiedEntry")
+ .field("value", self.get())
+ .finish()
+ }
+}
+
+impl<'a, T, A> OccupiedEntry<'a, T, A>
+where
+ A: Allocator,
+{
+ /// Takes the value out of the entry, and returns it along with a
+ /// `VacantEntry` that can be used to insert another value with the same
+ /// hash as the one that was just removed.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use hashbrown::hash_table::Entry;
+ /// use hashbrown::HashTable;
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let mut table: HashTable<&str> = HashTable::new();
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ /// // The table is empty
+ /// assert!(table.is_empty() && table.capacity() == 0);
+ ///
+ /// table.insert_unique(hasher(&"poneyland"), "poneyland", hasher);
+ /// let capacity_before_remove = table.capacity();
+ ///
+ /// if let Entry::Occupied(o) = table.entry(hasher(&"poneyland"), |&x| x == "poneyland", hasher) {
+ /// assert_eq!(o.remove().0, "poneyland");
+ /// }
+ ///
+ /// assert!(table
+ /// .find(hasher(&"poneyland"), |&x| x == "poneyland")
+ /// .is_none());
+    /// // Now the table holds no elements but its capacity equals the old one
+ /// assert!(table.len() == 0 && table.capacity() == capacity_before_remove);
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```
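+    ///
+    /// A sketch of reusing the returned `VacantEntry`: the freed slot can
+    /// immediately receive a replacement value, which must hash to the same
+    /// value the entry was found with.
+    ///
+    /// ```
+    /// # #[cfg(feature = "nightly")]
+    /// # fn test() {
+    /// use ahash::AHasher;
+    /// use hashbrown::HashTable;
+    /// use std::hash::{BuildHasher, BuildHasherDefault};
+    ///
+    /// let mut table: HashTable<&str> = HashTable::new();
+    /// let hasher = BuildHasherDefault::<AHasher>::default();
+    /// let hasher = |val: &_| hasher.hash_one(val);
+    /// let occupied = table.insert_unique(hasher(&"poneyland"), "poneyland", hasher);
+    ///
+    /// // Remove the value, keeping hold of the now-vacant slot...
+    /// let (old, vacant) = occupied.remove();
+    /// assert_eq!(old, "poneyland");
+    /// // ...and insert a replacement with the same hash.
+    /// vacant.insert("poneyland");
+    /// assert_eq!(table.len(), 1);
+    /// # }
+    /// # fn main() {
+    /// # #[cfg(feature = "nightly")]
+    /// # test()
+    /// # }
+    /// ```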
+ #[cfg_attr(feature = "inline-more", inline)]
+ pub fn remove(self) -> (T, VacantEntry<'a, T, A>) {
+ let (val, slot) = unsafe { self.table.raw.remove(self.bucket) };
+ (
+ val,
+ VacantEntry {
+ hash: self.hash,
+ insert_slot: slot,
+ table: self.table,
+ },
+ )
+ }
+
+ /// Gets a reference to the value in the entry.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use hashbrown::hash_table::Entry;
+ /// use hashbrown::HashTable;
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let mut table: HashTable<&str> = HashTable::new();
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ /// table.insert_unique(hasher(&"poneyland"), "poneyland", hasher);
+ ///
+ /// match table.entry(hasher(&"poneyland"), |&x| x == "poneyland", hasher) {
+ /// Entry::Vacant(_) => panic!(),
+ /// Entry::Occupied(entry) => assert_eq!(entry.get(), &"poneyland"),
+ /// }
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```
+ #[inline]
+ pub fn get(&self) -> &T {
+ unsafe { self.bucket.as_ref() }
+ }
+
+ /// Gets a mutable reference to the value in the entry.
+ ///
+ /// If you need a reference to the `OccupiedEntry` which may outlive the
+ /// destruction of the `Entry` value, see [`into_mut`].
+ ///
+ /// [`into_mut`]: #method.into_mut
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use hashbrown::hash_table::Entry;
+ /// use hashbrown::HashTable;
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let mut table: HashTable<(&str, u32)> = HashTable::new();
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ /// table.insert_unique(hasher(&"poneyland"), ("poneyland", 12), |(k, _)| hasher(&k));
+ ///
+ /// assert_eq!(
+ /// table.find(hasher(&"poneyland"), |&(x, _)| x == "poneyland",),
+ /// Some(&("poneyland", 12))
+ /// );
+ ///
+ /// if let Entry::Occupied(mut o) = table.entry(
+ /// hasher(&"poneyland"),
+ /// |&(x, _)| x == "poneyland",
+ /// |(k, _)| hasher(&k),
+ /// ) {
+ /// o.get_mut().1 += 10;
+ /// assert_eq!(o.get().1, 22);
+ ///
+ /// // We can use the same Entry multiple times.
+ /// o.get_mut().1 += 2;
+ /// }
+ ///
+ /// assert_eq!(
+ /// table.find(hasher(&"poneyland"), |&(x, _)| x == "poneyland",),
+ /// Some(&("poneyland", 24))
+ /// );
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```
+ #[inline]
+ pub fn get_mut(&mut self) -> &mut T {
+ unsafe { self.bucket.as_mut() }
+ }
+
+ /// Converts the OccupiedEntry into a mutable reference to the value in the entry
+ /// with a lifetime bound to the table itself.
+ ///
+ /// If you need multiple references to the `OccupiedEntry`, see [`get_mut`].
+ ///
+ /// [`get_mut`]: #method.get_mut
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use hashbrown::hash_table::Entry;
+ /// use hashbrown::HashTable;
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let mut table: HashTable<(&str, u32)> = HashTable::new();
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ /// table.insert_unique(hasher(&"poneyland"), ("poneyland", 12), |(k, _)| hasher(&k));
+ ///
+ /// assert_eq!(
+ /// table.find(hasher(&"poneyland"), |&(x, _)| x == "poneyland",),
+ /// Some(&("poneyland", 12))
+ /// );
+ ///
+ /// let value: &mut (&str, u32);
+ /// match table.entry(
+ /// hasher(&"poneyland"),
+ /// |&(x, _)| x == "poneyland",
+ /// |(k, _)| hasher(&k),
+ /// ) {
+ /// Entry::Occupied(entry) => value = entry.into_mut(),
+ /// Entry::Vacant(_) => panic!(),
+ /// }
+ /// value.1 += 10;
+ ///
+ /// assert_eq!(
+ /// table.find(hasher(&"poneyland"), |&(x, _)| x == "poneyland",),
+ /// Some(&("poneyland", 22))
+ /// );
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```
+ pub fn into_mut(self) -> &'a mut T {
+ unsafe { self.bucket.as_mut() }
+ }
+
+ /// Converts the OccupiedEntry into a mutable reference to the underlying
+ /// table.
+ pub fn into_table(self) -> &'a mut HashTable<T, A> {
+ self.table
+ }
+}
+
+/// A view into a vacant entry in a `HashTable`.
+/// It is part of the [`Entry`] enum.
+///
+/// [`Entry`]: enum.Entry.html
+///
+/// # Examples
+///
+/// ```
+/// # #[cfg(feature = "nightly")]
+/// # fn test() {
+/// use ahash::AHasher;
+/// use hashbrown::hash_table::{Entry, HashTable, VacantEntry};
+/// use std::hash::{BuildHasher, BuildHasherDefault};
+///
+/// let mut table: HashTable<&str> = HashTable::new();
+/// let hasher = BuildHasherDefault::<AHasher>::default();
+/// let hasher = |val: &_| hasher.hash_one(val);
+///
+/// let entry_v: VacantEntry<_, _> = match table.entry(hasher(&"a"), |&x| x == "a", hasher) {
+/// Entry::Vacant(view) => view,
+/// Entry::Occupied(_) => unreachable!(),
+/// };
+/// entry_v.insert("a");
+/// assert!(table.find(hasher(&"a"), |&x| x == "a").is_some() && table.len() == 1);
+///
+/// // Nonexistent key (insert)
+/// match table.entry(hasher(&"b"), |&x| x == "b", hasher) {
+/// Entry::Vacant(view) => {
+/// view.insert("b");
+/// }
+/// Entry::Occupied(_) => unreachable!(),
+/// }
+/// assert!(table.find(hasher(&"b"), |&x| x == "b").is_some() && table.len() == 2);
+/// # }
+/// # fn main() {
+/// # #[cfg(feature = "nightly")]
+/// # test()
+/// # }
+/// ```
+pub struct VacantEntry<'a, T, A = Global>
+where
+ A: Allocator,
+{
+ hash: u64,
+ insert_slot: InsertSlot,
+ table: &'a mut HashTable<T, A>,
+}
+
+impl<T: fmt::Debug, A: Allocator> fmt::Debug for VacantEntry<'_, T, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str("VacantEntry")
+ }
+}
+
+impl<'a, T, A> VacantEntry<'a, T, A>
+where
+ A: Allocator,
+{
+ /// Inserts a new element into the table with the hash that was used to
+ /// obtain the `VacantEntry`.
+ ///
+ /// An `OccupiedEntry` is returned for the newly inserted element.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use hashbrown::hash_table::Entry;
+ /// use hashbrown::HashTable;
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let mut table: HashTable<&str> = HashTable::new();
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ ///
+ /// if let Entry::Vacant(o) = table.entry(hasher(&"poneyland"), |&x| x == "poneyland", hasher) {
+ /// o.insert("poneyland");
+ /// }
+ /// assert_eq!(
+ /// table.find(hasher(&"poneyland"), |&x| x == "poneyland"),
+ /// Some(&"poneyland")
+ /// );
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```
+ #[inline]
+ pub fn insert(self, value: T) -> OccupiedEntry<'a, T, A> {
+ let bucket = unsafe {
+ self.table
+ .raw
+ .insert_in_slot(self.hash, self.insert_slot, value)
+ };
+ OccupiedEntry {
+ hash: self.hash,
+ bucket,
+ table: self.table,
+ }
+ }
+
+ /// Converts the `VacantEntry` into a mutable reference to the underlying
+ /// table.
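+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch of abandoning a vacant entry without inserting,
+ /// following the nightly-gated test pattern used by the other examples
+ /// in this module:
+ ///
+ /// ```
+ /// # #[cfg(feature = "nightly")]
+ /// # fn test() {
+ /// use ahash::AHasher;
+ /// use hashbrown::hash_table::{Entry, HashTable};
+ /// use std::hash::{BuildHasher, BuildHasherDefault};
+ ///
+ /// let mut table: HashTable<&str> = HashTable::new();
+ /// let hasher = BuildHasherDefault::<AHasher>::default();
+ /// let hasher = |val: &_| hasher.hash_one(val);
+ ///
+ /// if let Entry::Vacant(v) = table.entry(hasher(&"a"), |&x| x == "a", hasher) {
+ ///     // Decide not to insert after all; recover the `&mut HashTable`.
+ ///     let table_ref = v.into_table();
+ ///     assert_eq!(table_ref.len(), 0);
+ /// }
+ /// # }
+ /// # fn main() {
+ /// # #[cfg(feature = "nightly")]
+ /// # test()
+ /// # }
+ /// ```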
+ pub fn into_table(self) -> &'a mut HashTable<T, A> {
+ self.table
+ }
+}
+
+/// Type representing the absence of an entry, as returned by [`HashTable::find_entry`].
+///
+/// This type only exists due to [limitations] in Rust's NLL borrow checker. In
+/// the future, `find_entry` will return an `Option<OccupiedEntry>` and this
+/// type will be removed.
+///
+/// [limitations]: https://smallcultfollowing.com/babysteps/blog/2018/06/15/mir-based-borrow-check-nll-status-update/#polonius
+///
+/// # Examples
+///
+/// ```
+/// # #[cfg(feature = "nightly")]
+/// # fn test() {
+/// use ahash::AHasher;
+/// use hashbrown::hash_table::{AbsentEntry, Entry, HashTable};
+/// use std::hash::{BuildHasher, BuildHasherDefault};
+///
+/// let mut table: HashTable<&str> = HashTable::new();
+/// let hasher = BuildHasherDefault::<AHasher>::default();
+/// let hasher = |val: &_| hasher.hash_one(val);
+///
+/// let entry_v: AbsentEntry<_, _> = table.find_entry(hasher(&"a"), |&x| x == "a").unwrap_err();
+/// entry_v
+/// .into_table()
+/// .insert_unique(hasher(&"a"), "a", hasher);
+/// assert!(table.find(hasher(&"a"), |&x| x == "a").is_some() && table.len() == 1);
+///
+/// // Nonexistent key (insert)
+/// match table.entry(hasher(&"b"), |&x| x == "b", hasher) {
+/// Entry::Vacant(view) => {
+/// view.insert("b");
+/// }
+/// Entry::Occupied(_) => unreachable!(),
+/// }
+/// assert!(table.find(hasher(&"b"), |&x| x == "b").is_some() && table.len() == 2);
+/// # }
+/// # fn main() {
+/// # #[cfg(feature = "nightly")]
+/// # test()
+/// # }
+/// ```
+pub struct AbsentEntry<'a, T, A = Global>
+where
+ A: Allocator,
+{
+ table: &'a mut HashTable<T, A>,
+}
+
+impl<T: fmt::Debug, A: Allocator> fmt::Debug for AbsentEntry<'_, T, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str("AbsentEntry")
+ }
+}
+
+impl<'a, T, A> AbsentEntry<'a, T, A>
+where
+ A: Allocator,
+{
+ /// Converts the `AbsentEntry` into a mutable reference to the underlying
+ /// table.
+ pub fn into_table(self) -> &'a mut HashTable<T, A> {
+ self.table
+ }
+}
+
+/// An iterator over the entries of a `HashTable` in arbitrary order.
+/// The iterator element type is `&'a T`.
+///
+/// This `struct` is created by the [`iter`] method on [`HashTable`]. See its
+/// documentation for more.
+///
+/// [`iter`]: struct.HashTable.html#method.iter
+/// [`HashTable`]: struct.HashTable.html
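+///
+/// # Examples
+///
+/// A minimal sketch of iterating a table, following the nightly-gated test
+/// pattern used by the other examples in this module:
+///
+/// ```
+/// # #[cfg(feature = "nightly")]
+/// # fn test() {
+/// use ahash::AHasher;
+/// use hashbrown::HashTable;
+/// use std::hash::{BuildHasher, BuildHasherDefault};
+///
+/// let mut table: HashTable<u32> = HashTable::new();
+/// let hasher = BuildHasherDefault::<AHasher>::default();
+/// let hasher = |val: &_| hasher.hash_one(val);
+/// for x in [1, 2, 3] {
+///     table.insert_unique(hasher(&x), x, hasher);
+/// }
+///
+/// // `iter` yields `&u32` items in arbitrary order.
+/// let sum: u32 = table.iter().sum();
+/// assert_eq!(sum, 6);
+/// # }
+/// # fn main() {
+/// # #[cfg(feature = "nightly")]
+/// # test()
+/// # }
+/// ```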
+pub struct Iter<'a, T> {
+ inner: RawIter<T>,
+ marker: PhantomData<&'a T>,
+}
+
+impl<'a, T> Iterator for Iter<'a, T> {
+ type Item = &'a T;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ // Avoid `Option::map` because it bloats LLVM IR.
+ match self.inner.next() {
+ Some(bucket) => Some(unsafe { bucket.as_ref() }),
+ None => None,
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+
+ fn fold<B, F>(self, init: B, mut f: F) -> B
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> B,
+ {
+ self.inner
+ .fold(init, |acc, bucket| unsafe { f(acc, bucket.as_ref()) })
+ }
+}
+
+impl<T> ExactSizeIterator for Iter<'_, T> {
+ fn len(&self) -> usize {
+ self.inner.len()
+ }
+}
+
+impl<T> FusedIterator for Iter<'_, T> {}
+
+/// A mutable iterator over the entries of a `HashTable` in arbitrary order.
+/// The iterator element type is `&'a mut T`.
+///
+/// This `struct` is created by the [`iter_mut`] method on [`HashTable`]. See its
+/// documentation for more.
+///
+/// [`iter_mut`]: struct.HashTable.html#method.iter_mut
+/// [`HashTable`]: struct.HashTable.html
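+///
+/// # Examples
+///
+/// A minimal sketch of mutating elements in place; only the non-key part of
+/// each element is changed, so the stored hashes stay valid. It follows the
+/// nightly-gated test pattern used by the other examples in this module:
+///
+/// ```
+/// # #[cfg(feature = "nightly")]
+/// # fn test() {
+/// use ahash::AHasher;
+/// use hashbrown::HashTable;
+/// use std::hash::{BuildHasher, BuildHasherDefault};
+///
+/// let mut table: HashTable<(char, u32)> = HashTable::new();
+/// let hasher = BuildHasherDefault::<AHasher>::default();
+/// // Hash only the `char` key of each `(char, u32)` element.
+/// let hasher = |val: &(char, u32)| hasher.hash_one(&val.0);
+/// for pair in [('a', 1), ('b', 2), ('c', 3)] {
+///     table.insert_unique(hasher(&pair), pair, hasher);
+/// }
+///
+/// // `iter_mut` yields `&mut (char, u32)` items in arbitrary order.
+/// for pair in table.iter_mut() {
+///     pair.1 *= 2;
+/// }
+/// let sum: u32 = table.iter().map(|&(_, v)| v).sum();
+/// assert_eq!(sum, 12);
+/// # }
+/// # fn main() {
+/// # #[cfg(feature = "nightly")]
+/// # test()
+/// # }
+/// ```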
+pub struct IterMut<'a, T> {
+ inner: RawIter<T>,
+ marker: PhantomData<&'a mut T>,
+}
+
+impl<'a, T> Iterator for IterMut<'a, T> {
+ type Item = &'a mut T;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ // Avoid `Option::map` because it bloats LLVM IR.
+ match self.inner.next() {
+ Some(bucket) => Some(unsafe { bucket.as_mut() }),
+ None => None,
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+
+ fn fold<B, F>(self, init: B, mut f: F) -> B
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> B,
+ {
+ self.inner
+ .fold(init, |acc, bucket| unsafe { f(acc, bucket.as_mut()) })
+ }
+}
+
+impl<T> ExactSizeIterator for IterMut<'_, T> {
+ fn len(&self) -> usize {
+ self.inner.len()
+ }
+}
+
+impl<T> FusedIterator for IterMut<'_, T> {}
+
+/// An owning iterator over the entries of a `HashTable` in arbitrary order.
+/// The iterator element type is `T`.
+///
+/// This `struct` is created by the [`into_iter`] method on [`HashTable`]
+/// (provided by the [`IntoIterator`] trait). See its documentation for more.
+/// The table cannot be used after calling that method.
+///
+/// [`into_iter`]: struct.HashTable.html#method.into_iter
+/// [`HashTable`]: struct.HashTable.html
+/// [`IntoIterator`]: https://doc.rust-lang.org/core/iter/trait.IntoIterator.html
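+///
+/// # Examples
+///
+/// A minimal consuming-iteration sketch, following the nightly-gated test
+/// pattern used by the other examples in this module:
+///
+/// ```
+/// # #[cfg(feature = "nightly")]
+/// # fn test() {
+/// use ahash::AHasher;
+/// use hashbrown::HashTable;
+/// use std::hash::{BuildHasher, BuildHasherDefault};
+///
+/// let mut table: HashTable<u32> = HashTable::new();
+/// let hasher = BuildHasherDefault::<AHasher>::default();
+/// let hasher = |val: &_| hasher.hash_one(val);
+/// for x in [1, 2, 3] {
+///     table.insert_unique(hasher(&x), x, hasher);
+/// }
+///
+/// // `into_iter` consumes the table and yields owned `u32` values.
+/// let mut values: Vec<u32> = table.into_iter().collect();
+/// values.sort_unstable();
+/// assert_eq!(values, [1, 2, 3]);
+/// # }
+/// # fn main() {
+/// # #[cfg(feature = "nightly")]
+/// # test()
+/// # }
+/// ```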
+pub struct IntoIter<T, A = Global>
+where
+ A: Allocator,
+{
+ inner: RawIntoIter<T, A>,
+}
+
+impl<T, A> Iterator for IntoIter<T, A>
+where
+ A: Allocator,
+{
+ type Item = T;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ self.inner.next()
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+
+ fn fold<B, F>(self, init: B, f: F) -> B
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> B,
+ {
+ self.inner.fold(init, f)
+ }
+}
+
+impl<T, A> ExactSizeIterator for IntoIter<T, A>
+where
+ A: Allocator,
+{
+ fn len(&self) -> usize {
+ self.inner.len()
+ }
+}
+
+impl<T, A> FusedIterator for IntoIter<T, A> where A: Allocator {}
+
+/// A draining iterator over the items of a `HashTable`.
+///
+/// This `struct` is created by the [`drain`] method on [`HashTable`].
+/// See its documentation for more.
+///
+/// [`HashTable`]: struct.HashTable.html
+/// [`drain`]: struct.HashTable.html#method.drain
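+///
+/// # Examples
+///
+/// A minimal sketch: `drain` yields every element by value and leaves the
+/// table empty. It follows the nightly-gated test pattern used by the other
+/// examples in this module:
+///
+/// ```
+/// # #[cfg(feature = "nightly")]
+/// # fn test() {
+/// use ahash::AHasher;
+/// use hashbrown::HashTable;
+/// use std::hash::{BuildHasher, BuildHasherDefault};
+///
+/// let mut table: HashTable<u32> = HashTable::new();
+/// let hasher = BuildHasherDefault::<AHasher>::default();
+/// let hasher = |val: &_| hasher.hash_one(val);
+/// for x in [1, 2, 3] {
+///     table.insert_unique(hasher(&x), x, hasher);
+/// }
+///
+/// let mut drained: Vec<u32> = table.drain().collect();
+/// drained.sort_unstable();
+/// assert_eq!(drained, [1, 2, 3]);
+/// assert!(table.is_empty());
+/// # }
+/// # fn main() {
+/// # #[cfg(feature = "nightly")]
+/// # test()
+/// # }
+/// ```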
+pub struct Drain<'a, T, A: Allocator = Global> {
+ inner: RawDrain<'a, T, A>,
+}
+
+impl<T, A: Allocator> Drain<'_, T, A> {
+ /// Returns an iterator of references over the remaining items.
+ fn iter(&self) -> Iter<'_, T> {
+ Iter {
+ inner: self.inner.iter(),
+ marker: PhantomData,
+ }
+ }
+}
+
+impl<T, A: Allocator> Iterator for Drain<'_, T, A> {
+ type Item = T;
+
+ fn next(&mut self) -> Option<T> {
+ self.inner.next()
+ }
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+}
+impl<T, A: Allocator> ExactSizeIterator for Drain<'_, T, A> {
+ fn len(&self) -> usize {
+ self.inner.len()
+ }
+}
+impl<T, A: Allocator> FusedIterator for Drain<'_, T, A> {}
+
+impl<T: fmt::Debug, A: Allocator> fmt::Debug for Drain<'_, T, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.iter()).finish()
+ }
+}
+
+/// A draining iterator over the entries of a `HashTable` that satisfy the
+/// predicate `f`; elements for which `f` returns `false` are retained in the
+/// table.
+///
+/// This `struct` is created by [`HashTable::extract_if`]. See its
+/// documentation for more.
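+///
+/// # Examples
+///
+/// A minimal sketch: elements for which the predicate returns `true` are
+/// removed and yielded, the rest stay in the table. It follows the
+/// nightly-gated test pattern used by the other examples in this module:
+///
+/// ```
+/// # #[cfg(feature = "nightly")]
+/// # fn test() {
+/// use ahash::AHasher;
+/// use hashbrown::HashTable;
+/// use std::hash::{BuildHasher, BuildHasherDefault};
+///
+/// let mut table: HashTable<u32> = HashTable::new();
+/// let hasher = BuildHasherDefault::<AHasher>::default();
+/// let hasher = |val: &_| hasher.hash_one(val);
+/// for x in 1..=6 {
+///     table.insert_unique(hasher(&x), x, hasher);
+/// }
+///
+/// // Pull out the even values; the odd values are retained.
+/// let mut evens: Vec<u32> = table.extract_if(|&mut v| v % 2 == 0).collect();
+/// evens.sort_unstable();
+/// assert_eq!(evens, [2, 4, 6]);
+/// assert_eq!(table.len(), 3);
+/// # }
+/// # fn main() {
+/// # #[cfg(feature = "nightly")]
+/// # test()
+/// # }
+/// ```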
+#[must_use = "Iterators are lazy unless consumed"]
+pub struct ExtractIf<'a, T, F, A: Allocator = Global>
+where
+ F: FnMut(&mut T) -> bool,
+{
+ f: F,
+ inner: RawExtractIf<'a, T, A>,
+}
+
+impl<T, F, A: Allocator> Iterator for ExtractIf<'_, T, F, A>
+where
+ F: FnMut(&mut T) -> bool,
+{
+ type Item = T;
+
+ #[inline]
+ fn next(&mut self) -> Option<Self::Item> {
+ self.inner.next(|val| (self.f)(val))
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (0, self.inner.iter.size_hint().1)
+ }
+}
+
+impl<T, F, A: Allocator> FusedIterator for ExtractIf<'_, T, F, A> where F: FnMut(&mut T) -> bool {}
diff --git a/third_party/rust/hashbrown/tests/equivalent_trait.rs b/third_party/rust/hashbrown/tests/equivalent_trait.rs
new file mode 100644
index 0000000000..713dddd53c
--- /dev/null
+++ b/third_party/rust/hashbrown/tests/equivalent_trait.rs
@@ -0,0 +1,53 @@
+use hashbrown::Equivalent;
+use hashbrown::HashMap;
+
+use std::hash::Hash;
+
+#[derive(Debug, Hash)]
+pub struct Pair<A, B>(pub A, pub B);
+
+impl<A, B, C, D> PartialEq<(A, B)> for Pair<C, D>
+where
+ C: PartialEq<A>,
+ D: PartialEq<B>,
+{
+ fn eq(&self, rhs: &(A, B)) -> bool {
+ self.0 == rhs.0 && self.1 == rhs.1
+ }
+}
+
+impl<A, B, X> Equivalent<X> for Pair<A, B>
+where
+ Pair<A, B>: PartialEq<X>,
+ A: Hash + Eq,
+ B: Hash + Eq,
+{
+ fn equivalent(&self, other: &X) -> bool {
+ *self == *other
+ }
+}
+
+#[test]
+fn test_lookup() {
+ let s = String::from;
+ let mut map = HashMap::new();
+ map.insert((s("a"), s("b")), 1);
+ map.insert((s("a"), s("x")), 2);
+
+ assert!(map.contains_key(&Pair("a", "b")));
+ assert!(!map.contains_key(&Pair("b", "a")));
+}
+
+#[test]
+fn test_string_str() {
+ let s = String::from;
+ let mut map = HashMap::new();
+ map.insert(s("a"), 1);
+ map.insert(s("b"), 2);
+ map.insert(s("x"), 3);
+ map.insert(s("y"), 4);
+
+ assert!(map.contains_key("a"));
+ assert!(!map.contains_key("z"));
+ assert_eq!(map.remove("b"), Some(2));
+}
diff --git a/third_party/rust/hashbrown/tests/raw.rs b/third_party/rust/hashbrown/tests/raw.rs
new file mode 100644
index 0000000000..858836e63b
--- /dev/null
+++ b/third_party/rust/hashbrown/tests/raw.rs
@@ -0,0 +1,11 @@
+#![cfg(feature = "raw")]
+
+use hashbrown::raw::RawTable;
+use std::mem;
+
+#[test]
+fn test_allocation_info() {
+ assert_eq!(RawTable::<()>::new().allocation_info().1.size(), 0);
+ assert_eq!(RawTable::<u32>::new().allocation_info().1.size(), 0);
+ assert!(RawTable::<u32>::with_capacity(1).allocation_info().1.size() > mem::size_of::<u32>());
+}
diff --git a/third_party/rust/hashbrown/tests/rayon.rs b/third_party/rust/hashbrown/tests/rayon.rs
index 8c603c5c41..d55e5a9804 100644
--- a/third_party/rust/hashbrown/tests/rayon.rs
+++ b/third_party/rust/hashbrown/tests/rayon.rs
@@ -356,7 +356,9 @@ fn set_seq_par_equivalence_into_iter_empty() {
let vec_seq = SET_EMPTY.clone().into_iter().collect::<Vec<_>>();
let vec_par = SET_EMPTY.clone().into_par_iter().collect::<Vec<_>>();
- assert_eq3!(vec_seq, vec_par, []);
+ // Work around a type inference failure introduced by the rend dev-dependency.
+ let empty: [char; 0] = [];
+ assert_eq3!(vec_seq, vec_par, empty);
}
#[test]
diff --git a/third_party/rust/hashbrown/tests/set.rs b/third_party/rust/hashbrown/tests/set.rs
index 5ae1ec98ec..86ec964766 100644
--- a/third_party/rust/hashbrown/tests/set.rs
+++ b/third_party/rust/hashbrown/tests/set.rs
@@ -27,7 +27,7 @@ fn test_hashset_insert_remove() {
assert_eq!(m.insert(x.clone()), true);
}
for (i, x) in tx.iter().enumerate() {
- println!("removing {} {:?}", i, x);
+ println!("removing {i} {x:?}");
assert_eq!(m.remove(x), true);
}
}