author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-04 12:47:55 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-04 12:47:55 +0000
commit     2aadc03ef15cb5ca5cc2af8a7c08e070742f0ac4
tree       033cc839730fda84ff08db877037977be94e5e3a  /vendor/indexmap
parent     Initial commit.

Adding upstream version 0.70.1+ds1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat:
-rw-r--r--  vendor/indexmap-1.9.3/.cargo-checksum.json | 1
-rw-r--r--  vendor/indexmap-1.9.3/Cargo.toml | 108
-rw-r--r--  vendor/indexmap-1.9.3/LICENSE-APACHE | 201
-rw-r--r--  vendor/indexmap-1.9.3/LICENSE-MIT | 25
-rw-r--r--  vendor/indexmap-1.9.3/README.md | 55
-rw-r--r--  vendor/indexmap-1.9.3/RELEASES.md | 388
-rw-r--r--  vendor/indexmap-1.9.3/benches/bench.rs | 763
-rw-r--r--  vendor/indexmap-1.9.3/benches/faststring.rs | 185
-rw-r--r--  vendor/indexmap-1.9.3/build.rs | 8
-rw-r--r--  vendor/indexmap-1.9.3/src/arbitrary.rs | 75
-rw-r--r--  vendor/indexmap-1.9.3/src/equivalent.rs | 27
-rw-r--r--  vendor/indexmap-1.9.3/src/lib.rs | 194
-rw-r--r--  vendor/indexmap-1.9.3/src/macros.rs | 178
-rw-r--r--  vendor/indexmap-1.9.3/src/map.rs | 1947
-rw-r--r--  vendor/indexmap-1.9.3/src/map/core.rs | 700
-rw-r--r--  vendor/indexmap-1.9.3/src/map/core/raw.rs | 191
-rw-r--r--  vendor/indexmap-1.9.3/src/mutable_keys.rs | 75
-rw-r--r--  vendor/indexmap-1.9.3/src/rayon/map.rs | 583
-rw-r--r--  vendor/indexmap-1.9.3/src/rayon/mod.rs | 27
-rw-r--r--  vendor/indexmap-1.9.3/src/rayon/set.rs | 741
-rw-r--r--  vendor/indexmap-1.9.3/src/rustc.rs | 158
-rw-r--r--  vendor/indexmap-1.9.3/src/serde.rs | 155
-rw-r--r--  vendor/indexmap-1.9.3/src/serde_seq.rs | 112
-rw-r--r--  vendor/indexmap-1.9.3/src/set.rs | 1912
-rw-r--r--  vendor/indexmap-1.9.3/src/util.rs | 31
-rw-r--r--  vendor/indexmap-1.9.3/tests/equivalent_trait.rs | 53
-rw-r--r--  vendor/indexmap-1.9.3/tests/macros_full_path.rs | 19
-rw-r--r--  vendor/indexmap-1.9.3/tests/quick.rs | 573
-rw-r--r--  vendor/indexmap-1.9.3/tests/tests.rs | 28
-rw-r--r--  vendor/indexmap/.cargo-checksum.json | 1
-rw-r--r--  vendor/indexmap/Cargo.toml | 112
-rw-r--r--  vendor/indexmap/LICENSE-APACHE | 201
-rw-r--r--  vendor/indexmap/LICENSE-MIT | 25
-rw-r--r--  vendor/indexmap/README.md | 55
-rw-r--r--  vendor/indexmap/RELEASES.md | 434
-rw-r--r--  vendor/indexmap/benches/bench.rs | 763
-rw-r--r--  vendor/indexmap/benches/faststring.rs | 185
-rw-r--r--  vendor/indexmap/debian/patches/drop-quickcheck.diff | 22
-rw-r--r--  vendor/indexmap/debian/patches/drop-rustc-rayon.diff | 23
-rw-r--r--  vendor/indexmap/debian/patches/series | 2
-rw-r--r--  vendor/indexmap/src/arbitrary.rs | 77
-rw-r--r--  vendor/indexmap/src/lib.rs | 278
-rw-r--r--  vendor/indexmap/src/macros.rs | 178
-rw-r--r--  vendor/indexmap/src/map.rs | 1223
-rw-r--r--  vendor/indexmap/src/map/core.rs | 762
-rw-r--r--  vendor/indexmap/src/map/core/raw.rs | 194
-rw-r--r--  vendor/indexmap/src/map/iter.rs | 541
-rw-r--r--  vendor/indexmap/src/map/serde_seq.rs | 138
-rw-r--r--  vendor/indexmap/src/map/slice.rs | 471
-rw-r--r--  vendor/indexmap/src/map/tests.rs | 449
-rw-r--r--  vendor/indexmap/src/mutable_keys.rs | 91
-rw-r--r--  vendor/indexmap/src/rayon/map.rs | 679
-rw-r--r--  vendor/indexmap/src/rayon/mod.rs | 27
-rw-r--r--  vendor/indexmap/src/rayon/set.rs | 774
-rw-r--r--  vendor/indexmap/src/rustc.rs | 158
-rw-r--r--  vendor/indexmap/src/serde.rs | 151
-rw-r--r--  vendor/indexmap/src/set.rs | 1018
-rw-r--r--  vendor/indexmap/src/set/iter.rs | 543
-rw-r--r--  vendor/indexmap/src/set/slice.rs | 278
-rw-r--r--  vendor/indexmap/src/set/tests.rs | 545
-rw-r--r--  vendor/indexmap/src/util.rs | 53
-rw-r--r--  vendor/indexmap/tests/equivalent_trait.rs | 53
-rw-r--r--  vendor/indexmap/tests/macros_full_path.rs | 19
-rw-r--r--  vendor/indexmap/tests/quick.rs | 579
-rw-r--r--  vendor/indexmap/tests/tests.rs | 28
65 files changed, 20643 insertions, 0 deletions
diff --git a/vendor/indexmap-1.9.3/.cargo-checksum.json b/vendor/indexmap-1.9.3/.cargo-checksum.json
new file mode 100644
index 0000000..52b1b64
--- /dev/null
+++ b/vendor/indexmap-1.9.3/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{},"package":"bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99"} \ No newline at end of file
diff --git a/vendor/indexmap-1.9.3/Cargo.toml b/vendor/indexmap-1.9.3/Cargo.toml
new file mode 100644
index 0000000..28f3618
--- /dev/null
+++ b/vendor/indexmap-1.9.3/Cargo.toml
@@ -0,0 +1,108 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2021"
+rust-version = "1.56"
+name = "indexmap"
+version = "1.9.3"
+description = "A hash table with consistent order and fast iteration."
+documentation = "https://docs.rs/indexmap/"
+readme = "README.md"
+keywords = [
+ "hashmap",
+ "no_std",
+]
+categories = [
+ "data-structures",
+ "no-std",
+]
+license = "Apache-2.0 OR MIT"
+repository = "https://github.com/bluss/indexmap"
+
+[package.metadata.release]
+no-dev-version = true
+tag-name = "{{version}}"
+
+[package.metadata.docs.rs]
+features = [
+ "arbitrary",
+ "quickcheck",
+ "serde-1",
+ "rayon",
+]
+
+[profile.bench]
+debug = true
+
+[lib]
+bench = false
+
+[dependencies.arbitrary]
+version = "1.0"
+optional = true
+default-features = false
+
+[dependencies.hashbrown]
+version = "0.12"
+features = ["raw"]
+default-features = false
+
+[dependencies.quickcheck]
+version = "1.0"
+optional = true
+default-features = false
+
+[dependencies.rayon]
+version = "1.4.1"
+optional = true
+
+[dependencies.rustc-rayon]
+version = "0.5"
+optional = true
+package = "rustc-rayon"
+
+[dependencies.serde]
+version = "1.0"
+optional = true
+default-features = false
+
+[dev-dependencies.fnv]
+version = "1.0"
+
+[dev-dependencies.fxhash]
+version = "0.2.1"
+
+[dev-dependencies.itertools]
+version = "0.10"
+
+[dev-dependencies.lazy_static]
+version = "1.3"
+
+[dev-dependencies.quickcheck]
+version = "1.0"
+default-features = false
+
+[dev-dependencies.rand]
+version = "0.8"
+features = ["small_rng"]
+
+[dev-dependencies.serde_derive]
+version = "1.0"
+
+[build-dependencies.autocfg]
+version = "1"
+
+[features]
+serde-1 = ["serde"]
+std = []
+test_debug = []
+test_low_transition_point = []
diff --git a/vendor/indexmap-1.9.3/LICENSE-APACHE b/vendor/indexmap-1.9.3/LICENSE-APACHE
new file mode 100644
index 0000000..16fe87b
--- /dev/null
+++ b/vendor/indexmap-1.9.3/LICENSE-APACHE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/indexmap-1.9.3/LICENSE-MIT b/vendor/indexmap-1.9.3/LICENSE-MIT
new file mode 100644
index 0000000..8b81810
--- /dev/null
+++ b/vendor/indexmap-1.9.3/LICENSE-MIT
@@ -0,0 +1,25 @@
+Copyright (c) 2016--2017
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/vendor/indexmap-1.9.3/README.md b/vendor/indexmap-1.9.3/README.md
new file mode 100644
index 0000000..d80b709
--- /dev/null
+++ b/vendor/indexmap-1.9.3/README.md
@@ -0,0 +1,55 @@
+# indexmap
+
+[![build status](https://github.com/bluss/indexmap/workflows/Continuous%20integration/badge.svg?branch=master)](https://github.com/bluss/indexmap/actions)
+[![crates.io](https://img.shields.io/crates/v/indexmap.svg)](https://crates.io/crates/indexmap)
+[![docs](https://docs.rs/indexmap/badge.svg)](https://docs.rs/indexmap)
+[![rustc](https://img.shields.io/badge/rust-1.56%2B-orange.svg)](https://img.shields.io/badge/rust-1.56%2B-orange.svg)
+
+A pure-Rust hash table which preserves (in a limited sense) insertion order.
+
+This crate implements compact map and set data-structures,
+where the iteration order of the keys is independent from their hash or
+value. It preserves insertion order (except after removals), and it
+allows lookup of entries by either hash table key or numerical index.
+
+Note: this crate was originally released under the name `ordermap`,
+but it was renamed to `indexmap` to better reflect its features.
+
+# Background
+
+This was inspired by Python 3.6's new dict implementation (which remembers
+the insertion order and is fast to iterate, and is compact in memory).
+
+Some of those features were translated to Rust, and some were not. The result
+was indexmap, a hash table that has the following properties:
+
+- Order is **independent of hash function** and hash values of keys.
+- Fast to iterate.
+- Indexed in compact space.
+- Preserves insertion order **as long** as you don't call `.remove()`.
+- Uses hashbrown for the inner table, just like Rust's libstd `HashMap` does.
+
+## Performance
+
+`IndexMap` derives a couple of performance facts directly from how it is constructed,
+which is roughly:
+
+> A raw hash table of key-value indices, and a vector of key-value pairs.
+
+- Iteration is very fast since it is on the dense key-values.
+- Removal is fast since it moves memory areas only in the table,
+ and uses a single swap in the vector.
+- Lookup is fast-ish because the initial 7-bit hash lookup uses SIMD, and indices are
+ densely stored. Lookup also is slow-ish since the actual key-value pairs are stored
+ separately. (Visible when CPU cache size is limiting.)
+
+- In practice, `IndexMap` has been tested out as the hashmap in rustc in [PR45282] and
+ the performance was roughly on par across the whole workload.
+- If you want the properties of `IndexMap`, or its strongest performance points
+ fit your workload, it might be the best hash table implementation.
+
+[PR45282]: https://github.com/rust-lang/rust/pull/45282
+
+# Recent Changes
+
+See [RELEASES.md](https://github.com/bluss/indexmap/blob/master/RELEASES.md).
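
The README's performance notes above hinge on the two-part layout (an index table plus a dense vector of key-value pairs). As a quick illustration, here is a minimal sketch against the public indexmap 1.9 API, not taken from the vendored sources, showing the ordering, indexed access, and `swap_remove` behaviour it describes:

```rust
use indexmap::IndexMap;

fn main() {
    let mut map = IndexMap::new();
    map.insert("a", 1);
    map.insert("b", 2);
    map.insert("c", 3);

    // Iteration follows insertion order, independent of the keys' hashes.
    let keys: Vec<&&str> = map.keys().collect();
    assert_eq!(keys, [&"a", &"b", &"c"]);

    // Entries are also addressable by numerical index.
    assert_eq!(map.get_index(1), Some((&"b", &2)));

    // `swap_remove` stays O(1) by moving the last entry into the hole,
    // which is why order is only preserved as long as nothing is removed.
    assert_eq!(map.swap_remove("a"), Some(1));
    let keys: Vec<&&str> = map.keys().collect();
    assert_eq!(keys, [&"c", &"b"]);
}
```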
diff --git a/vendor/indexmap-1.9.3/RELEASES.md b/vendor/indexmap-1.9.3/RELEASES.md
new file mode 100644
index 0000000..7ea8b70
--- /dev/null
+++ b/vendor/indexmap-1.9.3/RELEASES.md
@@ -0,0 +1,388 @@
+- 1.9.3
+
+ - Bump the `rustc-rayon` dependency, for compiler use only.
+
+- 1.9.2
+
+ - `IndexMap` and `IndexSet` both implement `arbitrary::Arbitrary<'_>` and
+ `quickcheck::Arbitrary` if those optional dependency features are enabled.
+
+- 1.9.1
+
+ - The MSRV now allows Rust 1.56.0 as well. However, currently `hashbrown`
+ 0.12.1 requires 1.56.1, so users on 1.56.0 should downgrade that to 0.12.0
+ until there is a later published version relaxing its requirement.
+
+- 1.9.0
+
+ - **MSRV**: Rust 1.56.1 or later is now required.
+
+ - The `hashbrown` dependency has been updated to version 0.12.
+
+ - `IterMut` and `ValuesMut` now implement `Debug`.
+
+ - The new `IndexMap::shrink_to` and `IndexSet::shrink_to` methods shrink
+ the capacity with a lower bound.
+
+ - The new `IndexMap::move_index` and `IndexSet::move_index` methods change
+ the position of an item from one index to another, shifting the items
+ between to accommodate the move.
+
+- 1.8.2
+
+ - Bump the `rustc-rayon` dependency, for compiler use only.
+
+- 1.8.1
+
+ - The new `IndexSet::replace_full` will return the index of the item along
+ with the replaced value, if any, by @zakcutner in PR [222].
+
+[222]: https://github.com/bluss/indexmap/pull/222
+
+- 1.8.0
+
+ - The new `IndexMap::into_keys` and `IndexMap::into_values` will consume
+ the map into keys or values, respectively, matching Rust 1.54's `HashMap`
+ methods, by @taiki-e in PR [195].
+
+ - More of the iterator types implement `Debug`, `ExactSizeIterator`, and
+ `FusedIterator`, by @cuviper in PR [196].
+
+ - `IndexMap` and `IndexSet` now implement rayon's `ParallelDrainRange`,
+ by @cuviper in PR [197].
+
+ - `IndexMap::with_hasher` and `IndexSet::with_hasher` are now `const`
+ functions, allowing static maps and sets, by @mwillsey in PR [203].
+
+ - `IndexMap` and `IndexSet` now implement `From` for arrays, matching
+ Rust 1.56's implementation for `HashMap`, by @rouge8 in PR [205].
+
+ - `IndexMap` and `IndexSet` now have methods `sort_unstable_keys`,
+ `sort_unstable_by`, `sorted_unstable_by`, and `par_*` equivalents,
+ which sort in-place without preserving the order of equal items, by
+ @bhgomes in PR [211].
+
+[195]: https://github.com/bluss/indexmap/pull/195
+[196]: https://github.com/bluss/indexmap/pull/196
+[197]: https://github.com/bluss/indexmap/pull/197
+[203]: https://github.com/bluss/indexmap/pull/203
+[205]: https://github.com/bluss/indexmap/pull/205
+[211]: https://github.com/bluss/indexmap/pull/211
+
+- 1.7.0
+
+ - **MSRV**: Rust 1.49 or later is now required.
+
+ - The `hashbrown` dependency has been updated to version 0.11.
+
+- 1.6.2
+
+ - Fixed to match `std` behavior, `OccupiedEntry::key` now references the
+ existing key in the map instead of the lookup key, by @cuviper in PR [170].
+
+ - The new `Entry::or_insert_with_key` matches Rust 1.50's `Entry` method,
+ passing `&K` to the callback to create a value, by @cuviper in PR [175].
+
+[170]: https://github.com/bluss/indexmap/pull/170
+[175]: https://github.com/bluss/indexmap/pull/175
+
+- 1.6.1
+
+ - The new `serde_seq` module implements `IndexMap` serialization as a
+ sequence to ensure order is preserved, by @cuviper in PR [158].
+
+ - New methods on maps and sets work like the `Vec`/slice methods by the same name:
+ `truncate`, `split_off`, `first`, `first_mut`, `last`, `last_mut`, and
+ `swap_indices`, by @cuviper in PR [160].
+
+[158]: https://github.com/bluss/indexmap/pull/158
+[160]: https://github.com/bluss/indexmap/pull/160
+
+- 1.6.0
+
+ - **MSRV**: Rust 1.36 or later is now required.
+
+ - The `hashbrown` dependency has been updated to version 0.9.
+
+- 1.5.2
+
+ - The new "std" feature will force the use of `std` for users that explicitly
+ want the default `S = RandomState`, bypassing the autodetection added in 1.3.0,
+ by @cuviper in PR [145].
+
+[145]: https://github.com/bluss/indexmap/pull/145
+
+- 1.5.1
+
+ - Values can now be indexed by their `usize` position by @cuviper in PR [132].
+
+ - Some of the generic bounds have been relaxed to match `std` by @cuviper in PR [141].
+
+ - `drain` now accepts any `R: RangeBounds<usize>` by @cuviper in PR [142].
+
+[132]: https://github.com/bluss/indexmap/pull/132
+[141]: https://github.com/bluss/indexmap/pull/141
+[142]: https://github.com/bluss/indexmap/pull/142
+
+- 1.5.0
+
+ - **MSRV**: Rust 1.32 or later is now required.
+
+ - The inner hash table is now based on `hashbrown` by @cuviper in PR [131].
+ This also completes the method `reserve` and adds `shrink_to_fit`.
+
+ - Add new methods `get_key_value`, `remove_entry`, `swap_remove_entry`,
+ and `shift_remove_entry`, by @cuviper in PR [136]
+
+ - `Clone::clone_from` reuses allocations by @cuviper in PR [125]
+
+ - Add new method `reverse` by @linclelinkpart5 in PR [128]
+
+[125]: https://github.com/bluss/indexmap/pull/125
+[128]: https://github.com/bluss/indexmap/pull/128
+[131]: https://github.com/bluss/indexmap/pull/131
+[136]: https://github.com/bluss/indexmap/pull/136
+
+- 1.4.0
+
+ - Add new method `get_index_of` by @Thermatrix in PR [115] and [120]
+
+ - Fix build script rebuild-if-changed configuration to use "build.rs";
+ fixes issue [123]. Fix by @cuviper.
+
+ - Dev-dependencies (rand and quickcheck) have been updated. The crate's tests
+ now run using Rust 1.32 or later (MSRV for building the crate has not changed).
+ by @kjeremy and @bluss
+
+[123]: https://github.com/bluss/indexmap/issues/123
+[115]: https://github.com/bluss/indexmap/pull/115
+[120]: https://github.com/bluss/indexmap/pull/120
+
+- 1.3.2
+
+ - Maintenance update to regenerate the published `Cargo.toml`.
+
+- 1.3.1
+
+ - Maintenance update for formatting and `autocfg` 1.0.
+
+- 1.3.0
+
+ - The deprecation messages in the previous version have been removed.
+ (The methods have not otherwise changed.) Docs for removal methods have been
+ improved.
+ - From Rust 1.36, this crate supports being built **without std**, requiring
+ `alloc` instead. This is enabled automatically when it is detected that
+ `std` is not available. There is no crate feature to enable/disable to
+ trigger this. The new build-dep `autocfg` enables this.
+
+- 1.2.0
+
+ - Plain `.remove()` now has a deprecation message; it informs the user
+ about picking one of the removal functions `swap_remove` and `shift_remove`,
+ which have different performance and order semantics.
+ Plain `.remove()` will not be removed; the warning message and method
+ will remain until further notice.
+
+ - Add new method `shift_remove` for order preserving removal on the map,
+ and `shift_take` for the corresponding operation on the set.
+
+ - Add methods `swap_remove`, `swap_remove_entry` to `Entry`.
+
+ - Fix indexset/indexmap to support full paths, like `indexmap::indexmap!()`
+
+ - Internal improvements: fix warnings, deprecations and style lints
+
+- 1.1.0
+
+ - Added optional feature `"rayon"` that adds parallel iterator support
+ to `IndexMap` and `IndexSet` using Rayon. This includes all the regular
+ iterators in parallel versions, and parallel sort.
+
+ - Implemented `Clone` for `map::{Iter, Keys, Values}` and
+ `set::{Difference, Intersection, Iter, SymmetricDifference, Union}`
+
+ - Implemented `Debug` for `map::{Entry, IntoIter, Iter, Keys, Values}` and
+ `set::{Difference, Intersection, IntoIter, Iter, SymmetricDifference, Union}`
+
+ - The Serde trait `IntoDeserializer` is implemented for `IndexMap` and `IndexSet`.
+
+ - Minimum Rust version requirement increased to Rust 1.30 for development builds.
+
+- 1.0.2
+
+ - The new methods `IndexMap::insert_full` and `IndexSet::insert_full` are
+ both like `insert` with the index included in the return value.
+
+ - The new method `Entry::and_modify` can be used to modify occupied
+ entries, matching the new methods of `std` maps in Rust 1.26.
+
+ - The new method `Entry::or_default` inserts a default value in unoccupied
+ entries, matching the new methods of `std` maps in Rust 1.28.
+
+- 1.0.1
+
+ - Document Rust version policy for the crate (see rustdoc)
+
+- 1.0.0
+
+ - This is the 1.0 release for `indexmap`! (the crate and datastructure
+ formerly known as “ordermap”)
+ - `OccupiedEntry::insert` changed its signature, to use `&mut self` for
+ the method receiver, matching the equivalent method for a standard
+ `HashMap`. Thanks to @dtolnay for finding this bug.
+ - The deprecated old names from ordermap were removed: `OrderMap`,
+ `OrderSet`, `ordermap!{}`, `orderset!{}`. Use the new `IndexMap`
+ etc names instead.
+
+- 0.4.1
+
+ - Renamed crate to `indexmap`; the `ordermap` crate is now deprecated
+ and the types `OrderMap/Set` now have a deprecation notice.
+
+- 0.4.0
+
+ - This is the last release series for this `ordermap` under that name,
+ because the crate is **going to be renamed** to `indexmap` (with types
+ `IndexMap`, `IndexSet`) and no change in functionality!
+ - The map and its associated structs moved into the `map` submodule of the
+ crate, so that the map and set are symmetric
+
+ + The iterators, `Entry` and other structs are now under `ordermap::map::`
+
+ - Internally refactored `OrderMap<K, V, S>` so that all the main algorithms
+ (insertion, lookup, removal etc) that don't use the `S` parameter (the
+ hasher) are compiled without depending on `S`, which reduces generics bloat.
+
+ - `Entry<K, V>` no longer has a type parameter `S`, which is just like
+ the standard `HashMap`'s entry.
+
+ - Minimum Rust version requirement increased to Rust 1.18
+
+- 0.3.5
+
+ - Documentation improvements
+
+- 0.3.4
+
+ - The `.retain()` methods for `OrderMap` and `OrderSet` now
+ traverse the elements in order, and the retained elements **keep their order**
+ - Added new methods `.sort_by()`, `.sort_keys()` to `OrderMap` and
+ `.sort_by()`, `.sort()` to `OrderSet`. These methods allow you to
+ sort the maps in place efficiently.
+
+- 0.3.3
+
+ - Document insertion behaviour better by @lucab
+ - Updated dependences (no feature changes) by @ignatenkobrain
+
+- 0.3.2
+
+ - Add `OrderSet` by @cuviper!
+ - `OrderMap::drain` is now (too) a double ended iterator.
+
+- 0.3.1
+
+ - In all ordermap iterators, forward the `collect` method to the underlying
+ iterator as well.
+ - Add crates.io categories.
+
+- 0.3.0
+
+ - The methods `get_pair`, `get_pair_index` were both replaced by
+ `get_full` (and the same for the mutable case).
+ - Method `swap_remove_pair` replaced by `swap_remove_full`.
+ - Add trait `MutableKeys` for opt-in mutable key access. Mutable key access
+ is only possible through the methods of this extension trait.
+ - Add new trait `Equivalent` for key equivalence. This extends the
+ `Borrow` trait mechanism for `OrderMap::get` in a backwards compatible
+ way, just some minor type inference related issues may become apparent.
+ See [#10] for more information.
+ - Implement `Extend<(&K, &V)>` by @xfix.
+
+[#10]: https://github.com/bluss/ordermap/pull/10
+
+- 0.2.13
+
+ - Fix deserialization to support custom hashers by @Techcable.
+ - Add methods `.index()` on the entry types by @garro95.
+
+- 0.2.12
+
+ - Add methods `.with_hasher()`, `.hasher()`.
+
+- 0.2.11
+
+ - Support `ExactSizeIterator` for the iterators. By @Binero.
+ - Use `Box<[Pos]>` internally, saving a word in the `OrderMap` struct.
+ - Serde support, with crate feature `"serde-1"`. By @xfix.
+
+- 0.2.10
+
+ - Add iterator `.drain(..)` by @stevej.
+
+- 0.2.9
+
+ - Add method `.is_empty()` by @overvenus.
+ - Implement `PartialEq, Eq` by @overvenus.
+ - Add method `.sorted_by()`.
+
+- 0.2.8
+
+ - Add iterators `.values()` and `.values_mut()`.
+ - Fix compatibility with 32-bit platforms.
+
+- 0.2.7
+
+ - Add `.retain()`.
+
+- 0.2.6
+
+ - Add `OccupiedEntry::remove_entry` and other minor entry methods,
+ so that it now has all the features of `HashMap`'s entries.
+
+- 0.2.5
+
+ - Improved `.pop()` slightly.
+
+- 0.2.4
+
+ - Improved performance of `.insert()` ([#3]) by @pczarn.
+
+[#3]: https://github.com/bluss/ordermap/pull/3
+
+- 0.2.3
+
+ - Generalize `Entry` for now, so that it works on hashmaps with non-default
+ hasher. However, there's a lingering compat issue since libstd `HashMap`
+ does not parameterize its entries by the hasher (`S` typarm).
+ - Special case some iterator methods like `.nth()`.
+
+- 0.2.2
+
+ - Disable the verbose `Debug` impl by default.
+
+- 0.2.1
+
+ - Fix doc links and clarify docs.
+
+- 0.2.0
+
+ - Add more `HashMap` methods & compat with its API.
+ - Experimental support for `.entry()` (the simplest parts of the API).
+ - Add `.reserve()` (placeholder impl).
+ - Add `.remove()` as synonym for `.swap_remove()`.
+ - Changed `.insert()` to swap value if the entry already exists, and
+ return `Option`.
+ - Experimental support as an *indexed* hash map! Added methods
+ `.get_index()`, `.get_index_mut()`, `.swap_remove_index()`,
+ `.get_pair_index()`, `.get_pair_index_mut()`.
+
+- 0.1.2
+
+ - Implement the 32/32 split idea for `Pos` which improves cache utilization
+ and lookup performance.
+
+- 0.1.1
+
+ - Initial release.
diff --git a/vendor/indexmap-1.9.3/benches/bench.rs b/vendor/indexmap-1.9.3/benches/bench.rs
new file mode 100644
index 0000000..a4e8e21
--- /dev/null
+++ b/vendor/indexmap-1.9.3/benches/bench.rs
@@ -0,0 +1,763 @@
+#![feature(test)]
+
+extern crate test;
+#[macro_use]
+extern crate lazy_static;
+
+use fnv::FnvHasher;
+use std::hash::BuildHasherDefault;
+use std::hash::Hash;
+type FnvBuilder = BuildHasherDefault<FnvHasher>;
+
+use test::black_box;
+use test::Bencher;
+
+use indexmap::IndexMap;
+
+use std::collections::HashMap;
+
+use rand::rngs::SmallRng;
+use rand::seq::SliceRandom;
+use rand::SeedableRng;
+
+/// Use a consistently seeded Rng for benchmark stability
+fn small_rng() -> SmallRng {
+ let seed = u64::from_le_bytes(*b"indexmap");
+ SmallRng::seed_from_u64(seed)
+}
+
+#[bench]
+fn new_hashmap(b: &mut Bencher) {
+ b.iter(|| HashMap::<String, String>::new());
+}
+
+#[bench]
+fn new_indexmap(b: &mut Bencher) {
+ b.iter(|| IndexMap::<String, String>::new());
+}
+
+#[bench]
+fn with_capacity_10e5_hashmap(b: &mut Bencher) {
+ b.iter(|| HashMap::<String, String>::with_capacity(10_000));
+}
+
+#[bench]
+fn with_capacity_10e5_indexmap(b: &mut Bencher) {
+ b.iter(|| IndexMap::<String, String>::with_capacity(10_000));
+}
+
+#[bench]
+fn insert_hashmap_10_000(b: &mut Bencher) {
+ let c = 10_000;
+ b.iter(|| {
+ let mut map = HashMap::with_capacity(c);
+ for x in 0..c {
+ map.insert(x, ());
+ }
+ map
+ });
+}
+
+#[bench]
+fn insert_indexmap_10_000(b: &mut Bencher) {
+ let c = 10_000;
+ b.iter(|| {
+ let mut map = IndexMap::with_capacity(c);
+ for x in 0..c {
+ map.insert(x, ());
+ }
+ map
+ });
+}
+
+#[bench]
+fn insert_hashmap_string_10_000(b: &mut Bencher) {
+ let c = 10_000;
+ b.iter(|| {
+ let mut map = HashMap::with_capacity(c);
+ for x in 0..c {
+ map.insert(x.to_string(), ());
+ }
+ map
+ });
+}
+
+#[bench]
+fn insert_indexmap_string_10_000(b: &mut Bencher) {
+ let c = 10_000;
+ b.iter(|| {
+ let mut map = IndexMap::with_capacity(c);
+ for x in 0..c {
+ map.insert(x.to_string(), ());
+ }
+ map
+ });
+}
+
+#[bench]
+fn insert_hashmap_str_10_000(b: &mut Bencher) {
+ let c = 10_000;
+ let ss = Vec::from_iter((0..c).map(|x| x.to_string()));
+ b.iter(|| {
+ let mut map = HashMap::with_capacity(c);
+ for key in &ss {
+ map.insert(&key[..], ());
+ }
+ map
+ });
+}
+
+#[bench]
+fn insert_indexmap_str_10_000(b: &mut Bencher) {
+ let c = 10_000;
+ let ss = Vec::from_iter((0..c).map(|x| x.to_string()));
+ b.iter(|| {
+ let mut map = IndexMap::with_capacity(c);
+ for key in &ss {
+ map.insert(&key[..], ());
+ }
+ map
+ });
+}
+
+#[bench]
+fn insert_hashmap_int_bigvalue_10_000(b: &mut Bencher) {
+ let c = 10_000;
+ let value = [0u64; 10];
+ b.iter(|| {
+ let mut map = HashMap::with_capacity(c);
+ for i in 0..c {
+ map.insert(i, value);
+ }
+ map
+ });
+}
+
+#[bench]
+fn insert_indexmap_int_bigvalue_10_000(b: &mut Bencher) {
+ let c = 10_000;
+ let value = [0u64; 10];
+ b.iter(|| {
+ let mut map = IndexMap::with_capacity(c);
+ for i in 0..c {
+ map.insert(i, value);
+ }
+ map
+ });
+}
+
+#[bench]
+fn insert_hashmap_100_000(b: &mut Bencher) {
+ let c = 100_000;
+ b.iter(|| {
+ let mut map = HashMap::with_capacity(c);
+ for x in 0..c {
+ map.insert(x, ());
+ }
+ map
+ });
+}
+
+#[bench]
+fn insert_indexmap_100_000(b: &mut Bencher) {
+ let c = 100_000;
+ b.iter(|| {
+ let mut map = IndexMap::with_capacity(c);
+ for x in 0..c {
+ map.insert(x, ());
+ }
+ map
+ });
+}
+
+#[bench]
+fn insert_hashmap_150(b: &mut Bencher) {
+ let c = 150;
+ b.iter(|| {
+ let mut map = HashMap::with_capacity(c);
+ for x in 0..c {
+ map.insert(x, ());
+ }
+ map
+ });
+}
+
+#[bench]
+fn insert_indexmap_150(b: &mut Bencher) {
+ let c = 150;
+ b.iter(|| {
+ let mut map = IndexMap::with_capacity(c);
+ for x in 0..c {
+ map.insert(x, ());
+ }
+ map
+ });
+}
+
+#[bench]
+fn entry_hashmap_150(b: &mut Bencher) {
+ let c = 150;
+ b.iter(|| {
+ let mut map = HashMap::with_capacity(c);
+ for x in 0..c {
+ map.entry(x).or_insert(());
+ }
+ map
+ });
+}
+
+#[bench]
+fn entry_indexmap_150(b: &mut Bencher) {
+ let c = 150;
+ b.iter(|| {
+ let mut map = IndexMap::with_capacity(c);
+ for x in 0..c {
+ map.entry(x).or_insert(());
+ }
+ map
+ });
+}
+
+#[bench]
+fn iter_sum_hashmap_10_000(b: &mut Bencher) {
+ let c = 10_000;
+ let mut map = HashMap::with_capacity(c);
+ let len = c - c / 10;
+ for x in 0..len {
+ map.insert(x, ());
+ }
+ assert_eq!(map.len(), len);
+ b.iter(|| map.keys().sum::<usize>());
+}
+
+#[bench]
+fn iter_sum_indexmap_10_000(b: &mut Bencher) {
+ let c = 10_000;
+ let mut map = IndexMap::with_capacity(c);
+ let len = c - c / 10;
+ for x in 0..len {
+ map.insert(x, ());
+ }
+ assert_eq!(map.len(), len);
+ b.iter(|| map.keys().sum::<usize>());
+}
+
+#[bench]
+fn iter_black_box_hashmap_10_000(b: &mut Bencher) {
+ let c = 10_000;
+ let mut map = HashMap::with_capacity(c);
+ let len = c - c / 10;
+ for x in 0..len {
+ map.insert(x, ());
+ }
+ assert_eq!(map.len(), len);
+ b.iter(|| {
+ for &key in map.keys() {
+ black_box(key);
+ }
+ });
+}
+
+#[bench]
+fn iter_black_box_indexmap_10_000(b: &mut Bencher) {
+ let c = 10_000;
+ let mut map = IndexMap::with_capacity(c);
+ let len = c - c / 10;
+ for x in 0..len {
+ map.insert(x, ());
+ }
+ assert_eq!(map.len(), len);
+ b.iter(|| {
+ for &key in map.keys() {
+ black_box(key);
+ }
+ });
+}
+
+fn shuffled_keys<I>(iter: I) -> Vec<I::Item>
+where
+ I: IntoIterator,
+{
+ let mut v = Vec::from_iter(iter);
+ let mut rng = small_rng();
+ v.shuffle(&mut rng);
+ v
+}
+
+#[bench]
+fn lookup_hashmap_10_000_exist(b: &mut Bencher) {
+ let c = 10_000;
+ let mut map = HashMap::with_capacity(c);
+ let keys = shuffled_keys(0..c);
+ for &key in &keys {
+ map.insert(key, 1);
+ }
+ b.iter(|| {
+ let mut found = 0;
+ for key in 5000..c {
+ found += map.get(&key).is_some() as i32;
+ }
+ found
+ });
+}
+
+#[bench]
+fn lookup_hashmap_10_000_noexist(b: &mut Bencher) {
+ let c = 10_000;
+ let mut map = HashMap::with_capacity(c);
+ let keys = shuffled_keys(0..c);
+ for &key in &keys {
+ map.insert(key, 1);
+ }
+ b.iter(|| {
+ let mut found = 0;
+ for key in c..15000 {
+ found += map.get(&key).is_some() as i32;
+ }
+ found
+ });
+}
+
+#[bench]
+fn lookup_indexmap_10_000_exist(b: &mut Bencher) {
+ let c = 10_000;
+ let mut map = IndexMap::with_capacity(c);
+ let keys = shuffled_keys(0..c);
+ for &key in &keys {
+ map.insert(key, 1);
+ }
+ b.iter(|| {
+ let mut found = 0;
+ for key in 5000..c {
+ found += map.get(&key).is_some() as i32;
+ }
+ found
+ });
+}
+
+#[bench]
+fn lookup_indexmap_10_000_noexist(b: &mut Bencher) {
+ let c = 10_000;
+ let mut map = IndexMap::with_capacity(c);
+ let keys = shuffled_keys(0..c);
+ for &key in &keys {
+ map.insert(key, 1);
+ }
+ b.iter(|| {
+ let mut found = 0;
+ for key in c..15000 {
+ found += map.get(&key).is_some() as i32;
+ }
+ found
+ });
+}
+
+// number of items to look up
+const LOOKUP_MAP_SIZE: u32 = 100_000_u32;
+const LOOKUP_SAMPLE_SIZE: u32 = 5000;
+const SORT_MAP_SIZE: usize = 10_000;
+
+// use lazy_static so that comparison benchmarks use the exact same inputs
+lazy_static! {
+ static ref KEYS: Vec<u32> = shuffled_keys(0..LOOKUP_MAP_SIZE);
+}
+
+lazy_static! {
+ static ref HMAP_100K: HashMap<u32, u32> = {
+ let c = LOOKUP_MAP_SIZE;
+ let mut map = HashMap::with_capacity(c as usize);
+ let keys = &*KEYS;
+ for &key in keys {
+ map.insert(key, key);
+ }
+ map
+ };
+}
+
+lazy_static! {
+ static ref IMAP_100K: IndexMap<u32, u32> = {
+ let c = LOOKUP_MAP_SIZE;
+ let mut map = IndexMap::with_capacity(c as usize);
+ let keys = &*KEYS;
+ for &key in keys {
+ map.insert(key, key);
+ }
+ map
+ };
+}
+
+lazy_static! {
+ static ref IMAP_SORT_U32: IndexMap<u32, u32> = {
+ let mut map = IndexMap::with_capacity(SORT_MAP_SIZE);
+ for &key in &KEYS[..SORT_MAP_SIZE] {
+ map.insert(key, key);
+ }
+ map
+ };
+}
+lazy_static! {
+ static ref IMAP_SORT_S: IndexMap<String, String> = {
+ let mut map = IndexMap::with_capacity(SORT_MAP_SIZE);
+ for &key in &KEYS[..SORT_MAP_SIZE] {
+ map.insert(format!("{:^16x}", &key), String::new());
+ }
+ map
+ };
+}
+
+#[bench]
+fn lookup_hashmap_100_000_multi(b: &mut Bencher) {
+ let map = &*HMAP_100K;
+ b.iter(|| {
+ let mut found = 0;
+ for key in 0..LOOKUP_SAMPLE_SIZE {
+ found += map.get(&key).is_some() as u32;
+ }
+ found
+ });
+}
+
+#[bench]
+fn lookup_indexmap_100_000_multi(b: &mut Bencher) {
+ let map = &*IMAP_100K;
+ b.iter(|| {
+ let mut found = 0;
+ for key in 0..LOOKUP_SAMPLE_SIZE {
+ found += map.get(&key).is_some() as u32;
+ }
+ found
+ });
+}
+
+// inorder: Test looking up keys in the same order as they were inserted
+#[bench]
+fn lookup_hashmap_100_000_inorder_multi(b: &mut Bencher) {
+ let map = &*HMAP_100K;
+ let keys = &*KEYS;
+ b.iter(|| {
+ let mut found = 0;
+ for key in &keys[0..LOOKUP_SAMPLE_SIZE as usize] {
+ found += map.get(key).is_some() as u32;
+ }
+ found
+ });
+}
+
+#[bench]
+fn lookup_indexmap_100_000_inorder_multi(b: &mut Bencher) {
+ let map = &*IMAP_100K;
+ let keys = &*KEYS;
+ b.iter(|| {
+ let mut found = 0;
+ for key in &keys[0..LOOKUP_SAMPLE_SIZE as usize] {
+ found += map.get(key).is_some() as u32;
+ }
+ found
+ });
+}
+
+#[bench]
+fn lookup_hashmap_100_000_single(b: &mut Bencher) {
+ let map = &*HMAP_100K;
+ let mut iter = (0..LOOKUP_MAP_SIZE + LOOKUP_SAMPLE_SIZE).cycle();
+ b.iter(|| {
+ let key = iter.next().unwrap();
+ map.get(&key).is_some()
+ });
+}
+
+#[bench]
+fn lookup_indexmap_100_000_single(b: &mut Bencher) {
+ let map = &*IMAP_100K;
+ let mut iter = (0..LOOKUP_MAP_SIZE + LOOKUP_SAMPLE_SIZE).cycle();
+ b.iter(|| {
+ let key = iter.next().unwrap();
+ map.get(&key).is_some()
+ });
+}
+
+const GROW_SIZE: usize = 100_000;
+type GrowKey = u32;
+
+// Test grow/resize without preallocation
+#[bench]
+fn grow_fnv_hashmap_100_000(b: &mut Bencher) {
+ b.iter(|| {
+ let mut map: HashMap<_, _, FnvBuilder> = HashMap::default();
+ for x in 0..GROW_SIZE {
+ map.insert(x as GrowKey, x as GrowKey);
+ }
+ map
+ });
+}
+
+#[bench]
+fn grow_fnv_indexmap_100_000(b: &mut Bencher) {
+ b.iter(|| {
+ let mut map: IndexMap<_, _, FnvBuilder> = IndexMap::default();
+ for x in 0..GROW_SIZE {
+ map.insert(x as GrowKey, x as GrowKey);
+ }
+ map
+ });
+}
+
+const MERGE: u64 = 10_000;
+#[bench]
+fn hashmap_merge_simple(b: &mut Bencher) {
+ let first_map: HashMap<u64, _> = (0..MERGE).map(|i| (i, ())).collect();
+ let second_map: HashMap<u64, _> = (MERGE..MERGE * 2).map(|i| (i, ())).collect();
+ b.iter(|| {
+ let mut merged = first_map.clone();
+ merged.extend(second_map.iter().map(|(&k, &v)| (k, v)));
+ merged
+ });
+}
+
+#[bench]
+fn hashmap_merge_shuffle(b: &mut Bencher) {
+ let first_map: HashMap<u64, _> = (0..MERGE).map(|i| (i, ())).collect();
+ let second_map: HashMap<u64, _> = (MERGE..MERGE * 2).map(|i| (i, ())).collect();
+ let mut v = Vec::new();
+ let mut rng = small_rng();
+ b.iter(|| {
+ let mut merged = first_map.clone();
+ v.extend(second_map.iter().map(|(&k, &v)| (k, v)));
+ v.shuffle(&mut rng);
+ merged.extend(v.drain(..));
+
+ merged
+ });
+}
+
+#[bench]
+fn indexmap_merge_simple(b: &mut Bencher) {
+ let first_map: IndexMap<u64, _> = (0..MERGE).map(|i| (i, ())).collect();
+ let second_map: IndexMap<u64, _> = (MERGE..MERGE * 2).map(|i| (i, ())).collect();
+ b.iter(|| {
+ let mut merged = first_map.clone();
+ merged.extend(second_map.iter().map(|(&k, &v)| (k, v)));
+ merged
+ });
+}
+
+#[bench]
+fn indexmap_merge_shuffle(b: &mut Bencher) {
+ let first_map: IndexMap<u64, _> = (0..MERGE).map(|i| (i, ())).collect();
+ let second_map: IndexMap<u64, _> = (MERGE..MERGE * 2).map(|i| (i, ())).collect();
+ let mut v = Vec::new();
+ let mut rng = small_rng();
+ b.iter(|| {
+ let mut merged = first_map.clone();
+ v.extend(second_map.iter().map(|(&k, &v)| (k, v)));
+ v.shuffle(&mut rng);
+ merged.extend(v.drain(..));
+
+ merged
+ });
+}
+
+#[bench]
+fn swap_remove_indexmap_100_000(b: &mut Bencher) {
+ let map = IMAP_100K.clone();
+ let mut keys = Vec::from_iter(map.keys().copied());
+ let mut rng = small_rng();
+ keys.shuffle(&mut rng);
+
+ b.iter(|| {
+ let mut map = map.clone();
+ for key in &keys {
+ map.swap_remove(key);
+ }
+ assert_eq!(map.len(), 0);
+ map
+ });
+}
+
+#[bench]
+fn shift_remove_indexmap_100_000_few(b: &mut Bencher) {
+ let map = IMAP_100K.clone();
+ let mut keys = Vec::from_iter(map.keys().copied());
+ let mut rng = small_rng();
+ keys.shuffle(&mut rng);
+ keys.truncate(50);
+
+ b.iter(|| {
+ let mut map = map.clone();
+ for key in &keys {
+ map.shift_remove(key);
+ }
+ assert_eq!(map.len(), IMAP_100K.len() - keys.len());
+ map
+ });
+}
+
+#[bench]
+fn shift_remove_indexmap_2_000_full(b: &mut Bencher) {
+ let mut keys = KEYS[..2_000].to_vec();
+ let mut map = IndexMap::with_capacity(keys.len());
+ for &key in &keys {
+ map.insert(key, key);
+ }
+ let mut rng = small_rng();
+ keys.shuffle(&mut rng);
+
+ b.iter(|| {
+ let mut map = map.clone();
+ for key in &keys {
+ map.shift_remove(key);
+ }
+ assert_eq!(map.len(), 0);
+ map
+ });
+}
+
+#[bench]
+fn pop_indexmap_100_000(b: &mut Bencher) {
+ let map = IMAP_100K.clone();
+
+ b.iter(|| {
+ let mut map = map.clone();
+ while !map.is_empty() {
+ map.pop();
+ }
+ assert_eq!(map.len(), 0);
+ map
+ });
+}
+
+#[bench]
+fn few_retain_indexmap_100_000(b: &mut Bencher) {
+ let map = IMAP_100K.clone();
+
+ b.iter(|| {
+ let mut map = map.clone();
+ map.retain(|k, _| *k % 7 == 0);
+ map
+ });
+}
+
+#[bench]
+fn few_retain_hashmap_100_000(b: &mut Bencher) {
+ let map = HMAP_100K.clone();
+
+ b.iter(|| {
+ let mut map = map.clone();
+ map.retain(|k, _| *k % 7 == 0);
+ map
+ });
+}
+
+#[bench]
+fn half_retain_indexmap_100_000(b: &mut Bencher) {
+ let map = IMAP_100K.clone();
+
+ b.iter(|| {
+ let mut map = map.clone();
+ map.retain(|k, _| *k % 2 == 0);
+ map
+ });
+}
+
+#[bench]
+fn half_retain_hashmap_100_000(b: &mut Bencher) {
+ let map = HMAP_100K.clone();
+
+ b.iter(|| {
+ let mut map = map.clone();
+ map.retain(|k, _| *k % 2 == 0);
+ map
+ });
+}
+
+#[bench]
+fn many_retain_indexmap_100_000(b: &mut Bencher) {
+ let map = IMAP_100K.clone();
+
+ b.iter(|| {
+ let mut map = map.clone();
+ map.retain(|k, _| *k % 100 != 0);
+ map
+ });
+}
+
+#[bench]
+fn many_retain_hashmap_100_000(b: &mut Bencher) {
+ let map = HMAP_100K.clone();
+
+ b.iter(|| {
+ let mut map = map.clone();
+ map.retain(|k, _| *k % 100 != 0);
+ map
+ });
+}
+
+// simple sort impl for comparison
+pub fn simple_sort<K: Ord + Hash, V>(m: &mut IndexMap<K, V>) {
+ let mut ordered: Vec<_> = m.drain(..).collect();
+ ordered.sort_by(|left, right| left.0.cmp(&right.0));
+ m.extend(ordered);
+}
+
+#[bench]
+fn indexmap_sort_s(b: &mut Bencher) {
+ let map = IMAP_SORT_S.clone();
+
+ // there's a map clone there, but it's still useful to profile this
+ b.iter(|| {
+ let mut map = map.clone();
+ map.sort_keys();
+ map
+ });
+}
+
+#[bench]
+fn indexmap_simple_sort_s(b: &mut Bencher) {
+ let map = IMAP_SORT_S.clone();
+
+ // there's a map clone there, but it's still useful to profile this
+ b.iter(|| {
+ let mut map = map.clone();
+ simple_sort(&mut map);
+ map
+ });
+}
+
+#[bench]
+fn indexmap_sort_u32(b: &mut Bencher) {
+ let map = IMAP_SORT_U32.clone();
+
+ // there's a map clone there, but it's still useful to profile this
+ b.iter(|| {
+ let mut map = map.clone();
+ map.sort_keys();
+ map
+ });
+}
+
+#[bench]
+fn indexmap_simple_sort_u32(b: &mut Bencher) {
+ let map = IMAP_SORT_U32.clone();
+
+ // there's a map clone there, but it's still useful to profile this
+ b.iter(|| {
+ let mut map = map.clone();
+ simple_sort(&mut map);
+ map
+ });
+}
+
+// measure the fixed overhead of cloning in sort benchmarks
+#[bench]
+fn indexmap_clone_for_sort_s(b: &mut Bencher) {
+ let map = IMAP_SORT_S.clone();
+
+ b.iter(|| map.clone());
+}
+
+#[bench]
+fn indexmap_clone_for_sort_u32(b: &mut Bencher) {
+ let map = IMAP_SORT_U32.clone();
+
+ b.iter(|| map.clone());
+}
diff --git a/vendor/indexmap-1.9.3/benches/faststring.rs b/vendor/indexmap-1.9.3/benches/faststring.rs
new file mode 100644
index 0000000..ecc28b4
--- /dev/null
+++ b/vendor/indexmap-1.9.3/benches/faststring.rs
@@ -0,0 +1,185 @@
+#![feature(test)]
+
+extern crate test;
+
+use test::Bencher;
+
+use indexmap::IndexMap;
+
+use std::collections::HashMap;
+
+use rand::rngs::SmallRng;
+use rand::seq::SliceRandom;
+use rand::SeedableRng;
+
+use std::hash::{Hash, Hasher};
+
+use std::borrow::Borrow;
+use std::ops::Deref;
+
+/// Use a consistently seeded Rng for benchmark stability
+fn small_rng() -> SmallRng {
+ let seed = u64::from_le_bytes(*b"indexmap");
+ SmallRng::seed_from_u64(seed)
+}
+
+#[derive(PartialEq, Eq, Copy, Clone)]
+#[repr(transparent)]
+pub struct OneShot<T: ?Sized>(pub T);
+
+impl Hash for OneShot<str> {
+ fn hash<H: Hasher>(&self, h: &mut H) {
+ h.write(self.0.as_bytes())
+ }
+}
+
+impl<'a, S> From<&'a S> for &'a OneShot<str>
+where
+ S: AsRef<str>,
+{
+ fn from(s: &'a S) -> Self {
+ let s: &str = s.as_ref();
+ unsafe { &*(s as *const str as *const OneShot<str>) }
+ }
+}
+
+impl Hash for OneShot<String> {
+ fn hash<H: Hasher>(&self, h: &mut H) {
+ h.write(self.0.as_bytes())
+ }
+}
+
+impl Borrow<OneShot<str>> for OneShot<String> {
+ fn borrow(&self) -> &OneShot<str> {
+ <&OneShot<str>>::from(&self.0)
+ }
+}
+
+impl<T> Deref for OneShot<T> {
+ type Target = T;
+ fn deref(&self) -> &T {
+ &self.0
+ }
+}
+
+fn shuffled_keys<I>(iter: I) -> Vec<I::Item>
+where
+ I: IntoIterator,
+{
+ let mut v = Vec::from_iter(iter);
+ let mut rng = small_rng();
+ v.shuffle(&mut rng);
+ v
+}
+
+#[bench]
+fn insert_hashmap_string_10_000(b: &mut Bencher) {
+ let c = 10_000;
+ b.iter(|| {
+ let mut map = HashMap::with_capacity(c);
+ for x in 0..c {
+ map.insert(x.to_string(), ());
+ }
+ map
+ });
+}
+
+#[bench]
+fn insert_hashmap_string_oneshot_10_000(b: &mut Bencher) {
+ let c = 10_000;
+ b.iter(|| {
+ let mut map = HashMap::with_capacity(c);
+ for x in 0..c {
+ map.insert(OneShot(x.to_string()), ());
+ }
+ map
+ });
+}
+
+#[bench]
+fn insert_indexmap_string_10_000(b: &mut Bencher) {
+ let c = 10_000;
+ b.iter(|| {
+ let mut map = IndexMap::with_capacity(c);
+ for x in 0..c {
+ map.insert(x.to_string(), ());
+ }
+ map
+ });
+}
+
+#[bench]
+fn lookup_hashmap_10_000_exist_string(b: &mut Bencher) {
+ let c = 10_000;
+ let mut map = HashMap::with_capacity(c);
+ let keys = shuffled_keys(0..c);
+ for &key in &keys {
+ map.insert(key.to_string(), 1);
+ }
+ let lookups = (5000..c).map(|x| x.to_string()).collect::<Vec<_>>();
+ b.iter(|| {
+ let mut found = 0;
+ for key in &lookups {
+ found += map.get(key).is_some() as i32;
+ }
+ found
+ });
+}
+
+#[bench]
+fn lookup_hashmap_10_000_exist_string_oneshot(b: &mut Bencher) {
+ let c = 10_000;
+ let mut map = HashMap::with_capacity(c);
+ let keys = shuffled_keys(0..c);
+ for &key in &keys {
+ map.insert(OneShot(key.to_string()), 1);
+ }
+ let lookups = (5000..c)
+ .map(|x| OneShot(x.to_string()))
+ .collect::<Vec<_>>();
+ b.iter(|| {
+ let mut found = 0;
+ for key in &lookups {
+ found += map.get(key).is_some() as i32;
+ }
+ found
+ });
+}
+
+#[bench]
+fn lookup_indexmap_10_000_exist_string(b: &mut Bencher) {
+ let c = 10_000;
+ let mut map = IndexMap::with_capacity(c);
+ let keys = shuffled_keys(0..c);
+ for &key in &keys {
+ map.insert(key.to_string(), 1);
+ }
+ let lookups = (5000..c).map(|x| x.to_string()).collect::<Vec<_>>();
+ b.iter(|| {
+ let mut found = 0;
+ for key in &lookups {
+ found += map.get(key).is_some() as i32;
+ }
+ found
+ });
+}
+
+#[bench]
+fn lookup_indexmap_10_000_exist_string_oneshot(b: &mut Bencher) {
+ let c = 10_000;
+ let mut map = IndexMap::with_capacity(c);
+ let keys = shuffled_keys(0..c);
+ for &key in &keys {
+ map.insert(OneShot(key.to_string()), 1);
+ }
+ let lookups = (5000..c)
+ .map(|x| OneShot(x.to_string()))
+ .collect::<Vec<_>>();
+ b.iter(|| {
+ let mut found = 0;
+ for key in &lookups {
+ found += map.get(key).is_some() as i32;
+ }
+ found
+ });
+}
diff --git a/vendor/indexmap-1.9.3/build.rs b/vendor/indexmap-1.9.3/build.rs
new file mode 100644
index 0000000..9f9fa05
--- /dev/null
+++ b/vendor/indexmap-1.9.3/build.rs
@@ -0,0 +1,8 @@
+fn main() {
+ // If "std" is explicitly requested, don't bother probing the target for it.
+ match std::env::var_os("CARGO_FEATURE_STD") {
+ Some(_) => autocfg::emit("has_std"),
+ None => autocfg::new().emit_sysroot_crate("std"),
+ }
+ autocfg::rerun_path("build.rs");
+}
diff --git a/vendor/indexmap-1.9.3/src/arbitrary.rs b/vendor/indexmap-1.9.3/src/arbitrary.rs
new file mode 100644
index 0000000..1347c8b
--- /dev/null
+++ b/vendor/indexmap-1.9.3/src/arbitrary.rs
@@ -0,0 +1,75 @@
+#[cfg(feature = "arbitrary")]
+mod impl_arbitrary {
+ use crate::{IndexMap, IndexSet};
+ use arbitrary::{Arbitrary, Result, Unstructured};
+ use core::hash::{BuildHasher, Hash};
+
+ impl<'a, K, V, S> Arbitrary<'a> for IndexMap<K, V, S>
+ where
+ K: Arbitrary<'a> + Hash + Eq,
+ V: Arbitrary<'a>,
+ S: BuildHasher + Default,
+ {
+ fn arbitrary(u: &mut Unstructured<'a>) -> Result<Self> {
+ u.arbitrary_iter()?.collect()
+ }
+
+ fn arbitrary_take_rest(u: Unstructured<'a>) -> Result<Self> {
+ u.arbitrary_take_rest_iter()?.collect()
+ }
+ }
+
+ impl<'a, T, S> Arbitrary<'a> for IndexSet<T, S>
+ where
+ T: Arbitrary<'a> + Hash + Eq,
+ S: BuildHasher + Default,
+ {
+ fn arbitrary(u: &mut Unstructured<'a>) -> Result<Self> {
+ u.arbitrary_iter()?.collect()
+ }
+
+ fn arbitrary_take_rest(u: Unstructured<'a>) -> Result<Self> {
+ u.arbitrary_take_rest_iter()?.collect()
+ }
+ }
+}
+
+#[cfg(feature = "quickcheck")]
+mod impl_quickcheck {
+ use crate::{IndexMap, IndexSet};
+ use alloc::boxed::Box;
+ use alloc::vec::Vec;
+ use core::hash::{BuildHasher, Hash};
+ use quickcheck::{Arbitrary, Gen};
+
+ impl<K, V, S> Arbitrary for IndexMap<K, V, S>
+ where
+ K: Arbitrary + Hash + Eq,
+ V: Arbitrary,
+ S: BuildHasher + Default + Clone + 'static,
+ {
+ fn arbitrary(g: &mut Gen) -> Self {
+ Self::from_iter(Vec::arbitrary(g))
+ }
+
+ fn shrink(&self) -> Box<dyn Iterator<Item = Self>> {
+ let vec = Vec::from_iter(self.clone());
+ Box::new(vec.shrink().map(Self::from_iter))
+ }
+ }
+
+ impl<T, S> Arbitrary for IndexSet<T, S>
+ where
+ T: Arbitrary + Hash + Eq,
+ S: BuildHasher + Default + Clone + 'static,
+ {
+ fn arbitrary(g: &mut Gen) -> Self {
+ Self::from_iter(Vec::arbitrary(g))
+ }
+
+ fn shrink(&self) -> Box<dyn Iterator<Item = Self>> {
+ let vec = Vec::from_iter(self.clone());
+ Box::new(vec.shrink().map(Self::from_iter))
+ }
+ }
+}
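
With the optional `quickcheck` feature enabled, the impls above let whole maps and sets be generated as property-test inputs, much as the crate's own `tests/quick.rs` does. A minimal, illustrative tests/-style sketch (not part of the vendored sources; assumes `quickcheck` 1.0 as a dev-dependency):

```rust
use indexmap::IndexMap;
use quickcheck::quickcheck;

#[test]
fn generated_map_contains_its_own_keys() {
    // The property: every key the generated map yields by iteration
    // must also be found by hashed lookup.
    fn prop(map: IndexMap<u8, u8>) -> bool {
        map.keys().all(|key| map.contains_key(key))
    }
    quickcheck(prop as fn(IndexMap<u8, u8>) -> bool);
}
```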
diff --git a/vendor/indexmap-1.9.3/src/equivalent.rs b/vendor/indexmap-1.9.3/src/equivalent.rs
new file mode 100644
index 0000000..ad6635f
--- /dev/null
+++ b/vendor/indexmap-1.9.3/src/equivalent.rs
@@ -0,0 +1,27 @@
+use core::borrow::Borrow;
+
+/// Key equivalence trait.
+///
+/// This trait allows hash table lookup to be customized.
+/// It has one blanket implementation that uses the regular `Borrow` solution,
+/// just like `HashMap` and `BTreeMap` do, so that you can pass `&str` to lookup
+/// into a map with `String` keys and so on.
+///
+/// # Contract
+///
+/// The implementor **must** hash like `K`, if it is hashable.
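+///
+/// # Example
+///
+/// A minimal sketch of the blanket `Borrow`-based implementation in action;
+/// the values here are arbitrary and purely illustrative.
+///
+/// ```
+/// use indexmap::Equivalent;
+///
+/// let owned = String::from("key");
+/// // `str` is `Equivalent<String>` because `String: Borrow<str>`.
+/// assert!("key".equivalent(&owned));
+/// assert!(!"other".equivalent(&owned));
+/// ```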
+pub trait Equivalent<K: ?Sized> {
+ /// Compare self to `key` and return `true` if they are equal.
+ fn equivalent(&self, key: &K) -> bool;
+}
+
+impl<Q: ?Sized, K: ?Sized> Equivalent<K> for Q
+where
+ Q: Eq,
+ K: Borrow<Q>,
+{
+ #[inline]
+ fn equivalent(&self, key: &K) -> bool {
+ *self == *key.borrow()
+ }
+}
diff --git a/vendor/indexmap-1.9.3/src/lib.rs b/vendor/indexmap-1.9.3/src/lib.rs
new file mode 100644
index 0000000..6e94936
--- /dev/null
+++ b/vendor/indexmap-1.9.3/src/lib.rs
@@ -0,0 +1,194 @@
+// We *mostly* avoid unsafe code, but `map::core::raw` allows it to use `RawTable` buckets.
+#![deny(unsafe_code)]
+#![warn(rust_2018_idioms)]
+#![doc(html_root_url = "https://docs.rs/indexmap/1/")]
+#![no_std]
+
+//! [`IndexMap`] is a hash table where the iteration order of the key-value
+//! pairs is independent of the hash values of the keys.
+//!
+//! [`IndexSet`] is a corresponding hash set using the same implementation and
+//! with similar properties.
+//!
+//! [`IndexMap`]: map/struct.IndexMap.html
+//! [`IndexSet`]: set/struct.IndexSet.html
+//!
+//!
+//! ### Feature Highlights
+//!
+//! [`IndexMap`] and [`IndexSet`] are drop-in compatible with the std `HashMap`
+//! and `HashSet`, but they also have some features of note:
+//!
+//! - The ordering semantics (see their documentation for details)
+//! - Sorting methods and the [`.pop()`][IndexMap::pop] methods.
+//! - The [`Equivalent`] trait, which offers more flexible equality definitions
+//! between borrowed and owned versions of keys.
+//! - The [`MutableKeys`][map::MutableKeys] trait, which gives opt-in mutable
+//! access to hash map keys.
+//!
+//! ### Alternate Hashers
+//!
+//! [`IndexMap`] and [`IndexSet`] have a default hasher type `S = RandomState`,
+//! just like the standard `HashMap` and `HashSet`, which is resistant to
+//! HashDoS attacks but not the most performant. Type aliases can make it easier
+//! to use alternate hashers:
+//!
+//! ```
+//! use fnv::FnvBuildHasher;
+//! use fxhash::FxBuildHasher;
+//! use indexmap::{IndexMap, IndexSet};
+//!
+//! type FnvIndexMap<K, V> = IndexMap<K, V, FnvBuildHasher>;
+//! type FnvIndexSet<T> = IndexSet<T, FnvBuildHasher>;
+//!
+//! type FxIndexMap<K, V> = IndexMap<K, V, FxBuildHasher>;
+//! type FxIndexSet<T> = IndexSet<T, FxBuildHasher>;
+//!
+//! let std: IndexSet<i32> = (0..100).collect();
+//! let fnv: FnvIndexSet<i32> = (0..100).collect();
+//! let fx: FxIndexSet<i32> = (0..100).collect();
+//! assert_eq!(std, fnv);
+//! assert_eq!(std, fx);
+//! ```
+//!
+//! ### Rust Version
+//!
+//! This version of indexmap requires Rust 1.56 or later.
+//!
+//! The indexmap 1.x release series will use a carefully considered version
+//! upgrade policy, where in a later 1.x version, we will raise the minimum
+//! required Rust version.
+//!
+//! ## No Standard Library Targets
+//!
+//! This crate supports being built without `std`, requiring `alloc` instead.
+//! This is enabled automatically when `std` is detected to be unavailable;
+//! there is no crate feature to enable or disable it. You can test this by
+//! building for a target without `std`.
+//!
+//! - Creating maps and sets using [`new`][IndexMap::new] and
+//! [`with_capacity`][IndexMap::with_capacity] is unavailable without `std`.
+//! Use methods [`IndexMap::default`][def],
+//! [`with_hasher`][IndexMap::with_hasher],
+//! [`with_capacity_and_hasher`][IndexMap::with_capacity_and_hasher] instead.
+//! A no-std compatible hasher will be needed as well, for example
+//! from the crate `twox-hash`.
+//! - Macros [`indexmap!`] and [`indexset!`] are unavailable without `std`.
+//!
+//! [def]: map/struct.IndexMap.html#impl-Default
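+//!
+//! A minimal construction sketch for such targets; it reuses the `fxhash`
+//! hasher from the example above and is purely illustrative:
+//!
+//! ```
+//! use fxhash::FxBuildHasher;
+//! use indexmap::IndexMap;
+//!
+//! // Construct without `new()`/`with_capacity()`, as required on no-std targets.
+//! let mut map: IndexMap<&str, i32, FxBuildHasher> = IndexMap::with_hasher(FxBuildHasher::default());
+//! map.insert("a", 1);
+//! assert_eq!(map["a"], 1);
+//! ```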
+
+extern crate alloc;
+
+#[cfg(has_std)]
+#[macro_use]
+extern crate std;
+
+use alloc::vec::{self, Vec};
+
+mod arbitrary;
+#[macro_use]
+mod macros;
+mod equivalent;
+mod mutable_keys;
+#[cfg(feature = "serde")]
+mod serde;
+#[cfg(feature = "serde")]
+pub mod serde_seq;
+mod util;
+
+pub mod map;
+pub mod set;
+
+// Placed after `map` and `set` so new `rayon` methods on the types
+// are documented after the "normal" methods.
+#[cfg(feature = "rayon")]
+mod rayon;
+
+#[cfg(feature = "rustc-rayon")]
+mod rustc;
+
+pub use crate::equivalent::Equivalent;
+pub use crate::map::IndexMap;
+pub use crate::set::IndexSet;
+
+// shared private items
+
+/// Hash value newtype. Not larger than usize, since anything larger
+/// isn't used for selecting position anyway.
+#[derive(Clone, Copy, Debug, PartialEq)]
+struct HashValue(usize);
+
+impl HashValue {
+ #[inline(always)]
+ fn get(self) -> u64 {
+ self.0 as u64
+ }
+}
+
+#[derive(Copy, Debug)]
+struct Bucket<K, V> {
+ hash: HashValue,
+ key: K,
+ value: V,
+}
+
+impl<K, V> Clone for Bucket<K, V>
+where
+ K: Clone,
+ V: Clone,
+{
+ fn clone(&self) -> Self {
+ Bucket {
+ hash: self.hash,
+ key: self.key.clone(),
+ value: self.value.clone(),
+ }
+ }
+
+ fn clone_from(&mut self, other: &Self) {
+ self.hash = other.hash;
+ self.key.clone_from(&other.key);
+ self.value.clone_from(&other.value);
+ }
+}
+
+impl<K, V> Bucket<K, V> {
+ // field accessors -- used for `f` instead of closures in `.map(f)`
+ fn key_ref(&self) -> &K {
+ &self.key
+ }
+ fn value_ref(&self) -> &V {
+ &self.value
+ }
+ fn value_mut(&mut self) -> &mut V {
+ &mut self.value
+ }
+ fn key(self) -> K {
+ self.key
+ }
+ fn value(self) -> V {
+ self.value
+ }
+ fn key_value(self) -> (K, V) {
+ (self.key, self.value)
+ }
+ fn refs(&self) -> (&K, &V) {
+ (&self.key, &self.value)
+ }
+ fn ref_mut(&mut self) -> (&K, &mut V) {
+ (&self.key, &mut self.value)
+ }
+ fn muts(&mut self) -> (&mut K, &mut V) {
+ (&mut self.key, &mut self.value)
+ }
+}
+
+trait Entries {
+ type Entry;
+ fn into_entries(self) -> Vec<Self::Entry>;
+ fn as_entries(&self) -> &[Self::Entry];
+ fn as_entries_mut(&mut self) -> &mut [Self::Entry];
+ fn with_entries<F>(&mut self, f: F)
+ where
+ F: FnOnce(&mut [Self::Entry]);
+}
diff --git a/vendor/indexmap-1.9.3/src/macros.rs b/vendor/indexmap-1.9.3/src/macros.rs
new file mode 100644
index 0000000..ca26287
--- /dev/null
+++ b/vendor/indexmap-1.9.3/src/macros.rs
@@ -0,0 +1,178 @@
+#[cfg(has_std)]
+#[macro_export]
+/// Create an `IndexMap` from a list of key-value pairs
+///
+/// ## Example
+///
+/// ```
+/// use indexmap::indexmap;
+///
+/// let map = indexmap!{
+/// "a" => 1,
+/// "b" => 2,
+/// };
+/// assert_eq!(map["a"], 1);
+/// assert_eq!(map["b"], 2);
+/// assert_eq!(map.get("c"), None);
+///
+/// // "a" is the first key
+/// assert_eq!(map.keys().next(), Some(&"a"));
+/// ```
+macro_rules! indexmap {
+ (@single $($x:tt)*) => (());
+ (@count $($rest:expr),*) => (<[()]>::len(&[$($crate::indexmap!(@single $rest)),*]));
+
+ ($($key:expr => $value:expr,)+) => { $crate::indexmap!($($key => $value),+) };
+ ($($key:expr => $value:expr),*) => {
+ {
+ let _cap = $crate::indexmap!(@count $($key),*);
+ let mut _map = $crate::IndexMap::with_capacity(_cap);
+ $(
+ _map.insert($key, $value);
+ )*
+ _map
+ }
+ };
+}
+
+#[cfg(has_std)]
+#[macro_export]
+/// Create an `IndexSet` from a list of values
+///
+/// ## Example
+///
+/// ```
+/// use indexmap::indexset;
+///
+/// let set = indexset!{
+/// "a",
+/// "b",
+/// };
+/// assert!(set.contains("a"));
+/// assert!(set.contains("b"));
+/// assert!(!set.contains("c"));
+///
+/// // "a" is the first value
+/// assert_eq!(set.iter().next(), Some(&"a"));
+/// ```
+macro_rules! indexset {
+ (@single $($x:tt)*) => (());
+ (@count $($rest:expr),*) => (<[()]>::len(&[$($crate::indexset!(@single $rest)),*]));
+
+ ($($value:expr,)+) => { $crate::indexset!($($value),+) };
+ ($($value:expr),*) => {
+ {
+ let _cap = $crate::indexset!(@count $($value),*);
+ let mut _set = $crate::IndexSet::with_capacity(_cap);
+ $(
+ _set.insert($value);
+ )*
+ _set
+ }
+ };
+}
+
+// generate all the Iterator methods by just forwarding to the underlying
+// self.iter and mapping its element.
+macro_rules! iterator_methods {
+ // $map_elt is the mapping function from the underlying iterator's element
+ // same mapping function for both options and iterators
+ ($map_elt:expr) => {
+ fn next(&mut self) -> Option<Self::Item> {
+ self.iter.next().map($map_elt)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+
+ fn count(self) -> usize {
+ self.iter.len()
+ }
+
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ self.iter.nth(n).map($map_elt)
+ }
+
+ fn last(mut self) -> Option<Self::Item> {
+ self.next_back()
+ }
+
+ fn collect<C>(self) -> C
+ where
+ C: FromIterator<Self::Item>,
+ {
+ // NB: forwarding this directly to standard iterators will
+ // allow it to leverage unstable traits like `TrustedLen`.
+ self.iter.map($map_elt).collect()
+ }
+ };
+}
+
+macro_rules! double_ended_iterator_methods {
+ // $map_elt is the mapping function from the underlying iterator's element
+ // same mapping function for both options and iterators
+ ($map_elt:expr) => {
+ fn next_back(&mut self) -> Option<Self::Item> {
+ self.iter.next_back().map($map_elt)
+ }
+
+ fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
+ self.iter.nth_back(n).map($map_elt)
+ }
+ };
+}
+
+// generate `ParallelIterator` methods by just forwarding to the underlying
+// self.entries and mapping its elements.
+#[cfg(any(feature = "rayon", feature = "rustc-rayon"))]
+macro_rules! parallel_iterator_methods {
+ // $map_elt is the mapping function from the underlying iterator's element
+ ($map_elt:expr) => {
+ fn drive_unindexed<C>(self, consumer: C) -> C::Result
+ where
+ C: UnindexedConsumer<Self::Item>,
+ {
+ self.entries
+ .into_par_iter()
+ .map($map_elt)
+ .drive_unindexed(consumer)
+ }
+
+ // NB: This allows indexed collection, e.g. directly into a `Vec`, but the
+ // underlying iterator must really be indexed. We should remove this if we
+ // start having tombstones that must be filtered out.
+ fn opt_len(&self) -> Option<usize> {
+ Some(self.entries.len())
+ }
+ };
+}
+
+// generate `IndexedParallelIterator` methods by just forwarding to the underlying
+// self.entries and mapping its elements.
+#[cfg(any(feature = "rayon", feature = "rustc-rayon"))]
+macro_rules! indexed_parallel_iterator_methods {
+ // $map_elt is the mapping function from the underlying iterator's element
+ ($map_elt:expr) => {
+ fn drive<C>(self, consumer: C) -> C::Result
+ where
+ C: Consumer<Self::Item>,
+ {
+ self.entries.into_par_iter().map($map_elt).drive(consumer)
+ }
+
+ fn len(&self) -> usize {
+ self.entries.len()
+ }
+
+ fn with_producer<CB>(self, callback: CB) -> CB::Output
+ where
+ CB: ProducerCallback<Self::Item>,
+ {
+ self.entries
+ .into_par_iter()
+ .map($map_elt)
+ .with_producer(callback)
+ }
+ };
+}
diff --git a/vendor/indexmap-1.9.3/src/map.rs b/vendor/indexmap-1.9.3/src/map.rs
new file mode 100644
index 0000000..d39448d
--- /dev/null
+++ b/vendor/indexmap-1.9.3/src/map.rs
@@ -0,0 +1,1947 @@
+//! `IndexMap` is a hash table where the iteration order of the key-value
+//! pairs is independent of the hash values of the keys.
+
+mod core;
+
+pub use crate::mutable_keys::MutableKeys;
+
+#[cfg(feature = "rayon")]
+pub use crate::rayon::map as rayon;
+
+use crate::vec::{self, Vec};
+use ::core::cmp::Ordering;
+use ::core::fmt;
+use ::core::hash::{BuildHasher, Hash, Hasher};
+use ::core::iter::FusedIterator;
+use ::core::ops::{Index, IndexMut, RangeBounds};
+use ::core::slice::{Iter as SliceIter, IterMut as SliceIterMut};
+
+#[cfg(has_std)]
+use std::collections::hash_map::RandomState;
+
+use self::core::IndexMapCore;
+use crate::equivalent::Equivalent;
+use crate::util::third;
+use crate::{Bucket, Entries, HashValue};
+
+pub use self::core::{Entry, OccupiedEntry, VacantEntry};
+
+/// A hash table where the iteration order of the key-value pairs is independent
+/// of the hash values of the keys.
+///
+/// The interface is closely compatible with the standard `HashMap`, but also
+/// has additional features.
+///
+/// # Order
+///
+/// The key-value pairs have a consistent order that is determined by
+/// the sequence of insertion and removal calls on the map. The order does
+/// not depend on the keys or the hash function at all.
+///
+/// All iterators traverse the map in *this order*.
+///
+/// The insertion order is preserved, with **notable exceptions** like the
+/// `.remove()` or `.swap_remove()` methods. Methods such as `.sort_by()` of
+/// course result in a new order, depending on the sorting order.
+///
+/// # Indices
+///
+/// The key-value pairs are indexed in a compact range without holes in the
+/// range `0..self.len()`. For example, the method `.get_full` looks up the
+/// index for a key, and the method `.get_index` looks up the key-value pair by
+/// index.
+///
+/// # Examples
+///
+/// ```
+/// use indexmap::IndexMap;
+///
+/// // count the frequency of each letter in a sentence.
+/// let mut letters = IndexMap::new();
+/// for ch in "a short treatise on fungi".chars() {
+/// *letters.entry(ch).or_insert(0) += 1;
+/// }
+///
+/// assert_eq!(letters[&'s'], 2);
+/// assert_eq!(letters[&'t'], 3);
+/// assert_eq!(letters[&'u'], 1);
+/// assert_eq!(letters.get(&'y'), None);
+/// ```
+#[cfg(has_std)]
+pub struct IndexMap<K, V, S = RandomState> {
+ pub(crate) core: IndexMapCore<K, V>,
+ hash_builder: S,
+}
+#[cfg(not(has_std))]
+pub struct IndexMap<K, V, S> {
+ pub(crate) core: IndexMapCore<K, V>,
+ hash_builder: S,
+}
+
+impl<K, V, S> Clone for IndexMap<K, V, S>
+where
+ K: Clone,
+ V: Clone,
+ S: Clone,
+{
+ fn clone(&self) -> Self {
+ IndexMap {
+ core: self.core.clone(),
+ hash_builder: self.hash_builder.clone(),
+ }
+ }
+
+ fn clone_from(&mut self, other: &Self) {
+ self.core.clone_from(&other.core);
+ self.hash_builder.clone_from(&other.hash_builder);
+ }
+}
+
+impl<K, V, S> Entries for IndexMap<K, V, S> {
+ type Entry = Bucket<K, V>;
+
+ #[inline]
+ fn into_entries(self) -> Vec<Self::Entry> {
+ self.core.into_entries()
+ }
+
+ #[inline]
+ fn as_entries(&self) -> &[Self::Entry] {
+ self.core.as_entries()
+ }
+
+ #[inline]
+ fn as_entries_mut(&mut self) -> &mut [Self::Entry] {
+ self.core.as_entries_mut()
+ }
+
+ fn with_entries<F>(&mut self, f: F)
+ where
+ F: FnOnce(&mut [Self::Entry]),
+ {
+ self.core.with_entries(f);
+ }
+}
+
+impl<K, V, S> fmt::Debug for IndexMap<K, V, S>
+where
+ K: fmt::Debug,
+ V: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ if cfg!(not(feature = "test_debug")) {
+ f.debug_map().entries(self.iter()).finish()
+ } else {
+ // Let the inner `IndexMapCore` print all of its details
+ f.debug_struct("IndexMap")
+ .field("core", &self.core)
+ .finish()
+ }
+ }
+}
+
+#[cfg(has_std)]
+impl<K, V> IndexMap<K, V> {
+ /// Create a new map. (Does not allocate.)
+ #[inline]
+ pub fn new() -> Self {
+ Self::with_capacity(0)
+ }
+
+ /// Create a new map with capacity for `n` key-value pairs. (Does not
+ /// allocate if `n` is zero.)
+ ///
+ /// Computes in **O(n)** time.
+ #[inline]
+ pub fn with_capacity(n: usize) -> Self {
+ Self::with_capacity_and_hasher(n, <_>::default())
+ }
+}
+
+impl<K, V, S> IndexMap<K, V, S> {
+ /// Create a new map with capacity for `n` key-value pairs. (Does not
+ /// allocate if `n` is zero.)
+ ///
+ /// Computes in **O(n)** time.
+ #[inline]
+ pub fn with_capacity_and_hasher(n: usize, hash_builder: S) -> Self {
+ if n == 0 {
+ Self::with_hasher(hash_builder)
+ } else {
+ IndexMap {
+ core: IndexMapCore::with_capacity(n),
+ hash_builder,
+ }
+ }
+ }
+
+ /// Create a new map with `hash_builder`.
+ ///
+ /// This function is `const`, so it
+ /// can be called in `static` contexts.
+ pub const fn with_hasher(hash_builder: S) -> Self {
+ IndexMap {
+ core: IndexMapCore::new(),
+ hash_builder,
+ }
+ }
+
+ /// Computes in **O(1)** time.
+ pub fn capacity(&self) -> usize {
+ self.core.capacity()
+ }
+
+ /// Return a reference to the map's `BuildHasher`.
+ pub fn hasher(&self) -> &S {
+ &self.hash_builder
+ }
+
+ /// Return the number of key-value pairs in the map.
+ ///
+ /// Computes in **O(1)** time.
+ #[inline]
+ pub fn len(&self) -> usize {
+ self.core.len()
+ }
+
+ /// Returns true if the map contains no elements.
+ ///
+ /// Computes in **O(1)** time.
+ #[inline]
+ pub fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+
+ /// Return an iterator over the key-value pairs of the map, in their order
+ pub fn iter(&self) -> Iter<'_, K, V> {
+ Iter {
+ iter: self.as_entries().iter(),
+ }
+ }
+
+ /// Return an iterator over the key-value pairs of the map, in their order
+ pub fn iter_mut(&mut self) -> IterMut<'_, K, V> {
+ IterMut {
+ iter: self.as_entries_mut().iter_mut(),
+ }
+ }
+
+ /// Return an iterator over the keys of the map, in their order
+ pub fn keys(&self) -> Keys<'_, K, V> {
+ Keys {
+ iter: self.as_entries().iter(),
+ }
+ }
+
+ /// Return an owning iterator over the keys of the map, in their order
+ pub fn into_keys(self) -> IntoKeys<K, V> {
+ IntoKeys {
+ iter: self.into_entries().into_iter(),
+ }
+ }
+
+ /// Return an iterator over the values of the map, in their order
+ pub fn values(&self) -> Values<'_, K, V> {
+ Values {
+ iter: self.as_entries().iter(),
+ }
+ }
+
+ /// Return an iterator over mutable references to the values of the map,
+ /// in their order
+ pub fn values_mut(&mut self) -> ValuesMut<'_, K, V> {
+ ValuesMut {
+ iter: self.as_entries_mut().iter_mut(),
+ }
+ }
+
+ /// Return an owning iterator over the values of the map, in their order
+ pub fn into_values(self) -> IntoValues<K, V> {
+ IntoValues {
+ iter: self.into_entries().into_iter(),
+ }
+ }
+
+ /// Remove all key-value pairs in the map, while preserving its capacity.
+ ///
+ /// Computes in **O(n)** time.
+ pub fn clear(&mut self) {
+ self.core.clear();
+ }
+
+ /// Shortens the map, keeping the first `len` elements and dropping the rest.
+ ///
+ /// If `len` is greater than the map's current length, this has no effect.
+ pub fn truncate(&mut self, len: usize) {
+ self.core.truncate(len);
+ }
+
+ /// Clears the `IndexMap` in the given index range, returning those
+ /// key-value pairs as a drain iterator.
+ ///
+ /// The range may be any type that implements `RangeBounds<usize>`,
+ /// including all of the `std::ops::Range*` types, or even a tuple pair of
+ /// `Bound` start and end values. To drain the map entirely, use `RangeFull`
+ /// like `map.drain(..)`.
+ ///
+ /// This shifts down all entries following the drained range to fill the
+ /// gap, and keeps the allocated memory for reuse.
+ ///
+ /// ***Panics*** if the starting point is greater than the end point or if
+ /// the end point is greater than the length of the map.
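+ ///
+ /// # Examples
+ ///
+ /// A brief illustrative sketch; the keys and values are arbitrary.
+ ///
+ /// ```
+ /// use indexmap::IndexMap;
+ ///
+ /// let mut map: IndexMap<_, _> = (0..5).map(|i| (i, i * 10)).collect();
+ /// let drained: Vec<_> = map.drain(1..3).collect();
+ /// assert_eq!(drained, vec![(1, 10), (2, 20)]);
+ /// // Entries after the drained range shift down to fill the gap.
+ /// assert_eq!(map.keys().copied().collect::<Vec<_>>(), vec![0, 3, 4]);
+ /// ```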
+ pub fn drain<R>(&mut self, range: R) -> Drain<'_, K, V>
+ where
+ R: RangeBounds<usize>,
+ {
+ Drain {
+ iter: self.core.drain(range),
+ }
+ }
+
+ /// Splits the collection into two at the given index.
+ ///
+ /// Returns a newly allocated map containing the elements in the range
+ /// `[at, len)`. After the call, the original map will be left containing
+ /// the elements `[0, at)` with its previous capacity unchanged.
+ ///
+ /// ***Panics*** if `at > len`.
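+ ///
+ /// # Examples
+ ///
+ /// A minimal illustrative sketch:
+ ///
+ /// ```
+ /// use indexmap::IndexMap;
+ ///
+ /// let mut map = IndexMap::from([(1, "a"), (2, "b"), (3, "c"), (4, "d")]);
+ /// let tail = map.split_off(2);
+ /// assert_eq!(map.keys().copied().collect::<Vec<_>>(), vec![1, 2]);
+ /// assert_eq!(tail.keys().copied().collect::<Vec<_>>(), vec![3, 4]);
+ /// ```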
+ pub fn split_off(&mut self, at: usize) -> Self
+ where
+ S: Clone,
+ {
+ Self {
+ core: self.core.split_off(at),
+ hash_builder: self.hash_builder.clone(),
+ }
+ }
+}
+
+impl<K, V, S> IndexMap<K, V, S>
+where
+ K: Hash + Eq,
+ S: BuildHasher,
+{
+ /// Reserve capacity for `additional` more key-value pairs.
+ ///
+ /// Computes in **O(n)** time.
+ pub fn reserve(&mut self, additional: usize) {
+ self.core.reserve(additional);
+ }
+
+ /// Shrink the capacity of the map as much as possible.
+ ///
+ /// Computes in **O(n)** time.
+ pub fn shrink_to_fit(&mut self) {
+ self.core.shrink_to(0);
+ }
+
+ /// Shrink the capacity of the map with a lower limit.
+ ///
+ /// Computes in **O(n)** time.
+ pub fn shrink_to(&mut self, min_capacity: usize) {
+ self.core.shrink_to(min_capacity);
+ }
+
+ fn hash<Q: ?Sized + Hash>(&self, key: &Q) -> HashValue {
+ let mut h = self.hash_builder.build_hasher();
+ key.hash(&mut h);
+ HashValue(h.finish() as usize)
+ }
+
+ /// Insert a key-value pair in the map.
+ ///
+ /// If an equivalent key already exists in the map: the key remains in its
+ /// place in the order, its corresponding value is updated with `value`, and
+ /// the older value is returned inside `Some(_)`.
+ ///
+ /// If no equivalent key existed in the map: the new key-value pair is
+ /// inserted, last in order, and `None` is returned.
+ ///
+ /// Computes in **O(1)** time (amortized average).
+ ///
+ /// See also [`entry`](#method.entry) if you want to insert *or* modify,
+ /// or if you need to get the index of the corresponding key-value pair.
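+ ///
+ /// # Examples
+ ///
+ /// A short sketch of the replace-in-place behaviour (illustrative values):
+ ///
+ /// ```
+ /// use indexmap::IndexMap;
+ ///
+ /// let mut map = IndexMap::new();
+ /// assert_eq!(map.insert("a", 1), None);
+ /// assert_eq!(map.insert("a", 2), Some(1)); // "a" keeps its original position
+ /// assert_eq!(map.get_index(0), Some((&"a", &2)));
+ /// ```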
+ pub fn insert(&mut self, key: K, value: V) -> Option<V> {
+ self.insert_full(key, value).1
+ }
+
+ /// Insert a key-value pair in the map, and get their index.
+ ///
+ /// If an equivalent key already exists in the map: the key remains in its
+ /// place in the order, its corresponding value is updated with `value`, and
+ /// the older value is returned inside `(index, Some(_))`.
+ ///
+ /// If no equivalent key existed in the map: the new key-value pair is
+ /// inserted, last in order, and `(index, None)` is returned.
+ ///
+ /// Computes in **O(1)** time (amortized average).
+ ///
+ /// See also [`entry`](#method.entry) if you want to insert *or* modify,
+ /// or if you need to get the index of the corresponding key-value pair.
+ pub fn insert_full(&mut self, key: K, value: V) -> (usize, Option<V>) {
+ let hash = self.hash(&key);
+ self.core.insert_full(hash, key, value)
+ }
+
+ /// Get the given key’s corresponding entry in the map for insertion and/or
+ /// in-place manipulation.
+ ///
+ /// Computes in **O(1)** time (amortized average).
+ pub fn entry(&mut self, key: K) -> Entry<'_, K, V> {
+ let hash = self.hash(&key);
+ self.core.entry(hash, key)
+ }
+
+ /// Return `true` if an equivalent to `key` exists in the map.
+ ///
+ /// Computes in **O(1)** time (average).
+ pub fn contains_key<Q: ?Sized>(&self, key: &Q) -> bool
+ where
+ Q: Hash + Equivalent<K>,
+ {
+ self.get_index_of(key).is_some()
+ }
+
+ /// Return a reference to the value stored for `key`, if it is present,
+ /// else `None`.
+ ///
+ /// Computes in **O(1)** time (average).
+ pub fn get<Q: ?Sized>(&self, key: &Q) -> Option<&V>
+ where
+ Q: Hash + Equivalent<K>,
+ {
+ if let Some(i) = self.get_index_of(key) {
+ let entry = &self.as_entries()[i];
+ Some(&entry.value)
+ } else {
+ None
+ }
+ }
+
+ /// Return references to the key-value pair stored for `key`,
+ /// if it is present, else `None`.
+ ///
+ /// Computes in **O(1)** time (average).
+ pub fn get_key_value<Q: ?Sized>(&self, key: &Q) -> Option<(&K, &V)>
+ where
+ Q: Hash + Equivalent<K>,
+ {
+ if let Some(i) = self.get_index_of(key) {
+ let entry = &self.as_entries()[i];
+ Some((&entry.key, &entry.value))
+ } else {
+ None
+ }
+ }
+
+ /// Return the item's index, key, and value, if it exists in the map
+ pub fn get_full<Q: ?Sized>(&self, key: &Q) -> Option<(usize, &K, &V)>
+ where
+ Q: Hash + Equivalent<K>,
+ {
+ if let Some(i) = self.get_index_of(key) {
+ let entry = &self.as_entries()[i];
+ Some((i, &entry.key, &entry.value))
+ } else {
+ None
+ }
+ }
+
+ /// Return item index, if it exists in the map
+ ///
+ /// Computes in **O(1)** time (average).
+ pub fn get_index_of<Q: ?Sized>(&self, key: &Q) -> Option<usize>
+ where
+ Q: Hash + Equivalent<K>,
+ {
+ if self.is_empty() {
+ None
+ } else {
+ let hash = self.hash(key);
+ self.core.get_index_of(hash, key)
+ }
+ }
+
+ /// Return a mutable reference to the value stored for `key`, if it is
+ /// present, else `None`.
+ ///
+ /// Computes in **O(1)** time (average).
+ pub fn get_mut<Q: ?Sized>(&mut self, key: &Q) -> Option<&mut V>
+ where
+ Q: Hash + Equivalent<K>,
+ {
+ if let Some(i) = self.get_index_of(key) {
+ let entry = &mut self.as_entries_mut()[i];
+ Some(&mut entry.value)
+ } else {
+ None
+ }
+ }
+
+ /// Return the item's index, key, and a mutable reference to its value, if
+ /// `key` is present, else `None`.
+ pub fn get_full_mut<Q: ?Sized>(&mut self, key: &Q) -> Option<(usize, &K, &mut V)>
+ where
+ Q: Hash + Equivalent<K>,
+ {
+ if let Some(i) = self.get_index_of(key) {
+ let entry = &mut self.as_entries_mut()[i];
+ Some((i, &entry.key, &mut entry.value))
+ } else {
+ None
+ }
+ }
+
+ pub(crate) fn get_full_mut2_impl<Q: ?Sized>(
+ &mut self,
+ key: &Q,
+ ) -> Option<(usize, &mut K, &mut V)>
+ where
+ Q: Hash + Equivalent<K>,
+ {
+ if let Some(i) = self.get_index_of(key) {
+ let entry = &mut self.as_entries_mut()[i];
+ Some((i, &mut entry.key, &mut entry.value))
+ } else {
+ None
+ }
+ }
+
+ /// Remove the key-value pair equivalent to `key` and return
+ /// its value.
+ ///
+ /// **NOTE:** This is equivalent to `.swap_remove(key)`; if you need to
+ /// preserve the order of the keys in the map, use `.shift_remove(key)`
+ /// instead.
+ ///
+ /// Computes in **O(1)** time (average).
+ pub fn remove<Q: ?Sized>(&mut self, key: &Q) -> Option<V>
+ where
+ Q: Hash + Equivalent<K>,
+ {
+ self.swap_remove(key)
+ }
+
+ /// Remove and return the key-value pair equivalent to `key`.
+ ///
+ /// **NOTE:** This is equivalent to `.swap_remove_entry(key)`; if you need to
+ /// preserve the order of the keys in the map, use `.shift_remove_entry(key)`
+ /// instead.
+ ///
+ /// Computes in **O(1)** time (average).
+ pub fn remove_entry<Q: ?Sized>(&mut self, key: &Q) -> Option<(K, V)>
+ where
+ Q: Hash + Equivalent<K>,
+ {
+ self.swap_remove_entry(key)
+ }
+
+ /// Remove the key-value pair equivalent to `key` and return
+ /// its value.
+ ///
+ /// Like `Vec::swap_remove`, the pair is removed by swapping it with the
+ /// last element of the map and popping it off. **This perturbs
+ /// the position of what used to be the last element!**
+ ///
+ /// Return `None` if `key` is not in map.
+ ///
+ /// Computes in **O(1)** time (average).
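+ ///
+ /// # Examples
+ ///
+ /// An illustrative sketch of how the last entry is moved into the hole:
+ ///
+ /// ```
+ /// use indexmap::IndexMap;
+ ///
+ /// let mut map = IndexMap::from([("a", 1), ("b", 2), ("c", 3), ("d", 4)]);
+ /// assert_eq!(map.swap_remove("b"), Some(2));
+ /// // "d" was swapped into "b"'s old position.
+ /// assert_eq!(map.keys().copied().collect::<Vec<_>>(), vec!["a", "d", "c"]);
+ /// ```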
+ pub fn swap_remove<Q: ?Sized>(&mut self, key: &Q) -> Option<V>
+ where
+ Q: Hash + Equivalent<K>,
+ {
+ self.swap_remove_full(key).map(third)
+ }
+
+ /// Remove and return the key-value pair equivalent to `key`.
+ ///
+ /// Like `Vec::swap_remove`, the pair is removed by swapping it with the
+ /// last element of the map and popping it off. **This perturbs
+ /// the position of what used to be the last element!**
+ ///
+ /// Return `None` if `key` is not in map.
+ ///
+ /// Computes in **O(1)** time (average).
+ pub fn swap_remove_entry<Q: ?Sized>(&mut self, key: &Q) -> Option<(K, V)>
+ where
+ Q: Hash + Equivalent<K>,
+ {
+ match self.swap_remove_full(key) {
+ Some((_, key, value)) => Some((key, value)),
+ None => None,
+ }
+ }
+
+ /// Remove the key-value pair equivalent to `key` and return it and
+ /// the index it had.
+ ///
+ /// Like `Vec::swap_remove`, the pair is removed by swapping it with the
+ /// last element of the map and popping it off. **This perturbs
+ /// the position of what used to be the last element!**
+ ///
+ /// Return `None` if `key` is not in map.
+ ///
+ /// Computes in **O(1)** time (average).
+ pub fn swap_remove_full<Q: ?Sized>(&mut self, key: &Q) -> Option<(usize, K, V)>
+ where
+ Q: Hash + Equivalent<K>,
+ {
+ if self.is_empty() {
+ return None;
+ }
+ let hash = self.hash(key);
+ self.core.swap_remove_full(hash, key)
+ }
+
+ /// Remove the key-value pair equivalent to `key` and return
+ /// its value.
+ ///
+ /// Like `Vec::remove`, the pair is removed by shifting all of the
+ /// elements that follow it, preserving their relative order.
+ /// **This perturbs the index of all of those elements!**
+ ///
+ /// Return `None` if `key` is not in map.
+ ///
+ /// Computes in **O(n)** time (average).
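+ ///
+ /// # Examples
+ ///
+ /// An illustrative sketch, contrasting with `swap_remove`:
+ ///
+ /// ```
+ /// use indexmap::IndexMap;
+ ///
+ /// let mut map = IndexMap::from([("a", 1), ("b", 2), ("c", 3), ("d", 4)]);
+ /// assert_eq!(map.shift_remove("b"), Some(2));
+ /// // The following entries shift down, preserving their relative order.
+ /// assert_eq!(map.keys().copied().collect::<Vec<_>>(), vec!["a", "c", "d"]);
+ /// ```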
+ pub fn shift_remove<Q: ?Sized>(&mut self, key: &Q) -> Option<V>
+ where
+ Q: Hash + Equivalent<K>,
+ {
+ self.shift_remove_full(key).map(third)
+ }
+
+ /// Remove and return the key-value pair equivalent to `key`.
+ ///
+ /// Like `Vec::remove`, the pair is removed by shifting all of the
+ /// elements that follow it, preserving their relative order.
+ /// **This perturbs the index of all of those elements!**
+ ///
+ /// Return `None` if `key` is not in map.
+ ///
+ /// Computes in **O(n)** time (average).
+ pub fn shift_remove_entry<Q: ?Sized>(&mut self, key: &Q) -> Option<(K, V)>
+ where
+ Q: Hash + Equivalent<K>,
+ {
+ match self.shift_remove_full(key) {
+ Some((_, key, value)) => Some((key, value)),
+ None => None,
+ }
+ }
+
+ /// Remove the key-value pair equivalent to `key` and return it and
+ /// the index it had.
+ ///
+ /// Like `Vec::remove`, the pair is removed by shifting all of the
+ /// elements that follow it, preserving their relative order.
+ /// **This perturbs the index of all of those elements!**
+ ///
+ /// Return `None` if `key` is not in map.
+ ///
+ /// Computes in **O(n)** time (average).
+ pub fn shift_remove_full<Q: ?Sized>(&mut self, key: &Q) -> Option<(usize, K, V)>
+ where
+ Q: Hash + Equivalent<K>,
+ {
+ if self.is_empty() {
+ return None;
+ }
+ let hash = self.hash(key);
+ self.core.shift_remove_full(hash, key)
+ }
+
+ /// Remove the last key-value pair
+ ///
+ /// This preserves the order of the remaining elements.
+ ///
+ /// Computes in **O(1)** time (average).
+ pub fn pop(&mut self) -> Option<(K, V)> {
+ self.core.pop()
+ }
+
+ /// Scan through each key-value pair in the map and keep those where the
+ /// closure `keep` returns `true`.
+ ///
+ /// The elements are visited in order, and remaining elements keep their
+ /// order.
+ ///
+ /// Computes in **O(n)** time (average).
+ pub fn retain<F>(&mut self, mut keep: F)
+ where
+ F: FnMut(&K, &mut V) -> bool,
+ {
+ self.core.retain_in_order(move |k, v| keep(k, v));
+ }
+
+ pub(crate) fn retain_mut<F>(&mut self, keep: F)
+ where
+ F: FnMut(&mut K, &mut V) -> bool,
+ {
+ self.core.retain_in_order(keep);
+ }
+
+ /// Sort the map’s key-value pairs by the default ordering of the keys.
+ ///
+ /// See [`sort_by`](Self::sort_by) for details.
+ pub fn sort_keys(&mut self)
+ where
+ K: Ord,
+ {
+ self.with_entries(move |entries| {
+ entries.sort_by(move |a, b| K::cmp(&a.key, &b.key));
+ });
+ }
+
+ /// Sort the map’s key-value pairs in place using the comparison
+ /// function `cmp`.
+ ///
+ /// The comparison function receives two key and value pairs to compare (you
+ /// can sort by keys or values or their combination as needed).
+ ///
+ /// Computes in **O(n log n + c)** time and **O(n)** space where *n* is
+ /// the length of the map and *c* the capacity. The sort is stable.
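+ ///
+ /// # Examples
+ ///
+ /// Sorting by value in descending order (an illustrative sketch):
+ ///
+ /// ```
+ /// use indexmap::IndexMap;
+ ///
+ /// let mut map = IndexMap::from([("a", 3), ("b", 1), ("c", 2)]);
+ /// map.sort_by(|_k1, v1, _k2, v2| v2.cmp(v1));
+ /// assert_eq!(map.values().copied().collect::<Vec<_>>(), vec![3, 2, 1]);
+ /// ```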
+ pub fn sort_by<F>(&mut self, mut cmp: F)
+ where
+ F: FnMut(&K, &V, &K, &V) -> Ordering,
+ {
+ self.with_entries(move |entries| {
+ entries.sort_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value));
+ });
+ }
+
+ /// Sort the key-value pairs of the map and return a by-value iterator of
+ /// the key-value pairs with the result.
+ ///
+ /// The sort is stable.
+ pub fn sorted_by<F>(self, mut cmp: F) -> IntoIter<K, V>
+ where
+ F: FnMut(&K, &V, &K, &V) -> Ordering,
+ {
+ let mut entries = self.into_entries();
+ entries.sort_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value));
+ IntoIter {
+ iter: entries.into_iter(),
+ }
+ }
+
+ /// Sort the map's key-value pairs by the default ordering of the keys, but
+ /// may not preserve the order of equal elements.
+ ///
+ /// See [`sort_unstable_by`](Self::sort_unstable_by) for details.
+ pub fn sort_unstable_keys(&mut self)
+ where
+ K: Ord,
+ {
+ self.with_entries(move |entries| {
+ entries.sort_unstable_by(move |a, b| K::cmp(&a.key, &b.key));
+ });
+ }
+
+ /// Sort the map's key-value pairs in place using the comparison function `cmp`, but
+ /// may not preserve the order of equal elements.
+ ///
+ /// The comparison function receives two key and value pairs to compare (you
+ /// can sort by keys or values or their combination as needed).
+ ///
+ /// Computes in **O(n log n + c)** time where *n* is
+ /// the length of the map and *c* is the capacity. The sort is unstable.
+ pub fn sort_unstable_by<F>(&mut self, mut cmp: F)
+ where
+ F: FnMut(&K, &V, &K, &V) -> Ordering,
+ {
+ self.with_entries(move |entries| {
+ entries.sort_unstable_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value));
+ });
+ }
+
+ /// Sort the key-value pairs of the map and return a by-value iterator of
+ /// the key-value pairs with the result.
+ ///
+ /// The sort is unstable.
+ #[inline]
+ pub fn sorted_unstable_by<F>(self, mut cmp: F) -> IntoIter<K, V>
+ where
+ F: FnMut(&K, &V, &K, &V) -> Ordering,
+ {
+ let mut entries = self.into_entries();
+ entries.sort_unstable_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value));
+ IntoIter {
+ iter: entries.into_iter(),
+ }
+ }
+
+ /// Reverses the order of the map’s key-value pairs in place.
+ ///
+ /// Computes in **O(n)** time and **O(1)** space.
+ pub fn reverse(&mut self) {
+ self.core.reverse()
+ }
+}
+
+impl<K, V, S> IndexMap<K, V, S> {
+ /// Get a key-value pair by index
+ ///
+ /// Valid indices are *0 <= index < self.len()*
+ ///
+ /// Computes in **O(1)** time.
+ pub fn get_index(&self, index: usize) -> Option<(&K, &V)> {
+ self.as_entries().get(index).map(Bucket::refs)
+ }
+
+ /// Get a key-value pair by index
+ ///
+ /// Valid indices are *0 <= index < self.len()*
+ ///
+ /// Computes in **O(1)** time.
+ pub fn get_index_mut(&mut self, index: usize) -> Option<(&mut K, &mut V)> {
+ self.as_entries_mut().get_mut(index).map(Bucket::muts)
+ }
+
+ /// Get the first key-value pair
+ ///
+ /// Computes in **O(1)** time.
+ pub fn first(&self) -> Option<(&K, &V)> {
+ self.as_entries().first().map(Bucket::refs)
+ }
+
+ /// Get the first key-value pair, with mutable access to the value
+ ///
+ /// Computes in **O(1)** time.
+ pub fn first_mut(&mut self) -> Option<(&K, &mut V)> {
+ self.as_entries_mut().first_mut().map(Bucket::ref_mut)
+ }
+
+ /// Get the last key-value pair
+ ///
+ /// Computes in **O(1)** time.
+ pub fn last(&self) -> Option<(&K, &V)> {
+ self.as_entries().last().map(Bucket::refs)
+ }
+
+ /// Get the last key-value pair, with mutable access to the value
+ ///
+ /// Computes in **O(1)** time.
+ pub fn last_mut(&mut self) -> Option<(&K, &mut V)> {
+ self.as_entries_mut().last_mut().map(Bucket::ref_mut)
+ }
+
+ /// Remove the key-value pair by index
+ ///
+ /// Valid indices are *0 <= index < self.len()*
+ ///
+ /// Like `Vec::swap_remove`, the pair is removed by swapping it with the
+ /// last element of the map and popping it off. **This perturbs
+ /// the position of what used to be the last element!**
+ ///
+ /// Computes in **O(1)** time (average).
+ pub fn swap_remove_index(&mut self, index: usize) -> Option<(K, V)> {
+ self.core.swap_remove_index(index)
+ }
+
+ /// Remove the key-value pair by index
+ ///
+ /// Valid indices are *0 <= index < self.len()*
+ ///
+ /// Like `Vec::remove`, the pair is removed by shifting all of the
+ /// elements that follow it, preserving their relative order.
+ /// **This perturbs the index of all of those elements!**
+ ///
+ /// Computes in **O(n)** time (average).
+ pub fn shift_remove_index(&mut self, index: usize) -> Option<(K, V)> {
+ self.core.shift_remove_index(index)
+ }
+
+ /// Moves the position of a key-value pair from one index to another
+ /// by shifting all other pairs in-between.
+ ///
+ /// * If `from < to`, the other pairs will shift down while the targeted pair moves up.
+ /// * If `from > to`, the other pairs will shift up while the targeted pair moves down.
+ ///
+ /// ***Panics*** if `from` or `to` are out of bounds.
+ ///
+ /// Computes in **O(n)** time (average).
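+ ///
+ /// # Examples
+ ///
+ /// A brief illustrative sketch:
+ ///
+ /// ```
+ /// use indexmap::IndexMap;
+ ///
+ /// let mut map = IndexMap::from([("a", 0), ("b", 1), ("c", 2), ("d", 3)]);
+ /// map.move_index(0, 2);
+ /// // "a" moved to index 2; "b" and "c" shifted toward the front.
+ /// assert_eq!(map.keys().copied().collect::<Vec<_>>(), vec!["b", "c", "a", "d"]);
+ /// ```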
+ pub fn move_index(&mut self, from: usize, to: usize) {
+ self.core.move_index(from, to)
+ }
+
+ /// Swaps the position of two key-value pairs in the map.
+ ///
+ /// ***Panics*** if `a` or `b` are out of bounds.
+ pub fn swap_indices(&mut self, a: usize, b: usize) {
+ self.core.swap_indices(a, b)
+ }
+}
+
+/// An iterator over the keys of an `IndexMap`.
+///
+/// This `struct` is created by the [`keys`] method on [`IndexMap`]. See its
+/// documentation for more.
+///
+/// [`keys`]: struct.IndexMap.html#method.keys
+/// [`IndexMap`]: struct.IndexMap.html
+pub struct Keys<'a, K, V> {
+ iter: SliceIter<'a, Bucket<K, V>>,
+}
+
+impl<'a, K, V> Iterator for Keys<'a, K, V> {
+ type Item = &'a K;
+
+ iterator_methods!(Bucket::key_ref);
+}
+
+impl<K, V> DoubleEndedIterator for Keys<'_, K, V> {
+ double_ended_iterator_methods!(Bucket::key_ref);
+}
+
+impl<K, V> ExactSizeIterator for Keys<'_, K, V> {
+ fn len(&self) -> usize {
+ self.iter.len()
+ }
+}
+
+impl<K, V> FusedIterator for Keys<'_, K, V> {}
+
+// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
+impl<K, V> Clone for Keys<'_, K, V> {
+ fn clone(&self) -> Self {
+ Keys {
+ iter: self.iter.clone(),
+ }
+ }
+}
+
+impl<K: fmt::Debug, V> fmt::Debug for Keys<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.clone()).finish()
+ }
+}
+
+/// An owning iterator over the keys of an `IndexMap`.
+///
+/// This `struct` is created by the [`into_keys`] method on [`IndexMap`].
+/// See its documentation for more.
+///
+/// [`IndexMap`]: struct.IndexMap.html
+/// [`into_keys`]: struct.IndexMap.html#method.into_keys
+pub struct IntoKeys<K, V> {
+ iter: vec::IntoIter<Bucket<K, V>>,
+}
+
+impl<K, V> Iterator for IntoKeys<K, V> {
+ type Item = K;
+
+ iterator_methods!(Bucket::key);
+}
+
+impl<K, V> DoubleEndedIterator for IntoKeys<K, V> {
+ double_ended_iterator_methods!(Bucket::key);
+}
+
+impl<K, V> ExactSizeIterator for IntoKeys<K, V> {
+ fn len(&self) -> usize {
+ self.iter.len()
+ }
+}
+
+impl<K, V> FusedIterator for IntoKeys<K, V> {}
+
+impl<K: fmt::Debug, V> fmt::Debug for IntoKeys<K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let iter = self.iter.as_slice().iter().map(Bucket::key_ref);
+ f.debug_list().entries(iter).finish()
+ }
+}
+
+/// An iterator over the values of an `IndexMap`.
+///
+/// This `struct` is created by the [`values`] method on [`IndexMap`]. See its
+/// documentation for more.
+///
+/// [`values`]: struct.IndexMap.html#method.values
+/// [`IndexMap`]: struct.IndexMap.html
+pub struct Values<'a, K, V> {
+ iter: SliceIter<'a, Bucket<K, V>>,
+}
+
+impl<'a, K, V> Iterator for Values<'a, K, V> {
+ type Item = &'a V;
+
+ iterator_methods!(Bucket::value_ref);
+}
+
+impl<K, V> DoubleEndedIterator for Values<'_, K, V> {
+ double_ended_iterator_methods!(Bucket::value_ref);
+}
+
+impl<K, V> ExactSizeIterator for Values<'_, K, V> {
+ fn len(&self) -> usize {
+ self.iter.len()
+ }
+}
+
+impl<K, V> FusedIterator for Values<'_, K, V> {}
+
+// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
+impl<K, V> Clone for Values<'_, K, V> {
+ fn clone(&self) -> Self {
+ Values {
+ iter: self.iter.clone(),
+ }
+ }
+}
+
+impl<K, V: fmt::Debug> fmt::Debug for Values<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.clone()).finish()
+ }
+}
+
+/// A mutable iterator over the values of an `IndexMap`.
+///
+/// This `struct` is created by the [`values_mut`] method on [`IndexMap`]. See its
+/// documentation for more.
+///
+/// [`values_mut`]: struct.IndexMap.html#method.values_mut
+/// [`IndexMap`]: struct.IndexMap.html
+pub struct ValuesMut<'a, K, V> {
+ iter: SliceIterMut<'a, Bucket<K, V>>,
+}
+
+impl<'a, K, V> Iterator for ValuesMut<'a, K, V> {
+ type Item = &'a mut V;
+
+ iterator_methods!(Bucket::value_mut);
+}
+
+impl<K, V> DoubleEndedIterator for ValuesMut<'_, K, V> {
+ double_ended_iterator_methods!(Bucket::value_mut);
+}
+
+impl<K, V> ExactSizeIterator for ValuesMut<'_, K, V> {
+ fn len(&self) -> usize {
+ self.iter.len()
+ }
+}
+
+impl<K, V> FusedIterator for ValuesMut<'_, K, V> {}
+
+impl<K, V: fmt::Debug> fmt::Debug for ValuesMut<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let iter = self.iter.as_slice().iter().map(Bucket::value_ref);
+ f.debug_list().entries(iter).finish()
+ }
+}
+
+/// An owning iterator over the values of an `IndexMap`.
+///
+/// This `struct` is created by the [`into_values`] method on [`IndexMap`].
+/// See its documentation for more.
+///
+/// [`IndexMap`]: struct.IndexMap.html
+/// [`into_values`]: struct.IndexMap.html#method.into_values
+pub struct IntoValues<K, V> {
+ iter: vec::IntoIter<Bucket<K, V>>,
+}
+
+impl<K, V> Iterator for IntoValues<K, V> {
+ type Item = V;
+
+ iterator_methods!(Bucket::value);
+}
+
+impl<K, V> DoubleEndedIterator for IntoValues<K, V> {
+ double_ended_iterator_methods!(Bucket::value);
+}
+
+impl<K, V> ExactSizeIterator for IntoValues<K, V> {
+ fn len(&self) -> usize {
+ self.iter.len()
+ }
+}
+
+impl<K, V> FusedIterator for IntoValues<K, V> {}
+
+impl<K, V: fmt::Debug> fmt::Debug for IntoValues<K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let iter = self.iter.as_slice().iter().map(Bucket::value_ref);
+ f.debug_list().entries(iter).finish()
+ }
+}
+
+/// An iterator over the entries of an `IndexMap`.
+///
+/// This `struct` is created by the [`iter`] method on [`IndexMap`]. See its
+/// documentation for more.
+///
+/// [`iter`]: struct.IndexMap.html#method.iter
+/// [`IndexMap`]: struct.IndexMap.html
+pub struct Iter<'a, K, V> {
+ iter: SliceIter<'a, Bucket<K, V>>,
+}
+
+impl<'a, K, V> Iterator for Iter<'a, K, V> {
+ type Item = (&'a K, &'a V);
+
+ iterator_methods!(Bucket::refs);
+}
+
+impl<K, V> DoubleEndedIterator for Iter<'_, K, V> {
+ double_ended_iterator_methods!(Bucket::refs);
+}
+
+impl<K, V> ExactSizeIterator for Iter<'_, K, V> {
+ fn len(&self) -> usize {
+ self.iter.len()
+ }
+}
+
+impl<K, V> FusedIterator for Iter<'_, K, V> {}
+
+// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
+impl<K, V> Clone for Iter<'_, K, V> {
+ fn clone(&self) -> Self {
+ Iter {
+ iter: self.iter.clone(),
+ }
+ }
+}
+
+impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for Iter<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.clone()).finish()
+ }
+}
+
+/// A mutable iterator over the entries of an `IndexMap`.
+///
+/// This `struct` is created by the [`iter_mut`] method on [`IndexMap`]. See its
+/// documentation for more.
+///
+/// [`iter_mut`]: struct.IndexMap.html#method.iter_mut
+/// [`IndexMap`]: struct.IndexMap.html
+pub struct IterMut<'a, K, V> {
+ iter: SliceIterMut<'a, Bucket<K, V>>,
+}
+
+impl<'a, K, V> Iterator for IterMut<'a, K, V> {
+ type Item = (&'a K, &'a mut V);
+
+ iterator_methods!(Bucket::ref_mut);
+}
+
+impl<K, V> DoubleEndedIterator for IterMut<'_, K, V> {
+ double_ended_iterator_methods!(Bucket::ref_mut);
+}
+
+impl<K, V> ExactSizeIterator for IterMut<'_, K, V> {
+ fn len(&self) -> usize {
+ self.iter.len()
+ }
+}
+
+impl<K, V> FusedIterator for IterMut<'_, K, V> {}
+
+impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for IterMut<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let iter = self.iter.as_slice().iter().map(Bucket::refs);
+ f.debug_list().entries(iter).finish()
+ }
+}
+
+/// An owning iterator over the entries of an `IndexMap`.
+///
+/// This `struct` is created by the [`into_iter`] method on [`IndexMap`]
+/// (provided by the `IntoIterator` trait). See its documentation for more.
+///
+/// [`into_iter`]: struct.IndexMap.html#method.into_iter
+/// [`IndexMap`]: struct.IndexMap.html
+pub struct IntoIter<K, V> {
+ iter: vec::IntoIter<Bucket<K, V>>,
+}
+
+impl<K, V> Iterator for IntoIter<K, V> {
+ type Item = (K, V);
+
+ iterator_methods!(Bucket::key_value);
+}
+
+impl<K, V> DoubleEndedIterator for IntoIter<K, V> {
+ double_ended_iterator_methods!(Bucket::key_value);
+}
+
+impl<K, V> ExactSizeIterator for IntoIter<K, V> {
+ fn len(&self) -> usize {
+ self.iter.len()
+ }
+}
+
+impl<K, V> FusedIterator for IntoIter<K, V> {}
+
+impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for IntoIter<K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let iter = self.iter.as_slice().iter().map(Bucket::refs);
+ f.debug_list().entries(iter).finish()
+ }
+}
+
+/// A draining iterator over the entries of an `IndexMap`.
+///
+/// This `struct` is created by the [`drain`] method on [`IndexMap`]. See its
+/// documentation for more.
+///
+/// [`drain`]: struct.IndexMap.html#method.drain
+/// [`IndexMap`]: struct.IndexMap.html
+pub struct Drain<'a, K, V> {
+ pub(crate) iter: vec::Drain<'a, Bucket<K, V>>,
+}
+
+impl<K, V> Iterator for Drain<'_, K, V> {
+ type Item = (K, V);
+
+ iterator_methods!(Bucket::key_value);
+}
+
+impl<K, V> DoubleEndedIterator for Drain<'_, K, V> {
+ double_ended_iterator_methods!(Bucket::key_value);
+}
+
+impl<K, V> ExactSizeIterator for Drain<'_, K, V> {
+ fn len(&self) -> usize {
+ self.iter.len()
+ }
+}
+
+impl<K, V> FusedIterator for Drain<'_, K, V> {}
+
+impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for Drain<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let iter = self.iter.as_slice().iter().map(Bucket::refs);
+ f.debug_list().entries(iter).finish()
+ }
+}
+
+impl<'a, K, V, S> IntoIterator for &'a IndexMap<K, V, S> {
+ type Item = (&'a K, &'a V);
+ type IntoIter = Iter<'a, K, V>;
+ fn into_iter(self) -> Self::IntoIter {
+ self.iter()
+ }
+}
+
+impl<'a, K, V, S> IntoIterator for &'a mut IndexMap<K, V, S> {
+ type Item = (&'a K, &'a mut V);
+ type IntoIter = IterMut<'a, K, V>;
+ fn into_iter(self) -> Self::IntoIter {
+ self.iter_mut()
+ }
+}
+
+impl<K, V, S> IntoIterator for IndexMap<K, V, S> {
+ type Item = (K, V);
+ type IntoIter = IntoIter<K, V>;
+ fn into_iter(self) -> Self::IntoIter {
+ IntoIter {
+ iter: self.into_entries().into_iter(),
+ }
+ }
+}
+
+/// Access `IndexMap` values corresponding to a key.
+///
+/// # Examples
+///
+/// ```
+/// use indexmap::IndexMap;
+///
+/// let mut map = IndexMap::new();
+/// for word in "Lorem ipsum dolor sit amet".split_whitespace() {
+/// map.insert(word.to_lowercase(), word.to_uppercase());
+/// }
+/// assert_eq!(map["lorem"], "LOREM");
+/// assert_eq!(map["ipsum"], "IPSUM");
+/// ```
+///
+/// ```should_panic
+/// use indexmap::IndexMap;
+///
+/// let mut map = IndexMap::new();
+/// map.insert("foo", 1);
+/// println!("{:?}", map["bar"]); // panics!
+/// ```
+impl<K, V, Q: ?Sized, S> Index<&Q> for IndexMap<K, V, S>
+where
+ Q: Hash + Equivalent<K>,
+ K: Hash + Eq,
+ S: BuildHasher,
+{
+ type Output = V;
+
+ /// Returns a reference to the value corresponding to the supplied `key`.
+ ///
+ /// ***Panics*** if `key` is not present in the map.
+ fn index(&self, key: &Q) -> &V {
+ self.get(key).expect("IndexMap: key not found")
+ }
+}
+
+/// Access `IndexMap` values corresponding to a key.
+///
+/// Mutable indexing allows changing / updating values of key-value
+/// pairs that are already present.
+///
+/// You can **not** insert new pairs with index syntax; use `.insert()` instead.
+///
+/// # Examples
+///
+/// ```
+/// use indexmap::IndexMap;
+///
+/// let mut map = IndexMap::new();
+/// for word in "Lorem ipsum dolor sit amet".split_whitespace() {
+/// map.insert(word.to_lowercase(), word.to_string());
+/// }
+/// let lorem = &mut map["lorem"];
+/// assert_eq!(lorem, "Lorem");
+/// lorem.retain(char::is_lowercase);
+/// assert_eq!(map["lorem"], "orem");
+/// ```
+///
+/// ```should_panic
+/// use indexmap::IndexMap;
+///
+/// let mut map = IndexMap::new();
+/// map.insert("foo", 1);
+/// map["bar"] = 1; // panics!
+/// ```
+impl<K, V, Q: ?Sized, S> IndexMut<&Q> for IndexMap<K, V, S>
+where
+ Q: Hash + Equivalent<K>,
+ K: Hash + Eq,
+ S: BuildHasher,
+{
+ /// Returns a mutable reference to the value corresponding to the supplied `key`.
+ ///
+ /// ***Panics*** if `key` is not present in the map.
+ fn index_mut(&mut self, key: &Q) -> &mut V {
+ self.get_mut(key).expect("IndexMap: key not found")
+ }
+}
+
+/// Access `IndexMap` values at indexed positions.
+///
+/// # Examples
+///
+/// ```
+/// use indexmap::IndexMap;
+///
+/// let mut map = IndexMap::new();
+/// for word in "Lorem ipsum dolor sit amet".split_whitespace() {
+/// map.insert(word.to_lowercase(), word.to_uppercase());
+/// }
+/// assert_eq!(map[0], "LOREM");
+/// assert_eq!(map[1], "IPSUM");
+/// map.reverse();
+/// assert_eq!(map[0], "AMET");
+/// assert_eq!(map[1], "SIT");
+/// map.sort_keys();
+/// assert_eq!(map[0], "AMET");
+/// assert_eq!(map[1], "DOLOR");
+/// ```
+///
+/// ```should_panic
+/// use indexmap::IndexMap;
+///
+/// let mut map = IndexMap::new();
+/// map.insert("foo", 1);
+/// println!("{:?}", map[10]); // panics!
+/// ```
+impl<K, V, S> Index<usize> for IndexMap<K, V, S> {
+ type Output = V;
+
+ /// Returns a reference to the value at the supplied `index`.
+ ///
+ /// ***Panics*** if `index` is out of bounds.
+ fn index(&self, index: usize) -> &V {
+ self.get_index(index)
+ .expect("IndexMap: index out of bounds")
+ .1
+ }
+}
+
+/// Access `IndexMap` values at indexed positions.
+///
+/// Mutable indexing allows changing / updating indexed values
+/// that are already present.
+///
+/// You can **not** insert new values with index syntax; use `.insert()` instead.
+///
+/// # Examples
+///
+/// ```
+/// use indexmap::IndexMap;
+///
+/// let mut map = IndexMap::new();
+/// for word in "Lorem ipsum dolor sit amet".split_whitespace() {
+/// map.insert(word.to_lowercase(), word.to_string());
+/// }
+/// let lorem = &mut map[0];
+/// assert_eq!(lorem, "Lorem");
+/// lorem.retain(char::is_lowercase);
+/// assert_eq!(map["lorem"], "orem");
+/// ```
+///
+/// ```should_panic
+/// use indexmap::IndexMap;
+///
+/// let mut map = IndexMap::new();
+/// map.insert("foo", 1);
+/// map[10] = 1; // panics!
+/// ```
+impl<K, V, S> IndexMut<usize> for IndexMap<K, V, S> {
+ /// Returns a mutable reference to the value at the supplied `index`.
+ ///
+ /// ***Panics*** if `index` is out of bounds.
+ fn index_mut(&mut self, index: usize) -> &mut V {
+ self.get_index_mut(index)
+ .expect("IndexMap: index out of bounds")
+ .1
+ }
+}
+
+impl<K, V, S> FromIterator<(K, V)> for IndexMap<K, V, S>
+where
+ K: Hash + Eq,
+ S: BuildHasher + Default,
+{
+ /// Create an `IndexMap` from the sequence of key-value pairs in the
+ /// iterable.
+ ///
+ /// `from_iter` uses the same logic as `extend`. See
+ /// [`extend`](#method.extend) for more details.
+ fn from_iter<I: IntoIterator<Item = (K, V)>>(iterable: I) -> Self {
+ let iter = iterable.into_iter();
+ let (low, _) = iter.size_hint();
+ let mut map = Self::with_capacity_and_hasher(low, <_>::default());
+ map.extend(iter);
+ map
+ }
+}
+
+#[cfg(has_std)]
+impl<K, V, const N: usize> From<[(K, V); N]> for IndexMap<K, V, RandomState>
+where
+ K: Hash + Eq,
+{
+ /// # Examples
+ ///
+ /// ```
+ /// use indexmap::IndexMap;
+ ///
+ /// let map1 = IndexMap::from([(1, 2), (3, 4)]);
+ /// let map2: IndexMap<_, _> = [(1, 2), (3, 4)].into();
+ /// assert_eq!(map1, map2);
+ /// ```
+ fn from(arr: [(K, V); N]) -> Self {
+ Self::from_iter(arr)
+ }
+}
+
+impl<K, V, S> Extend<(K, V)> for IndexMap<K, V, S>
+where
+ K: Hash + Eq,
+ S: BuildHasher,
+{
+ /// Extend the map with all key-value pairs in the iterable.
+ ///
+ /// This is equivalent to calling [`insert`](#method.insert) for each of
+ /// them in order, which means that for keys that already existed
+ /// in the map, their value is updated but it keeps the existing order.
+ ///
+ /// New keys are inserted in the order they appear in the sequence. If
+ /// equivalents of a key occur more than once, the last corresponding value
+ /// prevails.
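+ ///
+ /// # Examples
+ ///
+ /// A short sketch of duplicate-key handling (illustrative values):
+ ///
+ /// ```
+ /// use indexmap::IndexMap;
+ ///
+ /// let mut map = IndexMap::from([("a", 1)]);
+ /// map.extend([("b", 2), ("a", 3)]);
+ /// // "a" kept its original position; only its value was updated.
+ /// assert_eq!(map.get_index(0), Some((&"a", &3)));
+ /// assert_eq!(map.get_index(1), Some((&"b", &2)));
+ /// ```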
+ fn extend<I: IntoIterator<Item = (K, V)>>(&mut self, iterable: I) {
+ // (Note: this is a copy of `std`/`hashbrown`'s reservation logic.)
+ // Keys may be already present or show multiple times in the iterator.
+ // Reserve the entire hint lower bound if the map is empty.
+ // Otherwise reserve half the hint (rounded up), so the map
+ // will only resize twice in the worst case.
+ let iter = iterable.into_iter();
+ let reserve = if self.is_empty() {
+ iter.size_hint().0
+ } else {
+ (iter.size_hint().0 + 1) / 2
+ };
+ self.reserve(reserve);
+ iter.for_each(move |(k, v)| {
+ self.insert(k, v);
+ });
+ }
+}
+
+impl<'a, K, V, S> Extend<(&'a K, &'a V)> for IndexMap<K, V, S>
+where
+ K: Hash + Eq + Copy,
+ V: Copy,
+ S: BuildHasher,
+{
+ /// Extend the map with all key-value pairs in the iterable.
+ ///
+ /// See the first extend method for more details.
+ fn extend<I: IntoIterator<Item = (&'a K, &'a V)>>(&mut self, iterable: I) {
+ self.extend(iterable.into_iter().map(|(&key, &value)| (key, value)));
+ }
+}
+
+impl<K, V, S> Default for IndexMap<K, V, S>
+where
+ S: Default,
+{
+ /// Return an empty `IndexMap`
+ fn default() -> Self {
+ Self::with_capacity_and_hasher(0, S::default())
+ }
+}
+
+impl<K, V1, S1, V2, S2> PartialEq<IndexMap<K, V2, S2>> for IndexMap<K, V1, S1>
+where
+ K: Hash + Eq,
+ V1: PartialEq<V2>,
+ S1: BuildHasher,
+ S2: BuildHasher,
+{
+ fn eq(&self, other: &IndexMap<K, V2, S2>) -> bool {
+ if self.len() != other.len() {
+ return false;
+ }
+
+ self.iter()
+ .all(|(key, value)| other.get(key).map_or(false, |v| *value == *v))
+ }
+}
+
+impl<K, V, S> Eq for IndexMap<K, V, S>
+where
+ K: Eq + Hash,
+ V: Eq,
+ S: BuildHasher,
+{
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use std::string::String;
+
+ #[test]
+ fn it_works() {
+ let mut map = IndexMap::new();
+ assert_eq!(map.is_empty(), true);
+ map.insert(1, ());
+ map.insert(1, ());
+ assert_eq!(map.len(), 1);
+ assert!(map.get(&1).is_some());
+ assert_eq!(map.is_empty(), false);
+ }
+
+ #[test]
+ fn new() {
+ let map = IndexMap::<String, String>::new();
+ println!("{:?}", map);
+ assert_eq!(map.capacity(), 0);
+ assert_eq!(map.len(), 0);
+ assert_eq!(map.is_empty(), true);
+ }
+
+ #[test]
+ fn insert() {
+ let insert = [0, 4, 2, 12, 8, 7, 11, 5];
+ let not_present = [1, 3, 6, 9, 10];
+ let mut map = IndexMap::with_capacity(insert.len());
+
+ for (i, &elt) in insert.iter().enumerate() {
+ assert_eq!(map.len(), i);
+ map.insert(elt, elt);
+ assert_eq!(map.len(), i + 1);
+ assert_eq!(map.get(&elt), Some(&elt));
+ assert_eq!(map[&elt], elt);
+ }
+ println!("{:?}", map);
+
+ for &elt in &not_present {
+ assert!(map.get(&elt).is_none());
+ }
+ }
+
+ #[test]
+ fn insert_full() {
+ let insert = vec![9, 2, 7, 1, 4, 6, 13];
+ let present = vec![1, 6, 2];
+ let mut map = IndexMap::with_capacity(insert.len());
+
+ for (i, &elt) in insert.iter().enumerate() {
+ assert_eq!(map.len(), i);
+ let (index, existing) = map.insert_full(elt, elt);
+ assert_eq!(existing, None);
+ assert_eq!(Some(index), map.get_full(&elt).map(|x| x.0));
+ assert_eq!(map.len(), i + 1);
+ }
+
+ let len = map.len();
+ for &elt in &present {
+ let (index, existing) = map.insert_full(elt, elt);
+ assert_eq!(existing, Some(elt));
+ assert_eq!(Some(index), map.get_full(&elt).map(|x| x.0));
+ assert_eq!(map.len(), len);
+ }
+ }
+
+ #[test]
+ fn insert_2() {
+ let mut map = IndexMap::with_capacity(16);
+
+ let mut keys = vec![];
+ keys.extend(0..16);
+ keys.extend(if cfg!(miri) { 32..64 } else { 128..267 });
+
+ for &i in &keys {
+ let old_map = map.clone();
+ map.insert(i, ());
+ for key in old_map.keys() {
+ if map.get(key).is_none() {
+ println!("old_map: {:?}", old_map);
+ println!("map: {:?}", map);
+ panic!("did not find {} in map", key);
+ }
+ }
+ }
+
+ for &i in &keys {
+ assert!(map.get(&i).is_some(), "did not find {}", i);
+ }
+ }
+
+ #[test]
+ fn insert_order() {
+ let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23];
+ let mut map = IndexMap::new();
+
+ for &elt in &insert {
+ map.insert(elt, ());
+ }
+
+ assert_eq!(map.keys().count(), map.len());
+ assert_eq!(map.keys().count(), insert.len());
+ for (a, b) in insert.iter().zip(map.keys()) {
+ assert_eq!(a, b);
+ }
+ for (i, k) in (0..insert.len()).zip(map.keys()) {
+ assert_eq!(map.get_index(i).unwrap().0, k);
+ }
+ }
+
+ #[test]
+ fn grow() {
+ let insert = [0, 4, 2, 12, 8, 7, 11];
+ let not_present = [1, 3, 6, 9, 10];
+ let mut map = IndexMap::with_capacity(insert.len());
+
+ for (i, &elt) in insert.iter().enumerate() {
+ assert_eq!(map.len(), i);
+ map.insert(elt, elt);
+ assert_eq!(map.len(), i + 1);
+ assert_eq!(map.get(&elt), Some(&elt));
+ assert_eq!(map[&elt], elt);
+ }
+
+ println!("{:?}", map);
+ for &elt in &insert {
+ map.insert(elt * 10, elt);
+ }
+ for &elt in &insert {
+ map.insert(elt * 100, elt);
+ }
+ for (i, &elt) in insert.iter().cycle().enumerate().take(100) {
+ map.insert(elt * 100 + i as i32, elt);
+ }
+ println!("{:?}", map);
+ for &elt in &not_present {
+ assert!(map.get(&elt).is_none());
+ }
+ }
+
+ #[test]
+ fn reserve() {
+ let mut map = IndexMap::<usize, usize>::new();
+ assert_eq!(map.capacity(), 0);
+ map.reserve(100);
+ let capacity = map.capacity();
+ assert!(capacity >= 100);
+ for i in 0..capacity {
+ assert_eq!(map.len(), i);
+ map.insert(i, i * i);
+ assert_eq!(map.len(), i + 1);
+ assert_eq!(map.capacity(), capacity);
+ assert_eq!(map.get(&i), Some(&(i * i)));
+ }
+ map.insert(capacity, std::usize::MAX);
+ assert_eq!(map.len(), capacity + 1);
+ assert!(map.capacity() > capacity);
+ assert_eq!(map.get(&capacity), Some(&std::usize::MAX));
+ }
+
+ #[test]
+ fn shrink_to_fit() {
+ let mut map = IndexMap::<usize, usize>::new();
+ assert_eq!(map.capacity(), 0);
+ for i in 0..100 {
+ assert_eq!(map.len(), i);
+ map.insert(i, i * i);
+ assert_eq!(map.len(), i + 1);
+ assert!(map.capacity() >= i + 1);
+ assert_eq!(map.get(&i), Some(&(i * i)));
+ map.shrink_to_fit();
+ assert_eq!(map.len(), i + 1);
+ assert_eq!(map.capacity(), i + 1);
+ assert_eq!(map.get(&i), Some(&(i * i)));
+ }
+ }
+
+ #[test]
+ fn remove() {
+ let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23];
+ let mut map = IndexMap::new();
+
+ for &elt in &insert {
+ map.insert(elt, elt);
+ }
+
+ assert_eq!(map.keys().count(), map.len());
+ assert_eq!(map.keys().count(), insert.len());
+ for (a, b) in insert.iter().zip(map.keys()) {
+ assert_eq!(a, b);
+ }
+
+ let remove_fail = [99, 77];
+ let remove = [4, 12, 8, 7];
+
+ for &key in &remove_fail {
+ assert!(map.swap_remove_full(&key).is_none());
+ }
+ println!("{:?}", map);
+ for &key in &remove {
+ //println!("{:?}", map);
+ let index = map.get_full(&key).unwrap().0;
+ assert_eq!(map.swap_remove_full(&key), Some((index, key, key)));
+ }
+ println!("{:?}", map);
+
+ for key in &insert {
+ assert_eq!(map.get(key).is_some(), !remove.contains(key));
+ }
+ assert_eq!(map.len(), insert.len() - remove.len());
+ assert_eq!(map.keys().count(), insert.len() - remove.len());
+ }
+
+ #[test]
+ fn remove_to_empty() {
+ let mut map = indexmap! { 0 => 0, 4 => 4, 5 => 5 };
+ map.swap_remove(&5).unwrap();
+ map.swap_remove(&4).unwrap();
+ map.swap_remove(&0).unwrap();
+ assert!(map.is_empty());
+ }
+
+ #[test]
+ fn swap_remove_index() {
+ let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23];
+ let mut map = IndexMap::new();
+
+ for &elt in &insert {
+ map.insert(elt, elt * 2);
+ }
+
+ let mut vector = insert.to_vec();
+ let remove_sequence = &[3, 3, 10, 4, 5, 4, 3, 0, 1];
+
+ // check that the same swap remove sequence on vec and map
+        // has the same result.
+ for &rm in remove_sequence {
+ let out_vec = vector.swap_remove(rm);
+ let (out_map, _) = map.swap_remove_index(rm).unwrap();
+ assert_eq!(out_vec, out_map);
+ }
+ assert_eq!(vector.len(), map.len());
+ for (a, b) in vector.iter().zip(map.keys()) {
+ assert_eq!(a, b);
+ }
+ }
+
+ #[test]
+ fn partial_eq_and_eq() {
+ let mut map_a = IndexMap::new();
+ map_a.insert(1, "1");
+ map_a.insert(2, "2");
+ let mut map_b = map_a.clone();
+ assert_eq!(map_a, map_b);
+ map_b.swap_remove(&1);
+ assert_ne!(map_a, map_b);
+
+ let map_c: IndexMap<_, String> = map_b.into_iter().map(|(k, v)| (k, v.into())).collect();
+ assert_ne!(map_a, map_c);
+ assert_ne!(map_c, map_a);
+ }
+
+ #[test]
+ fn extend() {
+ let mut map = IndexMap::new();
+ map.extend(vec![(&1, &2), (&3, &4)]);
+ map.extend(vec![(5, 6)]);
+ assert_eq!(
+ map.into_iter().collect::<Vec<_>>(),
+ vec![(1, 2), (3, 4), (5, 6)]
+ );
+ }
+
+ #[test]
+ fn entry() {
+ let mut map = IndexMap::new();
+
+ map.insert(1, "1");
+ map.insert(2, "2");
+ {
+ let e = map.entry(3);
+ assert_eq!(e.index(), 2);
+ let e = e.or_insert("3");
+ assert_eq!(e, &"3");
+ }
+
+ let e = map.entry(2);
+ assert_eq!(e.index(), 1);
+ assert_eq!(e.key(), &2);
+ match e {
+ Entry::Occupied(ref e) => assert_eq!(e.get(), &"2"),
+ Entry::Vacant(_) => panic!(),
+ }
+ assert_eq!(e.or_insert("4"), &"2");
+ }
+
+ #[test]
+ fn entry_and_modify() {
+ let mut map = IndexMap::new();
+
+ map.insert(1, "1");
+ map.entry(1).and_modify(|x| *x = "2");
+ assert_eq!(Some(&"2"), map.get(&1));
+
+ map.entry(2).and_modify(|x| *x = "doesn't exist");
+ assert_eq!(None, map.get(&2));
+ }
+
+ #[test]
+ fn entry_or_default() {
+ let mut map = IndexMap::new();
+
+ #[derive(Debug, PartialEq)]
+ enum TestEnum {
+ DefaultValue,
+ NonDefaultValue,
+ }
+
+ impl Default for TestEnum {
+ fn default() -> Self {
+ TestEnum::DefaultValue
+ }
+ }
+
+ map.insert(1, TestEnum::NonDefaultValue);
+ assert_eq!(&mut TestEnum::NonDefaultValue, map.entry(1).or_default());
+
+ assert_eq!(&mut TestEnum::DefaultValue, map.entry(2).or_default());
+ }
+
+ #[test]
+ fn occupied_entry_key() {
+ // These keys match hash and equality, but their addresses are distinct.
+ let (k1, k2) = (&mut 1, &mut 1);
+ let k1_ptr = k1 as *const i32;
+ let k2_ptr = k2 as *const i32;
+ assert_ne!(k1_ptr, k2_ptr);
+
+ let mut map = IndexMap::new();
+ map.insert(k1, "value");
+ match map.entry(k2) {
+ Entry::Occupied(ref e) => {
+ // `OccupiedEntry::key` should reference the key in the map,
+ // not the key that was used to find the entry.
+ let ptr = *e.key() as *const i32;
+ assert_eq!(ptr, k1_ptr);
+ assert_ne!(ptr, k2_ptr);
+ }
+ Entry::Vacant(_) => panic!(),
+ }
+ }
+
+ #[test]
+ fn keys() {
+ let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')];
+ let map: IndexMap<_, _> = vec.into_iter().collect();
+ let keys: Vec<_> = map.keys().copied().collect();
+ assert_eq!(keys.len(), 3);
+ assert!(keys.contains(&1));
+ assert!(keys.contains(&2));
+ assert!(keys.contains(&3));
+ }
+
+ #[test]
+ fn into_keys() {
+ let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')];
+ let map: IndexMap<_, _> = vec.into_iter().collect();
+ let keys: Vec<i32> = map.into_keys().collect();
+ assert_eq!(keys.len(), 3);
+ assert!(keys.contains(&1));
+ assert!(keys.contains(&2));
+ assert!(keys.contains(&3));
+ }
+
+ #[test]
+ fn values() {
+ let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')];
+ let map: IndexMap<_, _> = vec.into_iter().collect();
+ let values: Vec<_> = map.values().copied().collect();
+ assert_eq!(values.len(), 3);
+ assert!(values.contains(&'a'));
+ assert!(values.contains(&'b'));
+ assert!(values.contains(&'c'));
+ }
+
+ #[test]
+ fn values_mut() {
+ let vec = vec![(1, 1), (2, 2), (3, 3)];
+ let mut map: IndexMap<_, _> = vec.into_iter().collect();
+ for value in map.values_mut() {
+ *value *= 2
+ }
+ let values: Vec<_> = map.values().copied().collect();
+ assert_eq!(values.len(), 3);
+ assert!(values.contains(&2));
+ assert!(values.contains(&4));
+ assert!(values.contains(&6));
+ }
+
+ #[test]
+ fn into_values() {
+ let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')];
+ let map: IndexMap<_, _> = vec.into_iter().collect();
+ let values: Vec<char> = map.into_values().collect();
+ assert_eq!(values.len(), 3);
+ assert!(values.contains(&'a'));
+ assert!(values.contains(&'b'));
+ assert!(values.contains(&'c'));
+ }
+
+ #[test]
+ #[cfg(has_std)]
+ fn from_array() {
+ let map = IndexMap::from([(1, 2), (3, 4)]);
+ let mut expected = IndexMap::new();
+ expected.insert(1, 2);
+ expected.insert(3, 4);
+
+ assert_eq!(map, expected)
+ }
+}
diff --git a/vendor/indexmap-1.9.3/src/map/core.rs b/vendor/indexmap-1.9.3/src/map/core.rs
new file mode 100644
index 0000000..ea7aaae
--- /dev/null
+++ b/vendor/indexmap-1.9.3/src/map/core.rs
@@ -0,0 +1,700 @@
+//! This is the core implementation that doesn't depend on the hasher at all.
+//!
+//! The methods of `IndexMapCore` don't use any Hash properties of K.
+//!
+//! It's cleaner to separate them out, so the compiler can check that we are not
+//! using Hash at all in these methods.
+//!
+//! However, we should probably not let this show in the public API or docs.
+
+mod raw;
+
+use hashbrown::raw::RawTable;
+
+use crate::vec::{Drain, Vec};
+use core::cmp;
+use core::fmt;
+use core::mem::replace;
+use core::ops::RangeBounds;
+
+use crate::equivalent::Equivalent;
+use crate::util::simplify_range;
+use crate::{Bucket, Entries, HashValue};
+
+/// Core of the map that does not depend on S
+pub(crate) struct IndexMapCore<K, V> {
+ /// indices mapping from the entry hash to its index.
+ indices: RawTable<usize>,
+ /// entries is a dense vec of entries in their order.
+ entries: Vec<Bucket<K, V>>,
+}
+
+#[inline(always)]
+fn get_hash<K, V>(entries: &[Bucket<K, V>]) -> impl Fn(&usize) -> u64 + '_ {
+ move |&i| entries[i].hash.get()
+}
+
+#[inline]
+fn equivalent<'a, K, V, Q: ?Sized + Equivalent<K>>(
+ key: &'a Q,
+ entries: &'a [Bucket<K, V>],
+) -> impl Fn(&usize) -> bool + 'a {
+ move |&i| Q::equivalent(key, &entries[i].key)
+}
+
+#[inline]
+fn erase_index(table: &mut RawTable<usize>, hash: HashValue, index: usize) {
+ let erased = table.erase_entry(hash.get(), move |&i| i == index);
+ debug_assert!(erased);
+}
+
+#[inline]
+fn update_index(table: &mut RawTable<usize>, hash: HashValue, old: usize, new: usize) {
+ let index = table
+ .get_mut(hash.get(), move |&i| i == old)
+ .expect("index not found");
+ *index = new;
+}
+
+impl<K, V> Clone for IndexMapCore<K, V>
+where
+ K: Clone,
+ V: Clone,
+{
+ fn clone(&self) -> Self {
+ let indices = self.indices.clone();
+ let mut entries = Vec::with_capacity(indices.capacity());
+ entries.clone_from(&self.entries);
+ IndexMapCore { indices, entries }
+ }
+
+ fn clone_from(&mut self, other: &Self) {
+ let hasher = get_hash(&other.entries);
+ self.indices.clone_from_with_hasher(&other.indices, hasher);
+ if self.entries.capacity() < other.entries.len() {
+ // If we must resize, match the indices capacity
+ self.reserve_entries();
+ }
+ self.entries.clone_from(&other.entries);
+ }
+}
+
+impl<K, V> fmt::Debug for IndexMapCore<K, V>
+where
+ K: fmt::Debug,
+ V: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("IndexMapCore")
+ .field("indices", &raw::DebugIndices(&self.indices))
+ .field("entries", &self.entries)
+ .finish()
+ }
+}
+
+impl<K, V> Entries for IndexMapCore<K, V> {
+ type Entry = Bucket<K, V>;
+
+ #[inline]
+ fn into_entries(self) -> Vec<Self::Entry> {
+ self.entries
+ }
+
+ #[inline]
+ fn as_entries(&self) -> &[Self::Entry] {
+ &self.entries
+ }
+
+ #[inline]
+ fn as_entries_mut(&mut self) -> &mut [Self::Entry] {
+ &mut self.entries
+ }
+
+ fn with_entries<F>(&mut self, f: F)
+ where
+ F: FnOnce(&mut [Self::Entry]),
+ {
+ f(&mut self.entries);
+ self.rebuild_hash_table();
+ }
+}
+
+impl<K, V> IndexMapCore<K, V> {
+ #[inline]
+ pub(crate) const fn new() -> Self {
+ IndexMapCore {
+ indices: RawTable::new(),
+ entries: Vec::new(),
+ }
+ }
+
+ #[inline]
+ pub(crate) fn with_capacity(n: usize) -> Self {
+ IndexMapCore {
+ indices: RawTable::with_capacity(n),
+ entries: Vec::with_capacity(n),
+ }
+ }
+
+ #[inline]
+ pub(crate) fn len(&self) -> usize {
+ self.indices.len()
+ }
+
+ #[inline]
+ pub(crate) fn capacity(&self) -> usize {
+ cmp::min(self.indices.capacity(), self.entries.capacity())
+ }
+
+ pub(crate) fn clear(&mut self) {
+ self.indices.clear();
+ self.entries.clear();
+ }
+
+ pub(crate) fn truncate(&mut self, len: usize) {
+ if len < self.len() {
+ self.erase_indices(len, self.entries.len());
+ self.entries.truncate(len);
+ }
+ }
+
+ pub(crate) fn drain<R>(&mut self, range: R) -> Drain<'_, Bucket<K, V>>
+ where
+ R: RangeBounds<usize>,
+ {
+ let range = simplify_range(range, self.entries.len());
+ self.erase_indices(range.start, range.end);
+ self.entries.drain(range)
+ }
+
+ #[cfg(feature = "rayon")]
+ pub(crate) fn par_drain<R>(&mut self, range: R) -> rayon::vec::Drain<'_, Bucket<K, V>>
+ where
+ K: Send,
+ V: Send,
+ R: RangeBounds<usize>,
+ {
+ use rayon::iter::ParallelDrainRange;
+ let range = simplify_range(range, self.entries.len());
+ self.erase_indices(range.start, range.end);
+ self.entries.par_drain(range)
+ }
+
+ pub(crate) fn split_off(&mut self, at: usize) -> Self {
+ assert!(at <= self.entries.len());
+ self.erase_indices(at, self.entries.len());
+ let entries = self.entries.split_off(at);
+
+ let mut indices = RawTable::with_capacity(entries.len());
+ raw::insert_bulk_no_grow(&mut indices, &entries);
+ Self { indices, entries }
+ }
+
+ /// Reserve capacity for `additional` more key-value pairs.
+ pub(crate) fn reserve(&mut self, additional: usize) {
+ self.indices.reserve(additional, get_hash(&self.entries));
+ self.reserve_entries();
+ }
+
+ /// Reserve entries capacity to match the indices
+ fn reserve_entries(&mut self) {
+ let additional = self.indices.capacity() - self.entries.len();
+ self.entries.reserve_exact(additional);
+ }
+
+ /// Shrink the capacity of the map with a lower bound
+ pub(crate) fn shrink_to(&mut self, min_capacity: usize) {
+ self.indices
+ .shrink_to(min_capacity, get_hash(&self.entries));
+ self.entries.shrink_to(min_capacity);
+ }
+
+ /// Remove the last key-value pair
+ pub(crate) fn pop(&mut self) -> Option<(K, V)> {
+ if let Some(entry) = self.entries.pop() {
+ let last = self.entries.len();
+ erase_index(&mut self.indices, entry.hash, last);
+ Some((entry.key, entry.value))
+ } else {
+ None
+ }
+ }
+
+ /// Append a key-value pair, *without* checking whether it already exists,
+ /// and return the pair's new index.
+ fn push(&mut self, hash: HashValue, key: K, value: V) -> usize {
+ let i = self.entries.len();
+ self.indices.insert(hash.get(), i, get_hash(&self.entries));
+ if i == self.entries.capacity() {
+ // Reserve our own capacity synced to the indices,
+ // rather than letting `Vec::push` just double it.
+ self.reserve_entries();
+ }
+ self.entries.push(Bucket { hash, key, value });
+ i
+ }
+
+ /// Return the index in `entries` where an equivalent key can be found
+ pub(crate) fn get_index_of<Q>(&self, hash: HashValue, key: &Q) -> Option<usize>
+ where
+ Q: ?Sized + Equivalent<K>,
+ {
+ let eq = equivalent(key, &self.entries);
+ self.indices.get(hash.get(), eq).copied()
+ }
+
+ pub(crate) fn insert_full(&mut self, hash: HashValue, key: K, value: V) -> (usize, Option<V>)
+ where
+ K: Eq,
+ {
+ match self.get_index_of(hash, &key) {
+ Some(i) => (i, Some(replace(&mut self.entries[i].value, value))),
+ None => (self.push(hash, key, value), None),
+ }
+ }
+
+ /// Remove an entry by shifting all entries that follow it
+ pub(crate) fn shift_remove_full<Q>(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)>
+ where
+ Q: ?Sized + Equivalent<K>,
+ {
+ let eq = equivalent(key, &self.entries);
+ match self.indices.remove_entry(hash.get(), eq) {
+ Some(index) => {
+ let (key, value) = self.shift_remove_finish(index);
+ Some((index, key, value))
+ }
+ None => None,
+ }
+ }
+
+ /// Remove an entry by shifting all entries that follow it
+ pub(crate) fn shift_remove_index(&mut self, index: usize) -> Option<(K, V)> {
+ match self.entries.get(index) {
+ Some(entry) => {
+ erase_index(&mut self.indices, entry.hash, index);
+ Some(self.shift_remove_finish(index))
+ }
+ None => None,
+ }
+ }
+
+ /// Remove an entry by shifting all entries that follow it
+ ///
+ /// The index should already be removed from `self.indices`.
+ fn shift_remove_finish(&mut self, index: usize) -> (K, V) {
+ // Correct indices that point to the entries that followed the removed entry.
+ self.decrement_indices(index + 1, self.entries.len());
+
+ // Use Vec::remove to actually remove the entry.
+ let entry = self.entries.remove(index);
+ (entry.key, entry.value)
+ }
+
+ /// Decrement all indices in the range `start..end`.
+ ///
+ /// The index `start - 1` should not exist in `self.indices`.
+ /// All entries should still be in their original positions.
+ fn decrement_indices(&mut self, start: usize, end: usize) {
+        // Use a heuristic to choose between a full sweep and a `find()` for every shifted item.
+ let shifted_entries = &self.entries[start..end];
+ if shifted_entries.len() > self.indices.buckets() / 2 {
+ // Shift all indices in range.
+ for i in self.indices_mut() {
+ if start <= *i && *i < end {
+ *i -= 1;
+ }
+ }
+ } else {
+ // Find each entry in range to shift its index.
+ for (i, entry) in (start..end).zip(shifted_entries) {
+ update_index(&mut self.indices, entry.hash, i, i - 1);
+ }
+ }
+ }
+
+ /// Increment all indices in the range `start..end`.
+ ///
+ /// The index `end` should not exist in `self.indices`.
+ /// All entries should still be in their original positions.
+ fn increment_indices(&mut self, start: usize, end: usize) {
+        // Use a heuristic to choose between a full sweep and a `find()` for every shifted item.
+ let shifted_entries = &self.entries[start..end];
+ if shifted_entries.len() > self.indices.buckets() / 2 {
+ // Shift all indices in range.
+ for i in self.indices_mut() {
+ if start <= *i && *i < end {
+ *i += 1;
+ }
+ }
+ } else {
+ // Find each entry in range to shift its index, updated in reverse so
+ // we never have duplicated indices that might have a hash collision.
+ for (i, entry) in (start..end).zip(shifted_entries).rev() {
+ update_index(&mut self.indices, entry.hash, i, i + 1);
+ }
+ }
+ }
+
+ pub(super) fn move_index(&mut self, from: usize, to: usize) {
+ let from_hash = self.entries[from].hash;
+ if from != to {
+            // Use a sentinel index so other indices don't collide.
+ update_index(&mut self.indices, from_hash, from, usize::MAX);
+
+ // Update all other indices and rotate the entry positions.
+ if from < to {
+ self.decrement_indices(from + 1, to + 1);
+ self.entries[from..=to].rotate_left(1);
+ } else if to < from {
+ self.increment_indices(to, from);
+ self.entries[to..=from].rotate_right(1);
+ }
+
+            // Change the sentinel index to its final position.
+ update_index(&mut self.indices, from_hash, usize::MAX, to);
+ }
+ }
+
+ /// Remove an entry by swapping it with the last
+ pub(crate) fn swap_remove_full<Q>(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)>
+ where
+ Q: ?Sized + Equivalent<K>,
+ {
+ let eq = equivalent(key, &self.entries);
+ match self.indices.remove_entry(hash.get(), eq) {
+ Some(index) => {
+ let (key, value) = self.swap_remove_finish(index);
+ Some((index, key, value))
+ }
+ None => None,
+ }
+ }
+
+ /// Remove an entry by swapping it with the last
+ pub(crate) fn swap_remove_index(&mut self, index: usize) -> Option<(K, V)> {
+ match self.entries.get(index) {
+ Some(entry) => {
+ erase_index(&mut self.indices, entry.hash, index);
+ Some(self.swap_remove_finish(index))
+ }
+ None => None,
+ }
+ }
+
+ /// Finish removing an entry by swapping it with the last
+ ///
+ /// The index should already be removed from `self.indices`.
+ fn swap_remove_finish(&mut self, index: usize) -> (K, V) {
+ // use swap_remove, but then we need to update the index that points
+ // to the other entry that has to move
+ let entry = self.entries.swap_remove(index);
+
+ // correct index that points to the entry that had to swap places
+ if let Some(entry) = self.entries.get(index) {
+ // was not last element
+ // examine new element in `index` and find it in indices
+ let last = self.entries.len();
+ update_index(&mut self.indices, entry.hash, last, index);
+ }
+
+ (entry.key, entry.value)
+ }
+
+ /// Erase `start..end` from `indices`, and shift `end..` indices down to `start..`
+ ///
+ /// All of these items should still be at their original location in `entries`.
+ /// This is used by `drain`, which will let `Vec::drain` do the work on `entries`.
+ fn erase_indices(&mut self, start: usize, end: usize) {
+ let (init, shifted_entries) = self.entries.split_at(end);
+ let (start_entries, erased_entries) = init.split_at(start);
+
+ let erased = erased_entries.len();
+ let shifted = shifted_entries.len();
+ let half_capacity = self.indices.buckets() / 2;
+
+ // Use a heuristic between different strategies
+ if erased == 0 {
+ // Degenerate case, nothing to do
+ } else if start + shifted < half_capacity && start < erased {
+ // Reinsert everything, as there are few kept indices
+ self.indices.clear();
+
+ // Reinsert stable indices, then shifted indices
+ raw::insert_bulk_no_grow(&mut self.indices, start_entries);
+ raw::insert_bulk_no_grow(&mut self.indices, shifted_entries);
+ } else if erased + shifted < half_capacity {
+ // Find each affected index, as there are few to adjust
+
+ // Find erased indices
+ for (i, entry) in (start..).zip(erased_entries) {
+ erase_index(&mut self.indices, entry.hash, i);
+ }
+
+ // Find shifted indices
+ for ((new, old), entry) in (start..).zip(end..).zip(shifted_entries) {
+ update_index(&mut self.indices, entry.hash, old, new);
+ }
+ } else {
+ // Sweep the whole table for adjustments
+ self.erase_indices_sweep(start, end);
+ }
+
+ debug_assert_eq!(self.indices.len(), start + shifted);
+ }
+
+ pub(crate) fn retain_in_order<F>(&mut self, mut keep: F)
+ where
+ F: FnMut(&mut K, &mut V) -> bool,
+ {
+ // FIXME: This could use Vec::retain_mut with MSRV 1.61.
+ // Like Vec::retain in self.entries, but with mutable K and V.
+ // We swap-shift all the items we want to keep, truncate the rest,
+ // then rebuild the raw hash table with the new indexes.
+ let len = self.entries.len();
+ let mut n_deleted = 0;
+ for i in 0..len {
+ let will_keep = {
+ let entry = &mut self.entries[i];
+ keep(&mut entry.key, &mut entry.value)
+ };
+ if !will_keep {
+ n_deleted += 1;
+ } else if n_deleted > 0 {
+ self.entries.swap(i - n_deleted, i);
+ }
+ }
+ if n_deleted > 0 {
+ self.entries.truncate(len - n_deleted);
+ self.rebuild_hash_table();
+ }
+ }
+
+ fn rebuild_hash_table(&mut self) {
+ self.indices.clear();
+ raw::insert_bulk_no_grow(&mut self.indices, &self.entries);
+ }
+
+ pub(crate) fn reverse(&mut self) {
+ self.entries.reverse();
+
+ // No need to save hash indices, can easily calculate what they should
+ // be, given that this is an in-place reversal.
+ let len = self.entries.len();
+ for i in self.indices_mut() {
+ *i = len - *i - 1;
+ }
+ }
+}
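+
+#[cfg(has_std)]
+#[test]
+fn move_index_sketch() {
+    // Editor's illustrative sketch, not part of upstream indexmap: demonstrates the
+    // index-moving behavior implemented by `move_index` above. It assumes the public
+    // `IndexMap::move_index` wrapper and the std default hasher are available, as in
+    // the crate's other tests.
+    let mut map: crate::IndexMap<char, ()> = "abcde".chars().map(|c| (c, ())).collect();
+
+    // Move 'b' (index 1) to the end; the entries in between shift down by one.
+    map.move_index(1, 4);
+    assert!(map.keys().copied().eq("acdeb".chars()));
+}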
+
+/// Entry for an existing key-value pair or a vacant location to
+/// insert one.
+pub enum Entry<'a, K, V> {
+ /// Existing slot with equivalent key.
+ Occupied(OccupiedEntry<'a, K, V>),
+ /// Vacant slot (no equivalent key in the map).
+ Vacant(VacantEntry<'a, K, V>),
+}
+
+impl<'a, K, V> Entry<'a, K, V> {
+ /// Inserts the given default value in the entry if it is vacant and returns a mutable
+    /// reference to it. Otherwise a mutable reference to the existing value is returned.
+ ///
+ /// Computes in **O(1)** time (amortized average).
+ pub fn or_insert(self, default: V) -> &'a mut V {
+ match self {
+ Entry::Occupied(entry) => entry.into_mut(),
+ Entry::Vacant(entry) => entry.insert(default),
+ }
+ }
+
+ /// Inserts the result of the `call` function in the entry if it is vacant and returns a mutable
+    /// reference to it. Otherwise a mutable reference to the existing value is returned.
+ ///
+ /// Computes in **O(1)** time (amortized average).
+ pub fn or_insert_with<F>(self, call: F) -> &'a mut V
+ where
+ F: FnOnce() -> V,
+ {
+ match self {
+ Entry::Occupied(entry) => entry.into_mut(),
+ Entry::Vacant(entry) => entry.insert(call()),
+ }
+ }
+
+ /// Inserts the result of the `call` function with a reference to the entry's key if it is
+ /// vacant, and returns a mutable reference to the new value. Otherwise a mutable reference to
+    /// the existing value is returned.
+ ///
+ /// Computes in **O(1)** time (amortized average).
+ pub fn or_insert_with_key<F>(self, call: F) -> &'a mut V
+ where
+ F: FnOnce(&K) -> V,
+ {
+ match self {
+ Entry::Occupied(entry) => entry.into_mut(),
+ Entry::Vacant(entry) => {
+ let value = call(&entry.key);
+ entry.insert(value)
+ }
+ }
+ }
+
+ /// Gets a reference to the entry's key, either within the map if occupied,
+ /// or else the new key that was used to find the entry.
+ pub fn key(&self) -> &K {
+ match *self {
+ Entry::Occupied(ref entry) => entry.key(),
+ Entry::Vacant(ref entry) => entry.key(),
+ }
+ }
+
+ /// Return the index where the key-value pair exists or will be inserted.
+ pub fn index(&self) -> usize {
+ match *self {
+ Entry::Occupied(ref entry) => entry.index(),
+ Entry::Vacant(ref entry) => entry.index(),
+ }
+ }
+
+ /// Modifies the entry if it is occupied.
+ pub fn and_modify<F>(self, f: F) -> Self
+ where
+ F: FnOnce(&mut V),
+ {
+ match self {
+ Entry::Occupied(mut o) => {
+ f(o.get_mut());
+ Entry::Occupied(o)
+ }
+ x => x,
+ }
+ }
+
+ /// Inserts a default-constructed value in the entry if it is vacant and returns a mutable
+    /// reference to it. Otherwise a mutable reference to the existing value is returned.
+ ///
+ /// Computes in **O(1)** time (amortized average).
+ pub fn or_default(self) -> &'a mut V
+ where
+ V: Default,
+ {
+ match self {
+ Entry::Occupied(entry) => entry.into_mut(),
+ Entry::Vacant(entry) => entry.insert(V::default()),
+ }
+ }
+}
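+
+#[cfg(has_std)]
+#[test]
+fn entry_or_insert_with_key_sketch() {
+    // Editor's illustrative sketch, not part of upstream indexmap: exercises the
+    // `Entry` methods documented above. Assumes the std default hasher, as in the
+    // crate's other tests.
+    let mut map = crate::IndexMap::new();
+    map.insert("foo", 3);
+
+    // Vacant entry: `index` reports where the pair would be inserted, and
+    // `or_insert_with_key` computes the value from the key.
+    let entry = map.entry("longer-key");
+    assert_eq!(entry.index(), 1);
+    assert_eq!(*entry.or_insert_with_key(|key| key.len()), 10);
+
+    // Occupied entry: the existing value is returned and the closure is not called.
+    assert_eq!(*map.entry("foo").or_insert_with_key(|_| unreachable!()), 3);
+}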
+
+impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for Entry<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ Entry::Vacant(ref v) => f.debug_tuple(stringify!(Entry)).field(v).finish(),
+ Entry::Occupied(ref o) => f.debug_tuple(stringify!(Entry)).field(o).finish(),
+ }
+ }
+}
+
+pub use self::raw::OccupiedEntry;
+
+// Extra methods that don't threaten the unsafe encapsulation.
+impl<K, V> OccupiedEntry<'_, K, V> {
+ /// Sets the value of the entry to `value`, and returns the entry's old value.
+ pub fn insert(&mut self, value: V) -> V {
+ replace(self.get_mut(), value)
+ }
+
+ /// Remove the key, value pair stored in the map for this entry, and return the value.
+ ///
+ /// **NOTE:** This is equivalent to `.swap_remove()`.
+ pub fn remove(self) -> V {
+ self.swap_remove()
+ }
+
+ /// Remove the key, value pair stored in the map for this entry, and return the value.
+ ///
+ /// Like `Vec::swap_remove`, the pair is removed by swapping it with the
+ /// last element of the map and popping it off. **This perturbs
+ /// the position of what used to be the last element!**
+ ///
+ /// Computes in **O(1)** time (average).
+ pub fn swap_remove(self) -> V {
+ self.swap_remove_entry().1
+ }
+
+ /// Remove the key, value pair stored in the map for this entry, and return the value.
+ ///
+ /// Like `Vec::remove`, the pair is removed by shifting all of the
+ /// elements that follow it, preserving their relative order.
+ /// **This perturbs the index of all of those elements!**
+ ///
+ /// Computes in **O(n)** time (average).
+ pub fn shift_remove(self) -> V {
+ self.shift_remove_entry().1
+ }
+
+ /// Remove and return the key, value pair stored in the map for this entry
+ ///
+ /// **NOTE:** This is equivalent to `.swap_remove_entry()`.
+ pub fn remove_entry(self) -> (K, V) {
+ self.swap_remove_entry()
+ }
+}
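+
+#[cfg(has_std)]
+#[test]
+fn occupied_entry_remove_order_sketch() {
+    // Editor's illustrative sketch, not part of upstream indexmap: contrasts the
+    // ordering behavior of `swap_remove` and `shift_remove` documented above.
+    // Assumes the std default hasher, as in the crate's other tests.
+    let mut map: crate::IndexMap<char, i32> = ('a'..='e').zip(0..).collect();
+
+    // `swap_remove` fills the hole with what used to be the last entry ('e').
+    match map.entry('b') {
+        Entry::Occupied(e) => assert_eq!(e.swap_remove(), 1),
+        Entry::Vacant(_) => panic!(),
+    }
+    assert!(map.keys().copied().eq(['a', 'e', 'c', 'd']));
+
+    // `shift_remove` keeps the relative order of the remaining entries.
+    match map.entry('c') {
+        Entry::Occupied(e) => assert_eq!(e.shift_remove(), 2),
+        Entry::Vacant(_) => panic!(),
+    }
+    assert!(map.keys().copied().eq(['a', 'e', 'd']));
+}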
+
+impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for OccupiedEntry<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct(stringify!(OccupiedEntry))
+ .field("key", self.key())
+ .field("value", self.get())
+ .finish()
+ }
+}
+
+/// A view into a vacant entry in an `IndexMap`.
+/// It is part of the [`Entry`] enum.
+///
+/// [`Entry`]: enum.Entry.html
+pub struct VacantEntry<'a, K, V> {
+ map: &'a mut IndexMapCore<K, V>,
+ hash: HashValue,
+ key: K,
+}
+
+impl<'a, K, V> VacantEntry<'a, K, V> {
+ /// Gets a reference to the key that was used to find the entry.
+ pub fn key(&self) -> &K {
+ &self.key
+ }
+
+ /// Takes ownership of the key, leaving the entry vacant.
+ pub fn into_key(self) -> K {
+ self.key
+ }
+
+ /// Return the index where the key-value pair will be inserted.
+ pub fn index(&self) -> usize {
+ self.map.len()
+ }
+
+ /// Inserts the entry's key and the given value into the map, and returns a mutable reference
+ /// to the value.
+ pub fn insert(self, value: V) -> &'a mut V {
+ let i = self.map.push(self.hash, self.key, value);
+ &mut self.map.entries[i].value
+ }
+}
+
+impl<K: fmt::Debug, V> fmt::Debug for VacantEntry<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple(stringify!(VacantEntry))
+ .field(self.key())
+ .finish()
+ }
+}
+
+#[test]
+fn assert_send_sync() {
+ fn assert_send_sync<T: Send + Sync>() {}
+ assert_send_sync::<IndexMapCore<i32, i32>>();
+ assert_send_sync::<Entry<'_, i32, i32>>();
+}
diff --git a/vendor/indexmap-1.9.3/src/map/core/raw.rs b/vendor/indexmap-1.9.3/src/map/core/raw.rs
new file mode 100644
index 0000000..bf1672d
--- /dev/null
+++ b/vendor/indexmap-1.9.3/src/map/core/raw.rs
@@ -0,0 +1,191 @@
+#![allow(unsafe_code)]
+//! This module encapsulates the `unsafe` access to `hashbrown::raw::RawTable`,
+//! mostly in dealing with its bucket "pointers".
+
+use super::{equivalent, Bucket, Entry, HashValue, IndexMapCore, VacantEntry};
+use core::fmt;
+use core::mem::replace;
+use hashbrown::raw::RawTable;
+
+type RawBucket = hashbrown::raw::Bucket<usize>;
+
+/// Inserts many entries into a raw table without reallocating.
+///
+/// ***Panics*** if there is not sufficient capacity already.
+pub(super) fn insert_bulk_no_grow<K, V>(indices: &mut RawTable<usize>, entries: &[Bucket<K, V>]) {
+ assert!(indices.capacity() - indices.len() >= entries.len());
+ for entry in entries {
+ // SAFETY: we asserted that sufficient capacity exists for all entries.
+ unsafe {
+ indices.insert_no_grow(entry.hash.get(), indices.len());
+ }
+ }
+}
+
+pub(super) struct DebugIndices<'a>(pub &'a RawTable<usize>);
+impl fmt::Debug for DebugIndices<'_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // SAFETY: we're not letting any of the buckets escape this function
+ let indices = unsafe { self.0.iter().map(|raw_bucket| raw_bucket.read()) };
+ f.debug_list().entries(indices).finish()
+ }
+}
+
+impl<K, V> IndexMapCore<K, V> {
+ /// Sweep the whole table to erase indices start..end
+ pub(super) fn erase_indices_sweep(&mut self, start: usize, end: usize) {
+ // SAFETY: we're not letting any of the buckets escape this function
+ unsafe {
+ let offset = end - start;
+ for bucket in self.indices.iter() {
+ let i = bucket.read();
+ if i >= end {
+ bucket.write(i - offset);
+ } else if i >= start {
+ self.indices.erase(bucket);
+ }
+ }
+ }
+ }
+
+ pub(crate) fn entry(&mut self, hash: HashValue, key: K) -> Entry<'_, K, V>
+ where
+ K: Eq,
+ {
+ let eq = equivalent(&key, &self.entries);
+ match self.indices.find(hash.get(), eq) {
+ // SAFETY: The entry is created with a live raw bucket, at the same time
+ // we have a &mut reference to the map, so it can not be modified further.
+ Some(raw_bucket) => Entry::Occupied(OccupiedEntry {
+ map: self,
+ raw_bucket,
+ key,
+ }),
+ None => Entry::Vacant(VacantEntry {
+ map: self,
+ hash,
+ key,
+ }),
+ }
+ }
+
+ pub(super) fn indices_mut(&mut self) -> impl Iterator<Item = &mut usize> {
+ // SAFETY: we're not letting any of the buckets escape this function,
+ // only the item references that are appropriately bound to `&mut self`.
+ unsafe { self.indices.iter().map(|bucket| bucket.as_mut()) }
+ }
+
+ /// Return the raw bucket for the given index
+ fn find_index(&self, index: usize) -> RawBucket {
+ // We'll get a "nice" bounds-check from indexing `self.entries`,
+ // and then we expect to find it in the table as well.
+ let hash = self.entries[index].hash.get();
+ self.indices
+ .find(hash, move |&i| i == index)
+ .expect("index not found")
+ }
+
+ pub(crate) fn swap_indices(&mut self, a: usize, b: usize) {
+ // SAFETY: Can't take two `get_mut` references from one table, so we
+ // must use raw buckets to do the swap. This is still safe because we
+ // are locally sure they won't dangle, and we write them individually.
+ unsafe {
+ let raw_bucket_a = self.find_index(a);
+ let raw_bucket_b = self.find_index(b);
+ raw_bucket_a.write(b);
+ raw_bucket_b.write(a);
+ }
+ self.entries.swap(a, b);
+ }
+}
+
+/// A view into an occupied entry in an `IndexMap`.
+/// It is part of the [`Entry`] enum.
+///
+/// [`Entry`]: enum.Entry.html
+// SAFETY: The lifetime of the map reference also constrains the raw bucket,
+// which is essentially a raw pointer into the map indices.
+pub struct OccupiedEntry<'a, K, V> {
+ map: &'a mut IndexMapCore<K, V>,
+ raw_bucket: RawBucket,
+ key: K,
+}
+
+// `hashbrown::raw::Bucket` is only `Send`, not `Sync`.
+// SAFETY: `&self` only accesses the bucket to read it.
+unsafe impl<K: Sync, V: Sync> Sync for OccupiedEntry<'_, K, V> {}
+
+// The parent module also adds methods that don't threaten the unsafe encapsulation.
+impl<'a, K, V> OccupiedEntry<'a, K, V> {
+ /// Gets a reference to the entry's key in the map.
+ ///
+ /// Note that this is not the key that was used to find the entry. There may be an observable
+ /// difference if the key type has any distinguishing features outside of `Hash` and `Eq`, like
+ /// extra fields or the memory address of an allocation.
+ pub fn key(&self) -> &K {
+ &self.map.entries[self.index()].key
+ }
+
+ /// Gets a reference to the entry's value in the map.
+ pub fn get(&self) -> &V {
+ &self.map.entries[self.index()].value
+ }
+
+ /// Gets a mutable reference to the entry's value in the map.
+ ///
+ /// If you need a reference which may outlive the destruction of the
+ /// `Entry` value, see `into_mut`.
+ pub fn get_mut(&mut self) -> &mut V {
+ let index = self.index();
+ &mut self.map.entries[index].value
+ }
+
+ /// Put the new key in the occupied entry's key slot
+ pub(crate) fn replace_key(self) -> K {
+ let index = self.index();
+ let old_key = &mut self.map.entries[index].key;
+ replace(old_key, self.key)
+ }
+
+ /// Return the index of the key-value pair
+ #[inline]
+ pub fn index(&self) -> usize {
+        // SAFETY: we have &mut map, keeping the bucket stable
+ unsafe { self.raw_bucket.read() }
+ }
+
+ /// Converts into a mutable reference to the entry's value in the map,
+ /// with a lifetime bound to the map itself.
+ pub fn into_mut(self) -> &'a mut V {
+ let index = self.index();
+ &mut self.map.entries[index].value
+ }
+
+ /// Remove and return the key, value pair stored in the map for this entry
+ ///
+ /// Like `Vec::swap_remove`, the pair is removed by swapping it with the
+ /// last element of the map and popping it off. **This perturbs
+ /// the position of what used to be the last element!**
+ ///
+ /// Computes in **O(1)** time (average).
+ pub fn swap_remove_entry(self) -> (K, V) {
+ // SAFETY: This is safe because it can only happen once (self is consumed)
+ // and map.indices have not been modified since entry construction
+ let index = unsafe { self.map.indices.remove(self.raw_bucket) };
+ self.map.swap_remove_finish(index)
+ }
+
+ /// Remove and return the key, value pair stored in the map for this entry
+ ///
+ /// Like `Vec::remove`, the pair is removed by shifting all of the
+ /// elements that follow it, preserving their relative order.
+ /// **This perturbs the index of all of those elements!**
+ ///
+ /// Computes in **O(n)** time (average).
+ pub fn shift_remove_entry(self) -> (K, V) {
+ // SAFETY: This is safe because it can only happen once (self is consumed)
+ // and map.indices have not been modified since entry construction
+ let index = unsafe { self.map.indices.remove(self.raw_bucket) };
+ self.map.shift_remove_finish(index)
+ }
+}
diff --git a/vendor/indexmap-1.9.3/src/mutable_keys.rs b/vendor/indexmap-1.9.3/src/mutable_keys.rs
new file mode 100644
index 0000000..35a90c4
--- /dev/null
+++ b/vendor/indexmap-1.9.3/src/mutable_keys.rs
@@ -0,0 +1,75 @@
+use core::hash::{BuildHasher, Hash};
+
+use super::{Equivalent, IndexMap};
+
+pub struct PrivateMarker {}
+
+/// Opt-in mutable access to keys.
+///
+/// These methods expose `&mut K`, mutable references to the key as it is stored
+/// in the map.
+/// You are allowed to modify the keys in the hashmap **if the modification
+/// does not change the key’s hash and equality**.
+///
+/// If keys are modified erroneously, you can no longer look them up.
+/// This is sound (memory safe) but a logical error hazard (just like
+/// implementing PartialEq, Eq, or Hash incorrectly would be).
+///
+/// `use` this trait to enable its methods for `IndexMap`.
+pub trait MutableKeys {
+ type Key;
+ type Value;
+
+ /// Return item index, mutable reference to key and value
+ fn get_full_mut2<Q: ?Sized>(
+ &mut self,
+ key: &Q,
+ ) -> Option<(usize, &mut Self::Key, &mut Self::Value)>
+ where
+ Q: Hash + Equivalent<Self::Key>;
+
+ /// Scan through each key-value pair in the map and keep those where the
+ /// closure `keep` returns `true`.
+ ///
+ /// The elements are visited in order, and remaining elements keep their
+ /// order.
+ ///
+ /// Computes in **O(n)** time (average).
+ fn retain2<F>(&mut self, keep: F)
+ where
+ F: FnMut(&mut Self::Key, &mut Self::Value) -> bool;
+
+ /// This method is not useful in itself – it is there to “seal” the trait
+ /// for external implementation, so that we can add methods without
+ /// causing breaking changes.
+ fn __private_marker(&self) -> PrivateMarker;
+}
+
+/// Opt-in mutable access to keys.
+///
+/// See [`MutableKeys`](trait.MutableKeys.html) for more information.
+impl<K, V, S> MutableKeys for IndexMap<K, V, S>
+where
+ K: Eq + Hash,
+ S: BuildHasher,
+{
+ type Key = K;
+ type Value = V;
+ fn get_full_mut2<Q: ?Sized>(&mut self, key: &Q) -> Option<(usize, &mut K, &mut V)>
+ where
+ Q: Hash + Equivalent<K>,
+ {
+ self.get_full_mut2_impl(key)
+ }
+
+ fn retain2<F>(&mut self, keep: F)
+ where
+ F: FnMut(&mut K, &mut V) -> bool,
+ {
+ self.retain_mut(keep)
+ }
+
+ fn __private_marker(&self) -> PrivateMarker {
+ PrivateMarker {}
+ }
+}
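+
+#[cfg(has_std)]
+#[test]
+fn mutable_keys_sketch() {
+    // Editor's illustrative sketch, not part of upstream indexmap: shows the opt-in
+    // access described above via `get_full_mut2` and `retain2`. Assumes the std
+    // default hasher, as in the crate's other tests.
+    let mut map: IndexMap<i32, i32> = (0..5).map(|i| (i, i * 10)).collect();
+
+    // Mutable access to the key is allowed as long as its hash and equality
+    // are left unchanged; here only the value is modified.
+    if let Some((index, _key, value)) = map.get_full_mut2(&3) {
+        assert_eq!(index, 3);
+        *value += 1;
+    }
+    assert_eq!(map[&3], 31);
+
+    // Keep only entries with even keys; the remaining entries keep their order.
+    map.retain2(|key, _value| *key % 2 == 0);
+    assert!(map.keys().eq(&[0, 2, 4]));
+}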
diff --git a/vendor/indexmap-1.9.3/src/rayon/map.rs b/vendor/indexmap-1.9.3/src/rayon/map.rs
new file mode 100644
index 0000000..8819f13
--- /dev/null
+++ b/vendor/indexmap-1.9.3/src/rayon/map.rs
@@ -0,0 +1,583 @@
+//! Parallel iterator types for `IndexMap` with [rayon](https://docs.rs/rayon/1.0/rayon).
+//!
+//! You will rarely need to interact with this module directly unless you need to name one of the
+//! iterator types.
+//!
+//! Requires crate feature `"rayon"`
+
+use super::collect;
+use rayon::iter::plumbing::{Consumer, ProducerCallback, UnindexedConsumer};
+use rayon::prelude::*;
+
+use crate::vec::Vec;
+use core::cmp::Ordering;
+use core::fmt;
+use core::hash::{BuildHasher, Hash};
+use core::ops::RangeBounds;
+
+use crate::Bucket;
+use crate::Entries;
+use crate::IndexMap;
+
+/// Requires crate feature `"rayon"`.
+impl<K, V, S> IntoParallelIterator for IndexMap<K, V, S>
+where
+ K: Send,
+ V: Send,
+{
+ type Item = (K, V);
+ type Iter = IntoParIter<K, V>;
+
+ fn into_par_iter(self) -> Self::Iter {
+ IntoParIter {
+ entries: self.into_entries(),
+ }
+ }
+}
+
+/// A parallel owning iterator over the entries of an `IndexMap`.
+///
+/// This `struct` is created by the [`into_par_iter`] method on [`IndexMap`]
+/// (provided by rayon's `IntoParallelIterator` trait). See its documentation for more.
+///
+/// [`into_par_iter`]: ../struct.IndexMap.html#method.into_par_iter
+/// [`IndexMap`]: ../struct.IndexMap.html
+pub struct IntoParIter<K, V> {
+ entries: Vec<Bucket<K, V>>,
+}
+
+impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for IntoParIter<K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let iter = self.entries.iter().map(Bucket::refs);
+ f.debug_list().entries(iter).finish()
+ }
+}
+
+impl<K: Send, V: Send> ParallelIterator for IntoParIter<K, V> {
+ type Item = (K, V);
+
+ parallel_iterator_methods!(Bucket::key_value);
+}
+
+impl<K: Send, V: Send> IndexedParallelIterator for IntoParIter<K, V> {
+ indexed_parallel_iterator_methods!(Bucket::key_value);
+}
+
+/// Requires crate feature `"rayon"`.
+impl<'a, K, V, S> IntoParallelIterator for &'a IndexMap<K, V, S>
+where
+ K: Sync,
+ V: Sync,
+{
+ type Item = (&'a K, &'a V);
+ type Iter = ParIter<'a, K, V>;
+
+ fn into_par_iter(self) -> Self::Iter {
+ ParIter {
+ entries: self.as_entries(),
+ }
+ }
+}
+
+/// A parallel iterator over the entries of an `IndexMap`.
+///
+/// This `struct` is created by the [`par_iter`] method on [`IndexMap`]
+/// (provided by rayon's `IntoParallelRefIterator` trait). See its documentation for more.
+///
+/// [`par_iter`]: ../struct.IndexMap.html#method.par_iter
+/// [`IndexMap`]: ../struct.IndexMap.html
+pub struct ParIter<'a, K, V> {
+ entries: &'a [Bucket<K, V>],
+}
+
+impl<K, V> Clone for ParIter<'_, K, V> {
+ fn clone(&self) -> Self {
+ ParIter { ..*self }
+ }
+}
+
+impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for ParIter<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let iter = self.entries.iter().map(Bucket::refs);
+ f.debug_list().entries(iter).finish()
+ }
+}
+
+impl<'a, K: Sync, V: Sync> ParallelIterator for ParIter<'a, K, V> {
+ type Item = (&'a K, &'a V);
+
+ parallel_iterator_methods!(Bucket::refs);
+}
+
+impl<K: Sync, V: Sync> IndexedParallelIterator for ParIter<'_, K, V> {
+ indexed_parallel_iterator_methods!(Bucket::refs);
+}
+
+/// Requires crate feature `"rayon"`.
+impl<'a, K, V, S> IntoParallelIterator for &'a mut IndexMap<K, V, S>
+where
+ K: Sync + Send,
+ V: Send,
+{
+ type Item = (&'a K, &'a mut V);
+ type Iter = ParIterMut<'a, K, V>;
+
+ fn into_par_iter(self) -> Self::Iter {
+ ParIterMut {
+ entries: self.as_entries_mut(),
+ }
+ }
+}
+
+/// A parallel mutable iterator over the entries of an `IndexMap`.
+///
+/// This `struct` is created by the [`par_iter_mut`] method on [`IndexMap`]
+/// (provided by rayon's `IntoParallelRefMutIterator` trait). See its documentation for more.
+///
+/// [`par_iter_mut`]: ../struct.IndexMap.html#method.par_iter_mut
+/// [`IndexMap`]: ../struct.IndexMap.html
+pub struct ParIterMut<'a, K, V> {
+ entries: &'a mut [Bucket<K, V>],
+}
+
+impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for ParIterMut<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let iter = self.entries.iter().map(Bucket::refs);
+ f.debug_list().entries(iter).finish()
+ }
+}
+
+impl<'a, K: Sync + Send, V: Send> ParallelIterator for ParIterMut<'a, K, V> {
+ type Item = (&'a K, &'a mut V);
+
+ parallel_iterator_methods!(Bucket::ref_mut);
+}
+
+impl<K: Sync + Send, V: Send> IndexedParallelIterator for ParIterMut<'_, K, V> {
+ indexed_parallel_iterator_methods!(Bucket::ref_mut);
+}
+
+/// Requires crate feature `"rayon"`.
+impl<'a, K, V, S> ParallelDrainRange<usize> for &'a mut IndexMap<K, V, S>
+where
+ K: Send,
+ V: Send,
+{
+ type Item = (K, V);
+ type Iter = ParDrain<'a, K, V>;
+
+ fn par_drain<R: RangeBounds<usize>>(self, range: R) -> Self::Iter {
+ ParDrain {
+ entries: self.core.par_drain(range),
+ }
+ }
+}
+
+/// A parallel draining iterator over the entries of an `IndexMap`.
+///
+/// This `struct` is created by the [`par_drain`] method on [`IndexMap`]
+/// (provided by rayon's `ParallelDrainRange` trait). See its documentation for more.
+///
+/// [`par_drain`]: ../struct.IndexMap.html#method.par_drain
+/// [`IndexMap`]: ../struct.IndexMap.html
+pub struct ParDrain<'a, K: Send, V: Send> {
+ entries: rayon::vec::Drain<'a, Bucket<K, V>>,
+}
+
+impl<K: Send, V: Send> ParallelIterator for ParDrain<'_, K, V> {
+ type Item = (K, V);
+
+ parallel_iterator_methods!(Bucket::key_value);
+}
+
+impl<K: Send, V: Send> IndexedParallelIterator for ParDrain<'_, K, V> {
+ indexed_parallel_iterator_methods!(Bucket::key_value);
+}
+
+/// Parallel iterator methods and other parallel methods.
+///
+/// The following methods **require crate feature `"rayon"`**.
+///
+/// See also the `IntoParallelIterator` implementations.
+impl<K, V, S> IndexMap<K, V, S>
+where
+ K: Sync,
+ V: Sync,
+{
+ /// Return a parallel iterator over the keys of the map.
+ ///
+ /// While parallel iterators can process items in any order, their relative order
+ /// in the map is still preserved for operations like `reduce` and `collect`.
+ pub fn par_keys(&self) -> ParKeys<'_, K, V> {
+ ParKeys {
+ entries: self.as_entries(),
+ }
+ }
+
+ /// Return a parallel iterator over the values of the map.
+ ///
+ /// While parallel iterators can process items in any order, their relative order
+ /// in the map is still preserved for operations like `reduce` and `collect`.
+ pub fn par_values(&self) -> ParValues<'_, K, V> {
+ ParValues {
+ entries: self.as_entries(),
+ }
+ }
+}
+
+impl<K, V, S> IndexMap<K, V, S>
+where
+ K: Hash + Eq + Sync,
+ V: Sync,
+ S: BuildHasher,
+{
+ /// Returns `true` if `self` contains all of the same key-value pairs as `other`,
+ /// regardless of each map's indexed order, determined in parallel.
+ pub fn par_eq<V2, S2>(&self, other: &IndexMap<K, V2, S2>) -> bool
+ where
+ V: PartialEq<V2>,
+ V2: Sync,
+ S2: BuildHasher + Sync,
+ {
+ self.len() == other.len()
+ && self
+ .par_iter()
+ .all(move |(key, value)| other.get(key).map_or(false, |v| *value == *v))
+ }
+}
+
+/// A parallel iterator over the keys of an `IndexMap`.
+///
+/// This `struct` is created by the [`par_keys`] method on [`IndexMap`]. See its
+/// documentation for more.
+///
+/// [`par_keys`]: ../struct.IndexMap.html#method.par_keys
+/// [`IndexMap`]: ../struct.IndexMap.html
+pub struct ParKeys<'a, K, V> {
+ entries: &'a [Bucket<K, V>],
+}
+
+impl<K, V> Clone for ParKeys<'_, K, V> {
+ fn clone(&self) -> Self {
+ ParKeys { ..*self }
+ }
+}
+
+impl<K: fmt::Debug, V> fmt::Debug for ParKeys<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let iter = self.entries.iter().map(Bucket::key_ref);
+ f.debug_list().entries(iter).finish()
+ }
+}
+
+impl<'a, K: Sync, V: Sync> ParallelIterator for ParKeys<'a, K, V> {
+ type Item = &'a K;
+
+ parallel_iterator_methods!(Bucket::key_ref);
+}
+
+impl<K: Sync, V: Sync> IndexedParallelIterator for ParKeys<'_, K, V> {
+ indexed_parallel_iterator_methods!(Bucket::key_ref);
+}
+
+/// A parallel iterator over the values of an `IndexMap`.
+///
+/// This `struct` is created by the [`par_values`] method on [`IndexMap`]. See its
+/// documentation for more.
+///
+/// [`par_values`]: ../struct.IndexMap.html#method.par_values
+/// [`IndexMap`]: ../struct.IndexMap.html
+pub struct ParValues<'a, K, V> {
+ entries: &'a [Bucket<K, V>],
+}
+
+impl<K, V> Clone for ParValues<'_, K, V> {
+ fn clone(&self) -> Self {
+ ParValues { ..*self }
+ }
+}
+
+impl<K, V: fmt::Debug> fmt::Debug for ParValues<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let iter = self.entries.iter().map(Bucket::value_ref);
+ f.debug_list().entries(iter).finish()
+ }
+}
+
+impl<'a, K: Sync, V: Sync> ParallelIterator for ParValues<'a, K, V> {
+ type Item = &'a V;
+
+ parallel_iterator_methods!(Bucket::value_ref);
+}
+
+impl<K: Sync, V: Sync> IndexedParallelIterator for ParValues<'_, K, V> {
+ indexed_parallel_iterator_methods!(Bucket::value_ref);
+}
+
+/// Requires crate feature `"rayon"`.
+impl<K, V, S> IndexMap<K, V, S>
+where
+ K: Send,
+ V: Send,
+{
+ /// Return a parallel iterator over mutable references to the values of the map
+ ///
+ /// While parallel iterators can process items in any order, their relative order
+ /// in the map is still preserved for operations like `reduce` and `collect`.
+ pub fn par_values_mut(&mut self) -> ParValuesMut<'_, K, V> {
+ ParValuesMut {
+ entries: self.as_entries_mut(),
+ }
+ }
+}
+
+impl<K, V, S> IndexMap<K, V, S>
+where
+ K: Hash + Eq + Send,
+ V: Send,
+ S: BuildHasher,
+{
+ /// Sort the map’s key-value pairs in parallel, by the default ordering of the keys.
+ pub fn par_sort_keys(&mut self)
+ where
+ K: Ord,
+ {
+ self.with_entries(|entries| {
+ entries.par_sort_by(|a, b| K::cmp(&a.key, &b.key));
+ });
+ }
+
+ /// Sort the map’s key-value pairs in place and in parallel, using the comparison
+ /// function `cmp`.
+ ///
+ /// The comparison function receives two key and value pairs to compare (you
+ /// can sort by keys or values or their combination as needed).
+ pub fn par_sort_by<F>(&mut self, cmp: F)
+ where
+ F: Fn(&K, &V, &K, &V) -> Ordering + Sync,
+ {
+ self.with_entries(|entries| {
+ entries.par_sort_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value));
+ });
+ }
+
+ /// Sort the key-value pairs of the map in parallel and return a by-value parallel
+ /// iterator of the key-value pairs with the result.
+ pub fn par_sorted_by<F>(self, cmp: F) -> IntoParIter<K, V>
+ where
+ F: Fn(&K, &V, &K, &V) -> Ordering + Sync,
+ {
+ let mut entries = self.into_entries();
+ entries.par_sort_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value));
+ IntoParIter { entries }
+ }
+
+ /// Sort the map's key-value pairs in parallel, by the default ordering of the keys.
+ pub fn par_sort_unstable_keys(&mut self)
+ where
+ K: Ord,
+ {
+ self.with_entries(|entries| {
+ entries.par_sort_unstable_by(|a, b| K::cmp(&a.key, &b.key));
+ });
+ }
+
+ /// Sort the map's key-value pairs in place and in parallel, using the comparison
+ /// function `cmp`.
+ ///
+ /// The comparison function receives two key and value pairs to compare (you
+ /// can sort by keys or values or their combination as needed).
+ pub fn par_sort_unstable_by<F>(&mut self, cmp: F)
+ where
+ F: Fn(&K, &V, &K, &V) -> Ordering + Sync,
+ {
+ self.with_entries(|entries| {
+ entries.par_sort_unstable_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value));
+ });
+ }
+
+ /// Sort the key-value pairs of the map in parallel and return a by-value parallel
+ /// iterator of the key-value pairs with the result.
+ pub fn par_sorted_unstable_by<F>(self, cmp: F) -> IntoParIter<K, V>
+ where
+ F: Fn(&K, &V, &K, &V) -> Ordering + Sync,
+ {
+ let mut entries = self.into_entries();
+ entries.par_sort_unstable_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value));
+ IntoParIter { entries }
+ }
+}
+
+/// A parallel mutable iterator over the values of an `IndexMap`.
+///
+/// This `struct` is created by the [`par_values_mut`] method on [`IndexMap`]. See its
+/// documentation for more.
+///
+/// [`par_values_mut`]: ../struct.IndexMap.html#method.par_values_mut
+/// [`IndexMap`]: ../struct.IndexMap.html
+pub struct ParValuesMut<'a, K, V> {
+ entries: &'a mut [Bucket<K, V>],
+}
+
+impl<K, V: fmt::Debug> fmt::Debug for ParValuesMut<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let iter = self.entries.iter().map(Bucket::value_ref);
+ f.debug_list().entries(iter).finish()
+ }
+}
+
+impl<'a, K: Send, V: Send> ParallelIterator for ParValuesMut<'a, K, V> {
+ type Item = &'a mut V;
+
+ parallel_iterator_methods!(Bucket::value_mut);
+}
+
+impl<K: Send, V: Send> IndexedParallelIterator for ParValuesMut<'_, K, V> {
+ indexed_parallel_iterator_methods!(Bucket::value_mut);
+}
+
+/// Requires crate feature `"rayon"`.
+impl<K, V, S> FromParallelIterator<(K, V)> for IndexMap<K, V, S>
+where
+ K: Eq + Hash + Send,
+ V: Send,
+ S: BuildHasher + Default + Send,
+{
+ fn from_par_iter<I>(iter: I) -> Self
+ where
+ I: IntoParallelIterator<Item = (K, V)>,
+ {
+ let list = collect(iter);
+ let len = list.iter().map(Vec::len).sum();
+ let mut map = Self::with_capacity_and_hasher(len, S::default());
+ for vec in list {
+ map.extend(vec);
+ }
+ map
+ }
+}
+
+/// Requires crate feature `"rayon"`.
+impl<K, V, S> ParallelExtend<(K, V)> for IndexMap<K, V, S>
+where
+ K: Eq + Hash + Send,
+ V: Send,
+ S: BuildHasher + Send,
+{
+ fn par_extend<I>(&mut self, iter: I)
+ where
+ I: IntoParallelIterator<Item = (K, V)>,
+ {
+ for vec in collect(iter) {
+ self.extend(vec);
+ }
+ }
+}
+
+/// Requires crate feature `"rayon"`.
+impl<'a, K: 'a, V: 'a, S> ParallelExtend<(&'a K, &'a V)> for IndexMap<K, V, S>
+where
+ K: Copy + Eq + Hash + Send + Sync,
+ V: Copy + Send + Sync,
+ S: BuildHasher + Send,
+{
+ fn par_extend<I>(&mut self, iter: I)
+ where
+ I: IntoParallelIterator<Item = (&'a K, &'a V)>,
+ {
+ for vec in collect(iter) {
+ self.extend(vec);
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use std::string::String;
+
+ #[test]
+ fn insert_order() {
+ let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23];
+ let mut map = IndexMap::new();
+
+ for &elt in &insert {
+ map.insert(elt, ());
+ }
+
+ assert_eq!(map.par_keys().count(), map.len());
+ assert_eq!(map.par_keys().count(), insert.len());
+ insert.par_iter().zip(map.par_keys()).for_each(|(a, b)| {
+ assert_eq!(a, b);
+ });
+ (0..insert.len())
+ .into_par_iter()
+ .zip(map.par_keys())
+ .for_each(|(i, k)| {
+ assert_eq!(map.get_index(i).unwrap().0, k);
+ });
+ }
+
+ #[test]
+ fn partial_eq_and_eq() {
+ let mut map_a = IndexMap::new();
+ map_a.insert(1, "1");
+ map_a.insert(2, "2");
+ let mut map_b = map_a.clone();
+ assert!(map_a.par_eq(&map_b));
+ map_b.swap_remove(&1);
+ assert!(!map_a.par_eq(&map_b));
+ map_b.insert(3, "3");
+ assert!(!map_a.par_eq(&map_b));
+
+ let map_c: IndexMap<_, String> =
+ map_b.into_par_iter().map(|(k, v)| (k, v.into())).collect();
+ assert!(!map_a.par_eq(&map_c));
+ assert!(!map_c.par_eq(&map_a));
+ }
+
+ #[test]
+ fn extend() {
+ let mut map = IndexMap::new();
+ map.par_extend(vec![(&1, &2), (&3, &4)]);
+ map.par_extend(vec![(5, 6)]);
+ assert_eq!(
+ map.into_par_iter().collect::<Vec<_>>(),
+ vec![(1, 2), (3, 4), (5, 6)]
+ );
+ }
+
+ #[test]
+ fn keys() {
+ let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')];
+ let map: IndexMap<_, _> = vec.into_par_iter().collect();
+ let keys: Vec<_> = map.par_keys().copied().collect();
+ assert_eq!(keys.len(), 3);
+ assert!(keys.contains(&1));
+ assert!(keys.contains(&2));
+ assert!(keys.contains(&3));
+ }
+
+ #[test]
+ fn values() {
+ let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')];
+ let map: IndexMap<_, _> = vec.into_par_iter().collect();
+ let values: Vec<_> = map.par_values().copied().collect();
+ assert_eq!(values.len(), 3);
+ assert!(values.contains(&'a'));
+ assert!(values.contains(&'b'));
+ assert!(values.contains(&'c'));
+ }
+
+ #[test]
+ fn values_mut() {
+ let vec = vec![(1, 1), (2, 2), (3, 3)];
+ let mut map: IndexMap<_, _> = vec.into_par_iter().collect();
+ map.par_values_mut().for_each(|value| *value *= 2);
+ let values: Vec<_> = map.par_values().copied().collect();
+ assert_eq!(values.len(), 3);
+ assert!(values.contains(&2));
+ assert!(values.contains(&4));
+ assert!(values.contains(&6));
+ }
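+
+    #[test]
+    fn par_sort() {
+        // Editor's illustrative sketch, not part of upstream indexmap: exercises the
+        // parallel sorting methods documented above on a small map.
+        let insert = [7, 2, 9, 4, 1];
+        let mut map: IndexMap<i32, i32> = insert.iter().map(|&k| (k, k * 10)).collect();
+
+        // Sort in place by key, in parallel.
+        map.par_sort_keys();
+        assert!(map.keys().copied().eq([1, 2, 4, 7, 9]));
+
+        // Consume the map, sorting by value in descending order.
+        let sorted: Vec<_> = map
+            .par_sorted_by(|_k1, v1, _k2, v2| v2.cmp(v1))
+            .collect();
+        assert_eq!(sorted.first(), Some(&(9, 90)));
+    }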
+}
diff --git a/vendor/indexmap-1.9.3/src/rayon/mod.rs b/vendor/indexmap-1.9.3/src/rayon/mod.rs
new file mode 100644
index 0000000..ebb1ac2
--- /dev/null
+++ b/vendor/indexmap-1.9.3/src/rayon/mod.rs
@@ -0,0 +1,27 @@
+use rayon::prelude::*;
+
+use alloc::collections::LinkedList;
+
+use crate::vec::Vec;
+
+pub mod map;
+pub mod set;
+
+// This form of intermediate collection is also how Rayon collects `HashMap`.
+// Note that the order will also be preserved!
+fn collect<I: IntoParallelIterator>(iter: I) -> LinkedList<Vec<I::Item>> {
+ iter.into_par_iter()
+ .fold(Vec::new, |mut vec, elem| {
+ vec.push(elem);
+ vec
+ })
+ .map(|vec| {
+ let mut list = LinkedList::new();
+ list.push_back(vec);
+ list
+ })
+ .reduce(LinkedList::new, |mut list1, mut list2| {
+ list1.append(&mut list2);
+ list1
+ })
+}
diff --git a/vendor/indexmap-1.9.3/src/rayon/set.rs b/vendor/indexmap-1.9.3/src/rayon/set.rs
new file mode 100644
index 0000000..6749dc0
--- /dev/null
+++ b/vendor/indexmap-1.9.3/src/rayon/set.rs
@@ -0,0 +1,741 @@
+//! Parallel iterator types for `IndexSet` with [rayon](https://docs.rs/rayon/1.0/rayon).
+//!
+//! You will rarely need to interact with this module directly unless you need to name one of the
+//! iterator types.
+//!
+//! Requires crate feature `"rayon"`.
+
+use super::collect;
+use rayon::iter::plumbing::{Consumer, ProducerCallback, UnindexedConsumer};
+use rayon::prelude::*;
+
+use crate::vec::Vec;
+use core::cmp::Ordering;
+use core::fmt;
+use core::hash::{BuildHasher, Hash};
+use core::ops::RangeBounds;
+
+use crate::Entries;
+use crate::IndexSet;
+
+type Bucket<T> = crate::Bucket<T, ()>;
+
+/// Requires crate feature `"rayon"`.
+impl<T, S> IntoParallelIterator for IndexSet<T, S>
+where
+ T: Send,
+{
+ type Item = T;
+ type Iter = IntoParIter<T>;
+
+ fn into_par_iter(self) -> Self::Iter {
+ IntoParIter {
+ entries: self.into_entries(),
+ }
+ }
+}
+
+/// A parallel owning iterator over the items of an `IndexSet`.
+///
+/// This `struct` is created by the [`into_par_iter`] method on [`IndexSet`]
+/// (provided by rayon's `IntoParallelIterator` trait). See its documentation for more.
+///
+/// [`IndexSet`]: ../struct.IndexSet.html
+/// [`into_par_iter`]: ../struct.IndexSet.html#method.into_par_iter
+pub struct IntoParIter<T> {
+ entries: Vec<Bucket<T>>,
+}
+
+impl<T: fmt::Debug> fmt::Debug for IntoParIter<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let iter = self.entries.iter().map(Bucket::key_ref);
+ f.debug_list().entries(iter).finish()
+ }
+}
+
+impl<T: Send> ParallelIterator for IntoParIter<T> {
+ type Item = T;
+
+ parallel_iterator_methods!(Bucket::key);
+}
+
+impl<T: Send> IndexedParallelIterator for IntoParIter<T> {
+ indexed_parallel_iterator_methods!(Bucket::key);
+}
+
+/// Requires crate feature `"rayon"`.
+impl<'a, T, S> IntoParallelIterator for &'a IndexSet<T, S>
+where
+ T: Sync,
+{
+ type Item = &'a T;
+ type Iter = ParIter<'a, T>;
+
+ fn into_par_iter(self) -> Self::Iter {
+ ParIter {
+ entries: self.as_entries(),
+ }
+ }
+}
+
+/// A parallel iterator over the items of an `IndexSet`.
+///
+/// This `struct` is created by the [`par_iter`] method on [`IndexSet`]
+/// (provided by rayon's `IntoParallelRefIterator` trait). See its documentation for more.
+///
+/// [`IndexSet`]: ../struct.IndexSet.html
+/// [`par_iter`]: ../struct.IndexSet.html#method.par_iter
+pub struct ParIter<'a, T> {
+ entries: &'a [Bucket<T>],
+}
+
+impl<T> Clone for ParIter<'_, T> {
+ fn clone(&self) -> Self {
+ ParIter { ..*self }
+ }
+}
+
+impl<T: fmt::Debug> fmt::Debug for ParIter<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let iter = self.entries.iter().map(Bucket::key_ref);
+ f.debug_list().entries(iter).finish()
+ }
+}
+
+impl<'a, T: Sync> ParallelIterator for ParIter<'a, T> {
+ type Item = &'a T;
+
+ parallel_iterator_methods!(Bucket::key_ref);
+}
+
+impl<T: Sync> IndexedParallelIterator for ParIter<'_, T> {
+ indexed_parallel_iterator_methods!(Bucket::key_ref);
+}
+
+/// Requires crate feature `"rayon"`.
+impl<'a, T, S> ParallelDrainRange<usize> for &'a mut IndexSet<T, S>
+where
+ T: Send,
+{
+ type Item = T;
+ type Iter = ParDrain<'a, T>;
+
+ fn par_drain<R: RangeBounds<usize>>(self, range: R) -> Self::Iter {
+ ParDrain {
+ entries: self.map.core.par_drain(range),
+ }
+ }
+}
+
+/// A parallel draining iterator over the items of an `IndexSet`.
+///
+/// This `struct` is created by the [`par_drain`] method on [`IndexSet`]
+/// (provided by rayon's `ParallelDrainRange` trait). See its documentation for more.
+///
+/// [`par_drain`]: ../struct.IndexSet.html#method.par_drain
+/// [`IndexSet`]: ../struct.IndexSet.html
+pub struct ParDrain<'a, T: Send> {
+ entries: rayon::vec::Drain<'a, Bucket<T>>,
+}
+
+impl<T: Send> ParallelIterator for ParDrain<'_, T> {
+ type Item = T;
+
+ parallel_iterator_methods!(Bucket::key);
+}
+
+impl<T: Send> IndexedParallelIterator for ParDrain<'_, T> {
+ indexed_parallel_iterator_methods!(Bucket::key);
+}
+
+/// Parallel iterator methods and other parallel methods.
+///
+/// The following methods **require crate feature `"rayon"`**.
+///
+/// See also the `IntoParallelIterator` implementations.
+impl<T, S> IndexSet<T, S>
+where
+ T: Hash + Eq + Sync,
+ S: BuildHasher + Sync,
+{
+ /// Return a parallel iterator over the values that are in `self` but not `other`.
+ ///
+ /// While parallel iterators can process items in any order, their relative order
+ /// in the `self` set is still preserved for operations like `reduce` and `collect`.
+ pub fn par_difference<'a, S2>(
+ &'a self,
+ other: &'a IndexSet<T, S2>,
+ ) -> ParDifference<'a, T, S, S2>
+ where
+ S2: BuildHasher + Sync,
+ {
+ ParDifference {
+ set1: self,
+ set2: other,
+ }
+ }
+
+ /// Return a parallel iterator over the values that are in `self` or `other`,
+ /// but not in both.
+ ///
+ /// While parallel iterators can process items in any order, their relative order
+ /// in the sets is still preserved for operations like `reduce` and `collect`.
+ /// Values from `self` are produced in their original order, followed by
+ /// values from `other` in their original order.
+ pub fn par_symmetric_difference<'a, S2>(
+ &'a self,
+ other: &'a IndexSet<T, S2>,
+ ) -> ParSymmetricDifference<'a, T, S, S2>
+ where
+ S2: BuildHasher + Sync,
+ {
+ ParSymmetricDifference {
+ set1: self,
+ set2: other,
+ }
+ }
+
+ /// Return a parallel iterator over the values that are in both `self` and `other`.
+ ///
+ /// While parallel iterators can process items in any order, their relative order
+ /// in the `self` set is still preserved for operations like `reduce` and `collect`.
+ pub fn par_intersection<'a, S2>(
+ &'a self,
+ other: &'a IndexSet<T, S2>,
+ ) -> ParIntersection<'a, T, S, S2>
+ where
+ S2: BuildHasher + Sync,
+ {
+ ParIntersection {
+ set1: self,
+ set2: other,
+ }
+ }
+
+ /// Return a parallel iterator over all values that are in `self` or `other`.
+ ///
+ /// While parallel iterators can process items in any order, their relative order
+ /// in the sets is still preserved for operations like `reduce` and `collect`.
+ /// Values from `self` are produced in their original order, followed by
+ /// values that are unique to `other` in their original order.
+ pub fn par_union<'a, S2>(&'a self, other: &'a IndexSet<T, S2>) -> ParUnion<'a, T, S, S2>
+ where
+ S2: BuildHasher + Sync,
+ {
+ ParUnion {
+ set1: self,
+ set2: other,
+ }
+ }
+
+ /// Returns `true` if `self` contains all of the same values as `other`,
+ /// regardless of each set's indexed order, determined in parallel.
+ pub fn par_eq<S2>(&self, other: &IndexSet<T, S2>) -> bool
+ where
+ S2: BuildHasher + Sync,
+ {
+ self.len() == other.len() && self.par_is_subset(other)
+ }
+
+ /// Returns `true` if `self` has no elements in common with `other`,
+ /// determined in parallel.
+ pub fn par_is_disjoint<S2>(&self, other: &IndexSet<T, S2>) -> bool
+ where
+ S2: BuildHasher + Sync,
+ {
+ if self.len() <= other.len() {
+ self.par_iter().all(move |value| !other.contains(value))
+ } else {
+ other.par_iter().all(move |value| !self.contains(value))
+ }
+ }
+
+ /// Returns `true` if all elements of `other` are contained in `self`,
+ /// determined in parallel.
+ pub fn par_is_superset<S2>(&self, other: &IndexSet<T, S2>) -> bool
+ where
+ S2: BuildHasher + Sync,
+ {
+ other.par_is_subset(self)
+ }
+
+ /// Returns `true` if all elements of `self` are contained in `other`,
+ /// determined in parallel.
+ pub fn par_is_subset<S2>(&self, other: &IndexSet<T, S2>) -> bool
+ where
+ S2: BuildHasher + Sync,
+ {
+ self.len() <= other.len() && self.par_iter().all(move |value| other.contains(value))
+ }
+}
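A brief illustrative sketch of the parallel set operations above (an editorial example, not part of the vendored file; assumes the crate's `rayon` feature is enabled):

    use indexmap::IndexSet;
    use rayon::prelude::*;

    fn parallel_set_ops() {
        let a: IndexSet<i32> = (0..4).collect(); // {0, 1, 2, 3}
        let b: IndexSet<i32> = (2..6).collect(); // {2, 3, 4, 5}

        // The difference keeps the relative order of `a`.
        let diff: Vec<&i32> = a.par_difference(&b).collect();
        assert_eq!(diff, [&0, &1]);

        // The union yields `a` in order, then the values unique to `b`.
        let union: Vec<&i32> = a.par_union(&b).collect();
        assert_eq!(union, [&0, &1, &2, &3, &4, &5]);

        assert!(!a.par_is_disjoint(&b));
        assert!(a.par_is_subset(&(0..10).collect::<IndexSet<i32>>()));
    }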
+
+/// A parallel iterator producing elements in the difference of `IndexSet`s.
+///
+/// This `struct` is created by the [`par_difference`] method on [`IndexSet`].
+/// See its documentation for more.
+///
+/// [`IndexSet`]: ../struct.IndexSet.html
+/// [`par_difference`]: ../struct.IndexSet.html#method.par_difference
+pub struct ParDifference<'a, T, S1, S2> {
+ set1: &'a IndexSet<T, S1>,
+ set2: &'a IndexSet<T, S2>,
+}
+
+impl<T, S1, S2> Clone for ParDifference<'_, T, S1, S2> {
+ fn clone(&self) -> Self {
+ ParDifference { ..*self }
+ }
+}
+
+impl<T, S1, S2> fmt::Debug for ParDifference<'_, T, S1, S2>
+where
+ T: fmt::Debug + Eq + Hash,
+ S1: BuildHasher,
+ S2: BuildHasher,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list()
+ .entries(self.set1.difference(self.set2))
+ .finish()
+ }
+}
+
+impl<'a, T, S1, S2> ParallelIterator for ParDifference<'a, T, S1, S2>
+where
+ T: Hash + Eq + Sync,
+ S1: BuildHasher + Sync,
+ S2: BuildHasher + Sync,
+{
+ type Item = &'a T;
+
+ fn drive_unindexed<C>(self, consumer: C) -> C::Result
+ where
+ C: UnindexedConsumer<Self::Item>,
+ {
+ let Self { set1, set2 } = self;
+
+ set1.par_iter()
+ .filter(move |&item| !set2.contains(item))
+ .drive_unindexed(consumer)
+ }
+}
+
+/// A parallel iterator producing elements in the intersection of `IndexSet`s.
+///
+/// This `struct` is created by the [`par_intersection`] method on [`IndexSet`].
+/// See its documentation for more.
+///
+/// [`IndexSet`]: ../struct.IndexSet.html
+/// [`par_intersection`]: ../struct.IndexSet.html#method.par_intersection
+pub struct ParIntersection<'a, T, S1, S2> {
+ set1: &'a IndexSet<T, S1>,
+ set2: &'a IndexSet<T, S2>,
+}
+
+impl<T, S1, S2> Clone for ParIntersection<'_, T, S1, S2> {
+ fn clone(&self) -> Self {
+ ParIntersection { ..*self }
+ }
+}
+
+impl<T, S1, S2> fmt::Debug for ParIntersection<'_, T, S1, S2>
+where
+ T: fmt::Debug + Eq + Hash,
+ S1: BuildHasher,
+ S2: BuildHasher,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list()
+ .entries(self.set1.intersection(self.set2))
+ .finish()
+ }
+}
+
+impl<'a, T, S1, S2> ParallelIterator for ParIntersection<'a, T, S1, S2>
+where
+ T: Hash + Eq + Sync,
+ S1: BuildHasher + Sync,
+ S2: BuildHasher + Sync,
+{
+ type Item = &'a T;
+
+ fn drive_unindexed<C>(self, consumer: C) -> C::Result
+ where
+ C: UnindexedConsumer<Self::Item>,
+ {
+ let Self { set1, set2 } = self;
+
+ set1.par_iter()
+ .filter(move |&item| set2.contains(item))
+ .drive_unindexed(consumer)
+ }
+}
+
+/// A parallel iterator producing elements in the symmetric difference of `IndexSet`s.
+///
+/// This `struct` is created by the [`par_symmetric_difference`] method on
+/// [`IndexSet`]. See its documentation for more.
+///
+/// [`IndexSet`]: ../struct.IndexSet.html
+/// [`par_symmetric_difference`]: ../struct.IndexSet.html#method.par_symmetric_difference
+pub struct ParSymmetricDifference<'a, T, S1, S2> {
+ set1: &'a IndexSet<T, S1>,
+ set2: &'a IndexSet<T, S2>,
+}
+
+impl<T, S1, S2> Clone for ParSymmetricDifference<'_, T, S1, S2> {
+ fn clone(&self) -> Self {
+ ParSymmetricDifference { ..*self }
+ }
+}
+
+impl<T, S1, S2> fmt::Debug for ParSymmetricDifference<'_, T, S1, S2>
+where
+ T: fmt::Debug + Eq + Hash,
+ S1: BuildHasher,
+ S2: BuildHasher,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list()
+ .entries(self.set1.symmetric_difference(self.set2))
+ .finish()
+ }
+}
+
+impl<'a, T, S1, S2> ParallelIterator for ParSymmetricDifference<'a, T, S1, S2>
+where
+ T: Hash + Eq + Sync,
+ S1: BuildHasher + Sync,
+ S2: BuildHasher + Sync,
+{
+ type Item = &'a T;
+
+ fn drive_unindexed<C>(self, consumer: C) -> C::Result
+ where
+ C: UnindexedConsumer<Self::Item>,
+ {
+ let Self { set1, set2 } = self;
+
+ set1.par_difference(set2)
+ .chain(set2.par_difference(set1))
+ .drive_unindexed(consumer)
+ }
+}
+
+/// A parallel iterator producing elements in the union of `IndexSet`s.
+///
+/// This `struct` is created by the [`par_union`] method on [`IndexSet`].
+/// See its documentation for more.
+///
+/// [`IndexSet`]: ../struct.IndexSet.html
+/// [`par_union`]: ../struct.IndexSet.html#method.par_union
+pub struct ParUnion<'a, T, S1, S2> {
+ set1: &'a IndexSet<T, S1>,
+ set2: &'a IndexSet<T, S2>,
+}
+
+impl<T, S1, S2> Clone for ParUnion<'_, T, S1, S2> {
+ fn clone(&self) -> Self {
+ ParUnion { ..*self }
+ }
+}
+
+impl<T, S1, S2> fmt::Debug for ParUnion<'_, T, S1, S2>
+where
+ T: fmt::Debug + Eq + Hash,
+ S1: BuildHasher,
+ S2: BuildHasher,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.set1.union(self.set2)).finish()
+ }
+}
+
+impl<'a, T, S1, S2> ParallelIterator for ParUnion<'a, T, S1, S2>
+where
+ T: Hash + Eq + Sync,
+ S1: BuildHasher + Sync,
+ S2: BuildHasher + Sync,
+{
+ type Item = &'a T;
+
+ fn drive_unindexed<C>(self, consumer: C) -> C::Result
+ where
+ C: UnindexedConsumer<Self::Item>,
+ {
+ let Self { set1, set2 } = self;
+
+ set1.par_iter()
+ .chain(set2.par_difference(set1))
+ .drive_unindexed(consumer)
+ }
+}
+
+/// Parallel sorting methods.
+///
+/// The following methods **require crate feature `"rayon"`**.
+impl<T, S> IndexSet<T, S>
+where
+ T: Hash + Eq + Send,
+ S: BuildHasher + Send,
+{
+ /// Sort the set’s values in parallel by their default ordering.
+ pub fn par_sort(&mut self)
+ where
+ T: Ord,
+ {
+ self.with_entries(|entries| {
+ entries.par_sort_by(|a, b| T::cmp(&a.key, &b.key));
+ });
+ }
+
+ /// Sort the set’s values in place and in parallel, using the comparison function `cmp`.
+ pub fn par_sort_by<F>(&mut self, cmp: F)
+ where
+ F: Fn(&T, &T) -> Ordering + Sync,
+ {
+ self.with_entries(|entries| {
+ entries.par_sort_by(move |a, b| cmp(&a.key, &b.key));
+ });
+ }
+
+ /// Sort the values of the set in parallel and return a by-value parallel iterator of
+ /// the values with the result.
+ pub fn par_sorted_by<F>(self, cmp: F) -> IntoParIter<T>
+ where
+ F: Fn(&T, &T) -> Ordering + Sync,
+ {
+ let mut entries = self.into_entries();
+ entries.par_sort_by(move |a, b| cmp(&a.key, &b.key));
+ IntoParIter { entries }
+ }
+
+ /// Sort the set's values in parallel by their default ordering.
+ pub fn par_sort_unstable(&mut self)
+ where
+ T: Ord,
+ {
+ self.with_entries(|entries| {
+ entries.par_sort_unstable_by(|a, b| T::cmp(&a.key, &b.key));
+ });
+ }
+
+ /// Sort the set’s values in place and in parallel, using the comparison function `cmp`.
+ pub fn par_sort_unstable_by<F>(&mut self, cmp: F)
+ where
+ F: Fn(&T, &T) -> Ordering + Sync,
+ {
+ self.with_entries(|entries| {
+ entries.par_sort_unstable_by(move |a, b| cmp(&a.key, &b.key));
+ });
+ }
+
+ /// Sort the values of the set in parallel and return a by-value parallel iterator of
+ /// the values with the result.
+ pub fn par_sorted_unstable_by<F>(self, cmp: F) -> IntoParIter<T>
+ where
+ F: Fn(&T, &T) -> Ordering + Sync,
+ {
+ let mut entries = self.into_entries();
+ entries.par_sort_unstable_by(move |a, b| cmp(&a.key, &b.key));
+ IntoParIter { entries }
+ }
+}
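A minimal sketch of the parallel sorting methods above (an editorial example, not part of the vendored file):

    use indexmap::IndexSet;
    use rayon::prelude::*;

    fn parallel_sorting() {
        let mut set: IndexSet<i32> = [3, 1, 2].iter().copied().collect();

        // Stable parallel sort by the default ordering.
        set.par_sort();
        assert!(set.iter().eq([1, 2, 3].iter()));

        // Sort descending with a custom comparator, consuming the set.
        let sorted: Vec<i32> = set.par_sorted_by(|a, b| b.cmp(a)).collect();
        assert_eq!(sorted, [3, 2, 1]);
    }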
+
+/// Requires crate feature `"rayon"`.
+impl<T, S> FromParallelIterator<T> for IndexSet<T, S>
+where
+ T: Eq + Hash + Send,
+ S: BuildHasher + Default + Send,
+{
+ fn from_par_iter<I>(iter: I) -> Self
+ where
+ I: IntoParallelIterator<Item = T>,
+ {
+ let list = collect(iter);
+ let len = list.iter().map(Vec::len).sum();
+ let mut set = Self::with_capacity_and_hasher(len, S::default());
+ for vec in list {
+ set.extend(vec);
+ }
+ set
+ }
+}
+
+/// Requires crate feature `"rayon"`.
+impl<T, S> ParallelExtend<T> for IndexSet<T, S>
+where
+ T: Eq + Hash + Send,
+ S: BuildHasher + Send,
+{
+ fn par_extend<I>(&mut self, iter: I)
+ where
+ I: IntoParallelIterator<Item = T>,
+ {
+ for vec in collect(iter) {
+ self.extend(vec);
+ }
+ }
+}
+
+/// Requires crate feature `"rayon"`.
+impl<'a, T: 'a, S> ParallelExtend<&'a T> for IndexSet<T, S>
+where
+ T: Copy + Eq + Hash + Send + Sync,
+ S: BuildHasher + Send,
+{
+ fn par_extend<I>(&mut self, iter: I)
+ where
+ I: IntoParallelIterator<Item = &'a T>,
+ {
+ for vec in collect(iter) {
+ self.extend(vec);
+ }
+ }
+}
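A small sketch of parallel collection and extension (an editorial example, not part of the vendored file):

    use indexmap::IndexSet;
    use rayon::prelude::*;

    fn parallel_collect_and_extend() {
        // Collect in parallel; equal values collapse into a single entry.
        let halves: IndexSet<i32> = (0..8).into_par_iter().map(|i| i / 2).collect();
        assert_eq!(halves.len(), 4);

        // Extend in parallel, from owned values or from copied references.
        let mut set: IndexSet<i32> = IndexSet::new();
        set.par_extend(vec![1, 2, 2, 3]);
        let extra = vec![4, 5];
        set.par_extend(extra.par_iter());
        assert_eq!(set.len(), 5);
    }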
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn insert_order() {
+ let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23];
+ let mut set = IndexSet::new();
+
+ for &elt in &insert {
+ set.insert(elt);
+ }
+
+ assert_eq!(set.par_iter().count(), set.len());
+ assert_eq!(set.par_iter().count(), insert.len());
+ insert.par_iter().zip(&set).for_each(|(a, b)| {
+ assert_eq!(a, b);
+ });
+ (0..insert.len())
+ .into_par_iter()
+ .zip(&set)
+ .for_each(|(i, v)| {
+ assert_eq!(set.get_index(i).unwrap(), v);
+ });
+ }
+
+ #[test]
+ fn partial_eq_and_eq() {
+ let mut set_a = IndexSet::new();
+ set_a.insert(1);
+ set_a.insert(2);
+ let mut set_b = set_a.clone();
+ assert!(set_a.par_eq(&set_b));
+ set_b.swap_remove(&1);
+ assert!(!set_a.par_eq(&set_b));
+ set_b.insert(3);
+ assert!(!set_a.par_eq(&set_b));
+
+ let set_c: IndexSet<_> = set_b.into_par_iter().collect();
+ assert!(!set_a.par_eq(&set_c));
+ assert!(!set_c.par_eq(&set_a));
+ }
+
+ #[test]
+ fn extend() {
+ let mut set = IndexSet::new();
+ set.par_extend(vec![&1, &2, &3, &4]);
+ set.par_extend(vec![5, 6]);
+ assert_eq!(
+ set.into_par_iter().collect::<Vec<_>>(),
+ vec![1, 2, 3, 4, 5, 6]
+ );
+ }
+
+ #[test]
+ fn comparisons() {
+ let set_a: IndexSet<_> = (0..3).collect();
+ let set_b: IndexSet<_> = (3..6).collect();
+ let set_c: IndexSet<_> = (0..6).collect();
+ let set_d: IndexSet<_> = (3..9).collect();
+
+ assert!(!set_a.par_is_disjoint(&set_a));
+ assert!(set_a.par_is_subset(&set_a));
+ assert!(set_a.par_is_superset(&set_a));
+
+ assert!(set_a.par_is_disjoint(&set_b));
+ assert!(set_b.par_is_disjoint(&set_a));
+ assert!(!set_a.par_is_subset(&set_b));
+ assert!(!set_b.par_is_subset(&set_a));
+ assert!(!set_a.par_is_superset(&set_b));
+ assert!(!set_b.par_is_superset(&set_a));
+
+ assert!(!set_a.par_is_disjoint(&set_c));
+ assert!(!set_c.par_is_disjoint(&set_a));
+ assert!(set_a.par_is_subset(&set_c));
+ assert!(!set_c.par_is_subset(&set_a));
+ assert!(!set_a.par_is_superset(&set_c));
+ assert!(set_c.par_is_superset(&set_a));
+
+ assert!(!set_c.par_is_disjoint(&set_d));
+ assert!(!set_d.par_is_disjoint(&set_c));
+ assert!(!set_c.par_is_subset(&set_d));
+ assert!(!set_d.par_is_subset(&set_c));
+ assert!(!set_c.par_is_superset(&set_d));
+ assert!(!set_d.par_is_superset(&set_c));
+ }
+
+ #[test]
+ fn iter_comparisons() {
+ use std::iter::empty;
+
+ fn check<'a, I1, I2>(iter1: I1, iter2: I2)
+ where
+ I1: ParallelIterator<Item = &'a i32>,
+ I2: Iterator<Item = i32>,
+ {
+ let v1: Vec<_> = iter1.copied().collect();
+ let v2: Vec<_> = iter2.collect();
+ assert_eq!(v1, v2);
+ }
+
+ let set_a: IndexSet<_> = (0..3).collect();
+ let set_b: IndexSet<_> = (3..6).collect();
+ let set_c: IndexSet<_> = (0..6).collect();
+ let set_d: IndexSet<_> = (3..9).rev().collect();
+
+ check(set_a.par_difference(&set_a), empty());
+ check(set_a.par_symmetric_difference(&set_a), empty());
+ check(set_a.par_intersection(&set_a), 0..3);
+ check(set_a.par_union(&set_a), 0..3);
+
+ check(set_a.par_difference(&set_b), 0..3);
+ check(set_b.par_difference(&set_a), 3..6);
+ check(set_a.par_symmetric_difference(&set_b), 0..6);
+ check(set_b.par_symmetric_difference(&set_a), (3..6).chain(0..3));
+ check(set_a.par_intersection(&set_b), empty());
+ check(set_b.par_intersection(&set_a), empty());
+ check(set_a.par_union(&set_b), 0..6);
+ check(set_b.par_union(&set_a), (3..6).chain(0..3));
+
+ check(set_a.par_difference(&set_c), empty());
+ check(set_c.par_difference(&set_a), 3..6);
+ check(set_a.par_symmetric_difference(&set_c), 3..6);
+ check(set_c.par_symmetric_difference(&set_a), 3..6);
+ check(set_a.par_intersection(&set_c), 0..3);
+ check(set_c.par_intersection(&set_a), 0..3);
+ check(set_a.par_union(&set_c), 0..6);
+ check(set_c.par_union(&set_a), 0..6);
+
+ check(set_c.par_difference(&set_d), 0..3);
+ check(set_d.par_difference(&set_c), (6..9).rev());
+ check(
+ set_c.par_symmetric_difference(&set_d),
+ (0..3).chain((6..9).rev()),
+ );
+ check(
+ set_d.par_symmetric_difference(&set_c),
+ (6..9).rev().chain(0..3),
+ );
+ check(set_c.par_intersection(&set_d), 3..6);
+ check(set_d.par_intersection(&set_c), (3..6).rev());
+ check(set_c.par_union(&set_d), (0..6).chain((6..9).rev()));
+ check(set_d.par_union(&set_c), (3..9).rev().chain(0..3));
+ }
+}
diff --git a/vendor/indexmap-1.9.3/src/rustc.rs b/vendor/indexmap-1.9.3/src/rustc.rs
new file mode 100644
index 0000000..b843858
--- /dev/null
+++ b/vendor/indexmap-1.9.3/src/rustc.rs
@@ -0,0 +1,158 @@
+//! Minimal support for `rustc-rayon`, not intended for general use.
+
+use crate::vec::Vec;
+use crate::{Bucket, Entries, IndexMap, IndexSet};
+
+use rustc_rayon::iter::plumbing::{Consumer, ProducerCallback, UnindexedConsumer};
+use rustc_rayon::iter::{IndexedParallelIterator, IntoParallelIterator, ParallelIterator};
+
+mod map {
+ use super::*;
+
+ impl<K, V, S> IntoParallelIterator for IndexMap<K, V, S>
+ where
+ K: Send,
+ V: Send,
+ {
+ type Item = (K, V);
+ type Iter = IntoParIter<K, V>;
+
+ fn into_par_iter(self) -> Self::Iter {
+ IntoParIter {
+ entries: self.into_entries(),
+ }
+ }
+ }
+
+ pub struct IntoParIter<K, V> {
+ entries: Vec<Bucket<K, V>>,
+ }
+
+ impl<K: Send, V: Send> ParallelIterator for IntoParIter<K, V> {
+ type Item = (K, V);
+
+ parallel_iterator_methods!(Bucket::key_value);
+ }
+
+ impl<K: Send, V: Send> IndexedParallelIterator for IntoParIter<K, V> {
+ indexed_parallel_iterator_methods!(Bucket::key_value);
+ }
+
+ impl<'a, K, V, S> IntoParallelIterator for &'a IndexMap<K, V, S>
+ where
+ K: Sync,
+ V: Sync,
+ {
+ type Item = (&'a K, &'a V);
+ type Iter = ParIter<'a, K, V>;
+
+ fn into_par_iter(self) -> Self::Iter {
+ ParIter {
+ entries: self.as_entries(),
+ }
+ }
+ }
+
+ pub struct ParIter<'a, K, V> {
+ entries: &'a [Bucket<K, V>],
+ }
+
+ impl<'a, K: Sync, V: Sync> ParallelIterator for ParIter<'a, K, V> {
+ type Item = (&'a K, &'a V);
+
+ parallel_iterator_methods!(Bucket::refs);
+ }
+
+ impl<K: Sync, V: Sync> IndexedParallelIterator for ParIter<'_, K, V> {
+ indexed_parallel_iterator_methods!(Bucket::refs);
+ }
+
+ impl<'a, K, V, S> IntoParallelIterator for &'a mut IndexMap<K, V, S>
+ where
+ K: Sync + Send,
+ V: Send,
+ {
+ type Item = (&'a K, &'a mut V);
+ type Iter = ParIterMut<'a, K, V>;
+
+ fn into_par_iter(self) -> Self::Iter {
+ ParIterMut {
+ entries: self.as_entries_mut(),
+ }
+ }
+ }
+
+ pub struct ParIterMut<'a, K, V> {
+ entries: &'a mut [Bucket<K, V>],
+ }
+
+ impl<'a, K: Sync + Send, V: Send> ParallelIterator for ParIterMut<'a, K, V> {
+ type Item = (&'a K, &'a mut V);
+
+ parallel_iterator_methods!(Bucket::ref_mut);
+ }
+
+ impl<K: Sync + Send, V: Send> IndexedParallelIterator for ParIterMut<'_, K, V> {
+ indexed_parallel_iterator_methods!(Bucket::ref_mut);
+ }
+}
+
+mod set {
+ use super::*;
+
+ impl<T, S> IntoParallelIterator for IndexSet<T, S>
+ where
+ T: Send,
+ {
+ type Item = T;
+ type Iter = IntoParIter<T>;
+
+ fn into_par_iter(self) -> Self::Iter {
+ IntoParIter {
+ entries: self.into_entries(),
+ }
+ }
+ }
+
+ pub struct IntoParIter<T> {
+ entries: Vec<Bucket<T, ()>>,
+ }
+
+ impl<T: Send> ParallelIterator for IntoParIter<T> {
+ type Item = T;
+
+ parallel_iterator_methods!(Bucket::key);
+ }
+
+ impl<T: Send> IndexedParallelIterator for IntoParIter<T> {
+ indexed_parallel_iterator_methods!(Bucket::key);
+ }
+
+ impl<'a, T, S> IntoParallelIterator for &'a IndexSet<T, S>
+ where
+ T: Sync,
+ {
+ type Item = &'a T;
+ type Iter = ParIter<'a, T>;
+
+ fn into_par_iter(self) -> Self::Iter {
+ ParIter {
+ entries: self.as_entries(),
+ }
+ }
+ }
+
+ pub struct ParIter<'a, T> {
+ entries: &'a [Bucket<T, ()>],
+ }
+
+ impl<'a, T: Sync> ParallelIterator for ParIter<'a, T> {
+ type Item = &'a T;
+
+ parallel_iterator_methods!(Bucket::key_ref);
+ }
+
+ impl<T: Sync> IndexedParallelIterator for ParIter<'_, T> {
+ indexed_parallel_iterator_methods!(Bucket::key_ref);
+ }
+}
diff --git a/vendor/indexmap-1.9.3/src/serde.rs b/vendor/indexmap-1.9.3/src/serde.rs
new file mode 100644
index 0000000..c6dd6d5
--- /dev/null
+++ b/vendor/indexmap-1.9.3/src/serde.rs
@@ -0,0 +1,155 @@
+use serde::de::value::{MapDeserializer, SeqDeserializer};
+use serde::de::{
+ Deserialize, Deserializer, Error, IntoDeserializer, MapAccess, SeqAccess, Visitor,
+};
+use serde::ser::{Serialize, Serializer};
+
+use core::fmt::{self, Formatter};
+use core::hash::{BuildHasher, Hash};
+use core::marker::PhantomData;
+
+use crate::IndexMap;
+
+/// Requires crate feature `"serde"` or `"serde-1"`
+impl<K, V, S> Serialize for IndexMap<K, V, S>
+where
+ K: Serialize + Hash + Eq,
+ V: Serialize,
+ S: BuildHasher,
+{
+ fn serialize<T>(&self, serializer: T) -> Result<T::Ok, T::Error>
+ where
+ T: Serializer,
+ {
+ serializer.collect_map(self)
+ }
+}
+
+struct IndexMapVisitor<K, V, S>(PhantomData<(K, V, S)>);
+
+impl<'de, K, V, S> Visitor<'de> for IndexMapVisitor<K, V, S>
+where
+ K: Deserialize<'de> + Eq + Hash,
+ V: Deserialize<'de>,
+ S: Default + BuildHasher,
+{
+ type Value = IndexMap<K, V, S>;
+
+ fn expecting(&self, formatter: &mut Formatter<'_>) -> fmt::Result {
+ write!(formatter, "a map")
+ }
+
+ fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error>
+ where
+ A: MapAccess<'de>,
+ {
+ let mut values =
+ IndexMap::with_capacity_and_hasher(map.size_hint().unwrap_or(0), S::default());
+
+ while let Some((key, value)) = map.next_entry()? {
+ values.insert(key, value);
+ }
+
+ Ok(values)
+ }
+}
+
+/// Requires crate feature `"serde"` or `"serde-1"`
+impl<'de, K, V, S> Deserialize<'de> for IndexMap<K, V, S>
+where
+ K: Deserialize<'de> + Eq + Hash,
+ V: Deserialize<'de>,
+ S: Default + BuildHasher,
+{
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where
+ D: Deserializer<'de>,
+ {
+ deserializer.deserialize_map(IndexMapVisitor(PhantomData))
+ }
+}
+
+impl<'de, K, V, S, E> IntoDeserializer<'de, E> for IndexMap<K, V, S>
+where
+ K: IntoDeserializer<'de, E> + Eq + Hash,
+ V: IntoDeserializer<'de, E>,
+ S: BuildHasher,
+ E: Error,
+{
+ type Deserializer = MapDeserializer<'de, <Self as IntoIterator>::IntoIter, E>;
+
+ fn into_deserializer(self) -> Self::Deserializer {
+ MapDeserializer::new(self.into_iter())
+ }
+}
+
+use crate::IndexSet;
+
+/// Requires crate feature `"serde"` or `"serde-1"`
+impl<T, S> Serialize for IndexSet<T, S>
+where
+ T: Serialize + Hash + Eq,
+ S: BuildHasher,
+{
+ fn serialize<Se>(&self, serializer: Se) -> Result<Se::Ok, Se::Error>
+ where
+ Se: Serializer,
+ {
+ serializer.collect_seq(self)
+ }
+}
+
+struct IndexSetVisitor<T, S>(PhantomData<(T, S)>);
+
+impl<'de, T, S> Visitor<'de> for IndexSetVisitor<T, S>
+where
+ T: Deserialize<'de> + Eq + Hash,
+ S: Default + BuildHasher,
+{
+ type Value = IndexSet<T, S>;
+
+ fn expecting(&self, formatter: &mut Formatter<'_>) -> fmt::Result {
+ write!(formatter, "a set")
+ }
+
+ fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
+ where
+ A: SeqAccess<'de>,
+ {
+ let mut values =
+ IndexSet::with_capacity_and_hasher(seq.size_hint().unwrap_or(0), S::default());
+
+ while let Some(value) = seq.next_element()? {
+ values.insert(value);
+ }
+
+ Ok(values)
+ }
+}
+
+/// Requires crate feature `"serde"` or `"serde-1"`
+impl<'de, T, S> Deserialize<'de> for IndexSet<T, S>
+where
+ T: Deserialize<'de> + Eq + Hash,
+ S: Default + BuildHasher,
+{
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where
+ D: Deserializer<'de>,
+ {
+ deserializer.deserialize_seq(IndexSetVisitor(PhantomData))
+ }
+}
+
+impl<'de, T, S, E> IntoDeserializer<'de, E> for IndexSet<T, S>
+where
+ T: IntoDeserializer<'de, E> + Eq + Hash,
+ S: BuildHasher,
+ E: Error,
+{
+ type Deserializer = SeqDeserializer<<Self as IntoIterator>::IntoIter, E>;
+
+ fn into_deserializer(self) -> Self::Deserializer {
+ SeqDeserializer::new(self.into_iter())
+ }
+}
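A round-trip sketch of the serde support above (an editorial example, not part of the vendored file; assumes a serde-compatible format crate such as `serde_json` is available):

    use indexmap::IndexMap;

    fn serde_round_trip() -> Result<(), serde_json::Error> {
        let mut map: IndexMap<String, i32> = IndexMap::new();
        map.insert("b".to_string(), 2);
        map.insert("a".to_string(), 1);

        // Serialized as an ordinary map, in insertion order.
        let json = serde_json::to_string(&map)?;
        assert_eq!(json, r#"{"b":2,"a":1}"#);

        // Deserializing rebuilds the map in the order the input provides.
        let back: IndexMap<String, i32> = serde_json::from_str(&json)?;
        assert_eq!(map, back);
        Ok(())
    }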
diff --git a/vendor/indexmap-1.9.3/src/serde_seq.rs b/vendor/indexmap-1.9.3/src/serde_seq.rs
new file mode 100644
index 0000000..d326a02
--- /dev/null
+++ b/vendor/indexmap-1.9.3/src/serde_seq.rs
@@ -0,0 +1,112 @@
+//! Functions to serialize and deserialize an `IndexMap` as an ordered sequence.
+//!
+//! The default `serde` implementation serializes `IndexMap` as a normal map,
+//! but there is no guarantee that serialization formats will preserve the order
+//! of the key-value pairs. This module serializes `IndexMap` as a sequence of
+//! `(key, value)` elements instead, in order.
+//!
+//! This module may be used in a field attribute for derived implementations:
+//!
+//! ```
+//! # use indexmap::IndexMap;
+//! # use serde_derive::{Deserialize, Serialize};
+//! #[derive(Deserialize, Serialize)]
+//! struct Data {
+//! #[serde(with = "indexmap::serde_seq")]
+//! map: IndexMap<i32, u64>,
+//! // ...
+//! }
+//! ```
+//!
+//! Requires crate feature `"serde"` or `"serde-1"`
+
+use serde::de::{Deserialize, Deserializer, SeqAccess, Visitor};
+use serde::ser::{Serialize, Serializer};
+
+use core::fmt::{self, Formatter};
+use core::hash::{BuildHasher, Hash};
+use core::marker::PhantomData;
+
+use crate::IndexMap;
+
+/// Serializes an `IndexMap` as an ordered sequence.
+///
+/// This function may be used in a field attribute for deriving `Serialize`:
+///
+/// ```
+/// # use indexmap::IndexMap;
+/// # use serde_derive::Serialize;
+/// #[derive(Serialize)]
+/// struct Data {
+/// #[serde(serialize_with = "indexmap::serde_seq::serialize")]
+/// map: IndexMap<i32, u64>,
+/// // ...
+/// }
+/// ```
+///
+/// Requires crate feature `"serde"` or `"serde-1"`
+pub fn serialize<K, V, S, T>(map: &IndexMap<K, V, S>, serializer: T) -> Result<T::Ok, T::Error>
+where
+ K: Serialize + Hash + Eq,
+ V: Serialize,
+ S: BuildHasher,
+ T: Serializer,
+{
+ serializer.collect_seq(map)
+}
+
+/// Visitor to deserialize a *sequenced* `IndexMap`
+struct SeqVisitor<K, V, S>(PhantomData<(K, V, S)>);
+
+impl<'de, K, V, S> Visitor<'de> for SeqVisitor<K, V, S>
+where
+ K: Deserialize<'de> + Eq + Hash,
+ V: Deserialize<'de>,
+ S: Default + BuildHasher,
+{
+ type Value = IndexMap<K, V, S>;
+
+ fn expecting(&self, formatter: &mut Formatter<'_>) -> fmt::Result {
+ write!(formatter, "a sequenced map")
+ }
+
+ fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
+ where
+ A: SeqAccess<'de>,
+ {
+ let capacity = seq.size_hint().unwrap_or(0);
+ let mut map = IndexMap::with_capacity_and_hasher(capacity, S::default());
+
+ while let Some((key, value)) = seq.next_element()? {
+ map.insert(key, value);
+ }
+
+ Ok(map)
+ }
+}
+
+/// Deserializes an `IndexMap` from an ordered sequence.
+///
+/// This function may be used in a field attribute for deriving `Deserialize`:
+///
+/// ```
+/// # use indexmap::IndexMap;
+/// # use serde_derive::Deserialize;
+/// #[derive(Deserialize)]
+/// struct Data {
+/// #[serde(deserialize_with = "indexmap::serde_seq::deserialize")]
+/// map: IndexMap<i32, u64>,
+/// // ...
+/// }
+/// ```
+///
+/// Requires crate feature `"serde"` or `"serde-1"`
+pub fn deserialize<'de, D, K, V, S>(deserializer: D) -> Result<IndexMap<K, V, S>, D::Error>
+where
+ D: Deserializer<'de>,
+ K: Deserialize<'de> + Eq + Hash,
+ V: Deserialize<'de>,
+ S: Default + BuildHasher,
+{
+ deserializer.deserialize_seq(SeqVisitor(PhantomData))
+}
diff --git a/vendor/indexmap-1.9.3/src/set.rs b/vendor/indexmap-1.9.3/src/set.rs
new file mode 100644
index 0000000..3728947
--- /dev/null
+++ b/vendor/indexmap-1.9.3/src/set.rs
@@ -0,0 +1,1912 @@
+//! A hash set implemented using `IndexMap`
+
+#[cfg(feature = "rayon")]
+pub use crate::rayon::set as rayon;
+
+#[cfg(has_std)]
+use std::collections::hash_map::RandomState;
+
+use crate::vec::{self, Vec};
+use core::cmp::Ordering;
+use core::fmt;
+use core::hash::{BuildHasher, Hash};
+use core::iter::{Chain, FusedIterator};
+use core::ops::{BitAnd, BitOr, BitXor, Index, RangeBounds, Sub};
+use core::slice;
+
+use super::{Entries, Equivalent, IndexMap};
+
+type Bucket<T> = super::Bucket<T, ()>;
+
+/// A hash set where the iteration order of the values is independent of their
+/// hash values.
+///
+/// The interface is closely compatible with the standard `HashSet`, but also
+/// has additional features.
+///
+/// # Order
+///
+/// The values have a consistent order that is determined by the sequence of
+/// insertion and removal calls on the set. The order does not depend on the
+/// values or the hash function at all. Note that insertion order and value
+/// are not affected if a re-insertion is attempted once an element is
+/// already present.
+///
+/// All iterators traverse the set *in order*. Set operation iterators like
+/// `union` produce a concatenated order, as do their matching "bitwise"
+/// operators. See their documentation for specifics.
+///
+/// The insertion order is preserved, with **notable exceptions** like the
+/// `.remove()` or `.swap_remove()` methods. Methods such as `.sort_by()` of
+/// course result in a new order, depending on the sorting order.
+///
+/// # Indices
+///
+/// The values are indexed in a compact range without holes in the range
+/// `0..self.len()`. For example, the method `.get_full` looks up the index for
+/// a value, and the method `.get_index` looks up the value by index.
+///
+/// # Examples
+///
+/// ```
+/// use indexmap::IndexSet;
+///
+/// // Collects which letters appear in a sentence.
+/// let letters: IndexSet<_> = "a short treatise on fungi".chars().collect();
+///
+/// assert!(letters.contains(&'s'));
+/// assert!(letters.contains(&'t'));
+/// assert!(letters.contains(&'u'));
+/// assert!(!letters.contains(&'y'));
+/// ```
+#[cfg(has_std)]
+pub struct IndexSet<T, S = RandomState> {
+ pub(crate) map: IndexMap<T, (), S>,
+}
+#[cfg(not(has_std))]
+pub struct IndexSet<T, S> {
+ pub(crate) map: IndexMap<T, (), S>,
+}
+
+impl<T, S> Clone for IndexSet<T, S>
+where
+ T: Clone,
+ S: Clone,
+{
+ fn clone(&self) -> Self {
+ IndexSet {
+ map: self.map.clone(),
+ }
+ }
+
+ fn clone_from(&mut self, other: &Self) {
+ self.map.clone_from(&other.map);
+ }
+}
+
+impl<T, S> Entries for IndexSet<T, S> {
+ type Entry = Bucket<T>;
+
+ #[inline]
+ fn into_entries(self) -> Vec<Self::Entry> {
+ self.map.into_entries()
+ }
+
+ #[inline]
+ fn as_entries(&self) -> &[Self::Entry] {
+ self.map.as_entries()
+ }
+
+ #[inline]
+ fn as_entries_mut(&mut self) -> &mut [Self::Entry] {
+ self.map.as_entries_mut()
+ }
+
+ fn with_entries<F>(&mut self, f: F)
+ where
+ F: FnOnce(&mut [Self::Entry]),
+ {
+ self.map.with_entries(f);
+ }
+}
+
+impl<T, S> fmt::Debug for IndexSet<T, S>
+where
+ T: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ if cfg!(not(feature = "test_debug")) {
+ f.debug_set().entries(self.iter()).finish()
+ } else {
+ // Let the inner `IndexMap` print all of its details
+ f.debug_struct("IndexSet").field("map", &self.map).finish()
+ }
+ }
+}
+
+#[cfg(has_std)]
+impl<T> IndexSet<T> {
+ /// Create a new set. (Does not allocate.)
+ pub fn new() -> Self {
+ IndexSet {
+ map: IndexMap::new(),
+ }
+ }
+
+ /// Create a new set with capacity for `n` elements.
+ /// (Does not allocate if `n` is zero.)
+ ///
+ /// Computes in **O(n)** time.
+ pub fn with_capacity(n: usize) -> Self {
+ IndexSet {
+ map: IndexMap::with_capacity(n),
+ }
+ }
+}
+
+impl<T, S> IndexSet<T, S> {
+ /// Create a new set with capacity for `n` elements.
+ /// (Does not allocate if `n` is zero.)
+ ///
+ /// Computes in **O(n)** time.
+ pub fn with_capacity_and_hasher(n: usize, hash_builder: S) -> Self {
+ IndexSet {
+ map: IndexMap::with_capacity_and_hasher(n, hash_builder),
+ }
+ }
+
+ /// Create a new set with `hash_builder`.
+ ///
+ /// This function is `const`, so it
+ /// can be called in `static` contexts.
+ pub const fn with_hasher(hash_builder: S) -> Self {
+ IndexSet {
+ map: IndexMap::with_hasher(hash_builder),
+ }
+ }
+
+ /// Computes in **O(1)** time.
+ pub fn capacity(&self) -> usize {
+ self.map.capacity()
+ }
+
+ /// Return a reference to the set's `BuildHasher`.
+ pub fn hasher(&self) -> &S {
+ self.map.hasher()
+ }
+
+ /// Return the number of elements in the set.
+ ///
+ /// Computes in **O(1)** time.
+ pub fn len(&self) -> usize {
+ self.map.len()
+ }
+
+ /// Returns true if the set contains no elements.
+ ///
+ /// Computes in **O(1)** time.
+ pub fn is_empty(&self) -> bool {
+ self.map.is_empty()
+ }
+
+ /// Return an iterator over the values of the set, in their order
+ pub fn iter(&self) -> Iter<'_, T> {
+ Iter {
+ iter: self.map.as_entries().iter(),
+ }
+ }
+
+ /// Remove all elements in the set, while preserving its capacity.
+ ///
+ /// Computes in **O(n)** time.
+ pub fn clear(&mut self) {
+ self.map.clear();
+ }
+
+ /// Shortens the set, keeping the first `len` elements and dropping the rest.
+ ///
+ /// If `len` is greater than the set's current length, this has no effect.
+ pub fn truncate(&mut self, len: usize) {
+ self.map.truncate(len);
+ }
+
+ /// Clears the `IndexSet` in the given index range, returning those values
+ /// as a drain iterator.
+ ///
+ /// The range may be any type that implements `RangeBounds<usize>`,
+ /// including all of the `std::ops::Range*` types, or even a tuple pair of
+ /// `Bound` start and end values. To drain the set entirely, use `RangeFull`
+ /// like `set.drain(..)`.
+ ///
+ /// This shifts down all entries following the drained range to fill the
+ /// gap, and keeps the allocated memory for reuse.
+ ///
+ /// ***Panics*** if the starting point is greater than the end point or if
+ /// the end point is greater than the length of the set.
+ pub fn drain<R>(&mut self, range: R) -> Drain<'_, T>
+ where
+ R: RangeBounds<usize>,
+ {
+ Drain {
+ iter: self.map.drain(range).iter,
+ }
+ }
+
+ /// Splits the collection into two at the given index.
+ ///
+ /// Returns a newly allocated set containing the elements in the range
+ /// `[at, len)`. After the call, the original set will be left containing
+ /// the elements `[0, at)` with its previous capacity unchanged.
+ ///
+ /// ***Panics*** if `at > len`.
+ pub fn split_off(&mut self, at: usize) -> Self
+ where
+ S: Clone,
+ {
+ Self {
+ map: self.map.split_off(at),
+ }
+ }
+}
+
+impl<T, S> IndexSet<T, S>
+where
+ T: Hash + Eq,
+ S: BuildHasher,
+{
+ /// Reserve capacity for `additional` more values.
+ ///
+ /// Computes in **O(n)** time.
+ pub fn reserve(&mut self, additional: usize) {
+ self.map.reserve(additional);
+ }
+
+ /// Shrink the capacity of the set as much as possible.
+ ///
+ /// Computes in **O(n)** time.
+ pub fn shrink_to_fit(&mut self) {
+ self.map.shrink_to_fit();
+ }
+
+ /// Shrink the capacity of the set with a lower limit.
+ ///
+ /// Computes in **O(n)** time.
+ pub fn shrink_to(&mut self, min_capacity: usize) {
+ self.map.shrink_to(min_capacity);
+ }
+
+ /// Insert the value into the set.
+ ///
+ /// If an equivalent item already exists in the set, it returns
+    /// `false`, leaving the original value in the set without
+ /// altering its insertion order. Otherwise, it inserts the new
+ /// item and returns `true`.
+ ///
+ /// Computes in **O(1)** time (amortized average).
+ pub fn insert(&mut self, value: T) -> bool {
+ self.map.insert(value, ()).is_none()
+ }
+
+ /// Insert the value into the set, and get its index.
+ ///
+ /// If an equivalent item already exists in the set, it returns
+ /// the index of the existing item and `false`, leaving the
+    /// original value in the set without altering its insertion
+ /// order. Otherwise, it inserts the new item and returns the index
+ /// of the inserted item and `true`.
+ ///
+ /// Computes in **O(1)** time (amortized average).
+ pub fn insert_full(&mut self, value: T) -> (usize, bool) {
+ use super::map::Entry::*;
+
+ match self.map.entry(value) {
+ Occupied(e) => (e.index(), false),
+ Vacant(e) => {
+ let index = e.index();
+ e.insert(());
+ (index, true)
+ }
+ }
+ }
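A short sketch of the insertion return values described above (an editorial example, not part of the vendored file):

    use indexmap::IndexSet;

    fn insertion_returns() {
        let mut set = IndexSet::new();
        assert!(set.insert("a"));                     // newly inserted
        assert!(!set.insert("a"));                    // already present; order unchanged
        assert_eq!(set.insert_full("b"), (1, true));  // inserted at index 1
        assert_eq!(set.insert_full("a"), (0, false)); // existing entry sits at index 0
    }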
+
+ /// Return an iterator over the values that are in `self` but not `other`.
+ ///
+ /// Values are produced in the same order that they appear in `self`.
+ pub fn difference<'a, S2>(&'a self, other: &'a IndexSet<T, S2>) -> Difference<'a, T, S2>
+ where
+ S2: BuildHasher,
+ {
+ Difference {
+ iter: self.iter(),
+ other,
+ }
+ }
+
+ /// Return an iterator over the values that are in `self` or `other`,
+ /// but not in both.
+ ///
+ /// Values from `self` are produced in their original order, followed by
+ /// values from `other` in their original order.
+ pub fn symmetric_difference<'a, S2>(
+ &'a self,
+ other: &'a IndexSet<T, S2>,
+ ) -> SymmetricDifference<'a, T, S, S2>
+ where
+ S2: BuildHasher,
+ {
+ SymmetricDifference {
+ iter: self.difference(other).chain(other.difference(self)),
+ }
+ }
+
+ /// Return an iterator over the values that are in both `self` and `other`.
+ ///
+ /// Values are produced in the same order that they appear in `self`.
+ pub fn intersection<'a, S2>(&'a self, other: &'a IndexSet<T, S2>) -> Intersection<'a, T, S2>
+ where
+ S2: BuildHasher,
+ {
+ Intersection {
+ iter: self.iter(),
+ other,
+ }
+ }
+
+ /// Return an iterator over all values that are in `self` or `other`.
+ ///
+ /// Values from `self` are produced in their original order, followed by
+ /// values that are unique to `other` in their original order.
+ pub fn union<'a, S2>(&'a self, other: &'a IndexSet<T, S2>) -> Union<'a, T, S>
+ where
+ S2: BuildHasher,
+ {
+ Union {
+ iter: self.iter().chain(other.difference(self)),
+ }
+ }
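A sketch of the ordering guarantees of the set operations above (an editorial example, not part of the vendored file):

    use indexmap::IndexSet;

    fn set_operation_order() {
        let a: IndexSet<i32> = [1, 2, 3].iter().copied().collect();
        let b: IndexSet<i32> = [3, 4, 5].iter().copied().collect();

        // `difference` follows `a`'s order; `union` appends `b`'s unique values.
        assert!(a.difference(&b).eq([&1, &2]));
        assert!(a.union(&b).eq([&1, &2, &3, &4, &5]));
        assert!(a.symmetric_difference(&b).eq([&1, &2, &4, &5]));
    }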
+
+ /// Return `true` if an equivalent to `value` exists in the set.
+ ///
+ /// Computes in **O(1)** time (average).
+ pub fn contains<Q: ?Sized>(&self, value: &Q) -> bool
+ where
+ Q: Hash + Equivalent<T>,
+ {
+ self.map.contains_key(value)
+ }
+
+ /// Return a reference to the value stored in the set, if it is present,
+ /// else `None`.
+ ///
+ /// Computes in **O(1)** time (average).
+ pub fn get<Q: ?Sized>(&self, value: &Q) -> Option<&T>
+ where
+ Q: Hash + Equivalent<T>,
+ {
+ self.map.get_key_value(value).map(|(x, &())| x)
+ }
+
+ /// Return item index and value
+ pub fn get_full<Q: ?Sized>(&self, value: &Q) -> Option<(usize, &T)>
+ where
+ Q: Hash + Equivalent<T>,
+ {
+ self.map.get_full(value).map(|(i, x, &())| (i, x))
+ }
+
+ /// Return item index, if it exists in the set
+ pub fn get_index_of<Q: ?Sized>(&self, value: &Q) -> Option<usize>
+ where
+ Q: Hash + Equivalent<T>,
+ {
+ self.map.get_index_of(value)
+ }
+
+ /// Adds a value to the set, replacing the existing value, if any, that is
+ /// equal to the given one, without altering its insertion order. Returns
+ /// the replaced value.
+ ///
+ /// Computes in **O(1)** time (average).
+ pub fn replace(&mut self, value: T) -> Option<T> {
+ self.replace_full(value).1
+ }
+
+ /// Adds a value to the set, replacing the existing value, if any, that is
+ /// equal to the given one, without altering its insertion order. Returns
+ /// the index of the item and its replaced value.
+ ///
+ /// Computes in **O(1)** time (average).
+ pub fn replace_full(&mut self, value: T) -> (usize, Option<T>) {
+ use super::map::Entry::*;
+
+ match self.map.entry(value) {
+ Vacant(e) => {
+ let index = e.index();
+ e.insert(());
+ (index, None)
+ }
+ Occupied(e) => (e.index(), Some(e.replace_key())),
+ }
+ }
+
+ /// Remove the value from the set, and return `true` if it was present.
+ ///
+    /// **NOTE:** This is equivalent to `.swap_remove(value)`; if you want
+ /// to preserve the order of the values in the set, use `.shift_remove(value)`.
+ ///
+ /// Computes in **O(1)** time (average).
+ pub fn remove<Q: ?Sized>(&mut self, value: &Q) -> bool
+ where
+ Q: Hash + Equivalent<T>,
+ {
+ self.swap_remove(value)
+ }
+
+ /// Remove the value from the set, and return `true` if it was present.
+ ///
+ /// Like `Vec::swap_remove`, the value is removed by swapping it with the
+ /// last element of the set and popping it off. **This perturbs
+ /// the position of what used to be the last element!**
+ ///
+ /// Return `false` if `value` was not in the set.
+ ///
+ /// Computes in **O(1)** time (average).
+ pub fn swap_remove<Q: ?Sized>(&mut self, value: &Q) -> bool
+ where
+ Q: Hash + Equivalent<T>,
+ {
+ self.map.swap_remove(value).is_some()
+ }
+
+ /// Remove the value from the set, and return `true` if it was present.
+ ///
+ /// Like `Vec::remove`, the value is removed by shifting all of the
+ /// elements that follow it, preserving their relative order.
+ /// **This perturbs the index of all of those elements!**
+ ///
+ /// Return `false` if `value` was not in the set.
+ ///
+ /// Computes in **O(n)** time (average).
+ pub fn shift_remove<Q: ?Sized>(&mut self, value: &Q) -> bool
+ where
+ Q: Hash + Equivalent<T>,
+ {
+ self.map.shift_remove(value).is_some()
+ }
+
+ /// Removes and returns the value in the set, if any, that is equal to the
+ /// given one.
+ ///
+    /// **NOTE:** This is equivalent to `.swap_take(value)`; if you need to
+ /// preserve the order of the values in the set, use `.shift_take(value)`
+ /// instead.
+ ///
+ /// Computes in **O(1)** time (average).
+ pub fn take<Q: ?Sized>(&mut self, value: &Q) -> Option<T>
+ where
+ Q: Hash + Equivalent<T>,
+ {
+ self.swap_take(value)
+ }
+
+ /// Removes and returns the value in the set, if any, that is equal to the
+ /// given one.
+ ///
+ /// Like `Vec::swap_remove`, the value is removed by swapping it with the
+ /// last element of the set and popping it off. **This perturbs
+ /// the position of what used to be the last element!**
+ ///
+ /// Return `None` if `value` was not in the set.
+ ///
+ /// Computes in **O(1)** time (average).
+ pub fn swap_take<Q: ?Sized>(&mut self, value: &Q) -> Option<T>
+ where
+ Q: Hash + Equivalent<T>,
+ {
+ self.map.swap_remove_entry(value).map(|(x, ())| x)
+ }
+
+ /// Removes and returns the value in the set, if any, that is equal to the
+ /// given one.
+ ///
+ /// Like `Vec::remove`, the value is removed by shifting all of the
+ /// elements that follow it, preserving their relative order.
+ /// **This perturbs the index of all of those elements!**
+ ///
+ /// Return `None` if `value` was not in the set.
+ ///
+ /// Computes in **O(n)** time (average).
+ pub fn shift_take<Q: ?Sized>(&mut self, value: &Q) -> Option<T>
+ where
+ Q: Hash + Equivalent<T>,
+ {
+ self.map.shift_remove_entry(value).map(|(x, ())| x)
+ }
+
+    /// Remove the value from the set, returning it and the index it had.
+ ///
+ /// Like `Vec::swap_remove`, the value is removed by swapping it with the
+ /// last element of the set and popping it off. **This perturbs
+ /// the position of what used to be the last element!**
+ ///
+ /// Return `None` if `value` was not in the set.
+ pub fn swap_remove_full<Q: ?Sized>(&mut self, value: &Q) -> Option<(usize, T)>
+ where
+ Q: Hash + Equivalent<T>,
+ {
+ self.map.swap_remove_full(value).map(|(i, x, ())| (i, x))
+ }
+
+    /// Remove the value from the set, returning it and the index it had.
+ ///
+ /// Like `Vec::remove`, the value is removed by shifting all of the
+ /// elements that follow it, preserving their relative order.
+ /// **This perturbs the index of all of those elements!**
+ ///
+ /// Return `None` if `value` was not in the set.
+ pub fn shift_remove_full<Q: ?Sized>(&mut self, value: &Q) -> Option<(usize, T)>
+ where
+ Q: Hash + Equivalent<T>,
+ {
+ self.map.shift_remove_full(value).map(|(i, x, ())| (i, x))
+ }
+
+ /// Remove the last value
+ ///
+ /// This preserves the order of the remaining elements.
+ ///
+ /// Computes in **O(1)** time (average).
+ pub fn pop(&mut self) -> Option<T> {
+ self.map.pop().map(|(x, ())| x)
+ }
+
+ /// Scan through each value in the set and keep those where the
+ /// closure `keep` returns `true`.
+ ///
+ /// The elements are visited in order, and remaining elements keep their
+ /// order.
+ ///
+ /// Computes in **O(n)** time (average).
+ pub fn retain<F>(&mut self, mut keep: F)
+ where
+ F: FnMut(&T) -> bool,
+ {
+ self.map.retain(move |x, &mut ()| keep(x))
+ }
+
+ /// Sort the set’s values by their default ordering.
+ ///
+ /// See [`sort_by`](Self::sort_by) for details.
+ pub fn sort(&mut self)
+ where
+ T: Ord,
+ {
+ self.map.sort_keys()
+ }
+
+ /// Sort the set’s values in place using the comparison function `cmp`.
+ ///
+ /// Computes in **O(n log n)** time and **O(n)** space. The sort is stable.
+ pub fn sort_by<F>(&mut self, mut cmp: F)
+ where
+ F: FnMut(&T, &T) -> Ordering,
+ {
+ self.map.sort_by(move |a, _, b, _| cmp(a, b));
+ }
+
+ /// Sort the values of the set and return a by-value iterator of
+ /// the values with the result.
+ ///
+ /// The sort is stable.
+ pub fn sorted_by<F>(self, mut cmp: F) -> IntoIter<T>
+ where
+ F: FnMut(&T, &T) -> Ordering,
+ {
+ let mut entries = self.into_entries();
+ entries.sort_by(move |a, b| cmp(&a.key, &b.key));
+ IntoIter {
+ iter: entries.into_iter(),
+ }
+ }
+
+ /// Sort the set's values by their default ordering.
+ ///
+ /// See [`sort_unstable_by`](Self::sort_unstable_by) for details.
+ pub fn sort_unstable(&mut self)
+ where
+ T: Ord,
+ {
+ self.map.sort_unstable_keys()
+ }
+
+    /// Sort the set's values in place using the comparison function `cmp`.
+ ///
+ /// Computes in **O(n log n)** time. The sort is unstable.
+ pub fn sort_unstable_by<F>(&mut self, mut cmp: F)
+ where
+ F: FnMut(&T, &T) -> Ordering,
+ {
+ self.map.sort_unstable_by(move |a, _, b, _| cmp(a, b))
+ }
+
+ /// Sort the values of the set and return a by-value iterator of
+ /// the values with the result.
+ pub fn sorted_unstable_by<F>(self, mut cmp: F) -> IntoIter<T>
+ where
+ F: FnMut(&T, &T) -> Ordering,
+ {
+ let mut entries = self.into_entries();
+ entries.sort_unstable_by(move |a, b| cmp(&a.key, &b.key));
+ IntoIter {
+ iter: entries.into_iter(),
+ }
+ }
+
+ /// Reverses the order of the set’s values in place.
+ ///
+ /// Computes in **O(n)** time and **O(1)** space.
+ pub fn reverse(&mut self) {
+ self.map.reverse()
+ }
+}
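A sketch of the in-place and by-value sorting methods above (an editorial example, not part of the vendored file):

    use indexmap::IndexSet;

    fn sorting() {
        let mut set: IndexSet<i32> = [2, 3, 1].iter().copied().collect();

        // Stable in-place sort by the default ordering.
        set.sort();
        assert!(set.iter().eq([1, 2, 3].iter()));

        // Reverse in place, then consume into a by-value iterator sorted again.
        set.reverse();
        let values: Vec<i32> = set.sorted_by(|a, b| a.cmp(b)).collect();
        assert_eq!(values, [1, 2, 3]);
    }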
+
+impl<T, S> IndexSet<T, S> {
+ /// Get a value by index
+ ///
+ /// Valid indices are *0 <= index < self.len()*
+ ///
+ /// Computes in **O(1)** time.
+ pub fn get_index(&self, index: usize) -> Option<&T> {
+ self.as_entries().get(index).map(Bucket::key_ref)
+ }
+
+ /// Get the first value
+ ///
+ /// Computes in **O(1)** time.
+ pub fn first(&self) -> Option<&T> {
+ self.as_entries().first().map(Bucket::key_ref)
+ }
+
+ /// Get the last value
+ ///
+ /// Computes in **O(1)** time.
+ pub fn last(&self) -> Option<&T> {
+ self.as_entries().last().map(Bucket::key_ref)
+ }
+
+ /// Remove the value by index
+ ///
+ /// Valid indices are *0 <= index < self.len()*
+ ///
+ /// Like `Vec::swap_remove`, the value is removed by swapping it with the
+ /// last element of the set and popping it off. **This perturbs
+ /// the position of what used to be the last element!**
+ ///
+ /// Computes in **O(1)** time (average).
+ pub fn swap_remove_index(&mut self, index: usize) -> Option<T> {
+ self.map.swap_remove_index(index).map(|(x, ())| x)
+ }
+
+ /// Remove the value by index
+ ///
+ /// Valid indices are *0 <= index < self.len()*
+ ///
+ /// Like `Vec::remove`, the value is removed by shifting all of the
+ /// elements that follow it, preserving their relative order.
+ /// **This perturbs the index of all of those elements!**
+ ///
+ /// Computes in **O(n)** time (average).
+ pub fn shift_remove_index(&mut self, index: usize) -> Option<T> {
+ self.map.shift_remove_index(index).map(|(x, ())| x)
+ }
+
+ /// Moves the position of a value from one index to another
+ /// by shifting all other values in-between.
+ ///
+ /// * If `from < to`, the other values will shift down while the targeted value moves up.
+ /// * If `from > to`, the other values will shift up while the targeted value moves down.
+ ///
+ /// ***Panics*** if `from` or `to` are out of bounds.
+ ///
+ /// Computes in **O(n)** time (average).
+ pub fn move_index(&mut self, from: usize, to: usize) {
+ self.map.move_index(from, to)
+ }
+
+ /// Swaps the position of two values in the set.
+ ///
+ /// ***Panics*** if `a` or `b` are out of bounds.
+ pub fn swap_indices(&mut self, a: usize, b: usize) {
+ self.map.swap_indices(a, b)
+ }
+}
+
+/// Access `IndexSet` values at indexed positions.
+///
+/// # Examples
+///
+/// ```
+/// use indexmap::IndexSet;
+///
+/// let mut set = IndexSet::new();
+/// for word in "Lorem ipsum dolor sit amet".split_whitespace() {
+/// set.insert(word.to_string());
+/// }
+/// assert_eq!(set[0], "Lorem");
+/// assert_eq!(set[1], "ipsum");
+/// set.reverse();
+/// assert_eq!(set[0], "amet");
+/// assert_eq!(set[1], "sit");
+/// set.sort();
+/// assert_eq!(set[0], "Lorem");
+/// assert_eq!(set[1], "amet");
+/// ```
+///
+/// ```should_panic
+/// use indexmap::IndexSet;
+///
+/// let mut set = IndexSet::new();
+/// set.insert("foo");
+/// println!("{:?}", set[10]); // panics!
+/// ```
+impl<T, S> Index<usize> for IndexSet<T, S> {
+ type Output = T;
+
+ /// Returns a reference to the value at the supplied `index`.
+ ///
+ /// ***Panics*** if `index` is out of bounds.
+ fn index(&self, index: usize) -> &T {
+ self.get_index(index)
+ .expect("IndexSet: index out of bounds")
+ }
+}
+
+/// An owning iterator over the items of an `IndexSet`.
+///
+/// This `struct` is created by the [`into_iter`] method on [`IndexSet`]
+/// (provided by the `IntoIterator` trait). See its documentation for more.
+///
+/// [`IndexSet`]: struct.IndexSet.html
+/// [`into_iter`]: struct.IndexSet.html#method.into_iter
+pub struct IntoIter<T> {
+ iter: vec::IntoIter<Bucket<T>>,
+}
+
+impl<T> Iterator for IntoIter<T> {
+ type Item = T;
+
+ iterator_methods!(Bucket::key);
+}
+
+impl<T> DoubleEndedIterator for IntoIter<T> {
+ double_ended_iterator_methods!(Bucket::key);
+}
+
+impl<T> ExactSizeIterator for IntoIter<T> {
+ fn len(&self) -> usize {
+ self.iter.len()
+ }
+}
+
+impl<T> FusedIterator for IntoIter<T> {}
+
+impl<T: fmt::Debug> fmt::Debug for IntoIter<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let iter = self.iter.as_slice().iter().map(Bucket::key_ref);
+ f.debug_list().entries(iter).finish()
+ }
+}
+
+/// An iterator over the items of an `IndexSet`.
+///
+/// This `struct` is created by the [`iter`] method on [`IndexSet`].
+/// See its documentation for more.
+///
+/// [`IndexSet`]: struct.IndexSet.html
+/// [`iter`]: struct.IndexSet.html#method.iter
+pub struct Iter<'a, T> {
+ iter: slice::Iter<'a, Bucket<T>>,
+}
+
+impl<'a, T> Iterator for Iter<'a, T> {
+ type Item = &'a T;
+
+ iterator_methods!(Bucket::key_ref);
+}
+
+impl<T> DoubleEndedIterator for Iter<'_, T> {
+ double_ended_iterator_methods!(Bucket::key_ref);
+}
+
+impl<T> ExactSizeIterator for Iter<'_, T> {
+ fn len(&self) -> usize {
+ self.iter.len()
+ }
+}
+
+impl<T> FusedIterator for Iter<'_, T> {}
+
+impl<T> Clone for Iter<'_, T> {
+ fn clone(&self) -> Self {
+ Iter {
+ iter: self.iter.clone(),
+ }
+ }
+}
+
+impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.clone()).finish()
+ }
+}
+
+/// A draining iterator over the items of an `IndexSet`.
+///
+/// This `struct` is created by the [`drain`] method on [`IndexSet`].
+/// See its documentation for more.
+///
+/// [`IndexSet`]: struct.IndexSet.html
+/// [`drain`]: struct.IndexSet.html#method.drain
+pub struct Drain<'a, T> {
+ iter: vec::Drain<'a, Bucket<T>>,
+}
+
+impl<T> Iterator for Drain<'_, T> {
+ type Item = T;
+
+ iterator_methods!(Bucket::key);
+}
+
+impl<T> DoubleEndedIterator for Drain<'_, T> {
+ double_ended_iterator_methods!(Bucket::key);
+}
+
+impl<T> ExactSizeIterator for Drain<'_, T> {
+ fn len(&self) -> usize {
+ self.iter.len()
+ }
+}
+
+impl<T> FusedIterator for Drain<'_, T> {}
+
+impl<T: fmt::Debug> fmt::Debug for Drain<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let iter = self.iter.as_slice().iter().map(Bucket::key_ref);
+ f.debug_list().entries(iter).finish()
+ }
+}
+
+impl<'a, T, S> IntoIterator for &'a IndexSet<T, S> {
+ type Item = &'a T;
+ type IntoIter = Iter<'a, T>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.iter()
+ }
+}
+
+impl<T, S> IntoIterator for IndexSet<T, S> {
+ type Item = T;
+ type IntoIter = IntoIter<T>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ IntoIter {
+ iter: self.into_entries().into_iter(),
+ }
+ }
+}
+
+impl<T, S> FromIterator<T> for IndexSet<T, S>
+where
+ T: Hash + Eq,
+ S: BuildHasher + Default,
+{
+ fn from_iter<I: IntoIterator<Item = T>>(iterable: I) -> Self {
+ let iter = iterable.into_iter().map(|x| (x, ()));
+ IndexSet {
+ map: IndexMap::from_iter(iter),
+ }
+ }
+}
+
+#[cfg(has_std)]
+impl<T, const N: usize> From<[T; N]> for IndexSet<T, RandomState>
+where
+ T: Eq + Hash,
+{
+ /// # Examples
+ ///
+ /// ```
+ /// use indexmap::IndexSet;
+ ///
+ /// let set1 = IndexSet::from([1, 2, 3, 4]);
+ /// let set2: IndexSet<_> = [1, 2, 3, 4].into();
+ /// assert_eq!(set1, set2);
+ /// ```
+ fn from(arr: [T; N]) -> Self {
+ Self::from_iter(arr)
+ }
+}
+
+impl<T, S> Extend<T> for IndexSet<T, S>
+where
+ T: Hash + Eq,
+ S: BuildHasher,
+{
+ fn extend<I: IntoIterator<Item = T>>(&mut self, iterable: I) {
+ let iter = iterable.into_iter().map(|x| (x, ()));
+ self.map.extend(iter);
+ }
+}
+
+impl<'a, T, S> Extend<&'a T> for IndexSet<T, S>
+where
+ T: Hash + Eq + Copy + 'a,
+ S: BuildHasher,
+{
+ fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iterable: I) {
+ let iter = iterable.into_iter().copied();
+ self.extend(iter);
+ }
+}
+
+impl<T, S> Default for IndexSet<T, S>
+where
+ S: Default,
+{
+ /// Return an empty `IndexSet`
+ fn default() -> Self {
+ IndexSet {
+ map: IndexMap::default(),
+ }
+ }
+}
+
+impl<T, S1, S2> PartialEq<IndexSet<T, S2>> for IndexSet<T, S1>
+where
+ T: Hash + Eq,
+ S1: BuildHasher,
+ S2: BuildHasher,
+{
+ fn eq(&self, other: &IndexSet<T, S2>) -> bool {
+ self.len() == other.len() && self.is_subset(other)
+ }
+}
+
+impl<T, S> Eq for IndexSet<T, S>
+where
+ T: Eq + Hash,
+ S: BuildHasher,
+{
+}
+
+impl<T, S> IndexSet<T, S>
+where
+ T: Eq + Hash,
+ S: BuildHasher,
+{
+ /// Returns `true` if `self` has no elements in common with `other`.
+ pub fn is_disjoint<S2>(&self, other: &IndexSet<T, S2>) -> bool
+ where
+ S2: BuildHasher,
+ {
+ if self.len() <= other.len() {
+ self.iter().all(move |value| !other.contains(value))
+ } else {
+ other.iter().all(move |value| !self.contains(value))
+ }
+ }
+
+ /// Returns `true` if all elements of `self` are contained in `other`.
+ pub fn is_subset<S2>(&self, other: &IndexSet<T, S2>) -> bool
+ where
+ S2: BuildHasher,
+ {
+ self.len() <= other.len() && self.iter().all(move |value| other.contains(value))
+ }
+
+ /// Returns `true` if all elements of `other` are contained in `self`.
+ pub fn is_superset<S2>(&self, other: &IndexSet<T, S2>) -> bool
+ where
+ S2: BuildHasher,
+ {
+ other.is_subset(self)
+ }
+}
+
+/// A lazy iterator producing elements in the difference of `IndexSet`s.
+///
+/// This `struct` is created by the [`difference`] method on [`IndexSet`].
+/// See its documentation for more.
+///
+/// [`IndexSet`]: struct.IndexSet.html
+/// [`difference`]: struct.IndexSet.html#method.difference
+pub struct Difference<'a, T, S> {
+ iter: Iter<'a, T>,
+ other: &'a IndexSet<T, S>,
+}
+
+impl<'a, T, S> Iterator for Difference<'a, T, S>
+where
+ T: Eq + Hash,
+ S: BuildHasher,
+{
+ type Item = &'a T;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ while let Some(item) = self.iter.next() {
+ if !self.other.contains(item) {
+ return Some(item);
+ }
+ }
+ None
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (0, self.iter.size_hint().1)
+ }
+}
+
+impl<T, S> DoubleEndedIterator for Difference<'_, T, S>
+where
+ T: Eq + Hash,
+ S: BuildHasher,
+{
+ fn next_back(&mut self) -> Option<Self::Item> {
+ while let Some(item) = self.iter.next_back() {
+ if !self.other.contains(item) {
+ return Some(item);
+ }
+ }
+ None
+ }
+}
+
+impl<T, S> FusedIterator for Difference<'_, T, S>
+where
+ T: Eq + Hash,
+ S: BuildHasher,
+{
+}
+
+impl<T, S> Clone for Difference<'_, T, S> {
+ fn clone(&self) -> Self {
+ Difference {
+ iter: self.iter.clone(),
+ ..*self
+ }
+ }
+}
+
+impl<T, S> fmt::Debug for Difference<'_, T, S>
+where
+ T: fmt::Debug + Eq + Hash,
+ S: BuildHasher,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.clone()).finish()
+ }
+}
+
+/// A lazy iterator producing elements in the intersection of `IndexSet`s.
+///
+/// This `struct` is created by the [`intersection`] method on [`IndexSet`].
+/// See its documentation for more.
+///
+/// [`IndexSet`]: struct.IndexSet.html
+/// [`intersection`]: struct.IndexSet.html#method.intersection
+pub struct Intersection<'a, T, S> {
+ iter: Iter<'a, T>,
+ other: &'a IndexSet<T, S>,
+}
+
+impl<'a, T, S> Iterator for Intersection<'a, T, S>
+where
+ T: Eq + Hash,
+ S: BuildHasher,
+{
+ type Item = &'a T;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ while let Some(item) = self.iter.next() {
+ if self.other.contains(item) {
+ return Some(item);
+ }
+ }
+ None
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (0, self.iter.size_hint().1)
+ }
+}
+
+impl<T, S> DoubleEndedIterator for Intersection<'_, T, S>
+where
+ T: Eq + Hash,
+ S: BuildHasher,
+{
+ fn next_back(&mut self) -> Option<Self::Item> {
+ while let Some(item) = self.iter.next_back() {
+ if self.other.contains(item) {
+ return Some(item);
+ }
+ }
+ None
+ }
+}
+
+impl<T, S> FusedIterator for Intersection<'_, T, S>
+where
+ T: Eq + Hash,
+ S: BuildHasher,
+{
+}
+
+impl<T, S> Clone for Intersection<'_, T, S> {
+ fn clone(&self) -> Self {
+ Intersection {
+ iter: self.iter.clone(),
+ ..*self
+ }
+ }
+}
+
+impl<T, S> fmt::Debug for Intersection<'_, T, S>
+where
+ T: fmt::Debug + Eq + Hash,
+ S: BuildHasher,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.clone()).finish()
+ }
+}
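Likewise for `intersection` (editorial sketch): the result follows `self`'s iteration order, not `other`'s:

```rust
use indexmap::IndexSet;

fn main() {
    let a: IndexSet<i32> = [0, 1, 2, 3].iter().copied().collect();
    let b: IndexSet<i32> = [3, 1, 9].iter().copied().collect();

    // Common elements, reported in `a`'s insertion order.
    let common: Vec<i32> = a.intersection(&b).copied().collect();
    assert_eq!(common, vec![1, 3]);
}
```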
+
+/// A lazy iterator producing elements in the symmetric difference of `IndexSet`s.
+///
+/// This `struct` is created by the [`symmetric_difference`] method on
+/// [`IndexSet`]. See its documentation for more.
+///
+/// [`IndexSet`]: struct.IndexSet.html
+/// [`symmetric_difference`]: struct.IndexSet.html#method.symmetric_difference
+pub struct SymmetricDifference<'a, T, S1, S2> {
+ iter: Chain<Difference<'a, T, S2>, Difference<'a, T, S1>>,
+}
+
+impl<'a, T, S1, S2> Iterator for SymmetricDifference<'a, T, S1, S2>
+where
+ T: Eq + Hash,
+ S1: BuildHasher,
+ S2: BuildHasher,
+{
+ type Item = &'a T;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ self.iter.next()
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+
+ fn fold<B, F>(self, init: B, f: F) -> B
+ where
+ F: FnMut(B, Self::Item) -> B,
+ {
+ self.iter.fold(init, f)
+ }
+}
+
+impl<T, S1, S2> DoubleEndedIterator for SymmetricDifference<'_, T, S1, S2>
+where
+ T: Eq + Hash,
+ S1: BuildHasher,
+ S2: BuildHasher,
+{
+ fn next_back(&mut self) -> Option<Self::Item> {
+ self.iter.next_back()
+ }
+
+ fn rfold<B, F>(self, init: B, f: F) -> B
+ where
+ F: FnMut(B, Self::Item) -> B,
+ {
+ self.iter.rfold(init, f)
+ }
+}
+
+impl<T, S1, S2> FusedIterator for SymmetricDifference<'_, T, S1, S2>
+where
+ T: Eq + Hash,
+ S1: BuildHasher,
+ S2: BuildHasher,
+{
+}
+
+impl<T, S1, S2> Clone for SymmetricDifference<'_, T, S1, S2> {
+ fn clone(&self) -> Self {
+ SymmetricDifference {
+ iter: self.iter.clone(),
+ }
+ }
+}
+
+impl<T, S1, S2> fmt::Debug for SymmetricDifference<'_, T, S1, S2>
+where
+ T: fmt::Debug + Eq + Hash,
+ S1: BuildHasher,
+ S2: BuildHasher,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.clone()).finish()
+ }
+}
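Because the iterator above is a `Chain` of two `Difference`s, `symmetric_difference` yields the values unique to `self` first (in `self`'s order) and then the values unique to `other` (in `other`'s order). An editorial sketch:

```rust
use indexmap::IndexSet;

fn main() {
    let a: IndexSet<i32> = [0, 1, 2].iter().copied().collect();
    let b: IndexSet<i32> = [2, 3, 4].iter().copied().collect();

    // Unique-to-`a` values, then unique-to-`b` values; shared ones dropped.
    let sym: Vec<i32> = a.symmetric_difference(&b).copied().collect();
    assert_eq!(sym, vec![0, 1, 3, 4]);
}
```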
+
+/// A lazy iterator producing elements in the union of `IndexSet`s.
+///
+/// This `struct` is created by the [`union`] method on [`IndexSet`].
+/// See its documentation for more.
+///
+/// [`IndexSet`]: struct.IndexSet.html
+/// [`union`]: struct.IndexSet.html#method.union
+pub struct Union<'a, T, S> {
+ iter: Chain<Iter<'a, T>, Difference<'a, T, S>>,
+}
+
+impl<'a, T, S> Iterator for Union<'a, T, S>
+where
+ T: Eq + Hash,
+ S: BuildHasher,
+{
+ type Item = &'a T;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ self.iter.next()
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+
+ fn fold<B, F>(self, init: B, f: F) -> B
+ where
+ F: FnMut(B, Self::Item) -> B,
+ {
+ self.iter.fold(init, f)
+ }
+}
+
+impl<T, S> DoubleEndedIterator for Union<'_, T, S>
+where
+ T: Eq + Hash,
+ S: BuildHasher,
+{
+ fn next_back(&mut self) -> Option<Self::Item> {
+ self.iter.next_back()
+ }
+
+ fn rfold<B, F>(self, init: B, f: F) -> B
+ where
+ F: FnMut(B, Self::Item) -> B,
+ {
+ self.iter.rfold(init, f)
+ }
+}
+
+impl<T, S> FusedIterator for Union<'_, T, S>
+where
+ T: Eq + Hash,
+ S: BuildHasher,
+{
+}
+
+impl<T, S> Clone for Union<'_, T, S> {
+ fn clone(&self) -> Self {
+ Union {
+ iter: self.iter.clone(),
+ }
+ }
+}
+
+impl<T, S> fmt::Debug for Union<'_, T, S>
+where
+ T: fmt::Debug + Eq + Hash,
+ S: BuildHasher,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.clone()).finish()
+ }
+}
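`Union` chains `self`'s full iterator with `other.difference(self)`, so the result is everything from `self` in order, followed by the values that only `other` has. An editorial sketch:

```rust
use indexmap::IndexSet;

fn main() {
    let a: IndexSet<i32> = [0, 1, 2].iter().copied().collect();
    let b: IndexSet<i32> = [2, 3, 0].iter().copied().collect();

    // All of `a` first, then `b`'s values that `a` lacks.
    let all: Vec<i32> = a.union(&b).copied().collect();
    assert_eq!(all, vec![0, 1, 2, 3]);
}
```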
+
+impl<T, S1, S2> BitAnd<&IndexSet<T, S2>> for &IndexSet<T, S1>
+where
+ T: Eq + Hash + Clone,
+ S1: BuildHasher + Default,
+ S2: BuildHasher,
+{
+ type Output = IndexSet<T, S1>;
+
+ /// Returns the set intersection, cloned into a new set.
+ ///
+ /// Values are collected in the same order that they appear in `self`.
+ fn bitand(self, other: &IndexSet<T, S2>) -> Self::Output {
+ self.intersection(other).cloned().collect()
+ }
+}
+
+impl<T, S1, S2> BitOr<&IndexSet<T, S2>> for &IndexSet<T, S1>
+where
+ T: Eq + Hash + Clone,
+ S1: BuildHasher + Default,
+ S2: BuildHasher,
+{
+ type Output = IndexSet<T, S1>;
+
+ /// Returns the set union, cloned into a new set.
+ ///
+ /// Values from `self` are collected in their original order, followed by
+ /// values that are unique to `other` in their original order.
+ fn bitor(self, other: &IndexSet<T, S2>) -> Self::Output {
+ self.union(other).cloned().collect()
+ }
+}
+
+impl<T, S1, S2> BitXor<&IndexSet<T, S2>> for &IndexSet<T, S1>
+where
+ T: Eq + Hash + Clone,
+ S1: BuildHasher + Default,
+ S2: BuildHasher,
+{
+ type Output = IndexSet<T, S1>;
+
+ /// Returns the set symmetric-difference, cloned into a new set.
+ ///
+ /// Values from `self` are collected in their original order, followed by
+ /// values from `other` in their original order.
+ fn bitxor(self, other: &IndexSet<T, S2>) -> Self::Output {
+ self.symmetric_difference(other).cloned().collect()
+ }
+}
+
+impl<T, S1, S2> Sub<&IndexSet<T, S2>> for &IndexSet<T, S1>
+where
+ T: Eq + Hash + Clone,
+ S1: BuildHasher + Default,
+ S2: BuildHasher,
+{
+ type Output = IndexSet<T, S1>;
+
+ /// Returns the set difference, cloned into a new set.
+ ///
+ /// Values are collected in the same order that they appear in `self`.
+ fn sub(self, other: &IndexSet<T, S2>) -> Self::Output {
+ self.difference(other).cloned().collect()
+ }
+}
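The operator impls above clone the selected elements into a brand-new set built with `S1::default()`, i.e. the left-hand set's hasher type. A small editorial sketch using the default hasher:

```rust
use indexmap::IndexSet;

fn main() {
    let a: IndexSet<i32> = [1, 2, 3].iter().copied().collect();
    let b: IndexSet<i32> = [3, 4].iter().copied().collect();

    let and = &a & &b; // intersection, in `a`'s order: {3}
    let or = &a | &b;  // union: {1, 2, 3, 4}
    let xor = &a ^ &b; // symmetric difference: {1, 2, 4}
    let sub = &a - &b; // difference: {1, 2}

    assert_eq!(and.len(), 1);
    assert_eq!(or.len(), 4);
    assert_eq!(xor.len(), 3);
    assert_eq!(sub.len(), 2);
}
```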
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use std::string::String;
+
+ #[test]
+ fn it_works() {
+ let mut set = IndexSet::new();
+ assert_eq!(set.is_empty(), true);
+ set.insert(1);
+ set.insert(1);
+ assert_eq!(set.len(), 1);
+ assert!(set.get(&1).is_some());
+ assert_eq!(set.is_empty(), false);
+ }
+
+ #[test]
+ fn new() {
+ let set = IndexSet::<String>::new();
+ println!("{:?}", set);
+ assert_eq!(set.capacity(), 0);
+ assert_eq!(set.len(), 0);
+ assert_eq!(set.is_empty(), true);
+ }
+
+ #[test]
+ fn insert() {
+ let insert = [0, 4, 2, 12, 8, 7, 11, 5];
+ let not_present = [1, 3, 6, 9, 10];
+ let mut set = IndexSet::with_capacity(insert.len());
+
+ for (i, &elt) in insert.iter().enumerate() {
+ assert_eq!(set.len(), i);
+ set.insert(elt);
+ assert_eq!(set.len(), i + 1);
+ assert_eq!(set.get(&elt), Some(&elt));
+ }
+ println!("{:?}", set);
+
+ for &elt in &not_present {
+ assert!(set.get(&elt).is_none());
+ }
+ }
+
+ #[test]
+ fn insert_full() {
+ let insert = vec![9, 2, 7, 1, 4, 6, 13];
+ let present = vec![1, 6, 2];
+ let mut set = IndexSet::with_capacity(insert.len());
+
+ for (i, &elt) in insert.iter().enumerate() {
+ assert_eq!(set.len(), i);
+ let (index, success) = set.insert_full(elt);
+ assert!(success);
+ assert_eq!(Some(index), set.get_full(&elt).map(|x| x.0));
+ assert_eq!(set.len(), i + 1);
+ }
+
+ let len = set.len();
+ for &elt in &present {
+ let (index, success) = set.insert_full(elt);
+ assert!(!success);
+ assert_eq!(Some(index), set.get_full(&elt).map(|x| x.0));
+ assert_eq!(set.len(), len);
+ }
+ }
+
+ #[test]
+ fn insert_2() {
+ let mut set = IndexSet::with_capacity(16);
+
+ let mut values = vec![];
+ values.extend(0..16);
+ values.extend(if cfg!(miri) { 32..64 } else { 128..267 });
+
+ for &i in &values {
+ let old_set = set.clone();
+ set.insert(i);
+ for value in old_set.iter() {
+ if set.get(value).is_none() {
+ println!("old_set: {:?}", old_set);
+ println!("set: {:?}", set);
+ panic!("did not find {} in set", value);
+ }
+ }
+ }
+
+ for &i in &values {
+ assert!(set.get(&i).is_some(), "did not find {}", i);
+ }
+ }
+
+ #[test]
+ fn insert_dup() {
+ let mut elements = vec![0, 2, 4, 6, 8];
+ let mut set: IndexSet<u8> = elements.drain(..).collect();
+ {
+ let (i, v) = set.get_full(&0).unwrap();
+ assert_eq!(set.len(), 5);
+ assert_eq!(i, 0);
+ assert_eq!(*v, 0);
+ }
+ {
+ let inserted = set.insert(0);
+ let (i, v) = set.get_full(&0).unwrap();
+ assert_eq!(set.len(), 5);
+ assert_eq!(inserted, false);
+ assert_eq!(i, 0);
+ assert_eq!(*v, 0);
+ }
+ }
+
+ #[test]
+ fn insert_order() {
+ let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23];
+ let mut set = IndexSet::new();
+
+ for &elt in &insert {
+ set.insert(elt);
+ }
+
+ assert_eq!(set.iter().count(), set.len());
+ assert_eq!(set.iter().count(), insert.len());
+ for (a, b) in insert.iter().zip(set.iter()) {
+ assert_eq!(a, b);
+ }
+ for (i, v) in (0..insert.len()).zip(set.iter()) {
+ assert_eq!(set.get_index(i).unwrap(), v);
+ }
+ }
+
+ #[test]
+ fn replace() {
+ let replace = [0, 4, 2, 12, 8, 7, 11, 5];
+ let not_present = [1, 3, 6, 9, 10];
+ let mut set = IndexSet::with_capacity(replace.len());
+
+ for (i, &elt) in replace.iter().enumerate() {
+ assert_eq!(set.len(), i);
+ set.replace(elt);
+ assert_eq!(set.len(), i + 1);
+ assert_eq!(set.get(&elt), Some(&elt));
+ }
+ println!("{:?}", set);
+
+ for &elt in &not_present {
+ assert!(set.get(&elt).is_none());
+ }
+ }
+
+ #[test]
+ fn replace_full() {
+ let replace = vec![9, 2, 7, 1, 4, 6, 13];
+ let present = vec![1, 6, 2];
+ let mut set = IndexSet::with_capacity(replace.len());
+
+ for (i, &elt) in replace.iter().enumerate() {
+ assert_eq!(set.len(), i);
+ let (index, replaced) = set.replace_full(elt);
+ assert!(replaced.is_none());
+ assert_eq!(Some(index), set.get_full(&elt).map(|x| x.0));
+ assert_eq!(set.len(), i + 1);
+ }
+
+ let len = set.len();
+ for &elt in &present {
+ let (index, replaced) = set.replace_full(elt);
+ assert_eq!(Some(elt), replaced);
+ assert_eq!(Some(index), set.get_full(&elt).map(|x| x.0));
+ assert_eq!(set.len(), len);
+ }
+ }
+
+ #[test]
+ fn replace_2() {
+ let mut set = IndexSet::with_capacity(16);
+
+ let mut values = vec![];
+ values.extend(0..16);
+ values.extend(if cfg!(miri) { 32..64 } else { 128..267 });
+
+ for &i in &values {
+ let old_set = set.clone();
+ set.replace(i);
+ for value in old_set.iter() {
+ if set.get(value).is_none() {
+ println!("old_set: {:?}", old_set);
+ println!("set: {:?}", set);
+ panic!("did not find {} in set", value);
+ }
+ }
+ }
+
+ for &i in &values {
+ assert!(set.get(&i).is_some(), "did not find {}", i);
+ }
+ }
+
+ #[test]
+ fn replace_dup() {
+ let mut elements = vec![0, 2, 4, 6, 8];
+ let mut set: IndexSet<u8> = elements.drain(..).collect();
+ {
+ let (i, v) = set.get_full(&0).unwrap();
+ assert_eq!(set.len(), 5);
+ assert_eq!(i, 0);
+ assert_eq!(*v, 0);
+ }
+ {
+ let replaced = set.replace(0);
+ let (i, v) = set.get_full(&0).unwrap();
+ assert_eq!(set.len(), 5);
+ assert_eq!(replaced, Some(0));
+ assert_eq!(i, 0);
+ assert_eq!(*v, 0);
+ }
+ }
+
+ #[test]
+ fn replace_order() {
+ let replace = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23];
+ let mut set = IndexSet::new();
+
+ for &elt in &replace {
+ set.replace(elt);
+ }
+
+ assert_eq!(set.iter().count(), set.len());
+ assert_eq!(set.iter().count(), replace.len());
+ for (a, b) in replace.iter().zip(set.iter()) {
+ assert_eq!(a, b);
+ }
+ for (i, v) in (0..replace.len()).zip(set.iter()) {
+ assert_eq!(set.get_index(i).unwrap(), v);
+ }
+ }
+
+ #[test]
+ fn grow() {
+ let insert = [0, 4, 2, 12, 8, 7, 11];
+ let not_present = [1, 3, 6, 9, 10];
+ let mut set = IndexSet::with_capacity(insert.len());
+
+ for (i, &elt) in insert.iter().enumerate() {
+ assert_eq!(set.len(), i);
+ set.insert(elt);
+ assert_eq!(set.len(), i + 1);
+ assert_eq!(set.get(&elt), Some(&elt));
+ }
+
+ println!("{:?}", set);
+ for &elt in &insert {
+ set.insert(elt * 10);
+ }
+ for &elt in &insert {
+ set.insert(elt * 100);
+ }
+ for (i, &elt) in insert.iter().cycle().enumerate().take(100) {
+ set.insert(elt * 100 + i as i32);
+ }
+ println!("{:?}", set);
+ for &elt in &not_present {
+ assert!(set.get(&elt).is_none());
+ }
+ }
+
+ #[test]
+ fn reserve() {
+ let mut set = IndexSet::<usize>::new();
+ assert_eq!(set.capacity(), 0);
+ set.reserve(100);
+ let capacity = set.capacity();
+ assert!(capacity >= 100);
+ for i in 0..capacity {
+ assert_eq!(set.len(), i);
+ set.insert(i);
+ assert_eq!(set.len(), i + 1);
+ assert_eq!(set.capacity(), capacity);
+ assert_eq!(set.get(&i), Some(&i));
+ }
+ set.insert(capacity);
+ assert_eq!(set.len(), capacity + 1);
+ assert!(set.capacity() > capacity);
+ assert_eq!(set.get(&capacity), Some(&capacity));
+ }
+
+ #[test]
+ fn shrink_to_fit() {
+ let mut set = IndexSet::<usize>::new();
+ assert_eq!(set.capacity(), 0);
+ for i in 0..100 {
+ assert_eq!(set.len(), i);
+ set.insert(i);
+ assert_eq!(set.len(), i + 1);
+ assert!(set.capacity() >= i + 1);
+ assert_eq!(set.get(&i), Some(&i));
+ set.shrink_to_fit();
+ assert_eq!(set.len(), i + 1);
+ assert_eq!(set.capacity(), i + 1);
+ assert_eq!(set.get(&i), Some(&i));
+ }
+ }
+
+ #[test]
+ fn remove() {
+ let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23];
+ let mut set = IndexSet::new();
+
+ for &elt in &insert {
+ set.insert(elt);
+ }
+
+ assert_eq!(set.iter().count(), set.len());
+ assert_eq!(set.iter().count(), insert.len());
+ for (a, b) in insert.iter().zip(set.iter()) {
+ assert_eq!(a, b);
+ }
+
+ let remove_fail = [99, 77];
+ let remove = [4, 12, 8, 7];
+
+ for &value in &remove_fail {
+ assert!(set.swap_remove_full(&value).is_none());
+ }
+ println!("{:?}", set);
+ for &value in &remove {
+ //println!("{:?}", set);
+ let index = set.get_full(&value).unwrap().0;
+ assert_eq!(set.swap_remove_full(&value), Some((index, value)));
+ }
+ println!("{:?}", set);
+
+ for value in &insert {
+ assert_eq!(set.get(value).is_some(), !remove.contains(value));
+ }
+ assert_eq!(set.len(), insert.len() - remove.len());
+ assert_eq!(set.iter().count(), insert.len() - remove.len());
+ }
+
+ #[test]
+ fn swap_remove_index() {
+ let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23];
+ let mut set = IndexSet::new();
+
+ for &elt in &insert {
+ set.insert(elt);
+ }
+
+ let mut vector = insert.to_vec();
+ let remove_sequence = &[3, 3, 10, 4, 5, 4, 3, 0, 1];
+
+ // check that the same swap remove sequence on vec and set
+ // have the same result.
+ for &rm in remove_sequence {
+ let out_vec = vector.swap_remove(rm);
+ let out_set = set.swap_remove_index(rm).unwrap();
+ assert_eq!(out_vec, out_set);
+ }
+ assert_eq!(vector.len(), set.len());
+ for (a, b) in vector.iter().zip(set.iter()) {
+ assert_eq!(a, b);
+ }
+ }
+
+ #[test]
+ fn partial_eq_and_eq() {
+ let mut set_a = IndexSet::new();
+ set_a.insert(1);
+ set_a.insert(2);
+ let mut set_b = set_a.clone();
+ assert_eq!(set_a, set_b);
+ set_b.swap_remove(&1);
+ assert_ne!(set_a, set_b);
+
+ let set_c: IndexSet<_> = set_b.into_iter().collect();
+ assert_ne!(set_a, set_c);
+ assert_ne!(set_c, set_a);
+ }
+
+ #[test]
+ fn extend() {
+ let mut set = IndexSet::new();
+ set.extend(vec![&1, &2, &3, &4]);
+ set.extend(vec![5, 6]);
+ assert_eq!(set.into_iter().collect::<Vec<_>>(), vec![1, 2, 3, 4, 5, 6]);
+ }
+
+ #[test]
+ fn comparisons() {
+ let set_a: IndexSet<_> = (0..3).collect();
+ let set_b: IndexSet<_> = (3..6).collect();
+ let set_c: IndexSet<_> = (0..6).collect();
+ let set_d: IndexSet<_> = (3..9).collect();
+
+ assert!(!set_a.is_disjoint(&set_a));
+ assert!(set_a.is_subset(&set_a));
+ assert!(set_a.is_superset(&set_a));
+
+ assert!(set_a.is_disjoint(&set_b));
+ assert!(set_b.is_disjoint(&set_a));
+ assert!(!set_a.is_subset(&set_b));
+ assert!(!set_b.is_subset(&set_a));
+ assert!(!set_a.is_superset(&set_b));
+ assert!(!set_b.is_superset(&set_a));
+
+ assert!(!set_a.is_disjoint(&set_c));
+ assert!(!set_c.is_disjoint(&set_a));
+ assert!(set_a.is_subset(&set_c));
+ assert!(!set_c.is_subset(&set_a));
+ assert!(!set_a.is_superset(&set_c));
+ assert!(set_c.is_superset(&set_a));
+
+ assert!(!set_c.is_disjoint(&set_d));
+ assert!(!set_d.is_disjoint(&set_c));
+ assert!(!set_c.is_subset(&set_d));
+ assert!(!set_d.is_subset(&set_c));
+ assert!(!set_c.is_superset(&set_d));
+ assert!(!set_d.is_superset(&set_c));
+ }
+
+ #[test]
+ fn iter_comparisons() {
+ use std::iter::empty;
+
+ fn check<'a, I1, I2>(iter1: I1, iter2: I2)
+ where
+ I1: Iterator<Item = &'a i32>,
+ I2: Iterator<Item = i32>,
+ {
+ assert!(iter1.copied().eq(iter2));
+ }
+
+ let set_a: IndexSet<_> = (0..3).collect();
+ let set_b: IndexSet<_> = (3..6).collect();
+ let set_c: IndexSet<_> = (0..6).collect();
+ let set_d: IndexSet<_> = (3..9).rev().collect();
+
+ check(set_a.difference(&set_a), empty());
+ check(set_a.symmetric_difference(&set_a), empty());
+ check(set_a.intersection(&set_a), 0..3);
+ check(set_a.union(&set_a), 0..3);
+
+ check(set_a.difference(&set_b), 0..3);
+ check(set_b.difference(&set_a), 3..6);
+ check(set_a.symmetric_difference(&set_b), 0..6);
+ check(set_b.symmetric_difference(&set_a), (3..6).chain(0..3));
+ check(set_a.intersection(&set_b), empty());
+ check(set_b.intersection(&set_a), empty());
+ check(set_a.union(&set_b), 0..6);
+ check(set_b.union(&set_a), (3..6).chain(0..3));
+
+ check(set_a.difference(&set_c), empty());
+ check(set_c.difference(&set_a), 3..6);
+ check(set_a.symmetric_difference(&set_c), 3..6);
+ check(set_c.symmetric_difference(&set_a), 3..6);
+ check(set_a.intersection(&set_c), 0..3);
+ check(set_c.intersection(&set_a), 0..3);
+ check(set_a.union(&set_c), 0..6);
+ check(set_c.union(&set_a), 0..6);
+
+ check(set_c.difference(&set_d), 0..3);
+ check(set_d.difference(&set_c), (6..9).rev());
+ check(
+ set_c.symmetric_difference(&set_d),
+ (0..3).chain((6..9).rev()),
+ );
+ check(set_d.symmetric_difference(&set_c), (6..9).rev().chain(0..3));
+ check(set_c.intersection(&set_d), 3..6);
+ check(set_d.intersection(&set_c), (3..6).rev());
+ check(set_c.union(&set_d), (0..6).chain((6..9).rev()));
+ check(set_d.union(&set_c), (3..9).rev().chain(0..3));
+ }
+
+ #[test]
+ fn ops() {
+ let empty = IndexSet::<i32>::new();
+ let set_a: IndexSet<_> = (0..3).collect();
+ let set_b: IndexSet<_> = (3..6).collect();
+ let set_c: IndexSet<_> = (0..6).collect();
+ let set_d: IndexSet<_> = (3..9).rev().collect();
+
+ #[allow(clippy::eq_op)]
+ {
+ assert_eq!(&set_a & &set_a, set_a);
+ assert_eq!(&set_a | &set_a, set_a);
+ assert_eq!(&set_a ^ &set_a, empty);
+ assert_eq!(&set_a - &set_a, empty);
+ }
+
+ assert_eq!(&set_a & &set_b, empty);
+ assert_eq!(&set_b & &set_a, empty);
+ assert_eq!(&set_a | &set_b, set_c);
+ assert_eq!(&set_b | &set_a, set_c);
+ assert_eq!(&set_a ^ &set_b, set_c);
+ assert_eq!(&set_b ^ &set_a, set_c);
+ assert_eq!(&set_a - &set_b, set_a);
+ assert_eq!(&set_b - &set_a, set_b);
+
+ assert_eq!(&set_a & &set_c, set_a);
+ assert_eq!(&set_c & &set_a, set_a);
+ assert_eq!(&set_a | &set_c, set_c);
+ assert_eq!(&set_c | &set_a, set_c);
+ assert_eq!(&set_a ^ &set_c, set_b);
+ assert_eq!(&set_c ^ &set_a, set_b);
+ assert_eq!(&set_a - &set_c, empty);
+ assert_eq!(&set_c - &set_a, set_b);
+
+ assert_eq!(&set_c & &set_d, set_b);
+ assert_eq!(&set_d & &set_c, set_b);
+ assert_eq!(&set_c | &set_d, &set_a | &set_d);
+ assert_eq!(&set_d | &set_c, &set_a | &set_d);
+ assert_eq!(&set_c ^ &set_d, &set_a | &(&set_d - &set_b));
+ assert_eq!(&set_d ^ &set_c, &set_a | &(&set_d - &set_b));
+ assert_eq!(&set_c - &set_d, set_a);
+ assert_eq!(&set_d - &set_c, &set_d - &set_b);
+ }
+
+ #[test]
+ #[cfg(has_std)]
+ fn from_array() {
+ let set1 = IndexSet::from([1, 2, 3, 4]);
+ let set2: IndexSet<_> = [1, 2, 3, 4].into();
+
+ assert_eq!(set1, set2);
+ }
+}
diff --git a/vendor/indexmap-1.9.3/src/util.rs b/vendor/indexmap-1.9.3/src/util.rs
new file mode 100644
index 0000000..a24dfaf
--- /dev/null
+++ b/vendor/indexmap-1.9.3/src/util.rs
@@ -0,0 +1,31 @@
+use core::ops::{Bound, Range, RangeBounds};
+
+pub(crate) fn third<A, B, C>(t: (A, B, C)) -> C {
+ t.2
+}
+
+pub(crate) fn simplify_range<R>(range: R, len: usize) -> Range<usize>
+where
+ R: RangeBounds<usize>,
+{
+ let start = match range.start_bound() {
+ Bound::Unbounded => 0,
+ Bound::Included(&i) if i <= len => i,
+ Bound::Excluded(&i) if i < len => i + 1,
+ bound => panic!("range start {:?} should be <= length {}", bound, len),
+ };
+ let end = match range.end_bound() {
+ Bound::Unbounded => len,
+ Bound::Excluded(&i) if i <= len => i,
+ Bound::Included(&i) if i < len => i + 1,
+ bound => panic!("range end {:?} should be <= length {}", bound, len),
+ };
+ if start > end {
+ panic!(
+ "range start {:?} should be <= range end {:?}",
+ range.start_bound(),
+ range.end_bound()
+ );
+ }
+ start..end
+}
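A hypothetical in-crate unit test (not present upstream) illustrating how `simplify_range` normalizes each kind of bound against a length of 5; out-of-bounds ends and `start > end` panic instead:

```rust
#[cfg(test)]
mod simplify_range_sketch {
    use super::simplify_range;

    #[test]
    fn bounds_are_normalized() {
        // Unbounded bounds expand to the full range.
        assert_eq!(simplify_range(.., 5), 0..5);
        // An inclusive end becomes the equivalent exclusive end.
        assert_eq!(simplify_range(1..=3, 5), 1..4);
        // In-bounds half-open ranges pass through unchanged.
        assert_eq!(simplify_range(2..5, 5), 2..5);
    }
}
```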
diff --git a/vendor/indexmap-1.9.3/tests/equivalent_trait.rs b/vendor/indexmap-1.9.3/tests/equivalent_trait.rs
new file mode 100644
index 0000000..ff5943a
--- /dev/null
+++ b/vendor/indexmap-1.9.3/tests/equivalent_trait.rs
@@ -0,0 +1,53 @@
+use indexmap::indexmap;
+use indexmap::Equivalent;
+
+use std::hash::Hash;
+
+#[derive(Debug, Hash)]
+pub struct Pair<A, B>(pub A, pub B);
+
+impl<A, B, C, D> PartialEq<(A, B)> for Pair<C, D>
+where
+ C: PartialEq<A>,
+ D: PartialEq<B>,
+{
+ fn eq(&self, rhs: &(A, B)) -> bool {
+ self.0 == rhs.0 && self.1 == rhs.1
+ }
+}
+
+impl<A, B, X> Equivalent<X> for Pair<A, B>
+where
+ Pair<A, B>: PartialEq<X>,
+ A: Hash + Eq,
+ B: Hash + Eq,
+{
+ fn equivalent(&self, other: &X) -> bool {
+ *self == *other
+ }
+}
+
+#[test]
+fn test_lookup() {
+ let s = String::from;
+ let map = indexmap! {
+ (s("a"), s("b")) => 1,
+ (s("a"), s("x")) => 2,
+ };
+
+ assert!(map.contains_key(&Pair("a", "b")));
+ assert!(!map.contains_key(&Pair("b", "a")));
+}
+
+#[test]
+fn test_string_str() {
+ let s = String::from;
+ let mut map = indexmap! {
+ s("a") => 1, s("b") => 2,
+ s("x") => 3, s("y") => 4,
+ };
+
+ assert!(map.contains_key("a"));
+ assert!(!map.contains_key("z"));
+ assert_eq!(map.swap_remove("b"), Some(2));
+}
diff --git a/vendor/indexmap-1.9.3/tests/macros_full_path.rs b/vendor/indexmap-1.9.3/tests/macros_full_path.rs
new file mode 100644
index 0000000..2467d9b
--- /dev/null
+++ b/vendor/indexmap-1.9.3/tests/macros_full_path.rs
@@ -0,0 +1,19 @@
+#[test]
+fn test_create_map() {
+ let _m = indexmap::indexmap! {
+ 1 => 2,
+ 7 => 1,
+ 2 => 2,
+ 3 => 3,
+ };
+}
+
+#[test]
+fn test_create_set() {
+ let _s = indexmap::indexset! {
+ 1,
+ 7,
+ 2,
+ 3,
+ };
+}
diff --git a/vendor/indexmap-1.9.3/tests/quick.rs b/vendor/indexmap-1.9.3/tests/quick.rs
new file mode 100644
index 0000000..e9d96ac
--- /dev/null
+++ b/vendor/indexmap-1.9.3/tests/quick.rs
@@ -0,0 +1,573 @@
+use indexmap::{IndexMap, IndexSet};
+use itertools::Itertools;
+
+use quickcheck::Arbitrary;
+use quickcheck::Gen;
+use quickcheck::QuickCheck;
+use quickcheck::TestResult;
+
+use fnv::FnvHasher;
+use std::hash::{BuildHasher, BuildHasherDefault};
+type FnvBuilder = BuildHasherDefault<FnvHasher>;
+type IndexMapFnv<K, V> = IndexMap<K, V, FnvBuilder>;
+
+use std::cmp::min;
+use std::collections::HashMap;
+use std::collections::HashSet;
+use std::fmt::Debug;
+use std::hash::Hash;
+use std::ops::Bound;
+use std::ops::Deref;
+
+use indexmap::map::Entry as OEntry;
+use std::collections::hash_map::Entry as HEntry;
+
+fn set<'a, T: 'a, I>(iter: I) -> HashSet<T>
+where
+ I: IntoIterator<Item = &'a T>,
+ T: Copy + Hash + Eq,
+{
+ iter.into_iter().copied().collect()
+}
+
+fn indexmap<'a, T: 'a, I>(iter: I) -> IndexMap<T, ()>
+where
+ I: IntoIterator<Item = &'a T>,
+ T: Copy + Hash + Eq,
+{
+ IndexMap::from_iter(iter.into_iter().copied().map(|k| (k, ())))
+}
+
+// Helper macro to allow us to use smaller quickcheck limits under miri.
+macro_rules! quickcheck_limit {
+ (@as_items $($i:item)*) => ($($i)*);
+ {
+ $(
+ $(#[$m:meta])*
+ fn $fn_name:ident($($arg_name:ident : $arg_ty:ty),*) -> $ret:ty {
+ $($code:tt)*
+ }
+ )*
+ } => (
+ quickcheck::quickcheck! {
+ @as_items
+ $(
+ #[test]
+ $(#[$m])*
+ fn $fn_name() {
+ fn prop($($arg_name: $arg_ty),*) -> $ret {
+ $($code)*
+ }
+ let mut quickcheck = QuickCheck::new();
+ if cfg!(miri) {
+ quickcheck = quickcheck
+ .gen(Gen::new(10))
+ .tests(10)
+ .max_tests(100);
+ }
+
+ quickcheck.quickcheck(prop as fn($($arg_ty),*) -> $ret);
+ }
+ )*
+ }
+ )
+}
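Roughly what one `quickcheck_limit!` property expands to, hand-written as an editorial sketch (the real code is generated by the macro above; `contains` here mirrors the first property in the next block, and assumes `indexmap` and `quickcheck` as dependencies):

```rust
use indexmap::IndexMap;
use quickcheck::{Gen, QuickCheck};

#[test]
fn contains_expanded_sketch() {
    fn prop(insert: Vec<u32>) -> bool {
        let mut map = IndexMap::new();
        for &key in &insert {
            map.insert(key, ());
        }
        insert.iter().all(|&key| map.get(&key).is_some())
    }

    let mut quickcheck = QuickCheck::new();
    if cfg!(miri) {
        // Smaller generator size and fewer cases keep Miri runs tractable.
        quickcheck = quickcheck.gen(Gen::new(10)).tests(10).max_tests(100);
    }
    quickcheck.quickcheck(prop as fn(Vec<u32>) -> bool);
}
```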
+
+quickcheck_limit! {
+ fn contains(insert: Vec<u32>) -> bool {
+ let mut map = IndexMap::new();
+ for &key in &insert {
+ map.insert(key, ());
+ }
+ insert.iter().all(|&key| map.get(&key).is_some())
+ }
+
+ fn contains_not(insert: Vec<u8>, not: Vec<u8>) -> bool {
+ let mut map = IndexMap::new();
+ for &key in &insert {
+ map.insert(key, ());
+ }
+ let nots = &set(&not) - &set(&insert);
+ nots.iter().all(|&key| map.get(&key).is_none())
+ }
+
+ fn insert_remove(insert: Vec<u8>, remove: Vec<u8>) -> bool {
+ let mut map = IndexMap::new();
+ for &key in &insert {
+ map.insert(key, ());
+ }
+ for &key in &remove {
+ map.swap_remove(&key);
+ }
+ let elements = &set(&insert) - &set(&remove);
+ map.len() == elements.len() && map.iter().count() == elements.len() &&
+ elements.iter().all(|k| map.get(k).is_some())
+ }
+
+ fn insertion_order(insert: Vec<u32>) -> bool {
+ let mut map = IndexMap::new();
+ for &key in &insert {
+ map.insert(key, ());
+ }
+ itertools::assert_equal(insert.iter().unique(), map.keys());
+ true
+ }
+
+ fn pop(insert: Vec<u8>) -> bool {
+ let mut map = IndexMap::new();
+ for &key in &insert {
+ map.insert(key, ());
+ }
+ let mut pops = Vec::new();
+ while let Some((key, _v)) = map.pop() {
+ pops.push(key);
+ }
+ pops.reverse();
+
+ itertools::assert_equal(insert.iter().unique(), &pops);
+ true
+ }
+
+ fn with_cap(template: Vec<()>) -> bool {
+ let cap = template.len();
+ let map: IndexMap<u8, u8> = IndexMap::with_capacity(cap);
+ println!("wish: {}, got: {} (diff: {})", cap, map.capacity(), map.capacity() as isize - cap as isize);
+ map.capacity() >= cap
+ }
+
+ fn drain_full(insert: Vec<u8>) -> bool {
+ let mut map = IndexMap::new();
+ for &key in &insert {
+ map.insert(key, ());
+ }
+ let mut clone = map.clone();
+ let drained = clone.drain(..);
+ for (key, _) in drained {
+ map.swap_remove(&key);
+ }
+ map.is_empty()
+ }
+
+ fn drain_bounds(insert: Vec<u8>, range: (Bound<usize>, Bound<usize>)) -> TestResult {
+ let mut map = IndexMap::new();
+ for &key in &insert {
+ map.insert(key, ());
+ }
+
+ // First see if `Vec::drain` is happy with this range.
+ let result = std::panic::catch_unwind(|| {
+ let mut keys: Vec<u8> = map.keys().copied().collect();
+ keys.drain(range);
+ keys
+ });
+
+ if let Ok(keys) = result {
+ map.drain(range);
+ // Check that our `drain` matches the same key order.
+ assert!(map.keys().eq(&keys));
+ // Check that hash lookups all work too.
+ assert!(keys.iter().all(|key| map.contains_key(key)));
+ TestResult::passed()
+ } else {
+ // If `Vec::drain` panicked, so should we.
+ TestResult::must_fail(move || { map.drain(range); })
+ }
+ }
+
+ fn shift_remove(insert: Vec<u8>, remove: Vec<u8>) -> bool {
+ let mut map = IndexMap::new();
+ for &key in &insert {
+ map.insert(key, ());
+ }
+ for &key in &remove {
+ map.shift_remove(&key);
+ }
+ let elements = &set(&insert) - &set(&remove);
+
+ // Check that order is preserved after removals
+ let mut iter = map.keys();
+ for &key in insert.iter().unique() {
+ if elements.contains(&key) {
+ assert_eq!(Some(&key), iter.next());
+ }
+ }
+
+ map.len() == elements.len() && map.iter().count() == elements.len() &&
+ elements.iter().all(|k| map.get(k).is_some())
+ }
+
+ fn indexing(insert: Vec<u8>) -> bool {
+ let mut map: IndexMap<_, _> = insert.into_iter().map(|x| (x, x)).collect();
+ let set: IndexSet<_> = map.keys().copied().collect();
+ assert_eq!(map.len(), set.len());
+
+ for (i, &key) in set.iter().enumerate() {
+ assert_eq!(map.get_index(i), Some((&key, &key)));
+ assert_eq!(set.get_index(i), Some(&key));
+ assert_eq!(map[i], key);
+ assert_eq!(set[i], key);
+
+ *map.get_index_mut(i).unwrap().1 >>= 1;
+ map[i] <<= 1;
+ }
+
+ set.iter().enumerate().all(|(i, &key)| {
+ let value = key & !1;
+ map[&key] == value && map[i] == value
+ })
+ }
+
+ // Use `u8` test indices so quickcheck is less likely to go out of bounds.
+ fn swap_indices(vec: Vec<u8>, a: u8, b: u8) -> TestResult {
+ let mut set = IndexSet::<u8>::from_iter(vec);
+ let a = usize::from(a);
+ let b = usize::from(b);
+
+ if a >= set.len() || b >= set.len() {
+ return TestResult::discard();
+ }
+
+ let mut vec = Vec::from_iter(set.iter().cloned());
+ vec.swap(a, b);
+
+ set.swap_indices(a, b);
+
+ // Check both iteration order and hash lookups
+ assert!(set.iter().eq(vec.iter()));
+ assert!(vec.iter().enumerate().all(|(i, x)| {
+ set.get_index_of(x) == Some(i)
+ }));
+ TestResult::passed()
+ }
+
+ // Use `u8` test indices so quickcheck is less likely to go out of bounds.
+ fn move_index(vec: Vec<u8>, from: u8, to: u8) -> TestResult {
+ let mut set = IndexSet::<u8>::from_iter(vec);
+ let from = usize::from(from);
+ let to = usize::from(to);
+
+ if from >= set.len() || to >= set.len() {
+ return TestResult::discard();
+ }
+
+ let mut vec = Vec::from_iter(set.iter().cloned());
+ let x = vec.remove(from);
+ vec.insert(to, x);
+
+ set.move_index(from, to);
+
+ // Check both iteration order and hash lookups
+ assert!(set.iter().eq(vec.iter()));
+ assert!(vec.iter().enumerate().all(|(i, x)| {
+ set.get_index_of(x) == Some(i)
+ }));
+ TestResult::passed()
+ }
+}
+
+use crate::Op::*;
+#[derive(Copy, Clone, Debug)]
+enum Op<K, V> {
+ Add(K, V),
+ Remove(K),
+ AddEntry(K, V),
+ RemoveEntry(K),
+}
+
+impl<K, V> Arbitrary for Op<K, V>
+where
+ K: Arbitrary,
+ V: Arbitrary,
+{
+ fn arbitrary(g: &mut Gen) -> Self {
+ match u32::arbitrary(g) % 4 {
+ 0 => Add(K::arbitrary(g), V::arbitrary(g)),
+ 1 => AddEntry(K::arbitrary(g), V::arbitrary(g)),
+ 2 => Remove(K::arbitrary(g)),
+ _ => RemoveEntry(K::arbitrary(g)),
+ }
+ }
+}
+
+fn do_ops<K, V, S>(ops: &[Op<K, V>], a: &mut IndexMap<K, V, S>, b: &mut HashMap<K, V>)
+where
+ K: Hash + Eq + Clone,
+ V: Clone,
+ S: BuildHasher,
+{
+ for op in ops {
+ match *op {
+ Add(ref k, ref v) => {
+ a.insert(k.clone(), v.clone());
+ b.insert(k.clone(), v.clone());
+ }
+ AddEntry(ref k, ref v) => {
+ a.entry(k.clone()).or_insert_with(|| v.clone());
+ b.entry(k.clone()).or_insert_with(|| v.clone());
+ }
+ Remove(ref k) => {
+ a.swap_remove(k);
+ b.remove(k);
+ }
+ RemoveEntry(ref k) => {
+ if let OEntry::Occupied(ent) = a.entry(k.clone()) {
+ ent.swap_remove_entry();
+ }
+ if let HEntry::Occupied(ent) = b.entry(k.clone()) {
+ ent.remove_entry();
+ }
+ }
+ }
+ //println!("{:?}", a);
+ }
+}
+
+fn assert_maps_equivalent<K, V>(a: &IndexMap<K, V>, b: &HashMap<K, V>) -> bool
+where
+ K: Hash + Eq + Debug,
+ V: Eq + Debug,
+{
+ assert_eq!(a.len(), b.len());
+ assert_eq!(a.iter().next().is_some(), b.iter().next().is_some());
+ for key in a.keys() {
+ assert!(b.contains_key(key), "b does not contain {:?}", key);
+ }
+ for key in b.keys() {
+ assert!(a.get(key).is_some(), "a does not contain {:?}", key);
+ }
+ for key in a.keys() {
+ assert_eq!(a[key], b[key]);
+ }
+ true
+}
+
+quickcheck_limit! {
+ fn operations_i8(ops: Large<Vec<Op<i8, i8>>>) -> bool {
+ let mut map = IndexMap::new();
+ let mut reference = HashMap::new();
+ do_ops(&ops, &mut map, &mut reference);
+ assert_maps_equivalent(&map, &reference)
+ }
+
+ fn operations_string(ops: Vec<Op<Alpha, i8>>) -> bool {
+ let mut map = IndexMap::new();
+ let mut reference = HashMap::new();
+ do_ops(&ops, &mut map, &mut reference);
+ assert_maps_equivalent(&map, &reference)
+ }
+
+ fn keys_values(ops: Large<Vec<Op<i8, i8>>>) -> bool {
+ let mut map = IndexMap::new();
+ let mut reference = HashMap::new();
+ do_ops(&ops, &mut map, &mut reference);
+ let mut visit = IndexMap::new();
+ for (k, v) in map.keys().zip(map.values()) {
+ assert_eq!(&map[k], v);
+ assert!(!visit.contains_key(k));
+ visit.insert(*k, *v);
+ }
+ assert_eq!(visit.len(), reference.len());
+ true
+ }
+
+ fn keys_values_mut(ops: Large<Vec<Op<i8, i8>>>) -> bool {
+ let mut map = IndexMap::new();
+ let mut reference = HashMap::new();
+ do_ops(&ops, &mut map, &mut reference);
+ let mut visit = IndexMap::new();
+ let keys = Vec::from_iter(map.keys().copied());
+ for (k, v) in keys.iter().zip(map.values_mut()) {
+ assert_eq!(&reference[k], v);
+ assert!(!visit.contains_key(k));
+ visit.insert(*k, *v);
+ }
+ assert_eq!(visit.len(), reference.len());
+ true
+ }
+
+ fn equality(ops1: Vec<Op<i8, i8>>, removes: Vec<usize>) -> bool {
+ let mut map = IndexMap::new();
+ let mut reference = HashMap::new();
+ do_ops(&ops1, &mut map, &mut reference);
+ let mut ops2 = ops1.clone();
+ for &r in &removes {
+ if !ops2.is_empty() {
+ let i = r % ops2.len();
+ ops2.remove(i);
+ }
+ }
+ let mut map2 = IndexMapFnv::default();
+ let mut reference2 = HashMap::new();
+ do_ops(&ops2, &mut map2, &mut reference2);
+ assert_eq!(map == map2, reference == reference2);
+ true
+ }
+
+ fn retain_ordered(keys: Large<Vec<i8>>, remove: Large<Vec<i8>>) -> () {
+ let mut map = indexmap(keys.iter());
+ let initial_map = map.clone(); // deduplicated in-order input
+ let remove_map = indexmap(remove.iter());
+ let keys_s = set(keys.iter());
+ let remove_s = set(remove.iter());
+ let answer = &keys_s - &remove_s;
+ map.retain(|k, _| !remove_map.contains_key(k));
+
+ // check the values
+ assert_eq!(map.len(), answer.len());
+ for key in &answer {
+ assert!(map.contains_key(key));
+ }
+ // check the order
+ itertools::assert_equal(map.keys(), initial_map.keys().filter(|&k| !remove_map.contains_key(k)));
+ }
+
+ fn sort_1(keyvals: Large<Vec<(i8, i8)>>) -> () {
+ let mut map: IndexMap<_, _> = IndexMap::from_iter(keyvals.to_vec());
+ let mut answer = keyvals.0;
+ answer.sort_by_key(|t| t.0);
+
+ // reverse dedup: Because IndexMap::from_iter keeps the last value for
+ // identical keys
+ answer.reverse();
+ answer.dedup_by_key(|t| t.0);
+ answer.reverse();
+
+ map.sort_by(|k1, _, k2, _| Ord::cmp(k1, k2));
+
+ // check it contains all the values it should
+ for &(key, val) in &answer {
+ assert_eq!(map[&key], val);
+ }
+
+ // check the order
+
+ let mapv = Vec::from_iter(map);
+ assert_eq!(answer, mapv);
+
+ }
+
+ fn sort_2(keyvals: Large<Vec<(i8, i8)>>) -> () {
+ let mut map: IndexMap<_, _> = IndexMap::from_iter(keyvals.to_vec());
+ map.sort_by(|_, v1, _, v2| Ord::cmp(v1, v2));
+ assert_sorted_by_key(map, |t| t.1);
+ }
+
+ fn reverse(keyvals: Large<Vec<(i8, i8)>>) -> () {
+ let mut map: IndexMap<_, _> = IndexMap::from_iter(keyvals.to_vec());
+
+ fn generate_answer(input: &Vec<(i8, i8)>) -> Vec<(i8, i8)> {
+ // to mimic what `IndexMap::from_iter` does:
+ // need to get (A) the unique keys in forward order, and (B) the
+ // last value of each of those keys.
+
+ // create (A): an iterable that yields the unique keys in ltr order
+ let mut seen_keys = HashSet::new();
+ let unique_keys_forward = input.iter().filter_map(move |(k, _)| {
+ if seen_keys.contains(k) { None }
+ else { seen_keys.insert(*k); Some(*k) }
+ });
+
+ // create (B): a mapping of keys to the last value seen for that key
+ // this is the same as reversing the input and taking the first
+ // value seen for that key!
+ let mut last_val_per_key = HashMap::new();
+ for &(k, v) in input.iter().rev() {
+ if !last_val_per_key.contains_key(&k) {
+ last_val_per_key.insert(k, v);
+ }
+ }
+
+ // iterate over the keys in (A) in order, and match each one with
+ // the corresponding last value from (B)
+ let mut ans: Vec<_> = unique_keys_forward
+ .map(|k| (k, *last_val_per_key.get(&k).unwrap()))
+ .collect();
+
+ // finally, since this test is testing `.reverse()`, reverse the
+ // answer in-place
+ ans.reverse();
+
+ ans
+ }
+
+ let answer = generate_answer(&keyvals.0);
+
+ // perform the work
+ map.reverse();
+
+ // check it contains all the values it should
+ for &(key, val) in &answer {
+ assert_eq!(map[&key], val);
+ }
+
+ // check the order
+ let mapv = Vec::from_iter(map);
+ assert_eq!(answer, mapv);
+ }
+}
+
+fn assert_sorted_by_key<I, Key, X>(iterable: I, key: Key)
+where
+ I: IntoIterator,
+ I::Item: Ord + Clone + Debug,
+ Key: Fn(&I::Item) -> X,
+ X: Ord,
+{
+ let input = Vec::from_iter(iterable);
+ let mut sorted = input.clone();
+ sorted.sort_by_key(key);
+ assert_eq!(input, sorted);
+}
+
+#[derive(Clone, Debug, Hash, PartialEq, Eq)]
+struct Alpha(String);
+
+impl Deref for Alpha {
+ type Target = String;
+ fn deref(&self) -> &String {
+ &self.0
+ }
+}
+
+const ALPHABET: &[u8] = b"abcdefghijklmnopqrstuvwxyz";
+
+impl Arbitrary for Alpha {
+ fn arbitrary(g: &mut Gen) -> Self {
+ let len = usize::arbitrary(g) % g.size();
+ let len = min(len, 16);
+ Alpha(
+ (0..len)
+ .map(|_| ALPHABET[usize::arbitrary(g) % ALPHABET.len()] as char)
+ .collect(),
+ )
+ }
+
+ fn shrink(&self) -> Box<dyn Iterator<Item = Self>> {
+ Box::new((**self).shrink().map(Alpha))
+ }
+}
+
+/// quickcheck Arbitrary adaptor -- make a larger vec
+#[derive(Clone, Debug)]
+struct Large<T>(T);
+
+impl<T> Deref for Large<T> {
+ type Target = T;
+ fn deref(&self) -> &T {
+ &self.0
+ }
+}
+
+impl<T> Arbitrary for Large<Vec<T>>
+where
+ T: Arbitrary,
+{
+ fn arbitrary(g: &mut Gen) -> Self {
+ let len = usize::arbitrary(g) % (g.size() * 10);
+ Large((0..len).map(|_| T::arbitrary(g)).collect())
+ }
+
+ fn shrink(&self) -> Box<dyn Iterator<Item = Self>> {
+ Box::new((**self).shrink().map(Large))
+ }
+}
diff --git a/vendor/indexmap-1.9.3/tests/tests.rs b/vendor/indexmap-1.9.3/tests/tests.rs
new file mode 100644
index 0000000..7d522f1
--- /dev/null
+++ b/vendor/indexmap-1.9.3/tests/tests.rs
@@ -0,0 +1,28 @@
+use indexmap::{indexmap, indexset};
+
+#[test]
+fn test_sort() {
+ let m = indexmap! {
+ 1 => 2,
+ 7 => 1,
+ 2 => 2,
+ 3 => 3,
+ };
+
+ itertools::assert_equal(
+ m.sorted_by(|_k1, v1, _k2, v2| v1.cmp(v2)),
+ vec![(7, 1), (1, 2), (2, 2), (3, 3)],
+ );
+}
+
+#[test]
+fn test_sort_set() {
+ let s = indexset! {
+ 1,
+ 7,
+ 2,
+ 3,
+ };
+
+ itertools::assert_equal(s.sorted_by(|v1, v2| v1.cmp(v2)), vec![1, 2, 3, 7]);
+}
diff --git a/vendor/indexmap/.cargo-checksum.json b/vendor/indexmap/.cargo-checksum.json
new file mode 100644
index 0000000..7d54b6a
--- /dev/null
+++ b/vendor/indexmap/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{},"package":"8adf3ddd720272c6ea8bf59463c04e0f93d0bbf7c5439b691bca2987e0270897"} \ No newline at end of file
diff --git a/vendor/indexmap/Cargo.toml b/vendor/indexmap/Cargo.toml
new file mode 100644
index 0000000..ba1142e
--- /dev/null
+++ b/vendor/indexmap/Cargo.toml
@@ -0,0 +1,112 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2021"
+rust-version = "1.63"
+name = "indexmap"
+version = "2.0.2"
+description = "A hash table with consistent order and fast iteration."
+documentation = "https://docs.rs/indexmap/"
+readme = "README.md"
+keywords = [
+ "hashmap",
+ "no_std",
+]
+categories = [
+ "data-structures",
+ "no-std",
+]
+license = "Apache-2.0 OR MIT"
+repository = "https://github.com/bluss/indexmap"
+
+[package.metadata.docs.rs]
+features = [
+ "arbitrary",
+ "quickcheck",
+ "serde",
+ "rayon",
+]
+rustdoc-args = [
+ "--cfg",
+ "docsrs",
+]
+
+[package.metadata.release]
+no-dev-version = true
+tag-name = "{{version}}"
+
+[profile.bench]
+debug = 2
+
+[lib]
+bench = false
+
+[dependencies.arbitrary]
+version = "1.0"
+optional = true
+default-features = false
+
+[dependencies.equivalent]
+version = "1.0"
+default-features = false
+
+[dependencies.hashbrown]
+version = "0.14.1"
+features = ["raw"]
+default-features = false
+
+[dependencies.quickcheck]
+version = "1.0"
+optional = true
+default-features = false
+
+[dependencies.rayon]
+version = "1.5.3"
+optional = true
+
+[dependencies.rustc-rayon]
+version = "0.5"
+optional = true
+package = "rustc-rayon"
+
+[dependencies.serde]
+version = "1.0"
+optional = true
+default-features = false
+
+[dev-dependencies.fnv]
+version = "1.0"
+
+[dev-dependencies.fxhash]
+version = "0.2.1"
+
+[dev-dependencies.itertools]
+version = "0.11"
+
+[dev-dependencies.lazy_static]
+version = "1.3"
+
+[dev-dependencies.quickcheck]
+version = "1.0"
+default-features = false
+
+[dev-dependencies.rand]
+version = "0.8"
+features = ["small_rng"]
+
+[dev-dependencies.serde_derive]
+version = "1.0"
+
+[features]
+default = ["std"]
+std = []
+test_debug = []
diff --git a/vendor/indexmap/LICENSE-APACHE b/vendor/indexmap/LICENSE-APACHE
new file mode 100644
index 0000000..16fe87b
--- /dev/null
+++ b/vendor/indexmap/LICENSE-APACHE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/vendor/indexmap/LICENSE-MIT b/vendor/indexmap/LICENSE-MIT
new file mode 100644
index 0000000..8b81810
--- /dev/null
+++ b/vendor/indexmap/LICENSE-MIT
@@ -0,0 +1,25 @@
+Copyright (c) 2016--2017
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/vendor/indexmap/README.md b/vendor/indexmap/README.md
new file mode 100644
index 0000000..bf86932
--- /dev/null
+++ b/vendor/indexmap/README.md
@@ -0,0 +1,55 @@
+# indexmap
+
+[![build status](https://github.com/bluss/indexmap/workflows/Continuous%20integration/badge.svg?branch=master)](https://github.com/bluss/indexmap/actions)
+[![crates.io](https://img.shields.io/crates/v/indexmap.svg)](https://crates.io/crates/indexmap)
+[![docs](https://docs.rs/indexmap/badge.svg)](https://docs.rs/indexmap)
+[![rustc](https://img.shields.io/badge/rust-1.63%2B-orange.svg)](https://img.shields.io/badge/rust-1.63%2B-orange.svg)
+
+A pure-Rust hash table which preserves (in a limited sense) insertion order.
+
+This crate implements compact map and set data-structures,
+where the iteration order of the keys is independent from their hash or
+value. It preserves insertion order (except after removals), and it
+allows lookup of entries by either hash table key or numerical index.
+
+Note: this crate was originally released under the name `ordermap`,
+but it was renamed to `indexmap` to better reflect its features.
+
+# Background
+
+This was inspired by Python 3.6's new dict implementation (which remembers
+the insertion order and is fast to iterate, and is compact in memory).
+
+Some of those features were translated to Rust, and some were not. The result
+was indexmap, a hash table that has the following properties:
+
+- Order is **independent of hash function** and hash values of keys.
+- Fast to iterate.
+- Indexed in compact space.
+- Preserves insertion order **as long** as you don't call `.remove()`.
+- Uses hashbrown for the inner table, just like Rust's libstd `HashMap` does.
+
+## Performance
+
+`IndexMap` derives a couple of performance facts directly from how it is constructed,
+which is roughly:
+
+> A raw hash table of key-value indices, and a vector of key-value pairs.
+
+- Iteration is very fast since it is on the dense key-values.
+- Removal is fast since it moves memory areas only in the table,
+ and uses a single swap in the vector.
+- Lookup is fast-ish because the initial 7-bit hash lookup uses SIMD, and indices are
+  densely stored. Lookup is also slow-ish since the actual key-value pairs are stored
+  separately. (This becomes visible when CPU cache size is the limiting factor.)
+
+- In practice, `IndexMap` has been tested out as the hashmap in rustc in [PR45282] and
+ the performance was roughly on par across the whole workload.
+- If you want the properties of `IndexMap`, or its strongest performance points
+  fit your workload, it might be the best hash table implementation.
+
+[PR45282]: https://github.com/rust-lang/rust/pull/45282
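A small editorial example (not from the upstream README) showing the two properties that matter most in practice, insertion-order iteration and index-based access; the map contents are illustrative:

```rust
use indexmap::IndexMap;

fn main() {
    let mut month_days = IndexMap::new();
    month_days.insert("jan", 31);
    month_days.insert("feb", 28);
    month_days.insert("mar", 31);

    // Iteration follows insertion order, not hash order.
    let keys: Vec<_> = month_days.keys().copied().collect();
    assert_eq!(keys, vec!["jan", "feb", "mar"]);

    // Entries are also addressable by numerical index.
    assert_eq!(month_days.get_index(1), Some((&"feb", &28)));
    assert_eq!(month_days.get_index_of("mar"), Some(2));
}
```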
+
+# Recent Changes
+
+See [RELEASES.md](https://github.com/bluss/indexmap/blob/master/RELEASES.md).
diff --git a/vendor/indexmap/RELEASES.md b/vendor/indexmap/RELEASES.md
new file mode 100644
index 0000000..0964d19
--- /dev/null
+++ b/vendor/indexmap/RELEASES.md
@@ -0,0 +1,434 @@
+- 2.0.2
+
+ - The `hashbrown` dependency has been updated to version 0.14.1 to
+ complete the support for Rust 1.63.
+
+- 2.0.1
+
+ - **MSRV**: Rust 1.63.0 is now supported as well, pending publication of
+ `hashbrown`'s relaxed MSRV (or use cargo `--ignore-rust-version`).
+
+- 2.0.0
+
+ - **MSRV**: Rust 1.64.0 or later is now required.
+
+ - The `"std"` feature is no longer auto-detected. It is included in the
+ default feature set, or else can be enabled like any other Cargo feature.
+
+ - The `"serde-1"` feature has been removed, leaving just the optional
+ `"serde"` dependency to be enabled like a feature itself.
+
+ - `IndexMap::get_index_mut` now returns `Option<(&K, &mut V)>`, changing
+ the key part from `&mut K` to `&K`. There is also a new alternative
+ `MutableKeys::get_index_mut2` to access the former behavior.
+
+ - The new `map::Slice<K, V>` and `set::Slice<T>` offer a linear view of maps
+ and sets, behaving a lot like normal `[(K, V)]` and `[T]` slices. Notably,
+ comparison traits like `Eq` only consider items in order, rather than hash
+ lookups, and slices even implement `Hash`.
+
+ - `IndexMap` and `IndexSet` now have `sort_by_cached_key` and
+ `par_sort_by_cached_key` methods which perform stable sorts in place
+ using a key extraction function.
+
+ - `IndexMap` and `IndexSet` now have `reserve_exact`, `try_reserve`, and
+ `try_reserve_exact` methods that correspond to the same methods on `Vec`.
+ However, exactness only applies to the direct capacity for items, while the
+ raw hash table still follows its own rules for capacity and load factor.
+
+ - The `Equivalent` trait is now re-exported from the `equivalent` crate,
+ intended as a common base to allow types to work with multiple map types.
+
+ - The `hashbrown` dependency has been updated to version 0.14.
+
+ - The `serde_seq` module has been moved from the crate root to below the
+ `map` module.
+
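+  A brief sketch of the new `get_index_mut` behavior (illustrative only, using
+  the 2.0 API):
+
+  ```rust
+  use indexmap::IndexMap;
+
+  let mut map = IndexMap::new();
+  map.insert("a", 1);
+  if let Some((key, value)) = map.get_index_mut(0) {
+      // the key is now only borrowed immutably; only the value can be changed
+      assert_eq!(*key, "a");
+      *value += 1;
+  }
+  assert_eq!(map.get("a"), Some(&2));
+  ```
+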
+- 1.9.3
+
+ - Bump the `rustc-rayon` dependency, for compiler use only.
+
+- 1.9.2
+
+ - `IndexMap` and `IndexSet` both implement `arbitrary::Arbitrary<'_>` and
+ `quickcheck::Arbitrary` if those optional dependency features are enabled.
+
+- 1.9.1
+
+ - The MSRV now allows Rust 1.56.0 as well. However, currently `hashbrown`
+ 0.12.1 requires 1.56.1, so users on 1.56.0 should downgrade that to 0.12.0
+ until there is a later published version relaxing its requirement.
+
+- 1.9.0
+
+ - **MSRV**: Rust 1.56.1 or later is now required.
+
+ - The `hashbrown` dependency has been updated to version 0.12.
+
+ - `IterMut` and `ValuesMut` now implement `Debug`.
+
+ - The new `IndexMap::shrink_to` and `IndexSet::shrink_to` methods shrink
+ the capacity with a lower bound.
+
+ - The new `IndexMap::move_index` and `IndexSet::move_index` methods change
+ the position of an item from one index to another, shifting the items
+    in between to accommodate the move.
+
+- 1.8.2
+
+ - Bump the `rustc-rayon` dependency, for compiler use only.
+
+- 1.8.1
+
+ - The new `IndexSet::replace_full` will return the index of the item along
+ with the replaced value, if any, by @zakcutner in PR [222].
+
+[222]: https://github.com/bluss/indexmap/pull/222
+
+- 1.8.0
+
+ - The new `IndexMap::into_keys` and `IndexMap::into_values` will consume
+ the map into keys or values, respectively, matching Rust 1.54's `HashMap`
+ methods, by @taiki-e in PR [195].
+
+ - More of the iterator types implement `Debug`, `ExactSizeIterator`, and
+ `FusedIterator`, by @cuviper in PR [196].
+
+ - `IndexMap` and `IndexSet` now implement rayon's `ParallelDrainRange`,
+ by @cuviper in PR [197].
+
+ - `IndexMap::with_hasher` and `IndexSet::with_hasher` are now `const`
+ functions, allowing static maps and sets, by @mwillsey in PR [203].
+
+ - `IndexMap` and `IndexSet` now implement `From` for arrays, matching
+ Rust 1.56's implementation for `HashMap`, by @rouge8 in PR [205].
+
+ - `IndexMap` and `IndexSet` now have methods `sort_unstable_keys`,
+ `sort_unstable_by`, `sorted_unstable_by`, and `par_*` equivalents,
+ which sort in-place without preserving the order of equal items, by
+ @bhgomes in PR [211].
+
+[195]: https://github.com/bluss/indexmap/pull/195
+[196]: https://github.com/bluss/indexmap/pull/196
+[197]: https://github.com/bluss/indexmap/pull/197
+[203]: https://github.com/bluss/indexmap/pull/203
+[205]: https://github.com/bluss/indexmap/pull/205
+[211]: https://github.com/bluss/indexmap/pull/211
+
+- 1.7.0
+
+ - **MSRV**: Rust 1.49 or later is now required.
+
+ - The `hashbrown` dependency has been updated to version 0.11.
+
+- 1.6.2
+
+ - Fixed to match `std` behavior, `OccupiedEntry::key` now references the
+ existing key in the map instead of the lookup key, by @cuviper in PR [170].
+
+ - The new `Entry::or_insert_with_key` matches Rust 1.50's `Entry` method,
+ passing `&K` to the callback to create a value, by @cuviper in PR [175].
+
+[170]: https://github.com/bluss/indexmap/pull/170
+[175]: https://github.com/bluss/indexmap/pull/175
+
+- 1.6.1
+
+ - The new `serde_seq` module implements `IndexMap` serialization as a
+ sequence to ensure order is preserved, by @cuviper in PR [158].
+
+ - New methods on maps and sets work like the `Vec`/slice methods by the same name:
+ `truncate`, `split_off`, `first`, `first_mut`, `last`, `last_mut`, and
+ `swap_indices`, by @cuviper in PR [160].
+
+[158]: https://github.com/bluss/indexmap/pull/158
+[160]: https://github.com/bluss/indexmap/pull/160
+
+- 1.6.0
+
+ - **MSRV**: Rust 1.36 or later is now required.
+
+ - The `hashbrown` dependency has been updated to version 0.9.
+
+- 1.5.2
+
+ - The new "std" feature will force the use of `std` for users that explicitly
+ want the default `S = RandomState`, bypassing the autodetection added in 1.3.0,
+ by @cuviper in PR [145].
+
+[145]: https://github.com/bluss/indexmap/pull/145
+
+- 1.5.1
+
+ - Values can now be indexed by their `usize` position by @cuviper in PR [132].
+
+ - Some of the generic bounds have been relaxed to match `std` by @cuviper in PR [141].
+
+ - `drain` now accepts any `R: RangeBounds<usize>` by @cuviper in PR [142].
+
+[132]: https://github.com/bluss/indexmap/pull/132
+[141]: https://github.com/bluss/indexmap/pull/141
+[142]: https://github.com/bluss/indexmap/pull/142
+
+- 1.5.0
+
+ - **MSRV**: Rust 1.32 or later is now required.
+
+ - The inner hash table is now based on `hashbrown` by @cuviper in PR [131].
+ This also completes the method `reserve` and adds `shrink_to_fit`.
+
+ - Add new methods `get_key_value`, `remove_entry`, `swap_remove_entry`,
+ and `shift_remove_entry`, by @cuviper in PR [136]
+
+ - `Clone::clone_from` reuses allocations by @cuviper in PR [125]
+
+ - Add new method `reverse` by @linclelinkpart5 in PR [128]
+
+[125]: https://github.com/bluss/indexmap/pull/125
+[128]: https://github.com/bluss/indexmap/pull/128
+[131]: https://github.com/bluss/indexmap/pull/131
+[136]: https://github.com/bluss/indexmap/pull/136
+
+- 1.4.0
+
+ - Add new method `get_index_of` by @Thermatrix in PR [115] and [120]
+
+ - Fix build script rebuild-if-changed configuration to use "build.rs";
+ fixes issue [123]. Fix by @cuviper.
+
+ - Dev-dependencies (rand and quickcheck) have been updated. The crate's tests
+    now run using Rust 1.32 or later (the MSRV for building the crate has not
+    changed), by @kjeremy and @bluss.
+
+[123]: https://github.com/bluss/indexmap/issues/123
+[115]: https://github.com/bluss/indexmap/pull/115
+[120]: https://github.com/bluss/indexmap/pull/120
+
+- 1.3.2
+
+ - Maintenance update to regenerate the published `Cargo.toml`.
+
+- 1.3.1
+
+ - Maintenance update for formatting and `autocfg` 1.0.
+
+- 1.3.0
+
+ - The deprecation messages in the previous version have been removed.
+ (The methods have not otherwise changed.) Docs for removal methods have been
+ improved.
+  - From Rust 1.36, this crate supports being built **without std**, requiring
+    `alloc` instead. This is enabled automatically when it is detected that
+    `std` is not available; there is no crate feature to enable or disable it.
+    The detection is handled by the new build dependency `autocfg`.
+
+- 1.2.0
+
+  - Plain `.remove()` now has a deprecation message; it points the user to
+    the removal functions `swap_remove` and `shift_remove`, which have
+    different performance and order semantics (a short sketch follows at the
+    end of this release's notes). Plain `.remove()` will not be removed; the
+    warning message and the method will remain until further notice.
+
+ - Add new method `shift_remove` for order preserving removal on the map,
+ and `shift_take` for the corresponding operation on the set.
+
+ - Add methods `swap_remove`, `swap_remove_entry` to `Entry`.
+
+ - Fix indexset/indexmap to support full paths, like `indexmap::indexmap!()`
+
+ - Internal improvements: fix warnings, deprecations and style lints
+
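+  A short sketch of the difference between the two removal functions
+  (illustrative only, written against the current API):
+
+  ```rust
+  use indexmap::IndexMap;
+
+  let mut swap = IndexMap::from([("a", 1), ("b", 2), ("c", 3), ("d", 4)]);
+  // `swap_remove` is O(1) but moves the last entry into the vacated slot
+  swap.swap_remove("b");
+  assert_eq!(swap.keys().copied().collect::<Vec<_>>(), ["a", "d", "c"]);
+
+  let mut shift = IndexMap::from([("a", 1), ("b", 2), ("c", 3), ("d", 4)]);
+  // `shift_remove` is O(n) but preserves the order of the remaining entries
+  shift.shift_remove("b");
+  assert_eq!(shift.keys().copied().collect::<Vec<_>>(), ["a", "c", "d"]);
+  ```
+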
+- 1.1.0
+
+ - Added optional feature `"rayon"` that adds parallel iterator support
+ to `IndexMap` and `IndexSet` using Rayon. This includes all the regular
+ iterators in parallel versions, and parallel sort.
+
+ - Implemented `Clone` for `map::{Iter, Keys, Values}` and
+ `set::{Difference, Intersection, Iter, SymmetricDifference, Union}`
+
+ - Implemented `Debug` for `map::{Entry, IntoIter, Iter, Keys, Values}` and
+ `set::{Difference, Intersection, IntoIter, Iter, SymmetricDifference, Union}`
+
+  - The Serde trait `IntoDeserializer` is implemented for `IndexMap` and `IndexSet`.
+
+ - Minimum Rust version requirement increased to Rust 1.30 for development builds.
+
+- 1.0.2
+
+ - The new methods `IndexMap::insert_full` and `IndexSet::insert_full` are
+ both like `insert` with the index included in the return value.
+
+ - The new method `Entry::and_modify` can be used to modify occupied
+ entries, matching the new methods of `std` maps in Rust 1.26.
+
+ - The new method `Entry::or_default` inserts a default value in unoccupied
+ entries, matching the new methods of `std` maps in Rust 1.28.
+
+- 1.0.1
+
+ - Document Rust version policy for the crate (see rustdoc)
+
+- 1.0.0
+
+  - This is the 1.0 release for `indexmap`! (the crate and data structure
+    formerly known as “ordermap”)
+ - `OccupiedEntry::insert` changed its signature, to use `&mut self` for
+ the method receiver, matching the equivalent method for a standard
+ `HashMap`. Thanks to @dtolnay for finding this bug.
+ - The deprecated old names from ordermap were removed: `OrderMap`,
+ `OrderSet`, `ordermap!{}`, `orderset!{}`. Use the new `IndexMap`
+ etc names instead.
+
+- 0.4.1
+
+ - Renamed crate to `indexmap`; the `ordermap` crate is now deprecated
+ and the types `OrderMap/Set` now have a deprecation notice.
+
+- 0.4.0
+
+  - This is the last release series for `ordermap` under that name,
+ because the crate is **going to be renamed** to `indexmap` (with types
+ `IndexMap`, `IndexSet`) and no change in functionality!
+ - The map and its associated structs moved into the `map` submodule of the
+ crate, so that the map and set are symmetric
+
+ + The iterators, `Entry` and other structs are now under `ordermap::map::`
+
+ - Internally refactored `OrderMap<K, V, S>` so that all the main algorithms
+ (insertion, lookup, removal etc) that don't use the `S` parameter (the
+ hasher) are compiled without depending on `S`, which reduces generics bloat.
+
+ - `Entry<K, V>` no longer has a type parameter `S`, which is just like
+ the standard `HashMap`'s entry.
+
+ - Minimum Rust version requirement increased to Rust 1.18
+
+- 0.3.5
+
+ - Documentation improvements
+
+- 0.3.4
+
+ - The `.retain()` methods for `OrderMap` and `OrderSet` now
+ traverse the elements in order, and the retained elements **keep their order**
+ - Added new methods `.sort_by()`, `.sort_keys()` to `OrderMap` and
+ `.sort_by()`, `.sort()` to `OrderSet`. These methods allow you to
+ sort the maps in place efficiently.
+
+- 0.3.3
+
+ - Document insertion behaviour better by @lucab
+  - Updated dependencies (no feature changes) by @ignatenkobrain
+
+- 0.3.2
+
+ - Add `OrderSet` by @cuviper!
+  - `OrderMap::drain` is now also a double-ended iterator.
+
+- 0.3.1
+
+ - In all ordermap iterators, forward the `collect` method to the underlying
+ iterator as well.
+ - Add crates.io categories.
+
+- 0.3.0
+
+ - The methods `get_pair`, `get_pair_index` were both replaced by
+ `get_full` (and the same for the mutable case).
+ - Method `swap_remove_pair` replaced by `swap_remove_full`.
+ - Add trait `MutableKeys` for opt-in mutable key access. Mutable key access
+ is only possible through the methods of this extension trait.
+ - Add new trait `Equivalent` for key equivalence. This extends the
+ `Borrow` trait mechanism for `OrderMap::get` in a backwards compatible
+    way; only some minor type-inference-related issues may become apparent.
+ See [#10] for more information.
+ - Implement `Extend<(&K, &V)>` by @xfix.
+
+[#10]: https://github.com/bluss/ordermap/pull/10
+
+- 0.2.13
+
+ - Fix deserialization to support custom hashers by @Techcable.
+ - Add methods `.index()` on the entry types by @garro95.
+
+- 0.2.12
+
+ - Add methods `.with_hasher()`, `.hasher()`.
+
+- 0.2.11
+
+ - Support `ExactSizeIterator` for the iterators. By @Binero.
+ - Use `Box<[Pos]>` internally, saving a word in the `OrderMap` struct.
+ - Serde support, with crate feature `"serde-1"`. By @xfix.
+
+- 0.2.10
+
+ - Add iterator `.drain(..)` by @stevej.
+
+- 0.2.9
+
+ - Add method `.is_empty()` by @overvenus.
+ - Implement `PartialEq, Eq` by @overvenus.
+ - Add method `.sorted_by()`.
+
+- 0.2.8
+
+ - Add iterators `.values()` and `.values_mut()`.
+ - Fix compatibility with 32-bit platforms.
+
+- 0.2.7
+
+ - Add `.retain()`.
+
+- 0.2.6
+
+ - Add `OccupiedEntry::remove_entry` and other minor entry methods,
+ so that it now has all the features of `HashMap`'s entries.
+
+- 0.2.5
+
+ - Improved `.pop()` slightly.
+
+- 0.2.4
+
+ - Improved performance of `.insert()` ([#3]) by @pczarn.
+
+[#3]: https://github.com/bluss/ordermap/pull/3
+
+- 0.2.3
+
+  - Generalize `Entry` for now, so that it works on hash maps with a
+    non-default hasher. However, there's a lingering compatibility issue since
+    libstd `HashMap` does not parameterize its entries by the hasher (the `S`
+    type parameter).
+ - Special case some iterator methods like `.nth()`.
+
+- 0.2.2
+
+ - Disable the verbose `Debug` impl by default.
+
+- 0.2.1
+
+ - Fix doc links and clarify docs.
+
+- 0.2.0
+
+ - Add more `HashMap` methods & compat with its API.
+ - Experimental support for `.entry()` (the simplest parts of the API).
+ - Add `.reserve()` (placeholder impl).
+ - Add `.remove()` as synonym for `.swap_remove()`.
+ - Changed `.insert()` to swap value if the entry already exists, and
+ return `Option`.
+ - Experimental support as an *indexed* hash map! Added methods
+ `.get_index()`, `.get_index_mut()`, `.swap_remove_index()`,
+ `.get_pair_index()`, `.get_pair_index_mut()`.
+
+- 0.1.2
+
+ - Implement the 32/32 split idea for `Pos` which improves cache utilization
+ and lookup performance.
+
+- 0.1.1
+
+ - Initial release.
diff --git a/vendor/indexmap/benches/bench.rs b/vendor/indexmap/benches/bench.rs
new file mode 100644
index 0000000..a4e8e21
--- /dev/null
+++ b/vendor/indexmap/benches/bench.rs
@@ -0,0 +1,763 @@
+#![feature(test)]
+
+extern crate test;
+#[macro_use]
+extern crate lazy_static;
+
+use fnv::FnvHasher;
+use std::hash::BuildHasherDefault;
+use std::hash::Hash;
+type FnvBuilder = BuildHasherDefault<FnvHasher>;
+
+use test::black_box;
+use test::Bencher;
+
+use indexmap::IndexMap;
+
+use std::collections::HashMap;
+
+use rand::rngs::SmallRng;
+use rand::seq::SliceRandom;
+use rand::SeedableRng;
+
+/// Use a consistently seeded Rng for benchmark stability
+fn small_rng() -> SmallRng {
+ let seed = u64::from_le_bytes(*b"indexmap");
+ SmallRng::seed_from_u64(seed)
+}
+
+#[bench]
+fn new_hashmap(b: &mut Bencher) {
+ b.iter(|| HashMap::<String, String>::new());
+}
+
+#[bench]
+fn new_indexmap(b: &mut Bencher) {
+ b.iter(|| IndexMap::<String, String>::new());
+}
+
+#[bench]
+fn with_capacity_10e5_hashmap(b: &mut Bencher) {
+ b.iter(|| HashMap::<String, String>::with_capacity(10_000));
+}
+
+#[bench]
+fn with_capacity_10e5_indexmap(b: &mut Bencher) {
+ b.iter(|| IndexMap::<String, String>::with_capacity(10_000));
+}
+
+#[bench]
+fn insert_hashmap_10_000(b: &mut Bencher) {
+ let c = 10_000;
+ b.iter(|| {
+ let mut map = HashMap::with_capacity(c);
+ for x in 0..c {
+ map.insert(x, ());
+ }
+ map
+ });
+}
+
+#[bench]
+fn insert_indexmap_10_000(b: &mut Bencher) {
+ let c = 10_000;
+ b.iter(|| {
+ let mut map = IndexMap::with_capacity(c);
+ for x in 0..c {
+ map.insert(x, ());
+ }
+ map
+ });
+}
+
+#[bench]
+fn insert_hashmap_string_10_000(b: &mut Bencher) {
+ let c = 10_000;
+ b.iter(|| {
+ let mut map = HashMap::with_capacity(c);
+ for x in 0..c {
+ map.insert(x.to_string(), ());
+ }
+ map
+ });
+}
+
+#[bench]
+fn insert_indexmap_string_10_000(b: &mut Bencher) {
+ let c = 10_000;
+ b.iter(|| {
+ let mut map = IndexMap::with_capacity(c);
+ for x in 0..c {
+ map.insert(x.to_string(), ());
+ }
+ map
+ });
+}
+
+#[bench]
+fn insert_hashmap_str_10_000(b: &mut Bencher) {
+ let c = 10_000;
+ let ss = Vec::from_iter((0..c).map(|x| x.to_string()));
+ b.iter(|| {
+ let mut map = HashMap::with_capacity(c);
+ for key in &ss {
+ map.insert(&key[..], ());
+ }
+ map
+ });
+}
+
+#[bench]
+fn insert_indexmap_str_10_000(b: &mut Bencher) {
+ let c = 10_000;
+ let ss = Vec::from_iter((0..c).map(|x| x.to_string()));
+ b.iter(|| {
+ let mut map = IndexMap::with_capacity(c);
+ for key in &ss {
+ map.insert(&key[..], ());
+ }
+ map
+ });
+}
+
+#[bench]
+fn insert_hashmap_int_bigvalue_10_000(b: &mut Bencher) {
+ let c = 10_000;
+ let value = [0u64; 10];
+ b.iter(|| {
+ let mut map = HashMap::with_capacity(c);
+ for i in 0..c {
+ map.insert(i, value);
+ }
+ map
+ });
+}
+
+#[bench]
+fn insert_indexmap_int_bigvalue_10_000(b: &mut Bencher) {
+ let c = 10_000;
+ let value = [0u64; 10];
+ b.iter(|| {
+ let mut map = IndexMap::with_capacity(c);
+ for i in 0..c {
+ map.insert(i, value);
+ }
+ map
+ });
+}
+
+#[bench]
+fn insert_hashmap_100_000(b: &mut Bencher) {
+ let c = 100_000;
+ b.iter(|| {
+ let mut map = HashMap::with_capacity(c);
+ for x in 0..c {
+ map.insert(x, ());
+ }
+ map
+ });
+}
+
+#[bench]
+fn insert_indexmap_100_000(b: &mut Bencher) {
+ let c = 100_000;
+ b.iter(|| {
+ let mut map = IndexMap::with_capacity(c);
+ for x in 0..c {
+ map.insert(x, ());
+ }
+ map
+ });
+}
+
+#[bench]
+fn insert_hashmap_150(b: &mut Bencher) {
+ let c = 150;
+ b.iter(|| {
+ let mut map = HashMap::with_capacity(c);
+ for x in 0..c {
+ map.insert(x, ());
+ }
+ map
+ });
+}
+
+#[bench]
+fn insert_indexmap_150(b: &mut Bencher) {
+ let c = 150;
+ b.iter(|| {
+ let mut map = IndexMap::with_capacity(c);
+ for x in 0..c {
+ map.insert(x, ());
+ }
+ map
+ });
+}
+
+#[bench]
+fn entry_hashmap_150(b: &mut Bencher) {
+ let c = 150;
+ b.iter(|| {
+ let mut map = HashMap::with_capacity(c);
+ for x in 0..c {
+ map.entry(x).or_insert(());
+ }
+ map
+ });
+}
+
+#[bench]
+fn entry_indexmap_150(b: &mut Bencher) {
+ let c = 150;
+ b.iter(|| {
+ let mut map = IndexMap::with_capacity(c);
+ for x in 0..c {
+ map.entry(x).or_insert(());
+ }
+ map
+ });
+}
+
+#[bench]
+fn iter_sum_hashmap_10_000(b: &mut Bencher) {
+ let c = 10_000;
+ let mut map = HashMap::with_capacity(c);
+ let len = c - c / 10;
+ for x in 0..len {
+ map.insert(x, ());
+ }
+ assert_eq!(map.len(), len);
+ b.iter(|| map.keys().sum::<usize>());
+}
+
+#[bench]
+fn iter_sum_indexmap_10_000(b: &mut Bencher) {
+ let c = 10_000;
+ let mut map = IndexMap::with_capacity(c);
+ let len = c - c / 10;
+ for x in 0..len {
+ map.insert(x, ());
+ }
+ assert_eq!(map.len(), len);
+ b.iter(|| map.keys().sum::<usize>());
+}
+
+#[bench]
+fn iter_black_box_hashmap_10_000(b: &mut Bencher) {
+ let c = 10_000;
+ let mut map = HashMap::with_capacity(c);
+ let len = c - c / 10;
+ for x in 0..len {
+ map.insert(x, ());
+ }
+ assert_eq!(map.len(), len);
+ b.iter(|| {
+ for &key in map.keys() {
+ black_box(key);
+ }
+ });
+}
+
+#[bench]
+fn iter_black_box_indexmap_10_000(b: &mut Bencher) {
+ let c = 10_000;
+ let mut map = IndexMap::with_capacity(c);
+ let len = c - c / 10;
+ for x in 0..len {
+ map.insert(x, ());
+ }
+ assert_eq!(map.len(), len);
+ b.iter(|| {
+ for &key in map.keys() {
+ black_box(key);
+ }
+ });
+}
+
+fn shuffled_keys<I>(iter: I) -> Vec<I::Item>
+where
+ I: IntoIterator,
+{
+ let mut v = Vec::from_iter(iter);
+ let mut rng = small_rng();
+ v.shuffle(&mut rng);
+ v
+}
+
+#[bench]
+fn lookup_hashmap_10_000_exist(b: &mut Bencher) {
+ let c = 10_000;
+ let mut map = HashMap::with_capacity(c);
+ let keys = shuffled_keys(0..c);
+ for &key in &keys {
+ map.insert(key, 1);
+ }
+ b.iter(|| {
+ let mut found = 0;
+ for key in 5000..c {
+ found += map.get(&key).is_some() as i32;
+ }
+ found
+ });
+}
+
+#[bench]
+fn lookup_hashmap_10_000_noexist(b: &mut Bencher) {
+ let c = 10_000;
+ let mut map = HashMap::with_capacity(c);
+ let keys = shuffled_keys(0..c);
+ for &key in &keys {
+ map.insert(key, 1);
+ }
+ b.iter(|| {
+ let mut found = 0;
+ for key in c..15000 {
+ found += map.get(&key).is_some() as i32;
+ }
+ found
+ });
+}
+
+#[bench]
+fn lookup_indexmap_10_000_exist(b: &mut Bencher) {
+ let c = 10_000;
+ let mut map = IndexMap::with_capacity(c);
+ let keys = shuffled_keys(0..c);
+ for &key in &keys {
+ map.insert(key, 1);
+ }
+ b.iter(|| {
+ let mut found = 0;
+ for key in 5000..c {
+ found += map.get(&key).is_some() as i32;
+ }
+ found
+ });
+}
+
+#[bench]
+fn lookup_indexmap_10_000_noexist(b: &mut Bencher) {
+ let c = 10_000;
+ let mut map = IndexMap::with_capacity(c);
+ let keys = shuffled_keys(0..c);
+ for &key in &keys {
+ map.insert(key, 1);
+ }
+ b.iter(|| {
+ let mut found = 0;
+ for key in c..15000 {
+ found += map.get(&key).is_some() as i32;
+ }
+ found
+ });
+}
+
+// sizes for the lookup and sort benchmarks: map size, number of lookups, and sorted map size
+const LOOKUP_MAP_SIZE: u32 = 100_000_u32;
+const LOOKUP_SAMPLE_SIZE: u32 = 5000;
+const SORT_MAP_SIZE: usize = 10_000;
+
+// use lazy_static so that comparison benchmarks use the exact same inputs
+lazy_static! {
+ static ref KEYS: Vec<u32> = shuffled_keys(0..LOOKUP_MAP_SIZE);
+}
+
+lazy_static! {
+ static ref HMAP_100K: HashMap<u32, u32> = {
+ let c = LOOKUP_MAP_SIZE;
+ let mut map = HashMap::with_capacity(c as usize);
+ let keys = &*KEYS;
+ for &key in keys {
+ map.insert(key, key);
+ }
+ map
+ };
+}
+
+lazy_static! {
+ static ref IMAP_100K: IndexMap<u32, u32> = {
+ let c = LOOKUP_MAP_SIZE;
+ let mut map = IndexMap::with_capacity(c as usize);
+ let keys = &*KEYS;
+ for &key in keys {
+ map.insert(key, key);
+ }
+ map
+ };
+}
+
+lazy_static! {
+ static ref IMAP_SORT_U32: IndexMap<u32, u32> = {
+ let mut map = IndexMap::with_capacity(SORT_MAP_SIZE);
+ for &key in &KEYS[..SORT_MAP_SIZE] {
+ map.insert(key, key);
+ }
+ map
+ };
+}
+lazy_static! {
+ static ref IMAP_SORT_S: IndexMap<String, String> = {
+ let mut map = IndexMap::with_capacity(SORT_MAP_SIZE);
+ for &key in &KEYS[..SORT_MAP_SIZE] {
+ map.insert(format!("{:^16x}", &key), String::new());
+ }
+ map
+ };
+}
+
+#[bench]
+fn lookup_hashmap_100_000_multi(b: &mut Bencher) {
+ let map = &*HMAP_100K;
+ b.iter(|| {
+ let mut found = 0;
+ for key in 0..LOOKUP_SAMPLE_SIZE {
+ found += map.get(&key).is_some() as u32;
+ }
+ found
+ });
+}
+
+#[bench]
+fn lookup_indexmap_100_000_multi(b: &mut Bencher) {
+ let map = &*IMAP_100K;
+ b.iter(|| {
+ let mut found = 0;
+ for key in 0..LOOKUP_SAMPLE_SIZE {
+ found += map.get(&key).is_some() as u32;
+ }
+ found
+ });
+}
+
+// inorder: Test looking up keys in the same order as they were inserted
+#[bench]
+fn lookup_hashmap_100_000_inorder_multi(b: &mut Bencher) {
+ let map = &*HMAP_100K;
+ let keys = &*KEYS;
+ b.iter(|| {
+ let mut found = 0;
+ for key in &keys[0..LOOKUP_SAMPLE_SIZE as usize] {
+ found += map.get(key).is_some() as u32;
+ }
+ found
+ });
+}
+
+#[bench]
+fn lookup_indexmap_100_000_inorder_multi(b: &mut Bencher) {
+ let map = &*IMAP_100K;
+ let keys = &*KEYS;
+ b.iter(|| {
+ let mut found = 0;
+ for key in &keys[0..LOOKUP_SAMPLE_SIZE as usize] {
+ found += map.get(key).is_some() as u32;
+ }
+ found
+ });
+}
+
+#[bench]
+fn lookup_hashmap_100_000_single(b: &mut Bencher) {
+ let map = &*HMAP_100K;
+ let mut iter = (0..LOOKUP_MAP_SIZE + LOOKUP_SAMPLE_SIZE).cycle();
+ b.iter(|| {
+ let key = iter.next().unwrap();
+ map.get(&key).is_some()
+ });
+}
+
+#[bench]
+fn lookup_indexmap_100_000_single(b: &mut Bencher) {
+ let map = &*IMAP_100K;
+ let mut iter = (0..LOOKUP_MAP_SIZE + LOOKUP_SAMPLE_SIZE).cycle();
+ b.iter(|| {
+ let key = iter.next().unwrap();
+ map.get(&key).is_some()
+ });
+}
+
+const GROW_SIZE: usize = 100_000;
+type GrowKey = u32;
+
+// Test grow/resize without preallocation
+#[bench]
+fn grow_fnv_hashmap_100_000(b: &mut Bencher) {
+ b.iter(|| {
+ let mut map: HashMap<_, _, FnvBuilder> = HashMap::default();
+ for x in 0..GROW_SIZE {
+ map.insert(x as GrowKey, x as GrowKey);
+ }
+ map
+ });
+}
+
+#[bench]
+fn grow_fnv_indexmap_100_000(b: &mut Bencher) {
+ b.iter(|| {
+ let mut map: IndexMap<_, _, FnvBuilder> = IndexMap::default();
+ for x in 0..GROW_SIZE {
+ map.insert(x as GrowKey, x as GrowKey);
+ }
+ map
+ });
+}
+
+const MERGE: u64 = 10_000;
+#[bench]
+fn hashmap_merge_simple(b: &mut Bencher) {
+ let first_map: HashMap<u64, _> = (0..MERGE).map(|i| (i, ())).collect();
+ let second_map: HashMap<u64, _> = (MERGE..MERGE * 2).map(|i| (i, ())).collect();
+ b.iter(|| {
+ let mut merged = first_map.clone();
+ merged.extend(second_map.iter().map(|(&k, &v)| (k, v)));
+ merged
+ });
+}
+
+#[bench]
+fn hashmap_merge_shuffle(b: &mut Bencher) {
+ let first_map: HashMap<u64, _> = (0..MERGE).map(|i| (i, ())).collect();
+ let second_map: HashMap<u64, _> = (MERGE..MERGE * 2).map(|i| (i, ())).collect();
+ let mut v = Vec::new();
+ let mut rng = small_rng();
+ b.iter(|| {
+ let mut merged = first_map.clone();
+ v.extend(second_map.iter().map(|(&k, &v)| (k, v)));
+ v.shuffle(&mut rng);
+ merged.extend(v.drain(..));
+
+ merged
+ });
+}
+
+#[bench]
+fn indexmap_merge_simple(b: &mut Bencher) {
+ let first_map: IndexMap<u64, _> = (0..MERGE).map(|i| (i, ())).collect();
+ let second_map: IndexMap<u64, _> = (MERGE..MERGE * 2).map(|i| (i, ())).collect();
+ b.iter(|| {
+ let mut merged = first_map.clone();
+ merged.extend(second_map.iter().map(|(&k, &v)| (k, v)));
+ merged
+ });
+}
+
+#[bench]
+fn indexmap_merge_shuffle(b: &mut Bencher) {
+ let first_map: IndexMap<u64, _> = (0..MERGE).map(|i| (i, ())).collect();
+ let second_map: IndexMap<u64, _> = (MERGE..MERGE * 2).map(|i| (i, ())).collect();
+ let mut v = Vec::new();
+ let mut rng = small_rng();
+ b.iter(|| {
+ let mut merged = first_map.clone();
+ v.extend(second_map.iter().map(|(&k, &v)| (k, v)));
+ v.shuffle(&mut rng);
+ merged.extend(v.drain(..));
+
+ merged
+ });
+}
+
+#[bench]
+fn swap_remove_indexmap_100_000(b: &mut Bencher) {
+ let map = IMAP_100K.clone();
+ let mut keys = Vec::from_iter(map.keys().copied());
+ let mut rng = small_rng();
+ keys.shuffle(&mut rng);
+
+ b.iter(|| {
+ let mut map = map.clone();
+ for key in &keys {
+ map.swap_remove(key);
+ }
+ assert_eq!(map.len(), 0);
+ map
+ });
+}
+
+#[bench]
+fn shift_remove_indexmap_100_000_few(b: &mut Bencher) {
+ let map = IMAP_100K.clone();
+ let mut keys = Vec::from_iter(map.keys().copied());
+ let mut rng = small_rng();
+ keys.shuffle(&mut rng);
+ keys.truncate(50);
+
+ b.iter(|| {
+ let mut map = map.clone();
+ for key in &keys {
+ map.shift_remove(key);
+ }
+ assert_eq!(map.len(), IMAP_100K.len() - keys.len());
+ map
+ });
+}
+
+#[bench]
+fn shift_remove_indexmap_2_000_full(b: &mut Bencher) {
+ let mut keys = KEYS[..2_000].to_vec();
+ let mut map = IndexMap::with_capacity(keys.len());
+ for &key in &keys {
+ map.insert(key, key);
+ }
+ let mut rng = small_rng();
+ keys.shuffle(&mut rng);
+
+ b.iter(|| {
+ let mut map = map.clone();
+ for key in &keys {
+ map.shift_remove(key);
+ }
+ assert_eq!(map.len(), 0);
+ map
+ });
+}
+
+#[bench]
+fn pop_indexmap_100_000(b: &mut Bencher) {
+ let map = IMAP_100K.clone();
+
+ b.iter(|| {
+ let mut map = map.clone();
+ while !map.is_empty() {
+ map.pop();
+ }
+ assert_eq!(map.len(), 0);
+ map
+ });
+}
+
+#[bench]
+fn few_retain_indexmap_100_000(b: &mut Bencher) {
+ let map = IMAP_100K.clone();
+
+ b.iter(|| {
+ let mut map = map.clone();
+ map.retain(|k, _| *k % 7 == 0);
+ map
+ });
+}
+
+#[bench]
+fn few_retain_hashmap_100_000(b: &mut Bencher) {
+ let map = HMAP_100K.clone();
+
+ b.iter(|| {
+ let mut map = map.clone();
+ map.retain(|k, _| *k % 7 == 0);
+ map
+ });
+}
+
+#[bench]
+fn half_retain_indexmap_100_000(b: &mut Bencher) {
+ let map = IMAP_100K.clone();
+
+ b.iter(|| {
+ let mut map = map.clone();
+ map.retain(|k, _| *k % 2 == 0);
+ map
+ });
+}
+
+#[bench]
+fn half_retain_hashmap_100_000(b: &mut Bencher) {
+ let map = HMAP_100K.clone();
+
+ b.iter(|| {
+ let mut map = map.clone();
+ map.retain(|k, _| *k % 2 == 0);
+ map
+ });
+}
+
+#[bench]
+fn many_retain_indexmap_100_000(b: &mut Bencher) {
+ let map = IMAP_100K.clone();
+
+ b.iter(|| {
+ let mut map = map.clone();
+ map.retain(|k, _| *k % 100 != 0);
+ map
+ });
+}
+
+#[bench]
+fn many_retain_hashmap_100_000(b: &mut Bencher) {
+ let map = HMAP_100K.clone();
+
+ b.iter(|| {
+ let mut map = map.clone();
+ map.retain(|k, _| *k % 100 != 0);
+ map
+ });
+}
+
+// simple sort impl for comparison
+pub fn simple_sort<K: Ord + Hash, V>(m: &mut IndexMap<K, V>) {
+ let mut ordered: Vec<_> = m.drain(..).collect();
+ ordered.sort_by(|left, right| left.0.cmp(&right.0));
+ m.extend(ordered);
+}
+
+#[bench]
+fn indexmap_sort_s(b: &mut Bencher) {
+ let map = IMAP_SORT_S.clone();
+
+ // there's a map clone there, but it's still useful to profile this
+ b.iter(|| {
+ let mut map = map.clone();
+ map.sort_keys();
+ map
+ });
+}
+
+#[bench]
+fn indexmap_simple_sort_s(b: &mut Bencher) {
+ let map = IMAP_SORT_S.clone();
+
+ // there's a map clone there, but it's still useful to profile this
+ b.iter(|| {
+ let mut map = map.clone();
+ simple_sort(&mut map);
+ map
+ });
+}
+
+#[bench]
+fn indexmap_sort_u32(b: &mut Bencher) {
+ let map = IMAP_SORT_U32.clone();
+
+ // there's a map clone there, but it's still useful to profile this
+ b.iter(|| {
+ let mut map = map.clone();
+ map.sort_keys();
+ map
+ });
+}
+
+#[bench]
+fn indexmap_simple_sort_u32(b: &mut Bencher) {
+ let map = IMAP_SORT_U32.clone();
+
+ // there's a map clone there, but it's still useful to profile this
+ b.iter(|| {
+ let mut map = map.clone();
+ simple_sort(&mut map);
+ map
+ });
+}
+
+// measure the fixed overhead of cloning in sort benchmarks
+#[bench]
+fn indexmap_clone_for_sort_s(b: &mut Bencher) {
+ let map = IMAP_SORT_S.clone();
+
+ b.iter(|| map.clone());
+}
+
+#[bench]
+fn indexmap_clone_for_sort_u32(b: &mut Bencher) {
+ let map = IMAP_SORT_U32.clone();
+
+ b.iter(|| map.clone());
+}
diff --git a/vendor/indexmap/benches/faststring.rs b/vendor/indexmap/benches/faststring.rs
new file mode 100644
index 0000000..ecc28b4
--- /dev/null
+++ b/vendor/indexmap/benches/faststring.rs
@@ -0,0 +1,185 @@
+#![feature(test)]
+
+extern crate test;
+
+use test::Bencher;
+
+use indexmap::IndexMap;
+
+use std::collections::HashMap;
+
+use rand::rngs::SmallRng;
+use rand::seq::SliceRandom;
+use rand::SeedableRng;
+
+use std::hash::{Hash, Hasher};
+
+use std::borrow::Borrow;
+use std::ops::Deref;
+
+/// Use a consistently seeded Rng for benchmark stability
+fn small_rng() -> SmallRng {
+ let seed = u64::from_le_bytes(*b"indexmap");
+ SmallRng::seed_from_u64(seed)
+}
+
+#[derive(PartialEq, Eq, Copy, Clone)]
+#[repr(transparent)]
+pub struct OneShot<T: ?Sized>(pub T);
+
+impl Hash for OneShot<str> {
+ fn hash<H: Hasher>(&self, h: &mut H) {
+ h.write(self.0.as_bytes())
+ }
+}
+
+impl<'a, S> From<&'a S> for &'a OneShot<str>
+where
+ S: AsRef<str>,
+{
+ fn from(s: &'a S) -> Self {
+ let s: &str = s.as_ref();
+ unsafe { &*(s as *const str as *const OneShot<str>) }
+ }
+}
+
+impl Hash for OneShot<String> {
+ fn hash<H: Hasher>(&self, h: &mut H) {
+ h.write(self.0.as_bytes())
+ }
+}
+
+impl Borrow<OneShot<str>> for OneShot<String> {
+ fn borrow(&self) -> &OneShot<str> {
+ <&OneShot<str>>::from(&self.0)
+ }
+}
+
+impl<T> Deref for OneShot<T> {
+ type Target = T;
+ fn deref(&self) -> &T {
+ &self.0
+ }
+}
+
+fn shuffled_keys<I>(iter: I) -> Vec<I::Item>
+where
+ I: IntoIterator,
+{
+ let mut v = Vec::from_iter(iter);
+ let mut rng = small_rng();
+ v.shuffle(&mut rng);
+ v
+}
+
+#[bench]
+fn insert_hashmap_string_10_000(b: &mut Bencher) {
+ let c = 10_000;
+ b.iter(|| {
+ let mut map = HashMap::with_capacity(c);
+ for x in 0..c {
+ map.insert(x.to_string(), ());
+ }
+ map
+ });
+}
+
+#[bench]
+fn insert_hashmap_string_oneshot_10_000(b: &mut Bencher) {
+ let c = 10_000;
+ b.iter(|| {
+ let mut map = HashMap::with_capacity(c);
+ for x in 0..c {
+ map.insert(OneShot(x.to_string()), ());
+ }
+ map
+ });
+}
+
+#[bench]
+fn insert_indexmap_string_10_000(b: &mut Bencher) {
+ let c = 10_000;
+ b.iter(|| {
+ let mut map = IndexMap::with_capacity(c);
+ for x in 0..c {
+ map.insert(x.to_string(), ());
+ }
+ map
+ });
+}
+
+#[bench]
+fn lookup_hashmap_10_000_exist_string(b: &mut Bencher) {
+ let c = 10_000;
+ let mut map = HashMap::with_capacity(c);
+ let keys = shuffled_keys(0..c);
+ for &key in &keys {
+ map.insert(key.to_string(), 1);
+ }
+ let lookups = (5000..c).map(|x| x.to_string()).collect::<Vec<_>>();
+ b.iter(|| {
+ let mut found = 0;
+ for key in &lookups {
+ found += map.get(key).is_some() as i32;
+ }
+ found
+ });
+}
+
+#[bench]
+fn lookup_hashmap_10_000_exist_string_oneshot(b: &mut Bencher) {
+ let c = 10_000;
+ let mut map = HashMap::with_capacity(c);
+ let keys = shuffled_keys(0..c);
+ for &key in &keys {
+ map.insert(OneShot(key.to_string()), 1);
+ }
+ let lookups = (5000..c)
+ .map(|x| OneShot(x.to_string()))
+ .collect::<Vec<_>>();
+ b.iter(|| {
+ let mut found = 0;
+ for key in &lookups {
+ found += map.get(key).is_some() as i32;
+ }
+ found
+ });
+}
+
+#[bench]
+fn lookup_indexmap_10_000_exist_string(b: &mut Bencher) {
+ let c = 10_000;
+ let mut map = IndexMap::with_capacity(c);
+ let keys = shuffled_keys(0..c);
+ for &key in &keys {
+ map.insert(key.to_string(), 1);
+ }
+ let lookups = (5000..c).map(|x| x.to_string()).collect::<Vec<_>>();
+ b.iter(|| {
+ let mut found = 0;
+ for key in &lookups {
+ found += map.get(key).is_some() as i32;
+ }
+ found
+ });
+}
+
+#[bench]
+fn lookup_indexmap_10_000_exist_string_oneshot(b: &mut Bencher) {
+ let c = 10_000;
+ let mut map = IndexMap::with_capacity(c);
+ let keys = shuffled_keys(0..c);
+ for &key in &keys {
+ map.insert(OneShot(key.to_string()), 1);
+ }
+ let lookups = (5000..c)
+ .map(|x| OneShot(x.to_string()))
+ .collect::<Vec<_>>();
+ b.iter(|| {
+ let mut found = 0;
+ for key in &lookups {
+ found += map.get(key).is_some() as i32;
+ }
+ found
+ });
+}
diff --git a/vendor/indexmap/debian/patches/drop-quickcheck.diff b/vendor/indexmap/debian/patches/drop-quickcheck.diff
new file mode 100644
index 0000000..3ac5b2c
--- /dev/null
+++ b/vendor/indexmap/debian/patches/drop-quickcheck.diff
@@ -0,0 +1,22 @@
+--- rust-indexmap-1.9.3.orig/Cargo.toml
++++ rust-indexmap-1.9.3/Cargo.toml
+@@ -35,7 +35,6 @@ tag-name = "{{version}}"
+ [package.metadata.docs.rs]
+ features = [
+ "arbitrary",
+- "quickcheck",
+ "serde-1",
+ "rayon",
+ ]
+@@ -56,11 +55,6 @@ version = "0.12"
+ features = ["raw"]
+ default-features = false
+
+-[dependencies.quickcheck]
+-version = "1.0"
+-optional = true
+-default-features = false
+-
+ [dependencies.rayon]
+ version = "1.4.1"
+ optional = true
diff --git a/vendor/indexmap/debian/patches/drop-rustc-rayon.diff b/vendor/indexmap/debian/patches/drop-rustc-rayon.diff
new file mode 100644
index 0000000..4b791a6
--- /dev/null
+++ b/vendor/indexmap/debian/patches/drop-rustc-rayon.diff
@@ -0,0 +1,23 @@
+Author: Daniel Kahn Gillmor <dkg@fifthhorseman.net>
+Subject: Drop rustc-rayon feature
+
+I believe the rustc-rayon feature is only useful for building the
+rustc compiler (it was added in indexmap 1.7.1), and my understanding
+is that rustc in debian is *not* built from debcargo-produced crates.
+
+So rather than introducing an additional feature-related binary
+package, which would cause a trip through the NEW queue, I'm just
+stripping it out.
+
+Another approach that avoids the new binary package, if it turns out
+that stripping this feature causes problems, would be to add
+collapse_features=true to debian/debcargo.toml.
+
+
+--- indexmap/Cargo.toml
++++ indexmap/Cargo.toml
+@@ -56,4 +56,0 @@
+-[dependencies.rustc-rayon]
+-version = "0.5"
+-optional = true
+-package = "rustc-rayon"
diff --git a/vendor/indexmap/debian/patches/series b/vendor/indexmap/debian/patches/series
new file mode 100644
index 0000000..4020f57
--- /dev/null
+++ b/vendor/indexmap/debian/patches/series
@@ -0,0 +1,2 @@
+#drop-rustc-rayon.diff
+#drop-quickcheck.diff
diff --git a/vendor/indexmap/src/arbitrary.rs b/vendor/indexmap/src/arbitrary.rs
new file mode 100644
index 0000000..7798438
--- /dev/null
+++ b/vendor/indexmap/src/arbitrary.rs
@@ -0,0 +1,77 @@
+#[cfg(feature = "arbitrary")]
+#[cfg_attr(docsrs, doc(cfg(feature = "arbitrary")))]
+mod impl_arbitrary {
+ use crate::{IndexMap, IndexSet};
+ use arbitrary::{Arbitrary, Result, Unstructured};
+ use core::hash::{BuildHasher, Hash};
+
+ impl<'a, K, V, S> Arbitrary<'a> for IndexMap<K, V, S>
+ where
+ K: Arbitrary<'a> + Hash + Eq,
+ V: Arbitrary<'a>,
+ S: BuildHasher + Default,
+ {
+ fn arbitrary(u: &mut Unstructured<'a>) -> Result<Self> {
+ u.arbitrary_iter()?.collect()
+ }
+
+ fn arbitrary_take_rest(u: Unstructured<'a>) -> Result<Self> {
+ u.arbitrary_take_rest_iter()?.collect()
+ }
+ }
+
+ impl<'a, T, S> Arbitrary<'a> for IndexSet<T, S>
+ where
+ T: Arbitrary<'a> + Hash + Eq,
+ S: BuildHasher + Default,
+ {
+ fn arbitrary(u: &mut Unstructured<'a>) -> Result<Self> {
+ u.arbitrary_iter()?.collect()
+ }
+
+ fn arbitrary_take_rest(u: Unstructured<'a>) -> Result<Self> {
+ u.arbitrary_take_rest_iter()?.collect()
+ }
+ }
+}
+
+#[cfg(feature = "quickcheck")]
+#[cfg_attr(docsrs, doc(cfg(feature = "quickcheck")))]
+mod impl_quickcheck {
+ use crate::{IndexMap, IndexSet};
+ use alloc::boxed::Box;
+ use alloc::vec::Vec;
+ use core::hash::{BuildHasher, Hash};
+ use quickcheck::{Arbitrary, Gen};
+
+ impl<K, V, S> Arbitrary for IndexMap<K, V, S>
+ where
+ K: Arbitrary + Hash + Eq,
+ V: Arbitrary,
+ S: BuildHasher + Default + Clone + 'static,
+ {
+ fn arbitrary(g: &mut Gen) -> Self {
+ Self::from_iter(Vec::arbitrary(g))
+ }
+
+ fn shrink(&self) -> Box<dyn Iterator<Item = Self>> {
+ let vec = Vec::from_iter(self.clone());
+ Box::new(vec.shrink().map(Self::from_iter))
+ }
+ }
+
+ impl<T, S> Arbitrary for IndexSet<T, S>
+ where
+ T: Arbitrary + Hash + Eq,
+ S: BuildHasher + Default + Clone + 'static,
+ {
+ fn arbitrary(g: &mut Gen) -> Self {
+ Self::from_iter(Vec::arbitrary(g))
+ }
+
+ fn shrink(&self) -> Box<dyn Iterator<Item = Self>> {
+ let vec = Vec::from_iter(self.clone());
+ Box::new(vec.shrink().map(Self::from_iter))
+ }
+ }
+}
diff --git a/vendor/indexmap/src/lib.rs b/vendor/indexmap/src/lib.rs
new file mode 100644
index 0000000..5e42784
--- /dev/null
+++ b/vendor/indexmap/src/lib.rs
@@ -0,0 +1,278 @@
+// We *mostly* avoid unsafe code, but `map::core::raw` allows it to use `RawTable` buckets.
+#![deny(unsafe_code)]
+#![warn(rust_2018_idioms)]
+#![doc(html_root_url = "https://docs.rs/indexmap/1/")]
+#![no_std]
+
+//! [`IndexMap`] is a hash table where the iteration order of the key-value
+//! pairs is independent of the hash values of the keys.
+//!
+//! [`IndexSet`] is a corresponding hash set using the same implementation and
+//! with similar properties.
+//!
+//! [`IndexMap`]: map/struct.IndexMap.html
+//! [`IndexSet`]: set/struct.IndexSet.html
+//!
+//!
+//! ### Highlights
+//!
+//! [`IndexMap`] and [`IndexSet`] are drop-in compatible with the std `HashMap`
+//! and `HashSet`, but they also have some features of note:
+//!
+//! - The ordering semantics (see their documentation for details)
+//! - Sorting methods and the [`.pop()`][IndexMap::pop] methods.
+//! - The [`Equivalent`] trait, which offers more flexible equality definitions
+//!   between borrowed and owned versions of keys (see the sketch below).
+//! - The [`MutableKeys`][map::MutableKeys] trait, which gives opt-in mutable
+//! access to hash map keys.
+//!
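+//! A small sketch of a custom [`Equivalent`] implementation (the type names
+//! here are made up purely for illustration):
+//!
+//! ```
+//! use indexmap::{Equivalent, IndexMap};
+//!
+//! #[derive(Hash, Eq, PartialEq)]
+//! struct Pair(String, String);
+//!
+//! // A borrowed form used only for lookups; it must hash like `Pair` does.
+//! #[derive(Hash)]
+//! struct PairRef<'a>(&'a str, &'a str);
+//!
+//! impl Equivalent<Pair> for PairRef<'_> {
+//!     fn equivalent(&self, key: &Pair) -> bool {
+//!         self.0 == key.0 && self.1 == key.1
+//!     }
+//! }
+//!
+//! let mut map = IndexMap::new();
+//! map.insert(Pair("a".into(), "b".into()), 1);
+//! assert_eq!(map.get(&PairRef("a", "b")), Some(&1));
+//! ```
+//!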
+//! ### Feature Flags
+//!
+//! To reduce the amount of compiled code in the crate by default, certain
+//! features are gated behind [feature flags]. These allow you to opt in to (or
+//! out of) functionality. Below is a list of the features available in this
+//! crate.
+//!
+//! * `std`: Enables features which require the Rust standard library. For more
+//! information see the section on [`no_std`].
+//! * `rayon`: Enables parallel iteration and other parallel methods.
+//! * `serde`: Adds implementations for [`Serialize`] and [`Deserialize`]
+//! to [`IndexMap`] and [`IndexSet`]. Alternative implementations for
+//! (de)serializing [`IndexMap`] as an ordered sequence are available in the
+//! [`map::serde_seq`] module.
+//! * `arbitrary`: Adds implementations for the [`arbitrary::Arbitrary`] trait
+//! to [`IndexMap`] and [`IndexSet`].
+//! * `quickcheck`: Adds implementations for the [`quickcheck::Arbitrary`] trait
+//! to [`IndexMap`] and [`IndexSet`].
+//!
+//! _Note: only the `std` feature is enabled by default._
+//!
+//! [feature flags]: https://doc.rust-lang.org/cargo/reference/manifest.html#the-features-section
+//! [`no_std`]: #no-standard-library-targets
+//! [`Serialize`]: `::serde::Serialize`
+//! [`Deserialize`]: `::serde::Deserialize`
+//! [`arbitrary::Arbitrary`]: `::arbitrary::Arbitrary`
+//! [`quickcheck::Arbitrary`]: `::quickcheck::Arbitrary`
+//!
+//! ### Alternate Hashers
+//!
+//! [`IndexMap`] and [`IndexSet`] have a default hasher type `S = RandomState`,
+//! just like the standard `HashMap` and `HashSet`, which is resistant to
+//! HashDoS attacks but not the most performant. Type aliases can make it easier
+//! to use alternate hashers:
+//!
+//! ```
+//! use fnv::FnvBuildHasher;
+//! use fxhash::FxBuildHasher;
+//! use indexmap::{IndexMap, IndexSet};
+//!
+//! type FnvIndexMap<K, V> = IndexMap<K, V, FnvBuildHasher>;
+//! type FnvIndexSet<T> = IndexSet<T, FnvBuildHasher>;
+//!
+//! type FxIndexMap<K, V> = IndexMap<K, V, FxBuildHasher>;
+//! type FxIndexSet<T> = IndexSet<T, FxBuildHasher>;
+//!
+//! let std: IndexSet<i32> = (0..100).collect();
+//! let fnv: FnvIndexSet<i32> = (0..100).collect();
+//! let fx: FxIndexSet<i32> = (0..100).collect();
+//! assert_eq!(std, fnv);
+//! assert_eq!(std, fx);
+//! ```
+//!
+//! ### Rust Version
+//!
+//! This version of indexmap requires Rust 1.63 or later.
+//!
+//! The indexmap 2.x release series will use a carefully considered version
+//! upgrade policy, where in a later 2.x version, we will raise the minimum
+//! required Rust version.
+//!
+//! ## No Standard Library Targets
+//!
+//! This crate supports being built without `std`, requiring `alloc` instead.
+//! This is chosen by disabling the default "std" cargo feature: add
+//! `default-features = false` to your dependency specification.
+//!
+//! - Creating maps and sets using [`new`][IndexMap::new] and
+//! [`with_capacity`][IndexMap::with_capacity] is unavailable without `std`.
+//! Use methods [`IndexMap::default`][def],
+//! [`with_hasher`][IndexMap::with_hasher],
+//! [`with_capacity_and_hasher`][IndexMap::with_capacity_and_hasher] instead.
+//! A no-std compatible hasher will be needed as well, for example
+//!   from the crate `twox-hash` (a short sketch follows at the end of this section).
+//! - Macros [`indexmap!`] and [`indexset!`] are unavailable without `std`.
+//!
+//! [def]: map/struct.IndexMap.html#impl-Default
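+//!
+//! A minimal construction sketch without `std` (it assumes a hasher that works
+//! without `std`; `fnv` is used here purely for illustration):
+//!
+//! ```
+//! use fnv::FnvBuildHasher;
+//! use indexmap::IndexMap;
+//!
+//! let mut map: IndexMap<u32, u32, FnvBuildHasher> =
+//!     IndexMap::with_hasher(FnvBuildHasher::default());
+//! map.insert(1, 10);
+//! assert_eq!(map.get(&1), Some(&10));
+//! ```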
+
+#![cfg_attr(docsrs, feature(doc_cfg))]
+
+extern crate alloc;
+
+#[cfg(feature = "std")]
+#[macro_use]
+extern crate std;
+
+use alloc::vec::{self, Vec};
+
+mod arbitrary;
+#[macro_use]
+mod macros;
+mod mutable_keys;
+#[cfg(feature = "serde")]
+#[cfg_attr(docsrs, doc(cfg(feature = "serde")))]
+mod serde;
+mod util;
+
+pub mod map;
+pub mod set;
+
+// Placed after `map` and `set` so new `rayon` methods on the types
+// are documented after the "normal" methods.
+#[cfg(feature = "rayon")]
+#[cfg_attr(docsrs, doc(cfg(feature = "rayon")))]
+mod rayon;
+
+#[cfg(feature = "rustc-rayon")]
+mod rustc;
+
+pub use crate::map::IndexMap;
+pub use crate::set::IndexSet;
+pub use equivalent::Equivalent;
+
+// shared private items
+
+/// Hash value newtype. Not larger than usize, since anything larger
+/// isn't used for selecting position anyway.
+#[derive(Clone, Copy, Debug, PartialEq)]
+struct HashValue(usize);
+
+impl HashValue {
+ #[inline(always)]
+ fn get(self) -> u64 {
+ self.0 as u64
+ }
+}
+
+#[derive(Copy, Debug)]
+struct Bucket<K, V> {
+ hash: HashValue,
+ key: K,
+ value: V,
+}
+
+impl<K, V> Clone for Bucket<K, V>
+where
+ K: Clone,
+ V: Clone,
+{
+ fn clone(&self) -> Self {
+ Bucket {
+ hash: self.hash,
+ key: self.key.clone(),
+ value: self.value.clone(),
+ }
+ }
+
+ fn clone_from(&mut self, other: &Self) {
+ self.hash = other.hash;
+ self.key.clone_from(&other.key);
+ self.value.clone_from(&other.value);
+ }
+}
+
+impl<K, V> Bucket<K, V> {
+ // field accessors -- used for `f` instead of closures in `.map(f)`
+ fn key_ref(&self) -> &K {
+ &self.key
+ }
+ fn value_ref(&self) -> &V {
+ &self.value
+ }
+ fn value_mut(&mut self) -> &mut V {
+ &mut self.value
+ }
+ fn key(self) -> K {
+ self.key
+ }
+ fn value(self) -> V {
+ self.value
+ }
+ fn key_value(self) -> (K, V) {
+ (self.key, self.value)
+ }
+ fn refs(&self) -> (&K, &V) {
+ (&self.key, &self.value)
+ }
+ fn ref_mut(&mut self) -> (&K, &mut V) {
+ (&self.key, &mut self.value)
+ }
+ fn muts(&mut self) -> (&mut K, &mut V) {
+ (&mut self.key, &mut self.value)
+ }
+}
+
+trait Entries {
+ type Entry;
+ fn into_entries(self) -> Vec<Self::Entry>;
+ fn as_entries(&self) -> &[Self::Entry];
+ fn as_entries_mut(&mut self) -> &mut [Self::Entry];
+ fn with_entries<F>(&mut self, f: F)
+ where
+ F: FnOnce(&mut [Self::Entry]);
+}
+
+/// The error type for `try_reserve` methods.
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub struct TryReserveError {
+ kind: TryReserveErrorKind,
+}
+
+#[derive(Clone, PartialEq, Eq, Debug)]
+enum TryReserveErrorKind {
+ // The standard library's kind is currently opaque to us, otherwise we could unify this.
+ Std(alloc::collections::TryReserveError),
+ CapacityOverflow,
+ AllocError { layout: alloc::alloc::Layout },
+}
+
+// These are not `From` so we don't expose them in our public API.
+impl TryReserveError {
+ fn from_alloc(error: alloc::collections::TryReserveError) -> Self {
+ Self {
+ kind: TryReserveErrorKind::Std(error),
+ }
+ }
+
+ fn from_hashbrown(error: hashbrown::TryReserveError) -> Self {
+ Self {
+ kind: match error {
+ hashbrown::TryReserveError::CapacityOverflow => {
+ TryReserveErrorKind::CapacityOverflow
+ }
+ hashbrown::TryReserveError::AllocError { layout } => {
+ TryReserveErrorKind::AllocError { layout }
+ }
+ },
+ }
+ }
+}
+
+impl core::fmt::Display for TryReserveError {
+ fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
+ let reason = match &self.kind {
+ TryReserveErrorKind::Std(e) => return core::fmt::Display::fmt(e, f),
+ TryReserveErrorKind::CapacityOverflow => {
+ " because the computed capacity exceeded the collection's maximum"
+ }
+ TryReserveErrorKind::AllocError { .. } => {
+ " because the memory allocator returned an error"
+ }
+ };
+ f.write_str("memory allocation failed")?;
+ f.write_str(reason)
+ }
+}
+
+#[cfg(feature = "std")]
+#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
+impl std::error::Error for TryReserveError {}
diff --git a/vendor/indexmap/src/macros.rs b/vendor/indexmap/src/macros.rs
new file mode 100644
index 0000000..5317f1c
--- /dev/null
+++ b/vendor/indexmap/src/macros.rs
@@ -0,0 +1,178 @@
+#[cfg(feature = "std")]
+#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
+#[macro_export]
+/// Create an `IndexMap` from a list of key-value pairs
+///
+/// ## Example
+///
+/// ```
+/// use indexmap::indexmap;
+///
+/// let map = indexmap!{
+/// "a" => 1,
+/// "b" => 2,
+/// };
+/// assert_eq!(map["a"], 1);
+/// assert_eq!(map["b"], 2);
+/// assert_eq!(map.get("c"), None);
+///
+/// // "a" is the first key
+/// assert_eq!(map.keys().next(), Some(&"a"));
+/// ```
+macro_rules! indexmap {
+ ($($key:expr => $value:expr,)+) => { $crate::indexmap!($($key => $value),+) };
+ ($($key:expr => $value:expr),*) => {
+ {
+ // Note: `stringify!($key)` is just here to consume the repetition,
+ // but we throw away that string literal during constant evaluation.
+ const CAP: usize = <[()]>::len(&[$({ stringify!($key); }),*]);
+ let mut map = $crate::IndexMap::with_capacity(CAP);
+ $(
+ map.insert($key, $value);
+ )*
+ map
+ }
+ };
+}
+
+#[cfg(feature = "std")]
+#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
+#[macro_export]
+/// Create an `IndexSet` from a list of values
+///
+/// ## Example
+///
+/// ```
+/// use indexmap::indexset;
+///
+/// let set = indexset!{
+/// "a",
+/// "b",
+/// };
+/// assert!(set.contains("a"));
+/// assert!(set.contains("b"));
+/// assert!(!set.contains("c"));
+///
+/// // "a" is the first value
+/// assert_eq!(set.iter().next(), Some(&"a"));
+/// ```
+macro_rules! indexset {
+ ($($value:expr,)+) => { $crate::indexset!($($value),+) };
+ ($($value:expr),*) => {
+ {
+ // Note: `stringify!($value)` is just here to consume the repetition,
+ // but we throw away that string literal during constant evaluation.
+ const CAP: usize = <[()]>::len(&[$({ stringify!($value); }),*]);
+ let mut set = $crate::IndexSet::with_capacity(CAP);
+ $(
+ set.insert($value);
+ )*
+ set
+ }
+ };
+}
+
+// generate all the Iterator methods by just forwarding to the underlying
+// self.iter and mapping its element.
+macro_rules! iterator_methods {
+ // $map_elt is the mapping function from the underlying iterator's element
+ // same mapping function for both options and iterators
+ ($map_elt:expr) => {
+ fn next(&mut self) -> Option<Self::Item> {
+ self.iter.next().map($map_elt)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+
+ fn count(self) -> usize {
+ self.iter.len()
+ }
+
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ self.iter.nth(n).map($map_elt)
+ }
+
+ fn last(mut self) -> Option<Self::Item> {
+ self.next_back()
+ }
+
+ fn collect<C>(self) -> C
+ where
+ C: FromIterator<Self::Item>,
+ {
+ // NB: forwarding this directly to standard iterators will
+ // allow it to leverage unstable traits like `TrustedLen`.
+ self.iter.map($map_elt).collect()
+ }
+ };
+}
+
+macro_rules! double_ended_iterator_methods {
+ // $map_elt is the mapping function from the underlying iterator's element
+ // same mapping function for both options and iterators
+ ($map_elt:expr) => {
+ fn next_back(&mut self) -> Option<Self::Item> {
+ self.iter.next_back().map($map_elt)
+ }
+
+ fn nth_back(&mut self, n: usize) -> Option<Self::Item> {
+ self.iter.nth_back(n).map($map_elt)
+ }
+ };
+}
+
+// generate `ParallelIterator` methods by just forwarding to the underlying
+// self.entries and mapping its elements.
+#[cfg(any(feature = "rayon", feature = "rustc-rayon"))]
+macro_rules! parallel_iterator_methods {
+ // $map_elt is the mapping function from the underlying iterator's element
+ ($map_elt:expr) => {
+ fn drive_unindexed<C>(self, consumer: C) -> C::Result
+ where
+ C: UnindexedConsumer<Self::Item>,
+ {
+ self.entries
+ .into_par_iter()
+ .map($map_elt)
+ .drive_unindexed(consumer)
+ }
+
+ // NB: This allows indexed collection, e.g. directly into a `Vec`, but the
+ // underlying iterator must really be indexed. We should remove this if we
+ // start having tombstones that must be filtered out.
+ fn opt_len(&self) -> Option<usize> {
+ Some(self.entries.len())
+ }
+ };
+}
+
+// generate `IndexedParallelIterator` methods by just forwarding to the underlying
+// self.entries and mapping its elements.
+#[cfg(any(feature = "rayon", feature = "rustc-rayon"))]
+macro_rules! indexed_parallel_iterator_methods {
+ // $map_elt is the mapping function from the underlying iterator's element
+ ($map_elt:expr) => {
+ fn drive<C>(self, consumer: C) -> C::Result
+ where
+ C: Consumer<Self::Item>,
+ {
+ self.entries.into_par_iter().map($map_elt).drive(consumer)
+ }
+
+ fn len(&self) -> usize {
+ self.entries.len()
+ }
+
+ fn with_producer<CB>(self, callback: CB) -> CB::Output
+ where
+ CB: ProducerCallback<Self::Item>,
+ {
+ self.entries
+ .into_par_iter()
+ .map($map_elt)
+ .with_producer(callback)
+ }
+ };
+}
diff --git a/vendor/indexmap/src/map.rs b/vendor/indexmap/src/map.rs
new file mode 100644
index 0000000..cb405ca
--- /dev/null
+++ b/vendor/indexmap/src/map.rs
@@ -0,0 +1,1223 @@
+//! `IndexMap` is a hash table where the iteration order of the key-value
+//! pairs is independent of the hash values of the keys.
+
+mod core;
+mod iter;
+mod slice;
+
+#[cfg(feature = "serde")]
+#[cfg_attr(docsrs, doc(cfg(feature = "serde")))]
+pub mod serde_seq;
+
+#[cfg(test)]
+mod tests;
+
+pub use self::core::{Entry, OccupiedEntry, VacantEntry};
+pub use self::iter::{
+ Drain, IntoIter, IntoKeys, IntoValues, Iter, IterMut, Keys, Values, ValuesMut,
+};
+pub use self::slice::Slice;
+pub use crate::mutable_keys::MutableKeys;
+
+#[cfg(feature = "rayon")]
+pub use crate::rayon::map as rayon;
+
+use ::core::cmp::Ordering;
+use ::core::fmt;
+use ::core::hash::{BuildHasher, Hash, Hasher};
+use ::core::ops::{Index, IndexMut, RangeBounds};
+use alloc::boxed::Box;
+use alloc::vec::Vec;
+
+#[cfg(feature = "std")]
+use std::collections::hash_map::RandomState;
+
+use self::core::IndexMapCore;
+use crate::util::{third, try_simplify_range};
+use crate::{Bucket, Entries, Equivalent, HashValue, TryReserveError};
+
+/// A hash table where the iteration order of the key-value pairs is independent
+/// of the hash values of the keys.
+///
+/// The interface is closely compatible with the standard `HashMap`, but also
+/// has additional features.
+///
+/// # Order
+///
+/// The key-value pairs have a consistent order that is determined by
+/// the sequence of insertion and removal calls on the map. The order does
+/// not depend on the keys or the hash function at all.
+///
+/// All iterators traverse the map in *that order*.
+///
+/// The insertion order is preserved, with **notable exceptions** like the
+/// `.remove()` or `.swap_remove()` methods. Methods such as `.sort_by()` of
+/// course result in a new order, depending on the sorting order.
+///
+/// # Indices
+///
+/// The key-value pairs are indexed in a compact range without holes in the
+/// range `0..self.len()`. For example, the method `.get_full` looks up the
+/// index for a key, and the method `.get_index` looks up the key-value pair by
+/// index.
+///
+/// # Examples
+///
+/// ```
+/// use indexmap::IndexMap;
+///
+/// // count the frequency of each letter in a sentence.
+/// let mut letters = IndexMap::new();
+/// for ch in "a short treatise on fungi".chars() {
+/// *letters.entry(ch).or_insert(0) += 1;
+/// }
+///
+/// assert_eq!(letters[&'s'], 2);
+/// assert_eq!(letters[&'t'], 3);
+/// assert_eq!(letters[&'u'], 1);
+/// assert_eq!(letters.get(&'y'), None);
+/// ```
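+///
+/// An additional sketch of index-based access (illustrative):
+///
+/// ```
+/// use indexmap::IndexMap;
+///
+/// let mut map = IndexMap::new();
+/// map.insert("first", 10);
+/// map.insert("second", 20);
+///
+/// // `.get_full` returns the index along with the key and value...
+/// assert_eq!(map.get_full("second"), Some((1, &"second", &20)));
+/// // ...and `.get_index` looks up a pair by its index.
+/// assert_eq!(map.get_index(1), Some((&"second", &20)));
+/// ```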
+#[cfg(feature = "std")]
+pub struct IndexMap<K, V, S = RandomState> {
+ pub(crate) core: IndexMapCore<K, V>,
+ hash_builder: S,
+}
+#[cfg(not(feature = "std"))]
+pub struct IndexMap<K, V, S> {
+ pub(crate) core: IndexMapCore<K, V>,
+ hash_builder: S,
+}
+
+impl<K, V, S> Clone for IndexMap<K, V, S>
+where
+ K: Clone,
+ V: Clone,
+ S: Clone,
+{
+ fn clone(&self) -> Self {
+ IndexMap {
+ core: self.core.clone(),
+ hash_builder: self.hash_builder.clone(),
+ }
+ }
+
+ fn clone_from(&mut self, other: &Self) {
+ self.core.clone_from(&other.core);
+ self.hash_builder.clone_from(&other.hash_builder);
+ }
+}
+
+impl<K, V, S> Entries for IndexMap<K, V, S> {
+ type Entry = Bucket<K, V>;
+
+ #[inline]
+ fn into_entries(self) -> Vec<Self::Entry> {
+ self.core.into_entries()
+ }
+
+ #[inline]
+ fn as_entries(&self) -> &[Self::Entry] {
+ self.core.as_entries()
+ }
+
+ #[inline]
+ fn as_entries_mut(&mut self) -> &mut [Self::Entry] {
+ self.core.as_entries_mut()
+ }
+
+ fn with_entries<F>(&mut self, f: F)
+ where
+ F: FnOnce(&mut [Self::Entry]),
+ {
+ self.core.with_entries(f);
+ }
+}
+
+impl<K, V, S> fmt::Debug for IndexMap<K, V, S>
+where
+ K: fmt::Debug,
+ V: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ if cfg!(not(feature = "test_debug")) {
+ f.debug_map().entries(self.iter()).finish()
+ } else {
+ // Let the inner `IndexMapCore` print all of its details
+ f.debug_struct("IndexMap")
+ .field("core", &self.core)
+ .finish()
+ }
+ }
+}
+
+#[cfg(feature = "std")]
+#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
+impl<K, V> IndexMap<K, V> {
+ /// Create a new map. (Does not allocate.)
+ #[inline]
+ pub fn new() -> Self {
+ Self::with_capacity(0)
+ }
+
+ /// Create a new map with capacity for `n` key-value pairs. (Does not
+ /// allocate if `n` is zero.)
+ ///
+ /// Computes in **O(n)** time.
+ #[inline]
+ pub fn with_capacity(n: usize) -> Self {
+ Self::with_capacity_and_hasher(n, <_>::default())
+ }
+}
+
+impl<K, V, S> IndexMap<K, V, S> {
+ /// Create a new map with capacity for `n` key-value pairs. (Does not
+ /// allocate if `n` is zero.)
+ ///
+ /// Computes in **O(n)** time.
+ #[inline]
+ pub fn with_capacity_and_hasher(n: usize, hash_builder: S) -> Self {
+ if n == 0 {
+ Self::with_hasher(hash_builder)
+ } else {
+ IndexMap {
+ core: IndexMapCore::with_capacity(n),
+ hash_builder,
+ }
+ }
+ }
+
+ /// Create a new map with `hash_builder`.
+ ///
+ /// This function is `const`, so it
+ /// can be called in `static` contexts.
+ pub const fn with_hasher(hash_builder: S) -> Self {
+ IndexMap {
+ core: IndexMapCore::new(),
+ hash_builder,
+ }
+ }
+
+ /// Return the number of elements the map can hold without reallocating.
+ ///
+ /// This number is a lower bound; the map might be able to hold more,
+ /// but is guaranteed to be able to hold at least this many.
+ ///
+ /// Computes in **O(1)** time.
+ pub fn capacity(&self) -> usize {
+ self.core.capacity()
+ }
+
+ /// Return a reference to the map's `BuildHasher`.
+ pub fn hasher(&self) -> &S {
+ &self.hash_builder
+ }
+
+ /// Return the number of key-value pairs in the map.
+ ///
+ /// Computes in **O(1)** time.
+ #[inline]
+ pub fn len(&self) -> usize {
+ self.core.len()
+ }
+
+ /// Returns true if the map contains no elements.
+ ///
+ /// Computes in **O(1)** time.
+ #[inline]
+ pub fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+
+ /// Return an iterator over the key-value pairs of the map, in their order
+ pub fn iter(&self) -> Iter<'_, K, V> {
+ Iter::new(self.as_entries())
+ }
+
+ /// Return an iterator over the key-value pairs of the map, in their order
+ pub fn iter_mut(&mut self) -> IterMut<'_, K, V> {
+ IterMut::new(self.as_entries_mut())
+ }
+
+ /// Return an iterator over the keys of the map, in their order
+ pub fn keys(&self) -> Keys<'_, K, V> {
+ Keys::new(self.as_entries())
+ }
+
+ /// Return an owning iterator over the keys of the map, in their order
+ pub fn into_keys(self) -> IntoKeys<K, V> {
+ IntoKeys::new(self.into_entries())
+ }
+
+ /// Return an iterator over the values of the map, in their order
+ pub fn values(&self) -> Values<'_, K, V> {
+ Values::new(self.as_entries())
+ }
+
+ /// Return an iterator over mutable references to the values of the map,
+ /// in their order
+ pub fn values_mut(&mut self) -> ValuesMut<'_, K, V> {
+ ValuesMut::new(self.as_entries_mut())
+ }
+
+ /// Return an owning iterator over the values of the map, in their order
+ pub fn into_values(self) -> IntoValues<K, V> {
+ IntoValues::new(self.into_entries())
+ }
+
+ /// Remove all key-value pairs in the map, while preserving its capacity.
+ ///
+ /// Computes in **O(n)** time.
+ pub fn clear(&mut self) {
+ self.core.clear();
+ }
+
+ /// Shortens the map, keeping the first `len` elements and dropping the rest.
+ ///
+ /// If `len` is greater than the map's current length, this has no effect.
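+ ///
+ /// # Examples
+ ///
+ /// A small illustrative example:
+ ///
+ /// ```
+ /// use indexmap::IndexMap;
+ ///
+ /// let mut map = IndexMap::from([(1, 'a'), (2, 'b'), (3, 'c')]);
+ /// map.truncate(2);
+ /// assert_eq!(map.keys().copied().collect::<Vec<_>>(), [1, 2]);
+ /// ```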
+ pub fn truncate(&mut self, len: usize) {
+ self.core.truncate(len);
+ }
+
+ /// Clears the `IndexMap` in the given index range, returning those
+ /// key-value pairs as a drain iterator.
+ ///
+ /// The range may be any type that implements `RangeBounds<usize>`,
+ /// including all of the `std::ops::Range*` types, or even a tuple pair of
+ /// `Bound` start and end values. To drain the map entirely, use `RangeFull`
+ /// like `map.drain(..)`.
+ ///
+ /// This shifts down all entries following the drained range to fill the
+ /// gap, and keeps the allocated memory for reuse.
+ ///
+ /// ***Panics*** if the starting point is greater than the end point or if
+ /// the end point is greater than the length of the map.
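+ ///
+ /// # Examples
+ ///
+ /// A brief sketch of draining a suffix of the map:
+ ///
+ /// ```
+ /// use indexmap::IndexMap;
+ ///
+ /// let mut map = IndexMap::from([(1, 'a'), (2, 'b'), (3, 'c')]);
+ /// let drained: Vec<_> = map.drain(1..).collect();
+ /// assert_eq!(drained, [(2, 'b'), (3, 'c')]);
+ /// assert_eq!(map.len(), 1);
+ /// ```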
+ pub fn drain<R>(&mut self, range: R) -> Drain<'_, K, V>
+ where
+ R: RangeBounds<usize>,
+ {
+ Drain::new(self.core.drain(range))
+ }
+
+ /// Splits the collection into two at the given index.
+ ///
+ /// Returns a newly allocated map containing the elements in the range
+ /// `[at, len)`. After the call, the original map will be left containing
+ /// the elements `[0, at)` with its previous capacity unchanged.
+ ///
+ /// ***Panics*** if `at > len`.
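+ ///
+ /// # Examples
+ ///
+ /// For illustration, splitting after the first entry:
+ ///
+ /// ```
+ /// use indexmap::IndexMap;
+ ///
+ /// let mut map = IndexMap::from([(1, "a"), (2, "b"), (3, "c")]);
+ /// let tail = map.split_off(1);
+ /// assert_eq!(map.keys().copied().collect::<Vec<_>>(), [1]);
+ /// assert_eq!(tail.keys().copied().collect::<Vec<_>>(), [2, 3]);
+ /// ```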
+ pub fn split_off(&mut self, at: usize) -> Self
+ where
+ S: Clone,
+ {
+ Self {
+ core: self.core.split_off(at),
+ hash_builder: self.hash_builder.clone(),
+ }
+ }
+}
+
+impl<K, V, S> IndexMap<K, V, S>
+where
+ K: Hash + Eq,
+ S: BuildHasher,
+{
+ /// Reserve capacity for `additional` more key-value pairs.
+ ///
+ /// Computes in **O(n)** time.
+ pub fn reserve(&mut self, additional: usize) {
+ self.core.reserve(additional);
+ }
+
+ /// Reserve capacity for `additional` more key-value pairs, without over-allocating.
+ ///
+ /// Unlike `reserve`, this does not deliberately over-allocate the entry capacity to avoid
+ /// frequent re-allocations. However, the underlying data structures may still have internal
+ /// capacity requirements, and the allocator itself may give more space than requested, so this
+ /// cannot be relied upon to be precisely minimal.
+ ///
+ /// Computes in **O(n)** time.
+ pub fn reserve_exact(&mut self, additional: usize) {
+ self.core.reserve_exact(additional);
+ }
+
+ /// Try to reserve capacity for `additional` more key-value pairs.
+ ///
+ /// Computes in **O(n)** time.
+ pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
+ self.core.try_reserve(additional)
+ }
+
+ /// Try to reserve capacity for `additional` more key-value pairs, without over-allocating.
+ ///
+ /// Unlike `try_reserve`, this does not deliberately over-allocate the entry capacity to avoid
+ /// frequent re-allocations. However, the underlying data structures may still have internal
+ /// capacity requirements, and the allocator itself may give more space than requested, so this
+ /// cannot be relied upon to be precisely minimal.
+ ///
+ /// Computes in **O(n)** time.
+ pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
+ self.core.try_reserve_exact(additional)
+ }
+
+ /// Shrink the capacity of the map as much as possible.
+ ///
+ /// Computes in **O(n)** time.
+ pub fn shrink_to_fit(&mut self) {
+ self.core.shrink_to(0);
+ }
+
+ /// Shrink the capacity of the map with a lower limit.
+ ///
+ /// Computes in **O(n)** time.
+ pub fn shrink_to(&mut self, min_capacity: usize) {
+ self.core.shrink_to(min_capacity);
+ }
+
+ fn hash<Q: ?Sized + Hash>(&self, key: &Q) -> HashValue {
+ let mut h = self.hash_builder.build_hasher();
+ key.hash(&mut h);
+ HashValue(h.finish() as usize)
+ }
+
+ /// Insert a key-value pair in the map.
+ ///
+ /// If an equivalent key already exists in the map: the key remains and
+ /// retains its place in the order; its corresponding value is updated
+ /// with `value`, and the older value is returned inside `Some(_)`.
+ ///
+ /// If no equivalent key existed in the map: the new key-value pair is
+ /// inserted, last in order, and `None` is returned.
+ ///
+ /// Computes in **O(1)** time (amortized average).
+ ///
+ /// See also [`entry`](#method.entry) if you want to insert *or* modify,
+ /// or if you need to get the index of the corresponding key-value pair.
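+ ///
+ /// # Examples
+ ///
+ /// A minimal example of both the insert and update cases:
+ ///
+ /// ```
+ /// use indexmap::IndexMap;
+ ///
+ /// let mut map = IndexMap::new();
+ /// assert_eq!(map.insert("a", 1), None);
+ /// assert_eq!(map.insert("b", 2), None);
+ /// assert_eq!(map.insert("a", 3), Some(1)); // "a" keeps index 0
+ /// assert_eq!(map.get_index_of("a"), Some(0));
+ /// ```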
+ pub fn insert(&mut self, key: K, value: V) -> Option<V> {
+ self.insert_full(key, value).1
+ }
+
+ /// Insert a key-value pair in the map, and get their index.
+ ///
+ /// If an equivalent key already exists in the map: the key remains and
+ /// retains its place in the order; its corresponding value is updated
+ /// with `value`, and the older value is returned inside `(index, Some(_))`.
+ ///
+ /// If no equivalent key existed in the map: the new key-value pair is
+ /// inserted, last in order, and `(index, None)` is returned.
+ ///
+ /// Computes in **O(1)** time (amortized average).
+ ///
+ /// See also [`entry`](#method.entry) if you want to insert *or* modify,
+ /// or if you need to get the index of the corresponding key-value pair.
+ pub fn insert_full(&mut self, key: K, value: V) -> (usize, Option<V>) {
+ let hash = self.hash(&key);
+ self.core.insert_full(hash, key, value)
+ }
+
+ /// Get the given key’s corresponding entry in the map for insertion and/or
+ /// in-place manipulation.
+ ///
+ /// Computes in **O(1)** time (amortized average).
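+ ///
+ /// # Examples
+ ///
+ /// A small sketch of counting occurrences with `entry`:
+ ///
+ /// ```
+ /// use indexmap::IndexMap;
+ ///
+ /// let mut counts = IndexMap::new();
+ /// *counts.entry("a").or_insert(0) += 1;
+ /// *counts.entry("a").or_insert(0) += 1;
+ /// *counts.entry("b").or_insert(0) += 1;
+ /// assert_eq!(counts["a"], 2);
+ /// assert_eq!(counts["b"], 1);
+ /// ```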
+ pub fn entry(&mut self, key: K) -> Entry<'_, K, V> {
+ let hash = self.hash(&key);
+ self.core.entry(hash, key)
+ }
+
+ /// Return `true` if an equivalent to `key` exists in the map.
+ ///
+ /// Computes in **O(1)** time (average).
+ pub fn contains_key<Q: ?Sized>(&self, key: &Q) -> bool
+ where
+ Q: Hash + Equivalent<K>,
+ {
+ self.get_index_of(key).is_some()
+ }
+
+ /// Return a reference to the value stored for `key`, if it is present,
+ /// else `None`.
+ ///
+ /// Computes in **O(1)** time (average).
+ pub fn get<Q: ?Sized>(&self, key: &Q) -> Option<&V>
+ where
+ Q: Hash + Equivalent<K>,
+ {
+ if let Some(i) = self.get_index_of(key) {
+ let entry = &self.as_entries()[i];
+ Some(&entry.value)
+ } else {
+ None
+ }
+ }
+
+ /// Return references to the key-value pair stored for `key`,
+ /// if it is present, else `None`.
+ ///
+ /// Computes in **O(1)** time (average).
+ pub fn get_key_value<Q: ?Sized>(&self, key: &Q) -> Option<(&K, &V)>
+ where
+ Q: Hash + Equivalent<K>,
+ {
+ if let Some(i) = self.get_index_of(key) {
+ let entry = &self.as_entries()[i];
+ Some((&entry.key, &entry.value))
+ } else {
+ None
+ }
+ }
+
+ /// Return item index, key and value
+ pub fn get_full<Q: ?Sized>(&self, key: &Q) -> Option<(usize, &K, &V)>
+ where
+ Q: Hash + Equivalent<K>,
+ {
+ if let Some(i) = self.get_index_of(key) {
+ let entry = &self.as_entries()[i];
+ Some((i, &entry.key, &entry.value))
+ } else {
+ None
+ }
+ }
+
+ /// Return item index, if it exists in the map
+ ///
+ /// Computes in **O(1)** time (average).
+ pub fn get_index_of<Q: ?Sized>(&self, key: &Q) -> Option<usize>
+ where
+ Q: Hash + Equivalent<K>,
+ {
+ if self.is_empty() {
+ None
+ } else {
+ let hash = self.hash(key);
+ self.core.get_index_of(hash, key)
+ }
+ }
+
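+ /// Return a mutable reference to the value stored for `key`,
+ /// if it is present, else `None`.
+ ///
+ /// Computes in **O(1)** time (average).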
+ pub fn get_mut<Q: ?Sized>(&mut self, key: &Q) -> Option<&mut V>
+ where
+ Q: Hash + Equivalent<K>,
+ {
+ if let Some(i) = self.get_index_of(key) {
+ let entry = &mut self.as_entries_mut()[i];
+ Some(&mut entry.value)
+ } else {
+ None
+ }
+ }
+
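+ /// Return the item index, key, and a mutable reference to the value stored
+ /// for `key`, if it is present, else `None`.
+ ///
+ /// Computes in **O(1)** time (average).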
+ pub fn get_full_mut<Q: ?Sized>(&mut self, key: &Q) -> Option<(usize, &K, &mut V)>
+ where
+ Q: Hash + Equivalent<K>,
+ {
+ if let Some(i) = self.get_index_of(key) {
+ let entry = &mut self.as_entries_mut()[i];
+ Some((i, &entry.key, &mut entry.value))
+ } else {
+ None
+ }
+ }
+
+ /// Remove the key-value pair equivalent to `key` and return
+ /// its value.
+ ///
+ /// **NOTE:** This is equivalent to `.swap_remove(key)`. If you need to
+ /// preserve the order of the keys in the map, use `.shift_remove(key)`
+ /// instead.
+ ///
+ /// Computes in **O(1)** time (average).
+ pub fn remove<Q: ?Sized>(&mut self, key: &Q) -> Option<V>
+ where
+ Q: Hash + Equivalent<K>,
+ {
+ self.swap_remove(key)
+ }
+
+ /// Remove and return the key-value pair equivalent to `key`.
+ ///
+ /// **NOTE:** This is equivalent to `.swap_remove_entry(key)`. If you need to
+ /// preserve the order of the keys in the map, use `.shift_remove_entry(key)`
+ /// instead.
+ ///
+ /// Computes in **O(1)** time (average).
+ pub fn remove_entry<Q: ?Sized>(&mut self, key: &Q) -> Option<(K, V)>
+ where
+ Q: Hash + Equivalent<K>,
+ {
+ self.swap_remove_entry(key)
+ }
+
+ /// Remove the key-value pair equivalent to `key` and return
+ /// its value.
+ ///
+ /// Like `Vec::swap_remove`, the pair is removed by swapping it with the
+ /// last element of the map and popping it off. **This perturbs
+ /// the position of what used to be the last element!**
+ ///
+ /// Return `None` if `key` is not in map.
+ ///
+ /// Computes in **O(1)** time (average).
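+ ///
+ /// # Examples
+ ///
+ /// A brief illustration of the swap-remove behaviour:
+ ///
+ /// ```
+ /// use indexmap::IndexMap;
+ ///
+ /// let mut map = IndexMap::from([(1, 'a'), (2, 'b'), (3, 'c'), (4, 'd')]);
+ /// assert_eq!(map.swap_remove(&2), Some('b'));
+ /// // the former last pair (4, 'd') now occupies index 1
+ /// assert_eq!(map.get_index(1), Some((&4, &'d')));
+ /// ```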
+ pub fn swap_remove<Q: ?Sized>(&mut self, key: &Q) -> Option<V>
+ where
+ Q: Hash + Equivalent<K>,
+ {
+ self.swap_remove_full(key).map(third)
+ }
+
+ /// Remove and return the key-value pair equivalent to `key`.
+ ///
+ /// Like `Vec::swap_remove`, the pair is removed by swapping it with the
+ /// last element of the map and popping it off. **This perturbs
+ /// the position of what used to be the last element!**
+ ///
+ /// Return `None` if `key` is not in map.
+ ///
+ /// Computes in **O(1)** time (average).
+ pub fn swap_remove_entry<Q: ?Sized>(&mut self, key: &Q) -> Option<(K, V)>
+ where
+ Q: Hash + Equivalent<K>,
+ {
+ match self.swap_remove_full(key) {
+ Some((_, key, value)) => Some((key, value)),
+ None => None,
+ }
+ }
+
+ /// Remove the key-value pair equivalent to `key` and return it and
+ /// the index it had.
+ ///
+ /// Like `Vec::swap_remove`, the pair is removed by swapping it with the
+ /// last element of the map and popping it off. **This perturbs
+ /// the position of what used to be the last element!**
+ ///
+ /// Return `None` if `key` is not in map.
+ ///
+ /// Computes in **O(1)** time (average).
+ pub fn swap_remove_full<Q: ?Sized>(&mut self, key: &Q) -> Option<(usize, K, V)>
+ where
+ Q: Hash + Equivalent<K>,
+ {
+ if self.is_empty() {
+ return None;
+ }
+ let hash = self.hash(key);
+ self.core.swap_remove_full(hash, key)
+ }
+
+ /// Remove the key-value pair equivalent to `key` and return
+ /// its value.
+ ///
+ /// Like `Vec::remove`, the pair is removed by shifting all of the
+ /// elements that follow it, preserving their relative order.
+ /// **This perturbs the index of all of those elements!**
+ ///
+ /// Return `None` if `key` is not in map.
+ ///
+ /// Computes in **O(n)** time (average).
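+ ///
+ /// # Examples
+ ///
+ /// A brief illustration of the order-preserving removal:
+ ///
+ /// ```
+ /// use indexmap::IndexMap;
+ ///
+ /// let mut map = IndexMap::from([(1, 'a'), (2, 'b'), (3, 'c'), (4, 'd')]);
+ /// assert_eq!(map.shift_remove(&2), Some('b'));
+ /// // the remaining pairs keep their relative order
+ /// assert_eq!(map.keys().copied().collect::<Vec<_>>(), [1, 3, 4]);
+ /// ```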
+ pub fn shift_remove<Q: ?Sized>(&mut self, key: &Q) -> Option<V>
+ where
+ Q: Hash + Equivalent<K>,
+ {
+ self.shift_remove_full(key).map(third)
+ }
+
+ /// Remove and return the key-value pair equivalent to `key`.
+ ///
+ /// Like `Vec::remove`, the pair is removed by shifting all of the
+ /// elements that follow it, preserving their relative order.
+ /// **This perturbs the index of all of those elements!**
+ ///
+ /// Return `None` if `key` is not in map.
+ ///
+ /// Computes in **O(n)** time (average).
+ pub fn shift_remove_entry<Q: ?Sized>(&mut self, key: &Q) -> Option<(K, V)>
+ where
+ Q: Hash + Equivalent<K>,
+ {
+ match self.shift_remove_full(key) {
+ Some((_, key, value)) => Some((key, value)),
+ None => None,
+ }
+ }
+
+ /// Remove the key-value pair equivalent to `key` and return it and
+ /// the index it had.
+ ///
+ /// Like `Vec::remove`, the pair is removed by shifting all of the
+ /// elements that follow it, preserving their relative order.
+ /// **This perturbs the index of all of those elements!**
+ ///
+ /// Return `None` if `key` is not in map.
+ ///
+ /// Computes in **O(n)** time (average).
+ pub fn shift_remove_full<Q: ?Sized>(&mut self, key: &Q) -> Option<(usize, K, V)>
+ where
+ Q: Hash + Equivalent<K>,
+ {
+ if self.is_empty() {
+ return None;
+ }
+ let hash = self.hash(key);
+ self.core.shift_remove_full(hash, key)
+ }
+
+ /// Remove the last key-value pair
+ ///
+ /// This preserves the order of the remaining elements.
+ ///
+ /// Computes in **O(1)** time (average).
+ pub fn pop(&mut self) -> Option<(K, V)> {
+ self.core.pop()
+ }
+
+ /// Scan through each key-value pair in the map and keep those where the
+ /// closure `keep` returns `true`.
+ ///
+ /// The elements are visited in order, and remaining elements keep their
+ /// order.
+ ///
+ /// Computes in **O(n)** time (average).
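+ ///
+ /// # Examples
+ ///
+ /// For illustration, keeping only the even keys:
+ ///
+ /// ```
+ /// use indexmap::IndexMap;
+ ///
+ /// let mut map: IndexMap<i32, i32> = (0..8).map(|i| (i, i * 10)).collect();
+ /// map.retain(|k, _| k % 2 == 0);
+ /// assert_eq!(map.keys().copied().collect::<Vec<_>>(), [0, 2, 4, 6]);
+ /// ```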
+ pub fn retain<F>(&mut self, mut keep: F)
+ where
+ F: FnMut(&K, &mut V) -> bool,
+ {
+ self.core.retain_in_order(move |k, v| keep(k, v));
+ }
+
+ pub(crate) fn retain_mut<F>(&mut self, keep: F)
+ where
+ F: FnMut(&mut K, &mut V) -> bool,
+ {
+ self.core.retain_in_order(keep);
+ }
+
+ /// Sort the map’s key-value pairs by the default ordering of the keys.
+ ///
+ /// See [`sort_by`](Self::sort_by) for details.
+ pub fn sort_keys(&mut self)
+ where
+ K: Ord,
+ {
+ self.with_entries(move |entries| {
+ entries.sort_by(move |a, b| K::cmp(&a.key, &b.key));
+ });
+ }
+
+ /// Sort the map’s key-value pairs in place using the comparison
+ /// function `cmp`.
+ ///
+ /// The comparison function receives two key and value pairs to compare (you
+ /// can sort by keys or values or their combination as needed).
+ ///
+ /// Computes in **O(n log n + c)** time and **O(n)** space where *n* is
+ /// the length of the map and *c* the capacity. The sort is stable.
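+ ///
+ /// # Examples
+ ///
+ /// A small sketch of sorting by value, descending:
+ ///
+ /// ```
+ /// use indexmap::IndexMap;
+ ///
+ /// let mut map = IndexMap::from([("b", 2), ("a", 3), ("c", 1)]);
+ /// map.sort_by(|_, v1, _, v2| v2.cmp(v1));
+ /// assert_eq!(map.keys().copied().collect::<Vec<_>>(), ["a", "b", "c"]);
+ /// ```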
+ pub fn sort_by<F>(&mut self, mut cmp: F)
+ where
+ F: FnMut(&K, &V, &K, &V) -> Ordering,
+ {
+ self.with_entries(move |entries| {
+ entries.sort_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value));
+ });
+ }
+
+ /// Sort the key-value pairs of the map and return a by-value iterator of
+ /// the key-value pairs with the result.
+ ///
+ /// The sort is stable.
+ pub fn sorted_by<F>(self, mut cmp: F) -> IntoIter<K, V>
+ where
+ F: FnMut(&K, &V, &K, &V) -> Ordering,
+ {
+ let mut entries = self.into_entries();
+ entries.sort_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value));
+ IntoIter::new(entries)
+ }
+
+ /// Sort the map's key-value pairs by the default ordering of the keys, but
+ /// may not preserve the order of equal elements.
+ ///
+ /// See [`sort_unstable_by`](Self::sort_unstable_by) for details.
+ pub fn sort_unstable_keys(&mut self)
+ where
+ K: Ord,
+ {
+ self.with_entries(move |entries| {
+ entries.sort_unstable_by(move |a, b| K::cmp(&a.key, &b.key));
+ });
+ }
+
+ /// Sort the map's key-value pairs in place using the comparison function `cmp`, but
+ /// may not preserve the order of equal elements.
+ ///
+ /// The comparison function receives two key and value pairs to compare (you
+ /// can sort by keys or values or their combination as needed).
+ ///
+ /// Computes in **O(n log n + c)** time where *n* is
+ /// the length of the map and *c* is the capacity. The sort is unstable.
+ pub fn sort_unstable_by<F>(&mut self, mut cmp: F)
+ where
+ F: FnMut(&K, &V, &K, &V) -> Ordering,
+ {
+ self.with_entries(move |entries| {
+ entries.sort_unstable_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value));
+ });
+ }
+
+ /// Sort the key-value pairs of the map and return a by-value iterator of
+ /// the key-value pairs with the result.
+ ///
+ /// The sort is unstable.
+ #[inline]
+ pub fn sorted_unstable_by<F>(self, mut cmp: F) -> IntoIter<K, V>
+ where
+ F: FnMut(&K, &V, &K, &V) -> Ordering,
+ {
+ let mut entries = self.into_entries();
+ entries.sort_unstable_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value));
+ IntoIter::new(entries)
+ }
+
+ /// Sort the map’s key-value pairs in place using a sort-key extraction function.
+ ///
+ /// During sorting, the function is called at most once per entry, by using temporary storage
+ /// to remember the results of its evaluation. The order of calls to the function is
+ /// unspecified and may change between versions of `indexmap` or the standard library.
+ ///
+ /// Computes in **O(m n + n log n + c)** time and **O(n)** space, where the function is
+ /// **O(m)**, *n* is the length of the map, and *c* the capacity. The sort is stable.
+ pub fn sort_by_cached_key<T, F>(&mut self, mut sort_key: F)
+ where
+ T: Ord,
+ F: FnMut(&K, &V) -> T,
+ {
+ self.with_entries(move |entries| {
+ entries.sort_by_cached_key(move |a| sort_key(&a.key, &a.value));
+ });
+ }
+
+ /// Reverses the order of the map’s key-value pairs in place.
+ ///
+ /// Computes in **O(n)** time and **O(1)** space.
+ pub fn reverse(&mut self) {
+ self.core.reverse()
+ }
+}
+
+impl<K, V, S> IndexMap<K, V, S> {
+ /// Returns a slice of all the key-value pairs in the map.
+ ///
+ /// Computes in **O(1)** time.
+ pub fn as_slice(&self) -> &Slice<K, V> {
+ Slice::from_slice(self.as_entries())
+ }
+
+ /// Returns a mutable slice of all the key-value pairs in the map.
+ ///
+ /// Computes in **O(1)** time.
+ pub fn as_mut_slice(&mut self) -> &mut Slice<K, V> {
+ Slice::from_mut_slice(self.as_entries_mut())
+ }
+
+ /// Converts into a boxed slice of all the key-value pairs in the map.
+ ///
+ /// Note that this will drop the inner hash table and any excess capacity.
+ pub fn into_boxed_slice(self) -> Box<Slice<K, V>> {
+ Slice::from_boxed(self.into_entries().into_boxed_slice())
+ }
+
+ /// Get a key-value pair by index
+ ///
+ /// Valid indices are *0 <= index < self.len()*
+ ///
+ /// Computes in **O(1)** time.
+ pub fn get_index(&self, index: usize) -> Option<(&K, &V)> {
+ self.as_entries().get(index).map(Bucket::refs)
+ }
+
+ /// Get a key-value pair by index
+ ///
+ /// Valid indices are *0 <= index < self.len()*
+ ///
+ /// Computes in **O(1)** time.
+ pub fn get_index_mut(&mut self, index: usize) -> Option<(&K, &mut V)> {
+ self.as_entries_mut().get_mut(index).map(Bucket::ref_mut)
+ }
+
+ /// Returns a slice of key-value pairs in the given range of indices.
+ ///
+ /// Valid indices are *0 <= index < self.len()*
+ ///
+ /// Computes in **O(1)** time.
+ pub fn get_range<R: RangeBounds<usize>>(&self, range: R) -> Option<&Slice<K, V>> {
+ let entries = self.as_entries();
+ let range = try_simplify_range(range, entries.len())?;
+ entries.get(range).map(Slice::from_slice)
+ }
+
+ /// Returns a mutable slice of key-value pairs in the given range of indices.
+ ///
+ /// Valid indices are *0 <= index < self.len()*
+ ///
+ /// Computes in **O(1)** time.
+ pub fn get_range_mut<R: RangeBounds<usize>>(&mut self, range: R) -> Option<&mut Slice<K, V>> {
+ let entries = self.as_entries_mut();
+ let range = try_simplify_range(range, entries.len())?;
+ entries.get_mut(range).map(Slice::from_mut_slice)
+ }
+
+ /// Get the first key-value pair
+ ///
+ /// Computes in **O(1)** time.
+ pub fn first(&self) -> Option<(&K, &V)> {
+ self.as_entries().first().map(Bucket::refs)
+ }
+
+ /// Get the first key-value pair, with mutable access to the value
+ ///
+ /// Computes in **O(1)** time.
+ pub fn first_mut(&mut self) -> Option<(&K, &mut V)> {
+ self.as_entries_mut().first_mut().map(Bucket::ref_mut)
+ }
+
+ /// Get the last key-value pair
+ ///
+ /// Computes in **O(1)** time.
+ pub fn last(&self) -> Option<(&K, &V)> {
+ self.as_entries().last().map(Bucket::refs)
+ }
+
+ /// Get the last key-value pair, with mutable access to the value
+ ///
+ /// Computes in **O(1)** time.
+ pub fn last_mut(&mut self) -> Option<(&K, &mut V)> {
+ self.as_entries_mut().last_mut().map(Bucket::ref_mut)
+ }
+
+ /// Remove the key-value pair by index
+ ///
+ /// Valid indices are *0 <= index < self.len()*
+ ///
+ /// Like `Vec::swap_remove`, the pair is removed by swapping it with the
+ /// last element of the map and popping it off. **This perturbs
+ /// the position of what used to be the last element!**
+ ///
+ /// Computes in **O(1)** time (average).
+ pub fn swap_remove_index(&mut self, index: usize) -> Option<(K, V)> {
+ self.core.swap_remove_index(index)
+ }
+
+ /// Remove the key-value pair by index
+ ///
+ /// Valid indices are *0 <= index < self.len()*
+ ///
+ /// Like `Vec::remove`, the pair is removed by shifting all of the
+ /// elements that follow it, preserving their relative order.
+ /// **This perturbs the index of all of those elements!**
+ ///
+ /// Computes in **O(n)** time (average).
+ pub fn shift_remove_index(&mut self, index: usize) -> Option<(K, V)> {
+ self.core.shift_remove_index(index)
+ }
+
+ /// Moves the position of a key-value pair from one index to another
+ /// by shifting all other pairs in-between.
+ ///
+ /// * If `from < to`, the other pairs will shift down while the targeted pair moves up.
+ /// * If `from > to`, the other pairs will shift up while the targeted pair moves down.
+ ///
+ /// ***Panics*** if `from` or `to` are out of bounds.
+ ///
+ /// Computes in **O(n)** time (average).
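+ ///
+ /// # Examples
+ ///
+ /// A brief illustration:
+ ///
+ /// ```
+ /// use indexmap::IndexMap;
+ ///
+ /// let mut map = IndexMap::from([('a', 1), ('b', 2), ('c', 3)]);
+ /// map.move_index(0, 2);
+ /// assert_eq!(map.keys().copied().collect::<Vec<_>>(), ['b', 'c', 'a']);
+ /// ```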
+ pub fn move_index(&mut self, from: usize, to: usize) {
+ self.core.move_index(from, to)
+ }
+
+ /// Swaps the position of two key-value pairs in the map.
+ ///
+ /// ***Panics*** if `a` or `b` are out of bounds.
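+ ///
+ /// # Examples
+ ///
+ /// For illustration:
+ ///
+ /// ```
+ /// use indexmap::IndexMap;
+ ///
+ /// let mut map = IndexMap::from([('a', 1), ('b', 2), ('c', 3)]);
+ /// map.swap_indices(0, 2);
+ /// assert_eq!(map.keys().copied().collect::<Vec<_>>(), ['c', 'b', 'a']);
+ /// ```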
+ pub fn swap_indices(&mut self, a: usize, b: usize) {
+ self.core.swap_indices(a, b)
+ }
+}
+
+/// Access `IndexMap` values corresponding to a key.
+///
+/// # Examples
+///
+/// ```
+/// use indexmap::IndexMap;
+///
+/// let mut map = IndexMap::new();
+/// for word in "Lorem ipsum dolor sit amet".split_whitespace() {
+/// map.insert(word.to_lowercase(), word.to_uppercase());
+/// }
+/// assert_eq!(map["lorem"], "LOREM");
+/// assert_eq!(map["ipsum"], "IPSUM");
+/// ```
+///
+/// ```should_panic
+/// use indexmap::IndexMap;
+///
+/// let mut map = IndexMap::new();
+/// map.insert("foo", 1);
+/// println!("{:?}", map["bar"]); // panics!
+/// ```
+impl<K, V, Q: ?Sized, S> Index<&Q> for IndexMap<K, V, S>
+where
+ Q: Hash + Equivalent<K>,
+ K: Hash + Eq,
+ S: BuildHasher,
+{
+ type Output = V;
+
+ /// Returns a reference to the value corresponding to the supplied `key`.
+ ///
+ /// ***Panics*** if `key` is not present in the map.
+ fn index(&self, key: &Q) -> &V {
+ self.get(key).expect("IndexMap: key not found")
+ }
+}
+
+/// Access `IndexMap` values corresponding to a key.
+///
+/// Mutable indexing allows changing / updating values of key-value
+/// pairs that are already present.
+///
+/// You can **not** insert new pairs with index syntax, use `.insert()`.
+///
+/// # Examples
+///
+/// ```
+/// use indexmap::IndexMap;
+///
+/// let mut map = IndexMap::new();
+/// for word in "Lorem ipsum dolor sit amet".split_whitespace() {
+/// map.insert(word.to_lowercase(), word.to_string());
+/// }
+/// let lorem = &mut map["lorem"];
+/// assert_eq!(lorem, "Lorem");
+/// lorem.retain(char::is_lowercase);
+/// assert_eq!(map["lorem"], "orem");
+/// ```
+///
+/// ```should_panic
+/// use indexmap::IndexMap;
+///
+/// let mut map = IndexMap::new();
+/// map.insert("foo", 1);
+/// map["bar"] = 1; // panics!
+/// ```
+impl<K, V, Q: ?Sized, S> IndexMut<&Q> for IndexMap<K, V, S>
+where
+ Q: Hash + Equivalent<K>,
+ K: Hash + Eq,
+ S: BuildHasher,
+{
+ /// Returns a mutable reference to the value corresponding to the supplied `key`.
+ ///
+ /// ***Panics*** if `key` is not present in the map.
+ fn index_mut(&mut self, key: &Q) -> &mut V {
+ self.get_mut(key).expect("IndexMap: key not found")
+ }
+}
+
+/// Access `IndexMap` values at indexed positions.
+///
+/// # Examples
+///
+/// ```
+/// use indexmap::IndexMap;
+///
+/// let mut map = IndexMap::new();
+/// for word in "Lorem ipsum dolor sit amet".split_whitespace() {
+/// map.insert(word.to_lowercase(), word.to_uppercase());
+/// }
+/// assert_eq!(map[0], "LOREM");
+/// assert_eq!(map[1], "IPSUM");
+/// map.reverse();
+/// assert_eq!(map[0], "AMET");
+/// assert_eq!(map[1], "SIT");
+/// map.sort_keys();
+/// assert_eq!(map[0], "AMET");
+/// assert_eq!(map[1], "DOLOR");
+/// ```
+///
+/// ```should_panic
+/// use indexmap::IndexMap;
+///
+/// let mut map = IndexMap::new();
+/// map.insert("foo", 1);
+/// println!("{:?}", map[10]); // panics!
+/// ```
+impl<K, V, S> Index<usize> for IndexMap<K, V, S> {
+ type Output = V;
+
+ /// Returns a reference to the value at the supplied `index`.
+ ///
+ /// ***Panics*** if `index` is out of bounds.
+ fn index(&self, index: usize) -> &V {
+ self.get_index(index)
+ .expect("IndexMap: index out of bounds")
+ .1
+ }
+}
+
+/// Access `IndexMap` values at indexed positions.
+///
+/// Mutable indexing allows changing / updating indexed values
+/// that are already present.
+///
+/// You can **not** insert new values with index syntax, use `.insert()`.
+///
+/// # Examples
+///
+/// ```
+/// use indexmap::IndexMap;
+///
+/// let mut map = IndexMap::new();
+/// for word in "Lorem ipsum dolor sit amet".split_whitespace() {
+/// map.insert(word.to_lowercase(), word.to_string());
+/// }
+/// let lorem = &mut map[0];
+/// assert_eq!(lorem, "Lorem");
+/// lorem.retain(char::is_lowercase);
+/// assert_eq!(map["lorem"], "orem");
+/// ```
+///
+/// ```should_panic
+/// use indexmap::IndexMap;
+///
+/// let mut map = IndexMap::new();
+/// map.insert("foo", 1);
+/// map[10] = 1; // panics!
+/// ```
+impl<K, V, S> IndexMut<usize> for IndexMap<K, V, S> {
+ /// Returns a mutable reference to the value at the supplied `index`.
+ ///
+ /// ***Panics*** if `index` is out of bounds.
+ fn index_mut(&mut self, index: usize) -> &mut V {
+ self.get_index_mut(index)
+ .expect("IndexMap: index out of bounds")
+ .1
+ }
+}
+
+impl<K, V, S> FromIterator<(K, V)> for IndexMap<K, V, S>
+where
+ K: Hash + Eq,
+ S: BuildHasher + Default,
+{
+ /// Create an `IndexMap` from the sequence of key-value pairs in the
+ /// iterable.
+ ///
+ /// `from_iter` uses the same logic as `extend`. See
+ /// [`extend`](#method.extend) for more details.
+ fn from_iter<I: IntoIterator<Item = (K, V)>>(iterable: I) -> Self {
+ let iter = iterable.into_iter();
+ let (low, _) = iter.size_hint();
+ let mut map = Self::with_capacity_and_hasher(low, <_>::default());
+ map.extend(iter);
+ map
+ }
+}
+
+#[cfg(feature = "std")]
+#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
+impl<K, V, const N: usize> From<[(K, V); N]> for IndexMap<K, V, RandomState>
+where
+ K: Hash + Eq,
+{
+ /// # Examples
+ ///
+ /// ```
+ /// use indexmap::IndexMap;
+ ///
+ /// let map1 = IndexMap::from([(1, 2), (3, 4)]);
+ /// let map2: IndexMap<_, _> = [(1, 2), (3, 4)].into();
+ /// assert_eq!(map1, map2);
+ /// ```
+ fn from(arr: [(K, V); N]) -> Self {
+ Self::from_iter(arr)
+ }
+}
+
+impl<K, V, S> Extend<(K, V)> for IndexMap<K, V, S>
+where
+ K: Hash + Eq,
+ S: BuildHasher,
+{
+ /// Extend the map with all key-value pairs in the iterable.
+ ///
+ /// This is equivalent to calling [`insert`](#method.insert) for each of
+ /// them in order, which means that for keys that already existed
+ /// in the map, their value is updated but it keeps the existing order.
+ ///
+ /// New keys are inserted in the order they appear in the sequence. If
+ /// equivalents of a key occur more than once, the last corresponding value
+ /// prevails.
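+ ///
+ /// # Examples
+ ///
+ /// A minimal example showing the update-in-place behaviour:
+ ///
+ /// ```
+ /// use indexmap::IndexMap;
+ ///
+ /// let mut map = IndexMap::from([("a", 1)]);
+ /// map.extend([("b", 2), ("a", 3)]);
+ /// // "a" kept its original position but took the new value
+ /// assert_eq!(map.into_iter().collect::<Vec<_>>(), [("a", 3), ("b", 2)]);
+ /// ```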
+ fn extend<I: IntoIterator<Item = (K, V)>>(&mut self, iterable: I) {
+ // (Note: this is a copy of `std`/`hashbrown`'s reservation logic.)
+ // Keys may be already present or show multiple times in the iterator.
+ // Reserve the entire hint lower bound if the map is empty.
+ // Otherwise reserve half the hint (rounded up), so the map
+ // will only resize twice in the worst case.
+ let iter = iterable.into_iter();
+ let reserve = if self.is_empty() {
+ iter.size_hint().0
+ } else {
+ (iter.size_hint().0 + 1) / 2
+ };
+ self.reserve(reserve);
+ iter.for_each(move |(k, v)| {
+ self.insert(k, v);
+ });
+ }
+}
+
+impl<'a, K, V, S> Extend<(&'a K, &'a V)> for IndexMap<K, V, S>
+where
+ K: Hash + Eq + Copy,
+ V: Copy,
+ S: BuildHasher,
+{
+ /// Extend the map with all key-value pairs in the iterable.
+ ///
+ /// See the first extend method for more details.
+ fn extend<I: IntoIterator<Item = (&'a K, &'a V)>>(&mut self, iterable: I) {
+ self.extend(iterable.into_iter().map(|(&key, &value)| (key, value)));
+ }
+}
+
+impl<K, V, S> Default for IndexMap<K, V, S>
+where
+ S: Default,
+{
+ /// Return an empty `IndexMap`
+ fn default() -> Self {
+ Self::with_capacity_and_hasher(0, S::default())
+ }
+}
+
+impl<K, V1, S1, V2, S2> PartialEq<IndexMap<K, V2, S2>> for IndexMap<K, V1, S1>
+where
+ K: Hash + Eq,
+ V1: PartialEq<V2>,
+ S1: BuildHasher,
+ S2: BuildHasher,
+{
+ fn eq(&self, other: &IndexMap<K, V2, S2>) -> bool {
+ if self.len() != other.len() {
+ return false;
+ }
+
+ self.iter()
+ .all(|(key, value)| other.get(key).map_or(false, |v| *value == *v))
+ }
+}
+
+impl<K, V, S> Eq for IndexMap<K, V, S>
+where
+ K: Eq + Hash,
+ V: Eq,
+ S: BuildHasher,
+{
+}
diff --git a/vendor/indexmap/src/map/core.rs b/vendor/indexmap/src/map/core.rs
new file mode 100644
index 0000000..4a78035
--- /dev/null
+++ b/vendor/indexmap/src/map/core.rs
@@ -0,0 +1,762 @@
+//! This is the core implementation that doesn't depend on the hasher at all.
+//!
+//! The methods of `IndexMapCore` don't use any Hash properties of K.
+//!
+//! It's cleaner to separate them out, so the compiler can check that we are
+//! not using Hash at all in these methods.
+//!
+//! However, we should probably not let this show in the public API or docs.
+
+mod raw;
+
+use hashbrown::raw::RawTable;
+
+use crate::vec::{Drain, Vec};
+use crate::TryReserveError;
+use core::fmt;
+use core::mem;
+use core::ops::RangeBounds;
+
+use crate::util::simplify_range;
+use crate::{Bucket, Entries, Equivalent, HashValue};
+
+/// Core of the map that does not depend on S
+pub(crate) struct IndexMapCore<K, V> {
+ /// indices mapping from the entry hash to its index.
+ indices: RawTable<usize>,
+ /// entries is a dense vec of entries in their order.
+ entries: Vec<Bucket<K, V>>,
+}
+
+#[inline(always)]
+fn get_hash<K, V>(entries: &[Bucket<K, V>]) -> impl Fn(&usize) -> u64 + '_ {
+ move |&i| entries[i].hash.get()
+}
+
+#[inline]
+fn equivalent<'a, K, V, Q: ?Sized + Equivalent<K>>(
+ key: &'a Q,
+ entries: &'a [Bucket<K, V>],
+) -> impl Fn(&usize) -> bool + 'a {
+ move |&i| Q::equivalent(key, &entries[i].key)
+}
+
+#[inline]
+fn erase_index(table: &mut RawTable<usize>, hash: HashValue, index: usize) {
+ let erased = table.erase_entry(hash.get(), move |&i| i == index);
+ debug_assert!(erased);
+}
+
+#[inline]
+fn update_index(table: &mut RawTable<usize>, hash: HashValue, old: usize, new: usize) {
+ let index = table
+ .get_mut(hash.get(), move |&i| i == old)
+ .expect("index not found");
+ *index = new;
+}
+
+impl<K, V> Clone for IndexMapCore<K, V>
+where
+ K: Clone,
+ V: Clone,
+{
+ fn clone(&self) -> Self {
+ let mut new = Self::new();
+ new.clone_from(self);
+ new
+ }
+
+ fn clone_from(&mut self, other: &Self) {
+ let hasher = get_hash(&other.entries);
+ self.indices.clone_from_with_hasher(&other.indices, hasher);
+ if self.entries.capacity() < other.entries.len() {
+ // If we must resize, match the indices capacity.
+ let additional = other.entries.len() - self.entries.len();
+ self.reserve_entries(additional);
+ }
+ self.entries.clone_from(&other.entries);
+ }
+}
+
+impl<K, V> fmt::Debug for IndexMapCore<K, V>
+where
+ K: fmt::Debug,
+ V: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("IndexMapCore")
+ .field("indices", &raw::DebugIndices(&self.indices))
+ .field("entries", &self.entries)
+ .finish()
+ }
+}
+
+impl<K, V> Entries for IndexMapCore<K, V> {
+ type Entry = Bucket<K, V>;
+
+ #[inline]
+ fn into_entries(self) -> Vec<Self::Entry> {
+ self.entries
+ }
+
+ #[inline]
+ fn as_entries(&self) -> &[Self::Entry] {
+ &self.entries
+ }
+
+ #[inline]
+ fn as_entries_mut(&mut self) -> &mut [Self::Entry] {
+ &mut self.entries
+ }
+
+ fn with_entries<F>(&mut self, f: F)
+ where
+ F: FnOnce(&mut [Self::Entry]),
+ {
+ f(&mut self.entries);
+ self.rebuild_hash_table();
+ }
+}
+
+impl<K, V> IndexMapCore<K, V> {
+ /// The maximum capacity before the `entries` allocation would exceed `isize::MAX`.
+ const MAX_ENTRIES_CAPACITY: usize = (isize::MAX as usize) / mem::size_of::<Bucket<K, V>>();
+
+ #[inline]
+ pub(crate) const fn new() -> Self {
+ IndexMapCore {
+ indices: RawTable::new(),
+ entries: Vec::new(),
+ }
+ }
+
+ #[inline]
+ pub(crate) fn with_capacity(n: usize) -> Self {
+ IndexMapCore {
+ indices: RawTable::with_capacity(n),
+ entries: Vec::with_capacity(n),
+ }
+ }
+
+ #[inline]
+ pub(crate) fn len(&self) -> usize {
+ self.indices.len()
+ }
+
+ #[inline]
+ pub(crate) fn capacity(&self) -> usize {
+ Ord::min(self.indices.capacity(), self.entries.capacity())
+ }
+
+ pub(crate) fn clear(&mut self) {
+ self.indices.clear();
+ self.entries.clear();
+ }
+
+ pub(crate) fn truncate(&mut self, len: usize) {
+ if len < self.len() {
+ self.erase_indices(len, self.entries.len());
+ self.entries.truncate(len);
+ }
+ }
+
+ pub(crate) fn drain<R>(&mut self, range: R) -> Drain<'_, Bucket<K, V>>
+ where
+ R: RangeBounds<usize>,
+ {
+ let range = simplify_range(range, self.entries.len());
+ self.erase_indices(range.start, range.end);
+ self.entries.drain(range)
+ }
+
+ #[cfg(feature = "rayon")]
+ pub(crate) fn par_drain<R>(&mut self, range: R) -> rayon::vec::Drain<'_, Bucket<K, V>>
+ where
+ K: Send,
+ V: Send,
+ R: RangeBounds<usize>,
+ {
+ use rayon::iter::ParallelDrainRange;
+ let range = simplify_range(range, self.entries.len());
+ self.erase_indices(range.start, range.end);
+ self.entries.par_drain(range)
+ }
+
+ pub(crate) fn split_off(&mut self, at: usize) -> Self {
+ assert!(at <= self.entries.len());
+ self.erase_indices(at, self.entries.len());
+ let entries = self.entries.split_off(at);
+
+ let mut indices = RawTable::with_capacity(entries.len());
+ raw::insert_bulk_no_grow(&mut indices, &entries);
+ Self { indices, entries }
+ }
+
+ /// Reserve capacity for `additional` more key-value pairs.
+ pub(crate) fn reserve(&mut self, additional: usize) {
+ self.indices.reserve(additional, get_hash(&self.entries));
+ // Only grow entries if necessary, since we also round up capacity.
+ if additional > self.entries.capacity() - self.entries.len() {
+ self.reserve_entries(additional);
+ }
+ }
+
+ /// Reserve entries capacity, rounded up to match the indices
+ fn reserve_entries(&mut self, additional: usize) {
+ // Use a soft-limit on the maximum capacity, but if the caller explicitly
+ // requested more, do it and let them have the resulting panic.
+ let new_capacity = Ord::min(self.indices.capacity(), Self::MAX_ENTRIES_CAPACITY);
+ let try_add = new_capacity - self.entries.len();
+ if try_add > additional && self.entries.try_reserve_exact(try_add).is_ok() {
+ return;
+ }
+ self.entries.reserve_exact(additional);
+ }
+
+ /// Reserve capacity for `additional` more key-value pairs, without over-allocating.
+ pub(crate) fn reserve_exact(&mut self, additional: usize) {
+ self.indices.reserve(additional, get_hash(&self.entries));
+ self.entries.reserve_exact(additional);
+ }
+
+ /// Try to reserve capacity for `additional` more key-value pairs.
+ pub(crate) fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
+ self.indices
+ .try_reserve(additional, get_hash(&self.entries))
+ .map_err(TryReserveError::from_hashbrown)?;
+ // Only grow entries if necessary, since we also round up capacity.
+ if additional > self.entries.capacity() - self.entries.len() {
+ self.try_reserve_entries(additional)
+ } else {
+ Ok(())
+ }
+ }
+
+ /// Try to reserve entries capacity, rounded up to match the indices
+ fn try_reserve_entries(&mut self, additional: usize) -> Result<(), TryReserveError> {
+ // Use a soft-limit on the maximum capacity, but if the caller explicitly
+ // requested more, do it and let them have the resulting error.
+ let new_capacity = Ord::min(self.indices.capacity(), Self::MAX_ENTRIES_CAPACITY);
+ let try_add = new_capacity - self.entries.len();
+ if try_add > additional && self.entries.try_reserve_exact(try_add).is_ok() {
+ return Ok(());
+ }
+ self.entries
+ .try_reserve_exact(additional)
+ .map_err(TryReserveError::from_alloc)
+ }
+
+ /// Try to reserve capacity for `additional` more key-value pairs, without over-allocating.
+ pub(crate) fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
+ self.indices
+ .try_reserve(additional, get_hash(&self.entries))
+ .map_err(TryReserveError::from_hashbrown)?;
+ self.entries
+ .try_reserve_exact(additional)
+ .map_err(TryReserveError::from_alloc)
+ }
+
+ /// Shrink the capacity of the map with a lower bound
+ pub(crate) fn shrink_to(&mut self, min_capacity: usize) {
+ self.indices
+ .shrink_to(min_capacity, get_hash(&self.entries));
+ self.entries.shrink_to(min_capacity);
+ }
+
+ /// Remove the last key-value pair
+ pub(crate) fn pop(&mut self) -> Option<(K, V)> {
+ if let Some(entry) = self.entries.pop() {
+ let last = self.entries.len();
+ erase_index(&mut self.indices, entry.hash, last);
+ Some((entry.key, entry.value))
+ } else {
+ None
+ }
+ }
+
+ /// Append a key-value pair to `entries`, *without* checking whether it already exists.
+ fn push_entry(&mut self, hash: HashValue, key: K, value: V) {
+ if self.entries.len() == self.entries.capacity() {
+ // Reserve our own capacity synced to the indices,
+ // rather than letting `Vec::push` just double it.
+ self.reserve_entries(1);
+ }
+ self.entries.push(Bucket { hash, key, value });
+ }
+
+ /// Return the index in `entries` where an equivalent key can be found
+ pub(crate) fn get_index_of<Q>(&self, hash: HashValue, key: &Q) -> Option<usize>
+ where
+ Q: ?Sized + Equivalent<K>,
+ {
+ let eq = equivalent(key, &self.entries);
+ self.indices.get(hash.get(), eq).copied()
+ }
+
+ pub(crate) fn insert_full(&mut self, hash: HashValue, key: K, value: V) -> (usize, Option<V>)
+ where
+ K: Eq,
+ {
+ match self.find_or_insert(hash, &key) {
+ Ok(i) => (i, Some(mem::replace(&mut self.entries[i].value, value))),
+ Err(i) => {
+ debug_assert_eq!(i, self.entries.len());
+ self.push_entry(hash, key, value);
+ (i, None)
+ }
+ }
+ }
+
+ /// Remove an entry by shifting all entries that follow it
+ pub(crate) fn shift_remove_full<Q>(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)>
+ where
+ Q: ?Sized + Equivalent<K>,
+ {
+ let eq = equivalent(key, &self.entries);
+ match self.indices.remove_entry(hash.get(), eq) {
+ Some(index) => {
+ let (key, value) = self.shift_remove_finish(index);
+ Some((index, key, value))
+ }
+ None => None,
+ }
+ }
+
+ /// Remove an entry by shifting all entries that follow it
+ pub(crate) fn shift_remove_index(&mut self, index: usize) -> Option<(K, V)> {
+ match self.entries.get(index) {
+ Some(entry) => {
+ erase_index(&mut self.indices, entry.hash, index);
+ Some(self.shift_remove_finish(index))
+ }
+ None => None,
+ }
+ }
+
+ /// Remove an entry by shifting all entries that follow it
+ ///
+ /// The index should already be removed from `self.indices`.
+ fn shift_remove_finish(&mut self, index: usize) -> (K, V) {
+ // Correct indices that point to the entries that followed the removed entry.
+ self.decrement_indices(index + 1, self.entries.len());
+
+ // Use Vec::remove to actually remove the entry.
+ let entry = self.entries.remove(index);
+ (entry.key, entry.value)
+ }
+
+ /// Decrement all indices in the range `start..end`.
+ ///
+ /// The index `start - 1` should not exist in `self.indices`.
+ /// All entries should still be in their original positions.
+ fn decrement_indices(&mut self, start: usize, end: usize) {
+ // Use a heuristic between a full sweep vs. a `find()` for every shifted item.
+ let shifted_entries = &self.entries[start..end];
+ if shifted_entries.len() > self.indices.buckets() / 2 {
+ // Shift all indices in range.
+ for i in self.indices_mut() {
+ if start <= *i && *i < end {
+ *i -= 1;
+ }
+ }
+ } else {
+ // Find each entry in range to shift its index.
+ for (i, entry) in (start..end).zip(shifted_entries) {
+ update_index(&mut self.indices, entry.hash, i, i - 1);
+ }
+ }
+ }
+
+ /// Increment all indices in the range `start..end`.
+ ///
+ /// The index `end` should not exist in `self.indices`.
+ /// All entries should still be in their original positions.
+ fn increment_indices(&mut self, start: usize, end: usize) {
+ // Use a heuristic between a full sweep vs. a `find()` for every shifted item.
+ let shifted_entries = &self.entries[start..end];
+ if shifted_entries.len() > self.indices.buckets() / 2 {
+ // Shift all indices in range.
+ for i in self.indices_mut() {
+ if start <= *i && *i < end {
+ *i += 1;
+ }
+ }
+ } else {
+ // Find each entry in range to shift its index, updated in reverse so
+ // we never have duplicated indices that might have a hash collision.
+ for (i, entry) in (start..end).zip(shifted_entries).rev() {
+ update_index(&mut self.indices, entry.hash, i, i + 1);
+ }
+ }
+ }
+
+ pub(super) fn move_index(&mut self, from: usize, to: usize) {
+ let from_hash = self.entries[from].hash;
+ if from != to {
+ // Use a sentinel index so other indices don't collide.
+ update_index(&mut self.indices, from_hash, from, usize::MAX);
+
+ // Update all other indices and rotate the entry positions.
+ if from < to {
+ self.decrement_indices(from + 1, to + 1);
+ self.entries[from..=to].rotate_left(1);
+ } else if to < from {
+ self.increment_indices(to, from);
+ self.entries[to..=from].rotate_right(1);
+ }
+
+ // Change the sentinel index to its final position.
+ update_index(&mut self.indices, from_hash, usize::MAX, to);
+ }
+ }
+
+ pub(crate) fn swap_indices(&mut self, a: usize, b: usize) {
+ // If they're equal and in-bounds, there's nothing to do.
+ if a == b && a < self.entries.len() {
+ return;
+ }
+
+ // We'll get a "nice" bounds-check from indexing `self.entries`,
+ // and then we expect to find it in the table as well.
+ let [ref_a, ref_b] = self
+ .indices
+ .get_many_mut(
+ [self.entries[a].hash.get(), self.entries[b].hash.get()],
+ move |i, &x| if i == 0 { x == a } else { x == b },
+ )
+ .expect("indices not found");
+
+ mem::swap(ref_a, ref_b);
+ self.entries.swap(a, b);
+ }
+
+ /// Remove an entry by swapping it with the last
+ pub(crate) fn swap_remove_full<Q>(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)>
+ where
+ Q: ?Sized + Equivalent<K>,
+ {
+ let eq = equivalent(key, &self.entries);
+ match self.indices.remove_entry(hash.get(), eq) {
+ Some(index) => {
+ let (key, value) = self.swap_remove_finish(index);
+ Some((index, key, value))
+ }
+ None => None,
+ }
+ }
+
+ /// Remove an entry by swapping it with the last
+ pub(crate) fn swap_remove_index(&mut self, index: usize) -> Option<(K, V)> {
+ match self.entries.get(index) {
+ Some(entry) => {
+ erase_index(&mut self.indices, entry.hash, index);
+ Some(self.swap_remove_finish(index))
+ }
+ None => None,
+ }
+ }
+
+ /// Finish removing an entry by swapping it with the last
+ ///
+ /// The index should already be removed from `self.indices`.
+ fn swap_remove_finish(&mut self, index: usize) -> (K, V) {
+ // use swap_remove, but then we need to update the index that points
+ // to the other entry that has to move
+ let entry = self.entries.swap_remove(index);
+
+ // correct index that points to the entry that had to swap places
+ if let Some(entry) = self.entries.get(index) {
+ // was not last element
+ // examine new element in `index` and find it in indices
+ let last = self.entries.len();
+ update_index(&mut self.indices, entry.hash, last, index);
+ }
+
+ (entry.key, entry.value)
+ }
+
+ /// Erase `start..end` from `indices`, and shift `end..` indices down to `start..`
+ ///
+ /// All of these items should still be at their original location in `entries`.
+ /// This is used by `drain`, which will let `Vec::drain` do the work on `entries`.
+ fn erase_indices(&mut self, start: usize, end: usize) {
+ let (init, shifted_entries) = self.entries.split_at(end);
+ let (start_entries, erased_entries) = init.split_at(start);
+
+ let erased = erased_entries.len();
+ let shifted = shifted_entries.len();
+ let half_capacity = self.indices.buckets() / 2;
+
+ // Use a heuristic between different strategies
+ if erased == 0 {
+ // Degenerate case, nothing to do
+ } else if start + shifted < half_capacity && start < erased {
+ // Reinsert everything, as there are few kept indices
+ self.indices.clear();
+
+ // Reinsert stable indices, then shifted indices
+ raw::insert_bulk_no_grow(&mut self.indices, start_entries);
+ raw::insert_bulk_no_grow(&mut self.indices, shifted_entries);
+ } else if erased + shifted < half_capacity {
+ // Find each affected index, as there are few to adjust
+
+ // Find erased indices
+ for (i, entry) in (start..).zip(erased_entries) {
+ erase_index(&mut self.indices, entry.hash, i);
+ }
+
+ // Find shifted indices
+ for ((new, old), entry) in (start..).zip(end..).zip(shifted_entries) {
+ update_index(&mut self.indices, entry.hash, old, new);
+ }
+ } else {
+ // Sweep the whole table for adjustments
+ self.erase_indices_sweep(start, end);
+ }
+
+ debug_assert_eq!(self.indices.len(), start + shifted);
+ }
+
+ pub(crate) fn retain_in_order<F>(&mut self, mut keep: F)
+ where
+ F: FnMut(&mut K, &mut V) -> bool,
+ {
+ self.entries
+ .retain_mut(|entry| keep(&mut entry.key, &mut entry.value));
+ if self.entries.len() < self.indices.len() {
+ self.rebuild_hash_table();
+ }
+ }
+
+ fn rebuild_hash_table(&mut self) {
+ self.indices.clear();
+ raw::insert_bulk_no_grow(&mut self.indices, &self.entries);
+ }
+
+ pub(crate) fn reverse(&mut self) {
+ self.entries.reverse();
+
+ // No need to save hash indices, can easily calculate what they should
+ // be, given that this is an in-place reversal.
+ let len = self.entries.len();
+ for i in self.indices_mut() {
+ *i = len - *i - 1;
+ }
+ }
+}
+
+/// Entry for an existing key-value pair or a vacant location to
+/// insert one.
+pub enum Entry<'a, K, V> {
+ /// Existing slot with equivalent key.
+ Occupied(OccupiedEntry<'a, K, V>),
+ /// Vacant slot (no equivalent key in the map).
+ Vacant(VacantEntry<'a, K, V>),
+}
+
+impl<'a, K, V> Entry<'a, K, V> {
+ /// Inserts the given default value in the entry if it is vacant, and returns a mutable
+ /// reference to it. Otherwise returns a mutable reference to the existing value.
+ ///
+ /// Computes in **O(1)** time (amortized average).
+ pub fn or_insert(self, default: V) -> &'a mut V {
+ match self {
+ Entry::Occupied(entry) => entry.into_mut(),
+ Entry::Vacant(entry) => entry.insert(default),
+ }
+ }
+
+ /// Inserts the result of the `call` function in the entry if it is vacant, and returns a mutable
+ /// reference to it. Otherwise returns a mutable reference to the existing value.
+ ///
+ /// Computes in **O(1)** time (amortized average).
+ pub fn or_insert_with<F>(self, call: F) -> &'a mut V
+ where
+ F: FnOnce() -> V,
+ {
+ match self {
+ Entry::Occupied(entry) => entry.into_mut(),
+ Entry::Vacant(entry) => entry.insert(call()),
+ }
+ }
+
+ /// Inserts the result of the `call` function with a reference to the entry's key if it is
+ /// vacant, and returns a mutable reference to the new value. Otherwise returns a mutable
+ /// reference to the existing value.
+ ///
+ /// Computes in **O(1)** time (amortized average).
+ pub fn or_insert_with_key<F>(self, call: F) -> &'a mut V
+ where
+ F: FnOnce(&K) -> V,
+ {
+ match self {
+ Entry::Occupied(entry) => entry.into_mut(),
+ Entry::Vacant(entry) => {
+ let value = call(&entry.key);
+ entry.insert(value)
+ }
+ }
+ }
+
+ /// Gets a reference to the entry's key, either within the map if occupied,
+ /// or else the new key that was used to find the entry.
+ pub fn key(&self) -> &K {
+ match *self {
+ Entry::Occupied(ref entry) => entry.key(),
+ Entry::Vacant(ref entry) => entry.key(),
+ }
+ }
+
+ /// Return the index where the key-value pair exists or will be inserted.
+ pub fn index(&self) -> usize {
+ match *self {
+ Entry::Occupied(ref entry) => entry.index(),
+ Entry::Vacant(ref entry) => entry.index(),
+ }
+ }
+
+ /// Modifies the entry if it is occupied.
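+ ///
+ /// # Examples
+ ///
+ /// A short sketch combining `and_modify` with `or_insert`:
+ ///
+ /// ```
+ /// use indexmap::IndexMap;
+ ///
+ /// let mut map = IndexMap::from([("a", 1)]);
+ /// map.entry("a").and_modify(|v| *v += 10).or_insert(0);
+ /// map.entry("b").and_modify(|v| *v += 10).or_insert(0);
+ /// assert_eq!(map["a"], 11);
+ /// assert_eq!(map["b"], 0);
+ /// ```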
+ pub fn and_modify<F>(self, f: F) -> Self
+ where
+ F: FnOnce(&mut V),
+ {
+ match self {
+ Entry::Occupied(mut o) => {
+ f(o.get_mut());
+ Entry::Occupied(o)
+ }
+ x => x,
+ }
+ }
+
+ /// Inserts a default-constructed value in the entry if it is vacant, and returns a mutable
+ /// reference to it. Otherwise returns a mutable reference to the existing value.
+ ///
+ /// Computes in **O(1)** time (amortized average).
+ pub fn or_default(self) -> &'a mut V
+ where
+ V: Default,
+ {
+ match self {
+ Entry::Occupied(entry) => entry.into_mut(),
+ Entry::Vacant(entry) => entry.insert(V::default()),
+ }
+ }
+}
+
+impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for Entry<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ Entry::Vacant(ref v) => f.debug_tuple(stringify!(Entry)).field(v).finish(),
+ Entry::Occupied(ref o) => f.debug_tuple(stringify!(Entry)).field(o).finish(),
+ }
+ }
+}
+
+pub use self::raw::OccupiedEntry;
+
+// Extra methods that don't threaten the unsafe encapsulation.
+impl<K, V> OccupiedEntry<'_, K, V> {
+ /// Sets the value of the entry to `value`, and returns the entry's old value.
+ pub fn insert(&mut self, value: V) -> V {
+ mem::replace(self.get_mut(), value)
+ }
+
+ /// Remove the key, value pair stored in the map for this entry, and return the value.
+ ///
+ /// **NOTE:** This is equivalent to `.swap_remove()`.
+ pub fn remove(self) -> V {
+ self.swap_remove()
+ }
+
+ /// Remove the key, value pair stored in the map for this entry, and return the value.
+ ///
+ /// Like `Vec::swap_remove`, the pair is removed by swapping it with the
+ /// last element of the map and popping it off. **This perturbs
+ /// the position of what used to be the last element!**
+ ///
+ /// Computes in **O(1)** time (average).
+ pub fn swap_remove(self) -> V {
+ self.swap_remove_entry().1
+ }
+
+ /// Remove the key, value pair stored in the map for this entry, and return the value.
+ ///
+ /// Like `Vec::remove`, the pair is removed by shifting all of the
+ /// elements that follow it, preserving their relative order.
+ /// **This perturbs the index of all of those elements!**
+ ///
+ /// Computes in **O(n)** time (average).
+ pub fn shift_remove(self) -> V {
+ self.shift_remove_entry().1
+ }
+
+ /// Remove and return the key, value pair stored in the map for this entry
+ ///
+ /// **NOTE:** This is equivalent to `.swap_remove_entry()`.
+ pub fn remove_entry(self) -> (K, V) {
+ self.swap_remove_entry()
+ }
+}
+
+impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for OccupiedEntry<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct(stringify!(OccupiedEntry))
+ .field("key", self.key())
+ .field("value", self.get())
+ .finish()
+ }
+}
+
+/// A view into a vacant entry in an `IndexMap`.
+/// It is part of the [`Entry`] enum.
+///
+/// [`Entry`]: enum.Entry.html
+pub struct VacantEntry<'a, K, V> {
+ map: &'a mut IndexMapCore<K, V>,
+ hash: HashValue,
+ key: K,
+}
+
+impl<'a, K, V> VacantEntry<'a, K, V> {
+ /// Gets a reference to the key that was used to find the entry.
+ pub fn key(&self) -> &K {
+ &self.key
+ }
+
+ /// Takes ownership of the key, leaving the entry vacant.
+ pub fn into_key(self) -> K {
+ self.key
+ }
+
+ /// Return the index where the key-value pair will be inserted.
+ pub fn index(&self) -> usize {
+ self.map.indices.len()
+ }
+
+ /// Inserts the entry's key and the given value into the map, and returns a mutable reference
+ /// to the value.
+ pub fn insert(self, value: V) -> &'a mut V {
+ let i = self.index();
+ let Self { map, hash, key } = self;
+ map.indices.insert(hash.get(), i, get_hash(&map.entries));
+ debug_assert_eq!(i, map.entries.len());
+ map.push_entry(hash, key, value);
+ &mut map.entries[i].value
+ }
+}
+
+impl<K: fmt::Debug, V> fmt::Debug for VacantEntry<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple(stringify!(VacantEntry))
+ .field(self.key())
+ .finish()
+ }
+}
+
+#[test]
+fn assert_send_sync() {
+ fn assert_send_sync<T: Send + Sync>() {}
+ assert_send_sync::<IndexMapCore<i32, i32>>();
+ assert_send_sync::<Entry<'_, i32, i32>>();
+}
diff --git a/vendor/indexmap/src/map/core/raw.rs b/vendor/indexmap/src/map/core/raw.rs
new file mode 100644
index 0000000..be71c9c
--- /dev/null
+++ b/vendor/indexmap/src/map/core/raw.rs
@@ -0,0 +1,194 @@
+#![allow(unsafe_code)]
+//! This module encapsulates the `unsafe` access to `hashbrown::raw::RawTable`,
+//! mostly in dealing with its bucket "pointers".
+
+use super::{equivalent, get_hash, Bucket, Entry, HashValue, IndexMapCore, VacantEntry};
+use core::fmt;
+use core::mem::replace;
+use hashbrown::raw::RawTable;
+
+type RawBucket = hashbrown::raw::Bucket<usize>;
+
+/// Inserts many entries into a raw table without reallocating.
+///
+/// ***Panics*** if there is not sufficient capacity already.
+pub(super) fn insert_bulk_no_grow<K, V>(indices: &mut RawTable<usize>, entries: &[Bucket<K, V>]) {
+ assert!(indices.capacity() - indices.len() >= entries.len());
+ for entry in entries {
+ // SAFETY: we asserted that sufficient capacity exists for all entries.
+ unsafe {
+ indices.insert_no_grow(entry.hash.get(), indices.len());
+ }
+ }
+}
+
+pub(super) struct DebugIndices<'a>(pub &'a RawTable<usize>);
+impl fmt::Debug for DebugIndices<'_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // SAFETY: we're not letting any of the buckets escape this function
+ let indices = unsafe { self.0.iter().map(|raw_bucket| *raw_bucket.as_ref()) };
+ f.debug_list().entries(indices).finish()
+ }
+}
+
+impl<K, V> IndexMapCore<K, V> {
+ /// Sweep the whole table to erase indices start..end
+ pub(super) fn erase_indices_sweep(&mut self, start: usize, end: usize) {
+ // SAFETY: we're not letting any of the buckets escape this function
+ unsafe {
+ let offset = end - start;
+ for bucket in self.indices.iter() {
+ let i = bucket.as_mut();
+ if *i >= end {
+ *i -= offset;
+ } else if *i >= start {
+ self.indices.erase(bucket);
+ }
+ }
+ }
+ }
+
+ /// Search for a key in the table and return `Ok(entry_index)` if found.
+ /// Otherwise, insert the key and return `Err(new_index)`.
+ ///
+ /// Note that hashbrown may resize the table to reserve space for insertion,
+ /// even before checking if it's already present, so this is somewhat biased
+ /// towards new items.
+ pub(crate) fn find_or_insert(&mut self, hash: HashValue, key: &K) -> Result<usize, usize>
+ where
+ K: Eq,
+ {
+ let hash = hash.get();
+ let eq = equivalent(key, &self.entries);
+ let hasher = get_hash(&self.entries);
+ // SAFETY: We're not mutating between find and read/insert.
+ unsafe {
+ match self.indices.find_or_find_insert_slot(hash, eq, hasher) {
+ Ok(raw_bucket) => Ok(*raw_bucket.as_ref()),
+ Err(slot) => {
+ let index = self.indices.len();
+ self.indices.insert_in_slot(hash, slot, index);
+ Err(index)
+ }
+ }
+ }
+ }
+
+ pub(crate) fn entry(&mut self, hash: HashValue, key: K) -> Entry<'_, K, V>
+ where
+ K: Eq,
+ {
+ let eq = equivalent(&key, &self.entries);
+ match self.indices.find(hash.get(), eq) {
+ // SAFETY: The entry is created with a live raw bucket, at the same time
+ // we have a &mut reference to the map, so it can not be modified further.
+ Some(raw_bucket) => Entry::Occupied(OccupiedEntry {
+ map: self,
+ raw_bucket,
+ key,
+ }),
+ None => Entry::Vacant(VacantEntry {
+ map: self,
+ hash,
+ key,
+ }),
+ }
+ }
+
+ pub(super) fn indices_mut(&mut self) -> impl Iterator<Item = &mut usize> {
+ // SAFETY: we're not letting any of the buckets escape this function,
+ // only the item references that are appropriately bound to `&mut self`.
+ unsafe { self.indices.iter().map(|bucket| bucket.as_mut()) }
+ }
+}
+
+/// A view into an occupied entry in an `IndexMap`.
+/// It is part of the [`Entry`] enum.
+///
+/// [`Entry`]: enum.Entry.html
+// SAFETY: The lifetime of the map reference also constrains the raw bucket,
+// which is essentially a raw pointer into the map indices.
+pub struct OccupiedEntry<'a, K, V> {
+ map: &'a mut IndexMapCore<K, V>,
+ raw_bucket: RawBucket,
+ key: K,
+}
+
+// `hashbrown::raw::Bucket` is only `Send`, not `Sync`.
+// SAFETY: `&self` only accesses the bucket to read it.
+unsafe impl<K: Sync, V: Sync> Sync for OccupiedEntry<'_, K, V> {}
+
+// The parent module also adds methods that don't threaten the unsafe encapsulation.
+impl<'a, K, V> OccupiedEntry<'a, K, V> {
+ /// Gets a reference to the entry's key in the map.
+ ///
+ /// Note that this is not the key that was used to find the entry. There may be an observable
+ /// difference if the key type has any distinguishing features outside of `Hash` and `Eq`, like
+ /// extra fields or the memory address of an allocation.
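+ ///
+ /// A minimal illustrative sketch (assuming the public `indexmap::map::Entry` API):
+ ///
+ /// ```
+ /// use indexmap::IndexMap;
+ /// use indexmap::map::Entry;
+ ///
+ /// let mut map = IndexMap::new();
+ /// map.insert("stored", 1);
+ /// if let Entry::Occupied(entry) = map.entry("stored") {
+ ///     // `key` returns a reference to the key already stored in the map.
+ ///     assert_eq!(*entry.key(), "stored");
+ /// }
+ /// ```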
+ pub fn key(&self) -> &K {
+ &self.map.entries[self.index()].key
+ }
+
+ /// Gets a reference to the entry's value in the map.
+ pub fn get(&self) -> &V {
+ &self.map.entries[self.index()].value
+ }
+
+ /// Gets a mutable reference to the entry's value in the map.
+ ///
+ /// If you need a reference which may outlive the destruction of the
+ /// `Entry` value, see `into_mut`.
+ pub fn get_mut(&mut self) -> &mut V {
+ let index = self.index();
+ &mut self.map.entries[index].value
+ }
+
+ /// Put the new key in the occupied entry's key slot, returning the old key.
+ pub(crate) fn replace_key(self) -> K {
+ let index = self.index();
+ let old_key = &mut self.map.entries[index].key;
+ replace(old_key, self.key)
+ }
+
+ /// Return the index of the key-value pair
+ #[inline]
+ pub fn index(&self) -> usize {
+ // SAFETY: we have &mut map, keeping the bucket stable
+ unsafe { *self.raw_bucket.as_ref() }
+ }
+
+ /// Converts into a mutable reference to the entry's value in the map,
+ /// with a lifetime bound to the map itself.
+ pub fn into_mut(self) -> &'a mut V {
+ let index = self.index();
+ &mut self.map.entries[index].value
+ }
+
+ /// Remove and return the key, value pair stored in the map for this entry
+ ///
+ /// Like `Vec::swap_remove`, the pair is removed by swapping it with the
+ /// last element of the map and popping it off. **This perturbs
+ /// the position of what used to be the last element!**
+ ///
+ /// Computes in **O(1)** time (average).
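+ ///
+ /// A brief illustrative sketch (assuming the public `Entry` API):
+ ///
+ /// ```
+ /// use indexmap::IndexMap;
+ /// use indexmap::map::Entry;
+ ///
+ /// let mut map: IndexMap<_, _> = [("a", 1), ("b", 2), ("c", 3)].into_iter().collect();
+ /// if let Entry::Occupied(entry) = map.entry("a") {
+ ///     assert_eq!(entry.swap_remove_entry(), ("a", 1));
+ /// }
+ /// // The last entry, "c", was swapped into the removed slot.
+ /// assert_eq!(map.keys().copied().collect::<Vec<_>>(), ["c", "b"]);
+ /// ```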
+ pub fn swap_remove_entry(self) -> (K, V) {
+ // SAFETY: This is safe because it can only happen once (self is consumed)
+ // and map.indices has not been modified since entry construction
+ let (index, _slot) = unsafe { self.map.indices.remove(self.raw_bucket) };
+ self.map.swap_remove_finish(index)
+ }
+
+ /// Remove and return the key, value pair stored in the map for this entry
+ ///
+ /// Like `Vec::remove`, the pair is removed by shifting all of the
+ /// elements that follow it, preserving their relative order.
+ /// **This perturbs the index of all of those elements!**
+ ///
+ /// Computes in **O(n)** time (average).
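+ ///
+ /// A brief illustrative sketch (assuming the public `Entry` API):
+ ///
+ /// ```
+ /// use indexmap::IndexMap;
+ /// use indexmap::map::Entry;
+ ///
+ /// let mut map: IndexMap<_, _> = [("a", 1), ("b", 2), ("c", 3)].into_iter().collect();
+ /// if let Entry::Occupied(entry) = map.entry("a") {
+ ///     assert_eq!(entry.shift_remove_entry(), ("a", 1));
+ /// }
+ /// // The remaining entries keep their relative order.
+ /// assert_eq!(map.keys().copied().collect::<Vec<_>>(), ["b", "c"]);
+ /// ```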
+ pub fn shift_remove_entry(self) -> (K, V) {
+ // SAFETY: This is safe because it can only happen once (self is consumed)
+ // and map.indices has not been modified since entry construction
+ let (index, _slot) = unsafe { self.map.indices.remove(self.raw_bucket) };
+ self.map.shift_remove_finish(index)
+ }
+}
diff --git a/vendor/indexmap/src/map/iter.rs b/vendor/indexmap/src/map/iter.rs
new file mode 100644
index 0000000..db6e140
--- /dev/null
+++ b/vendor/indexmap/src/map/iter.rs
@@ -0,0 +1,541 @@
+use super::{Bucket, Entries, IndexMap, Slice};
+
+use alloc::vec::{self, Vec};
+use core::fmt;
+use core::iter::FusedIterator;
+use core::slice;
+
+impl<'a, K, V, S> IntoIterator for &'a IndexMap<K, V, S> {
+ type Item = (&'a K, &'a V);
+ type IntoIter = Iter<'a, K, V>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.iter()
+ }
+}
+
+impl<'a, K, V, S> IntoIterator for &'a mut IndexMap<K, V, S> {
+ type Item = (&'a K, &'a mut V);
+ type IntoIter = IterMut<'a, K, V>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.iter_mut()
+ }
+}
+
+impl<K, V, S> IntoIterator for IndexMap<K, V, S> {
+ type Item = (K, V);
+ type IntoIter = IntoIter<K, V>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ IntoIter::new(self.into_entries())
+ }
+}
+
+/// An iterator over the entries of an `IndexMap`.
+///
+/// This `struct` is created by the [`iter`] method on [`IndexMap`]. See its
+/// documentation for more.
+///
+/// [`iter`]: struct.IndexMap.html#method.iter
+/// [`IndexMap`]: struct.IndexMap.html
+pub struct Iter<'a, K, V> {
+ iter: slice::Iter<'a, Bucket<K, V>>,
+}
+
+impl<'a, K, V> Iter<'a, K, V> {
+ pub(super) fn new(entries: &'a [Bucket<K, V>]) -> Self {
+ Self {
+ iter: entries.iter(),
+ }
+ }
+
+ /// Returns a slice of the remaining entries in the iterator.
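+ ///
+ /// A small illustrative sketch:
+ ///
+ /// ```
+ /// use indexmap::IndexMap;
+ ///
+ /// let map: IndexMap<_, _> = [(1, 'a'), (2, 'b'), (3, 'c')].into_iter().collect();
+ /// let mut iter = map.iter();
+ /// iter.next();
+ /// // The slice covers only the entries not yet yielded.
+ /// assert_eq!(iter.as_slice().keys().copied().collect::<Vec<_>>(), [2, 3]);
+ /// ```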
+ pub fn as_slice(&self) -> &'a Slice<K, V> {
+ Slice::from_slice(self.iter.as_slice())
+ }
+}
+
+impl<'a, K, V> Iterator for Iter<'a, K, V> {
+ type Item = (&'a K, &'a V);
+
+ iterator_methods!(Bucket::refs);
+}
+
+impl<K, V> DoubleEndedIterator for Iter<'_, K, V> {
+ double_ended_iterator_methods!(Bucket::refs);
+}
+
+impl<K, V> ExactSizeIterator for Iter<'_, K, V> {
+ fn len(&self) -> usize {
+ self.iter.len()
+ }
+}
+
+impl<K, V> FusedIterator for Iter<'_, K, V> {}
+
+// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
+impl<K, V> Clone for Iter<'_, K, V> {
+ fn clone(&self) -> Self {
+ Iter {
+ iter: self.iter.clone(),
+ }
+ }
+}
+
+impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for Iter<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.clone()).finish()
+ }
+}
+
+impl<K, V> Default for Iter<'_, K, V> {
+ fn default() -> Self {
+ Self { iter: [].iter() }
+ }
+}
+
+/// A mutable iterator over the entries of an `IndexMap`.
+///
+/// This `struct` is created by the [`iter_mut`] method on [`IndexMap`]. See its
+/// documentation for more.
+///
+/// [`iter_mut`]: struct.IndexMap.html#method.iter_mut
+/// [`IndexMap`]: struct.IndexMap.html
+pub struct IterMut<'a, K, V> {
+ iter: slice::IterMut<'a, Bucket<K, V>>,
+}
+
+impl<'a, K, V> IterMut<'a, K, V> {
+ pub(super) fn new(entries: &'a mut [Bucket<K, V>]) -> Self {
+ Self {
+ iter: entries.iter_mut(),
+ }
+ }
+
+ /// Returns a slice of the remaining entries in the iterator.
+ pub fn as_slice(&self) -> &Slice<K, V> {
+ Slice::from_slice(self.iter.as_slice())
+ }
+
+ /// Returns a mutable slice of the remaining entries in the iterator.
+ ///
+ /// To avoid creating `&mut` references that alias, this is forced to consume the iterator.
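+ ///
+ /// A small illustrative sketch:
+ ///
+ /// ```
+ /// use indexmap::IndexMap;
+ ///
+ /// let mut map: IndexMap<_, _> = [(1, 10), (2, 20)].into_iter().collect();
+ /// let mut iter = map.iter_mut();
+ /// iter.next();
+ /// let rest = iter.into_slice(); // consumes the iterator
+ /// rest[0] += 1;                 // mutates the value of the entry with key 2
+ /// assert_eq!(map[&2], 21);
+ /// ```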
+ pub fn into_slice(self) -> &'a mut Slice<K, V> {
+ Slice::from_mut_slice(self.iter.into_slice())
+ }
+}
+
+impl<'a, K, V> Iterator for IterMut<'a, K, V> {
+ type Item = (&'a K, &'a mut V);
+
+ iterator_methods!(Bucket::ref_mut);
+}
+
+impl<K, V> DoubleEndedIterator for IterMut<'_, K, V> {
+ double_ended_iterator_methods!(Bucket::ref_mut);
+}
+
+impl<K, V> ExactSizeIterator for IterMut<'_, K, V> {
+ fn len(&self) -> usize {
+ self.iter.len()
+ }
+}
+
+impl<K, V> FusedIterator for IterMut<'_, K, V> {}
+
+impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for IterMut<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let iter = self.iter.as_slice().iter().map(Bucket::refs);
+ f.debug_list().entries(iter).finish()
+ }
+}
+
+impl<K, V> Default for IterMut<'_, K, V> {
+ fn default() -> Self {
+ Self {
+ iter: [].iter_mut(),
+ }
+ }
+}
+
+/// An owning iterator over the entries of an `IndexMap`.
+///
+/// This `struct` is created by the [`into_iter`] method on [`IndexMap`]
+/// (provided by the `IntoIterator` trait). See its documentation for more.
+///
+/// [`into_iter`]: struct.IndexMap.html#method.into_iter
+/// [`IndexMap`]: struct.IndexMap.html
+pub struct IntoIter<K, V> {
+ iter: vec::IntoIter<Bucket<K, V>>,
+}
+
+impl<K, V> IntoIter<K, V> {
+ pub(super) fn new(entries: Vec<Bucket<K, V>>) -> Self {
+ Self {
+ iter: entries.into_iter(),
+ }
+ }
+
+ /// Returns a slice of the remaining entries in the iterator.
+ pub fn as_slice(&self) -> &Slice<K, V> {
+ Slice::from_slice(self.iter.as_slice())
+ }
+
+ /// Returns a mutable slice of the remaining entries in the iterator.
+ pub fn as_mut_slice(&mut self) -> &mut Slice<K, V> {
+ Slice::from_mut_slice(self.iter.as_mut_slice())
+ }
+}
+
+impl<K, V> Iterator for IntoIter<K, V> {
+ type Item = (K, V);
+
+ iterator_methods!(Bucket::key_value);
+}
+
+impl<K, V> DoubleEndedIterator for IntoIter<K, V> {
+ double_ended_iterator_methods!(Bucket::key_value);
+}
+
+impl<K, V> ExactSizeIterator for IntoIter<K, V> {
+ fn len(&self) -> usize {
+ self.iter.len()
+ }
+}
+
+impl<K, V> FusedIterator for IntoIter<K, V> {}
+
+impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for IntoIter<K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let iter = self.iter.as_slice().iter().map(Bucket::refs);
+ f.debug_list().entries(iter).finish()
+ }
+}
+
+impl<K, V> Default for IntoIter<K, V> {
+ fn default() -> Self {
+ Self {
+ iter: Vec::new().into_iter(),
+ }
+ }
+}
+
+/// A draining iterator over the entries of an `IndexMap`.
+///
+/// This `struct` is created by the [`drain`] method on [`IndexMap`]. See its
+/// documentation for more.
+///
+/// [`drain`]: struct.IndexMap.html#method.drain
+/// [`IndexMap`]: struct.IndexMap.html
+pub struct Drain<'a, K, V> {
+ iter: vec::Drain<'a, Bucket<K, V>>,
+}
+
+impl<'a, K, V> Drain<'a, K, V> {
+ pub(super) fn new(iter: vec::Drain<'a, Bucket<K, V>>) -> Self {
+ Self { iter }
+ }
+
+ /// Returns a slice of the remaining entries in the iterator.
+ pub fn as_slice(&self) -> &Slice<K, V> {
+ Slice::from_slice(self.iter.as_slice())
+ }
+}
+
+impl<K, V> Iterator for Drain<'_, K, V> {
+ type Item = (K, V);
+
+ iterator_methods!(Bucket::key_value);
+}
+
+impl<K, V> DoubleEndedIterator for Drain<'_, K, V> {
+ double_ended_iterator_methods!(Bucket::key_value);
+}
+
+impl<K, V> ExactSizeIterator for Drain<'_, K, V> {
+ fn len(&self) -> usize {
+ self.iter.len()
+ }
+}
+
+impl<K, V> FusedIterator for Drain<'_, K, V> {}
+
+impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for Drain<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let iter = self.iter.as_slice().iter().map(Bucket::refs);
+ f.debug_list().entries(iter).finish()
+ }
+}
+
+/// An iterator over the keys of an `IndexMap`.
+///
+/// This `struct` is created by the [`keys`] method on [`IndexMap`]. See its
+/// documentation for more.
+///
+/// [`keys`]: struct.IndexMap.html#method.keys
+/// [`IndexMap`]: struct.IndexMap.html
+pub struct Keys<'a, K, V> {
+ iter: slice::Iter<'a, Bucket<K, V>>,
+}
+
+impl<'a, K, V> Keys<'a, K, V> {
+ pub(super) fn new(entries: &'a [Bucket<K, V>]) -> Self {
+ Self {
+ iter: entries.iter(),
+ }
+ }
+}
+
+impl<'a, K, V> Iterator for Keys<'a, K, V> {
+ type Item = &'a K;
+
+ iterator_methods!(Bucket::key_ref);
+}
+
+impl<K, V> DoubleEndedIterator for Keys<'_, K, V> {
+ double_ended_iterator_methods!(Bucket::key_ref);
+}
+
+impl<K, V> ExactSizeIterator for Keys<'_, K, V> {
+ fn len(&self) -> usize {
+ self.iter.len()
+ }
+}
+
+impl<K, V> FusedIterator for Keys<'_, K, V> {}
+
+// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
+impl<K, V> Clone for Keys<'_, K, V> {
+ fn clone(&self) -> Self {
+ Keys {
+ iter: self.iter.clone(),
+ }
+ }
+}
+
+impl<K: fmt::Debug, V> fmt::Debug for Keys<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.clone()).finish()
+ }
+}
+
+impl<K, V> Default for Keys<'_, K, V> {
+ fn default() -> Self {
+ Self { iter: [].iter() }
+ }
+}
+
+/// An owning iterator over the keys of an `IndexMap`.
+///
+/// This `struct` is created by the [`into_keys`] method on [`IndexMap`].
+/// See its documentation for more.
+///
+/// [`IndexMap`]: struct.IndexMap.html
+/// [`into_keys`]: struct.IndexMap.html#method.into_keys
+pub struct IntoKeys<K, V> {
+ iter: vec::IntoIter<Bucket<K, V>>,
+}
+
+impl<K, V> IntoKeys<K, V> {
+ pub(super) fn new(entries: Vec<Bucket<K, V>>) -> Self {
+ Self {
+ iter: entries.into_iter(),
+ }
+ }
+}
+
+impl<K, V> Iterator for IntoKeys<K, V> {
+ type Item = K;
+
+ iterator_methods!(Bucket::key);
+}
+
+impl<K, V> DoubleEndedIterator for IntoKeys<K, V> {
+ double_ended_iterator_methods!(Bucket::key);
+}
+
+impl<K, V> ExactSizeIterator for IntoKeys<K, V> {
+ fn len(&self) -> usize {
+ self.iter.len()
+ }
+}
+
+impl<K, V> FusedIterator for IntoKeys<K, V> {}
+
+impl<K: fmt::Debug, V> fmt::Debug for IntoKeys<K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let iter = self.iter.as_slice().iter().map(Bucket::key_ref);
+ f.debug_list().entries(iter).finish()
+ }
+}
+
+impl<K, V> Default for IntoKeys<K, V> {
+ fn default() -> Self {
+ Self {
+ iter: Vec::new().into_iter(),
+ }
+ }
+}
+
+/// An iterator over the values of an `IndexMap`.
+///
+/// This `struct` is created by the [`values`] method on [`IndexMap`]. See its
+/// documentation for more.
+///
+/// [`values`]: struct.IndexMap.html#method.values
+/// [`IndexMap`]: struct.IndexMap.html
+pub struct Values<'a, K, V> {
+ iter: slice::Iter<'a, Bucket<K, V>>,
+}
+
+impl<'a, K, V> Values<'a, K, V> {
+ pub(super) fn new(entries: &'a [Bucket<K, V>]) -> Self {
+ Self {
+ iter: entries.iter(),
+ }
+ }
+}
+
+impl<'a, K, V> Iterator for Values<'a, K, V> {
+ type Item = &'a V;
+
+ iterator_methods!(Bucket::value_ref);
+}
+
+impl<K, V> DoubleEndedIterator for Values<'_, K, V> {
+ double_ended_iterator_methods!(Bucket::value_ref);
+}
+
+impl<K, V> ExactSizeIterator for Values<'_, K, V> {
+ fn len(&self) -> usize {
+ self.iter.len()
+ }
+}
+
+impl<K, V> FusedIterator for Values<'_, K, V> {}
+
+// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
+impl<K, V> Clone for Values<'_, K, V> {
+ fn clone(&self) -> Self {
+ Values {
+ iter: self.iter.clone(),
+ }
+ }
+}
+
+impl<K, V: fmt::Debug> fmt::Debug for Values<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.clone()).finish()
+ }
+}
+
+impl<K, V> Default for Values<'_, K, V> {
+ fn default() -> Self {
+ Self { iter: [].iter() }
+ }
+}
+
+/// A mutable iterator over the values of an `IndexMap`.
+///
+/// This `struct` is created by the [`values_mut`] method on [`IndexMap`]. See its
+/// documentation for more.
+///
+/// [`values_mut`]: struct.IndexMap.html#method.values_mut
+/// [`IndexMap`]: struct.IndexMap.html
+pub struct ValuesMut<'a, K, V> {
+ iter: slice::IterMut<'a, Bucket<K, V>>,
+}
+
+impl<'a, K, V> ValuesMut<'a, K, V> {
+ pub(super) fn new(entries: &'a mut [Bucket<K, V>]) -> Self {
+ Self {
+ iter: entries.iter_mut(),
+ }
+ }
+}
+
+impl<'a, K, V> Iterator for ValuesMut<'a, K, V> {
+ type Item = &'a mut V;
+
+ iterator_methods!(Bucket::value_mut);
+}
+
+impl<K, V> DoubleEndedIterator for ValuesMut<'_, K, V> {
+ double_ended_iterator_methods!(Bucket::value_mut);
+}
+
+impl<K, V> ExactSizeIterator for ValuesMut<'_, K, V> {
+ fn len(&self) -> usize {
+ self.iter.len()
+ }
+}
+
+impl<K, V> FusedIterator for ValuesMut<'_, K, V> {}
+
+impl<K, V: fmt::Debug> fmt::Debug for ValuesMut<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let iter = self.iter.as_slice().iter().map(Bucket::value_ref);
+ f.debug_list().entries(iter).finish()
+ }
+}
+
+impl<K, V> Default for ValuesMut<'_, K, V> {
+ fn default() -> Self {
+ Self {
+ iter: [].iter_mut(),
+ }
+ }
+}
+
+/// An owning iterator over the values of an `IndexMap`.
+///
+/// This `struct` is created by the [`into_values`] method on [`IndexMap`].
+/// See its documentation for more.
+///
+/// [`IndexMap`]: struct.IndexMap.html
+/// [`into_values`]: struct.IndexMap.html#method.into_values
+pub struct IntoValues<K, V> {
+ iter: vec::IntoIter<Bucket<K, V>>,
+}
+
+impl<K, V> IntoValues<K, V> {
+ pub(super) fn new(entries: Vec<Bucket<K, V>>) -> Self {
+ Self {
+ iter: entries.into_iter(),
+ }
+ }
+}
+
+impl<K, V> Iterator for IntoValues<K, V> {
+ type Item = V;
+
+ iterator_methods!(Bucket::value);
+}
+
+impl<K, V> DoubleEndedIterator for IntoValues<K, V> {
+ double_ended_iterator_methods!(Bucket::value);
+}
+
+impl<K, V> ExactSizeIterator for IntoValues<K, V> {
+ fn len(&self) -> usize {
+ self.iter.len()
+ }
+}
+
+impl<K, V> FusedIterator for IntoValues<K, V> {}
+
+impl<K, V: fmt::Debug> fmt::Debug for IntoValues<K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let iter = self.iter.as_slice().iter().map(Bucket::value_ref);
+ f.debug_list().entries(iter).finish()
+ }
+}
+
+impl<K, V> Default for IntoValues<K, V> {
+ fn default() -> Self {
+ Self {
+ iter: Vec::new().into_iter(),
+ }
+ }
+}
diff --git a/vendor/indexmap/src/map/serde_seq.rs b/vendor/indexmap/src/map/serde_seq.rs
new file mode 100644
index 0000000..f10aa57
--- /dev/null
+++ b/vendor/indexmap/src/map/serde_seq.rs
@@ -0,0 +1,138 @@
+//! Functions to serialize and deserialize an `IndexMap` as an ordered sequence.
+//!
+//! The default `serde` implementation serializes `IndexMap` as a normal map,
+//! but there is no guarantee that serialization formats will preserve the order
+//! of the key-value pairs. This module serializes `IndexMap` as a sequence of
+//! `(key, value)` elements instead, in order.
+//!
+//! This module may be used in a field attribute for derived implementations:
+//!
+//! ```
+//! # use indexmap::IndexMap;
+//! # use serde_derive::{Deserialize, Serialize};
+//! #[derive(Deserialize, Serialize)]
+//! struct Data {
+//! #[serde(with = "indexmap::map::serde_seq")]
+//! map: IndexMap<i32, u64>,
+//! // ...
+//! }
+//! ```
+
+use serde::de::{Deserialize, Deserializer, SeqAccess, Visitor};
+use serde::ser::{Serialize, Serializer};
+
+use core::fmt::{self, Formatter};
+use core::hash::{BuildHasher, Hash};
+use core::marker::PhantomData;
+
+use crate::map::Slice as MapSlice;
+use crate::set::Slice as SetSlice;
+use crate::IndexMap;
+
+/// Serializes a `map::Slice` as an ordered sequence.
+///
+/// This behaves like [`crate::map::serde_seq`] for `IndexMap`, serializing a sequence
+/// of `(key, value)` pairs, rather than as a map that might not preserve order.
+impl<K, V> Serialize for MapSlice<K, V>
+where
+ K: Serialize,
+ V: Serialize,
+{
+ fn serialize<T>(&self, serializer: T) -> Result<T::Ok, T::Error>
+ where
+ T: Serializer,
+ {
+ serializer.collect_seq(self)
+ }
+}
+
+/// Serializes a `set::Slice` as an ordered sequence.
+impl<T> Serialize for SetSlice<T>
+where
+ T: Serialize,
+{
+ fn serialize<Se>(&self, serializer: Se) -> Result<Se::Ok, Se::Error>
+ where
+ Se: Serializer,
+ {
+ serializer.collect_seq(self)
+ }
+}
+
+/// Serializes an `IndexMap` as an ordered sequence.
+///
+/// This function may be used in a field attribute for deriving `Serialize`:
+///
+/// ```
+/// # use indexmap::IndexMap;
+/// # use serde_derive::Serialize;
+/// #[derive(Serialize)]
+/// struct Data {
+/// #[serde(serialize_with = "indexmap::map::serde_seq::serialize")]
+/// map: IndexMap<i32, u64>,
+/// // ...
+/// }
+/// ```
+pub fn serialize<K, V, S, T>(map: &IndexMap<K, V, S>, serializer: T) -> Result<T::Ok, T::Error>
+where
+ K: Serialize + Hash + Eq,
+ V: Serialize,
+ S: BuildHasher,
+ T: Serializer,
+{
+ serializer.collect_seq(map)
+}
+
+/// Visitor to deserialize a *sequenced* `IndexMap`
+struct SeqVisitor<K, V, S>(PhantomData<(K, V, S)>);
+
+impl<'de, K, V, S> Visitor<'de> for SeqVisitor<K, V, S>
+where
+ K: Deserialize<'de> + Eq + Hash,
+ V: Deserialize<'de>,
+ S: Default + BuildHasher,
+{
+ type Value = IndexMap<K, V, S>;
+
+ fn expecting(&self, formatter: &mut Formatter<'_>) -> fmt::Result {
+ write!(formatter, "a sequenced map")
+ }
+
+ fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
+ where
+ A: SeqAccess<'de>,
+ {
+ let capacity = seq.size_hint().unwrap_or(0);
+ let mut map = IndexMap::with_capacity_and_hasher(capacity, S::default());
+
+ while let Some((key, value)) = seq.next_element()? {
+ map.insert(key, value);
+ }
+
+ Ok(map)
+ }
+}
+
+/// Deserializes an `IndexMap` from an ordered sequence.
+///
+/// This function may be used in a field attribute for deriving `Deserialize`:
+///
+/// ```
+/// # use indexmap::IndexMap;
+/// # use serde_derive::Deserialize;
+/// #[derive(Deserialize)]
+/// struct Data {
+/// #[serde(deserialize_with = "indexmap::map::serde_seq::deserialize")]
+/// map: IndexMap<i32, u64>,
+/// // ...
+/// }
+/// ```
+pub fn deserialize<'de, D, K, V, S>(deserializer: D) -> Result<IndexMap<K, V, S>, D::Error>
+where
+ D: Deserializer<'de>,
+ K: Deserialize<'de> + Eq + Hash,
+ V: Deserialize<'de>,
+ S: Default + BuildHasher,
+{
+ deserializer.deserialize_seq(SeqVisitor(PhantomData))
+}
diff --git a/vendor/indexmap/src/map/slice.rs b/vendor/indexmap/src/map/slice.rs
new file mode 100644
index 0000000..9fb876f
--- /dev/null
+++ b/vendor/indexmap/src/map/slice.rs
@@ -0,0 +1,471 @@
+use super::{
+ Bucket, Entries, IndexMap, IntoIter, IntoKeys, IntoValues, Iter, IterMut, Keys, Values,
+ ValuesMut,
+};
+use crate::util::try_simplify_range;
+
+use alloc::boxed::Box;
+use alloc::vec::Vec;
+use core::cmp::Ordering;
+use core::fmt;
+use core::hash::{Hash, Hasher};
+use core::ops::{self, Bound, Index, IndexMut, RangeBounds};
+
+/// A dynamically-sized slice of key-value pairs in an `IndexMap`.
+///
+/// This supports indexed operations much like a `[(K, V)]` slice,
+/// but not any hashed operations on the map keys.
+///
+/// Unlike `IndexMap`, `Slice` does consider the order for `PartialEq`
+/// and `Eq`, and it also implements `PartialOrd`, `Ord`, and `Hash`.
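+///
+/// A small illustrative sketch of the order-sensitive comparisons:
+///
+/// ```
+/// use indexmap::IndexMap;
+///
+/// let a: IndexMap<_, _> = [(1, 'x'), (2, 'y')].into_iter().collect();
+/// let b: IndexMap<_, _> = [(2, 'y'), (1, 'x')].into_iter().collect();
+/// assert_eq!(a, b);                       // map equality ignores order
+/// assert_ne!(a.as_slice(), b.as_slice()); // slice equality considers order
+/// ```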
+#[repr(transparent)]
+pub struct Slice<K, V> {
+ pub(crate) entries: [Bucket<K, V>],
+}
+
+// SAFETY: `Slice<K, V>` is a transparent wrapper around `[Bucket<K, V>]`,
+// and reference lifetimes are bound together in function signatures.
+#[allow(unsafe_code)]
+impl<K, V> Slice<K, V> {
+ pub(super) fn from_slice(entries: &[Bucket<K, V>]) -> &Self {
+ unsafe { &*(entries as *const [Bucket<K, V>] as *const Self) }
+ }
+
+ pub(super) fn from_mut_slice(entries: &mut [Bucket<K, V>]) -> &mut Self {
+ unsafe { &mut *(entries as *mut [Bucket<K, V>] as *mut Self) }
+ }
+
+ pub(super) fn from_boxed(entries: Box<[Bucket<K, V>]>) -> Box<Self> {
+ unsafe { Box::from_raw(Box::into_raw(entries) as *mut Self) }
+ }
+
+ fn into_boxed(self: Box<Self>) -> Box<[Bucket<K, V>]> {
+ unsafe { Box::from_raw(Box::into_raw(self) as *mut [Bucket<K, V>]) }
+ }
+}
+
+impl<K, V> Slice<K, V> {
+ pub(crate) fn into_entries(self: Box<Self>) -> Vec<Bucket<K, V>> {
+ self.into_boxed().into_vec()
+ }
+
+ /// Return the number of key-value pairs in the map slice.
+ #[inline]
+ pub fn len(&self) -> usize {
+ self.entries.len()
+ }
+
+ /// Returns true if the map slice contains no elements.
+ #[inline]
+ pub fn is_empty(&self) -> bool {
+ self.entries.is_empty()
+ }
+
+ /// Get a key-value pair by index.
+ ///
+ /// Valid indices are *0 <= index < self.len()*
+ pub fn get_index(&self, index: usize) -> Option<(&K, &V)> {
+ self.entries.get(index).map(Bucket::refs)
+ }
+
+ /// Get a key-value pair by index, with mutable access to the value.
+ ///
+ /// Valid indices are *0 <= index < self.len()*
+ pub fn get_index_mut(&mut self, index: usize) -> Option<(&K, &mut V)> {
+ self.entries.get_mut(index).map(Bucket::ref_mut)
+ }
+
+ /// Returns a slice of key-value pairs in the given range of indices.
+ ///
+ /// Valid indices are *0 <= index < self.len()*
+ pub fn get_range<R: RangeBounds<usize>>(&self, range: R) -> Option<&Self> {
+ let range = try_simplify_range(range, self.entries.len())?;
+ self.entries.get(range).map(Slice::from_slice)
+ }
+
+ /// Returns a mutable slice of key-value pairs in the given range of indices.
+ ///
+ /// Valid indices are *0 <= index < self.len()*
+ pub fn get_range_mut<R: RangeBounds<usize>>(&mut self, range: R) -> Option<&mut Self> {
+ let range = try_simplify_range(range, self.entries.len())?;
+ self.entries.get_mut(range).map(Slice::from_mut_slice)
+ }
+
+ /// Get the first key-value pair.
+ pub fn first(&self) -> Option<(&K, &V)> {
+ self.entries.first().map(Bucket::refs)
+ }
+
+ /// Get the first key-value pair, with mutable access to the value.
+ pub fn first_mut(&mut self) -> Option<(&K, &mut V)> {
+ self.entries.first_mut().map(Bucket::ref_mut)
+ }
+
+ /// Get the last key-value pair.
+ pub fn last(&self) -> Option<(&K, &V)> {
+ self.entries.last().map(Bucket::refs)
+ }
+
+ /// Get the last key-value pair, with mutable access to the value.
+ pub fn last_mut(&mut self) -> Option<(&K, &mut V)> {
+ self.entries.last_mut().map(Bucket::ref_mut)
+ }
+
+ /// Divides one slice into two at an index.
+ ///
+ /// ***Panics*** if `index > len`.
+ pub fn split_at(&self, index: usize) -> (&Self, &Self) {
+ let (first, second) = self.entries.split_at(index);
+ (Self::from_slice(first), Self::from_slice(second))
+ }
+
+ /// Divides one mutable slice into two at an index.
+ ///
+ /// ***Panics*** if `index > len`.
+ pub fn split_at_mut(&mut self, index: usize) -> (&mut Self, &mut Self) {
+ let (first, second) = self.entries.split_at_mut(index);
+ (Self::from_mut_slice(first), Self::from_mut_slice(second))
+ }
+
+ /// Returns the first key-value pair and the rest of the slice,
+ /// or `None` if it is empty.
+ pub fn split_first(&self) -> Option<((&K, &V), &Self)> {
+ if let [first, rest @ ..] = &self.entries {
+ Some((first.refs(), Self::from_slice(rest)))
+ } else {
+ None
+ }
+ }
+
+ /// Returns the first key-value pair and the rest of the slice,
+ /// with mutable access to the value, or `None` if it is empty.
+ pub fn split_first_mut(&mut self) -> Option<((&K, &mut V), &mut Self)> {
+ if let [first, rest @ ..] = &mut self.entries {
+ Some((first.ref_mut(), Self::from_mut_slice(rest)))
+ } else {
+ None
+ }
+ }
+
+ /// Returns the last key-value pair and the rest of the slice,
+ /// or `None` if it is empty.
+ pub fn split_last(&self) -> Option<((&K, &V), &Self)> {
+ if let [rest @ .., last] = &self.entries {
+ Some((last.refs(), Self::from_slice(rest)))
+ } else {
+ None
+ }
+ }
+
+ /// Returns the last key-value pair and the rest of the slice,
+ /// with mutable access to the value, or `None` if it is empty.
+ pub fn split_last_mut(&mut self) -> Option<((&K, &mut V), &mut Self)> {
+ if let [rest @ .., last] = &mut self.entries {
+ Some((last.ref_mut(), Self::from_mut_slice(rest)))
+ } else {
+ None
+ }
+ }
+
+ /// Return an iterator over the key-value pairs of the map slice.
+ pub fn iter(&self) -> Iter<'_, K, V> {
+ Iter::new(&self.entries)
+ }
+
+ /// Return an iterator over the key-value pairs of the map slice.
+ pub fn iter_mut(&mut self) -> IterMut<'_, K, V> {
+ IterMut::new(&mut self.entries)
+ }
+
+ /// Return an iterator over the keys of the map slice.
+ pub fn keys(&self) -> Keys<'_, K, V> {
+ Keys::new(&self.entries)
+ }
+
+ /// Return an owning iterator over the keys of the map slice.
+ pub fn into_keys(self: Box<Self>) -> IntoKeys<K, V> {
+ IntoKeys::new(self.into_entries())
+ }
+
+ /// Return an iterator over the values of the map slice.
+ pub fn values(&self) -> Values<'_, K, V> {
+ Values::new(&self.entries)
+ }
+
+ /// Return an iterator over mutable references to the values of the map slice.
+ pub fn values_mut(&mut self) -> ValuesMut<'_, K, V> {
+ ValuesMut::new(&mut self.entries)
+ }
+
+ /// Return an owning iterator over the values of the map slice.
+ pub fn into_values(self: Box<Self>) -> IntoValues<K, V> {
+ IntoValues::new(self.into_entries())
+ }
+}
+
+impl<'a, K, V> IntoIterator for &'a Slice<K, V> {
+ type IntoIter = Iter<'a, K, V>;
+ type Item = (&'a K, &'a V);
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.iter()
+ }
+}
+
+impl<'a, K, V> IntoIterator for &'a mut Slice<K, V> {
+ type IntoIter = IterMut<'a, K, V>;
+ type Item = (&'a K, &'a mut V);
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.iter_mut()
+ }
+}
+
+impl<K, V> IntoIterator for Box<Slice<K, V>> {
+ type IntoIter = IntoIter<K, V>;
+ type Item = (K, V);
+
+ fn into_iter(self) -> Self::IntoIter {
+ IntoIter::new(self.into_entries())
+ }
+}
+
+impl<K, V> Default for &'_ Slice<K, V> {
+ fn default() -> Self {
+ Slice::from_slice(&[])
+ }
+}
+
+impl<K, V> Default for &'_ mut Slice<K, V> {
+ fn default() -> Self {
+ Slice::from_mut_slice(&mut [])
+ }
+}
+
+impl<K, V> Default for Box<Slice<K, V>> {
+ fn default() -> Self {
+ Slice::from_boxed(Box::default())
+ }
+}
+
+impl<K: Clone, V: Clone> Clone for Box<Slice<K, V>> {
+ fn clone(&self) -> Self {
+ Slice::from_boxed(self.entries.to_vec().into_boxed_slice())
+ }
+}
+
+impl<K: Copy, V: Copy> From<&Slice<K, V>> for Box<Slice<K, V>> {
+ fn from(slice: &Slice<K, V>) -> Self {
+ Slice::from_boxed(Box::from(&slice.entries))
+ }
+}
+
+impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for Slice<K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self).finish()
+ }
+}
+
+impl<K: PartialEq, V: PartialEq> PartialEq for Slice<K, V> {
+ fn eq(&self, other: &Self) -> bool {
+ self.len() == other.len() && self.iter().eq(other)
+ }
+}
+
+impl<K: Eq, V: Eq> Eq for Slice<K, V> {}
+
+impl<K: PartialOrd, V: PartialOrd> PartialOrd for Slice<K, V> {
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ self.iter().partial_cmp(other)
+ }
+}
+
+impl<K: Ord, V: Ord> Ord for Slice<K, V> {
+ fn cmp(&self, other: &Self) -> Ordering {
+ self.iter().cmp(other)
+ }
+}
+
+impl<K: Hash, V: Hash> Hash for Slice<K, V> {
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ self.len().hash(state);
+ for (key, value) in self {
+ key.hash(state);
+ value.hash(state);
+ }
+ }
+}
+
+impl<K, V> Index<usize> for Slice<K, V> {
+ type Output = V;
+
+ fn index(&self, index: usize) -> &V {
+ &self.entries[index].value
+ }
+}
+
+impl<K, V> IndexMut<usize> for Slice<K, V> {
+ fn index_mut(&mut self, index: usize) -> &mut V {
+ &mut self.entries[index].value
+ }
+}
+
+// We can't have `impl<I: RangeBounds<usize>> Index<I>` because that conflicts
+// both upstream with `Index<usize>` and downstream with `Index<&Q>`.
+// Instead, we repeat the implementations for all the core range types.
+macro_rules! impl_index {
+ ($($range:ty),*) => {$(
+ impl<K, V, S> Index<$range> for IndexMap<K, V, S> {
+ type Output = Slice<K, V>;
+
+ fn index(&self, range: $range) -> &Self::Output {
+ Slice::from_slice(&self.as_entries()[range])
+ }
+ }
+
+ impl<K, V, S> IndexMut<$range> for IndexMap<K, V, S> {
+ fn index_mut(&mut self, range: $range) -> &mut Self::Output {
+ Slice::from_mut_slice(&mut self.as_entries_mut()[range])
+ }
+ }
+
+ impl<K, V> Index<$range> for Slice<K, V> {
+ type Output = Slice<K, V>;
+
+ fn index(&self, range: $range) -> &Self {
+ Self::from_slice(&self.entries[range])
+ }
+ }
+
+ impl<K, V> IndexMut<$range> for Slice<K, V> {
+ fn index_mut(&mut self, range: $range) -> &mut Self {
+ Self::from_mut_slice(&mut self.entries[range])
+ }
+ }
+ )*}
+}
+impl_index!(
+ ops::Range<usize>,
+ ops::RangeFrom<usize>,
+ ops::RangeFull,
+ ops::RangeInclusive<usize>,
+ ops::RangeTo<usize>,
+ ops::RangeToInclusive<usize>,
+ (Bound<usize>, Bound<usize>)
+);
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use alloc::vec::Vec;
+
+ #[test]
+ fn slice_index() {
+ fn check(
+ vec_slice: &[(i32, i32)],
+ map_slice: &Slice<i32, i32>,
+ sub_slice: &Slice<i32, i32>,
+ ) {
+ assert_eq!(map_slice as *const _, sub_slice as *const _);
+ itertools::assert_equal(
+ vec_slice.iter().copied(),
+ map_slice.iter().map(|(&k, &v)| (k, v)),
+ );
+ itertools::assert_equal(vec_slice.iter().map(|(k, _)| k), map_slice.keys());
+ itertools::assert_equal(vec_slice.iter().map(|(_, v)| v), map_slice.values());
+ }
+
+ let vec: Vec<(i32, i32)> = (0..10).map(|i| (i, i * i)).collect();
+ let map: IndexMap<i32, i32> = vec.iter().cloned().collect();
+ let slice = map.as_slice();
+
+ // RangeFull
+ check(&vec[..], &map[..], &slice[..]);
+
+ for i in 0usize..10 {
+ // Index
+ assert_eq!(vec[i].1, map[i]);
+ assert_eq!(vec[i].1, slice[i]);
+ assert_eq!(map[&(i as i32)], map[i]);
+ assert_eq!(map[&(i as i32)], slice[i]);
+
+ // RangeFrom
+ check(&vec[i..], &map[i..], &slice[i..]);
+
+ // RangeTo
+ check(&vec[..i], &map[..i], &slice[..i]);
+
+ // RangeToInclusive
+ check(&vec[..=i], &map[..=i], &slice[..=i]);
+
+ // (Bound<usize>, Bound<usize>)
+ let bounds = (Bound::Excluded(i), Bound::Unbounded);
+ check(&vec[i + 1..], &map[bounds], &slice[bounds]);
+
+ for j in i..=10 {
+ // Range
+ check(&vec[i..j], &map[i..j], &slice[i..j]);
+ }
+
+ for j in i..10 {
+ // RangeInclusive
+ check(&vec[i..=j], &map[i..=j], &slice[i..=j]);
+ }
+ }
+ }
+
+ #[test]
+ fn slice_index_mut() {
+ fn check_mut(
+ vec_slice: &[(i32, i32)],
+ map_slice: &mut Slice<i32, i32>,
+ sub_slice: &mut Slice<i32, i32>,
+ ) {
+ assert_eq!(map_slice, sub_slice);
+ itertools::assert_equal(
+ vec_slice.iter().copied(),
+ map_slice.iter_mut().map(|(&k, &mut v)| (k, v)),
+ );
+ itertools::assert_equal(
+ vec_slice.iter().map(|&(_, v)| v),
+ map_slice.values_mut().map(|&mut v| v),
+ );
+ }
+
+ let vec: Vec<(i32, i32)> = (0..10).map(|i| (i, i * i)).collect();
+ let mut map: IndexMap<i32, i32> = vec.iter().cloned().collect();
+ let mut map2 = map.clone();
+ let slice = map2.as_mut_slice();
+
+ // RangeFull
+ check_mut(&vec[..], &mut map[..], &mut slice[..]);
+
+ for i in 0usize..10 {
+ // IndexMut
+ assert_eq!(&mut map[i], &mut slice[i]);
+
+ // RangeFrom
+ check_mut(&vec[i..], &mut map[i..], &mut slice[i..]);
+
+ // RangeTo
+ check_mut(&vec[..i], &mut map[..i], &mut slice[..i]);
+
+ // RangeToInclusive
+ check_mut(&vec[..=i], &mut map[..=i], &mut slice[..=i]);
+
+ // (Bound<usize>, Bound<usize>)
+ let bounds = (Bound::Excluded(i), Bound::Unbounded);
+ check_mut(&vec[i + 1..], &mut map[bounds], &mut slice[bounds]);
+
+ for j in i..=10 {
+ // Range
+ check_mut(&vec[i..j], &mut map[i..j], &mut slice[i..j]);
+ }
+
+ for j in i..10 {
+ // RangeInclusive
+ check_mut(&vec[i..=j], &mut map[i..=j], &mut slice[i..=j]);
+ }
+ }
+ }
+}
diff --git a/vendor/indexmap/src/map/tests.rs b/vendor/indexmap/src/map/tests.rs
new file mode 100644
index 0000000..f273d71
--- /dev/null
+++ b/vendor/indexmap/src/map/tests.rs
@@ -0,0 +1,449 @@
+use super::*;
+use std::string::String;
+
+#[test]
+fn it_works() {
+ let mut map = IndexMap::new();
+ assert_eq!(map.is_empty(), true);
+ map.insert(1, ());
+ map.insert(1, ());
+ assert_eq!(map.len(), 1);
+ assert!(map.get(&1).is_some());
+ assert_eq!(map.is_empty(), false);
+}
+
+#[test]
+fn new() {
+ let map = IndexMap::<String, String>::new();
+ println!("{:?}", map);
+ assert_eq!(map.capacity(), 0);
+ assert_eq!(map.len(), 0);
+ assert_eq!(map.is_empty(), true);
+}
+
+#[test]
+fn insert() {
+ let insert = [0, 4, 2, 12, 8, 7, 11, 5];
+ let not_present = [1, 3, 6, 9, 10];
+ let mut map = IndexMap::with_capacity(insert.len());
+
+ for (i, &elt) in insert.iter().enumerate() {
+ assert_eq!(map.len(), i);
+ map.insert(elt, elt);
+ assert_eq!(map.len(), i + 1);
+ assert_eq!(map.get(&elt), Some(&elt));
+ assert_eq!(map[&elt], elt);
+ }
+ println!("{:?}", map);
+
+ for &elt in &not_present {
+ assert!(map.get(&elt).is_none());
+ }
+}
+
+#[test]
+fn insert_full() {
+ let insert = vec![9, 2, 7, 1, 4, 6, 13];
+ let present = vec![1, 6, 2];
+ let mut map = IndexMap::with_capacity(insert.len());
+
+ for (i, &elt) in insert.iter().enumerate() {
+ assert_eq!(map.len(), i);
+ let (index, existing) = map.insert_full(elt, elt);
+ assert_eq!(existing, None);
+ assert_eq!(Some(index), map.get_full(&elt).map(|x| x.0));
+ assert_eq!(map.len(), i + 1);
+ }
+
+ let len = map.len();
+ for &elt in &present {
+ let (index, existing) = map.insert_full(elt, elt);
+ assert_eq!(existing, Some(elt));
+ assert_eq!(Some(index), map.get_full(&elt).map(|x| x.0));
+ assert_eq!(map.len(), len);
+ }
+}
+
+#[test]
+fn insert_2() {
+ let mut map = IndexMap::with_capacity(16);
+
+ let mut keys = vec![];
+ keys.extend(0..16);
+ keys.extend(if cfg!(miri) { 32..64 } else { 128..267 });
+
+ for &i in &keys {
+ let old_map = map.clone();
+ map.insert(i, ());
+ for key in old_map.keys() {
+ if map.get(key).is_none() {
+ println!("old_map: {:?}", old_map);
+ println!("map: {:?}", map);
+ panic!("did not find {} in map", key);
+ }
+ }
+ }
+
+ for &i in &keys {
+ assert!(map.get(&i).is_some(), "did not find {}", i);
+ }
+}
+
+#[test]
+fn insert_order() {
+ let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23];
+ let mut map = IndexMap::new();
+
+ for &elt in &insert {
+ map.insert(elt, ());
+ }
+
+ assert_eq!(map.keys().count(), map.len());
+ assert_eq!(map.keys().count(), insert.len());
+ for (a, b) in insert.iter().zip(map.keys()) {
+ assert_eq!(a, b);
+ }
+ for (i, k) in (0..insert.len()).zip(map.keys()) {
+ assert_eq!(map.get_index(i).unwrap().0, k);
+ }
+}
+
+#[test]
+fn grow() {
+ let insert = [0, 4, 2, 12, 8, 7, 11];
+ let not_present = [1, 3, 6, 9, 10];
+ let mut map = IndexMap::with_capacity(insert.len());
+
+ for (i, &elt) in insert.iter().enumerate() {
+ assert_eq!(map.len(), i);
+ map.insert(elt, elt);
+ assert_eq!(map.len(), i + 1);
+ assert_eq!(map.get(&elt), Some(&elt));
+ assert_eq!(map[&elt], elt);
+ }
+
+ println!("{:?}", map);
+ for &elt in &insert {
+ map.insert(elt * 10, elt);
+ }
+ for &elt in &insert {
+ map.insert(elt * 100, elt);
+ }
+ for (i, &elt) in insert.iter().cycle().enumerate().take(100) {
+ map.insert(elt * 100 + i as i32, elt);
+ }
+ println!("{:?}", map);
+ for &elt in &not_present {
+ assert!(map.get(&elt).is_none());
+ }
+}
+
+#[test]
+fn reserve() {
+ let mut map = IndexMap::<usize, usize>::new();
+ assert_eq!(map.capacity(), 0);
+ map.reserve(100);
+ let capacity = map.capacity();
+ assert!(capacity >= 100);
+ for i in 0..capacity {
+ assert_eq!(map.len(), i);
+ map.insert(i, i * i);
+ assert_eq!(map.len(), i + 1);
+ assert_eq!(map.capacity(), capacity);
+ assert_eq!(map.get(&i), Some(&(i * i)));
+ }
+ map.insert(capacity, std::usize::MAX);
+ assert_eq!(map.len(), capacity + 1);
+ assert!(map.capacity() > capacity);
+ assert_eq!(map.get(&capacity), Some(&std::usize::MAX));
+}
+
+#[test]
+fn try_reserve() {
+ let mut map = IndexMap::<usize, usize>::new();
+ assert_eq!(map.capacity(), 0);
+ assert_eq!(map.try_reserve(100), Ok(()));
+ assert!(map.capacity() >= 100);
+ assert!(map.try_reserve(usize::MAX).is_err());
+}
+
+#[test]
+fn shrink_to_fit() {
+ let mut map = IndexMap::<usize, usize>::new();
+ assert_eq!(map.capacity(), 0);
+ for i in 0..100 {
+ assert_eq!(map.len(), i);
+ map.insert(i, i * i);
+ assert_eq!(map.len(), i + 1);
+ assert!(map.capacity() >= i + 1);
+ assert_eq!(map.get(&i), Some(&(i * i)));
+ map.shrink_to_fit();
+ assert_eq!(map.len(), i + 1);
+ assert_eq!(map.capacity(), i + 1);
+ assert_eq!(map.get(&i), Some(&(i * i)));
+ }
+}
+
+#[test]
+fn remove() {
+ let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23];
+ let mut map = IndexMap::new();
+
+ for &elt in &insert {
+ map.insert(elt, elt);
+ }
+
+ assert_eq!(map.keys().count(), map.len());
+ assert_eq!(map.keys().count(), insert.len());
+ for (a, b) in insert.iter().zip(map.keys()) {
+ assert_eq!(a, b);
+ }
+
+ let remove_fail = [99, 77];
+ let remove = [4, 12, 8, 7];
+
+ for &key in &remove_fail {
+ assert!(map.swap_remove_full(&key).is_none());
+ }
+ println!("{:?}", map);
+ for &key in &remove {
+ //println!("{:?}", map);
+ let index = map.get_full(&key).unwrap().0;
+ assert_eq!(map.swap_remove_full(&key), Some((index, key, key)));
+ }
+ println!("{:?}", map);
+
+ for key in &insert {
+ assert_eq!(map.get(key).is_some(), !remove.contains(key));
+ }
+ assert_eq!(map.len(), insert.len() - remove.len());
+ assert_eq!(map.keys().count(), insert.len() - remove.len());
+}
+
+#[test]
+fn remove_to_empty() {
+ let mut map = indexmap! { 0 => 0, 4 => 4, 5 => 5 };
+ map.swap_remove(&5).unwrap();
+ map.swap_remove(&4).unwrap();
+ map.swap_remove(&0).unwrap();
+ assert!(map.is_empty());
+}
+
+#[test]
+fn swap_remove_index() {
+ let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23];
+ let mut map = IndexMap::new();
+
+ for &elt in &insert {
+ map.insert(elt, elt * 2);
+ }
+
+ let mut vector = insert.to_vec();
+ let remove_sequence = &[3, 3, 10, 4, 5, 4, 3, 0, 1];
+
+ // check that the same swap remove sequence on vec and map
+ // have the same result.
+ for &rm in remove_sequence {
+ let out_vec = vector.swap_remove(rm);
+ let (out_map, _) = map.swap_remove_index(rm).unwrap();
+ assert_eq!(out_vec, out_map);
+ }
+ assert_eq!(vector.len(), map.len());
+ for (a, b) in vector.iter().zip(map.keys()) {
+ assert_eq!(a, b);
+ }
+}
+
+#[test]
+fn partial_eq_and_eq() {
+ let mut map_a = IndexMap::new();
+ map_a.insert(1, "1");
+ map_a.insert(2, "2");
+ let mut map_b = map_a.clone();
+ assert_eq!(map_a, map_b);
+ map_b.swap_remove(&1);
+ assert_ne!(map_a, map_b);
+
+ let map_c: IndexMap<_, String> = map_b.into_iter().map(|(k, v)| (k, v.into())).collect();
+ assert_ne!(map_a, map_c);
+ assert_ne!(map_c, map_a);
+}
+
+#[test]
+fn extend() {
+ let mut map = IndexMap::new();
+ map.extend(vec![(&1, &2), (&3, &4)]);
+ map.extend(vec![(5, 6)]);
+ assert_eq!(
+ map.into_iter().collect::<Vec<_>>(),
+ vec![(1, 2), (3, 4), (5, 6)]
+ );
+}
+
+#[test]
+fn entry() {
+ let mut map = IndexMap::new();
+
+ map.insert(1, "1");
+ map.insert(2, "2");
+ {
+ let e = map.entry(3);
+ assert_eq!(e.index(), 2);
+ let e = e.or_insert("3");
+ assert_eq!(e, &"3");
+ }
+
+ let e = map.entry(2);
+ assert_eq!(e.index(), 1);
+ assert_eq!(e.key(), &2);
+ match e {
+ Entry::Occupied(ref e) => assert_eq!(e.get(), &"2"),
+ Entry::Vacant(_) => panic!(),
+ }
+ assert_eq!(e.or_insert("4"), &"2");
+}
+
+#[test]
+fn entry_and_modify() {
+ let mut map = IndexMap::new();
+
+ map.insert(1, "1");
+ map.entry(1).and_modify(|x| *x = "2");
+ assert_eq!(Some(&"2"), map.get(&1));
+
+ map.entry(2).and_modify(|x| *x = "doesn't exist");
+ assert_eq!(None, map.get(&2));
+}
+
+#[test]
+fn entry_or_default() {
+ let mut map = IndexMap::new();
+
+ #[derive(Debug, PartialEq)]
+ enum TestEnum {
+ DefaultValue,
+ NonDefaultValue,
+ }
+
+ impl Default for TestEnum {
+ fn default() -> Self {
+ TestEnum::DefaultValue
+ }
+ }
+
+ map.insert(1, TestEnum::NonDefaultValue);
+ assert_eq!(&mut TestEnum::NonDefaultValue, map.entry(1).or_default());
+
+ assert_eq!(&mut TestEnum::DefaultValue, map.entry(2).or_default());
+}
+
+#[test]
+fn occupied_entry_key() {
+ // These keys match hash and equality, but their addresses are distinct.
+ let (k1, k2) = (&mut 1, &mut 1);
+ let k1_ptr = k1 as *const i32;
+ let k2_ptr = k2 as *const i32;
+ assert_ne!(k1_ptr, k2_ptr);
+
+ let mut map = IndexMap::new();
+ map.insert(k1, "value");
+ match map.entry(k2) {
+ Entry::Occupied(ref e) => {
+ // `OccupiedEntry::key` should reference the key in the map,
+ // not the key that was used to find the entry.
+ let ptr = *e.key() as *const i32;
+ assert_eq!(ptr, k1_ptr);
+ assert_ne!(ptr, k2_ptr);
+ }
+ Entry::Vacant(_) => panic!(),
+ }
+}
+
+#[test]
+fn keys() {
+ let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')];
+ let map: IndexMap<_, _> = vec.into_iter().collect();
+ let keys: Vec<_> = map.keys().copied().collect();
+ assert_eq!(keys.len(), 3);
+ assert!(keys.contains(&1));
+ assert!(keys.contains(&2));
+ assert!(keys.contains(&3));
+}
+
+#[test]
+fn into_keys() {
+ let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')];
+ let map: IndexMap<_, _> = vec.into_iter().collect();
+ let keys: Vec<i32> = map.into_keys().collect();
+ assert_eq!(keys.len(), 3);
+ assert!(keys.contains(&1));
+ assert!(keys.contains(&2));
+ assert!(keys.contains(&3));
+}
+
+#[test]
+fn values() {
+ let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')];
+ let map: IndexMap<_, _> = vec.into_iter().collect();
+ let values: Vec<_> = map.values().copied().collect();
+ assert_eq!(values.len(), 3);
+ assert!(values.contains(&'a'));
+ assert!(values.contains(&'b'));
+ assert!(values.contains(&'c'));
+}
+
+#[test]
+fn values_mut() {
+ let vec = vec![(1, 1), (2, 2), (3, 3)];
+ let mut map: IndexMap<_, _> = vec.into_iter().collect();
+ for value in map.values_mut() {
+ *value *= 2
+ }
+ let values: Vec<_> = map.values().copied().collect();
+ assert_eq!(values.len(), 3);
+ assert!(values.contains(&2));
+ assert!(values.contains(&4));
+ assert!(values.contains(&6));
+}
+
+#[test]
+fn into_values() {
+ let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')];
+ let map: IndexMap<_, _> = vec.into_iter().collect();
+ let values: Vec<char> = map.into_values().collect();
+ assert_eq!(values.len(), 3);
+ assert!(values.contains(&'a'));
+ assert!(values.contains(&'b'));
+ assert!(values.contains(&'c'));
+}
+
+#[test]
+#[cfg(feature = "std")]
+fn from_array() {
+ let map = IndexMap::from([(1, 2), (3, 4)]);
+ let mut expected = IndexMap::new();
+ expected.insert(1, 2);
+ expected.insert(3, 4);
+
+ assert_eq!(map, expected)
+}
+
+#[test]
+fn iter_default() {
+ struct K;
+ struct V;
+ fn assert_default<T>()
+ where
+ T: Default + Iterator,
+ {
+ assert!(T::default().next().is_none());
+ }
+ assert_default::<Iter<'static, K, V>>();
+ assert_default::<IterMut<'static, K, V>>();
+ assert_default::<IntoIter<K, V>>();
+ assert_default::<Keys<'static, K, V>>();
+ assert_default::<IntoKeys<K, V>>();
+ assert_default::<Values<'static, K, V>>();
+ assert_default::<ValuesMut<'static, K, V>>();
+ assert_default::<IntoValues<K, V>>();
+}
diff --git a/vendor/indexmap/src/mutable_keys.rs b/vendor/indexmap/src/mutable_keys.rs
new file mode 100644
index 0000000..7efc779
--- /dev/null
+++ b/vendor/indexmap/src/mutable_keys.rs
@@ -0,0 +1,91 @@
+use core::hash::{BuildHasher, Hash};
+
+use super::{Bucket, Entries, Equivalent, IndexMap};
+
+/// Opt-in mutable access to keys.
+///
+/// These methods expose `&mut K`, mutable references to the key as it is stored
+/// in the map.
+/// You are allowed to modify the keys in the map **if the modification
+/// does not change the key’s hash and equality**.
+///
+/// If keys are modified erroneously, you can no longer look them up.
+/// This is sound (memory safe) but a logical error hazard (just like
+/// implementing PartialEq, Eq, or Hash incorrectly would be).
+///
+/// `use` this trait to enable its methods for `IndexMap`.
+///
+/// This trait is sealed and cannot be implemented for types outside this crate.
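+///
+/// An illustrative sketch (assuming the trait is imported from `indexmap::map`):
+///
+/// ```
+/// use indexmap::IndexMap;
+/// use indexmap::map::MutableKeys;
+///
+/// let mut map: IndexMap<i32, i32> = (0..5).map(|i| (i, i * i)).collect();
+/// // Keys and values are visited together; the keys are left unchanged here.
+/// map.retain2(|k, v| {
+///     *v += 1;
+///     *k % 2 == 0
+/// });
+/// assert_eq!(map.len(), 3);
+/// ```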
+pub trait MutableKeys: private::Sealed {
+ type Key;
+ type Value;
+
+ /// Return the item index, along with mutable references to the key and value.
+ ///
+ /// Computes in **O(1)** time (average).
+ fn get_full_mut2<Q: ?Sized>(
+ &mut self,
+ key: &Q,
+ ) -> Option<(usize, &mut Self::Key, &mut Self::Value)>
+ where
+ Q: Hash + Equivalent<Self::Key>;
+
+ /// Return mutable reference to key and value at an index.
+ ///
+ /// Valid indices are *0 <= index < self.len()*
+ ///
+ /// Computes in **O(1)** time.
+ fn get_index_mut2(&mut self, index: usize) -> Option<(&mut Self::Key, &mut Self::Value)>;
+
+ /// Scan through each key-value pair in the map and keep those where the
+ /// closure `keep` returns `true`.
+ ///
+ /// The elements are visited in order, and remaining elements keep their
+ /// order.
+ ///
+ /// Computes in **O(n)** time (average).
+ fn retain2<F>(&mut self, keep: F)
+ where
+ F: FnMut(&mut Self::Key, &mut Self::Value) -> bool;
+}
+
+/// Opt-in mutable access to keys.
+///
+/// See [`MutableKeys`](trait.MutableKeys.html) for more information.
+impl<K, V, S> MutableKeys for IndexMap<K, V, S>
+where
+ K: Eq + Hash,
+ S: BuildHasher,
+{
+ type Key = K;
+ type Value = V;
+
+ fn get_full_mut2<Q: ?Sized>(&mut self, key: &Q) -> Option<(usize, &mut K, &mut V)>
+ where
+ Q: Hash + Equivalent<K>,
+ {
+ if let Some(i) = self.get_index_of(key) {
+ let entry = &mut self.as_entries_mut()[i];
+ Some((i, &mut entry.key, &mut entry.value))
+ } else {
+ None
+ }
+ }
+
+ fn get_index_mut2(&mut self, index: usize) -> Option<(&mut K, &mut V)> {
+ self.as_entries_mut().get_mut(index).map(Bucket::muts)
+ }
+
+ fn retain2<F>(&mut self, keep: F)
+ where
+ F: FnMut(&mut K, &mut V) -> bool,
+ {
+ self.retain_mut(keep)
+ }
+}
+
+mod private {
+ pub trait Sealed {}
+
+ impl<K, V, S> Sealed for super::IndexMap<K, V, S> {}
+}
diff --git a/vendor/indexmap/src/rayon/map.rs b/vendor/indexmap/src/rayon/map.rs
new file mode 100644
index 0000000..d5325f2
--- /dev/null
+++ b/vendor/indexmap/src/rayon/map.rs
@@ -0,0 +1,679 @@
+//! Parallel iterator types for `IndexMap` with [rayon](https://docs.rs/rayon/1.0/rayon).
+//!
+//! You will rarely need to interact with this module directly unless you need to name one of the
+//! iterator types.
+
+use super::collect;
+use rayon::iter::plumbing::{Consumer, ProducerCallback, UnindexedConsumer};
+use rayon::prelude::*;
+
+use crate::vec::Vec;
+use alloc::boxed::Box;
+use core::cmp::Ordering;
+use core::fmt;
+use core::hash::{BuildHasher, Hash};
+use core::ops::RangeBounds;
+
+use crate::map::Slice;
+use crate::Bucket;
+use crate::Entries;
+use crate::IndexMap;
+
+impl<K, V, S> IntoParallelIterator for IndexMap<K, V, S>
+where
+ K: Send,
+ V: Send,
+{
+ type Item = (K, V);
+ type Iter = IntoParIter<K, V>;
+
+ fn into_par_iter(self) -> Self::Iter {
+ IntoParIter {
+ entries: self.into_entries(),
+ }
+ }
+}
+
+impl<K, V> IntoParallelIterator for Box<Slice<K, V>>
+where
+ K: Send,
+ V: Send,
+{
+ type Item = (K, V);
+ type Iter = IntoParIter<K, V>;
+
+ fn into_par_iter(self) -> Self::Iter {
+ IntoParIter {
+ entries: self.into_entries(),
+ }
+ }
+}
+
+/// A parallel owning iterator over the entries of an `IndexMap`.
+///
+/// This `struct` is created by the [`into_par_iter`] method on [`IndexMap`]
+/// (provided by rayon's `IntoParallelIterator` trait). See its documentation for more.
+///
+/// [`into_par_iter`]: ../struct.IndexMap.html#method.into_par_iter
+/// [`IndexMap`]: ../struct.IndexMap.html
+pub struct IntoParIter<K, V> {
+ entries: Vec<Bucket<K, V>>,
+}
+
+impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for IntoParIter<K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let iter = self.entries.iter().map(Bucket::refs);
+ f.debug_list().entries(iter).finish()
+ }
+}
+
+impl<K: Send, V: Send> ParallelIterator for IntoParIter<K, V> {
+ type Item = (K, V);
+
+ parallel_iterator_methods!(Bucket::key_value);
+}
+
+impl<K: Send, V: Send> IndexedParallelIterator for IntoParIter<K, V> {
+ indexed_parallel_iterator_methods!(Bucket::key_value);
+}
+
+impl<'a, K, V, S> IntoParallelIterator for &'a IndexMap<K, V, S>
+where
+ K: Sync,
+ V: Sync,
+{
+ type Item = (&'a K, &'a V);
+ type Iter = ParIter<'a, K, V>;
+
+ fn into_par_iter(self) -> Self::Iter {
+ ParIter {
+ entries: self.as_entries(),
+ }
+ }
+}
+
+impl<'a, K, V> IntoParallelIterator for &'a Slice<K, V>
+where
+ K: Sync,
+ V: Sync,
+{
+ type Item = (&'a K, &'a V);
+ type Iter = ParIter<'a, K, V>;
+
+ fn into_par_iter(self) -> Self::Iter {
+ ParIter {
+ entries: &self.entries,
+ }
+ }
+}
+
+/// A parallel iterator over the entries of an `IndexMap`.
+///
+/// This `struct` is created by the [`par_iter`] method on [`IndexMap`]
+/// (provided by rayon's `IntoParallelRefIterator` trait). See its documentation for more.
+///
+/// [`par_iter`]: ../struct.IndexMap.html#method.par_iter
+/// [`IndexMap`]: ../struct.IndexMap.html
+pub struct ParIter<'a, K, V> {
+ entries: &'a [Bucket<K, V>],
+}
+
+impl<K, V> Clone for ParIter<'_, K, V> {
+ fn clone(&self) -> Self {
+ ParIter { ..*self }
+ }
+}
+
+impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for ParIter<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let iter = self.entries.iter().map(Bucket::refs);
+ f.debug_list().entries(iter).finish()
+ }
+}
+
+impl<'a, K: Sync, V: Sync> ParallelIterator for ParIter<'a, K, V> {
+ type Item = (&'a K, &'a V);
+
+ parallel_iterator_methods!(Bucket::refs);
+}
+
+impl<K: Sync, V: Sync> IndexedParallelIterator for ParIter<'_, K, V> {
+ indexed_parallel_iterator_methods!(Bucket::refs);
+}
+
+impl<'a, K, V, S> IntoParallelIterator for &'a mut IndexMap<K, V, S>
+where
+ K: Sync + Send,
+ V: Send,
+{
+ type Item = (&'a K, &'a mut V);
+ type Iter = ParIterMut<'a, K, V>;
+
+ fn into_par_iter(self) -> Self::Iter {
+ ParIterMut {
+ entries: self.as_entries_mut(),
+ }
+ }
+}
+
+impl<'a, K, V> IntoParallelIterator for &'a mut Slice<K, V>
+where
+ K: Sync + Send,
+ V: Send,
+{
+ type Item = (&'a K, &'a mut V);
+ type Iter = ParIterMut<'a, K, V>;
+
+ fn into_par_iter(self) -> Self::Iter {
+ ParIterMut {
+ entries: &mut self.entries,
+ }
+ }
+}
+
+/// A parallel mutable iterator over the entries of an `IndexMap`.
+///
+/// This `struct` is created by the [`par_iter_mut`] method on [`IndexMap`]
+/// (provided by rayon's `IntoParallelRefMutIterator` trait). See its documentation for more.
+///
+/// [`par_iter_mut`]: ../struct.IndexMap.html#method.par_iter_mut
+/// [`IndexMap`]: ../struct.IndexMap.html
+pub struct ParIterMut<'a, K, V> {
+ entries: &'a mut [Bucket<K, V>],
+}
+
+impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for ParIterMut<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let iter = self.entries.iter().map(Bucket::refs);
+ f.debug_list().entries(iter).finish()
+ }
+}
+
+impl<'a, K: Sync + Send, V: Send> ParallelIterator for ParIterMut<'a, K, V> {
+ type Item = (&'a K, &'a mut V);
+
+ parallel_iterator_methods!(Bucket::ref_mut);
+}
+
+impl<K: Sync + Send, V: Send> IndexedParallelIterator for ParIterMut<'_, K, V> {
+ indexed_parallel_iterator_methods!(Bucket::ref_mut);
+}
+
+impl<'a, K, V, S> ParallelDrainRange<usize> for &'a mut IndexMap<K, V, S>
+where
+ K: Send,
+ V: Send,
+{
+ type Item = (K, V);
+ type Iter = ParDrain<'a, K, V>;
+
+ fn par_drain<R: RangeBounds<usize>>(self, range: R) -> Self::Iter {
+ ParDrain {
+ entries: self.core.par_drain(range),
+ }
+ }
+}
+
+/// A parallel draining iterator over the entries of an `IndexMap`.
+///
+/// This `struct` is created by the [`par_drain`] method on [`IndexMap`]
+/// (provided by rayon's `ParallelDrainRange` trait). See its documentation for more.
+///
+/// [`par_drain`]: ../struct.IndexMap.html#method.par_drain
+/// [`IndexMap`]: ../struct.IndexMap.html
+pub struct ParDrain<'a, K: Send, V: Send> {
+ entries: rayon::vec::Drain<'a, Bucket<K, V>>,
+}
+
+impl<K: Send, V: Send> ParallelIterator for ParDrain<'_, K, V> {
+ type Item = (K, V);
+
+ parallel_iterator_methods!(Bucket::key_value);
+}
+
+impl<K: Send, V: Send> IndexedParallelIterator for ParDrain<'_, K, V> {
+ indexed_parallel_iterator_methods!(Bucket::key_value);
+}
+
+/// Parallel iterator methods and other parallel methods.
+///
+/// The following methods **require crate feature `"rayon"`**.
+///
+/// See also the `IntoParallelIterator` implementations.
+impl<K, V, S> IndexMap<K, V, S>
+where
+ K: Sync,
+ V: Sync,
+{
+ /// Return a parallel iterator over the keys of the map.
+ ///
+ /// While parallel iterators can process items in any order, their relative order
+ /// in the map is still preserved for operations like `reduce` and `collect`.
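+ ///
+ /// A small illustrative sketch (assuming rayon's prelude is in scope):
+ ///
+ /// ```
+ /// use indexmap::IndexMap;
+ /// use rayon::prelude::*;
+ ///
+ /// let map: IndexMap<_, _> = [(3, 'c'), (1, 'a')].into_iter().collect();
+ /// let keys: Vec<_> = map.par_keys().cloned().collect();
+ /// assert_eq!(keys, [3, 1]); // insertion order is preserved by `collect`
+ /// ```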
+ pub fn par_keys(&self) -> ParKeys<'_, K, V> {
+ ParKeys {
+ entries: self.as_entries(),
+ }
+ }
+
+ /// Return a parallel iterator over the values of the map.
+ ///
+ /// While parallel iterators can process items in any order, their relative order
+ /// in the map is still preserved for operations like `reduce` and `collect`.
+ pub fn par_values(&self) -> ParValues<'_, K, V> {
+ ParValues {
+ entries: self.as_entries(),
+ }
+ }
+}
+
+/// Parallel iterator methods and other parallel methods.
+///
+/// The following methods **require crate feature `"rayon"`**.
+///
+/// See also the `IntoParallelIterator` implementations.
+impl<K, V> Slice<K, V>
+where
+ K: Sync,
+ V: Sync,
+{
+ /// Return a parallel iterator over the keys of the map slice.
+ ///
+ /// While parallel iterators can process items in any order, their relative order
+ /// in the slice is still preserved for operations like `reduce` and `collect`.
+ pub fn par_keys(&self) -> ParKeys<'_, K, V> {
+ ParKeys {
+ entries: &self.entries,
+ }
+ }
+
+ /// Return a parallel iterator over the values of the map slice.
+ ///
+ /// While parallel iterators can process items in any order, their relative order
+ /// in the slice is still preserved for operations like `reduce` and `collect`.
+ pub fn par_values(&self) -> ParValues<'_, K, V> {
+ ParValues {
+ entries: &self.entries,
+ }
+ }
+}
+
+impl<K, V, S> IndexMap<K, V, S>
+where
+ K: Hash + Eq + Sync,
+ V: Sync,
+ S: BuildHasher,
+{
+ /// Returns `true` if `self` contains all of the same key-value pairs as `other`,
+ /// regardless of each map's indexed order, determined in parallel.
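+ ///
+ /// A small illustrative sketch:
+ ///
+ /// ```
+ /// use indexmap::IndexMap;
+ ///
+ /// let a: IndexMap<_, _> = [(1, 'a'), (2, 'b')].into_iter().collect();
+ /// let b: IndexMap<_, _> = [(2, 'b'), (1, 'a')].into_iter().collect();
+ /// assert!(a.par_eq(&b)); // same pairs, different order
+ /// ```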
+ pub fn par_eq<V2, S2>(&self, other: &IndexMap<K, V2, S2>) -> bool
+ where
+ V: PartialEq<V2>,
+ V2: Sync,
+ S2: BuildHasher + Sync,
+ {
+ self.len() == other.len()
+ && self
+ .par_iter()
+ .all(move |(key, value)| other.get(key).map_or(false, |v| *value == *v))
+ }
+}
+
+/// A parallel iterator over the keys of an `IndexMap`.
+///
+/// This `struct` is created by the [`par_keys`] method on [`IndexMap`]. See its
+/// documentation for more.
+///
+/// [`par_keys`]: ../struct.IndexMap.html#method.par_keys
+/// [`IndexMap`]: ../struct.IndexMap.html
+pub struct ParKeys<'a, K, V> {
+ entries: &'a [Bucket<K, V>],
+}
+
+impl<K, V> Clone for ParKeys<'_, K, V> {
+ fn clone(&self) -> Self {
+ ParKeys { ..*self }
+ }
+}
+
+impl<K: fmt::Debug, V> fmt::Debug for ParKeys<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let iter = self.entries.iter().map(Bucket::key_ref);
+ f.debug_list().entries(iter).finish()
+ }
+}
+
+impl<'a, K: Sync, V: Sync> ParallelIterator for ParKeys<'a, K, V> {
+ type Item = &'a K;
+
+ parallel_iterator_methods!(Bucket::key_ref);
+}
+
+impl<K: Sync, V: Sync> IndexedParallelIterator for ParKeys<'_, K, V> {
+ indexed_parallel_iterator_methods!(Bucket::key_ref);
+}
+
+/// A parallel iterator over the values of an `IndexMap`.
+///
+/// This `struct` is created by the [`par_values`] method on [`IndexMap`]. See its
+/// documentation for more.
+///
+/// [`par_values`]: ../struct.IndexMap.html#method.par_values
+/// [`IndexMap`]: ../struct.IndexMap.html
+pub struct ParValues<'a, K, V> {
+ entries: &'a [Bucket<K, V>],
+}
+
+impl<K, V> Clone for ParValues<'_, K, V> {
+ fn clone(&self) -> Self {
+ ParValues { ..*self }
+ }
+}
+
+impl<K, V: fmt::Debug> fmt::Debug for ParValues<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let iter = self.entries.iter().map(Bucket::value_ref);
+ f.debug_list().entries(iter).finish()
+ }
+}
+
+impl<'a, K: Sync, V: Sync> ParallelIterator for ParValues<'a, K, V> {
+ type Item = &'a V;
+
+ parallel_iterator_methods!(Bucket::value_ref);
+}
+
+impl<K: Sync, V: Sync> IndexedParallelIterator for ParValues<'_, K, V> {
+ indexed_parallel_iterator_methods!(Bucket::value_ref);
+}
+
+impl<K, V, S> IndexMap<K, V, S>
+where
+ K: Send,
+ V: Send,
+{
+ /// Return a parallel iterator over mutable references to the values of the map.
+ ///
+ /// While parallel iterators can process items in any order, their relative order
+ /// in the map is still preserved for operations like `reduce` and `collect`.
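+ ///
+ /// A small usage sketch (requires crate feature `"rayon"`):
+ ///
+ /// ```
+ /// use indexmap::IndexMap;
+ /// use rayon::prelude::*;
+ ///
+ /// let mut map: IndexMap<_, _> = vec![("a", 1), ("b", 2)].into_iter().collect();
+ /// map.par_values_mut().for_each(|v| *v *= 10);
+ /// assert_eq!(map["a"], 10);
+ /// assert_eq!(map["b"], 20);
+ /// ```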
+ pub fn par_values_mut(&mut self) -> ParValuesMut<'_, K, V> {
+ ParValuesMut {
+ entries: self.as_entries_mut(),
+ }
+ }
+}
+
+impl<K, V> Slice<K, V>
+where
+ K: Send,
+ V: Send,
+{
+ /// Return a parallel iterator over mutable references to the values of the map slice.
+ ///
+ /// While parallel iterators can process items in any order, their relative order
+ /// in the slice is still preserved for operations like `reduce` and `collect`.
+ pub fn par_values_mut(&mut self) -> ParValuesMut<'_, K, V> {
+ ParValuesMut {
+ entries: &mut self.entries,
+ }
+ }
+}
+
+impl<K, V, S> IndexMap<K, V, S>
+where
+ K: Hash + Eq + Send,
+ V: Send,
+ S: BuildHasher,
+{
+ /// Sort the map’s key-value pairs in parallel, by the default ordering of the keys.
+ pub fn par_sort_keys(&mut self)
+ where
+ K: Ord,
+ {
+ self.with_entries(|entries| {
+ entries.par_sort_by(|a, b| K::cmp(&a.key, &b.key));
+ });
+ }
+
+ /// Sort the map’s key-value pairs in place and in parallel, using the comparison
+ /// function `cmp`.
+ ///
+ /// The comparison function receives two key-value pairs to compare (you
+ /// can sort by keys, by values, or by any combination of the two, as needed).
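+ ///
+ /// A small usage sketch (requires crate feature `"rayon"`), sorting by value:
+ ///
+ /// ```
+ /// use indexmap::IndexMap;
+ ///
+ /// let mut map: IndexMap<_, _> = vec![("b", 2), ("a", 3), ("c", 1)].into_iter().collect();
+ /// map.par_sort_by(|_k1, v1, _k2, v2| v1.cmp(v2));
+ /// let pairs: Vec<_> = map.iter().map(|(&k, &v)| (k, v)).collect();
+ /// assert_eq!(pairs, [("c", 1), ("b", 2), ("a", 3)]);
+ /// ```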
+ pub fn par_sort_by<F>(&mut self, cmp: F)
+ where
+ F: Fn(&K, &V, &K, &V) -> Ordering + Sync,
+ {
+ self.with_entries(|entries| {
+ entries.par_sort_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value));
+ });
+ }
+
+ /// Sort the key-value pairs of the map in parallel and return a by-value parallel
+ /// iterator of the key-value pairs with the result.
+ pub fn par_sorted_by<F>(self, cmp: F) -> IntoParIter<K, V>
+ where
+ F: Fn(&K, &V, &K, &V) -> Ordering + Sync,
+ {
+ let mut entries = self.into_entries();
+ entries.par_sort_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value));
+ IntoParIter { entries }
+ }
+
+ /// Sort the map's key-value pairs in parallel, by the default ordering of the keys.
+ pub fn par_sort_unstable_keys(&mut self)
+ where
+ K: Ord,
+ {
+ self.with_entries(|entries| {
+ entries.par_sort_unstable_by(|a, b| K::cmp(&a.key, &b.key));
+ });
+ }
+
+ /// Sort the map's key-value pairs in place and in parallel, using the comparison
+ /// function `cmp`.
+ ///
+ /// The comparison function receives two key-value pairs to compare (you
+ /// can sort by keys, by values, or by any combination of the two, as needed).
+ pub fn par_sort_unstable_by<F>(&mut self, cmp: F)
+ where
+ F: Fn(&K, &V, &K, &V) -> Ordering + Sync,
+ {
+ self.with_entries(|entries| {
+ entries.par_sort_unstable_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value));
+ });
+ }
+
+ /// Sort the key-value pairs of the map in parallel and return a by-value parallel
+ /// iterator of the key-value pairs with the result.
+ pub fn par_sorted_unstable_by<F>(self, cmp: F) -> IntoParIter<K, V>
+ where
+ F: Fn(&K, &V, &K, &V) -> Ordering + Sync,
+ {
+ let mut entries = self.into_entries();
+ entries.par_sort_unstable_by(move |a, b| cmp(&a.key, &a.value, &b.key, &b.value));
+ IntoParIter { entries }
+ }
+
+ /// Sort the map’s key-value pairs in place and in parallel, using a sort-key extraction
+ /// function.
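+ ///
+ /// A small usage sketch (requires crate feature `"rayon"`), sorting by key length:
+ ///
+ /// ```
+ /// use indexmap::IndexMap;
+ ///
+ /// let mut map: IndexMap<_, _> = vec![("pear", 3), ("fig", 1), ("apple", 2)].into_iter().collect();
+ /// // The extracted sort key is computed at most once per entry.
+ /// map.par_sort_by_cached_key(|k, _v| k.len());
+ /// let keys: Vec<_> = map.keys().copied().collect();
+ /// assert_eq!(keys, ["fig", "pear", "apple"]);
+ /// ```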
+ pub fn par_sort_by_cached_key<T, F>(&mut self, sort_key: F)
+ where
+ T: Ord + Send,
+ F: Fn(&K, &V) -> T + Sync,
+ {
+ self.with_entries(move |entries| {
+ entries.par_sort_by_cached_key(move |a| sort_key(&a.key, &a.value));
+ });
+ }
+}
+
+/// A parallel mutable iterator over the values of an `IndexMap`.
+///
+/// This `struct` is created by the [`par_values_mut`] method on [`IndexMap`]. See its
+/// documentation for more.
+///
+/// [`par_values_mut`]: ../struct.IndexMap.html#method.par_values_mut
+/// [`IndexMap`]: ../struct.IndexMap.html
+pub struct ParValuesMut<'a, K, V> {
+ entries: &'a mut [Bucket<K, V>],
+}
+
+impl<K, V: fmt::Debug> fmt::Debug for ParValuesMut<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let iter = self.entries.iter().map(Bucket::value_ref);
+ f.debug_list().entries(iter).finish()
+ }
+}
+
+impl<'a, K: Send, V: Send> ParallelIterator for ParValuesMut<'a, K, V> {
+ type Item = &'a mut V;
+
+ parallel_iterator_methods!(Bucket::value_mut);
+}
+
+impl<K: Send, V: Send> IndexedParallelIterator for ParValuesMut<'_, K, V> {
+ indexed_parallel_iterator_methods!(Bucket::value_mut);
+}
+
+impl<K, V, S> FromParallelIterator<(K, V)> for IndexMap<K, V, S>
+where
+ K: Eq + Hash + Send,
+ V: Send,
+ S: BuildHasher + Default + Send,
+{
+ fn from_par_iter<I>(iter: I) -> Self
+ where
+ I: IntoParallelIterator<Item = (K, V)>,
+ {
+ let list = collect(iter);
+ let len = list.iter().map(Vec::len).sum();
+ let mut map = Self::with_capacity_and_hasher(len, S::default());
+ for vec in list {
+ map.extend(vec);
+ }
+ map
+ }
+}
+
+impl<K, V, S> ParallelExtend<(K, V)> for IndexMap<K, V, S>
+where
+ K: Eq + Hash + Send,
+ V: Send,
+ S: BuildHasher + Send,
+{
+ fn par_extend<I>(&mut self, iter: I)
+ where
+ I: IntoParallelIterator<Item = (K, V)>,
+ {
+ for vec in collect(iter) {
+ self.extend(vec);
+ }
+ }
+}
+
+impl<'a, K: 'a, V: 'a, S> ParallelExtend<(&'a K, &'a V)> for IndexMap<K, V, S>
+where
+ K: Copy + Eq + Hash + Send + Sync,
+ V: Copy + Send + Sync,
+ S: BuildHasher + Send,
+{
+ fn par_extend<I>(&mut self, iter: I)
+ where
+ I: IntoParallelIterator<Item = (&'a K, &'a V)>,
+ {
+ for vec in collect(iter) {
+ self.extend(vec);
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use std::string::String;
+
+ #[test]
+ fn insert_order() {
+ let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23];
+ let mut map = IndexMap::new();
+
+ for &elt in &insert {
+ map.insert(elt, ());
+ }
+
+ assert_eq!(map.par_keys().count(), map.len());
+ assert_eq!(map.par_keys().count(), insert.len());
+ insert.par_iter().zip(map.par_keys()).for_each(|(a, b)| {
+ assert_eq!(a, b);
+ });
+ (0..insert.len())
+ .into_par_iter()
+ .zip(map.par_keys())
+ .for_each(|(i, k)| {
+ assert_eq!(map.get_index(i).unwrap().0, k);
+ });
+ }
+
+ #[test]
+ fn partial_eq_and_eq() {
+ let mut map_a = IndexMap::new();
+ map_a.insert(1, "1");
+ map_a.insert(2, "2");
+ let mut map_b = map_a.clone();
+ assert!(map_a.par_eq(&map_b));
+ map_b.swap_remove(&1);
+ assert!(!map_a.par_eq(&map_b));
+ map_b.insert(3, "3");
+ assert!(!map_a.par_eq(&map_b));
+
+ let map_c: IndexMap<_, String> =
+ map_b.into_par_iter().map(|(k, v)| (k, v.into())).collect();
+ assert!(!map_a.par_eq(&map_c));
+ assert!(!map_c.par_eq(&map_a));
+ }
+
+ #[test]
+ fn extend() {
+ let mut map = IndexMap::new();
+ map.par_extend(vec![(&1, &2), (&3, &4)]);
+ map.par_extend(vec![(5, 6)]);
+ assert_eq!(
+ map.into_par_iter().collect::<Vec<_>>(),
+ vec![(1, 2), (3, 4), (5, 6)]
+ );
+ }
+
+ #[test]
+ fn keys() {
+ let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')];
+ let map: IndexMap<_, _> = vec.into_par_iter().collect();
+ let keys: Vec<_> = map.par_keys().copied().collect();
+ assert_eq!(keys.len(), 3);
+ assert!(keys.contains(&1));
+ assert!(keys.contains(&2));
+ assert!(keys.contains(&3));
+ }
+
+ #[test]
+ fn values() {
+ let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')];
+ let map: IndexMap<_, _> = vec.into_par_iter().collect();
+ let values: Vec<_> = map.par_values().copied().collect();
+ assert_eq!(values.len(), 3);
+ assert!(values.contains(&'a'));
+ assert!(values.contains(&'b'));
+ assert!(values.contains(&'c'));
+ }
+
+ #[test]
+ fn values_mut() {
+ let vec = vec![(1, 1), (2, 2), (3, 3)];
+ let mut map: IndexMap<_, _> = vec.into_par_iter().collect();
+ map.par_values_mut().for_each(|value| *value *= 2);
+ let values: Vec<_> = map.par_values().copied().collect();
+ assert_eq!(values.len(), 3);
+ assert!(values.contains(&2));
+ assert!(values.contains(&4));
+ assert!(values.contains(&6));
+ }
+}
diff --git a/vendor/indexmap/src/rayon/mod.rs b/vendor/indexmap/src/rayon/mod.rs
new file mode 100644
index 0000000..ebb1ac2
--- /dev/null
+++ b/vendor/indexmap/src/rayon/mod.rs
@@ -0,0 +1,27 @@
+use rayon::prelude::*;
+
+use alloc::collections::LinkedList;
+
+use crate::vec::Vec;
+
+pub mod map;
+pub mod set;
+
+// This form of intermediate collection is also how Rayon collects `HashMap`.
+// Note that the order will also be preserved!
+fn collect<I: IntoParallelIterator>(iter: I) -> LinkedList<Vec<I::Item>> {
+ iter.into_par_iter()
+ .fold(Vec::new, |mut vec, elem| {
+ vec.push(elem);
+ vec
+ })
+ .map(|vec| {
+ let mut list = LinkedList::new();
+ list.push_back(vec);
+ list
+ })
+ .reduce(LinkedList::new, |mut list1, mut list2| {
+ list1.append(&mut list2);
+ list1
+ })
+}
diff --git a/vendor/indexmap/src/rayon/set.rs b/vendor/indexmap/src/rayon/set.rs
new file mode 100644
index 0000000..0fc478e
--- /dev/null
+++ b/vendor/indexmap/src/rayon/set.rs
@@ -0,0 +1,774 @@
+//! Parallel iterator types for `IndexSet` with [rayon](https://docs.rs/rayon/1.0/rayon).
+//!
+//! You will rarely need to interact with this module directly unless you need to name one of the
+//! iterator types.
+
+use super::collect;
+use rayon::iter::plumbing::{Consumer, ProducerCallback, UnindexedConsumer};
+use rayon::prelude::*;
+
+use crate::vec::Vec;
+use alloc::boxed::Box;
+use core::cmp::Ordering;
+use core::fmt;
+use core::hash::{BuildHasher, Hash};
+use core::ops::RangeBounds;
+
+use crate::set::Slice;
+use crate::Entries;
+use crate::IndexSet;
+
+type Bucket<T> = crate::Bucket<T, ()>;
+
+impl<T, S> IntoParallelIterator for IndexSet<T, S>
+where
+ T: Send,
+{
+ type Item = T;
+ type Iter = IntoParIter<T>;
+
+ fn into_par_iter(self) -> Self::Iter {
+ IntoParIter {
+ entries: self.into_entries(),
+ }
+ }
+}
+
+impl<T> IntoParallelIterator for Box<Slice<T>>
+where
+ T: Send,
+{
+ type Item = T;
+ type Iter = IntoParIter<T>;
+
+ fn into_par_iter(self) -> Self::Iter {
+ IntoParIter {
+ entries: self.into_entries(),
+ }
+ }
+}
+
+/// A parallel owning iterator over the items of an `IndexSet`.
+///
+/// This `struct` is created by the [`into_par_iter`] method on [`IndexSet`]
+/// (provided by rayon's `IntoParallelIterator` trait). See its documentation for more.
+///
+/// [`IndexSet`]: ../struct.IndexSet.html
+/// [`into_par_iter`]: ../struct.IndexSet.html#method.into_par_iter
+pub struct IntoParIter<T> {
+ entries: Vec<Bucket<T>>,
+}
+
+impl<T: fmt::Debug> fmt::Debug for IntoParIter<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let iter = self.entries.iter().map(Bucket::key_ref);
+ f.debug_list().entries(iter).finish()
+ }
+}
+
+impl<T: Send> ParallelIterator for IntoParIter<T> {
+ type Item = T;
+
+ parallel_iterator_methods!(Bucket::key);
+}
+
+impl<T: Send> IndexedParallelIterator for IntoParIter<T> {
+ indexed_parallel_iterator_methods!(Bucket::key);
+}
+
+impl<'a, T, S> IntoParallelIterator for &'a IndexSet<T, S>
+where
+ T: Sync,
+{
+ type Item = &'a T;
+ type Iter = ParIter<'a, T>;
+
+ fn into_par_iter(self) -> Self::Iter {
+ ParIter {
+ entries: self.as_entries(),
+ }
+ }
+}
+
+impl<'a, T> IntoParallelIterator for &'a Slice<T>
+where
+ T: Sync,
+{
+ type Item = &'a T;
+ type Iter = ParIter<'a, T>;
+
+ fn into_par_iter(self) -> Self::Iter {
+ ParIter {
+ entries: &self.entries,
+ }
+ }
+}
+
+/// A parallel iterator over the items of an `IndexSet`.
+///
+/// This `struct` is created by the [`par_iter`] method on [`IndexSet`]
+/// (provided by rayon's `IntoParallelRefIterator` trait). See its documentation for more.
+///
+/// [`IndexSet`]: ../struct.IndexSet.html
+/// [`par_iter`]: ../struct.IndexSet.html#method.par_iter
+pub struct ParIter<'a, T> {
+ entries: &'a [Bucket<T>],
+}
+
+impl<T> Clone for ParIter<'_, T> {
+ fn clone(&self) -> Self {
+ ParIter { ..*self }
+ }
+}
+
+impl<T: fmt::Debug> fmt::Debug for ParIter<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let iter = self.entries.iter().map(Bucket::key_ref);
+ f.debug_list().entries(iter).finish()
+ }
+}
+
+impl<'a, T: Sync> ParallelIterator for ParIter<'a, T> {
+ type Item = &'a T;
+
+ parallel_iterator_methods!(Bucket::key_ref);
+}
+
+impl<T: Sync> IndexedParallelIterator for ParIter<'_, T> {
+ indexed_parallel_iterator_methods!(Bucket::key_ref);
+}
+
+impl<'a, T, S> ParallelDrainRange<usize> for &'a mut IndexSet<T, S>
+where
+ T: Send,
+{
+ type Item = T;
+ type Iter = ParDrain<'a, T>;
+
+ fn par_drain<R: RangeBounds<usize>>(self, range: R) -> Self::Iter {
+ ParDrain {
+ entries: self.map.core.par_drain(range),
+ }
+ }
+}
+
+/// A parallel draining iterator over the items of an `IndexSet`.
+///
+/// This `struct` is created by the [`par_drain`] method on [`IndexSet`]
+/// (provided by rayon's `ParallelDrainRange` trait). See its documentation for more.
+///
+/// [`par_drain`]: ../struct.IndexSet.html#method.par_drain
+/// [`IndexSet`]: ../struct.IndexSet.html
+pub struct ParDrain<'a, T: Send> {
+ entries: rayon::vec::Drain<'a, Bucket<T>>,
+}
+
+impl<T: Send> ParallelIterator for ParDrain<'_, T> {
+ type Item = T;
+
+ parallel_iterator_methods!(Bucket::key);
+}
+
+impl<T: Send> IndexedParallelIterator for ParDrain<'_, T> {
+ indexed_parallel_iterator_methods!(Bucket::key);
+}
+
+/// Parallel iterator methods and other parallel methods.
+///
+/// The following methods **require crate feature `"rayon"`**.
+///
+/// See also the `IntoParallelIterator` implementations.
+impl<T, S> IndexSet<T, S>
+where
+ T: Hash + Eq + Sync,
+ S: BuildHasher + Sync,
+{
+ /// Return a parallel iterator over the values that are in `self` but not `other`.
+ ///
+ /// While parallel iterators can process items in any order, their relative order
+ /// in the `self` set is still preserved for operations like `reduce` and `collect`.
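+ ///
+ /// A small usage sketch (requires crate feature `"rayon"`):
+ ///
+ /// ```
+ /// use indexmap::IndexSet;
+ /// use rayon::prelude::*;
+ ///
+ /// let a: IndexSet<_> = (1..5).collect();
+ /// let b: IndexSet<_> = (3..6).collect();
+ /// // Values of `a` not in `b`, in `a`'s order.
+ /// let diff: Vec<_> = a.par_difference(&b).copied().collect();
+ /// assert_eq!(diff, [1, 2]);
+ /// ```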
+ pub fn par_difference<'a, S2>(
+ &'a self,
+ other: &'a IndexSet<T, S2>,
+ ) -> ParDifference<'a, T, S, S2>
+ where
+ S2: BuildHasher + Sync,
+ {
+ ParDifference {
+ set1: self,
+ set2: other,
+ }
+ }
+
+ /// Return a parallel iterator over the values that are in `self` or `other`,
+ /// but not in both.
+ ///
+ /// While parallel iterators can process items in any order, their relative order
+ /// in the sets is still preserved for operations like `reduce` and `collect`.
+ /// Values from `self` are produced in their original order, followed by
+ /// values from `other` in their original order.
+ pub fn par_symmetric_difference<'a, S2>(
+ &'a self,
+ other: &'a IndexSet<T, S2>,
+ ) -> ParSymmetricDifference<'a, T, S, S2>
+ where
+ S2: BuildHasher + Sync,
+ {
+ ParSymmetricDifference {
+ set1: self,
+ set2: other,
+ }
+ }
+
+ /// Return a parallel iterator over the values that are in both `self` and `other`.
+ ///
+ /// While parallel iterators can process items in any order, their relative order
+ /// in the `self` set is still preserved for operations like `reduce` and `collect`.
+ pub fn par_intersection<'a, S2>(
+ &'a self,
+ other: &'a IndexSet<T, S2>,
+ ) -> ParIntersection<'a, T, S, S2>
+ where
+ S2: BuildHasher + Sync,
+ {
+ ParIntersection {
+ set1: self,
+ set2: other,
+ }
+ }
+
+ /// Return a parallel iterator over all values that are in `self` or `other`.
+ ///
+ /// While parallel iterators can process items in any order, their relative order
+ /// in the sets is still preserved for operations like `reduce` and `collect`.
+ /// Values from `self` are produced in their original order, followed by
+ /// values that are unique to `other` in their original order.
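+ ///
+ /// A small usage sketch (requires crate feature `"rayon"`):
+ ///
+ /// ```
+ /// use indexmap::IndexSet;
+ /// use rayon::prelude::*;
+ ///
+ /// let a: IndexSet<_> = (1..4).collect();
+ /// let b: IndexSet<_> = (3..6).collect();
+ /// // `a` in order, then the values unique to `b` in order.
+ /// let all: Vec<_> = a.par_union(&b).copied().collect();
+ /// assert_eq!(all, [1, 2, 3, 4, 5]);
+ /// ```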
+ pub fn par_union<'a, S2>(&'a self, other: &'a IndexSet<T, S2>) -> ParUnion<'a, T, S, S2>
+ where
+ S2: BuildHasher + Sync,
+ {
+ ParUnion {
+ set1: self,
+ set2: other,
+ }
+ }
+
+ /// Returns `true` if `self` contains all of the same values as `other`,
+ /// regardless of each set's indexed order, determined in parallel.
+ pub fn par_eq<S2>(&self, other: &IndexSet<T, S2>) -> bool
+ where
+ S2: BuildHasher + Sync,
+ {
+ self.len() == other.len() && self.par_is_subset(other)
+ }
+
+ /// Returns `true` if `self` has no elements in common with `other`,
+ /// determined in parallel.
+ pub fn par_is_disjoint<S2>(&self, other: &IndexSet<T, S2>) -> bool
+ where
+ S2: BuildHasher + Sync,
+ {
+ if self.len() <= other.len() {
+ self.par_iter().all(move |value| !other.contains(value))
+ } else {
+ other.par_iter().all(move |value| !self.contains(value))
+ }
+ }
+
+ /// Returns `true` if all elements of `other` are contained in `self`,
+ /// determined in parallel.
+ pub fn par_is_superset<S2>(&self, other: &IndexSet<T, S2>) -> bool
+ where
+ S2: BuildHasher + Sync,
+ {
+ other.par_is_subset(self)
+ }
+
+ /// Returns `true` if all elements of `self` are contained in `other`,
+ /// determined in parallel.
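+ ///
+ /// A small usage sketch (requires crate feature `"rayon"`):
+ ///
+ /// ```
+ /// use indexmap::IndexSet;
+ ///
+ /// let small: IndexSet<_> = (1..3).collect();
+ /// let big: IndexSet<_> = (1..4).collect();
+ /// assert!(small.par_is_subset(&big));
+ /// assert!(!big.par_is_subset(&small));
+ /// ```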
+ pub fn par_is_subset<S2>(&self, other: &IndexSet<T, S2>) -> bool
+ where
+ S2: BuildHasher + Sync,
+ {
+ self.len() <= other.len() && self.par_iter().all(move |value| other.contains(value))
+ }
+}
+
+/// A parallel iterator producing elements in the difference of `IndexSet`s.
+///
+/// This `struct` is created by the [`par_difference`] method on [`IndexSet`].
+/// See its documentation for more.
+///
+/// [`IndexSet`]: ../struct.IndexSet.html
+/// [`par_difference`]: ../struct.IndexSet.html#method.par_difference
+pub struct ParDifference<'a, T, S1, S2> {
+ set1: &'a IndexSet<T, S1>,
+ set2: &'a IndexSet<T, S2>,
+}
+
+impl<T, S1, S2> Clone for ParDifference<'_, T, S1, S2> {
+ fn clone(&self) -> Self {
+ ParDifference { ..*self }
+ }
+}
+
+impl<T, S1, S2> fmt::Debug for ParDifference<'_, T, S1, S2>
+where
+ T: fmt::Debug + Eq + Hash,
+ S1: BuildHasher,
+ S2: BuildHasher,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list()
+ .entries(self.set1.difference(self.set2))
+ .finish()
+ }
+}
+
+impl<'a, T, S1, S2> ParallelIterator for ParDifference<'a, T, S1, S2>
+where
+ T: Hash + Eq + Sync,
+ S1: BuildHasher + Sync,
+ S2: BuildHasher + Sync,
+{
+ type Item = &'a T;
+
+ fn drive_unindexed<C>(self, consumer: C) -> C::Result
+ where
+ C: UnindexedConsumer<Self::Item>,
+ {
+ let Self { set1, set2 } = self;
+
+ set1.par_iter()
+ .filter(move |&item| !set2.contains(item))
+ .drive_unindexed(consumer)
+ }
+}
+
+/// A parallel iterator producing elements in the intersection of `IndexSet`s.
+///
+/// This `struct` is created by the [`par_intersection`] method on [`IndexSet`].
+/// See its documentation for more.
+///
+/// [`IndexSet`]: ../struct.IndexSet.html
+/// [`par_intersection`]: ../struct.IndexSet.html#method.par_intersection
+pub struct ParIntersection<'a, T, S1, S2> {
+ set1: &'a IndexSet<T, S1>,
+ set2: &'a IndexSet<T, S2>,
+}
+
+impl<T, S1, S2> Clone for ParIntersection<'_, T, S1, S2> {
+ fn clone(&self) -> Self {
+ ParIntersection { ..*self }
+ }
+}
+
+impl<T, S1, S2> fmt::Debug for ParIntersection<'_, T, S1, S2>
+where
+ T: fmt::Debug + Eq + Hash,
+ S1: BuildHasher,
+ S2: BuildHasher,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list()
+ .entries(self.set1.intersection(self.set2))
+ .finish()
+ }
+}
+
+impl<'a, T, S1, S2> ParallelIterator for ParIntersection<'a, T, S1, S2>
+where
+ T: Hash + Eq + Sync,
+ S1: BuildHasher + Sync,
+ S2: BuildHasher + Sync,
+{
+ type Item = &'a T;
+
+ fn drive_unindexed<C>(self, consumer: C) -> C::Result
+ where
+ C: UnindexedConsumer<Self::Item>,
+ {
+ let Self { set1, set2 } = self;
+
+ set1.par_iter()
+ .filter(move |&item| set2.contains(item))
+ .drive_unindexed(consumer)
+ }
+}
+
+/// A parallel iterator producing elements in the symmetric difference of `IndexSet`s.
+///
+/// This `struct` is created by the [`par_symmetric_difference`] method on
+/// [`IndexSet`]. See its documentation for more.
+///
+/// [`IndexSet`]: ../struct.IndexSet.html
+/// [`par_symmetric_difference`]: ../struct.IndexSet.html#method.par_symmetric_difference
+pub struct ParSymmetricDifference<'a, T, S1, S2> {
+ set1: &'a IndexSet<T, S1>,
+ set2: &'a IndexSet<T, S2>,
+}
+
+impl<T, S1, S2> Clone for ParSymmetricDifference<'_, T, S1, S2> {
+ fn clone(&self) -> Self {
+ ParSymmetricDifference { ..*self }
+ }
+}
+
+impl<T, S1, S2> fmt::Debug for ParSymmetricDifference<'_, T, S1, S2>
+where
+ T: fmt::Debug + Eq + Hash,
+ S1: BuildHasher,
+ S2: BuildHasher,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list()
+ .entries(self.set1.symmetric_difference(self.set2))
+ .finish()
+ }
+}
+
+impl<'a, T, S1, S2> ParallelIterator for ParSymmetricDifference<'a, T, S1, S2>
+where
+ T: Hash + Eq + Sync,
+ S1: BuildHasher + Sync,
+ S2: BuildHasher + Sync,
+{
+ type Item = &'a T;
+
+ fn drive_unindexed<C>(self, consumer: C) -> C::Result
+ where
+ C: UnindexedConsumer<Self::Item>,
+ {
+ let Self { set1, set2 } = self;
+
+ set1.par_difference(set2)
+ .chain(set2.par_difference(set1))
+ .drive_unindexed(consumer)
+ }
+}
+
+/// A parallel iterator producing elements in the union of `IndexSet`s.
+///
+/// This `struct` is created by the [`par_union`] method on [`IndexSet`].
+/// See its documentation for more.
+///
+/// [`IndexSet`]: ../struct.IndexSet.html
+/// [`par_union`]: ../struct.IndexSet.html#method.par_union
+pub struct ParUnion<'a, T, S1, S2> {
+ set1: &'a IndexSet<T, S1>,
+ set2: &'a IndexSet<T, S2>,
+}
+
+impl<T, S1, S2> Clone for ParUnion<'_, T, S1, S2> {
+ fn clone(&self) -> Self {
+ ParUnion { ..*self }
+ }
+}
+
+impl<T, S1, S2> fmt::Debug for ParUnion<'_, T, S1, S2>
+where
+ T: fmt::Debug + Eq + Hash,
+ S1: BuildHasher,
+ S2: BuildHasher,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.set1.union(self.set2)).finish()
+ }
+}
+
+impl<'a, T, S1, S2> ParallelIterator for ParUnion<'a, T, S1, S2>
+where
+ T: Hash + Eq + Sync,
+ S1: BuildHasher + Sync,
+ S2: BuildHasher + Sync,
+{
+ type Item = &'a T;
+
+ fn drive_unindexed<C>(self, consumer: C) -> C::Result
+ where
+ C: UnindexedConsumer<Self::Item>,
+ {
+ let Self { set1, set2 } = self;
+
+ set1.par_iter()
+ .chain(set2.par_difference(set1))
+ .drive_unindexed(consumer)
+ }
+}
+
+/// Parallel sorting methods.
+///
+/// The following methods **require crate feature `"rayon"`**.
+impl<T, S> IndexSet<T, S>
+where
+ T: Hash + Eq + Send,
+ S: BuildHasher + Send,
+{
+ /// Sort the set’s values in parallel by their default ordering.
+ pub fn par_sort(&mut self)
+ where
+ T: Ord,
+ {
+ self.with_entries(|entries| {
+ entries.par_sort_by(|a, b| T::cmp(&a.key, &b.key));
+ });
+ }
+
+ /// Sort the set’s values in place and in parallel, using the comparison function `cmp`.
+ pub fn par_sort_by<F>(&mut self, cmp: F)
+ where
+ F: Fn(&T, &T) -> Ordering + Sync,
+ {
+ self.with_entries(|entries| {
+ entries.par_sort_by(move |a, b| cmp(&a.key, &b.key));
+ });
+ }
+
+ /// Sort the values of the set in parallel and return a by-value parallel iterator of
+ /// the values with the result.
+ pub fn par_sorted_by<F>(self, cmp: F) -> IntoParIter<T>
+ where
+ F: Fn(&T, &T) -> Ordering + Sync,
+ {
+ let mut entries = self.into_entries();
+ entries.par_sort_by(move |a, b| cmp(&a.key, &b.key));
+ IntoParIter { entries }
+ }
+
+ /// Sort the set's values in parallel by their default ordering.
+ pub fn par_sort_unstable(&mut self)
+ where
+ T: Ord,
+ {
+ self.with_entries(|entries| {
+ entries.par_sort_unstable_by(|a, b| T::cmp(&a.key, &b.key));
+ });
+ }
+
+ /// Sort the set’s values in place and in parallel, using the comparison function `cmp`.
+ pub fn par_sort_unstable_by<F>(&mut self, cmp: F)
+ where
+ F: Fn(&T, &T) -> Ordering + Sync,
+ {
+ self.with_entries(|entries| {
+ entries.par_sort_unstable_by(move |a, b| cmp(&a.key, &b.key));
+ });
+ }
+
+ /// Sort the values of the set in parallel and return a by-value parallel iterator of
+ /// the values with the result.
+ pub fn par_sorted_unstable_by<F>(self, cmp: F) -> IntoParIter<T>
+ where
+ F: Fn(&T, &T) -> Ordering + Sync,
+ {
+ let mut entries = self.into_entries();
+ entries.par_sort_unstable_by(move |a, b| cmp(&a.key, &b.key));
+ IntoParIter { entries }
+ }
+
+ /// Sort the set’s values in place and in parallel, using a key extraction function.
+ pub fn par_sort_by_cached_key<K, F>(&mut self, sort_key: F)
+ where
+ K: Ord + Send,
+ F: Fn(&T) -> K + Sync,
+ {
+ self.with_entries(move |entries| {
+ entries.par_sort_by_cached_key(move |a| sort_key(&a.key));
+ });
+ }
+}
+
+impl<T, S> FromParallelIterator<T> for IndexSet<T, S>
+where
+ T: Eq + Hash + Send,
+ S: BuildHasher + Default + Send,
+{
+ fn from_par_iter<I>(iter: I) -> Self
+ where
+ I: IntoParallelIterator<Item = T>,
+ {
+ let list = collect(iter);
+ let len = list.iter().map(Vec::len).sum();
+ let mut set = Self::with_capacity_and_hasher(len, S::default());
+ for vec in list {
+ set.extend(vec);
+ }
+ set
+ }
+}
+
+impl<T, S> ParallelExtend<T> for IndexSet<T, S>
+where
+ T: Eq + Hash + Send,
+ S: BuildHasher + Send,
+{
+ fn par_extend<I>(&mut self, iter: I)
+ where
+ I: IntoParallelIterator<Item = T>,
+ {
+ for vec in collect(iter) {
+ self.extend(vec);
+ }
+ }
+}
+
+impl<'a, T: 'a, S> ParallelExtend<&'a T> for IndexSet<T, S>
+where
+ T: Copy + Eq + Hash + Send + Sync,
+ S: BuildHasher + Send,
+{
+ fn par_extend<I>(&mut self, iter: I)
+ where
+ I: IntoParallelIterator<Item = &'a T>,
+ {
+ for vec in collect(iter) {
+ self.extend(vec);
+ }
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+
+ #[test]
+ fn insert_order() {
+ let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23];
+ let mut set = IndexSet::new();
+
+ for &elt in &insert {
+ set.insert(elt);
+ }
+
+ assert_eq!(set.par_iter().count(), set.len());
+ assert_eq!(set.par_iter().count(), insert.len());
+ insert.par_iter().zip(&set).for_each(|(a, b)| {
+ assert_eq!(a, b);
+ });
+ (0..insert.len())
+ .into_par_iter()
+ .zip(&set)
+ .for_each(|(i, v)| {
+ assert_eq!(set.get_index(i).unwrap(), v);
+ });
+ }
+
+ #[test]
+ fn partial_eq_and_eq() {
+ let mut set_a = IndexSet::new();
+ set_a.insert(1);
+ set_a.insert(2);
+ let mut set_b = set_a.clone();
+ assert!(set_a.par_eq(&set_b));
+ set_b.swap_remove(&1);
+ assert!(!set_a.par_eq(&set_b));
+ set_b.insert(3);
+ assert!(!set_a.par_eq(&set_b));
+
+ let set_c: IndexSet<_> = set_b.into_par_iter().collect();
+ assert!(!set_a.par_eq(&set_c));
+ assert!(!set_c.par_eq(&set_a));
+ }
+
+ #[test]
+ fn extend() {
+ let mut set = IndexSet::new();
+ set.par_extend(vec![&1, &2, &3, &4]);
+ set.par_extend(vec![5, 6]);
+ assert_eq!(
+ set.into_par_iter().collect::<Vec<_>>(),
+ vec![1, 2, 3, 4, 5, 6]
+ );
+ }
+
+ #[test]
+ fn comparisons() {
+ let set_a: IndexSet<_> = (0..3).collect();
+ let set_b: IndexSet<_> = (3..6).collect();
+ let set_c: IndexSet<_> = (0..6).collect();
+ let set_d: IndexSet<_> = (3..9).collect();
+
+ assert!(!set_a.par_is_disjoint(&set_a));
+ assert!(set_a.par_is_subset(&set_a));
+ assert!(set_a.par_is_superset(&set_a));
+
+ assert!(set_a.par_is_disjoint(&set_b));
+ assert!(set_b.par_is_disjoint(&set_a));
+ assert!(!set_a.par_is_subset(&set_b));
+ assert!(!set_b.par_is_subset(&set_a));
+ assert!(!set_a.par_is_superset(&set_b));
+ assert!(!set_b.par_is_superset(&set_a));
+
+ assert!(!set_a.par_is_disjoint(&set_c));
+ assert!(!set_c.par_is_disjoint(&set_a));
+ assert!(set_a.par_is_subset(&set_c));
+ assert!(!set_c.par_is_subset(&set_a));
+ assert!(!set_a.par_is_superset(&set_c));
+ assert!(set_c.par_is_superset(&set_a));
+
+ assert!(!set_c.par_is_disjoint(&set_d));
+ assert!(!set_d.par_is_disjoint(&set_c));
+ assert!(!set_c.par_is_subset(&set_d));
+ assert!(!set_d.par_is_subset(&set_c));
+ assert!(!set_c.par_is_superset(&set_d));
+ assert!(!set_d.par_is_superset(&set_c));
+ }
+
+ #[test]
+ fn iter_comparisons() {
+ use std::iter::empty;
+
+ fn check<'a, I1, I2>(iter1: I1, iter2: I2)
+ where
+ I1: ParallelIterator<Item = &'a i32>,
+ I2: Iterator<Item = i32>,
+ {
+ let v1: Vec<_> = iter1.copied().collect();
+ let v2: Vec<_> = iter2.collect();
+ assert_eq!(v1, v2);
+ }
+
+ let set_a: IndexSet<_> = (0..3).collect();
+ let set_b: IndexSet<_> = (3..6).collect();
+ let set_c: IndexSet<_> = (0..6).collect();
+ let set_d: IndexSet<_> = (3..9).rev().collect();
+
+ check(set_a.par_difference(&set_a), empty());
+ check(set_a.par_symmetric_difference(&set_a), empty());
+ check(set_a.par_intersection(&set_a), 0..3);
+ check(set_a.par_union(&set_a), 0..3);
+
+ check(set_a.par_difference(&set_b), 0..3);
+ check(set_b.par_difference(&set_a), 3..6);
+ check(set_a.par_symmetric_difference(&set_b), 0..6);
+ check(set_b.par_symmetric_difference(&set_a), (3..6).chain(0..3));
+ check(set_a.par_intersection(&set_b), empty());
+ check(set_b.par_intersection(&set_a), empty());
+ check(set_a.par_union(&set_b), 0..6);
+ check(set_b.par_union(&set_a), (3..6).chain(0..3));
+
+ check(set_a.par_difference(&set_c), empty());
+ check(set_c.par_difference(&set_a), 3..6);
+ check(set_a.par_symmetric_difference(&set_c), 3..6);
+ check(set_c.par_symmetric_difference(&set_a), 3..6);
+ check(set_a.par_intersection(&set_c), 0..3);
+ check(set_c.par_intersection(&set_a), 0..3);
+ check(set_a.par_union(&set_c), 0..6);
+ check(set_c.par_union(&set_a), 0..6);
+
+ check(set_c.par_difference(&set_d), 0..3);
+ check(set_d.par_difference(&set_c), (6..9).rev());
+ check(
+ set_c.par_symmetric_difference(&set_d),
+ (0..3).chain((6..9).rev()),
+ );
+ check(
+ set_d.par_symmetric_difference(&set_c),
+ (6..9).rev().chain(0..3),
+ );
+ check(set_c.par_intersection(&set_d), 3..6);
+ check(set_d.par_intersection(&set_c), (3..6).rev());
+ check(set_c.par_union(&set_d), (0..6).chain((6..9).rev()));
+ check(set_d.par_union(&set_c), (3..9).rev().chain(0..3));
+ }
+}
diff --git a/vendor/indexmap/src/rustc.rs b/vendor/indexmap/src/rustc.rs
new file mode 100644
index 0000000..b843858
--- /dev/null
+++ b/vendor/indexmap/src/rustc.rs
@@ -0,0 +1,158 @@
+//! Minimal support for `rustc-rayon`, not intended for general use.
+
+use crate::vec::Vec;
+use crate::{Bucket, Entries, IndexMap, IndexSet};
+
+use rustc_rayon::iter::plumbing::{Consumer, ProducerCallback, UnindexedConsumer};
+use rustc_rayon::iter::{IndexedParallelIterator, IntoParallelIterator, ParallelIterator};
+
+mod map {
+ use super::*;
+
+ impl<K, V, S> IntoParallelIterator for IndexMap<K, V, S>
+ where
+ K: Send,
+ V: Send,
+ {
+ type Item = (K, V);
+ type Iter = IntoParIter<K, V>;
+
+ fn into_par_iter(self) -> Self::Iter {
+ IntoParIter {
+ entries: self.into_entries(),
+ }
+ }
+ }
+
+ pub struct IntoParIter<K, V> {
+ entries: Vec<Bucket<K, V>>,
+ }
+
+ impl<K: Send, V: Send> ParallelIterator for IntoParIter<K, V> {
+ type Item = (K, V);
+
+ parallel_iterator_methods!(Bucket::key_value);
+ }
+
+ impl<K: Send, V: Send> IndexedParallelIterator for IntoParIter<K, V> {
+ indexed_parallel_iterator_methods!(Bucket::key_value);
+ }
+
+ impl<'a, K, V, S> IntoParallelIterator for &'a IndexMap<K, V, S>
+ where
+ K: Sync,
+ V: Sync,
+ {
+ type Item = (&'a K, &'a V);
+ type Iter = ParIter<'a, K, V>;
+
+ fn into_par_iter(self) -> Self::Iter {
+ ParIter {
+ entries: self.as_entries(),
+ }
+ }
+ }
+
+ pub struct ParIter<'a, K, V> {
+ entries: &'a [Bucket<K, V>],
+ }
+
+ impl<'a, K: Sync, V: Sync> ParallelIterator for ParIter<'a, K, V> {
+ type Item = (&'a K, &'a V);
+
+ parallel_iterator_methods!(Bucket::refs);
+ }
+
+ impl<K: Sync, V: Sync> IndexedParallelIterator for ParIter<'_, K, V> {
+ indexed_parallel_iterator_methods!(Bucket::refs);
+ }
+
+ impl<'a, K, V, S> IntoParallelIterator for &'a mut IndexMap<K, V, S>
+ where
+ K: Sync + Send,
+ V: Send,
+ {
+ type Item = (&'a K, &'a mut V);
+ type Iter = ParIterMut<'a, K, V>;
+
+ fn into_par_iter(self) -> Self::Iter {
+ ParIterMut {
+ entries: self.as_entries_mut(),
+ }
+ }
+ }
+
+ pub struct ParIterMut<'a, K, V> {
+ entries: &'a mut [Bucket<K, V>],
+ }
+
+ impl<'a, K: Sync + Send, V: Send> ParallelIterator for ParIterMut<'a, K, V> {
+ type Item = (&'a K, &'a mut V);
+
+ parallel_iterator_methods!(Bucket::ref_mut);
+ }
+
+ impl<K: Sync + Send, V: Send> IndexedParallelIterator for ParIterMut<'_, K, V> {
+ indexed_parallel_iterator_methods!(Bucket::ref_mut);
+ }
+}
+
+mod set {
+ use super::*;
+
+ impl<T, S> IntoParallelIterator for IndexSet<T, S>
+ where
+ T: Send,
+ {
+ type Item = T;
+ type Iter = IntoParIter<T>;
+
+ fn into_par_iter(self) -> Self::Iter {
+ IntoParIter {
+ entries: self.into_entries(),
+ }
+ }
+ }
+
+ pub struct IntoParIter<T> {
+ entries: Vec<Bucket<T, ()>>,
+ }
+
+ impl<T: Send> ParallelIterator for IntoParIter<T> {
+ type Item = T;
+
+ parallel_iterator_methods!(Bucket::key);
+ }
+
+ impl<T: Send> IndexedParallelIterator for IntoParIter<T> {
+ indexed_parallel_iterator_methods!(Bucket::key);
+ }
+
+ impl<'a, T, S> IntoParallelIterator for &'a IndexSet<T, S>
+ where
+ T: Sync,
+ {
+ type Item = &'a T;
+ type Iter = ParIter<'a, T>;
+
+ fn into_par_iter(self) -> Self::Iter {
+ ParIter {
+ entries: self.as_entries(),
+ }
+ }
+ }
+
+ pub struct ParIter<'a, T> {
+ entries: &'a [Bucket<T, ()>],
+ }
+
+ impl<'a, T: Sync> ParallelIterator for ParIter<'a, T> {
+ type Item = &'a T;
+
+ parallel_iterator_methods!(Bucket::key_ref);
+ }
+
+ impl<T: Sync> IndexedParallelIterator for ParIter<'_, T> {
+ indexed_parallel_iterator_methods!(Bucket::key_ref);
+ }
+}
diff --git a/vendor/indexmap/src/serde.rs b/vendor/indexmap/src/serde.rs
new file mode 100644
index 0000000..37de3e3
--- /dev/null
+++ b/vendor/indexmap/src/serde.rs
@@ -0,0 +1,151 @@
+use serde::de::value::{MapDeserializer, SeqDeserializer};
+use serde::de::{
+ Deserialize, Deserializer, Error, IntoDeserializer, MapAccess, SeqAccess, Visitor,
+};
+use serde::ser::{Serialize, Serializer};
+
+use core::fmt::{self, Formatter};
+use core::hash::{BuildHasher, Hash};
+use core::marker::PhantomData;
+
+use crate::IndexMap;
+
+impl<K, V, S> Serialize for IndexMap<K, V, S>
+where
+ K: Serialize + Hash + Eq,
+ V: Serialize,
+ S: BuildHasher,
+{
+ fn serialize<T>(&self, serializer: T) -> Result<T::Ok, T::Error>
+ where
+ T: Serializer,
+ {
+ serializer.collect_map(self)
+ }
+}
+
+struct IndexMapVisitor<K, V, S>(PhantomData<(K, V, S)>);
+
+impl<'de, K, V, S> Visitor<'de> for IndexMapVisitor<K, V, S>
+where
+ K: Deserialize<'de> + Eq + Hash,
+ V: Deserialize<'de>,
+ S: Default + BuildHasher,
+{
+ type Value = IndexMap<K, V, S>;
+
+ fn expecting(&self, formatter: &mut Formatter<'_>) -> fmt::Result {
+ write!(formatter, "a map")
+ }
+
+ fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error>
+ where
+ A: MapAccess<'de>,
+ {
+ let mut values =
+ IndexMap::with_capacity_and_hasher(map.size_hint().unwrap_or(0), S::default());
+
+ while let Some((key, value)) = map.next_entry()? {
+ values.insert(key, value);
+ }
+
+ Ok(values)
+ }
+}
+
+impl<'de, K, V, S> Deserialize<'de> for IndexMap<K, V, S>
+where
+ K: Deserialize<'de> + Eq + Hash,
+ V: Deserialize<'de>,
+ S: Default + BuildHasher,
+{
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where
+ D: Deserializer<'de>,
+ {
+ deserializer.deserialize_map(IndexMapVisitor(PhantomData))
+ }
+}
+
+impl<'de, K, V, S, E> IntoDeserializer<'de, E> for IndexMap<K, V, S>
+where
+ K: IntoDeserializer<'de, E> + Eq + Hash,
+ V: IntoDeserializer<'de, E>,
+ S: BuildHasher,
+ E: Error,
+{
+ type Deserializer = MapDeserializer<'de, <Self as IntoIterator>::IntoIter, E>;
+
+ fn into_deserializer(self) -> Self::Deserializer {
+ MapDeserializer::new(self.into_iter())
+ }
+}
+
+use crate::IndexSet;
+
+impl<T, S> Serialize for IndexSet<T, S>
+where
+ T: Serialize + Hash + Eq,
+ S: BuildHasher,
+{
+ fn serialize<Se>(&self, serializer: Se) -> Result<Se::Ok, Se::Error>
+ where
+ Se: Serializer,
+ {
+ serializer.collect_seq(self)
+ }
+}
+
+struct IndexSetVisitor<T, S>(PhantomData<(T, S)>);
+
+impl<'de, T, S> Visitor<'de> for IndexSetVisitor<T, S>
+where
+ T: Deserialize<'de> + Eq + Hash,
+ S: Default + BuildHasher,
+{
+ type Value = IndexSet<T, S>;
+
+ fn expecting(&self, formatter: &mut Formatter<'_>) -> fmt::Result {
+ write!(formatter, "a set")
+ }
+
+ fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
+ where
+ A: SeqAccess<'de>,
+ {
+ let mut values =
+ IndexSet::with_capacity_and_hasher(seq.size_hint().unwrap_or(0), S::default());
+
+ while let Some(value) = seq.next_element()? {
+ values.insert(value);
+ }
+
+ Ok(values)
+ }
+}
+
+impl<'de, T, S> Deserialize<'de> for IndexSet<T, S>
+where
+ T: Deserialize<'de> + Eq + Hash,
+ S: Default + BuildHasher,
+{
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where
+ D: Deserializer<'de>,
+ {
+ deserializer.deserialize_seq(IndexSetVisitor(PhantomData))
+ }
+}
+
+impl<'de, T, S, E> IntoDeserializer<'de, E> for IndexSet<T, S>
+where
+ T: IntoDeserializer<'de, E> + Eq + Hash,
+ S: BuildHasher,
+ E: Error,
+{
+ type Deserializer = SeqDeserializer<<Self as IntoIterator>::IntoIter, E>;
+
+ fn into_deserializer(self) -> Self::Deserializer {
+ SeqDeserializer::new(self.into_iter())
+ }
+}
diff --git a/vendor/indexmap/src/set.rs b/vendor/indexmap/src/set.rs
new file mode 100644
index 0000000..811f462
--- /dev/null
+++ b/vendor/indexmap/src/set.rs
@@ -0,0 +1,1018 @@
+//! A hash set implemented using `IndexMap`
+
+mod iter;
+mod slice;
+
+#[cfg(test)]
+mod tests;
+
+pub use self::iter::{Difference, Drain, Intersection, IntoIter, Iter, SymmetricDifference, Union};
+pub use self::slice::Slice;
+
+#[cfg(feature = "rayon")]
+pub use crate::rayon::set as rayon;
+use crate::TryReserveError;
+
+#[cfg(feature = "std")]
+use std::collections::hash_map::RandomState;
+
+use crate::util::try_simplify_range;
+use alloc::boxed::Box;
+use alloc::vec::Vec;
+use core::cmp::Ordering;
+use core::fmt;
+use core::hash::{BuildHasher, Hash};
+use core::ops::{BitAnd, BitOr, BitXor, Index, RangeBounds, Sub};
+
+use super::{Entries, Equivalent, IndexMap};
+
+type Bucket<T> = super::Bucket<T, ()>;
+
+/// A hash set where the iteration order of the values is independent of their
+/// hash values.
+///
+/// The interface is closely compatible with the standard `HashSet`, but also
+/// has additional features.
+///
+/// # Order
+///
+/// The values have a consistent order that is determined by the sequence of
+/// insertion and removal calls on the set. The order does not depend on the
+/// values or the hash function at all. Note that insertion order and value
+/// are not affected if a re-insertion is attempted once an element is
+/// already present.
+///
+/// All iterators traverse the set *in order*. Set operation iterators like
+/// `union` produce a concatenated order, as do their matching "bitwise"
+/// operators. See their documentation for specifics.
+///
+/// The insertion order is preserved, with **notable exceptions** like the
+/// `.remove()` or `.swap_remove()` methods. Methods such as `.sort_by()` of
+/// course result in a new order, depending on the sorting order.
+///
+/// # Indices
+///
+/// The values are indexed in a compact range without holes in the range
+/// `0..self.len()`. For example, the method `.get_full` looks up the index for
+/// a value, and the method `.get_index` looks up the value by index.
+///
+/// # Complexity
+///
+/// Internally, `IndexSet<T, S>` just holds an [`IndexMap<T, (), S>`](IndexMap). Thus the complexity
+/// of the two is the same for most methods.
+///
+/// # Examples
+///
+/// ```
+/// use indexmap::IndexSet;
+///
+/// // Collects which letters appear in a sentence.
+/// let letters: IndexSet<_> = "a short treatise on fungi".chars().collect();
+///
+/// assert!(letters.contains(&'s'));
+/// assert!(letters.contains(&'t'));
+/// assert!(letters.contains(&'u'));
+/// assert!(!letters.contains(&'y'));
+/// ```
+#[cfg(feature = "std")]
+pub struct IndexSet<T, S = RandomState> {
+ pub(crate) map: IndexMap<T, (), S>,
+}
+#[cfg(not(feature = "std"))]
+pub struct IndexSet<T, S> {
+ pub(crate) map: IndexMap<T, (), S>,
+}
+
+impl<T, S> Clone for IndexSet<T, S>
+where
+ T: Clone,
+ S: Clone,
+{
+ fn clone(&self) -> Self {
+ IndexSet {
+ map: self.map.clone(),
+ }
+ }
+
+ fn clone_from(&mut self, other: &Self) {
+ self.map.clone_from(&other.map);
+ }
+}
+
+impl<T, S> Entries for IndexSet<T, S> {
+ type Entry = Bucket<T>;
+
+ #[inline]
+ fn into_entries(self) -> Vec<Self::Entry> {
+ self.map.into_entries()
+ }
+
+ #[inline]
+ fn as_entries(&self) -> &[Self::Entry] {
+ self.map.as_entries()
+ }
+
+ #[inline]
+ fn as_entries_mut(&mut self) -> &mut [Self::Entry] {
+ self.map.as_entries_mut()
+ }
+
+ fn with_entries<F>(&mut self, f: F)
+ where
+ F: FnOnce(&mut [Self::Entry]),
+ {
+ self.map.with_entries(f);
+ }
+}
+
+impl<T, S> fmt::Debug for IndexSet<T, S>
+where
+ T: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ if cfg!(not(feature = "test_debug")) {
+ f.debug_set().entries(self.iter()).finish()
+ } else {
+ // Let the inner `IndexMap` print all of its details
+ f.debug_struct("IndexSet").field("map", &self.map).finish()
+ }
+ }
+}
+
+#[cfg(feature = "std")]
+#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
+impl<T> IndexSet<T> {
+ /// Create a new set. (Does not allocate.)
+ pub fn new() -> Self {
+ IndexSet {
+ map: IndexMap::new(),
+ }
+ }
+
+ /// Create a new set with capacity for `n` elements.
+ /// (Does not allocate if `n` is zero.)
+ ///
+ /// Computes in **O(n)** time.
+ pub fn with_capacity(n: usize) -> Self {
+ IndexSet {
+ map: IndexMap::with_capacity(n),
+ }
+ }
+}
+
+impl<T, S> IndexSet<T, S> {
+ /// Create a new set with capacity for `n` elements.
+ /// (Does not allocate if `n` is zero.)
+ ///
+ /// Computes in **O(n)** time.
+ pub fn with_capacity_and_hasher(n: usize, hash_builder: S) -> Self {
+ IndexSet {
+ map: IndexMap::with_capacity_and_hasher(n, hash_builder),
+ }
+ }
+
+ /// Create a new set with `hash_builder`.
+ ///
+ /// This function is `const`, so it
+ /// can be called in `static` contexts.
+ pub const fn with_hasher(hash_builder: S) -> Self {
+ IndexSet {
+ map: IndexMap::with_hasher(hash_builder),
+ }
+ }
+
+ /// Return the number of elements the set can hold without reallocating.
+ ///
+ /// This number is a lower bound; the set might be able to hold more,
+ /// but is guaranteed to be able to hold at least this many.
+ ///
+ /// Computes in **O(1)** time.
+ pub fn capacity(&self) -> usize {
+ self.map.capacity()
+ }
+
+ /// Return a reference to the set's `BuildHasher`.
+ pub fn hasher(&self) -> &S {
+ self.map.hasher()
+ }
+
+ /// Return the number of elements in the set.
+ ///
+ /// Computes in **O(1)** time.
+ pub fn len(&self) -> usize {
+ self.map.len()
+ }
+
+ /// Returns true if the set contains no elements.
+ ///
+ /// Computes in **O(1)** time.
+ pub fn is_empty(&self) -> bool {
+ self.map.is_empty()
+ }
+
+ /// Return an iterator over the values of the set, in their order.
+ pub fn iter(&self) -> Iter<'_, T> {
+ Iter::new(self.as_entries())
+ }
+
+ /// Remove all elements in the set, while preserving its capacity.
+ ///
+ /// Computes in **O(n)** time.
+ pub fn clear(&mut self) {
+ self.map.clear();
+ }
+
+ /// Shortens the set, keeping the first `len` elements and dropping the rest.
+ ///
+ /// If `len` is greater than the set's current length, this has no effect.
+ pub fn truncate(&mut self, len: usize) {
+ self.map.truncate(len);
+ }
+
+ /// Clears the `IndexSet` in the given index range, returning those values
+ /// as a drain iterator.
+ ///
+ /// The range may be any type that implements `RangeBounds<usize>`,
+ /// including all of the `std::ops::Range*` types, or even a tuple pair of
+ /// `Bound` start and end values. To drain the set entirely, use `RangeFull`
+ /// like `set.drain(..)`.
+ ///
+ /// This shifts down all entries following the drained range to fill the
+ /// gap, and keeps the allocated memory for reuse.
+ ///
+ /// ***Panics*** if the starting point is greater than the end point or if
+ /// the end point is greater than the length of the set.
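+ ///
+ /// A small example:
+ ///
+ /// ```
+ /// use indexmap::IndexSet;
+ ///
+ /// let mut set: IndexSet<i32> = (0..5).collect();
+ /// let drained: Vec<i32> = set.drain(1..3).collect();
+ /// assert_eq!(drained, [1, 2]);
+ /// // The remaining values shift down to fill the gap.
+ /// assert_eq!(set.iter().copied().collect::<Vec<_>>(), [0, 3, 4]);
+ /// ```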
+ pub fn drain<R>(&mut self, range: R) -> Drain<'_, T>
+ where
+ R: RangeBounds<usize>,
+ {
+ Drain::new(self.map.core.drain(range))
+ }
+
+ /// Splits the collection into two at the given index.
+ ///
+ /// Returns a newly allocated set containing the elements in the range
+ /// `[at, len)`. After the call, the original set will be left containing
+ /// the elements `[0, at)` with its previous capacity unchanged.
+ ///
+ /// ***Panics*** if `at > len`.
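+ ///
+ /// A small example:
+ ///
+ /// ```
+ /// use indexmap::IndexSet;
+ ///
+ /// let mut set: IndexSet<i32> = (0..5).collect();
+ /// let tail = set.split_off(3);
+ /// assert_eq!(set.iter().copied().collect::<Vec<_>>(), [0, 1, 2]);
+ /// assert_eq!(tail.iter().copied().collect::<Vec<_>>(), [3, 4]);
+ /// ```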
+ pub fn split_off(&mut self, at: usize) -> Self
+ where
+ S: Clone,
+ {
+ Self {
+ map: self.map.split_off(at),
+ }
+ }
+}
+
+impl<T, S> IndexSet<T, S>
+where
+ T: Hash + Eq,
+ S: BuildHasher,
+{
+ /// Reserve capacity for `additional` more values.
+ ///
+ /// Computes in **O(n)** time.
+ pub fn reserve(&mut self, additional: usize) {
+ self.map.reserve(additional);
+ }
+
+ /// Reserve capacity for `additional` more values, without over-allocating.
+ ///
+ /// Unlike `reserve`, this does not deliberately over-allocate the entry capacity to avoid
+ /// frequent re-allocations. However, the underlying data structures may still have internal
+ /// capacity requirements, and the allocator itself may give more space than requested, so this
+ /// cannot be relied upon to be precisely minimal.
+ ///
+ /// Computes in **O(n)** time.
+ pub fn reserve_exact(&mut self, additional: usize) {
+ self.map.reserve_exact(additional);
+ }
+
+ /// Try to reserve capacity for `additional` more values.
+ ///
+ /// Computes in **O(n)** time.
+ pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
+ self.map.try_reserve(additional)
+ }
+
+ /// Try to reserve capacity for `additional` more values, without over-allocating.
+ ///
+ /// Unlike `try_reserve`, this does not deliberately over-allocate the entry capacity to avoid
+ /// frequent re-allocations. However, the underlying data structures may still have internal
+ /// capacity requirements, and the allocator itself may give more space than requested, so this
+ /// cannot be relied upon to be precisely minimal.
+ ///
+ /// Computes in **O(n)** time.
+ pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
+ self.map.try_reserve_exact(additional)
+ }
+
+ /// Shrink the capacity of the set as much as possible.
+ ///
+ /// Computes in **O(n)** time.
+ pub fn shrink_to_fit(&mut self) {
+ self.map.shrink_to_fit();
+ }
+
+ /// Shrink the capacity of the set with a lower limit.
+ ///
+ /// Computes in **O(n)** time.
+ pub fn shrink_to(&mut self, min_capacity: usize) {
+ self.map.shrink_to(min_capacity);
+ }
+
+ /// Insert the value into the set.
+ ///
+ /// If an equivalent item already exists in the set, it returns
+ /// `false` leaving the original value in the set and without
+ /// altering its insertion order. Otherwise, it inserts the new
+ /// item and returns `true`.
+ ///
+ /// Computes in **O(1)** time (amortized average).
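+ ///
+ /// A small example:
+ ///
+ /// ```
+ /// use indexmap::IndexSet;
+ ///
+ /// let mut set = IndexSet::new();
+ /// assert!(set.insert("a"));
+ /// assert!(set.insert("b"));
+ /// // Re-inserting an equivalent value returns `false` and keeps the original order.
+ /// assert!(!set.insert("a"));
+ /// assert_eq!(set.iter().copied().collect::<Vec<_>>(), ["a", "b"]);
+ /// ```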
+ pub fn insert(&mut self, value: T) -> bool {
+ self.map.insert(value, ()).is_none()
+ }
+
+ /// Insert the value into the set, and get its index.
+ ///
+ /// If an equivalent item already exists in the set, it returns
+ /// the index of the existing item and `false`, leaving the
+ /// original value in the set and without altering its insertion
+ /// order. Otherwise, it inserts the new item and returns the index
+ /// of the inserted item and `true`.
+ ///
+ /// Computes in **O(1)** time (amortized average).
+ pub fn insert_full(&mut self, value: T) -> (usize, bool) {
+ let (index, existing) = self.map.insert_full(value, ());
+ (index, existing.is_none())
+ }
+
+ /// Return an iterator over the values that are in `self` but not `other`.
+ ///
+ /// Values are produced in the same order that they appear in `self`.
+ pub fn difference<'a, S2>(&'a self, other: &'a IndexSet<T, S2>) -> Difference<'a, T, S2>
+ where
+ S2: BuildHasher,
+ {
+ Difference::new(self, other)
+ }
+
+ /// Return an iterator over the values that are in `self` or `other`,
+ /// but not in both.
+ ///
+ /// Values from `self` are produced in their original order, followed by
+ /// values from `other` in their original order.
+ pub fn symmetric_difference<'a, S2>(
+ &'a self,
+ other: &'a IndexSet<T, S2>,
+ ) -> SymmetricDifference<'a, T, S, S2>
+ where
+ S2: BuildHasher,
+ {
+ SymmetricDifference::new(self, other)
+ }
+
+ /// Return an iterator over the values that are in both `self` and `other`.
+ ///
+ /// Values are produced in the same order that they appear in `self`.
+ pub fn intersection<'a, S2>(&'a self, other: &'a IndexSet<T, S2>) -> Intersection<'a, T, S2>
+ where
+ S2: BuildHasher,
+ {
+ Intersection::new(self, other)
+ }
+
+ /// Return an iterator over all values that are in `self` or `other`.
+ ///
+ /// Values from `self` are produced in their original order, followed by
+ /// values that are unique to `other` in their original order.
+ pub fn union<'a, S2>(&'a self, other: &'a IndexSet<T, S2>) -> Union<'a, T, S>
+ where
+ S2: BuildHasher,
+ {
+ Union::new(self, other)
+ }
+
+ /// Return `true` if an equivalent to `value` exists in the set.
+ ///
+ /// Computes in **O(1)** time (average).
+ pub fn contains<Q: ?Sized>(&self, value: &Q) -> bool
+ where
+ Q: Hash + Equivalent<T>,
+ {
+ self.map.contains_key(value)
+ }
+
+ /// Return a reference to the value stored in the set, if it is present,
+ /// else `None`.
+ ///
+ /// Computes in **O(1)** time (average).
+ pub fn get<Q: ?Sized>(&self, value: &Q) -> Option<&T>
+ where
+ Q: Hash + Equivalent<T>,
+ {
+ self.map.get_key_value(value).map(|(x, &())| x)
+ }
+
+ /// Return item index and value.
+ pub fn get_full<Q: ?Sized>(&self, value: &Q) -> Option<(usize, &T)>
+ where
+ Q: Hash + Equivalent<T>,
+ {
+ self.map.get_full(value).map(|(i, x, &())| (i, x))
+ }
+
+ /// Return item index, if it exists in the set.
+ ///
+ /// Computes in **O(1)** time (average).
+ pub fn get_index_of<Q: ?Sized>(&self, value: &Q) -> Option<usize>
+ where
+ Q: Hash + Equivalent<T>,
+ {
+ self.map.get_index_of(value)
+ }
+
+ /// Adds a value to the set, replacing the existing value, if any, that is
+ /// equal to the given one, without altering its insertion order. Returns
+ /// the replaced value.
+ ///
+ /// Computes in **O(1)** time (average).
+ pub fn replace(&mut self, value: T) -> Option<T> {
+ self.replace_full(value).1
+ }
+
+ /// Adds a value to the set, replacing the existing value, if any, that is
+ /// equal to the given one, without altering its insertion order. Returns
+ /// the index of the item and its replaced value.
+ ///
+ /// Computes in **O(1)** time (average).
+ pub fn replace_full(&mut self, value: T) -> (usize, Option<T>) {
+ use super::map::Entry::*;
+
+ match self.map.entry(value) {
+ Vacant(e) => {
+ let index = e.index();
+ e.insert(());
+ (index, None)
+ }
+ Occupied(e) => (e.index(), Some(e.replace_key())),
+ }
+ }
+
+ /// Remove the value from the set, and return `true` if it was present.
+ ///
+ /// **NOTE:** This is equivalent to `.swap_remove(value)`; if you want
+ /// to preserve the order of the values in the set, use `.shift_remove(value)` instead.
+ ///
+ /// Computes in **O(1)** time (average).
+ pub fn remove<Q: ?Sized>(&mut self, value: &Q) -> bool
+ where
+ Q: Hash + Equivalent<T>,
+ {
+ self.swap_remove(value)
+ }
+
+ /// Remove the value from the set, and return `true` if it was present.
+ ///
+ /// Like `Vec::swap_remove`, the value is removed by swapping it with the
+ /// last element of the set and popping it off. **This perturbs
+ /// the position of what used to be the last element!**
+ ///
+ /// Return `false` if `value` was not in the set.
+ ///
+ /// Computes in **O(1)** time (average).
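+ ///
+ /// A small example of how the order is perturbed:
+ ///
+ /// ```
+ /// use indexmap::IndexSet;
+ ///
+ /// let mut set: IndexSet<_> = (1..5).collect();
+ /// // `2` is removed and the last value, `4`, takes its position.
+ /// assert!(set.swap_remove(&2));
+ /// assert_eq!(set.iter().copied().collect::<Vec<_>>(), [1, 4, 3]);
+ /// ```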
+ pub fn swap_remove<Q: ?Sized>(&mut self, value: &Q) -> bool
+ where
+ Q: Hash + Equivalent<T>,
+ {
+ self.map.swap_remove(value).is_some()
+ }
+
+ /// Remove the value from the set, and return `true` if it was present.
+ ///
+ /// Like `Vec::remove`, the value is removed by shifting all of the
+ /// elements that follow it, preserving their relative order.
+ /// **This perturbs the index of all of those elements!**
+ ///
+ /// Return `false` if `value` was not in the set.
+ ///
+ /// Computes in **O(n)** time (average).
+ pub fn shift_remove<Q: ?Sized>(&mut self, value: &Q) -> bool
+ where
+ Q: Hash + Equivalent<T>,
+ {
+ self.map.shift_remove(value).is_some()
+ }
+
+ /// Removes and returns the value in the set, if any, that is equal to the
+ /// given one.
+ ///
+ /// **NOTE:** This is equivalent to `.swap_take(value)`; if you need to
+ /// preserve the order of the values in the set, use `.shift_take(value)`
+ /// instead.
+ ///
+ /// Computes in **O(1)** time (average).
+ pub fn take<Q: ?Sized>(&mut self, value: &Q) -> Option<T>
+ where
+ Q: Hash + Equivalent<T>,
+ {
+ self.swap_take(value)
+ }
+
+ /// Removes and returns the value in the set, if any, that is equal to the
+ /// given one.
+ ///
+ /// Like `Vec::swap_remove`, the value is removed by swapping it with the
+ /// last element of the set and popping it off. **This perturbs
+ /// the position of what used to be the last element!**
+ ///
+ /// Return `None` if `value` was not in the set.
+ ///
+ /// Computes in **O(1)** time (average).
+ pub fn swap_take<Q: ?Sized>(&mut self, value: &Q) -> Option<T>
+ where
+ Q: Hash + Equivalent<T>,
+ {
+ self.map.swap_remove_entry(value).map(|(x, ())| x)
+ }
+
+ /// Removes and returns the value in the set, if any, that is equal to the
+ /// given one.
+ ///
+ /// Like `Vec::remove`, the value is removed by shifting all of the
+ /// elements that follow it, preserving their relative order.
+ /// **This perturbs the index of all of those elements!**
+ ///
+ /// Return `None` if `value` was not in the set.
+ ///
+ /// Computes in **O(n)** time (average).
+ pub fn shift_take<Q: ?Sized>(&mut self, value: &Q) -> Option<T>
+ where
+ Q: Hash + Equivalent<T>,
+ {
+ self.map.shift_remove_entry(value).map(|(x, ())| x)
+ }
+
+ /// Remove the value from the set, and return it and the index it had.
+ ///
+ /// Like `Vec::swap_remove`, the value is removed by swapping it with the
+ /// last element of the set and popping it off. **This perturbs
+ /// the position of what used to be the last element!**
+ ///
+ /// Return `None` if `value` was not in the set.
+ pub fn swap_remove_full<Q: ?Sized>(&mut self, value: &Q) -> Option<(usize, T)>
+ where
+ Q: Hash + Equivalent<T>,
+ {
+ self.map.swap_remove_full(value).map(|(i, x, ())| (i, x))
+ }
+
+ /// Remove the value from the set, and return it and the index it had.
+ ///
+ /// Like `Vec::remove`, the value is removed by shifting all of the
+ /// elements that follow it, preserving their relative order.
+ /// **This perturbs the index of all of those elements!**
+ ///
+ /// Return `None` if `value` was not in the set.
+ pub fn shift_remove_full<Q: ?Sized>(&mut self, value: &Q) -> Option<(usize, T)>
+ where
+ Q: Hash + Equivalent<T>,
+ {
+ self.map.shift_remove_full(value).map(|(i, x, ())| (i, x))
+ }
+
+ /// Remove the last value
+ ///
+ /// This preserves the order of the remaining elements.
+ ///
+ /// Computes in **O(1)** time (average).
+ pub fn pop(&mut self) -> Option<T> {
+ self.map.pop().map(|(x, ())| x)
+ }
+
+ /// Scan through each value in the set and keep those where the
+ /// closure `keep` returns `true`.
+ ///
+ /// The elements are visited in order, and remaining elements keep their
+ /// order.
+ ///
+ /// Computes in **O(n)** time (average).
+ pub fn retain<F>(&mut self, mut keep: F)
+ where
+ F: FnMut(&T) -> bool,
+ {
+ self.map.retain(move |x, &mut ()| keep(x))
+ }
+
+ /// Sort the set’s values by their default ordering.
+ ///
+ /// See [`sort_by`](Self::sort_by) for details.
+ pub fn sort(&mut self)
+ where
+ T: Ord,
+ {
+ self.map.sort_keys()
+ }
+
+ /// Sort the set’s values in place using the comparison function `cmp`.
+ ///
+ /// Computes in **O(n log n)** time and **O(n)** space. The sort is stable.
+ pub fn sort_by<F>(&mut self, mut cmp: F)
+ where
+ F: FnMut(&T, &T) -> Ordering,
+ {
+ self.map.sort_by(move |a, _, b, _| cmp(a, b));
+ }
+
+ /// Sort the values of the set and return a by-value iterator of
+ /// the values with the result.
+ ///
+ /// The sort is stable.
+ pub fn sorted_by<F>(self, mut cmp: F) -> IntoIter<T>
+ where
+ F: FnMut(&T, &T) -> Ordering,
+ {
+ let mut entries = self.into_entries();
+ entries.sort_by(move |a, b| cmp(&a.key, &b.key));
+ IntoIter::new(entries)
+ }
+
+ /// Sort the set's values by their default ordering.
+ ///
+ /// See [`sort_unstable_by`](Self::sort_unstable_by) for details.
+ pub fn sort_unstable(&mut self)
+ where
+ T: Ord,
+ {
+ self.map.sort_unstable_keys()
+ }
+
+ /// Sort the set's values in place using the comparison function `cmp`.
+ ///
+ /// Computes in **O(n log n)** time. The sort is unstable.
+ pub fn sort_unstable_by<F>(&mut self, mut cmp: F)
+ where
+ F: FnMut(&T, &T) -> Ordering,
+ {
+ self.map.sort_unstable_by(move |a, _, b, _| cmp(a, b))
+ }
+
+ /// Sort the values of the set and return a by-value iterator of
+ /// the values in sorted order.
+ ///
+ /// The sort is unstable.
+ pub fn sorted_unstable_by<F>(self, mut cmp: F) -> IntoIter<T>
+ where
+ F: FnMut(&T, &T) -> Ordering,
+ {
+ let mut entries = self.into_entries();
+ entries.sort_unstable_by(move |a, b| cmp(&a.key, &b.key));
+ IntoIter::new(entries)
+ }
+
+ /// Sort the set’s values in place using a key extraction function.
+ ///
+ /// During sorting, the function is called at most once per entry, by using temporary storage
+ /// to remember the results of its evaluation. The order of calls to the function is
+ /// unspecified and may change between versions of `indexmap` or the standard library.
+ ///
+ /// Computes in **O(m n + n log n + c)** time and **O(n)** space, where the function is
+ /// **O(m)**, *n* is the length of the set, and *c* the capacity. The sort is stable.
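+ ///
+ /// # Examples
+ ///
+ /// A small sketch, sorting strings by length:
+ ///
+ /// ```
+ /// use indexmap::IndexSet;
+ ///
+ /// let mut set = IndexSet::from(["ccc", "a", "bb"]);
+ /// set.sort_by_cached_key(|s| s.len());
+ /// assert_eq!(set.iter().copied().collect::<Vec<_>>(), vec!["a", "bb", "ccc"]);
+ /// ```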
+ pub fn sort_by_cached_key<K, F>(&mut self, mut sort_key: F)
+ where
+ K: Ord,
+ F: FnMut(&T) -> K,
+ {
+ self.with_entries(move |entries| {
+ entries.sort_by_cached_key(move |a| sort_key(&a.key));
+ });
+ }
+
+ /// Reverses the order of the set’s values in place.
+ ///
+ /// Computes in **O(n)** time and **O(1)** space.
+ pub fn reverse(&mut self) {
+ self.map.reverse()
+ }
+}
+
+impl<T, S> IndexSet<T, S> {
+ /// Returns a slice of all the values in the set.
+ ///
+ /// Computes in **O(1)** time.
+ pub fn as_slice(&self) -> &Slice<T> {
+ Slice::from_slice(self.as_entries())
+ }
+
+ /// Converts into a boxed slice of all the values in the set.
+ ///
+ /// Note that this will drop the inner hash table and any excess capacity.
+ pub fn into_boxed_slice(self) -> Box<Slice<T>> {
+ Slice::from_boxed(self.into_entries().into_boxed_slice())
+ }
+
+ /// Get a value by index
+ ///
+ /// Valid indices are *0 <= index < self.len()*
+ ///
+ /// Computes in **O(1)** time.
+ pub fn get_index(&self, index: usize) -> Option<&T> {
+ self.as_entries().get(index).map(Bucket::key_ref)
+ }
+
+ /// Returns a slice of values in the given range of indices.
+ ///
+ /// Valid indices are *0 <= index < self.len()*
+ ///
+ /// Computes in **O(1)** time.
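+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch of how this might be used:
+ ///
+ /// ```
+ /// use indexmap::IndexSet;
+ ///
+ /// let set = IndexSet::from([10, 20, 30, 40]);
+ /// let slice = set.get_range(1..3).unwrap();
+ /// assert_eq!(slice.len(), 2);
+ /// assert_eq!(slice.get_index(0), Some(&20));
+ /// // Out-of-bounds ranges return `None` instead of panicking.
+ /// assert!(set.get_range(2..9).is_none());
+ /// ```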
+ pub fn get_range<R: RangeBounds<usize>>(&self, range: R) -> Option<&Slice<T>> {
+ let entries = self.as_entries();
+ let range = try_simplify_range(range, entries.len())?;
+ entries.get(range).map(Slice::from_slice)
+ }
+
+ /// Get the first value
+ ///
+ /// Computes in **O(1)** time.
+ pub fn first(&self) -> Option<&T> {
+ self.as_entries().first().map(Bucket::key_ref)
+ }
+
+ /// Get the last value
+ ///
+ /// Computes in **O(1)** time.
+ pub fn last(&self) -> Option<&T> {
+ self.as_entries().last().map(Bucket::key_ref)
+ }
+
+ /// Remove the value by index
+ ///
+ /// Valid indices are *0 <= index < self.len()*
+ ///
+ /// Like `Vec::swap_remove`, the value is removed by swapping it with the
+ /// last element of the set and popping it off. **This perturbs
+ /// the position of what used to be the last element!**
+ ///
+ /// Computes in **O(1)** time (average).
+ pub fn swap_remove_index(&mut self, index: usize) -> Option<T> {
+ self.map.swap_remove_index(index).map(|(x, ())| x)
+ }
+
+ /// Remove the value by index
+ ///
+ /// Valid indices are *0 <= index < self.len()*
+ ///
+ /// Like `Vec::remove`, the value is removed by shifting all of the
+ /// elements that follow it, preserving their relative order.
+ /// **This perturbs the index of all of those elements!**
+ ///
+ /// Computes in **O(n)** time (average).
+ pub fn shift_remove_index(&mut self, index: usize) -> Option<T> {
+ self.map.shift_remove_index(index).map(|(x, ())| x)
+ }
+
+ /// Moves the position of a value from one index to another
+ /// by shifting all other values in-between.
+ ///
+ /// * If `from < to`, the other values will shift down while the targeted value moves up.
+ /// * If `from > to`, the other values will shift up while the targeted value moves down.
+ ///
+ /// ***Panics*** if `from` or `to` are out of bounds.
+ ///
+ /// Computes in **O(n)** time (average).
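+ ///
+ /// # Examples
+ ///
+ /// A small sketch of the shifting behavior:
+ ///
+ /// ```
+ /// use indexmap::IndexSet;
+ ///
+ /// let mut set = IndexSet::from(['a', 'b', 'c', 'd', 'e']);
+ /// set.move_index(1, 3);
+ /// assert_eq!(set.iter().copied().collect::<Vec<_>>(), vec!['a', 'c', 'd', 'b', 'e']);
+ /// ```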
+ pub fn move_index(&mut self, from: usize, to: usize) {
+ self.map.move_index(from, to)
+ }
+
+ /// Swaps the position of two values in the set.
+ ///
+ /// ***Panics*** if `a` or `b` are out of bounds.
+ pub fn swap_indices(&mut self, a: usize, b: usize) {
+ self.map.swap_indices(a, b)
+ }
+}
+
+/// Access `IndexSet` values at indexed positions.
+///
+/// # Examples
+///
+/// ```
+/// use indexmap::IndexSet;
+///
+/// let mut set = IndexSet::new();
+/// for word in "Lorem ipsum dolor sit amet".split_whitespace() {
+/// set.insert(word.to_string());
+/// }
+/// assert_eq!(set[0], "Lorem");
+/// assert_eq!(set[1], "ipsum");
+/// set.reverse();
+/// assert_eq!(set[0], "amet");
+/// assert_eq!(set[1], "sit");
+/// set.sort();
+/// assert_eq!(set[0], "Lorem");
+/// assert_eq!(set[1], "amet");
+/// ```
+///
+/// ```should_panic
+/// use indexmap::IndexSet;
+///
+/// let mut set = IndexSet::new();
+/// set.insert("foo");
+/// println!("{:?}", set[10]); // panics!
+/// ```
+impl<T, S> Index<usize> for IndexSet<T, S> {
+ type Output = T;
+
+ /// Returns a reference to the value at the supplied `index`.
+ ///
+ /// ***Panics*** if `index` is out of bounds.
+ fn index(&self, index: usize) -> &T {
+ self.get_index(index)
+ .expect("IndexSet: index out of bounds")
+ }
+}
+
+impl<T, S> FromIterator<T> for IndexSet<T, S>
+where
+ T: Hash + Eq,
+ S: BuildHasher + Default,
+{
+ fn from_iter<I: IntoIterator<Item = T>>(iterable: I) -> Self {
+ let iter = iterable.into_iter().map(|x| (x, ()));
+ IndexSet {
+ map: IndexMap::from_iter(iter),
+ }
+ }
+}
+
+#[cfg(feature = "std")]
+#[cfg_attr(docsrs, doc(cfg(feature = "std")))]
+impl<T, const N: usize> From<[T; N]> for IndexSet<T, RandomState>
+where
+ T: Eq + Hash,
+{
+ /// # Examples
+ ///
+ /// ```
+ /// use indexmap::IndexSet;
+ ///
+ /// let set1 = IndexSet::from([1, 2, 3, 4]);
+ /// let set2: IndexSet<_> = [1, 2, 3, 4].into();
+ /// assert_eq!(set1, set2);
+ /// ```
+ fn from(arr: [T; N]) -> Self {
+ Self::from_iter(arr)
+ }
+}
+
+impl<T, S> Extend<T> for IndexSet<T, S>
+where
+ T: Hash + Eq,
+ S: BuildHasher,
+{
+ fn extend<I: IntoIterator<Item = T>>(&mut self, iterable: I) {
+ let iter = iterable.into_iter().map(|x| (x, ()));
+ self.map.extend(iter);
+ }
+}
+
+impl<'a, T, S> Extend<&'a T> for IndexSet<T, S>
+where
+ T: Hash + Eq + Copy + 'a,
+ S: BuildHasher,
+{
+ fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iterable: I) {
+ let iter = iterable.into_iter().copied();
+ self.extend(iter);
+ }
+}
+
+impl<T, S> Default for IndexSet<T, S>
+where
+ S: Default,
+{
+ /// Return an empty `IndexSet`
+ fn default() -> Self {
+ IndexSet {
+ map: IndexMap::default(),
+ }
+ }
+}
+
+impl<T, S1, S2> PartialEq<IndexSet<T, S2>> for IndexSet<T, S1>
+where
+ T: Hash + Eq,
+ S1: BuildHasher,
+ S2: BuildHasher,
+{
+ fn eq(&self, other: &IndexSet<T, S2>) -> bool {
+ self.len() == other.len() && self.is_subset(other)
+ }
+}
+
+impl<T, S> Eq for IndexSet<T, S>
+where
+ T: Eq + Hash,
+ S: BuildHasher,
+{
+}
+
+impl<T, S> IndexSet<T, S>
+where
+ T: Eq + Hash,
+ S: BuildHasher,
+{
+ /// Returns `true` if `self` has no elements in common with `other`.
+ pub fn is_disjoint<S2>(&self, other: &IndexSet<T, S2>) -> bool
+ where
+ S2: BuildHasher,
+ {
+ if self.len() <= other.len() {
+ self.iter().all(move |value| !other.contains(value))
+ } else {
+ other.iter().all(move |value| !self.contains(value))
+ }
+ }
+
+ /// Returns `true` if all elements of `self` are contained in `other`.
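+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch:
+ ///
+ /// ```
+ /// use indexmap::IndexSet;
+ ///
+ /// let small = IndexSet::from([1, 2]);
+ /// let big = IndexSet::from([1, 2, 3]);
+ /// assert!(small.is_subset(&big));
+ /// assert!(!big.is_subset(&small));
+ /// ```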
+ pub fn is_subset<S2>(&self, other: &IndexSet<T, S2>) -> bool
+ where
+ S2: BuildHasher,
+ {
+ self.len() <= other.len() && self.iter().all(move |value| other.contains(value))
+ }
+
+ /// Returns `true` if all elements of `other` are contained in `self`.
+ pub fn is_superset<S2>(&self, other: &IndexSet<T, S2>) -> bool
+ where
+ S2: BuildHasher,
+ {
+ other.is_subset(self)
+ }
+}
+
+impl<T, S1, S2> BitAnd<&IndexSet<T, S2>> for &IndexSet<T, S1>
+where
+ T: Eq + Hash + Clone,
+ S1: BuildHasher + Default,
+ S2: BuildHasher,
+{
+ type Output = IndexSet<T, S1>;
+
+ /// Returns the set intersection, cloned into a new set.
+ ///
+ /// Values are collected in the same order that they appear in `self`.
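+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch of the operator in use:
+ ///
+ /// ```
+ /// use indexmap::IndexSet;
+ ///
+ /// let a = IndexSet::from([1, 2, 3]);
+ /// let b = IndexSet::from([2, 3, 4]);
+ /// let intersection = &a & &b;
+ /// assert_eq!(intersection.iter().copied().collect::<Vec<_>>(), vec![2, 3]);
+ /// ```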
+ fn bitand(self, other: &IndexSet<T, S2>) -> Self::Output {
+ self.intersection(other).cloned().collect()
+ }
+}
+
+impl<T, S1, S2> BitOr<&IndexSet<T, S2>> for &IndexSet<T, S1>
+where
+ T: Eq + Hash + Clone,
+ S1: BuildHasher + Default,
+ S2: BuildHasher,
+{
+ type Output = IndexSet<T, S1>;
+
+ /// Returns the set union, cloned into a new set.
+ ///
+ /// Values from `self` are collected in their original order, followed by
+ /// values that are unique to `other` in their original order.
+ fn bitor(self, other: &IndexSet<T, S2>) -> Self::Output {
+ self.union(other).cloned().collect()
+ }
+}
+
+impl<T, S1, S2> BitXor<&IndexSet<T, S2>> for &IndexSet<T, S1>
+where
+ T: Eq + Hash + Clone,
+ S1: BuildHasher + Default,
+ S2: BuildHasher,
+{
+ type Output = IndexSet<T, S1>;
+
+ /// Returns the set symmetric-difference, cloned into a new set.
+ ///
+ /// Values that are unique to `self` are collected in their original order,
+ /// followed by values that are unique to `other` in their original order.
+ fn bitxor(self, other: &IndexSet<T, S2>) -> Self::Output {
+ self.symmetric_difference(other).cloned().collect()
+ }
+}
+
+impl<T, S1, S2> Sub<&IndexSet<T, S2>> for &IndexSet<T, S1>
+where
+ T: Eq + Hash + Clone,
+ S1: BuildHasher + Default,
+ S2: BuildHasher,
+{
+ type Output = IndexSet<T, S1>;
+
+ /// Returns the set difference, cloned into a new set.
+ ///
+ /// Values are collected in the same order that they appear in `self`.
+ fn sub(self, other: &IndexSet<T, S2>) -> Self::Output {
+ self.difference(other).cloned().collect()
+ }
+}
diff --git a/vendor/indexmap/src/set/iter.rs b/vendor/indexmap/src/set/iter.rs
new file mode 100644
index 0000000..828756d
--- /dev/null
+++ b/vendor/indexmap/src/set/iter.rs
@@ -0,0 +1,543 @@
+use super::{Bucket, Entries, IndexSet, Slice};
+
+use alloc::vec::{self, Vec};
+use core::fmt;
+use core::hash::{BuildHasher, Hash};
+use core::iter::{Chain, FusedIterator};
+use core::slice::Iter as SliceIter;
+
+impl<'a, T, S> IntoIterator for &'a IndexSet<T, S> {
+ type Item = &'a T;
+ type IntoIter = Iter<'a, T>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.iter()
+ }
+}
+
+impl<T, S> IntoIterator for IndexSet<T, S> {
+ type Item = T;
+ type IntoIter = IntoIter<T>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ IntoIter::new(self.into_entries())
+ }
+}
+
+/// An iterator over the items of an `IndexSet`.
+///
+/// This `struct` is created by the [`iter`] method on [`IndexSet`].
+/// See its documentation for more.
+///
+/// [`IndexSet`]: struct.IndexSet.html
+/// [`iter`]: struct.IndexSet.html#method.iter
+pub struct Iter<'a, T> {
+ iter: SliceIter<'a, Bucket<T>>,
+}
+
+impl<'a, T> Iter<'a, T> {
+ pub(super) fn new(entries: &'a [Bucket<T>]) -> Self {
+ Self {
+ iter: entries.iter(),
+ }
+ }
+
+ /// Returns a slice of the remaining entries in the iterator.
+ pub fn as_slice(&self) -> &'a Slice<T> {
+ Slice::from_slice(self.iter.as_slice())
+ }
+}
+
+impl<'a, T> Iterator for Iter<'a, T> {
+ type Item = &'a T;
+
+ iterator_methods!(Bucket::key_ref);
+}
+
+impl<T> DoubleEndedIterator for Iter<'_, T> {
+ double_ended_iterator_methods!(Bucket::key_ref);
+}
+
+impl<T> ExactSizeIterator for Iter<'_, T> {
+ fn len(&self) -> usize {
+ self.iter.len()
+ }
+}
+
+impl<T> FusedIterator for Iter<'_, T> {}
+
+impl<T> Clone for Iter<'_, T> {
+ fn clone(&self) -> Self {
+ Iter {
+ iter: self.iter.clone(),
+ }
+ }
+}
+
+impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.clone()).finish()
+ }
+}
+
+impl<T> Default for Iter<'_, T> {
+ fn default() -> Self {
+ Self { iter: [].iter() }
+ }
+}
+
+/// An owning iterator over the items of an `IndexSet`.
+///
+/// This `struct` is created by the [`into_iter`] method on [`IndexSet`]
+/// (provided by the `IntoIterator` trait). See its documentation for more.
+///
+/// [`IndexSet`]: struct.IndexSet.html
+/// [`into_iter`]: struct.IndexSet.html#method.into_iter
+pub struct IntoIter<T> {
+ iter: vec::IntoIter<Bucket<T>>,
+}
+
+impl<T> IntoIter<T> {
+ pub(super) fn new(entries: Vec<Bucket<T>>) -> Self {
+ Self {
+ iter: entries.into_iter(),
+ }
+ }
+
+ /// Returns a slice of the remaining entries in the iterator.
+ pub fn as_slice(&self) -> &Slice<T> {
+ Slice::from_slice(self.iter.as_slice())
+ }
+}
+
+impl<T> Iterator for IntoIter<T> {
+ type Item = T;
+
+ iterator_methods!(Bucket::key);
+}
+
+impl<T> DoubleEndedIterator for IntoIter<T> {
+ double_ended_iterator_methods!(Bucket::key);
+}
+
+impl<T> ExactSizeIterator for IntoIter<T> {
+ fn len(&self) -> usize {
+ self.iter.len()
+ }
+}
+
+impl<T> FusedIterator for IntoIter<T> {}
+
+impl<T: fmt::Debug> fmt::Debug for IntoIter<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let iter = self.iter.as_slice().iter().map(Bucket::key_ref);
+ f.debug_list().entries(iter).finish()
+ }
+}
+
+impl<T> Default for IntoIter<T> {
+ fn default() -> Self {
+ Self {
+ iter: Vec::new().into_iter(),
+ }
+ }
+}
+
+/// A draining iterator over the items of an `IndexSet`.
+///
+/// This `struct` is created by the [`drain`] method on [`IndexSet`].
+/// See its documentation for more.
+///
+/// [`IndexSet`]: struct.IndexSet.html
+/// [`drain`]: struct.IndexSet.html#method.drain
+pub struct Drain<'a, T> {
+ iter: vec::Drain<'a, Bucket<T>>,
+}
+
+impl<'a, T> Drain<'a, T> {
+ pub(super) fn new(iter: vec::Drain<'a, Bucket<T>>) -> Self {
+ Self { iter }
+ }
+
+ /// Returns a slice of the remaining entries in the iterator.
+ pub fn as_slice(&self) -> &Slice<T> {
+ Slice::from_slice(self.iter.as_slice())
+ }
+}
+
+impl<T> Iterator for Drain<'_, T> {
+ type Item = T;
+
+ iterator_methods!(Bucket::key);
+}
+
+impl<T> DoubleEndedIterator for Drain<'_, T> {
+ double_ended_iterator_methods!(Bucket::key);
+}
+
+impl<T> ExactSizeIterator for Drain<'_, T> {
+ fn len(&self) -> usize {
+ self.iter.len()
+ }
+}
+
+impl<T> FusedIterator for Drain<'_, T> {}
+
+impl<T: fmt::Debug> fmt::Debug for Drain<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let iter = self.iter.as_slice().iter().map(Bucket::key_ref);
+ f.debug_list().entries(iter).finish()
+ }
+}
+
+/// A lazy iterator producing elements in the difference of `IndexSet`s.
+///
+/// This `struct` is created by the [`difference`] method on [`IndexSet`].
+/// See its documentation for more.
+///
+/// [`IndexSet`]: struct.IndexSet.html
+/// [`difference`]: struct.IndexSet.html#method.difference
+pub struct Difference<'a, T, S> {
+ iter: Iter<'a, T>,
+ other: &'a IndexSet<T, S>,
+}
+
+impl<'a, T, S> Difference<'a, T, S> {
+ pub(super) fn new<S1>(set: &'a IndexSet<T, S1>, other: &'a IndexSet<T, S>) -> Self {
+ Self {
+ iter: set.iter(),
+ other,
+ }
+ }
+}
+
+impl<'a, T, S> Iterator for Difference<'a, T, S>
+where
+ T: Eq + Hash,
+ S: BuildHasher,
+{
+ type Item = &'a T;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ while let Some(item) = self.iter.next() {
+ if !self.other.contains(item) {
+ return Some(item);
+ }
+ }
+ None
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (0, self.iter.size_hint().1)
+ }
+}
+
+impl<T, S> DoubleEndedIterator for Difference<'_, T, S>
+where
+ T: Eq + Hash,
+ S: BuildHasher,
+{
+ fn next_back(&mut self) -> Option<Self::Item> {
+ while let Some(item) = self.iter.next_back() {
+ if !self.other.contains(item) {
+ return Some(item);
+ }
+ }
+ None
+ }
+}
+
+impl<T, S> FusedIterator for Difference<'_, T, S>
+where
+ T: Eq + Hash,
+ S: BuildHasher,
+{
+}
+
+impl<T, S> Clone for Difference<'_, T, S> {
+ fn clone(&self) -> Self {
+ Difference {
+ iter: self.iter.clone(),
+ ..*self
+ }
+ }
+}
+
+impl<T, S> fmt::Debug for Difference<'_, T, S>
+where
+ T: fmt::Debug + Eq + Hash,
+ S: BuildHasher,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.clone()).finish()
+ }
+}
+
+/// A lazy iterator producing elements in the intersection of `IndexSet`s.
+///
+/// This `struct` is created by the [`intersection`] method on [`IndexSet`].
+/// See its documentation for more.
+///
+/// [`IndexSet`]: struct.IndexSet.html
+/// [`intersection`]: struct.IndexSet.html#method.intersection
+pub struct Intersection<'a, T, S> {
+ iter: Iter<'a, T>,
+ other: &'a IndexSet<T, S>,
+}
+
+impl<'a, T, S> Intersection<'a, T, S> {
+ pub(super) fn new<S1>(set: &'a IndexSet<T, S1>, other: &'a IndexSet<T, S>) -> Self {
+ Self {
+ iter: set.iter(),
+ other,
+ }
+ }
+}
+
+impl<'a, T, S> Iterator for Intersection<'a, T, S>
+where
+ T: Eq + Hash,
+ S: BuildHasher,
+{
+ type Item = &'a T;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ while let Some(item) = self.iter.next() {
+ if self.other.contains(item) {
+ return Some(item);
+ }
+ }
+ None
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (0, self.iter.size_hint().1)
+ }
+}
+
+impl<T, S> DoubleEndedIterator for Intersection<'_, T, S>
+where
+ T: Eq + Hash,
+ S: BuildHasher,
+{
+ fn next_back(&mut self) -> Option<Self::Item> {
+ while let Some(item) = self.iter.next_back() {
+ if self.other.contains(item) {
+ return Some(item);
+ }
+ }
+ None
+ }
+}
+
+impl<T, S> FusedIterator for Intersection<'_, T, S>
+where
+ T: Eq + Hash,
+ S: BuildHasher,
+{
+}
+
+impl<T, S> Clone for Intersection<'_, T, S> {
+ fn clone(&self) -> Self {
+ Intersection {
+ iter: self.iter.clone(),
+ ..*self
+ }
+ }
+}
+
+impl<T, S> fmt::Debug for Intersection<'_, T, S>
+where
+ T: fmt::Debug + Eq + Hash,
+ S: BuildHasher,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.clone()).finish()
+ }
+}
+
+/// A lazy iterator producing elements in the symmetric difference of `IndexSet`s.
+///
+/// This `struct` is created by the [`symmetric_difference`] method on
+/// [`IndexSet`]. See its documentation for more.
+///
+/// [`IndexSet`]: struct.IndexSet.html
+/// [`symmetric_difference`]: struct.IndexSet.html#method.symmetric_difference
+pub struct SymmetricDifference<'a, T, S1, S2> {
+ iter: Chain<Difference<'a, T, S2>, Difference<'a, T, S1>>,
+}
+
+impl<'a, T, S1, S2> SymmetricDifference<'a, T, S1, S2>
+where
+ T: Eq + Hash,
+ S1: BuildHasher,
+ S2: BuildHasher,
+{
+ pub(super) fn new(set1: &'a IndexSet<T, S1>, set2: &'a IndexSet<T, S2>) -> Self {
+ let diff1 = set1.difference(set2);
+ let diff2 = set2.difference(set1);
+ Self {
+ iter: diff1.chain(diff2),
+ }
+ }
+}
+
+impl<'a, T, S1, S2> Iterator for SymmetricDifference<'a, T, S1, S2>
+where
+ T: Eq + Hash,
+ S1: BuildHasher,
+ S2: BuildHasher,
+{
+ type Item = &'a T;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ self.iter.next()
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+
+ fn fold<B, F>(self, init: B, f: F) -> B
+ where
+ F: FnMut(B, Self::Item) -> B,
+ {
+ self.iter.fold(init, f)
+ }
+}
+
+impl<T, S1, S2> DoubleEndedIterator for SymmetricDifference<'_, T, S1, S2>
+where
+ T: Eq + Hash,
+ S1: BuildHasher,
+ S2: BuildHasher,
+{
+ fn next_back(&mut self) -> Option<Self::Item> {
+ self.iter.next_back()
+ }
+
+ fn rfold<B, F>(self, init: B, f: F) -> B
+ where
+ F: FnMut(B, Self::Item) -> B,
+ {
+ self.iter.rfold(init, f)
+ }
+}
+
+impl<T, S1, S2> FusedIterator for SymmetricDifference<'_, T, S1, S2>
+where
+ T: Eq + Hash,
+ S1: BuildHasher,
+ S2: BuildHasher,
+{
+}
+
+impl<T, S1, S2> Clone for SymmetricDifference<'_, T, S1, S2> {
+ fn clone(&self) -> Self {
+ SymmetricDifference {
+ iter: self.iter.clone(),
+ }
+ }
+}
+
+impl<T, S1, S2> fmt::Debug for SymmetricDifference<'_, T, S1, S2>
+where
+ T: fmt::Debug + Eq + Hash,
+ S1: BuildHasher,
+ S2: BuildHasher,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.clone()).finish()
+ }
+}
+
+/// A lazy iterator producing elements in the union of `IndexSet`s.
+///
+/// This `struct` is created by the [`union`] method on [`IndexSet`].
+/// See its documentation for more.
+///
+/// [`IndexSet`]: struct.IndexSet.html
+/// [`union`]: struct.IndexSet.html#method.union
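+///
+/// # Examples
+///
+/// A minimal sketch of how the iterator is typically obtained and consumed:
+///
+/// ```
+/// use indexmap::IndexSet;
+///
+/// let a = IndexSet::from([1, 2, 3]);
+/// let b = IndexSet::from([3, 4]);
+/// let union: Vec<_> = a.union(&b).copied().collect();
+/// assert_eq!(union, vec![1, 2, 3, 4]);
+/// ```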
+pub struct Union<'a, T, S> {
+ iter: Chain<Iter<'a, T>, Difference<'a, T, S>>,
+}
+
+impl<'a, T, S> Union<'a, T, S>
+where
+ T: Eq + Hash,
+ S: BuildHasher,
+{
+ pub(super) fn new<S2>(set1: &'a IndexSet<T, S>, set2: &'a IndexSet<T, S2>) -> Self
+ where
+ S2: BuildHasher,
+ {
+ Self {
+ iter: set1.iter().chain(set2.difference(set1)),
+ }
+ }
+}
+
+impl<'a, T, S> Iterator for Union<'a, T, S>
+where
+ T: Eq + Hash,
+ S: BuildHasher,
+{
+ type Item = &'a T;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ self.iter.next()
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+
+ fn fold<B, F>(self, init: B, f: F) -> B
+ where
+ F: FnMut(B, Self::Item) -> B,
+ {
+ self.iter.fold(init, f)
+ }
+}
+
+impl<T, S> DoubleEndedIterator for Union<'_, T, S>
+where
+ T: Eq + Hash,
+ S: BuildHasher,
+{
+ fn next_back(&mut self) -> Option<Self::Item> {
+ self.iter.next_back()
+ }
+
+ fn rfold<B, F>(self, init: B, f: F) -> B
+ where
+ F: FnMut(B, Self::Item) -> B,
+ {
+ self.iter.rfold(init, f)
+ }
+}
+
+impl<T, S> FusedIterator for Union<'_, T, S>
+where
+ T: Eq + Hash,
+ S: BuildHasher,
+{
+}
+
+impl<T, S> Clone for Union<'_, T, S> {
+ fn clone(&self) -> Self {
+ Union {
+ iter: self.iter.clone(),
+ }
+ }
+}
+
+impl<T, S> fmt::Debug for Union<'_, T, S>
+where
+ T: fmt::Debug + Eq + Hash,
+ S: BuildHasher,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.clone()).finish()
+ }
+}
diff --git a/vendor/indexmap/src/set/slice.rs b/vendor/indexmap/src/set/slice.rs
new file mode 100644
index 0000000..608311d
--- /dev/null
+++ b/vendor/indexmap/src/set/slice.rs
@@ -0,0 +1,278 @@
+use super::{Bucket, Entries, IndexSet, IntoIter, Iter};
+use crate::util::try_simplify_range;
+
+use alloc::boxed::Box;
+use alloc::vec::Vec;
+use core::cmp::Ordering;
+use core::fmt;
+use core::hash::{Hash, Hasher};
+use core::ops::{self, Bound, Index, RangeBounds};
+
+/// A dynamically-sized slice of values in an `IndexSet`.
+///
+/// This supports indexed operations much like a `[T]` slice,
+/// but not any hashed operations on the values.
+///
+/// Unlike `IndexSet`, `Slice` does consider the order for `PartialEq`
+/// and `Eq`, and it also implements `PartialOrd`, `Ord`, and `Hash`.
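+///
+/// # Examples
+///
+/// A minimal sketch, obtaining a slice by indexing with a range:
+///
+/// ```
+/// use indexmap::IndexSet;
+///
+/// let set = IndexSet::from([1, 2, 3]);
+/// let slice = &set[1..];
+/// assert_eq!(slice.len(), 2);
+/// assert_eq!(slice[0], 2);
+/// ```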
+#[repr(transparent)]
+pub struct Slice<T> {
+ pub(crate) entries: [Bucket<T>],
+}
+
+// SAFETY: `Slice<T>` is a transparent wrapper around `[Bucket<T>]`,
+// and reference lifetimes are bound together in function signatures.
+#[allow(unsafe_code)]
+impl<T> Slice<T> {
+ pub(super) fn from_slice(entries: &[Bucket<T>]) -> &Self {
+ unsafe { &*(entries as *const [Bucket<T>] as *const Self) }
+ }
+
+ pub(super) fn from_boxed(entries: Box<[Bucket<T>]>) -> Box<Self> {
+ unsafe { Box::from_raw(Box::into_raw(entries) as *mut Self) }
+ }
+
+ fn into_boxed(self: Box<Self>) -> Box<[Bucket<T>]> {
+ unsafe { Box::from_raw(Box::into_raw(self) as *mut [Bucket<T>]) }
+ }
+}
+
+impl<T> Slice<T> {
+ pub(crate) fn into_entries(self: Box<Self>) -> Vec<Bucket<T>> {
+ self.into_boxed().into_vec()
+ }
+
+ /// Return the number of elements in the set slice.
+ pub fn len(&self) -> usize {
+ self.entries.len()
+ }
+
+ /// Returns true if the set slice contains no elements.
+ pub fn is_empty(&self) -> bool {
+ self.entries.is_empty()
+ }
+
+ /// Get a value by index.
+ ///
+ /// Valid indices are *0 <= index < self.len()*
+ pub fn get_index(&self, index: usize) -> Option<&T> {
+ self.entries.get(index).map(Bucket::key_ref)
+ }
+
+ /// Returns a slice of values in the given range of indices.
+ ///
+ /// Valid indices are *0 <= index < self.len()*
+ pub fn get_range<R: RangeBounds<usize>>(&self, range: R) -> Option<&Self> {
+ let range = try_simplify_range(range, self.entries.len())?;
+ self.entries.get(range).map(Self::from_slice)
+ }
+
+ /// Get the first value.
+ pub fn first(&self) -> Option<&T> {
+ self.entries.first().map(Bucket::key_ref)
+ }
+
+ /// Get the last value.
+ pub fn last(&self) -> Option<&T> {
+ self.entries.last().map(Bucket::key_ref)
+ }
+
+ /// Divides one slice into two at an index.
+ ///
+ /// ***Panics*** if `index > len`.
+ pub fn split_at(&self, index: usize) -> (&Self, &Self) {
+ let (first, second) = self.entries.split_at(index);
+ (Self::from_slice(first), Self::from_slice(second))
+ }
+
+ /// Returns the first value and the rest of the slice,
+ /// or `None` if it is empty.
+ pub fn split_first(&self) -> Option<(&T, &Self)> {
+ if let [first, rest @ ..] = &self.entries {
+ Some((&first.key, Self::from_slice(rest)))
+ } else {
+ None
+ }
+ }
+
+ /// Returns the last value and the rest of the slice,
+ /// or `None` if it is empty.
+ pub fn split_last(&self) -> Option<(&T, &Self)> {
+ if let [rest @ .., last] = &self.entries {
+ Some((&last.key, Self::from_slice(rest)))
+ } else {
+ None
+ }
+ }
+
+ /// Return an iterator over the values of the set slice.
+ pub fn iter(&self) -> Iter<'_, T> {
+ Iter::new(&self.entries)
+ }
+}
+
+impl<'a, T> IntoIterator for &'a Slice<T> {
+ type IntoIter = Iter<'a, T>;
+ type Item = &'a T;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.iter()
+ }
+}
+
+impl<T> IntoIterator for Box<Slice<T>> {
+ type IntoIter = IntoIter<T>;
+ type Item = T;
+
+ fn into_iter(self) -> Self::IntoIter {
+ IntoIter::new(self.into_entries())
+ }
+}
+
+impl<T> Default for &'_ Slice<T> {
+ fn default() -> Self {
+ Slice::from_slice(&[])
+ }
+}
+
+impl<T> Default for Box<Slice<T>> {
+ fn default() -> Self {
+ Slice::from_boxed(Box::default())
+ }
+}
+
+impl<T: Clone> Clone for Box<Slice<T>> {
+ fn clone(&self) -> Self {
+ Slice::from_boxed(self.entries.to_vec().into_boxed_slice())
+ }
+}
+
+impl<T: Copy> From<&Slice<T>> for Box<Slice<T>> {
+ fn from(slice: &Slice<T>) -> Self {
+ Slice::from_boxed(Box::from(&slice.entries))
+ }
+}
+
+impl<T: fmt::Debug> fmt::Debug for Slice<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self).finish()
+ }
+}
+
+impl<T: PartialEq> PartialEq for Slice<T> {
+ fn eq(&self, other: &Self) -> bool {
+ self.len() == other.len() && self.iter().eq(other)
+ }
+}
+
+impl<T: Eq> Eq for Slice<T> {}
+
+impl<T: PartialOrd> PartialOrd for Slice<T> {
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ self.iter().partial_cmp(other)
+ }
+}
+
+impl<T: Ord> Ord for Slice<T> {
+ fn cmp(&self, other: &Self) -> Ordering {
+ self.iter().cmp(other)
+ }
+}
+
+impl<T: Hash> Hash for Slice<T> {
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ self.len().hash(state);
+ for value in self {
+ value.hash(state);
+ }
+ }
+}
+
+impl<T> Index<usize> for Slice<T> {
+ type Output = T;
+
+ fn index(&self, index: usize) -> &Self::Output {
+ &self.entries[index].key
+ }
+}
+
+// We can't have `impl<I: RangeBounds<usize>> Index<I>` because that conflicts with `Index<usize>`.
+// Instead, we repeat the implementations for all the core range types.
+macro_rules! impl_index {
+ ($($range:ty),*) => {$(
+ impl<T, S> Index<$range> for IndexSet<T, S> {
+ type Output = Slice<T>;
+
+ fn index(&self, range: $range) -> &Self::Output {
+ Slice::from_slice(&self.as_entries()[range])
+ }
+ }
+
+ impl<T> Index<$range> for Slice<T> {
+ type Output = Self;
+
+ fn index(&self, range: $range) -> &Self::Output {
+ Slice::from_slice(&self.entries[range])
+ }
+ }
+ )*}
+}
+impl_index!(
+ ops::Range<usize>,
+ ops::RangeFrom<usize>,
+ ops::RangeFull,
+ ops::RangeInclusive<usize>,
+ ops::RangeTo<usize>,
+ ops::RangeToInclusive<usize>,
+ (Bound<usize>, Bound<usize>)
+);
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use alloc::vec::Vec;
+
+ #[test]
+ fn slice_index() {
+ fn check(vec_slice: &[i32], set_slice: &Slice<i32>, sub_slice: &Slice<i32>) {
+ assert_eq!(set_slice as *const _, sub_slice as *const _);
+ itertools::assert_equal(vec_slice, set_slice);
+ }
+
+ let vec: Vec<i32> = (0..10).map(|i| i * i).collect();
+ let set: IndexSet<i32> = vec.iter().cloned().collect();
+ let slice = set.as_slice();
+
+ // RangeFull
+ check(&vec[..], &set[..], &slice[..]);
+
+ for i in 0usize..10 {
+ // Index
+ assert_eq!(vec[i], set[i]);
+ assert_eq!(vec[i], slice[i]);
+
+ // RangeFrom
+ check(&vec[i..], &set[i..], &slice[i..]);
+
+ // RangeTo
+ check(&vec[..i], &set[..i], &slice[..i]);
+
+ // RangeToInclusive
+ check(&vec[..=i], &set[..=i], &slice[..=i]);
+
+ // (Bound<usize>, Bound<usize>)
+ let bounds = (Bound::Excluded(i), Bound::Unbounded);
+ check(&vec[i + 1..], &set[bounds], &slice[bounds]);
+
+ for j in i..=10 {
+ // Range
+ check(&vec[i..j], &set[i..j], &slice[i..j]);
+ }
+
+ for j in i..10 {
+ // RangeInclusive
+ check(&vec[i..=j], &set[i..=j], &slice[i..=j]);
+ }
+ }
+ }
+}
diff --git a/vendor/indexmap/src/set/tests.rs b/vendor/indexmap/src/set/tests.rs
new file mode 100644
index 0000000..44f8ed8
--- /dev/null
+++ b/vendor/indexmap/src/set/tests.rs
@@ -0,0 +1,545 @@
+use super::*;
+use std::string::String;
+
+#[test]
+fn it_works() {
+ let mut set = IndexSet::new();
+ assert_eq!(set.is_empty(), true);
+ set.insert(1);
+ set.insert(1);
+ assert_eq!(set.len(), 1);
+ assert!(set.get(&1).is_some());
+ assert_eq!(set.is_empty(), false);
+}
+
+#[test]
+fn new() {
+ let set = IndexSet::<String>::new();
+ println!("{:?}", set);
+ assert_eq!(set.capacity(), 0);
+ assert_eq!(set.len(), 0);
+ assert_eq!(set.is_empty(), true);
+}
+
+#[test]
+fn insert() {
+ let insert = [0, 4, 2, 12, 8, 7, 11, 5];
+ let not_present = [1, 3, 6, 9, 10];
+ let mut set = IndexSet::with_capacity(insert.len());
+
+ for (i, &elt) in insert.iter().enumerate() {
+ assert_eq!(set.len(), i);
+ set.insert(elt);
+ assert_eq!(set.len(), i + 1);
+ assert_eq!(set.get(&elt), Some(&elt));
+ }
+ println!("{:?}", set);
+
+ for &elt in &not_present {
+ assert!(set.get(&elt).is_none());
+ }
+}
+
+#[test]
+fn insert_full() {
+ let insert = vec![9, 2, 7, 1, 4, 6, 13];
+ let present = vec![1, 6, 2];
+ let mut set = IndexSet::with_capacity(insert.len());
+
+ for (i, &elt) in insert.iter().enumerate() {
+ assert_eq!(set.len(), i);
+ let (index, success) = set.insert_full(elt);
+ assert!(success);
+ assert_eq!(Some(index), set.get_full(&elt).map(|x| x.0));
+ assert_eq!(set.len(), i + 1);
+ }
+
+ let len = set.len();
+ for &elt in &present {
+ let (index, success) = set.insert_full(elt);
+ assert!(!success);
+ assert_eq!(Some(index), set.get_full(&elt).map(|x| x.0));
+ assert_eq!(set.len(), len);
+ }
+}
+
+#[test]
+fn insert_2() {
+ let mut set = IndexSet::with_capacity(16);
+
+ let mut values = vec![];
+ values.extend(0..16);
+ values.extend(if cfg!(miri) { 32..64 } else { 128..267 });
+
+ for &i in &values {
+ let old_set = set.clone();
+ set.insert(i);
+ for value in old_set.iter() {
+ if set.get(value).is_none() {
+ println!("old_set: {:?}", old_set);
+ println!("set: {:?}", set);
+ panic!("did not find {} in set", value);
+ }
+ }
+ }
+
+ for &i in &values {
+ assert!(set.get(&i).is_some(), "did not find {}", i);
+ }
+}
+
+#[test]
+fn insert_dup() {
+ let mut elements = vec![0, 2, 4, 6, 8];
+ let mut set: IndexSet<u8> = elements.drain(..).collect();
+ {
+ let (i, v) = set.get_full(&0).unwrap();
+ assert_eq!(set.len(), 5);
+ assert_eq!(i, 0);
+ assert_eq!(*v, 0);
+ }
+ {
+ let inserted = set.insert(0);
+ let (i, v) = set.get_full(&0).unwrap();
+ assert_eq!(set.len(), 5);
+ assert_eq!(inserted, false);
+ assert_eq!(i, 0);
+ assert_eq!(*v, 0);
+ }
+}
+
+#[test]
+fn insert_order() {
+ let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23];
+ let mut set = IndexSet::new();
+
+ for &elt in &insert {
+ set.insert(elt);
+ }
+
+ assert_eq!(set.iter().count(), set.len());
+ assert_eq!(set.iter().count(), insert.len());
+ for (a, b) in insert.iter().zip(set.iter()) {
+ assert_eq!(a, b);
+ }
+ for (i, v) in (0..insert.len()).zip(set.iter()) {
+ assert_eq!(set.get_index(i).unwrap(), v);
+ }
+}
+
+#[test]
+fn replace() {
+ let replace = [0, 4, 2, 12, 8, 7, 11, 5];
+ let not_present = [1, 3, 6, 9, 10];
+ let mut set = IndexSet::with_capacity(replace.len());
+
+ for (i, &elt) in replace.iter().enumerate() {
+ assert_eq!(set.len(), i);
+ set.replace(elt);
+ assert_eq!(set.len(), i + 1);
+ assert_eq!(set.get(&elt), Some(&elt));
+ }
+ println!("{:?}", set);
+
+ for &elt in &not_present {
+ assert!(set.get(&elt).is_none());
+ }
+}
+
+#[test]
+fn replace_full() {
+ let replace = vec![9, 2, 7, 1, 4, 6, 13];
+ let present = vec![1, 6, 2];
+ let mut set = IndexSet::with_capacity(replace.len());
+
+ for (i, &elt) in replace.iter().enumerate() {
+ assert_eq!(set.len(), i);
+ let (index, replaced) = set.replace_full(elt);
+ assert!(replaced.is_none());
+ assert_eq!(Some(index), set.get_full(&elt).map(|x| x.0));
+ assert_eq!(set.len(), i + 1);
+ }
+
+ let len = set.len();
+ for &elt in &present {
+ let (index, replaced) = set.replace_full(elt);
+ assert_eq!(Some(elt), replaced);
+ assert_eq!(Some(index), set.get_full(&elt).map(|x| x.0));
+ assert_eq!(set.len(), len);
+ }
+}
+
+#[test]
+fn replace_2() {
+ let mut set = IndexSet::with_capacity(16);
+
+ let mut values = vec![];
+ values.extend(0..16);
+ values.extend(if cfg!(miri) { 32..64 } else { 128..267 });
+
+ for &i in &values {
+ let old_set = set.clone();
+ set.replace(i);
+ for value in old_set.iter() {
+ if set.get(value).is_none() {
+ println!("old_set: {:?}", old_set);
+ println!("set: {:?}", set);
+ panic!("did not find {} in set", value);
+ }
+ }
+ }
+
+ for &i in &values {
+ assert!(set.get(&i).is_some(), "did not find {}", i);
+ }
+}
+
+#[test]
+fn replace_dup() {
+ let mut elements = vec![0, 2, 4, 6, 8];
+ let mut set: IndexSet<u8> = elements.drain(..).collect();
+ {
+ let (i, v) = set.get_full(&0).unwrap();
+ assert_eq!(set.len(), 5);
+ assert_eq!(i, 0);
+ assert_eq!(*v, 0);
+ }
+ {
+ let replaced = set.replace(0);
+ let (i, v) = set.get_full(&0).unwrap();
+ assert_eq!(set.len(), 5);
+ assert_eq!(replaced, Some(0));
+ assert_eq!(i, 0);
+ assert_eq!(*v, 0);
+ }
+}
+
+#[test]
+fn replace_order() {
+ let replace = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23];
+ let mut set = IndexSet::new();
+
+ for &elt in &replace {
+ set.replace(elt);
+ }
+
+ assert_eq!(set.iter().count(), set.len());
+ assert_eq!(set.iter().count(), replace.len());
+ for (a, b) in replace.iter().zip(set.iter()) {
+ assert_eq!(a, b);
+ }
+ for (i, v) in (0..replace.len()).zip(set.iter()) {
+ assert_eq!(set.get_index(i).unwrap(), v);
+ }
+}
+
+#[test]
+fn grow() {
+ let insert = [0, 4, 2, 12, 8, 7, 11];
+ let not_present = [1, 3, 6, 9, 10];
+ let mut set = IndexSet::with_capacity(insert.len());
+
+ for (i, &elt) in insert.iter().enumerate() {
+ assert_eq!(set.len(), i);
+ set.insert(elt);
+ assert_eq!(set.len(), i + 1);
+ assert_eq!(set.get(&elt), Some(&elt));
+ }
+
+ println!("{:?}", set);
+ for &elt in &insert {
+ set.insert(elt * 10);
+ }
+ for &elt in &insert {
+ set.insert(elt * 100);
+ }
+ for (i, &elt) in insert.iter().cycle().enumerate().take(100) {
+ set.insert(elt * 100 + i as i32);
+ }
+ println!("{:?}", set);
+ for &elt in &not_present {
+ assert!(set.get(&elt).is_none());
+ }
+}
+
+#[test]
+fn reserve() {
+ let mut set = IndexSet::<usize>::new();
+ assert_eq!(set.capacity(), 0);
+ set.reserve(100);
+ let capacity = set.capacity();
+ assert!(capacity >= 100);
+ for i in 0..capacity {
+ assert_eq!(set.len(), i);
+ set.insert(i);
+ assert_eq!(set.len(), i + 1);
+ assert_eq!(set.capacity(), capacity);
+ assert_eq!(set.get(&i), Some(&i));
+ }
+ set.insert(capacity);
+ assert_eq!(set.len(), capacity + 1);
+ assert!(set.capacity() > capacity);
+ assert_eq!(set.get(&capacity), Some(&capacity));
+}
+
+#[test]
+fn try_reserve() {
+ let mut set = IndexSet::<usize>::new();
+ assert_eq!(set.capacity(), 0);
+ assert_eq!(set.try_reserve(100), Ok(()));
+ assert!(set.capacity() >= 100);
+ assert!(set.try_reserve(usize::MAX).is_err());
+}
+
+#[test]
+fn shrink_to_fit() {
+ let mut set = IndexSet::<usize>::new();
+ assert_eq!(set.capacity(), 0);
+ for i in 0..100 {
+ assert_eq!(set.len(), i);
+ set.insert(i);
+ assert_eq!(set.len(), i + 1);
+ assert!(set.capacity() >= i + 1);
+ assert_eq!(set.get(&i), Some(&i));
+ set.shrink_to_fit();
+ assert_eq!(set.len(), i + 1);
+ assert_eq!(set.capacity(), i + 1);
+ assert_eq!(set.get(&i), Some(&i));
+ }
+}
+
+#[test]
+fn remove() {
+ let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23];
+ let mut set = IndexSet::new();
+
+ for &elt in &insert {
+ set.insert(elt);
+ }
+
+ assert_eq!(set.iter().count(), set.len());
+ assert_eq!(set.iter().count(), insert.len());
+ for (a, b) in insert.iter().zip(set.iter()) {
+ assert_eq!(a, b);
+ }
+
+ let remove_fail = [99, 77];
+ let remove = [4, 12, 8, 7];
+
+ for &value in &remove_fail {
+ assert!(set.swap_remove_full(&value).is_none());
+ }
+ println!("{:?}", set);
+ for &value in &remove {
+ //println!("{:?}", set);
+ let index = set.get_full(&value).unwrap().0;
+ assert_eq!(set.swap_remove_full(&value), Some((index, value)));
+ }
+ println!("{:?}", set);
+
+ for value in &insert {
+ assert_eq!(set.get(value).is_some(), !remove.contains(value));
+ }
+ assert_eq!(set.len(), insert.len() - remove.len());
+ assert_eq!(set.iter().count(), insert.len() - remove.len());
+}
+
+#[test]
+fn swap_remove_index() {
+ let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23];
+ let mut set = IndexSet::new();
+
+ for &elt in &insert {
+ set.insert(elt);
+ }
+
+ let mut vector = insert.to_vec();
+ let remove_sequence = &[3, 3, 10, 4, 5, 4, 3, 0, 1];
+
+    // Check that the same swap-remove sequence on the vec and the set
+    // has the same result.
+ for &rm in remove_sequence {
+ let out_vec = vector.swap_remove(rm);
+ let out_set = set.swap_remove_index(rm).unwrap();
+ assert_eq!(out_vec, out_set);
+ }
+ assert_eq!(vector.len(), set.len());
+ for (a, b) in vector.iter().zip(set.iter()) {
+ assert_eq!(a, b);
+ }
+}
+
+#[test]
+fn partial_eq_and_eq() {
+ let mut set_a = IndexSet::new();
+ set_a.insert(1);
+ set_a.insert(2);
+ let mut set_b = set_a.clone();
+ assert_eq!(set_a, set_b);
+ set_b.swap_remove(&1);
+ assert_ne!(set_a, set_b);
+
+ let set_c: IndexSet<_> = set_b.into_iter().collect();
+ assert_ne!(set_a, set_c);
+ assert_ne!(set_c, set_a);
+}
+
+#[test]
+fn extend() {
+ let mut set = IndexSet::new();
+ set.extend(vec![&1, &2, &3, &4]);
+ set.extend(vec![5, 6]);
+ assert_eq!(set.into_iter().collect::<Vec<_>>(), vec![1, 2, 3, 4, 5, 6]);
+}
+
+#[test]
+fn comparisons() {
+ let set_a: IndexSet<_> = (0..3).collect();
+ let set_b: IndexSet<_> = (3..6).collect();
+ let set_c: IndexSet<_> = (0..6).collect();
+ let set_d: IndexSet<_> = (3..9).collect();
+
+ assert!(!set_a.is_disjoint(&set_a));
+ assert!(set_a.is_subset(&set_a));
+ assert!(set_a.is_superset(&set_a));
+
+ assert!(set_a.is_disjoint(&set_b));
+ assert!(set_b.is_disjoint(&set_a));
+ assert!(!set_a.is_subset(&set_b));
+ assert!(!set_b.is_subset(&set_a));
+ assert!(!set_a.is_superset(&set_b));
+ assert!(!set_b.is_superset(&set_a));
+
+ assert!(!set_a.is_disjoint(&set_c));
+ assert!(!set_c.is_disjoint(&set_a));
+ assert!(set_a.is_subset(&set_c));
+ assert!(!set_c.is_subset(&set_a));
+ assert!(!set_a.is_superset(&set_c));
+ assert!(set_c.is_superset(&set_a));
+
+ assert!(!set_c.is_disjoint(&set_d));
+ assert!(!set_d.is_disjoint(&set_c));
+ assert!(!set_c.is_subset(&set_d));
+ assert!(!set_d.is_subset(&set_c));
+ assert!(!set_c.is_superset(&set_d));
+ assert!(!set_d.is_superset(&set_c));
+}
+
+#[test]
+fn iter_comparisons() {
+ use std::iter::empty;
+
+ fn check<'a, I1, I2>(iter1: I1, iter2: I2)
+ where
+ I1: Iterator<Item = &'a i32>,
+ I2: Iterator<Item = i32>,
+ {
+ assert!(iter1.copied().eq(iter2));
+ }
+
+ let set_a: IndexSet<_> = (0..3).collect();
+ let set_b: IndexSet<_> = (3..6).collect();
+ let set_c: IndexSet<_> = (0..6).collect();
+ let set_d: IndexSet<_> = (3..9).rev().collect();
+
+ check(set_a.difference(&set_a), empty());
+ check(set_a.symmetric_difference(&set_a), empty());
+ check(set_a.intersection(&set_a), 0..3);
+ check(set_a.union(&set_a), 0..3);
+
+ check(set_a.difference(&set_b), 0..3);
+ check(set_b.difference(&set_a), 3..6);
+ check(set_a.symmetric_difference(&set_b), 0..6);
+ check(set_b.symmetric_difference(&set_a), (3..6).chain(0..3));
+ check(set_a.intersection(&set_b), empty());
+ check(set_b.intersection(&set_a), empty());
+ check(set_a.union(&set_b), 0..6);
+ check(set_b.union(&set_a), (3..6).chain(0..3));
+
+ check(set_a.difference(&set_c), empty());
+ check(set_c.difference(&set_a), 3..6);
+ check(set_a.symmetric_difference(&set_c), 3..6);
+ check(set_c.symmetric_difference(&set_a), 3..6);
+ check(set_a.intersection(&set_c), 0..3);
+ check(set_c.intersection(&set_a), 0..3);
+ check(set_a.union(&set_c), 0..6);
+ check(set_c.union(&set_a), 0..6);
+
+ check(set_c.difference(&set_d), 0..3);
+ check(set_d.difference(&set_c), (6..9).rev());
+ check(
+ set_c.symmetric_difference(&set_d),
+ (0..3).chain((6..9).rev()),
+ );
+ check(set_d.symmetric_difference(&set_c), (6..9).rev().chain(0..3));
+ check(set_c.intersection(&set_d), 3..6);
+ check(set_d.intersection(&set_c), (3..6).rev());
+ check(set_c.union(&set_d), (0..6).chain((6..9).rev()));
+ check(set_d.union(&set_c), (3..9).rev().chain(0..3));
+}
+
+#[test]
+fn ops() {
+ let empty = IndexSet::<i32>::new();
+ let set_a: IndexSet<_> = (0..3).collect();
+ let set_b: IndexSet<_> = (3..6).collect();
+ let set_c: IndexSet<_> = (0..6).collect();
+ let set_d: IndexSet<_> = (3..9).rev().collect();
+
+ #[allow(clippy::eq_op)]
+ {
+ assert_eq!(&set_a & &set_a, set_a);
+ assert_eq!(&set_a | &set_a, set_a);
+ assert_eq!(&set_a ^ &set_a, empty);
+ assert_eq!(&set_a - &set_a, empty);
+ }
+
+ assert_eq!(&set_a & &set_b, empty);
+ assert_eq!(&set_b & &set_a, empty);
+ assert_eq!(&set_a | &set_b, set_c);
+ assert_eq!(&set_b | &set_a, set_c);
+ assert_eq!(&set_a ^ &set_b, set_c);
+ assert_eq!(&set_b ^ &set_a, set_c);
+ assert_eq!(&set_a - &set_b, set_a);
+ assert_eq!(&set_b - &set_a, set_b);
+
+ assert_eq!(&set_a & &set_c, set_a);
+ assert_eq!(&set_c & &set_a, set_a);
+ assert_eq!(&set_a | &set_c, set_c);
+ assert_eq!(&set_c | &set_a, set_c);
+ assert_eq!(&set_a ^ &set_c, set_b);
+ assert_eq!(&set_c ^ &set_a, set_b);
+ assert_eq!(&set_a - &set_c, empty);
+ assert_eq!(&set_c - &set_a, set_b);
+
+ assert_eq!(&set_c & &set_d, set_b);
+ assert_eq!(&set_d & &set_c, set_b);
+ assert_eq!(&set_c | &set_d, &set_a | &set_d);
+ assert_eq!(&set_d | &set_c, &set_a | &set_d);
+ assert_eq!(&set_c ^ &set_d, &set_a | &(&set_d - &set_b));
+ assert_eq!(&set_d ^ &set_c, &set_a | &(&set_d - &set_b));
+ assert_eq!(&set_c - &set_d, set_a);
+ assert_eq!(&set_d - &set_c, &set_d - &set_b);
+}
+
+#[test]
+#[cfg(feature = "std")]
+fn from_array() {
+ let set1 = IndexSet::from([1, 2, 3, 4]);
+ let set2: IndexSet<_> = [1, 2, 3, 4].into();
+
+ assert_eq!(set1, set2);
+}
+
+#[test]
+fn iter_default() {
+ struct Item;
+ fn assert_default<T>()
+ where
+ T: Default + Iterator,
+ {
+ assert!(T::default().next().is_none());
+ }
+ assert_default::<Iter<'static, Item>>();
+ assert_default::<IntoIter<Item>>();
+}
diff --git a/vendor/indexmap/src/util.rs b/vendor/indexmap/src/util.rs
new file mode 100644
index 0000000..377ff51
--- /dev/null
+++ b/vendor/indexmap/src/util.rs
@@ -0,0 +1,53 @@
+use core::ops::{Bound, Range, RangeBounds};
+
+pub(crate) fn third<A, B, C>(t: (A, B, C)) -> C {
+ t.2
+}
+
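+// Normalizes any `RangeBounds<usize>` into a concrete `start..end` range,
+// panicking if a bound is out of range or if the bounds are inverted.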
+pub(crate) fn simplify_range<R>(range: R, len: usize) -> Range<usize>
+where
+ R: RangeBounds<usize>,
+{
+ let start = match range.start_bound() {
+ Bound::Unbounded => 0,
+ Bound::Included(&i) if i <= len => i,
+ Bound::Excluded(&i) if i < len => i + 1,
+ bound => panic!("range start {:?} should be <= length {}", bound, len),
+ };
+ let end = match range.end_bound() {
+ Bound::Unbounded => len,
+ Bound::Excluded(&i) if i <= len => i,
+ Bound::Included(&i) if i < len => i + 1,
+ bound => panic!("range end {:?} should be <= length {}", bound, len),
+ };
+ if start > end {
+ panic!(
+ "range start {:?} should be <= range end {:?}",
+ range.start_bound(),
+ range.end_bound()
+ );
+ }
+ start..end
+}
+
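+// Fallible counterpart of `simplify_range`: returns `None` instead of panicking
+// when a bound is out of range or the bounds are inverted.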
+pub(crate) fn try_simplify_range<R>(range: R, len: usize) -> Option<Range<usize>>
+where
+ R: RangeBounds<usize>,
+{
+ let start = match range.start_bound() {
+ Bound::Unbounded => 0,
+ Bound::Included(&i) if i <= len => i,
+ Bound::Excluded(&i) if i < len => i + 1,
+ _ => return None,
+ };
+ let end = match range.end_bound() {
+ Bound::Unbounded => len,
+ Bound::Excluded(&i) if i <= len => i,
+ Bound::Included(&i) if i < len => i + 1,
+ _ => return None,
+ };
+ if start > end {
+ return None;
+ }
+ Some(start..end)
+}
diff --git a/vendor/indexmap/tests/equivalent_trait.rs b/vendor/indexmap/tests/equivalent_trait.rs
new file mode 100644
index 0000000..ff5943a
--- /dev/null
+++ b/vendor/indexmap/tests/equivalent_trait.rs
@@ -0,0 +1,53 @@
+use indexmap::indexmap;
+use indexmap::Equivalent;
+
+use std::hash::Hash;
+
+#[derive(Debug, Hash)]
+pub struct Pair<A, B>(pub A, pub B);
+
+impl<A, B, C, D> PartialEq<(A, B)> for Pair<C, D>
+where
+ C: PartialEq<A>,
+ D: PartialEq<B>,
+{
+ fn eq(&self, rhs: &(A, B)) -> bool {
+ self.0 == rhs.0 && self.1 == rhs.1
+ }
+}
+
+impl<A, B, X> Equivalent<X> for Pair<A, B>
+where
+ Pair<A, B>: PartialEq<X>,
+ A: Hash + Eq,
+ B: Hash + Eq,
+{
+ fn equivalent(&self, other: &X) -> bool {
+ *self == *other
+ }
+}
+
+#[test]
+fn test_lookup() {
+ let s = String::from;
+ let map = indexmap! {
+ (s("a"), s("b")) => 1,
+ (s("a"), s("x")) => 2,
+ };
+
+ assert!(map.contains_key(&Pair("a", "b")));
+ assert!(!map.contains_key(&Pair("b", "a")));
+}
+
+#[test]
+fn test_string_str() {
+ let s = String::from;
+ let mut map = indexmap! {
+ s("a") => 1, s("b") => 2,
+ s("x") => 3, s("y") => 4,
+ };
+
+ assert!(map.contains_key("a"));
+ assert!(!map.contains_key("z"));
+ assert_eq!(map.swap_remove("b"), Some(2));
+}
diff --git a/vendor/indexmap/tests/macros_full_path.rs b/vendor/indexmap/tests/macros_full_path.rs
new file mode 100644
index 0000000..2467d9b
--- /dev/null
+++ b/vendor/indexmap/tests/macros_full_path.rs
@@ -0,0 +1,19 @@
+#[test]
+fn test_create_map() {
+ let _m = indexmap::indexmap! {
+ 1 => 2,
+ 7 => 1,
+ 2 => 2,
+ 3 => 3,
+ };
+}
+
+#[test]
+fn test_create_set() {
+ let _s = indexmap::indexset! {
+ 1,
+ 7,
+ 2,
+ 3,
+ };
+}
diff --git a/vendor/indexmap/tests/quick.rs b/vendor/indexmap/tests/quick.rs
new file mode 100644
index 0000000..4142e2d
--- /dev/null
+++ b/vendor/indexmap/tests/quick.rs
@@ -0,0 +1,579 @@
+use indexmap::{IndexMap, IndexSet};
+use itertools::Itertools;
+
+use quickcheck::Arbitrary;
+use quickcheck::Gen;
+use quickcheck::QuickCheck;
+use quickcheck::TestResult;
+
+use fnv::FnvHasher;
+use std::hash::{BuildHasher, BuildHasherDefault};
+type FnvBuilder = BuildHasherDefault<FnvHasher>;
+type IndexMapFnv<K, V> = IndexMap<K, V, FnvBuilder>;
+
+use std::cmp::min;
+use std::collections::HashMap;
+use std::collections::HashSet;
+use std::fmt::Debug;
+use std::hash::Hash;
+use std::ops::Bound;
+use std::ops::Deref;
+
+use indexmap::map::Entry as OEntry;
+use std::collections::hash_map::Entry as HEntry;
+
+fn set<'a, T: 'a, I>(iter: I) -> HashSet<T>
+where
+ I: IntoIterator<Item = &'a T>,
+ T: Copy + Hash + Eq,
+{
+ iter.into_iter().copied().collect()
+}
+
+fn indexmap<'a, T: 'a, I>(iter: I) -> IndexMap<T, ()>
+where
+ I: IntoIterator<Item = &'a T>,
+ T: Copy + Hash + Eq,
+{
+ IndexMap::from_iter(iter.into_iter().copied().map(|k| (k, ())))
+}
+
+// Helper macro to allow us to use smaller quickcheck limits under miri.
+macro_rules! quickcheck_limit {
+ (@as_items $($i:item)*) => ($($i)*);
+ {
+ $(
+ $(#[$m:meta])*
+ fn $fn_name:ident($($arg_name:ident : $arg_ty:ty),*) -> $ret:ty {
+ $($code:tt)*
+ }
+ )*
+ } => (
+ quickcheck::quickcheck! {
+ @as_items
+ $(
+ #[test]
+ $(#[$m])*
+ fn $fn_name() {
+ fn prop($($arg_name: $arg_ty),*) -> $ret {
+ $($code)*
+ }
+ let mut quickcheck = QuickCheck::new();
+ if cfg!(miri) {
+ quickcheck = quickcheck
+ .gen(Gen::new(10))
+ .tests(10)
+ .max_tests(100);
+ }
+
+ quickcheck.quickcheck(prop as fn($($arg_ty),*) -> $ret);
+ }
+ )*
+ }
+ )
+}
+
+quickcheck_limit! {
+ fn contains(insert: Vec<u32>) -> bool {
+ let mut map = IndexMap::new();
+ for &key in &insert {
+ map.insert(key, ());
+ }
+ insert.iter().all(|&key| map.get(&key).is_some())
+ }
+
+ fn contains_not(insert: Vec<u8>, not: Vec<u8>) -> bool {
+ let mut map = IndexMap::new();
+ for &key in &insert {
+ map.insert(key, ());
+ }
+ let nots = &set(&not) - &set(&insert);
+ nots.iter().all(|&key| map.get(&key).is_none())
+ }
+
+ fn insert_remove(insert: Vec<u8>, remove: Vec<u8>) -> bool {
+ let mut map = IndexMap::new();
+ for &key in &insert {
+ map.insert(key, ());
+ }
+ for &key in &remove {
+ map.swap_remove(&key);
+ }
+ let elements = &set(&insert) - &set(&remove);
+ map.len() == elements.len() && map.iter().count() == elements.len() &&
+ elements.iter().all(|k| map.get(k).is_some())
+ }
+
+ fn insertion_order(insert: Vec<u32>) -> bool {
+ let mut map = IndexMap::new();
+ for &key in &insert {
+ map.insert(key, ());
+ }
+ itertools::assert_equal(insert.iter().unique(), map.keys());
+ true
+ }
+
+ fn pop(insert: Vec<u8>) -> bool {
+ let mut map = IndexMap::new();
+ for &key in &insert {
+ map.insert(key, ());
+ }
+ let mut pops = Vec::new();
+ while let Some((key, _v)) = map.pop() {
+ pops.push(key);
+ }
+ pops.reverse();
+
+ itertools::assert_equal(insert.iter().unique(), &pops);
+ true
+ }
+
+ fn with_cap(template: Vec<()>) -> bool {
+ let cap = template.len();
+ let map: IndexMap<u8, u8> = IndexMap::with_capacity(cap);
+ println!("wish: {}, got: {} (diff: {})", cap, map.capacity(), map.capacity() as isize - cap as isize);
+ map.capacity() >= cap
+ }
+
+ fn drain_full(insert: Vec<u8>) -> bool {
+ let mut map = IndexMap::new();
+ for &key in &insert {
+ map.insert(key, ());
+ }
+ let mut clone = map.clone();
+ let drained = clone.drain(..);
+ for (key, _) in drained {
+ map.swap_remove(&key);
+ }
+ map.is_empty()
+ }
+
+ fn drain_bounds(insert: Vec<u8>, range: (Bound<usize>, Bound<usize>)) -> TestResult {
+ let mut map = IndexMap::new();
+ for &key in &insert {
+ map.insert(key, ());
+ }
+
+ // First see if `Vec::drain` is happy with this range.
+ let result = std::panic::catch_unwind(|| {
+ let mut keys: Vec<u8> = map.keys().copied().collect();
+ keys.drain(range);
+ keys
+ });
+
+ if let Ok(keys) = result {
+ map.drain(range);
+ // Check that our `drain` matches the same key order.
+ assert!(map.keys().eq(&keys));
+ // Check that hash lookups all work too.
+ assert!(keys.iter().all(|key| map.contains_key(key)));
+ TestResult::passed()
+ } else {
+ // If `Vec::drain` panicked, so should we.
+ TestResult::must_fail(move || { map.drain(range); })
+ }
+ }
+
+ fn shift_remove(insert: Vec<u8>, remove: Vec<u8>) -> bool {
+ let mut map = IndexMap::new();
+ for &key in &insert {
+ map.insert(key, ());
+ }
+ for &key in &remove {
+ map.shift_remove(&key);
+ }
+ let elements = &set(&insert) - &set(&remove);
+
+ // Check that order is preserved after removals
+ let mut iter = map.keys();
+ for &key in insert.iter().unique() {
+ if elements.contains(&key) {
+ assert_eq!(Some(&key), iter.next());
+ }
+ }
+
+ map.len() == elements.len() && map.iter().count() == elements.len() &&
+ elements.iter().all(|k| map.get(k).is_some())
+ }
+
+ fn indexing(insert: Vec<u8>) -> bool {
+ let mut map: IndexMap<_, _> = insert.into_iter().map(|x| (x, x)).collect();
+ let set: IndexSet<_> = map.keys().copied().collect();
+ assert_eq!(map.len(), set.len());
+
+ for (i, &key) in set.iter().enumerate() {
+ assert_eq!(map.get_index(i), Some((&key, &key)));
+ assert_eq!(set.get_index(i), Some(&key));
+ assert_eq!(map[i], key);
+ assert_eq!(set[i], key);
+
+ *map.get_index_mut(i).unwrap().1 >>= 1;
+ map[i] <<= 1;
+ }
+
+ set.iter().enumerate().all(|(i, &key)| {
+ let value = key & !1;
+ map[&key] == value && map[i] == value
+ })
+ }
+
+ // Use `u8` test indices so quickcheck is less likely to go out of bounds.
+ fn swap_indices(vec: Vec<u8>, a: u8, b: u8) -> TestResult {
+ let mut set = IndexSet::<u8>::from_iter(vec);
+ let a = usize::from(a);
+ let b = usize::from(b);
+
+ if a >= set.len() || b >= set.len() {
+ return TestResult::discard();
+ }
+
+ let mut vec = Vec::from_iter(set.iter().cloned());
+ vec.swap(a, b);
+
+ set.swap_indices(a, b);
+
+ // Check both iteration order and hash lookups
+ assert!(set.iter().eq(vec.iter()));
+ assert!(vec.iter().enumerate().all(|(i, x)| {
+ set.get_index_of(x) == Some(i)
+ }));
+ TestResult::passed()
+ }
+
+ // Use `u8` test indices so quickcheck is less likely to go out of bounds.
+ fn move_index(vec: Vec<u8>, from: u8, to: u8) -> TestResult {
+ let mut set = IndexSet::<u8>::from_iter(vec);
+ let from = usize::from(from);
+ let to = usize::from(to);
+
+ if from >= set.len() || to >= set.len() {
+ return TestResult::discard();
+ }
+
+ let mut vec = Vec::from_iter(set.iter().cloned());
+ let x = vec.remove(from);
+ vec.insert(to, x);
+
+ set.move_index(from, to);
+
+ // Check both iteration order and hash lookups
+ assert!(set.iter().eq(vec.iter()));
+ assert!(vec.iter().enumerate().all(|(i, x)| {
+ set.get_index_of(x) == Some(i)
+ }));
+ TestResult::passed()
+ }
+}
+
+use crate::Op::*;
+#[derive(Copy, Clone, Debug)]
+enum Op<K, V> {
+ Add(K, V),
+ Remove(K),
+ AddEntry(K, V),
+ RemoveEntry(K),
+}
+
+impl<K, V> Arbitrary for Op<K, V>
+where
+ K: Arbitrary,
+ V: Arbitrary,
+{
+ fn arbitrary(g: &mut Gen) -> Self {
+ match u32::arbitrary(g) % 4 {
+ 0 => Add(K::arbitrary(g), V::arbitrary(g)),
+ 1 => AddEntry(K::arbitrary(g), V::arbitrary(g)),
+ 2 => Remove(K::arbitrary(g)),
+ _ => RemoveEntry(K::arbitrary(g)),
+ }
+ }
+}
+
+fn do_ops<K, V, S>(ops: &[Op<K, V>], a: &mut IndexMap<K, V, S>, b: &mut HashMap<K, V>)
+where
+ K: Hash + Eq + Clone,
+ V: Clone,
+ S: BuildHasher,
+{
+ for op in ops {
+ match *op {
+ Add(ref k, ref v) => {
+ a.insert(k.clone(), v.clone());
+ b.insert(k.clone(), v.clone());
+ }
+ AddEntry(ref k, ref v) => {
+ a.entry(k.clone()).or_insert_with(|| v.clone());
+ b.entry(k.clone()).or_insert_with(|| v.clone());
+ }
+ Remove(ref k) => {
+ a.swap_remove(k);
+ b.remove(k);
+ }
+ RemoveEntry(ref k) => {
+ if let OEntry::Occupied(ent) = a.entry(k.clone()) {
+ ent.swap_remove_entry();
+ }
+ if let HEntry::Occupied(ent) = b.entry(k.clone()) {
+ ent.remove_entry();
+ }
+ }
+ }
+ //println!("{:?}", a);
+ }
+}
+
+fn assert_maps_equivalent<K, V>(a: &IndexMap<K, V>, b: &HashMap<K, V>) -> bool
+where
+ K: Hash + Eq + Debug,
+ V: Eq + Debug,
+{
+ assert_eq!(a.len(), b.len());
+ assert_eq!(a.iter().next().is_some(), b.iter().next().is_some());
+ for key in a.keys() {
+ assert!(b.contains_key(key), "b does not contain {:?}", key);
+ }
+ for key in b.keys() {
+ assert!(a.get(key).is_some(), "a does not contain {:?}", key);
+ }
+ for key in a.keys() {
+ assert_eq!(a[key], b[key]);
+ }
+ true
+}
+
+quickcheck_limit! {
+ fn operations_i8(ops: Large<Vec<Op<i8, i8>>>) -> bool {
+ let mut map = IndexMap::new();
+ let mut reference = HashMap::new();
+ do_ops(&ops, &mut map, &mut reference);
+ assert_maps_equivalent(&map, &reference)
+ }
+
+ fn operations_string(ops: Vec<Op<Alpha, i8>>) -> bool {
+ let mut map = IndexMap::new();
+ let mut reference = HashMap::new();
+ do_ops(&ops, &mut map, &mut reference);
+ assert_maps_equivalent(&map, &reference)
+ }
+
+ fn keys_values(ops: Large<Vec<Op<i8, i8>>>) -> bool {
+ let mut map = IndexMap::new();
+ let mut reference = HashMap::new();
+ do_ops(&ops, &mut map, &mut reference);
+ let mut visit = IndexMap::new();
+ for (k, v) in map.keys().zip(map.values()) {
+ assert_eq!(&map[k], v);
+ assert!(!visit.contains_key(k));
+ visit.insert(*k, *v);
+ }
+ assert_eq!(visit.len(), reference.len());
+ true
+ }
+
+ fn keys_values_mut(ops: Large<Vec<Op<i8, i8>>>) -> bool {
+ let mut map = IndexMap::new();
+ let mut reference = HashMap::new();
+ do_ops(&ops, &mut map, &mut reference);
+ let mut visit = IndexMap::new();
+ let keys = Vec::from_iter(map.keys().copied());
+ for (k, v) in keys.iter().zip(map.values_mut()) {
+ assert_eq!(&reference[k], v);
+ assert!(!visit.contains_key(k));
+ visit.insert(*k, *v);
+ }
+ assert_eq!(visit.len(), reference.len());
+ true
+ }
+
+ fn equality(ops1: Vec<Op<i8, i8>>, removes: Vec<usize>) -> bool {
+ let mut map = IndexMap::new();
+ let mut reference = HashMap::new();
+ do_ops(&ops1, &mut map, &mut reference);
+ let mut ops2 = ops1.clone();
+ for &r in &removes {
+ if !ops2.is_empty() {
+ let i = r % ops2.len();
+ ops2.remove(i);
+ }
+ }
+ let mut map2 = IndexMapFnv::default();
+ let mut reference2 = HashMap::new();
+ do_ops(&ops2, &mut map2, &mut reference2);
+ assert_eq!(map == map2, reference == reference2);
+ true
+ }
+
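+ // `retain` must keep exactly the non-removed keys, in their original order.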
+ fn retain_ordered(keys: Large<Vec<i8>>, remove: Large<Vec<i8>>) -> () {
+ let mut map = indexmap(keys.iter());
+ let initial_map = map.clone(); // deduplicated in-order input
+ let remove_map = indexmap(remove.iter());
+ let keys_s = set(keys.iter());
+ let remove_s = set(remove.iter());
+ let answer = &keys_s - &remove_s;
+ map.retain(|k, _| !remove_map.contains_key(k));
+
+ // check the values
+ assert_eq!(map.len(), answer.len());
+ for key in &answer {
+ assert!(map.contains_key(key));
+ }
+ // check the order
+ itertools::assert_equal(map.keys(), initial_map.keys().filter(|&k| !remove_map.contains_key(k)));
+ }
+
+ fn sort_1(keyvals: Large<Vec<(i8, i8)>>) -> () {
+ let mut map: IndexMap<_, _> = IndexMap::from_iter(keyvals.to_vec());
+ let mut answer = keyvals.0;
+ answer.sort_by_key(|t| t.0);
+
+ // Dedup in reverse, because IndexMap::from_iter keeps the last value
+ // seen for identical keys.
+ answer.reverse();
+ answer.dedup_by_key(|t| t.0);
+ answer.reverse();
+
+ map.sort_by(|k1, _, k2, _| Ord::cmp(k1, k2));
+
+ // check it contains all the values it should
+ for &(key, val) in &answer {
+ assert_eq!(map[&key], val);
+ }
+
+ // check the order
+ let mapv = Vec::from_iter(map);
+ assert_eq!(answer, mapv);
+ }
+
+ fn sort_2(keyvals: Large<Vec<(i8, i8)>>) -> () {
+ let mut map: IndexMap<_, _> = IndexMap::from_iter(keyvals.to_vec());
+ map.sort_by(|_, v1, _, v2| Ord::cmp(v1, v2));
+ assert_sorted_by_key(map, |t| t.1);
+ }
+
+ fn sort_3(keyvals: Large<Vec<(i8, i8)>>) -> () {
+ let mut map: IndexMap<_, _> = IndexMap::from_iter(keyvals.to_vec());
+ map.sort_by_cached_key(|&k, _| std::cmp::Reverse(k));
+ assert_sorted_by_key(map, |t| std::cmp::Reverse(t.0));
+ }
+
+ fn reverse(keyvals: Large<Vec<(i8, i8)>>) -> () {
+ let mut map: IndexMap<_, _> = IndexMap::from_iter(keyvals.to_vec());
+
+ fn generate_answer(input: &[(i8, i8)]) -> Vec<(i8, i8)> {
+ // to mimic what `IndexMap::from_iter` does:
+ // need to get (A) the unique keys in forward order, and (B) the
+ // last value of each of those keys.
+
+ // create (A): an iterable that yields the unique keys in ltr order
+ let mut seen_keys = HashSet::new();
+ let unique_keys_forward = input.iter().filter_map(move |(k, _)| {
+ if seen_keys.contains(k) { None }
+ else { seen_keys.insert(*k); Some(*k) }
+ });
+
+ // create (B): a mapping of keys to the last value seen for that key
+ // this is the same as reversing the input and taking the first
+ // value seen for that key!
+ let mut last_val_per_key = HashMap::new();
+ for &(k, v) in input.iter().rev() {
+ if !last_val_per_key.contains_key(&k) {
+ last_val_per_key.insert(k, v);
+ }
+ }
+
+ // iterate over the keys in (A) in order, and match each one with
+ // the corresponding last value from (B)
+ let mut ans: Vec<_> = unique_keys_forward
+ .map(|k| (k, *last_val_per_key.get(&k).unwrap()))
+ .collect();
+
+ // finally, since this test is testing `.reverse()`, reverse the
+ // answer in-place
+ ans.reverse();
+
+ ans
+ }
+
+ let answer = generate_answer(&keyvals.0);
+
+ // perform the work
+ map.reverse();
+
+ // check it contains all the values it should
+ for &(key, val) in &answer {
+ assert_eq!(map[&key], val);
+ }
+
+ // check the order
+ let mapv = Vec::from_iter(map);
+ assert_eq!(answer, mapv);
+ }
+}
+
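+/// Assert that `iterable` yields items that are already sorted by `key`.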
+fn assert_sorted_by_key<I, Key, X>(iterable: I, key: Key)
+where
+ I: IntoIterator,
+ I::Item: Ord + Clone + Debug,
+ Key: Fn(&I::Item) -> X,
+ X: Ord,
+{
+ let input = Vec::from_iter(iterable);
+ let mut sorted = input.clone();
+ sorted.sort_by_key(key);
+ assert_eq!(input, sorted);
+}
+
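+/// A short lowercase ASCII string, used to exercise a non-trivial key type.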
+#[derive(Clone, Debug, Hash, PartialEq, Eq)]
+struct Alpha(String);
+
+impl Deref for Alpha {
+ type Target = String;
+ fn deref(&self) -> &String {
+ &self.0
+ }
+}
+
+const ALPHABET: &[u8] = b"abcdefghijklmnopqrstuvwxyz";
+
+impl Arbitrary for Alpha {
+ fn arbitrary(g: &mut Gen) -> Self {
+ let len = usize::arbitrary(g) % g.size();
+ let len = min(len, 16);
+ Alpha(
+ (0..len)
+ .map(|_| ALPHABET[usize::arbitrary(g) % ALPHABET.len()] as char)
+ .collect(),
+ )
+ }
+
+ fn shrink(&self) -> Box<dyn Iterator<Item = Self>> {
+ Box::new((**self).shrink().map(Alpha))
+ }
+}
+
+/// quickcheck Arbitrary adaptor -- make a larger vec
+#[derive(Clone, Debug)]
+struct Large<T>(T);
+
+impl<T> Deref for Large<T> {
+ type Target = T;
+ fn deref(&self) -> &T {
+ &self.0
+ }
+}
+
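+// Generate vectors up to ten times quickcheck's default size parameter.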
+impl<T> Arbitrary for Large<Vec<T>>
+where
+ T: Arbitrary,
+{
+ fn arbitrary(g: &mut Gen) -> Self {
+ let len = usize::arbitrary(g) % (g.size() * 10);
+ Large((0..len).map(|_| T::arbitrary(g)).collect())
+ }
+
+ fn shrink(&self) -> Box<dyn Iterator<Item = Self>> {
+ Box::new((**self).shrink().map(Large))
+ }
+}
diff --git a/vendor/indexmap/tests/tests.rs b/vendor/indexmap/tests/tests.rs
new file mode 100644
index 0000000..7d522f1
--- /dev/null
+++ b/vendor/indexmap/tests/tests.rs
@@ -0,0 +1,28 @@
+use indexmap::{indexmap, indexset};
+
+#[test]
+fn test_sort() {
+ let m = indexmap! {
+ 1 => 2,
+ 7 => 1,
+ 2 => 2,
+ 3 => 3,
+ };
+
+ itertools::assert_equal(
+ m.sorted_by(|_k1, v1, _k2, v2| v1.cmp(v2)),
+ vec![(7, 1), (1, 2), (2, 2), (3, 3)],
+ );
+}
+
+#[test]
+fn test_sort_set() {
+ let s = indexset! {
+ 1,
+ 7,
+ 2,
+ 3,
+ };
+
+ itertools::assert_equal(s.sorted_by(|v1, v2| v1.cmp(v2)), vec![1, 2, 3, 7]);
+}