Diffstat (limited to 'vendor/indexmap/src/map')
-rw-r--r--   vendor/indexmap/src/map/core.rs        762
-rw-r--r--   vendor/indexmap/src/map/core/raw.rs    194
-rw-r--r--   vendor/indexmap/src/map/iter.rs        541
-rw-r--r--   vendor/indexmap/src/map/serde_seq.rs   138
-rw-r--r--   vendor/indexmap/src/map/slice.rs       471
-rw-r--r--   vendor/indexmap/src/map/tests.rs       449
6 files changed, 2555 insertions(+), 0 deletions(-)
diff --git a/vendor/indexmap/src/map/core.rs b/vendor/indexmap/src/map/core.rs
new file mode 100644
index 0000000..4a78035
--- /dev/null
+++ b/vendor/indexmap/src/map/core.rs
@@ -0,0 +1,762 @@
+//! This is the core implementation that doesn't depend on the hasher at all.
+//!
+//! The methods of `IndexMapCore` don't use any Hash properties of K.
+//!
+//! It's cleaner to separate them out, so the compiler can check that we are
+//! not using Hash at all in these methods.
+//!
+//! However, we should probably not let this show in the public API or docs.
+
+mod raw;
+
+use hashbrown::raw::RawTable;
+
+use crate::vec::{Drain, Vec};
+use crate::TryReserveError;
+use core::fmt;
+use core::mem;
+use core::ops::RangeBounds;
+
+use crate::util::simplify_range;
+use crate::{Bucket, Entries, Equivalent, HashValue};
+
+/// Core of the map that does not depend on S
+pub(crate) struct IndexMapCore<K, V> {
+ /// indices mapping from the entry hash to its index.
+ indices: RawTable<usize>,
+ /// entries is a dense vec of entries in their order.
+ entries: Vec<Bucket<K, V>>,
+}
+
+#[inline(always)]
+fn get_hash<K, V>(entries: &[Bucket<K, V>]) -> impl Fn(&usize) -> u64 + '_ {
+ move |&i| entries[i].hash.get()
+}
+
+#[inline]
+fn equivalent<'a, K, V, Q: ?Sized + Equivalent<K>>(
+ key: &'a Q,
+ entries: &'a [Bucket<K, V>],
+) -> impl Fn(&usize) -> bool + 'a {
+ move |&i| Q::equivalent(key, &entries[i].key)
+}
+
+#[inline]
+fn erase_index(table: &mut RawTable<usize>, hash: HashValue, index: usize) {
+ let erased = table.erase_entry(hash.get(), move |&i| i == index);
+ debug_assert!(erased);
+}
+
+#[inline]
+fn update_index(table: &mut RawTable<usize>, hash: HashValue, old: usize, new: usize) {
+ let index = table
+ .get_mut(hash.get(), move |&i| i == old)
+ .expect("index not found");
+ *index = new;
+}
+
+impl<K, V> Clone for IndexMapCore<K, V>
+where
+ K: Clone,
+ V: Clone,
+{
+ fn clone(&self) -> Self {
+ let mut new = Self::new();
+ new.clone_from(self);
+ new
+ }
+
+ fn clone_from(&mut self, other: &Self) {
+ let hasher = get_hash(&other.entries);
+ self.indices.clone_from_with_hasher(&other.indices, hasher);
+ if self.entries.capacity() < other.entries.len() {
+ // If we must resize, match the indices capacity.
+ let additional = other.entries.len() - self.entries.len();
+ self.reserve_entries(additional);
+ }
+ self.entries.clone_from(&other.entries);
+ }
+}
+
+impl<K, V> fmt::Debug for IndexMapCore<K, V>
+where
+ K: fmt::Debug,
+ V: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("IndexMapCore")
+ .field("indices", &raw::DebugIndices(&self.indices))
+ .field("entries", &self.entries)
+ .finish()
+ }
+}
+
+impl<K, V> Entries for IndexMapCore<K, V> {
+ type Entry = Bucket<K, V>;
+
+ #[inline]
+ fn into_entries(self) -> Vec<Self::Entry> {
+ self.entries
+ }
+
+ #[inline]
+ fn as_entries(&self) -> &[Self::Entry] {
+ &self.entries
+ }
+
+ #[inline]
+ fn as_entries_mut(&mut self) -> &mut [Self::Entry] {
+ &mut self.entries
+ }
+
+ fn with_entries<F>(&mut self, f: F)
+ where
+ F: FnOnce(&mut [Self::Entry]),
+ {
+ f(&mut self.entries);
+ self.rebuild_hash_table();
+ }
+}
+
+impl<K, V> IndexMapCore<K, V> {
+ /// The maximum capacity before the `entries` allocation would exceed `isize::MAX`.
+ const MAX_ENTRIES_CAPACITY: usize = (isize::MAX as usize) / mem::size_of::<Bucket<K, V>>();
+
+ #[inline]
+ pub(crate) const fn new() -> Self {
+ IndexMapCore {
+ indices: RawTable::new(),
+ entries: Vec::new(),
+ }
+ }
+
+ #[inline]
+ pub(crate) fn with_capacity(n: usize) -> Self {
+ IndexMapCore {
+ indices: RawTable::with_capacity(n),
+ entries: Vec::with_capacity(n),
+ }
+ }
+
+ #[inline]
+ pub(crate) fn len(&self) -> usize {
+ self.indices.len()
+ }
+
+ #[inline]
+ pub(crate) fn capacity(&self) -> usize {
+ Ord::min(self.indices.capacity(), self.entries.capacity())
+ }
+
+ pub(crate) fn clear(&mut self) {
+ self.indices.clear();
+ self.entries.clear();
+ }
+
+ pub(crate) fn truncate(&mut self, len: usize) {
+ if len < self.len() {
+ self.erase_indices(len, self.entries.len());
+ self.entries.truncate(len);
+ }
+ }
+
+ pub(crate) fn drain<R>(&mut self, range: R) -> Drain<'_, Bucket<K, V>>
+ where
+ R: RangeBounds<usize>,
+ {
+ let range = simplify_range(range, self.entries.len());
+ self.erase_indices(range.start, range.end);
+ self.entries.drain(range)
+ }
+
+ #[cfg(feature = "rayon")]
+ pub(crate) fn par_drain<R>(&mut self, range: R) -> rayon::vec::Drain<'_, Bucket<K, V>>
+ where
+ K: Send,
+ V: Send,
+ R: RangeBounds<usize>,
+ {
+ use rayon::iter::ParallelDrainRange;
+ let range = simplify_range(range, self.entries.len());
+ self.erase_indices(range.start, range.end);
+ self.entries.par_drain(range)
+ }
+
+ pub(crate) fn split_off(&mut self, at: usize) -> Self {
+ assert!(at <= self.entries.len());
+ self.erase_indices(at, self.entries.len());
+ let entries = self.entries.split_off(at);
+
+ let mut indices = RawTable::with_capacity(entries.len());
+ raw::insert_bulk_no_grow(&mut indices, &entries);
+ Self { indices, entries }
+ }
+
+ /// Reserve capacity for `additional` more key-value pairs.
+ pub(crate) fn reserve(&mut self, additional: usize) {
+ self.indices.reserve(additional, get_hash(&self.entries));
+ // Only grow entries if necessary, since we also round up capacity.
+ if additional > self.entries.capacity() - self.entries.len() {
+ self.reserve_entries(additional);
+ }
+ }
+
+ /// Reserve entries capacity, rounded up to match the indices
+ fn reserve_entries(&mut self, additional: usize) {
+ // Use a soft-limit on the maximum capacity, but if the caller explicitly
+ // requested more, do it and let them have the resulting panic.
+ let new_capacity = Ord::min(self.indices.capacity(), Self::MAX_ENTRIES_CAPACITY);
+ let try_add = new_capacity - self.entries.len();
+ if try_add > additional && self.entries.try_reserve_exact(try_add).is_ok() {
+ return;
+ }
+ self.entries.reserve_exact(additional);
+ }
+
+ /// Reserve capacity for `additional` more key-value pairs, without over-allocating.
+ pub(crate) fn reserve_exact(&mut self, additional: usize) {
+ self.indices.reserve(additional, get_hash(&self.entries));
+ self.entries.reserve_exact(additional);
+ }
+
+ /// Try to reserve capacity for `additional` more key-value pairs.
+ pub(crate) fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
+ self.indices
+ .try_reserve(additional, get_hash(&self.entries))
+ .map_err(TryReserveError::from_hashbrown)?;
+ // Only grow entries if necessary, since we also round up capacity.
+ if additional > self.entries.capacity() - self.entries.len() {
+ self.try_reserve_entries(additional)
+ } else {
+ Ok(())
+ }
+ }
+
+ /// Try to reserve entries capacity, rounded up to match the indices
+ fn try_reserve_entries(&mut self, additional: usize) -> Result<(), TryReserveError> {
+ // Use a soft-limit on the maximum capacity, but if the caller explicitly
+ // requested more, do it and let them have the resulting error.
+ let new_capacity = Ord::min(self.indices.capacity(), Self::MAX_ENTRIES_CAPACITY);
+ let try_add = new_capacity - self.entries.len();
+ if try_add > additional && self.entries.try_reserve_exact(try_add).is_ok() {
+ return Ok(());
+ }
+ self.entries
+ .try_reserve_exact(additional)
+ .map_err(TryReserveError::from_alloc)
+ }
+
+ /// Try to reserve capacity for `additional` more key-value pairs, without over-allocating.
+ pub(crate) fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
+ self.indices
+ .try_reserve(additional, get_hash(&self.entries))
+ .map_err(TryReserveError::from_hashbrown)?;
+ self.entries
+ .try_reserve_exact(additional)
+ .map_err(TryReserveError::from_alloc)
+ }
+
+ /// Shrink the capacity of the map with a lower bound
+ pub(crate) fn shrink_to(&mut self, min_capacity: usize) {
+ self.indices
+ .shrink_to(min_capacity, get_hash(&self.entries));
+ self.entries.shrink_to(min_capacity);
+ }
+
+ /// Remove the last key-value pair
+ pub(crate) fn pop(&mut self) -> Option<(K, V)> {
+ if let Some(entry) = self.entries.pop() {
+ let last = self.entries.len();
+ erase_index(&mut self.indices, entry.hash, last);
+ Some((entry.key, entry.value))
+ } else {
+ None
+ }
+ }
+
+ /// Append a key-value pair to `entries`, *without* checking whether it already exists.
+ fn push_entry(&mut self, hash: HashValue, key: K, value: V) {
+ if self.entries.len() == self.entries.capacity() {
+ // Reserve our own capacity synced to the indices,
+ // rather than letting `Vec::push` just double it.
+ self.reserve_entries(1);
+ }
+ self.entries.push(Bucket { hash, key, value });
+ }
+
+ /// Return the index in `entries` where an equivalent key can be found
+ pub(crate) fn get_index_of<Q>(&self, hash: HashValue, key: &Q) -> Option<usize>
+ where
+ Q: ?Sized + Equivalent<K>,
+ {
+ let eq = equivalent(key, &self.entries);
+ self.indices.get(hash.get(), eq).copied()
+ }
+
+ pub(crate) fn insert_full(&mut self, hash: HashValue, key: K, value: V) -> (usize, Option<V>)
+ where
+ K: Eq,
+ {
+ match self.find_or_insert(hash, &key) {
+ Ok(i) => (i, Some(mem::replace(&mut self.entries[i].value, value))),
+ Err(i) => {
+ debug_assert_eq!(i, self.entries.len());
+ self.push_entry(hash, key, value);
+ (i, None)
+ }
+ }
+ }
+
+ /// Remove an entry by shifting all entries that follow it
+ pub(crate) fn shift_remove_full<Q>(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)>
+ where
+ Q: ?Sized + Equivalent<K>,
+ {
+ let eq = equivalent(key, &self.entries);
+ match self.indices.remove_entry(hash.get(), eq) {
+ Some(index) => {
+ let (key, value) = self.shift_remove_finish(index);
+ Some((index, key, value))
+ }
+ None => None,
+ }
+ }
+
+ /// Remove an entry by shifting all entries that follow it
+ pub(crate) fn shift_remove_index(&mut self, index: usize) -> Option<(K, V)> {
+ match self.entries.get(index) {
+ Some(entry) => {
+ erase_index(&mut self.indices, entry.hash, index);
+ Some(self.shift_remove_finish(index))
+ }
+ None => None,
+ }
+ }
+
+ /// Remove an entry by shifting all entries that follow it
+ ///
+ /// The index should already be removed from `self.indices`.
+ fn shift_remove_finish(&mut self, index: usize) -> (K, V) {
+ // Correct indices that point to the entries that followed the removed entry.
+ self.decrement_indices(index + 1, self.entries.len());
+
+ // Use Vec::remove to actually remove the entry.
+ let entry = self.entries.remove(index);
+ (entry.key, entry.value)
+ }
+
+ /// Decrement all indices in the range `start..end`.
+ ///
+ /// The index `start - 1` should not exist in `self.indices`.
+ /// All entries should still be in their original positions.
+ fn decrement_indices(&mut self, start: usize, end: usize) {
+ // Use a heuristic between a full sweep vs. a `find()` for every shifted item.
+ let shifted_entries = &self.entries[start..end];
+ if shifted_entries.len() > self.indices.buckets() / 2 {
+ // Shift all indices in range.
+ for i in self.indices_mut() {
+ if start <= *i && *i < end {
+ *i -= 1;
+ }
+ }
+ } else {
+ // Find each entry in range to shift its index.
+ for (i, entry) in (start..end).zip(shifted_entries) {
+ update_index(&mut self.indices, entry.hash, i, i - 1);
+ }
+ }
+ }
+
+ /// Increment all indices in the range `start..end`.
+ ///
+ /// The index `end` should not exist in `self.indices`.
+ /// All entries should still be in their original positions.
+ fn increment_indices(&mut self, start: usize, end: usize) {
+ // Use a heuristic between a full sweep vs. a `find()` for every shifted item.
+ let shifted_entries = &self.entries[start..end];
+ if shifted_entries.len() > self.indices.buckets() / 2 {
+ // Shift all indices in range.
+ for i in self.indices_mut() {
+ if start <= *i && *i < end {
+ *i += 1;
+ }
+ }
+ } else {
+ // Find each entry in range to shift its index, updated in reverse so
+ // we never have duplicated indices that might have a hash collision.
+ for (i, entry) in (start..end).zip(shifted_entries).rev() {
+ update_index(&mut self.indices, entry.hash, i, i + 1);
+ }
+ }
+ }
+
+ pub(super) fn move_index(&mut self, from: usize, to: usize) {
+ let from_hash = self.entries[from].hash;
+ if from != to {
+ // Use a sentinel index so other indices don't collide.
+ update_index(&mut self.indices, from_hash, from, usize::MAX);
+
+ // Update all other indices and rotate the entry positions.
+ if from < to {
+ self.decrement_indices(from + 1, to + 1);
+ self.entries[from..=to].rotate_left(1);
+ } else if to < from {
+ self.increment_indices(to, from);
+ self.entries[to..=from].rotate_right(1);
+ }
+
+ // Change the sentinel index to its final position.
+ update_index(&mut self.indices, from_hash, usize::MAX, to);
+ }
+ }
+
+ pub(crate) fn swap_indices(&mut self, a: usize, b: usize) {
+ // If they're equal and in-bounds, there's nothing to do.
+ if a == b && a < self.entries.len() {
+ return;
+ }
+
+ // We'll get a "nice" bounds-check from indexing `self.entries`,
+ // and then we expect to find it in the table as well.
+ let [ref_a, ref_b] = self
+ .indices
+ .get_many_mut(
+ [self.entries[a].hash.get(), self.entries[b].hash.get()],
+ move |i, &x| if i == 0 { x == a } else { x == b },
+ )
+ .expect("indices not found");
+
+ mem::swap(ref_a, ref_b);
+ self.entries.swap(a, b);
+ }
+
+ /// Remove an entry by swapping it with the last
+ pub(crate) fn swap_remove_full<Q>(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)>
+ where
+ Q: ?Sized + Equivalent<K>,
+ {
+ let eq = equivalent(key, &self.entries);
+ match self.indices.remove_entry(hash.get(), eq) {
+ Some(index) => {
+ let (key, value) = self.swap_remove_finish(index);
+ Some((index, key, value))
+ }
+ None => None,
+ }
+ }
+
+ /// Remove an entry by swapping it with the last
+ pub(crate) fn swap_remove_index(&mut self, index: usize) -> Option<(K, V)> {
+ match self.entries.get(index) {
+ Some(entry) => {
+ erase_index(&mut self.indices, entry.hash, index);
+ Some(self.swap_remove_finish(index))
+ }
+ None => None,
+ }
+ }
+
+ /// Finish removing an entry by swapping it with the last
+ ///
+ /// The index should already be removed from `self.indices`.
+ fn swap_remove_finish(&mut self, index: usize) -> (K, V) {
+ // use swap_remove, but then we need to update the index that points
+ // to the other entry that has to move
+ let entry = self.entries.swap_remove(index);
+
+ // correct index that points to the entry that had to swap places
+ if let Some(entry) = self.entries.get(index) {
+ // was not last element
+ // examine new element in `index` and find it in indices
+ let last = self.entries.len();
+ update_index(&mut self.indices, entry.hash, last, index);
+ }
+
+ (entry.key, entry.value)
+ }
+
+ /// Erase `start..end` from `indices`, and shift `end..` indices down to `start..`
+ ///
+ /// All of these items should still be at their original location in `entries`.
+ /// This is used by `drain`, which will let `Vec::drain` do the work on `entries`.
+ fn erase_indices(&mut self, start: usize, end: usize) {
+ let (init, shifted_entries) = self.entries.split_at(end);
+ let (start_entries, erased_entries) = init.split_at(start);
+
+ let erased = erased_entries.len();
+ let shifted = shifted_entries.len();
+ let half_capacity = self.indices.buckets() / 2;
+
+ // Use a heuristic between different strategies
+ if erased == 0 {
+ // Degenerate case, nothing to do
+ } else if start + shifted < half_capacity && start < erased {
+ // Reinsert everything, as there are few kept indices
+ self.indices.clear();
+
+ // Reinsert stable indices, then shifted indices
+ raw::insert_bulk_no_grow(&mut self.indices, start_entries);
+ raw::insert_bulk_no_grow(&mut self.indices, shifted_entries);
+ } else if erased + shifted < half_capacity {
+ // Find each affected index, as there are few to adjust
+
+ // Find erased indices
+ for (i, entry) in (start..).zip(erased_entries) {
+ erase_index(&mut self.indices, entry.hash, i);
+ }
+
+ // Find shifted indices
+ for ((new, old), entry) in (start..).zip(end..).zip(shifted_entries) {
+ update_index(&mut self.indices, entry.hash, old, new);
+ }
+ } else {
+ // Sweep the whole table for adjustments
+ self.erase_indices_sweep(start, end);
+ }
+
+ debug_assert_eq!(self.indices.len(), start + shifted);
+ }
+
+ pub(crate) fn retain_in_order<F>(&mut self, mut keep: F)
+ where
+ F: FnMut(&mut K, &mut V) -> bool,
+ {
+ self.entries
+ .retain_mut(|entry| keep(&mut entry.key, &mut entry.value));
+ if self.entries.len() < self.indices.len() {
+ self.rebuild_hash_table();
+ }
+ }
+
+ fn rebuild_hash_table(&mut self) {
+ self.indices.clear();
+ raw::insert_bulk_no_grow(&mut self.indices, &self.entries);
+ }
+
+ pub(crate) fn reverse(&mut self) {
+ self.entries.reverse();
+
+ // No need to save hash indices, can easily calculate what they should
+ // be, given that this is an in-place reversal.
+ let len = self.entries.len();
+ for i in self.indices_mut() {
+ *i = len - *i - 1;
+ }
+ }
+}
+
+/// Entry for an existing key-value pair or a vacant location to
+/// insert one.
+pub enum Entry<'a, K, V> {
+ /// Existing slot with equivalent key.
+ Occupied(OccupiedEntry<'a, K, V>),
+ /// Vacant slot (no equivalent key in the map).
+ Vacant(VacantEntry<'a, K, V>),
+}
+
+impl<'a, K, V> Entry<'a, K, V> {
+ /// Inserts the given default value in the entry if it is vacant and returns a mutable
+ /// reference to it. Otherwise a mutable reference to an already existent value is returned.
+ ///
+ /// Computes in **O(1)** time (amortized average).
+ pub fn or_insert(self, default: V) -> &'a mut V {
+ match self {
+ Entry::Occupied(entry) => entry.into_mut(),
+ Entry::Vacant(entry) => entry.insert(default),
+ }
+ }
+
+ /// Inserts the result of the `call` function in the entry if it is vacant and returns a mutable
+ /// reference to it. Otherwise a mutable reference to an already existent value is returned.
+ ///
+ /// Computes in **O(1)** time (amortized average).
+ pub fn or_insert_with<F>(self, call: F) -> &'a mut V
+ where
+ F: FnOnce() -> V,
+ {
+ match self {
+ Entry::Occupied(entry) => entry.into_mut(),
+ Entry::Vacant(entry) => entry.insert(call()),
+ }
+ }
+
+ /// Inserts the result of the `call` function with a reference to the entry's key if it is
+ /// vacant, and returns a mutable reference to the new value. Otherwise a mutable reference to
+ /// an already existent value is returned.
+ ///
+ /// Computes in **O(1)** time (amortized average).
+ pub fn or_insert_with_key<F>(self, call: F) -> &'a mut V
+ where
+ F: FnOnce(&K) -> V,
+ {
+ match self {
+ Entry::Occupied(entry) => entry.into_mut(),
+ Entry::Vacant(entry) => {
+ let value = call(&entry.key);
+ entry.insert(value)
+ }
+ }
+ }
+
+ /// Gets a reference to the entry's key, either within the map if occupied,
+ /// or else the new key that was used to find the entry.
+ pub fn key(&self) -> &K {
+ match *self {
+ Entry::Occupied(ref entry) => entry.key(),
+ Entry::Vacant(ref entry) => entry.key(),
+ }
+ }
+
+ /// Return the index where the key-value pair exists or will be inserted.
+ pub fn index(&self) -> usize {
+ match *self {
+ Entry::Occupied(ref entry) => entry.index(),
+ Entry::Vacant(ref entry) => entry.index(),
+ }
+ }
+
+ /// Modifies the entry if it is occupied.
+ pub fn and_modify<F>(self, f: F) -> Self
+ where
+ F: FnOnce(&mut V),
+ {
+ match self {
+ Entry::Occupied(mut o) => {
+ f(o.get_mut());
+ Entry::Occupied(o)
+ }
+ x => x,
+ }
+ }
+
+ /// Inserts a default-constructed value in the entry if it is vacant and returns a mutable
+ /// reference to it. Otherwise a mutable reference to an already existent value is returned.
+ ///
+ /// Computes in **O(1)** time (amortized average).
+ pub fn or_default(self) -> &'a mut V
+ where
+ V: Default,
+ {
+ match self {
+ Entry::Occupied(entry) => entry.into_mut(),
+ Entry::Vacant(entry) => entry.insert(V::default()),
+ }
+ }
+}
+
+impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for Entry<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ Entry::Vacant(ref v) => f.debug_tuple(stringify!(Entry)).field(v).finish(),
+ Entry::Occupied(ref o) => f.debug_tuple(stringify!(Entry)).field(o).finish(),
+ }
+ }
+}
+
+pub use self::raw::OccupiedEntry;
+
+// Extra methods that don't threaten the unsafe encapsulation.
+impl<K, V> OccupiedEntry<'_, K, V> {
+ /// Sets the value of the entry to `value`, and returns the entry's old value.
+ pub fn insert(&mut self, value: V) -> V {
+ mem::replace(self.get_mut(), value)
+ }
+
+ /// Remove the key, value pair stored in the map for this entry, and return the value.
+ ///
+ /// **NOTE:** This is equivalent to `.swap_remove()`.
+ pub fn remove(self) -> V {
+ self.swap_remove()
+ }
+
+ /// Remove the key, value pair stored in the map for this entry, and return the value.
+ ///
+ /// Like `Vec::swap_remove`, the pair is removed by swapping it with the
+ /// last element of the map and popping it off. **This perturbs
+ /// the position of what used to be the last element!**
+ ///
+ /// Computes in **O(1)** time (average).
+ pub fn swap_remove(self) -> V {
+ self.swap_remove_entry().1
+ }
+
+ /// Remove the key, value pair stored in the map for this entry, and return the value.
+ ///
+ /// Like `Vec::remove`, the pair is removed by shifting all of the
+ /// elements that follow it, preserving their relative order.
+ /// **This perturbs the index of all of those elements!**
+ ///
+ /// Computes in **O(n)** time (average).
+ pub fn shift_remove(self) -> V {
+ self.shift_remove_entry().1
+ }
+
+ /// Remove and return the key, value pair stored in the map for this entry
+ ///
+ /// **NOTE:** This is equivalent to `.swap_remove_entry()`.
+ pub fn remove_entry(self) -> (K, V) {
+ self.swap_remove_entry()
+ }
+}
+
+impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for OccupiedEntry<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct(stringify!(OccupiedEntry))
+ .field("key", self.key())
+ .field("value", self.get())
+ .finish()
+ }
+}
+
+/// A view into a vacant entry in an `IndexMap`.
+/// It is part of the [`Entry`] enum.
+///
+/// [`Entry`]: enum.Entry.html
+pub struct VacantEntry<'a, K, V> {
+ map: &'a mut IndexMapCore<K, V>,
+ hash: HashValue,
+ key: K,
+}
+
+impl<'a, K, V> VacantEntry<'a, K, V> {
+ /// Gets a reference to the key that was used to find the entry.
+ pub fn key(&self) -> &K {
+ &self.key
+ }
+
+ /// Takes ownership of the key, leaving the entry vacant.
+ pub fn into_key(self) -> K {
+ self.key
+ }
+
+ /// Return the index where the key-value pair will be inserted.
+ pub fn index(&self) -> usize {
+ self.map.indices.len()
+ }
+
+ /// Inserts the entry's key and the given value into the map, and returns a mutable reference
+ /// to the value.
+ pub fn insert(self, value: V) -> &'a mut V {
+ let i = self.index();
+ let Self { map, hash, key } = self;
+ map.indices.insert(hash.get(), i, get_hash(&map.entries));
+ debug_assert_eq!(i, map.entries.len());
+ map.push_entry(hash, key, value);
+ &mut map.entries[i].value
+ }
+}
+
+impl<K: fmt::Debug, V> fmt::Debug for VacantEntry<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple(stringify!(VacantEntry))
+ .field(self.key())
+ .finish()
+ }
+}
+
+#[test]
+fn assert_send_sync() {
+ fn assert_send_sync<T: Send + Sync>() {}
+ assert_send_sync::<IndexMapCore<i32, i32>>();
+ assert_send_sync::<Entry<'_, i32, i32>>();
+}
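
For reference, a minimal sketch of how the `Entry`, `OccupiedEntry`, and `VacantEntry` types above surface through the public `IndexMap::entry` API (assuming only the `indexmap` crate as a dependency; not part of the vendored file itself):

```
use indexmap::IndexMap;

fn main() {
    let mut counts: IndexMap<&str, u32> = IndexMap::new();

    // `entry` returns either `Entry::Occupied` or `Entry::Vacant`;
    // `or_insert` handles both arms and yields `&mut V`.
    *counts.entry("a").or_insert(0) += 1;
    *counts.entry("a").or_insert(0) += 1;

    // `or_insert_with_key` builds the value lazily from the entry's key.
    counts.entry("bb").or_insert_with_key(|k| k.len() as u32);

    assert_eq!(counts["a"], 2);
    // Entries keep their insertion order.
    assert_eq!(counts.get_index(0), Some((&"a", &2)));
    assert_eq!(counts.get_index(1), Some((&"bb", &2)));
}
```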
diff --git a/vendor/indexmap/src/map/core/raw.rs b/vendor/indexmap/src/map/core/raw.rs
new file mode 100644
index 0000000..be71c9c
--- /dev/null
+++ b/vendor/indexmap/src/map/core/raw.rs
@@ -0,0 +1,194 @@
+#![allow(unsafe_code)]
+//! This module encapsulates the `unsafe` access to `hashbrown::raw::RawTable`,
+//! mostly in dealing with its bucket "pointers".
+
+use super::{equivalent, get_hash, Bucket, Entry, HashValue, IndexMapCore, VacantEntry};
+use core::fmt;
+use core::mem::replace;
+use hashbrown::raw::RawTable;
+
+type RawBucket = hashbrown::raw::Bucket<usize>;
+
+/// Inserts many entries into a raw table without reallocating.
+///
+/// ***Panics*** if there is not sufficient capacity already.
+pub(super) fn insert_bulk_no_grow<K, V>(indices: &mut RawTable<usize>, entries: &[Bucket<K, V>]) {
+ assert!(indices.capacity() - indices.len() >= entries.len());
+ for entry in entries {
+ // SAFETY: we asserted that sufficient capacity exists for all entries.
+ unsafe {
+ indices.insert_no_grow(entry.hash.get(), indices.len());
+ }
+ }
+}
+
+pub(super) struct DebugIndices<'a>(pub &'a RawTable<usize>);
+impl fmt::Debug for DebugIndices<'_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // SAFETY: we're not letting any of the buckets escape this function
+ let indices = unsafe { self.0.iter().map(|raw_bucket| *raw_bucket.as_ref()) };
+ f.debug_list().entries(indices).finish()
+ }
+}
+
+impl<K, V> IndexMapCore<K, V> {
+ /// Sweep the whole table to erase indices start..end
+ pub(super) fn erase_indices_sweep(&mut self, start: usize, end: usize) {
+ // SAFETY: we're not letting any of the buckets escape this function
+ unsafe {
+ let offset = end - start;
+ for bucket in self.indices.iter() {
+ let i = bucket.as_mut();
+ if *i >= end {
+ *i -= offset;
+ } else if *i >= start {
+ self.indices.erase(bucket);
+ }
+ }
+ }
+ }
+
+ /// Search for a key in the table and return `Ok(entry_index)` if found.
+ /// Otherwise, insert the key and return `Err(new_index)`.
+ ///
+ /// Note that hashbrown may resize the table to reserve space for insertion,
+ /// even before checking if it's already present, so this is somewhat biased
+ /// towards new items.
+ pub(crate) fn find_or_insert(&mut self, hash: HashValue, key: &K) -> Result<usize, usize>
+ where
+ K: Eq,
+ {
+ let hash = hash.get();
+ let eq = equivalent(key, &self.entries);
+ let hasher = get_hash(&self.entries);
+ // SAFETY: We're not mutating between find and read/insert.
+ unsafe {
+ match self.indices.find_or_find_insert_slot(hash, eq, hasher) {
+ Ok(raw_bucket) => Ok(*raw_bucket.as_ref()),
+ Err(slot) => {
+ let index = self.indices.len();
+ self.indices.insert_in_slot(hash, slot, index);
+ Err(index)
+ }
+ }
+ }
+ }
+
+ pub(crate) fn entry(&mut self, hash: HashValue, key: K) -> Entry<'_, K, V>
+ where
+ K: Eq,
+ {
+ let eq = equivalent(&key, &self.entries);
+ match self.indices.find(hash.get(), eq) {
+ // SAFETY: The entry is created with a live raw bucket, at the same time
+ // we have a &mut reference to the map, so it can not be modified further.
+ Some(raw_bucket) => Entry::Occupied(OccupiedEntry {
+ map: self,
+ raw_bucket,
+ key,
+ }),
+ None => Entry::Vacant(VacantEntry {
+ map: self,
+ hash,
+ key,
+ }),
+ }
+ }
+
+ pub(super) fn indices_mut(&mut self) -> impl Iterator<Item = &mut usize> {
+ // SAFETY: we're not letting any of the buckets escape this function,
+ // only the item references that are appropriately bound to `&mut self`.
+ unsafe { self.indices.iter().map(|bucket| bucket.as_mut()) }
+ }
+}
+
+/// A view into an occupied entry in an `IndexMap`.
+/// It is part of the [`Entry`] enum.
+///
+/// [`Entry`]: enum.Entry.html
+// SAFETY: The lifetime of the map reference also constrains the raw bucket,
+// which is essentially a raw pointer into the map indices.
+pub struct OccupiedEntry<'a, K, V> {
+ map: &'a mut IndexMapCore<K, V>,
+ raw_bucket: RawBucket,
+ key: K,
+}
+
+// `hashbrown::raw::Bucket` is only `Send`, not `Sync`.
+// SAFETY: `&self` only accesses the bucket to read it.
+unsafe impl<K: Sync, V: Sync> Sync for OccupiedEntry<'_, K, V> {}
+
+// The parent module also adds methods that don't threaten the unsafe encapsulation.
+impl<'a, K, V> OccupiedEntry<'a, K, V> {
+ /// Gets a reference to the entry's key in the map.
+ ///
+ /// Note that this is not the key that was used to find the entry. There may be an observable
+ /// difference if the key type has any distinguishing features outside of `Hash` and `Eq`, like
+ /// extra fields or the memory address of an allocation.
+ pub fn key(&self) -> &K {
+ &self.map.entries[self.index()].key
+ }
+
+ /// Gets a reference to the entry's value in the map.
+ pub fn get(&self) -> &V {
+ &self.map.entries[self.index()].value
+ }
+
+ /// Gets a mutable reference to the entry's value in the map.
+ ///
+ /// If you need a reference which may outlive the destruction of the
+ /// `Entry` value, see `into_mut`.
+ pub fn get_mut(&mut self) -> &mut V {
+ let index = self.index();
+ &mut self.map.entries[index].value
+ }
+
+ /// Put the new key in the occupied entry's key slot
+ pub(crate) fn replace_key(self) -> K {
+ let index = self.index();
+ let old_key = &mut self.map.entries[index].key;
+ replace(old_key, self.key)
+ }
+
+ /// Return the index of the key-value pair
+ #[inline]
+ pub fn index(&self) -> usize {
+ // SAFETY: we have &mut map keeping the bucket stable
+ unsafe { *self.raw_bucket.as_ref() }
+ }
+
+ /// Converts into a mutable reference to the entry's value in the map,
+ /// with a lifetime bound to the map itself.
+ pub fn into_mut(self) -> &'a mut V {
+ let index = self.index();
+ &mut self.map.entries[index].value
+ }
+
+ /// Remove and return the key, value pair stored in the map for this entry
+ ///
+ /// Like `Vec::swap_remove`, the pair is removed by swapping it with the
+ /// last element of the map and popping it off. **This perturbs
+ /// the position of what used to be the last element!**
+ ///
+ /// Computes in **O(1)** time (average).
+ pub fn swap_remove_entry(self) -> (K, V) {
+ // SAFETY: This is safe because it can only happen once (self is consumed)
+ // and map.indices has not been modified since entry construction
+ let (index, _slot) = unsafe { self.map.indices.remove(self.raw_bucket) };
+ self.map.swap_remove_finish(index)
+ }
+
+ /// Remove and return the key, value pair stored in the map for this entry
+ ///
+ /// Like `Vec::remove`, the pair is removed by shifting all of the
+ /// elements that follow it, preserving their relative order.
+ /// **This perturbs the index of all of those elements!**
+ ///
+ /// Computes in **O(n)** time (average).
+ pub fn shift_remove_entry(self) -> (K, V) {
+ // SAFETY: This is safe because it can only happen once (self is consumed)
+ // and map.indices has not been modified since entry construction
+ let (index, _slot) = unsafe { self.map.indices.remove(self.raw_bucket) };
+ self.map.shift_remove_finish(index)
+ }
+}
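
The two removal strategies wired up above (`swap_remove_entry` vs. `shift_remove_entry`) have observably different effects on ordering. A small sketch through the public API (assuming the `indexmap` crate as a dependency):

```
use indexmap::IndexMap;

fn main() {
    // swap_remove: the removed slot is filled by the *last* entry, O(1).
    let mut m: IndexMap<char, u32> = ('a'..='d').zip(1u32..).collect();
    assert_eq!(m.swap_remove(&'b'), Some(2));
    assert_eq!(m.keys().collect::<Vec<_>>(), [&'a', &'d', &'c']);

    // shift_remove: all following entries shift down, preserving order, O(n).
    let mut m: IndexMap<char, u32> = ('a'..='d').zip(1u32..).collect();
    assert_eq!(m.shift_remove(&'b'), Some(2));
    assert_eq!(m.keys().collect::<Vec<_>>(), [&'a', &'c', &'d']);
}
```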
diff --git a/vendor/indexmap/src/map/iter.rs b/vendor/indexmap/src/map/iter.rs
new file mode 100644
index 0000000..db6e140
--- /dev/null
+++ b/vendor/indexmap/src/map/iter.rs
@@ -0,0 +1,541 @@
+use super::{Bucket, Entries, IndexMap, Slice};
+
+use alloc::vec::{self, Vec};
+use core::fmt;
+use core::iter::FusedIterator;
+use core::slice;
+
+impl<'a, K, V, S> IntoIterator for &'a IndexMap<K, V, S> {
+ type Item = (&'a K, &'a V);
+ type IntoIter = Iter<'a, K, V>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.iter()
+ }
+}
+
+impl<'a, K, V, S> IntoIterator for &'a mut IndexMap<K, V, S> {
+ type Item = (&'a K, &'a mut V);
+ type IntoIter = IterMut<'a, K, V>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.iter_mut()
+ }
+}
+
+impl<K, V, S> IntoIterator for IndexMap<K, V, S> {
+ type Item = (K, V);
+ type IntoIter = IntoIter<K, V>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ IntoIter::new(self.into_entries())
+ }
+}
+
+/// An iterator over the entries of an `IndexMap`.
+///
+/// This `struct` is created by the [`iter`] method on [`IndexMap`]. See its
+/// documentation for more.
+///
+/// [`iter`]: struct.IndexMap.html#method.iter
+/// [`IndexMap`]: struct.IndexMap.html
+pub struct Iter<'a, K, V> {
+ iter: slice::Iter<'a, Bucket<K, V>>,
+}
+
+impl<'a, K, V> Iter<'a, K, V> {
+ pub(super) fn new(entries: &'a [Bucket<K, V>]) -> Self {
+ Self {
+ iter: entries.iter(),
+ }
+ }
+
+ /// Returns a slice of the remaining entries in the iterator.
+ pub fn as_slice(&self) -> &'a Slice<K, V> {
+ Slice::from_slice(self.iter.as_slice())
+ }
+}
+
+impl<'a, K, V> Iterator for Iter<'a, K, V> {
+ type Item = (&'a K, &'a V);
+
+ iterator_methods!(Bucket::refs);
+}
+
+impl<K, V> DoubleEndedIterator for Iter<'_, K, V> {
+ double_ended_iterator_methods!(Bucket::refs);
+}
+
+impl<K, V> ExactSizeIterator for Iter<'_, K, V> {
+ fn len(&self) -> usize {
+ self.iter.len()
+ }
+}
+
+impl<K, V> FusedIterator for Iter<'_, K, V> {}
+
+// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
+impl<K, V> Clone for Iter<'_, K, V> {
+ fn clone(&self) -> Self {
+ Iter {
+ iter: self.iter.clone(),
+ }
+ }
+}
+
+impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for Iter<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.clone()).finish()
+ }
+}
+
+impl<K, V> Default for Iter<'_, K, V> {
+ fn default() -> Self {
+ Self { iter: [].iter() }
+ }
+}
+
+/// A mutable iterator over the entries of an `IndexMap`.
+///
+/// This `struct` is created by the [`iter_mut`] method on [`IndexMap`]. See its
+/// documentation for more.
+///
+/// [`iter_mut`]: struct.IndexMap.html#method.iter_mut
+/// [`IndexMap`]: struct.IndexMap.html
+pub struct IterMut<'a, K, V> {
+ iter: slice::IterMut<'a, Bucket<K, V>>,
+}
+
+impl<'a, K, V> IterMut<'a, K, V> {
+ pub(super) fn new(entries: &'a mut [Bucket<K, V>]) -> Self {
+ Self {
+ iter: entries.iter_mut(),
+ }
+ }
+
+ /// Returns a slice of the remaining entries in the iterator.
+ pub fn as_slice(&self) -> &Slice<K, V> {
+ Slice::from_slice(self.iter.as_slice())
+ }
+
+ /// Returns a mutable slice of the remaining entries in the iterator.
+ ///
+ /// To avoid creating `&mut` references that alias, this is forced to consume the iterator.
+ pub fn into_slice(self) -> &'a mut Slice<K, V> {
+ Slice::from_mut_slice(self.iter.into_slice())
+ }
+}
+
+impl<'a, K, V> Iterator for IterMut<'a, K, V> {
+ type Item = (&'a K, &'a mut V);
+
+ iterator_methods!(Bucket::ref_mut);
+}
+
+impl<K, V> DoubleEndedIterator for IterMut<'_, K, V> {
+ double_ended_iterator_methods!(Bucket::ref_mut);
+}
+
+impl<K, V> ExactSizeIterator for IterMut<'_, K, V> {
+ fn len(&self) -> usize {
+ self.iter.len()
+ }
+}
+
+impl<K, V> FusedIterator for IterMut<'_, K, V> {}
+
+impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for IterMut<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let iter = self.iter.as_slice().iter().map(Bucket::refs);
+ f.debug_list().entries(iter).finish()
+ }
+}
+
+impl<K, V> Default for IterMut<'_, K, V> {
+ fn default() -> Self {
+ Self {
+ iter: [].iter_mut(),
+ }
+ }
+}
+
+/// An owning iterator over the entries of an `IndexMap`.
+///
+/// This `struct` is created by the [`into_iter`] method on [`IndexMap`]
+/// (provided by the `IntoIterator` trait). See its documentation for more.
+///
+/// [`into_iter`]: struct.IndexMap.html#method.into_iter
+/// [`IndexMap`]: struct.IndexMap.html
+pub struct IntoIter<K, V> {
+ iter: vec::IntoIter<Bucket<K, V>>,
+}
+
+impl<K, V> IntoIter<K, V> {
+ pub(super) fn new(entries: Vec<Bucket<K, V>>) -> Self {
+ Self {
+ iter: entries.into_iter(),
+ }
+ }
+
+ /// Returns a slice of the remaining entries in the iterator.
+ pub fn as_slice(&self) -> &Slice<K, V> {
+ Slice::from_slice(self.iter.as_slice())
+ }
+
+ /// Returns a mutable slice of the remaining entries in the iterator.
+ pub fn as_mut_slice(&mut self) -> &mut Slice<K, V> {
+ Slice::from_mut_slice(self.iter.as_mut_slice())
+ }
+}
+
+impl<K, V> Iterator for IntoIter<K, V> {
+ type Item = (K, V);
+
+ iterator_methods!(Bucket::key_value);
+}
+
+impl<K, V> DoubleEndedIterator for IntoIter<K, V> {
+ double_ended_iterator_methods!(Bucket::key_value);
+}
+
+impl<K, V> ExactSizeIterator for IntoIter<K, V> {
+ fn len(&self) -> usize {
+ self.iter.len()
+ }
+}
+
+impl<K, V> FusedIterator for IntoIter<K, V> {}
+
+impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for IntoIter<K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let iter = self.iter.as_slice().iter().map(Bucket::refs);
+ f.debug_list().entries(iter).finish()
+ }
+}
+
+impl<K, V> Default for IntoIter<K, V> {
+ fn default() -> Self {
+ Self {
+ iter: Vec::new().into_iter(),
+ }
+ }
+}
+
+/// A draining iterator over the entries of an `IndexMap`.
+///
+/// This `struct` is created by the [`drain`] method on [`IndexMap`]. See its
+/// documentation for more.
+///
+/// [`drain`]: struct.IndexMap.html#method.drain
+/// [`IndexMap`]: struct.IndexMap.html
+pub struct Drain<'a, K, V> {
+ iter: vec::Drain<'a, Bucket<K, V>>,
+}
+
+impl<'a, K, V> Drain<'a, K, V> {
+ pub(super) fn new(iter: vec::Drain<'a, Bucket<K, V>>) -> Self {
+ Self { iter }
+ }
+
+ /// Returns a slice of the remaining entries in the iterator.
+ pub fn as_slice(&self) -> &Slice<K, V> {
+ Slice::from_slice(self.iter.as_slice())
+ }
+}
+
+impl<K, V> Iterator for Drain<'_, K, V> {
+ type Item = (K, V);
+
+ iterator_methods!(Bucket::key_value);
+}
+
+impl<K, V> DoubleEndedIterator for Drain<'_, K, V> {
+ double_ended_iterator_methods!(Bucket::key_value);
+}
+
+impl<K, V> ExactSizeIterator for Drain<'_, K, V> {
+ fn len(&self) -> usize {
+ self.iter.len()
+ }
+}
+
+impl<K, V> FusedIterator for Drain<'_, K, V> {}
+
+impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for Drain<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let iter = self.iter.as_slice().iter().map(Bucket::refs);
+ f.debug_list().entries(iter).finish()
+ }
+}
+
+/// An iterator over the keys of an `IndexMap`.
+///
+/// This `struct` is created by the [`keys`] method on [`IndexMap`]. See its
+/// documentation for more.
+///
+/// [`keys`]: struct.IndexMap.html#method.keys
+/// [`IndexMap`]: struct.IndexMap.html
+pub struct Keys<'a, K, V> {
+ iter: slice::Iter<'a, Bucket<K, V>>,
+}
+
+impl<'a, K, V> Keys<'a, K, V> {
+ pub(super) fn new(entries: &'a [Bucket<K, V>]) -> Self {
+ Self {
+ iter: entries.iter(),
+ }
+ }
+}
+
+impl<'a, K, V> Iterator for Keys<'a, K, V> {
+ type Item = &'a K;
+
+ iterator_methods!(Bucket::key_ref);
+}
+
+impl<K, V> DoubleEndedIterator for Keys<'_, K, V> {
+ double_ended_iterator_methods!(Bucket::key_ref);
+}
+
+impl<K, V> ExactSizeIterator for Keys<'_, K, V> {
+ fn len(&self) -> usize {
+ self.iter.len()
+ }
+}
+
+impl<K, V> FusedIterator for Keys<'_, K, V> {}
+
+// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
+impl<K, V> Clone for Keys<'_, K, V> {
+ fn clone(&self) -> Self {
+ Keys {
+ iter: self.iter.clone(),
+ }
+ }
+}
+
+impl<K: fmt::Debug, V> fmt::Debug for Keys<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.clone()).finish()
+ }
+}
+
+impl<K, V> Default for Keys<'_, K, V> {
+ fn default() -> Self {
+ Self { iter: [].iter() }
+ }
+}
+
+/// An owning iterator over the keys of an `IndexMap`.
+///
+/// This `struct` is created by the [`into_keys`] method on [`IndexMap`].
+/// See its documentation for more.
+///
+/// [`IndexMap`]: struct.IndexMap.html
+/// [`into_keys`]: struct.IndexMap.html#method.into_keys
+pub struct IntoKeys<K, V> {
+ iter: vec::IntoIter<Bucket<K, V>>,
+}
+
+impl<K, V> IntoKeys<K, V> {
+ pub(super) fn new(entries: Vec<Bucket<K, V>>) -> Self {
+ Self {
+ iter: entries.into_iter(),
+ }
+ }
+}
+
+impl<K, V> Iterator for IntoKeys<K, V> {
+ type Item = K;
+
+ iterator_methods!(Bucket::key);
+}
+
+impl<K, V> DoubleEndedIterator for IntoKeys<K, V> {
+ double_ended_iterator_methods!(Bucket::key);
+}
+
+impl<K, V> ExactSizeIterator for IntoKeys<K, V> {
+ fn len(&self) -> usize {
+ self.iter.len()
+ }
+}
+
+impl<K, V> FusedIterator for IntoKeys<K, V> {}
+
+impl<K: fmt::Debug, V> fmt::Debug for IntoKeys<K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let iter = self.iter.as_slice().iter().map(Bucket::key_ref);
+ f.debug_list().entries(iter).finish()
+ }
+}
+
+impl<K, V> Default for IntoKeys<K, V> {
+ fn default() -> Self {
+ Self {
+ iter: Vec::new().into_iter(),
+ }
+ }
+}
+
+/// An iterator over the values of an `IndexMap`.
+///
+/// This `struct` is created by the [`values`] method on [`IndexMap`]. See its
+/// documentation for more.
+///
+/// [`values`]: struct.IndexMap.html#method.values
+/// [`IndexMap`]: struct.IndexMap.html
+pub struct Values<'a, K, V> {
+ iter: slice::Iter<'a, Bucket<K, V>>,
+}
+
+impl<'a, K, V> Values<'a, K, V> {
+ pub(super) fn new(entries: &'a [Bucket<K, V>]) -> Self {
+ Self {
+ iter: entries.iter(),
+ }
+ }
+}
+
+impl<'a, K, V> Iterator for Values<'a, K, V> {
+ type Item = &'a V;
+
+ iterator_methods!(Bucket::value_ref);
+}
+
+impl<K, V> DoubleEndedIterator for Values<'_, K, V> {
+ double_ended_iterator_methods!(Bucket::value_ref);
+}
+
+impl<K, V> ExactSizeIterator for Values<'_, K, V> {
+ fn len(&self) -> usize {
+ self.iter.len()
+ }
+}
+
+impl<K, V> FusedIterator for Values<'_, K, V> {}
+
+// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
+impl<K, V> Clone for Values<'_, K, V> {
+ fn clone(&self) -> Self {
+ Values {
+ iter: self.iter.clone(),
+ }
+ }
+}
+
+impl<K, V: fmt::Debug> fmt::Debug for Values<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.clone()).finish()
+ }
+}
+
+impl<K, V> Default for Values<'_, K, V> {
+ fn default() -> Self {
+ Self { iter: [].iter() }
+ }
+}
+
+/// A mutable iterator over the values of an `IndexMap`.
+///
+/// This `struct` is created by the [`values_mut`] method on [`IndexMap`]. See its
+/// documentation for more.
+///
+/// [`values_mut`]: struct.IndexMap.html#method.values_mut
+/// [`IndexMap`]: struct.IndexMap.html
+pub struct ValuesMut<'a, K, V> {
+ iter: slice::IterMut<'a, Bucket<K, V>>,
+}
+
+impl<'a, K, V> ValuesMut<'a, K, V> {
+ pub(super) fn new(entries: &'a mut [Bucket<K, V>]) -> Self {
+ Self {
+ iter: entries.iter_mut(),
+ }
+ }
+}
+
+impl<'a, K, V> Iterator for ValuesMut<'a, K, V> {
+ type Item = &'a mut V;
+
+ iterator_methods!(Bucket::value_mut);
+}
+
+impl<K, V> DoubleEndedIterator for ValuesMut<'_, K, V> {
+ double_ended_iterator_methods!(Bucket::value_mut);
+}
+
+impl<K, V> ExactSizeIterator for ValuesMut<'_, K, V> {
+ fn len(&self) -> usize {
+ self.iter.len()
+ }
+}
+
+impl<K, V> FusedIterator for ValuesMut<'_, K, V> {}
+
+impl<K, V: fmt::Debug> fmt::Debug for ValuesMut<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let iter = self.iter.as_slice().iter().map(Bucket::value_ref);
+ f.debug_list().entries(iter).finish()
+ }
+}
+
+impl<K, V> Default for ValuesMut<'_, K, V> {
+ fn default() -> Self {
+ Self {
+ iter: [].iter_mut(),
+ }
+ }
+}
+
+/// An owning iterator over the values of an `IndexMap`.
+///
+/// This `struct` is created by the [`into_values`] method on [`IndexMap`].
+/// See its documentation for more.
+///
+/// [`IndexMap`]: struct.IndexMap.html
+/// [`into_values`]: struct.IndexMap.html#method.into_values
+pub struct IntoValues<K, V> {
+ iter: vec::IntoIter<Bucket<K, V>>,
+}
+
+impl<K, V> IntoValues<K, V> {
+ pub(super) fn new(entries: Vec<Bucket<K, V>>) -> Self {
+ Self {
+ iter: entries.into_iter(),
+ }
+ }
+}
+
+impl<K, V> Iterator for IntoValues<K, V> {
+ type Item = V;
+
+ iterator_methods!(Bucket::value);
+}
+
+impl<K, V> DoubleEndedIterator for IntoValues<K, V> {
+ double_ended_iterator_methods!(Bucket::value);
+}
+
+impl<K, V> ExactSizeIterator for IntoValues<K, V> {
+ fn len(&self) -> usize {
+ self.iter.len()
+ }
+}
+
+impl<K, V> FusedIterator for IntoValues<K, V> {}
+
+impl<K, V: fmt::Debug> fmt::Debug for IntoValues<K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let iter = self.iter.as_slice().iter().map(Bucket::value_ref);
+ f.debug_list().entries(iter).finish()
+ }
+}
+
+impl<K, V> Default for IntoValues<K, V> {
+ fn default() -> Self {
+ Self {
+ iter: Vec::new().into_iter(),
+ }
+ }
+}
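
A short sketch of these iterator types in use, including `Iter::as_slice` as defined above (assuming an `indexmap` version that matches the vendored source):

```
use indexmap::IndexMap;

fn main() {
    let map: IndexMap<&str, i32> = [("a", 1), ("b", 2), ("c", 3)].into_iter().collect();

    // Iteration follows insertion order.
    let mut iter = map.iter();
    assert_eq!(iter.next(), Some((&"a", &1)));
    // The unvisited tail is still available as a contiguous `Slice`.
    assert_eq!(iter.as_slice().first(), Some((&"b", &2)));

    // Keys and values iterate in the same order.
    assert_eq!(map.keys().copied().collect::<Vec<_>>(), ["a", "b", "c"]);
    assert_eq!(map.values().copied().collect::<Vec<_>>(), [1, 2, 3]);
}
```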
diff --git a/vendor/indexmap/src/map/serde_seq.rs b/vendor/indexmap/src/map/serde_seq.rs
new file mode 100644
index 0000000..f10aa57
--- /dev/null
+++ b/vendor/indexmap/src/map/serde_seq.rs
@@ -0,0 +1,138 @@
+//! Functions to serialize and deserialize an `IndexMap` as an ordered sequence.
+//!
+//! The default `serde` implementation serializes `IndexMap` as a normal map,
+//! but there is no guarantee that serialization formats will preserve the order
+//! of the key-value pairs. This module serializes `IndexMap` as a sequence of
+//! `(key, value)` elements instead, in order.
+//!
+//! This module may be used in a field attribute for derived implementations:
+//!
+//! ```
+//! # use indexmap::IndexMap;
+//! # use serde_derive::{Deserialize, Serialize};
+//! #[derive(Deserialize, Serialize)]
+//! struct Data {
+//! #[serde(with = "indexmap::map::serde_seq")]
+//! map: IndexMap<i32, u64>,
+//! // ...
+//! }
+//! ```
+
+use serde::de::{Deserialize, Deserializer, SeqAccess, Visitor};
+use serde::ser::{Serialize, Serializer};
+
+use core::fmt::{self, Formatter};
+use core::hash::{BuildHasher, Hash};
+use core::marker::PhantomData;
+
+use crate::map::Slice as MapSlice;
+use crate::set::Slice as SetSlice;
+use crate::IndexMap;
+
+/// Serializes a `map::Slice` as an ordered sequence.
+///
+/// This behaves like [`crate::map::serde_seq`] for `IndexMap`, serializing a sequence
+/// of `(key, value)` pairs, rather than as a map that might not preserve order.
+impl<K, V> Serialize for MapSlice<K, V>
+where
+ K: Serialize,
+ V: Serialize,
+{
+ fn serialize<T>(&self, serializer: T) -> Result<T::Ok, T::Error>
+ where
+ T: Serializer,
+ {
+ serializer.collect_seq(self)
+ }
+}
+
+/// Serializes a `set::Slice` as an ordered sequence.
+impl<T> Serialize for SetSlice<T>
+where
+ T: Serialize,
+{
+ fn serialize<Se>(&self, serializer: Se) -> Result<Se::Ok, Se::Error>
+ where
+ Se: Serializer,
+ {
+ serializer.collect_seq(self)
+ }
+}
+
+/// Serializes an `IndexMap` as an ordered sequence.
+///
+/// This function may be used in a field attribute for deriving `Serialize`:
+///
+/// ```
+/// # use indexmap::IndexMap;
+/// # use serde_derive::Serialize;
+/// #[derive(Serialize)]
+/// struct Data {
+/// #[serde(serialize_with = "indexmap::map::serde_seq::serialize")]
+/// map: IndexMap<i32, u64>,
+/// // ...
+/// }
+/// ```
+pub fn serialize<K, V, S, T>(map: &IndexMap<K, V, S>, serializer: T) -> Result<T::Ok, T::Error>
+where
+ K: Serialize + Hash + Eq,
+ V: Serialize,
+ S: BuildHasher,
+ T: Serializer,
+{
+ serializer.collect_seq(map)
+}
+
+/// Visitor to deserialize a *sequenced* `IndexMap`
+struct SeqVisitor<K, V, S>(PhantomData<(K, V, S)>);
+
+impl<'de, K, V, S> Visitor<'de> for SeqVisitor<K, V, S>
+where
+ K: Deserialize<'de> + Eq + Hash,
+ V: Deserialize<'de>,
+ S: Default + BuildHasher,
+{
+ type Value = IndexMap<K, V, S>;
+
+ fn expecting(&self, formatter: &mut Formatter<'_>) -> fmt::Result {
+ write!(formatter, "a sequenced map")
+ }
+
+ fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
+ where
+ A: SeqAccess<'de>,
+ {
+ let capacity = seq.size_hint().unwrap_or(0);
+ let mut map = IndexMap::with_capacity_and_hasher(capacity, S::default());
+
+ while let Some((key, value)) = seq.next_element()? {
+ map.insert(key, value);
+ }
+
+ Ok(map)
+ }
+}
+
+/// Deserializes an `IndexMap` from an ordered sequence.
+///
+/// This function may be used in a field attribute for deriving `Deserialize`:
+///
+/// ```
+/// # use indexmap::IndexMap;
+/// # use serde_derive::Deserialize;
+/// #[derive(Deserialize)]
+/// struct Data {
+/// #[serde(deserialize_with = "indexmap::map::serde_seq::deserialize")]
+/// map: IndexMap<i32, u64>,
+/// // ...
+/// }
+/// ```
+pub fn deserialize<'de, D, K, V, S>(deserializer: D) -> Result<IndexMap<K, V, S>, D::Error>
+where
+ D: Deserializer<'de>,
+ K: Deserialize<'de> + Eq + Hash,
+ V: Deserialize<'de>,
+ S: Default + BuildHasher,
+{
+ deserializer.deserialize_seq(SeqVisitor(PhantomData))
+}
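
A hedged round-trip sketch of the `serde_seq` representation (assuming `indexmap` built with its `serde` feature, plus `serde_derive` and `serde_json` as dependencies; the JSON shown is what `collect_seq` over `(key, value)` pairs is expected to produce):

```
use indexmap::IndexMap;
use serde_derive::{Deserialize, Serialize};

#[derive(Deserialize, Serialize)]
struct Data {
    #[serde(with = "indexmap::map::serde_seq")]
    map: IndexMap<i32, u64>,
}

fn main() -> Result<(), serde_json::Error> {
    let mut map = IndexMap::new();
    map.insert(10, 100);
    map.insert(3, 30);

    // Serialized as a sequence of (key, value) pairs, preserving insertion order,
    // instead of a JSON object whose key order a format need not keep.
    let json = serde_json::to_string(&Data { map })?;
    assert_eq!(json, r#"{"map":[[10,100],[3,30]]}"#);

    // Deserializing rebuilds the map in the same order.
    let back: Data = serde_json::from_str(&json)?;
    assert_eq!(back.map.get_index(0), Some((&10, &100)));
    Ok(())
}
```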
diff --git a/vendor/indexmap/src/map/slice.rs b/vendor/indexmap/src/map/slice.rs
new file mode 100644
index 0000000..9fb876f
--- /dev/null
+++ b/vendor/indexmap/src/map/slice.rs
@@ -0,0 +1,471 @@
+use super::{
+ Bucket, Entries, IndexMap, IntoIter, IntoKeys, IntoValues, Iter, IterMut, Keys, Values,
+ ValuesMut,
+};
+use crate::util::try_simplify_range;
+
+use alloc::boxed::Box;
+use alloc::vec::Vec;
+use core::cmp::Ordering;
+use core::fmt;
+use core::hash::{Hash, Hasher};
+use core::ops::{self, Bound, Index, IndexMut, RangeBounds};
+
+/// A dynamically-sized slice of key-value pairs in an `IndexMap`.
+///
+/// This supports indexed operations much like a `[(K, V)]` slice,
+/// but not any hashed operations on the map keys.
+///
+/// Unlike `IndexMap`, `Slice` does consider the order for `PartialEq`
+/// and `Eq`, and it also implements `PartialOrd`, `Ord`, and `Hash`.
+#[repr(transparent)]
+pub struct Slice<K, V> {
+ pub(crate) entries: [Bucket<K, V>],
+}
+
+// SAFETY: `Slice<K, V>` is a transparent wrapper around `[Bucket<K, V>]`,
+// and reference lifetimes are bound together in function signatures.
+#[allow(unsafe_code)]
+impl<K, V> Slice<K, V> {
+ pub(super) fn from_slice(entries: &[Bucket<K, V>]) -> &Self {
+ unsafe { &*(entries as *const [Bucket<K, V>] as *const Self) }
+ }
+
+ pub(super) fn from_mut_slice(entries: &mut [Bucket<K, V>]) -> &mut Self {
+ unsafe { &mut *(entries as *mut [Bucket<K, V>] as *mut Self) }
+ }
+
+ pub(super) fn from_boxed(entries: Box<[Bucket<K, V>]>) -> Box<Self> {
+ unsafe { Box::from_raw(Box::into_raw(entries) as *mut Self) }
+ }
+
+ fn into_boxed(self: Box<Self>) -> Box<[Bucket<K, V>]> {
+ unsafe { Box::from_raw(Box::into_raw(self) as *mut [Bucket<K, V>]) }
+ }
+}
+
+impl<K, V> Slice<K, V> {
+ pub(crate) fn into_entries(self: Box<Self>) -> Vec<Bucket<K, V>> {
+ self.into_boxed().into_vec()
+ }
+
+ /// Return the number of key-value pairs in the map slice.
+ #[inline]
+ pub fn len(&self) -> usize {
+ self.entries.len()
+ }
+
+ /// Returns true if the map slice contains no elements.
+ #[inline]
+ pub fn is_empty(&self) -> bool {
+ self.entries.is_empty()
+ }
+
+ /// Get a key-value pair by index.
+ ///
+ /// Valid indices are *0 <= index < self.len()*
+ pub fn get_index(&self, index: usize) -> Option<(&K, &V)> {
+ self.entries.get(index).map(Bucket::refs)
+ }
+
+ /// Get a key-value pair by index, with mutable access to the value.
+ ///
+ /// Valid indices are *0 <= index < self.len()*
+ pub fn get_index_mut(&mut self, index: usize) -> Option<(&K, &mut V)> {
+ self.entries.get_mut(index).map(Bucket::ref_mut)
+ }
+
+ /// Returns a slice of key-value pairs in the given range of indices.
+ ///
+ /// Valid indices are *0 <= index < self.len()*
+ pub fn get_range<R: RangeBounds<usize>>(&self, range: R) -> Option<&Self> {
+ let range = try_simplify_range(range, self.entries.len())?;
+ self.entries.get(range).map(Slice::from_slice)
+ }
+
+ /// Returns a mutable slice of key-value pairs in the given range of indices.
+ ///
+ /// Valid indices are *0 <= index < self.len()*
+ pub fn get_range_mut<R: RangeBounds<usize>>(&mut self, range: R) -> Option<&mut Self> {
+ let range = try_simplify_range(range, self.entries.len())?;
+ self.entries.get_mut(range).map(Slice::from_mut_slice)
+ }
+
+ /// Get the first key-value pair.
+ pub fn first(&self) -> Option<(&K, &V)> {
+ self.entries.first().map(Bucket::refs)
+ }
+
+ /// Get the first key-value pair, with mutable access to the value.
+ pub fn first_mut(&mut self) -> Option<(&K, &mut V)> {
+ self.entries.first_mut().map(Bucket::ref_mut)
+ }
+
+ /// Get the last key-value pair.
+ pub fn last(&self) -> Option<(&K, &V)> {
+ self.entries.last().map(Bucket::refs)
+ }
+
+ /// Get the last key-value pair, with mutable access to the value.
+ pub fn last_mut(&mut self) -> Option<(&K, &mut V)> {
+ self.entries.last_mut().map(Bucket::ref_mut)
+ }
+
+ /// Divides one slice into two at an index.
+ ///
+ /// ***Panics*** if `index > len`.
+ pub fn split_at(&self, index: usize) -> (&Self, &Self) {
+ let (first, second) = self.entries.split_at(index);
+ (Self::from_slice(first), Self::from_slice(second))
+ }
+
+ /// Divides one mutable slice into two at an index.
+ ///
+ /// ***Panics*** if `index > len`.
+ pub fn split_at_mut(&mut self, index: usize) -> (&mut Self, &mut Self) {
+ let (first, second) = self.entries.split_at_mut(index);
+ (Self::from_mut_slice(first), Self::from_mut_slice(second))
+ }
+
+ /// Returns the first key-value pair and the rest of the slice,
+ /// or `None` if it is empty.
+ pub fn split_first(&self) -> Option<((&K, &V), &Self)> {
+ if let [first, rest @ ..] = &self.entries {
+ Some((first.refs(), Self::from_slice(rest)))
+ } else {
+ None
+ }
+ }
+
+ /// Returns the first key-value pair and the rest of the slice,
+ /// with mutable access to the value, or `None` if it is empty.
+ pub fn split_first_mut(&mut self) -> Option<((&K, &mut V), &mut Self)> {
+ if let [first, rest @ ..] = &mut self.entries {
+ Some((first.ref_mut(), Self::from_mut_slice(rest)))
+ } else {
+ None
+ }
+ }
+
+ /// Returns the last key-value pair and the rest of the slice,
+ /// or `None` if it is empty.
+ pub fn split_last(&self) -> Option<((&K, &V), &Self)> {
+ if let [rest @ .., last] = &self.entries {
+ Some((last.refs(), Self::from_slice(rest)))
+ } else {
+ None
+ }
+ }
+
+ /// Returns the last key-value pair and the rest of the slice,
+ /// with mutable access to the value, or `None` if it is empty.
+ pub fn split_last_mut(&mut self) -> Option<((&K, &mut V), &mut Self)> {
+ if let [rest @ .., last] = &mut self.entries {
+ Some((last.ref_mut(), Self::from_mut_slice(rest)))
+ } else {
+ None
+ }
+ }
+
+ /// Return an iterator over the key-value pairs of the map slice.
+ pub fn iter(&self) -> Iter<'_, K, V> {
+ Iter::new(&self.entries)
+ }
+
+ /// Return an iterator over the key-value pairs of the map slice.
+ pub fn iter_mut(&mut self) -> IterMut<'_, K, V> {
+ IterMut::new(&mut self.entries)
+ }
+
+ /// Return an iterator over the keys of the map slice.
+ pub fn keys(&self) -> Keys<'_, K, V> {
+ Keys::new(&self.entries)
+ }
+
+ /// Return an owning iterator over the keys of the map slice.
+ pub fn into_keys(self: Box<Self>) -> IntoKeys<K, V> {
+ IntoKeys::new(self.into_entries())
+ }
+
+ /// Return an iterator over the values of the map slice.
+ pub fn values(&self) -> Values<'_, K, V> {
+ Values::new(&self.entries)
+ }
+
+ /// Return an iterator over mutable references to the values of the map slice.
+ pub fn values_mut(&mut self) -> ValuesMut<'_, K, V> {
+ ValuesMut::new(&mut self.entries)
+ }
+
+ /// Return an owning iterator over the values of the map slice.
+ pub fn into_values(self: Box<Self>) -> IntoValues<K, V> {
+ IntoValues::new(self.into_entries())
+ }
+}
+
+impl<'a, K, V> IntoIterator for &'a Slice<K, V> {
+ type IntoIter = Iter<'a, K, V>;
+ type Item = (&'a K, &'a V);
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.iter()
+ }
+}
+
+impl<'a, K, V> IntoIterator for &'a mut Slice<K, V> {
+ type IntoIter = IterMut<'a, K, V>;
+ type Item = (&'a K, &'a mut V);
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.iter_mut()
+ }
+}
+
+impl<K, V> IntoIterator for Box<Slice<K, V>> {
+ type IntoIter = IntoIter<K, V>;
+ type Item = (K, V);
+
+ fn into_iter(self) -> Self::IntoIter {
+ IntoIter::new(self.into_entries())
+ }
+}
+
+impl<K, V> Default for &'_ Slice<K, V> {
+ fn default() -> Self {
+ Slice::from_slice(&[])
+ }
+}
+
+impl<K, V> Default for &'_ mut Slice<K, V> {
+ fn default() -> Self {
+ Slice::from_mut_slice(&mut [])
+ }
+}
+
+impl<K, V> Default for Box<Slice<K, V>> {
+ fn default() -> Self {
+ Slice::from_boxed(Box::default())
+ }
+}
+
+impl<K: Clone, V: Clone> Clone for Box<Slice<K, V>> {
+ fn clone(&self) -> Self {
+ Slice::from_boxed(self.entries.to_vec().into_boxed_slice())
+ }
+}
+
+impl<K: Copy, V: Copy> From<&Slice<K, V>> for Box<Slice<K, V>> {
+ fn from(slice: &Slice<K, V>) -> Self {
+ Slice::from_boxed(Box::from(&slice.entries))
+ }
+}
+
+impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for Slice<K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self).finish()
+ }
+}
+
+impl<K: PartialEq, V: PartialEq> PartialEq for Slice<K, V> {
+ fn eq(&self, other: &Self) -> bool {
+ self.len() == other.len() && self.iter().eq(other)
+ }
+}
+
+impl<K: Eq, V: Eq> Eq for Slice<K, V> {}
+
+impl<K: PartialOrd, V: PartialOrd> PartialOrd for Slice<K, V> {
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ self.iter().partial_cmp(other)
+ }
+}
+
+impl<K: Ord, V: Ord> Ord for Slice<K, V> {
+ fn cmp(&self, other: &Self) -> Ordering {
+ self.iter().cmp(other)
+ }
+}
+
+impl<K: Hash, V: Hash> Hash for Slice<K, V> {
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ self.len().hash(state);
+ for (key, value) in self {
+ key.hash(state);
+ value.hash(state);
+ }
+ }
+}
+
+impl<K, V> Index<usize> for Slice<K, V> {
+ type Output = V;
+
+ fn index(&self, index: usize) -> &V {
+ &self.entries[index].value
+ }
+}
+
+impl<K, V> IndexMut<usize> for Slice<K, V> {
+ fn index_mut(&mut self, index: usize) -> &mut V {
+ &mut self.entries[index].value
+ }
+}
+
+// We can't have `impl<I: RangeBounds<usize>> Index<I>` because that conflicts
+// both upstream with `Index<usize>` and downstream with `Index<&Q>`.
+// Instead, we repeat the implementations for all the core range types.
+macro_rules! impl_index {
+ ($($range:ty),*) => {$(
+ impl<K, V, S> Index<$range> for IndexMap<K, V, S> {
+ type Output = Slice<K, V>;
+
+ fn index(&self, range: $range) -> &Self::Output {
+ Slice::from_slice(&self.as_entries()[range])
+ }
+ }
+
+ impl<K, V, S> IndexMut<$range> for IndexMap<K, V, S> {
+ fn index_mut(&mut self, range: $range) -> &mut Self::Output {
+ Slice::from_mut_slice(&mut self.as_entries_mut()[range])
+ }
+ }
+
+ impl<K, V> Index<$range> for Slice<K, V> {
+ type Output = Slice<K, V>;
+
+ fn index(&self, range: $range) -> &Self {
+ Self::from_slice(&self.entries[range])
+ }
+ }
+
+ impl<K, V> IndexMut<$range> for Slice<K, V> {
+ fn index_mut(&mut self, range: $range) -> &mut Self {
+ Self::from_mut_slice(&mut self.entries[range])
+ }
+ }
+ )*}
+}
+impl_index!(
+ ops::Range<usize>,
+ ops::RangeFrom<usize>,
+ ops::RangeFull,
+ ops::RangeInclusive<usize>,
+ ops::RangeTo<usize>,
+ ops::RangeToInclusive<usize>,
+ (Bound<usize>, Bound<usize>)
+);
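+
+// A brief sketch (not upstream code) of the positional range indexing these
+// impls provide, mirroring what the `slice_index` test below exercises:
+//
+// let map = indexmap::IndexMap::from([(10, 'a'), (20, 'b'), (30, 'c')]);
+// let tail: &indexmap::map::Slice<i32, char> = &map[1..]; // range -> Slice
+// assert!(tail.keys().eq([&20, &30]));
+// assert_eq!(map[&10], 'a'); // lookup by key still works
+// assert_eq!(map[0], 'a');   // lookup by position still works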
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use alloc::vec::Vec;
+
+ #[test]
+ fn slice_index() {
+ fn check(
+ vec_slice: &[(i32, i32)],
+ map_slice: &Slice<i32, i32>,
+ sub_slice: &Slice<i32, i32>,
+ ) {
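+ // Indexing the map and indexing its `as_slice()` view with the same range should yield the very same slice.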
+ assert_eq!(map_slice as *const _, sub_slice as *const _);
+ itertools::assert_equal(
+ vec_slice.iter().copied(),
+ map_slice.iter().map(|(&k, &v)| (k, v)),
+ );
+ itertools::assert_equal(vec_slice.iter().map(|(k, _)| k), map_slice.keys());
+ itertools::assert_equal(vec_slice.iter().map(|(_, v)| v), map_slice.values());
+ }
+
+ let vec: Vec<(i32, i32)> = (0..10).map(|i| (i, i * i)).collect();
+ let map: IndexMap<i32, i32> = vec.iter().cloned().collect();
+ let slice = map.as_slice();
+
+ // RangeFull
+ check(&vec[..], &map[..], &slice[..]);
+
+ for i in 0usize..10 {
+ // Index
+ assert_eq!(vec[i].1, map[i]);
+ assert_eq!(vec[i].1, slice[i]);
+ assert_eq!(map[&(i as i32)], map[i]);
+ assert_eq!(map[&(i as i32)], slice[i]);
+
+ // RangeFrom
+ check(&vec[i..], &map[i..], &slice[i..]);
+
+ // RangeTo
+ check(&vec[..i], &map[..i], &slice[..i]);
+
+ // RangeToInclusive
+ check(&vec[..=i], &map[..=i], &slice[..=i]);
+
+ // (Bound<usize>, Bound<usize>)
+ let bounds = (Bound::Excluded(i), Bound::Unbounded);
+ check(&vec[i + 1..], &map[bounds], &slice[bounds]);
+
+ for j in i..=10 {
+ // Range
+ check(&vec[i..j], &map[i..j], &slice[i..j]);
+ }
+
+ for j in i..10 {
+ // RangeInclusive
+ check(&vec[i..=j], &map[i..=j], &slice[i..=j]);
+ }
+ }
+ }
+
+ #[test]
+ fn slice_index_mut() {
+ fn check_mut(
+ vec_slice: &[(i32, i32)],
+ map_slice: &mut Slice<i32, i32>,
+ sub_slice: &mut Slice<i32, i32>,
+ ) {
+ assert_eq!(map_slice, sub_slice);
+ itertools::assert_equal(
+ vec_slice.iter().copied(),
+ map_slice.iter_mut().map(|(&k, &mut v)| (k, v)),
+ );
+ itertools::assert_equal(
+ vec_slice.iter().map(|&(_, v)| v),
+ map_slice.values_mut().map(|&mut v| v),
+ );
+ }
+
+ let vec: Vec<(i32, i32)> = (0..10).map(|i| (i, i * i)).collect();
+ let mut map: IndexMap<i32, i32> = vec.iter().cloned().collect();
+ let mut map2 = map.clone();
+ let slice = map2.as_mut_slice();
+
+ // RangeFull
+ check_mut(&vec[..], &mut map[..], &mut slice[..]);
+
+ for i in 0usize..10 {
+ // IndexMut
+ assert_eq!(&mut map[i], &mut slice[i]);
+
+ // RangeFrom
+ check_mut(&vec[i..], &mut map[i..], &mut slice[i..]);
+
+ // RangeTo
+ check_mut(&vec[..i], &mut map[..i], &mut slice[..i]);
+
+ // RangeToInclusive
+ check_mut(&vec[..=i], &mut map[..=i], &mut slice[..=i]);
+
+ // (Bound<usize>, Bound<usize>)
+ let bounds = (Bound::Excluded(i), Bound::Unbounded);
+ check_mut(&vec[i + 1..], &mut map[bounds], &mut slice[bounds]);
+
+ for j in i..=10 {
+ // Range
+ check_mut(&vec[i..j], &mut map[i..j], &mut slice[i..j]);
+ }
+
+ for j in i..10 {
+ // RangeInclusive
+ check_mut(&vec[i..=j], &mut map[i..=j], &mut slice[i..=j]);
+ }
+ }
+ }
+}
diff --git a/vendor/indexmap/src/map/tests.rs b/vendor/indexmap/src/map/tests.rs
new file mode 100644
index 0000000..f273d71
--- /dev/null
+++ b/vendor/indexmap/src/map/tests.rs
@@ -0,0 +1,449 @@
+use super::*;
+use std::string::String;
+
+#[test]
+fn it_works() {
+ let mut map = IndexMap::new();
+ assert_eq!(map.is_empty(), true);
+ map.insert(1, ());
+ map.insert(1, ());
+ assert_eq!(map.len(), 1);
+ assert!(map.get(&1).is_some());
+ assert_eq!(map.is_empty(), false);
+}
+
+#[test]
+fn new() {
+ let map = IndexMap::<String, String>::new();
+ println!("{:?}", map);
+ assert_eq!(map.capacity(), 0);
+ assert_eq!(map.len(), 0);
+ assert_eq!(map.is_empty(), true);
+}
+
+#[test]
+fn insert() {
+ let insert = [0, 4, 2, 12, 8, 7, 11, 5];
+ let not_present = [1, 3, 6, 9, 10];
+ let mut map = IndexMap::with_capacity(insert.len());
+
+ for (i, &elt) in insert.iter().enumerate() {
+ assert_eq!(map.len(), i);
+ map.insert(elt, elt);
+ assert_eq!(map.len(), i + 1);
+ assert_eq!(map.get(&elt), Some(&elt));
+ assert_eq!(map[&elt], elt);
+ }
+ println!("{:?}", map);
+
+ for &elt in &not_present {
+ assert!(map.get(&elt).is_none());
+ }
+}
+
+#[test]
+fn insert_full() {
+ let insert = vec![9, 2, 7, 1, 4, 6, 13];
+ let present = vec![1, 6, 2];
+ let mut map = IndexMap::with_capacity(insert.len());
+
+ for (i, &elt) in insert.iter().enumerate() {
+ assert_eq!(map.len(), i);
+ let (index, existing) = map.insert_full(elt, elt);
+ assert_eq!(existing, None);
+ assert_eq!(Some(index), map.get_full(&elt).map(|x| x.0));
+ assert_eq!(map.len(), i + 1);
+ }
+
+ let len = map.len();
+ for &elt in &present {
+ let (index, existing) = map.insert_full(elt, elt);
+ assert_eq!(existing, Some(elt));
+ assert_eq!(Some(index), map.get_full(&elt).map(|x| x.0));
+ assert_eq!(map.len(), len);
+ }
+}
+
+#[test]
+fn insert_2() {
+ let mut map = IndexMap::with_capacity(16);
+
+ let mut keys = vec![];
+ keys.extend(0..16);
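+ // Use a smaller range of extra keys under Miri, presumably to keep the interpreted run fast.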
+ keys.extend(if cfg!(miri) { 32..64 } else { 128..267 });
+
+ for &i in &keys {
+ let old_map = map.clone();
+ map.insert(i, ());
+ for key in old_map.keys() {
+ if map.get(key).is_none() {
+ println!("old_map: {:?}", old_map);
+ println!("map: {:?}", map);
+ panic!("did not find {} in map", key);
+ }
+ }
+ }
+
+ for &i in &keys {
+ assert!(map.get(&i).is_some(), "did not find {}", i);
+ }
+}
+
+#[test]
+fn insert_order() {
+ let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23];
+ let mut map = IndexMap::new();
+
+ for &elt in &insert {
+ map.insert(elt, ());
+ }
+
+ assert_eq!(map.keys().count(), map.len());
+ assert_eq!(map.keys().count(), insert.len());
+ for (a, b) in insert.iter().zip(map.keys()) {
+ assert_eq!(a, b);
+ }
+ for (i, k) in (0..insert.len()).zip(map.keys()) {
+ assert_eq!(map.get_index(i).unwrap().0, k);
+ }
+}
+
+#[test]
+fn grow() {
+ let insert = [0, 4, 2, 12, 8, 7, 11];
+ let not_present = [1, 3, 6, 9, 10];
+ let mut map = IndexMap::with_capacity(insert.len());
+
+ for (i, &elt) in insert.iter().enumerate() {
+ assert_eq!(map.len(), i);
+ map.insert(elt, elt);
+ assert_eq!(map.len(), i + 1);
+ assert_eq!(map.get(&elt), Some(&elt));
+ assert_eq!(map[&elt], elt);
+ }
+
+ println!("{:?}", map);
+ for &elt in &insert {
+ map.insert(elt * 10, elt);
+ }
+ for &elt in &insert {
+ map.insert(elt * 100, elt);
+ }
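+ // Keep inserting (mostly fresh) keys so the table has to grow repeatedly.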
+ for (i, &elt) in insert.iter().cycle().enumerate().take(100) {
+ map.insert(elt * 100 + i as i32, elt);
+ }
+ println!("{:?}", map);
+ for &elt in &not_present {
+ assert!(map.get(&elt).is_none());
+ }
+}
+
+#[test]
+fn reserve() {
+ let mut map = IndexMap::<usize, usize>::new();
+ assert_eq!(map.capacity(), 0);
+ map.reserve(100);
+ let capacity = map.capacity();
+ assert!(capacity >= 100);
+ for i in 0..capacity {
+ assert_eq!(map.len(), i);
+ map.insert(i, i * i);
+ assert_eq!(map.len(), i + 1);
+ assert_eq!(map.capacity(), capacity);
+ assert_eq!(map.get(&i), Some(&(i * i)));
+ }
+ map.insert(capacity, std::usize::MAX);
+ assert_eq!(map.len(), capacity + 1);
+ assert!(map.capacity() > capacity);
+ assert_eq!(map.get(&capacity), Some(&std::usize::MAX));
+}
+
+#[test]
+fn try_reserve() {
+ let mut map = IndexMap::<usize, usize>::new();
+ assert_eq!(map.capacity(), 0);
+ assert_eq!(map.try_reserve(100), Ok(()));
+ assert!(map.capacity() >= 100);
+ assert!(map.try_reserve(usize::MAX).is_err());
+}
+
+#[test]
+fn shrink_to_fit() {
+ let mut map = IndexMap::<usize, usize>::new();
+ assert_eq!(map.capacity(), 0);
+ for i in 0..100 {
+ assert_eq!(map.len(), i);
+ map.insert(i, i * i);
+ assert_eq!(map.len(), i + 1);
+ assert!(map.capacity() >= i + 1);
+ assert_eq!(map.get(&i), Some(&(i * i)));
+ map.shrink_to_fit();
+ assert_eq!(map.len(), i + 1);
+ assert_eq!(map.capacity(), i + 1);
+ assert_eq!(map.get(&i), Some(&(i * i)));
+ }
+}
+
+#[test]
+fn remove() {
+ let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23];
+ let mut map = IndexMap::new();
+
+ for &elt in &insert {
+ map.insert(elt, elt);
+ }
+
+ assert_eq!(map.keys().count(), map.len());
+ assert_eq!(map.keys().count(), insert.len());
+ for (a, b) in insert.iter().zip(map.keys()) {
+ assert_eq!(a, b);
+ }
+
+ let remove_fail = [99, 77];
+ let remove = [4, 12, 8, 7];
+
+ for &key in &remove_fail {
+ assert!(map.swap_remove_full(&key).is_none());
+ }
+ println!("{:?}", map);
+ for &key in &remove {
+ //println!("{:?}", map);
+ let index = map.get_full(&key).unwrap().0;
+ assert_eq!(map.swap_remove_full(&key), Some((index, key, key)));
+ }
+ println!("{:?}", map);
+
+ for key in &insert {
+ assert_eq!(map.get(key).is_some(), !remove.contains(key));
+ }
+ assert_eq!(map.len(), insert.len() - remove.len());
+ assert_eq!(map.keys().count(), insert.len() - remove.len());
+}
+
+#[test]
+fn remove_to_empty() {
+ let mut map = indexmap! { 0 => 0, 4 => 4, 5 => 5 };
+ map.swap_remove(&5).unwrap();
+ map.swap_remove(&4).unwrap();
+ map.swap_remove(&0).unwrap();
+ assert!(map.is_empty());
+}
+
+#[test]
+fn swap_remove_index() {
+ let insert = [0, 4, 2, 12, 8, 7, 11, 5, 3, 17, 19, 22, 23];
+ let mut map = IndexMap::new();
+
+ for &elt in &insert {
+ map.insert(elt, elt * 2);
+ }
+
+ let mut vector = insert.to_vec();
+ let remove_sequence = &[3, 3, 10, 4, 5, 4, 3, 0, 1];
+
+ // check that the same swap remove sequence on vec and map
+ // have the same result.
+ for &rm in remove_sequence {
+ let out_vec = vector.swap_remove(rm);
+ let (out_map, _) = map.swap_remove_index(rm).unwrap();
+ assert_eq!(out_vec, out_map);
+ }
+ assert_eq!(vector.len(), map.len());
+ for (a, b) in vector.iter().zip(map.keys()) {
+ assert_eq!(a, b);
+ }
+}
+
+#[test]
+fn partial_eq_and_eq() {
+ let mut map_a = IndexMap::new();
+ map_a.insert(1, "1");
+ map_a.insert(2, "2");
+ let mut map_b = map_a.clone();
+ assert_eq!(map_a, map_b);
+ map_b.swap_remove(&1);
+ assert_ne!(map_a, map_b);
+
+ let map_c: IndexMap<_, String> = map_b.into_iter().map(|(k, v)| (k, v.into())).collect();
+ assert_ne!(map_a, map_c);
+ assert_ne!(map_c, map_a);
+}
+
+#[test]
+fn extend() {
+ let mut map = IndexMap::new();
+ map.extend(vec![(&1, &2), (&3, &4)]);
+ map.extend(vec![(5, 6)]);
+ assert_eq!(
+ map.into_iter().collect::<Vec<_>>(),
+ vec![(1, 2), (3, 4), (5, 6)]
+ );
+}
+
+#[test]
+fn entry() {
+ let mut map = IndexMap::new();
+
+ map.insert(1, "1");
+ map.insert(2, "2");
+ {
+ let e = map.entry(3);
+ assert_eq!(e.index(), 2);
+ let e = e.or_insert("3");
+ assert_eq!(e, &"3");
+ }
+
+ let e = map.entry(2);
+ assert_eq!(e.index(), 1);
+ assert_eq!(e.key(), &2);
+ match e {
+ Entry::Occupied(ref e) => assert_eq!(e.get(), &"2"),
+ Entry::Vacant(_) => panic!(),
+ }
+ assert_eq!(e.or_insert("4"), &"2");
+}
+
+#[test]
+fn entry_and_modify() {
+ let mut map = IndexMap::new();
+
+ map.insert(1, "1");
+ map.entry(1).and_modify(|x| *x = "2");
+ assert_eq!(Some(&"2"), map.get(&1));
+
+ map.entry(2).and_modify(|x| *x = "doesn't exist");
+ assert_eq!(None, map.get(&2));
+}
+
+#[test]
+fn entry_or_default() {
+ let mut map = IndexMap::new();
+
+ #[derive(Debug, PartialEq)]
+ enum TestEnum {
+ DefaultValue,
+ NonDefaultValue,
+ }
+
+ impl Default for TestEnum {
+ fn default() -> Self {
+ TestEnum::DefaultValue
+ }
+ }
+
+ map.insert(1, TestEnum::NonDefaultValue);
+ assert_eq!(&mut TestEnum::NonDefaultValue, map.entry(1).or_default());
+
+ assert_eq!(&mut TestEnum::DefaultValue, map.entry(2).or_default());
+}
+
+#[test]
+fn occupied_entry_key() {
+ // These keys match hash and equality, but their addresses are distinct.
+ let (k1, k2) = (&mut 1, &mut 1);
+ let k1_ptr = k1 as *const i32;
+ let k2_ptr = k2 as *const i32;
+ assert_ne!(k1_ptr, k2_ptr);
+
+ let mut map = IndexMap::new();
+ map.insert(k1, "value");
+ match map.entry(k2) {
+ Entry::Occupied(ref e) => {
+ // `OccupiedEntry::key` should reference the key in the map,
+ // not the key that was used to find the entry.
+ let ptr = *e.key() as *const i32;
+ assert_eq!(ptr, k1_ptr);
+ assert_ne!(ptr, k2_ptr);
+ }
+ Entry::Vacant(_) => panic!(),
+ }
+}
+
+#[test]
+fn keys() {
+ let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')];
+ let map: IndexMap<_, _> = vec.into_iter().collect();
+ let keys: Vec<_> = map.keys().copied().collect();
+ assert_eq!(keys.len(), 3);
+ assert!(keys.contains(&1));
+ assert!(keys.contains(&2));
+ assert!(keys.contains(&3));
+}
+
+#[test]
+fn into_keys() {
+ let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')];
+ let map: IndexMap<_, _> = vec.into_iter().collect();
+ let keys: Vec<i32> = map.into_keys().collect();
+ assert_eq!(keys.len(), 3);
+ assert!(keys.contains(&1));
+ assert!(keys.contains(&2));
+ assert!(keys.contains(&3));
+}
+
+#[test]
+fn values() {
+ let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')];
+ let map: IndexMap<_, _> = vec.into_iter().collect();
+ let values: Vec<_> = map.values().copied().collect();
+ assert_eq!(values.len(), 3);
+ assert!(values.contains(&'a'));
+ assert!(values.contains(&'b'));
+ assert!(values.contains(&'c'));
+}
+
+#[test]
+fn values_mut() {
+ let vec = vec![(1, 1), (2, 2), (3, 3)];
+ let mut map: IndexMap<_, _> = vec.into_iter().collect();
+ for value in map.values_mut() {
+ *value *= 2
+ }
+ let values: Vec<_> = map.values().copied().collect();
+ assert_eq!(values.len(), 3);
+ assert!(values.contains(&2));
+ assert!(values.contains(&4));
+ assert!(values.contains(&6));
+}
+
+#[test]
+fn into_values() {
+ let vec = vec![(1, 'a'), (2, 'b'), (3, 'c')];
+ let map: IndexMap<_, _> = vec.into_iter().collect();
+ let values: Vec<char> = map.into_values().collect();
+ assert_eq!(values.len(), 3);
+ assert!(values.contains(&'a'));
+ assert!(values.contains(&'b'));
+ assert!(values.contains(&'c'));
+}
+
+#[test]
+#[cfg(feature = "std")]
+fn from_array() {
+ let map = IndexMap::from([(1, 2), (3, 4)]);
+ let mut expected = IndexMap::new();
+ expected.insert(1, 2);
+ expected.insert(3, 4);
+
+ assert_eq!(map, expected)
+}
+
+#[test]
+fn iter_default() {
+ struct K;
+ struct V;
+ fn assert_default<T>()
+ where
+ T: Default + Iterator,
+ {
+ assert!(T::default().next().is_none());
+ }
+ assert_default::<Iter<'static, K, V>>();
+ assert_default::<IterMut<'static, K, V>>();
+ assert_default::<IntoIter<K, V>>();
+ assert_default::<Keys<'static, K, V>>();
+ assert_default::<IntoKeys<K, V>>();
+ assert_default::<Values<'static, K, V>>();
+ assert_default::<ValuesMut<'static, K, V>>();
+ assert_default::<IntoValues<K, V>>();
+}