author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 00:47:55 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 00:47:55 +0000
commit    26a029d407be480d791972afb5975cf62c9360a6 (patch)
tree      f435a8308119effd964b339f76abb83a57c29483 /third_party/rust/indexmap/src/map
parent    Initial commit. (diff)
Adding upstream version 124.0.1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/rust/indexmap/src/map')
-rw-r--r--  third_party/rust/indexmap/src/map/core.rs     | 700
-rw-r--r--  third_party/rust/indexmap/src/map/core/raw.rs | 191
2 files changed, 891 insertions(+), 0 deletions(-)
diff --git a/third_party/rust/indexmap/src/map/core.rs b/third_party/rust/indexmap/src/map/core.rs
new file mode 100644
index 0000000000..ea7aaae62e
--- /dev/null
+++ b/third_party/rust/indexmap/src/map/core.rs
@@ -0,0 +1,700 @@
+//! This is the core implementation that doesn't depend on the hasher at all.
+//!
+//! The methods of `IndexMapCore` don't use any `Hash` properties of `K`.
+//!
+//! It's cleaner to separate them out, so the compiler can check that we are
+//! not using `Hash` at all in these methods.
+//!
+//! However, we should probably not let this show in the public API or docs.
+
+mod raw;
+
+use hashbrown::raw::RawTable;
+
+use crate::vec::{Drain, Vec};
+use core::cmp;
+use core::fmt;
+use core::mem::replace;
+use core::ops::RangeBounds;
+
+use crate::equivalent::Equivalent;
+use crate::util::simplify_range;
+use crate::{Bucket, Entries, HashValue};
+
+/// Core of the map that does not depend on S
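+///
+/// As a sketch of the layout (hypothetical hashes): after inserting
+/// `"a" -> 1` and then `"b" -> 2`, `entries` holds the two buckets densely in
+/// insertion order, while `indices` maps `hash("a") -> 0` and
+/// `hash("b") -> 1`. Lookups resolve through `indices`; iteration walks
+/// `entries` directly.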
+pub(crate) struct IndexMapCore<K, V> {
+    /// Indices mapping from the entry hash to its index.
+    indices: RawTable<usize>,
+    /// A dense vec of entries in their insertion order.
+ entries: Vec<Bucket<K, V>>,
+}
+
+#[inline(always)]
+fn get_hash<K, V>(entries: &[Bucket<K, V>]) -> impl Fn(&usize) -> u64 + '_ {
+ move |&i| entries[i].hash.get()
+}
+
+#[inline]
+fn equivalent<'a, K, V, Q: ?Sized + Equivalent<K>>(
+ key: &'a Q,
+ entries: &'a [Bucket<K, V>],
+) -> impl Fn(&usize) -> bool + 'a {
+ move |&i| Q::equivalent(key, &entries[i].key)
+}
+
+#[inline]
+fn erase_index(table: &mut RawTable<usize>, hash: HashValue, index: usize) {
+ let erased = table.erase_entry(hash.get(), move |&i| i == index);
+ debug_assert!(erased);
+}
+
+#[inline]
+fn update_index(table: &mut RawTable<usize>, hash: HashValue, old: usize, new: usize) {
+ let index = table
+ .get_mut(hash.get(), move |&i| i == old)
+ .expect("index not found");
+ *index = new;
+}
+
+impl<K, V> Clone for IndexMapCore<K, V>
+where
+ K: Clone,
+ V: Clone,
+{
+ fn clone(&self) -> Self {
+ let indices = self.indices.clone();
+ let mut entries = Vec::with_capacity(indices.capacity());
+ entries.clone_from(&self.entries);
+ IndexMapCore { indices, entries }
+ }
+
+ fn clone_from(&mut self, other: &Self) {
+ let hasher = get_hash(&other.entries);
+ self.indices.clone_from_with_hasher(&other.indices, hasher);
+ if self.entries.capacity() < other.entries.len() {
+ // If we must resize, match the indices capacity
+ self.reserve_entries();
+ }
+ self.entries.clone_from(&other.entries);
+ }
+}
+
+impl<K, V> fmt::Debug for IndexMapCore<K, V>
+where
+ K: fmt::Debug,
+ V: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("IndexMapCore")
+ .field("indices", &raw::DebugIndices(&self.indices))
+ .field("entries", &self.entries)
+ .finish()
+ }
+}
+
+impl<K, V> Entries for IndexMapCore<K, V> {
+ type Entry = Bucket<K, V>;
+
+ #[inline]
+ fn into_entries(self) -> Vec<Self::Entry> {
+ self.entries
+ }
+
+ #[inline]
+ fn as_entries(&self) -> &[Self::Entry] {
+ &self.entries
+ }
+
+ #[inline]
+ fn as_entries_mut(&mut self) -> &mut [Self::Entry] {
+ &mut self.entries
+ }
+
+ fn with_entries<F>(&mut self, f: F)
+ where
+ F: FnOnce(&mut [Self::Entry]),
+ {
+ f(&mut self.entries);
+ self.rebuild_hash_table();
+ }
+}
+
+impl<K, V> IndexMapCore<K, V> {
+ #[inline]
+ pub(crate) const fn new() -> Self {
+ IndexMapCore {
+ indices: RawTable::new(),
+ entries: Vec::new(),
+ }
+ }
+
+ #[inline]
+ pub(crate) fn with_capacity(n: usize) -> Self {
+ IndexMapCore {
+ indices: RawTable::with_capacity(n),
+ entries: Vec::with_capacity(n),
+ }
+ }
+
+ #[inline]
+ pub(crate) fn len(&self) -> usize {
+ self.indices.len()
+ }
+
+ #[inline]
+ pub(crate) fn capacity(&self) -> usize {
+ cmp::min(self.indices.capacity(), self.entries.capacity())
+ }
+
+ pub(crate) fn clear(&mut self) {
+ self.indices.clear();
+ self.entries.clear();
+ }
+
+ pub(crate) fn truncate(&mut self, len: usize) {
+ if len < self.len() {
+ self.erase_indices(len, self.entries.len());
+ self.entries.truncate(len);
+ }
+ }
+
+ pub(crate) fn drain<R>(&mut self, range: R) -> Drain<'_, Bucket<K, V>>
+ where
+ R: RangeBounds<usize>,
+ {
+ let range = simplify_range(range, self.entries.len());
+ self.erase_indices(range.start, range.end);
+ self.entries.drain(range)
+ }
+
+ #[cfg(feature = "rayon")]
+ pub(crate) fn par_drain<R>(&mut self, range: R) -> rayon::vec::Drain<'_, Bucket<K, V>>
+ where
+ K: Send,
+ V: Send,
+ R: RangeBounds<usize>,
+ {
+ use rayon::iter::ParallelDrainRange;
+ let range = simplify_range(range, self.entries.len());
+ self.erase_indices(range.start, range.end);
+ self.entries.par_drain(range)
+ }
+
+ pub(crate) fn split_off(&mut self, at: usize) -> Self {
+ assert!(at <= self.entries.len());
+ self.erase_indices(at, self.entries.len());
+ let entries = self.entries.split_off(at);
+
+ let mut indices = RawTable::with_capacity(entries.len());
+ raw::insert_bulk_no_grow(&mut indices, &entries);
+ Self { indices, entries }
+ }
+
+ /// Reserve capacity for `additional` more key-value pairs.
+ pub(crate) fn reserve(&mut self, additional: usize) {
+ self.indices.reserve(additional, get_hash(&self.entries));
+ self.reserve_entries();
+ }
+
+ /// Reserve entries capacity to match the indices
+ fn reserve_entries(&mut self) {
+ let additional = self.indices.capacity() - self.entries.len();
+ self.entries.reserve_exact(additional);
+ }
+
+ /// Shrink the capacity of the map with a lower bound
+ pub(crate) fn shrink_to(&mut self, min_capacity: usize) {
+ self.indices
+ .shrink_to(min_capacity, get_hash(&self.entries));
+ self.entries.shrink_to(min_capacity);
+ }
+
+ /// Remove the last key-value pair
+ pub(crate) fn pop(&mut self) -> Option<(K, V)> {
+ if let Some(entry) = self.entries.pop() {
+ let last = self.entries.len();
+ erase_index(&mut self.indices, entry.hash, last);
+ Some((entry.key, entry.value))
+ } else {
+ None
+ }
+ }
+
+ /// Append a key-value pair, *without* checking whether it already exists,
+ /// and return the pair's new index.
+ fn push(&mut self, hash: HashValue, key: K, value: V) -> usize {
+ let i = self.entries.len();
+ self.indices.insert(hash.get(), i, get_hash(&self.entries));
+ if i == self.entries.capacity() {
+ // Reserve our own capacity synced to the indices,
+ // rather than letting `Vec::push` just double it.
+ self.reserve_entries();
+ }
+ self.entries.push(Bucket { hash, key, value });
+ i
+ }
+
+ /// Return the index in `entries` where an equivalent key can be found
+ pub(crate) fn get_index_of<Q>(&self, hash: HashValue, key: &Q) -> Option<usize>
+ where
+ Q: ?Sized + Equivalent<K>,
+ {
+ let eq = equivalent(key, &self.entries);
+ self.indices.get(hash.get(), eq).copied()
+ }
+
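+    /// Insert a key-value pair, returning the pair's index and the replaced
+    /// value, if any.
+    ///
+    /// A sketch of the semantics (with a hypothetical `hash_a` computed for
+    /// `"a"`): inserting `"a" -> 1` into an empty map returns `(0, None)`;
+    /// inserting `"a" -> 2` again returns `(0, Some(1))` and keeps index 0.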
+ pub(crate) fn insert_full(&mut self, hash: HashValue, key: K, value: V) -> (usize, Option<V>)
+ where
+ K: Eq,
+ {
+ match self.get_index_of(hash, &key) {
+ Some(i) => (i, Some(replace(&mut self.entries[i].value, value))),
+ None => (self.push(hash, key, value), None),
+ }
+ }
+
+ /// Remove an entry by shifting all entries that follow it
+ pub(crate) fn shift_remove_full<Q>(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)>
+ where
+ Q: ?Sized + Equivalent<K>,
+ {
+ let eq = equivalent(key, &self.entries);
+ match self.indices.remove_entry(hash.get(), eq) {
+ Some(index) => {
+ let (key, value) = self.shift_remove_finish(index);
+ Some((index, key, value))
+ }
+ None => None,
+ }
+ }
+
+ /// Remove an entry by shifting all entries that follow it
+ pub(crate) fn shift_remove_index(&mut self, index: usize) -> Option<(K, V)> {
+ match self.entries.get(index) {
+ Some(entry) => {
+ erase_index(&mut self.indices, entry.hash, index);
+ Some(self.shift_remove_finish(index))
+ }
+ None => None,
+ }
+ }
+
+ /// Remove an entry by shifting all entries that follow it
+ ///
+ /// The index should already be removed from `self.indices`.
+ fn shift_remove_finish(&mut self, index: usize) -> (K, V) {
+ // Correct indices that point to the entries that followed the removed entry.
+ self.decrement_indices(index + 1, self.entries.len());
+
+ // Use Vec::remove to actually remove the entry.
+ let entry = self.entries.remove(index);
+ (entry.key, entry.value)
+ }
+
+ /// Decrement all indices in the range `start..end`.
+ ///
+ /// The index `start - 1` should not exist in `self.indices`.
+ /// All entries should still be in their original positions.
+ fn decrement_indices(&mut self, start: usize, end: usize) {
+ // Use a heuristic between a full sweep vs. a `find()` for every shifted item.
+ let shifted_entries = &self.entries[start..end];
+ if shifted_entries.len() > self.indices.buckets() / 2 {
+ // Shift all indices in range.
+ for i in self.indices_mut() {
+ if start <= *i && *i < end {
+ *i -= 1;
+ }
+ }
+ } else {
+ // Find each entry in range to shift its index.
+ for (i, entry) in (start..end).zip(shifted_entries) {
+ update_index(&mut self.indices, entry.hash, i, i - 1);
+ }
+ }
+ }
+
+ /// Increment all indices in the range `start..end`.
+ ///
+ /// The index `end` should not exist in `self.indices`.
+ /// All entries should still be in their original positions.
+ fn increment_indices(&mut self, start: usize, end: usize) {
+ // Use a heuristic between a full sweep vs. a `find()` for every shifted item.
+ let shifted_entries = &self.entries[start..end];
+ if shifted_entries.len() > self.indices.buckets() / 2 {
+ // Shift all indices in range.
+ for i in self.indices_mut() {
+ if start <= *i && *i < end {
+ *i += 1;
+ }
+ }
+ } else {
+ // Find each entry in range to shift its index, updated in reverse so
+ // we never have duplicated indices that might have a hash collision.
+ for (i, entry) in (start..end).zip(shifted_entries).rev() {
+ update_index(&mut self.indices, entry.hash, i, i + 1);
+ }
+ }
+ }
+
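+    /// Move an entry from index `from` to index `to`, shifting the entries
+    /// between them.
+    ///
+    /// For example (a sketch), moving index 0 to index 2 in `["a", "b", "c"]`
+    /// leaves the order `["b", "c", "a"]`, with all three hash indices
+    /// updated to match.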
+ pub(super) fn move_index(&mut self, from: usize, to: usize) {
+ let from_hash = self.entries[from].hash;
+ if from != to {
+            // Use a sentinel index so other indices don't collide.
+ update_index(&mut self.indices, from_hash, from, usize::MAX);
+
+ // Update all other indices and rotate the entry positions.
+ if from < to {
+ self.decrement_indices(from + 1, to + 1);
+ self.entries[from..=to].rotate_left(1);
+ } else if to < from {
+ self.increment_indices(to, from);
+ self.entries[to..=from].rotate_right(1);
+ }
+
+            // Change the sentinel index to its final position.
+ update_index(&mut self.indices, from_hash, usize::MAX, to);
+ }
+ }
+
+ /// Remove an entry by swapping it with the last
+ pub(crate) fn swap_remove_full<Q>(&mut self, hash: HashValue, key: &Q) -> Option<(usize, K, V)>
+ where
+ Q: ?Sized + Equivalent<K>,
+ {
+ let eq = equivalent(key, &self.entries);
+ match self.indices.remove_entry(hash.get(), eq) {
+ Some(index) => {
+ let (key, value) = self.swap_remove_finish(index);
+ Some((index, key, value))
+ }
+ None => None,
+ }
+ }
+
+ /// Remove an entry by swapping it with the last
+ pub(crate) fn swap_remove_index(&mut self, index: usize) -> Option<(K, V)> {
+ match self.entries.get(index) {
+ Some(entry) => {
+ erase_index(&mut self.indices, entry.hash, index);
+ Some(self.swap_remove_finish(index))
+ }
+ None => None,
+ }
+ }
+
+ /// Finish removing an entry by swapping it with the last
+ ///
+ /// The index should already be removed from `self.indices`.
+ fn swap_remove_finish(&mut self, index: usize) -> (K, V) {
+ // use swap_remove, but then we need to update the index that points
+ // to the other entry that has to move
+ let entry = self.entries.swap_remove(index);
+
+ // correct index that points to the entry that had to swap places
+ if let Some(entry) = self.entries.get(index) {
+ // was not last element
+ // examine new element in `index` and find it in indices
+ let last = self.entries.len();
+ update_index(&mut self.indices, entry.hash, last, index);
+ }
+
+ (entry.key, entry.value)
+ }
+
+ /// Erase `start..end` from `indices`, and shift `end..` indices down to `start..`
+ ///
+ /// All of these items should still be at their original location in `entries`.
+ /// This is used by `drain`, which will let `Vec::drain` do the work on `entries`.
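+    ///
+    /// For example (a sketch), erasing `1..3` from entries `[a, b, c, d, e]`
+    /// drops the indices for `b` and `c` and shifts the indices of `d` and
+    /// `e` down from 3 and 4 to 1 and 2.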
+ fn erase_indices(&mut self, start: usize, end: usize) {
+ let (init, shifted_entries) = self.entries.split_at(end);
+ let (start_entries, erased_entries) = init.split_at(start);
+
+ let erased = erased_entries.len();
+ let shifted = shifted_entries.len();
+ let half_capacity = self.indices.buckets() / 2;
+
+ // Use a heuristic between different strategies
+ if erased == 0 {
+ // Degenerate case, nothing to do
+ } else if start + shifted < half_capacity && start < erased {
+ // Reinsert everything, as there are few kept indices
+ self.indices.clear();
+
+ // Reinsert stable indices, then shifted indices
+ raw::insert_bulk_no_grow(&mut self.indices, start_entries);
+ raw::insert_bulk_no_grow(&mut self.indices, shifted_entries);
+ } else if erased + shifted < half_capacity {
+ // Find each affected index, as there are few to adjust
+
+ // Find erased indices
+ for (i, entry) in (start..).zip(erased_entries) {
+ erase_index(&mut self.indices, entry.hash, i);
+ }
+
+ // Find shifted indices
+ for ((new, old), entry) in (start..).zip(end..).zip(shifted_entries) {
+ update_index(&mut self.indices, entry.hash, old, new);
+ }
+ } else {
+ // Sweep the whole table for adjustments
+ self.erase_indices_sweep(start, end);
+ }
+
+ debug_assert_eq!(self.indices.len(), start + shifted);
+ }
+
+ pub(crate) fn retain_in_order<F>(&mut self, mut keep: F)
+ where
+ F: FnMut(&mut K, &mut V) -> bool,
+ {
+ // FIXME: This could use Vec::retain_mut with MSRV 1.61.
+ // Like Vec::retain in self.entries, but with mutable K and V.
+ // We swap-shift all the items we want to keep, truncate the rest,
+ // then rebuild the raw hash table with the new indexes.
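+        // E.g. (a sketch): keeping only a, c, e of [a, b, c, d, e] swaps
+        // them forward to [a, c, e, ..], truncates to length 3, and then
+        // reinserts the three hashes with their new indices.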
+ let len = self.entries.len();
+ let mut n_deleted = 0;
+ for i in 0..len {
+ let will_keep = {
+ let entry = &mut self.entries[i];
+ keep(&mut entry.key, &mut entry.value)
+ };
+ if !will_keep {
+ n_deleted += 1;
+ } else if n_deleted > 0 {
+ self.entries.swap(i - n_deleted, i);
+ }
+ }
+ if n_deleted > 0 {
+ self.entries.truncate(len - n_deleted);
+ self.rebuild_hash_table();
+ }
+ }
+
+ fn rebuild_hash_table(&mut self) {
+ self.indices.clear();
+ raw::insert_bulk_no_grow(&mut self.indices, &self.entries);
+ }
+
+ pub(crate) fn reverse(&mut self) {
+ self.entries.reverse();
+
+        // No need to rebuild the hash table; since this is an in-place
+        // reversal, each index `i` simply becomes `len - i - 1`.
+ let len = self.entries.len();
+ for i in self.indices_mut() {
+ *i = len - *i - 1;
+ }
+ }
+}
+
+/// Entry for an existing key-value pair or a vacant location to
+/// insert one.
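+///
+/// # Examples
+///
+/// A small sketch of matching on an entry directly, obtained via the public
+/// `IndexMap::entry` method:
+///
+/// ```
+/// use indexmap::{map::Entry, IndexMap};
+///
+/// let mut map = IndexMap::new();
+/// map.insert("a", 1);
+/// match map.entry("a") {
+///     Entry::Occupied(entry) => assert_eq!(entry.get(), &1),
+///     Entry::Vacant(_) => unreachable!(),
+/// }
+/// ```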
+pub enum Entry<'a, K, V> {
+ /// Existing slot with equivalent key.
+ Occupied(OccupiedEntry<'a, K, V>),
+ /// Vacant slot (no equivalent key in the map).
+ Vacant(VacantEntry<'a, K, V>),
+}
+
+impl<'a, K, V> Entry<'a, K, V> {
+ /// Inserts the given default value in the entry if it is vacant and returns a mutable
+    /// reference to it. Otherwise a mutable reference to the existing value is returned.
+ ///
+ /// Computes in **O(1)** time (amortized average).
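+    ///
+    /// # Examples
+    ///
+    /// A small usage sketch via the public `IndexMap` wrapper:
+    ///
+    /// ```
+    /// use indexmap::IndexMap;
+    ///
+    /// let mut counts: IndexMap<char, u32> = IndexMap::new();
+    /// for ch in "hello".chars() {
+    ///     *counts.entry(ch).or_insert(0) += 1;
+    /// }
+    /// assert_eq!(counts[&'l'], 2);
+    /// ```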
+ pub fn or_insert(self, default: V) -> &'a mut V {
+ match self {
+ Entry::Occupied(entry) => entry.into_mut(),
+ Entry::Vacant(entry) => entry.insert(default),
+ }
+ }
+
+ /// Inserts the result of the `call` function in the entry if it is vacant and returns a mutable
+    /// reference to it. Otherwise a mutable reference to the existing value is returned.
+ ///
+ /// Computes in **O(1)** time (amortized average).
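+    ///
+    /// # Examples
+    ///
+    /// A small sketch; the closure only runs when the entry is vacant:
+    ///
+    /// ```
+    /// use indexmap::IndexMap;
+    ///
+    /// let mut map: IndexMap<&str, String> = IndexMap::new();
+    /// map.entry("key").or_insert_with(|| "computed".to_string());
+    /// assert_eq!(map["key"], "computed");
+    /// ```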
+ pub fn or_insert_with<F>(self, call: F) -> &'a mut V
+ where
+ F: FnOnce() -> V,
+ {
+ match self {
+ Entry::Occupied(entry) => entry.into_mut(),
+ Entry::Vacant(entry) => entry.insert(call()),
+ }
+ }
+
+ /// Inserts the result of the `call` function with a reference to the entry's key if it is
+ /// vacant, and returns a mutable reference to the new value. Otherwise a mutable reference to
+    /// the existing value is returned.
+ ///
+ /// Computes in **O(1)** time (amortized average).
+ pub fn or_insert_with_key<F>(self, call: F) -> &'a mut V
+ where
+ F: FnOnce(&K) -> V,
+ {
+ match self {
+ Entry::Occupied(entry) => entry.into_mut(),
+ Entry::Vacant(entry) => {
+ let value = call(&entry.key);
+ entry.insert(value)
+ }
+ }
+ }
+
+ /// Gets a reference to the entry's key, either within the map if occupied,
+ /// or else the new key that was used to find the entry.
+ pub fn key(&self) -> &K {
+ match *self {
+ Entry::Occupied(ref entry) => entry.key(),
+ Entry::Vacant(ref entry) => entry.key(),
+ }
+ }
+
+ /// Return the index where the key-value pair exists or will be inserted.
+ pub fn index(&self) -> usize {
+ match *self {
+ Entry::Occupied(ref entry) => entry.index(),
+ Entry::Vacant(ref entry) => entry.index(),
+ }
+ }
+
+ /// Modifies the entry if it is occupied.
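+    ///
+    /// # Examples
+    ///
+    /// A small sketch, assuming the key is already present:
+    ///
+    /// ```
+    /// use indexmap::IndexMap;
+    ///
+    /// let mut map = IndexMap::new();
+    /// map.insert("poneyland", 42);
+    /// map.entry("poneyland").and_modify(|v| *v += 1).or_insert(0);
+    /// assert_eq!(map["poneyland"], 43);
+    /// ```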
+ pub fn and_modify<F>(self, f: F) -> Self
+ where
+ F: FnOnce(&mut V),
+ {
+ match self {
+ Entry::Occupied(mut o) => {
+ f(o.get_mut());
+ Entry::Occupied(o)
+ }
+ x => x,
+ }
+ }
+
+ /// Inserts a default-constructed value in the entry if it is vacant and returns a mutable
+    /// reference to it. Otherwise a mutable reference to the existing value is returned.
+ ///
+ /// Computes in **O(1)** time (amortized average).
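+    ///
+    /// # Examples
+    ///
+    /// A small sketch using `u32::default()` (zero) for a missing key:
+    ///
+    /// ```
+    /// use indexmap::IndexMap;
+    ///
+    /// let mut map: IndexMap<&str, u32> = IndexMap::new();
+    /// *map.entry("poneyland").or_default() += 1;
+    /// assert_eq!(map["poneyland"], 1);
+    /// ```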
+ pub fn or_default(self) -> &'a mut V
+ where
+ V: Default,
+ {
+ match self {
+ Entry::Occupied(entry) => entry.into_mut(),
+ Entry::Vacant(entry) => entry.insert(V::default()),
+ }
+ }
+}
+
+impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for Entry<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ Entry::Vacant(ref v) => f.debug_tuple(stringify!(Entry)).field(v).finish(),
+ Entry::Occupied(ref o) => f.debug_tuple(stringify!(Entry)).field(o).finish(),
+ }
+ }
+}
+
+pub use self::raw::OccupiedEntry;
+
+// Extra methods that don't threaten the unsafe encapsulation.
+impl<K, V> OccupiedEntry<'_, K, V> {
+ /// Sets the value of the entry to `value`, and returns the entry's old value.
+ pub fn insert(&mut self, value: V) -> V {
+ replace(self.get_mut(), value)
+ }
+
+ /// Remove the key, value pair stored in the map for this entry, and return the value.
+ ///
+ /// **NOTE:** This is equivalent to `.swap_remove()`.
+ pub fn remove(self) -> V {
+ self.swap_remove()
+ }
+
+ /// Remove the key, value pair stored in the map for this entry, and return the value.
+ ///
+ /// Like `Vec::swap_remove`, the pair is removed by swapping it with the
+ /// last element of the map and popping it off. **This perturbs
+ /// the position of what used to be the last element!**
+ ///
+ /// Computes in **O(1)** time (average).
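+    ///
+    /// A small sketch of the order perturbation:
+    ///
+    /// ```
+    /// use indexmap::{map::Entry, IndexMap};
+    ///
+    /// let mut map = IndexMap::new();
+    /// map.insert("a", 1);
+    /// map.insert("b", 2);
+    /// map.insert("c", 3);
+    /// if let Entry::Occupied(entry) = map.entry("a") {
+    ///     assert_eq!(entry.swap_remove(), 1);
+    /// }
+    /// // "c" was swapped into the removed slot.
+    /// assert_eq!(map.keys().copied().collect::<Vec<_>>(), ["c", "b"]);
+    /// ```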
+ pub fn swap_remove(self) -> V {
+ self.swap_remove_entry().1
+ }
+
+ /// Remove the key, value pair stored in the map for this entry, and return the value.
+ ///
+ /// Like `Vec::remove`, the pair is removed by shifting all of the
+ /// elements that follow it, preserving their relative order.
+ /// **This perturbs the index of all of those elements!**
+ ///
+ /// Computes in **O(n)** time (average).
+ pub fn shift_remove(self) -> V {
+ self.shift_remove_entry().1
+ }
+
+ /// Remove and return the key, value pair stored in the map for this entry
+ ///
+ /// **NOTE:** This is equivalent to `.swap_remove_entry()`.
+ pub fn remove_entry(self) -> (K, V) {
+ self.swap_remove_entry()
+ }
+}
+
+impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for OccupiedEntry<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct(stringify!(OccupiedEntry))
+ .field("key", self.key())
+ .field("value", self.get())
+ .finish()
+ }
+}
+
+/// A view into a vacant entry in an `IndexMap`.
+/// It is part of the [`Entry`] enum.
+///
+/// [`Entry`]: enum.Entry.html
+pub struct VacantEntry<'a, K, V> {
+ map: &'a mut IndexMapCore<K, V>,
+ hash: HashValue,
+ key: K,
+}
+
+impl<'a, K, V> VacantEntry<'a, K, V> {
+ /// Gets a reference to the key that was used to find the entry.
+ pub fn key(&self) -> &K {
+ &self.key
+ }
+
+ /// Takes ownership of the key, leaving the entry vacant.
+ pub fn into_key(self) -> K {
+ self.key
+ }
+
+ /// Return the index where the key-value pair will be inserted.
+ pub fn index(&self) -> usize {
+ self.map.len()
+ }
+
+ /// Inserts the entry's key and the given value into the map, and returns a mutable reference
+ /// to the value.
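+    ///
+    /// A small sketch:
+    ///
+    /// ```
+    /// use indexmap::{map::Entry, IndexMap};
+    ///
+    /// let mut map = IndexMap::new();
+    /// if let Entry::Vacant(entry) = map.entry("key") {
+    ///     entry.insert(10);
+    /// }
+    /// assert_eq!(map["key"], 10);
+    /// ```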
+ pub fn insert(self, value: V) -> &'a mut V {
+ let i = self.map.push(self.hash, self.key, value);
+ &mut self.map.entries[i].value
+ }
+}
+
+impl<K: fmt::Debug, V> fmt::Debug for VacantEntry<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple(stringify!(VacantEntry))
+ .field(self.key())
+ .finish()
+ }
+}
+
+#[test]
+fn assert_send_sync() {
+ fn assert_send_sync<T: Send + Sync>() {}
+ assert_send_sync::<IndexMapCore<i32, i32>>();
+ assert_send_sync::<Entry<'_, i32, i32>>();
+}
diff --git a/third_party/rust/indexmap/src/map/core/raw.rs b/third_party/rust/indexmap/src/map/core/raw.rs
new file mode 100644
index 0000000000..bf1672d52a
--- /dev/null
+++ b/third_party/rust/indexmap/src/map/core/raw.rs
@@ -0,0 +1,191 @@
+#![allow(unsafe_code)]
+//! This module encapsulates the `unsafe` access to `hashbrown::raw::RawTable`,
+//! mostly in dealing with its bucket "pointers".
+
+use super::{equivalent, Bucket, Entry, HashValue, IndexMapCore, VacantEntry};
+use core::fmt;
+use core::mem::replace;
+use hashbrown::raw::RawTable;
+
+type RawBucket = hashbrown::raw::Bucket<usize>;
+
+/// Inserts many entries into a raw table without reallocating.
+///
+/// ***Panics*** if there is not sufficient capacity already.
+pub(super) fn insert_bulk_no_grow<K, V>(indices: &mut RawTable<usize>, entries: &[Bucket<K, V>]) {
+ assert!(indices.capacity() - indices.len() >= entries.len());
+ for entry in entries {
+ // SAFETY: we asserted that sufficient capacity exists for all entries.
+ unsafe {
+ indices.insert_no_grow(entry.hash.get(), indices.len());
+ }
+ }
+}
+
+pub(super) struct DebugIndices<'a>(pub &'a RawTable<usize>);
+impl fmt::Debug for DebugIndices<'_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ // SAFETY: we're not letting any of the buckets escape this function
+ let indices = unsafe { self.0.iter().map(|raw_bucket| raw_bucket.read()) };
+ f.debug_list().entries(indices).finish()
+ }
+}
+
+impl<K, V> IndexMapCore<K, V> {
+    /// Sweep the whole table to erase indices `start..end`
+ pub(super) fn erase_indices_sweep(&mut self, start: usize, end: usize) {
+ // SAFETY: we're not letting any of the buckets escape this function
+ unsafe {
+ let offset = end - start;
+ for bucket in self.indices.iter() {
+ let i = bucket.read();
+ if i >= end {
+ bucket.write(i - offset);
+ } else if i >= start {
+ self.indices.erase(bucket);
+ }
+ }
+ }
+ }
+
+ pub(crate) fn entry(&mut self, hash: HashValue, key: K) -> Entry<'_, K, V>
+ where
+ K: Eq,
+ {
+ let eq = equivalent(&key, &self.entries);
+ match self.indices.find(hash.get(), eq) {
+ // SAFETY: The entry is created with a live raw bucket, at the same time
+ // we have a &mut reference to the map, so it can not be modified further.
+ Some(raw_bucket) => Entry::Occupied(OccupiedEntry {
+ map: self,
+ raw_bucket,
+ key,
+ }),
+ None => Entry::Vacant(VacantEntry {
+ map: self,
+ hash,
+ key,
+ }),
+ }
+ }
+
+ pub(super) fn indices_mut(&mut self) -> impl Iterator<Item = &mut usize> {
+ // SAFETY: we're not letting any of the buckets escape this function,
+ // only the item references that are appropriately bound to `&mut self`.
+ unsafe { self.indices.iter().map(|bucket| bucket.as_mut()) }
+ }
+
+ /// Return the raw bucket for the given index
+ fn find_index(&self, index: usize) -> RawBucket {
+ // We'll get a "nice" bounds-check from indexing `self.entries`,
+ // and then we expect to find it in the table as well.
+ let hash = self.entries[index].hash.get();
+ self.indices
+ .find(hash, move |&i| i == index)
+ .expect("index not found")
+ }
+
+ pub(crate) fn swap_indices(&mut self, a: usize, b: usize) {
+ // SAFETY: Can't take two `get_mut` references from one table, so we
+ // must use raw buckets to do the swap. This is still safe because we
+ // are locally sure they won't dangle, and we write them individually.
+ unsafe {
+ let raw_bucket_a = self.find_index(a);
+ let raw_bucket_b = self.find_index(b);
+ raw_bucket_a.write(b);
+ raw_bucket_b.write(a);
+ }
+ self.entries.swap(a, b);
+ }
+}
+
+/// A view into an occupied entry in an `IndexMap`.
+/// It is part of the [`Entry`] enum.
+///
+/// [`Entry`]: enum.Entry.html
+// SAFETY: The lifetime of the map reference also constrains the raw bucket,
+// which is essentially a raw pointer into the map indices.
+pub struct OccupiedEntry<'a, K, V> {
+ map: &'a mut IndexMapCore<K, V>,
+ raw_bucket: RawBucket,
+ key: K,
+}
+
+// `hashbrown::raw::Bucket` is only `Send`, not `Sync`.
+// SAFETY: `&self` only accesses the bucket to read it.
+unsafe impl<K: Sync, V: Sync> Sync for OccupiedEntry<'_, K, V> {}
+
+// The parent module also adds methods that don't threaten the unsafe encapsulation.
+impl<'a, K, V> OccupiedEntry<'a, K, V> {
+ /// Gets a reference to the entry's key in the map.
+ ///
+ /// Note that this is not the key that was used to find the entry. There may be an observable
+ /// difference if the key type has any distinguishing features outside of `Hash` and `Eq`, like
+ /// extra fields or the memory address of an allocation.
+ pub fn key(&self) -> &K {
+ &self.map.entries[self.index()].key
+ }
+
+ /// Gets a reference to the entry's value in the map.
+ pub fn get(&self) -> &V {
+ &self.map.entries[self.index()].value
+ }
+
+ /// Gets a mutable reference to the entry's value in the map.
+ ///
+ /// If you need a reference which may outlive the destruction of the
+ /// `Entry` value, see `into_mut`.
+ pub fn get_mut(&mut self) -> &mut V {
+ let index = self.index();
+ &mut self.map.entries[index].value
+ }
+
+ /// Put the new key in the occupied entry's key slot
+ pub(crate) fn replace_key(self) -> K {
+ let index = self.index();
+ let old_key = &mut self.map.entries[index].key;
+ replace(old_key, self.key)
+ }
+
+ /// Return the index of the key-value pair
+ #[inline]
+ pub fn index(&self) -> usize {
+        // SAFETY: we have &mut map, keeping the bucket stable
+ unsafe { self.raw_bucket.read() }
+ }
+
+ /// Converts into a mutable reference to the entry's value in the map,
+ /// with a lifetime bound to the map itself.
+ pub fn into_mut(self) -> &'a mut V {
+ let index = self.index();
+ &mut self.map.entries[index].value
+ }
+
+ /// Remove and return the key, value pair stored in the map for this entry
+ ///
+ /// Like `Vec::swap_remove`, the pair is removed by swapping it with the
+ /// last element of the map and popping it off. **This perturbs
+ /// the position of what used to be the last element!**
+ ///
+ /// Computes in **O(1)** time (average).
+ pub fn swap_remove_entry(self) -> (K, V) {
+ // SAFETY: This is safe because it can only happen once (self is consumed)
+        // and map.indices has not been modified since entry construction
+ let index = unsafe { self.map.indices.remove(self.raw_bucket) };
+ self.map.swap_remove_finish(index)
+ }
+
+ /// Remove and return the key, value pair stored in the map for this entry
+ ///
+ /// Like `Vec::remove`, the pair is removed by shifting all of the
+ /// elements that follow it, preserving their relative order.
+ /// **This perturbs the index of all of those elements!**
+ ///
+ /// Computes in **O(n)** time (average).
+ pub fn shift_remove_entry(self) -> (K, V) {
+ // SAFETY: This is safe because it can only happen once (self is consumed)
+        // and map.indices has not been modified since entry construction
+ let index = unsafe { self.map.indices.remove(self.raw_bucket) };
+ self.map.shift_remove_finish(index)
+ }
+}