author    Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-17 12:02:58 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-17 12:02:58 +0000
commit    698f8c2f01ea549d77d7dc3338a12e04c11057b9 (patch)
tree      173a775858bd501c378080a10dca74132f05bc50 /library/alloc/src/collections
parent    Initial commit. (diff)
Adding upstream version 1.64.0+dfsg1. (tag: upstream/1.64.0+dfsg1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'library/alloc/src/collections')
-rw-r--r--  library/alloc/src/collections/binary_heap.rs              1720
-rw-r--r--  library/alloc/src/collections/binary_heap/tests.rs          489
-rw-r--r--  library/alloc/src/collections/btree/append.rs               107
-rw-r--r--  library/alloc/src/collections/btree/borrow.rs                47
-rw-r--r--  library/alloc/src/collections/btree/borrow/tests.rs          19
-rw-r--r--  library/alloc/src/collections/btree/dedup_sorted_iter.rs     47
-rw-r--r--  library/alloc/src/collections/btree/fix.rs                  179
-rw-r--r--  library/alloc/src/collections/btree/map.rs                 2423
-rw-r--r--  library/alloc/src/collections/btree/map/entry.rs            555
-rw-r--r--  library/alloc/src/collections/btree/map/tests.rs           2338
-rw-r--r--  library/alloc/src/collections/btree/mem.rs                   35
-rw-r--r--  library/alloc/src/collections/btree/merge_iter.rs            98
-rw-r--r--  library/alloc/src/collections/btree/mod.rs                   26
-rw-r--r--  library/alloc/src/collections/btree/navigate.rs             719
-rw-r--r--  library/alloc/src/collections/btree/node.rs                1753
-rw-r--r--  library/alloc/src/collections/btree/node/tests.rs           102
-rw-r--r--  library/alloc/src/collections/btree/remove.rs                95
-rw-r--r--  library/alloc/src/collections/btree/search.rs               285
-rw-r--r--  library/alloc/src/collections/btree/set.rs                 1789
-rw-r--r--  library/alloc/src/collections/btree/set/tests.rs            856
-rw-r--r--  library/alloc/src/collections/btree/set_val.rs               29
-rw-r--r--  library/alloc/src/collections/btree/split.rs                 73
-rw-r--r--  library/alloc/src/collections/btree/testing/crash_test.rs   119
-rw-r--r--  library/alloc/src/collections/btree/testing/mod.rs            3
-rw-r--r--  library/alloc/src/collections/btree/testing/ord_chaos.rs     81
-rw-r--r--  library/alloc/src/collections/btree/testing/rng.rs           28
-rw-r--r--  library/alloc/src/collections/linked_list.rs               2012
-rw-r--r--  library/alloc/src/collections/linked_list/tests.rs         1156
-rw-r--r--  library/alloc/src/collections/mod.rs                        154
-rw-r--r--  library/alloc/src/collections/vec_deque/drain.rs            142
-rw-r--r--  library/alloc/src/collections/vec_deque/into_iter.rs         72
-rw-r--r--  library/alloc/src/collections/vec_deque/iter.rs             219
-rw-r--r--  library/alloc/src/collections/vec_deque/iter_mut.rs         162
-rw-r--r--  library/alloc/src/collections/vec_deque/macros.rs            19
-rw-r--r--  library/alloc/src/collections/vec_deque/mod.rs             3137
-rw-r--r--  library/alloc/src/collections/vec_deque/pair_slices.rs       67
-rw-r--r--  library/alloc/src/collections/vec_deque/ring_slices.rs       56
-rw-r--r--  library/alloc/src/collections/vec_deque/spec_extend.rs      132
-rw-r--r--  library/alloc/src/collections/vec_deque/tests.rs           1110
39 files changed, 22453 insertions, 0 deletions
diff --git a/library/alloc/src/collections/binary_heap.rs b/library/alloc/src/collections/binary_heap.rs
new file mode 100644
index 000000000..197e7aaac
--- /dev/null
+++ b/library/alloc/src/collections/binary_heap.rs
@@ -0,0 +1,1720 @@
+//! A priority queue implemented with a binary heap.
+//!
+//! Insertion and popping the largest element have *O*(log(*n*)) time complexity.
+//! Checking the largest element is *O*(1). Converting a vector to a binary heap
+//! can be done in-place, and has *O*(*n*) complexity. A binary heap can also be
+//! converted to a sorted vector in-place, allowing it to be used for an *O*(*n* * log(*n*))
+//! in-place heapsort.
+//!
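+//! For example, a quick sketch of the heapsort use case described above,
+//! using only the conversions documented in this module:
+//!
+//! ```
+//! use std::collections::BinaryHeap;
+//!
+//! // `From<Vec<T>>` heapifies the vector in-place in O(n) time...
+//! let heap = BinaryHeap::from(vec![3, 1, 4, 1, 5]);
+//!
+//! // ...and `into_sorted_vec` completes the in-place heapsort.
+//! assert_eq!(heap.into_sorted_vec(), [1, 1, 3, 4, 5]);
+//! ```
+//!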
+//! # Examples
+//!
+//! This is a larger example that implements [Dijkstra's algorithm][dijkstra]
+//! to solve the [shortest path problem][sssp] on a [directed graph][dir_graph].
+//! It shows how to use [`BinaryHeap`] with custom types.
+//!
+//! [dijkstra]: https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm
+//! [sssp]: https://en.wikipedia.org/wiki/Shortest_path_problem
+//! [dir_graph]: https://en.wikipedia.org/wiki/Directed_graph
+//!
+//! ```
+//! use std::cmp::Ordering;
+//! use std::collections::BinaryHeap;
+//!
+//! #[derive(Copy, Clone, Eq, PartialEq)]
+//! struct State {
+//! cost: usize,
+//! position: usize,
+//! }
+//!
+//! // The priority queue depends on `Ord`.
+//! // Explicitly implement the trait so the queue becomes a min-heap
+//! // instead of a max-heap.
+//! impl Ord for State {
+//! fn cmp(&self, other: &Self) -> Ordering {
+//! // Notice that we flip the ordering on costs.
+//! // In case of a tie we compare positions - this step is necessary
+//! // to make implementations of `PartialEq` and `Ord` consistent.
+//! other.cost.cmp(&self.cost)
+//! .then_with(|| self.position.cmp(&other.position))
+//! }
+//! }
+//!
+//! // `PartialOrd` needs to be implemented as well.
+//! impl PartialOrd for State {
+//! fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+//! Some(self.cmp(other))
+//! }
+//! }
+//!
+//! // Each node is represented as a `usize`, for a shorter implementation.
+//! struct Edge {
+//! node: usize,
+//! cost: usize,
+//! }
+//!
+//! // Dijkstra's shortest path algorithm.
+//!
+//! // Start at `start` and use `dist` to track the current shortest distance
+//! // to each node. This implementation isn't memory-efficient as it may leave duplicate
+//! // nodes in the queue. It also uses `usize::MAX` as a sentinel value,
+//! // for a simpler implementation.
+//! fn shortest_path(adj_list: &Vec<Vec<Edge>>, start: usize, goal: usize) -> Option<usize> {
+//! // dist[node] = current shortest distance from `start` to `node`
+//! let mut dist: Vec<_> = (0..adj_list.len()).map(|_| usize::MAX).collect();
+//!
+//! let mut heap = BinaryHeap::new();
+//!
+//! // We're at `start`, with a zero cost
+//! dist[start] = 0;
+//! heap.push(State { cost: 0, position: start });
+//!
+//! // Examine the frontier with lower cost nodes first (min-heap)
+//! while let Some(State { cost, position }) = heap.pop() {
+//! // Alternatively we could have continued to find all shortest paths
+//! if position == goal { return Some(cost); }
+//!
+//! // Important as we may have already found a better way
+//! if cost > dist[position] { continue; }
+//!
+//! // For each node we can reach, see if we can find a way with
+//! // a lower cost going through this node
+//! for edge in &adj_list[position] {
+//! let next = State { cost: cost + edge.cost, position: edge.node };
+//!
+//! // If so, add it to the frontier and continue
+//! if next.cost < dist[next.position] {
+//! heap.push(next);
+//! // Relaxation, we have now found a better way
+//! dist[next.position] = next.cost;
+//! }
+//! }
+//! }
+//!
+//! // Goal not reachable
+//! None
+//! }
+//!
+//! fn main() {
+//! // This is the directed graph we're going to use.
+//! // The node numbers correspond to the different states,
+//! // and the edge weights symbolize the cost of moving
+//! // from one node to another.
+//! // Note that the edges are one-way.
+//! //
+//! //                  7
+//! //          +-----------------+
+//! //          |                 |
+//! //          v   1        2    |  2
+//! //          0 -----> 1 -----> 3 ---> 4
+//! //          |        ^        ^      ^
+//! //          |        | 1      |      |
+//! //          |        |        | 3    | 1
+//! //          +------> 2 -------+      |
+//! //           10      |               |
+//! //                   +---------------+
+//! //
+//! // The graph is represented as an adjacency list where each index,
+//! // corresponding to a node value, has a list of outgoing edges.
+//! // Chosen for its efficiency.
+//! let graph = vec![
+//! // Node 0
+//! vec![Edge { node: 2, cost: 10 },
+//! Edge { node: 1, cost: 1 }],
+//! // Node 1
+//! vec![Edge { node: 3, cost: 2 }],
+//! // Node 2
+//! vec![Edge { node: 1, cost: 1 },
+//! Edge { node: 3, cost: 3 },
+//! Edge { node: 4, cost: 1 }],
+//! // Node 3
+//! vec![Edge { node: 0, cost: 7 },
+//! Edge { node: 4, cost: 2 }],
+//! // Node 4
+//! vec![]];
+//!
+//! assert_eq!(shortest_path(&graph, 0, 1), Some(1));
+//! assert_eq!(shortest_path(&graph, 0, 3), Some(3));
+//! assert_eq!(shortest_path(&graph, 3, 0), Some(7));
+//! assert_eq!(shortest_path(&graph, 0, 4), Some(5));
+//! assert_eq!(shortest_path(&graph, 4, 0), None);
+//! }
+//! ```
+
+#![allow(missing_docs)]
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use core::fmt;
+use core::iter::{FromIterator, FusedIterator, InPlaceIterable, SourceIter, TrustedLen};
+use core::mem::{self, swap, ManuallyDrop};
+use core::ops::{Deref, DerefMut};
+use core::ptr;
+
+use crate::collections::TryReserveError;
+use crate::slice;
+use crate::vec::{self, AsVecIntoIter, Vec};
+
+use super::SpecExtend;
+
+#[cfg(test)]
+mod tests;
+
+/// A priority queue implemented with a binary heap.
+///
+/// This will be a max-heap.
+///
+/// It is a logic error for an item to be modified in such a way that the
+/// item's ordering relative to any other item, as determined by the [`Ord`]
+/// trait, changes while it is in the heap. This is normally only possible
+/// through [`Cell`], [`RefCell`], global state, I/O, or unsafe code. The
+/// behavior resulting from such a logic error is not specified, but will
+/// be encapsulated to the `BinaryHeap` that observed the logic error and not
+/// result in undefined behavior. This could include panics, incorrect results,
+/// aborts, memory leaks, and non-termination.
+///
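+/// A minimal sketch of such a logic error, using [`Cell`] for interior
+/// mutability (memory safety is preserved, but the heap's ordering
+/// guarantees no longer hold):
+///
+/// ```
+/// use std::cell::Cell;
+/// use std::collections::BinaryHeap;
+///
+/// let heap = BinaryHeap::from([Cell::new(1), Cell::new(5), Cell::new(2)]);
+/// // Mutating an element while it is in the heap changes its ordering
+/// // relative to the other elements: a logic error.
+/// heap.peek().unwrap().set(0);
+/// ```
+///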
+/// # Examples
+///
+/// ```
+/// use std::collections::BinaryHeap;
+///
+/// // Type inference lets us omit an explicit type signature (which
+/// // would be `BinaryHeap<i32>` in this example).
+/// let mut heap = BinaryHeap::new();
+///
+/// // We can use peek to look at the next item in the heap. In this case,
+/// // there are no items in there yet so we get None.
+/// assert_eq!(heap.peek(), None);
+///
+/// // Let's add some scores...
+/// heap.push(1);
+/// heap.push(5);
+/// heap.push(2);
+///
+/// // Now peek shows the most important item in the heap.
+/// assert_eq!(heap.peek(), Some(&5));
+///
+/// // We can check the length of a heap.
+/// assert_eq!(heap.len(), 3);
+///
+/// // We can iterate over the items in the heap, although they are returned in
+/// // an arbitrary order.
+/// for x in &heap {
+/// println!("{x}");
+/// }
+///
+/// // If we instead pop these scores, they should come back in order.
+/// assert_eq!(heap.pop(), Some(5));
+/// assert_eq!(heap.pop(), Some(2));
+/// assert_eq!(heap.pop(), Some(1));
+/// assert_eq!(heap.pop(), None);
+///
+/// // We can clear the heap of any remaining items.
+/// heap.clear();
+///
+/// // The heap should now be empty.
+/// assert!(heap.is_empty())
+/// ```
+///
+/// A `BinaryHeap` with a known list of items can be initialized from an array:
+///
+/// ```
+/// use std::collections::BinaryHeap;
+///
+/// let heap = BinaryHeap::from([1, 5, 2]);
+/// ```
+///
+/// ## Min-heap
+///
+/// Either [`core::cmp::Reverse`] or a custom [`Ord`] implementation can be used to
+/// make `BinaryHeap` a min-heap. This makes `heap.pop()` return the smallest
+/// value instead of the greatest one.
+///
+/// ```
+/// use std::collections::BinaryHeap;
+/// use std::cmp::Reverse;
+///
+/// let mut heap = BinaryHeap::new();
+///
+/// // Wrap values in `Reverse`
+/// heap.push(Reverse(1));
+/// heap.push(Reverse(5));
+/// heap.push(Reverse(2));
+///
+/// // If we pop these scores now, they should come back in the reverse order.
+/// assert_eq!(heap.pop(), Some(Reverse(1)));
+/// assert_eq!(heap.pop(), Some(Reverse(2)));
+/// assert_eq!(heap.pop(), Some(Reverse(5)));
+/// assert_eq!(heap.pop(), None);
+/// ```
+///
+/// # Time complexity
+///
+/// | [push] | [pop] | [peek]/[peek\_mut] |
+/// |---------|---------------|--------------------|
+/// | *O*(1)~ | *O*(log(*n*)) | *O*(1) |
+///
+/// The value for `push` is an expected cost; the method documentation gives a
+/// more detailed analysis.
+///
+/// [`core::cmp::Reverse`]: core::cmp::Reverse
+/// [`Ord`]: core::cmp::Ord
+/// [`Cell`]: core::cell::Cell
+/// [`RefCell`]: core::cell::RefCell
+/// [push]: BinaryHeap::push
+/// [pop]: BinaryHeap::pop
+/// [peek]: BinaryHeap::peek
+/// [peek\_mut]: BinaryHeap::peek_mut
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "BinaryHeap")]
+pub struct BinaryHeap<T> {
+ data: Vec<T>,
+}
+
+/// Structure wrapping a mutable reference to the greatest item on a
+/// `BinaryHeap`.
+///
+/// This `struct` is created by the [`peek_mut`] method on [`BinaryHeap`]. See
+/// its documentation for more.
+///
+/// [`peek_mut`]: BinaryHeap::peek_mut
+#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
+pub struct PeekMut<'a, T: 'a + Ord> {
+ heap: &'a mut BinaryHeap<T>,
+ sift: bool,
+}
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<T: Ord + fmt::Debug> fmt::Debug for PeekMut<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("PeekMut").field(&self.heap.data[0]).finish()
+ }
+}
+
+#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
+impl<T: Ord> Drop for PeekMut<'_, T> {
+ fn drop(&mut self) {
+ if self.sift {
+ // SAFETY: PeekMut is only instantiated for non-empty heaps.
+ unsafe { self.heap.sift_down(0) };
+ }
+ }
+}
+
+#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
+impl<T: Ord> Deref for PeekMut<'_, T> {
+ type Target = T;
+ fn deref(&self) -> &T {
+ debug_assert!(!self.heap.is_empty());
+ // SAFETY: PeekMut is only instantiated for non-empty heaps
+ unsafe { self.heap.data.get_unchecked(0) }
+ }
+}
+
+#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
+impl<T: Ord> DerefMut for PeekMut<'_, T> {
+ fn deref_mut(&mut self) -> &mut T {
+ debug_assert!(!self.heap.is_empty());
+ self.sift = true;
+ // SAFETY: PeekMut is only instantiated for non-empty heaps
+ unsafe { self.heap.data.get_unchecked_mut(0) }
+ }
+}
+
+impl<'a, T: Ord> PeekMut<'a, T> {
+ /// Removes the peeked value from the heap and returns it.
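+ ///
+ /// # Examples
+ ///
+ /// A short illustration; the heap below starts with 5 as its greatest
+ /// element:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// use std::collections::binary_heap::PeekMut;
+ ///
+ /// let mut heap = BinaryHeap::from([1, 5, 2]);
+ /// {
+ ///     let top = heap.peek_mut().unwrap();
+ ///     assert_eq!(PeekMut::pop(top), 5);
+ /// }
+ /// assert_eq!(heap.peek(), Some(&2));
+ /// ```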
+ #[stable(feature = "binary_heap_peek_mut_pop", since = "1.18.0")]
+ pub fn pop(mut this: PeekMut<'a, T>) -> T {
+ let value = this.heap.pop().unwrap();
+ this.sift = false;
+ value
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Clone> Clone for BinaryHeap<T> {
+ fn clone(&self) -> Self {
+ BinaryHeap { data: self.data.clone() }
+ }
+
+ fn clone_from(&mut self, source: &Self) {
+ self.data.clone_from(&source.data);
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Ord> Default for BinaryHeap<T> {
+ /// Creates an empty `BinaryHeap<T>`.
+ #[inline]
+ fn default() -> BinaryHeap<T> {
+ BinaryHeap::new()
+ }
+}
+
+#[stable(feature = "binaryheap_debug", since = "1.4.0")]
+impl<T: fmt::Debug> fmt::Debug for BinaryHeap<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.iter()).finish()
+ }
+}
+
+impl<T: Ord> BinaryHeap<T> {
+ /// Creates an empty `BinaryHeap` as a max-heap.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap = BinaryHeap::new();
+ /// heap.push(4);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ pub fn new() -> BinaryHeap<T> {
+ BinaryHeap { data: vec![] }
+ }
+
+ /// Creates an empty `BinaryHeap` with at least the specified capacity.
+ ///
+ /// The binary heap will be able to hold at least `capacity` elements without
+ /// reallocating. This method is allowed to allocate for more elements than
+ /// `capacity`. If `capacity` is 0, the binary heap will not allocate.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap = BinaryHeap::with_capacity(10);
+ /// heap.push(4);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ pub fn with_capacity(capacity: usize) -> BinaryHeap<T> {
+ BinaryHeap { data: Vec::with_capacity(capacity) }
+ }
+
+ /// Returns a mutable reference to the greatest item in the binary heap, or
+ /// `None` if it is empty.
+ ///
+ /// Note: If the `PeekMut` value is leaked, the heap may be in an
+ /// inconsistent state.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap = BinaryHeap::new();
+ /// assert!(heap.peek_mut().is_none());
+ ///
+ /// heap.push(1);
+ /// heap.push(5);
+ /// heap.push(2);
+ /// {
+ /// let mut val = heap.peek_mut().unwrap();
+ /// *val = 0;
+ /// }
+ /// assert_eq!(heap.peek(), Some(&2));
+ /// ```
+ ///
+ /// # Time complexity
+ ///
+ /// If the item is modified then the worst case time complexity is *O*(log(*n*)),
+ /// otherwise it's *O*(1).
+ #[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
+ pub fn peek_mut(&mut self) -> Option<PeekMut<'_, T>> {
+ if self.is_empty() { None } else { Some(PeekMut { heap: self, sift: false }) }
+ }
+
+ /// Removes the greatest item from the binary heap and returns it, or `None` if it
+ /// is empty.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap = BinaryHeap::from([1, 3]);
+ ///
+ /// assert_eq!(heap.pop(), Some(3));
+ /// assert_eq!(heap.pop(), Some(1));
+ /// assert_eq!(heap.pop(), None);
+ /// ```
+ ///
+ /// # Time complexity
+ ///
+ /// The worst case cost of `pop` on a heap containing *n* elements is *O*(log(*n*)).
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn pop(&mut self) -> Option<T> {
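+ // Pop the last element, swap it into the root slot (the maximum), and
+ // sift the swapped-in value down to restore the heap invariant.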
+ self.data.pop().map(|mut item| {
+ if !self.is_empty() {
+ swap(&mut item, &mut self.data[0]);
+ // SAFETY: !self.is_empty() means that self.len() > 0
+ unsafe { self.sift_down_to_bottom(0) };
+ }
+ item
+ })
+ }
+
+ /// Pushes an item onto the binary heap.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap = BinaryHeap::new();
+ /// heap.push(3);
+ /// heap.push(5);
+ /// heap.push(1);
+ ///
+ /// assert_eq!(heap.len(), 3);
+ /// assert_eq!(heap.peek(), Some(&5));
+ /// ```
+ ///
+ /// # Time complexity
+ ///
+ /// The expected cost of `push`, averaged over every possible ordering of
+ /// the elements being pushed, and over a sufficiently large number of
+ /// pushes, is *O*(1). This is the most meaningful cost metric when pushing
+ /// elements that are *not* already in any sorted pattern.
+ ///
+ /// The time complexity degrades if elements are pushed in predominantly
+ /// ascending order. In the worst case, elements are pushed in ascending
+ /// sorted order and the amortized cost per push is *O*(log(*n*)) against a heap
+ /// containing *n* elements.
+ ///
+ /// The worst case cost of a *single* call to `push` is *O*(*n*). The worst case
+ /// occurs when capacity is exhausted and needs a resize. The resize cost
+ /// has been amortized in the previous figures.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn push(&mut self, item: T) {
+ let old_len = self.len();
+ self.data.push(item);
+ // SAFETY: Since we pushed a new item it means that
+ // old_len = self.len() - 1 < self.len()
+ unsafe { self.sift_up(0, old_len) };
+ }
+
+ /// Consumes the `BinaryHeap` and returns a vector in sorted
+ /// (ascending) order.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ ///
+ /// let mut heap = BinaryHeap::from([1, 2, 4, 5, 7]);
+ /// heap.push(6);
+ /// heap.push(3);
+ ///
+ /// let vec = heap.into_sorted_vec();
+ /// assert_eq!(vec, [1, 2, 3, 4, 5, 6, 7]);
+ /// ```
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
+ pub fn into_sorted_vec(mut self) -> Vec<T> {
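+ // Classic in-place heapsort: repeatedly move the current maximum
+ // (index 0) to the end of the shrinking unsorted prefix, then restore
+ // the heap invariant on what remains.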
+ let mut end = self.len();
+ while end > 1 {
+ end -= 1;
+ // SAFETY: `end` goes from `self.len() - 1` to 1 (both included),
+ // so it's always a valid index to access.
+ // It is safe to access index 0 (i.e. `ptr`), because
+ // 1 <= end < self.len(), which means self.len() >= 2.
+ unsafe {
+ let ptr = self.data.as_mut_ptr();
+ ptr::swap(ptr, ptr.add(end));
+ }
+ // SAFETY: `end` goes from `self.len() - 1` to 1 (both included) so:
+ // 0 < 1 <= end <= self.len() - 1 < self.len()
+ // Which means 0 < end and end < self.len().
+ unsafe { self.sift_down_range(0, end) };
+ }
+ self.into_vec()
+ }
+
+ // The implementations of sift_up and sift_down use unsafe blocks in
+ // order to move an element out of the vector (leaving behind a
+ // hole), shift along the others and move the removed element back into the
+ // vector at the final location of the hole.
+ // The `Hole` type is used to represent this, and make sure
+ // the hole is filled back at the end of its scope, even on panic.
+ // Using a hole reduces the constant factor compared to using swaps,
+ // which involves twice as many moves.
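+ //
+ // For example, sifting up the last element of [3, 1, 7]: the hole opens
+ // at index 2 holding 7, the parent 3 moves down into the hole (one move),
+ // and 7 is written exactly once, at index 0, when the hole is filled,
+ // giving [7, 1, 3].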
+
+ /// # Safety
+ ///
+ /// The caller must guarantee that `pos < self.len()`.
+ unsafe fn sift_up(&mut self, start: usize, pos: usize) -> usize {
+ // Take out the value at `pos` and create a hole.
+ // SAFETY: The caller guarantees that pos < self.len()
+ let mut hole = unsafe { Hole::new(&mut self.data, pos) };
+
+ while hole.pos() > start {
+ let parent = (hole.pos() - 1) / 2;
+
+ // SAFETY: hole.pos() > start >= 0, which means hole.pos() > 0
+ // and so hole.pos() - 1 can't underflow.
+ // This guarantees that parent < hole.pos() so
+ // it's a valid index and also != hole.pos().
+ if hole.element() <= unsafe { hole.get(parent) } {
+ break;
+ }
+
+ // SAFETY: Same as above
+ unsafe { hole.move_to(parent) };
+ }
+
+ hole.pos()
+ }
+
+ /// Take an element at `pos` and move it down the heap,
+ /// while its children are larger.
+ ///
+ /// # Safety
+ ///
+ /// The caller must guarantee that `pos < end <= self.len()`.
+ unsafe fn sift_down_range(&mut self, pos: usize, end: usize) {
+ // SAFETY: The caller guarantees that pos < end <= self.len().
+ let mut hole = unsafe { Hole::new(&mut self.data, pos) };
+ let mut child = 2 * hole.pos() + 1;
+
+ // Loop invariant: child == 2 * hole.pos() + 1.
+ while child <= end.saturating_sub(2) {
+ // compare with the greater of the two children
+ // SAFETY: child < end - 1 < self.len() and
+ // child + 1 < end <= self.len(), so they're valid indexes.
+ // child == 2 * hole.pos() + 1 != hole.pos() and
+ // child + 1 == 2 * hole.pos() + 2 != hole.pos().
+ // FIXME: 2 * hole.pos() + 1 or 2 * hole.pos() + 2 could overflow
+ // if T is a ZST
+ child += unsafe { hole.get(child) <= hole.get(child + 1) } as usize;
+
+ // if we are already in order, stop.
+ // SAFETY: child is now either the old child or the old child+1
+ // We have already proven that both are < self.len() and != hole.pos()
+ if hole.element() >= unsafe { hole.get(child) } {
+ return;
+ }
+
+ // SAFETY: same as above.
+ unsafe { hole.move_to(child) };
+ child = 2 * hole.pos() + 1;
+ }
+
+ // SAFETY: && short circuit, which means that in the
+ // second condition it's already true that child == end - 1 < self.len().
+ if child == end - 1 && hole.element() < unsafe { hole.get(child) } {
+ // SAFETY: child is already proven to be a valid index and
+ // child == 2 * hole.pos() + 1 != hole.pos().
+ unsafe { hole.move_to(child) };
+ }
+ }
+
+ /// # Safety
+ ///
+ /// The caller must guarantee that `pos < self.len()`.
+ unsafe fn sift_down(&mut self, pos: usize) {
+ let len = self.len();
+ // SAFETY: pos < len is guaranteed by the caller and
+ // obviously len = self.len() <= self.len().
+ unsafe { self.sift_down_range(pos, len) };
+ }
+
+ /// Take an element at `pos` and move it all the way down the heap,
+ /// then sift it up to its position.
+ ///
+ /// Note: This is faster when the element is known to belong near the
+ /// bottom of the heap.
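+ ///
+ /// Whatever the element's final position, descending all the way down
+ /// costs only the one child-vs-child comparison per level (instead of
+ /// two), and the closing `sift_up` pays one comparison per level it
+ /// ascends, which is cheap when the element indeed belongs near the
+ /// bottom.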
+ ///
+ /// # Safety
+ ///
+ /// The caller must guarantee that `pos < self.len()`.
+ unsafe fn sift_down_to_bottom(&mut self, mut pos: usize) {
+ let end = self.len();
+ let start = pos;
+
+ // SAFETY: The caller guarantees that pos < self.len().
+ let mut hole = unsafe { Hole::new(&mut self.data, pos) };
+ let mut child = 2 * hole.pos() + 1;
+
+ // Loop invariant: child == 2 * hole.pos() + 1.
+ while child <= end.saturating_sub(2) {
+ // SAFETY: child < end - 1 < self.len() and
+ // child + 1 < end <= self.len(), so they're valid indexes.
+ // child == 2 * hole.pos() + 1 != hole.pos() and
+ // child + 1 == 2 * hole.pos() + 2 != hole.pos().
+ // FIXME: 2 * hole.pos() + 1 or 2 * hole.pos() + 2 could overflow
+ // if T is a ZST
+ child += unsafe { hole.get(child) <= hole.get(child + 1) } as usize;
+
+ // SAFETY: Same as above
+ unsafe { hole.move_to(child) };
+ child = 2 * hole.pos() + 1;
+ }
+
+ if child == end - 1 {
+ // SAFETY: child == end - 1 < self.len(), so it's a valid index
+ // and child == 2 * hole.pos() + 1 != hole.pos().
+ unsafe { hole.move_to(child) };
+ }
+ pos = hole.pos();
+ drop(hole);
+
+ // SAFETY: pos is the position in the hole and was already proven
+ // to be a valid index.
+ unsafe { self.sift_up(start, pos) };
+ }
+
+ /// Rebuild assuming data[0..start] is still a proper heap.
+ fn rebuild_tail(&mut self, start: usize) {
+ if start == self.len() {
+ return;
+ }
+
+ let tail_len = self.len() - start;
+
+ #[inline(always)]
+ fn log2_fast(x: usize) -> usize {
+ (usize::BITS - x.leading_zeros() - 1) as usize
+ }
+
+ // `rebuild` takes O(self.len()) operations
+ // and about 2 * self.len() comparisons in the worst case
+ // while repeating `sift_up` takes O(tail_len * log(start)) operations
+ // and about 1 * tail_len * log_2(start) comparisons in the worst case,
+ // assuming start >= tail_len. For larger heaps, the crossover point
+ // no longer follows this reasoning and was determined empirically.
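+ //
+ // A rough worked instance: with start = 1024 and tail_len = 16, rebuilding
+ // costs about 2 * 1040 = 2080 comparisons while sifting up the tail costs
+ // about 16 * log2(1024) = 160, so only the tail is sifted; with the sizes
+ // flipped, start < tail_len holds and the full rebuild wins.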
+ let better_to_rebuild = if start < tail_len {
+ true
+ } else if self.len() <= 2048 {
+ 2 * self.len() < tail_len * log2_fast(start)
+ } else {
+ 2 * self.len() < tail_len * 11
+ };
+
+ if better_to_rebuild {
+ self.rebuild();
+ } else {
+ for i in start..self.len() {
+ // SAFETY: The index `i` is always less than self.len().
+ unsafe { self.sift_up(0, i) };
+ }
+ }
+ }
+
+ fn rebuild(&mut self) {
+ let mut n = self.len() / 2;
+ while n > 0 {
+ n -= 1;
+ // SAFETY: n starts from self.len() / 2 and goes down to 0.
+ // The only case when !(n < self.len()) is if
+ // self.len() == 0, but it's ruled out by the loop condition.
+ unsafe { self.sift_down(n) };
+ }
+ }
+
+ /// Moves all the elements of `other` into `self`, leaving `other` empty.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ ///
+ /// let mut a = BinaryHeap::from([-10, 1, 2, 3, 3]);
+ /// let mut b = BinaryHeap::from([-20, 5, 43]);
+ ///
+ /// a.append(&mut b);
+ ///
+ /// assert_eq!(a.into_sorted_vec(), [-20, -10, 1, 2, 3, 3, 5, 43]);
+ /// assert!(b.is_empty());
+ /// ```
+ #[stable(feature = "binary_heap_append", since = "1.11.0")]
+ pub fn append(&mut self, other: &mut Self) {
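+ // Keep the larger heap in `self`, so that only the shorter run of
+ // appended elements has to be re-heapified by `rebuild_tail` below.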
+ if self.len() < other.len() {
+ swap(self, other);
+ }
+
+ let start = self.data.len();
+
+ self.data.append(&mut other.data);
+
+ self.rebuild_tail(start);
+ }
+
+ /// Clears the binary heap, returning an iterator over the removed elements
+ /// in heap order. If the iterator is dropped before being fully consumed,
+ /// it drops the remaining elements in heap order.
+ ///
+ /// The returned iterator keeps a mutable borrow on the heap to optimize
+ /// its implementation.
+ ///
+ /// Note:
+ /// * `.drain_sorted()` is *O*(*n* \* log(*n*)); much slower than `.drain()`.
+ /// You should use the latter for most cases.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(binary_heap_drain_sorted)]
+ /// use std::collections::BinaryHeap;
+ ///
+ /// let mut heap = BinaryHeap::from([1, 2, 3, 4, 5]);
+ /// assert_eq!(heap.len(), 5);
+ ///
+ /// drop(heap.drain_sorted()); // removes all elements in heap order
+ /// assert_eq!(heap.len(), 0);
+ /// ```
+ #[inline]
+ #[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
+ pub fn drain_sorted(&mut self) -> DrainSorted<'_, T> {
+ DrainSorted { inner: self }
+ }
+
+ /// Retains only the elements specified by the predicate.
+ ///
+ /// In other words, remove all elements `e` for which `f(&e)` returns
+ /// `false`. The elements are visited in unsorted (and unspecified) order.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(binary_heap_retain)]
+ /// use std::collections::BinaryHeap;
+ ///
+ /// let mut heap = BinaryHeap::from([-10, -5, 1, 2, 4, 13]);
+ ///
+ /// heap.retain(|x| x % 2 == 0); // only keep even numbers
+ ///
+ /// assert_eq!(heap.into_sorted_vec(), [-10, 2, 4])
+ /// ```
+ #[unstable(feature = "binary_heap_retain", issue = "71503")]
+ pub fn retain<F>(&mut self, mut f: F)
+ where
+ F: FnMut(&T) -> bool,
+ {
+ let mut first_removed = self.len();
+ let mut i = 0;
+ self.data.retain(|e| {
+ let keep = f(e);
+ if !keep && i < first_removed {
+ first_removed = i;
+ }
+ i += 1;
+ keep
+ });
+ // data[0..first_removed] is untouched, so we only need to rebuild the tail:
+ self.rebuild_tail(first_removed);
+ }
+}
+
+impl<T> BinaryHeap<T> {
+ /// Returns an iterator visiting all values in the underlying vector, in
+ /// arbitrary order.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let heap = BinaryHeap::from([1, 2, 3, 4]);
+ ///
+ /// // Print 1, 2, 3, 4 in arbitrary order
+ /// for x in heap.iter() {
+ /// println!("{x}");
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn iter(&self) -> Iter<'_, T> {
+ Iter { iter: self.data.iter() }
+ }
+
+ /// Returns an iterator which retrieves elements in heap order.
+ /// This method consumes the original heap.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(binary_heap_into_iter_sorted)]
+ /// use std::collections::BinaryHeap;
+ /// let heap = BinaryHeap::from([1, 2, 3, 4, 5]);
+ ///
+ /// assert_eq!(heap.into_iter_sorted().take(2).collect::<Vec<_>>(), [5, 4]);
+ /// ```
+ #[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
+ pub fn into_iter_sorted(self) -> IntoIterSorted<T> {
+ IntoIterSorted { inner: self }
+ }
+
+ /// Returns the greatest item in the binary heap, or `None` if it is empty.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap = BinaryHeap::new();
+ /// assert_eq!(heap.peek(), None);
+ ///
+ /// heap.push(1);
+ /// heap.push(5);
+ /// heap.push(2);
+ /// assert_eq!(heap.peek(), Some(&5));
+ /// ```
+ ///
+ /// # Time complexity
+ ///
+ /// Cost is *O*(1) in the worst case.
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn peek(&self) -> Option<&T> {
+ self.data.get(0)
+ }
+
+ /// Returns the number of elements the binary heap can hold without reallocating.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap = BinaryHeap::with_capacity(100);
+ /// assert!(heap.capacity() >= 100);
+ /// heap.push(4);
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn capacity(&self) -> usize {
+ self.data.capacity()
+ }
+
+ /// Reserves the minimum capacity for at least `additional` elements more than
+ /// the current length. Unlike [`reserve`], this will not
+ /// deliberately over-allocate to speculatively avoid frequent allocations.
+ /// After calling `reserve_exact`, capacity will be greater than or equal to
+ /// `self.len() + additional`. Does nothing if the capacity is already
+ /// sufficient.
+ ///
+ /// [`reserve`]: BinaryHeap::reserve
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity overflows [`usize`].
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap = BinaryHeap::new();
+ /// heap.reserve_exact(100);
+ /// assert!(heap.capacity() >= 100);
+ /// heap.push(4);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn reserve_exact(&mut self, additional: usize) {
+ self.data.reserve_exact(additional);
+ }
+
+ /// Reserves capacity for at least `additional` elements more than the
+ /// current length. The allocator may reserve more space to speculatively
+ /// avoid frequent allocations. After calling `reserve`,
+ /// capacity will be greater than or equal to `self.len() + additional`.
+ /// Does nothing if capacity is already sufficient.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity overflows [`usize`].
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap = BinaryHeap::new();
+ /// heap.reserve(100);
+ /// assert!(heap.capacity() >= 100);
+ /// heap.push(4);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn reserve(&mut self, additional: usize) {
+ self.data.reserve(additional);
+ }
+
+ /// Tries to reserve the minimum capacity for at least `additional` elements
+ /// more than the current length. Unlike [`try_reserve`], this will not
+ /// deliberately over-allocate to speculatively avoid frequent allocations.
+ /// After calling `try_reserve_exact`, capacity will be greater than or
+ /// equal to `self.len() + additional` if it returns `Ok(())`.
+ /// Does nothing if the capacity is already sufficient.
+ ///
+ /// Note that the allocator may give the collection more space than it
+ /// requests. Therefore, capacity can not be relied upon to be precisely
+ /// minimal. Prefer [`try_reserve`] if future insertions are expected.
+ ///
+ /// [`try_reserve`]: BinaryHeap::try_reserve
+ ///
+ /// # Errors
+ ///
+ /// If the capacity overflows, or the allocator reports a failure, then an error
+ /// is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// use std::collections::TryReserveError;
+ ///
+ /// fn find_max_slow(data: &[u32]) -> Result<Option<u32>, TryReserveError> {
+ /// let mut heap = BinaryHeap::new();
+ ///
+ /// // Pre-reserve the memory, exiting if we can't
+ /// heap.try_reserve_exact(data.len())?;
+ ///
+ /// // Now we know this can't OOM in the middle of our complex work
+ /// heap.extend(data.iter());
+ ///
+ /// Ok(heap.pop())
+ /// }
+ /// # find_max_slow(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
+ /// ```
+ #[stable(feature = "try_reserve_2", since = "1.63.0")]
+ pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
+ self.data.try_reserve_exact(additional)
+ }
+
+ /// Tries to reserve capacity for at least `additional` elements more than the
+ /// current length. The allocator may reserve more space to speculatively
+ /// avoid frequent allocations. After calling `try_reserve`, capacity will be
+ /// greater than or equal to `self.len() + additional` if it returns
+ /// `Ok(())`. Does nothing if capacity is already sufficient.
+ ///
+ /// # Errors
+ ///
+ /// If the capacity overflows, or the allocator reports a failure, then an error
+ /// is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// use std::collections::TryReserveError;
+ ///
+ /// fn find_max_slow(data: &[u32]) -> Result<Option<u32>, TryReserveError> {
+ /// let mut heap = BinaryHeap::new();
+ ///
+ /// // Pre-reserve the memory, exiting if we can't
+ /// heap.try_reserve(data.len())?;
+ ///
+ /// // Now we know this can't OOM in the middle of our complex work
+ /// heap.extend(data.iter());
+ ///
+ /// Ok(heap.pop())
+ /// }
+ /// # find_max_slow(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
+ /// ```
+ #[stable(feature = "try_reserve_2", since = "1.63.0")]
+ pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
+ self.data.try_reserve(additional)
+ }
+
+ /// Discards as much additional capacity as possible.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap: BinaryHeap<i32> = BinaryHeap::with_capacity(100);
+ ///
+ /// assert!(heap.capacity() >= 100);
+ /// heap.shrink_to_fit();
+ /// assert!(heap.capacity() == 0);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn shrink_to_fit(&mut self) {
+ self.data.shrink_to_fit();
+ }
+
+ /// Discards capacity with a lower bound.
+ ///
+ /// The capacity will remain at least as large as both the length
+ /// and the supplied value.
+ ///
+ /// If the current capacity is less than the lower limit, this is a no-op.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap: BinaryHeap<i32> = BinaryHeap::with_capacity(100);
+ ///
+ /// assert!(heap.capacity() >= 100);
+ /// heap.shrink_to(10);
+ /// assert!(heap.capacity() >= 10);
+ /// ```
+ #[inline]
+ #[stable(feature = "shrink_to", since = "1.56.0")]
+ pub fn shrink_to(&mut self, min_capacity: usize) {
+ self.data.shrink_to(min_capacity)
+ }
+
+ /// Returns a slice of all values in the underlying vector, in arbitrary
+ /// order.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(binary_heap_as_slice)]
+ /// use std::collections::BinaryHeap;
+ /// use std::io::{self, Write};
+ ///
+ /// let heap = BinaryHeap::from([1, 2, 3, 4, 5, 6, 7]);
+ ///
+ /// io::sink().write(heap.as_slice()).unwrap();
+ /// ```
+ #[must_use]
+ #[unstable(feature = "binary_heap_as_slice", issue = "83659")]
+ pub fn as_slice(&self) -> &[T] {
+ self.data.as_slice()
+ }
+
+ /// Consumes the `BinaryHeap` and returns the underlying vector
+ /// in arbitrary order.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let heap = BinaryHeap::from([1, 2, 3, 4, 5, 6, 7]);
+ /// let vec = heap.into_vec();
+ ///
+ /// // Will print in some order
+ /// for x in vec {
+ /// println!("{x}");
+ /// }
+ /// ```
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
+ pub fn into_vec(self) -> Vec<T> {
+ self.into()
+ }
+
+ /// Returns the length of the binary heap.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let heap = BinaryHeap::from([1, 3]);
+ ///
+ /// assert_eq!(heap.len(), 2);
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn len(&self) -> usize {
+ self.data.len()
+ }
+
+ /// Checks if the binary heap is empty.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap = BinaryHeap::new();
+ ///
+ /// assert!(heap.is_empty());
+ ///
+ /// heap.push(3);
+ /// heap.push(5);
+ /// heap.push(1);
+ ///
+ /// assert!(!heap.is_empty());
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+
+ /// Clears the binary heap, returning an iterator over the removed elements
+ /// in arbitrary order. If the iterator is dropped before being fully
+ /// consumed, it drops the remaining elements in arbitrary order.
+ ///
+ /// The returned iterator keeps a mutable borrow on the heap to optimize
+ /// its implementation.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap = BinaryHeap::from([1, 3]);
+ ///
+ /// assert!(!heap.is_empty());
+ ///
+ /// for x in heap.drain() {
+ /// println!("{x}");
+ /// }
+ ///
+ /// assert!(heap.is_empty());
+ /// ```
+ #[inline]
+ #[stable(feature = "drain", since = "1.6.0")]
+ pub fn drain(&mut self) -> Drain<'_, T> {
+ Drain { iter: self.data.drain(..) }
+ }
+
+ /// Drops all items from the binary heap.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap = BinaryHeap::from([1, 3]);
+ ///
+ /// assert!(!heap.is_empty());
+ ///
+ /// heap.clear();
+ ///
+ /// assert!(heap.is_empty());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn clear(&mut self) {
+ self.drain();
+ }
+}
+
+/// Hole represents a hole in a slice, i.e., an index without a valid value
+/// (because it was moved from or duplicated).
+/// On drop, `Hole` restores the slice by filling the hole
+/// position with the value that was originally removed.
+struct Hole<'a, T: 'a> {
+ data: &'a mut [T],
+ elt: ManuallyDrop<T>,
+ pos: usize,
+}
+
+impl<'a, T> Hole<'a, T> {
+ /// Create a new `Hole` at index `pos`.
+ ///
+ /// Unsafe because pos must be within the data slice.
+ #[inline]
+ unsafe fn new(data: &'a mut [T], pos: usize) -> Self {
+ debug_assert!(pos < data.len());
+ // SAFETY: pos should be inside the slice
+ let elt = unsafe { ptr::read(data.get_unchecked(pos)) };
+ Hole { data, elt: ManuallyDrop::new(elt), pos }
+ }
+
+ #[inline]
+ fn pos(&self) -> usize {
+ self.pos
+ }
+
+ /// Returns a reference to the element removed.
+ #[inline]
+ fn element(&self) -> &T {
+ &self.elt
+ }
+
+ /// Returns a reference to the element at `index`.
+ ///
+ /// Unsafe because index must be within the data slice and not equal to pos.
+ #[inline]
+ unsafe fn get(&self, index: usize) -> &T {
+ debug_assert!(index != self.pos);
+ debug_assert!(index < self.data.len());
+ unsafe { self.data.get_unchecked(index) }
+ }
+
+ /// Moves the hole to a new location.
+ ///
+ /// Unsafe because index must be within the data slice and not equal to pos.
+ #[inline]
+ unsafe fn move_to(&mut self, index: usize) {
+ debug_assert!(index != self.pos);
+ debug_assert!(index < self.data.len());
+ unsafe {
+ let ptr = self.data.as_mut_ptr();
+ let index_ptr: *const _ = ptr.add(index);
+ let hole_ptr = ptr.add(self.pos);
+ ptr::copy_nonoverlapping(index_ptr, hole_ptr, 1);
+ }
+ self.pos = index;
+ }
+}
+
+impl<T> Drop for Hole<'_, T> {
+ #[inline]
+ fn drop(&mut self) {
+ // fill the hole again
+ unsafe {
+ let pos = self.pos;
+ ptr::copy_nonoverlapping(&*self.elt, self.data.get_unchecked_mut(pos), 1);
+ }
+ }
+}
+
+/// An iterator over the elements of a `BinaryHeap`.
+///
+/// This `struct` is created by [`BinaryHeap::iter()`]. See its
+/// documentation for more.
+///
+/// [`iter`]: BinaryHeap::iter
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Iter<'a, T: 'a> {
+ iter: slice::Iter<'a, T>,
+}
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("Iter").field(&self.iter.as_slice()).finish()
+ }
+}
+
+// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Clone for Iter<'_, T> {
+ fn clone(&self) -> Self {
+ Iter { iter: self.iter.clone() }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> Iterator for Iter<'a, T> {
+ type Item = &'a T;
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a T> {
+ self.iter.next()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+
+ #[inline]
+ fn last(self) -> Option<&'a T> {
+ self.iter.last()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> DoubleEndedIterator for Iter<'a, T> {
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a T> {
+ self.iter.next_back()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> ExactSizeIterator for Iter<'_, T> {
+ fn is_empty(&self) -> bool {
+ self.iter.is_empty()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T> FusedIterator for Iter<'_, T> {}
+
+/// An owning iterator over the elements of a `BinaryHeap`.
+///
+/// This `struct` is created by [`BinaryHeap::into_iter()`]
+/// (provided by the [`IntoIterator`] trait). See its documentation for more.
+///
+/// [`into_iter`]: BinaryHeap::into_iter
+/// [`IntoIterator`]: core::iter::IntoIterator
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(Clone)]
+pub struct IntoIter<T> {
+ iter: vec::IntoIter<T>,
+}
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<T: fmt::Debug> fmt::Debug for IntoIter<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("IntoIter").field(&self.iter.as_slice()).finish()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Iterator for IntoIter<T> {
+ type Item = T;
+
+ #[inline]
+ fn next(&mut self) -> Option<T> {
+ self.iter.next()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> DoubleEndedIterator for IntoIter<T> {
+ #[inline]
+ fn next_back(&mut self) -> Option<T> {
+ self.iter.next_back()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> ExactSizeIterator for IntoIter<T> {
+ fn is_empty(&self) -> bool {
+ self.iter.is_empty()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T> FusedIterator for IntoIter<T> {}
+
+// In addition to the SAFETY invariants of the following three unsafe traits,
+// also refer to the `vec::in_place_collect` module documentation for an overview.
+#[unstable(issue = "none", feature = "inplace_iteration")]
+#[doc(hidden)]
+unsafe impl<T> SourceIter for IntoIter<T> {
+ type Source = IntoIter<T>;
+
+ #[inline]
+ unsafe fn as_inner(&mut self) -> &mut Self::Source {
+ self
+ }
+}
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+#[doc(hidden)]
+unsafe impl<I> InPlaceIterable for IntoIter<I> {}
+
+unsafe impl<I> AsVecIntoIter for IntoIter<I> {
+ type Item = I;
+
+ fn as_into_iter(&mut self) -> &mut vec::IntoIter<Self::Item> {
+ &mut self.iter
+ }
+}
+
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
+#[derive(Clone, Debug)]
+pub struct IntoIterSorted<T> {
+ inner: BinaryHeap<T>,
+}
+
+#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
+impl<T: Ord> Iterator for IntoIterSorted<T> {
+ type Item = T;
+
+ #[inline]
+ fn next(&mut self) -> Option<T> {
+ self.inner.pop()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let exact = self.inner.len();
+ (exact, Some(exact))
+ }
+}
+
+#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
+impl<T: Ord> ExactSizeIterator for IntoIterSorted<T> {}
+
+#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
+impl<T: Ord> FusedIterator for IntoIterSorted<T> {}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<T: Ord> TrustedLen for IntoIterSorted<T> {}
+
+/// A draining iterator over the elements of a `BinaryHeap`.
+///
+/// This `struct` is created by [`BinaryHeap::drain()`]. See its
+/// documentation for more.
+///
+/// [`drain`]: BinaryHeap::drain
+#[stable(feature = "drain", since = "1.6.0")]
+#[derive(Debug)]
+pub struct Drain<'a, T: 'a> {
+ iter: vec::Drain<'a, T>,
+}
+
+#[stable(feature = "drain", since = "1.6.0")]
+impl<T> Iterator for Drain<'_, T> {
+ type Item = T;
+
+ #[inline]
+ fn next(&mut self) -> Option<T> {
+ self.iter.next()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+}
+
+#[stable(feature = "drain", since = "1.6.0")]
+impl<T> DoubleEndedIterator for Drain<'_, T> {
+ #[inline]
+ fn next_back(&mut self) -> Option<T> {
+ self.iter.next_back()
+ }
+}
+
+#[stable(feature = "drain", since = "1.6.0")]
+impl<T> ExactSizeIterator for Drain<'_, T> {
+ fn is_empty(&self) -> bool {
+ self.iter.is_empty()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T> FusedIterator for Drain<'_, T> {}
+
+/// A draining iterator over the elements of a `BinaryHeap`.
+///
+/// This `struct` is created by [`BinaryHeap::drain_sorted()`]. See its
+/// documentation for more.
+///
+/// [`drain_sorted`]: BinaryHeap::drain_sorted
+#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
+#[derive(Debug)]
+pub struct DrainSorted<'a, T: Ord> {
+ inner: &'a mut BinaryHeap<T>,
+}
+
+#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
+impl<'a, T: Ord> Drop for DrainSorted<'a, T> {
+ /// Removes heap elements in heap order.
+ fn drop(&mut self) {
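+ // `DropGuard` is a panic-safety net: if dropping an element panics, the
+ // guard's own destructor still pops (and drops) every remaining element
+ // before the unwinding continues.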
+ struct DropGuard<'r, 'a, T: Ord>(&'r mut DrainSorted<'a, T>);
+
+ impl<'r, 'a, T: Ord> Drop for DropGuard<'r, 'a, T> {
+ fn drop(&mut self) {
+ while self.0.inner.pop().is_some() {}
+ }
+ }
+
+ while let Some(item) = self.inner.pop() {
+ let guard = DropGuard(self);
+ drop(item);
+ mem::forget(guard);
+ }
+ }
+}
+
+#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
+impl<T: Ord> Iterator for DrainSorted<'_, T> {
+ type Item = T;
+
+ #[inline]
+ fn next(&mut self) -> Option<T> {
+ self.inner.pop()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let exact = self.inner.len();
+ (exact, Some(exact))
+ }
+}
+
+#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
+impl<T: Ord> ExactSizeIterator for DrainSorted<'_, T> {}
+
+#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
+impl<T: Ord> FusedIterator for DrainSorted<'_, T> {}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<T: Ord> TrustedLen for DrainSorted<'_, T> {}
+
+#[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
+impl<T: Ord> From<Vec<T>> for BinaryHeap<T> {
+ /// Converts a `Vec<T>` into a `BinaryHeap<T>`.
+ ///
+ /// This conversion happens in-place, and has *O*(*n*) time complexity.
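+ ///
+ /// # Examples
+ ///
+ /// A short illustration of the heapifying conversion:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ ///
+ /// let heap = BinaryHeap::from(vec![1, 4, 2, 3]);
+ /// assert_eq!(heap.peek(), Some(&4));
+ /// ```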
+ fn from(vec: Vec<T>) -> BinaryHeap<T> {
+ let mut heap = BinaryHeap { data: vec };
+ heap.rebuild();
+ heap
+ }
+}
+
+#[stable(feature = "std_collections_from_array", since = "1.56.0")]
+impl<T: Ord, const N: usize> From<[T; N]> for BinaryHeap<T> {
+ /// ```
+ /// use std::collections::BinaryHeap;
+ ///
+ /// let mut h1 = BinaryHeap::from([1, 4, 2, 3]);
+ /// let mut h2: BinaryHeap<_> = [1, 4, 2, 3].into();
+ /// while let Some((a, b)) = h1.pop().zip(h2.pop()) {
+ /// assert_eq!(a, b);
+ /// }
+ /// ```
+ fn from(arr: [T; N]) -> Self {
+ Self::from_iter(arr)
+ }
+}
+
+#[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
+impl<T> From<BinaryHeap<T>> for Vec<T> {
+ /// Converts a `BinaryHeap<T>` into a `Vec<T>`.
+ ///
+ /// This conversion requires no data movement or allocation, and has
+ /// constant time complexity.
+ fn from(heap: BinaryHeap<T>) -> Vec<T> {
+ heap.data
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Ord> FromIterator<T> for BinaryHeap<T> {
+ fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> BinaryHeap<T> {
+ BinaryHeap::from(iter.into_iter().collect::<Vec<_>>())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> IntoIterator for BinaryHeap<T> {
+ type Item = T;
+ type IntoIter = IntoIter<T>;
+
+ /// Creates a consuming iterator, that is, one that moves each value out of
+ /// the binary heap in arbitrary order. The binary heap cannot be used
+ /// after calling this.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let heap = BinaryHeap::from([1, 2, 3, 4]);
+ ///
+ /// // Print 1, 2, 3, 4 in arbitrary order
+ /// for x in heap.into_iter() {
+ /// // x has type i32, not &i32
+ /// println!("{x}");
+ /// }
+ /// ```
+ fn into_iter(self) -> IntoIter<T> {
+ IntoIter { iter: self.data.into_iter() }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> IntoIterator for &'a BinaryHeap<T> {
+ type Item = &'a T;
+ type IntoIter = Iter<'a, T>;
+
+ fn into_iter(self) -> Iter<'a, T> {
+ self.iter()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Ord> Extend<T> for BinaryHeap<T> {
+ #[inline]
+ fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
+ <Self as SpecExtend<I>>::spec_extend(self, iter);
+ }
+
+ #[inline]
+ fn extend_one(&mut self, item: T) {
+ self.push(item);
+ }
+
+ #[inline]
+ fn extend_reserve(&mut self, additional: usize) {
+ self.reserve(additional);
+ }
+}
+
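+// Specialized `Extend` paths: the generic implementation below pushes
+// elements one by one, while extending from a `Vec<T>` or another
+// `BinaryHeap<T>` appends the whole buffer at once and then restores the
+// heap invariant in bulk.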
+impl<T: Ord, I: IntoIterator<Item = T>> SpecExtend<I> for BinaryHeap<T> {
+ default fn spec_extend(&mut self, iter: I) {
+ self.extend_desugared(iter.into_iter());
+ }
+}
+
+impl<T: Ord> SpecExtend<Vec<T>> for BinaryHeap<T> {
+ fn spec_extend(&mut self, ref mut other: Vec<T>) {
+ let start = self.data.len();
+ self.data.append(other);
+ self.rebuild_tail(start);
+ }
+}
+
+impl<T: Ord> SpecExtend<BinaryHeap<T>> for BinaryHeap<T> {
+ fn spec_extend(&mut self, ref mut other: BinaryHeap<T>) {
+ self.append(other);
+ }
+}
+
+impl<T: Ord> BinaryHeap<T> {
+ fn extend_desugared<I: IntoIterator<Item = T>>(&mut self, iter: I) {
+ let iterator = iter.into_iter();
+ let (lower, _) = iterator.size_hint();
+
+ self.reserve(lower);
+
+ iterator.for_each(move |elem| self.push(elem));
+ }
+}
+
+#[stable(feature = "extend_ref", since = "1.2.0")]
+impl<'a, T: 'a + Ord + Copy> Extend<&'a T> for BinaryHeap<T> {
+ fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
+ self.extend(iter.into_iter().cloned());
+ }
+
+ #[inline]
+ fn extend_one(&mut self, &item: &'a T) {
+ self.push(item);
+ }
+
+ #[inline]
+ fn extend_reserve(&mut self, additional: usize) {
+ self.reserve(additional);
+ }
+}
diff --git a/library/alloc/src/collections/binary_heap/tests.rs b/library/alloc/src/collections/binary_heap/tests.rs
new file mode 100644
index 000000000..5a05215ae
--- /dev/null
+++ b/library/alloc/src/collections/binary_heap/tests.rs
@@ -0,0 +1,489 @@
+use super::*;
+use crate::boxed::Box;
+use std::iter::TrustedLen;
+use std::panic::{catch_unwind, AssertUnwindSafe};
+use std::sync::atomic::{AtomicU32, Ordering};
+
+#[test]
+fn test_iterator() {
+ let data = vec![5, 9, 3];
+ let iterout = [9, 5, 3];
+ let heap = BinaryHeap::from(data);
+ let mut i = 0;
+ for el in &heap {
+ assert_eq!(*el, iterout[i]);
+ i += 1;
+ }
+}
+
+#[test]
+fn test_iter_rev_cloned_collect() {
+ let data = vec![5, 9, 3];
+ let iterout = vec![3, 5, 9];
+ let pq = BinaryHeap::from(data);
+
+ let v: Vec<_> = pq.iter().rev().cloned().collect();
+ assert_eq!(v, iterout);
+}
+
+#[test]
+fn test_into_iter_collect() {
+ let data = vec![5, 9, 3];
+ let iterout = vec![9, 5, 3];
+ let pq = BinaryHeap::from(data);
+
+ let v: Vec<_> = pq.into_iter().collect();
+ assert_eq!(v, iterout);
+}
+
+#[test]
+fn test_into_iter_size_hint() {
+ let data = vec![5, 9];
+ let pq = BinaryHeap::from(data);
+
+ let mut it = pq.into_iter();
+
+ assert_eq!(it.size_hint(), (2, Some(2)));
+ assert_eq!(it.next(), Some(9));
+
+ assert_eq!(it.size_hint(), (1, Some(1)));
+ assert_eq!(it.next(), Some(5));
+
+ assert_eq!(it.size_hint(), (0, Some(0)));
+ assert_eq!(it.next(), None);
+}
+
+#[test]
+fn test_into_iter_rev_collect() {
+ let data = vec![5, 9, 3];
+ let iterout = vec![3, 5, 9];
+ let pq = BinaryHeap::from(data);
+
+ let v: Vec<_> = pq.into_iter().rev().collect();
+ assert_eq!(v, iterout);
+}
+
+#[test]
+fn test_into_iter_sorted_collect() {
+ let heap = BinaryHeap::from(vec![2, 4, 6, 2, 1, 8, 10, 3, 5, 7, 0, 9, 1]);
+ let it = heap.into_iter_sorted();
+ let sorted = it.collect::<Vec<_>>();
+ assert_eq!(sorted, vec![10, 9, 8, 7, 6, 5, 4, 3, 2, 2, 1, 1, 0]);
+}
+
+#[test]
+fn test_drain_sorted_collect() {
+ let mut heap = BinaryHeap::from(vec![2, 4, 6, 2, 1, 8, 10, 3, 5, 7, 0, 9, 1]);
+ let it = heap.drain_sorted();
+ let sorted = it.collect::<Vec<_>>();
+ assert_eq!(sorted, vec![10, 9, 8, 7, 6, 5, 4, 3, 2, 2, 1, 1, 0]);
+}
+
+fn check_exact_size_iterator<I: ExactSizeIterator>(len: usize, it: I) {
+ let mut it = it;
+
+ for i in 0..it.len() {
+ let (lower, upper) = it.size_hint();
+ assert_eq!(Some(lower), upper);
+ assert_eq!(lower, len - i);
+ assert_eq!(it.len(), len - i);
+ it.next();
+ }
+ assert_eq!(it.len(), 0);
+ assert!(it.is_empty());
+}
+
+#[test]
+fn test_exact_size_iterator() {
+ let heap = BinaryHeap::from(vec![2, 4, 6, 2, 1, 8, 10, 3, 5, 7, 0, 9, 1]);
+ check_exact_size_iterator(heap.len(), heap.iter());
+ check_exact_size_iterator(heap.len(), heap.clone().into_iter());
+ check_exact_size_iterator(heap.len(), heap.clone().into_iter_sorted());
+ check_exact_size_iterator(heap.len(), heap.clone().drain());
+ check_exact_size_iterator(heap.len(), heap.clone().drain_sorted());
+}
+
+fn check_trusted_len<I: TrustedLen>(len: usize, it: I) {
+ let mut it = it;
+ for i in 0..len {
+ let (lower, upper) = it.size_hint();
+ if upper.is_some() {
+ assert_eq!(Some(lower), upper);
+ assert_eq!(lower, len - i);
+ }
+ it.next();
+ }
+}
+
+#[test]
+fn test_trusted_len() {
+ let heap = BinaryHeap::from(vec![2, 4, 6, 2, 1, 8, 10, 3, 5, 7, 0, 9, 1]);
+ check_trusted_len(heap.len(), heap.clone().into_iter_sorted());
+ check_trusted_len(heap.len(), heap.clone().drain_sorted());
+}
+
+#[test]
+fn test_peek_and_pop() {
+ let data = vec![2, 4, 6, 2, 1, 8, 10, 3, 5, 7, 0, 9, 1];
+ let mut sorted = data.clone();
+ sorted.sort();
+ let mut heap = BinaryHeap::from(data);
+ while !heap.is_empty() {
+ assert_eq!(heap.peek().unwrap(), sorted.last().unwrap());
+ assert_eq!(heap.pop().unwrap(), sorted.pop().unwrap());
+ }
+}
+
+#[test]
+fn test_peek_mut() {
+ let data = vec![2, 4, 6, 2, 1, 8, 10, 3, 5, 7, 0, 9, 1];
+ let mut heap = BinaryHeap::from(data);
+ assert_eq!(heap.peek(), Some(&10));
+ {
+ let mut top = heap.peek_mut().unwrap();
+ *top -= 2;
+ }
+ assert_eq!(heap.peek(), Some(&9));
+}
+
+#[test]
+fn test_peek_mut_pop() {
+ let data = vec![2, 4, 6, 2, 1, 8, 10, 3, 5, 7, 0, 9, 1];
+ let mut heap = BinaryHeap::from(data);
+ assert_eq!(heap.peek(), Some(&10));
+ {
+ let mut top = heap.peek_mut().unwrap();
+ *top -= 2;
+ assert_eq!(PeekMut::pop(top), 8);
+ }
+ assert_eq!(heap.peek(), Some(&9));
+}
+
+#[test]
+fn test_push() {
+ let mut heap = BinaryHeap::from(vec![2, 4, 9]);
+ assert_eq!(heap.len(), 3);
+ assert!(*heap.peek().unwrap() == 9);
+ heap.push(11);
+ assert_eq!(heap.len(), 4);
+ assert!(*heap.peek().unwrap() == 11);
+ heap.push(5);
+ assert_eq!(heap.len(), 5);
+ assert!(*heap.peek().unwrap() == 11);
+ heap.push(27);
+ assert_eq!(heap.len(), 6);
+ assert!(*heap.peek().unwrap() == 27);
+ heap.push(3);
+ assert_eq!(heap.len(), 7);
+ assert!(*heap.peek().unwrap() == 27);
+ heap.push(103);
+ assert_eq!(heap.len(), 8);
+ assert!(*heap.peek().unwrap() == 103);
+}
+
+#[test]
+fn test_push_unique() {
+ let mut heap = BinaryHeap::<Box<_>>::from(vec![Box::new(2), Box::new(4), Box::new(9)]);
+ assert_eq!(heap.len(), 3);
+ assert!(**heap.peek().unwrap() == 9);
+ heap.push(Box::new(11));
+ assert_eq!(heap.len(), 4);
+ assert!(**heap.peek().unwrap() == 11);
+ heap.push(Box::new(5));
+ assert_eq!(heap.len(), 5);
+ assert!(**heap.peek().unwrap() == 11);
+ heap.push(Box::new(27));
+ assert_eq!(heap.len(), 6);
+ assert!(**heap.peek().unwrap() == 27);
+ heap.push(Box::new(3));
+ assert_eq!(heap.len(), 7);
+ assert!(**heap.peek().unwrap() == 27);
+ heap.push(Box::new(103));
+ assert_eq!(heap.len(), 8);
+ assert!(**heap.peek().unwrap() == 103);
+}
+
+fn check_to_vec(mut data: Vec<i32>) {
+ let heap = BinaryHeap::from(data.clone());
+ let mut v = heap.clone().into_vec();
+ v.sort();
+ data.sort();
+
+ assert_eq!(v, data);
+ assert_eq!(heap.into_sorted_vec(), data);
+}
+
+#[test]
+fn test_to_vec() {
+ check_to_vec(vec![]);
+ check_to_vec(vec![5]);
+ check_to_vec(vec![3, 2]);
+ check_to_vec(vec![2, 3]);
+ check_to_vec(vec![5, 1, 2]);
+ check_to_vec(vec![1, 100, 2, 3]);
+ check_to_vec(vec![1, 3, 5, 7, 9, 2, 4, 6, 8, 0]);
+ check_to_vec(vec![2, 4, 6, 2, 1, 8, 10, 3, 5, 7, 0, 9, 1]);
+ check_to_vec(vec![9, 11, 9, 9, 9, 9, 11, 2, 3, 4, 11, 9, 0, 0, 0, 0]);
+ check_to_vec(vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
+ check_to_vec(vec![10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]);
+ check_to_vec(vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 1, 2]);
+ check_to_vec(vec![5, 4, 3, 2, 1, 5, 4, 3, 2, 1, 5, 4, 3, 2, 1]);
+}
+
+#[test]
+fn test_in_place_iterator_specialization() {
+ let src: Vec<usize> = vec![1, 2, 3];
+ let src_ptr = src.as_ptr();
+ let heap: BinaryHeap<_> = src.into_iter().map(std::convert::identity).collect();
+ let heap_ptr = heap.iter().next().unwrap() as *const usize;
+ assert_eq!(src_ptr, heap_ptr);
+ let sink: Vec<_> = heap.into_iter().map(std::convert::identity).collect();
+ let sink_ptr = sink.as_ptr();
+ assert_eq!(heap_ptr, sink_ptr);
+}
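
The pointer assertions above probe the in-place iteration specialization: collecting a mapped `vec::IntoIter` into a compatible container should reuse the source allocation rather than allocate afresh. The same probe for the plain `Vec` to `Vec` case reads as follows; the reuse is an optimization the library performs in practice, not a documented guarantee, so treat the final assertion as a sketch.

```
fn main() {
    let src: Vec<u32> = vec![1, 2, 3];
    let src_ptr = src.as_ptr();

    // A same-size `map` keeps the iterator eligible for in-place collection.
    let dst: Vec<u32> = src.into_iter().map(|x| x + 1).collect();

    // With the specialization active, `dst` lives in the allocation
    // that `src` occupied.
    assert_eq!(src_ptr, dst.as_ptr());
}
```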
+
+#[test]
+fn test_empty_pop() {
+ let mut heap = BinaryHeap::<i32>::new();
+ assert!(heap.pop().is_none());
+}
+
+#[test]
+fn test_empty_peek() {
+ let empty = BinaryHeap::<i32>::new();
+ assert!(empty.peek().is_none());
+}
+
+#[test]
+fn test_empty_peek_mut() {
+ let mut empty = BinaryHeap::<i32>::new();
+ assert!(empty.peek_mut().is_none());
+}
+
+#[test]
+fn test_from_iter() {
+ let xs = vec![9, 8, 7, 6, 5, 4, 3, 2, 1];
+
+ let mut q: BinaryHeap<_> = xs.iter().rev().cloned().collect();
+
+ for &x in &xs {
+ assert_eq!(q.pop().unwrap(), x);
+ }
+}
+
+#[test]
+fn test_drain() {
+ let mut q: BinaryHeap<_> = [9, 8, 7, 6, 5, 4, 3, 2, 1].iter().cloned().collect();
+
+ assert_eq!(q.drain().take(5).count(), 5);
+
+ assert!(q.is_empty());
+}
+
+#[test]
+fn test_drain_sorted() {
+ let mut q: BinaryHeap<_> = [9, 8, 7, 6, 5, 4, 3, 2, 1].iter().cloned().collect();
+
+ assert_eq!(q.drain_sorted().take(5).collect::<Vec<_>>(), vec![9, 8, 7, 6, 5]);
+
+ assert!(q.is_empty());
+}
+
+#[test]
+fn test_drain_sorted_leak() {
+ static DROPS: AtomicU32 = AtomicU32::new(0);
+
+ #[derive(Clone, PartialEq, Eq, PartialOrd, Ord)]
+ struct D(u32, bool);
+
+ impl Drop for D {
+ fn drop(&mut self) {
+ DROPS.fetch_add(1, Ordering::SeqCst);
+
+ if self.1 {
+ panic!("panic in `drop`");
+ }
+ }
+ }
+
+ let mut q = BinaryHeap::from(vec![
+ D(0, false),
+ D(1, false),
+ D(2, false),
+ D(3, true),
+ D(4, false),
+ D(5, false),
+ ]);
+
+ catch_unwind(AssertUnwindSafe(|| drop(q.drain_sorted()))).ok();
+
+ assert_eq!(DROPS.load(Ordering::SeqCst), 6);
+}
+
+#[test]
+fn test_extend_ref() {
+ let mut a = BinaryHeap::new();
+ a.push(1);
+ a.push(2);
+
+ a.extend(&[3, 4, 5]);
+
+ assert_eq!(a.len(), 5);
+ assert_eq!(a.into_sorted_vec(), [1, 2, 3, 4, 5]);
+
+ let mut a = BinaryHeap::new();
+ a.push(1);
+ a.push(2);
+ let mut b = BinaryHeap::new();
+ b.push(3);
+ b.push(4);
+ b.push(5);
+
+ a.extend(&b);
+
+ assert_eq!(a.len(), 5);
+ assert_eq!(a.into_sorted_vec(), [1, 2, 3, 4, 5]);
+}
+
+#[test]
+fn test_append() {
+ let mut a = BinaryHeap::from(vec![-10, 1, 2, 3, 3]);
+ let mut b = BinaryHeap::from(vec![-20, 5, 43]);
+
+ a.append(&mut b);
+
+ assert_eq!(a.into_sorted_vec(), [-20, -10, 1, 2, 3, 3, 5, 43]);
+ assert!(b.is_empty());
+}
+
+#[test]
+fn test_append_to_empty() {
+ let mut a = BinaryHeap::new();
+ let mut b = BinaryHeap::from(vec![-20, 5, 43]);
+
+ a.append(&mut b);
+
+ assert_eq!(a.into_sorted_vec(), [-20, 5, 43]);
+ assert!(b.is_empty());
+}
+
+#[test]
+fn test_extend_specialization() {
+ let mut a = BinaryHeap::from(vec![-10, 1, 2, 3, 3]);
+ let b = BinaryHeap::from(vec![-20, 5, 43]);
+
+ a.extend(b);
+
+ assert_eq!(a.into_sorted_vec(), [-20, -10, 1, 2, 3, 3, 5, 43]);
+}
+
+#[allow(dead_code)]
+fn assert_covariance() {
+ fn drain<'new>(d: Drain<'static, &'static str>) -> Drain<'new, &'new str> {
+ d
+ }
+}
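
`assert_covariance` above is a compile-time-only check: its body is trivial, but the function only type-checks if `Drain` coerces from the longer `'static` lifetime to an arbitrary shorter one, i.e. if it is covariant. A minimal sketch of the same idiom, using a hypothetical `Wrapper` type rather than anything from this module:

```
// Illustrative wrapper; shared references are covariant, so this probe compiles.
struct Wrapper<'a>(&'a str);

#[allow(dead_code)]
fn wrapper_is_covariant<'new>(w: Wrapper<'static>) -> Wrapper<'new> {
    w
}
```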
+
+#[test]
+fn test_retain() {
+ let mut a = BinaryHeap::from(vec![100, 10, 50, 1, 2, 20, 30]);
+ a.retain(|&x| x != 2);
+
+ // Check that 20 moved into 10's place.
+ assert_eq!(a.clone().into_vec(), [100, 20, 50, 1, 10, 30]);
+
+ a.retain(|_| true);
+
+ assert_eq!(a.clone().into_vec(), [100, 20, 50, 1, 10, 30]);
+
+ a.retain(|&x| x < 50);
+
+ assert_eq!(a.clone().into_vec(), [30, 20, 10, 1]);
+
+ a.retain(|_| false);
+
+ assert!(a.is_empty());
+}
+
+// The previous BinaryHeap implementation failed this test.
+//
+// Integrity means that all elements are present after a comparison panics,
+// even if the order might not be correct.
+//
+// Destructors must be called exactly once per element.
+// FIXME: re-enable emscripten once it can unwind again
+#[test]
+#[cfg(not(target_os = "emscripten"))]
+fn panic_safe() {
+ use rand::{seq::SliceRandom, thread_rng};
+ use std::cmp;
+ use std::panic::{self, AssertUnwindSafe};
+ use std::sync::atomic::{AtomicUsize, Ordering};
+
+ static DROP_COUNTER: AtomicUsize = AtomicUsize::new(0);
+
+ #[derive(Eq, PartialEq, Ord, Clone, Debug)]
+ struct PanicOrd<T>(T, bool);
+
+ impl<T> Drop for PanicOrd<T> {
+ fn drop(&mut self) {
+ // update global drop count
+ DROP_COUNTER.fetch_add(1, Ordering::SeqCst);
+ }
+ }
+
+ impl<T: PartialOrd> PartialOrd for PanicOrd<T> {
+ fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
+ if self.1 || other.1 {
+ panic!("Panicking comparison");
+ }
+ self.0.partial_cmp(&other.0)
+ }
+ }
+ let mut rng = thread_rng();
+ const DATASZ: usize = 32;
+ // Miri is too slow
+ let ntest = if cfg!(miri) { 1 } else { 10 };
+
+ // don't use 0 in the data -- we want to catch the zeroed-out case.
+ let data = (1..=DATASZ).collect::<Vec<_>>();
+
+ // since it's a fuzzy test, run several tries.
+ for _ in 0..ntest {
+ for i in 1..=DATASZ {
+ DROP_COUNTER.store(0, Ordering::SeqCst);
+
+ let mut panic_ords: Vec<_> =
+ data.iter().filter(|&&x| x != i).map(|&x| PanicOrd(x, false)).collect();
+ let panic_item = PanicOrd(i, true);
+
+ // heapify the sane items
+ panic_ords.shuffle(&mut rng);
+ let mut heap = BinaryHeap::from(panic_ords);
+ let inner_data;
+
+ {
+ // push the panicking item to the heap and catch the panic
+ let thread_result = {
+ let mut heap_ref = AssertUnwindSafe(&mut heap);
+ panic::catch_unwind(move || {
+ heap_ref.push(panic_item);
+ })
+ };
+ assert!(thread_result.is_err());
+
+ // Assert no elements were dropped
+ let drops = DROP_COUNTER.load(Ordering::SeqCst);
+ assert!(drops == 0, "Must not drop items. drops={}", drops);
+ inner_data = heap.clone().into_vec();
+ drop(heap);
+ }
+ let drops = DROP_COUNTER.load(Ordering::SeqCst);
+ assert_eq!(drops, DATASZ);
+
+ let mut data_sorted = inner_data.into_iter().map(|p| p.0).collect::<Vec<_>>();
+ data_sorted.sort();
+ assert_eq!(data_sorted, data);
+ }
+ }
+}
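
`test_drain_sorted_leak` and `panic_safe` above rely on the same drop-counting technique: a guard type bumps a global counter in `drop`, and `catch_unwind` lets the test observe how many destructors ran across a deliberate panic. A condensed, self-contained sketch of that pattern, using `Vec` purely for brevity:

```
use std::panic::{catch_unwind, AssertUnwindSafe};
use std::sync::atomic::{AtomicUsize, Ordering};

static DROPS: AtomicUsize = AtomicUsize::new(0);

struct Counted;

impl Drop for Counted {
    fn drop(&mut self) {
        DROPS.fetch_add(1, Ordering::SeqCst);
    }
}

fn main() {
    let mut v = vec![Counted, Counted, Counted];
    let result = catch_unwind(AssertUnwindSafe(|| {
        let _popped = v.pop(); // dropped while unwinding past this frame
        panic!("simulated failure mid-operation");
    }));
    assert!(result.is_err());

    // Exactly one destructor ran during unwinding.
    assert_eq!(DROPS.load(Ordering::SeqCst), 1);

    // The two remaining elements are dropped exactly once each.
    drop(v);
    assert_eq!(DROPS.load(Ordering::SeqCst), 3);
}
```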
diff --git a/library/alloc/src/collections/btree/append.rs b/library/alloc/src/collections/btree/append.rs
new file mode 100644
index 000000000..b6989afb6
--- /dev/null
+++ b/library/alloc/src/collections/btree/append.rs
@@ -0,0 +1,107 @@
+use super::merge_iter::MergeIterInner;
+use super::node::{self, Root};
+use core::alloc::Allocator;
+use core::iter::FusedIterator;
+
+impl<K, V> Root<K, V> {
+ /// Appends all key-value pairs from the union of two ascending iterators,
+ /// incrementing a `length` variable along the way. The latter makes it
+ /// easier for the caller to avoid a leak when a drop handler panics.
+ ///
+ /// If both iterators produce the same key, this method drops the pair from
+ /// the left iterator and appends the pair from the right iterator.
+ ///
+ /// If you want the tree to end up in a strictly ascending order, like for
+ /// a `BTreeMap`, both iterators should produce keys in strictly ascending
+ /// order, each greater than all keys in the tree, including any keys
+ /// already in the tree upon entry.
+ pub fn append_from_sorted_iters<I, A: Allocator + Clone>(
+ &mut self,
+ left: I,
+ right: I,
+ length: &mut usize,
+ alloc: A,
+ ) where
+ K: Ord,
+ I: Iterator<Item = (K, V)> + FusedIterator,
+ {
+ // We prepare to merge `left` and `right` into a sorted sequence in linear time.
+ let iter = MergeIter(MergeIterInner::new(left, right));
+
+ // Meanwhile, we build a tree from the sorted sequence in linear time.
+ self.bulk_push(iter, length, alloc)
+ }
+
+ /// Pushes all key-value pairs to the end of the tree, incrementing a
+ /// `length` variable along the way. The latter makes it easier for the
+ /// caller to avoid a leak when the iterator panics.
+ pub fn bulk_push<I, A: Allocator + Clone>(&mut self, iter: I, length: &mut usize, alloc: A)
+ where
+ I: Iterator<Item = (K, V)>,
+ {
+ let mut cur_node = self.borrow_mut().last_leaf_edge().into_node();
+ // Iterate through all key-value pairs, pushing them into nodes at the right level.
+ for (key, value) in iter {
+ // Try to push key-value pair into the current leaf node.
+ if cur_node.len() < node::CAPACITY {
+ cur_node.push(key, value);
+ } else {
+ // No space left, go up and push there.
+ let mut open_node;
+ let mut test_node = cur_node.forget_type();
+ loop {
+ match test_node.ascend() {
+ Ok(parent) => {
+ let parent = parent.into_node();
+ if parent.len() < node::CAPACITY {
+ // Found a node with space left, push here.
+ open_node = parent;
+ break;
+ } else {
+ // Go up again.
+ test_node = parent.forget_type();
+ }
+ }
+ Err(_) => {
+ // We are at the top, create a new root node and push there.
+ open_node = self.push_internal_level(alloc.clone());
+ break;
+ }
+ }
+ }
+
+ // Push key-value pair and new right subtree.
+ let tree_height = open_node.height() - 1;
+ let mut right_tree = Root::new(alloc.clone());
+ for _ in 0..tree_height {
+ right_tree.push_internal_level(alloc.clone());
+ }
+ open_node.push(key, value, right_tree);
+
+ // Go down to the right-most leaf again.
+ cur_node = open_node.forget_type().last_leaf_edge().into_node();
+ }
+
+ // Increment length every iteration, to make sure the map drops
+ // the appended elements even if advancing the iterator panics.
+ *length += 1;
+ }
+ self.fix_right_border_of_plentiful();
+ }
+}
+
+// An iterator for merging two sorted sequences into one
+struct MergeIter<K, V, I: Iterator<Item = (K, V)>>(MergeIterInner<I>);
+
+impl<K: Ord, V, I> Iterator for MergeIter<K, V, I>
+where
+ I: Iterator<Item = (K, V)> + FusedIterator,
+{
+ type Item = (K, V);
+
+ /// If two keys are equal, returns the key-value pair from the right source.
+ fn next(&mut self) -> Option<(K, V)> {
+ let (a_next, b_next) = self.0.nexts(|a: &(K, V), b: &(K, V)| K::cmp(&a.0, &b.0));
+ b_next.or(a_next)
+ }
+}
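
The duplicate-key rule above is observable through the public `BTreeMap::append`, which funnels into `append_from_sorted_iters`: when both maps contain a key, the value from the right-hand map wins. A small sketch:

```
use std::collections::BTreeMap;

fn main() {
    let mut left = BTreeMap::from([(1, "l1"), (2, "l2")]);
    let mut right = BTreeMap::from([(2, "r2"), (3, "r3")]);

    left.append(&mut right);

    // Key 2 was present in both maps; the value from `right` replaced
    // the one from `left`, and `right` is left empty.
    assert_eq!(left, BTreeMap::from([(1, "l1"), (2, "r2"), (3, "r3")]));
    assert!(right.is_empty());
}
```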
diff --git a/library/alloc/src/collections/btree/borrow.rs b/library/alloc/src/collections/btree/borrow.rs
new file mode 100644
index 000000000..016f139a5
--- /dev/null
+++ b/library/alloc/src/collections/btree/borrow.rs
@@ -0,0 +1,47 @@
+use core::marker::PhantomData;
+use core::ptr::NonNull;
+
+/// Models a reborrow of some unique reference, when you know that the reborrow
+/// and all its descendants (i.e., all pointers and references derived from it)
+/// will not be used any more at some point, after which you want to use the
+/// original unique reference again.
+///
+/// The borrow checker usually handles this stacking of borrows for you, but
+/// some control flows that accomplish this stacking are too complicated for
+/// the compiler to follow. A `DormantMutRef` allows you to check borrowing
+/// yourself, while still expressing its stacked nature, and encapsulating
+/// the raw pointer code needed to do this without undefined behavior.
+pub struct DormantMutRef<'a, T> {
+ ptr: NonNull<T>,
+ _marker: PhantomData<&'a mut T>,
+}
+
+unsafe impl<'a, T> Sync for DormantMutRef<'a, T> where &'a mut T: Sync {}
+unsafe impl<'a, T> Send for DormantMutRef<'a, T> where &'a mut T: Send {}
+
+impl<'a, T> DormantMutRef<'a, T> {
+ /// Capture a unique borrow, and immediately reborrow it. For the compiler,
+ /// the lifetime of the new reference is the same as the lifetime of the
+ /// original reference, but you promise to use it for a shorter period.
+ pub fn new(t: &'a mut T) -> (&'a mut T, Self) {
+ let ptr = NonNull::from(t);
+ // SAFETY: we hold the borrow throughout 'a via `_marker`, and we expose
+ // only this reference, so it is unique.
+ let new_ref = unsafe { &mut *ptr.as_ptr() };
+ (new_ref, Self { ptr, _marker: PhantomData })
+ }
+
+ /// Revert to the unique borrow initially captured.
+ ///
+ /// # Safety
+ ///
+ /// The reborrow must have ended, i.e., the reference returned by `new` and
+ /// all pointers and references derived from it, must not be used anymore.
+ pub unsafe fn awaken(self) -> &'a mut T {
+ // SAFETY: our own safety conditions imply this reference is again unique.
+ unsafe { &mut *self.ptr.as_ptr() }
+ }
+}
+
+#[cfg(test)]
+mod tests;
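
A sketch of the control flow `DormantMutRef` enables, written as if the type were importable next to `std::collections::BTreeMap` (it is crate-private, so this is illustrative rather than user-facing code). With a plain `&mut`, the early `return` below keeps the reborrow alive for the entire call and the fall-through arm fails to borrow-check; routing the second access through the dormant reference expresses that the reborrow is dead on that path:

```
use std::collections::BTreeMap;

fn get_or_insert(map: &mut BTreeMap<i32, String>, key: i32) -> &mut String {
    let (inner, dormant) = DormantMutRef::new(map);
    if let Some(v) = inner.get_mut(&key) {
        // The reborrow escapes through the return value...
        return v;
    }
    // ...but on this path it is dead, which is exactly the condition
    // `awaken` requires us to guarantee.
    let map = unsafe { dormant.awaken() };
    map.entry(key).or_insert_with(String::new)
}
```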
diff --git a/library/alloc/src/collections/btree/borrow/tests.rs b/library/alloc/src/collections/btree/borrow/tests.rs
new file mode 100644
index 000000000..56a8434fc
--- /dev/null
+++ b/library/alloc/src/collections/btree/borrow/tests.rs
@@ -0,0 +1,19 @@
+use super::DormantMutRef;
+
+#[test]
+fn test_borrow() {
+ let mut data = 1;
+ let mut stack = vec![];
+ let mut rr = &mut data;
+ for factor in [2, 3, 7].iter() {
+ let (r, dormant_r) = DormantMutRef::new(rr);
+ rr = r;
+ assert_eq!(*rr, 1);
+ stack.push((factor, dormant_r));
+ }
+ while let Some((factor, dormant_r)) = stack.pop() {
+ let r = unsafe { dormant_r.awaken() };
+ *r *= factor;
+ }
+ assert_eq!(data, 42);
+}
diff --git a/library/alloc/src/collections/btree/dedup_sorted_iter.rs b/library/alloc/src/collections/btree/dedup_sorted_iter.rs
new file mode 100644
index 000000000..60bf83b83
--- /dev/null
+++ b/library/alloc/src/collections/btree/dedup_sorted_iter.rs
@@ -0,0 +1,47 @@
+use core::iter::Peekable;
+
+/// An iterator for deduplicating the keys of a sorted iterator.
+/// When duplicate keys are encountered, only the last key-value pair is yielded.
+///
+/// Used by [`BTreeMap::bulk_build_from_sorted_iter`].
+pub struct DedupSortedIter<K, V, I>
+where
+ I: Iterator<Item = (K, V)>,
+{
+ iter: Peekable<I>,
+}
+
+impl<K, V, I> DedupSortedIter<K, V, I>
+where
+ I: Iterator<Item = (K, V)>,
+{
+ pub fn new(iter: I) -> Self {
+ Self { iter: iter.peekable() }
+ }
+}
+
+impl<K, V, I> Iterator for DedupSortedIter<K, V, I>
+where
+ K: Eq,
+ I: Iterator<Item = (K, V)>,
+{
+ type Item = (K, V);
+
+ fn next(&mut self) -> Option<(K, V)> {
+ loop {
+ let next = match self.iter.next() {
+ Some(next) => next,
+ None => return None,
+ };
+
+ let peeked = match self.iter.peek() {
+ Some(peeked) => peeked,
+ None => return Some(next),
+ };
+
+ if next.0 != peeked.0 {
+ return Some(next);
+ }
+ }
+ }
+}
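
Because keeping the last of several equal keys matches what sequential insertion produces, the contract is visible through ordinary `BTreeMap` construction. A quick sketch:

```
use std::collections::BTreeMap;

fn main() {
    // Key 1 appears twice; only its last value survives, mirroring the
    // `DedupSortedIter` contract above.
    let map: BTreeMap<_, _> =
        vec![(1, "first"), (2, "only"), (1, "last")].into_iter().collect();

    assert_eq!(map[&1], "last");
    assert_eq!(map.len(), 2);
}
```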
diff --git a/library/alloc/src/collections/btree/fix.rs b/library/alloc/src/collections/btree/fix.rs
new file mode 100644
index 000000000..91b612180
--- /dev/null
+++ b/library/alloc/src/collections/btree/fix.rs
@@ -0,0 +1,179 @@
+use super::map::MIN_LEN;
+use super::node::{marker, ForceResult::*, Handle, LeftOrRight::*, NodeRef, Root};
+use core::alloc::Allocator;
+
+impl<'a, K: 'a, V: 'a> NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal> {
+ /// Stocks up a possibly underfull node by merging with or stealing from a
+ /// sibling. If successful but at the cost of shrinking the parent node,
+ /// returns that shrunk parent node. Returns an `Err` if the node is
+ /// an empty root.
+ fn fix_node_through_parent<A: Allocator + Clone>(
+ self,
+ alloc: A,
+ ) -> Result<Option<NodeRef<marker::Mut<'a>, K, V, marker::Internal>>, Self> {
+ let len = self.len();
+ if len >= MIN_LEN {
+ Ok(None)
+ } else {
+ match self.choose_parent_kv() {
+ Ok(Left(mut left_parent_kv)) => {
+ if left_parent_kv.can_merge() {
+ let parent = left_parent_kv.merge_tracking_parent(alloc);
+ Ok(Some(parent))
+ } else {
+ left_parent_kv.bulk_steal_left(MIN_LEN - len);
+ Ok(None)
+ }
+ }
+ Ok(Right(mut right_parent_kv)) => {
+ if right_parent_kv.can_merge() {
+ let parent = right_parent_kv.merge_tracking_parent(alloc);
+ Ok(Some(parent))
+ } else {
+ right_parent_kv.bulk_steal_right(MIN_LEN - len);
+ Ok(None)
+ }
+ }
+ Err(root) => {
+ if len > 0 {
+ Ok(None)
+ } else {
+ Err(root)
+ }
+ }
+ }
+ }
+ }
+}
+
+impl<'a, K: 'a, V: 'a> NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal> {
+ /// Stocks up a possibly underfull node, and if that causes its parent node
+ /// to shrink, stocks up the parent, recursively.
+ /// Returns `true` if it fixed the tree, `false` if it couldn't because the
+ /// root node became empty.
+ ///
+ /// This method does not expect ancestors to already be underfull upon entry
+ /// and panics if it encounters an empty ancestor.
+ pub fn fix_node_and_affected_ancestors<A: Allocator + Clone>(mut self, alloc: A) -> bool {
+ loop {
+ match self.fix_node_through_parent(alloc.clone()) {
+ Ok(Some(parent)) => self = parent.forget_type(),
+ Ok(None) => return true,
+ Err(_) => return false,
+ }
+ }
+ }
+}
+
+impl<K, V> Root<K, V> {
+ /// Removes empty levels on the top, but keeps an empty leaf if the entire tree is empty.
+ pub fn fix_top<A: Allocator + Clone>(&mut self, alloc: A) {
+ while self.height() > 0 && self.len() == 0 {
+ self.pop_internal_level(alloc.clone());
+ }
+ }
+
+ /// Stocks up or merges away any underfull nodes on the right border of the
+ /// tree. The other nodes, those that are neither the root nor a rightmost
+ /// edge, must already have at least MIN_LEN elements.
+ pub fn fix_right_border<A: Allocator + Clone>(&mut self, alloc: A) {
+ self.fix_top(alloc.clone());
+ if self.len() > 0 {
+ self.borrow_mut().last_kv().fix_right_border_of_right_edge(alloc.clone());
+ self.fix_top(alloc);
+ }
+ }
+
+ /// The symmetric counterpart of `fix_right_border`.
+ pub fn fix_left_border<A: Allocator + Clone>(&mut self, alloc: A) {
+ self.fix_top(alloc.clone());
+ if self.len() > 0 {
+ self.borrow_mut().first_kv().fix_left_border_of_left_edge(alloc.clone());
+ self.fix_top(alloc);
+ }
+ }
+
+ /// Stocks up any underfull nodes on the right border of the tree.
+ /// The other nodes, those that are neither the root nor a rightmost edge,
+ /// must be prepared to have up to MIN_LEN elements stolen.
+ pub fn fix_right_border_of_plentiful(&mut self) {
+ let mut cur_node = self.borrow_mut();
+ while let Internal(internal) = cur_node.force() {
+ // Check if right-most child is underfull.
+ let mut last_kv = internal.last_kv().consider_for_balancing();
+ debug_assert!(last_kv.left_child_len() >= MIN_LEN * 2);
+ let right_child_len = last_kv.right_child_len();
+ if right_child_len < MIN_LEN {
+ // We need to steal.
+ last_kv.bulk_steal_left(MIN_LEN - right_child_len);
+ }
+
+ // Go further down.
+ cur_node = last_kv.into_right_child();
+ }
+ }
+}
+
+impl<'a, K: 'a, V: 'a> Handle<NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>, marker::KV> {
+ fn fix_left_border_of_left_edge<A: Allocator + Clone>(mut self, alloc: A) {
+ while let Internal(internal_kv) = self.force() {
+ self = internal_kv.fix_left_child(alloc.clone()).first_kv();
+ debug_assert!(self.reborrow().into_node().len() > MIN_LEN);
+ }
+ }
+
+ fn fix_right_border_of_right_edge<A: Allocator + Clone>(mut self, alloc: A) {
+ while let Internal(internal_kv) = self.force() {
+ self = internal_kv.fix_right_child(alloc.clone()).last_kv();
+ debug_assert!(self.reborrow().into_node().len() > MIN_LEN);
+ }
+ }
+}
+
+impl<'a, K: 'a, V: 'a> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::KV> {
+ /// Stocks up the left child, assuming the right child isn't underfull, and
+ /// provisions an extra element to allow merging its children in turn
+ /// without becoming underfull.
+ /// Returns the left child.
+ fn fix_left_child<A: Allocator + Clone>(
+ self,
+ alloc: A,
+ ) -> NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal> {
+ let mut internal_kv = self.consider_for_balancing();
+ let left_len = internal_kv.left_child_len();
+ debug_assert!(internal_kv.right_child_len() >= MIN_LEN);
+ if internal_kv.can_merge() {
+ internal_kv.merge_tracking_child(alloc)
+ } else {
+ // `MIN_LEN + 1` to avoid readjust if merge happens on the next level.
+ let count = (MIN_LEN + 1).saturating_sub(left_len);
+ if count > 0 {
+ internal_kv.bulk_steal_right(count);
+ }
+ internal_kv.into_left_child()
+ }
+ }
+
+ /// Stocks up the right child, assuming the left child isn't underfull, and
+ /// provisions an extra element to allow merging its children in turn
+ /// without becoming underfull.
+ /// Returns the node in which the right child's contents ended up.
+ fn fix_right_child<A: Allocator + Clone>(
+ self,
+ alloc: A,
+ ) -> NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal> {
+ let mut internal_kv = self.consider_for_balancing();
+ let right_len = internal_kv.right_child_len();
+ debug_assert!(internal_kv.left_child_len() >= MIN_LEN);
+ if internal_kv.can_merge() {
+ internal_kv.merge_tracking_child(alloc)
+ } else {
+ // `MIN_LEN + 1` to avoid readjust if merge happens on the next level.
+ let count = (MIN_LEN + 1).saturating_sub(right_len);
+ if count > 0 {
+ internal_kv.bulk_steal_left(count);
+ }
+ internal_kv.into_right_child()
+ }
+ }
+}
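
These border-fixing routines are what keep both halves of a `split_off` valid trees: after the cut, only nodes along the split edge can be underfull, and the fix-ups above restore the minimum-fill invariant there. A sketch of the observable effect through the public API:

```
use std::collections::BTreeMap;

fn main() {
    let mut map: BTreeMap<i32, i32> = (0..100).map(|i| (i, i * 10)).collect();

    // `split_off` cuts the tree in two; afterwards both trees must satisfy
    // the minimum-fill invariants, which the border-fixing routines restore.
    let high = map.split_off(&50);

    assert_eq!(map.len(), 50);
    assert_eq!(high.len(), 50);
    assert!(map.keys().all(|&k| k < 50));
    assert!(high.keys().all(|&k| k >= 50));
}
```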
diff --git a/library/alloc/src/collections/btree/map.rs b/library/alloc/src/collections/btree/map.rs
new file mode 100644
index 000000000..cacbd54b6
--- /dev/null
+++ b/library/alloc/src/collections/btree/map.rs
@@ -0,0 +1,2423 @@
+use crate::vec::Vec;
+use core::borrow::Borrow;
+use core::cmp::Ordering;
+use core::fmt::{self, Debug};
+use core::hash::{Hash, Hasher};
+use core::iter::{FromIterator, FusedIterator};
+use core::marker::PhantomData;
+use core::mem::{self, ManuallyDrop};
+use core::ops::{Index, RangeBounds};
+use core::ptr;
+
+use crate::alloc::{Allocator, Global};
+
+use super::borrow::DormantMutRef;
+use super::dedup_sorted_iter::DedupSortedIter;
+use super::navigate::{LazyLeafRange, LeafRange};
+use super::node::{self, marker, ForceResult::*, Handle, NodeRef, Root};
+use super::search::SearchResult::*;
+use super::set_val::SetValZST;
+
+mod entry;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use entry::{Entry, OccupiedEntry, OccupiedError, VacantEntry};
+
+use Entry::*;
+
+/// Minimum number of elements in a node that is not a root.
+/// We might temporarily have fewer elements during methods.
+pub(super) const MIN_LEN: usize = node::MIN_LEN_AFTER_SPLIT;
+
+// A tree in a `BTreeMap` is a tree in the `node` module with additional invariants:
+// - Keys must appear in ascending order (according to the key's type).
+// - Every non-leaf node contains at least 1 element (has at least 2 children).
+// - Every non-root node contains at least MIN_LEN elements.
+//
+// An empty map is represented either by the absence of a root node or by a
+// root node that is an empty leaf.
+
+/// An ordered map based on a [B-Tree].
+///
+/// B-Trees represent a fundamental compromise between cache-efficiency and actually minimizing
+/// the amount of work performed in a search. In theory, a binary search tree (BST) is the optimal
+/// choice for a sorted map, as a perfectly balanced BST performs the theoretical minimum amount of
+/// comparisons necessary to find an element (log<sub>2</sub>n). However, in practice the way this
+/// is done is *very* inefficient for modern computer architectures. In particular, every element
+/// is stored in its own individually heap-allocated node. This means that every single insertion
+/// triggers a heap-allocation, and every single comparison should be a cache-miss. Since these
+/// are both notably expensive things to do in practice, we are forced to at very least reconsider
+/// the BST strategy.
+///
+/// A B-Tree instead makes each node contain B-1 to 2B-1 elements in a contiguous array. By doing
+/// this, we reduce the number of allocations by a factor of B, and improve cache efficiency in
+/// searches. However, this does mean that searches will have to do *more* comparisons on average.
+/// The precise number of comparisons depends on the node search strategy used. For optimal cache
+/// efficiency, one could search the nodes linearly. For optimal comparisons, one could search
+/// the node using binary search. As a compromise, one could also perform a linear search
+/// that initially only checks every i<sup>th</sup> element for some choice of i.
+///
+/// Currently, our implementation simply performs naive linear search. This provides excellent
+/// performance on *small* nodes of elements which are cheap to compare. However in the future we
+/// would like to further explore choosing the optimal search strategy based on the choice of B,
+/// and possibly other factors. Using linear search, searching for a random element is expected
+/// to take B * log(n) comparisons, which is generally worse than a BST. In practice,
+/// however, performance is excellent.
+///
+/// It is a logic error for a key to be modified in such a way that the key's ordering relative to
+/// any other key, as determined by the [`Ord`] trait, changes while it is in the map. This is
+/// normally only possible through [`Cell`], [`RefCell`], global state, I/O, or unsafe code.
+/// The behavior resulting from such a logic error is not specified, but will be encapsulated to the
+/// `BTreeMap` that observed the logic error and not result in undefined behavior. This could
+/// include panics, incorrect results, aborts, memory leaks, and non-termination.
+///
+/// Iterators obtained from functions such as [`BTreeMap::iter`], [`BTreeMap::values`], or
+/// [`BTreeMap::keys`] produce their items in order by key, and take worst-case logarithmic and
+/// amortized constant time per item returned.
+///
+/// [B-Tree]: https://en.wikipedia.org/wiki/B-tree
+/// [`Cell`]: core::cell::Cell
+/// [`RefCell`]: core::cell::RefCell
+///
+/// # Examples
+///
+/// ```
+/// use std::collections::BTreeMap;
+///
+/// // type inference lets us omit an explicit type signature (which
+/// // would be `BTreeMap<&str, &str>` in this example).
+/// let mut movie_reviews = BTreeMap::new();
+///
+/// // review some movies.
+/// movie_reviews.insert("Office Space", "Deals with real issues in the workplace.");
+/// movie_reviews.insert("Pulp Fiction", "Masterpiece.");
+/// movie_reviews.insert("The Godfather", "Very enjoyable.");
+/// movie_reviews.insert("The Blues Brothers", "Eye lyked it a lot.");
+///
+/// // check for a specific one.
+/// if !movie_reviews.contains_key("Les Misérables") {
+/// println!("We've got {} reviews, but Les Misérables ain't one.",
+/// movie_reviews.len());
+/// }
+///
+/// // oops, this review has a lot of spelling mistakes, let's delete it.
+/// movie_reviews.remove("The Blues Brothers");
+///
+/// // look up the values associated with some keys.
+/// let to_find = ["Up!", "Office Space"];
+/// for movie in &to_find {
+/// match movie_reviews.get(movie) {
+/// Some(review) => println!("{movie}: {review}"),
+/// None => println!("{movie} is unreviewed.")
+/// }
+/// }
+///
+/// // Look up the value for a key (will panic if the key is not found).
+/// println!("Movie review: {}", movie_reviews["Office Space"]);
+///
+/// // iterate over everything.
+/// for (movie, review) in &movie_reviews {
+/// println!("{movie}: \"{review}\"");
+/// }
+/// ```
+///
+/// A `BTreeMap` with a known list of items can be initialized from an array:
+///
+/// ```
+/// use std::collections::BTreeMap;
+///
+/// let solar_distance = BTreeMap::from([
+/// ("Mercury", 0.4),
+/// ("Venus", 0.7),
+/// ("Earth", 1.0),
+/// ("Mars", 1.5),
+/// ]);
+/// ```
+///
+/// `BTreeMap` implements an [`Entry API`], which allows for complex
+/// methods of getting, setting, updating and removing keys and their values:
+///
+/// [`Entry API`]: BTreeMap::entry
+///
+/// ```
+/// use std::collections::BTreeMap;
+///
+/// // type inference lets us omit an explicit type signature (which
+/// // would be `BTreeMap<&str, u8>` in this example).
+/// let mut player_stats = BTreeMap::new();
+///
+/// fn random_stat_buff() -> u8 {
+/// // could actually return some random value here - let's just return
+/// // some fixed value for now
+/// 42
+/// }
+///
+/// // insert a key only if it doesn't already exist
+/// player_stats.entry("health").or_insert(100);
+///
+/// // insert a key using a function that provides a new value only if it
+/// // doesn't already exist
+/// player_stats.entry("defence").or_insert_with(random_stat_buff);
+///
+/// // update a key, guarding against the key possibly not being set
+/// let stat = player_stats.entry("attack").or_insert(100);
+/// *stat += random_stat_buff();
+///
+/// // modify an entry before an insert with in-place mutation
+/// player_stats.entry("mana").and_modify(|mana| *mana += 200).or_insert(100);
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "BTreeMap")]
+#[rustc_insignificant_dtor]
+pub struct BTreeMap<
+ K,
+ V,
+ #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator + Clone = Global,
+> {
+ root: Option<Root<K, V>>,
+ length: usize,
+ /// `ManuallyDrop` to control drop order (needs to be dropped after all the nodes).
+ pub(super) alloc: ManuallyDrop<A>,
+ // For dropck; the `Box` avoids making the `Unpin` impl more strict than before
+ _marker: PhantomData<crate::boxed::Box<(K, V)>>,
+}
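
For scale, a rough reading of the `B * log(n)` search estimate from the doc comment above. The sketch assumes the logarithm is taken base B and that B = 6, the branching factor this version defines in the `node` module (node capacity 2B - 1 = 11); it is an estimate, not a measurement.

```
fn estimated_comparisons(b: f64, n: f64) -> f64 {
    // Roughly B comparisons of linear scan per node, times the tree
    // depth, which is about log base B of n.
    b * (n.ln() / b.ln())
}

fn main() {
    // Assuming B = 6 and a million entries: about 6 * log_6(1e6) ≈ 46.
    println!("{:.0}", estimated_comparisons(6.0, 1_000_000.0));
}
```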
+
+#[stable(feature = "btree_drop", since = "1.7.0")]
+unsafe impl<#[may_dangle] K, #[may_dangle] V, A: Allocator + Clone> Drop for BTreeMap<K, V, A> {
+ fn drop(&mut self) {
+ drop(unsafe { ptr::read(self) }.into_iter())
+ }
+}
+
+// FIXME: This implementation is "wrong", but changing it would be a breaking change.
+// (The bounds of the automatic `UnwindSafe` implementation have been like this since Rust 1.50.)
+// Maybe we can fix it nonetheless with a crater run, or if the `UnwindSafe`
+// traits are deprecated, or disarmed (no longer causing hard errors) in the future.
+#[stable(feature = "btree_unwindsafe", since = "1.64.0")]
+impl<K, V, A: Allocator + Clone> core::panic::UnwindSafe for BTreeMap<K, V, A>
+where
+ A: core::panic::UnwindSafe,
+ K: core::panic::RefUnwindSafe,
+ V: core::panic::RefUnwindSafe,
+{
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K: Clone, V: Clone, A: Allocator + Clone> Clone for BTreeMap<K, V, A> {
+ fn clone(&self) -> BTreeMap<K, V, A> {
+ fn clone_subtree<'a, K: Clone, V: Clone, A: Allocator + Clone>(
+ node: NodeRef<marker::Immut<'a>, K, V, marker::LeafOrInternal>,
+ alloc: A,
+ ) -> BTreeMap<K, V, A>
+ where
+ K: 'a,
+ V: 'a,
+ {
+ match node.force() {
+ Leaf(leaf) => {
+ let mut out_tree = BTreeMap {
+ root: Some(Root::new(alloc.clone())),
+ length: 0,
+ alloc: ManuallyDrop::new(alloc),
+ _marker: PhantomData,
+ };
+
+ {
+ let root = out_tree.root.as_mut().unwrap(); // unwrap succeeds because we just wrapped
+ let mut out_node = match root.borrow_mut().force() {
+ Leaf(leaf) => leaf,
+ Internal(_) => unreachable!(),
+ };
+
+ let mut in_edge = leaf.first_edge();
+ while let Ok(kv) = in_edge.right_kv() {
+ let (k, v) = kv.into_kv();
+ in_edge = kv.right_edge();
+
+ out_node.push(k.clone(), v.clone());
+ out_tree.length += 1;
+ }
+ }
+
+ out_tree
+ }
+ Internal(internal) => {
+ let mut out_tree =
+ clone_subtree(internal.first_edge().descend(), alloc.clone());
+
+ {
+ let out_root = out_tree.root.as_mut().unwrap();
+ let mut out_node = out_root.push_internal_level(alloc.clone());
+ let mut in_edge = internal.first_edge();
+ while let Ok(kv) = in_edge.right_kv() {
+ let (k, v) = kv.into_kv();
+ in_edge = kv.right_edge();
+
+ let k = (*k).clone();
+ let v = (*v).clone();
+ let subtree = clone_subtree(in_edge.descend(), alloc.clone());
+
+ // We can't destructure subtree directly
+ // because BTreeMap implements Drop
+ let (subroot, sublength) = unsafe {
+ let subtree = ManuallyDrop::new(subtree);
+ let root = ptr::read(&subtree.root);
+ let length = subtree.length;
+ (root, length)
+ };
+
+ out_node.push(
+ k,
+ v,
+ subroot.unwrap_or_else(|| Root::new(alloc.clone())),
+ );
+ out_tree.length += 1 + sublength;
+ }
+ }
+
+ out_tree
+ }
+ }
+ }
+
+ if self.is_empty() {
+ BTreeMap::new_in((*self.alloc).clone())
+ } else {
+ clone_subtree(self.root.as_ref().unwrap().reborrow(), (*self.alloc).clone()) // unwrap succeeds because not empty
+ }
+ }
+}
+
+impl<K, Q: ?Sized, A: Allocator + Clone> super::Recover<Q> for BTreeMap<K, SetValZST, A>
+where
+ K: Borrow<Q> + Ord,
+ Q: Ord,
+{
+ type Key = K;
+
+ fn get(&self, key: &Q) -> Option<&K> {
+ let root_node = self.root.as_ref()?.reborrow();
+ match root_node.search_tree(key) {
+ Found(handle) => Some(handle.into_kv().0),
+ GoDown(_) => None,
+ }
+ }
+
+ fn take(&mut self, key: &Q) -> Option<K> {
+ let (map, dormant_map) = DormantMutRef::new(self);
+ let root_node = map.root.as_mut()?.borrow_mut();
+ match root_node.search_tree(key) {
+ Found(handle) => Some(
+ OccupiedEntry {
+ handle,
+ dormant_map,
+ alloc: (*map.alloc).clone(),
+ _marker: PhantomData,
+ }
+ .remove_kv()
+ .0,
+ ),
+ GoDown(_) => None,
+ }
+ }
+
+ fn replace(&mut self, key: K) -> Option<K> {
+ let (map, dormant_map) = DormantMutRef::new(self);
+ let root_node =
+ map.root.get_or_insert_with(|| Root::new((*map.alloc).clone())).borrow_mut();
+ match root_node.search_tree::<K>(&key) {
+ Found(mut kv) => Some(mem::replace(kv.key_mut(), key)),
+ GoDown(handle) => {
+ VacantEntry {
+ key,
+ handle: Some(handle),
+ dormant_map,
+ alloc: (*map.alloc).clone(),
+ _marker: PhantomData,
+ }
+ .insert(SetValZST::default());
+ None
+ }
+ }
+ }
+}
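
This `Recover` implementation backs `BTreeSet::get`, `BTreeSet::take`, and `BTreeSet::replace`, where the stored key itself is the payload of interest. A sketch with a key type whose ordering deliberately ignores part of its data (`Tagged` is purely illustrative):

```
use std::cmp::Ordering;
use std::collections::BTreeSet;

#[derive(Debug)]
struct Tagged(i32, &'static str);

// Order and equality deliberately ignore the tag, so two `Tagged`
// values can compare equal while carrying different payloads.
impl PartialEq for Tagged {
    fn eq(&self, other: &Self) -> bool {
        self.0 == other.0
    }
}
impl Eq for Tagged {}
impl PartialOrd for Tagged {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}
impl Ord for Tagged {
    fn cmp(&self, other: &Self) -> Ordering {
        self.0.cmp(&other.0)
    }
}

fn main() {
    let mut set = BTreeSet::new();
    set.insert(Tagged(1, "old"));

    // `replace` swaps in the equal-comparing key and returns the old one.
    let previous = set.replace(Tagged(1, "new"));
    assert_eq!(previous.map(|t| t.1), Some("old"));

    // `take` removes and returns the stored key, tag and all.
    let taken = set.take(&Tagged(1, "probe"));
    assert_eq!(taken.map(|t| t.1), Some("new"));
    assert!(set.is_empty());
}
```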
+
+/// An iterator over the entries of a `BTreeMap`.
+///
+/// This `struct` is created by the [`iter`] method on [`BTreeMap`]. See its
+/// documentation for more.
+///
+/// [`iter`]: BTreeMap::iter
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Iter<'a, K: 'a, V: 'a> {
+ range: LazyLeafRange<marker::Immut<'a>, K, V>,
+ length: usize,
+}
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for Iter<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.clone()).finish()
+ }
+}
+
+/// A mutable iterator over the entries of a `BTreeMap`.
+///
+/// This `struct` is created by the [`iter_mut`] method on [`BTreeMap`]. See its
+/// documentation for more.
+///
+/// [`iter_mut`]: BTreeMap::iter_mut
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct IterMut<'a, K: 'a, V: 'a> {
+ range: LazyLeafRange<marker::ValMut<'a>, K, V>,
+ length: usize,
+
+ // Be invariant in `K` and `V`
+ _marker: PhantomData<&'a mut (K, V)>,
+}
+
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for IterMut<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let range = Iter { range: self.range.reborrow(), length: self.length };
+ f.debug_list().entries(range).finish()
+ }
+}
+
+/// An owning iterator over the entries of a `BTreeMap`.
+///
+/// This `struct` is created by the [`into_iter`] method on [`BTreeMap`]
+/// (provided by the [`IntoIterator`] trait). See its documentation for more.
+///
+/// [`into_iter`]: IntoIterator::into_iter
+/// [`IntoIterator`]: core::iter::IntoIterator
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_insignificant_dtor]
+pub struct IntoIter<
+ K,
+ V,
+ #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator + Clone = Global,
+> {
+ range: LazyLeafRange<marker::Dying, K, V>,
+ length: usize,
+ /// The BTreeMap will outlive this IntoIter so we don't care about drop order for `alloc`.
+ alloc: A,
+}
+
+impl<K, V, A: Allocator + Clone> IntoIter<K, V, A> {
+ /// Returns an iterator of references over the remaining items.
+ #[inline]
+ pub(super) fn iter(&self) -> Iter<'_, K, V> {
+ Iter { range: self.range.reborrow(), length: self.length }
+ }
+}
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<K: Debug, V: Debug, A: Allocator + Clone> Debug for IntoIter<K, V, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.iter()).finish()
+ }
+}
+
+/// An iterator over the keys of a `BTreeMap`.
+///
+/// This `struct` is created by the [`keys`] method on [`BTreeMap`]. See its
+/// documentation for more.
+///
+/// [`keys`]: BTreeMap::keys
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Keys<'a, K, V> {
+ inner: Iter<'a, K, V>,
+}
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<K: fmt::Debug, V> fmt::Debug for Keys<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.clone()).finish()
+ }
+}
+
+/// An iterator over the values of a `BTreeMap`.
+///
+/// This `struct` is created by the [`values`] method on [`BTreeMap`]. See its
+/// documentation for more.
+///
+/// [`values`]: BTreeMap::values
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Values<'a, K, V> {
+ inner: Iter<'a, K, V>,
+}
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<K, V: fmt::Debug> fmt::Debug for Values<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.clone()).finish()
+ }
+}
+
+/// A mutable iterator over the values of a `BTreeMap`.
+///
+/// This `struct` is created by the [`values_mut`] method on [`BTreeMap`]. See its
+/// documentation for more.
+///
+/// [`values_mut`]: BTreeMap::values_mut
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "map_values_mut", since = "1.10.0")]
+pub struct ValuesMut<'a, K, V> {
+ inner: IterMut<'a, K, V>,
+}
+
+#[stable(feature = "map_values_mut", since = "1.10.0")]
+impl<K, V: fmt::Debug> fmt::Debug for ValuesMut<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.inner.iter().map(|(_, val)| val)).finish()
+ }
+}
+
+/// An owning iterator over the keys of a `BTreeMap`.
+///
+/// This `struct` is created by the [`into_keys`] method on [`BTreeMap`].
+/// See its documentation for more.
+///
+/// [`into_keys`]: BTreeMap::into_keys
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
+pub struct IntoKeys<K, V, A: Allocator + Clone = Global> {
+ inner: IntoIter<K, V, A>,
+}
+
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
+impl<K: fmt::Debug, V, A: Allocator + Clone> fmt::Debug for IntoKeys<K, V, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.inner.iter().map(|(key, _)| key)).finish()
+ }
+}
+
+/// An owning iterator over the values of a `BTreeMap`.
+///
+/// This `struct` is created by the [`into_values`] method on [`BTreeMap`].
+/// See its documentation for more.
+///
+/// [`into_values`]: BTreeMap::into_values
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
+pub struct IntoValues<
+ K,
+ V,
+ #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator + Clone = Global,
+> {
+ inner: IntoIter<K, V, A>,
+}
+
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
+impl<K, V: fmt::Debug, A: Allocator + Clone> fmt::Debug for IntoValues<K, V, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.inner.iter().map(|(_, val)| val)).finish()
+ }
+}
+
+/// An iterator over a sub-range of entries in a `BTreeMap`.
+///
+/// This `struct` is created by the [`range`] method on [`BTreeMap`]. See its
+/// documentation for more.
+///
+/// [`range`]: BTreeMap::range
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "btree_range", since = "1.17.0")]
+pub struct Range<'a, K: 'a, V: 'a> {
+ inner: LeafRange<marker::Immut<'a>, K, V>,
+}
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for Range<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.clone()).finish()
+ }
+}
+
+/// A mutable iterator over a sub-range of entries in a `BTreeMap`.
+///
+/// This `struct` is created by the [`range_mut`] method on [`BTreeMap`]. See its
+/// documentation for more.
+///
+/// [`range_mut`]: BTreeMap::range_mut
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "btree_range", since = "1.17.0")]
+pub struct RangeMut<'a, K: 'a, V: 'a> {
+ inner: LeafRange<marker::ValMut<'a>, K, V>,
+
+ // Be invariant in `K` and `V`
+ _marker: PhantomData<&'a mut (K, V)>,
+}
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for RangeMut<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let range = Range { inner: self.inner.reborrow() };
+ f.debug_list().entries(range).finish()
+ }
+}
+
+impl<K, V> BTreeMap<K, V> {
+ /// Makes a new, empty `BTreeMap`.
+ ///
+ /// Does not allocate anything on its own.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map = BTreeMap::new();
+ ///
+ /// // entries can now be inserted into the empty map
+ /// map.insert(1, "a");
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_btree_new", issue = "71835")]
+ #[must_use]
+ pub const fn new() -> BTreeMap<K, V> {
+ BTreeMap { root: None, length: 0, alloc: ManuallyDrop::new(Global), _marker: PhantomData }
+ }
+}
+
+impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
+ /// Clears the map, removing all elements.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut a = BTreeMap::new();
+ /// a.insert(1, "a");
+ /// a.clear();
+ /// assert!(a.is_empty());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn clear(&mut self) {
+ // avoid moving the allocator
+ mem::drop(BTreeMap {
+ root: mem::replace(&mut self.root, None),
+ length: mem::replace(&mut self.length, 0),
+ alloc: self.alloc.clone(),
+ _marker: PhantomData,
+ });
+ }
+
+ /// Makes a new, empty `BTreeMap` with a reasonable choice for B.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// # #![feature(allocator_api)]
+ /// # #![feature(btreemap_alloc)]
+ /// use std::collections::BTreeMap;
+ /// use std::alloc::Global;
+ ///
+ /// let mut map = BTreeMap::new_in(Global);
+ ///
+ /// // entries can now be inserted into the empty map
+ /// map.insert(1, "a");
+ /// ```
+ #[unstable(feature = "btreemap_alloc", issue = "32838")]
+ pub fn new_in(alloc: A) -> BTreeMap<K, V, A> {
+ BTreeMap { root: None, length: 0, alloc: ManuallyDrop::new(alloc), _marker: PhantomData }
+ }
+}
+
+impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
+ /// Returns a reference to the value corresponding to the key.
+ ///
+ /// The key may be any borrowed form of the map's key type, but the ordering
+ /// on the borrowed form *must* match the ordering on the key type.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map = BTreeMap::new();
+ /// map.insert(1, "a");
+ /// assert_eq!(map.get(&1), Some(&"a"));
+ /// assert_eq!(map.get(&2), None);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn get<Q: ?Sized>(&self, key: &Q) -> Option<&V>
+ where
+ K: Borrow<Q> + Ord,
+ Q: Ord,
+ {
+ let root_node = self.root.as_ref()?.reborrow();
+ match root_node.search_tree(key) {
+ Found(handle) => Some(handle.into_kv().1),
+ GoDown(_) => None,
+ }
+ }
+
+ /// Returns the key-value pair corresponding to the supplied key.
+ ///
+ /// The supplied key may be any borrowed form of the map's key type, but the ordering
+ /// on the borrowed form *must* match the ordering on the key type.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map = BTreeMap::new();
+ /// map.insert(1, "a");
+ /// assert_eq!(map.get_key_value(&1), Some((&1, &"a")));
+ /// assert_eq!(map.get_key_value(&2), None);
+ /// ```
+ #[stable(feature = "map_get_key_value", since = "1.40.0")]
+ pub fn get_key_value<Q: ?Sized>(&self, k: &Q) -> Option<(&K, &V)>
+ where
+ K: Borrow<Q> + Ord,
+ Q: Ord,
+ {
+ let root_node = self.root.as_ref()?.reborrow();
+ match root_node.search_tree(k) {
+ Found(handle) => Some(handle.into_kv()),
+ GoDown(_) => None,
+ }
+ }
+
+ /// Returns the first key-value pair in the map.
+ /// The key in this pair is the minimum key in the map.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(map_first_last)]
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map = BTreeMap::new();
+ /// assert_eq!(map.first_key_value(), None);
+ /// map.insert(1, "b");
+ /// map.insert(2, "a");
+ /// assert_eq!(map.first_key_value(), Some((&1, &"b")));
+ /// ```
+ #[unstable(feature = "map_first_last", issue = "62924")]
+ pub fn first_key_value(&self) -> Option<(&K, &V)>
+ where
+ K: Ord,
+ {
+ let root_node = self.root.as_ref()?.reborrow();
+ root_node.first_leaf_edge().right_kv().ok().map(Handle::into_kv)
+ }
+
+ /// Returns the first entry in the map for in-place manipulation.
+ /// The key of this entry is the minimum key in the map.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(map_first_last)]
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map = BTreeMap::new();
+ /// map.insert(1, "a");
+ /// map.insert(2, "b");
+ /// if let Some(mut entry) = map.first_entry() {
+ /// if *entry.key() > 0 {
+ /// entry.insert("first");
+ /// }
+ /// }
+ /// assert_eq!(*map.get(&1).unwrap(), "first");
+ /// assert_eq!(*map.get(&2).unwrap(), "b");
+ /// ```
+ #[unstable(feature = "map_first_last", issue = "62924")]
+ pub fn first_entry(&mut self) -> Option<OccupiedEntry<'_, K, V, A>>
+ where
+ K: Ord,
+ {
+ let (map, dormant_map) = DormantMutRef::new(self);
+ let root_node = map.root.as_mut()?.borrow_mut();
+ let kv = root_node.first_leaf_edge().right_kv().ok()?;
+ Some(OccupiedEntry {
+ handle: kv.forget_node_type(),
+ dormant_map,
+ alloc: (*map.alloc).clone(),
+ _marker: PhantomData,
+ })
+ }
+
+ /// Removes and returns the first element in the map.
+ /// The key of this element is the minimum key that was in the map.
+ ///
+ /// # Examples
+ ///
+ /// Draining elements in ascending order, while keeping a usable map each iteration.
+ ///
+ /// ```
+ /// #![feature(map_first_last)]
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map = BTreeMap::new();
+ /// map.insert(1, "a");
+ /// map.insert(2, "b");
+ /// while let Some((key, _val)) = map.pop_first() {
+ /// assert!(map.iter().all(|(k, _v)| *k > key));
+ /// }
+ /// assert!(map.is_empty());
+ /// ```
+ #[unstable(feature = "map_first_last", issue = "62924")]
+ pub fn pop_first(&mut self) -> Option<(K, V)>
+ where
+ K: Ord,
+ {
+ self.first_entry().map(|entry| entry.remove_entry())
+ }
+
+ /// Returns the last key-value pair in the map.
+ /// The key in this pair is the maximum key in the map.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(map_first_last)]
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map = BTreeMap::new();
+ /// map.insert(1, "b");
+ /// map.insert(2, "a");
+ /// assert_eq!(map.last_key_value(), Some((&2, &"a")));
+ /// ```
+ #[unstable(feature = "map_first_last", issue = "62924")]
+ pub fn last_key_value(&self) -> Option<(&K, &V)>
+ where
+ K: Ord,
+ {
+ let root_node = self.root.as_ref()?.reborrow();
+ root_node.last_leaf_edge().left_kv().ok().map(Handle::into_kv)
+ }
+
+ /// Returns the last entry in the map for in-place manipulation.
+ /// The key of this entry is the maximum key in the map.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(map_first_last)]
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map = BTreeMap::new();
+ /// map.insert(1, "a");
+ /// map.insert(2, "b");
+ /// if let Some(mut entry) = map.last_entry() {
+ /// if *entry.key() > 0 {
+ /// entry.insert("last");
+ /// }
+ /// }
+ /// assert_eq!(*map.get(&1).unwrap(), "a");
+ /// assert_eq!(*map.get(&2).unwrap(), "last");
+ /// ```
+ #[unstable(feature = "map_first_last", issue = "62924")]
+ pub fn last_entry(&mut self) -> Option<OccupiedEntry<'_, K, V, A>>
+ where
+ K: Ord,
+ {
+ let (map, dormant_map) = DormantMutRef::new(self);
+ let root_node = map.root.as_mut()?.borrow_mut();
+ let kv = root_node.last_leaf_edge().left_kv().ok()?;
+ Some(OccupiedEntry {
+ handle: kv.forget_node_type(),
+ dormant_map,
+ alloc: (*map.alloc).clone(),
+ _marker: PhantomData,
+ })
+ }
+
+ /// Removes and returns the last element in the map.
+ /// The key of this element is the maximum key that was in the map.
+ ///
+ /// # Examples
+ ///
+ /// Draining elements in descending order, while keeping a usable map each iteration.
+ ///
+ /// ```
+ /// #![feature(map_first_last)]
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map = BTreeMap::new();
+ /// map.insert(1, "a");
+ /// map.insert(2, "b");
+ /// while let Some((key, _val)) = map.pop_last() {
+ /// assert!(map.iter().all(|(k, _v)| *k < key));
+ /// }
+ /// assert!(map.is_empty());
+ /// ```
+ #[unstable(feature = "map_first_last", issue = "62924")]
+ pub fn pop_last(&mut self) -> Option<(K, V)>
+ where
+ K: Ord,
+ {
+ self.last_entry().map(|entry| entry.remove_entry())
+ }
+
+ /// Returns `true` if the map contains a value for the specified key.
+ ///
+ /// The key may be any borrowed form of the map's key type, but the ordering
+ /// on the borrowed form *must* match the ordering on the key type.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map = BTreeMap::new();
+ /// map.insert(1, "a");
+ /// assert_eq!(map.contains_key(&1), true);
+ /// assert_eq!(map.contains_key(&2), false);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn contains_key<Q: ?Sized>(&self, key: &Q) -> bool
+ where
+ K: Borrow<Q> + Ord,
+ Q: Ord,
+ {
+ self.get(key).is_some()
+ }
+
+ /// Returns a mutable reference to the value corresponding to the key.
+ ///
+ /// The key may be any borrowed form of the map's key type, but the ordering
+ /// on the borrowed form *must* match the ordering on the key type.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map = BTreeMap::new();
+ /// map.insert(1, "a");
+ /// if let Some(x) = map.get_mut(&1) {
+ /// *x = "b";
+ /// }
+ /// assert_eq!(map[&1], "b");
+ /// ```
+ // See `get` for implementation notes, this is basically a copy-paste with mut's added
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn get_mut<Q: ?Sized>(&mut self, key: &Q) -> Option<&mut V>
+ where
+ K: Borrow<Q> + Ord,
+ Q: Ord,
+ {
+ let root_node = self.root.as_mut()?.borrow_mut();
+ match root_node.search_tree(key) {
+ Found(handle) => Some(handle.into_val_mut()),
+ GoDown(_) => None,
+ }
+ }
+
+ /// Inserts a key-value pair into the map.
+ ///
+ /// If the map did not have this key present, `None` is returned.
+ ///
+ /// If the map did have this key present, the value is updated, and the old
+ /// value is returned. The key is not updated, though; this matters for
+ /// types that can be `==` without being identical. See the [module-level
+ /// documentation] for more.
+ ///
+ /// [module-level documentation]: index.html#insert-and-complex-keys
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map = BTreeMap::new();
+ /// assert_eq!(map.insert(37, "a"), None);
+ /// assert_eq!(map.is_empty(), false);
+ ///
+ /// map.insert(37, "b");
+ /// assert_eq!(map.insert(37, "c"), Some("b"));
+ /// assert_eq!(map[&37], "c");
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn insert(&mut self, key: K, value: V) -> Option<V>
+ where
+ K: Ord,
+ {
+ match self.entry(key) {
+ Occupied(mut entry) => Some(entry.insert(value)),
+ Vacant(entry) => {
+ entry.insert(value);
+ None
+ }
+ }
+ }
+
+ /// Tries to insert a key-value pair into the map, and returns
+ /// a mutable reference to the value in the entry.
+ ///
+ /// If the map already had this key present, nothing is updated, and
+ /// an error containing the occupied entry and the value is returned.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(map_try_insert)]
+ ///
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map = BTreeMap::new();
+ /// assert_eq!(map.try_insert(37, "a").unwrap(), &"a");
+ ///
+ /// let err = map.try_insert(37, "b").unwrap_err();
+ /// assert_eq!(err.entry.key(), &37);
+ /// assert_eq!(err.entry.get(), &"a");
+ /// assert_eq!(err.value, "b");
+ /// ```
+ #[unstable(feature = "map_try_insert", issue = "82766")]
+ pub fn try_insert(&mut self, key: K, value: V) -> Result<&mut V, OccupiedError<'_, K, V, A>>
+ where
+ K: Ord,
+ {
+ match self.entry(key) {
+ Occupied(entry) => Err(OccupiedError { entry, value }),
+ Vacant(entry) => Ok(entry.insert(value)),
+ }
+ }
+
+ /// Removes a key from the map, returning the value at the key if the key
+ /// was previously in the map.
+ ///
+ /// The key may be any borrowed form of the map's key type, but the ordering
+ /// on the borrowed form *must* match the ordering on the key type.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map = BTreeMap::new();
+ /// map.insert(1, "a");
+ /// assert_eq!(map.remove(&1), Some("a"));
+ /// assert_eq!(map.remove(&1), None);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn remove<Q: ?Sized>(&mut self, key: &Q) -> Option<V>
+ where
+ K: Borrow<Q> + Ord,
+ Q: Ord,
+ {
+ self.remove_entry(key).map(|(_, v)| v)
+ }
+
+ /// Removes a key from the map, returning the stored key and value if the key
+ /// was previously in the map.
+ ///
+ /// The key may be any borrowed form of the map's key type, but the ordering
+ /// on the borrowed form *must* match the ordering on the key type.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map = BTreeMap::new();
+ /// map.insert(1, "a");
+ /// assert_eq!(map.remove_entry(&1), Some((1, "a")));
+ /// assert_eq!(map.remove_entry(&1), None);
+ /// ```
+ #[stable(feature = "btreemap_remove_entry", since = "1.45.0")]
+ pub fn remove_entry<Q: ?Sized>(&mut self, key: &Q) -> Option<(K, V)>
+ where
+ K: Borrow<Q> + Ord,
+ Q: Ord,
+ {
+ let (map, dormant_map) = DormantMutRef::new(self);
+ let root_node = map.root.as_mut()?.borrow_mut();
+ match root_node.search_tree(key) {
+ Found(handle) => Some(
+ OccupiedEntry {
+ handle,
+ dormant_map,
+ alloc: (*map.alloc).clone(),
+ _marker: PhantomData,
+ }
+ .remove_entry(),
+ ),
+ GoDown(_) => None,
+ }
+ }
+
+ /// Retains only the elements specified by the predicate.
+ ///
+ /// In other words, remove all pairs `(k, v)` for which `f(&k, &mut v)` returns `false`.
+ /// The elements are visited in ascending key order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map: BTreeMap<i32, i32> = (0..8).map(|x| (x, x*10)).collect();
+ /// // Keep only the elements with even-numbered keys.
+ /// map.retain(|&k, _| k % 2 == 0);
+ /// assert!(map.into_iter().eq(vec![(0, 0), (2, 20), (4, 40), (6, 60)]));
+ /// ```
+ #[inline]
+ #[stable(feature = "btree_retain", since = "1.53.0")]
+ pub fn retain<F>(&mut self, mut f: F)
+ where
+ K: Ord,
+ F: FnMut(&K, &mut V) -> bool,
+ {
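+        // Keeping exactly the pairs that `f` accepts is the same as draining
+        // (removing) exactly the pairs that it rejects, hence the negation.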
+ self.drain_filter(|k, v| !f(k, v));
+ }
+
+    /// Moves all elements from `other` into `self`, leaving `other` empty.
+    ///
+    /// If a key from `other` is already present in `self`, the value from
+    /// `other` replaces the existing one, as the example below shows.
+    ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut a = BTreeMap::new();
+ /// a.insert(1, "a");
+ /// a.insert(2, "b");
+ /// a.insert(3, "c");
+ ///
+ /// let mut b = BTreeMap::new();
+ /// b.insert(3, "d");
+ /// b.insert(4, "e");
+ /// b.insert(5, "f");
+ ///
+ /// a.append(&mut b);
+ ///
+ /// assert_eq!(a.len(), 5);
+ /// assert_eq!(b.len(), 0);
+ ///
+ /// assert_eq!(a[&1], "a");
+ /// assert_eq!(a[&2], "b");
+ /// assert_eq!(a[&3], "d");
+ /// assert_eq!(a[&4], "e");
+ /// assert_eq!(a[&5], "f");
+ /// ```
+ #[stable(feature = "btree_append", since = "1.11.0")]
+ pub fn append(&mut self, other: &mut Self)
+ where
+ K: Ord,
+ A: Clone,
+ {
+ // Do we have to append anything at all?
+ if other.is_empty() {
+ return;
+ }
+
+ // We can just swap `self` and `other` if `self` is empty.
+ if self.is_empty() {
+ mem::swap(self, other);
+ return;
+ }
+
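+        // Otherwise, take both trees apart into sorted iterators and bulk-build
+        // a fresh tree from their merge; this costs O(self.len() + other.len()).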
+ let self_iter = mem::replace(self, Self::new_in((*self.alloc).clone())).into_iter();
+ let other_iter = mem::replace(other, Self::new_in((*self.alloc).clone())).into_iter();
+ let root = self.root.get_or_insert_with(|| Root::new((*self.alloc).clone()));
+ root.append_from_sorted_iters(
+ self_iter,
+ other_iter,
+ &mut self.length,
+ (*self.alloc).clone(),
+ )
+ }
+
+ /// Constructs a double-ended iterator over a sub-range of elements in the map.
+    /// The simplest way to specify the range is to use range syntax:
+    /// `range(min..max)` yields elements from min (inclusive) to max (exclusive).
+    /// The range may also be given as `(Bound<T>, Bound<T>)`, so for example
+    /// `range((Excluded(4), Included(10)))` yields a left-exclusive, right-inclusive
+    /// range from 4 to 10.
+ ///
+ /// # Panics
+ ///
+    /// Panics if the range's `start > end`, or if `start == end` and both
+    /// bounds are `Excluded`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ /// use std::ops::Bound::Included;
+ ///
+ /// let mut map = BTreeMap::new();
+ /// map.insert(3, "a");
+ /// map.insert(5, "b");
+ /// map.insert(8, "c");
+ /// for (&key, &value) in map.range((Included(&4), Included(&8))) {
+ /// println!("{key}: {value}");
+ /// }
+ /// assert_eq!(Some((&5, &"b")), map.range(4..).next());
+ /// ```
+ #[stable(feature = "btree_range", since = "1.17.0")]
+ pub fn range<T: ?Sized, R>(&self, range: R) -> Range<'_, K, V>
+ where
+ T: Ord,
+ K: Borrow<T> + Ord,
+ R: RangeBounds<T>,
+ {
+ if let Some(root) = &self.root {
+ Range { inner: root.reborrow().range_search(range) }
+ } else {
+ Range { inner: LeafRange::none() }
+ }
+ }
+
+ /// Constructs a mutable double-ended iterator over a sub-range of elements in the map.
+    /// The simplest way to specify the range is to use range syntax:
+    /// `range(min..max)` yields elements from min (inclusive) to max (exclusive).
+    /// The range may also be given as `(Bound<T>, Bound<T>)`, so for example
+    /// `range((Excluded(4), Included(10)))` yields a left-exclusive, right-inclusive
+    /// range from 4 to 10.
+ ///
+ /// # Panics
+ ///
+    /// Panics if the range's `start > end`, or if `start == end` and both
+    /// bounds are `Excluded`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map: BTreeMap<&str, i32> =
+ /// [("Alice", 0), ("Bob", 0), ("Carol", 0), ("Cheryl", 0)].into();
+ /// for (_, balance) in map.range_mut("B".."Cheryl") {
+ /// *balance += 100;
+ /// }
+ /// for (name, balance) in &map {
+ /// println!("{name} => {balance}");
+ /// }
+ /// ```
+ #[stable(feature = "btree_range", since = "1.17.0")]
+ pub fn range_mut<T: ?Sized, R>(&mut self, range: R) -> RangeMut<'_, K, V>
+ where
+ T: Ord,
+ K: Borrow<T> + Ord,
+ R: RangeBounds<T>,
+ {
+ if let Some(root) = &mut self.root {
+ RangeMut { inner: root.borrow_valmut().range_search(range), _marker: PhantomData }
+ } else {
+ RangeMut { inner: LeafRange::none(), _marker: PhantomData }
+ }
+ }
+
+ /// Gets the given key's corresponding entry in the map for in-place manipulation.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut count: BTreeMap<&str, usize> = BTreeMap::new();
+ ///
+ /// // count the number of occurrences of letters in the vec
+ /// for x in ["a", "b", "a", "c", "a", "b"] {
+ /// count.entry(x).and_modify(|curr| *curr += 1).or_insert(1);
+ /// }
+ ///
+ /// assert_eq!(count["a"], 3);
+ /// assert_eq!(count["b"], 2);
+ /// assert_eq!(count["c"], 1);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn entry(&mut self, key: K) -> Entry<'_, K, V, A>
+ where
+ K: Ord,
+ {
+ let (map, dormant_map) = DormantMutRef::new(self);
+ match map.root {
+ None => Vacant(VacantEntry {
+ key,
+ handle: None,
+ dormant_map,
+ alloc: (*map.alloc).clone(),
+ _marker: PhantomData,
+ }),
+ Some(ref mut root) => match root.borrow_mut().search_tree(&key) {
+ Found(handle) => Occupied(OccupiedEntry {
+ handle,
+ dormant_map,
+ alloc: (*map.alloc).clone(),
+ _marker: PhantomData,
+ }),
+ GoDown(handle) => Vacant(VacantEntry {
+ key,
+ handle: Some(handle),
+ dormant_map,
+ alloc: (*map.alloc).clone(),
+ _marker: PhantomData,
+ }),
+ },
+ }
+ }
+
+ /// Splits the collection into two at the given key. Returns everything after the given key,
+ /// including the key.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut a = BTreeMap::new();
+ /// a.insert(1, "a");
+ /// a.insert(2, "b");
+ /// a.insert(3, "c");
+ /// a.insert(17, "d");
+ /// a.insert(41, "e");
+ ///
+ /// let b = a.split_off(&3);
+ ///
+ /// assert_eq!(a.len(), 2);
+ /// assert_eq!(b.len(), 3);
+ ///
+ /// assert_eq!(a[&1], "a");
+ /// assert_eq!(a[&2], "b");
+ ///
+ /// assert_eq!(b[&3], "c");
+ /// assert_eq!(b[&17], "d");
+ /// assert_eq!(b[&41], "e");
+ /// ```
+ #[stable(feature = "btree_split_off", since = "1.11.0")]
+ pub fn split_off<Q: ?Sized + Ord>(&mut self, key: &Q) -> Self
+ where
+ K: Borrow<Q> + Ord,
+ A: Clone,
+ {
+ if self.is_empty() {
+ return Self::new_in((*self.alloc).clone());
+ }
+
+ let total_num = self.len();
+ let left_root = self.root.as_mut().unwrap(); // unwrap succeeds because not empty
+
+ let right_root = left_root.split_off(key, (*self.alloc).clone());
+
+ let (new_left_len, right_len) = Root::calc_split_length(total_num, &left_root, &right_root);
+ self.length = new_left_len;
+
+ BTreeMap {
+ root: Some(right_root),
+ length: right_len,
+ alloc: self.alloc.clone(),
+ _marker: PhantomData,
+ }
+ }
+
+ /// Creates an iterator that visits all elements (key-value pairs) in
+ /// ascending key order and uses a closure to determine if an element should
+ /// be removed. If the closure returns `true`, the element is removed from
+ /// the map and yielded. If the closure returns `false`, or panics, the
+ /// element remains in the map and will not be yielded.
+ ///
+ /// The iterator also lets you mutate the value of each element in the
+ /// closure, regardless of whether you choose to keep or remove it.
+ ///
+ /// If the iterator is only partially consumed or not consumed at all, each
+ /// of the remaining elements is still subjected to the closure, which may
+ /// change its value and, by returning `true`, have the element removed and
+ /// dropped.
+ ///
+    /// It is unspecified how many more elements will be subjected to the
+    /// closure if a panic occurs in the closure, if a panic occurs while
+    /// dropping an element, or if the `DrainFilter` value is leaked.
+ ///
+ /// # Examples
+ ///
+ /// Splitting a map into even and odd keys, reusing the original map:
+ ///
+ /// ```
+ /// #![feature(btree_drain_filter)]
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map: BTreeMap<i32, i32> = (0..8).map(|x| (x, x)).collect();
+ /// let evens: BTreeMap<_, _> = map.drain_filter(|k, _v| k % 2 == 0).collect();
+ /// let odds = map;
+ /// assert_eq!(evens.keys().copied().collect::<Vec<_>>(), [0, 2, 4, 6]);
+ /// assert_eq!(odds.keys().copied().collect::<Vec<_>>(), [1, 3, 5, 7]);
+ /// ```
+ #[unstable(feature = "btree_drain_filter", issue = "70530")]
+ pub fn drain_filter<F>(&mut self, pred: F) -> DrainFilter<'_, K, V, F, A>
+ where
+ K: Ord,
+ F: FnMut(&K, &mut V) -> bool,
+ {
+ let (inner, alloc) = self.drain_filter_inner();
+ DrainFilter { pred, inner, alloc }
+ }
+
+ pub(super) fn drain_filter_inner(&mut self) -> (DrainFilterInner<'_, K, V>, A)
+ where
+ K: Ord,
+ {
+ if let Some(root) = self.root.as_mut() {
+ let (root, dormant_root) = DormantMutRef::new(root);
+ let front = root.borrow_mut().first_leaf_edge();
+ (
+ DrainFilterInner {
+ length: &mut self.length,
+ dormant_root: Some(dormant_root),
+ cur_leaf_edge: Some(front),
+ },
+ (*self.alloc).clone(),
+ )
+ } else {
+ (
+ DrainFilterInner {
+ length: &mut self.length,
+ dormant_root: None,
+ cur_leaf_edge: None,
+ },
+ (*self.alloc).clone(),
+ )
+ }
+ }
+
+ /// Creates a consuming iterator visiting all the keys, in sorted order.
+ /// The map cannot be used after calling this.
+ /// The iterator element type is `K`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut a = BTreeMap::new();
+ /// a.insert(2, "b");
+ /// a.insert(1, "a");
+ ///
+ /// let keys: Vec<i32> = a.into_keys().collect();
+ /// assert_eq!(keys, [1, 2]);
+ /// ```
+ #[inline]
+ #[stable(feature = "map_into_keys_values", since = "1.54.0")]
+ pub fn into_keys(self) -> IntoKeys<K, V, A> {
+ IntoKeys { inner: self.into_iter() }
+ }
+
+ /// Creates a consuming iterator visiting all the values, in order by key.
+ /// The map cannot be used after calling this.
+ /// The iterator element type is `V`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut a = BTreeMap::new();
+ /// a.insert(1, "hello");
+ /// a.insert(2, "goodbye");
+ ///
+ /// let values: Vec<&str> = a.into_values().collect();
+ /// assert_eq!(values, ["hello", "goodbye"]);
+ /// ```
+ #[inline]
+ #[stable(feature = "map_into_keys_values", since = "1.54.0")]
+ pub fn into_values(self) -> IntoValues<K, V, A> {
+ IntoValues { inner: self.into_iter() }
+ }
+
+ /// Makes a `BTreeMap` from a sorted iterator.
+ pub(crate) fn bulk_build_from_sorted_iter<I>(iter: I, alloc: A) -> Self
+ where
+ K: Ord,
+ I: IntoIterator<Item = (K, V)>,
+ {
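+        // `DedupSortedIter` skips all but the last of each run of equal keys,
+        // so when keys collide, the value provided last wins, as with `insert`.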
+ let mut root = Root::new(alloc.clone());
+ let mut length = 0;
+ root.bulk_push(DedupSortedIter::new(iter.into_iter()), &mut length, alloc.clone());
+ BTreeMap { root: Some(root), length, alloc: ManuallyDrop::new(alloc), _marker: PhantomData }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, K, V, A: Allocator + Clone> IntoIterator for &'a BTreeMap<K, V, A> {
+ type Item = (&'a K, &'a V);
+ type IntoIter = Iter<'a, K, V>;
+
+ fn into_iter(self) -> Iter<'a, K, V> {
+ self.iter()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, K: 'a, V: 'a> Iterator for Iter<'a, K, V> {
+ type Item = (&'a K, &'a V);
+
+ fn next(&mut self) -> Option<(&'a K, &'a V)> {
+ if self.length == 0 {
+ None
+ } else {
+ self.length -= 1;
+ Some(unsafe { self.range.next_unchecked() })
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (self.length, Some(self.length))
+ }
+
+ fn last(mut self) -> Option<(&'a K, &'a V)> {
+ self.next_back()
+ }
+
+ fn min(mut self) -> Option<(&'a K, &'a V)> {
+ self.next()
+ }
+
+ fn max(mut self) -> Option<(&'a K, &'a V)> {
+ self.next_back()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<K, V> FusedIterator for Iter<'_, K, V> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, K: 'a, V: 'a> DoubleEndedIterator for Iter<'a, K, V> {
+ fn next_back(&mut self) -> Option<(&'a K, &'a V)> {
+ if self.length == 0 {
+ None
+ } else {
+ self.length -= 1;
+ Some(unsafe { self.range.next_back_unchecked() })
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K, V> ExactSizeIterator for Iter<'_, K, V> {
+ fn len(&self) -> usize {
+ self.length
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K, V> Clone for Iter<'_, K, V> {
+ fn clone(&self) -> Self {
+ Iter { range: self.range.clone(), length: self.length }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, K, V, A: Allocator + Clone> IntoIterator for &'a mut BTreeMap<K, V, A> {
+ type Item = (&'a K, &'a mut V);
+ type IntoIter = IterMut<'a, K, V>;
+
+ fn into_iter(self) -> IterMut<'a, K, V> {
+ self.iter_mut()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, K, V> Iterator for IterMut<'a, K, V> {
+ type Item = (&'a K, &'a mut V);
+
+ fn next(&mut self) -> Option<(&'a K, &'a mut V)> {
+ if self.length == 0 {
+ None
+ } else {
+ self.length -= 1;
+ Some(unsafe { self.range.next_unchecked() })
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (self.length, Some(self.length))
+ }
+
+ fn last(mut self) -> Option<(&'a K, &'a mut V)> {
+ self.next_back()
+ }
+
+ fn min(mut self) -> Option<(&'a K, &'a mut V)> {
+ self.next()
+ }
+
+ fn max(mut self) -> Option<(&'a K, &'a mut V)> {
+ self.next_back()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, K, V> DoubleEndedIterator for IterMut<'a, K, V> {
+ fn next_back(&mut self) -> Option<(&'a K, &'a mut V)> {
+ if self.length == 0 {
+ None
+ } else {
+ self.length -= 1;
+ Some(unsafe { self.range.next_back_unchecked() })
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K, V> ExactSizeIterator for IterMut<'_, K, V> {
+ fn len(&self) -> usize {
+ self.length
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<K, V> FusedIterator for IterMut<'_, K, V> {}
+
+impl<'a, K, V> IterMut<'a, K, V> {
+ /// Returns an iterator of references over the remaining items.
+ #[inline]
+ pub(super) fn iter(&self) -> Iter<'_, K, V> {
+ Iter { range: self.range.reborrow(), length: self.length }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K, V, A: Allocator + Clone> IntoIterator for BTreeMap<K, V, A> {
+ type Item = (K, V);
+ type IntoIter = IntoIter<K, V, A>;
+
+ fn into_iter(self) -> IntoIter<K, V, A> {
+ let mut me = ManuallyDrop::new(self);
+ if let Some(root) = me.root.take() {
+ let full_range = root.into_dying().full_range();
+
+ IntoIter {
+ range: full_range,
+ length: me.length,
+ alloc: unsafe { ManuallyDrop::take(&mut me.alloc) },
+ }
+ } else {
+ IntoIter {
+ range: LazyLeafRange::none(),
+ length: 0,
+ alloc: unsafe { ManuallyDrop::take(&mut me.alloc) },
+ }
+ }
+ }
+}
+
+#[stable(feature = "btree_drop", since = "1.7.0")]
+impl<K, V, A: Allocator + Clone> Drop for IntoIter<K, V, A> {
+ fn drop(&mut self) {
+ struct DropGuard<'a, K, V, A: Allocator + Clone>(&'a mut IntoIter<K, V, A>);
+
+ impl<'a, K, V, A: Allocator + Clone> Drop for DropGuard<'a, K, V, A> {
+ fn drop(&mut self) {
+ // Continue the same loop we perform below. This only runs when unwinding, so we
+ // don't have to care about panics this time (they'll abort).
+ while let Some(kv) = self.0.dying_next() {
+ // SAFETY: we consume the dying handle immediately.
+ unsafe { kv.drop_key_val() };
+ }
+ }
+ }
+
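+        // Drop every remaining element. If a destructor panics, the guard above
+        // resumes this loop during unwinding so the rest of the tree is still
+        // deallocated; a second panic then aborts.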
+ while let Some(kv) = self.dying_next() {
+ let guard = DropGuard(self);
+ // SAFETY: we don't touch the tree before consuming the dying handle.
+ unsafe { kv.drop_key_val() };
+ mem::forget(guard);
+ }
+ }
+}
+
+impl<K, V, A: Allocator + Clone> IntoIter<K, V, A> {
+ /// Core of a `next` method returning a dying KV handle,
+ /// invalidated by further calls to this function and some others.
+ fn dying_next(
+ &mut self,
+ ) -> Option<Handle<NodeRef<marker::Dying, K, V, marker::LeafOrInternal>, marker::KV>> {
+ if self.length == 0 {
+ self.range.deallocating_end(self.alloc.clone());
+ None
+ } else {
+ self.length -= 1;
+ Some(unsafe { self.range.deallocating_next_unchecked(self.alloc.clone()) })
+ }
+ }
+
+ /// Core of a `next_back` method returning a dying KV handle,
+ /// invalidated by further calls to this function and some others.
+ fn dying_next_back(
+ &mut self,
+ ) -> Option<Handle<NodeRef<marker::Dying, K, V, marker::LeafOrInternal>, marker::KV>> {
+ if self.length == 0 {
+ self.range.deallocating_end(self.alloc.clone());
+ None
+ } else {
+ self.length -= 1;
+ Some(unsafe { self.range.deallocating_next_back_unchecked(self.alloc.clone()) })
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K, V, A: Allocator + Clone> Iterator for IntoIter<K, V, A> {
+ type Item = (K, V);
+
+ fn next(&mut self) -> Option<(K, V)> {
+ // SAFETY: we consume the dying handle immediately.
+ self.dying_next().map(unsafe { |kv| kv.into_key_val() })
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (self.length, Some(self.length))
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K, V, A: Allocator + Clone> DoubleEndedIterator for IntoIter<K, V, A> {
+ fn next_back(&mut self) -> Option<(K, V)> {
+ // SAFETY: we consume the dying handle immediately.
+ self.dying_next_back().map(unsafe { |kv| kv.into_key_val() })
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K, V, A: Allocator + Clone> ExactSizeIterator for IntoIter<K, V, A> {
+ fn len(&self) -> usize {
+ self.length
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<K, V, A: Allocator + Clone> FusedIterator for IntoIter<K, V, A> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, K, V> Iterator for Keys<'a, K, V> {
+ type Item = &'a K;
+
+ fn next(&mut self) -> Option<&'a K> {
+ self.inner.next().map(|(k, _)| k)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+
+ fn last(mut self) -> Option<&'a K> {
+ self.next_back()
+ }
+
+ fn min(mut self) -> Option<&'a K> {
+ self.next()
+ }
+
+ fn max(mut self) -> Option<&'a K> {
+ self.next_back()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, K, V> DoubleEndedIterator for Keys<'a, K, V> {
+ fn next_back(&mut self) -> Option<&'a K> {
+ self.inner.next_back().map(|(k, _)| k)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K, V> ExactSizeIterator for Keys<'_, K, V> {
+ fn len(&self) -> usize {
+ self.inner.len()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<K, V> FusedIterator for Keys<'_, K, V> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K, V> Clone for Keys<'_, K, V> {
+ fn clone(&self) -> Self {
+ Keys { inner: self.inner.clone() }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, K, V> Iterator for Values<'a, K, V> {
+ type Item = &'a V;
+
+ fn next(&mut self) -> Option<&'a V> {
+ self.inner.next().map(|(_, v)| v)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+
+ fn last(mut self) -> Option<&'a V> {
+ self.next_back()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, K, V> DoubleEndedIterator for Values<'a, K, V> {
+ fn next_back(&mut self) -> Option<&'a V> {
+ self.inner.next_back().map(|(_, v)| v)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K, V> ExactSizeIterator for Values<'_, K, V> {
+ fn len(&self) -> usize {
+ self.inner.len()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<K, V> FusedIterator for Values<'_, K, V> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K, V> Clone for Values<'_, K, V> {
+ fn clone(&self) -> Self {
+ Values { inner: self.inner.clone() }
+ }
+}
+
+/// An iterator produced by calling `drain_filter` on BTreeMap.
+#[unstable(feature = "btree_drain_filter", issue = "70530")]
+pub struct DrainFilter<
+ 'a,
+ K,
+ V,
+ F,
+ #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator + Clone = Global,
+> where
+ F: 'a + FnMut(&K, &mut V) -> bool,
+{
+ pred: F,
+ inner: DrainFilterInner<'a, K, V>,
+    /// The BTreeMap will outlive this DrainFilter, so we don't care about drop order for `alloc`.
+ alloc: A,
+}
+
+/// Most of the implementation of DrainFilter is generic over the type
+/// of the predicate, thus also serving for BTreeSet::DrainFilter.
+pub(super) struct DrainFilterInner<'a, K, V> {
+ /// Reference to the length field in the borrowed map, updated live.
+ length: &'a mut usize,
+ /// Buried reference to the root field in the borrowed map.
+ /// Wrapped in `Option` to allow drop handler to `take` it.
+ dormant_root: Option<DormantMutRef<'a, Root<K, V>>>,
+ /// Contains a leaf edge preceding the next element to be returned, or the last leaf edge.
+ /// Empty if the map has no root, if iteration went beyond the last leaf edge,
+ /// or if a panic occurred in the predicate.
+ cur_leaf_edge: Option<Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge>>,
+}
+
+#[unstable(feature = "btree_drain_filter", issue = "70530")]
+impl<K, V, F, A: Allocator + Clone> Drop for DrainFilter<'_, K, V, F, A>
+where
+ F: FnMut(&K, &mut V) -> bool,
+{
+ fn drop(&mut self) {
+ self.for_each(drop);
+ }
+}
+
+#[unstable(feature = "btree_drain_filter", issue = "70530")]
+impl<K, V, F> fmt::Debug for DrainFilter<'_, K, V, F>
+where
+ K: fmt::Debug,
+ V: fmt::Debug,
+ F: FnMut(&K, &mut V) -> bool,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("DrainFilter").field(&self.inner.peek()).finish()
+ }
+}
+
+#[unstable(feature = "btree_drain_filter", issue = "70530")]
+impl<K, V, F, A: Allocator + Clone> Iterator for DrainFilter<'_, K, V, F, A>
+where
+ F: FnMut(&K, &mut V) -> bool,
+{
+ type Item = (K, V);
+
+ fn next(&mut self) -> Option<(K, V)> {
+ self.inner.next(&mut self.pred, self.alloc.clone())
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+}
+
+impl<'a, K, V> DrainFilterInner<'a, K, V> {
+ /// Allow Debug implementations to predict the next element.
+ pub(super) fn peek(&self) -> Option<(&K, &V)> {
+ let edge = self.cur_leaf_edge.as_ref()?;
+ edge.reborrow().next_kv().ok().map(Handle::into_kv)
+ }
+
+ /// Implementation of a typical `DrainFilter::next` method, given the predicate.
+ pub(super) fn next<F, A: Allocator + Clone>(&mut self, pred: &mut F, alloc: A) -> Option<(K, V)>
+ where
+ F: FnMut(&K, &mut V) -> bool,
+ {
+ while let Ok(mut kv) = self.cur_leaf_edge.take()?.next_kv() {
+ let (k, v) = kv.kv_mut();
+ if pred(k, v) {
+ *self.length -= 1;
+ let (kv, pos) = kv.remove_kv_tracking(
+ || {
+ // SAFETY: we will touch the root in a way that will not
+ // invalidate the position returned.
+ let root = unsafe { self.dormant_root.take().unwrap().awaken() };
+ root.pop_internal_level(alloc.clone());
+ self.dormant_root = Some(DormantMutRef::new(root).1);
+ },
+ alloc.clone(),
+ );
+ self.cur_leaf_edge = Some(pos);
+ return Some(kv);
+ }
+ self.cur_leaf_edge = Some(kv.next_leaf_edge());
+ }
+ None
+ }
+
+ /// Implementation of a typical `DrainFilter::size_hint` method.
+ pub(super) fn size_hint(&self) -> (usize, Option<usize>) {
+        // In most of the btree iterators, `self.length` is the number of elements
+        // yet to be visited. Here, it also counts elements that were visited and
+        // that the predicate decided not to drain. Making this upper bound tighter
+        // during iteration would require an extra field.
+ (0, Some(*self.length))
+ }
+}
+
+#[unstable(feature = "btree_drain_filter", issue = "70530")]
+impl<K, V, F> FusedIterator for DrainFilter<'_, K, V, F> where F: FnMut(&K, &mut V) -> bool {}
+
+#[stable(feature = "btree_range", since = "1.17.0")]
+impl<'a, K, V> Iterator for Range<'a, K, V> {
+ type Item = (&'a K, &'a V);
+
+ fn next(&mut self) -> Option<(&'a K, &'a V)> {
+ self.inner.next_checked()
+ }
+
+ fn last(mut self) -> Option<(&'a K, &'a V)> {
+ self.next_back()
+ }
+
+ fn min(mut self) -> Option<(&'a K, &'a V)> {
+ self.next()
+ }
+
+ fn max(mut self) -> Option<(&'a K, &'a V)> {
+ self.next_back()
+ }
+}
+
+#[stable(feature = "map_values_mut", since = "1.10.0")]
+impl<'a, K, V> Iterator for ValuesMut<'a, K, V> {
+ type Item = &'a mut V;
+
+ fn next(&mut self) -> Option<&'a mut V> {
+ self.inner.next().map(|(_, v)| v)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+
+ fn last(mut self) -> Option<&'a mut V> {
+ self.next_back()
+ }
+}
+
+#[stable(feature = "map_values_mut", since = "1.10.0")]
+impl<'a, K, V> DoubleEndedIterator for ValuesMut<'a, K, V> {
+ fn next_back(&mut self) -> Option<&'a mut V> {
+ self.inner.next_back().map(|(_, v)| v)
+ }
+}
+
+#[stable(feature = "map_values_mut", since = "1.10.0")]
+impl<K, V> ExactSizeIterator for ValuesMut<'_, K, V> {
+ fn len(&self) -> usize {
+ self.inner.len()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<K, V> FusedIterator for ValuesMut<'_, K, V> {}
+
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
+impl<K, V, A: Allocator + Clone> Iterator for IntoKeys<K, V, A> {
+ type Item = K;
+
+ fn next(&mut self) -> Option<K> {
+ self.inner.next().map(|(k, _)| k)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+
+ fn last(mut self) -> Option<K> {
+ self.next_back()
+ }
+
+ fn min(mut self) -> Option<K> {
+ self.next()
+ }
+
+ fn max(mut self) -> Option<K> {
+ self.next_back()
+ }
+}
+
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
+impl<K, V, A: Allocator + Clone> DoubleEndedIterator for IntoKeys<K, V, A> {
+ fn next_back(&mut self) -> Option<K> {
+ self.inner.next_back().map(|(k, _)| k)
+ }
+}
+
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
+impl<K, V, A: Allocator + Clone> ExactSizeIterator for IntoKeys<K, V, A> {
+ fn len(&self) -> usize {
+ self.inner.len()
+ }
+}
+
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
+impl<K, V, A: Allocator + Clone> FusedIterator for IntoKeys<K, V, A> {}
+
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
+impl<K, V, A: Allocator + Clone> Iterator for IntoValues<K, V, A> {
+ type Item = V;
+
+ fn next(&mut self) -> Option<V> {
+ self.inner.next().map(|(_, v)| v)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+
+ fn last(mut self) -> Option<V> {
+ self.next_back()
+ }
+}
+
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
+impl<K, V, A: Allocator + Clone> DoubleEndedIterator for IntoValues<K, V, A> {
+ fn next_back(&mut self) -> Option<V> {
+ self.inner.next_back().map(|(_, v)| v)
+ }
+}
+
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
+impl<K, V, A: Allocator + Clone> ExactSizeIterator for IntoValues<K, V, A> {
+ fn len(&self) -> usize {
+ self.inner.len()
+ }
+}
+
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
+impl<K, V, A: Allocator + Clone> FusedIterator for IntoValues<K, V, A> {}
+
+#[stable(feature = "btree_range", since = "1.17.0")]
+impl<'a, K, V> DoubleEndedIterator for Range<'a, K, V> {
+ fn next_back(&mut self) -> Option<(&'a K, &'a V)> {
+ self.inner.next_back_checked()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<K, V> FusedIterator for Range<'_, K, V> {}
+
+#[stable(feature = "btree_range", since = "1.17.0")]
+impl<K, V> Clone for Range<'_, K, V> {
+ fn clone(&self) -> Self {
+ Range { inner: self.inner.clone() }
+ }
+}
+
+#[stable(feature = "btree_range", since = "1.17.0")]
+impl<'a, K, V> Iterator for RangeMut<'a, K, V> {
+ type Item = (&'a K, &'a mut V);
+
+ fn next(&mut self) -> Option<(&'a K, &'a mut V)> {
+ self.inner.next_checked()
+ }
+
+ fn last(mut self) -> Option<(&'a K, &'a mut V)> {
+ self.next_back()
+ }
+
+ fn min(mut self) -> Option<(&'a K, &'a mut V)> {
+ self.next()
+ }
+
+ fn max(mut self) -> Option<(&'a K, &'a mut V)> {
+ self.next_back()
+ }
+}
+
+#[stable(feature = "btree_range", since = "1.17.0")]
+impl<'a, K, V> DoubleEndedIterator for RangeMut<'a, K, V> {
+ fn next_back(&mut self) -> Option<(&'a K, &'a mut V)> {
+ self.inner.next_back_checked()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<K, V> FusedIterator for RangeMut<'_, K, V> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K: Ord, V> FromIterator<(K, V)> for BTreeMap<K, V> {
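+    /// Constructs a `BTreeMap` from an iterator of key-value pairs.
+    ///
+    /// If the iterator yields several pairs with equal keys, only the value of
+    /// the last such pair is kept, matching repeated `insert` calls:
+    ///
+    /// ```
+    /// use std::collections::BTreeMap;
+    ///
+    /// let map: BTreeMap<_, _> =
+    ///     vec![(2, "b"), (1, "a"), (1, "overwrites a")].into_iter().collect();
+    /// assert_eq!(map[&1], "overwrites a");
+    /// assert_eq!(map.len(), 2);
+    /// ```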
+ fn from_iter<T: IntoIterator<Item = (K, V)>>(iter: T) -> BTreeMap<K, V> {
+ let mut inputs: Vec<_> = iter.into_iter().collect();
+
+ if inputs.is_empty() {
+ return BTreeMap::new();
+ }
+
+        // Use a stable sort to preserve the insertion order of equal keys, so
+        // that the last value provided for a duplicated key is the one kept.
+ inputs.sort_by(|a, b| a.0.cmp(&b.0));
+ BTreeMap::bulk_build_from_sorted_iter(inputs, Global)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K: Ord, V, A: Allocator + Clone> Extend<(K, V)> for BTreeMap<K, V, A> {
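+    /// Inserts each key-value pair from the iterator in turn. A pair whose key
+    /// is already present overwrites the existing value, just like
+    /// [`insert`](BTreeMap::insert):
+    ///
+    /// ```
+    /// use std::collections::BTreeMap;
+    ///
+    /// let mut map = BTreeMap::from([(1, "a")]);
+    /// map.extend(vec![(1, "b"), (2, "c")]);
+    /// assert_eq!(map[&1], "b");
+    /// assert_eq!(map[&2], "c");
+    /// ```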
+ #[inline]
+ fn extend<T: IntoIterator<Item = (K, V)>>(&mut self, iter: T) {
+ iter.into_iter().for_each(move |(k, v)| {
+ self.insert(k, v);
+ });
+ }
+
+ #[inline]
+ fn extend_one(&mut self, (k, v): (K, V)) {
+ self.insert(k, v);
+ }
+}
+
+#[stable(feature = "extend_ref", since = "1.2.0")]
+impl<'a, K: Ord + Copy, V: Copy, A: Allocator + Clone> Extend<(&'a K, &'a V)>
+ for BTreeMap<K, V, A>
+{
+ fn extend<I: IntoIterator<Item = (&'a K, &'a V)>>(&mut self, iter: I) {
+ self.extend(iter.into_iter().map(|(&key, &value)| (key, value)));
+ }
+
+ #[inline]
+ fn extend_one(&mut self, (&k, &v): (&'a K, &'a V)) {
+ self.insert(k, v);
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K: Hash, V: Hash, A: Allocator + Clone> Hash for BTreeMap<K, V, A> {
+ fn hash<H: Hasher>(&self, state: &mut H) {
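+        // Feed the length first so that hashing is prefix-free: maps of
+        // different lengths write different streams to the hasher.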
+ state.write_length_prefix(self.len());
+ for elt in self {
+ elt.hash(state);
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K, V> Default for BTreeMap<K, V> {
+ /// Creates an empty `BTreeMap`.
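+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::collections::BTreeMap;
+    ///
+    /// let map: BTreeMap<i32, &str> = BTreeMap::default();
+    /// assert!(map.is_empty());
+    /// ```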
+ fn default() -> BTreeMap<K, V> {
+ BTreeMap::new()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K: PartialEq, V: PartialEq, A: Allocator + Clone> PartialEq for BTreeMap<K, V, A> {
+ fn eq(&self, other: &BTreeMap<K, V, A>) -> bool {
+ self.len() == other.len() && self.iter().zip(other).all(|(a, b)| a == b)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K: Eq, V: Eq, A: Allocator + Clone> Eq for BTreeMap<K, V, A> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K: PartialOrd, V: PartialOrd, A: Allocator + Clone> PartialOrd for BTreeMap<K, V, A> {
+ #[inline]
+ fn partial_cmp(&self, other: &BTreeMap<K, V, A>) -> Option<Ordering> {
+ self.iter().partial_cmp(other.iter())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K: Ord, V: Ord, A: Allocator + Clone> Ord for BTreeMap<K, V, A> {
+ #[inline]
+ fn cmp(&self, other: &BTreeMap<K, V, A>) -> Ordering {
+ self.iter().cmp(other.iter())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K: Debug, V: Debug, A: Allocator + Clone> Debug for BTreeMap<K, V, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_map().entries(self.iter()).finish()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K, Q: ?Sized, V, A: Allocator + Clone> Index<&Q> for BTreeMap<K, V, A>
+where
+ K: Borrow<Q> + Ord,
+ Q: Ord,
+{
+ type Output = V;
+
+ /// Returns a reference to the value corresponding to the supplied key.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the key is not present in the `BTreeMap`.
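+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::collections::BTreeMap;
+    ///
+    /// let map = BTreeMap::from([(1, "a")]);
+    /// assert_eq!(map[&1], "a");
+    /// ```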
+ #[inline]
+ fn index(&self, key: &Q) -> &V {
+ self.get(key).expect("no entry found for key")
+ }
+}
+
+#[stable(feature = "std_collections_from_array", since = "1.56.0")]
+impl<K: Ord, V, const N: usize> From<[(K, V); N]> for BTreeMap<K, V> {
+    /// Converts a `[(K, V); N]` into a `BTreeMap<K, V>`.
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let map1 = BTreeMap::from([(1, 2), (3, 4)]);
+ /// let map2: BTreeMap<_, _> = [(1, 2), (3, 4)].into();
+ /// assert_eq!(map1, map2);
+ /// ```
+ fn from(mut arr: [(K, V); N]) -> Self {
+ if N == 0 {
+ return BTreeMap::new();
+ }
+
+        // Use a stable sort to preserve the insertion order of equal keys, so
+        // that the last value provided for a duplicated key is the one kept.
+ arr.sort_by(|a, b| a.0.cmp(&b.0));
+ BTreeMap::bulk_build_from_sorted_iter(arr, Global)
+ }
+}
+
+impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
+ /// Gets an iterator over the entries of the map, sorted by key.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map = BTreeMap::new();
+ /// map.insert(3, "c");
+ /// map.insert(2, "b");
+ /// map.insert(1, "a");
+ ///
+ /// for (key, value) in map.iter() {
+ /// println!("{key}: {value}");
+ /// }
+ ///
+ /// let (first_key, first_value) = map.iter().next().unwrap();
+ /// assert_eq!((*first_key, *first_value), (1, "a"));
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn iter(&self) -> Iter<'_, K, V> {
+ if let Some(root) = &self.root {
+ let full_range = root.reborrow().full_range();
+
+ Iter { range: full_range, length: self.length }
+ } else {
+ Iter { range: LazyLeafRange::none(), length: 0 }
+ }
+ }
+
+ /// Gets a mutable iterator over the entries of the map, sorted by key.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map = BTreeMap::from([
+ /// ("a", 1),
+ /// ("b", 2),
+ /// ("c", 3),
+ /// ]);
+ ///
+ /// // add 10 to the value if the key isn't "a"
+ /// for (key, value) in map.iter_mut() {
+ /// if key != &"a" {
+ /// *value += 10;
+ /// }
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn iter_mut(&mut self) -> IterMut<'_, K, V> {
+ if let Some(root) = &mut self.root {
+ let full_range = root.borrow_valmut().full_range();
+
+ IterMut { range: full_range, length: self.length, _marker: PhantomData }
+ } else {
+ IterMut { range: LazyLeafRange::none(), length: 0, _marker: PhantomData }
+ }
+ }
+
+ /// Gets an iterator over the keys of the map, in sorted order.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut a = BTreeMap::new();
+ /// a.insert(2, "b");
+ /// a.insert(1, "a");
+ ///
+ /// let keys: Vec<_> = a.keys().cloned().collect();
+ /// assert_eq!(keys, [1, 2]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn keys(&self) -> Keys<'_, K, V> {
+ Keys { inner: self.iter() }
+ }
+
+ /// Gets an iterator over the values of the map, in order by key.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut a = BTreeMap::new();
+ /// a.insert(1, "hello");
+ /// a.insert(2, "goodbye");
+ ///
+ /// let values: Vec<&str> = a.values().cloned().collect();
+ /// assert_eq!(values, ["hello", "goodbye"]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn values(&self) -> Values<'_, K, V> {
+ Values { inner: self.iter() }
+ }
+
+ /// Gets a mutable iterator over the values of the map, in order by key.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut a = BTreeMap::new();
+ /// a.insert(1, String::from("hello"));
+ /// a.insert(2, String::from("goodbye"));
+ ///
+ /// for value in a.values_mut() {
+ /// value.push_str("!");
+ /// }
+ ///
+ /// let values: Vec<String> = a.values().cloned().collect();
+ /// assert_eq!(values, [String::from("hello!"),
+ /// String::from("goodbye!")]);
+ /// ```
+ #[stable(feature = "map_values_mut", since = "1.10.0")]
+ pub fn values_mut(&mut self) -> ValuesMut<'_, K, V> {
+ ValuesMut { inner: self.iter_mut() }
+ }
+
+ /// Returns the number of elements in the map.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut a = BTreeMap::new();
+ /// assert_eq!(a.len(), 0);
+ /// a.insert(1, "a");
+ /// assert_eq!(a.len(), 1);
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_btree_new", issue = "71835")]
+ pub const fn len(&self) -> usize {
+ self.length
+ }
+
+ /// Returns `true` if the map contains no elements.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut a = BTreeMap::new();
+ /// assert!(a.is_empty());
+ /// a.insert(1, "a");
+ /// assert!(!a.is_empty());
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_btree_new", issue = "71835")]
+ pub const fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+}
+
+#[cfg(test)]
+mod tests;
diff --git a/library/alloc/src/collections/btree/map/entry.rs b/library/alloc/src/collections/btree/map/entry.rs
new file mode 100644
index 000000000..b6eecf9b0
--- /dev/null
+++ b/library/alloc/src/collections/btree/map/entry.rs
@@ -0,0 +1,555 @@
+use core::fmt::{self, Debug};
+use core::marker::PhantomData;
+use core::mem;
+
+use crate::alloc::{Allocator, Global};
+
+use super::super::borrow::DormantMutRef;
+use super::super::node::{marker, Handle, NodeRef};
+use super::BTreeMap;
+
+use Entry::*;
+
+/// A view into a single entry in a map, which may either be vacant or occupied.
+///
+/// This `enum` is constructed from the [`entry`] method on [`BTreeMap`].
+///
+/// [`entry`]: BTreeMap::entry
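+///
+/// # Examples
+///
+/// ```
+/// use std::collections::BTreeMap;
+/// use std::collections::btree_map::Entry;
+///
+/// let mut map: BTreeMap<&str, u32> = BTreeMap::new();
+///
+/// // Count an occurrence of "poneyland", whichever state the entry is in.
+/// match map.entry("poneyland") {
+///     Entry::Vacant(entry) => { entry.insert(1); }
+///     Entry::Occupied(mut entry) => { *entry.get_mut() += 1; }
+/// }
+/// assert_eq!(map["poneyland"], 1);
+/// ```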
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "BTreeEntry")]
+pub enum Entry<
+ 'a,
+ K: 'a,
+ V: 'a,
+ #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator + Clone = Global,
+> {
+ /// A vacant entry.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ Vacant(#[stable(feature = "rust1", since = "1.0.0")] VacantEntry<'a, K, V, A>),
+
+ /// An occupied entry.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ Occupied(#[stable(feature = "rust1", since = "1.0.0")] OccupiedEntry<'a, K, V, A>),
+}
+
+#[stable(feature = "debug_btree_map", since = "1.12.0")]
+impl<K: Debug + Ord, V: Debug, A: Allocator + Clone> Debug for Entry<'_, K, V, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ Vacant(ref v) => f.debug_tuple("Entry").field(v).finish(),
+ Occupied(ref o) => f.debug_tuple("Entry").field(o).finish(),
+ }
+ }
+}
+
+/// A view into a vacant entry in a `BTreeMap`.
+/// It is part of the [`Entry`] enum.
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct VacantEntry<
+ 'a,
+ K,
+ V,
+ #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator + Clone = Global,
+> {
+ pub(super) key: K,
+    /// `None` for an (empty) map without a root.
+ pub(super) handle: Option<Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge>>,
+ pub(super) dormant_map: DormantMutRef<'a, BTreeMap<K, V, A>>,
+
+    /// The BTreeMap will outlive this VacantEntry, so we don't care about drop order for `alloc`.
+ pub(super) alloc: A,
+
+ // Be invariant in `K` and `V`
+ pub(super) _marker: PhantomData<&'a mut (K, V)>,
+}
+
+#[stable(feature = "debug_btree_map", since = "1.12.0")]
+impl<K: Debug + Ord, V, A: Allocator + Clone> Debug for VacantEntry<'_, K, V, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("VacantEntry").field(self.key()).finish()
+ }
+}
+
+/// A view into an occupied entry in a `BTreeMap`.
+/// It is part of the [`Entry`] enum.
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct OccupiedEntry<
+ 'a,
+ K,
+ V,
+ #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator + Clone = Global,
+> {
+ pub(super) handle: Handle<NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>, marker::KV>,
+ pub(super) dormant_map: DormantMutRef<'a, BTreeMap<K, V, A>>,
+
+    /// The BTreeMap will outlive this OccupiedEntry, so we don't care about drop order for `alloc`.
+ pub(super) alloc: A,
+
+ // Be invariant in `K` and `V`
+ pub(super) _marker: PhantomData<&'a mut (K, V)>,
+}
+
+#[stable(feature = "debug_btree_map", since = "1.12.0")]
+impl<K: Debug + Ord, V: Debug, A: Allocator + Clone> Debug for OccupiedEntry<'_, K, V, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("OccupiedEntry").field("key", self.key()).field("value", self.get()).finish()
+ }
+}
+
+/// The error returned by [`try_insert`](BTreeMap::try_insert) when the key already exists.
+///
+/// Contains the occupied entry, and the value that was not inserted.
+#[unstable(feature = "map_try_insert", issue = "82766")]
+pub struct OccupiedError<'a, K: 'a, V: 'a, A: Allocator + Clone = Global> {
+ /// The entry in the map that was already occupied.
+ pub entry: OccupiedEntry<'a, K, V, A>,
+ /// The value which was not inserted, because the entry was already occupied.
+ pub value: V,
+}
+
+#[unstable(feature = "map_try_insert", issue = "82766")]
+impl<K: Debug + Ord, V: Debug, A: Allocator + Clone> Debug for OccupiedError<'_, K, V, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("OccupiedError")
+ .field("key", self.entry.key())
+ .field("old_value", self.entry.get())
+ .field("new_value", &self.value)
+ .finish()
+ }
+}
+
+#[unstable(feature = "map_try_insert", issue = "82766")]
+impl<'a, K: Debug + Ord, V: Debug, A: Allocator + Clone> fmt::Display
+ for OccupiedError<'a, K, V, A>
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(
+ f,
+ "failed to insert {:?}, key {:?} already exists with value {:?}",
+ self.value,
+ self.entry.key(),
+ self.entry.get(),
+ )
+ }
+}
+
+impl<'a, K: Ord, V, A: Allocator + Clone> Entry<'a, K, V, A> {
+ /// Ensures a value is in the entry by inserting the default if empty, and returns
+ /// a mutable reference to the value in the entry.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map: BTreeMap<&str, usize> = BTreeMap::new();
+ /// map.entry("poneyland").or_insert(12);
+ ///
+ /// assert_eq!(map["poneyland"], 12);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn or_insert(self, default: V) -> &'a mut V {
+ match self {
+ Occupied(entry) => entry.into_mut(),
+ Vacant(entry) => entry.insert(default),
+ }
+ }
+
+ /// Ensures a value is in the entry by inserting the result of the default function if empty,
+ /// and returns a mutable reference to the value in the entry.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map: BTreeMap<&str, String> = BTreeMap::new();
+ /// let s = "hoho".to_string();
+ ///
+ /// map.entry("poneyland").or_insert_with(|| s);
+ ///
+ /// assert_eq!(map["poneyland"], "hoho".to_string());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn or_insert_with<F: FnOnce() -> V>(self, default: F) -> &'a mut V {
+ match self {
+ Occupied(entry) => entry.into_mut(),
+ Vacant(entry) => entry.insert(default()),
+ }
+ }
+
+ /// Ensures a value is in the entry by inserting, if empty, the result of the default function.
+    /// This method allows for generating key-derived values for insertion by
+    /// providing the default function with a reference to the key that was
+    /// moved during the `.entry(key)` method call.
+ ///
+ /// The reference to the moved key is provided so that cloning or copying the key is
+ /// unnecessary, unlike with `.or_insert_with(|| ... )`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map: BTreeMap<&str, usize> = BTreeMap::new();
+ ///
+ /// map.entry("poneyland").or_insert_with_key(|key| key.chars().count());
+ ///
+ /// assert_eq!(map["poneyland"], 9);
+ /// ```
+ #[inline]
+ #[stable(feature = "or_insert_with_key", since = "1.50.0")]
+ pub fn or_insert_with_key<F: FnOnce(&K) -> V>(self, default: F) -> &'a mut V {
+ match self {
+ Occupied(entry) => entry.into_mut(),
+ Vacant(entry) => {
+ let value = default(entry.key());
+ entry.insert(value)
+ }
+ }
+ }
+
+ /// Returns a reference to this entry's key.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map: BTreeMap<&str, usize> = BTreeMap::new();
+ /// assert_eq!(map.entry("poneyland").key(), &"poneyland");
+ /// ```
+ #[stable(feature = "map_entry_keys", since = "1.10.0")]
+ pub fn key(&self) -> &K {
+ match *self {
+ Occupied(ref entry) => entry.key(),
+ Vacant(ref entry) => entry.key(),
+ }
+ }
+
+ /// Provides in-place mutable access to an occupied entry before any
+ /// potential inserts into the map.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map: BTreeMap<&str, usize> = BTreeMap::new();
+ ///
+ /// map.entry("poneyland")
+ /// .and_modify(|e| { *e += 1 })
+ /// .or_insert(42);
+ /// assert_eq!(map["poneyland"], 42);
+ ///
+ /// map.entry("poneyland")
+ /// .and_modify(|e| { *e += 1 })
+ /// .or_insert(42);
+ /// assert_eq!(map["poneyland"], 43);
+ /// ```
+ #[stable(feature = "entry_and_modify", since = "1.26.0")]
+ pub fn and_modify<F>(self, f: F) -> Self
+ where
+ F: FnOnce(&mut V),
+ {
+ match self {
+ Occupied(mut entry) => {
+ f(entry.get_mut());
+ Occupied(entry)
+ }
+ Vacant(entry) => Vacant(entry),
+ }
+ }
+}
+
+impl<'a, K: Ord, V: Default, A: Allocator + Clone> Entry<'a, K, V, A> {
+ #[stable(feature = "entry_or_default", since = "1.28.0")]
+ /// Ensures a value is in the entry by inserting the default value if empty,
+ /// and returns a mutable reference to the value in the entry.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map: BTreeMap<&str, Option<usize>> = BTreeMap::new();
+ /// map.entry("poneyland").or_default();
+ ///
+ /// assert_eq!(map["poneyland"], None);
+ /// ```
+ pub fn or_default(self) -> &'a mut V {
+ match self {
+ Occupied(entry) => entry.into_mut(),
+ Vacant(entry) => entry.insert(Default::default()),
+ }
+ }
+}
+
+impl<'a, K: Ord, V, A: Allocator + Clone> VacantEntry<'a, K, V, A> {
+ /// Gets a reference to the key that would be used when inserting a value
+ /// through the VacantEntry.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map: BTreeMap<&str, usize> = BTreeMap::new();
+ /// assert_eq!(map.entry("poneyland").key(), &"poneyland");
+ /// ```
+ #[stable(feature = "map_entry_keys", since = "1.10.0")]
+ pub fn key(&self) -> &K {
+ &self.key
+ }
+
+ /// Take ownership of the key.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ /// use std::collections::btree_map::Entry;
+ ///
+ /// let mut map: BTreeMap<&str, usize> = BTreeMap::new();
+ ///
+ /// if let Entry::Vacant(v) = map.entry("poneyland") {
+ /// v.into_key();
+ /// }
+ /// ```
+ #[stable(feature = "map_entry_recover_keys2", since = "1.12.0")]
+ pub fn into_key(self) -> K {
+ self.key
+ }
+
+ /// Sets the value of the entry with the `VacantEntry`'s key,
+ /// and returns a mutable reference to it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ /// use std::collections::btree_map::Entry;
+ ///
+ /// let mut map: BTreeMap<&str, u32> = BTreeMap::new();
+ ///
+ /// if let Entry::Vacant(o) = map.entry("poneyland") {
+ /// o.insert(37);
+ /// }
+ /// assert_eq!(map["poneyland"], 37);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn insert(self, value: V) -> &'a mut V {
+ let out_ptr = match self.handle {
+ None => {
+ // SAFETY: There is no tree yet so no reference to it exists.
+ let map = unsafe { self.dormant_map.awaken() };
+ let mut root = NodeRef::new_leaf(self.alloc.clone());
+ let val_ptr = root.borrow_mut().push(self.key, value) as *mut V;
+ map.root = Some(root.forget_type());
+ map.length = 1;
+ val_ptr
+ }
+ Some(handle) => match handle.insert_recursing(self.key, value, self.alloc.clone()) {
+ (None, val_ptr) => {
+ // SAFETY: We have consumed self.handle.
+ let map = unsafe { self.dormant_map.awaken() };
+ map.length += 1;
+ val_ptr
+ }
+ (Some(ins), val_ptr) => {
+ drop(ins.left);
+ // SAFETY: We have consumed self.handle and dropped the
+ // remaining reference to the tree, ins.left.
+ let map = unsafe { self.dormant_map.awaken() };
+ let root = map.root.as_mut().unwrap(); // same as ins.left
+ root.push_internal_level(self.alloc).push(ins.kv.0, ins.kv.1, ins.right);
+ map.length += 1;
+ val_ptr
+ }
+ },
+ };
+        // Now that we have finished growing the tree using borrowed references,
+        // dereference the pointer to a part of it that we picked up along the way.
+ unsafe { &mut *out_ptr }
+ }
+}
+
+impl<'a, K: Ord, V, A: Allocator + Clone> OccupiedEntry<'a, K, V, A> {
+ /// Gets a reference to the key in the entry.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map: BTreeMap<&str, usize> = BTreeMap::new();
+ /// map.entry("poneyland").or_insert(12);
+ /// assert_eq!(map.entry("poneyland").key(), &"poneyland");
+ /// ```
+ #[must_use]
+ #[stable(feature = "map_entry_keys", since = "1.10.0")]
+ pub fn key(&self) -> &K {
+ self.handle.reborrow().into_kv().0
+ }
+
+ /// Take ownership of the key and value from the map.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ /// use std::collections::btree_map::Entry;
+ ///
+ /// let mut map: BTreeMap<&str, usize> = BTreeMap::new();
+ /// map.entry("poneyland").or_insert(12);
+ ///
+ /// if let Entry::Occupied(o) = map.entry("poneyland") {
+ /// // We delete the entry from the map.
+ /// o.remove_entry();
+ /// }
+ ///
+    /// // If we now try to get the value, it will panic:
+ /// // println!("{}", map["poneyland"]);
+ /// ```
+ #[stable(feature = "map_entry_recover_keys2", since = "1.12.0")]
+ pub fn remove_entry(self) -> (K, V) {
+ self.remove_kv()
+ }
+
+ /// Gets a reference to the value in the entry.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ /// use std::collections::btree_map::Entry;
+ ///
+ /// let mut map: BTreeMap<&str, usize> = BTreeMap::new();
+ /// map.entry("poneyland").or_insert(12);
+ ///
+ /// if let Entry::Occupied(o) = map.entry("poneyland") {
+ /// assert_eq!(o.get(), &12);
+ /// }
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn get(&self) -> &V {
+ self.handle.reborrow().into_kv().1
+ }
+
+ /// Gets a mutable reference to the value in the entry.
+ ///
+ /// If you need a reference to the `OccupiedEntry` that may outlive the
+ /// destruction of the `Entry` value, see [`into_mut`].
+ ///
+ /// [`into_mut`]: OccupiedEntry::into_mut
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ /// use std::collections::btree_map::Entry;
+ ///
+ /// let mut map: BTreeMap<&str, usize> = BTreeMap::new();
+ /// map.entry("poneyland").or_insert(12);
+ ///
+ /// assert_eq!(map["poneyland"], 12);
+ /// if let Entry::Occupied(mut o) = map.entry("poneyland") {
+ /// *o.get_mut() += 10;
+ /// assert_eq!(*o.get(), 22);
+ ///
+ /// // We can use the same Entry multiple times.
+ /// *o.get_mut() += 2;
+ /// }
+ /// assert_eq!(map["poneyland"], 24);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn get_mut(&mut self) -> &mut V {
+ self.handle.kv_mut().1
+ }
+
+ /// Converts the entry into a mutable reference to its value.
+ ///
+ /// If you need multiple references to the `OccupiedEntry`, see [`get_mut`].
+ ///
+ /// [`get_mut`]: OccupiedEntry::get_mut
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ /// use std::collections::btree_map::Entry;
+ ///
+ /// let mut map: BTreeMap<&str, usize> = BTreeMap::new();
+ /// map.entry("poneyland").or_insert(12);
+ ///
+ /// assert_eq!(map["poneyland"], 12);
+ /// if let Entry::Occupied(o) = map.entry("poneyland") {
+ /// *o.into_mut() += 10;
+ /// }
+ /// assert_eq!(map["poneyland"], 22);
+ /// ```
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn into_mut(self) -> &'a mut V {
+ self.handle.into_val_mut()
+ }
+
+ /// Sets the value of the entry with the `OccupiedEntry`'s key,
+ /// and returns the entry's old value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ /// use std::collections::btree_map::Entry;
+ ///
+ /// let mut map: BTreeMap<&str, usize> = BTreeMap::new();
+ /// map.entry("poneyland").or_insert(12);
+ ///
+ /// if let Entry::Occupied(mut o) = map.entry("poneyland") {
+ /// assert_eq!(o.insert(15), 12);
+ /// }
+ /// assert_eq!(map["poneyland"], 15);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn insert(&mut self, value: V) -> V {
+ mem::replace(self.get_mut(), value)
+ }
+
+ /// Takes the value of the entry out of the map, and returns it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ /// use std::collections::btree_map::Entry;
+ ///
+ /// let mut map: BTreeMap<&str, usize> = BTreeMap::new();
+ /// map.entry("poneyland").or_insert(12);
+ ///
+ /// if let Entry::Occupied(o) = map.entry("poneyland") {
+ /// assert_eq!(o.remove(), 12);
+ /// }
+ /// // If we try to get "poneyland"'s value, it'll panic:
+ /// // println!("{}", map["poneyland"]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn remove(self) -> V {
+ self.remove_kv().1
+ }
+
+    // Body of `remove_entry`, kept separate because the name reflects the returned pair.
+ pub(super) fn remove_kv(self) -> (K, V) {
+ let mut emptied_internal_root = false;
+ let (old_kv, _) =
+ self.handle.remove_kv_tracking(|| emptied_internal_root = true, self.alloc.clone());
+ // SAFETY: we consumed the intermediate root borrow, `self.handle`.
+ let map = unsafe { self.dormant_map.awaken() };
+ map.length -= 1;
+ if emptied_internal_root {
+ let root = map.root.as_mut().unwrap();
+ root.pop_internal_level(self.alloc);
+ }
+ old_kv
+ }
+}
diff --git a/library/alloc/src/collections/btree/map/tests.rs b/library/alloc/src/collections/btree/map/tests.rs
new file mode 100644
index 000000000..4c372b1d6
--- /dev/null
+++ b/library/alloc/src/collections/btree/map/tests.rs
@@ -0,0 +1,2338 @@
+use super::super::testing::crash_test::{CrashTestDummy, Panic};
+use super::super::testing::ord_chaos::{Cyclic3, Governed, Governor};
+use super::super::testing::rng::DeterministicRng;
+use super::Entry::{Occupied, Vacant};
+use super::*;
+use crate::boxed::Box;
+use crate::fmt::Debug;
+use crate::rc::Rc;
+use crate::string::{String, ToString};
+use crate::vec::Vec;
+use std::cmp::Ordering;
+use std::convert::TryFrom;
+use std::iter::{self, FromIterator};
+use std::mem;
+use std::ops::Bound::{self, Excluded, Included, Unbounded};
+use std::ops::RangeBounds;
+use std::panic::{catch_unwind, AssertUnwindSafe};
+use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};
+
+// Minimum number of elements to insert, to guarantee a tree with 2 levels,
+// i.e., a tree whose root is an internal node at height 1, with edges to leaf nodes.
+// It's not the minimum size: removing an element from such a tree does not always reduce height.
+const MIN_INSERTS_HEIGHT_1: usize = node::CAPACITY + 1;
+
+// Minimum number of elements to insert in ascending order, to guarantee a tree with 3 levels,
+// i.e., a tree whose root is an internal node at height 2, with edges to more internal nodes.
+// It's not the minimum size: removing an element from such a tree does not always reduce height.
+const MIN_INSERTS_HEIGHT_2: usize = 89;
+
+// Gathers all references from a mutable iterator and makes sure Miri notices if
+// using them is dangerous.
+fn test_all_refs<'a, T: 'a>(dummy: &mut T, iter: impl Iterator<Item = &'a mut T>) {
+ // Gather all those references.
+ let mut refs: Vec<&mut T> = iter.collect();
+ // Use them all. Twice, to be sure we got all interleavings.
+ for r in refs.iter_mut() {
+ mem::swap(dummy, r);
+ }
+ for r in refs {
+ mem::swap(dummy, r);
+ }
+}
+
+impl<K, V> BTreeMap<K, V> {
+ // Panics if the map (or the code navigating it) is corrupted.
+ fn check_invariants(&self) {
+ if let Some(root) = &self.root {
+ let root_node = root.reborrow();
+
+ // Check the back pointers top-down, before we attempt to rely on
+ // more serious navigation code.
+ assert!(root_node.ascend().is_err());
+ root_node.assert_back_pointers();
+
+ // Check consistency of `length` with what navigation code encounters.
+ assert_eq!(self.length, root_node.calc_length());
+
+ // Lastly, check the invariant causing the least harm.
+ root_node.assert_min_len(if root_node.height() > 0 { 1 } else { 0 });
+ } else {
+ assert_eq!(self.length, 0);
+ }
+
+ // Check that `assert_strictly_ascending` will encounter all keys.
+ assert_eq!(self.length, self.keys().count());
+ }
+
+ // Panics if the map is corrupted or if the keys are not in strictly
+ // ascending order, in the current opinion of the `Ord` implementation.
+ // If the `Ord` implementation violates transitivity, this method does not
+ // guarantee that all keys are unique, just that adjacent keys are unique.
+ fn check(&self)
+ where
+ K: Debug + Ord,
+ {
+ self.check_invariants();
+ self.assert_strictly_ascending();
+ }
+
+ // Returns the height of the root, if any.
+ fn height(&self) -> Option<usize> {
+ self.root.as_ref().map(node::Root::height)
+ }
+
+ fn dump_keys(&self) -> String
+ where
+ K: Debug,
+ {
+ if let Some(root) = self.root.as_ref() {
+ root.reborrow().dump_keys()
+ } else {
+ String::from("not yet allocated")
+ }
+ }
+
+ // Panics if the keys are not in strictly ascending order.
+ fn assert_strictly_ascending(&self)
+ where
+ K: Debug + Ord,
+ {
+ let mut keys = self.keys();
+ if let Some(mut previous) = keys.next() {
+ for next in keys {
+ assert!(previous < next, "{:?} >= {:?}", previous, next);
+ previous = next;
+ }
+ }
+ }
+
+ // Transforms the tree to minimize wasted space, obtaining fewer nodes that
+ // are mostly filled up to their capacity. The same compact tree could have
+ // been obtained by inserting keys in a shrewd order.
+ fn compact(&mut self)
+ where
+ K: Ord,
+ {
+ let iter = mem::take(self).into_iter();
+ if !iter.is_empty() {
+ self.root.insert(Root::new(*self.alloc)).bulk_push(iter, &mut self.length, *self.alloc);
+ }
+ }
+}
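+
+// A sketch of the property `compact` exists for, which is also what
+// `test_insert_into_full_height_1` below relies on: compacting enough
+// ascending keys for height 1 leaves both outer leaves at capacity.
+#[test]
+fn test_compact_full_height_1() {
+ let size = node::CAPACITY + 1 + node::CAPACITY;
+ let mut map = BTreeMap::from_iter((0..size).map(|i| (i, ())));
+ map.compact();
+ let root_node = map.root.as_ref().unwrap().reborrow();
+ assert_eq!(root_node.len(), 1);
+ assert_eq!(root_node.first_leaf_edge().into_node().len(), node::CAPACITY);
+ assert_eq!(root_node.last_leaf_edge().into_node().len(), node::CAPACITY);
+ map.check();
+}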
+
+impl<'a, K: 'a, V: 'a> NodeRef<marker::Immut<'a>, K, V, marker::LeafOrInternal> {
+ fn assert_min_len(self, min_len: usize) {
+ assert!(self.len() >= min_len, "node len {} < {}", self.len(), min_len);
+ if let node::ForceResult::Internal(node) = self.force() {
+ for idx in 0..=node.len() {
+ let edge = unsafe { Handle::new_edge(node, idx) };
+ edge.descend().assert_min_len(MIN_LEN);
+ }
+ }
+ }
+}
+
+// Tests our value of MIN_INSERTS_HEIGHT_2. Failure may mean you just need to
+// adapt that value to match a change in node::CAPACITY or the choices made
+// during insertion; otherwise other test cases may fail or be less useful.
+#[test]
+fn test_levels() {
+ let mut map = BTreeMap::new();
+ map.check();
+ assert_eq!(map.height(), None);
+ assert_eq!(map.len(), 0);
+
+ map.insert(0, ());
+ while map.height() == Some(0) {
+ let last_key = *map.last_key_value().unwrap().0;
+ map.insert(last_key + 1, ());
+ }
+ map.check();
+ // Structure:
+ // - 1 element in internal root node with 2 children
+ // - 6 elements in left leaf child
+ // - 5 elements in right leaf child
+ assert_eq!(map.height(), Some(1));
+ assert_eq!(map.len(), MIN_INSERTS_HEIGHT_1, "{}", map.dump_keys());
+
+ while map.height() == Some(1) {
+ let last_key = *map.last_key_value().unwrap().0;
+ map.insert(last_key + 1, ());
+ }
+ map.check();
+ // Structure:
+ // - 1 element in internal root node with 2 children
+ // - 6 elements in left internal child with 7 grandchildren
+ // - 42 elements in left child's 7 grandchildren with 6 elements each
+ // - 5 elements in right internal child with 6 grandchildren
+ // - 30 elements in right child's 5 first grandchildren with 6 elements each
+ // - 5 elements in right child's last grandchild
+ assert_eq!(map.height(), Some(2));
+ assert_eq!(map.len(), MIN_INSERTS_HEIGHT_2, "{}", map.dump_keys());
+}
+
+// Ensures the testing infrastructure usually notices order violations.
+#[test]
+#[should_panic]
+fn test_check_ord_chaos() {
+ let gov = Governor::new();
+ let map = BTreeMap::from([(Governed(1, &gov), ()), (Governed(2, &gov), ())]);
+ gov.flip();
+ map.check();
+}
+
+// Ensures the testing infrastructure doesn't always mind order violations.
+#[test]
+fn test_check_invariants_ord_chaos() {
+ let gov = Governor::new();
+ let map = BTreeMap::from([(Governed(1, &gov), ()), (Governed(2, &gov), ())]);
+ gov.flip();
+ map.check_invariants();
+}
+
+#[test]
+fn test_basic_large() {
+ let mut map = BTreeMap::new();
+ // Miri is too slow
+ let size = if cfg!(miri) { MIN_INSERTS_HEIGHT_2 } else { 10000 };
+ let size = size + (size % 2); // round up to even number
+ assert_eq!(map.len(), 0);
+
+ for i in 0..size {
+ assert_eq!(map.insert(i, 10 * i), None);
+ assert_eq!(map.len(), i + 1);
+ }
+
+ assert_eq!(map.first_key_value(), Some((&0, &0)));
+ assert_eq!(map.last_key_value(), Some((&(size - 1), &(10 * (size - 1)))));
+ assert_eq!(map.first_entry().unwrap().key(), &0);
+ assert_eq!(map.last_entry().unwrap().key(), &(size - 1));
+
+ for i in 0..size {
+ assert_eq!(map.get(&i).unwrap(), &(i * 10));
+ }
+
+ for i in size..size * 2 {
+ assert_eq!(map.get(&i), None);
+ }
+
+ for i in 0..size {
+ assert_eq!(map.insert(i, 100 * i), Some(10 * i));
+ assert_eq!(map.len(), size);
+ }
+
+ for i in 0..size {
+ assert_eq!(map.get(&i).unwrap(), &(i * 100));
+ }
+
+ for i in 0..size / 2 {
+ assert_eq!(map.remove(&(i * 2)), Some(i * 200));
+ assert_eq!(map.len(), size - i - 1);
+ }
+
+ for i in 0..size / 2 {
+ assert_eq!(map.get(&(2 * i)), None);
+ assert_eq!(map.get(&(2 * i + 1)).unwrap(), &(i * 200 + 100));
+ }
+
+ for i in 0..size / 2 {
+ assert_eq!(map.remove(&(2 * i)), None);
+ assert_eq!(map.remove(&(2 * i + 1)), Some(i * 200 + 100));
+ assert_eq!(map.len(), size / 2 - i - 1);
+ }
+ map.check();
+}
+
+#[test]
+fn test_basic_small() {
+ let mut map = BTreeMap::new();
+ // Empty, root is absent (None):
+ assert_eq!(map.remove(&1), None);
+ assert_eq!(map.len(), 0);
+ assert_eq!(map.get(&1), None);
+ assert_eq!(map.get_mut(&1), None);
+ assert_eq!(map.first_key_value(), None);
+ assert_eq!(map.last_key_value(), None);
+ assert_eq!(map.keys().count(), 0);
+ assert_eq!(map.values().count(), 0);
+ assert_eq!(map.range(..).next(), None);
+ assert_eq!(map.range(..1).next(), None);
+ assert_eq!(map.range(1..).next(), None);
+ assert_eq!(map.range(1..=1).next(), None);
+ assert_eq!(map.range(1..2).next(), None);
+ assert_eq!(map.height(), None);
+ assert_eq!(map.insert(1, 1), None);
+ assert_eq!(map.height(), Some(0));
+ map.check();
+
+ // 1 key-value pair:
+ assert_eq!(map.len(), 1);
+ assert_eq!(map.get(&1), Some(&1));
+ assert_eq!(map.get_mut(&1), Some(&mut 1));
+ assert_eq!(map.first_key_value(), Some((&1, &1)));
+ assert_eq!(map.last_key_value(), Some((&1, &1)));
+ assert_eq!(map.keys().collect::<Vec<_>>(), vec![&1]);
+ assert_eq!(map.values().collect::<Vec<_>>(), vec![&1]);
+ assert_eq!(map.insert(1, 2), Some(1));
+ assert_eq!(map.len(), 1);
+ assert_eq!(map.get(&1), Some(&2));
+ assert_eq!(map.get_mut(&1), Some(&mut 2));
+ assert_eq!(map.first_key_value(), Some((&1, &2)));
+ assert_eq!(map.last_key_value(), Some((&1, &2)));
+ assert_eq!(map.keys().collect::<Vec<_>>(), vec![&1]);
+ assert_eq!(map.values().collect::<Vec<_>>(), vec![&2]);
+ assert_eq!(map.insert(2, 4), None);
+ assert_eq!(map.height(), Some(0));
+ map.check();
+
+ // 2 key-value pairs:
+ assert_eq!(map.len(), 2);
+ assert_eq!(map.get(&2), Some(&4));
+ assert_eq!(map.get_mut(&2), Some(&mut 4));
+ assert_eq!(map.first_key_value(), Some((&1, &2)));
+ assert_eq!(map.last_key_value(), Some((&2, &4)));
+ assert_eq!(map.keys().collect::<Vec<_>>(), vec![&1, &2]);
+ assert_eq!(map.values().collect::<Vec<_>>(), vec![&2, &4]);
+ assert_eq!(map.remove(&1), Some(2));
+ assert_eq!(map.height(), Some(0));
+ map.check();
+
+ // 1 key-value pair:
+ assert_eq!(map.len(), 1);
+ assert_eq!(map.get(&1), None);
+ assert_eq!(map.get_mut(&1), None);
+ assert_eq!(map.get(&2), Some(&4));
+ assert_eq!(map.get_mut(&2), Some(&mut 4));
+ assert_eq!(map.first_key_value(), Some((&2, &4)));
+ assert_eq!(map.last_key_value(), Some((&2, &4)));
+ assert_eq!(map.keys().collect::<Vec<_>>(), vec![&2]);
+ assert_eq!(map.values().collect::<Vec<_>>(), vec![&4]);
+ assert_eq!(map.remove(&2), Some(4));
+ assert_eq!(map.height(), Some(0));
+ map.check();
+
+ // Empty but root is owned (Some(...)):
+ assert_eq!(map.len(), 0);
+ assert_eq!(map.get(&1), None);
+ assert_eq!(map.get_mut(&1), None);
+ assert_eq!(map.first_key_value(), None);
+ assert_eq!(map.last_key_value(), None);
+ assert_eq!(map.keys().count(), 0);
+ assert_eq!(map.values().count(), 0);
+ assert_eq!(map.range(..).next(), None);
+ assert_eq!(map.range(..1).next(), None);
+ assert_eq!(map.range(1..).next(), None);
+ assert_eq!(map.range(1..=1).next(), None);
+ assert_eq!(map.range(1..2).next(), None);
+ assert_eq!(map.remove(&1), None);
+ assert_eq!(map.height(), Some(0));
+ map.check();
+}
+
+#[test]
+fn test_iter() {
+ // Miri is too slow
+ let size = if cfg!(miri) { 200 } else { 10000 };
+ let mut map = BTreeMap::from_iter((0..size).map(|i| (i, i)));
+
+ fn test<T>(size: usize, mut iter: T)
+ where
+ T: Iterator<Item = (usize, usize)>,
+ {
+ for i in 0..size {
+ assert_eq!(iter.size_hint(), (size - i, Some(size - i)));
+ assert_eq!(iter.next().unwrap(), (i, i));
+ }
+ assert_eq!(iter.size_hint(), (0, Some(0)));
+ assert_eq!(iter.next(), None);
+ }
+ test(size, map.iter().map(|(&k, &v)| (k, v)));
+ test(size, map.iter_mut().map(|(&k, &mut v)| (k, v)));
+ test(size, map.into_iter());
+}
+
+#[test]
+fn test_iter_rev() {
+ // Miri is too slow
+ let size = if cfg!(miri) { 200 } else { 10000 };
+ let mut map = BTreeMap::from_iter((0..size).map(|i| (i, i)));
+
+ fn test<T>(size: usize, mut iter: T)
+ where
+ T: Iterator<Item = (usize, usize)>,
+ {
+ for i in 0..size {
+ assert_eq!(iter.size_hint(), (size - i, Some(size - i)));
+ assert_eq!(iter.next().unwrap(), (size - i - 1, size - i - 1));
+ }
+ assert_eq!(iter.size_hint(), (0, Some(0)));
+ assert_eq!(iter.next(), None);
+ }
+ test(size, map.iter().rev().map(|(&k, &v)| (k, v)));
+ test(size, map.iter_mut().rev().map(|(&k, &mut v)| (k, v)));
+ test(size, map.into_iter().rev());
+}
+
+// Specifically tests iter_mut's ability to mutate the values of pairs in place.
+fn do_test_iter_mut_mutation<T>(size: usize)
+where
+ T: Copy + Debug + Ord + TryFrom<usize>,
+ <T as TryFrom<usize>>::Error: Debug,
+{
+ let zero = T::try_from(0).unwrap();
+ let mut map = BTreeMap::from_iter((0..size).map(|i| (T::try_from(i).unwrap(), zero)));
+
+ // Forward and backward iteration sees enough pairs (also tested elsewhere)
+ assert_eq!(map.iter_mut().count(), size);
+ assert_eq!(map.iter_mut().rev().count(), size);
+
+ // Iterate forwards, trying to mutate to unique values
+ for (i, (k, v)) in map.iter_mut().enumerate() {
+ assert_eq!(*k, T::try_from(i).unwrap());
+ assert_eq!(*v, zero);
+ *v = T::try_from(i + 1).unwrap();
+ }
+
+ // Iterate backwards, checking that mutations succeeded and trying to mutate again
+ for (i, (k, v)) in map.iter_mut().rev().enumerate() {
+ assert_eq!(*k, T::try_from(size - i - 1).unwrap());
+ assert_eq!(*v, T::try_from(size - i).unwrap());
+ *v = T::try_from(2 * size - i).unwrap();
+ }
+
+ // Check that backward mutations succeeded
+ for (i, (k, v)) in map.iter_mut().enumerate() {
+ assert_eq!(*k, T::try_from(i).unwrap());
+ assert_eq!(*v, T::try_from(size + i + 1).unwrap());
+ }
+ map.check();
+}
+
+#[derive(Clone, Copy, Debug, Eq, PartialEq, PartialOrd, Ord)]
+#[repr(align(32))]
+struct Align32(usize);
+
+impl TryFrom<usize> for Align32 {
+ type Error = ();
+
+ fn try_from(s: usize) -> Result<Align32, ()> {
+ Ok(Align32(s))
+ }
+}
+
+#[test]
+fn test_iter_mut_mutation() {
+ // Check many alignments and trees with roots at various heights.
+ do_test_iter_mut_mutation::<u8>(0);
+ do_test_iter_mut_mutation::<u8>(1);
+ do_test_iter_mut_mutation::<u8>(MIN_INSERTS_HEIGHT_1);
+ do_test_iter_mut_mutation::<u8>(MIN_INSERTS_HEIGHT_2);
+ do_test_iter_mut_mutation::<u16>(1);
+ do_test_iter_mut_mutation::<u16>(MIN_INSERTS_HEIGHT_1);
+ do_test_iter_mut_mutation::<u16>(MIN_INSERTS_HEIGHT_2);
+ do_test_iter_mut_mutation::<u32>(1);
+ do_test_iter_mut_mutation::<u32>(MIN_INSERTS_HEIGHT_1);
+ do_test_iter_mut_mutation::<u32>(MIN_INSERTS_HEIGHT_2);
+ do_test_iter_mut_mutation::<u64>(1);
+ do_test_iter_mut_mutation::<u64>(MIN_INSERTS_HEIGHT_1);
+ do_test_iter_mut_mutation::<u64>(MIN_INSERTS_HEIGHT_2);
+ do_test_iter_mut_mutation::<u128>(1);
+ do_test_iter_mut_mutation::<u128>(MIN_INSERTS_HEIGHT_1);
+ do_test_iter_mut_mutation::<u128>(MIN_INSERTS_HEIGHT_2);
+ do_test_iter_mut_mutation::<Align32>(1);
+ do_test_iter_mut_mutation::<Align32>(MIN_INSERTS_HEIGHT_1);
+ do_test_iter_mut_mutation::<Align32>(MIN_INSERTS_HEIGHT_2);
+}
+
+#[test]
+fn test_values_mut() {
+ let mut a = BTreeMap::from_iter((0..MIN_INSERTS_HEIGHT_2).map(|i| (i, i)));
+ test_all_refs(&mut 13, a.values_mut());
+ a.check();
+}
+
+#[test]
+fn test_values_mut_mutation() {
+ let mut a = BTreeMap::new();
+ a.insert(1, String::from("hello"));
+ a.insert(2, String::from("goodbye"));
+
+ for value in a.values_mut() {
+ value.push('!');
+ }
+
+ let values = Vec::from_iter(a.values().cloned());
+ assert_eq!(values, [String::from("hello!"), String::from("goodbye!")]);
+ a.check();
+}
+
+#[test]
+fn test_iter_entering_root_twice() {
+ let mut map = BTreeMap::from([(0, 0), (1, 1)]);
+ let mut it = map.iter_mut();
+ let front = it.next().unwrap();
+ let back = it.next_back().unwrap();
+ assert_eq!(front, (&0, &mut 0));
+ assert_eq!(back, (&1, &mut 1));
+ *front.1 = 24;
+ *back.1 = 42;
+ assert_eq!(front, (&0, &mut 24));
+ assert_eq!(back, (&1, &mut 42));
+ assert_eq!(it.next(), None);
+ assert_eq!(it.next_back(), None);
+ map.check();
+}
+
+#[test]
+fn test_iter_descending_to_same_node_twice() {
+ let mut map = BTreeMap::from_iter((0..MIN_INSERTS_HEIGHT_1).map(|i| (i, i)));
+ let mut it = map.iter_mut();
+ // Descend into first child.
+ let front = it.next().unwrap();
+ // Descend into first child again, after running through second child.
+ while it.next_back().is_some() {}
+ // Check immutable access.
+ assert_eq!(front, (&0, &mut 0));
+ // Perform mutable access.
+ *front.1 = 42;
+ map.check();
+}
+
+#[test]
+fn test_iter_mixed() {
+ // Miri is too slow
+ let size = if cfg!(miri) { 200 } else { 10000 };
+
+ let mut map = BTreeMap::from_iter((0..size).map(|i| (i, i)));
+
+ fn test<T>(size: usize, mut iter: T)
+ where
+ T: Iterator<Item = (usize, usize)> + DoubleEndedIterator,
+ {
+ for i in 0..size / 4 {
+ assert_eq!(iter.size_hint(), (size - i * 2, Some(size - i * 2)));
+ assert_eq!(iter.next().unwrap(), (i, i));
+ assert_eq!(iter.next_back().unwrap(), (size - i - 1, size - i - 1));
+ }
+ for i in size / 4..size * 3 / 4 {
+ assert_eq!(iter.size_hint(), (size * 3 / 4 - i, Some(size * 3 / 4 - i)));
+ assert_eq!(iter.next().unwrap(), (i, i));
+ }
+ assert_eq!(iter.size_hint(), (0, Some(0)));
+ assert_eq!(iter.next(), None);
+ }
+ test(size, map.iter().map(|(&k, &v)| (k, v)));
+ test(size, map.iter_mut().map(|(&k, &mut v)| (k, v)));
+ test(size, map.into_iter());
+}
+
+#[test]
+fn test_iter_min_max() {
+ let mut a = BTreeMap::new();
+ assert_eq!(a.iter().min(), None);
+ assert_eq!(a.iter().max(), None);
+ assert_eq!(a.iter_mut().min(), None);
+ assert_eq!(a.iter_mut().max(), None);
+ assert_eq!(a.range(..).min(), None);
+ assert_eq!(a.range(..).max(), None);
+ assert_eq!(a.range_mut(..).min(), None);
+ assert_eq!(a.range_mut(..).max(), None);
+ assert_eq!(a.keys().min(), None);
+ assert_eq!(a.keys().max(), None);
+ assert_eq!(a.values().min(), None);
+ assert_eq!(a.values().max(), None);
+ assert_eq!(a.values_mut().min(), None);
+ assert_eq!(a.values_mut().max(), None);
+ a.insert(1, 42);
+ a.insert(2, 24);
+ assert_eq!(a.iter().min(), Some((&1, &42)));
+ assert_eq!(a.iter().max(), Some((&2, &24)));
+ assert_eq!(a.iter_mut().min(), Some((&1, &mut 42)));
+ assert_eq!(a.iter_mut().max(), Some((&2, &mut 24)));
+ assert_eq!(a.range(..).min(), Some((&1, &42)));
+ assert_eq!(a.range(..).max(), Some((&2, &24)));
+ assert_eq!(a.range_mut(..).min(), Some((&1, &mut 42)));
+ assert_eq!(a.range_mut(..).max(), Some((&2, &mut 24)));
+ assert_eq!(a.keys().min(), Some(&1));
+ assert_eq!(a.keys().max(), Some(&2));
+ assert_eq!(a.values().min(), Some(&24));
+ assert_eq!(a.values().max(), Some(&42));
+ assert_eq!(a.values_mut().min(), Some(&mut 24));
+ assert_eq!(a.values_mut().max(), Some(&mut 42));
+ a.check();
+}
+
+fn range_keys(map: &BTreeMap<i32, i32>, range: impl RangeBounds<i32>) -> Vec<i32> {
+ Vec::from_iter(map.range(range).map(|(&k, &v)| {
+ assert_eq!(k, v);
+ k
+ }))
+}
+
+#[test]
+fn test_range_small() {
+ let size = 4;
+
+ let all = Vec::from_iter(1..=size);
+ let (first, last) = (vec![all[0]], vec![all[size as usize - 1]]);
+ let map = BTreeMap::from_iter(all.iter().copied().map(|i| (i, i)));
+
+ assert_eq!(range_keys(&map, (Excluded(0), Excluded(size + 1))), all);
+ assert_eq!(range_keys(&map, (Excluded(0), Included(size + 1))), all);
+ assert_eq!(range_keys(&map, (Excluded(0), Included(size))), all);
+ assert_eq!(range_keys(&map, (Excluded(0), Unbounded)), all);
+ assert_eq!(range_keys(&map, (Included(0), Excluded(size + 1))), all);
+ assert_eq!(range_keys(&map, (Included(0), Included(size + 1))), all);
+ assert_eq!(range_keys(&map, (Included(0), Included(size))), all);
+ assert_eq!(range_keys(&map, (Included(0), Unbounded)), all);
+ assert_eq!(range_keys(&map, (Included(1), Excluded(size + 1))), all);
+ assert_eq!(range_keys(&map, (Included(1), Included(size + 1))), all);
+ assert_eq!(range_keys(&map, (Included(1), Included(size))), all);
+ assert_eq!(range_keys(&map, (Included(1), Unbounded)), all);
+ assert_eq!(range_keys(&map, (Unbounded, Excluded(size + 1))), all);
+ assert_eq!(range_keys(&map, (Unbounded, Included(size + 1))), all);
+ assert_eq!(range_keys(&map, (Unbounded, Included(size))), all);
+ assert_eq!(range_keys(&map, ..), all);
+
+ assert_eq!(range_keys(&map, (Excluded(0), Excluded(1))), vec![]);
+ assert_eq!(range_keys(&map, (Excluded(0), Included(0))), vec![]);
+ assert_eq!(range_keys(&map, (Included(0), Included(0))), vec![]);
+ assert_eq!(range_keys(&map, (Included(0), Excluded(1))), vec![]);
+ assert_eq!(range_keys(&map, (Unbounded, Excluded(1))), vec![]);
+ assert_eq!(range_keys(&map, (Unbounded, Included(0))), vec![]);
+ assert_eq!(range_keys(&map, (Excluded(0), Excluded(2))), first);
+ assert_eq!(range_keys(&map, (Excluded(0), Included(1))), first);
+ assert_eq!(range_keys(&map, (Included(0), Excluded(2))), first);
+ assert_eq!(range_keys(&map, (Included(0), Included(1))), first);
+ assert_eq!(range_keys(&map, (Included(1), Excluded(2))), first);
+ assert_eq!(range_keys(&map, (Included(1), Included(1))), first);
+ assert_eq!(range_keys(&map, (Unbounded, Excluded(2))), first);
+ assert_eq!(range_keys(&map, (Unbounded, Included(1))), first);
+ assert_eq!(range_keys(&map, (Excluded(size - 1), Excluded(size + 1))), last);
+ assert_eq!(range_keys(&map, (Excluded(size - 1), Included(size + 1))), last);
+ assert_eq!(range_keys(&map, (Excluded(size - 1), Included(size))), last);
+ assert_eq!(range_keys(&map, (Excluded(size - 1), Unbounded)), last);
+ assert_eq!(range_keys(&map, (Included(size), Excluded(size + 1))), last);
+ assert_eq!(range_keys(&map, (Included(size), Included(size + 1))), last);
+ assert_eq!(range_keys(&map, (Included(size), Included(size))), last);
+ assert_eq!(range_keys(&map, (Included(size), Unbounded)), last);
+ assert_eq!(range_keys(&map, (Excluded(size), Excluded(size + 1))), vec![]);
+ assert_eq!(range_keys(&map, (Excluded(size), Included(size))), vec![]);
+ assert_eq!(range_keys(&map, (Excluded(size), Unbounded)), vec![]);
+ assert_eq!(range_keys(&map, (Included(size + 1), Excluded(size + 1))), vec![]);
+ assert_eq!(range_keys(&map, (Included(size + 1), Included(size + 1))), vec![]);
+ assert_eq!(range_keys(&map, (Included(size + 1), Unbounded)), vec![]);
+
+ assert_eq!(range_keys(&map, ..3), vec![1, 2]);
+ assert_eq!(range_keys(&map, 3..), vec![3, 4]);
+ assert_eq!(range_keys(&map, 2..=3), vec![2, 3]);
+}
+
+#[test]
+fn test_range_height_1() {
+ // Tests a tree with a root and 2 leaves. We test around the middle of the
+ // keys because one of them is the single key in the root node.
+ let map = BTreeMap::from_iter((0..MIN_INSERTS_HEIGHT_1 as i32).map(|i| (i, i)));
+ let middle = MIN_INSERTS_HEIGHT_1 as i32 / 2;
+ for root in middle - 2..=middle + 2 {
+ assert_eq!(range_keys(&map, (Excluded(root), Excluded(root + 1))), vec![]);
+ assert_eq!(range_keys(&map, (Excluded(root), Included(root + 1))), vec![root + 1]);
+ assert_eq!(range_keys(&map, (Included(root), Excluded(root + 1))), vec![root]);
+ assert_eq!(range_keys(&map, (Included(root), Included(root + 1))), vec![root, root + 1]);
+
+ assert_eq!(range_keys(&map, (Excluded(root - 1), Excluded(root))), vec![]);
+ assert_eq!(range_keys(&map, (Included(root - 1), Excluded(root))), vec![root - 1]);
+ assert_eq!(range_keys(&map, (Excluded(root - 1), Included(root))), vec![root]);
+ assert_eq!(range_keys(&map, (Included(root - 1), Included(root))), vec![root - 1, root]);
+ }
+}
+
+#[test]
+fn test_range_large() {
+ let size = 200;
+
+ let all = Vec::from_iter(1..=size);
+ let (first, last) = (vec![all[0]], vec![all[size as usize - 1]]);
+ let map = BTreeMap::from_iter(all.iter().copied().map(|i| (i, i)));
+
+ assert_eq!(range_keys(&map, (Excluded(0), Excluded(size + 1))), all);
+ assert_eq!(range_keys(&map, (Excluded(0), Included(size + 1))), all);
+ assert_eq!(range_keys(&map, (Excluded(0), Included(size))), all);
+ assert_eq!(range_keys(&map, (Excluded(0), Unbounded)), all);
+ assert_eq!(range_keys(&map, (Included(0), Excluded(size + 1))), all);
+ assert_eq!(range_keys(&map, (Included(0), Included(size + 1))), all);
+ assert_eq!(range_keys(&map, (Included(0), Included(size))), all);
+ assert_eq!(range_keys(&map, (Included(0), Unbounded)), all);
+ assert_eq!(range_keys(&map, (Included(1), Excluded(size + 1))), all);
+ assert_eq!(range_keys(&map, (Included(1), Included(size + 1))), all);
+ assert_eq!(range_keys(&map, (Included(1), Included(size))), all);
+ assert_eq!(range_keys(&map, (Included(1), Unbounded)), all);
+ assert_eq!(range_keys(&map, (Unbounded, Excluded(size + 1))), all);
+ assert_eq!(range_keys(&map, (Unbounded, Included(size + 1))), all);
+ assert_eq!(range_keys(&map, (Unbounded, Included(size))), all);
+ assert_eq!(range_keys(&map, ..), all);
+
+ assert_eq!(range_keys(&map, (Excluded(0), Excluded(1))), vec![]);
+ assert_eq!(range_keys(&map, (Excluded(0), Included(0))), vec![]);
+ assert_eq!(range_keys(&map, (Included(0), Included(0))), vec![]);
+ assert_eq!(range_keys(&map, (Included(0), Excluded(1))), vec![]);
+ assert_eq!(range_keys(&map, (Unbounded, Excluded(1))), vec![]);
+ assert_eq!(range_keys(&map, (Unbounded, Included(0))), vec![]);
+ assert_eq!(range_keys(&map, (Excluded(0), Excluded(2))), first);
+ assert_eq!(range_keys(&map, (Excluded(0), Included(1))), first);
+ assert_eq!(range_keys(&map, (Included(0), Excluded(2))), first);
+ assert_eq!(range_keys(&map, (Included(0), Included(1))), first);
+ assert_eq!(range_keys(&map, (Included(1), Excluded(2))), first);
+ assert_eq!(range_keys(&map, (Included(1), Included(1))), first);
+ assert_eq!(range_keys(&map, (Unbounded, Excluded(2))), first);
+ assert_eq!(range_keys(&map, (Unbounded, Included(1))), first);
+ assert_eq!(range_keys(&map, (Excluded(size - 1), Excluded(size + 1))), last);
+ assert_eq!(range_keys(&map, (Excluded(size - 1), Included(size + 1))), last);
+ assert_eq!(range_keys(&map, (Excluded(size - 1), Included(size))), last);
+ assert_eq!(range_keys(&map, (Excluded(size - 1), Unbounded)), last);
+ assert_eq!(range_keys(&map, (Included(size), Excluded(size + 1))), last);
+ assert_eq!(range_keys(&map, (Included(size), Included(size + 1))), last);
+ assert_eq!(range_keys(&map, (Included(size), Included(size))), last);
+ assert_eq!(range_keys(&map, (Included(size), Unbounded)), last);
+ assert_eq!(range_keys(&map, (Excluded(size), Excluded(size + 1))), vec![]);
+ assert_eq!(range_keys(&map, (Excluded(size), Included(size))), vec![]);
+ assert_eq!(range_keys(&map, (Excluded(size), Unbounded)), vec![]);
+ assert_eq!(range_keys(&map, (Included(size + 1), Excluded(size + 1))), vec![]);
+ assert_eq!(range_keys(&map, (Included(size + 1), Included(size + 1))), vec![]);
+ assert_eq!(range_keys(&map, (Included(size + 1), Unbounded)), vec![]);
+
+ fn check<'a, L, R>(lhs: L, rhs: R)
+ where
+ L: IntoIterator<Item = (&'a i32, &'a i32)>,
+ R: IntoIterator<Item = (&'a i32, &'a i32)>,
+ {
+ assert_eq!(Vec::from_iter(lhs), Vec::from_iter(rhs));
+ }
+
+ check(map.range(..=100), map.range(..101));
+ check(map.range(5..=8), vec![(&5, &5), (&6, &6), (&7, &7), (&8, &8)]);
+ check(map.range(-1..=2), vec![(&1, &1), (&2, &2)]);
+}
+
+#[test]
+fn test_range_inclusive_max_value() {
+ let max = usize::MAX;
+ let map = BTreeMap::from([(max, 0)]);
+ assert_eq!(Vec::from_iter(map.range(max..=max)), &[(&max, &0)]);
+}
+
+#[test]
+fn test_range_equal_empty_cases() {
+ let map = BTreeMap::from_iter((0..5).map(|i| (i, i)));
+ assert_eq!(map.range((Included(2), Excluded(2))).next(), None);
+ assert_eq!(map.range((Excluded(2), Included(2))).next(), None);
+}
+
+#[test]
+#[should_panic]
+fn test_range_equal_excluded() {
+ let map = BTreeMap::from_iter((0..5).map(|i| (i, i)));
+ let _ = map.range((Excluded(2), Excluded(2)));
+}
+
+#[test]
+#[should_panic]
+fn test_range_backwards_1() {
+ let map = BTreeMap::from_iter((0..5).map(|i| (i, i)));
+ let _ = map.range((Included(3), Included(2)));
+}
+
+#[test]
+#[should_panic]
+fn test_range_backwards_2() {
+ let map = BTreeMap::from_iter((0..5).map(|i| (i, i)));
+ let _ = map.range((Included(3), Excluded(2)));
+}
+
+#[test]
+#[should_panic]
+fn test_range_backwards_3() {
+ let map = BTreeMap::from_iter((0..5).map(|i| (i, i)));
+ let _ = map.range((Excluded(3), Included(2)));
+}
+
+#[test]
+#[should_panic]
+fn test_range_backwards_4() {
+ let map = BTreeMap::from_iter((0..5).map(|i| (i, i)));
+ let _ = map.range((Excluded(3), Excluded(2)));
+}
+
+#[test]
+#[should_panic]
+fn test_range_finding_ill_order_in_map() {
+ let mut map = BTreeMap::new();
+ map.insert(Cyclic3::B, ());
+ // Lacking static_assert, call `range` conditionally, to emphasise that
+ // we cause a different panic than `test_range_backwards_1` does.
+ // A more refined `should_panic` would be welcome.
+ if Cyclic3::C < Cyclic3::A {
+ let _ = map.range(Cyclic3::C..=Cyclic3::A);
+ }
+}
+
+#[test]
+#[should_panic]
+fn test_range_finding_ill_order_in_range_ord() {
+ // Has proper order the first time asked, then flips around.
+ struct EvilTwin(i32);
+
+ impl PartialOrd for EvilTwin {
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+ }
+
+ static COMPARES: AtomicUsize = AtomicUsize::new(0);
+ impl Ord for EvilTwin {
+ fn cmp(&self, other: &Self) -> Ordering {
+ let ord = self.0.cmp(&other.0);
+ if COMPARES.fetch_add(1, SeqCst) > 0 { ord.reverse() } else { ord }
+ }
+ }
+
+ impl PartialEq for EvilTwin {
+ fn eq(&self, other: &Self) -> bool {
+ self.0.eq(&other.0)
+ }
+ }
+
+ impl Eq for EvilTwin {}
+
+ #[derive(PartialEq, Eq, PartialOrd, Ord)]
+ struct CompositeKey(i32, EvilTwin);
+
+ impl Borrow<EvilTwin> for CompositeKey {
+ fn borrow(&self) -> &EvilTwin {
+ &self.1
+ }
+ }
+
+ let map = BTreeMap::from_iter((0..12).map(|i| (CompositeKey(i, EvilTwin(i)), ())));
+ let _ = map.range(EvilTwin(5)..=EvilTwin(7));
+}
+
+#[test]
+fn test_range_1000() {
+ // Miri is too slow
+ let size = if cfg!(miri) { MIN_INSERTS_HEIGHT_2 as u32 } else { 1000 };
+ let map = BTreeMap::from_iter((0..size).map(|i| (i, i)));
+
+ fn test(map: &BTreeMap<u32, u32>, size: u32, min: Bound<&u32>, max: Bound<&u32>) {
+ let mut kvs = map.range((min, max)).map(|(&k, &v)| (k, v));
+ let mut pairs = (0..size).map(|i| (i, i));
+
+ for (kv, pair) in kvs.by_ref().zip(pairs.by_ref()) {
+ assert_eq!(kv, pair);
+ }
+ assert_eq!(kvs.next(), None);
+ assert_eq!(pairs.next(), None);
+ }
+ test(&map, size, Included(&0), Excluded(&size));
+ test(&map, size, Unbounded, Excluded(&size));
+ test(&map, size, Included(&0), Included(&(size - 1)));
+ test(&map, size, Unbounded, Included(&(size - 1)));
+ test(&map, size, Included(&0), Unbounded);
+ test(&map, size, Unbounded, Unbounded);
+}
+
+#[test]
+fn test_range_borrowed_key() {
+ let mut map = BTreeMap::new();
+ map.insert("aardvark".to_string(), 1);
+ map.insert("baboon".to_string(), 2);
+ map.insert("coyote".to_string(), 3);
+ map.insert("dingo".to_string(), 4);
+ // NOTE: would like to use simply "b".."d" here...
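+ // (Plain `map.range("b".."d")` would be ambiguous: `Range<&str>`
+ // implements `RangeBounds` for both `str` and `&str`, so the borrowed
+ // key type has to be spelled out.)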
+ let mut iter = map.range::<str, _>((Included("b"), Excluded("d")));
+ assert_eq!(iter.next(), Some((&"baboon".to_string(), &2)));
+ assert_eq!(iter.next(), Some((&"coyote".to_string(), &3)));
+ assert_eq!(iter.next(), None);
+}
+
+#[test]
+fn test_range() {
+ let size = 200;
+ // Miri is too slow
+ let step = if cfg!(miri) { 66 } else { 1 };
+ let map = BTreeMap::from_iter((0..size).map(|i| (i, i)));
+
+ for i in (0..size).step_by(step) {
+ for j in (i..size).step_by(step) {
+ let mut kvs = map.range((Included(&i), Included(&j))).map(|(&k, &v)| (k, v));
+ let mut pairs = (i..=j).map(|i| (i, i));
+
+ for (kv, pair) in kvs.by_ref().zip(pairs.by_ref()) {
+ assert_eq!(kv, pair);
+ }
+ assert_eq!(kvs.next(), None);
+ assert_eq!(pairs.next(), None);
+ }
+ }
+}
+
+#[test]
+fn test_range_mut() {
+ let size = 200;
+ // Miri is too slow
+ let step = if cfg!(miri) { 66 } else { 1 };
+ let mut map = BTreeMap::from_iter((0..size).map(|i| (i, i)));
+
+ for i in (0..size).step_by(step) {
+ for j in (i..size).step_by(step) {
+ let mut kvs = map.range_mut((Included(&i), Included(&j))).map(|(&k, &mut v)| (k, v));
+ let mut pairs = (i..=j).map(|i| (i, i));
+
+ for (kv, pair) in kvs.by_ref().zip(pairs.by_ref()) {
+ assert_eq!(kv, pair);
+ }
+ assert_eq!(kvs.next(), None);
+ assert_eq!(pairs.next(), None);
+ }
+ }
+ map.check();
+}
+
+#[should_panic(expected = "range start is greater than range end in BTreeMap")]
+#[test]
+fn test_range_panic_1() {
+ let mut map = BTreeMap::new();
+ map.insert(3, "a");
+ map.insert(5, "b");
+ map.insert(8, "c");
+
+ let _invalid_range = map.range((Included(&8), Included(&3)));
+}
+
+#[should_panic(expected = "range start and end are equal and excluded in BTreeMap")]
+#[test]
+fn test_range_panic_2() {
+ let mut map = BTreeMap::new();
+ map.insert(3, "a");
+ map.insert(5, "b");
+ map.insert(8, "c");
+
+ let _invalid_range = map.range((Excluded(&5), Excluded(&5)));
+}
+
+#[should_panic(expected = "range start and end are equal and excluded in BTreeMap")]
+#[test]
+fn test_range_panic_3() {
+ let mut map: BTreeMap<i32, ()> = BTreeMap::new();
+ map.insert(3, ());
+ map.insert(5, ());
+ map.insert(8, ());
+
+ let _invalid_range = map.range((Excluded(&5), Excluded(&5)));
+}
+
+#[test]
+fn test_retain() {
+ let mut map = BTreeMap::from_iter((0..100).map(|x| (x, x * 10)));
+
+ map.retain(|&k, _| k % 2 == 0);
+ assert_eq!(map.len(), 50);
+ assert_eq!(map[&2], 20);
+ assert_eq!(map[&4], 40);
+ assert_eq!(map[&6], 60);
+}
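+
+// A short sketch emphasizing that `retain`'s predicate receives the value
+// mutably, so surviving entries can be updated during the same pass.
+#[test]
+fn test_retain_mutates_survivors() {
+ let mut map = BTreeMap::from_iter((0..8).map(|i| (i, i)));
+ map.retain(|_, v| {
+ *v *= 2;
+ *v < 10
+ });
+ assert_eq!(Vec::from_iter(map), [(0, 0), (1, 2), (2, 4), (3, 6), (4, 8)]);
+}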
+
+mod test_drain_filter {
+ use super::*;
+
+ #[test]
+ fn empty() {
+ let mut map: BTreeMap<i32, i32> = BTreeMap::new();
+ map.drain_filter(|_, _| unreachable!("there's nothing to decide on"));
+ assert_eq!(map.height(), None);
+ map.check();
+ }
+
+ // Explicitly consumes the iterator, where most test cases drop it instantly.
+ #[test]
+ fn consumed_keeping_all() {
+ let pairs = (0..3).map(|i| (i, i));
+ let mut map = BTreeMap::from_iter(pairs);
+ assert!(map.drain_filter(|_, _| false).eq(iter::empty()));
+ map.check();
+ }
+
+ // Explicitly consumes the iterator, where most test cases drop it instantly.
+ #[test]
+ fn consumed_removing_all() {
+ let pairs = (0..3).map(|i| (i, i));
+ let mut map = BTreeMap::from_iter(pairs.clone());
+ assert!(map.drain_filter(|_, _| true).eq(pairs));
+ assert!(map.is_empty());
+ map.check();
+ }
+
+ // Explicitly consumes the iterator and modifies values through it.
+ #[test]
+ fn mutating_and_keeping() {
+ let pairs = (0..3).map(|i| (i, i));
+ let mut map = BTreeMap::from_iter(pairs);
+ assert!(
+ map.drain_filter(|_, v| {
+ *v += 6;
+ false
+ })
+ .eq(iter::empty())
+ );
+ assert!(map.keys().copied().eq(0..3));
+ assert!(map.values().copied().eq(6..9));
+ map.check();
+ }
+
+ // Explicitly consumes the iterator and modifies values through it.
+ #[test]
+ fn mutating_and_removing() {
+ let pairs = (0..3).map(|i| (i, i));
+ let mut map = BTreeMap::from_iter(pairs);
+ assert!(
+ map.drain_filter(|_, v| {
+ *v += 6;
+ true
+ })
+ .eq((0..3).map(|i| (i, i + 6)))
+ );
+ assert!(map.is_empty());
+ map.check();
+ }
+
+ #[test]
+ fn underfull_keeping_all() {
+ let pairs = (0..3).map(|i| (i, i));
+ let mut map = BTreeMap::from_iter(pairs);
+ map.drain_filter(|_, _| false);
+ assert!(map.keys().copied().eq(0..3));
+ map.check();
+ }
+
+ #[test]
+ fn underfull_removing_one() {
+ let pairs = (0..3).map(|i| (i, i));
+ for doomed in 0..3 {
+ let mut map = BTreeMap::from_iter(pairs.clone());
+ map.drain_filter(|i, _| *i == doomed);
+ assert_eq!(map.len(), 2);
+ map.check();
+ }
+ }
+
+ #[test]
+ fn underfull_keeping_one() {
+ let pairs = (0..3).map(|i| (i, i));
+ for sacred in 0..3 {
+ let mut map = BTreeMap::from_iter(pairs.clone());
+ map.drain_filter(|i, _| *i != sacred);
+ assert!(map.keys().copied().eq(sacred..=sacred));
+ map.check();
+ }
+ }
+
+ #[test]
+ fn underfull_removing_all() {
+ let pairs = (0..3).map(|i| (i, i));
+ let mut map = BTreeMap::from_iter(pairs);
+ map.drain_filter(|_, _| true);
+ assert!(map.is_empty());
+ map.check();
+ }
+
+ #[test]
+ fn height_0_keeping_all() {
+ let pairs = (0..node::CAPACITY).map(|i| (i, i));
+ let mut map = BTreeMap::from_iter(pairs);
+ map.drain_filter(|_, _| false);
+ assert!(map.keys().copied().eq(0..node::CAPACITY));
+ map.check();
+ }
+
+ #[test]
+ fn height_0_removing_one() {
+ let pairs = (0..node::CAPACITY).map(|i| (i, i));
+ for doomed in 0..node::CAPACITY {
+ let mut map = BTreeMap::from_iter(pairs.clone());
+ map.drain_filter(|i, _| *i == doomed);
+ assert_eq!(map.len(), node::CAPACITY - 1);
+ map.check();
+ }
+ }
+
+ #[test]
+ fn height_0_keeping_one() {
+ let pairs = (0..node::CAPACITY).map(|i| (i, i));
+ for sacred in 0..node::CAPACITY {
+ let mut map = BTreeMap::from_iter(pairs.clone());
+ map.drain_filter(|i, _| *i != sacred);
+ assert!(map.keys().copied().eq(sacred..=sacred));
+ map.check();
+ }
+ }
+
+ #[test]
+ fn height_0_removing_all() {
+ let pairs = (0..node::CAPACITY).map(|i| (i, i));
+ let mut map = BTreeMap::from_iter(pairs);
+ map.drain_filter(|_, _| true);
+ assert!(map.is_empty());
+ map.check();
+ }
+
+ #[test]
+ fn height_0_keeping_half() {
+ let mut map = BTreeMap::from_iter((0..16).map(|i| (i, i)));
+ assert_eq!(map.drain_filter(|i, _| *i % 2 == 0).count(), 8);
+ assert_eq!(map.len(), 8);
+ map.check();
+ }
+
+ #[test]
+ fn height_1_removing_all() {
+ let pairs = (0..MIN_INSERTS_HEIGHT_1).map(|i| (i, i));
+ let mut map = BTreeMap::from_iter(pairs);
+ map.drain_filter(|_, _| true);
+ assert!(map.is_empty());
+ map.check();
+ }
+
+ #[test]
+ fn height_1_removing_one() {
+ let pairs = (0..MIN_INSERTS_HEIGHT_1).map(|i| (i, i));
+ for doomed in 0..MIN_INSERTS_HEIGHT_1 {
+ let mut map = BTreeMap::from_iter(pairs.clone());
+ map.drain_filter(|i, _| *i == doomed);
+ assert_eq!(map.len(), MIN_INSERTS_HEIGHT_1 - 1);
+ map.check();
+ }
+ }
+
+ #[test]
+ fn height_1_keeping_one() {
+ let pairs = (0..MIN_INSERTS_HEIGHT_1).map(|i| (i, i));
+ for sacred in 0..MIN_INSERTS_HEIGHT_1 {
+ let mut map = BTreeMap::from_iter(pairs.clone());
+ map.drain_filter(|i, _| *i != sacred);
+ assert!(map.keys().copied().eq(sacred..=sacred));
+ map.check();
+ }
+ }
+
+ #[test]
+ fn height_2_removing_one() {
+ let pairs = (0..MIN_INSERTS_HEIGHT_2).map(|i| (i, i));
+ for doomed in (0..MIN_INSERTS_HEIGHT_2).step_by(12) {
+ let mut map = BTreeMap::from_iter(pairs.clone());
+ map.drain_filter(|i, _| *i == doomed);
+ assert_eq!(map.len(), MIN_INSERTS_HEIGHT_2 - 1);
+ map.check();
+ }
+ }
+
+ #[test]
+ fn height_2_keeping_one() {
+ let pairs = (0..MIN_INSERTS_HEIGHT_2).map(|i| (i, i));
+ for sacred in (0..MIN_INSERTS_HEIGHT_2).step_by(12) {
+ let mut map = BTreeMap::from_iter(pairs.clone());
+ map.drain_filter(|i, _| *i != sacred);
+ assert!(map.keys().copied().eq(sacred..=sacred));
+ map.check();
+ }
+ }
+
+ #[test]
+ fn height_2_removing_all() {
+ let pairs = (0..MIN_INSERTS_HEIGHT_2).map(|i| (i, i));
+ let mut map = BTreeMap::from_iter(pairs);
+ map.drain_filter(|_, _| true);
+ assert!(map.is_empty());
+ map.check();
+ }
+
+ #[test]
+ fn drop_panic_leak() {
+ let a = CrashTestDummy::new(0);
+ let b = CrashTestDummy::new(1);
+ let c = CrashTestDummy::new(2);
+ let mut map = BTreeMap::new();
+ map.insert(a.spawn(Panic::Never), ());
+ map.insert(b.spawn(Panic::InDrop), ());
+ map.insert(c.spawn(Panic::Never), ());
+
+ catch_unwind(move || drop(map.drain_filter(|dummy, _| dummy.query(true)))).unwrap_err();
+
+ assert_eq!(a.queried(), 1);
+ assert_eq!(b.queried(), 1);
+ assert_eq!(c.queried(), 0);
+ assert_eq!(a.dropped(), 1);
+ assert_eq!(b.dropped(), 1);
+ assert_eq!(c.dropped(), 1);
+ }
+
+ #[test]
+ fn pred_panic_leak() {
+ let a = CrashTestDummy::new(0);
+ let b = CrashTestDummy::new(1);
+ let c = CrashTestDummy::new(2);
+ let mut map = BTreeMap::new();
+ map.insert(a.spawn(Panic::Never), ());
+ map.insert(b.spawn(Panic::InQuery), ());
+ map.insert(c.spawn(Panic::InQuery), ());
+
+ catch_unwind(AssertUnwindSafe(|| drop(map.drain_filter(|dummy, _| dummy.query(true)))))
+ .unwrap_err();
+
+ assert_eq!(a.queried(), 1);
+ assert_eq!(b.queried(), 1);
+ assert_eq!(c.queried(), 0);
+ assert_eq!(a.dropped(), 1);
+ assert_eq!(b.dropped(), 0);
+ assert_eq!(c.dropped(), 0);
+ assert_eq!(map.len(), 2);
+ assert_eq!(map.first_entry().unwrap().key().id(), 1);
+ assert_eq!(map.last_entry().unwrap().key().id(), 2);
+ map.check();
+ }
+
+ // Same as above, but attempts to use the iterator again after the panic in the predicate.
+ #[test]
+ fn pred_panic_reuse() {
+ let a = CrashTestDummy::new(0);
+ let b = CrashTestDummy::new(1);
+ let c = CrashTestDummy::new(2);
+ let mut map = BTreeMap::new();
+ map.insert(a.spawn(Panic::Never), ());
+ map.insert(b.spawn(Panic::InQuery), ());
+ map.insert(c.spawn(Panic::InQuery), ());
+
+ {
+ let mut it = map.drain_filter(|dummy, _| dummy.query(true));
+ catch_unwind(AssertUnwindSafe(|| while it.next().is_some() {})).unwrap_err();
+ // Iterator behaviour after a panic is explicitly unspecified,
+ // so this is just the current implementation:
+ let result = catch_unwind(AssertUnwindSafe(|| it.next()));
+ assert!(matches!(result, Ok(None)));
+ }
+
+ assert_eq!(a.queried(), 1);
+ assert_eq!(b.queried(), 1);
+ assert_eq!(c.queried(), 0);
+ assert_eq!(a.dropped(), 1);
+ assert_eq!(b.dropped(), 0);
+ assert_eq!(c.dropped(), 0);
+ assert_eq!(map.len(), 2);
+ assert_eq!(map.first_entry().unwrap().key().id(), 1);
+ assert_eq!(map.last_entry().unwrap().key().id(), 2);
+ map.check();
+ }
+}
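+
+// A minimal sketch of the contract the module above exercises: the iterator
+// yields exactly the pairs the predicate approved, and only those pairs
+// leave the map.
+#[test]
+fn test_drain_filter_contract() {
+ let mut map = BTreeMap::from_iter((0..10).map(|i| (i, i)));
+ let drained = Vec::from_iter(map.drain_filter(|k, _| *k % 3 == 0));
+ assert_eq!(drained, [(0, 0), (3, 3), (6, 6), (9, 9)]);
+ assert!(map.keys().all(|k| *k % 3 != 0));
+ assert_eq!(map.len(), 6);
+ map.check();
+}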
+
+#[test]
+fn test_borrow() {
+ // make sure these compile -- using the Borrow trait
+ {
+ let mut map = BTreeMap::new();
+ map.insert("0".to_string(), 1);
+ assert_eq!(map["0"], 1);
+ }
+
+ {
+ let mut map = BTreeMap::new();
+ map.insert(Box::new(0), 1);
+ assert_eq!(map[&0], 1);
+ }
+
+ {
+ let mut map = BTreeMap::new();
+ map.insert(Box::new([0, 1]) as Box<[i32]>, 1);
+ assert_eq!(map[&[0, 1][..]], 1);
+ }
+
+ {
+ let mut map = BTreeMap::new();
+ map.insert(Rc::new(0), 1);
+ assert_eq!(map[&0], 1);
+ }
+
+ #[allow(dead_code)]
+ fn get<T: Ord>(v: &BTreeMap<Box<T>, ()>, t: &T) {
+ let _ = v.get(t);
+ }
+
+ #[allow(dead_code)]
+ fn get_mut<T: Ord>(v: &mut BTreeMap<Box<T>, ()>, t: &T) {
+ let _ = v.get_mut(t);
+ }
+
+ #[allow(dead_code)]
+ fn get_key_value<T: Ord>(v: &BTreeMap<Box<T>, ()>, t: &T) {
+ let _ = v.get_key_value(t);
+ }
+
+ #[allow(dead_code)]
+ fn contains_key<T: Ord>(v: &BTreeMap<Box<T>, ()>, t: &T) {
+ let _ = v.contains_key(t);
+ }
+
+ #[allow(dead_code)]
+ fn range<T: Ord>(v: &BTreeMap<Box<T>, ()>, t: T) {
+ let _ = v.range(t..);
+ }
+
+ #[allow(dead_code)]
+ fn range_mut<T: Ord>(v: &mut BTreeMap<Box<T>, ()>, t: T) {
+ let _ = v.range_mut(t..);
+ }
+
+ #[allow(dead_code)]
+ fn remove<T: Ord>(v: &mut BTreeMap<Box<T>, ()>, t: &T) {
+ v.remove(t);
+ }
+
+ #[allow(dead_code)]
+ fn remove_entry<T: Ord>(v: &mut BTreeMap<Box<T>, ()>, t: &T) {
+ v.remove_entry(t);
+ }
+
+ #[allow(dead_code)]
+ fn split_off<T: Ord>(v: &mut BTreeMap<Box<T>, ()>, t: &T) {
+ v.split_off(t);
+ }
+}
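+
+// The practical payoff of the `Borrow`-based signatures checked above: a
+// `String`-keyed map can be queried and modified with a plain `&str`.
+#[test]
+fn test_borrow_str_lookup() {
+ let mut map = BTreeMap::new();
+ map.insert(String::from("key"), 1);
+ assert_eq!(map.get("key"), Some(&1));
+ assert!(map.contains_key("key"));
+ assert_eq!(map.remove("key"), Some(1));
+ map.check();
+}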
+
+#[test]
+fn test_entry() {
+ let xs = [(1, 10), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)];
+
+ let mut map = BTreeMap::from(xs);
+
+ // Existing key (insert)
+ match map.entry(1) {
+ Vacant(_) => unreachable!(),
+ Occupied(mut view) => {
+ assert_eq!(view.get(), &10);
+ assert_eq!(view.insert(100), 10);
+ }
+ }
+ assert_eq!(map.get(&1).unwrap(), &100);
+ assert_eq!(map.len(), 6);
+
+ // Existing key (update)
+ match map.entry(2) {
+ Vacant(_) => unreachable!(),
+ Occupied(mut view) => {
+ let v = view.get_mut();
+ *v *= 10;
+ }
+ }
+ assert_eq!(map.get(&2).unwrap(), &200);
+ assert_eq!(map.len(), 6);
+ map.check();
+
+ // Existing key (take)
+ match map.entry(3) {
+ Vacant(_) => unreachable!(),
+ Occupied(view) => {
+ assert_eq!(view.remove(), 30);
+ }
+ }
+ assert_eq!(map.get(&3), None);
+ assert_eq!(map.len(), 5);
+ map.check();
+
+ // Nonexistent key (insert)
+ match map.entry(10) {
+ Occupied(_) => unreachable!(),
+ Vacant(view) => {
+ assert_eq!(*view.insert(1000), 1000);
+ }
+ }
+ assert_eq!(map.get(&10).unwrap(), &1000);
+ assert_eq!(map.len(), 6);
+ map.check();
+}
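+
+// A companion sketch for the match-based cases above, using the convenience
+// methods layered on `Entry` (`or_insert` and `and_modify`):
+#[test]
+fn test_entry_convenience() {
+ let mut map = BTreeMap::new();
+ *map.entry("a").or_insert(0) += 1;
+ *map.entry("a").or_insert(0) += 1;
+ map.entry("b").and_modify(|v| *v += 10).or_insert(7);
+ assert_eq!(map["a"], 2);
+ assert_eq!(map["b"], 7);
+ map.check();
+}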
+
+#[test]
+fn test_extend_ref() {
+ let mut a = BTreeMap::new();
+ a.insert(1, "one");
+ let mut b = BTreeMap::new();
+ b.insert(2, "two");
+ b.insert(3, "three");
+
+ a.extend(&b);
+
+ assert_eq!(a.len(), 3);
+ assert_eq!(a[&1], "one");
+ assert_eq!(a[&2], "two");
+ assert_eq!(a[&3], "three");
+ a.check();
+}
+
+#[test]
+fn test_zst() {
+ let mut m = BTreeMap::new();
+ assert_eq!(m.len(), 0);
+
+ assert_eq!(m.insert((), ()), None);
+ assert_eq!(m.len(), 1);
+
+ assert_eq!(m.insert((), ()), Some(()));
+ assert_eq!(m.len(), 1);
+ assert_eq!(m.iter().count(), 1);
+
+ m.clear();
+ assert_eq!(m.len(), 0);
+
+ for _ in 0..100 {
+ m.insert((), ());
+ }
+
+ assert_eq!(m.len(), 1);
+ assert_eq!(m.iter().count(), 1);
+ m.check();
+}
+
+// This test's only purpose is to ensure that zero-sized keys with nonsensical orderings
+// do not cause segfaults when used with zero-sized values. All other map behavior is
+// undefined.
+#[test]
+fn test_bad_zst() {
+ #[derive(Clone, Copy, Debug)]
+ struct Bad;
+
+ impl PartialEq for Bad {
+ fn eq(&self, _: &Self) -> bool {
+ false
+ }
+ }
+
+ impl Eq for Bad {}
+
+ impl PartialOrd for Bad {
+ fn partial_cmp(&self, _: &Self) -> Option<Ordering> {
+ Some(Ordering::Less)
+ }
+ }
+
+ impl Ord for Bad {
+ fn cmp(&self, _: &Self) -> Ordering {
+ Ordering::Less
+ }
+ }
+
+ let mut m = BTreeMap::new();
+
+ for _ in 0..100 {
+ m.insert(Bad, Bad);
+ }
+ m.check();
+}
+
+#[test]
+fn test_clear() {
+ let mut map = BTreeMap::new();
+ for &len in &[MIN_INSERTS_HEIGHT_1, MIN_INSERTS_HEIGHT_2, 0, node::CAPACITY] {
+ for i in 0..len {
+ map.insert(i, ());
+ }
+ assert_eq!(map.len(), len);
+ map.clear();
+ map.check();
+ assert_eq!(map.height(), None);
+ }
+}
+
+#[test]
+fn test_clear_drop_panic_leak() {
+ let a = CrashTestDummy::new(0);
+ let b = CrashTestDummy::new(1);
+ let c = CrashTestDummy::new(2);
+
+ let mut map = BTreeMap::new();
+ map.insert(a.spawn(Panic::Never), ());
+ map.insert(b.spawn(Panic::InDrop), ());
+ map.insert(c.spawn(Panic::Never), ());
+
+ catch_unwind(AssertUnwindSafe(|| map.clear())).unwrap_err();
+ assert_eq!(a.dropped(), 1);
+ assert_eq!(b.dropped(), 1);
+ assert_eq!(c.dropped(), 1);
+ assert_eq!(map.len(), 0);
+
+ drop(map);
+ assert_eq!(a.dropped(), 1);
+ assert_eq!(b.dropped(), 1);
+ assert_eq!(c.dropped(), 1);
+}
+
+#[test]
+fn test_clone() {
+ let mut map = BTreeMap::new();
+ let size = MIN_INSERTS_HEIGHT_1;
+ assert_eq!(map.len(), 0);
+
+ for i in 0..size {
+ assert_eq!(map.insert(i, 10 * i), None);
+ assert_eq!(map.len(), i + 1);
+ map.check();
+ assert_eq!(map, map.clone());
+ }
+
+ for i in 0..size {
+ assert_eq!(map.insert(i, 100 * i), Some(10 * i));
+ assert_eq!(map.len(), size);
+ map.check();
+ assert_eq!(map, map.clone());
+ }
+
+ for i in 0..size / 2 {
+ assert_eq!(map.remove(&(i * 2)), Some(i * 200));
+ assert_eq!(map.len(), size - i - 1);
+ map.check();
+ assert_eq!(map, map.clone());
+ }
+
+ for i in 0..size / 2 {
+ assert_eq!(map.remove(&(2 * i)), None);
+ assert_eq!(map.remove(&(2 * i + 1)), Some(i * 200 + 100));
+ assert_eq!(map.len(), size / 2 - i - 1);
+ map.check();
+ assert_eq!(map, map.clone());
+ }
+
+ // Test a tree with 2 semi-full levels and a tree with 3 levels.
+ map = BTreeMap::from_iter((1..MIN_INSERTS_HEIGHT_2).map(|i| (i, i)));
+ assert_eq!(map.len(), MIN_INSERTS_HEIGHT_2 - 1);
+ assert_eq!(map, map.clone());
+ map.insert(0, 0);
+ assert_eq!(map.len(), MIN_INSERTS_HEIGHT_2);
+ assert_eq!(map, map.clone());
+ map.check();
+}
+
+fn test_clone_panic_leak(size: usize) {
+ for i in 0..size {
+ let dummies = Vec::from_iter((0..size).map(|id| CrashTestDummy::new(id)));
+ let map = BTreeMap::from_iter(dummies.iter().map(|dummy| {
+ let panic = if dummy.id == i { Panic::InClone } else { Panic::Never };
+ (dummy.spawn(panic), ())
+ }));
+
+ catch_unwind(|| map.clone()).unwrap_err();
+ for d in &dummies {
+ assert_eq!(d.cloned(), if d.id <= i { 1 } else { 0 }, "id={}/{}", d.id, i);
+ assert_eq!(d.dropped(), if d.id < i { 1 } else { 0 }, "id={}/{}", d.id, i);
+ }
+ assert_eq!(map.len(), size);
+
+ drop(map);
+ for d in &dummies {
+ assert_eq!(d.cloned(), if d.id <= i { 1 } else { 0 }, "id={}/{}", d.id, i);
+ assert_eq!(d.dropped(), if d.id < i { 2 } else { 1 }, "id={}/{}", d.id, i);
+ }
+ }
+}
+
+#[test]
+fn test_clone_panic_leak_height_0() {
+ test_clone_panic_leak(3)
+}
+
+#[test]
+fn test_clone_panic_leak_height_1() {
+ test_clone_panic_leak(MIN_INSERTS_HEIGHT_1)
+}
+
+#[test]
+fn test_clone_from() {
+ let mut map1 = BTreeMap::new();
+ let max_size = MIN_INSERTS_HEIGHT_1;
+
+ // Range to max_size inclusive, because i is the size of map1 being tested.
+ for i in 0..=max_size {
+ let mut map2 = BTreeMap::new();
+ for j in 0..i {
+ let mut map1_copy = map2.clone();
+ map1_copy.clone_from(&map1); // small cloned from large
+ assert_eq!(map1_copy, map1);
+ let mut map2_copy = map1.clone();
+ map2_copy.clone_from(&map2); // large cloned from small
+ assert_eq!(map2_copy, map2);
+ map2.insert(100 * j + 1, 2 * j + 1);
+ }
+ map2.clone_from(&map1); // same length
+ map2.check();
+ assert_eq!(map2, map1);
+ map1.insert(i, 10 * i);
+ map1.check();
+ }
+}
+
+#[allow(dead_code)]
+fn assert_covariance() {
+ fn map_key<'new>(v: BTreeMap<&'static str, ()>) -> BTreeMap<&'new str, ()> {
+ v
+ }
+ fn map_val<'new>(v: BTreeMap<(), &'static str>) -> BTreeMap<(), &'new str> {
+ v
+ }
+
+ fn iter_key<'a, 'new>(v: Iter<'a, &'static str, ()>) -> Iter<'a, &'new str, ()> {
+ v
+ }
+ fn iter_val<'a, 'new>(v: Iter<'a, (), &'static str>) -> Iter<'a, (), &'new str> {
+ v
+ }
+
+ fn into_iter_key<'new>(v: IntoIter<&'static str, ()>) -> IntoIter<&'new str, ()> {
+ v
+ }
+ fn into_iter_val<'new>(v: IntoIter<(), &'static str>) -> IntoIter<(), &'new str> {
+ v
+ }
+
+ fn into_keys_key<'new>(v: IntoKeys<&'static str, ()>) -> IntoKeys<&'new str, ()> {
+ v
+ }
+ fn into_keys_val<'new>(v: IntoKeys<(), &'static str>) -> IntoKeys<(), &'new str> {
+ v
+ }
+
+ fn into_values_key<'new>(v: IntoValues<&'static str, ()>) -> IntoValues<&'new str, ()> {
+ v
+ }
+ fn into_values_val<'new>(v: IntoValues<(), &'static str>) -> IntoValues<(), &'new str> {
+ v
+ }
+
+ fn range_key<'a, 'new>(v: Range<'a, &'static str, ()>) -> Range<'a, &'new str, ()> {
+ v
+ }
+ fn range_val<'a, 'new>(v: Range<'a, (), &'static str>) -> Range<'a, (), &'new str> {
+ v
+ }
+
+ fn keys_key<'a, 'new>(v: Keys<'a, &'static str, ()>) -> Keys<'a, &'new str, ()> {
+ v
+ }
+ fn keys_val<'a, 'new>(v: Keys<'a, (), &'static str>) -> Keys<'a, (), &'new str> {
+ v
+ }
+
+ fn values_key<'a, 'new>(v: Values<'a, &'static str, ()>) -> Values<'a, &'new str, ()> {
+ v
+ }
+ fn values_val<'a, 'new>(v: Values<'a, (), &'static str>) -> Values<'a, (), &'new str> {
+ v
+ }
+}
+
+#[allow(dead_code)]
+fn assert_sync() {
+ fn map<T: Sync>(v: &BTreeMap<T, T>) -> impl Sync + '_ {
+ v
+ }
+
+ fn into_iter<T: Sync>(v: BTreeMap<T, T>) -> impl Sync {
+ v.into_iter()
+ }
+
+ fn into_keys<T: Sync + Ord>(v: BTreeMap<T, T>) -> impl Sync {
+ v.into_keys()
+ }
+
+ fn into_values<T: Sync + Ord>(v: BTreeMap<T, T>) -> impl Sync {
+ v.into_values()
+ }
+
+ fn drain_filter<T: Sync + Ord>(v: &mut BTreeMap<T, T>) -> impl Sync + '_ {
+ v.drain_filter(|_, _| false)
+ }
+
+ fn iter<T: Sync>(v: &BTreeMap<T, T>) -> impl Sync + '_ {
+ v.iter()
+ }
+
+ fn iter_mut<T: Sync>(v: &mut BTreeMap<T, T>) -> impl Sync + '_ {
+ v.iter_mut()
+ }
+
+ fn keys<T: Sync>(v: &BTreeMap<T, T>) -> impl Sync + '_ {
+ v.keys()
+ }
+
+ fn values<T: Sync>(v: &BTreeMap<T, T>) -> impl Sync + '_ {
+ v.values()
+ }
+
+ fn values_mut<T: Sync>(v: &mut BTreeMap<T, T>) -> impl Sync + '_ {
+ v.values_mut()
+ }
+
+ fn range<T: Sync + Ord>(v: &BTreeMap<T, T>) -> impl Sync + '_ {
+ v.range(..)
+ }
+
+ fn range_mut<T: Sync + Ord>(v: &mut BTreeMap<T, T>) -> impl Sync + '_ {
+ v.range_mut(..)
+ }
+
+ fn entry<T: Sync + Ord + Default>(v: &mut BTreeMap<T, T>) -> impl Sync + '_ {
+ v.entry(Default::default())
+ }
+
+ fn occupied_entry<T: Sync + Ord + Default>(v: &mut BTreeMap<T, T>) -> impl Sync + '_ {
+ match v.entry(Default::default()) {
+ Occupied(entry) => entry,
+ _ => unreachable!(),
+ }
+ }
+
+ fn vacant_entry<T: Sync + Ord + Default>(v: &mut BTreeMap<T, T>) -> impl Sync + '_ {
+ match v.entry(Default::default()) {
+ Vacant(entry) => entry,
+ _ => unreachable!(),
+ }
+ }
+}
+
+#[allow(dead_code)]
+fn assert_send() {
+ fn map<T: Send>(v: BTreeMap<T, T>) -> impl Send {
+ v
+ }
+
+ fn into_iter<T: Send>(v: BTreeMap<T, T>) -> impl Send {
+ v.into_iter()
+ }
+
+ fn into_keys<T: Send + Ord>(v: BTreeMap<T, T>) -> impl Send {
+ v.into_keys()
+ }
+
+ fn into_values<T: Send + Ord>(v: BTreeMap<T, T>) -> impl Send {
+ v.into_values()
+ }
+
+ fn drain_filter<T: Send + Ord>(v: &mut BTreeMap<T, T>) -> impl Send + '_ {
+ v.drain_filter(|_, _| false)
+ }
+
+ fn iter<T: Send + Sync>(v: &BTreeMap<T, T>) -> impl Send + '_ {
+ v.iter()
+ }
+
+ fn iter_mut<T: Send>(v: &mut BTreeMap<T, T>) -> impl Send + '_ {
+ v.iter_mut()
+ }
+
+ fn keys<T: Send + Sync>(v: &BTreeMap<T, T>) -> impl Send + '_ {
+ v.keys()
+ }
+
+ fn values<T: Send + Sync>(v: &BTreeMap<T, T>) -> impl Send + '_ {
+ v.values()
+ }
+
+ fn values_mut<T: Send>(v: &mut BTreeMap<T, T>) -> impl Send + '_ {
+ v.values_mut()
+ }
+
+ fn range<T: Send + Sync + Ord>(v: &BTreeMap<T, T>) -> impl Send + '_ {
+ v.range(..)
+ }
+
+ fn range_mut<T: Send + Ord>(v: &mut BTreeMap<T, T>) -> impl Send + '_ {
+ v.range_mut(..)
+ }
+
+ fn entry<T: Send + Ord + Default>(v: &mut BTreeMap<T, T>) -> impl Send + '_ {
+ v.entry(Default::default())
+ }
+
+ fn occupied_entry<T: Send + Ord + Default>(v: &mut BTreeMap<T, T>) -> impl Send + '_ {
+ match v.entry(Default::default()) {
+ Occupied(entry) => entry,
+ _ => unreachable!(),
+ }
+ }
+
+ fn vacant_entry<T: Send + Ord + Default>(v: &mut BTreeMap<T, T>) -> impl Send + '_ {
+ match v.entry(Default::default()) {
+ Vacant(entry) => entry,
+ _ => unreachable!(),
+ }
+ }
+}
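+
+// Note on the mixed bounds in `assert_send`: an iterator handing out `&T`
+// is `Send` only when `T: Sync`, because sending the iterator sends shared
+// references to another thread; `&mut T`-producing iterators need only
+// `T: Send`.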
+
+#[test]
+fn test_ord_absence() {
+ fn map<K>(mut map: BTreeMap<K, ()>) {
+ let _ = map.is_empty();
+ let _ = map.len();
+ map.clear();
+ let _ = map.iter();
+ let _ = map.iter_mut();
+ let _ = map.keys();
+ let _ = map.values();
+ let _ = map.values_mut();
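+ // Each call below consumes `map`, so this `if` chain makes all three
+ // typecheck while only one of them actually runs.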
+ if true {
+ let _ = map.into_values();
+ } else if true {
+ let _ = map.into_iter();
+ } else {
+ let _ = map.into_keys();
+ }
+ }
+
+ fn map_debug<K: Debug>(mut map: BTreeMap<K, ()>) {
+ format!("{map:?}");
+ format!("{:?}", map.iter());
+ format!("{:?}", map.iter_mut());
+ format!("{:?}", map.keys());
+ format!("{:?}", map.values());
+ format!("{:?}", map.values_mut());
+ if true {
+ format!("{:?}", map.into_iter());
+ } else if true {
+ format!("{:?}", map.into_keys());
+ } else {
+ format!("{:?}", map.into_values());
+ }
+ }
+
+ fn map_clone<K: Clone>(mut map: BTreeMap<K, ()>) {
+ map.clone_from(&map.clone());
+ }
+
+ #[derive(Debug, Clone)]
+ struct NonOrd;
+ map(BTreeMap::<NonOrd, _>::new());
+ map_debug(BTreeMap::<NonOrd, _>::new());
+ map_clone(BTreeMap::<NonOrd, _>::default());
+}
+
+#[test]
+fn test_occupied_entry_key() {
+ let mut a = BTreeMap::new();
+ let key = "hello there";
+ let value = "value goes here";
+ assert_eq!(a.height(), None);
+ a.insert(key, value);
+ assert_eq!(a.len(), 1);
+ assert_eq!(a[key], value);
+
+ match a.entry(key) {
+ Vacant(_) => panic!(),
+ Occupied(e) => assert_eq!(key, *e.key()),
+ }
+ assert_eq!(a.len(), 1);
+ assert_eq!(a[key], value);
+ a.check();
+}
+
+#[test]
+fn test_vacant_entry_key() {
+ let mut a = BTreeMap::new();
+ let key = "hello there";
+ let value = "value goes here";
+
+ assert_eq!(a.height(), None);
+ match a.entry(key) {
+ Occupied(_) => unreachable!(),
+ Vacant(e) => {
+ assert_eq!(key, *e.key());
+ e.insert(value);
+ }
+ }
+ assert_eq!(a.len(), 1);
+ assert_eq!(a[key], value);
+ a.check();
+}
+
+#[test]
+fn test_vacant_entry_no_insert() {
+ let mut a = BTreeMap::<&str, ()>::new();
+ let key = "hello there";
+
+ // Non-allocated
+ assert_eq!(a.height(), None);
+ match a.entry(key) {
+ Occupied(_) => unreachable!(),
+ Vacant(e) => assert_eq!(key, *e.key()),
+ }
+ // Ensures the tree has no root.
+ assert_eq!(a.height(), None);
+ a.check();
+
+ // Allocated but still empty
+ a.insert(key, ());
+ a.remove(&key);
+ assert_eq!(a.height(), Some(0));
+ assert!(a.is_empty());
+ match a.entry(key) {
+ Occupied(_) => unreachable!(),
+ Vacant(e) => assert_eq!(key, *e.key()),
+ }
+ // Ensures the allocated root is not changed.
+ assert_eq!(a.height(), Some(0));
+ assert!(a.is_empty());
+ a.check();
+}
+
+#[test]
+fn test_first_last_entry() {
+ let mut a = BTreeMap::new();
+ assert!(a.first_entry().is_none());
+ assert!(a.last_entry().is_none());
+ a.insert(1, 42);
+ assert_eq!(a.first_entry().unwrap().key(), &1);
+ assert_eq!(a.last_entry().unwrap().key(), &1);
+ a.insert(2, 24);
+ assert_eq!(a.first_entry().unwrap().key(), &1);
+ assert_eq!(a.last_entry().unwrap().key(), &2);
+ a.insert(0, 6);
+ assert_eq!(a.first_entry().unwrap().key(), &0);
+ assert_eq!(a.last_entry().unwrap().key(), &2);
+ let (k1, v1) = a.first_entry().unwrap().remove_entry();
+ assert_eq!(k1, 0);
+ assert_eq!(v1, 6);
+ let (k2, v2) = a.last_entry().unwrap().remove_entry();
+ assert_eq!(k2, 2);
+ assert_eq!(v2, 24);
+ assert_eq!(a.first_entry().unwrap().key(), &1);
+ assert_eq!(a.last_entry().unwrap().key(), &1);
+ a.check();
+}
+
+#[test]
+fn test_pop_first_last() {
+ let mut map = BTreeMap::new();
+ assert_eq!(map.pop_first(), None);
+ assert_eq!(map.pop_last(), None);
+
+ map.insert(1, 10);
+ map.insert(2, 20);
+ map.insert(3, 30);
+ map.insert(4, 40);
+
+ assert_eq!(map.len(), 4);
+
+ let (key, val) = map.pop_first().unwrap();
+ assert_eq!(key, 1);
+ assert_eq!(val, 10);
+ assert_eq!(map.len(), 3);
+
+ let (key, val) = map.pop_first().unwrap();
+ assert_eq!(key, 2);
+ assert_eq!(val, 20);
+ assert_eq!(map.len(), 2);
+ let (key, val) = map.pop_last().unwrap();
+ assert_eq!(key, 4);
+ assert_eq!(val, 40);
+ assert_eq!(map.len(), 1);
+
+ map.insert(5, 50);
+ map.insert(6, 60);
+ assert_eq!(map.len(), 3);
+
+ let (key, val) = map.pop_first().unwrap();
+ assert_eq!(key, 3);
+ assert_eq!(val, 30);
+ assert_eq!(map.len(), 2);
+
+ let (key, val) = map.pop_last().unwrap();
+ assert_eq!(key, 6);
+ assert_eq!(val, 60);
+ assert_eq!(map.len(), 1);
+
+ let (key, val) = map.pop_last().unwrap();
+ assert_eq!(key, 5);
+ assert_eq!(val, 50);
+ assert_eq!(map.len(), 0);
+
+ assert_eq!(map.pop_first(), None);
+ assert_eq!(map.pop_last(), None);
+
+ map.insert(7, 70);
+ map.insert(8, 80);
+
+ let (key, val) = map.pop_last().unwrap();
+ assert_eq!(key, 8);
+ assert_eq!(val, 80);
+ assert_eq!(map.len(), 1);
+
+ let (key, val) = map.pop_last().unwrap();
+ assert_eq!(key, 7);
+ assert_eq!(val, 70);
+ assert_eq!(map.len(), 0);
+
+ assert_eq!(map.pop_first(), None);
+ assert_eq!(map.pop_last(), None);
+}
+
+#[test]
+fn test_get_key_value() {
+ let mut map = BTreeMap::new();
+
+ assert!(map.is_empty());
+ assert_eq!(map.get_key_value(&1), None);
+ assert_eq!(map.get_key_value(&2), None);
+
+ map.insert(1, 10);
+ map.insert(2, 20);
+ map.insert(3, 30);
+
+ assert_eq!(map.len(), 3);
+ assert_eq!(map.get_key_value(&1), Some((&1, &10)));
+ assert_eq!(map.get_key_value(&3), Some((&3, &30)));
+ assert_eq!(map.get_key_value(&4), None);
+
+ map.remove(&3);
+
+ assert_eq!(map.len(), 2);
+ assert_eq!(map.get_key_value(&3), None);
+ assert_eq!(map.get_key_value(&2), Some((&2, &20)));
+}
+
+#[test]
+fn test_insert_into_full_height_0() {
+ let size = node::CAPACITY;
+ for pos in 0..=size {
+ let mut map = BTreeMap::from_iter((0..size).map(|i| (i * 2 + 1, ())));
+ assert!(map.insert(pos * 2, ()).is_none());
+ map.check();
+ }
+}
+
+#[test]
+fn test_insert_into_full_height_1() {
+ let size = node::CAPACITY + 1 + node::CAPACITY;
+ for pos in 0..=size {
+ let mut map = BTreeMap::from_iter((0..size).map(|i| (i * 2 + 1, ())));
+ map.compact();
+ let root_node = map.root.as_ref().unwrap().reborrow();
+ assert_eq!(root_node.len(), 1);
+ assert_eq!(root_node.first_leaf_edge().into_node().len(), node::CAPACITY);
+ assert_eq!(root_node.last_leaf_edge().into_node().len(), node::CAPACITY);
+
+ assert!(map.insert(pos * 2, ()).is_none());
+ map.check();
+ }
+}
+
+#[test]
+fn test_try_insert() {
+ let mut map = BTreeMap::new();
+
+ assert!(map.is_empty());
+
+ assert_eq!(map.try_insert(1, 10).unwrap(), &10);
+ assert_eq!(map.try_insert(2, 20).unwrap(), &20);
+
+ let err = map.try_insert(2, 200).unwrap_err();
+ assert_eq!(err.entry.key(), &2);
+ assert_eq!(err.entry.get(), &20);
+ assert_eq!(err.value, 200);
+}
+
+macro_rules! create_append_test {
+ ($name:ident, $len:expr) => {
+ #[test]
+ fn $name() {
+ let mut a = BTreeMap::new();
+ for i in 0..8 {
+ a.insert(i, i);
+ }
+
+ let mut b = BTreeMap::new();
+ for i in 5..$len {
+ b.insert(i, 2 * i);
+ }
+
+ a.append(&mut b);
+
+ assert_eq!(a.len(), $len);
+ assert_eq!(b.len(), 0);
+
+ for i in 0..$len {
+ if i < 5 {
+ assert_eq!(a[&i], i);
+ } else {
+ assert_eq!(a[&i], 2 * i);
+ }
+ }
+
+ a.check();
+ assert_eq!(a.remove(&($len - 1)), Some(2 * ($len - 1)));
+ assert_eq!(a.insert($len - 1, 20), None);
+ a.check();
+ }
+ };
+}
+
+// These are mostly for testing the algorithm that "fixes" the right edge after insertion.
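+// (Append pushes the merged, ascending sequence onto the right edge of the
+// tree and then repairs that edge; the sizes below are chosen to exercise
+// the node-boundary cases of that repair.)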
+// Single node.
+create_append_test!(test_append_9, 9);
+// Two leaves that don't need fixing.
+create_append_test!(test_append_17, 17);
+// Two leaves where the second one ends up underfull and needs stealing at the end.
+create_append_test!(test_append_14, 14);
+// Two leaves where the second one ends up empty because the insertion finished at the root.
+create_append_test!(test_append_12, 12);
+// Three levels; insertion finished at the root.
+create_append_test!(test_append_144, 144);
+// Three levels; insertion finished at leaf while there is an empty node on the second level.
+create_append_test!(test_append_145, 145);
+// Tests for several randomly chosen sizes.
+create_append_test!(test_append_170, 170);
+create_append_test!(test_append_181, 181);
+#[cfg(not(miri))] // Miri is too slow
+create_append_test!(test_append_239, 239);
+#[cfg(not(miri))] // Miri is too slow
+create_append_test!(test_append_1700, 1700);
+
+#[test]
+fn test_append_drop_leak() {
+ let a = CrashTestDummy::new(0);
+ let b = CrashTestDummy::new(1);
+ let c = CrashTestDummy::new(2);
+ let mut left = BTreeMap::new();
+ let mut right = BTreeMap::new();
+ left.insert(a.spawn(Panic::Never), ());
+ left.insert(b.spawn(Panic::InDrop), ()); // first duplicate key, dropped during append
+ left.insert(c.spawn(Panic::Never), ());
+ right.insert(b.spawn(Panic::Never), ());
+ right.insert(c.spawn(Panic::Never), ());
+
+ catch_unwind(move || left.append(&mut right)).unwrap_err();
+ assert_eq!(a.dropped(), 1);
+ assert_eq!(b.dropped(), 1); // should be 2 were it not for Rust issue #47949
+ assert_eq!(c.dropped(), 2);
+}
+
+#[test]
+fn test_append_ord_chaos() {
+ let mut map1 = BTreeMap::new();
+ map1.insert(Cyclic3::A, ());
+ map1.insert(Cyclic3::B, ());
+ let mut map2 = BTreeMap::new();
+ map2.insert(Cyclic3::A, ());
+ map2.insert(Cyclic3::B, ());
+ map2.insert(Cyclic3::C, ()); // lands first, before A
+ map2.insert(Cyclic3::B, ()); // lands first, before C
+ map1.check();
+ map2.check(); // keys are not unique but still strictly ascending
+ assert_eq!(map1.len(), 2);
+ assert_eq!(map2.len(), 4);
+ map1.append(&mut map2);
+ assert_eq!(map1.len(), 5);
+ assert_eq!(map2.len(), 0);
+ map1.check();
+ map2.check();
+}
+
+fn rand_data(len: usize) -> Vec<(u32, u32)> {
+ let mut rng = DeterministicRng::new();
+ Vec::from_iter((0..len).map(|_| (rng.next(), rng.next())))
+}
+
+#[test]
+fn test_split_off_empty_right() {
+ let mut data = rand_data(173);
+
+ let mut map = BTreeMap::from_iter(data.clone());
+ let right = map.split_off(&(data.iter().max().unwrap().0 + 1));
+ map.check();
+ right.check();
+
+ data.sort();
+ assert!(map.into_iter().eq(data));
+ assert!(right.into_iter().eq(None));
+}
+
+#[test]
+fn test_split_off_empty_left() {
+ let mut data = rand_data(314);
+
+ let mut map = BTreeMap::from_iter(data.clone());
+ let right = map.split_off(&data.iter().min().unwrap().0);
+ map.check();
+ right.check();
+
+ data.sort();
+ assert!(map.into_iter().eq(None));
+ assert!(right.into_iter().eq(data));
+}
+
+// In a tree with 3 levels, if all but a part of the first leaf node is split off,
+// make sure fix_top eliminates both top levels.
+#[test]
+fn test_split_off_tiny_left_height_2() {
+ let pairs = (0..MIN_INSERTS_HEIGHT_2).map(|i| (i, i));
+ let mut left = BTreeMap::from_iter(pairs.clone());
+ let right = left.split_off(&1);
+ left.check();
+ right.check();
+ assert_eq!(left.len(), 1);
+ assert_eq!(right.len(), MIN_INSERTS_HEIGHT_2 - 1);
+ assert_eq!(*left.first_key_value().unwrap().0, 0);
+ assert_eq!(*right.first_key_value().unwrap().0, 1);
+}
+
+// In a tree with 3 levels, if only part of the last leaf node is split off,
+// make sure fix_top eliminates both top levels.
+#[test]
+fn test_split_off_tiny_right_height_2() {
+ let pairs = (0..MIN_INSERTS_HEIGHT_2).map(|i| (i, i));
+ let last = MIN_INSERTS_HEIGHT_2 - 1;
+ let mut left = BTreeMap::from_iter(pairs.clone());
+ assert_eq!(*left.last_key_value().unwrap().0, last);
+ let right = left.split_off(&last);
+ left.check();
+ right.check();
+ assert_eq!(left.len(), MIN_INSERTS_HEIGHT_2 - 1);
+ assert_eq!(right.len(), 1);
+ assert_eq!(*left.last_key_value().unwrap().0, last - 1);
+ assert_eq!(*right.last_key_value().unwrap().0, last);
+}
+
+#[test]
+fn test_split_off_halfway() {
+ let mut rng = DeterministicRng::new();
+ for &len in &[node::CAPACITY, 25, 50, 75, 100] {
+ let mut data = Vec::from_iter((0..len).map(|_| (rng.next(), ())));
+ // Insertion in non-ascending order creates some variation in node length.
+ let mut map = BTreeMap::from_iter(data.iter().copied());
+ data.sort();
+ let small_keys = data.iter().take(len / 2).map(|kv| kv.0);
+ let large_keys = data.iter().skip(len / 2).map(|kv| kv.0);
+ let split_key = large_keys.clone().next().unwrap();
+ let right = map.split_off(&split_key);
+ map.check();
+ right.check();
+ assert!(map.keys().copied().eq(small_keys));
+ assert!(right.keys().copied().eq(large_keys));
+ }
+}
+
+#[test]
+fn test_split_off_large_random_sorted() {
+ // Miri is too slow
+ let mut data = if cfg!(miri) { rand_data(529) } else { rand_data(1529) };
+ // special case with maximum height.
+ data.sort();
+
+ let mut map = BTreeMap::from_iter(data.clone());
+ let key = data[data.len() / 2].0;
+ let right = map.split_off(&key);
+ map.check();
+ right.check();
+
+ assert!(map.into_iter().eq(data.clone().into_iter().filter(|x| x.0 < key)));
+ assert!(right.into_iter().eq(data.into_iter().filter(|x| x.0 >= key)));
+}
+
+#[test]
+fn test_into_iter_drop_leak_height_0() {
+ let a = CrashTestDummy::new(0);
+ let b = CrashTestDummy::new(1);
+ let c = CrashTestDummy::new(2);
+ let d = CrashTestDummy::new(3);
+ let e = CrashTestDummy::new(4);
+ let mut map = BTreeMap::new();
+ map.insert("a", a.spawn(Panic::Never));
+ map.insert("b", b.spawn(Panic::Never));
+ map.insert("c", c.spawn(Panic::Never));
+ map.insert("d", d.spawn(Panic::InDrop));
+ map.insert("e", e.spawn(Panic::Never));
+
+ catch_unwind(move || drop(map.into_iter())).unwrap_err();
+
+ assert_eq!(a.dropped(), 1);
+ assert_eq!(b.dropped(), 1);
+ assert_eq!(c.dropped(), 1);
+ assert_eq!(d.dropped(), 1);
+ assert_eq!(e.dropped(), 1);
+}
+
+#[test]
+fn test_into_iter_drop_leak_height_1() {
+ let size = MIN_INSERTS_HEIGHT_1;
+ for panic_point in vec![0, 1, size - 2, size - 1] {
+ let dummies = Vec::from_iter((0..size).map(|i| CrashTestDummy::new(i)));
+ let map = BTreeMap::from_iter((0..size).map(|i| {
+ let panic = if i == panic_point { Panic::InDrop } else { Panic::Never };
+ (dummies[i].spawn(Panic::Never), dummies[i].spawn(panic))
+ }));
+ catch_unwind(move || drop(map.into_iter())).unwrap_err();
+ for i in 0..size {
+ assert_eq!(dummies[i].dropped(), 2);
+ }
+ }
+}
+
+#[test]
+fn test_into_keys() {
+ let map = BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c')]);
+ let keys = Vec::from_iter(map.into_keys());
+
+ assert_eq!(keys.len(), 3);
+ assert!(keys.contains(&1));
+ assert!(keys.contains(&2));
+ assert!(keys.contains(&3));
+}
+
+#[test]
+fn test_into_values() {
+ let map = BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c')]);
+ let values = Vec::from_iter(map.into_values());
+
+ assert_eq!(values.len(), 3);
+ assert!(values.contains(&'a'));
+ assert!(values.contains(&'b'));
+ assert!(values.contains(&'c'));
+}
+
+#[test]
+fn test_insert_remove_intertwined() {
+ let loops = if cfg!(miri) { 100 } else { 1_000_000 };
+ let mut map = BTreeMap::new();
+ let mut i = 1;
+ let offset = 165; // somewhat arbitrarily chosen to cover some code paths
+ for _ in 0..loops {
+ i = (i + offset) & 0xFF;
+ map.insert(i, i);
+ map.remove(&(0xFF - i));
+ }
+ map.check();
+}
+
+#[test]
+fn test_insert_remove_intertwined_ord_chaos() {
+ let loops = if cfg!(miri) { 100 } else { 1_000_000 };
+ let gov = Governor::new();
+ let mut map = BTreeMap::new();
+ let mut i = 1;
+ let offset = 165; // arbitrary; copied from the test above
+ for _ in 0..loops {
+ i = (i + offset) & 0xFF;
+ map.insert(Governed(i, &gov), ());
+ map.remove(&Governed(0xFF - i, &gov));
+ gov.flip();
+ }
+ map.check_invariants();
+}
+
+#[test]
+fn from_array() {
+ let map = BTreeMap::from([(1, 2), (3, 4)]);
+ let unordered_duplicates = BTreeMap::from([(3, 4), (1, 2), (1, 2)]);
+ assert_eq!(map, unordered_duplicates);
+}
diff --git a/library/alloc/src/collections/btree/mem.rs b/library/alloc/src/collections/btree/mem.rs
new file mode 100644
index 000000000..e1363d1ae
--- /dev/null
+++ b/library/alloc/src/collections/btree/mem.rs
@@ -0,0 +1,35 @@
+use core::intrinsics;
+use core::mem;
+use core::ptr;
+
+/// This replaces the value behind the `v` unique reference by calling the
+/// relevant function.
+///
+/// If a panic occurs in the `change` closure, the entire process will be aborted.
+#[allow(dead_code)] // keep as illustration and for future use
+#[inline]
+pub fn take_mut<T>(v: &mut T, change: impl FnOnce(T) -> T) {
+ replace(v, |value| (change(value), ()))
+}
+
+/// This replaces the value behind the `v` unique reference by calling the
+/// relevant function, and returns a result obtained along the way.
+///
+/// If a panic occurs in the `change` closure, the entire process will be aborted.
+#[inline]
+pub fn replace<T, R>(v: &mut T, change: impl FnOnce(T) -> (T, R)) -> R {
+ struct PanicGuard;
+ impl Drop for PanicGuard {
+ fn drop(&mut self) {
+ intrinsics::abort()
+ }
+ }
+ let guard = PanicGuard;
+ let value = unsafe { ptr::read(v) };
+ let (new_value, ret) = change(value);
+ unsafe {
+ ptr::write(v, new_value);
+ }
+ mem::forget(guard);
+ ret
+}
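+
+// Illustrative only (not part of this module): a minimal sketch of how
+// `replace` is meant to be used, assuming a `String` behind a unique
+// reference. The closure consumes the old value and returns the new value
+// plus a by-product; the guard above turns a panic inside the closure into
+// a process abort rather than leaving `*v` logically uninitialized.
+//
+//     fn grow(s: &mut String) -> usize {
+//         replace(s, |owned| {
+//             let old_len = owned.len();
+//             (owned + "!", old_len) // new value is written back behind `s`
+//         })
+//     }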
diff --git a/library/alloc/src/collections/btree/merge_iter.rs b/library/alloc/src/collections/btree/merge_iter.rs
new file mode 100644
index 000000000..7f23d93b9
--- /dev/null
+++ b/library/alloc/src/collections/btree/merge_iter.rs
@@ -0,0 +1,98 @@
+use core::cmp::Ordering;
+use core::fmt::{self, Debug};
+use core::iter::FusedIterator;
+
+/// Core of an iterator that merges the output of two strictly ascending iterators,
+/// for instance a union or a symmetric difference.
+pub struct MergeIterInner<I: Iterator> {
+ a: I,
+ b: I,
+ peeked: Option<Peeked<I>>,
+}
+
+/// This benchmarks faster than wrapping both iterators in a `Peekable`,
+/// probably because we can afford to impose a `FusedIterator` bound.
+#[derive(Clone, Debug)]
+enum Peeked<I: Iterator> {
+ A(I::Item),
+ B(I::Item),
+}
+
+impl<I: Iterator> Clone for MergeIterInner<I>
+where
+ I: Clone,
+ I::Item: Clone,
+{
+ fn clone(&self) -> Self {
+ Self { a: self.a.clone(), b: self.b.clone(), peeked: self.peeked.clone() }
+ }
+}
+
+impl<I: Iterator> Debug for MergeIterInner<I>
+where
+ I: Debug,
+ I::Item: Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("MergeIterInner").field(&self.a).field(&self.b).field(&self.peeked).finish()
+ }
+}
+
+impl<I: Iterator> MergeIterInner<I> {
+ /// Creates a new core for an iterator merging a pair of sources.
+ pub fn new(a: I, b: I) -> Self {
+ MergeIterInner { a, b, peeked: None }
+ }
+
+ /// Returns the next pair of items stemming from the pair of sources
+ /// being merged. If both returned options contain a value, that value
+ /// is equal and occurs in both sources. If one of the returned options
+ /// contains a value, that value doesn't occur in the other source (or
+ /// the sources are not strictly ascending). If neither returned option
+ /// contains a value, iteration has finished and subsequent calls will
+ /// return the same empty pair.
+ pub fn nexts<Cmp: Fn(&I::Item, &I::Item) -> Ordering>(
+ &mut self,
+ cmp: Cmp,
+ ) -> (Option<I::Item>, Option<I::Item>)
+ where
+ I: FusedIterator,
+ {
+ let mut a_next;
+ let mut b_next;
+ match self.peeked.take() {
+ Some(Peeked::A(next)) => {
+ a_next = Some(next);
+ b_next = self.b.next();
+ }
+ Some(Peeked::B(next)) => {
+ b_next = Some(next);
+ a_next = self.a.next();
+ }
+ None => {
+ a_next = self.a.next();
+ b_next = self.b.next();
+ }
+ }
+ if let (Some(ref a1), Some(ref b1)) = (&a_next, &b_next) {
+ match cmp(a1, b1) {
+ Ordering::Less => self.peeked = b_next.take().map(Peeked::B),
+ Ordering::Greater => self.peeked = a_next.take().map(Peeked::A),
+ Ordering::Equal => (),
+ }
+ }
+ (a_next, b_next)
+ }
+
+ /// Returns a pair of upper bounds for the `size_hint` of the final iterator.
+ pub fn lens(&self) -> (usize, usize)
+ where
+ I: ExactSizeIterator,
+ {
+ match self.peeked {
+ Some(Peeked::A(_)) => (1 + self.a.len(), self.b.len()),
+ Some(Peeked::B(_)) => (self.a.len(), 1 + self.b.len()),
+ _ => (self.a.len(), self.b.len()),
+ }
+ }
+}
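+
+// Illustrative only: a minimal sketch of a union-style consumer of `nexts`,
+// in the spirit of `BTreeSet::union`. On `Less` or `Greater` exactly one of
+// the returned options is `Some`; on `Equal` both are, so taking `a` first
+// drops the duplicate from `b`.
+//
+//     fn union_next<I>(core: &mut MergeIterInner<I>) -> Option<I::Item>
+//     where
+//         I: FusedIterator,
+//         I::Item: Ord,
+//     {
+//         let (a, b) = core.nexts(Ord::cmp);
+//         a.or(b)
+//     }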
diff --git a/library/alloc/src/collections/btree/mod.rs b/library/alloc/src/collections/btree/mod.rs
new file mode 100644
index 000000000..9d43ac5c5
--- /dev/null
+++ b/library/alloc/src/collections/btree/mod.rs
@@ -0,0 +1,26 @@
+mod append;
+mod borrow;
+mod dedup_sorted_iter;
+mod fix;
+pub mod map;
+mod mem;
+mod merge_iter;
+mod navigate;
+mod node;
+mod remove;
+mod search;
+pub mod set;
+mod set_val;
+mod split;
+
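+// Used by `BTreeSet`, which stores its elements as map keys: these methods
+// look up, remove or replace the *stored* key that compares equal to the
+// borrowed query, since the plain map API cannot swap out a key by itself.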
+#[doc(hidden)]
+trait Recover<Q: ?Sized> {
+ type Key;
+
+ fn get(&self, key: &Q) -> Option<&Self::Key>;
+ fn take(&mut self, key: &Q) -> Option<Self::Key>;
+ fn replace(&mut self, key: Self::Key) -> Option<Self::Key>;
+}
+
+#[cfg(test)]
+mod testing;
diff --git a/library/alloc/src/collections/btree/navigate.rs b/library/alloc/src/collections/btree/navigate.rs
new file mode 100644
index 000000000..1e33c1e64
--- /dev/null
+++ b/library/alloc/src/collections/btree/navigate.rs
@@ -0,0 +1,719 @@
+use core::borrow::Borrow;
+use core::hint;
+use core::ops::RangeBounds;
+use core::ptr;
+
+use super::node::{marker, ForceResult::*, Handle, NodeRef};
+
+use crate::alloc::Allocator;
+
+// `front` and `back` are always both `None` or both `Some`.
+pub struct LeafRange<BorrowType, K, V> {
+ front: Option<Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge>>,
+ back: Option<Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge>>,
+}
+
+impl<'a, K: 'a, V: 'a> Clone for LeafRange<marker::Immut<'a>, K, V> {
+ fn clone(&self) -> Self {
+ LeafRange { front: self.front.clone(), back: self.back.clone() }
+ }
+}
+
+impl<BorrowType, K, V> LeafRange<BorrowType, K, V> {
+ pub fn none() -> Self {
+ LeafRange { front: None, back: None }
+ }
+
+ fn is_empty(&self) -> bool {
+ self.front == self.back
+ }
+
+ /// Temporarily takes out another, immutable equivalent of the same range.
+ pub fn reborrow(&self) -> LeafRange<marker::Immut<'_>, K, V> {
+ LeafRange {
+ front: self.front.as_ref().map(|f| f.reborrow()),
+ back: self.back.as_ref().map(|b| b.reborrow()),
+ }
+ }
+}
+
+impl<'a, K, V> LeafRange<marker::Immut<'a>, K, V> {
+ #[inline]
+ pub fn next_checked(&mut self) -> Option<(&'a K, &'a V)> {
+ self.perform_next_checked(|kv| kv.into_kv())
+ }
+
+ #[inline]
+ pub fn next_back_checked(&mut self) -> Option<(&'a K, &'a V)> {
+ self.perform_next_back_checked(|kv| kv.into_kv())
+ }
+}
+
+impl<'a, K, V> LeafRange<marker::ValMut<'a>, K, V> {
+ #[inline]
+ pub fn next_checked(&mut self) -> Option<(&'a K, &'a mut V)> {
+ self.perform_next_checked(|kv| unsafe { ptr::read(kv) }.into_kv_valmut())
+ }
+
+ #[inline]
+ pub fn next_back_checked(&mut self) -> Option<(&'a K, &'a mut V)> {
+ self.perform_next_back_checked(|kv| unsafe { ptr::read(kv) }.into_kv_valmut())
+ }
+}
+
+impl<BorrowType: marker::BorrowType, K, V> LeafRange<BorrowType, K, V> {
+ /// If possible, extract some result from the following KV and move to the edge beyond it.
+ fn perform_next_checked<F, R>(&mut self, f: F) -> Option<R>
+ where
+ F: Fn(&Handle<NodeRef<BorrowType, K, V, marker::LeafOrInternal>, marker::KV>) -> R,
+ {
+ if self.is_empty() {
+ None
+ } else {
+ super::mem::replace(self.front.as_mut().unwrap(), |front| {
+ let kv = front.next_kv().ok().unwrap();
+ let result = f(&kv);
+ (kv.next_leaf_edge(), Some(result))
+ })
+ }
+ }
+
+ /// If possible, extract some result from the preceding KV and move to the edge beyond it.
+ fn perform_next_back_checked<F, R>(&mut self, f: F) -> Option<R>
+ where
+ F: Fn(&Handle<NodeRef<BorrowType, K, V, marker::LeafOrInternal>, marker::KV>) -> R,
+ {
+ if self.is_empty() {
+ None
+ } else {
+ super::mem::replace(self.back.as_mut().unwrap(), |back| {
+ let kv = back.next_back_kv().ok().unwrap();
+ let result = f(&kv);
+ (kv.next_back_leaf_edge(), Some(result))
+ })
+ }
+ }
+}
+
+enum LazyLeafHandle<BorrowType, K, V> {
+ Root(NodeRef<BorrowType, K, V, marker::LeafOrInternal>), // not yet descended
+ Edge(Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge>),
+}
+
+impl<'a, K: 'a, V: 'a> Clone for LazyLeafHandle<marker::Immut<'a>, K, V> {
+ fn clone(&self) -> Self {
+ match self {
+ LazyLeafHandle::Root(root) => LazyLeafHandle::Root(*root),
+ LazyLeafHandle::Edge(edge) => LazyLeafHandle::Edge(*edge),
+ }
+ }
+}
+
+impl<BorrowType, K, V> LazyLeafHandle<BorrowType, K, V> {
+ fn reborrow(&self) -> LazyLeafHandle<marker::Immut<'_>, K, V> {
+ match self {
+ LazyLeafHandle::Root(root) => LazyLeafHandle::Root(root.reborrow()),
+ LazyLeafHandle::Edge(edge) => LazyLeafHandle::Edge(edge.reborrow()),
+ }
+ }
+}
+
+// `front` and `back` are always both `None` or both `Some`.
+pub struct LazyLeafRange<BorrowType, K, V> {
+ front: Option<LazyLeafHandle<BorrowType, K, V>>,
+ back: Option<LazyLeafHandle<BorrowType, K, V>>,
+}
+
+impl<'a, K: 'a, V: 'a> Clone for LazyLeafRange<marker::Immut<'a>, K, V> {
+ fn clone(&self) -> Self {
+ LazyLeafRange { front: self.front.clone(), back: self.back.clone() }
+ }
+}
+
+impl<BorrowType, K, V> LazyLeafRange<BorrowType, K, V> {
+ pub fn none() -> Self {
+ LazyLeafRange { front: None, back: None }
+ }
+
+ /// Temporarily takes out another, immutable equivalent of the same range.
+ pub fn reborrow(&self) -> LazyLeafRange<marker::Immut<'_>, K, V> {
+ LazyLeafRange {
+ front: self.front.as_ref().map(|f| f.reborrow()),
+ back: self.back.as_ref().map(|b| b.reborrow()),
+ }
+ }
+}
+
+impl<'a, K, V> LazyLeafRange<marker::Immut<'a>, K, V> {
+ #[inline]
+ pub unsafe fn next_unchecked(&mut self) -> (&'a K, &'a V) {
+ unsafe { self.init_front().unwrap().next_unchecked() }
+ }
+
+ #[inline]
+ pub unsafe fn next_back_unchecked(&mut self) -> (&'a K, &'a V) {
+ unsafe { self.init_back().unwrap().next_back_unchecked() }
+ }
+}
+
+impl<'a, K, V> LazyLeafRange<marker::ValMut<'a>, K, V> {
+ #[inline]
+ pub unsafe fn next_unchecked(&mut self) -> (&'a K, &'a mut V) {
+ unsafe { self.init_front().unwrap().next_unchecked() }
+ }
+
+ #[inline]
+ pub unsafe fn next_back_unchecked(&mut self) -> (&'a K, &'a mut V) {
+ unsafe { self.init_back().unwrap().next_back_unchecked() }
+ }
+}
+
+impl<K, V> LazyLeafRange<marker::Dying, K, V> {
+ fn take_front(
+ &mut self,
+ ) -> Option<Handle<NodeRef<marker::Dying, K, V, marker::Leaf>, marker::Edge>> {
+ match self.front.take()? {
+ LazyLeafHandle::Root(root) => Some(root.first_leaf_edge()),
+ LazyLeafHandle::Edge(edge) => Some(edge),
+ }
+ }
+
+ #[inline]
+ pub unsafe fn deallocating_next_unchecked<A: Allocator + Clone>(
+ &mut self,
+ alloc: A,
+ ) -> Handle<NodeRef<marker::Dying, K, V, marker::LeafOrInternal>, marker::KV> {
+ debug_assert!(self.front.is_some());
+ let front = self.init_front().unwrap();
+ unsafe { front.deallocating_next_unchecked(alloc) }
+ }
+
+ #[inline]
+ pub unsafe fn deallocating_next_back_unchecked<A: Allocator + Clone>(
+ &mut self,
+ alloc: A,
+ ) -> Handle<NodeRef<marker::Dying, K, V, marker::LeafOrInternal>, marker::KV> {
+ debug_assert!(self.back.is_some());
+ let back = self.init_back().unwrap();
+ unsafe { back.deallocating_next_back_unchecked(alloc) }
+ }
+
+ #[inline]
+ pub fn deallocating_end<A: Allocator + Clone>(&mut self, alloc: A) {
+ if let Some(front) = self.take_front() {
+ front.deallocating_end(alloc)
+ }
+ }
+}
+
+impl<BorrowType: marker::BorrowType, K, V> LazyLeafRange<BorrowType, K, V> {
+ fn init_front(
+ &mut self,
+ ) -> Option<&mut Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge>> {
+ if let Some(LazyLeafHandle::Root(root)) = &self.front {
+ self.front = Some(LazyLeafHandle::Edge(unsafe { ptr::read(root) }.first_leaf_edge()));
+ }
+ match &mut self.front {
+ None => None,
+ Some(LazyLeafHandle::Edge(edge)) => Some(edge),
+ // SAFETY: the code above would have replaced it.
+ Some(LazyLeafHandle::Root(_)) => unsafe { hint::unreachable_unchecked() },
+ }
+ }
+
+ fn init_back(
+ &mut self,
+ ) -> Option<&mut Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge>> {
+ if let Some(LazyLeafHandle::Root(root)) = &self.back {
+ self.back = Some(LazyLeafHandle::Edge(unsafe { ptr::read(root) }.last_leaf_edge()));
+ }
+ match &mut self.back {
+ None => None,
+ Some(LazyLeafHandle::Edge(edge)) => Some(edge),
+ // SAFETY: the code above would have replaced it.
+ Some(LazyLeafHandle::Root(_)) => unsafe { hint::unreachable_unchecked() },
+ }
+ }
+}
+
+impl<BorrowType: marker::BorrowType, K, V> NodeRef<BorrowType, K, V, marker::LeafOrInternal> {
+ /// Finds the distinct leaf edges delimiting a specified range in a tree.
+ ///
+ /// If such distinct edges exist, returns them in ascending order, meaning
+ /// that a non-zero number of calls to `next_unchecked` on the `front` of
+ /// the result and/or calls to `next_back_unchecked` on the `back` of the
+ /// result will eventually reach the same edge.
+ ///
+ /// If there are no such edges, i.e., if the tree contains no key within
+ /// the range, returns an empty `front` and `back`.
+ ///
+ /// # Safety
+ /// Unless `BorrowType` is `Immut`, do not use the handles to visit the same
+ /// KV twice.
+ unsafe fn find_leaf_edges_spanning_range<Q: ?Sized, R>(
+ self,
+ range: R,
+ ) -> LeafRange<BorrowType, K, V>
+ where
+ Q: Ord,
+ K: Borrow<Q>,
+ R: RangeBounds<Q>,
+ {
+ match self.search_tree_for_bifurcation(&range) {
+ Err(_) => LeafRange::none(),
+ Ok((
+ node,
+ lower_edge_idx,
+ upper_edge_idx,
+ mut lower_child_bound,
+ mut upper_child_bound,
+ )) => {
+ let mut lower_edge = unsafe { Handle::new_edge(ptr::read(&node), lower_edge_idx) };
+ let mut upper_edge = unsafe { Handle::new_edge(node, upper_edge_idx) };
+ loop {
+ match (lower_edge.force(), upper_edge.force()) {
+ (Leaf(f), Leaf(b)) => return LeafRange { front: Some(f), back: Some(b) },
+ (Internal(f), Internal(b)) => {
+ (lower_edge, lower_child_bound) =
+ f.descend().find_lower_bound_edge(lower_child_bound);
+ (upper_edge, upper_child_bound) =
+ b.descend().find_upper_bound_edge(upper_child_bound);
+ }
+ _ => unreachable!("BTreeMap has different depths"),
+ }
+ }
+ }
+ }
+ }
+}
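+
+// Illustrative only: a minimal sketch of how a `LeafRange` produced by the
+// search above is drained. `next_checked`/`next_back_checked` return `None`
+// exactly when `front` and `back` have met, so the range can be consumed
+// from both ends without any extra length bookkeeping.
+//
+//     let mut range = root.range_search(2..7);
+//     while let Some((key, value)) = range.next_checked() {
+//         // keys arrive in ascending order
+//     }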
+
+fn full_range<BorrowType: marker::BorrowType, K, V>(
+ root1: NodeRef<BorrowType, K, V, marker::LeafOrInternal>,
+ root2: NodeRef<BorrowType, K, V, marker::LeafOrInternal>,
+) -> LazyLeafRange<BorrowType, K, V> {
+ LazyLeafRange {
+ front: Some(LazyLeafHandle::Root(root1)),
+ back: Some(LazyLeafHandle::Root(root2)),
+ }
+}
+
+impl<'a, K: 'a, V: 'a> NodeRef<marker::Immut<'a>, K, V, marker::LeafOrInternal> {
+ /// Finds the pair of leaf edges delimiting a specific range in a tree.
+ ///
+ /// The result is meaningful only if the tree is ordered by key, like the tree
+ /// in a `BTreeMap` is.
+ pub fn range_search<Q, R>(self, range: R) -> LeafRange<marker::Immut<'a>, K, V>
+ where
+ Q: ?Sized + Ord,
+ K: Borrow<Q>,
+ R: RangeBounds<Q>,
+ {
+ // SAFETY: our borrow type is immutable.
+ unsafe { self.find_leaf_edges_spanning_range(range) }
+ }
+
+ /// Finds the pair of leaf edges delimiting an entire tree.
+ pub fn full_range(self) -> LazyLeafRange<marker::Immut<'a>, K, V> {
+ full_range(self, self)
+ }
+}
+
+impl<'a, K: 'a, V: 'a> NodeRef<marker::ValMut<'a>, K, V, marker::LeafOrInternal> {
+ /// Splits a unique reference into a pair of leaf edges delimiting a specified range.
+ /// The results are non-unique references allowing (some) mutation, which must be used
+ /// carefully.
+ ///
+ /// The result is meaningful only if the tree is ordered by key, like the tree
+ /// in a `BTreeMap` is.
+ ///
+ /// # Safety
+ /// Do not use the duplicate handles to visit the same KV twice.
+ pub fn range_search<Q, R>(self, range: R) -> LeafRange<marker::ValMut<'a>, K, V>
+ where
+ Q: ?Sized + Ord,
+ K: Borrow<Q>,
+ R: RangeBounds<Q>,
+ {
+ unsafe { self.find_leaf_edges_spanning_range(range) }
+ }
+
+ /// Splits a unique reference into a pair of leaf edges delimiting the full range of the tree.
+ /// The results are non-unique references allowing mutation (of values only), so must be used
+ /// with care.
+ pub fn full_range(self) -> LazyLeafRange<marker::ValMut<'a>, K, V> {
+ // We duplicate the root NodeRef here -- we will never visit the same KV
+ // twice, and never end up with overlapping value references.
+ let self2 = unsafe { ptr::read(&self) };
+ full_range(self, self2)
+ }
+}
+
+impl<K, V> NodeRef<marker::Dying, K, V, marker::LeafOrInternal> {
+ /// Splits a unique reference into a pair of leaf edges delimiting the full range of the tree.
+ /// The results are non-unique references allowing massively destructive mutation, so must be
+ /// used with the utmost care.
+ pub fn full_range(self) -> LazyLeafRange<marker::Dying, K, V> {
+ // We duplicate the root NodeRef here -- we will never access it in a way
+ // that overlaps references obtained from the root.
+ let self2 = unsafe { ptr::read(&self) };
+ full_range(self, self2)
+ }
+}
+
+impl<BorrowType: marker::BorrowType, K, V>
+ Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge>
+{
+ /// Given a leaf edge handle, returns [`Result::Ok`] with a handle to the neighboring KV
+ /// on the right side, which is either in the same leaf node or in an ancestor node.
+ /// If the leaf edge is the last one in the tree, returns [`Result::Err`] with the root node.
+ pub fn next_kv(
+ self,
+ ) -> Result<
+ Handle<NodeRef<BorrowType, K, V, marker::LeafOrInternal>, marker::KV>,
+ NodeRef<BorrowType, K, V, marker::LeafOrInternal>,
+ > {
+ let mut edge = self.forget_node_type();
+ loop {
+ edge = match edge.right_kv() {
+ Ok(kv) => return Ok(kv),
+ Err(last_edge) => match last_edge.into_node().ascend() {
+ Ok(parent_edge) => parent_edge.forget_node_type(),
+ Err(root) => return Err(root),
+ },
+ }
+ }
+ }
+
+ /// Given a leaf edge handle, returns [`Result::Ok`] with a handle to the neighboring KV
+ /// on the left side, which is either in the same leaf node or in an ancestor node.
+ /// If the leaf edge is the first one in the tree, returns [`Result::Err`] with the root node.
+ fn next_back_kv(
+ self,
+ ) -> Result<
+ Handle<NodeRef<BorrowType, K, V, marker::LeafOrInternal>, marker::KV>,
+ NodeRef<BorrowType, K, V, marker::LeafOrInternal>,
+ > {
+ let mut edge = self.forget_node_type();
+ loop {
+ edge = match edge.left_kv() {
+ Ok(kv) => return Ok(kv),
+ Err(last_edge) => match last_edge.into_node().ascend() {
+ Ok(parent_edge) => parent_edge.forget_node_type(),
+ Err(root) => return Err(root),
+ },
+ }
+ }
+ }
+}
+
+impl<BorrowType: marker::BorrowType, K, V>
+ Handle<NodeRef<BorrowType, K, V, marker::Internal>, marker::Edge>
+{
+ /// Given an internal edge handle, returns [`Result::Ok`] with a handle to the neighboring KV
+ /// on the right side, which is either in the same internal node or in an ancestor node.
+ /// If the internal edge is the last one in the tree, returns [`Result::Err`] with the root node.
+ fn next_kv(
+ self,
+ ) -> Result<
+ Handle<NodeRef<BorrowType, K, V, marker::Internal>, marker::KV>,
+ NodeRef<BorrowType, K, V, marker::Internal>,
+ > {
+ let mut edge = self;
+ loop {
+ edge = match edge.right_kv() {
+ Ok(internal_kv) => return Ok(internal_kv),
+ Err(last_edge) => match last_edge.into_node().ascend() {
+ Ok(parent_edge) => parent_edge,
+ Err(root) => return Err(root),
+ },
+ }
+ }
+ }
+}
+
+impl<K, V> Handle<NodeRef<marker::Dying, K, V, marker::Leaf>, marker::Edge> {
+ /// Given a leaf edge handle into a dying tree, returns the next leaf edge
+ /// on the right side, and the key-value pair in between, if they exist.
+ ///
+ /// If the given edge is the last one in a leaf, this method deallocates
+ /// the leaf, as well as any ancestor nodes whose last edge was reached.
+ /// This implies that if no more key-value pair follows, the entire tree
+ /// will have been deallocated and there is nothing left to return.
+ ///
+ /// # Safety
+ /// - The given edge must not have been previously returned by counterpart
+ /// `deallocating_next_back`.
+ /// - The returned KV handle is only valid to access the key and value,
+ /// and only valid until the next call to a `deallocating_` method.
+ unsafe fn deallocating_next<A: Allocator + Clone>(
+ self,
+ alloc: A,
+ ) -> Option<(Self, Handle<NodeRef<marker::Dying, K, V, marker::LeafOrInternal>, marker::KV>)>
+ {
+ let mut edge = self.forget_node_type();
+ loop {
+ edge = match edge.right_kv() {
+ Ok(kv) => return Some((unsafe { ptr::read(&kv) }.next_leaf_edge(), kv)),
+ Err(last_edge) => {
+ match unsafe { last_edge.into_node().deallocate_and_ascend(alloc.clone()) } {
+ Some(parent_edge) => parent_edge.forget_node_type(),
+ None => return None,
+ }
+ }
+ }
+ }
+ }
+
+ /// Given a leaf edge handle into a dying tree, returns the next leaf edge
+ /// on the left side, and the key-value pair in between, if they exist.
+ ///
+ /// If the given edge is the first one in a leaf, this method deallocates
+ /// the leaf, as well as any ancestor nodes whose first edge was reached.
+ /// This implies that if no more key-value pair follows, the entire tree
+ /// will have been deallocated and there is nothing left to return.
+ ///
+ /// # Safety
+ /// - The given edge must not have been previously returned by counterpart
+ /// `deallocating_next`.
+ /// - The returned KV handle is only valid to access the key and value,
+ /// and only valid until the next call to a `deallocating_` method.
+ unsafe fn deallocating_next_back<A: Allocator + Clone>(
+ self,
+ alloc: A,
+ ) -> Option<(Self, Handle<NodeRef<marker::Dying, K, V, marker::LeafOrInternal>, marker::KV>)>
+ {
+ let mut edge = self.forget_node_type();
+ loop {
+ edge = match edge.left_kv() {
+ Ok(kv) => return Some((unsafe { ptr::read(&kv) }.next_back_leaf_edge(), kv)),
+ Err(last_edge) => {
+ match unsafe { last_edge.into_node().deallocate_and_ascend(alloc.clone()) } {
+ Some(parent_edge) => parent_edge.forget_node_type(),
+ None => return None,
+ }
+ }
+ }
+ }
+ }
+
+ /// Deallocates a pile of nodes from the leaf up to the root.
+ /// This is the only way to deallocate the remainder of a tree after
+ /// `deallocating_next` and `deallocating_next_back` have been nibbling at
+ /// both sides of the tree, and have hit the same edge. As it is intended
+ /// only to be called when all keys and values have been returned,
+ /// no cleanup is done on any of the keys or values.
+ fn deallocating_end<A: Allocator + Clone>(self, alloc: A) {
+ let mut edge = self.forget_node_type();
+ while let Some(parent_edge) =
+ unsafe { edge.into_node().deallocate_and_ascend(alloc.clone()) }
+ {
+ edge = parent_edge.forget_node_type();
+ }
+ }
+}
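+
+// Illustrative only: the teardown protocol these methods support, as driven
+// by a dying iterator over a `LazyLeafRange`. Elements are taken off either
+// end while exhausted nodes are freed behind the cursors; once the cursors
+// meet, `deallocating_end` frees the remaining path of empty nodes.
+//
+//     while /* elements remain */ {
+//         let kv = unsafe { range.deallocating_next_unchecked(alloc.clone()) };
+//         // move out and drop the key and value behind `kv`
+//     }
+//     range.deallocating_end(alloc);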
+
+impl<'a, K, V> Handle<NodeRef<marker::Immut<'a>, K, V, marker::Leaf>, marker::Edge> {
+ /// Moves the leaf edge handle to the next leaf edge and returns references to the
+ /// key and value in between.
+ ///
+ /// # Safety
+ /// There must be another KV in the direction travelled.
+ unsafe fn next_unchecked(&mut self) -> (&'a K, &'a V) {
+ super::mem::replace(self, |leaf_edge| {
+ let kv = leaf_edge.next_kv().ok().unwrap();
+ (kv.next_leaf_edge(), kv.into_kv())
+ })
+ }
+
+ /// Moves the leaf edge handle to the previous leaf edge and returns references to the
+ /// key and value in between.
+ ///
+ /// # Safety
+ /// There must be another KV in the direction travelled.
+ unsafe fn next_back_unchecked(&mut self) -> (&'a K, &'a V) {
+ super::mem::replace(self, |leaf_edge| {
+ let kv = leaf_edge.next_back_kv().ok().unwrap();
+ (kv.next_back_leaf_edge(), kv.into_kv())
+ })
+ }
+}
+
+impl<'a, K, V> Handle<NodeRef<marker::ValMut<'a>, K, V, marker::Leaf>, marker::Edge> {
+ /// Moves the leaf edge handle to the next leaf edge and returns references to the
+ /// key and value in between.
+ ///
+ /// # Safety
+ /// There must be another KV in the direction travelled.
+ unsafe fn next_unchecked(&mut self) -> (&'a K, &'a mut V) {
+ let kv = super::mem::replace(self, |leaf_edge| {
+ let kv = leaf_edge.next_kv().ok().unwrap();
+ (unsafe { ptr::read(&kv) }.next_leaf_edge(), kv)
+ });
+ // Doing this last is faster, according to benchmarks.
+ kv.into_kv_valmut()
+ }
+
+ /// Moves the leaf edge handle to the previous leaf edge and returns references to the
+ /// key and value in between.
+ ///
+ /// # Safety
+ /// There must be another KV in the direction travelled.
+ unsafe fn next_back_unchecked(&mut self) -> (&'a K, &'a mut V) {
+ let kv = super::mem::replace(self, |leaf_edge| {
+ let kv = leaf_edge.next_back_kv().ok().unwrap();
+ (unsafe { ptr::read(&kv) }.next_back_leaf_edge(), kv)
+ });
+ // Doing this last is faster, according to benchmarks.
+ kv.into_kv_valmut()
+ }
+}
+
+impl<K, V> Handle<NodeRef<marker::Dying, K, V, marker::Leaf>, marker::Edge> {
+ /// Moves the leaf edge handle to the next leaf edge and returns the key and value
+ /// in between, deallocating any node left behind while leaving the corresponding
+ /// edge in its parent node dangling.
+ ///
+ /// # Safety
+ /// - There must be another KV in the direction travelled.
+ /// - That KV was not previously returned by counterpart
+ /// `deallocating_next_back_unchecked` on any copy of the handles
+ /// being used to traverse the tree.
+ ///
+ /// The only safe way to proceed with the updated handle is to compare it, drop it,
+ /// or call this method or counterpart `deallocating_next_back_unchecked` again.
+ unsafe fn deallocating_next_unchecked<A: Allocator + Clone>(
+ &mut self,
+ alloc: A,
+ ) -> Handle<NodeRef<marker::Dying, K, V, marker::LeafOrInternal>, marker::KV> {
+ super::mem::replace(self, |leaf_edge| unsafe {
+ leaf_edge.deallocating_next(alloc).unwrap()
+ })
+ }
+
+ /// Moves the leaf edge handle to the previous leaf edge and returns the key and value
+ /// in between, deallocating any node left behind while leaving the corresponding
+ /// edge in its parent node dangling.
+ ///
+ /// # Safety
+ /// - There must be another KV in the direction travelled.
+ /// - That leaf edge was not previously returned by counterpart
+ /// `deallocating_next_unchecked` on any copy of the handles
+ /// being used to traverse the tree.
+ ///
+ /// The only safe way to proceed with the updated handle is to compare it, drop it,
+ /// or call this method or counterpart `deallocating_next_unchecked` again.
+ unsafe fn deallocating_next_back_unchecked<A: Allocator + Clone>(
+ &mut self,
+ alloc: A,
+ ) -> Handle<NodeRef<marker::Dying, K, V, marker::LeafOrInternal>, marker::KV> {
+ super::mem::replace(self, |leaf_edge| unsafe {
+ leaf_edge.deallocating_next_back(alloc).unwrap()
+ })
+ }
+}
+
+impl<BorrowType: marker::BorrowType, K, V> NodeRef<BorrowType, K, V, marker::LeafOrInternal> {
+ /// Returns the leftmost leaf edge in or underneath a node - in other words, the edge
+ /// you need first when navigating forward (or last when navigating backward).
+ #[inline]
+ pub fn first_leaf_edge(self) -> Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge> {
+ let mut node = self;
+ loop {
+ match node.force() {
+ Leaf(leaf) => return leaf.first_edge(),
+ Internal(internal) => node = internal.first_edge().descend(),
+ }
+ }
+ }
+
+ /// Returns the rightmost leaf edge in or underneath a node - in other words, the edge
+ /// you need last when navigating forward (or first when navigating backward).
+ #[inline]
+ pub fn last_leaf_edge(self) -> Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge> {
+ let mut node = self;
+ loop {
+ match node.force() {
+ Leaf(leaf) => return leaf.last_edge(),
+ Internal(internal) => node = internal.last_edge().descend(),
+ }
+ }
+ }
+}
+
+pub enum Position<BorrowType, K, V> {
+ Leaf(NodeRef<BorrowType, K, V, marker::Leaf>),
+ Internal(NodeRef<BorrowType, K, V, marker::Internal>),
+ InternalKV(Handle<NodeRef<BorrowType, K, V, marker::Internal>, marker::KV>),
+}
+
+impl<'a, K: 'a, V: 'a> NodeRef<marker::Immut<'a>, K, V, marker::LeafOrInternal> {
+ /// Visits leaf nodes and internal KVs in order of ascending keys, and also
+ /// visits internal nodes as a whole in a depth first order, meaning that
+ /// internal nodes precede their individual KVs and their child nodes.
+ pub fn visit_nodes_in_order<F>(self, mut visit: F)
+ where
+ F: FnMut(Position<marker::Immut<'a>, K, V>),
+ {
+ match self.force() {
+ Leaf(leaf) => visit(Position::Leaf(leaf)),
+ Internal(internal) => {
+ visit(Position::Internal(internal));
+ let mut edge = internal.first_edge();
+ loop {
+ edge = match edge.descend().force() {
+ Leaf(leaf) => {
+ visit(Position::Leaf(leaf));
+ match edge.next_kv() {
+ Ok(kv) => {
+ visit(Position::InternalKV(kv));
+ kv.right_edge()
+ }
+ Err(_) => return,
+ }
+ }
+ Internal(internal) => {
+ visit(Position::Internal(internal));
+ internal.first_edge()
+ }
+ }
+ }
+ }
+ }
+ }
+
+ /// Calculates the number of elements in a (sub)tree.
+ pub fn calc_length(self) -> usize {
+ let mut result = 0;
+ self.visit_nodes_in_order(|pos| match pos {
+ Position::Leaf(node) => result += node.len(),
+ Position::Internal(node) => result += node.len(),
+ Position::InternalKV(_) => (),
+ });
+ result
+ }
+}
+
+impl<BorrowType: marker::BorrowType, K, V>
+ Handle<NodeRef<BorrowType, K, V, marker::LeafOrInternal>, marker::KV>
+{
+ /// Returns the leaf edge closest to a KV for forward navigation.
+ pub fn next_leaf_edge(self) -> Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge> {
+ match self.force() {
+ Leaf(leaf_kv) => leaf_kv.right_edge(),
+ Internal(internal_kv) => {
+ let next_internal_edge = internal_kv.right_edge();
+ next_internal_edge.descend().first_leaf_edge()
+ }
+ }
+ }
+
+ /// Returns the leaf edge closest to a KV for backward navigation.
+ fn next_back_leaf_edge(self) -> Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge> {
+ match self.force() {
+ Leaf(leaf_kv) => leaf_kv.left_edge(),
+ Internal(internal_kv) => {
+ let next_internal_edge = internal_kv.left_edge();
+ next_internal_edge.descend().last_leaf_edge()
+ }
+ }
+ }
+}
diff --git a/library/alloc/src/collections/btree/node.rs b/library/alloc/src/collections/btree/node.rs
new file mode 100644
index 000000000..d831161bc
--- /dev/null
+++ b/library/alloc/src/collections/btree/node.rs
@@ -0,0 +1,1753 @@
+// This is an attempt at an implementation following the ideal
+//
+// ```
+// struct BTreeMap<K, V> {
+// height: usize,
+// root: Option<Box<Node<K, V, height>>>
+// }
+//
+// struct Node<K, V, height: usize> {
+// keys: [K; 2 * B - 1],
+// vals: [V; 2 * B - 1],
+// edges: [if height > 0 { Box<Node<K, V, height - 1>> } else { () }; 2 * B],
+// parent: Option<(NonNull<Node<K, V, height + 1>>, u16)>,
+// len: u16,
+// }
+// ```
+//
+// Since Rust doesn't actually have dependent types and polymorphic recursion,
+// we make do with lots of unsafety.
+
+// A major goal of this module is to avoid complexity by treating the tree as a generic (if
+// weirdly shaped) container and avoiding dealing with most of the B-Tree invariants. As such,
+// this module doesn't care whether the entries are sorted, which nodes can be underfull, or
+// even what underfull means. However, we do rely on a few invariants:
+//
+// - Trees must have uniform depth/height. This means that every path down to a leaf from a
+// given node has exactly the same length.
+// - A node of length `n` has `n` keys, `n` values, and `n + 1` edges.
+// This implies that even an empty node has at least one edge.
+// For a leaf node, "having an edge" only means we can identify a position in the node,
+// since leaf edges are empty and need no data representation. In an internal node,
+// an edge both identifies a position and contains a pointer to a child node.
+
+use core::marker::PhantomData;
+use core::mem::{self, MaybeUninit};
+use core::ptr::{self, NonNull};
+use core::slice::SliceIndex;
+
+use crate::alloc::{Allocator, Layout};
+use crate::boxed::Box;
+
+const B: usize = 6;
+pub const CAPACITY: usize = 2 * B - 1;
+pub const MIN_LEN_AFTER_SPLIT: usize = B - 1;
+const KV_IDX_CENTER: usize = B - 1;
+const EDGE_IDX_LEFT_OF_CENTER: usize = B - 1;
+const EDGE_IDX_RIGHT_OF_CENTER: usize = B;
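+// With B = 6, a node holds at most CAPACITY = 2 * 6 - 1 = 11 key-value pairs
+// (and, if internal, 12 edges). Splitting a full node sends the middle pair
+// upwards and leaves MIN_LEN_AFTER_SPLIT = 5 pairs on each side;
+// KV_IDX_CENTER = 5 is the middle of the 11 KV slots (indices 0..=10),
+// flanked by the edge indices 5 and 6.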
+
+/// The underlying representation of leaf nodes and part of the representation of internal nodes.
+struct LeafNode<K, V> {
+ /// We want to be covariant in `K` and `V`.
+ parent: Option<NonNull<InternalNode<K, V>>>,
+
+ /// This node's index into the parent node's `edges` array.
+ /// `*node.parent.edges[node.parent_idx]` should be the same thing as `node`.
+ /// This is only guaranteed to be initialized when `parent` is non-null.
+ parent_idx: MaybeUninit<u16>,
+
+ /// The number of keys and values this node stores.
+ len: u16,
+
+ /// The arrays storing the actual data of the node. Only the first `len` elements of each
+ /// array are initialized and valid.
+ keys: [MaybeUninit<K>; CAPACITY],
+ vals: [MaybeUninit<V>; CAPACITY],
+}
+
+impl<K, V> LeafNode<K, V> {
+ /// Initializes a new `LeafNode` in-place.
+ unsafe fn init(this: *mut Self) {
+ // As a general policy, we leave fields uninitialized if they can be, as this should
+ // be both slightly faster and easier to track in Valgrind.
+ unsafe {
+ // parent_idx, keys, and vals are all MaybeUninit
+ ptr::addr_of_mut!((*this).parent).write(None);
+ ptr::addr_of_mut!((*this).len).write(0);
+ }
+ }
+
+ /// Creates a new boxed `LeafNode`.
+ fn new<A: Allocator + Clone>(alloc: A) -> Box<Self, A> {
+ unsafe {
+ let mut leaf = Box::new_uninit_in(alloc);
+ LeafNode::init(leaf.as_mut_ptr());
+ leaf.assume_init()
+ }
+ }
+}
+
+/// The underlying representation of internal nodes. As with `LeafNode`s, these should be hidden
+/// behind `BoxedNode`s to prevent dropping uninitialized keys and values. Any pointer to an
+/// `InternalNode` can be directly cast to a pointer to the underlying `LeafNode` portion of the
+/// node, allowing code to act on leaf and internal nodes generically without having to even check
+/// which of the two a pointer is pointing at. This property is enabled by the use of `repr(C)`.
+#[repr(C)]
+// gdb_providers.py uses this type name for introspection.
+struct InternalNode<K, V> {
+ data: LeafNode<K, V>,
+
+ /// The pointers to the children of this node. `len + 1` of these are considered
+ /// initialized and valid, except that near the end, while the tree is held
+ /// through borrow type `Dying`, some of these pointers are dangling.
+ edges: [MaybeUninit<BoxedNode<K, V>>; 2 * B],
+}
+
+impl<K, V> InternalNode<K, V> {
+ /// Creates a new boxed `InternalNode`.
+ ///
+ /// # Safety
+ /// An invariant of internal nodes is that they have at least one
+ /// initialized and valid edge. This function does not set up
+ /// such an edge.
+ unsafe fn new<A: Allocator + Clone>(alloc: A) -> Box<Self, A> {
+ unsafe {
+ let mut node = Box::<Self, _>::new_uninit_in(alloc);
+ // We only need to initialize the data; the edges are MaybeUninit.
+ LeafNode::init(ptr::addr_of_mut!((*node.as_mut_ptr()).data));
+ node.assume_init()
+ }
+ }
+}
+
+/// A managed, non-null pointer to a node. This is either an owned pointer to
+/// `LeafNode<K, V>` or an owned pointer to `InternalNode<K, V>`.
+///
+/// However, `BoxedNode` contains no information as to which of the two types
+/// of nodes it actually contains, and, partially due to this lack of information,
+/// is not a separate type and has no destructor.
+type BoxedNode<K, V> = NonNull<LeafNode<K, V>>;
+
+// N.B. `NodeRef` is always covariant in `K` and `V`, even when the `BorrowType`
+// is `Mut`. This is technically wrong, but cannot result in any unsafety due to
+// internal use of `NodeRef` because we stay completely generic over `K` and `V`.
+// However, whenever a public type wraps `NodeRef`, make sure that it has the
+// correct variance.
+///
+/// A reference to a node.
+///
+/// This type has a number of parameters that control how it acts:
+/// - `BorrowType`: A dummy type that describes the kind of borrow and carries a lifetime.
+/// - When this is `Immut<'a>`, the `NodeRef` acts roughly like `&'a Node`.
+/// - When this is `ValMut<'a>`, the `NodeRef` acts roughly like `&'a Node`
+/// with respect to keys and tree structure, but also allows many
+/// mutable references to values throughout the tree to coexist.
+/// - When this is `Mut<'a>`, the `NodeRef` acts roughly like `&'a mut Node`,
+/// although insert methods allow a mutable pointer to a value to coexist.
+/// - When this is `Owned`, the `NodeRef` acts roughly like `Box<Node>`,
+/// but does not have a destructor, and must be cleaned up manually.
+/// - When this is `Dying`, the `NodeRef` still acts roughly like `Box<Node>`,
+/// but has methods to destroy the tree bit by bit, and ordinary methods,
+/// while not marked as unsafe to call, can invoke UB if called incorrectly.
+/// Since any `NodeRef` allows navigating through the tree, `BorrowType`
+/// effectively applies to the entire tree, not just to the node itself.
+/// - `K` and `V`: These are the types of keys and values stored in the nodes.
+/// - `Type`: This can be `Leaf`, `Internal`, or `LeafOrInternal`. When this is
+/// `Leaf`, the `NodeRef` points to a leaf node, when this is `Internal` the
+/// `NodeRef` points to an internal node, and when this is `LeafOrInternal` the
+/// `NodeRef` could be pointing to either type of node.
+/// `Type` is named `NodeType` when used outside `NodeRef`.
+///
+/// Both `BorrowType` and `NodeType` restrict what methods we implement, to
+/// exploit static type safety. There are limitations in the way we can apply
+/// such restrictions:
+/// - For each type parameter, we can only define a method either generically
+/// or for one particular type. For example, we cannot define a method like
+/// `into_kv` generically for all `BorrowType`, or once for all types that
+/// carry a lifetime, because we want it to return `&'a` references.
+/// Therefore, we define it only for the least powerful type `Immut<'a>`.
+/// - We cannot get implicit coercion from say `Mut<'a>` to `Immut<'a>`.
+/// Therefore, we have to explicitly call `reborrow` on a more powerful
+/// `NodeRef` in order to reach a method like `into_kv`.
+///
+/// All methods on `NodeRef` that return some kind of reference, either:
+/// - Take `self` by value, and return the lifetime carried by `BorrowType`.
+/// Sometimes, to invoke such a method, we need to call `reborrow_mut`.
+/// - Take `self` by reference, and (implicitly) return that reference's
+/// lifetime, instead of the lifetime carried by `BorrowType`. That way,
+/// the borrow checker guarantees that the `NodeRef` remains borrowed as long
+/// as the returned reference is used.
+/// The methods supporting insert bend this rule by returning a raw pointer,
+/// i.e., a reference without any lifetime.
+pub struct NodeRef<BorrowType, K, V, Type> {
+ /// The number of levels that the node and the level of leaves are apart, a
+ /// constant of the node that cannot be entirely described by `Type`, and that
+ /// the node itself does not store. We only need to store the height of the root
+ /// node, and derive every other node's height from it.
+ /// Must be zero if `Type` is `Leaf` and non-zero if `Type` is `Internal`.
+ height: usize,
+ /// The pointer to the leaf or internal node. The definition of `InternalNode`
+ /// ensures that the pointer is valid either way.
+ node: NonNull<LeafNode<K, V>>,
+ _marker: PhantomData<(BorrowType, Type)>,
+}
+
+/// The root node of an owned tree.
+///
+/// Note that this does not have a destructor, and must be cleaned up manually.
+pub type Root<K, V> = NodeRef<marker::Owned, K, V, marker::LeafOrInternal>;
+
+impl<'a, K: 'a, V: 'a, Type> Copy for NodeRef<marker::Immut<'a>, K, V, Type> {}
+impl<'a, K: 'a, V: 'a, Type> Clone for NodeRef<marker::Immut<'a>, K, V, Type> {
+ fn clone(&self) -> Self {
+ *self
+ }
+}
+
+unsafe impl<BorrowType, K: Sync, V: Sync, Type> Sync for NodeRef<BorrowType, K, V, Type> {}
+
+unsafe impl<'a, K: Sync + 'a, V: Sync + 'a, Type> Send for NodeRef<marker::Immut<'a>, K, V, Type> {}
+unsafe impl<'a, K: Send + 'a, V: Send + 'a, Type> Send for NodeRef<marker::Mut<'a>, K, V, Type> {}
+unsafe impl<'a, K: Send + 'a, V: Send + 'a, Type> Send for NodeRef<marker::ValMut<'a>, K, V, Type> {}
+unsafe impl<K: Send, V: Send, Type> Send for NodeRef<marker::Owned, K, V, Type> {}
+unsafe impl<K: Send, V: Send, Type> Send for NodeRef<marker::Dying, K, V, Type> {}
+
+impl<K, V> NodeRef<marker::Owned, K, V, marker::Leaf> {
+ pub fn new_leaf<A: Allocator + Clone>(alloc: A) -> Self {
+ Self::from_new_leaf(LeafNode::new(alloc))
+ }
+
+ fn from_new_leaf<A: Allocator + Clone>(leaf: Box<LeafNode<K, V>, A>) -> Self {
+ NodeRef { height: 0, node: NonNull::from(Box::leak(leaf)), _marker: PhantomData }
+ }
+}
+
+impl<K, V> NodeRef<marker::Owned, K, V, marker::Internal> {
+ fn new_internal<A: Allocator + Clone>(child: Root<K, V>, alloc: A) -> Self {
+ let mut new_node = unsafe { InternalNode::new(alloc) };
+ new_node.edges[0].write(child.node);
+ unsafe { NodeRef::from_new_internal(new_node, child.height + 1) }
+ }
+
+ /// # Safety
+ /// `height` must not be zero.
+ unsafe fn from_new_internal<A: Allocator + Clone>(
+ internal: Box<InternalNode<K, V>, A>,
+ height: usize,
+ ) -> Self {
+ debug_assert!(height > 0);
+ let node = NonNull::from(Box::leak(internal)).cast();
+ let mut this = NodeRef { height, node, _marker: PhantomData };
+ this.borrow_mut().correct_all_childrens_parent_links();
+ this
+ }
+}
+
+impl<BorrowType, K, V> NodeRef<BorrowType, K, V, marker::Internal> {
+ /// Unpack a node reference that was packed as `NodeRef::parent`.
+ fn from_internal(node: NonNull<InternalNode<K, V>>, height: usize) -> Self {
+ debug_assert!(height > 0);
+ NodeRef { height, node: node.cast(), _marker: PhantomData }
+ }
+}
+
+impl<BorrowType, K, V> NodeRef<BorrowType, K, V, marker::Internal> {
+ /// Exposes the data of an internal node.
+ ///
+ /// Returns a raw ptr to avoid invalidating other references to this node.
+ fn as_internal_ptr(this: &Self) -> *mut InternalNode<K, V> {
+ // SAFETY: the static node type is `Internal`.
+ this.node.as_ptr() as *mut InternalNode<K, V>
+ }
+}
+
+impl<'a, K, V> NodeRef<marker::Mut<'a>, K, V, marker::Internal> {
+ /// Borrows exclusive access to the data of an internal node.
+ fn as_internal_mut(&mut self) -> &mut InternalNode<K, V> {
+ let ptr = Self::as_internal_ptr(self);
+ unsafe { &mut *ptr }
+ }
+}
+
+impl<BorrowType, K, V, Type> NodeRef<BorrowType, K, V, Type> {
+ /// Finds the length of the node. This is the number of keys or values.
+ /// The number of edges is `len() + 1`.
+ /// Note that, despite being safe, calling this function can have the side effect
+ /// of invalidating mutable references that unsafe code has created.
+ pub fn len(&self) -> usize {
+ // Crucially, we only access the `len` field here. If BorrowType is marker::ValMut,
+ // there might be outstanding mutable references to values that we must not invalidate.
+ unsafe { usize::from((*Self::as_leaf_ptr(self)).len) }
+ }
+
+ /// Returns the number of levels that the node and leaves are apart. Zero
+ /// height means the node is a leaf itself. If you picture trees with the
+ /// root on top, the number says at which elevation the node appears.
+ /// If you picture trees with leaves on top, the number says how high
+ /// the tree extends above the node.
+ pub fn height(&self) -> usize {
+ self.height
+ }
+
+ /// Temporarily takes out another, immutable reference to the same node.
+ pub fn reborrow(&self) -> NodeRef<marker::Immut<'_>, K, V, Type> {
+ NodeRef { height: self.height, node: self.node, _marker: PhantomData }
+ }
+
+ /// Exposes the leaf portion of any leaf or internal node.
+ ///
+ /// Returns a raw ptr to avoid invalidating other references to this node.
+ fn as_leaf_ptr(this: &Self) -> *mut LeafNode<K, V> {
+ // The node must be valid for at least the LeafNode portion.
+ // This is not a reference in the NodeRef type because we don't know if
+ // it should be unique or shared.
+ this.node.as_ptr()
+ }
+}
+
+impl<BorrowType: marker::BorrowType, K, V, Type> NodeRef<BorrowType, K, V, Type> {
+ /// Finds the parent of the current node. Returns `Ok(handle)` if the current
+ /// node actually has a parent, where `handle` points to the edge of the parent
+ /// that points to the current node. Returns `Err(self)` if the current node has
+ /// no parent, giving back the original `NodeRef`.
+ ///
+ /// The method name assumes you picture trees with the root node on top.
+ ///
+ /// `edge.descend().ascend().unwrap()` and `node.ascend().unwrap().descend()` should
+ /// both, upon success, do nothing.
+ pub fn ascend(
+ self,
+ ) -> Result<Handle<NodeRef<BorrowType, K, V, marker::Internal>, marker::Edge>, Self> {
+ assert!(BorrowType::PERMITS_TRAVERSAL);
+ // We need to use raw pointers to nodes because, if BorrowType is marker::ValMut,
+ // there might be outstanding mutable references to values that we must not invalidate.
+ let leaf_ptr: *const _ = Self::as_leaf_ptr(&self);
+ unsafe { (*leaf_ptr).parent }
+ .as_ref()
+ .map(|parent| Handle {
+ node: NodeRef::from_internal(*parent, self.height + 1),
+ idx: unsafe { usize::from((*leaf_ptr).parent_idx.assume_init()) },
+ _marker: PhantomData,
+ })
+ .ok_or(self)
+ }
+
+ pub fn first_edge(self) -> Handle<Self, marker::Edge> {
+ unsafe { Handle::new_edge(self, 0) }
+ }
+
+ pub fn last_edge(self) -> Handle<Self, marker::Edge> {
+ let len = self.len();
+ unsafe { Handle::new_edge(self, len) }
+ }
+
+ /// Note that `self` must be nonempty.
+ pub fn first_kv(self) -> Handle<Self, marker::KV> {
+ let len = self.len();
+ assert!(len > 0);
+ unsafe { Handle::new_kv(self, 0) }
+ }
+
+ /// Note that `self` must be nonempty.
+ pub fn last_kv(self) -> Handle<Self, marker::KV> {
+ let len = self.len();
+ assert!(len > 0);
+ unsafe { Handle::new_kv(self, len - 1) }
+ }
+}
+
+impl<BorrowType, K, V, Type> NodeRef<BorrowType, K, V, Type> {
+ /// Could be a public implementation of PartialEq, but only used in this module.
+ fn eq(&self, other: &Self) -> bool {
+ let Self { node, height, _marker } = self;
+ if node.eq(&other.node) {
+ debug_assert_eq!(*height, other.height);
+ true
+ } else {
+ false
+ }
+ }
+}
+
+impl<'a, K: 'a, V: 'a, Type> NodeRef<marker::Immut<'a>, K, V, Type> {
+ /// Exposes the leaf portion of any leaf or internal node in an immutable tree.
+ fn into_leaf(self) -> &'a LeafNode<K, V> {
+ let ptr = Self::as_leaf_ptr(&self);
+ // SAFETY: there can be no mutable references into this tree borrowed as `Immut`.
+ unsafe { &*ptr }
+ }
+
+ /// Borrows a view into the keys stored in the node.
+ pub fn keys(&self) -> &[K] {
+ let leaf = self.into_leaf();
+ unsafe {
+ MaybeUninit::slice_assume_init_ref(leaf.keys.get_unchecked(..usize::from(leaf.len)))
+ }
+ }
+}
+
+impl<K, V> NodeRef<marker::Dying, K, V, marker::LeafOrInternal> {
+ /// Similar to `ascend`, gets a reference to a node's parent node, but also
+ /// deallocates the current node in the process. This is unsafe because the
+ /// current node will still be accessible despite being deallocated.
+ pub unsafe fn deallocate_and_ascend<A: Allocator + Clone>(
+ self,
+ alloc: A,
+ ) -> Option<Handle<NodeRef<marker::Dying, K, V, marker::Internal>, marker::Edge>> {
+ let height = self.height;
+ let node = self.node;
+ let ret = self.ascend().ok();
+ unsafe {
+ alloc.deallocate(
+ node.cast(),
+ if height > 0 {
+ Layout::new::<InternalNode<K, V>>()
+ } else {
+ Layout::new::<LeafNode<K, V>>()
+ },
+ );
+ }
+ ret
+ }
+}
+
+impl<'a, K, V, Type> NodeRef<marker::Mut<'a>, K, V, Type> {
+ /// Temporarily takes out another mutable reference to the same node. Beware, as
+ /// this method is very dangerous, doubly so since it might not immediately appear
+ /// dangerous.
+ ///
+ /// Because mutable pointers can roam anywhere around the tree, the returned
+ /// pointer can easily be used to make the original pointer dangling, out of
+ /// bounds, or invalid under stacked borrow rules.
+ // FIXME(@gereeter) consider adding yet another type parameter to `NodeRef`
+ // that restricts the use of navigation methods on reborrowed pointers,
+ // preventing this unsafety.
+ unsafe fn reborrow_mut(&mut self) -> NodeRef<marker::Mut<'_>, K, V, Type> {
+ NodeRef { height: self.height, node: self.node, _marker: PhantomData }
+ }
+
+ /// Borrows exclusive access to the leaf portion of a leaf or internal node.
+ fn as_leaf_mut(&mut self) -> &mut LeafNode<K, V> {
+ let ptr = Self::as_leaf_ptr(self);
+ // SAFETY: we have exclusive access to the entire node.
+ unsafe { &mut *ptr }
+ }
+
+ /// Offers exclusive access to the leaf portion of a leaf or internal node.
+ fn into_leaf_mut(mut self) -> &'a mut LeafNode<K, V> {
+ let ptr = Self::as_leaf_ptr(&mut self);
+ // SAFETY: we have exclusive access to the entire node.
+ unsafe { &mut *ptr }
+ }
+}
+
+impl<K, V, Type> NodeRef<marker::Dying, K, V, Type> {
+ /// Borrows exclusive access to the leaf portion of a dying leaf or internal node.
+ fn as_leaf_dying(&mut self) -> &mut LeafNode<K, V> {
+ let ptr = Self::as_leaf_ptr(self);
+ // SAFETY: we have exclusive access to the entire node.
+ unsafe { &mut *ptr }
+ }
+}
+
+impl<'a, K: 'a, V: 'a, Type> NodeRef<marker::Mut<'a>, K, V, Type> {
+ /// Borrows exclusive access to an element of the key storage area.
+ ///
+ /// # Safety
+    /// `index` is within the bounds of 0..CAPACITY
+ unsafe fn key_area_mut<I, Output: ?Sized>(&mut self, index: I) -> &mut Output
+ where
+ I: SliceIndex<[MaybeUninit<K>], Output = Output>,
+ {
+ // SAFETY: the caller will not be able to call further methods on self
+ // until the key slice reference is dropped, as we have unique access
+ // for the lifetime of the borrow.
+ unsafe { self.as_leaf_mut().keys.as_mut_slice().get_unchecked_mut(index) }
+ }
+
+ /// Borrows exclusive access to an element or slice of the node's value storage area.
+ ///
+ /// # Safety
+    /// `index` is within the bounds of 0..CAPACITY
+ unsafe fn val_area_mut<I, Output: ?Sized>(&mut self, index: I) -> &mut Output
+ where
+ I: SliceIndex<[MaybeUninit<V>], Output = Output>,
+ {
+ // SAFETY: the caller will not be able to call further methods on self
+ // until the value slice reference is dropped, as we have unique access
+ // for the lifetime of the borrow.
+ unsafe { self.as_leaf_mut().vals.as_mut_slice().get_unchecked_mut(index) }
+ }
+}
+
+impl<'a, K: 'a, V: 'a> NodeRef<marker::Mut<'a>, K, V, marker::Internal> {
+ /// Borrows exclusive access to an element or slice of the node's storage area for edge contents.
+ ///
+ /// # Safety
+    /// `index` is within the bounds of 0..CAPACITY + 1
+ unsafe fn edge_area_mut<I, Output: ?Sized>(&mut self, index: I) -> &mut Output
+ where
+ I: SliceIndex<[MaybeUninit<BoxedNode<K, V>>], Output = Output>,
+ {
+ // SAFETY: the caller will not be able to call further methods on self
+ // until the edge slice reference is dropped, as we have unique access
+ // for the lifetime of the borrow.
+ unsafe { self.as_internal_mut().edges.as_mut_slice().get_unchecked_mut(index) }
+ }
+}
+
+impl<'a, K, V, Type> NodeRef<marker::ValMut<'a>, K, V, Type> {
+ /// # Safety
+ /// - The node has more than `idx` initialized elements.
+ unsafe fn into_key_val_mut_at(mut self, idx: usize) -> (&'a K, &'a mut V) {
+ // We only create a reference to the one element we are interested in,
+ // to avoid aliasing with outstanding references to other elements,
+ // in particular, those returned to the caller in earlier iterations.
+ let leaf = Self::as_leaf_ptr(&mut self);
+ let keys = unsafe { ptr::addr_of!((*leaf).keys) };
+ let vals = unsafe { ptr::addr_of_mut!((*leaf).vals) };
+ // We must coerce to unsized array pointers because of Rust issue #74679.
+ let keys: *const [_] = keys;
+ let vals: *mut [_] = vals;
+ let key = unsafe { (&*keys.get_unchecked(idx)).assume_init_ref() };
+ let val = unsafe { (&mut *vals.get_unchecked_mut(idx)).assume_init_mut() };
+ (key, val)
+ }
+}
+
+impl<'a, K: 'a, V: 'a, Type> NodeRef<marker::Mut<'a>, K, V, Type> {
+ /// Borrows exclusive access to the length of the node.
+ pub fn len_mut(&mut self) -> &mut u16 {
+ &mut self.as_leaf_mut().len
+ }
+}
+
+impl<'a, K, V> NodeRef<marker::Mut<'a>, K, V, marker::Internal> {
+ /// # Safety
+ /// Every item returned by `range` is a valid edge index for the node.
+ unsafe fn correct_childrens_parent_links<R: Iterator<Item = usize>>(&mut self, range: R) {
+ for i in range {
+ debug_assert!(i <= self.len());
+ unsafe { Handle::new_edge(self.reborrow_mut(), i) }.correct_parent_link();
+ }
+ }
+
+ fn correct_all_childrens_parent_links(&mut self) {
+ let len = self.len();
+ unsafe { self.correct_childrens_parent_links(0..=len) };
+ }
+}
+
+impl<'a, K: 'a, V: 'a> NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal> {
+ /// Sets the node's link to its parent edge,
+ /// without invalidating other references to the node.
+ fn set_parent_link(&mut self, parent: NonNull<InternalNode<K, V>>, parent_idx: usize) {
+ let leaf = Self::as_leaf_ptr(self);
+ unsafe { (*leaf).parent = Some(parent) };
+ unsafe { (*leaf).parent_idx.write(parent_idx as u16) };
+ }
+}
+
+impl<K, V> NodeRef<marker::Owned, K, V, marker::LeafOrInternal> {
+ /// Clears the root's link to its parent edge.
+ fn clear_parent_link(&mut self) {
+ let mut root_node = self.borrow_mut();
+ let leaf = root_node.as_leaf_mut();
+ leaf.parent = None;
+ }
+}
+
+impl<K, V> NodeRef<marker::Owned, K, V, marker::LeafOrInternal> {
+ /// Returns a new owned tree, with its own root node that is initially empty.
+ pub fn new<A: Allocator + Clone>(alloc: A) -> Self {
+ NodeRef::new_leaf(alloc).forget_type()
+ }
+
+ /// Adds a new internal node with a single edge pointing to the previous root node,
+    /// makes that new node the root node, and returns it. This increases the height by 1
+ /// and is the opposite of `pop_internal_level`.
+ pub fn push_internal_level<A: Allocator + Clone>(
+ &mut self,
+ alloc: A,
+ ) -> NodeRef<marker::Mut<'_>, K, V, marker::Internal> {
+ super::mem::take_mut(self, |old_root| NodeRef::new_internal(old_root, alloc).forget_type());
+
+        // Equivalent to `self.borrow_mut()`, except that it reclaims the
+        // `Internal` node type that `self` itself has just forgotten:
+ NodeRef { height: self.height, node: self.node, _marker: PhantomData }
+ }
+
+ /// Removes the internal root node, using its first child as the new root node.
+ /// As it is intended only to be called when the root node has only one child,
+ /// no cleanup is done on any of the keys, values and other children.
+ /// This decreases the height by 1 and is the opposite of `push_internal_level`.
+ ///
+ /// Requires exclusive access to the `NodeRef` object but not to the root node;
+ /// it will not invalidate other handles or references to the root node.
+ ///
+ /// Panics if there is no internal level, i.e., if the root node is a leaf.
+ pub fn pop_internal_level<A: Allocator + Clone>(&mut self, alloc: A) {
+ assert!(self.height > 0);
+
+ let top = self.node;
+
+ // SAFETY: we asserted to be internal.
+ let internal_self = unsafe { self.borrow_mut().cast_to_internal_unchecked() };
+ // SAFETY: we borrowed `self` exclusively and its borrow type is exclusive.
+ let internal_node = unsafe { &mut *NodeRef::as_internal_ptr(&internal_self) };
+ // SAFETY: the first edge is always initialized.
+ self.node = unsafe { internal_node.edges[0].assume_init_read() };
+ self.height -= 1;
+ self.clear_parent_link();
+
+ unsafe {
+ alloc.deallocate(top.cast(), Layout::new::<InternalNode<K, V>>());
+ }
+ }
+}
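+
+// Illustration (a sketch, not code from this module): `push_internal_level`
+// followed by `pop_internal_level` on a root with a single child restores the
+// original tree:
+//
+//     let mut root = Root::<K, V>::new(alloc.clone()); // a leaf root, height 0
+//     root.push_internal_level(alloc.clone());         // height 1
+//     root.pop_internal_level(alloc.clone());          // height 0 again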
+
+impl<K, V, Type> NodeRef<marker::Owned, K, V, Type> {
+ /// Mutably borrows the owned root node. Unlike `reborrow_mut`, this is safe
+ /// because the return value cannot be used to destroy the root, and there
+ /// cannot be other references to the tree.
+ pub fn borrow_mut(&mut self) -> NodeRef<marker::Mut<'_>, K, V, Type> {
+ NodeRef { height: self.height, node: self.node, _marker: PhantomData }
+ }
+
+    /// Borrows the owned root node with a `ValMut` borrow type, which permits
+    /// mutating the values in the tree but not its structure.
+ pub fn borrow_valmut(&mut self) -> NodeRef<marker::ValMut<'_>, K, V, Type> {
+ NodeRef { height: self.height, node: self.node, _marker: PhantomData }
+ }
+
+ /// Irreversibly transitions to a reference that permits traversal and offers
+ /// destructive methods and little else.
+ pub fn into_dying(self) -> NodeRef<marker::Dying, K, V, Type> {
+ NodeRef { height: self.height, node: self.node, _marker: PhantomData }
+ }
+}
+
+impl<'a, K: 'a, V: 'a> NodeRef<marker::Mut<'a>, K, V, marker::Leaf> {
+ /// Adds a key-value pair to the end of the node, and returns
+    /// a mutable reference to the inserted value.
+ pub fn push(&mut self, key: K, val: V) -> &mut V {
+ let len = self.len_mut();
+ let idx = usize::from(*len);
+ assert!(idx < CAPACITY);
+ *len += 1;
+ unsafe {
+ self.key_area_mut(idx).write(key);
+ self.val_area_mut(idx).write(val)
+ }
+ }
+}
+
+impl<'a, K: 'a, V: 'a> NodeRef<marker::Mut<'a>, K, V, marker::Internal> {
+ /// Adds a key-value pair, and an edge to go to the right of that pair,
+ /// to the end of the node.
+ pub fn push(&mut self, key: K, val: V, edge: Root<K, V>) {
+ assert!(edge.height == self.height - 1);
+
+ let len = self.len_mut();
+ let idx = usize::from(*len);
+ assert!(idx < CAPACITY);
+ *len += 1;
+ unsafe {
+ self.key_area_mut(idx).write(key);
+ self.val_area_mut(idx).write(val);
+ self.edge_area_mut(idx + 1).write(edge.node);
+ Handle::new_edge(self.reborrow_mut(), idx + 1).correct_parent_link();
+ }
+ }
+}
+
+impl<BorrowType, K, V> NodeRef<BorrowType, K, V, marker::Leaf> {
+ /// Removes any static information asserting that this node is a `Leaf` node.
+ pub fn forget_type(self) -> NodeRef<BorrowType, K, V, marker::LeafOrInternal> {
+ NodeRef { height: self.height, node: self.node, _marker: PhantomData }
+ }
+}
+
+impl<BorrowType, K, V> NodeRef<BorrowType, K, V, marker::Internal> {
+ /// Removes any static information asserting that this node is an `Internal` node.
+ pub fn forget_type(self) -> NodeRef<BorrowType, K, V, marker::LeafOrInternal> {
+ NodeRef { height: self.height, node: self.node, _marker: PhantomData }
+ }
+}
+
+impl<BorrowType, K, V> NodeRef<BorrowType, K, V, marker::LeafOrInternal> {
+ /// Checks whether a node is an `Internal` node or a `Leaf` node.
+ pub fn force(
+ self,
+ ) -> ForceResult<
+ NodeRef<BorrowType, K, V, marker::Leaf>,
+ NodeRef<BorrowType, K, V, marker::Internal>,
+ > {
+ if self.height == 0 {
+ ForceResult::Leaf(NodeRef {
+ height: self.height,
+ node: self.node,
+ _marker: PhantomData,
+ })
+ } else {
+ ForceResult::Internal(NodeRef {
+ height: self.height,
+ node: self.node,
+ _marker: PhantomData,
+ })
+ }
+ }
+}
+
+impl<'a, K, V> NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal> {
+ /// Unsafely asserts to the compiler the static information that this node is a `Leaf`.
+ unsafe fn cast_to_leaf_unchecked(self) -> NodeRef<marker::Mut<'a>, K, V, marker::Leaf> {
+ debug_assert!(self.height == 0);
+ NodeRef { height: self.height, node: self.node, _marker: PhantomData }
+ }
+
+ /// Unsafely asserts to the compiler the static information that this node is an `Internal`.
+ unsafe fn cast_to_internal_unchecked(self) -> NodeRef<marker::Mut<'a>, K, V, marker::Internal> {
+ debug_assert!(self.height > 0);
+ NodeRef { height: self.height, node: self.node, _marker: PhantomData }
+ }
+}
+
+/// A reference to a specific key-value pair or edge within a node. The `Node` parameter
+/// must be a `NodeRef`, while the `Type` can either be `KV` (signifying a handle on a key-value
+/// pair) or `Edge` (signifying a handle on an edge).
+///
+/// Note that even `Leaf` nodes can have `Edge` handles. Instead of representing a pointer to
+/// a child node, these represent the spaces where child pointers would go between the key-value
+/// pairs. For example, in a node with length 2, there would be 3 possible edge locations - one
+/// to the left of the node, one between the two pairs, and one at the right of the node.
+pub struct Handle<Node, Type> {
+ node: Node,
+ idx: usize,
+ _marker: PhantomData<Type>,
+}
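+
+// For example (an illustration, not part of the API), a node of length 2
+// interleaves its possible handle positions like this:
+//
+//     edge 0 | kv 0 | edge 1 | kv 1 | edge 2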
+
+impl<Node: Copy, Type> Copy for Handle<Node, Type> {}
+// We don't need the full generality of `#[derive(Clone)]`, as the only time `Node` will be
+// `Clone`able is when it is an immutable reference and therefore `Copy`.
+impl<Node: Copy, Type> Clone for Handle<Node, Type> {
+ fn clone(&self) -> Self {
+ *self
+ }
+}
+
+impl<Node, Type> Handle<Node, Type> {
+ /// Retrieves the node that contains the edge or key-value pair this handle points to.
+ pub fn into_node(self) -> Node {
+ self.node
+ }
+
+ /// Returns the position of this handle in the node.
+ pub fn idx(&self) -> usize {
+ self.idx
+ }
+}
+
+impl<BorrowType, K, V, NodeType> Handle<NodeRef<BorrowType, K, V, NodeType>, marker::KV> {
+ /// Creates a new handle to a key-value pair in `node`.
+ /// Unsafe because the caller must ensure that `idx < node.len()`.
+ pub unsafe fn new_kv(node: NodeRef<BorrowType, K, V, NodeType>, idx: usize) -> Self {
+ debug_assert!(idx < node.len());
+
+ Handle { node, idx, _marker: PhantomData }
+ }
+
+ pub fn left_edge(self) -> Handle<NodeRef<BorrowType, K, V, NodeType>, marker::Edge> {
+ unsafe { Handle::new_edge(self.node, self.idx) }
+ }
+
+ pub fn right_edge(self) -> Handle<NodeRef<BorrowType, K, V, NodeType>, marker::Edge> {
+ unsafe { Handle::new_edge(self.node, self.idx + 1) }
+ }
+}
+
+impl<BorrowType, K, V, NodeType, HandleType> PartialEq
+ for Handle<NodeRef<BorrowType, K, V, NodeType>, HandleType>
+{
+ fn eq(&self, other: &Self) -> bool {
+ let Self { node, idx, _marker } = self;
+ node.eq(&other.node) && *idx == other.idx
+ }
+}
+
+impl<BorrowType, K, V, NodeType, HandleType>
+ Handle<NodeRef<BorrowType, K, V, NodeType>, HandleType>
+{
+ /// Temporarily takes out another immutable handle on the same location.
+ pub fn reborrow(&self) -> Handle<NodeRef<marker::Immut<'_>, K, V, NodeType>, HandleType> {
+ // We can't use Handle::new_kv or Handle::new_edge because we don't know our type
+ Handle { node: self.node.reborrow(), idx: self.idx, _marker: PhantomData }
+ }
+}
+
+impl<'a, K, V, NodeType, HandleType> Handle<NodeRef<marker::Mut<'a>, K, V, NodeType>, HandleType> {
+ /// Temporarily takes out another mutable handle on the same location. Beware, as
+ /// this method is very dangerous, doubly so since it might not immediately appear
+ /// dangerous.
+ ///
+ /// For details, see `NodeRef::reborrow_mut`.
+ pub unsafe fn reborrow_mut(
+ &mut self,
+ ) -> Handle<NodeRef<marker::Mut<'_>, K, V, NodeType>, HandleType> {
+ // We can't use Handle::new_kv or Handle::new_edge because we don't know our type
+ Handle { node: unsafe { self.node.reborrow_mut() }, idx: self.idx, _marker: PhantomData }
+ }
+}
+
+impl<BorrowType, K, V, NodeType> Handle<NodeRef<BorrowType, K, V, NodeType>, marker::Edge> {
+ /// Creates a new handle to an edge in `node`.
+ /// Unsafe because the caller must ensure that `idx <= node.len()`.
+ pub unsafe fn new_edge(node: NodeRef<BorrowType, K, V, NodeType>, idx: usize) -> Self {
+ debug_assert!(idx <= node.len());
+
+ Handle { node, idx, _marker: PhantomData }
+ }
+
+ pub fn left_kv(self) -> Result<Handle<NodeRef<BorrowType, K, V, NodeType>, marker::KV>, Self> {
+ if self.idx > 0 {
+ Ok(unsafe { Handle::new_kv(self.node, self.idx - 1) })
+ } else {
+ Err(self)
+ }
+ }
+
+ pub fn right_kv(self) -> Result<Handle<NodeRef<BorrowType, K, V, NodeType>, marker::KV>, Self> {
+ if self.idx < self.node.len() {
+ Ok(unsafe { Handle::new_kv(self.node, self.idx) })
+ } else {
+ Err(self)
+ }
+ }
+}
+
+pub enum LeftOrRight<T> {
+ Left(T),
+ Right(T),
+}
+
+/// Given an edge index where we want to insert into a node filled to capacity,
+/// computes a sensible KV index of a split point and where to perform the insertion.
+/// The goal of the split point is for its key and value to end up in a parent node;
+/// the keys, values and edges to the left of the split point become the left child;
+/// the keys, values and edges to the right of the split point become the right child.
+fn splitpoint(edge_idx: usize) -> (usize, LeftOrRight<usize>) {
+ debug_assert!(edge_idx <= CAPACITY);
+ // Rust issue #74834 tries to explain these symmetric rules.
+ match edge_idx {
+ 0..EDGE_IDX_LEFT_OF_CENTER => (KV_IDX_CENTER - 1, LeftOrRight::Left(edge_idx)),
+ EDGE_IDX_LEFT_OF_CENTER => (KV_IDX_CENTER, LeftOrRight::Left(edge_idx)),
+ EDGE_IDX_RIGHT_OF_CENTER => (KV_IDX_CENTER, LeftOrRight::Right(0)),
+ _ => (KV_IDX_CENTER + 1, LeftOrRight::Right(edge_idx - (KV_IDX_CENTER + 1 + 1))),
+ }
+}
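+
+// Worked example (illustration only; assumes B = 6 as defined for this module,
+// hence CAPACITY = 11 and KV_IDX_CENTER = 5):
+//
+//     splitpoint(0)  == (4, Left(0))   // insert at the far left of the left node
+//     splitpoint(5)  == (5, Left(5))   // insert at the far right of the left node
+//     splitpoint(6)  == (5, Right(0))  // insert at the far left of the right node
+//     splitpoint(11) == (6, Right(4))  // insert at the far right of the right node
+//
+// In each case both halves hold at least MIN_LEN_AFTER_SPLIT elements once the
+// insertion is performed, as `test_splitpoint` in the tests module verifies.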
+
+impl<'a, K: 'a, V: 'a> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge> {
+ /// Inserts a new key-value pair between the key-value pairs to the right and left of
+ /// this edge. This method assumes that there is enough space in the node for the new
+ /// pair to fit.
+ ///
+ /// The returned pointer points to the inserted value.
+ fn insert_fit(&mut self, key: K, val: V) -> *mut V {
+ debug_assert!(self.node.len() < CAPACITY);
+ let new_len = self.node.len() + 1;
+
+ unsafe {
+ slice_insert(self.node.key_area_mut(..new_len), self.idx, key);
+ slice_insert(self.node.val_area_mut(..new_len), self.idx, val);
+ *self.node.len_mut() = new_len as u16;
+
+ self.node.val_area_mut(self.idx).assume_init_mut()
+ }
+ }
+}
+
+impl<'a, K: 'a, V: 'a> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge> {
+ /// Inserts a new key-value pair between the key-value pairs to the right and left of
+ /// this edge. This method splits the node if there isn't enough room.
+ ///
+ /// The returned pointer points to the inserted value.
+ fn insert<A: Allocator + Clone>(
+ mut self,
+ key: K,
+ val: V,
+ alloc: A,
+ ) -> (Option<SplitResult<'a, K, V, marker::Leaf>>, *mut V) {
+ if self.node.len() < CAPACITY {
+ let val_ptr = self.insert_fit(key, val);
+ (None, val_ptr)
+ } else {
+ let (middle_kv_idx, insertion) = splitpoint(self.idx);
+ let middle = unsafe { Handle::new_kv(self.node, middle_kv_idx) };
+ let mut result = middle.split(alloc);
+ let mut insertion_edge = match insertion {
+ LeftOrRight::Left(insert_idx) => unsafe {
+ Handle::new_edge(result.left.reborrow_mut(), insert_idx)
+ },
+ LeftOrRight::Right(insert_idx) => unsafe {
+ Handle::new_edge(result.right.borrow_mut(), insert_idx)
+ },
+ };
+ let val_ptr = insertion_edge.insert_fit(key, val);
+ (Some(result), val_ptr)
+ }
+ }
+}
+
+impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::Edge> {
+ /// Fixes the parent pointer and index in the child node that this edge
+    /// links to. This is useful when the ordering of edges has been changed.
+ fn correct_parent_link(self) {
+ // Create backpointer without invalidating other references to the node.
+ let ptr = unsafe { NonNull::new_unchecked(NodeRef::as_internal_ptr(&self.node)) };
+ let idx = self.idx;
+ let mut child = self.descend();
+ child.set_parent_link(ptr, idx);
+ }
+}
+
+impl<'a, K: 'a, V: 'a> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::Edge> {
+ /// Inserts a new key-value pair and an edge that will go to the right of that new pair
+ /// between this edge and the key-value pair to the right of this edge. This method assumes
+ /// that there is enough space in the node for the new pair to fit.
+ fn insert_fit(&mut self, key: K, val: V, edge: Root<K, V>) {
+ debug_assert!(self.node.len() < CAPACITY);
+ debug_assert!(edge.height == self.node.height - 1);
+ let new_len = self.node.len() + 1;
+
+ unsafe {
+ slice_insert(self.node.key_area_mut(..new_len), self.idx, key);
+ slice_insert(self.node.val_area_mut(..new_len), self.idx, val);
+ slice_insert(self.node.edge_area_mut(..new_len + 1), self.idx + 1, edge.node);
+ *self.node.len_mut() = new_len as u16;
+
+ self.node.correct_childrens_parent_links(self.idx + 1..new_len + 1);
+ }
+ }
+
+ /// Inserts a new key-value pair and an edge that will go to the right of that new pair
+ /// between this edge and the key-value pair to the right of this edge. This method splits
+ /// the node if there isn't enough room.
+ fn insert<A: Allocator + Clone>(
+ mut self,
+ key: K,
+ val: V,
+ edge: Root<K, V>,
+ alloc: A,
+ ) -> Option<SplitResult<'a, K, V, marker::Internal>> {
+ assert!(edge.height == self.node.height - 1);
+
+ if self.node.len() < CAPACITY {
+ self.insert_fit(key, val, edge);
+ None
+ } else {
+ let (middle_kv_idx, insertion) = splitpoint(self.idx);
+ let middle = unsafe { Handle::new_kv(self.node, middle_kv_idx) };
+ let mut result = middle.split(alloc);
+ let mut insertion_edge = match insertion {
+ LeftOrRight::Left(insert_idx) => unsafe {
+ Handle::new_edge(result.left.reborrow_mut(), insert_idx)
+ },
+ LeftOrRight::Right(insert_idx) => unsafe {
+ Handle::new_edge(result.right.borrow_mut(), insert_idx)
+ },
+ };
+ insertion_edge.insert_fit(key, val, edge);
+ Some(result)
+ }
+ }
+}
+
+impl<'a, K: 'a, V: 'a> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge> {
+ /// Inserts a new key-value pair between the key-value pairs to the right and left of
+ /// this edge. This method splits the node if there isn't enough room, and tries to
+ /// insert the split off portion into the parent node recursively, until the root is reached.
+ ///
+ /// If the returned result is some `SplitResult`, the `left` field will be the root node.
+ /// The returned pointer points to the inserted value, which in the case of `SplitResult`
+ /// is in the `left` or `right` tree.
+ pub fn insert_recursing<A: Allocator + Clone>(
+ self,
+ key: K,
+ value: V,
+ alloc: A,
+ ) -> (Option<SplitResult<'a, K, V, marker::LeafOrInternal>>, *mut V) {
+ let (mut split, val_ptr) = match self.insert(key, value, alloc.clone()) {
+ (None, val_ptr) => return (None, val_ptr),
+ (Some(split), val_ptr) => (split.forget_node_type(), val_ptr),
+ };
+
+ loop {
+ split = match split.left.ascend() {
+ Ok(parent) => {
+ match parent.insert(split.kv.0, split.kv.1, split.right, alloc.clone()) {
+ None => return (None, val_ptr),
+ Some(split) => split.forget_node_type(),
+ }
+ }
+ Err(root) => return (Some(SplitResult { left: root, ..split }), val_ptr),
+ };
+ }
+ }
+}
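+
+// Illustration (a sketch, not code from this module): a caller receiving
+// `Some(split)` must grow the tree by one level; `root` below stands for the
+// owned root handle, glossing over how the caller reacquires it:
+//
+//     let (split, _val_ptr) = handle.insert_recursing(key, value, alloc.clone());
+//     if let Some(SplitResult { kv: (k, v), right, .. }) = split {
+//         root.push_internal_level(alloc).push(k, v, right);
+//     }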
+
+impl<BorrowType: marker::BorrowType, K, V>
+ Handle<NodeRef<BorrowType, K, V, marker::Internal>, marker::Edge>
+{
+ /// Finds the node pointed to by this edge.
+ ///
+ /// The method name assumes you picture trees with the root node on top.
+ ///
+ /// `edge.descend().ascend().unwrap()` and `node.ascend().unwrap().descend()` should
+ /// both, upon success, do nothing.
+ pub fn descend(self) -> NodeRef<BorrowType, K, V, marker::LeafOrInternal> {
+ assert!(BorrowType::PERMITS_TRAVERSAL);
+ // We need to use raw pointers to nodes because, if BorrowType is
+ // marker::ValMut, there might be outstanding mutable references to
+ // values that we must not invalidate. There's no worry accessing the
+ // height field because that value is copied. Beware that, once the
+ // node pointer is dereferenced, we access the edges array with a
+ // reference (Rust issue #73987) and invalidate any other references
+ // to or inside the array, should any be around.
+ let parent_ptr = NodeRef::as_internal_ptr(&self.node);
+ let node = unsafe { (*parent_ptr).edges.get_unchecked(self.idx).assume_init_read() };
+ NodeRef { node, height: self.node.height - 1, _marker: PhantomData }
+ }
+}
+
+impl<'a, K: 'a, V: 'a, NodeType> Handle<NodeRef<marker::Immut<'a>, K, V, NodeType>, marker::KV> {
+ pub fn into_kv(self) -> (&'a K, &'a V) {
+ debug_assert!(self.idx < self.node.len());
+ let leaf = self.node.into_leaf();
+ let k = unsafe { leaf.keys.get_unchecked(self.idx).assume_init_ref() };
+ let v = unsafe { leaf.vals.get_unchecked(self.idx).assume_init_ref() };
+ (k, v)
+ }
+}
+
+impl<'a, K: 'a, V: 'a, NodeType> Handle<NodeRef<marker::Mut<'a>, K, V, NodeType>, marker::KV> {
+ pub fn key_mut(&mut self) -> &mut K {
+ unsafe { self.node.key_area_mut(self.idx).assume_init_mut() }
+ }
+
+ pub fn into_val_mut(self) -> &'a mut V {
+ debug_assert!(self.idx < self.node.len());
+ let leaf = self.node.into_leaf_mut();
+ unsafe { leaf.vals.get_unchecked_mut(self.idx).assume_init_mut() }
+ }
+}
+
+impl<'a, K, V, NodeType> Handle<NodeRef<marker::ValMut<'a>, K, V, NodeType>, marker::KV> {
+ pub fn into_kv_valmut(self) -> (&'a K, &'a mut V) {
+ unsafe { self.node.into_key_val_mut_at(self.idx) }
+ }
+}
+
+impl<'a, K: 'a, V: 'a, NodeType> Handle<NodeRef<marker::Mut<'a>, K, V, NodeType>, marker::KV> {
+ pub fn kv_mut(&mut self) -> (&mut K, &mut V) {
+ debug_assert!(self.idx < self.node.len());
+ // We cannot call separate key and value methods, because calling the second one
+ // invalidates the reference returned by the first.
+ unsafe {
+ let leaf = self.node.as_leaf_mut();
+ let key = leaf.keys.get_unchecked_mut(self.idx).assume_init_mut();
+ let val = leaf.vals.get_unchecked_mut(self.idx).assume_init_mut();
+ (key, val)
+ }
+ }
+
+ /// Replaces the key and value that the KV handle refers to.
+ pub fn replace_kv(&mut self, k: K, v: V) -> (K, V) {
+ let (key, val) = self.kv_mut();
+ (mem::replace(key, k), mem::replace(val, v))
+ }
+}
+
+impl<K, V, NodeType> Handle<NodeRef<marker::Dying, K, V, NodeType>, marker::KV> {
+ /// Extracts the key and value that the KV handle refers to.
+ /// # Safety
+ /// The node that the handle refers to must not yet have been deallocated.
+ pub unsafe fn into_key_val(mut self) -> (K, V) {
+ debug_assert!(self.idx < self.node.len());
+ let leaf = self.node.as_leaf_dying();
+ unsafe {
+ let key = leaf.keys.get_unchecked_mut(self.idx).assume_init_read();
+ let val = leaf.vals.get_unchecked_mut(self.idx).assume_init_read();
+ (key, val)
+ }
+ }
+
+ /// Drops the key and value that the KV handle refers to.
+ /// # Safety
+ /// The node that the handle refers to must not yet have been deallocated.
+ #[inline]
+ pub unsafe fn drop_key_val(mut self) {
+ debug_assert!(self.idx < self.node.len());
+ let leaf = self.node.as_leaf_dying();
+ unsafe {
+ leaf.keys.get_unchecked_mut(self.idx).assume_init_drop();
+ leaf.vals.get_unchecked_mut(self.idx).assume_init_drop();
+ }
+ }
+}
+
+impl<'a, K: 'a, V: 'a, NodeType> Handle<NodeRef<marker::Mut<'a>, K, V, NodeType>, marker::KV> {
+ /// Helps implementations of `split` for a particular `NodeType`,
+ /// by taking care of leaf data.
+ fn split_leaf_data(&mut self, new_node: &mut LeafNode<K, V>) -> (K, V) {
+ debug_assert!(self.idx < self.node.len());
+ let old_len = self.node.len();
+ let new_len = old_len - self.idx - 1;
+ new_node.len = new_len as u16;
+ unsafe {
+ let k = self.node.key_area_mut(self.idx).assume_init_read();
+ let v = self.node.val_area_mut(self.idx).assume_init_read();
+
+ move_to_slice(
+ self.node.key_area_mut(self.idx + 1..old_len),
+ &mut new_node.keys[..new_len],
+ );
+ move_to_slice(
+ self.node.val_area_mut(self.idx + 1..old_len),
+ &mut new_node.vals[..new_len],
+ );
+
+ *self.node.len_mut() = self.idx as u16;
+ (k, v)
+ }
+ }
+}
+
+impl<'a, K: 'a, V: 'a> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::KV> {
+ /// Splits the underlying node into three parts:
+ ///
+ /// - The node is truncated to only contain the key-value pairs to the left of
+ /// this handle.
+ /// - The key and value pointed to by this handle are extracted.
+ /// - All the key-value pairs to the right of this handle are put into a newly
+ /// allocated node.
+ pub fn split<A: Allocator + Clone>(mut self, alloc: A) -> SplitResult<'a, K, V, marker::Leaf> {
+ let mut new_node = LeafNode::new(alloc);
+
+ let kv = self.split_leaf_data(&mut new_node);
+
+ let right = NodeRef::from_new_leaf(new_node);
+ SplitResult { left: self.node, kv, right }
+ }
+
+ /// Removes the key-value pair pointed to by this handle and returns it, along with the edge
+ /// that the key-value pair collapsed into.
+ pub fn remove(
+ mut self,
+ ) -> ((K, V), Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge>) {
+ let old_len = self.node.len();
+ unsafe {
+ let k = slice_remove(self.node.key_area_mut(..old_len), self.idx);
+ let v = slice_remove(self.node.val_area_mut(..old_len), self.idx);
+ *self.node.len_mut() = (old_len - 1) as u16;
+ ((k, v), self.left_edge())
+ }
+ }
+}
+
+impl<'a, K: 'a, V: 'a> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::KV> {
+ /// Splits the underlying node into three parts:
+ ///
+ /// - The node is truncated to only contain the edges and key-value pairs to the
+ /// left of this handle.
+ /// - The key and value pointed to by this handle are extracted.
+ /// - All the edges and key-value pairs to the right of this handle are put into
+ /// a newly allocated node.
+ pub fn split<A: Allocator + Clone>(
+ mut self,
+ alloc: A,
+ ) -> SplitResult<'a, K, V, marker::Internal> {
+ let old_len = self.node.len();
+ unsafe {
+ let mut new_node = InternalNode::new(alloc);
+ let kv = self.split_leaf_data(&mut new_node.data);
+ let new_len = usize::from(new_node.data.len);
+ move_to_slice(
+ self.node.edge_area_mut(self.idx + 1..old_len + 1),
+ &mut new_node.edges[..new_len + 1],
+ );
+
+ let height = self.node.height;
+ let right = NodeRef::from_new_internal(new_node, height);
+
+ SplitResult { left: self.node, kv, right }
+ }
+ }
+}
+
+/// Represents a session for evaluating and performing a balancing operation
+/// around an internal key-value pair.
+pub struct BalancingContext<'a, K, V> {
+ parent: Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::KV>,
+ left_child: NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>,
+ right_child: NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>,
+}
+
+impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::KV> {
+ pub fn consider_for_balancing(self) -> BalancingContext<'a, K, V> {
+ let self1 = unsafe { ptr::read(&self) };
+ let self2 = unsafe { ptr::read(&self) };
+ BalancingContext {
+ parent: self,
+ left_child: self1.left_edge().descend(),
+ right_child: self2.right_edge().descend(),
+ }
+ }
+}
+
+impl<'a, K, V> NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal> {
+ /// Chooses a balancing context involving the node as a child, thus between
+ /// the KV immediately to the left or to the right in the parent node.
+ /// Returns an `Err` if there is no parent.
+ /// Panics if the parent is empty.
+ ///
+ /// Prefers the left side, to be optimal if the given node is somehow
+ /// underfull, meaning here only that it has fewer elements than its left
+ /// sibling and than its right sibling, if they exist. In that case,
+ /// merging with the left sibling is faster, since we only need to move
+ /// the node's N elements, instead of shifting them to the right and moving
+ /// more than N elements in front. Stealing from the left sibling is also
+ /// typically faster, since we only need to shift the node's N elements to
+ /// the right, instead of shifting at least N of the sibling's elements to
+ /// the left.
+ pub fn choose_parent_kv(self) -> Result<LeftOrRight<BalancingContext<'a, K, V>>, Self> {
+ match unsafe { ptr::read(&self) }.ascend() {
+ Ok(parent_edge) => match parent_edge.left_kv() {
+ Ok(left_parent_kv) => Ok(LeftOrRight::Left(BalancingContext {
+ parent: unsafe { ptr::read(&left_parent_kv) },
+ left_child: left_parent_kv.left_edge().descend(),
+ right_child: self,
+ })),
+ Err(parent_edge) => match parent_edge.right_kv() {
+ Ok(right_parent_kv) => Ok(LeftOrRight::Right(BalancingContext {
+ parent: unsafe { ptr::read(&right_parent_kv) },
+ left_child: self,
+ right_child: right_parent_kv.right_edge().descend(),
+ })),
+ Err(_) => unreachable!("empty internal node"),
+ },
+ },
+ Err(root) => Err(root),
+ }
+ }
+}
+
+impl<'a, K, V> BalancingContext<'a, K, V> {
+ pub fn left_child_len(&self) -> usize {
+ self.left_child.len()
+ }
+
+ pub fn right_child_len(&self) -> usize {
+ self.right_child.len()
+ }
+
+ pub fn into_left_child(self) -> NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal> {
+ self.left_child
+ }
+
+ pub fn into_right_child(self) -> NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal> {
+ self.right_child
+ }
+
+ /// Returns whether merging is possible, i.e., whether there is enough room
+ /// in a node to combine the central KV with both adjacent child nodes.
+ pub fn can_merge(&self) -> bool {
+ self.left_child.len() + 1 + self.right_child.len() <= CAPACITY
+ }
+}
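+
+// For example (illustration only, with CAPACITY = 11 when B = 6): children of
+// lengths 5 and 5 can merge, since 5 + 1 + 5 == 11 fits in one node, while
+// children of lengths 7 and 4 cannot, since 7 + 1 + 4 == 12 exceeds CAPACITY.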
+
+impl<'a, K: 'a, V: 'a> BalancingContext<'a, K, V> {
+ /// Performs a merge and lets a closure decide what to return.
+ fn do_merge<
+ F: FnOnce(
+ NodeRef<marker::Mut<'a>, K, V, marker::Internal>,
+ NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>,
+ ) -> R,
+ R,
+ A: Allocator,
+ >(
+ self,
+ result: F,
+ alloc: A,
+ ) -> R {
+ let Handle { node: mut parent_node, idx: parent_idx, _marker } = self.parent;
+ let old_parent_len = parent_node.len();
+ let mut left_node = self.left_child;
+ let old_left_len = left_node.len();
+ let mut right_node = self.right_child;
+ let right_len = right_node.len();
+ let new_left_len = old_left_len + 1 + right_len;
+
+ assert!(new_left_len <= CAPACITY);
+
+ unsafe {
+ *left_node.len_mut() = new_left_len as u16;
+
+ let parent_key = slice_remove(parent_node.key_area_mut(..old_parent_len), parent_idx);
+ left_node.key_area_mut(old_left_len).write(parent_key);
+ move_to_slice(
+ right_node.key_area_mut(..right_len),
+ left_node.key_area_mut(old_left_len + 1..new_left_len),
+ );
+
+ let parent_val = slice_remove(parent_node.val_area_mut(..old_parent_len), parent_idx);
+ left_node.val_area_mut(old_left_len).write(parent_val);
+ move_to_slice(
+ right_node.val_area_mut(..right_len),
+ left_node.val_area_mut(old_left_len + 1..new_left_len),
+ );
+
+ slice_remove(&mut parent_node.edge_area_mut(..old_parent_len + 1), parent_idx + 1);
+ parent_node.correct_childrens_parent_links(parent_idx + 1..old_parent_len);
+ *parent_node.len_mut() -= 1;
+
+ if parent_node.height > 1 {
+ // SAFETY: the height of the nodes being merged is one below the height
+ // of the node of this edge, thus above zero, so they are internal.
+ let mut left_node = left_node.reborrow_mut().cast_to_internal_unchecked();
+ let mut right_node = right_node.cast_to_internal_unchecked();
+ move_to_slice(
+ right_node.edge_area_mut(..right_len + 1),
+ left_node.edge_area_mut(old_left_len + 1..new_left_len + 1),
+ );
+
+ left_node.correct_childrens_parent_links(old_left_len + 1..new_left_len + 1);
+
+ alloc.deallocate(right_node.node.cast(), Layout::new::<InternalNode<K, V>>());
+ } else {
+ alloc.deallocate(right_node.node.cast(), Layout::new::<LeafNode<K, V>>());
+ }
+ }
+ result(parent_node, left_node)
+ }
+
+ /// Merges the parent's key-value pair and both adjacent child nodes into
+ /// the left child node and returns the shrunk parent node.
+ ///
+ /// Panics unless we `.can_merge()`.
+ pub fn merge_tracking_parent<A: Allocator + Clone>(
+ self,
+ alloc: A,
+ ) -> NodeRef<marker::Mut<'a>, K, V, marker::Internal> {
+ self.do_merge(|parent, _child| parent, alloc)
+ }
+
+ /// Merges the parent's key-value pair and both adjacent child nodes into
+ /// the left child node and returns that child node.
+ ///
+ /// Panics unless we `.can_merge()`.
+ pub fn merge_tracking_child<A: Allocator + Clone>(
+ self,
+ alloc: A,
+ ) -> NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal> {
+ self.do_merge(|_parent, child| child, alloc)
+ }
+
+ /// Merges the parent's key-value pair and both adjacent child nodes into
+ /// the left child node and returns the edge handle in that child node
+    /// where the tracked child edge ended up.
+ ///
+ /// Panics unless we `.can_merge()`.
+ pub fn merge_tracking_child_edge<A: Allocator + Clone>(
+ self,
+ track_edge_idx: LeftOrRight<usize>,
+ alloc: A,
+ ) -> Handle<NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>, marker::Edge> {
+ let old_left_len = self.left_child.len();
+ let right_len = self.right_child.len();
+ assert!(match track_edge_idx {
+ LeftOrRight::Left(idx) => idx <= old_left_len,
+ LeftOrRight::Right(idx) => idx <= right_len,
+ });
+ let child = self.merge_tracking_child(alloc);
+ let new_idx = match track_edge_idx {
+ LeftOrRight::Left(idx) => idx,
+ LeftOrRight::Right(idx) => old_left_len + 1 + idx,
+ };
+ unsafe { Handle::new_edge(child, new_idx) }
+ }
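+
+    // Worked example (illustration only): merging children of lengths 5 and 5
+    // lays out the left child's entries, then the parent KV, then the right
+    // child's entries, so a tracked edge `Right(2)` of the old right child
+    // ends up at index 5 + 1 + 2 == 8 of the merged node.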
+
+ /// Removes a key-value pair from the left child and places it in the key-value storage
+ /// of the parent, while pushing the old parent key-value pair into the right child.
+ /// Returns a handle to the edge in the right child corresponding to where the original
+ /// edge specified by `track_right_edge_idx` ended up.
+ pub fn steal_left(
+ mut self,
+ track_right_edge_idx: usize,
+ ) -> Handle<NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>, marker::Edge> {
+ self.bulk_steal_left(1);
+ unsafe { Handle::new_edge(self.right_child, 1 + track_right_edge_idx) }
+ }
+
+ /// Removes a key-value pair from the right child and places it in the key-value storage
+ /// of the parent, while pushing the old parent key-value pair onto the left child.
+ /// Returns a handle to the edge in the left child specified by `track_left_edge_idx`,
+ /// which didn't move.
+ pub fn steal_right(
+ mut self,
+ track_left_edge_idx: usize,
+ ) -> Handle<NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>, marker::Edge> {
+ self.bulk_steal_right(1);
+ unsafe { Handle::new_edge(self.left_child, track_left_edge_idx) }
+ }
+
+    /// Does the same stealing as `steal_left`, but steals `count` elements at once.
+ pub fn bulk_steal_left(&mut self, count: usize) {
+ assert!(count > 0);
+ unsafe {
+ let left_node = &mut self.left_child;
+ let old_left_len = left_node.len();
+ let right_node = &mut self.right_child;
+ let old_right_len = right_node.len();
+
+ // Make sure that we may steal safely.
+ assert!(old_right_len + count <= CAPACITY);
+ assert!(old_left_len >= count);
+
+ let new_left_len = old_left_len - count;
+ let new_right_len = old_right_len + count;
+ *left_node.len_mut() = new_left_len as u16;
+ *right_node.len_mut() = new_right_len as u16;
+
+ // Move leaf data.
+ {
+ // Make room for stolen elements in the right child.
+ slice_shr(right_node.key_area_mut(..new_right_len), count);
+ slice_shr(right_node.val_area_mut(..new_right_len), count);
+
+ // Move elements from the left child to the right one.
+ move_to_slice(
+ left_node.key_area_mut(new_left_len + 1..old_left_len),
+ right_node.key_area_mut(..count - 1),
+ );
+ move_to_slice(
+ left_node.val_area_mut(new_left_len + 1..old_left_len),
+ right_node.val_area_mut(..count - 1),
+ );
+
+ // Move the left-most stolen pair to the parent.
+ let k = left_node.key_area_mut(new_left_len).assume_init_read();
+ let v = left_node.val_area_mut(new_left_len).assume_init_read();
+ let (k, v) = self.parent.replace_kv(k, v);
+
+ // Move parent's key-value pair to the right child.
+ right_node.key_area_mut(count - 1).write(k);
+ right_node.val_area_mut(count - 1).write(v);
+ }
+
+ match (left_node.reborrow_mut().force(), right_node.reborrow_mut().force()) {
+ (ForceResult::Internal(mut left), ForceResult::Internal(mut right)) => {
+ // Make room for stolen edges.
+ slice_shr(right.edge_area_mut(..new_right_len + 1), count);
+
+ // Steal edges.
+ move_to_slice(
+ left.edge_area_mut(new_left_len + 1..old_left_len + 1),
+ right.edge_area_mut(..count),
+ );
+
+ right.correct_childrens_parent_links(0..new_right_len + 1);
+ }
+ (ForceResult::Leaf(_), ForceResult::Leaf(_)) => {}
+ _ => unreachable!(),
+ }
+ }
+ }
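+
+    // Worked example (illustration only): with left child [a b c d e f] and
+    // right child [x y] under parent KV p, `bulk_steal_left(2)` truncates the
+    // left child to [a b c d], replaces the parent KV with e, and turns the
+    // right child into [f p x y].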
+
+    /// The symmetric counterpart of `bulk_steal_left`.
+ pub fn bulk_steal_right(&mut self, count: usize) {
+ assert!(count > 0);
+ unsafe {
+ let left_node = &mut self.left_child;
+ let old_left_len = left_node.len();
+ let right_node = &mut self.right_child;
+ let old_right_len = right_node.len();
+
+ // Make sure that we may steal safely.
+ assert!(old_left_len + count <= CAPACITY);
+ assert!(old_right_len >= count);
+
+ let new_left_len = old_left_len + count;
+ let new_right_len = old_right_len - count;
+ *left_node.len_mut() = new_left_len as u16;
+ *right_node.len_mut() = new_right_len as u16;
+
+ // Move leaf data.
+ {
+ // Move the right-most stolen pair to the parent.
+ let k = right_node.key_area_mut(count - 1).assume_init_read();
+ let v = right_node.val_area_mut(count - 1).assume_init_read();
+ let (k, v) = self.parent.replace_kv(k, v);
+
+ // Move parent's key-value pair to the left child.
+ left_node.key_area_mut(old_left_len).write(k);
+ left_node.val_area_mut(old_left_len).write(v);
+
+ // Move elements from the right child to the left one.
+ move_to_slice(
+ right_node.key_area_mut(..count - 1),
+ left_node.key_area_mut(old_left_len + 1..new_left_len),
+ );
+ move_to_slice(
+ right_node.val_area_mut(..count - 1),
+ left_node.val_area_mut(old_left_len + 1..new_left_len),
+ );
+
+ // Fill gap where stolen elements used to be.
+ slice_shl(right_node.key_area_mut(..old_right_len), count);
+ slice_shl(right_node.val_area_mut(..old_right_len), count);
+ }
+
+ match (left_node.reborrow_mut().force(), right_node.reborrow_mut().force()) {
+ (ForceResult::Internal(mut left), ForceResult::Internal(mut right)) => {
+ // Steal edges.
+ move_to_slice(
+ right.edge_area_mut(..count),
+ left.edge_area_mut(old_left_len + 1..new_left_len + 1),
+ );
+
+ // Fill gap where stolen edges used to be.
+ slice_shl(right.edge_area_mut(..old_right_len + 1), count);
+
+ left.correct_childrens_parent_links(old_left_len + 1..new_left_len + 1);
+ right.correct_childrens_parent_links(0..new_right_len + 1);
+ }
+ (ForceResult::Leaf(_), ForceResult::Leaf(_)) => {}
+ _ => unreachable!(),
+ }
+ }
+ }
+}
+
+impl<BorrowType, K, V> Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge> {
+ pub fn forget_node_type(
+ self,
+ ) -> Handle<NodeRef<BorrowType, K, V, marker::LeafOrInternal>, marker::Edge> {
+ unsafe { Handle::new_edge(self.node.forget_type(), self.idx) }
+ }
+}
+
+impl<BorrowType, K, V> Handle<NodeRef<BorrowType, K, V, marker::Internal>, marker::Edge> {
+ pub fn forget_node_type(
+ self,
+ ) -> Handle<NodeRef<BorrowType, K, V, marker::LeafOrInternal>, marker::Edge> {
+ unsafe { Handle::new_edge(self.node.forget_type(), self.idx) }
+ }
+}
+
+impl<BorrowType, K, V> Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::KV> {
+ pub fn forget_node_type(
+ self,
+ ) -> Handle<NodeRef<BorrowType, K, V, marker::LeafOrInternal>, marker::KV> {
+ unsafe { Handle::new_kv(self.node.forget_type(), self.idx) }
+ }
+}
+
+impl<BorrowType, K, V, Type> Handle<NodeRef<BorrowType, K, V, marker::LeafOrInternal>, Type> {
+ /// Checks whether the underlying node is an `Internal` node or a `Leaf` node.
+ pub fn force(
+ self,
+ ) -> ForceResult<
+ Handle<NodeRef<BorrowType, K, V, marker::Leaf>, Type>,
+ Handle<NodeRef<BorrowType, K, V, marker::Internal>, Type>,
+ > {
+ match self.node.force() {
+ ForceResult::Leaf(node) => {
+ ForceResult::Leaf(Handle { node, idx: self.idx, _marker: PhantomData })
+ }
+ ForceResult::Internal(node) => {
+ ForceResult::Internal(Handle { node, idx: self.idx, _marker: PhantomData })
+ }
+ }
+ }
+}
+
+impl<'a, K, V, Type> Handle<NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>, Type> {
+ /// Unsafely asserts to the compiler the static information that the handle's node is a `Leaf`.
+ pub unsafe fn cast_to_leaf_unchecked(
+ self,
+ ) -> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, Type> {
+ let node = unsafe { self.node.cast_to_leaf_unchecked() };
+ Handle { node, idx: self.idx, _marker: PhantomData }
+ }
+}
+
+impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>, marker::Edge> {
+    /// Moves the suffix after `self` from one node to another. `right` must be empty.
+ /// The first edge of `right` remains unchanged.
+ pub fn move_suffix(
+ &mut self,
+ right: &mut NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>,
+ ) {
+ unsafe {
+ let new_left_len = self.idx;
+ let mut left_node = self.reborrow_mut().into_node();
+ let old_left_len = left_node.len();
+
+ let new_right_len = old_left_len - new_left_len;
+ let mut right_node = right.reborrow_mut();
+
+ assert!(right_node.len() == 0);
+ assert!(left_node.height == right_node.height);
+
+ if new_right_len > 0 {
+ *left_node.len_mut() = new_left_len as u16;
+ *right_node.len_mut() = new_right_len as u16;
+
+ move_to_slice(
+ left_node.key_area_mut(new_left_len..old_left_len),
+ right_node.key_area_mut(..new_right_len),
+ );
+ move_to_slice(
+ left_node.val_area_mut(new_left_len..old_left_len),
+ right_node.val_area_mut(..new_right_len),
+ );
+ match (left_node.force(), right_node.force()) {
+ (ForceResult::Internal(mut left), ForceResult::Internal(mut right)) => {
+ move_to_slice(
+ left.edge_area_mut(new_left_len + 1..old_left_len + 1),
+ right.edge_area_mut(1..new_right_len + 1),
+ );
+ right.correct_childrens_parent_links(1..new_right_len + 1);
+ }
+ (ForceResult::Leaf(_), ForceResult::Leaf(_)) => {}
+ _ => unreachable!(),
+ }
+ }
+ }
+ }
+}
+
+pub enum ForceResult<Leaf, Internal> {
+ Leaf(Leaf),
+ Internal(Internal),
+}
+
+/// Result of insertion, when a node needed to expand beyond its capacity.
+pub struct SplitResult<'a, K, V, NodeType> {
+ // Altered node in existing tree with elements and edges that belong to the left of `kv`.
+ pub left: NodeRef<marker::Mut<'a>, K, V, NodeType>,
+ // Some key and value that existed before and were split off, to be inserted elsewhere.
+ pub kv: (K, V),
+ // Owned, unattached, new node with elements and edges that belong to the right of `kv`.
+ pub right: NodeRef<marker::Owned, K, V, NodeType>,
+}
+
+impl<'a, K, V> SplitResult<'a, K, V, marker::Leaf> {
+ pub fn forget_node_type(self) -> SplitResult<'a, K, V, marker::LeafOrInternal> {
+ SplitResult { left: self.left.forget_type(), kv: self.kv, right: self.right.forget_type() }
+ }
+}
+
+impl<'a, K, V> SplitResult<'a, K, V, marker::Internal> {
+ pub fn forget_node_type(self) -> SplitResult<'a, K, V, marker::LeafOrInternal> {
+ SplitResult { left: self.left.forget_type(), kv: self.kv, right: self.right.forget_type() }
+ }
+}
+
+pub mod marker {
+ use core::marker::PhantomData;
+
+ pub enum Leaf {}
+ pub enum Internal {}
+ pub enum LeafOrInternal {}
+
+ pub enum Owned {}
+ pub enum Dying {}
+ pub struct Immut<'a>(PhantomData<&'a ()>);
+ pub struct Mut<'a>(PhantomData<&'a mut ()>);
+ pub struct ValMut<'a>(PhantomData<&'a mut ()>);
+
+ pub trait BorrowType {
+ // Whether node references of this borrow type allow traversing
+ // to other nodes in the tree.
+ const PERMITS_TRAVERSAL: bool = true;
+ }
+ impl BorrowType for Owned {
+ // Traversal isn't needed, it happens using the result of `borrow_mut`.
+ // By disabling traversal, and only creating new references to roots,
+ // we know that every reference of the `Owned` type is to a root node.
+ const PERMITS_TRAVERSAL: bool = false;
+ }
+ impl BorrowType for Dying {}
+ impl<'a> BorrowType for Immut<'a> {}
+ impl<'a> BorrowType for Mut<'a> {}
+ impl<'a> BorrowType for ValMut<'a> {}
+
+ pub enum KV {}
+ pub enum Edge {}
+}
+
+/// Inserts a value into a slice of initialized elements followed by one uninitialized element.
+///
+/// # Safety
+/// The slice has more than `idx` elements.
+unsafe fn slice_insert<T>(slice: &mut [MaybeUninit<T>], idx: usize, val: T) {
+ unsafe {
+ let len = slice.len();
+ debug_assert!(len > idx);
+ let slice_ptr = slice.as_mut_ptr();
+ if len > idx + 1 {
+ ptr::copy(slice_ptr.add(idx), slice_ptr.add(idx + 1), len - idx - 1);
+ }
+ (*slice_ptr.add(idx)).write(val);
+ }
+}
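+
+// For example (illustration only): inserting `b` at index 1 into `[a, c, d, _]`
+// (three initialized elements plus one trailing uninitialized slot) shifts the
+// tail right and yields `[a, b, c, d]`.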
+
+/// Removes and returns a value from a slice of all initialized elements, leaving behind one
+/// trailing uninitialized element.
+///
+/// # Safety
+/// The slice has more than `idx` elements.
+unsafe fn slice_remove<T>(slice: &mut [MaybeUninit<T>], idx: usize) -> T {
+ unsafe {
+ let len = slice.len();
+ debug_assert!(idx < len);
+ let slice_ptr = slice.as_mut_ptr();
+ let ret = (*slice_ptr.add(idx)).assume_init_read();
+ ptr::copy(slice_ptr.add(idx + 1), slice_ptr.add(idx), len - idx - 1);
+ ret
+ }
+}
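+
+// For example (illustration only): removing index 1 from `[a, b, c]` returns
+// `b` and shifts the tail left, leaving `[a, c, _]` with one trailing
+// uninitialized slot.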
+
+/// Shifts the elements in a slice `distance` positions to the left.
+///
+/// # Safety
+/// The slice has at least `distance` elements.
+unsafe fn slice_shl<T>(slice: &mut [MaybeUninit<T>], distance: usize) {
+ unsafe {
+ let slice_ptr = slice.as_mut_ptr();
+ ptr::copy(slice_ptr.add(distance), slice_ptr, slice.len() - distance);
+ }
+}
+
+/// Shifts the elements in a slice `distance` positions to the right.
+///
+/// # Safety
+/// The slice has at least `distance` elements.
+unsafe fn slice_shr<T>(slice: &mut [MaybeUninit<T>], distance: usize) {
+ unsafe {
+ let slice_ptr = slice.as_mut_ptr();
+ ptr::copy(slice_ptr, slice_ptr.add(distance), slice.len() - distance);
+ }
+}
+
+/// Moves all values from a slice of initialized elements to a slice
+/// of uninitialized elements, leaving `src` entirely uninitialized.
+/// Works like `dst.copy_from_slice(src)` but does not require `T` to be `Copy`.
+fn move_to_slice<T>(src: &mut [MaybeUninit<T>], dst: &mut [MaybeUninit<T>]) {
+ assert!(src.len() == dst.len());
+ unsafe {
+ ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr(), src.len());
+ }
+}
+
+#[cfg(test)]
+mod tests;
diff --git a/library/alloc/src/collections/btree/node/tests.rs b/library/alloc/src/collections/btree/node/tests.rs
new file mode 100644
index 000000000..aadb0dc9c
--- /dev/null
+++ b/library/alloc/src/collections/btree/node/tests.rs
@@ -0,0 +1,102 @@
+use super::super::navigate;
+use super::*;
+use crate::alloc::Global;
+use crate::fmt::Debug;
+use crate::string::String;
+
+impl<'a, K: 'a, V: 'a> NodeRef<marker::Immut<'a>, K, V, marker::LeafOrInternal> {
+ // Asserts that the back pointer in each reachable node points to its parent.
+ pub fn assert_back_pointers(self) {
+ if let ForceResult::Internal(node) = self.force() {
+ for idx in 0..=node.len() {
+ let edge = unsafe { Handle::new_edge(node, idx) };
+ let child = edge.descend();
+ assert!(child.ascend().ok() == Some(edge));
+ child.assert_back_pointers();
+ }
+ }
+ }
+
+ // Renders a multi-line display of the keys in order and in tree hierarchy,
+ // picturing the tree growing sideways from its root on the left to its
+ // leaves on the right.
+ pub fn dump_keys(self) -> String
+ where
+ K: Debug,
+ {
+ let mut result = String::new();
+ self.visit_nodes_in_order(|pos| match pos {
+ navigate::Position::Leaf(leaf) => {
+ let depth = self.height();
+ let indent = " ".repeat(depth);
+ result += &format!("\n{}{:?}", indent, leaf.keys());
+ }
+ navigate::Position::Internal(_) => {}
+ navigate::Position::InternalKV(kv) => {
+ let depth = self.height() - kv.into_node().height();
+ let indent = " ".repeat(depth);
+ result += &format!("\n{}{:?}", indent, kv.into_kv().0);
+ }
+ });
+ result
+ }
+}
+
+#[test]
+fn test_splitpoint() {
+ for idx in 0..=CAPACITY {
+ let (middle_kv_idx, insertion) = splitpoint(idx);
+
+ // Simulate performing the split:
+ let mut left_len = middle_kv_idx;
+ let mut right_len = CAPACITY - middle_kv_idx - 1;
+ match insertion {
+ LeftOrRight::Left(edge_idx) => {
+ assert!(edge_idx <= left_len);
+ left_len += 1;
+ }
+ LeftOrRight::Right(edge_idx) => {
+ assert!(edge_idx <= right_len);
+ right_len += 1;
+ }
+ }
+ assert!(left_len >= MIN_LEN_AFTER_SPLIT);
+ assert!(right_len >= MIN_LEN_AFTER_SPLIT);
+ assert!(left_len + right_len == CAPACITY);
+ }
+}
+
+#[test]
+fn test_partial_eq() {
+ let mut root1 = NodeRef::new_leaf(Global);
+ root1.borrow_mut().push(1, ());
+ let mut root1 = NodeRef::new_internal(root1.forget_type(), Global).forget_type();
+ let root2 = Root::new(Global);
+ root1.reborrow().assert_back_pointers();
+ root2.reborrow().assert_back_pointers();
+
+ let leaf_edge_1a = root1.reborrow().first_leaf_edge().forget_node_type();
+ let leaf_edge_1b = root1.reborrow().last_leaf_edge().forget_node_type();
+ let top_edge_1 = root1.reborrow().first_edge();
+ let top_edge_2 = root2.reborrow().first_edge();
+
+ assert!(leaf_edge_1a == leaf_edge_1a);
+ assert!(leaf_edge_1a != leaf_edge_1b);
+ assert!(leaf_edge_1a != top_edge_1);
+ assert!(leaf_edge_1a != top_edge_2);
+ assert!(top_edge_1 == top_edge_1);
+ assert!(top_edge_1 != top_edge_2);
+
+ root1.pop_internal_level(Global);
+ unsafe { root1.into_dying().deallocate_and_ascend(Global) };
+ unsafe { root2.into_dying().deallocate_and_ascend(Global) };
+}
+
+#[test]
+#[cfg(target_arch = "x86_64")]
+fn test_sizes() {
+ assert_eq!(core::mem::size_of::<LeafNode<(), ()>>(), 16);
+ assert_eq!(core::mem::size_of::<LeafNode<i64, i64>>(), 16 + CAPACITY * 2 * 8);
+ assert_eq!(core::mem::size_of::<InternalNode<(), ()>>(), 16 + (CAPACITY + 1) * 8);
+ assert_eq!(core::mem::size_of::<InternalNode<i64, i64>>(), 16 + (CAPACITY * 3 + 1) * 8);
+}
diff --git a/library/alloc/src/collections/btree/remove.rs b/library/alloc/src/collections/btree/remove.rs
new file mode 100644
index 000000000..090429925
--- /dev/null
+++ b/library/alloc/src/collections/btree/remove.rs
@@ -0,0 +1,95 @@
+use super::map::MIN_LEN;
+use super::node::{marker, ForceResult::*, Handle, LeftOrRight::*, NodeRef};
+use core::alloc::Allocator;
+
+impl<'a, K: 'a, V: 'a> Handle<NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>, marker::KV> {
+ /// Removes a key-value pair from the tree, and returns that pair, as well as
+ /// the leaf edge corresponding to that former pair. It's possible this empties
+ /// a root node that is internal, which the caller should pop from the map
+ /// holding the tree. The caller should also decrement the map's length.
+ pub fn remove_kv_tracking<F: FnOnce(), A: Allocator + Clone>(
+ self,
+ handle_emptied_internal_root: F,
+ alloc: A,
+ ) -> ((K, V), Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge>) {
+ match self.force() {
+ Leaf(node) => node.remove_leaf_kv(handle_emptied_internal_root, alloc),
+ Internal(node) => node.remove_internal_kv(handle_emptied_internal_root, alloc),
+ }
+ }
+}
+
+impl<'a, K: 'a, V: 'a> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::KV> {
+ fn remove_leaf_kv<F: FnOnce(), A: Allocator + Clone>(
+ self,
+ handle_emptied_internal_root: F,
+ alloc: A,
+ ) -> ((K, V), Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge>) {
+ let (old_kv, mut pos) = self.remove();
+ let len = pos.reborrow().into_node().len();
+ if len < MIN_LEN {
+ let idx = pos.idx();
+ // We have to temporarily forget the child type, because there is no
+ // distinct node type for the immediate parents of a leaf.
+ let new_pos = match pos.into_node().forget_type().choose_parent_kv() {
+ Ok(Left(left_parent_kv)) => {
+ debug_assert!(left_parent_kv.right_child_len() == MIN_LEN - 1);
+ if left_parent_kv.can_merge() {
+ left_parent_kv.merge_tracking_child_edge(Right(idx), alloc.clone())
+ } else {
+ debug_assert!(left_parent_kv.left_child_len() > MIN_LEN);
+ left_parent_kv.steal_left(idx)
+ }
+ }
+ Ok(Right(right_parent_kv)) => {
+ debug_assert!(right_parent_kv.left_child_len() == MIN_LEN - 1);
+ if right_parent_kv.can_merge() {
+ right_parent_kv.merge_tracking_child_edge(Left(idx), alloc.clone())
+ } else {
+ debug_assert!(right_parent_kv.right_child_len() > MIN_LEN);
+ right_parent_kv.steal_right(idx)
+ }
+ }
+ Err(pos) => unsafe { Handle::new_edge(pos, idx) },
+ };
+ // SAFETY: `new_pos` is the leaf we started from or a sibling.
+ pos = unsafe { new_pos.cast_to_leaf_unchecked() };
+
+            // The parent (if any) has shrunk only if we merged, but skipping
+            // the following step in the other cases does not pay off in benchmarks.
+ //
+ // SAFETY: We won't destroy or rearrange the leaf where `pos` is at
+ // by handling its parent recursively; at worst we will destroy or
+ // rearrange the parent through the grandparent, thus change the
+ // link to the parent inside the leaf.
+ if let Ok(parent) = unsafe { pos.reborrow_mut() }.into_node().ascend() {
+ if !parent.into_node().forget_type().fix_node_and_affected_ancestors(alloc) {
+ handle_emptied_internal_root();
+ }
+ }
+ }
+ (old_kv, pos)
+ }
+}
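+
+// Worked example (illustration only; MIN_LEN = 5 and CAPACITY = 11 when B = 6):
+// removing from a leaf of length 5 leaves it underfull at length 4. If its
+// left sibling holds 6 or fewer entries, the two children and the parent KV
+// merge (at most 6 + 1 + 4 == 11, which fits); if the sibling holds 7 or more,
+// merging would overflow CAPACITY, so one entry is stolen from it instead.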
+
+impl<'a, K: 'a, V: 'a> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::KV> {
+ fn remove_internal_kv<F: FnOnce(), A: Allocator + Clone>(
+ self,
+ handle_emptied_internal_root: F,
+ alloc: A,
+ ) -> ((K, V), Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge>) {
+ // Remove an adjacent KV from its leaf and then put it back in place of
+ // the element we were asked to remove. Prefer the left adjacent KV,
+ // for the reasons listed in `choose_parent_kv`.
+ let left_leaf_kv = self.left_edge().descend().last_leaf_edge().left_kv();
+ let left_leaf_kv = unsafe { left_leaf_kv.ok().unwrap_unchecked() };
+ let (left_kv, left_hole) = left_leaf_kv.remove_leaf_kv(handle_emptied_internal_root, alloc);
+
+ // The internal node may have been stolen from or merged. Go back right
+ // to find where the original KV ended up.
+ let mut internal = unsafe { left_hole.next_kv().ok().unwrap_unchecked() };
+ let old_kv = internal.replace_kv(left_kv.0, left_kv.1);
+ let pos = internal.next_leaf_edge();
+ (old_kv, pos)
+ }
+}
diff --git a/library/alloc/src/collections/btree/search.rs b/library/alloc/src/collections/btree/search.rs
new file mode 100644
index 000000000..ad3522b4e
--- /dev/null
+++ b/library/alloc/src/collections/btree/search.rs
@@ -0,0 +1,285 @@
+use core::borrow::Borrow;
+use core::cmp::Ordering;
+use core::ops::{Bound, RangeBounds};
+
+use super::node::{marker, ForceResult::*, Handle, NodeRef};
+
+use SearchBound::*;
+use SearchResult::*;
+
+pub enum SearchBound<T> {
+ /// An inclusive bound to look for, just like `Bound::Included(T)`.
+ Included(T),
+ /// An exclusive bound to look for, just like `Bound::Excluded(T)`.
+ Excluded(T),
+ /// An unconditional inclusive bound, just like `Bound::Unbounded`.
+ AllIncluded,
+ /// An unconditional exclusive bound.
+ AllExcluded,
+}
+
+impl<T> SearchBound<T> {
+ pub fn from_range(range_bound: Bound<T>) -> Self {
+ match range_bound {
+ Bound::Included(t) => Included(t),
+ Bound::Excluded(t) => Excluded(t),
+ Bound::Unbounded => AllIncluded,
+ }
+ }
+}
+
+pub enum SearchResult<BorrowType, K, V, FoundType, GoDownType> {
+ Found(Handle<NodeRef<BorrowType, K, V, FoundType>, marker::KV>),
+ GoDown(Handle<NodeRef<BorrowType, K, V, GoDownType>, marker::Edge>),
+}
+
+pub enum IndexResult {
+ KV(usize),
+ Edge(usize),
+}
+
+impl<BorrowType: marker::BorrowType, K, V> NodeRef<BorrowType, K, V, marker::LeafOrInternal> {
+ /// Looks up a given key in a (sub)tree headed by the node, recursively.
+ /// Returns a `Found` with the handle of the matching KV, if any. Otherwise,
+ /// returns a `GoDown` with the handle of the leaf edge where the key belongs.
+ ///
+ /// The result is meaningful only if the tree is ordered by key, like the tree
+ /// in a `BTreeMap` is.
+ pub fn search_tree<Q: ?Sized>(
+ mut self,
+ key: &Q,
+ ) -> SearchResult<BorrowType, K, V, marker::LeafOrInternal, marker::Leaf>
+ where
+ Q: Ord,
+ K: Borrow<Q>,
+ {
+ loop {
+ self = match self.search_node(key) {
+ Found(handle) => return Found(handle),
+ GoDown(handle) => match handle.force() {
+ Leaf(leaf) => return GoDown(leaf),
+ Internal(internal) => internal.descend(),
+ },
+ }
+ }
+ }
+
+ /// Descends to the nearest node where the edge matching the lower bound
+ /// of the range is different from the edge matching the upper bound, i.e.,
+ /// the nearest node that has at least one key contained in the range.
+ ///
+ /// If found, returns an `Ok` with that node, the strictly ascending pair of
+ /// edge indices in the node delimiting the range, and the corresponding
+ /// pair of bounds for continuing the search in the child nodes, in case
+ /// the node is internal.
+ ///
+ /// If not found, returns an `Err` with the leaf edge matching the entire
+ /// range.
+ ///
+ /// As a diagnostic service, panics if the range specifies impossible bounds.
+ ///
+ /// The result is meaningful only if the tree is ordered by key.
+ pub fn search_tree_for_bifurcation<'r, Q: ?Sized, R>(
+ mut self,
+ range: &'r R,
+ ) -> Result<
+ (
+ NodeRef<BorrowType, K, V, marker::LeafOrInternal>,
+ usize,
+ usize,
+ SearchBound<&'r Q>,
+ SearchBound<&'r Q>,
+ ),
+ Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge>,
+ >
+ where
+ Q: Ord,
+ K: Borrow<Q>,
+ R: RangeBounds<Q>,
+ {
+ // Determine if map or set is being searched
+ let is_set = <V as super::set_val::IsSetVal>::is_set_val();
+
+ // Inlining these variables should be avoided. We assume the bounds reported by `range`
+        // remain the same, but an adversarial implementation could change them between calls (#81138).
+ let (start, end) = (range.start_bound(), range.end_bound());
+ match (start, end) {
+ (Bound::Excluded(s), Bound::Excluded(e)) if s == e => {
+ if is_set {
+ panic!("range start and end are equal and excluded in BTreeSet")
+ } else {
+ panic!("range start and end are equal and excluded in BTreeMap")
+ }
+ }
+ (Bound::Included(s) | Bound::Excluded(s), Bound::Included(e) | Bound::Excluded(e))
+ if s > e =>
+ {
+ if is_set {
+ panic!("range start is greater than range end in BTreeSet")
+ } else {
+ panic!("range start is greater than range end in BTreeMap")
+ }
+ }
+ _ => {}
+ }
+ let mut lower_bound = SearchBound::from_range(start);
+ let mut upper_bound = SearchBound::from_range(end);
+ loop {
+ let (lower_edge_idx, lower_child_bound) = self.find_lower_bound_index(lower_bound);
+ let (upper_edge_idx, upper_child_bound) =
+ unsafe { self.find_upper_bound_index(upper_bound, lower_edge_idx) };
+ if lower_edge_idx < upper_edge_idx {
+ return Ok((
+ self,
+ lower_edge_idx,
+ upper_edge_idx,
+ lower_child_bound,
+ upper_child_bound,
+ ));
+ }
+ debug_assert_eq!(lower_edge_idx, upper_edge_idx);
+ let common_edge = unsafe { Handle::new_edge(self, lower_edge_idx) };
+ match common_edge.force() {
+ Leaf(common_edge) => return Err(common_edge),
+ Internal(common_edge) => {
+ self = common_edge.descend();
+ lower_bound = lower_child_bound;
+ upper_bound = upper_child_bound;
+ }
+ }
+ }
+ }
+
+ /// Finds an edge in the node delimiting the lower bound of a range.
+ /// Also returns the lower bound to be used for continuing the search in
+ /// the matching child node, if `self` is an internal node.
+ ///
+ /// The result is meaningful only if the tree is ordered by key.
+ pub fn find_lower_bound_edge<'r, Q>(
+ self,
+ bound: SearchBound<&'r Q>,
+ ) -> (Handle<Self, marker::Edge>, SearchBound<&'r Q>)
+ where
+ Q: ?Sized + Ord,
+ K: Borrow<Q>,
+ {
+ let (edge_idx, bound) = self.find_lower_bound_index(bound);
+ let edge = unsafe { Handle::new_edge(self, edge_idx) };
+ (edge, bound)
+ }
+
+    /// Mirror of `find_lower_bound_edge` for the upper bound.
+ pub fn find_upper_bound_edge<'r, Q>(
+ self,
+ bound: SearchBound<&'r Q>,
+ ) -> (Handle<Self, marker::Edge>, SearchBound<&'r Q>)
+ where
+ Q: ?Sized + Ord,
+ K: Borrow<Q>,
+ {
+ let (edge_idx, bound) = unsafe { self.find_upper_bound_index(bound, 0) };
+ let edge = unsafe { Handle::new_edge(self, edge_idx) };
+ (edge, bound)
+ }
+}
+
+impl<BorrowType, K, V, Type> NodeRef<BorrowType, K, V, Type> {
+ /// Looks up a given key in the node, without recursion.
+ /// Returns a `Found` with the handle of the matching KV, if any. Otherwise,
+ /// returns a `GoDown` with the handle of the edge where the key might be found
+ /// (if the node is internal) or where the key can be inserted.
+ ///
+ /// The result is meaningful only if the tree is ordered by key, like the tree
+ /// in a `BTreeMap` is.
+ pub fn search_node<Q: ?Sized>(self, key: &Q) -> SearchResult<BorrowType, K, V, Type, Type>
+ where
+ Q: Ord,
+ K: Borrow<Q>,
+ {
+ match unsafe { self.find_key_index(key, 0) } {
+ IndexResult::KV(idx) => Found(unsafe { Handle::new_kv(self, idx) }),
+ IndexResult::Edge(idx) => GoDown(unsafe { Handle::new_edge(self, idx) }),
+ }
+ }
+
+ /// Returns either the KV index in the node at which the key (or an equivalent)
+ /// exists, or the edge index where the key belongs, starting from a particular index.
+ ///
+ /// The result is meaningful only if the tree is ordered by key, like the tree
+ /// in a `BTreeMap` is.
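+    ///
+    /// For example, with node keys `[3, 7, 11]` and `start_index == 0`,
+    /// looking up `7` returns `KV(1)`, while looking up `5` returns `Edge(1)`,
+    /// the edge between `3` and `7`.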
+ ///
+ /// # Safety
+ /// `start_index` must be a valid edge index for the node.
+ unsafe fn find_key_index<Q: ?Sized>(&self, key: &Q, start_index: usize) -> IndexResult
+ where
+ Q: Ord,
+ K: Borrow<Q>,
+ {
+ let node = self.reborrow();
+ let keys = node.keys();
+ debug_assert!(start_index <= keys.len());
+ for (offset, k) in unsafe { keys.get_unchecked(start_index..) }.iter().enumerate() {
+ match key.cmp(k.borrow()) {
+ Ordering::Greater => {}
+ Ordering::Equal => return IndexResult::KV(start_index + offset),
+ Ordering::Less => return IndexResult::Edge(start_index + offset),
+ }
+ }
+ IndexResult::Edge(keys.len())
+ }
+
+ /// Finds an edge index in the node delimiting the lower bound of a range.
+ /// Also returns the lower bound to be used for continuing the search in
+ /// the matching child node, if `self` is an internal node.
+ ///
+ /// The result is meaningful only if the tree is ordered by key.
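+    ///
+    /// For example, with node keys `[3, 7, 11]`: `Included(7)` returns
+    /// `(1, AllExcluded)`, since everything in the child at edge 1 lies below
+    /// the bound; `Excluded(7)` returns `(2, AllIncluded)`; and `Included(5)`
+    /// returns `(1, Included(5))`, continuing the search in the child at edge 1.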
+ fn find_lower_bound_index<'r, Q>(
+ &self,
+ bound: SearchBound<&'r Q>,
+ ) -> (usize, SearchBound<&'r Q>)
+ where
+ Q: ?Sized + Ord,
+ K: Borrow<Q>,
+ {
+ match bound {
+ Included(key) => match unsafe { self.find_key_index(key, 0) } {
+ IndexResult::KV(idx) => (idx, AllExcluded),
+ IndexResult::Edge(idx) => (idx, bound),
+ },
+ Excluded(key) => match unsafe { self.find_key_index(key, 0) } {
+ IndexResult::KV(idx) => (idx + 1, AllIncluded),
+ IndexResult::Edge(idx) => (idx, bound),
+ },
+ AllIncluded => (0, AllIncluded),
+ AllExcluded => (self.len(), AllExcluded),
+ }
+ }
+
+ /// Mirror image of `find_lower_bound_index` for the upper bound,
+ /// with an additional parameter to skip part of the key array.
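+    ///
+    /// For example, with node keys `[3, 7, 11]` and `start_index == 0`:
+    /// `Included(7)` returns `(2, AllExcluded)`, while `Excluded(7)` returns
+    /// `(1, AllIncluded)`.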
+ ///
+ /// # Safety
+ /// `start_index` must be a valid edge index for the node.
+ unsafe fn find_upper_bound_index<'r, Q>(
+ &self,
+ bound: SearchBound<&'r Q>,
+ start_index: usize,
+ ) -> (usize, SearchBound<&'r Q>)
+ where
+ Q: ?Sized + Ord,
+ K: Borrow<Q>,
+ {
+ match bound {
+ Included(key) => match unsafe { self.find_key_index(key, start_index) } {
+ IndexResult::KV(idx) => (idx + 1, AllExcluded),
+ IndexResult::Edge(idx) => (idx, bound),
+ },
+ Excluded(key) => match unsafe { self.find_key_index(key, start_index) } {
+ IndexResult::KV(idx) => (idx, AllIncluded),
+ IndexResult::Edge(idx) => (idx, bound),
+ },
+ AllIncluded => (self.len(), AllIncluded),
+ AllExcluded => (start_index, AllExcluded),
+ }
+ }
+}
diff --git a/library/alloc/src/collections/btree/set.rs b/library/alloc/src/collections/btree/set.rs
new file mode 100644
index 000000000..2cfc08074
--- /dev/null
+++ b/library/alloc/src/collections/btree/set.rs
@@ -0,0 +1,1789 @@
+// This is pretty much entirely stolen from TreeSet, since BTreeMap has an identical interface
+// to TreeMap
+
+use crate::vec::Vec;
+use core::borrow::Borrow;
+use core::cmp::Ordering::{self, Equal, Greater, Less};
+use core::cmp::{max, min};
+use core::fmt::{self, Debug};
+use core::hash::{Hash, Hasher};
+use core::iter::{FromIterator, FusedIterator, Peekable};
+use core::mem::ManuallyDrop;
+use core::ops::{BitAnd, BitOr, BitXor, RangeBounds, Sub};
+
+use super::map::{BTreeMap, Keys};
+use super::merge_iter::MergeIterInner;
+use super::set_val::SetValZST;
+use super::Recover;
+
+use crate::alloc::{Allocator, Global};
+
+// FIXME(conventions): implement bounded iterators
+
+/// An ordered set based on a B-Tree.
+///
+/// See [`BTreeMap`]'s documentation for a detailed discussion of this collection's performance
+/// benefits and drawbacks.
+///
+/// It is a logic error for an item to be modified in such a way that the item's ordering relative
+/// to any other item, as determined by the [`Ord`] trait, changes while it is in the set. This is
+/// normally only possible through [`Cell`], [`RefCell`], global state, I/O, or unsafe code.
+/// The behavior resulting from such a logic error is not specified, but will be encapsulated to the
+/// `BTreeSet` that observed the logic error and not result in undefined behavior. This could
+/// include panics, incorrect results, aborts, memory leaks, and non-termination.
+///
+/// Iterators returned by [`BTreeSet::iter`] produce their items in order, and take worst-case
+/// logarithmic and amortized constant time per item returned.
+///
+/// [`Ord`]: core::cmp::Ord
+/// [`Cell`]: core::cell::Cell
+/// [`RefCell`]: core::cell::RefCell
+///
+/// # Examples
+///
+/// ```
+/// use std::collections::BTreeSet;
+///
+/// // Type inference lets us omit an explicit type signature (which
+/// // would be `BTreeSet<&str>` in this example).
+/// let mut books = BTreeSet::new();
+///
+/// // Add some books.
+/// books.insert("A Dance With Dragons");
+/// books.insert("To Kill a Mockingbird");
+/// books.insert("The Odyssey");
+/// books.insert("The Great Gatsby");
+///
+/// // Check for a specific one.
+/// if !books.contains("The Winds of Winter") {
+/// println!("We have {} books, but The Winds of Winter ain't one.",
+/// books.len());
+/// }
+///
+/// // Remove a book.
+/// books.remove("The Odyssey");
+///
+/// // Iterate over everything.
+/// for book in &books {
+/// println!("{book}");
+/// }
+/// ```
+///
+/// A `BTreeSet` with a known list of items can be initialized from an array:
+///
+/// ```
+/// use std::collections::BTreeSet;
+///
+/// let set = BTreeSet::from([1, 2, 3]);
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "BTreeSet")]
+pub struct BTreeSet<
+ T,
+ #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator + Clone = Global,
+> {
+ map: BTreeMap<T, SetValZST, A>,
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Hash, A: Allocator + Clone> Hash for BTreeSet<T, A> {
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ self.map.hash(state)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: PartialEq, A: Allocator + Clone> PartialEq for BTreeSet<T, A> {
+ fn eq(&self, other: &BTreeSet<T, A>) -> bool {
+ self.map.eq(&other.map)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Eq, A: Allocator + Clone> Eq for BTreeSet<T, A> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: PartialOrd, A: Allocator + Clone> PartialOrd for BTreeSet<T, A> {
+ fn partial_cmp(&self, other: &BTreeSet<T, A>) -> Option<Ordering> {
+ self.map.partial_cmp(&other.map)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Ord, A: Allocator + Clone> Ord for BTreeSet<T, A> {
+ fn cmp(&self, other: &BTreeSet<T, A>) -> Ordering {
+ self.map.cmp(&other.map)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Clone, A: Allocator + Clone> Clone for BTreeSet<T, A> {
+ fn clone(&self) -> Self {
+ BTreeSet { map: self.map.clone() }
+ }
+
+ fn clone_from(&mut self, other: &Self) {
+ self.map.clone_from(&other.map);
+ }
+}
+
+/// An iterator over the items of a `BTreeSet`.
+///
+/// This `struct` is created by the [`iter`] method on [`BTreeSet`].
+/// See its documentation for more.
+///
+/// [`iter`]: BTreeSet::iter
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Iter<'a, T: 'a> {
+ iter: Keys<'a, T, SetValZST>,
+}
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("Iter").field(&self.iter.clone()).finish()
+ }
+}
+
+/// An owning iterator over the items of a `BTreeSet`.
+///
+/// This `struct` is created by the [`into_iter`] method on [`BTreeSet`]
+/// (provided by the [`IntoIterator`] trait). See its documentation for more.
+///
+/// [`into_iter`]: BTreeSet#method.into_iter
+/// [`IntoIterator`]: core::iter::IntoIterator
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(Debug)]
+pub struct IntoIter<
+ T,
+ #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator + Clone = Global,
+> {
+ iter: super::map::IntoIter<T, SetValZST, A>,
+}
+
+/// An iterator over a sub-range of items in a `BTreeSet`.
+///
+/// This `struct` is created by the [`range`] method on [`BTreeSet`].
+/// See its documentation for more.
+///
+/// [`range`]: BTreeSet::range
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[derive(Debug)]
+#[stable(feature = "btree_range", since = "1.17.0")]
+pub struct Range<'a, T: 'a> {
+ iter: super::map::Range<'a, T, SetValZST>,
+}
+
+/// A lazy iterator producing elements in the difference of `BTreeSet`s.
+///
+/// This `struct` is created by the [`difference`] method on [`BTreeSet`].
+/// See its documentation for more.
+///
+/// [`difference`]: BTreeSet::difference
+#[must_use = "this returns the difference as an iterator, \
+ without modifying either input set"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Difference<
+ 'a,
+ T: 'a,
+ #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator + Clone = Global,
+> {
+ inner: DifferenceInner<'a, T, A>,
+}
+enum DifferenceInner<'a, T: 'a, A: Allocator + Clone> {
+ Stitch {
+ // iterate all of `self` and some of `other`, spotting matches along the way
+ self_iter: Iter<'a, T>,
+ other_iter: Peekable<Iter<'a, T>>,
+ },
+ Search {
+ // iterate `self`, look up in `other`
+ self_iter: Iter<'a, T>,
+ other_set: &'a BTreeSet<T, A>,
+ },
+ Iterate(Iter<'a, T>), // simply produce all elements in `self`
+}
+
+// Explicit Debug impl necessary because of issue #26925
+impl<T: Debug, A: Allocator + Clone> Debug for DifferenceInner<'_, T, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ DifferenceInner::Stitch { self_iter, other_iter } => f
+ .debug_struct("Stitch")
+ .field("self_iter", self_iter)
+ .field("other_iter", other_iter)
+ .finish(),
+ DifferenceInner::Search { self_iter, other_set } => f
+ .debug_struct("Search")
+ .field("self_iter", self_iter)
+ .field("other_iter", other_set)
+ .finish(),
+ DifferenceInner::Iterate(x) => f.debug_tuple("Iterate").field(x).finish(),
+ }
+ }
+}
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<T: fmt::Debug, A: Allocator + Clone> fmt::Debug for Difference<'_, T, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("Difference").field(&self.inner).finish()
+ }
+}
+
+/// A lazy iterator producing elements in the symmetric difference of `BTreeSet`s.
+///
+/// This `struct` is created by the [`symmetric_difference`] method on
+/// [`BTreeSet`]. See its documentation for more.
+///
+/// [`symmetric_difference`]: BTreeSet::symmetric_difference
+#[must_use = "this returns the difference as an iterator, \
+ without modifying either input set"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct SymmetricDifference<'a, T: 'a>(MergeIterInner<Iter<'a, T>>);
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<T: fmt::Debug> fmt::Debug for SymmetricDifference<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("SymmetricDifference").field(&self.0).finish()
+ }
+}
+
+/// A lazy iterator producing elements in the intersection of `BTreeSet`s.
+///
+/// This `struct` is created by the [`intersection`] method on [`BTreeSet`].
+/// See its documentation for more.
+///
+/// [`intersection`]: BTreeSet::intersection
+#[must_use = "this returns the intersection as an iterator, \
+ without modifying either input set"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Intersection<
+ 'a,
+ T: 'a,
+ #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator + Clone = Global,
+> {
+ inner: IntersectionInner<'a, T, A>,
+}
+enum IntersectionInner<'a, T: 'a, A: Allocator + Clone> {
+ Stitch {
+ // iterate similarly sized sets jointly, spotting matches along the way
+ a: Iter<'a, T>,
+ b: Iter<'a, T>,
+ },
+ Search {
+ // iterate a small set, look up in the large set
+ small_iter: Iter<'a, T>,
+ large_set: &'a BTreeSet<T, A>,
+ },
+ Answer(Option<&'a T>), // return a specific element or emptiness
+}
+
+// Explicit Debug impl necessary because of issue #26925
+impl<T: Debug, A: Allocator + Clone> Debug for IntersectionInner<'_, T, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ IntersectionInner::Stitch { a, b } => {
+ f.debug_struct("Stitch").field("a", a).field("b", b).finish()
+ }
+ IntersectionInner::Search { small_iter, large_set } => f
+ .debug_struct("Search")
+ .field("small_iter", small_iter)
+ .field("large_set", large_set)
+ .finish(),
+ IntersectionInner::Answer(x) => f.debug_tuple("Answer").field(x).finish(),
+ }
+ }
+}
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<T: Debug, A: Allocator + Clone> Debug for Intersection<'_, T, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("Intersection").field(&self.inner).finish()
+ }
+}
+
+/// A lazy iterator producing elements in the union of `BTreeSet`s.
+///
+/// This `struct` is created by the [`union`] method on [`BTreeSet`].
+/// See its documentation for more.
+///
+/// [`union`]: BTreeSet::union
+#[must_use = "this returns the union as an iterator, \
+ without modifying either input set"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Union<'a, T: 'a>(MergeIterInner<Iter<'a, T>>);
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<T: fmt::Debug> fmt::Debug for Union<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("Union").field(&self.0).finish()
+ }
+}
+
+// This constant is used by functions that compare two sets.
+// It estimates the relative size at which searching performs better
+// than iterating, based on the benchmarks in
+// https://github.com/ssomers/rust_bench_btreeset_intersection.
+// It's used to divide rather than multiply sizes, to rule out overflow,
+// and it's a power of two to make that division cheap.
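+// For example, comparing a 10-element set against a 1000-element one:
+// 10 <= 1000 / 16, so the lookup-based Search strategy is preferred over
+// jointly iterating both sets (Stitch).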
+const ITER_PERFORMANCE_TIPPING_SIZE_DIFF: usize = 16;
+
+impl<T> BTreeSet<T> {
+ /// Makes a new, empty `BTreeSet`.
+ ///
+ /// Does not allocate anything on its own.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #![allow(unused_mut)]
+ /// use std::collections::BTreeSet;
+ ///
+ /// let mut set: BTreeSet<i32> = BTreeSet::new();
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_btree_new", issue = "71835")]
+ #[must_use]
+ pub const fn new() -> BTreeSet<T> {
+ BTreeSet { map: BTreeMap::new() }
+ }
+}
+
+impl<T, A: Allocator + Clone> BTreeSet<T, A> {
+ /// Makes a new `BTreeSet` with a reasonable choice of B.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #![allow(unused_mut)]
+ /// # #![feature(allocator_api)]
+ /// # #![feature(btreemap_alloc)]
+ /// use std::collections::BTreeSet;
+ /// use std::alloc::Global;
+ ///
+ /// let mut set: BTreeSet<i32> = BTreeSet::new_in(Global);
+ /// ```
+ #[unstable(feature = "btreemap_alloc", issue = "32838")]
+ pub fn new_in(alloc: A) -> BTreeSet<T, A> {
+ BTreeSet { map: BTreeMap::new_in(alloc) }
+ }
+
+ /// Constructs a double-ended iterator over a sub-range of elements in the set.
+ /// The simplest way is to use the range syntax `min..max`, thus `range(min..max)` will
+ /// yield elements from min (inclusive) to max (exclusive).
+ /// The range may also be entered as `(Bound<T>, Bound<T>)`, so for example
+ /// `range((Excluded(4), Included(10)))` will yield a left-exclusive, right-inclusive
+ /// range from 4 to 10.
+ ///
+ /// # Panics
+ ///
+ /// Panics if range `start > end`.
+ /// Panics if range `start == end` and both bounds are `Excluded`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ /// use std::ops::Bound::Included;
+ ///
+ /// let mut set = BTreeSet::new();
+ /// set.insert(3);
+ /// set.insert(5);
+ /// set.insert(8);
+ /// for &elem in set.range((Included(&4), Included(&8))) {
+ /// println!("{elem}");
+ /// }
+ /// assert_eq!(Some(&5), set.range(4..).next());
+ /// ```
+ #[stable(feature = "btree_range", since = "1.17.0")]
+ pub fn range<K: ?Sized, R>(&self, range: R) -> Range<'_, T>
+ where
+ K: Ord,
+ T: Borrow<K> + Ord,
+ R: RangeBounds<K>,
+ {
+ Range { iter: self.map.range(range) }
+ }
+
+ /// Visits the elements representing the difference,
+ /// i.e., the elements that are in `self` but not in `other`,
+ /// in ascending order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let mut a = BTreeSet::new();
+ /// a.insert(1);
+ /// a.insert(2);
+ ///
+ /// let mut b = BTreeSet::new();
+ /// b.insert(2);
+ /// b.insert(3);
+ ///
+ /// let diff: Vec<_> = a.difference(&b).cloned().collect();
+ /// assert_eq!(diff, [1]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn difference<'a>(&'a self, other: &'a BTreeSet<T, A>) -> Difference<'a, T, A>
+ where
+ T: Ord,
+ {
+ let (self_min, self_max) =
+ if let (Some(self_min), Some(self_max)) = (self.first(), self.last()) {
+ (self_min, self_max)
+ } else {
+ return Difference { inner: DifferenceInner::Iterate(self.iter()) };
+ };
+ let (other_min, other_max) =
+ if let (Some(other_min), Some(other_max)) = (other.first(), other.last()) {
+ (other_min, other_max)
+ } else {
+ return Difference { inner: DifferenceInner::Iterate(self.iter()) };
+ };
+ Difference {
+ inner: match (self_min.cmp(other_max), self_max.cmp(other_min)) {
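+                // Disjoint ranges mean the difference is all of `self`; a shared
+                // extreme means exactly one element is in common, so skip just
+                // that element; otherwise pick Search or Stitch by relative size.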
+ (Greater, _) | (_, Less) => DifferenceInner::Iterate(self.iter()),
+ (Equal, _) => {
+ let mut self_iter = self.iter();
+ self_iter.next();
+ DifferenceInner::Iterate(self_iter)
+ }
+ (_, Equal) => {
+ let mut self_iter = self.iter();
+ self_iter.next_back();
+ DifferenceInner::Iterate(self_iter)
+ }
+ _ if self.len() <= other.len() / ITER_PERFORMANCE_TIPPING_SIZE_DIFF => {
+ DifferenceInner::Search { self_iter: self.iter(), other_set: other }
+ }
+ _ => DifferenceInner::Stitch {
+ self_iter: self.iter(),
+ other_iter: other.iter().peekable(),
+ },
+ },
+ }
+ }
+
+ /// Visits the elements representing the symmetric difference,
+ /// i.e., the elements that are in `self` or in `other` but not in both,
+ /// in ascending order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let mut a = BTreeSet::new();
+ /// a.insert(1);
+ /// a.insert(2);
+ ///
+ /// let mut b = BTreeSet::new();
+ /// b.insert(2);
+ /// b.insert(3);
+ ///
+ /// let sym_diff: Vec<_> = a.symmetric_difference(&b).cloned().collect();
+ /// assert_eq!(sym_diff, [1, 3]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn symmetric_difference<'a>(
+ &'a self,
+ other: &'a BTreeSet<T, A>,
+ ) -> SymmetricDifference<'a, T>
+ where
+ T: Ord,
+ {
+ SymmetricDifference(MergeIterInner::new(self.iter(), other.iter()))
+ }
+
+ /// Visits the elements representing the intersection,
+ /// i.e., the elements that are both in `self` and `other`,
+ /// in ascending order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let mut a = BTreeSet::new();
+ /// a.insert(1);
+ /// a.insert(2);
+ ///
+ /// let mut b = BTreeSet::new();
+ /// b.insert(2);
+ /// b.insert(3);
+ ///
+ /// let intersection: Vec<_> = a.intersection(&b).cloned().collect();
+ /// assert_eq!(intersection, [2]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn intersection<'a>(&'a self, other: &'a BTreeSet<T, A>) -> Intersection<'a, T, A>
+ where
+ T: Ord,
+ {
+ let (self_min, self_max) =
+ if let (Some(self_min), Some(self_max)) = (self.first(), self.last()) {
+ (self_min, self_max)
+ } else {
+ return Intersection { inner: IntersectionInner::Answer(None) };
+ };
+ let (other_min, other_max) =
+ if let (Some(other_min), Some(other_max)) = (other.first(), other.last()) {
+ (other_min, other_max)
+ } else {
+ return Intersection { inner: IntersectionInner::Answer(None) };
+ };
+ Intersection {
+ inner: match (self_min.cmp(other_max), self_max.cmp(other_min)) {
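+                // Disjoint ranges intersect in nothing; a shared extreme is the
+                // only possible common element; otherwise iterate the smaller
+                // set and probe the larger, or stitch similar sizes.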
+ (Greater, _) | (_, Less) => IntersectionInner::Answer(None),
+ (Equal, _) => IntersectionInner::Answer(Some(self_min)),
+ (_, Equal) => IntersectionInner::Answer(Some(self_max)),
+ _ if self.len() <= other.len() / ITER_PERFORMANCE_TIPPING_SIZE_DIFF => {
+ IntersectionInner::Search { small_iter: self.iter(), large_set: other }
+ }
+ _ if other.len() <= self.len() / ITER_PERFORMANCE_TIPPING_SIZE_DIFF => {
+ IntersectionInner::Search { small_iter: other.iter(), large_set: self }
+ }
+ _ => IntersectionInner::Stitch { a: self.iter(), b: other.iter() },
+ },
+ }
+ }
+
+ /// Visits the elements representing the union,
+ /// i.e., all the elements in `self` or `other`, without duplicates,
+ /// in ascending order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let mut a = BTreeSet::new();
+ /// a.insert(1);
+ ///
+ /// let mut b = BTreeSet::new();
+ /// b.insert(2);
+ ///
+ /// let union: Vec<_> = a.union(&b).cloned().collect();
+ /// assert_eq!(union, [1, 2]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn union<'a>(&'a self, other: &'a BTreeSet<T, A>) -> Union<'a, T>
+ where
+ T: Ord,
+ {
+ Union(MergeIterInner::new(self.iter(), other.iter()))
+ }
+
+ /// Clears the set, removing all elements.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let mut v = BTreeSet::new();
+ /// v.insert(1);
+ /// v.clear();
+ /// assert!(v.is_empty());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn clear(&mut self)
+ where
+ A: Clone,
+ {
+ self.map.clear()
+ }
+
+ /// Returns `true` if the set contains an element equal to the value.
+ ///
+ /// The value may be any borrowed form of the set's element type,
+ /// but the ordering on the borrowed form *must* match the
+ /// ordering on the element type.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let set = BTreeSet::from([1, 2, 3]);
+ /// assert_eq!(set.contains(&1), true);
+ /// assert_eq!(set.contains(&4), false);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn contains<Q: ?Sized>(&self, value: &Q) -> bool
+ where
+ T: Borrow<Q> + Ord,
+ Q: Ord,
+ {
+ self.map.contains_key(value)
+ }
+
+ /// Returns a reference to the element in the set, if any, that is equal to
+ /// the value.
+ ///
+ /// The value may be any borrowed form of the set's element type,
+ /// but the ordering on the borrowed form *must* match the
+ /// ordering on the element type.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let set = BTreeSet::from([1, 2, 3]);
+ /// assert_eq!(set.get(&2), Some(&2));
+ /// assert_eq!(set.get(&4), None);
+ /// ```
+ #[stable(feature = "set_recovery", since = "1.9.0")]
+ pub fn get<Q: ?Sized>(&self, value: &Q) -> Option<&T>
+ where
+ T: Borrow<Q> + Ord,
+ Q: Ord,
+ {
+ Recover::get(&self.map, value)
+ }
+
+ /// Returns `true` if `self` has no elements in common with `other`.
+ /// This is equivalent to checking for an empty intersection.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let a = BTreeSet::from([1, 2, 3]);
+ /// let mut b = BTreeSet::new();
+ ///
+ /// assert_eq!(a.is_disjoint(&b), true);
+ /// b.insert(4);
+ /// assert_eq!(a.is_disjoint(&b), true);
+ /// b.insert(1);
+ /// assert_eq!(a.is_disjoint(&b), false);
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn is_disjoint(&self, other: &BTreeSet<T, A>) -> bool
+ where
+ T: Ord,
+ {
+ self.intersection(other).next().is_none()
+ }
+
+ /// Returns `true` if the set is a subset of another,
+ /// i.e., `other` contains at least all the elements in `self`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let sup = BTreeSet::from([1, 2, 3]);
+ /// let mut set = BTreeSet::new();
+ ///
+ /// assert_eq!(set.is_subset(&sup), true);
+ /// set.insert(2);
+ /// assert_eq!(set.is_subset(&sup), true);
+ /// set.insert(4);
+ /// assert_eq!(set.is_subset(&sup), false);
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn is_subset(&self, other: &BTreeSet<T, A>) -> bool
+ where
+ T: Ord,
+ {
+ // Same result as self.difference(other).next().is_none()
+ // but the code below is faster (hugely in some cases).
+ if self.len() > other.len() {
+ return false;
+ }
+ let (self_min, self_max) =
+ if let (Some(self_min), Some(self_max)) = (self.first(), self.last()) {
+ (self_min, self_max)
+ } else {
+ return true; // self is empty
+ };
+ let (other_min, other_max) =
+ if let (Some(other_min), Some(other_max)) = (other.first(), other.last()) {
+ (other_min, other_max)
+ } else {
+ return false; // other is empty
+ };
+ let mut self_iter = self.iter();
+ match self_min.cmp(other_min) {
+ Less => return false,
+ Equal => {
+ self_iter.next();
+ }
+ Greater => (),
+ }
+ match self_max.cmp(other_max) {
+ Greater => return false,
+ Equal => {
+ self_iter.next_back();
+ }
+ Less => (),
+ }
+ if self_iter.len() <= other.len() / ITER_PERFORMANCE_TIPPING_SIZE_DIFF {
+ for next in self_iter {
+ if !other.contains(next) {
+ return false;
+ }
+ }
+ } else {
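+            // After the checks above, every remaining element of `self` lies
+            // strictly between `other`'s minimum and maximum, so the scan can
+            // skip `other`'s extremes.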
+ let mut other_iter = other.iter();
+ other_iter.next();
+ other_iter.next_back();
+ let mut self_next = self_iter.next();
+ while let Some(self1) = self_next {
+ match other_iter.next().map_or(Less, |other1| self1.cmp(other1)) {
+ Less => return false,
+ Equal => self_next = self_iter.next(),
+ Greater => (),
+ }
+ }
+ }
+ true
+ }
+
+ /// Returns `true` if the set is a superset of another,
+ /// i.e., `self` contains at least all the elements in `other`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let sub = BTreeSet::from([1, 2]);
+ /// let mut set = BTreeSet::new();
+ ///
+ /// assert_eq!(set.is_superset(&sub), false);
+ ///
+ /// set.insert(0);
+ /// set.insert(1);
+ /// assert_eq!(set.is_superset(&sub), false);
+ ///
+ /// set.insert(2);
+ /// assert_eq!(set.is_superset(&sub), true);
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn is_superset(&self, other: &BTreeSet<T, A>) -> bool
+ where
+ T: Ord,
+ {
+ other.is_subset(self)
+ }
+
+ /// Returns a reference to the first element in the set, if any.
+ /// This element is always the minimum of all elements in the set.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(map_first_last)]
+ /// use std::collections::BTreeSet;
+ ///
+ /// let mut set = BTreeSet::new();
+ /// assert_eq!(set.first(), None);
+ /// set.insert(1);
+ /// assert_eq!(set.first(), Some(&1));
+ /// set.insert(2);
+ /// assert_eq!(set.first(), Some(&1));
+ /// ```
+ #[must_use]
+ #[unstable(feature = "map_first_last", issue = "62924")]
+ pub fn first(&self) -> Option<&T>
+ where
+ T: Ord,
+ {
+ self.map.first_key_value().map(|(k, _)| k)
+ }
+
+ /// Returns a reference to the last element in the set, if any.
+ /// This element is always the maximum of all elements in the set.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(map_first_last)]
+ /// use std::collections::BTreeSet;
+ ///
+ /// let mut set = BTreeSet::new();
+ /// assert_eq!(set.last(), None);
+ /// set.insert(1);
+ /// assert_eq!(set.last(), Some(&1));
+ /// set.insert(2);
+ /// assert_eq!(set.last(), Some(&2));
+ /// ```
+ #[must_use]
+ #[unstable(feature = "map_first_last", issue = "62924")]
+ pub fn last(&self) -> Option<&T>
+ where
+ T: Ord,
+ {
+ self.map.last_key_value().map(|(k, _)| k)
+ }
+
+ /// Removes the first element from the set and returns it, if any.
+ /// The first element is always the minimum element in the set.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(map_first_last)]
+ /// use std::collections::BTreeSet;
+ ///
+ /// let mut set = BTreeSet::new();
+ ///
+ /// set.insert(1);
+ /// while let Some(n) = set.pop_first() {
+ /// assert_eq!(n, 1);
+ /// }
+ /// assert!(set.is_empty());
+ /// ```
+ #[unstable(feature = "map_first_last", issue = "62924")]
+ pub fn pop_first(&mut self) -> Option<T>
+ where
+ T: Ord,
+ {
+ self.map.pop_first().map(|kv| kv.0)
+ }
+
+ /// Removes the last element from the set and returns it, if any.
+ /// The last element is always the maximum element in the set.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(map_first_last)]
+ /// use std::collections::BTreeSet;
+ ///
+ /// let mut set = BTreeSet::new();
+ ///
+ /// set.insert(1);
+ /// while let Some(n) = set.pop_last() {
+ /// assert_eq!(n, 1);
+ /// }
+ /// assert!(set.is_empty());
+ /// ```
+ #[unstable(feature = "map_first_last", issue = "62924")]
+ pub fn pop_last(&mut self) -> Option<T>
+ where
+ T: Ord,
+ {
+ self.map.pop_last().map(|kv| kv.0)
+ }
+
+ /// Adds a value to the set.
+ ///
+ /// Returns whether the value was newly inserted. That is:
+ ///
+ /// - If the set did not previously contain an equal value, `true` is
+ /// returned.
+ /// - If the set already contained an equal value, `false` is returned, and
+ /// the entry is not updated.
+ ///
+ /// See the [module-level documentation] for more.
+ ///
+ /// [module-level documentation]: index.html#insert-and-complex-keys
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let mut set = BTreeSet::new();
+ ///
+ /// assert_eq!(set.insert(2), true);
+ /// assert_eq!(set.insert(2), false);
+ /// assert_eq!(set.len(), 1);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn insert(&mut self, value: T) -> bool
+ where
+ T: Ord,
+ {
+ self.map.insert(value, SetValZST::default()).is_none()
+ }
+
+ /// Adds a value to the set, replacing the existing element, if any, that is
+ /// equal to the value. Returns the replaced element.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let mut set = BTreeSet::new();
+ /// set.insert(Vec::<i32>::new());
+ ///
+ /// assert_eq!(set.get(&[][..]).unwrap().capacity(), 0);
+ /// set.replace(Vec::with_capacity(10));
+ /// assert_eq!(set.get(&[][..]).unwrap().capacity(), 10);
+ /// ```
+ #[stable(feature = "set_recovery", since = "1.9.0")]
+ pub fn replace(&mut self, value: T) -> Option<T>
+ where
+ T: Ord,
+ {
+ Recover::replace(&mut self.map, value)
+ }
+
+ /// If the set contains an element equal to the value, removes it from the
+ /// set and drops it. Returns whether such an element was present.
+ ///
+ /// The value may be any borrowed form of the set's element type,
+ /// but the ordering on the borrowed form *must* match the
+ /// ordering on the element type.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let mut set = BTreeSet::new();
+ ///
+ /// set.insert(2);
+ /// assert_eq!(set.remove(&2), true);
+ /// assert_eq!(set.remove(&2), false);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn remove<Q: ?Sized>(&mut self, value: &Q) -> bool
+ where
+ T: Borrow<Q> + Ord,
+ Q: Ord,
+ {
+ self.map.remove(value).is_some()
+ }
+
+ /// Removes and returns the element in the set, if any, that is equal to
+ /// the value.
+ ///
+ /// The value may be any borrowed form of the set's element type,
+ /// but the ordering on the borrowed form *must* match the
+ /// ordering on the element type.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let mut set = BTreeSet::from([1, 2, 3]);
+ /// assert_eq!(set.take(&2), Some(2));
+ /// assert_eq!(set.take(&2), None);
+ /// ```
+ #[stable(feature = "set_recovery", since = "1.9.0")]
+ pub fn take<Q: ?Sized>(&mut self, value: &Q) -> Option<T>
+ where
+ T: Borrow<Q> + Ord,
+ Q: Ord,
+ {
+ Recover::take(&mut self.map, value)
+ }
+
+ /// Retains only the elements specified by the predicate.
+ ///
+ /// In other words, remove all elements `e` for which `f(&e)` returns `false`.
+ /// The elements are visited in ascending order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let mut set = BTreeSet::from([1, 2, 3, 4, 5, 6]);
+ /// // Keep only the even numbers.
+ /// set.retain(|&k| k % 2 == 0);
+ /// assert!(set.iter().eq([2, 4, 6].iter()));
+ /// ```
+ #[stable(feature = "btree_retain", since = "1.53.0")]
+ pub fn retain<F>(&mut self, mut f: F)
+ where
+ T: Ord,
+ F: FnMut(&T) -> bool,
+ {
+ self.drain_filter(|v| !f(v));
+ }
+
+ /// Moves all elements from `other` into `self`, leaving `other` empty.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let mut a = BTreeSet::new();
+ /// a.insert(1);
+ /// a.insert(2);
+ /// a.insert(3);
+ ///
+ /// let mut b = BTreeSet::new();
+ /// b.insert(3);
+ /// b.insert(4);
+ /// b.insert(5);
+ ///
+ /// a.append(&mut b);
+ ///
+ /// assert_eq!(a.len(), 5);
+ /// assert_eq!(b.len(), 0);
+ ///
+ /// assert!(a.contains(&1));
+ /// assert!(a.contains(&2));
+ /// assert!(a.contains(&3));
+ /// assert!(a.contains(&4));
+ /// assert!(a.contains(&5));
+ /// ```
+ #[stable(feature = "btree_append", since = "1.11.0")]
+ pub fn append(&mut self, other: &mut Self)
+ where
+ T: Ord,
+ A: Clone,
+ {
+ self.map.append(&mut other.map);
+ }
+
+ /// Splits the collection into two at the value. Returns a new collection
+ /// with all elements greater than or equal to the value.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let mut a = BTreeSet::new();
+ /// a.insert(1);
+ /// a.insert(2);
+ /// a.insert(3);
+ /// a.insert(17);
+ /// a.insert(41);
+ ///
+ /// let b = a.split_off(&3);
+ ///
+ /// assert_eq!(a.len(), 2);
+ /// assert_eq!(b.len(), 3);
+ ///
+ /// assert!(a.contains(&1));
+ /// assert!(a.contains(&2));
+ ///
+ /// assert!(b.contains(&3));
+ /// assert!(b.contains(&17));
+ /// assert!(b.contains(&41));
+ /// ```
+ #[stable(feature = "btree_split_off", since = "1.11.0")]
+ pub fn split_off<Q: ?Sized + Ord>(&mut self, value: &Q) -> Self
+ where
+ T: Borrow<Q> + Ord,
+ A: Clone,
+ {
+ BTreeSet { map: self.map.split_off(value) }
+ }
+
+ /// Creates an iterator that visits all elements in ascending order and
+ /// uses a closure to determine if an element should be removed.
+ ///
+ /// If the closure returns `true`, the element is removed from the set and
+ /// yielded. If the closure returns `false`, or panics, the element remains
+ /// in the set and will not be yielded.
+ ///
+ /// If the iterator is only partially consumed or not consumed at all, each
+ /// of the remaining elements is still subjected to the closure and removed
+ /// and dropped if it returns `true`.
+ ///
+ /// It is unspecified how many more elements will be subjected to the
+ /// closure if a panic occurs in the closure, or if a panic occurs while
+ /// dropping an element, or if the `DrainFilter` itself is leaked.
+ ///
+ /// # Examples
+ ///
+ /// Splitting a set into even and odd values, reusing the original set:
+ ///
+ /// ```
+ /// #![feature(btree_drain_filter)]
+ /// use std::collections::BTreeSet;
+ ///
+ /// let mut set: BTreeSet<i32> = (0..8).collect();
+ /// let evens: BTreeSet<_> = set.drain_filter(|v| v % 2 == 0).collect();
+ /// let odds = set;
+ /// assert_eq!(evens.into_iter().collect::<Vec<_>>(), vec![0, 2, 4, 6]);
+ /// assert_eq!(odds.into_iter().collect::<Vec<_>>(), vec![1, 3, 5, 7]);
+ /// ```
+ #[unstable(feature = "btree_drain_filter", issue = "70530")]
+ pub fn drain_filter<'a, F>(&'a mut self, pred: F) -> DrainFilter<'a, T, F, A>
+ where
+ T: Ord,
+ F: 'a + FnMut(&T) -> bool,
+ {
+ let (inner, alloc) = self.map.drain_filter_inner();
+ DrainFilter { pred, inner, alloc }
+ }
+
+ /// Gets an iterator that visits the elements in the `BTreeSet` in ascending
+ /// order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let set = BTreeSet::from([1, 2, 3]);
+ /// let mut set_iter = set.iter();
+ /// assert_eq!(set_iter.next(), Some(&1));
+ /// assert_eq!(set_iter.next(), Some(&2));
+ /// assert_eq!(set_iter.next(), Some(&3));
+ /// assert_eq!(set_iter.next(), None);
+ /// ```
+ ///
+ /// Values returned by the iterator are returned in ascending order:
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let set = BTreeSet::from([3, 1, 2]);
+ /// let mut set_iter = set.iter();
+ /// assert_eq!(set_iter.next(), Some(&1));
+ /// assert_eq!(set_iter.next(), Some(&2));
+ /// assert_eq!(set_iter.next(), Some(&3));
+ /// assert_eq!(set_iter.next(), None);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn iter(&self) -> Iter<'_, T> {
+ Iter { iter: self.map.keys() }
+ }
+
+ /// Returns the number of elements in the set.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let mut v = BTreeSet::new();
+ /// assert_eq!(v.len(), 0);
+ /// v.insert(1);
+ /// assert_eq!(v.len(), 1);
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_btree_new", issue = "71835")]
+ pub const fn len(&self) -> usize {
+ self.map.len()
+ }
+
+ /// Returns `true` if the set contains no elements.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let mut v = BTreeSet::new();
+ /// assert!(v.is_empty());
+ /// v.insert(1);
+ /// assert!(!v.is_empty());
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_btree_new", issue = "71835")]
+ pub const fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Ord> FromIterator<T> for BTreeSet<T> {
+ fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> BTreeSet<T> {
+ let mut inputs: Vec<_> = iter.into_iter().collect();
+
+ if inputs.is_empty() {
+ return BTreeSet::new();
+ }
+
+ // use stable sort to preserve the insertion order.
+ inputs.sort();
+ BTreeSet::from_sorted_iter(inputs.into_iter(), Global)
+ }
+}
+
+impl<T: Ord, A: Allocator + Clone> BTreeSet<T, A> {
+ fn from_sorted_iter<I: Iterator<Item = T>>(iter: I, alloc: A) -> BTreeSet<T, A> {
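+        // The caller must supply elements in ascending order; the map's bulk
+        // builder relies on that invariant.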
+ let iter = iter.map(|k| (k, SetValZST::default()));
+ let map = BTreeMap::bulk_build_from_sorted_iter(iter, alloc);
+ BTreeSet { map }
+ }
+}
+
+#[stable(feature = "std_collections_from_array", since = "1.56.0")]
+impl<T: Ord, const N: usize> From<[T; N]> for BTreeSet<T> {
+ /// Converts a `[T; N]` into a `BTreeSet<T>`.
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let set1 = BTreeSet::from([1, 2, 3, 4]);
+ /// let set2: BTreeSet<_> = [1, 2, 3, 4].into();
+ /// assert_eq!(set1, set2);
+ /// ```
+ fn from(mut arr: [T; N]) -> Self {
+ if N == 0 {
+ return BTreeSet::new();
+ }
+
+ // use stable sort to preserve the insertion order.
+ arr.sort();
+ let iter = IntoIterator::into_iter(arr).map(|k| (k, SetValZST::default()));
+ let map = BTreeMap::bulk_build_from_sorted_iter(iter, Global);
+ BTreeSet { map }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator + Clone> IntoIterator for BTreeSet<T, A> {
+ type Item = T;
+ type IntoIter = IntoIter<T, A>;
+
+ /// Gets an iterator for moving out the `BTreeSet`'s contents.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let set = BTreeSet::from([1, 2, 3, 4]);
+ ///
+ /// let v: Vec<_> = set.into_iter().collect();
+ /// assert_eq!(v, [1, 2, 3, 4]);
+ /// ```
+ fn into_iter(self) -> IntoIter<T, A> {
+ IntoIter { iter: self.map.into_iter() }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T, A: Allocator + Clone> IntoIterator for &'a BTreeSet<T, A> {
+ type Item = &'a T;
+ type IntoIter = Iter<'a, T>;
+
+ fn into_iter(self) -> Iter<'a, T> {
+ self.iter()
+ }
+}
+
+/// An iterator produced by calling `drain_filter` on [`BTreeSet`].
+#[unstable(feature = "btree_drain_filter", issue = "70530")]
+pub struct DrainFilter<
+ 'a,
+ T,
+ F,
+ #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator + Clone = Global,
+> where
+ T: 'a,
+ F: 'a + FnMut(&T) -> bool,
+{
+ pred: F,
+ inner: super::map::DrainFilterInner<'a, T, SetValZST>,
+    /// The BTreeMap will outlive this DrainFilter, so we don't care about drop order for `alloc`.
+ alloc: A,
+}
+
+#[unstable(feature = "btree_drain_filter", issue = "70530")]
+impl<T, F, A: Allocator + Clone> Drop for DrainFilter<'_, T, F, A>
+where
+ F: FnMut(&T) -> bool,
+{
+ fn drop(&mut self) {
+ self.for_each(drop);
+ }
+}
+
+#[unstable(feature = "btree_drain_filter", issue = "70530")]
+impl<T, F, A: Allocator + Clone> fmt::Debug for DrainFilter<'_, T, F, A>
+where
+ T: fmt::Debug,
+ F: FnMut(&T) -> bool,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("DrainFilter").field(&self.inner.peek().map(|(k, _)| k)).finish()
+ }
+}
+
+#[unstable(feature = "btree_drain_filter", issue = "70530")]
+impl<'a, T, F, A: Allocator + Clone> Iterator for DrainFilter<'_, T, F, A>
+where
+ F: 'a + FnMut(&T) -> bool,
+{
+ type Item = T;
+
+ fn next(&mut self) -> Option<T> {
+ let pred = &mut self.pred;
+ let mut mapped_pred = |k: &T, _v: &mut SetValZST| pred(k);
+ self.inner.next(&mut mapped_pred, self.alloc.clone()).map(|(k, _)| k)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+}
+
+#[unstable(feature = "btree_drain_filter", issue = "70530")]
+impl<T, F, A: Allocator + Clone> FusedIterator for DrainFilter<'_, T, F, A> where
+ F: FnMut(&T) -> bool
+{
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Ord, A: Allocator + Clone> Extend<T> for BTreeSet<T, A> {
+ #[inline]
+ fn extend<Iter: IntoIterator<Item = T>>(&mut self, iter: Iter) {
+ iter.into_iter().for_each(move |elem| {
+ self.insert(elem);
+ });
+ }
+
+ #[inline]
+ fn extend_one(&mut self, elem: T) {
+ self.insert(elem);
+ }
+}
+
+#[stable(feature = "extend_ref", since = "1.2.0")]
+impl<'a, T: 'a + Ord + Copy, A: Allocator + Clone> Extend<&'a T> for BTreeSet<T, A> {
+ fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
+ self.extend(iter.into_iter().cloned());
+ }
+
+ #[inline]
+ fn extend_one(&mut self, &elem: &'a T) {
+ self.insert(elem);
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Default for BTreeSet<T> {
+ /// Creates an empty `BTreeSet`.
+ fn default() -> BTreeSet<T> {
+ BTreeSet::new()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Ord + Clone, A: Allocator + Clone> Sub<&BTreeSet<T, A>> for &BTreeSet<T, A> {
+ type Output = BTreeSet<T, A>;
+
+ /// Returns the difference of `self` and `rhs` as a new `BTreeSet<T>`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let a = BTreeSet::from([1, 2, 3]);
+ /// let b = BTreeSet::from([3, 4, 5]);
+ ///
+ /// let result = &a - &b;
+ /// assert_eq!(result, BTreeSet::from([1, 2]));
+ /// ```
+ fn sub(self, rhs: &BTreeSet<T, A>) -> BTreeSet<T, A> {
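+        // `difference` yields elements in ascending order, so the result can
+        // be bulk-built with `from_sorted_iter` without re-sorting.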
+ BTreeSet::from_sorted_iter(
+ self.difference(rhs).cloned(),
+ ManuallyDrop::into_inner(self.map.alloc.clone()),
+ )
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Ord + Clone, A: Allocator + Clone> BitXor<&BTreeSet<T, A>> for &BTreeSet<T, A> {
+ type Output = BTreeSet<T, A>;
+
+ /// Returns the symmetric difference of `self` and `rhs` as a new `BTreeSet<T>`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let a = BTreeSet::from([1, 2, 3]);
+ /// let b = BTreeSet::from([2, 3, 4]);
+ ///
+ /// let result = &a ^ &b;
+ /// assert_eq!(result, BTreeSet::from([1, 4]));
+ /// ```
+ fn bitxor(self, rhs: &BTreeSet<T, A>) -> BTreeSet<T, A> {
+ BTreeSet::from_sorted_iter(
+ self.symmetric_difference(rhs).cloned(),
+ ManuallyDrop::into_inner(self.map.alloc.clone()),
+ )
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Ord + Clone, A: Allocator + Clone> BitAnd<&BTreeSet<T, A>> for &BTreeSet<T, A> {
+ type Output = BTreeSet<T, A>;
+
+ /// Returns the intersection of `self` and `rhs` as a new `BTreeSet<T>`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let a = BTreeSet::from([1, 2, 3]);
+ /// let b = BTreeSet::from([2, 3, 4]);
+ ///
+ /// let result = &a & &b;
+ /// assert_eq!(result, BTreeSet::from([2, 3]));
+ /// ```
+ fn bitand(self, rhs: &BTreeSet<T, A>) -> BTreeSet<T, A> {
+ BTreeSet::from_sorted_iter(
+ self.intersection(rhs).cloned(),
+ ManuallyDrop::into_inner(self.map.alloc.clone()),
+ )
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Ord + Clone, A: Allocator + Clone> BitOr<&BTreeSet<T, A>> for &BTreeSet<T, A> {
+ type Output = BTreeSet<T, A>;
+
+ /// Returns the union of `self` and `rhs` as a new `BTreeSet<T>`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let a = BTreeSet::from([1, 2, 3]);
+ /// let b = BTreeSet::from([3, 4, 5]);
+ ///
+ /// let result = &a | &b;
+ /// assert_eq!(result, BTreeSet::from([1, 2, 3, 4, 5]));
+ /// ```
+ fn bitor(self, rhs: &BTreeSet<T, A>) -> BTreeSet<T, A> {
+ BTreeSet::from_sorted_iter(
+ self.union(rhs).cloned(),
+ ManuallyDrop::into_inner(self.map.alloc.clone()),
+ )
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Debug, A: Allocator + Clone> Debug for BTreeSet<T, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_set().entries(self.iter()).finish()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Clone for Iter<'_, T> {
+ fn clone(&self) -> Self {
+ Iter { iter: self.iter.clone() }
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> Iterator for Iter<'a, T> {
+ type Item = &'a T;
+
+ fn next(&mut self) -> Option<&'a T> {
+ self.iter.next()
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+
+ fn last(mut self) -> Option<&'a T> {
+ self.next_back()
+ }
+
+ fn min(mut self) -> Option<&'a T> {
+ self.next()
+ }
+
+ fn max(mut self) -> Option<&'a T> {
+ self.next_back()
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> DoubleEndedIterator for Iter<'a, T> {
+ fn next_back(&mut self) -> Option<&'a T> {
+ self.iter.next_back()
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> ExactSizeIterator for Iter<'_, T> {
+ fn len(&self) -> usize {
+ self.iter.len()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T> FusedIterator for Iter<'_, T> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator + Clone> Iterator for IntoIter<T, A> {
+ type Item = T;
+
+ fn next(&mut self) -> Option<T> {
+ self.iter.next().map(|(k, _)| k)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator + Clone> DoubleEndedIterator for IntoIter<T, A> {
+ fn next_back(&mut self) -> Option<T> {
+ self.iter.next_back().map(|(k, _)| k)
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator + Clone> ExactSizeIterator for IntoIter<T, A> {
+ fn len(&self) -> usize {
+ self.iter.len()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T, A: Allocator + Clone> FusedIterator for IntoIter<T, A> {}
+
+#[stable(feature = "btree_range", since = "1.17.0")]
+impl<T> Clone for Range<'_, T> {
+ fn clone(&self) -> Self {
+ Range { iter: self.iter.clone() }
+ }
+}
+
+#[stable(feature = "btree_range", since = "1.17.0")]
+impl<'a, T> Iterator for Range<'a, T> {
+ type Item = &'a T;
+
+ fn next(&mut self) -> Option<&'a T> {
+ self.iter.next().map(|(k, _)| k)
+ }
+
+ fn last(mut self) -> Option<&'a T> {
+ self.next_back()
+ }
+
+ fn min(mut self) -> Option<&'a T> {
+ self.next()
+ }
+
+ fn max(mut self) -> Option<&'a T> {
+ self.next_back()
+ }
+}
+
+#[stable(feature = "btree_range", since = "1.17.0")]
+impl<'a, T> DoubleEndedIterator for Range<'a, T> {
+ fn next_back(&mut self) -> Option<&'a T> {
+ self.iter.next_back().map(|(k, _)| k)
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T> FusedIterator for Range<'_, T> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator + Clone> Clone for Difference<'_, T, A> {
+ fn clone(&self) -> Self {
+ Difference {
+ inner: match &self.inner {
+ DifferenceInner::Stitch { self_iter, other_iter } => DifferenceInner::Stitch {
+ self_iter: self_iter.clone(),
+ other_iter: other_iter.clone(),
+ },
+ DifferenceInner::Search { self_iter, other_set } => {
+ DifferenceInner::Search { self_iter: self_iter.clone(), other_set }
+ }
+ DifferenceInner::Iterate(iter) => DifferenceInner::Iterate(iter.clone()),
+ },
+ }
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T: Ord, A: Allocator + Clone> Iterator for Difference<'a, T, A> {
+ type Item = &'a T;
+
+ fn next(&mut self) -> Option<&'a T> {
+ match &mut self.inner {
+ DifferenceInner::Stitch { self_iter, other_iter } => {
+ let mut self_next = self_iter.next()?;
+ loop {
+ match other_iter.peek().map_or(Less, |other_next| self_next.cmp(other_next)) {
+ Less => return Some(self_next),
+ Equal => {
+ self_next = self_iter.next()?;
+ other_iter.next();
+ }
+ Greater => {
+ other_iter.next();
+ }
+ }
+ }
+ }
+ DifferenceInner::Search { self_iter, other_set } => loop {
+ let self_next = self_iter.next()?;
+ if !other_set.contains(&self_next) {
+ return Some(self_next);
+ }
+ },
+ DifferenceInner::Iterate(iter) => iter.next(),
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let (self_len, other_len) = match &self.inner {
+ DifferenceInner::Stitch { self_iter, other_iter } => {
+ (self_iter.len(), other_iter.len())
+ }
+ DifferenceInner::Search { self_iter, other_set } => (self_iter.len(), other_set.len()),
+ DifferenceInner::Iterate(iter) => (iter.len(), 0),
+ };
+ (self_len.saturating_sub(other_len), Some(self_len))
+ }
+
+ fn min(mut self) -> Option<&'a T> {
+ self.next()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T: Ord, A: Allocator + Clone> FusedIterator for Difference<'_, T, A> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Clone for SymmetricDifference<'_, T> {
+ fn clone(&self) -> Self {
+ SymmetricDifference(self.0.clone())
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T: Ord> Iterator for SymmetricDifference<'a, T> {
+ type Item = &'a T;
+
+ fn next(&mut self) -> Option<&'a T> {
+ loop {
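+            // `nexts` advances both sides past equal heads at once; an
+            // element is yielded only when exactly one side produced it,
+            // i.e., the other `Option` is `None`.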
+ let (a_next, b_next) = self.0.nexts(Self::Item::cmp);
+ if a_next.and(b_next).is_none() {
+ return a_next.or(b_next);
+ }
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let (a_len, b_len) = self.0.lens();
+ // No checked_add, because even if a and b refer to the same set,
+ // and T is a zero-sized type, the storage overhead of sets limits
+ // the number of elements to less than half the range of usize.
+ (0, Some(a_len + b_len))
+ }
+
+ fn min(mut self) -> Option<&'a T> {
+ self.next()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T: Ord> FusedIterator for SymmetricDifference<'_, T> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator + Clone> Clone for Intersection<'_, T, A> {
+ fn clone(&self) -> Self {
+ Intersection {
+ inner: match &self.inner {
+ IntersectionInner::Stitch { a, b } => {
+ IntersectionInner::Stitch { a: a.clone(), b: b.clone() }
+ }
+ IntersectionInner::Search { small_iter, large_set } => {
+ IntersectionInner::Search { small_iter: small_iter.clone(), large_set }
+ }
+ IntersectionInner::Answer(answer) => IntersectionInner::Answer(*answer),
+ },
+ }
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T: Ord, A: Allocator + Clone> Iterator for Intersection<'a, T, A> {
+ type Item = &'a T;
+
+ fn next(&mut self) -> Option<&'a T> {
+ match &mut self.inner {
+ IntersectionInner::Stitch { a, b } => {
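+ // Sorted-merge intersection: advance whichever front is smaller
+ // until both fronts compare `Equal`.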
+ let mut a_next = a.next()?;
+ let mut b_next = b.next()?;
+ loop {
+ match a_next.cmp(b_next) {
+ Less => a_next = a.next()?,
+ Greater => b_next = b.next()?,
+ Equal => return Some(a_next),
+ }
+ }
+ }
+ IntersectionInner::Search { small_iter, large_set } => loop {
+ let small_next = small_iter.next()?;
+ if large_set.contains(&small_next) {
+ return Some(small_next);
+ }
+ },
+ IntersectionInner::Answer(answer) => answer.take(),
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ match &self.inner {
+ IntersectionInner::Stitch { a, b } => (0, Some(min(a.len(), b.len()))),
+ IntersectionInner::Search { small_iter, .. } => (0, Some(small_iter.len())),
+ IntersectionInner::Answer(None) => (0, Some(0)),
+ IntersectionInner::Answer(Some(_)) => (1, Some(1)),
+ }
+ }
+
+ fn min(mut self) -> Option<&'a T> {
+ self.next()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T: Ord, A: Allocator + Clone> FusedIterator for Intersection<'_, T, A> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Clone for Union<'_, T> {
+ fn clone(&self) -> Self {
+ Union(self.0.clone())
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T: Ord> Iterator for Union<'a, T> {
+ type Item = &'a T;
+
+ fn next(&mut self) -> Option<&'a T> {
+ let (a_next, b_next) = self.0.nexts(Self::Item::cmp);
+ a_next.or(b_next)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let (a_len, b_len) = self.0.lens();
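+ // The union is at least as large as the larger operand and at most
+ // the sum of both operands.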
+ // No checked_add - see SymmetricDifference::size_hint.
+ (max(a_len, b_len), Some(a_len + b_len))
+ }
+
+ fn min(mut self) -> Option<&'a T> {
+ self.next()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T: Ord> FusedIterator for Union<'_, T> {}
+
+#[cfg(test)]
+mod tests;
diff --git a/library/alloc/src/collections/btree/set/tests.rs b/library/alloc/src/collections/btree/set/tests.rs
new file mode 100644
index 000000000..502d3e1d1
--- /dev/null
+++ b/library/alloc/src/collections/btree/set/tests.rs
@@ -0,0 +1,856 @@
+use super::super::testing::crash_test::{CrashTestDummy, Panic};
+use super::super::testing::rng::DeterministicRng;
+use super::*;
+use crate::vec::Vec;
+use std::cmp::Ordering;
+use std::hash::{Hash, Hasher};
+use std::iter::FromIterator;
+use std::ops::Bound::{Excluded, Included};
+use std::panic::{catch_unwind, AssertUnwindSafe};
+
+#[test]
+fn test_clone_eq() {
+ let mut m = BTreeSet::new();
+
+ m.insert(1);
+ m.insert(2);
+
+ assert_eq!(m.clone(), m);
+}
+
+#[test]
+fn test_iter_min_max() {
+ let mut a = BTreeSet::new();
+ assert_eq!(a.iter().min(), None);
+ assert_eq!(a.iter().max(), None);
+ assert_eq!(a.range(..).min(), None);
+ assert_eq!(a.range(..).max(), None);
+ assert_eq!(a.difference(&BTreeSet::new()).min(), None);
+ assert_eq!(a.difference(&BTreeSet::new()).max(), None);
+ assert_eq!(a.intersection(&a).min(), None);
+ assert_eq!(a.intersection(&a).max(), None);
+ assert_eq!(a.symmetric_difference(&BTreeSet::new()).min(), None);
+ assert_eq!(a.symmetric_difference(&BTreeSet::new()).max(), None);
+ assert_eq!(a.union(&a).min(), None);
+ assert_eq!(a.union(&a).max(), None);
+ a.insert(1);
+ a.insert(2);
+ assert_eq!(a.iter().min(), Some(&1));
+ assert_eq!(a.iter().max(), Some(&2));
+ assert_eq!(a.range(..).min(), Some(&1));
+ assert_eq!(a.range(..).max(), Some(&2));
+ assert_eq!(a.difference(&BTreeSet::new()).min(), Some(&1));
+ assert_eq!(a.difference(&BTreeSet::new()).max(), Some(&2));
+ assert_eq!(a.intersection(&a).min(), Some(&1));
+ assert_eq!(a.intersection(&a).max(), Some(&2));
+ assert_eq!(a.symmetric_difference(&BTreeSet::new()).min(), Some(&1));
+ assert_eq!(a.symmetric_difference(&BTreeSet::new()).max(), Some(&2));
+ assert_eq!(a.union(&a).min(), Some(&1));
+ assert_eq!(a.union(&a).max(), Some(&2));
+}
+
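+// Checks that the set operation driven by `f` visits exactly the elements
+// of `expected`, in order.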
+fn check<F>(a: &[i32], b: &[i32], expected: &[i32], f: F)
+where
+ F: FnOnce(&BTreeSet<i32>, &BTreeSet<i32>, &mut dyn FnMut(&i32) -> bool) -> bool,
+{
+ let mut set_a = BTreeSet::new();
+ let mut set_b = BTreeSet::new();
+
+ for x in a {
+ assert!(set_a.insert(*x))
+ }
+ for y in b {
+ assert!(set_b.insert(*y))
+ }
+
+ let mut i = 0;
+ f(&set_a, &set_b, &mut |&x| {
+ if i < expected.len() {
+ assert_eq!(x, expected[i]);
+ }
+ i += 1;
+ true
+ });
+ assert_eq!(i, expected.len());
+}
+
+#[test]
+fn test_intersection() {
+ fn check_intersection(a: &[i32], b: &[i32], expected: &[i32]) {
+ check(a, b, expected, |x, y, f| x.intersection(y).all(f))
+ }
+
+ check_intersection(&[], &[], &[]);
+ check_intersection(&[1, 2, 3], &[], &[]);
+ check_intersection(&[], &[1, 2, 3], &[]);
+ check_intersection(&[2], &[1, 2, 3], &[2]);
+ check_intersection(&[1, 2, 3], &[2], &[2]);
+ check_intersection(&[11, 1, 3, 77, 103, 5, -5], &[2, 11, 77, -9, -42, 5, 3], &[3, 5, 11, 77]);
+
+ if cfg!(miri) {
+ // Miri is too slow
+ return;
+ }
+
+ let large = Vec::from_iter(0..100);
+ check_intersection(&[], &large, &[]);
+ check_intersection(&large, &[], &[]);
+ check_intersection(&[-1], &large, &[]);
+ check_intersection(&large, &[-1], &[]);
+ check_intersection(&[0], &large, &[0]);
+ check_intersection(&large, &[0], &[0]);
+ check_intersection(&[99], &large, &[99]);
+ check_intersection(&large, &[99], &[99]);
+ check_intersection(&[100], &large, &[]);
+ check_intersection(&large, &[100], &[]);
+ check_intersection(&[11, 5000, 1, 3, 77, 8924], &large, &[1, 3, 11, 77]);
+}
+
+#[test]
+fn test_intersection_size_hint() {
+ let x = BTreeSet::from([3, 4]);
+ let y = BTreeSet::from([1, 2, 3]);
+ let mut iter = x.intersection(&y);
+ assert_eq!(iter.size_hint(), (1, Some(1)));
+ assert_eq!(iter.next(), Some(&3));
+ assert_eq!(iter.size_hint(), (0, Some(0)));
+ assert_eq!(iter.next(), None);
+
+ iter = y.intersection(&y);
+ assert_eq!(iter.size_hint(), (0, Some(3)));
+ assert_eq!(iter.next(), Some(&1));
+ assert_eq!(iter.size_hint(), (0, Some(2)));
+}
+
+#[test]
+fn test_difference() {
+ fn check_difference(a: &[i32], b: &[i32], expected: &[i32]) {
+ check(a, b, expected, |x, y, f| x.difference(y).all(f))
+ }
+
+ check_difference(&[], &[], &[]);
+ check_difference(&[1, 12], &[], &[1, 12]);
+ check_difference(&[], &[1, 2, 3, 9], &[]);
+ check_difference(&[1, 3, 5, 9, 11], &[3, 9], &[1, 5, 11]);
+ check_difference(&[1, 3, 5, 9, 11], &[3, 6, 9], &[1, 5, 11]);
+ check_difference(&[1, 3, 5, 9, 11], &[0, 1], &[3, 5, 9, 11]);
+ check_difference(&[1, 3, 5, 9, 11], &[11, 12], &[1, 3, 5, 9]);
+ check_difference(
+ &[-5, 11, 22, 33, 40, 42],
+ &[-12, -5, 14, 23, 34, 38, 39, 50],
+ &[11, 22, 33, 40, 42],
+ );
+
+ if cfg!(miri) {
+ // Miri is too slow
+ return;
+ }
+
+ let large = Vec::from_iter(0..100);
+ check_difference(&[], &large, &[]);
+ check_difference(&[-1], &large, &[-1]);
+ check_difference(&[0], &large, &[]);
+ check_difference(&[99], &large, &[]);
+ check_difference(&[100], &large, &[100]);
+ check_difference(&[11, 5000, 1, 3, 77, 8924], &large, &[5000, 8924]);
+ check_difference(&large, &[], &large);
+ check_difference(&large, &[-1], &large);
+ check_difference(&large, &[100], &large);
+}
+
+#[test]
+fn test_difference_size_hint() {
+ let s246 = BTreeSet::from([2, 4, 6]);
+ let s23456 = BTreeSet::from_iter(2..=6);
+ let mut iter = s246.difference(&s23456);
+ assert_eq!(iter.size_hint(), (0, Some(3)));
+ assert_eq!(iter.next(), None);
+
+ let s12345 = BTreeSet::from_iter(1..=5);
+ iter = s246.difference(&s12345);
+ assert_eq!(iter.size_hint(), (0, Some(3)));
+ assert_eq!(iter.next(), Some(&6));
+ assert_eq!(iter.size_hint(), (0, Some(0)));
+ assert_eq!(iter.next(), None);
+
+ let s34567 = BTreeSet::from_iter(3..=7);
+ iter = s246.difference(&s34567);
+ assert_eq!(iter.size_hint(), (0, Some(3)));
+ assert_eq!(iter.next(), Some(&2));
+ assert_eq!(iter.size_hint(), (0, Some(2)));
+ assert_eq!(iter.next(), None);
+
+ let s1 = BTreeSet::from_iter(-9..=1);
+ iter = s246.difference(&s1);
+ assert_eq!(iter.size_hint(), (3, Some(3)));
+
+ let s2 = BTreeSet::from_iter(-9..=2);
+ iter = s246.difference(&s2);
+ assert_eq!(iter.size_hint(), (2, Some(2)));
+ assert_eq!(iter.next(), Some(&4));
+ assert_eq!(iter.size_hint(), (1, Some(1)));
+
+ let s23 = BTreeSet::from([2, 3]);
+ iter = s246.difference(&s23);
+ assert_eq!(iter.size_hint(), (1, Some(3)));
+ assert_eq!(iter.next(), Some(&4));
+ assert_eq!(iter.size_hint(), (1, Some(1)));
+
+ let s4 = BTreeSet::from([4]);
+ iter = s246.difference(&s4);
+ assert_eq!(iter.size_hint(), (2, Some(3)));
+ assert_eq!(iter.next(), Some(&2));
+ assert_eq!(iter.size_hint(), (1, Some(2)));
+ assert_eq!(iter.next(), Some(&6));
+ assert_eq!(iter.size_hint(), (0, Some(0)));
+ assert_eq!(iter.next(), None);
+
+ let s56 = BTreeSet::from([5, 6]);
+ iter = s246.difference(&s56);
+ assert_eq!(iter.size_hint(), (1, Some(3)));
+ assert_eq!(iter.next(), Some(&2));
+ assert_eq!(iter.size_hint(), (0, Some(2)));
+
+ let s6 = BTreeSet::from_iter(6..=19);
+ iter = s246.difference(&s6);
+ assert_eq!(iter.size_hint(), (2, Some(2)));
+ assert_eq!(iter.next(), Some(&2));
+ assert_eq!(iter.size_hint(), (1, Some(1)));
+
+ let s7 = BTreeSet::from_iter(7..=19);
+ iter = s246.difference(&s7);
+ assert_eq!(iter.size_hint(), (3, Some(3)));
+}
+
+#[test]
+fn test_symmetric_difference() {
+ fn check_symmetric_difference(a: &[i32], b: &[i32], expected: &[i32]) {
+ check(a, b, expected, |x, y, f| x.symmetric_difference(y).all(f))
+ }
+
+ check_symmetric_difference(&[], &[], &[]);
+ check_symmetric_difference(&[1, 2, 3], &[2], &[1, 3]);
+ check_symmetric_difference(&[2], &[1, 2, 3], &[1, 3]);
+ check_symmetric_difference(&[1, 3, 5, 9, 11], &[-2, 3, 9, 14, 22], &[-2, 1, 5, 11, 14, 22]);
+}
+
+#[test]
+fn test_symmetric_difference_size_hint() {
+ let x = BTreeSet::from([2, 4]);
+ let y = BTreeSet::from([1, 2, 3]);
+ let mut iter = x.symmetric_difference(&y);
+ assert_eq!(iter.size_hint(), (0, Some(5)));
+ assert_eq!(iter.next(), Some(&1));
+ assert_eq!(iter.size_hint(), (0, Some(4)));
+ assert_eq!(iter.next(), Some(&3));
+ assert_eq!(iter.size_hint(), (0, Some(1)));
+}
+
+#[test]
+fn test_union() {
+ fn check_union(a: &[i32], b: &[i32], expected: &[i32]) {
+ check(a, b, expected, |x, y, f| x.union(y).all(f))
+ }
+
+ check_union(&[], &[], &[]);
+ check_union(&[1, 2, 3], &[2], &[1, 2, 3]);
+ check_union(&[2], &[1, 2, 3], &[1, 2, 3]);
+ check_union(
+ &[1, 3, 5, 9, 11, 16, 19, 24],
+ &[-2, 1, 5, 9, 13, 19],
+ &[-2, 1, 3, 5, 9, 11, 13, 16, 19, 24],
+ );
+}
+
+#[test]
+fn test_union_size_hint() {
+ let x = BTreeSet::from([2, 4]);
+ let y = BTreeSet::from([1, 2, 3]);
+ let mut iter = x.union(&y);
+ assert_eq!(iter.size_hint(), (3, Some(5)));
+ assert_eq!(iter.next(), Some(&1));
+ assert_eq!(iter.size_hint(), (2, Some(4)));
+ assert_eq!(iter.next(), Some(&2));
+ assert_eq!(iter.size_hint(), (1, Some(2)));
+}
+
+#[test]
+// Only tests the simple function definition with respect to intersection
+fn test_is_disjoint() {
+ let one = BTreeSet::from([1]);
+ let two = BTreeSet::from([2]);
+ assert!(one.is_disjoint(&two));
+}
+
+#[test]
+// Also implicitly tests the trivial function definition of is_superset
+fn test_is_subset() {
+ fn is_subset(a: &[i32], b: &[i32]) -> bool {
+ let set_a = BTreeSet::from_iter(a.iter());
+ let set_b = BTreeSet::from_iter(b.iter());
+ set_a.is_subset(&set_b)
+ }
+
+ assert_eq!(is_subset(&[], &[]), true);
+ assert_eq!(is_subset(&[], &[1, 2]), true);
+ assert_eq!(is_subset(&[0], &[1, 2]), false);
+ assert_eq!(is_subset(&[1], &[1, 2]), true);
+ assert_eq!(is_subset(&[2], &[1, 2]), true);
+ assert_eq!(is_subset(&[3], &[1, 2]), false);
+ assert_eq!(is_subset(&[1, 2], &[1]), false);
+ assert_eq!(is_subset(&[1, 2], &[1, 2]), true);
+ assert_eq!(is_subset(&[1, 2], &[2, 3]), false);
+ assert_eq!(
+ is_subset(&[-5, 11, 22, 33, 40, 42], &[-12, -5, 11, 14, 22, 23, 33, 34, 38, 39, 40, 42]),
+ true
+ );
+ assert_eq!(is_subset(&[-5, 11, 22, 33, 40, 42], &[-12, -5, 11, 14, 22, 23, 34, 38]), false);
+
+ if cfg!(miri) {
+ // Miri is too slow
+ return;
+ }
+
+ let large = Vec::from_iter(0..100);
+ assert_eq!(is_subset(&[], &large), true);
+ assert_eq!(is_subset(&large, &[]), false);
+ assert_eq!(is_subset(&[-1], &large), false);
+ assert_eq!(is_subset(&[0], &large), true);
+ assert_eq!(is_subset(&[1, 2], &large), true);
+ assert_eq!(is_subset(&[99, 100], &large), false);
+}
+
+#[test]
+fn test_is_superset() {
+ fn is_superset(a: &[i32], b: &[i32]) -> bool {
+ let set_a = BTreeSet::from_iter(a.iter());
+ let set_b = BTreeSet::from_iter(b.iter());
+ set_a.is_superset(&set_b)
+ }
+
+ assert_eq!(is_superset(&[], &[]), true);
+ assert_eq!(is_superset(&[], &[1, 2]), false);
+ assert_eq!(is_superset(&[0], &[1, 2]), false);
+ assert_eq!(is_superset(&[1], &[1, 2]), false);
+ assert_eq!(is_superset(&[4], &[1, 2]), false);
+ assert_eq!(is_superset(&[1, 4], &[1, 2]), false);
+ assert_eq!(is_superset(&[1, 2], &[1, 2]), true);
+ assert_eq!(is_superset(&[1, 2, 3], &[1, 3]), true);
+ assert_eq!(is_superset(&[1, 2, 3], &[]), true);
+ assert_eq!(is_superset(&[-1, 1, 2, 3], &[-1, 3]), true);
+
+ if cfg!(miri) {
+ // Miri is too slow
+ return;
+ }
+
+ let large = Vec::from_iter(0..100);
+ assert_eq!(is_superset(&[], &large), false);
+ assert_eq!(is_superset(&large, &[]), true);
+ assert_eq!(is_superset(&large, &[1]), true);
+ assert_eq!(is_superset(&large, &[50, 99]), true);
+ assert_eq!(is_superset(&large, &[100]), false);
+ assert_eq!(is_superset(&large, &[0, 99]), true);
+ assert_eq!(is_superset(&[-1], &large), false);
+ assert_eq!(is_superset(&[0], &large), false);
+ assert_eq!(is_superset(&[99, 100], &large), false);
+}
+
+#[test]
+fn test_retain() {
+ let mut set = BTreeSet::from([1, 2, 3, 4, 5, 6]);
+ set.retain(|&k| k % 2 == 0);
+ assert_eq!(set.len(), 3);
+ assert!(set.contains(&2));
+ assert!(set.contains(&4));
+ assert!(set.contains(&6));
+}
+
+#[test]
+fn test_drain_filter() {
+ let mut x = BTreeSet::from([1]);
+ let mut y = BTreeSet::from([1]);
+
+ x.drain_filter(|_| true);
+ y.drain_filter(|_| false);
+ assert_eq!(x.len(), 0);
+ assert_eq!(y.len(), 1);
+}
+
+#[test]
+fn test_drain_filter_drop_panic_leak() {
+ let a = CrashTestDummy::new(0);
+ let b = CrashTestDummy::new(1);
+ let c = CrashTestDummy::new(2);
+ let mut set = BTreeSet::new();
+ set.insert(a.spawn(Panic::Never));
+ set.insert(b.spawn(Panic::InDrop));
+ set.insert(c.spawn(Panic::Never));
+
+ catch_unwind(move || drop(set.drain_filter(|dummy| dummy.query(true)))).ok();
+
+ assert_eq!(a.queried(), 1);
+ assert_eq!(b.queried(), 1);
+ assert_eq!(c.queried(), 0);
+ assert_eq!(a.dropped(), 1);
+ assert_eq!(b.dropped(), 1);
+ assert_eq!(c.dropped(), 1);
+}
+
+#[test]
+fn test_drain_filter_pred_panic_leak() {
+ let a = CrashTestDummy::new(0);
+ let b = CrashTestDummy::new(1);
+ let c = CrashTestDummy::new(2);
+ let mut set = BTreeSet::new();
+ set.insert(a.spawn(Panic::Never));
+ set.insert(b.spawn(Panic::InQuery));
+ set.insert(c.spawn(Panic::InQuery));
+
+ catch_unwind(AssertUnwindSafe(|| drop(set.drain_filter(|dummy| dummy.query(true))))).ok();
+
+ assert_eq!(a.queried(), 1);
+ assert_eq!(b.queried(), 1);
+ assert_eq!(c.queried(), 0);
+ assert_eq!(a.dropped(), 1);
+ assert_eq!(b.dropped(), 0);
+ assert_eq!(c.dropped(), 0);
+ assert_eq!(set.len(), 2);
+ assert_eq!(set.first().unwrap().id(), 1);
+ assert_eq!(set.last().unwrap().id(), 2);
+}
+
+#[test]
+fn test_clear() {
+ let mut x = BTreeSet::new();
+ x.insert(1);
+
+ x.clear();
+ assert!(x.is_empty());
+}
+#[test]
+fn test_remove() {
+ let mut x = BTreeSet::new();
+ assert!(x.is_empty());
+
+ x.insert(1);
+ x.insert(2);
+ x.insert(3);
+ x.insert(4);
+
+ assert_eq!(x.remove(&2), true);
+ assert_eq!(x.remove(&0), false);
+ assert_eq!(x.remove(&5), false);
+ assert_eq!(x.remove(&1), true);
+ assert_eq!(x.remove(&2), false);
+ assert_eq!(x.remove(&3), true);
+ assert_eq!(x.remove(&4), true);
+ assert_eq!(x.remove(&4), false);
+ assert!(x.is_empty());
+}
+
+#[test]
+fn test_zip() {
+ let mut x = BTreeSet::new();
+ x.insert(5);
+ x.insert(12);
+ x.insert(11);
+
+ let mut y = BTreeSet::new();
+ y.insert("foo");
+ y.insert("bar");
+
+ let x = x;
+ let y = y;
+ let mut z = x.iter().zip(&y);
+
+ assert_eq!(z.next().unwrap(), (&5, &("bar")));
+ assert_eq!(z.next().unwrap(), (&11, &("foo")));
+ assert!(z.next().is_none());
+}
+
+#[test]
+fn test_from_iter() {
+ let xs = [1, 2, 3, 4, 5, 6, 7, 8, 9];
+
+ let set = BTreeSet::from_iter(xs.iter());
+
+ for x in &xs {
+ assert!(set.contains(x));
+ }
+}
+
+#[test]
+fn test_show() {
+ let mut set = BTreeSet::new();
+ let empty = BTreeSet::<i32>::new();
+
+ set.insert(1);
+ set.insert(2);
+
+ let set_str = format!("{set:?}");
+
+ assert_eq!(set_str, "{1, 2}");
+ assert_eq!(format!("{empty:?}"), "{}");
+}
+
+#[test]
+fn test_extend_ref() {
+ let mut a = BTreeSet::new();
+ a.insert(1);
+
+ a.extend(&[2, 3, 4]);
+
+ assert_eq!(a.len(), 4);
+ assert!(a.contains(&1));
+ assert!(a.contains(&2));
+ assert!(a.contains(&3));
+ assert!(a.contains(&4));
+
+ let mut b = BTreeSet::new();
+ b.insert(5);
+ b.insert(6);
+
+ a.extend(&b);
+
+ assert_eq!(a.len(), 6);
+ assert!(a.contains(&1));
+ assert!(a.contains(&2));
+ assert!(a.contains(&3));
+ assert!(a.contains(&4));
+ assert!(a.contains(&5));
+ assert!(a.contains(&6));
+}
+
+#[test]
+fn test_recovery() {
+ #[derive(Debug)]
+ struct Foo(&'static str, i32);
+
+ impl PartialEq for Foo {
+ fn eq(&self, other: &Self) -> bool {
+ self.0 == other.0
+ }
+ }
+
+ impl Eq for Foo {}
+
+ impl PartialOrd for Foo {
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ self.0.partial_cmp(&other.0)
+ }
+ }
+
+ impl Ord for Foo {
+ fn cmp(&self, other: &Self) -> Ordering {
+ self.0.cmp(&other.0)
+ }
+ }
+
+ let mut s = BTreeSet::new();
+ assert_eq!(s.replace(Foo("a", 1)), None);
+ assert_eq!(s.len(), 1);
+ assert_eq!(s.replace(Foo("a", 2)), Some(Foo("a", 1)));
+ assert_eq!(s.len(), 1);
+
+ {
+ let mut it = s.iter();
+ assert_eq!(it.next(), Some(&Foo("a", 2)));
+ assert_eq!(it.next(), None);
+ }
+
+ assert_eq!(s.get(&Foo("a", 1)), Some(&Foo("a", 2)));
+ assert_eq!(s.take(&Foo("a", 1)), Some(Foo("a", 2)));
+ assert_eq!(s.len(), 0);
+
+ assert_eq!(s.get(&Foo("a", 1)), None);
+ assert_eq!(s.take(&Foo("a", 1)), None);
+
+ assert_eq!(s.iter().next(), None);
+}
+
+#[allow(dead_code)]
+fn assert_covariance() {
+ fn set<'new>(v: BTreeSet<&'static str>) -> BTreeSet<&'new str> {
+ v
+ }
+ fn iter<'a, 'new>(v: Iter<'a, &'static str>) -> Iter<'a, &'new str> {
+ v
+ }
+ fn into_iter<'new>(v: IntoIter<&'static str>) -> IntoIter<&'new str> {
+ v
+ }
+ fn range<'a, 'new>(v: Range<'a, &'static str>) -> Range<'a, &'new str> {
+ v
+ }
+ // not applied to Difference, Intersection, SymmetricDifference, Union
+}
+
+#[allow(dead_code)]
+fn assert_sync() {
+ fn set<T: Sync>(v: &BTreeSet<T>) -> impl Sync + '_ {
+ v
+ }
+
+ fn iter<T: Sync>(v: &BTreeSet<T>) -> impl Sync + '_ {
+ v.iter()
+ }
+
+ fn into_iter<T: Sync>(v: BTreeSet<T>) -> impl Sync {
+ v.into_iter()
+ }
+
+ fn range<T: Sync + Ord>(v: &BTreeSet<T>) -> impl Sync + '_ {
+ v.range(..)
+ }
+
+ fn drain_filter<T: Sync + Ord>(v: &mut BTreeSet<T>) -> impl Sync + '_ {
+ v.drain_filter(|_| false)
+ }
+
+ fn difference<T: Sync + Ord>(v: &BTreeSet<T>) -> impl Sync + '_ {
+ v.difference(&v)
+ }
+
+ fn intersection<T: Sync + Ord>(v: &BTreeSet<T>) -> impl Sync + '_ {
+ v.intersection(&v)
+ }
+
+ fn symmetric_difference<T: Sync + Ord>(v: &BTreeSet<T>) -> impl Sync + '_ {
+ v.symmetric_difference(&v)
+ }
+
+ fn union<T: Sync + Ord>(v: &BTreeSet<T>) -> impl Sync + '_ {
+ v.union(&v)
+ }
+}
+
+#[allow(dead_code)]
+fn assert_send() {
+ fn set<T: Send>(v: BTreeSet<T>) -> impl Send {
+ v
+ }
+
+ fn iter<T: Send + Sync>(v: &BTreeSet<T>) -> impl Send + '_ {
+ v.iter()
+ }
+
+ fn into_iter<T: Send>(v: BTreeSet<T>) -> impl Send {
+ v.into_iter()
+ }
+
+ fn range<T: Send + Sync + Ord>(v: &BTreeSet<T>) -> impl Send + '_ {
+ v.range(..)
+ }
+
+ fn drain_filter<T: Send + Ord>(v: &mut BTreeSet<T>) -> impl Send + '_ {
+ v.drain_filter(|_| false)
+ }
+
+ fn difference<T: Send + Sync + Ord>(v: &BTreeSet<T>) -> impl Send + '_ {
+ v.difference(&v)
+ }
+
+ fn intersection<T: Send + Sync + Ord>(v: &BTreeSet<T>) -> impl Send + '_ {
+ v.intersection(&v)
+ }
+
+ fn symmetric_difference<T: Send + Sync + Ord>(v: &BTreeSet<T>) -> impl Send + '_ {
+ v.symmetric_difference(&v)
+ }
+
+ fn union<T: Send + Sync + Ord>(v: &BTreeSet<T>) -> impl Send + '_ {
+ v.union(&v)
+ }
+}
+
+#[allow(dead_code)]
+// Check that the member-like functions conditionally provided by #[derive()]
+// are not overridden by genuine member functions with a different signature.
+fn assert_derives() {
+ fn hash<T: Hash, H: Hasher>(v: BTreeSet<T>, state: &mut H) {
+ v.hash(state);
+ // Tested much more thoroughly outside the crate in btree_set_hash.rs
+ }
+ fn eq<T: PartialEq>(v: BTreeSet<T>) {
+ let _ = v.eq(&v);
+ }
+ fn ne<T: PartialEq>(v: BTreeSet<T>) {
+ let _ = v.ne(&v);
+ }
+ fn cmp<T: Ord>(v: BTreeSet<T>) {
+ let _ = v.cmp(&v);
+ }
+ fn min<T: Ord>(v: BTreeSet<T>, w: BTreeSet<T>) {
+ let _ = v.min(w);
+ }
+ fn max<T: Ord>(v: BTreeSet<T>, w: BTreeSet<T>) {
+ let _ = v.max(w);
+ }
+ fn clamp<T: Ord>(v: BTreeSet<T>, w: BTreeSet<T>, x: BTreeSet<T>) {
+ let _ = v.clamp(w, x);
+ }
+ fn partial_cmp<T: PartialOrd>(v: &BTreeSet<T>) {
+ let _ = v.partial_cmp(&v);
+ }
+}
+
+#[test]
+fn test_ord_absence() {
+ fn set<K>(mut set: BTreeSet<K>) {
+ let _ = set.is_empty();
+ let _ = set.len();
+ set.clear();
+ let _ = set.iter();
+ let _ = set.into_iter();
+ }
+
+ fn set_debug<K: Debug>(set: BTreeSet<K>) {
+ format!("{set:?}");
+ format!("{:?}", set.iter());
+ format!("{:?}", set.into_iter());
+ }
+
+ fn set_clone<K: Clone>(mut set: BTreeSet<K>) {
+ set.clone_from(&set.clone());
+ }
+
+ #[derive(Debug, Clone)]
+ struct NonOrd;
+ set(BTreeSet::<NonOrd>::new());
+ set_debug(BTreeSet::<NonOrd>::new());
+ set_clone(BTreeSet::<NonOrd>::default());
+}
+
+#[test]
+fn test_append() {
+ let mut a = BTreeSet::new();
+ a.insert(1);
+ a.insert(2);
+ a.insert(3);
+
+ let mut b = BTreeSet::new();
+ b.insert(3);
+ b.insert(4);
+ b.insert(5);
+
+ a.append(&mut b);
+
+ assert_eq!(a.len(), 5);
+ assert_eq!(b.len(), 0);
+
+ assert_eq!(a.contains(&1), true);
+ assert_eq!(a.contains(&2), true);
+ assert_eq!(a.contains(&3), true);
+ assert_eq!(a.contains(&4), true);
+ assert_eq!(a.contains(&5), true);
+}
+
+#[test]
+fn test_first_last() {
+ let mut a = BTreeSet::new();
+ assert_eq!(a.first(), None);
+ assert_eq!(a.last(), None);
+ a.insert(1);
+ assert_eq!(a.first(), Some(&1));
+ assert_eq!(a.last(), Some(&1));
+ a.insert(2);
+ assert_eq!(a.first(), Some(&1));
+ assert_eq!(a.last(), Some(&2));
+ for i in 3..=12 {
+ a.insert(i);
+ }
+ assert_eq!(a.first(), Some(&1));
+ assert_eq!(a.last(), Some(&12));
+ assert_eq!(a.pop_first(), Some(1));
+ assert_eq!(a.pop_last(), Some(12));
+ assert_eq!(a.pop_first(), Some(2));
+ assert_eq!(a.pop_last(), Some(11));
+ assert_eq!(a.pop_first(), Some(3));
+ assert_eq!(a.pop_last(), Some(10));
+ assert_eq!(a.pop_first(), Some(4));
+ assert_eq!(a.pop_first(), Some(5));
+ assert_eq!(a.pop_first(), Some(6));
+ assert_eq!(a.pop_first(), Some(7));
+ assert_eq!(a.pop_first(), Some(8));
+ assert_eq!(a.clone().pop_last(), Some(9));
+ assert_eq!(a.pop_first(), Some(9));
+ assert_eq!(a.pop_first(), None);
+ assert_eq!(a.pop_last(), None);
+}
+
+// Unlike the function with the same name in map/tests, this returns keys
+// only (no values), and therefore consumes the RNG differently: it returns
+// different predetermined pseudo-random keys, and the test cases using this
+// function explore slightly different trees.
+fn rand_data(len: usize) -> Vec<u32> {
+ let mut rng = DeterministicRng::new();
+ Vec::from_iter((0..len).map(|_| rng.next()))
+}
+
+#[test]
+fn test_split_off_empty_right() {
+ let mut data = rand_data(173);
+
+ let mut set = BTreeSet::from_iter(data.clone());
+ let right = set.split_off(&(data.iter().max().unwrap() + 1));
+
+ data.sort();
+ assert!(set.into_iter().eq(data));
+ assert!(right.into_iter().eq(None));
+}
+
+#[test]
+fn test_split_off_empty_left() {
+ let mut data = rand_data(314);
+
+ let mut set = BTreeSet::from_iter(data.clone());
+ let right = set.split_off(data.iter().min().unwrap());
+
+ data.sort();
+ assert!(set.into_iter().eq(None));
+ assert!(right.into_iter().eq(data));
+}
+
+#[test]
+fn test_split_off_large_random_sorted() {
+ // Miri is too slow
+ let mut data = if cfg!(miri) { rand_data(529) } else { rand_data(1529) };
+ // special case with maximum height.
+ data.sort();
+
+ let mut set = BTreeSet::from_iter(data.clone());
+ let key = data[data.len() / 2];
+ let right = set.split_off(&key);
+
+ assert!(set.into_iter().eq(data.clone().into_iter().filter(|x| *x < key)));
+ assert!(right.into_iter().eq(data.into_iter().filter(|x| *x >= key)));
+}
+
+#[test]
+fn from_array() {
+ let set = BTreeSet::from([1, 2, 3, 4]);
+ let unordered_duplicates = BTreeSet::from([4, 1, 4, 3, 2]);
+ assert_eq!(set, unordered_duplicates);
+}
+
+#[should_panic(expected = "range start is greater than range end in BTreeSet")]
+#[test]
+fn test_range_panic_1() {
+ let mut set = BTreeSet::new();
+ set.insert(3);
+ set.insert(5);
+ set.insert(8);
+
+ let _invalid_range = set.range((Included(&8), Included(&3)));
+}
+
+#[should_panic(expected = "range start and end are equal and excluded in BTreeSet")]
+#[test]
+fn test_range_panic_2() {
+ let mut set = BTreeSet::new();
+ set.insert(3);
+ set.insert(5);
+ set.insert(8);
+
+ let _invalid_range = set.range((Excluded(&5), Excluded(&5)));
+}
diff --git a/library/alloc/src/collections/btree/set_val.rs b/library/alloc/src/collections/btree/set_val.rs
new file mode 100644
index 000000000..80c459bcf
--- /dev/null
+++ b/library/alloc/src/collections/btree/set_val.rs
@@ -0,0 +1,29 @@
+/// Zero-Sized Type (ZST) for internal `BTreeSet` values.
+/// Used instead of `()` to differentiate between:
+/// * `BTreeMap<T, ()>` (possible user-defined map)
+/// * `BTreeMap<T, SetValZST>` (internal set representation)
+#[derive(Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Clone, Default)]
+pub struct SetValZST;
+
+/// A trait to differentiate between `BTreeMap` and `BTreeSet` values.
+/// Returns `true` only for type `SetValZST`; the blanket implementation
+/// returns `false` for all other types. [`TypeId`] would serve the same
+/// purpose, but it requires a `'static` lifetime; this trait avoids that
+/// restriction.
+///
+/// [`TypeId`]: std::any::TypeId
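+///
+/// A sketch of the intended call site (hypothetical helper, not part of this
+/// module):
+///
+/// ```ignore (internal to the btree implementation)
+/// fn describe<V: IsSetVal>() -> &'static str {
+///     if V::is_set_val() { "set value" } else { "map value" }
+/// }
+/// ```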
+pub trait IsSetVal {
+ fn is_set_val() -> bool;
+}
+
+// Blanket implementation
+impl<V> IsSetVal for V {
+ default fn is_set_val() -> bool {
+ false
+ }
+}
+
+// Specialization
+impl IsSetVal for SetValZST {
+ fn is_set_val() -> bool {
+ true
+ }
+}
diff --git a/library/alloc/src/collections/btree/split.rs b/library/alloc/src/collections/btree/split.rs
new file mode 100644
index 000000000..638dc98fc
--- /dev/null
+++ b/library/alloc/src/collections/btree/split.rs
@@ -0,0 +1,73 @@
+use super::node::{ForceResult::*, Root};
+use super::search::SearchResult::*;
+use core::alloc::Allocator;
+use core::borrow::Borrow;
+
+impl<K, V> Root<K, V> {
+ /// Calculates the length of both trees that result from splitting up
+ /// a given number of distinct key-value pairs.
+ pub fn calc_split_length(
+ total_num: usize,
+ root_a: &Root<K, V>,
+ root_b: &Root<K, V>,
+ ) -> (usize, usize) {
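+ // Count the shallower tree directly and derive the other tree's length
+ // by subtraction from the known total.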
+ let (length_a, length_b);
+ if root_a.height() < root_b.height() {
+ length_a = root_a.reborrow().calc_length();
+ length_b = total_num - length_a;
+ debug_assert_eq!(length_b, root_b.reborrow().calc_length());
+ } else {
+ length_b = root_b.reborrow().calc_length();
+ length_a = total_num - length_b;
+ debug_assert_eq!(length_a, root_a.reborrow().calc_length());
+ }
+ (length_a, length_b)
+ }
+
+ /// Split off a tree with key-value pairs at and after the given key.
+ /// The result is meaningful only if the tree is ordered by key,
+ /// and if the ordering of `Q` corresponds to that of `K`.
+ /// If `self` respects all `BTreeMap` tree invariants, then both
+ /// `self` and the returned tree will respect those invariants.
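+ /// For example, splitting a tree holding `{1, 2, 3, 4}` at key `3` leaves
+ /// `{1, 2}` in `self` and returns a tree holding `{3, 4}`.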
+ pub fn split_off<Q: ?Sized + Ord, A: Allocator + Clone>(&mut self, key: &Q, alloc: A) -> Self
+ where
+ K: Borrow<Q>,
+ {
+ let left_root = self;
+ let mut right_root = Root::new_pillar(left_root.height(), alloc.clone());
+ let mut left_node = left_root.borrow_mut();
+ let mut right_node = right_root.borrow_mut();
+
+ loop {
+ let mut split_edge = match left_node.search_node(key) {
+ // key is going to the right tree
+ Found(kv) => kv.left_edge(),
+ GoDown(edge) => edge,
+ };
+
+ split_edge.move_suffix(&mut right_node);
+
+ match (split_edge.force(), right_node.force()) {
+ (Internal(edge), Internal(node)) => {
+ left_node = edge.descend();
+ right_node = node.first_edge().descend();
+ }
+ (Leaf(_), Leaf(_)) => break,
+ _ => unreachable!(),
+ }
+ }
+
+ left_root.fix_right_border(alloc.clone());
+ right_root.fix_left_border(alloc);
+ right_root
+ }
+
+ /// Creates a "pillar": a tree of the given height whose nodes are all
+ /// empty.
+ fn new_pillar<A: Allocator + Clone>(height: usize, alloc: A) -> Self {
+ let mut root = Root::new(alloc.clone());
+ for _ in 0..height {
+ root.push_internal_level(alloc.clone());
+ }
+ root
+ }
+}
diff --git a/library/alloc/src/collections/btree/testing/crash_test.rs b/library/alloc/src/collections/btree/testing/crash_test.rs
new file mode 100644
index 000000000..bcf5f5f72
--- /dev/null
+++ b/library/alloc/src/collections/btree/testing/crash_test.rs
@@ -0,0 +1,119 @@
+// We avoid relying on anything else in the crate, apart from the `Debug` trait.
+use crate::fmt::Debug;
+use std::cmp::Ordering;
+use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};
+
+/// A blueprint for crash test dummy instances that monitor particular events.
+/// Some instances may be configured to panic at some point.
+/// Events are `clone`, `drop` or some anonymous `query`.
+///
+/// Crash test dummies are identified and ordered by an id, so they can be used
+/// as keys in a BTreeMap.
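+///
+/// A minimal usage sketch, using only the members declared below:
+///
+/// ```ignore (internal test helper)
+/// let dummy = CrashTestDummy::new(0);
+/// let instance = dummy.spawn(Panic::Never);
+/// let copy = instance.clone();
+/// drop(copy);
+/// assert_eq!(dummy.cloned(), 1);
+/// assert_eq!(dummy.dropped(), 1);
+/// ```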
+#[derive(Debug)]
+pub struct CrashTestDummy {
+ pub id: usize,
+ cloned: AtomicUsize,
+ dropped: AtomicUsize,
+ queried: AtomicUsize,
+}
+
+impl CrashTestDummy {
+ /// Creates a crash test dummy blueprint. The `id` determines order and equality of instances.
+ pub fn new(id: usize) -> CrashTestDummy {
+ CrashTestDummy {
+ id,
+ cloned: AtomicUsize::new(0),
+ dropped: AtomicUsize::new(0),
+ queried: AtomicUsize::new(0),
+ }
+ }
+
+ /// Creates an instance of a crash test dummy that records what events it experiences
+ /// and optionally panics.
+ pub fn spawn(&self, panic: Panic) -> Instance<'_> {
+ Instance { origin: self, panic }
+ }
+
+ /// Returns how many times instances of the dummy have been cloned.
+ pub fn cloned(&self) -> usize {
+ self.cloned.load(SeqCst)
+ }
+
+ /// Returns how many times instances of the dummy have been dropped.
+ pub fn dropped(&self) -> usize {
+ self.dropped.load(SeqCst)
+ }
+
+ /// Returns how many times instances of the dummy have had their `query` member invoked.
+ pub fn queried(&self) -> usize {
+ self.queried.load(SeqCst)
+ }
+}
+
+#[derive(Debug)]
+pub struct Instance<'a> {
+ origin: &'a CrashTestDummy,
+ panic: Panic,
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum Panic {
+ Never,
+ InClone,
+ InDrop,
+ InQuery,
+}
+
+impl Instance<'_> {
+ pub fn id(&self) -> usize {
+ self.origin.id
+ }
+
+ /// Some anonymous query, the result of which is already given.
+ pub fn query<R>(&self, result: R) -> R {
+ self.origin.queried.fetch_add(1, SeqCst);
+ if self.panic == Panic::InQuery {
+ panic!("panic in `query`");
+ }
+ result
+ }
+}
+
+impl Clone for Instance<'_> {
+ fn clone(&self) -> Self {
+ self.origin.cloned.fetch_add(1, SeqCst);
+ if self.panic == Panic::InClone {
+ panic!("panic in `clone`");
+ }
+ Self { origin: self.origin, panic: Panic::Never }
+ }
+}
+
+impl Drop for Instance<'_> {
+ fn drop(&mut self) {
+ self.origin.dropped.fetch_add(1, SeqCst);
+ if self.panic == Panic::InDrop {
+ panic!("panic in `drop`");
+ }
+ }
+}
+
+impl PartialOrd for Instance<'_> {
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ self.id().partial_cmp(&other.id())
+ }
+}
+
+impl Ord for Instance<'_> {
+ fn cmp(&self, other: &Self) -> Ordering {
+ self.id().cmp(&other.id())
+ }
+}
+
+impl PartialEq for Instance<'_> {
+ fn eq(&self, other: &Self) -> bool {
+ self.id().eq(&other.id())
+ }
+}
+
+impl Eq for Instance<'_> {}
diff --git a/library/alloc/src/collections/btree/testing/mod.rs b/library/alloc/src/collections/btree/testing/mod.rs
new file mode 100644
index 000000000..7a094f8a5
--- /dev/null
+++ b/library/alloc/src/collections/btree/testing/mod.rs
@@ -0,0 +1,3 @@
+pub mod crash_test;
+pub mod ord_chaos;
+pub mod rng;
diff --git a/library/alloc/src/collections/btree/testing/ord_chaos.rs b/library/alloc/src/collections/btree/testing/ord_chaos.rs
new file mode 100644
index 000000000..96ce7c157
--- /dev/null
+++ b/library/alloc/src/collections/btree/testing/ord_chaos.rs
@@ -0,0 +1,81 @@
+use std::cell::Cell;
+use std::cmp::Ordering::{self, *};
+use std::ptr;
+
+// Minimal type with an `Ord` implementation violating transitivity.
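+// For example, `A < B` and `B < C`, yet `C < A`: the order is cyclic rather
+// than transitive, so sorting or searching among all three values has no
+// answer consistent with `Ord`'s contract.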
+#[derive(Debug)]
+pub enum Cyclic3 {
+ A,
+ B,
+ C,
+}
+use Cyclic3::*;
+
+impl PartialOrd for Cyclic3 {
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+}
+
+impl Ord for Cyclic3 {
+ fn cmp(&self, other: &Self) -> Ordering {
+ match (self, other) {
+ (A, A) | (B, B) | (C, C) => Equal,
+ (A, B) | (B, C) | (C, A) => Less,
+ (A, C) | (B, A) | (C, B) => Greater,
+ }
+ }
+}
+
+impl PartialEq for Cyclic3 {
+ fn eq(&self, other: &Self) -> bool {
+ self.cmp(other) == Equal
+ }
+}
+
+impl Eq for Cyclic3 {}
+
+// Controls the ordering of values wrapped by `Governed`.
+#[derive(Debug)]
+pub struct Governor {
+ flipped: Cell<bool>,
+}
+
+impl Governor {
+ pub fn new() -> Self {
+ Governor { flipped: Cell::new(false) }
+ }
+
+ pub fn flip(&self) {
+ self.flipped.set(!self.flipped.get());
+ }
+}
+
+// Type with an `Ord` implementation that forms a total order at any moment
+// (assuming that `T` respects total order), but can suddenly be made to invert
+// that total order.
+#[derive(Debug)]
+pub struct Governed<'a, T>(pub T, pub &'a Governor);
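+
+// A sketch of the intended use: comparisons consult the governor at call
+// time, so flipping it mid-test inverts the order underneath a live
+// container.
+//
+// let g = Governor::new();
+// let (a, b) = (Governed(1, &g), Governed(2, &g));
+// assert!(a < b);
+// g.flip();
+// assert!(a > b);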
+
+impl<T: Ord> PartialOrd for Governed<'_, T> {
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+}
+
+impl<T: Ord> Ord for Governed<'_, T> {
+ fn cmp(&self, other: &Self) -> Ordering {
+ assert!(ptr::eq(self.1, other.1));
+ let ord = self.0.cmp(&other.0);
+ if self.1.flipped.get() { ord.reverse() } else { ord }
+ }
+}
+
+impl<T: PartialEq> PartialEq for Governed<'_, T> {
+ fn eq(&self, other: &Self) -> bool {
+ assert!(ptr::eq(self.1, other.1));
+ self.0.eq(&other.0)
+ }
+}
+
+impl<T: Eq> Eq for Governed<'_, T> {}
diff --git a/library/alloc/src/collections/btree/testing/rng.rs b/library/alloc/src/collections/btree/testing/rng.rs
new file mode 100644
index 000000000..ecf543bee
--- /dev/null
+++ b/library/alloc/src/collections/btree/testing/rng.rs
@@ -0,0 +1,28 @@
+/// XorShiftRng
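+///
+/// A minimal usage sketch: the sequence is fixed, so any test consuming it is
+/// reproducible across runs.
+///
+/// ```ignore (internal test helper)
+/// let mut rng = DeterministicRng::new();
+/// let first = rng.next();
+/// assert_eq!(first, DeterministicRng::new().next());
+/// ```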
+pub struct DeterministicRng {
+ count: usize,
+ x: u32,
+ y: u32,
+ z: u32,
+ w: u32,
+}
+
+impl DeterministicRng {
+ pub fn new() -> Self {
+ DeterministicRng { count: 0, x: 0x193a6754, y: 0xa8a7d469, z: 0x97830e05, w: 0x113ba7bb }
+ }
+
+ /// Guarantees that each returned number is unique.
+ pub fn next(&mut self) -> u32 {
+ self.count += 1;
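+ // Stay within the prefix of the xorshift sequence that presumably
+ // was verified to contain no duplicates, upholding the uniqueness
+ // guarantee documented above.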
+ assert!(self.count <= 70029);
+ let x = self.x;
+ let t = x ^ (x << 11);
+ self.x = self.y;
+ self.y = self.z;
+ self.z = self.w;
+ let w_ = self.w;
+ self.w = w_ ^ (w_ >> 19) ^ (t ^ (t >> 8));
+ self.w
+ }
+}
diff --git a/library/alloc/src/collections/linked_list.rs b/library/alloc/src/collections/linked_list.rs
new file mode 100644
index 000000000..e21c8aa3b
--- /dev/null
+++ b/library/alloc/src/collections/linked_list.rs
@@ -0,0 +1,2012 @@
+//! A doubly-linked list with owned nodes.
+//!
+//! The `LinkedList` allows pushing and popping elements at either end
+//! in constant time.
+//!
+//! NOTE: It is almost always better to use [`Vec`] or [`VecDeque`] because
+//! array-based containers are generally faster,
+//! more memory efficient, and make better use of CPU cache.
+//!
+//! [`Vec`]: crate::vec::Vec
+//! [`VecDeque`]: super::vec_deque::VecDeque
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use core::cmp::Ordering;
+use core::fmt;
+use core::hash::{Hash, Hasher};
+use core::iter::{FromIterator, FusedIterator};
+use core::marker::PhantomData;
+use core::mem;
+use core::ptr::NonNull;
+
+use super::SpecExtend;
+use crate::boxed::Box;
+
+#[cfg(test)]
+mod tests;
+
+/// A doubly-linked list with owned nodes.
+///
+/// The `LinkedList` allows pushing and popping elements at either end
+/// in constant time.
+///
+/// A `LinkedList` with a known list of items can be initialized from an array:
+/// ```
+/// use std::collections::LinkedList;
+///
+/// let list = LinkedList::from([1, 2, 3]);
+/// ```
+///
+/// NOTE: It is almost always better to use [`Vec`] or [`VecDeque`] because
+/// array-based containers are generally faster,
+/// more memory efficient, and make better use of CPU cache.
+///
+/// [`Vec`]: crate::vec::Vec
+/// [`VecDeque`]: super::vec_deque::VecDeque
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "LinkedList")]
+#[rustc_insignificant_dtor]
+pub struct LinkedList<T> {
+ head: Option<NonNull<Node<T>>>,
+ tail: Option<NonNull<Node<T>>>,
+ len: usize,
+ marker: PhantomData<Box<Node<T>>>,
+}
+
+struct Node<T> {
+ next: Option<NonNull<Node<T>>>,
+ prev: Option<NonNull<Node<T>>>,
+ element: T,
+}
+
+/// An iterator over the elements of a `LinkedList`.
+///
+/// This `struct` is created by [`LinkedList::iter()`]. See its
+/// documentation for more.
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Iter<'a, T: 'a> {
+ head: Option<NonNull<Node<T>>>,
+ tail: Option<NonNull<Node<T>>>,
+ len: usize,
+ marker: PhantomData<&'a Node<T>>,
+}
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("Iter")
+ .field(&*mem::ManuallyDrop::new(LinkedList {
+ head: self.head,
+ tail: self.tail,
+ len: self.len,
+ marker: PhantomData,
+ }))
+ .field(&self.len)
+ .finish()
+ }
+}
+
+// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Clone for Iter<'_, T> {
+ fn clone(&self) -> Self {
+ Iter { ..*self }
+ }
+}
+
+/// A mutable iterator over the elements of a `LinkedList`.
+///
+/// This `struct` is created by [`LinkedList::iter_mut()`]. See its
+/// documentation for more.
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct IterMut<'a, T: 'a> {
+ head: Option<NonNull<Node<T>>>,
+ tail: Option<NonNull<Node<T>>>,
+ len: usize,
+ marker: PhantomData<&'a mut Node<T>>,
+}
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<T: fmt::Debug> fmt::Debug for IterMut<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("IterMut")
+ .field(&*mem::ManuallyDrop::new(LinkedList {
+ head: self.head,
+ tail: self.tail,
+ len: self.len,
+ marker: PhantomData,
+ }))
+ .field(&self.len)
+ .finish()
+ }
+}
+
+/// An owning iterator over the elements of a `LinkedList`.
+///
+/// This `struct` is created by the [`into_iter`] method on [`LinkedList`]
+/// (provided by the [`IntoIterator`] trait). See its documentation for more.
+///
+/// [`into_iter`]: LinkedList::into_iter
+/// [`IntoIterator`]: core::iter::IntoIterator
+#[derive(Clone)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct IntoIter<T> {
+ list: LinkedList<T>,
+}
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<T: fmt::Debug> fmt::Debug for IntoIter<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("IntoIter").field(&self.list).finish()
+ }
+}
+
+impl<T> Node<T> {
+ fn new(element: T) -> Self {
+ Node { next: None, prev: None, element }
+ }
+
+ fn into_element(self: Box<Self>) -> T {
+ self.element
+ }
+}
+
+// private methods
+impl<T> LinkedList<T> {
+ /// Adds the given node to the front of the list.
+ #[inline]
+ fn push_front_node(&mut self, mut node: Box<Node<T>>) {
+ // This method takes care not to create mutable references to whole nodes,
+ // to maintain validity of aliasing pointers into `element`.
+ unsafe {
+ node.next = self.head;
+ node.prev = None;
+ let node = Some(Box::leak(node).into());
+
+ match self.head {
+ None => self.tail = node,
+ // Not creating new mutable (unique!) references overlapping `element`.
+ Some(head) => (*head.as_ptr()).prev = node,
+ }
+
+ self.head = node;
+ self.len += 1;
+ }
+ }
+
+ /// Removes and returns the node at the front of the list.
+ #[inline]
+ fn pop_front_node(&mut self) -> Option<Box<Node<T>>> {
+ // This method takes care not to create mutable references to whole nodes,
+ // to maintain validity of aliasing pointers into `element`.
+ self.head.map(|node| unsafe {
+ let node = Box::from_raw(node.as_ptr());
+ self.head = node.next;
+
+ match self.head {
+ None => self.tail = None,
+ // Not creating new mutable (unique!) references overlapping `element`.
+ Some(head) => (*head.as_ptr()).prev = None,
+ }
+
+ self.len -= 1;
+ node
+ })
+ }
+
+ /// Adds the given node to the back of the list.
+ #[inline]
+ fn push_back_node(&mut self, mut node: Box<Node<T>>) {
+ // This method takes care not to create mutable references to whole nodes,
+ // to maintain validity of aliasing pointers into `element`.
+ unsafe {
+ node.next = None;
+ node.prev = self.tail;
+ let node = Some(Box::leak(node).into());
+
+ match self.tail {
+ None => self.head = node,
+ // Not creating new mutable (unique!) references overlapping `element`.
+ Some(tail) => (*tail.as_ptr()).next = node,
+ }
+
+ self.tail = node;
+ self.len += 1;
+ }
+ }
+
+ /// Removes and returns the node at the back of the list.
+ #[inline]
+ fn pop_back_node(&mut self) -> Option<Box<Node<T>>> {
+ // This method takes care not to create mutable references to whole nodes,
+ // to maintain validity of aliasing pointers into `element`.
+ self.tail.map(|node| unsafe {
+ let node = Box::from_raw(node.as_ptr());
+ self.tail = node.prev;
+
+ match self.tail {
+ None => self.head = None,
+ // Not creating new mutable (unique!) references overlapping `element`.
+ Some(tail) => (*tail.as_ptr()).next = None,
+ }
+
+ self.len -= 1;
+ node
+ })
+ }
+
+ /// Unlinks the specified node from the current list.
+ ///
+ /// Warning: this will not check that the provided node belongs to the current list.
+ ///
+ /// This method takes care not to create mutable references to `element`, to
+ /// maintain validity of aliasing pointers.
+ #[inline]
+ unsafe fn unlink_node(&mut self, mut node: NonNull<Node<T>>) {
+ let node = unsafe { node.as_mut() }; // this one is ours now, we can create an &mut.
+
+ // Not creating new mutable (unique!) references overlapping `element`.
+ match node.prev {
+ Some(prev) => unsafe { (*prev.as_ptr()).next = node.next },
+ // this node is the head node
+ None => self.head = node.next,
+ };
+
+ match node.next {
+ Some(next) => unsafe { (*next.as_ptr()).prev = node.prev },
+ // this node is the tail node
+ None => self.tail = node.prev,
+ };
+
+ self.len -= 1;
+ }
+
+ /// Splices a series of nodes between two existing nodes.
+ ///
+ /// Warning: this will not check that the provided nodes actually belong
+ /// to the two lists involved.
+ #[inline]
+ unsafe fn splice_nodes(
+ &mut self,
+ existing_prev: Option<NonNull<Node<T>>>,
+ existing_next: Option<NonNull<Node<T>>>,
+ mut splice_start: NonNull<Node<T>>,
+ mut splice_end: NonNull<Node<T>>,
+ splice_length: usize,
+ ) {
+ // This method takes care not to create multiple mutable references to whole nodes at the same time,
+ // to maintain validity of aliasing pointers into `element`.
+ if let Some(mut existing_prev) = existing_prev {
+ unsafe {
+ existing_prev.as_mut().next = Some(splice_start);
+ }
+ } else {
+ self.head = Some(splice_start);
+ }
+ if let Some(mut existing_next) = existing_next {
+ unsafe {
+ existing_next.as_mut().prev = Some(splice_end);
+ }
+ } else {
+ self.tail = Some(splice_end);
+ }
+ unsafe {
+ splice_start.as_mut().prev = existing_prev;
+ splice_end.as_mut().next = existing_next;
+ }
+
+ self.len += splice_length;
+ }
+
+ /// Detaches all nodes from a linked list as a series of nodes.
+ #[inline]
+ fn detach_all_nodes(mut self) -> Option<(NonNull<Node<T>>, NonNull<Node<T>>, usize)> {
+ let head = self.head.take();
+ let tail = self.tail.take();
+ let len = mem::replace(&mut self.len, 0);
+ if let Some(head) = head {
+ // SAFETY: In a LinkedList, either both the head and tail are None because
+ // the list is empty, or both head and tail are Some because the list is populated.
+ // Since we have verified the head is Some, we are sure the tail is Some too.
+ let tail = unsafe { tail.unwrap_unchecked() };
+ Some((head, tail, len))
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ unsafe fn split_off_before_node(
+ &mut self,
+ split_node: Option<NonNull<Node<T>>>,
+ at: usize,
+ ) -> Self {
+ // The split node is the new head node of the second part
+ if let Some(mut split_node) = split_node {
+ let first_part_head;
+ let first_part_tail;
+ unsafe {
+ first_part_tail = split_node.as_mut().prev.take();
+ }
+ if let Some(mut tail) = first_part_tail {
+ unsafe {
+ tail.as_mut().next = None;
+ }
+ first_part_head = self.head;
+ } else {
+ first_part_head = None;
+ }
+
+ let first_part = LinkedList {
+ head: first_part_head,
+ tail: first_part_tail,
+ len: at,
+ marker: PhantomData,
+ };
+
+ // Fix the head ptr of the second part
+ self.head = Some(split_node);
+ self.len = self.len - at;
+
+ first_part
+ } else {
+ mem::replace(self, LinkedList::new())
+ }
+ }
+
+ #[inline]
+ unsafe fn split_off_after_node(
+ &mut self,
+ split_node: Option<NonNull<Node<T>>>,
+ at: usize,
+ ) -> Self {
+ // The split node is the new tail node of the first part and owns
+ // the head of the second part.
+ if let Some(mut split_node) = split_node {
+ let second_part_head;
+ let second_part_tail;
+ unsafe {
+ second_part_head = split_node.as_mut().next.take();
+ }
+ if let Some(mut head) = second_part_head {
+ unsafe {
+ head.as_mut().prev = None;
+ }
+ second_part_tail = self.tail;
+ } else {
+ second_part_tail = None;
+ }
+
+ let second_part = LinkedList {
+ head: second_part_head,
+ tail: second_part_tail,
+ len: self.len - at,
+ marker: PhantomData,
+ };
+
+ // Fix the tail ptr of the first part
+ self.tail = Some(split_node);
+ self.len = at;
+
+ second_part
+ } else {
+ mem::replace(self, LinkedList::new())
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Default for LinkedList<T> {
+ /// Creates an empty `LinkedList<T>`.
+ #[inline]
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+impl<T> LinkedList<T> {
+ /// Creates an empty `LinkedList`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::LinkedList;
+ ///
+ /// let list: LinkedList<u32> = LinkedList::new();
+ /// ```
+ #[inline]
+ #[rustc_const_stable(feature = "const_linked_list_new", since = "1.39.0")]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ pub const fn new() -> Self {
+ LinkedList { head: None, tail: None, len: 0, marker: PhantomData }
+ }
+
+ /// Moves all elements from `other` to the end of the list.
+ ///
+ /// This reuses all the nodes from `other` and moves them into `self`. After
+ /// this operation, `other` becomes empty.
+ ///
+ /// This operation should compute in *O*(1) time and *O*(1) memory.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::LinkedList;
+ ///
+ /// let mut list1 = LinkedList::new();
+ /// list1.push_back('a');
+ ///
+ /// let mut list2 = LinkedList::new();
+ /// list2.push_back('b');
+ /// list2.push_back('c');
+ ///
+ /// list1.append(&mut list2);
+ ///
+ /// let mut iter = list1.iter();
+ /// assert_eq!(iter.next(), Some(&'a'));
+ /// assert_eq!(iter.next(), Some(&'b'));
+ /// assert_eq!(iter.next(), Some(&'c'));
+ /// assert!(iter.next().is_none());
+ ///
+ /// assert!(list2.is_empty());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn append(&mut self, other: &mut Self) {
+ match self.tail {
+ None => mem::swap(self, other),
+ Some(mut tail) => {
+ // `as_mut` is okay here because we have exclusive access to the entirety
+ // of both lists.
+ if let Some(mut other_head) = other.head.take() {
+ unsafe {
+ tail.as_mut().next = Some(other_head);
+ other_head.as_mut().prev = Some(tail);
+ }
+
+ self.tail = other.tail.take();
+ self.len += mem::replace(&mut other.len, 0);
+ }
+ }
+ }
+ }
+
+ /// Provides a forward iterator.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::LinkedList;
+ ///
+ /// let mut list: LinkedList<u32> = LinkedList::new();
+ ///
+ /// list.push_back(0);
+ /// list.push_back(1);
+ /// list.push_back(2);
+ ///
+ /// let mut iter = list.iter();
+ /// assert_eq!(iter.next(), Some(&0));
+ /// assert_eq!(iter.next(), Some(&1));
+ /// assert_eq!(iter.next(), Some(&2));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn iter(&self) -> Iter<'_, T> {
+ Iter { head: self.head, tail: self.tail, len: self.len, marker: PhantomData }
+ }
+
+ /// Provides a forward iterator with mutable references.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::LinkedList;
+ ///
+ /// let mut list: LinkedList<u32> = LinkedList::new();
+ ///
+ /// list.push_back(0);
+ /// list.push_back(1);
+ /// list.push_back(2);
+ ///
+ /// for element in list.iter_mut() {
+ /// *element += 10;
+ /// }
+ ///
+ /// let mut iter = list.iter();
+ /// assert_eq!(iter.next(), Some(&10));
+ /// assert_eq!(iter.next(), Some(&11));
+ /// assert_eq!(iter.next(), Some(&12));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn iter_mut(&mut self) -> IterMut<'_, T> {
+ IterMut { head: self.head, tail: self.tail, len: self.len, marker: PhantomData }
+ }
+
+ /// Provides a cursor at the front element.
+ ///
+ /// The cursor is pointing to the "ghost" non-element if the list is empty.
+ #[inline]
+ #[must_use]
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn cursor_front(&self) -> Cursor<'_, T> {
+ Cursor { index: 0, current: self.head, list: self }
+ }
+
+ /// Provides a cursor with editing operations at the front element.
+ ///
+ /// The cursor is pointing to the "ghost" non-element if the list is empty.
+ #[inline]
+ #[must_use]
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn cursor_front_mut(&mut self) -> CursorMut<'_, T> {
+ CursorMut { index: 0, current: self.head, list: self }
+ }
+
+ /// Provides a cursor at the back element.
+ ///
+ /// The cursor is pointing to the "ghost" non-element if the list is empty.
+ #[inline]
+ #[must_use]
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn cursor_back(&self) -> Cursor<'_, T> {
+ Cursor { index: self.len.checked_sub(1).unwrap_or(0), current: self.tail, list: self }
+ }
+
+ /// Provides a cursor with editing operations at the back element.
+ ///
+ /// The cursor is pointing to the "ghost" non-element if the list is empty.
+ #[inline]
+ #[must_use]
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn cursor_back_mut(&mut self) -> CursorMut<'_, T> {
+ CursorMut { index: self.len.checked_sub(1).unwrap_or(0), current: self.tail, list: self }
+ }
+
+ /// Returns `true` if the `LinkedList` is empty.
+ ///
+ /// This operation should compute in *O*(1) time.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::LinkedList;
+ ///
+ /// let mut dl = LinkedList::new();
+ /// assert!(dl.is_empty());
+ ///
+ /// dl.push_front("foo");
+ /// assert!(!dl.is_empty());
+ /// ```
+ #[inline]
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn is_empty(&self) -> bool {
+ self.head.is_none()
+ }
+
+ /// Returns the length of the `LinkedList`.
+ ///
+ /// This operation should compute in *O*(1) time.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::LinkedList;
+ ///
+ /// let mut dl = LinkedList::new();
+ ///
+ /// dl.push_front(2);
+ /// assert_eq!(dl.len(), 1);
+ ///
+ /// dl.push_front(1);
+ /// assert_eq!(dl.len(), 2);
+ ///
+ /// dl.push_back(3);
+ /// assert_eq!(dl.len(), 3);
+ /// ```
+ #[inline]
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn len(&self) -> usize {
+ self.len
+ }
+
+ /// Removes all elements from the `LinkedList`.
+ ///
+ /// This operation should compute in *O*(*n*) time.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::LinkedList;
+ ///
+ /// let mut dl = LinkedList::new();
+ ///
+ /// dl.push_front(2);
+ /// dl.push_front(1);
+ /// assert_eq!(dl.len(), 2);
+ /// assert_eq!(dl.front(), Some(&1));
+ ///
+ /// dl.clear();
+ /// assert_eq!(dl.len(), 0);
+ /// assert_eq!(dl.front(), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn clear(&mut self) {
+ *self = Self::new();
+ }
+
+ /// Returns `true` if the `LinkedList` contains an element equal to the
+ /// given value.
+ ///
+ /// This operation should compute in *O*(*n*) time.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::LinkedList;
+ ///
+ /// let mut list: LinkedList<u32> = LinkedList::new();
+ ///
+ /// list.push_back(0);
+ /// list.push_back(1);
+ /// list.push_back(2);
+ ///
+ /// assert_eq!(list.contains(&0), true);
+ /// assert_eq!(list.contains(&10), false);
+ /// ```
+ #[stable(feature = "linked_list_contains", since = "1.12.0")]
+ pub fn contains(&self, x: &T) -> bool
+ where
+ T: PartialEq<T>,
+ {
+ self.iter().any(|e| e == x)
+ }
+
+ /// Provides a reference to the front element, or `None` if the list is
+ /// empty.
+ ///
+ /// This operation should compute in *O*(1) time.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::LinkedList;
+ ///
+ /// let mut dl = LinkedList::new();
+ /// assert_eq!(dl.front(), None);
+ ///
+ /// dl.push_front(1);
+ /// assert_eq!(dl.front(), Some(&1));
+ /// ```
+ #[inline]
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn front(&self) -> Option<&T> {
+ unsafe { self.head.as_ref().map(|node| &node.as_ref().element) }
+ }
+
+ /// Provides a mutable reference to the front element, or `None` if the list
+ /// is empty.
+ ///
+ /// This operation should compute in *O*(1) time.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::LinkedList;
+ ///
+ /// let mut dl = LinkedList::new();
+ /// assert_eq!(dl.front(), None);
+ ///
+ /// dl.push_front(1);
+ /// assert_eq!(dl.front(), Some(&1));
+ ///
+ /// match dl.front_mut() {
+ /// None => {},
+ /// Some(x) => *x = 5,
+ /// }
+ /// assert_eq!(dl.front(), Some(&5));
+ /// ```
+ #[inline]
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn front_mut(&mut self) -> Option<&mut T> {
+ unsafe { self.head.as_mut().map(|node| &mut node.as_mut().element) }
+ }
+
+ /// Provides a reference to the back element, or `None` if the list is
+ /// empty.
+ ///
+ /// This operation should compute in *O*(1) time.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::LinkedList;
+ ///
+ /// let mut dl = LinkedList::new();
+ /// assert_eq!(dl.back(), None);
+ ///
+ /// dl.push_back(1);
+ /// assert_eq!(dl.back(), Some(&1));
+ /// ```
+ #[inline]
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn back(&self) -> Option<&T> {
+ unsafe { self.tail.as_ref().map(|node| &node.as_ref().element) }
+ }
+
+ /// Provides a mutable reference to the back element, or `None` if the list
+ /// is empty.
+ ///
+ /// This operation should compute in *O*(1) time.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::LinkedList;
+ ///
+ /// let mut dl = LinkedList::new();
+ /// assert_eq!(dl.back(), None);
+ ///
+ /// dl.push_back(1);
+ /// assert_eq!(dl.back(), Some(&1));
+ ///
+ /// match dl.back_mut() {
+ /// None => {},
+ /// Some(x) => *x = 5,
+ /// }
+ /// assert_eq!(dl.back(), Some(&5));
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn back_mut(&mut self) -> Option<&mut T> {
+ unsafe { self.tail.as_mut().map(|node| &mut node.as_mut().element) }
+ }
+
+ /// Adds an element first in the list.
+ ///
+ /// This operation should compute in *O*(1) time.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::LinkedList;
+ ///
+ /// let mut dl = LinkedList::new();
+ ///
+ /// dl.push_front(2);
+ /// assert_eq!(dl.front().unwrap(), &2);
+ ///
+ /// dl.push_front(1);
+ /// assert_eq!(dl.front().unwrap(), &1);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn push_front(&mut self, elt: T) {
+ self.push_front_node(Box::new(Node::new(elt)));
+ }
+
+ /// Removes the first element and returns it, or `None` if the list is
+ /// empty.
+ ///
+ /// This operation should compute in *O*(1) time.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::LinkedList;
+ ///
+ /// let mut d = LinkedList::new();
+ /// assert_eq!(d.pop_front(), None);
+ ///
+ /// d.push_front(1);
+ /// d.push_front(3);
+ /// assert_eq!(d.pop_front(), Some(3));
+ /// assert_eq!(d.pop_front(), Some(1));
+ /// assert_eq!(d.pop_front(), None);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn pop_front(&mut self) -> Option<T> {
+ self.pop_front_node().map(Node::into_element)
+ }
+
+ /// Appends an element to the back of a list.
+ ///
+ /// This operation should compute in *O*(1) time.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::LinkedList;
+ ///
+ /// let mut d = LinkedList::new();
+ /// d.push_back(1);
+ /// d.push_back(3);
+ /// assert_eq!(3, *d.back().unwrap());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn push_back(&mut self, elt: T) {
+ self.push_back_node(Box::new(Node::new(elt)));
+ }
+
+ /// Removes the last element from a list and returns it, or `None` if
+ /// it is empty.
+ ///
+ /// This operation should compute in *O*(1) time.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::LinkedList;
+ ///
+ /// let mut d = LinkedList::new();
+ /// assert_eq!(d.pop_back(), None);
+ /// d.push_back(1);
+ /// d.push_back(3);
+ /// assert_eq!(d.pop_back(), Some(3));
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn pop_back(&mut self) -> Option<T> {
+ self.pop_back_node().map(Node::into_element)
+ }
+
+ /// Splits the list into two at the given index. Returns everything after the given index,
+ /// including the element at that index.
+ ///
+ /// This operation should compute in *O*(*n*) time.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `at > len`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::LinkedList;
+ ///
+ /// let mut d = LinkedList::new();
+ ///
+ /// d.push_front(1);
+ /// d.push_front(2);
+ /// d.push_front(3);
+ ///
+ /// let mut split = d.split_off(2);
+ ///
+ /// assert_eq!(split.pop_front(), Some(1));
+ /// assert_eq!(split.pop_front(), None);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn split_off(&mut self, at: usize) -> LinkedList<T> {
+ let len = self.len();
+ assert!(at <= len, "Cannot split off at a nonexistent index");
+ if at == 0 {
+ return mem::take(self);
+ } else if at == len {
+ return Self::new();
+ }
+
+ // Below, we iterate towards the `at - 1`th node, either from the start or the end,
+ // depending on which would be faster.
+ let split_node = if at - 1 <= len - 1 - (at - 1) {
+ let mut iter = self.iter_mut();
+ // instead of skipping using .skip() (which creates a new struct),
+ // we skip manually so we can access the head field without
+ // depending on implementation details of Skip
+ for _ in 0..at - 1 {
+ iter.next();
+ }
+ iter.head
+ } else {
+ // better off starting from the end
+ let mut iter = self.iter_mut();
+ for _ in 0..len - 1 - (at - 1) {
+ iter.next_back();
+ }
+ iter.tail
+ };
+ unsafe { self.split_off_after_node(split_node, at) }
+ }
+
+ /// Removes the element at the given index and returns it.
+ ///
+ /// This operation should compute in *O*(*n*) time.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `at >= len`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(linked_list_remove)]
+ /// use std::collections::LinkedList;
+ ///
+ /// let mut d = LinkedList::new();
+ ///
+ /// d.push_front(1);
+ /// d.push_front(2);
+ /// d.push_front(3);
+ ///
+ /// assert_eq!(d.remove(1), 2);
+ /// assert_eq!(d.remove(0), 3);
+ /// assert_eq!(d.remove(0), 1);
+ /// ```
+ #[unstable(feature = "linked_list_remove", issue = "69210")]
+ pub fn remove(&mut self, at: usize) -> T {
+ let len = self.len();
+ assert!(at < len, "Cannot remove at an index outside of the list bounds");
+
+ // Below, we iterate towards the node at the given index, either from
+ // the start or the end, depending on which would be faster.
+ let offset_from_end = len - at - 1;
+ if at <= offset_from_end {
+ let mut cursor = self.cursor_front_mut();
+ for _ in 0..at {
+ cursor.move_next();
+ }
+ cursor.remove_current().unwrap()
+ } else {
+ let mut cursor = self.cursor_back_mut();
+ for _ in 0..offset_from_end {
+ cursor.move_prev();
+ }
+ cursor.remove_current().unwrap()
+ }
+ }
+
+ /// Creates an iterator which uses a closure to determine if an element should be removed.
+ ///
+ /// If the closure returns true, then the element is removed and yielded.
+ /// If the closure returns false, the element will remain in the list and will not be yielded
+ /// by the iterator.
+ ///
+ /// Note that `drain_filter` lets you mutate every element in the filter closure, regardless of
+ /// whether you choose to keep or remove it.
+ ///
+ /// # Examples
+ ///
+ /// Splitting a list into evens and odds, reusing the original list:
+ ///
+ /// ```
+ /// #![feature(drain_filter)]
+ /// use std::collections::LinkedList;
+ ///
+ /// let mut numbers: LinkedList<u32> = LinkedList::new();
+ /// numbers.extend(&[1, 2, 3, 4, 5, 6, 8, 9, 11, 13, 14, 15]);
+ ///
+ /// let evens = numbers.drain_filter(|x| *x % 2 == 0).collect::<LinkedList<_>>();
+ /// let odds = numbers;
+ ///
+ /// assert_eq!(evens.into_iter().collect::<Vec<_>>(), vec![2, 4, 6, 8, 14]);
+ /// assert_eq!(odds.into_iter().collect::<Vec<_>>(), vec![1, 3, 5, 9, 11, 13, 15]);
+ /// ```
+ #[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
+ pub fn drain_filter<F>(&mut self, filter: F) -> DrainFilter<'_, T, F>
+ where
+ F: FnMut(&mut T) -> bool,
+ {
+ // avoid borrow issues.
+ let it = self.head;
+ let old_len = self.len;
+
+ DrainFilter { list: self, it, pred: filter, idx: 0, old_len }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<#[may_dangle] T> Drop for LinkedList<T> {
+ fn drop(&mut self) {
+ struct DropGuard<'a, T>(&'a mut LinkedList<T>);
+
+ impl<'a, T> Drop for DropGuard<'a, T> {
+ fn drop(&mut self) {
+ // Continue the same loop we do below. This only runs when a destructor has
+ // panicked. If another one panics this will abort.
+ while self.0.pop_front_node().is_some() {}
+ }
+ }
+
+ while let Some(node) = self.pop_front_node() {
+ let guard = DropGuard(self);
+ drop(node);
+ mem::forget(guard);
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> Iterator for Iter<'a, T> {
+ type Item = &'a T;
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a T> {
+ if self.len == 0 {
+ None
+ } else {
+ self.head.map(|node| unsafe {
+ // Need an unbound lifetime to get 'a
+ let node = &*node.as_ptr();
+ self.len -= 1;
+ self.head = node.next;
+ &node.element
+ })
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (self.len, Some(self.len))
+ }
+
+ #[inline]
+ fn last(mut self) -> Option<&'a T> {
+ self.next_back()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> DoubleEndedIterator for Iter<'a, T> {
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a T> {
+ if self.len == 0 {
+ None
+ } else {
+ self.tail.map(|node| unsafe {
+ // Need an unbound lifetime to get 'a
+ let node = &*node.as_ptr();
+ self.len -= 1;
+ self.tail = node.prev;
+ &node.element
+ })
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> ExactSizeIterator for Iter<'_, T> {}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T> FusedIterator for Iter<'_, T> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> Iterator for IterMut<'a, T> {
+ type Item = &'a mut T;
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a mut T> {
+ if self.len == 0 {
+ None
+ } else {
+ self.head.map(|node| unsafe {
+ // Need an unbound lifetime to get 'a
+ let node = &mut *node.as_ptr();
+ self.len -= 1;
+ self.head = node.next;
+ &mut node.element
+ })
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (self.len, Some(self.len))
+ }
+
+ #[inline]
+ fn last(mut self) -> Option<&'a mut T> {
+ self.next_back()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> DoubleEndedIterator for IterMut<'a, T> {
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a mut T> {
+ if self.len == 0 {
+ None
+ } else {
+ self.tail.map(|node| unsafe {
+ // Need an unbound lifetime to get 'a
+ let node = &mut *node.as_ptr();
+ self.len -= 1;
+ self.tail = node.prev;
+ &mut node.element
+ })
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> ExactSizeIterator for IterMut<'_, T> {}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T> FusedIterator for IterMut<'_, T> {}
+
+/// A cursor over a `LinkedList`.
+///
+/// A `Cursor` is like an iterator, except that it can freely seek back-and-forth.
+///
+/// Cursors always rest between two elements in the list, and index in a logically circular way.
+/// To accommodate this, there is a "ghost" non-element that yields `None` between the head and
+/// tail of the list.
+///
+/// When created, cursors start at the front of the list, or the "ghost" non-element if the list is empty.
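+///
+/// # Examples
+///
+/// A minimal sketch of seeking with a read-only cursor (gated on
+/// `linked_list_cursors`, as in the other examples in this module):
+///
+/// ```
+/// #![feature(linked_list_cursors)]
+/// use std::collections::LinkedList;
+///
+/// let list = LinkedList::from([1, 2, 3]);
+/// let mut cursor = list.cursor_front();
+/// assert_eq!(cursor.current(), Some(&1));
+///
+/// // Seeking backwards past the front lands on the "ghost" non-element.
+/// cursor.move_prev();
+/// assert_eq!(cursor.current(), None);
+/// assert_eq!(cursor.index(), None);
+/// ```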
+#[unstable(feature = "linked_list_cursors", issue = "58533")]
+pub struct Cursor<'a, T: 'a> {
+ index: usize,
+ current: Option<NonNull<Node<T>>>,
+ list: &'a LinkedList<T>,
+}
+
+#[unstable(feature = "linked_list_cursors", issue = "58533")]
+impl<T> Clone for Cursor<'_, T> {
+ fn clone(&self) -> Self {
+ let Cursor { index, current, list } = *self;
+ Cursor { index, current, list }
+ }
+}
+
+#[unstable(feature = "linked_list_cursors", issue = "58533")]
+impl<T: fmt::Debug> fmt::Debug for Cursor<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("Cursor").field(&self.list).field(&self.index()).finish()
+ }
+}
+
+/// A cursor over a `LinkedList` with editing operations.
+///
+/// A `Cursor` is like an iterator, except that it can freely seek back-and-forth, and can
+/// safely mutate the list during iteration. This is because the lifetime of its yielded
+/// references is tied to its own lifetime, instead of just the underlying list. This means
+/// cursors cannot yield multiple elements at once.
+///
+/// Cursors always rest between two elements in the list, and index in a logically circular way.
+/// To accommodate this, there is a "ghost" non-element that yields `None` between the head and
+/// tail of the list.
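+///
+/// # Examples
+///
+/// A minimal sketch of mutating an element in place through the cursor:
+///
+/// ```
+/// #![feature(linked_list_cursors)]
+/// use std::collections::LinkedList;
+///
+/// let mut list = LinkedList::from([1, 2, 3]);
+/// let mut cursor = list.cursor_front_mut();
+/// if let Some(x) = cursor.current() {
+///     *x += 10;
+/// }
+/// assert_eq!(list.front(), Some(&11));
+/// ```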
+#[unstable(feature = "linked_list_cursors", issue = "58533")]
+pub struct CursorMut<'a, T: 'a> {
+ index: usize,
+ current: Option<NonNull<Node<T>>>,
+ list: &'a mut LinkedList<T>,
+}
+
+#[unstable(feature = "linked_list_cursors", issue = "58533")]
+impl<T: fmt::Debug> fmt::Debug for CursorMut<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("CursorMut").field(&self.list).field(&self.index()).finish()
+ }
+}
+
+impl<'a, T> Cursor<'a, T> {
+ /// Returns the cursor position index within the `LinkedList`.
+ ///
+ /// This returns `None` if the cursor is currently pointing to the
+ /// "ghost" non-element.
+ #[must_use]
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn index(&self) -> Option<usize> {
+ let _ = self.current?;
+ Some(self.index)
+ }
+
+ /// Moves the cursor to the next element of the `LinkedList`.
+ ///
+ /// If the cursor is pointing to the "ghost" non-element then this will move it to
+ /// the first element of the `LinkedList`. If it is pointing to the last
+ /// element of the `LinkedList` then this will move it to the "ghost" non-element.
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn move_next(&mut self) {
+ match self.current.take() {
+ // We had no current element; the cursor was sitting at the start position
+ // Next element should be the head of the list
+ None => {
+ self.current = self.list.head;
+ self.index = 0;
+ }
+ // We had a previous element, so let's go to its next
+ Some(current) => unsafe {
+ self.current = current.as_ref().next;
+ self.index += 1;
+ },
+ }
+ }
+
+ /// Moves the cursor to the previous element of the `LinkedList`.
+ ///
+ /// If the cursor is pointing to the "ghost" non-element then this will move it to
+ /// the last element of the `LinkedList`. If it is pointing to the first
+ /// element of the `LinkedList` then this will move it to the "ghost" non-element.
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn move_prev(&mut self) {
+ match self.current.take() {
+ // No current. We're at the start of the list. Yield None and jump to the end.
+ None => {
+ self.current = self.list.tail;
+ self.index = self.list.len().checked_sub(1).unwrap_or(0);
+ }
+ // Have a prev. Yield it and go to the previous element.
+ Some(current) => unsafe {
+ self.current = current.as_ref().prev;
+ self.index = self.index.checked_sub(1).unwrap_or_else(|| self.list.len());
+ },
+ }
+ }
+
+ /// Returns a reference to the element that the cursor is currently
+ /// pointing to.
+ ///
+ /// This returns `None` if the cursor is currently pointing to the
+ /// "ghost" non-element.
+ #[must_use]
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn current(&self) -> Option<&'a T> {
+ unsafe { self.current.map(|current| &(*current.as_ptr()).element) }
+ }
+
+ /// Returns a reference to the next element.
+ ///
+ /// If the cursor is pointing to the "ghost" non-element then this returns
+ /// the first element of the `LinkedList`. If it is pointing to the last
+ /// element of the `LinkedList` then this returns `None`.
+ #[must_use]
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn peek_next(&self) -> Option<&'a T> {
+ unsafe {
+ let next = match self.current {
+ None => self.list.head,
+ Some(current) => current.as_ref().next,
+ };
+ next.map(|next| &(*next.as_ptr()).element)
+ }
+ }
+
+ /// Returns a reference to the previous element.
+ ///
+ /// If the cursor is pointing to the "ghost" non-element then this returns
+ /// the last element of the `LinkedList`. If it is pointing to the first
+ /// element of the `LinkedList` then this returns `None`.
+ #[must_use]
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn peek_prev(&self) -> Option<&'a T> {
+ unsafe {
+ let prev = match self.current {
+ None => self.list.tail,
+ Some(current) => current.as_ref().prev,
+ };
+ prev.map(|prev| &(*prev.as_ptr()).element)
+ }
+ }
+
+ /// Provides a reference to the front element of the cursor's parent list,
+ /// or `None` if the list is empty.
+ #[must_use]
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn front(&self) -> Option<&'a T> {
+ self.list.front()
+ }
+
+ /// Provides a reference to the back element of the cursor's parent list,
+ /// or `None` if the list is empty.
+ #[must_use]
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn back(&self) -> Option<&'a T> {
+ self.list.back()
+ }
+}
+
+impl<'a, T> CursorMut<'a, T> {
+ /// Returns the cursor position index within the `LinkedList`.
+ ///
+ /// This returns `None` if the cursor is currently pointing to the
+ /// "ghost" non-element.
+ #[must_use]
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn index(&self) -> Option<usize> {
+ let _ = self.current?;
+ Some(self.index)
+ }
+
+ /// Moves the cursor to the next element of the `LinkedList`.
+ ///
+ /// If the cursor is pointing to the "ghost" non-element then this will move it to
+ /// the first element of the `LinkedList`. If it is pointing to the last
+ /// element of the `LinkedList` then this will move it to the "ghost" non-element.
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn move_next(&mut self) {
+ match self.current.take() {
+ // We had no current element; the cursor was sitting at the start position
+ // Next element should be the head of the list
+ None => {
+ self.current = self.list.head;
+ self.index = 0;
+ }
+ // We had a previous element, so let's go to its next
+ Some(current) => unsafe {
+ self.current = current.as_ref().next;
+ self.index += 1;
+ },
+ }
+ }
+
+ /// Moves the cursor to the previous element of the `LinkedList`.
+ ///
+ /// If the cursor is pointing to the "ghost" non-element then this will move it to
+ /// the last element of the `LinkedList`. If it is pointing to the first
+ /// element of the `LinkedList` then this will move it to the "ghost" non-element.
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn move_prev(&mut self) {
+ match self.current.take() {
+ // No current. We're at the start of the list. Yield None and jump to the end.
+ None => {
+ self.current = self.list.tail;
+ self.index = self.list.len().checked_sub(1).unwrap_or(0);
+ }
+ // Have a prev. Yield it and go to the previous element.
+ Some(current) => unsafe {
+ self.current = current.as_ref().prev;
+ self.index = self.index.checked_sub(1).unwrap_or_else(|| self.list.len());
+ },
+ }
+ }
+
+ /// Returns a reference to the element that the cursor is currently
+ /// pointing to.
+ ///
+ /// This returns `None` if the cursor is currently pointing to the
+ /// "ghost" non-element.
+ #[must_use]
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn current(&mut self) -> Option<&mut T> {
+ unsafe { self.current.map(|current| &mut (*current.as_ptr()).element) }
+ }
+
+ /// Returns a reference to the next element.
+ ///
+ /// If the cursor is pointing to the "ghost" non-element then this returns
+ /// the first element of the `LinkedList`. If it is pointing to the last
+ /// element of the `LinkedList` then this returns `None`.
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn peek_next(&mut self) -> Option<&mut T> {
+ unsafe {
+ let next = match self.current {
+ None => self.list.head,
+ Some(current) => current.as_ref().next,
+ };
+ next.map(|next| &mut (*next.as_ptr()).element)
+ }
+ }
+
+ /// Returns a reference to the previous element.
+ ///
+ /// If the cursor is pointing to the "ghost" non-element then this returns
+ /// the last element of the `LinkedList`. If it is pointing to the first
+ /// element of the `LinkedList` then this returns `None`.
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn peek_prev(&mut self) -> Option<&mut T> {
+ unsafe {
+ let prev = match self.current {
+ None => self.list.tail,
+ Some(current) => current.as_ref().prev,
+ };
+ prev.map(|prev| &mut (*prev.as_ptr()).element)
+ }
+ }
+
+ /// Returns a read-only cursor pointing to the current element.
+ ///
+ /// The lifetime of the returned `Cursor` is bound to that of the
+ /// `CursorMut`, which means it cannot outlive the `CursorMut` and that the
+ /// `CursorMut` is frozen for the lifetime of the `Cursor`.
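+ ///
+ /// # Examples
+ ///
+ /// A short sketch, mirroring this module's tests: the read-only copy can
+ /// seek independently without moving the original cursor.
+ ///
+ /// ```
+ /// #![feature(linked_list_cursors)]
+ /// use std::collections::LinkedList;
+ ///
+ /// let mut list = LinkedList::from([1, 2, 3]);
+ /// let cursor = list.cursor_front_mut();
+ /// let mut read_only = cursor.as_cursor();
+ /// read_only.move_next();
+ /// assert_eq!(read_only.current(), Some(&2));
+ /// // The mutable cursor itself has not moved.
+ /// assert_eq!(cursor.index(), Some(0));
+ /// ```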
+ #[must_use]
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn as_cursor(&self) -> Cursor<'_, T> {
+ Cursor { list: self.list, current: self.current, index: self.index }
+ }
+}
+
+// Now the list editing operations
+
+impl<'a, T> CursorMut<'a, T> {
+ /// Inserts a new element into the `LinkedList` after the current one.
+ ///
+ /// If the cursor is pointing at the "ghost" non-element then the new element is
+ /// inserted at the front of the `LinkedList`.
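+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch:
+ ///
+ /// ```
+ /// #![feature(linked_list_cursors)]
+ /// use std::collections::LinkedList;
+ ///
+ /// let mut list = LinkedList::from([1, 2, 3]);
+ /// let mut cursor = list.cursor_front_mut();
+ /// cursor.insert_after(9);
+ /// assert_eq!(list.into_iter().collect::<Vec<_>>(), vec![1, 9, 2, 3]);
+ /// ```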
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn insert_after(&mut self, item: T) {
+ unsafe {
+ let spliced_node = Box::leak(Box::new(Node::new(item))).into();
+ let node_next = match self.current {
+ None => self.list.head,
+ Some(node) => node.as_ref().next,
+ };
+ self.list.splice_nodes(self.current, node_next, spliced_node, spliced_node, 1);
+ if self.current.is_none() {
+ // The "ghost" non-element's index has changed.
+ self.index = self.list.len;
+ }
+ }
+ }
+
+ /// Inserts a new element into the `LinkedList` before the current one.
+ ///
+ /// If the cursor is pointing at the "ghost" non-element then the new element is
+ /// inserted at the end of the `LinkedList`.
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn insert_before(&mut self, item: T) {
+ unsafe {
+ let spliced_node = Box::leak(Box::new(Node::new(item))).into();
+ let node_prev = match self.current {
+ None => self.list.tail,
+ Some(node) => node.as_ref().prev,
+ };
+ self.list.splice_nodes(node_prev, self.current, spliced_node, spliced_node, 1);
+ self.index += 1;
+ }
+ }
+
+ /// Removes the current element from the `LinkedList`.
+ ///
+ /// The element that was removed is returned, and the cursor is
+ /// moved to point to the next element in the `LinkedList`.
+ ///
+ /// If the cursor is currently pointing to the "ghost" non-element then no element
+ /// is removed and `None` is returned.
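+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch:
+ ///
+ /// ```
+ /// #![feature(linked_list_cursors)]
+ /// use std::collections::LinkedList;
+ ///
+ /// let mut list = LinkedList::from([1, 2, 3]);
+ /// let mut cursor = list.cursor_front_mut();
+ /// assert_eq!(cursor.remove_current(), Some(1));
+ /// // The cursor now points at what was the next element.
+ /// assert_eq!(cursor.current(), Some(&mut 2));
+ /// ```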
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn remove_current(&mut self) -> Option<T> {
+ let unlinked_node = self.current?;
+ unsafe {
+ self.current = unlinked_node.as_ref().next;
+ self.list.unlink_node(unlinked_node);
+ let unlinked_node = Box::from_raw(unlinked_node.as_ptr());
+ Some(unlinked_node.element)
+ }
+ }
+
+ /// Removes the current element from the `LinkedList` without deallocating the list node.
+ ///
+ /// The node that was removed is returned as a new `LinkedList` containing only this node.
+ /// The cursor is moved to point to the next element in the current `LinkedList`.
+ ///
+ /// If the cursor is currently pointing to the "ghost" non-element then no element
+ /// is removed and `None` is returned.
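+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch:
+ ///
+ /// ```
+ /// #![feature(linked_list_cursors)]
+ /// use std::collections::LinkedList;
+ ///
+ /// let mut list = LinkedList::from([1, 2, 3]);
+ /// let mut cursor = list.cursor_front_mut();
+ /// let singleton = cursor.remove_current_as_list().unwrap();
+ /// assert_eq!(singleton.into_iter().collect::<Vec<_>>(), vec![1]);
+ /// assert_eq!(list.into_iter().collect::<Vec<_>>(), vec![2, 3]);
+ /// ```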
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn remove_current_as_list(&mut self) -> Option<LinkedList<T>> {
+ let mut unlinked_node = self.current?;
+ unsafe {
+ self.current = unlinked_node.as_ref().next;
+ self.list.unlink_node(unlinked_node);
+
+ unlinked_node.as_mut().prev = None;
+ unlinked_node.as_mut().next = None;
+ Some(LinkedList {
+ head: Some(unlinked_node),
+ tail: Some(unlinked_node),
+ len: 1,
+ marker: PhantomData,
+ })
+ }
+ }
+
+ /// Inserts the elements from the given `LinkedList` after the current one.
+ ///
+ /// If the cursor is pointing at the "ghost" non-element then the new elements are
+ /// inserted at the start of the `LinkedList`.
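+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch:
+ ///
+ /// ```
+ /// #![feature(linked_list_cursors)]
+ /// use std::collections::LinkedList;
+ ///
+ /// let mut list = LinkedList::from([1, 4]);
+ /// let mut cursor = list.cursor_front_mut();
+ /// cursor.splice_after(LinkedList::from([2, 3]));
+ /// assert_eq!(list.into_iter().collect::<Vec<_>>(), vec![1, 2, 3, 4]);
+ /// ```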
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn splice_after(&mut self, list: LinkedList<T>) {
+ unsafe {
+ let (splice_head, splice_tail, splice_len) = match list.detach_all_nodes() {
+ Some(parts) => parts,
+ _ => return,
+ };
+ let node_next = match self.current {
+ None => self.list.head,
+ Some(node) => node.as_ref().next,
+ };
+ self.list.splice_nodes(self.current, node_next, splice_head, splice_tail, splice_len);
+ if self.current.is_none() {
+ // The "ghost" non-element's index has changed.
+ self.index = self.list.len;
+ }
+ }
+ }
+
+ /// Inserts the elements from the given `LinkedList` before the current one.
+ ///
+ /// If the cursor is pointing at the "ghost" non-element then the new elements are
+ /// inserted at the end of the `LinkedList`.
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn splice_before(&mut self, list: LinkedList<T>) {
+ unsafe {
+ let (splice_head, splice_tail, splice_len) = match list.detach_all_nodes() {
+ Some(parts) => parts,
+ _ => return,
+ };
+ let node_prev = match self.current {
+ None => self.list.tail,
+ Some(node) => node.as_ref().prev,
+ };
+ self.list.splice_nodes(node_prev, self.current, splice_head, splice_tail, splice_len);
+ self.index += splice_len;
+ }
+ }
+
+ /// Splits the list into two after the current element. This will return a
+ /// new list consisting of everything after the cursor, with the original
+ /// list retaining everything before.
+ ///
+ /// If the cursor is pointing at the "ghost" non-element then the entire contents
+ /// of the `LinkedList` are moved.
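+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch:
+ ///
+ /// ```
+ /// #![feature(linked_list_cursors)]
+ /// use std::collections::LinkedList;
+ ///
+ /// let mut list = LinkedList::from([1, 2, 3]);
+ /// let mut cursor = list.cursor_front_mut();
+ /// let tail = cursor.split_after();
+ /// assert_eq!(list.into_iter().collect::<Vec<_>>(), vec![1]);
+ /// assert_eq!(tail.into_iter().collect::<Vec<_>>(), vec![2, 3]);
+ /// ```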
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn split_after(&mut self) -> LinkedList<T> {
+ let split_off_idx = if self.index == self.list.len { 0 } else { self.index + 1 };
+ if self.index == self.list.len {
+ // The "ghost" non-element's index has changed to 0.
+ self.index = 0;
+ }
+ unsafe { self.list.split_off_after_node(self.current, split_off_idx) }
+ }
+
+ /// Splits the list into two before the current element. This will return a
+ /// new list consisting of everything before the cursor, with the original
+ /// list retaining everything after.
+ ///
+ /// If the cursor is pointing at the "ghost" non-element then the entire contents
+ /// of the `LinkedList` are moved.
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn split_before(&mut self) -> LinkedList<T> {
+ let split_off_idx = self.index;
+ self.index = 0;
+ unsafe { self.list.split_off_before_node(self.current, split_off_idx) }
+ }
+
+ /// Adds an element to the front of the cursor's parent list. The node
+ /// that the cursor points to is unchanged, even if it is the "ghost" node.
+ ///
+ /// This operation should compute in *O*(1) time.
+ // `push_front` continues to point to "ghost" when it adds a node to mimic
+ // the behavior of `insert_before` on an empty list.
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn push_front(&mut self, elt: T) {
+ // Safety: We know that `push_front` does not change the position in
+ // memory of other nodes. This ensures that `self.current` remains
+ // valid.
+ self.list.push_front(elt);
+ self.index += 1;
+ }
+
+ /// Appends an element to the back of the cursor's parent list. The node
+ /// that the cursor points to is unchanged, even if it is the "ghost" node.
+ ///
+ /// This operation should compute in *O*(1) time.
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn push_back(&mut self, elt: T) {
+ // Safety: We know that `push_back` does not change the position in
+ // memory of other nodes. This ensures that `self.current` remains
+ // valid.
+ self.list.push_back(elt);
+ if self.current().is_none() {
+ // The index of "ghost" is the length of the list, so we just need
+ // to increment self.index to reflect the new length of the list.
+ self.index += 1;
+ }
+ }
+
+ /// Removes the first element from the cursor's parent list and returns it,
+ /// or `None` if the list is empty. The element the cursor points to remains
+ /// unchanged, unless it was pointing to the front element. In that case, it
+ /// points to the new front element.
+ ///
+ /// This operation should compute in *O*(1) time.
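+ ///
+ /// # Examples
+ ///
+ /// A short sketch, mirroring this module's tests:
+ ///
+ /// ```
+ /// #![feature(linked_list_cursors)]
+ /// use std::collections::LinkedList;
+ ///
+ /// let mut list = LinkedList::from([1, 2, 3]);
+ /// let mut cursor = list.cursor_back_mut();
+ /// assert_eq!(cursor.pop_front(), Some(1));
+ /// // The cursor still points at the back element, now at index 1.
+ /// assert_eq!(cursor.current(), Some(&mut 3));
+ /// assert_eq!(cursor.index(), Some(1));
+ /// ```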
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn pop_front(&mut self) -> Option<T> {
+ // We can't check whether `current` is `None`; we must check the list directly.
+ // It is possible for `self.current == None` and the list to be
+ // non-empty.
+ if self.list.is_empty() {
+ None
+ } else {
+ // We can't point to the node that we pop. Copying the behavior of
+ // `remove_current`, we move on to the next node in the sequence.
+ // If the list is of length 1 then we end up pointing to the "ghost"
+ // node at index 0, which is expected.
+ if self.list.head == self.current {
+ self.move_next();
+ } else {
+ self.index -= 1;
+ }
+ self.list.pop_front()
+ }
+ }
+
+ /// Removes the last element from the cursor's parent list and returns it,
+ /// or `None` if the list is empty. The element the cursor points to remains
+ /// unchanged, unless it was pointing to the back element. In that case, it
+ /// points to the "ghost" element.
+ ///
+ /// This operation should compute in *O*(1) time.
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn pop_back(&mut self) -> Option<T> {
+ if self.list.is_empty() {
+ None
+ } else {
+ if self.list.tail == self.current {
+ // The index now reflects the length of the list. It was the
+ // length of the list minus 1, but now the list is 1 smaller. No
+ // change is needed for `index`.
+ self.current = None;
+ } else if self.current.is_none() {
+ self.index = self.list.len - 1;
+ }
+ self.list.pop_back()
+ }
+ }
+
+ /// Provides a reference to the front element of the cursor's parent list,
+ /// or `None` if the list is empty.
+ #[must_use]
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn front(&self) -> Option<&T> {
+ self.list.front()
+ }
+
+ /// Provides a mutable reference to the front element of the cursor's
+ /// parent list, or `None` if the list is empty.
+ #[must_use]
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn front_mut(&mut self) -> Option<&mut T> {
+ self.list.front_mut()
+ }
+
+ /// Provides a reference to the back element of the cursor's parent list,
+ /// or `None` if the list is empty.
+ #[must_use]
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn back(&self) -> Option<&T> {
+ self.list.back()
+ }
+
+ /// Provides a mutable reference to back element of the cursor's parent
+ /// list, or `None` if the list is empty.
+ ///
+ /// # Examples
+ /// Building and mutating a list with a cursor, then getting the back element:
+ /// ```
+ /// #![feature(linked_list_cursors)]
+ /// use std::collections::LinkedList;
+ /// let mut dl = LinkedList::new();
+ /// dl.push_front(3);
+ /// dl.push_front(2);
+ /// dl.push_front(1);
+ /// let mut cursor = dl.cursor_front_mut();
+ /// *cursor.current().unwrap() = 99;
+ /// *cursor.back_mut().unwrap() = 0;
+ /// let mut contents = dl.into_iter();
+ /// assert_eq!(contents.next(), Some(99));
+ /// assert_eq!(contents.next(), Some(2));
+ /// assert_eq!(contents.next(), Some(0));
+ /// assert_eq!(contents.next(), None);
+ /// ```
+ #[must_use]
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn back_mut(&mut self) -> Option<&mut T> {
+ self.list.back_mut()
+ }
+}
+
+/// An iterator produced by calling `drain_filter` on a `LinkedList`.
+#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
+pub struct DrainFilter<'a, T: 'a, F: 'a>
+where
+ F: FnMut(&mut T) -> bool,
+{
+ list: &'a mut LinkedList<T>,
+ it: Option<NonNull<Node<T>>>,
+ pred: F,
+ idx: usize,
+ old_len: usize,
+}
+
+#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
+impl<T, F> Iterator for DrainFilter<'_, T, F>
+where
+ F: FnMut(&mut T) -> bool,
+{
+ type Item = T;
+
+ fn next(&mut self) -> Option<T> {
+ while let Some(mut node) = self.it {
+ unsafe {
+ self.it = node.as_ref().next;
+ self.idx += 1;
+
+ if (self.pred)(&mut node.as_mut().element) {
+ // `unlink_node` is okay with aliasing `element` references.
+ self.list.unlink_node(node);
+ return Some(Box::from_raw(node.as_ptr()).element);
+ }
+ }
+ }
+
+ None
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (0, Some(self.old_len - self.idx))
+ }
+}
+
+#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
+impl<T, F> Drop for DrainFilter<'_, T, F>
+where
+ F: FnMut(&mut T) -> bool,
+{
+ fn drop(&mut self) {
+ struct DropGuard<'r, 'a, T, F>(&'r mut DrainFilter<'a, T, F>)
+ where
+ F: FnMut(&mut T) -> bool;
+
+ impl<'r, 'a, T, F> Drop for DropGuard<'r, 'a, T, F>
+ where
+ F: FnMut(&mut T) -> bool,
+ {
+ fn drop(&mut self) {
+ self.0.for_each(drop);
+ }
+ }
+
+ while let Some(item) = self.next() {
+ let guard = DropGuard(self);
+ drop(item);
+ mem::forget(guard);
+ }
+ }
+}
+
+#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
+impl<T: fmt::Debug, F> fmt::Debug for DrainFilter<'_, T, F>
+where
+ F: FnMut(&mut T) -> bool,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("DrainFilter").field(&self.list).finish()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Iterator for IntoIter<T> {
+ type Item = T;
+
+ #[inline]
+ fn next(&mut self) -> Option<T> {
+ self.list.pop_front()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (self.list.len, Some(self.list.len))
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> DoubleEndedIterator for IntoIter<T> {
+ #[inline]
+ fn next_back(&mut self) -> Option<T> {
+ self.list.pop_back()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> ExactSizeIterator for IntoIter<T> {}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T> FusedIterator for IntoIter<T> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> FromIterator<T> for LinkedList<T> {
+ fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
+ let mut list = Self::new();
+ list.extend(iter);
+ list
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> IntoIterator for LinkedList<T> {
+ type Item = T;
+ type IntoIter = IntoIter<T>;
+
+ /// Consumes the list into an iterator yielding elements by value.
+ #[inline]
+ fn into_iter(self) -> IntoIter<T> {
+ IntoIter { list: self }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> IntoIterator for &'a LinkedList<T> {
+ type Item = &'a T;
+ type IntoIter = Iter<'a, T>;
+
+ fn into_iter(self) -> Iter<'a, T> {
+ self.iter()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> IntoIterator for &'a mut LinkedList<T> {
+ type Item = &'a mut T;
+ type IntoIter = IterMut<'a, T>;
+
+ fn into_iter(self) -> IterMut<'a, T> {
+ self.iter_mut()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Extend<T> for LinkedList<T> {
+ fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
+ <Self as SpecExtend<I>>::spec_extend(self, iter);
+ }
+
+ #[inline]
+ fn extend_one(&mut self, elem: T) {
+ self.push_back(elem);
+ }
+}
+
+impl<I: IntoIterator> SpecExtend<I> for LinkedList<I::Item> {
+ default fn spec_extend(&mut self, iter: I) {
+ iter.into_iter().for_each(move |elt| self.push_back(elt));
+ }
+}
+
+impl<T> SpecExtend<LinkedList<T>> for LinkedList<T> {
+ fn spec_extend(&mut self, ref mut other: LinkedList<T>) {
+ self.append(other);
+ }
+}
+
+#[stable(feature = "extend_ref", since = "1.2.0")]
+impl<'a, T: 'a + Copy> Extend<&'a T> for LinkedList<T> {
+ fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
+ self.extend(iter.into_iter().cloned());
+ }
+
+ #[inline]
+ fn extend_one(&mut self, &elem: &'a T) {
+ self.push_back(elem);
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: PartialEq> PartialEq for LinkedList<T> {
+ fn eq(&self, other: &Self) -> bool {
+ self.len() == other.len() && self.iter().eq(other)
+ }
+
+ fn ne(&self, other: &Self) -> bool {
+ self.len() != other.len() || self.iter().ne(other)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Eq> Eq for LinkedList<T> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: PartialOrd> PartialOrd for LinkedList<T> {
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ self.iter().partial_cmp(other)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Ord> Ord for LinkedList<T> {
+ #[inline]
+ fn cmp(&self, other: &Self) -> Ordering {
+ self.iter().cmp(other)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Clone> Clone for LinkedList<T> {
+ fn clone(&self) -> Self {
+ self.iter().cloned().collect()
+ }
+
+ fn clone_from(&mut self, other: &Self) {
+ let mut iter_other = other.iter();
+ if self.len() > other.len() {
+ self.split_off(other.len());
+ }
+ for (elem, elem_other) in self.iter_mut().zip(&mut iter_other) {
+ elem.clone_from(elem_other);
+ }
+ if !iter_other.is_empty() {
+ self.extend(iter_other.cloned());
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: fmt::Debug> fmt::Debug for LinkedList<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self).finish()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Hash> Hash for LinkedList<T> {
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ state.write_length_prefix(self.len());
+ for elt in self {
+ elt.hash(state);
+ }
+ }
+}
+
+#[stable(feature = "std_collections_from_array", since = "1.56.0")]
+impl<T, const N: usize> From<[T; N]> for LinkedList<T> {
+ /// Converts a `[T; N]` into a `LinkedList<T>`.
+ ///
+ /// ```
+ /// use std::collections::LinkedList;
+ ///
+ /// let list1 = LinkedList::from([1, 2, 3, 4]);
+ /// let list2: LinkedList<_> = [1, 2, 3, 4].into();
+ /// assert_eq!(list1, list2);
+ /// ```
+ fn from(arr: [T; N]) -> Self {
+ Self::from_iter(arr)
+ }
+}
+
+// Ensure that `LinkedList` and its read-only iterators are covariant in their type parameters.
+#[allow(dead_code)]
+fn assert_covariance() {
+ fn a<'a>(x: LinkedList<&'static str>) -> LinkedList<&'a str> {
+ x
+ }
+ fn b<'i, 'a>(x: Iter<'i, &'static str>) -> Iter<'i, &'a str> {
+ x
+ }
+ fn c<'a>(x: IntoIter<&'static str>) -> IntoIter<&'a str> {
+ x
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<T: Send> Send for LinkedList<T> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<T: Sync> Sync for LinkedList<T> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<T: Sync> Send for Iter<'_, T> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<T: Sync> Sync for Iter<'_, T> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<T: Send> Send for IterMut<'_, T> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<T: Sync> Sync for IterMut<'_, T> {}
+
+#[unstable(feature = "linked_list_cursors", issue = "58533")]
+unsafe impl<T: Sync> Send for Cursor<'_, T> {}
+
+#[unstable(feature = "linked_list_cursors", issue = "58533")]
+unsafe impl<T: Sync> Sync for Cursor<'_, T> {}
+
+#[unstable(feature = "linked_list_cursors", issue = "58533")]
+unsafe impl<T: Send> Send for CursorMut<'_, T> {}
+
+#[unstable(feature = "linked_list_cursors", issue = "58533")]
+unsafe impl<T: Sync> Sync for CursorMut<'_, T> {}
diff --git a/library/alloc/src/collections/linked_list/tests.rs b/library/alloc/src/collections/linked_list/tests.rs
new file mode 100644
index 000000000..f8fbfa1bf
--- /dev/null
+++ b/library/alloc/src/collections/linked_list/tests.rs
@@ -0,0 +1,1156 @@
+use super::*;
+use crate::vec::Vec;
+
+use std::panic::{catch_unwind, AssertUnwindSafe};
+use std::thread;
+
+use rand::{thread_rng, RngCore};
+
+#[test]
+fn test_basic() {
+ let mut m = LinkedList::<Box<_>>::new();
+ assert_eq!(m.pop_front(), None);
+ assert_eq!(m.pop_back(), None);
+ assert_eq!(m.pop_front(), None);
+ m.push_front(Box::new(1));
+ assert_eq!(m.pop_front(), Some(Box::new(1)));
+ m.push_back(Box::new(2));
+ m.push_back(Box::new(3));
+ assert_eq!(m.len(), 2);
+ assert_eq!(m.pop_front(), Some(Box::new(2)));
+ assert_eq!(m.pop_front(), Some(Box::new(3)));
+ assert_eq!(m.len(), 0);
+ assert_eq!(m.pop_front(), None);
+ m.push_back(Box::new(1));
+ m.push_back(Box::new(3));
+ m.push_back(Box::new(5));
+ m.push_back(Box::new(7));
+ assert_eq!(m.pop_front(), Some(Box::new(1)));
+
+ let mut n = LinkedList::new();
+ n.push_front(2);
+ n.push_front(3);
+ {
+ assert_eq!(n.front().unwrap(), &3);
+ let x = n.front_mut().unwrap();
+ assert_eq!(*x, 3);
+ *x = 0;
+ }
+ {
+ assert_eq!(n.back().unwrap(), &2);
+ let y = n.back_mut().unwrap();
+ assert_eq!(*y, 2);
+ *y = 1;
+ }
+ assert_eq!(n.pop_front(), Some(0));
+ assert_eq!(n.pop_front(), Some(1));
+}
+
+fn generate_test() -> LinkedList<i32> {
+ list_from(&[0, 1, 2, 3, 4, 5, 6])
+}
+
+fn list_from<T: Clone>(v: &[T]) -> LinkedList<T> {
+ v.iter().cloned().collect()
+}
+
+pub fn check_links<T>(list: &LinkedList<T>) {
+ unsafe {
+ let mut len = 0;
+ let mut last_ptr: Option<&Node<T>> = None;
+ let mut node_ptr: &Node<T>;
+ match list.head {
+ None => {
+ // tail node should also be None.
+ assert!(list.tail.is_none());
+ assert_eq!(0, list.len);
+ return;
+ }
+ Some(node) => node_ptr = &*node.as_ptr(),
+ }
+ loop {
+ match (last_ptr, node_ptr.prev) {
+ (None, None) => {}
+ (None, _) => panic!("prev link for head"),
+ (Some(p), Some(pptr)) => {
+ assert_eq!(p as *const Node<T>, pptr.as_ptr() as *const Node<T>);
+ }
+ _ => panic!("prev link is none, not good"),
+ }
+ match node_ptr.next {
+ Some(next) => {
+ last_ptr = Some(node_ptr);
+ node_ptr = &*next.as_ptr();
+ len += 1;
+ }
+ None => {
+ len += 1;
+ break;
+ }
+ }
+ }
+
+ // verify that the tail node points to the last node.
+ let tail = list.tail.as_ref().expect("some tail node").as_ref();
+ assert_eq!(tail as *const Node<T>, node_ptr as *const Node<T>);
+ // check that len matches interior links.
+ assert_eq!(len, list.len);
+ }
+}
+
+#[test]
+fn test_append() {
+ // Empty to empty
+ {
+ let mut m = LinkedList::<i32>::new();
+ let mut n = LinkedList::new();
+ m.append(&mut n);
+ check_links(&m);
+ assert_eq!(m.len(), 0);
+ assert_eq!(n.len(), 0);
+ }
+ // Non-empty to empty
+ {
+ let mut m = LinkedList::new();
+ let mut n = LinkedList::new();
+ n.push_back(2);
+ m.append(&mut n);
+ check_links(&m);
+ assert_eq!(m.len(), 1);
+ assert_eq!(m.pop_back(), Some(2));
+ assert_eq!(n.len(), 0);
+ check_links(&m);
+ }
+ // Empty to non-empty
+ {
+ let mut m = LinkedList::new();
+ let mut n = LinkedList::new();
+ m.push_back(2);
+ m.append(&mut n);
+ check_links(&m);
+ assert_eq!(m.len(), 1);
+ assert_eq!(m.pop_back(), Some(2));
+ check_links(&m);
+ }
+
+ // Non-empty to non-empty
+ let v = vec![1, 2, 3, 4, 5];
+ let u = vec![9, 8, 1, 2, 3, 4, 5];
+ let mut m = list_from(&v);
+ let mut n = list_from(&u);
+ m.append(&mut n);
+ check_links(&m);
+ let mut sum = v;
+ sum.extend_from_slice(&u);
+ assert_eq!(sum.len(), m.len());
+ for elt in sum {
+ assert_eq!(m.pop_front(), Some(elt))
+ }
+ assert_eq!(n.len(), 0);
+ // Let's make sure it's working properly, since we
+ // made direct changes to private members.
+ n.push_back(3);
+ assert_eq!(n.len(), 1);
+ assert_eq!(n.pop_front(), Some(3));
+ check_links(&n);
+}
+
+#[test]
+fn test_iterator() {
+ let m = generate_test();
+ for (i, elt) in m.iter().enumerate() {
+ assert_eq!(i as i32, *elt);
+ }
+ let mut n = LinkedList::new();
+ assert_eq!(n.iter().next(), None);
+ n.push_front(4);
+ let mut it = n.iter();
+ assert_eq!(it.size_hint(), (1, Some(1)));
+ assert_eq!(it.next().unwrap(), &4);
+ assert_eq!(it.size_hint(), (0, Some(0)));
+ assert_eq!(it.next(), None);
+}
+
+#[test]
+fn test_iterator_clone() {
+ let mut n = LinkedList::new();
+ n.push_back(2);
+ n.push_back(3);
+ n.push_back(4);
+ let mut it = n.iter();
+ it.next();
+ let mut jt = it.clone();
+ assert_eq!(it.next(), jt.next());
+ assert_eq!(it.next_back(), jt.next_back());
+ assert_eq!(it.next(), jt.next());
+}
+
+#[test]
+fn test_iterator_double_end() {
+ let mut n = LinkedList::new();
+ assert_eq!(n.iter().next(), None);
+ n.push_front(4);
+ n.push_front(5);
+ n.push_front(6);
+ let mut it = n.iter();
+ assert_eq!(it.size_hint(), (3, Some(3)));
+ assert_eq!(it.next().unwrap(), &6);
+ assert_eq!(it.size_hint(), (2, Some(2)));
+ assert_eq!(it.next_back().unwrap(), &4);
+ assert_eq!(it.size_hint(), (1, Some(1)));
+ assert_eq!(it.next_back().unwrap(), &5);
+ assert_eq!(it.next_back(), None);
+ assert_eq!(it.next(), None);
+}
+
+#[test]
+fn test_rev_iter() {
+ let m = generate_test();
+ for (i, elt) in m.iter().rev().enumerate() {
+ assert_eq!((6 - i) as i32, *elt);
+ }
+ let mut n = LinkedList::new();
+ assert_eq!(n.iter().rev().next(), None);
+ n.push_front(4);
+ let mut it = n.iter().rev();
+ assert_eq!(it.size_hint(), (1, Some(1)));
+ assert_eq!(it.next().unwrap(), &4);
+ assert_eq!(it.size_hint(), (0, Some(0)));
+ assert_eq!(it.next(), None);
+}
+
+#[test]
+fn test_mut_iter() {
+ let mut m = generate_test();
+ let mut len = m.len();
+ for (i, elt) in m.iter_mut().enumerate() {
+ assert_eq!(i as i32, *elt);
+ len -= 1;
+ }
+ assert_eq!(len, 0);
+ let mut n = LinkedList::new();
+ assert!(n.iter_mut().next().is_none());
+ n.push_front(4);
+ n.push_back(5);
+ let mut it = n.iter_mut();
+ assert_eq!(it.size_hint(), (2, Some(2)));
+ assert!(it.next().is_some());
+ assert!(it.next().is_some());
+ assert_eq!(it.size_hint(), (0, Some(0)));
+ assert!(it.next().is_none());
+}
+
+#[test]
+fn test_iterator_mut_double_end() {
+ let mut n = LinkedList::new();
+ assert!(n.iter_mut().next_back().is_none());
+ n.push_front(4);
+ n.push_front(5);
+ n.push_front(6);
+ let mut it = n.iter_mut();
+ assert_eq!(it.size_hint(), (3, Some(3)));
+ assert_eq!(*it.next().unwrap(), 6);
+ assert_eq!(it.size_hint(), (2, Some(2)));
+ assert_eq!(*it.next_back().unwrap(), 4);
+ assert_eq!(it.size_hint(), (1, Some(1)));
+ assert_eq!(*it.next_back().unwrap(), 5);
+ assert!(it.next_back().is_none());
+ assert!(it.next().is_none());
+}
+
+#[test]
+fn test_mut_rev_iter() {
+ let mut m = generate_test();
+ for (i, elt) in m.iter_mut().rev().enumerate() {
+ assert_eq!((6 - i) as i32, *elt);
+ }
+ let mut n = LinkedList::new();
+ assert!(n.iter_mut().rev().next().is_none());
+ n.push_front(4);
+ let mut it = n.iter_mut().rev();
+ assert!(it.next().is_some());
+ assert!(it.next().is_none());
+}
+
+#[test]
+fn test_clone_from() {
+ // Short cloned from long
+ {
+ let v = vec![1, 2, 3, 4, 5];
+ let u = vec![8, 7, 6, 2, 3, 4, 5];
+ let mut m = list_from(&v);
+ let n = list_from(&u);
+ m.clone_from(&n);
+ check_links(&m);
+ assert_eq!(m, n);
+ for elt in u {
+ assert_eq!(m.pop_front(), Some(elt))
+ }
+ }
+ // Long cloned from short
+ {
+ let v = vec![1, 2, 3, 4, 5];
+ let u = vec![6, 7, 8];
+ let mut m = list_from(&v);
+ let n = list_from(&u);
+ m.clone_from(&n);
+ check_links(&m);
+ assert_eq!(m, n);
+ for elt in u {
+ assert_eq!(m.pop_front(), Some(elt))
+ }
+ }
+ // Two equal length lists
+ {
+ let v = vec![1, 2, 3, 4, 5];
+ let u = vec![9, 8, 1, 2, 3];
+ let mut m = list_from(&v);
+ let n = list_from(&u);
+ m.clone_from(&n);
+ check_links(&m);
+ assert_eq!(m, n);
+ for elt in u {
+ assert_eq!(m.pop_front(), Some(elt))
+ }
+ }
+}
+
+#[test]
+#[cfg_attr(target_os = "emscripten", ignore)]
+fn test_send() {
+ let n = list_from(&[1, 2, 3]);
+ thread::spawn(move || {
+ check_links(&n);
+ let a: &[_] = &[&1, &2, &3];
+ assert_eq!(a, &*n.iter().collect::<Vec<_>>());
+ })
+ .join()
+ .ok()
+ .unwrap();
+}
+
+#[test]
+fn test_eq() {
+ let mut n = list_from(&[]);
+ let mut m = list_from(&[]);
+ assert!(n == m);
+ n.push_front(1);
+ assert!(n != m);
+ m.push_back(1);
+ assert!(n == m);
+
+ let n = list_from(&[2, 3, 4]);
+ let m = list_from(&[1, 2, 3]);
+ assert!(n != m);
+}
+
+#[test]
+fn test_ord() {
+ let n = list_from(&[]);
+ let m = list_from(&[1, 2, 3]);
+ assert!(n < m);
+ assert!(m > n);
+ assert!(n <= n);
+ assert!(n >= n);
+}
+
+#[test]
+fn test_ord_nan() {
+ let nan = 0.0f64 / 0.0;
+ let n = list_from(&[nan]);
+ let m = list_from(&[nan]);
+ assert!(!(n < m));
+ assert!(!(n > m));
+ assert!(!(n <= m));
+ assert!(!(n >= m));
+
+ let n = list_from(&[nan]);
+ let one = list_from(&[1.0f64]);
+ assert!(!(n < one));
+ assert!(!(n > one));
+ assert!(!(n <= one));
+ assert!(!(n >= one));
+
+ let u = list_from(&[1.0f64, 2.0, nan]);
+ let v = list_from(&[1.0f64, 2.0, 3.0]);
+ assert!(!(u < v));
+ assert!(!(u > v));
+ assert!(!(u <= v));
+ assert!(!(u >= v));
+
+ let s = list_from(&[1.0f64, 2.0, 4.0, 2.0]);
+ let t = list_from(&[1.0f64, 2.0, 3.0, 2.0]);
+ assert!(!(s < t));
+ assert!(s > one);
+ assert!(!(s <= one));
+ assert!(s >= one);
+}
+
+#[test]
+fn test_26021() {
+ // There was a bug in split_off that failed to null out the RHS's head's prev ptr.
+ // This caused the RHS's dtor to walk up into the LHS at drop and delete all of
+ // its nodes.
+ //
+ // https://github.com/rust-lang/rust/issues/26021
+ let mut v1 = LinkedList::new();
+ v1.push_front(1);
+ v1.push_front(1);
+ v1.push_front(1);
+ v1.push_front(1);
+ let _ = v1.split_off(3); // Dropping this now should not cause laundry consumption
+ assert_eq!(v1.len(), 3);
+
+ assert_eq!(v1.iter().len(), 3);
+ assert_eq!(v1.iter().collect::<Vec<_>>().len(), 3);
+}
+
+#[test]
+fn test_split_off() {
+ let mut v1 = LinkedList::new();
+ v1.push_front(1);
+ v1.push_front(1);
+ v1.push_front(1);
+ v1.push_front(1);
+
+ // test all splits
+ for ix in 0..1 + v1.len() {
+ let mut a = v1.clone();
+ let b = a.split_off(ix);
+ check_links(&a);
+ check_links(&b);
+ a.extend(b);
+ assert_eq!(v1, a);
+ }
+}
+
+#[test]
+fn test_split_off_2() {
+ // singleton
+ {
+ let mut m = LinkedList::new();
+ m.push_back(1);
+
+ let p = m.split_off(0);
+ assert_eq!(m.len(), 0);
+ assert_eq!(p.len(), 1);
+ assert_eq!(p.back(), Some(&1));
+ assert_eq!(p.front(), Some(&1));
+ }
+
+ // not singleton, forwards
+ {
+ let u = vec![1, 2, 3, 4, 5];
+ let mut m = list_from(&u);
+ let mut n = m.split_off(2);
+ assert_eq!(m.len(), 2);
+ assert_eq!(n.len(), 3);
+ for elt in 1..3 {
+ assert_eq!(m.pop_front(), Some(elt));
+ }
+ for elt in 3..6 {
+ assert_eq!(n.pop_front(), Some(elt));
+ }
+ }
+ // not singleton, backwards
+ {
+ let u = vec![1, 2, 3, 4, 5];
+ let mut m = list_from(&u);
+ let mut n = m.split_off(4);
+ assert_eq!(m.len(), 4);
+ assert_eq!(n.len(), 1);
+ for elt in 1..5 {
+ assert_eq!(m.pop_front(), Some(elt));
+ }
+ for elt in 5..6 {
+ assert_eq!(n.pop_front(), Some(elt));
+ }
+ }
+
+ // no-op on the last index
+ {
+ let mut m = LinkedList::new();
+ m.push_back(1);
+
+ let p = m.split_off(1);
+ assert_eq!(m.len(), 1);
+ assert_eq!(p.len(), 0);
+ assert_eq!(m.back(), Some(&1));
+ assert_eq!(m.front(), Some(&1));
+ }
+}
+
+fn fuzz_test(sz: i32) {
+ let mut m: LinkedList<_> = LinkedList::new();
+ let mut v = vec![];
+ for i in 0..sz {
+ check_links(&m);
+ let r: u8 = thread_rng().next_u32() as u8;
+ match r % 6 {
+ 0 => {
+ m.pop_back();
+ v.pop();
+ }
+ 1 => {
+ if !v.is_empty() {
+ m.pop_front();
+ v.remove(0);
+ }
+ }
+ 2 | 4 => {
+ m.push_front(-i);
+ v.insert(0, -i);
+ }
+ 3 | 5 | _ => {
+ m.push_back(i);
+ v.push(i);
+ }
+ }
+ }
+
+ check_links(&m);
+
+ let mut i = 0;
+ for (a, &b) in m.into_iter().zip(&v) {
+ i += 1;
+ assert_eq!(a, b);
+ }
+ assert_eq!(i, v.len());
+}
+
+#[test]
+fn test_fuzz() {
+ for _ in 0..25 {
+ fuzz_test(3);
+ fuzz_test(16);
+ #[cfg(not(miri))] // Miri is too slow
+ fuzz_test(189);
+ }
+}
+
+#[test]
+fn test_show() {
+ let list: LinkedList<_> = (0..10).collect();
+ assert_eq!(format!("{list:?}"), "[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]");
+
+ let list: LinkedList<_> = ["just", "one", "test", "more"].into_iter().collect();
+ assert_eq!(format!("{list:?}"), "[\"just\", \"one\", \"test\", \"more\"]");
+}
+
+#[test]
+fn drain_filter_test() {
+ let mut m: LinkedList<u32> = LinkedList::new();
+ m.extend(&[1, 2, 3, 4, 5, 6]);
+ let deleted = m.drain_filter(|v| *v < 4).collect::<Vec<_>>();
+
+ check_links(&m);
+
+ assert_eq!(deleted, &[1, 2, 3]);
+ assert_eq!(m.into_iter().collect::<Vec<_>>(), &[4, 5, 6]);
+}
+
+#[test]
+fn drain_to_empty_test() {
+ let mut m: LinkedList<u32> = LinkedList::new();
+ m.extend(&[1, 2, 3, 4, 5, 6]);
+ let deleted = m.drain_filter(|_| true).collect::<Vec<_>>();
+
+ check_links(&m);
+
+ assert_eq!(deleted, &[1, 2, 3, 4, 5, 6]);
+ assert_eq!(m.into_iter().collect::<Vec<_>>(), &[]);
+}
+
+#[test]
+fn test_cursor_move_peek() {
+ let mut m: LinkedList<u32> = LinkedList::new();
+ m.extend(&[1, 2, 3, 4, 5, 6]);
+ let mut cursor = m.cursor_front();
+ assert_eq!(cursor.current(), Some(&1));
+ assert_eq!(cursor.peek_next(), Some(&2));
+ assert_eq!(cursor.peek_prev(), None);
+ assert_eq!(cursor.index(), Some(0));
+ cursor.move_prev();
+ assert_eq!(cursor.current(), None);
+ assert_eq!(cursor.peek_next(), Some(&1));
+ assert_eq!(cursor.peek_prev(), Some(&6));
+ assert_eq!(cursor.index(), None);
+ cursor.move_next();
+ cursor.move_next();
+ assert_eq!(cursor.current(), Some(&2));
+ assert_eq!(cursor.peek_next(), Some(&3));
+ assert_eq!(cursor.peek_prev(), Some(&1));
+ assert_eq!(cursor.index(), Some(1));
+
+ let mut cursor = m.cursor_back();
+ assert_eq!(cursor.current(), Some(&6));
+ assert_eq!(cursor.peek_next(), None);
+ assert_eq!(cursor.peek_prev(), Some(&5));
+ assert_eq!(cursor.index(), Some(5));
+ cursor.move_next();
+ assert_eq!(cursor.current(), None);
+ assert_eq!(cursor.peek_next(), Some(&1));
+ assert_eq!(cursor.peek_prev(), Some(&6));
+ assert_eq!(cursor.index(), None);
+ cursor.move_prev();
+ cursor.move_prev();
+ assert_eq!(cursor.current(), Some(&5));
+ assert_eq!(cursor.peek_next(), Some(&6));
+ assert_eq!(cursor.peek_prev(), Some(&4));
+ assert_eq!(cursor.index(), Some(4));
+
+ let mut m: LinkedList<u32> = LinkedList::new();
+ m.extend(&[1, 2, 3, 4, 5, 6]);
+ let mut cursor = m.cursor_front_mut();
+ assert_eq!(cursor.current(), Some(&mut 1));
+ assert_eq!(cursor.peek_next(), Some(&mut 2));
+ assert_eq!(cursor.peek_prev(), None);
+ assert_eq!(cursor.index(), Some(0));
+ cursor.move_prev();
+ assert_eq!(cursor.current(), None);
+ assert_eq!(cursor.peek_next(), Some(&mut 1));
+ assert_eq!(cursor.peek_prev(), Some(&mut 6));
+ assert_eq!(cursor.index(), None);
+ cursor.move_next();
+ cursor.move_next();
+ assert_eq!(cursor.current(), Some(&mut 2));
+ assert_eq!(cursor.peek_next(), Some(&mut 3));
+ assert_eq!(cursor.peek_prev(), Some(&mut 1));
+ assert_eq!(cursor.index(), Some(1));
+ let mut cursor2 = cursor.as_cursor();
+ assert_eq!(cursor2.current(), Some(&2));
+ assert_eq!(cursor2.index(), Some(1));
+ cursor2.move_next();
+ assert_eq!(cursor2.current(), Some(&3));
+ assert_eq!(cursor2.index(), Some(2));
+ assert_eq!(cursor.current(), Some(&mut 2));
+ assert_eq!(cursor.index(), Some(1));
+
+ let mut m: LinkedList<u32> = LinkedList::new();
+ m.extend(&[1, 2, 3, 4, 5, 6]);
+ let mut cursor = m.cursor_back_mut();
+ assert_eq!(cursor.current(), Some(&mut 6));
+ assert_eq!(cursor.peek_next(), None);
+ assert_eq!(cursor.peek_prev(), Some(&mut 5));
+ assert_eq!(cursor.index(), Some(5));
+ cursor.move_next();
+ assert_eq!(cursor.current(), None);
+ assert_eq!(cursor.peek_next(), Some(&mut 1));
+ assert_eq!(cursor.peek_prev(), Some(&mut 6));
+ assert_eq!(cursor.index(), None);
+ cursor.move_prev();
+ cursor.move_prev();
+ assert_eq!(cursor.current(), Some(&mut 5));
+ assert_eq!(cursor.peek_next(), Some(&mut 6));
+ assert_eq!(cursor.peek_prev(), Some(&mut 4));
+ assert_eq!(cursor.index(), Some(4));
+ let mut cursor2 = cursor.as_cursor();
+ assert_eq!(cursor2.current(), Some(&5));
+ assert_eq!(cursor2.index(), Some(4));
+ cursor2.move_prev();
+ assert_eq!(cursor2.current(), Some(&4));
+ assert_eq!(cursor2.index(), Some(3));
+ assert_eq!(cursor.current(), Some(&mut 5));
+ assert_eq!(cursor.index(), Some(4));
+}
+
+#[test]
+fn test_cursor_mut_insert() {
+ let mut m: LinkedList<u32> = LinkedList::new();
+ m.extend(&[1, 2, 3, 4, 5, 6]);
+ let mut cursor = m.cursor_front_mut();
+ cursor.insert_before(7);
+ cursor.insert_after(8);
+ check_links(&m);
+ assert_eq!(m.iter().cloned().collect::<Vec<_>>(), &[7, 1, 8, 2, 3, 4, 5, 6]);
+ let mut cursor = m.cursor_front_mut();
+ cursor.move_prev();
+ cursor.insert_before(9);
+ cursor.insert_after(10);
+ check_links(&m);
+ assert_eq!(m.iter().cloned().collect::<Vec<_>>(), &[10, 7, 1, 8, 2, 3, 4, 5, 6, 9]);
+ let mut cursor = m.cursor_front_mut();
+ cursor.move_prev();
+ assert_eq!(cursor.remove_current(), None);
+ cursor.move_next();
+ cursor.move_next();
+ assert_eq!(cursor.remove_current(), Some(7));
+ cursor.move_prev();
+ cursor.move_prev();
+ cursor.move_prev();
+ assert_eq!(cursor.remove_current(), Some(9));
+ cursor.move_next();
+ assert_eq!(cursor.remove_current(), Some(10));
+ check_links(&m);
+ assert_eq!(m.iter().cloned().collect::<Vec<_>>(), &[1, 8, 2, 3, 4, 5, 6]);
+ let mut cursor = m.cursor_front_mut();
+ let mut p: LinkedList<u32> = LinkedList::new();
+ p.extend(&[100, 101, 102, 103]);
+ let mut q: LinkedList<u32> = LinkedList::new();
+ q.extend(&[200, 201, 202, 203]);
+ cursor.splice_after(p);
+ cursor.splice_before(q);
+ check_links(&m);
+ assert_eq!(
+ m.iter().cloned().collect::<Vec<_>>(),
+ &[200, 201, 202, 203, 1, 100, 101, 102, 103, 8, 2, 3, 4, 5, 6]
+ );
+ let mut cursor = m.cursor_front_mut();
+ cursor.move_prev();
+ let tmp = cursor.split_before();
+ assert_eq!(m.into_iter().collect::<Vec<_>>(), &[]);
+ m = tmp;
+ let mut cursor = m.cursor_front_mut();
+ cursor.move_next();
+ cursor.move_next();
+ cursor.move_next();
+ cursor.move_next();
+ cursor.move_next();
+ cursor.move_next();
+ let tmp = cursor.split_after();
+ assert_eq!(tmp.into_iter().collect::<Vec<_>>(), &[102, 103, 8, 2, 3, 4, 5, 6]);
+ check_links(&m);
+ assert_eq!(m.iter().cloned().collect::<Vec<_>>(), &[200, 201, 202, 203, 1, 100, 101]);
+}
+
+#[test]
+fn test_cursor_push_front_back() {
+ let mut ll: LinkedList<u32> = LinkedList::new();
+ ll.extend(&[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
+ let mut c = ll.cursor_front_mut();
+ assert_eq!(c.current(), Some(&mut 1));
+ assert_eq!(c.index(), Some(0));
+ c.push_front(0);
+ assert_eq!(c.current(), Some(&mut 1));
+ assert_eq!(c.peek_prev(), Some(&mut 0));
+ assert_eq!(c.index(), Some(1));
+ c.push_back(11);
+ drop(c);
+ let p = ll.cursor_back().front().unwrap();
+ assert_eq!(p, &0);
+ assert_eq!(ll, (0..12).collect());
+ check_links(&ll);
+}
+
+#[test]
+fn test_cursor_pop_front_back() {
+ let mut ll: LinkedList<u32> = LinkedList::new();
+ ll.extend(&[1, 2, 3, 4, 5, 6]);
+ let mut c = ll.cursor_back_mut();
+ assert_eq!(c.pop_front(), Some(1));
+ c.move_prev();
+ c.move_prev();
+ c.move_prev();
+ assert_eq!(c.pop_back(), Some(6));
+ let c = c.as_cursor();
+ assert_eq!(c.front(), Some(&2));
+ assert_eq!(c.back(), Some(&5));
+ assert_eq!(c.index(), Some(1));
+ drop(c);
+ assert_eq!(ll, (2..6).collect());
+ check_links(&ll);
+ let mut c = ll.cursor_back_mut();
+ assert_eq!(c.current(), Some(&mut 5));
+ assert_eq!(c.index, 3);
+ assert_eq!(c.pop_back(), Some(5));
+ assert_eq!(c.current(), None);
+ assert_eq!(c.index, 3);
+ assert_eq!(c.pop_back(), Some(4));
+ assert_eq!(c.current(), None);
+ assert_eq!(c.index, 2);
+}
+
+#[test]
+fn test_extend_ref() {
+ let mut a = LinkedList::new();
+ a.push_back(1);
+
+ a.extend(&[2, 3, 4]);
+
+ assert_eq!(a.len(), 4);
+ assert_eq!(a, list_from(&[1, 2, 3, 4]));
+
+ let mut b = LinkedList::new();
+ b.push_back(5);
+ b.push_back(6);
+ a.extend(&b);
+
+ assert_eq!(a.len(), 6);
+ assert_eq!(a, list_from(&[1, 2, 3, 4, 5, 6]));
+}
+
+#[test]
+fn test_extend() {
+ let mut a = LinkedList::new();
+ a.push_back(1);
+ a.extend(vec![2, 3, 4]); // uses iterator
+
+ assert_eq!(a.len(), 4);
+ assert!(a.iter().eq(&[1, 2, 3, 4]));
+
+ let b: LinkedList<_> = [5, 6, 7].into_iter().collect();
+ a.extend(b); // specializes to `append`
+
+ assert_eq!(a.len(), 7);
+ assert!(a.iter().eq(&[1, 2, 3, 4, 5, 6, 7]));
+}
+
+#[test]
+fn test_contains() {
+ let mut l = LinkedList::new();
+ l.extend(&[2, 3, 4]);
+
+ assert!(l.contains(&3));
+ assert!(!l.contains(&1));
+
+ l.clear();
+
+ assert!(!l.contains(&3));
+}
+
+#[test]
+fn drain_filter_empty() {
+ let mut list: LinkedList<i32> = LinkedList::new();
+
+ {
+ let mut iter = list.drain_filter(|_| true);
+ assert_eq!(iter.size_hint(), (0, Some(0)));
+ assert_eq!(iter.next(), None);
+ assert_eq!(iter.size_hint(), (0, Some(0)));
+ assert_eq!(iter.next(), None);
+ assert_eq!(iter.size_hint(), (0, Some(0)));
+ }
+
+ assert_eq!(list.len(), 0);
+ assert_eq!(list.into_iter().collect::<Vec<_>>(), vec![]);
+}
+
+#[test]
+fn drain_filter_zst() {
+ let mut list: LinkedList<_> = [(), (), (), (), ()].into_iter().collect();
+ let initial_len = list.len();
+ let mut count = 0;
+
+ {
+ let mut iter = list.drain_filter(|_| true);
+ assert_eq!(iter.size_hint(), (0, Some(initial_len)));
+ while let Some(_) = iter.next() {
+ count += 1;
+ assert_eq!(iter.size_hint(), (0, Some(initial_len - count)));
+ }
+ assert_eq!(iter.size_hint(), (0, Some(0)));
+ assert_eq!(iter.next(), None);
+ assert_eq!(iter.size_hint(), (0, Some(0)));
+ }
+
+ assert_eq!(count, initial_len);
+ assert_eq!(list.len(), 0);
+ assert_eq!(list.into_iter().collect::<Vec<_>>(), vec![]);
+}
+
+#[test]
+fn drain_filter_false() {
+ let mut list: LinkedList<_> = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10].into_iter().collect();
+
+ let initial_len = list.len();
+ let mut count = 0;
+
+ {
+ let mut iter = list.drain_filter(|_| false);
+ assert_eq!(iter.size_hint(), (0, Some(initial_len)));
+ for _ in iter.by_ref() {
+ count += 1;
+ }
+ assert_eq!(iter.size_hint(), (0, Some(0)));
+ assert_eq!(iter.next(), None);
+ assert_eq!(iter.size_hint(), (0, Some(0)));
+ }
+
+ assert_eq!(count, 0);
+ assert_eq!(list.len(), initial_len);
+ assert_eq!(list.into_iter().collect::<Vec<_>>(), vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
+}
+
+#[test]
+fn drain_filter_true() {
+ let mut list: LinkedList<_> = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10].into_iter().collect();
+
+ let initial_len = list.len();
+ let mut count = 0;
+
+ {
+ let mut iter = list.drain_filter(|_| true);
+ assert_eq!(iter.size_hint(), (0, Some(initial_len)));
+ while let Some(_) = iter.next() {
+ count += 1;
+ assert_eq!(iter.size_hint(), (0, Some(initial_len - count)));
+ }
+ assert_eq!(iter.size_hint(), (0, Some(0)));
+ assert_eq!(iter.next(), None);
+ assert_eq!(iter.size_hint(), (0, Some(0)));
+ }
+
+ assert_eq!(count, initial_len);
+ assert_eq!(list.len(), 0);
+ assert_eq!(list.into_iter().collect::<Vec<_>>(), vec![]);
+}
+
+#[test]
+fn drain_filter_complex() {
+ {
+ // [+xxx++++++xxxxx++++x+x++]
+ let mut list = [
+ 1, 2, 4, 6, 7, 9, 11, 13, 15, 17, 18, 20, 22, 24, 26, 27, 29, 31, 33, 34, 35, 36, 37,
+ 39,
+ ]
+ .into_iter()
+ .collect::<LinkedList<_>>();
+
+ let removed = list.drain_filter(|x| *x % 2 == 0).collect::<Vec<_>>();
+ assert_eq!(removed.len(), 10);
+ assert_eq!(removed, vec![2, 4, 6, 18, 20, 22, 24, 26, 34, 36]);
+
+ assert_eq!(list.len(), 14);
+ assert_eq!(
+ list.into_iter().collect::<Vec<_>>(),
+ vec![1, 7, 9, 11, 13, 15, 17, 27, 29, 31, 33, 35, 37, 39]
+ );
+ }
+
+ {
+ // [xxx++++++xxxxx++++x+x++]
+ let mut list =
+ [2, 4, 6, 7, 9, 11, 13, 15, 17, 18, 20, 22, 24, 26, 27, 29, 31, 33, 34, 35, 36, 37, 39]
+ .into_iter()
+ .collect::<LinkedList<_>>();
+
+ let removed = list.drain_filter(|x| *x % 2 == 0).collect::<Vec<_>>();
+ assert_eq!(removed.len(), 10);
+ assert_eq!(removed, vec![2, 4, 6, 18, 20, 22, 24, 26, 34, 36]);
+
+ assert_eq!(list.len(), 13);
+ assert_eq!(
+ list.into_iter().collect::<Vec<_>>(),
+ vec![7, 9, 11, 13, 15, 17, 27, 29, 31, 33, 35, 37, 39]
+ );
+ }
+
+ {
+ // [xxx++++++xxxxx++++x+x]
+ let mut list =
+ [2, 4, 6, 7, 9, 11, 13, 15, 17, 18, 20, 22, 24, 26, 27, 29, 31, 33, 34, 35, 36]
+ .into_iter()
+ .collect::<LinkedList<_>>();
+
+ let removed = list.drain_filter(|x| *x % 2 == 0).collect::<Vec<_>>();
+ assert_eq!(removed.len(), 10);
+ assert_eq!(removed, vec![2, 4, 6, 18, 20, 22, 24, 26, 34, 36]);
+
+ assert_eq!(list.len(), 11);
+ assert_eq!(
+ list.into_iter().collect::<Vec<_>>(),
+ vec![7, 9, 11, 13, 15, 17, 27, 29, 31, 33, 35]
+ );
+ }
+
+ {
+ // [xxxxxxxxxx+++++++++++]
+ let mut list = [2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19]
+ .into_iter()
+ .collect::<LinkedList<_>>();
+
+ let removed = list.drain_filter(|x| *x % 2 == 0).collect::<Vec<_>>();
+ assert_eq!(removed.len(), 10);
+ assert_eq!(removed, vec![2, 4, 6, 8, 10, 12, 14, 16, 18, 20]);
+
+ assert_eq!(list.len(), 10);
+ assert_eq!(list.into_iter().collect::<Vec<_>>(), vec![1, 3, 5, 7, 9, 11, 13, 15, 17, 19]);
+ }
+
+ {
+ // [+++++++++++xxxxxxxxxx]
+ let mut list = [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20]
+ .into_iter()
+ .collect::<LinkedList<_>>();
+
+ let removed = list.drain_filter(|x| *x % 2 == 0).collect::<Vec<_>>();
+ assert_eq!(removed.len(), 10);
+ assert_eq!(removed, vec![2, 4, 6, 8, 10, 12, 14, 16, 18, 20]);
+
+ assert_eq!(list.len(), 10);
+ assert_eq!(list.into_iter().collect::<Vec<_>>(), vec![1, 3, 5, 7, 9, 11, 13, 15, 17, 19]);
+ }
+}
+
+#[test]
+fn drain_filter_drop_panic_leak() {
+ static mut DROPS: i32 = 0;
+
+ struct D(bool);
+
+ impl Drop for D {
+ fn drop(&mut self) {
+ unsafe {
+ DROPS += 1;
+ }
+
+ if self.0 {
+ panic!("panic in `drop`");
+ }
+ }
+ }
+
+ let mut q = LinkedList::new();
+ q.push_back(D(false));
+ q.push_back(D(false));
+ q.push_back(D(false));
+ q.push_back(D(false));
+ q.push_back(D(false));
+ q.push_front(D(false));
+ q.push_front(D(true));
+ q.push_front(D(false));
+
+ catch_unwind(AssertUnwindSafe(|| drop(q.drain_filter(|_| true)))).ok();
+
+ assert_eq!(unsafe { DROPS }, 8);
+ assert!(q.is_empty());
+}
+
+#[test]
+fn drain_filter_pred_panic_leak() {
+ static mut DROPS: i32 = 0;
+
+ #[derive(Debug)]
+ struct D(u32);
+
+ impl Drop for D {
+ fn drop(&mut self) {
+ unsafe {
+ DROPS += 1;
+ }
+ }
+ }
+
+ let mut q = LinkedList::new();
+ q.push_back(D(3));
+ q.push_back(D(4));
+ q.push_back(D(5));
+ q.push_back(D(6));
+ q.push_back(D(7));
+ q.push_front(D(2));
+ q.push_front(D(1));
+ q.push_front(D(0));
+
+ catch_unwind(AssertUnwindSafe(|| {
+ drop(q.drain_filter(|item| if item.0 >= 2 { panic!() } else { true }))
+ }))
+ .ok();
+
+ assert_eq!(unsafe { DROPS }, 2); // 0 and 1
+ assert_eq!(q.len(), 6);
+}
+
+#[test]
+fn test_drop() {
+ static mut DROPS: i32 = 0;
+ struct Elem;
+ impl Drop for Elem {
+ fn drop(&mut self) {
+ unsafe {
+ DROPS += 1;
+ }
+ }
+ }
+
+ let mut ring = LinkedList::new();
+ ring.push_back(Elem);
+ ring.push_front(Elem);
+ ring.push_back(Elem);
+ ring.push_front(Elem);
+ drop(ring);
+
+ assert_eq!(unsafe { DROPS }, 4);
+}
+
+#[test]
+fn test_drop_with_pop() {
+ static mut DROPS: i32 = 0;
+ struct Elem;
+ impl Drop for Elem {
+ fn drop(&mut self) {
+ unsafe {
+ DROPS += 1;
+ }
+ }
+ }
+
+ let mut ring = LinkedList::new();
+ ring.push_back(Elem);
+ ring.push_front(Elem);
+ ring.push_back(Elem);
+ ring.push_front(Elem);
+
+ drop(ring.pop_back());
+ drop(ring.pop_front());
+ assert_eq!(unsafe { DROPS }, 2);
+
+ drop(ring);
+ assert_eq!(unsafe { DROPS }, 4);
+}
+
+#[test]
+fn test_drop_clear() {
+ static mut DROPS: i32 = 0;
+ struct Elem;
+ impl Drop for Elem {
+ fn drop(&mut self) {
+ unsafe {
+ DROPS += 1;
+ }
+ }
+ }
+
+ let mut ring = LinkedList::new();
+ ring.push_back(Elem);
+ ring.push_front(Elem);
+ ring.push_back(Elem);
+ ring.push_front(Elem);
+ ring.clear();
+ assert_eq!(unsafe { DROPS }, 4);
+
+ drop(ring);
+ assert_eq!(unsafe { DROPS }, 4);
+}
+
+#[test]
+fn test_drop_panic() {
+ static mut DROPS: i32 = 0;
+
+ struct D(bool);
+
+ impl Drop for D {
+ fn drop(&mut self) {
+ unsafe {
+ DROPS += 1;
+ }
+
+ if self.0 {
+ panic!("panic in `drop`");
+ }
+ }
+ }
+
+ let mut q = LinkedList::new();
+ q.push_back(D(false));
+ q.push_back(D(false));
+ q.push_back(D(false));
+ q.push_back(D(false));
+ q.push_back(D(false));
+ q.push_front(D(false));
+ q.push_front(D(false));
+ q.push_front(D(true));
+
+ catch_unwind(move || drop(q)).ok();
+
+ assert_eq!(unsafe { DROPS }, 8);
+}
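+
+// A sketch of the classic cursor use case that the tests above exercise
+// piecemeal: one-pass insertion into a sorted list. Hedged: this assumes the
+// same unstable `linked_list_cursors` API used above and is illustrative, not
+// part of the test suite proper.
+fn _insert_sorted(list: &mut LinkedList<u32>, value: u32) {
+    let mut cursor = list.cursor_front_mut();
+    // Walk forward while the current element is smaller than `value`.
+    while let Some(current) = cursor.current() {
+        if *current >= value {
+            break;
+        }
+        cursor.move_next();
+    }
+    // If the walk ended on the "ghost" element, `value` is the largest and
+    // `insert_before` appends at the back; otherwise it inserts in place.
+    cursor.insert_before(value);
+}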
diff --git a/library/alloc/src/collections/mod.rs b/library/alloc/src/collections/mod.rs
new file mode 100644
index 000000000..628a5b155
--- /dev/null
+++ b/library/alloc/src/collections/mod.rs
@@ -0,0 +1,154 @@
+//! Collection types.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+#[cfg(not(no_global_oom_handling))]
+pub mod binary_heap;
+#[cfg(not(no_global_oom_handling))]
+mod btree;
+#[cfg(not(no_global_oom_handling))]
+pub mod linked_list;
+#[cfg(not(no_global_oom_handling))]
+pub mod vec_deque;
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub mod btree_map {
+ //! An ordered map based on a B-Tree.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub use super::btree::map::*;
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub mod btree_set {
+ //! An ordered set based on a B-Tree.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub use super::btree::set::*;
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[doc(no_inline)]
+pub use binary_heap::BinaryHeap;
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[doc(no_inline)]
+pub use btree_map::BTreeMap;
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[doc(no_inline)]
+pub use btree_set::BTreeSet;
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[doc(no_inline)]
+pub use linked_list::LinkedList;
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[doc(no_inline)]
+pub use vec_deque::VecDeque;
+
+use crate::alloc::{Layout, LayoutError};
+use core::fmt::Display;
+
+/// The error type for `try_reserve` methods.
+#[derive(Clone, PartialEq, Eq, Debug)]
+#[stable(feature = "try_reserve", since = "1.57.0")]
+pub struct TryReserveError {
+ kind: TryReserveErrorKind,
+}
+
+impl TryReserveError {
+    /// Details about the allocation that caused the error.
+ #[inline]
+ #[must_use]
+ #[unstable(
+ feature = "try_reserve_kind",
+ reason = "Uncertain how much info should be exposed",
+ issue = "48043"
+ )]
+ pub fn kind(&self) -> TryReserveErrorKind {
+ self.kind.clone()
+ }
+}
+
+/// Details of the allocation that caused a `TryReserveError`.
+#[derive(Clone, PartialEq, Eq, Debug)]
+#[unstable(
+ feature = "try_reserve_kind",
+ reason = "Uncertain how much info should be exposed",
+ issue = "48043"
+)]
+pub enum TryReserveErrorKind {
+ /// Error due to the computed capacity exceeding the collection's maximum
+ /// (usually `isize::MAX` bytes).
+ CapacityOverflow,
+
+    /// The memory allocator returned an error.
+ AllocError {
+        /// The layout of the allocation request that failed.
+ layout: Layout,
+
+ #[doc(hidden)]
+ #[unstable(
+ feature = "container_error_extra",
+ issue = "none",
+ reason = "\
+ Enable exposing the allocator’s custom error value \
+ if an associated type is added in the future: \
+ https://github.com/rust-lang/wg-allocators/issues/23"
+ )]
+ non_exhaustive: (),
+ },
+}
+
+#[unstable(
+ feature = "try_reserve_kind",
+ reason = "Uncertain how much info should be exposed",
+ issue = "48043"
+)]
+impl From<TryReserveErrorKind> for TryReserveError {
+ #[inline]
+ fn from(kind: TryReserveErrorKind) -> Self {
+ Self { kind }
+ }
+}
+
+#[unstable(feature = "try_reserve_kind", reason = "new API", issue = "48043")]
+impl From<LayoutError> for TryReserveErrorKind {
+ /// Always evaluates to [`TryReserveErrorKind::CapacityOverflow`].
+ #[inline]
+ fn from(_: LayoutError) -> Self {
+ TryReserveErrorKind::CapacityOverflow
+ }
+}
+
+#[stable(feature = "try_reserve", since = "1.57.0")]
+impl Display for TryReserveError {
+ fn fmt(
+ &self,
+ fmt: &mut core::fmt::Formatter<'_>,
+ ) -> core::result::Result<(), core::fmt::Error> {
+ fmt.write_str("memory allocation failed")?;
+ let reason = match self.kind {
+ TryReserveErrorKind::CapacityOverflow => {
+ " because the computed capacity exceeded the collection's maximum"
+ }
+ TryReserveErrorKind::AllocError { .. } => {
+ " because the memory allocator returned a error"
+ }
+ };
+ fmt.write_str(reason)
+ }
+}
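+
+// A minimal caller-side sketch (assuming the stable `Vec::try_reserve`, which
+// returns this error type): an impossibly large request surfaces as an error
+// instead of aborting the process.
+#[cfg(test)]
+fn _try_reserve_error_sketch() {
+    let mut buf: crate::vec::Vec<u8> = crate::vec::Vec::new();
+    // `usize::MAX` bytes can never be allocated, so this must fail
+    // (with `TryReserveErrorKind::CapacityOverflow`).
+    assert!(buf.try_reserve(usize::MAX).is_err());
+}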
+
+/// An intermediate trait for specialization of `Extend`.
+#[doc(hidden)]
+trait SpecExtend<I: IntoIterator> {
+ /// Extends `self` with the contents of the given iterator.
+ fn spec_extend(&mut self, iter: I);
+}
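+
+// Sketch of how this hook is typically used (hedged; the concrete impls live
+// next to each collection): a blanket, element-by-element default plus an
+// override for the collection's own type, which the linked list test above
+// observes as "specializes to `append`".
+//
+//     impl<T, I: IntoIterator<Item = T>> SpecExtend<I> for LinkedList<T> {
+//         default fn spec_extend(&mut self, iter: I) {
+//             iter.into_iter().for_each(|item| self.push_back(item));
+//         }
+//     }
+//     impl<T> SpecExtend<LinkedList<T>> for LinkedList<T> {
+//         fn spec_extend(&mut self, ref mut other: LinkedList<T>) {
+//             self.append(other);
+//         }
+//     }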
diff --git a/library/alloc/src/collections/vec_deque/drain.rs b/library/alloc/src/collections/vec_deque/drain.rs
new file mode 100644
index 000000000..05f94da6d
--- /dev/null
+++ b/library/alloc/src/collections/vec_deque/drain.rs
@@ -0,0 +1,142 @@
+use core::iter::FusedIterator;
+use core::ptr::{self, NonNull};
+use core::{fmt, mem};
+
+use crate::alloc::{Allocator, Global};
+
+use super::{count, Iter, VecDeque};
+
+/// A draining iterator over the elements of a `VecDeque`.
+///
+/// This `struct` is created by the [`drain`] method on [`VecDeque`]. See its
+/// documentation for more.
+///
+/// [`drain`]: VecDeque::drain
+#[stable(feature = "drain", since = "1.6.0")]
+pub struct Drain<
+ 'a,
+ T: 'a,
+ #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
+> {
+ after_tail: usize,
+ after_head: usize,
+ iter: Iter<'a, T>,
+ deque: NonNull<VecDeque<T, A>>,
+}
+
+impl<'a, T, A: Allocator> Drain<'a, T, A> {
+ pub(super) unsafe fn new(
+ after_tail: usize,
+ after_head: usize,
+ iter: Iter<'a, T>,
+ deque: NonNull<VecDeque<T, A>>,
+ ) -> Self {
+ Drain { after_tail, after_head, iter, deque }
+ }
+}
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<T: fmt::Debug, A: Allocator> fmt::Debug for Drain<'_, T, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("Drain")
+ .field(&self.after_tail)
+ .field(&self.after_head)
+ .field(&self.iter)
+ .finish()
+ }
+}
+
+#[stable(feature = "drain", since = "1.6.0")]
+unsafe impl<T: Sync, A: Allocator + Sync> Sync for Drain<'_, T, A> {}
+#[stable(feature = "drain", since = "1.6.0")]
+unsafe impl<T: Send, A: Allocator + Send> Send for Drain<'_, T, A> {}
+
+#[stable(feature = "drain", since = "1.6.0")]
+impl<T, A: Allocator> Drop for Drain<'_, T, A> {
+ fn drop(&mut self) {
+ struct DropGuard<'r, 'a, T, A: Allocator>(&'r mut Drain<'a, T, A>);
+
+ impl<'r, 'a, T, A: Allocator> Drop for DropGuard<'r, 'a, T, A> {
+ fn drop(&mut self) {
+ self.0.for_each(drop);
+
+ let source_deque = unsafe { self.0.deque.as_mut() };
+
+ // T = source_deque_tail; H = source_deque_head; t = drain_tail; h = drain_head
+ //
+ // T t h H
+ // [. . . o o x x o o . . .]
+ //
+ let orig_tail = source_deque.tail;
+ let drain_tail = source_deque.head;
+ let drain_head = self.0.after_tail;
+ let orig_head = self.0.after_head;
+
+ let tail_len = count(orig_tail, drain_tail, source_deque.cap());
+ let head_len = count(drain_head, orig_head, source_deque.cap());
+
+ // Restore the original head value
+ source_deque.head = orig_head;
+
+ match (tail_len, head_len) {
+ (0, 0) => {
+ source_deque.head = 0;
+ source_deque.tail = 0;
+ }
+ (0, _) => {
+ source_deque.tail = drain_head;
+ }
+ (_, 0) => {
+ source_deque.head = drain_tail;
+ }
+ _ => unsafe {
+ if tail_len <= head_len {
+ source_deque.tail = source_deque.wrap_sub(drain_head, tail_len);
+ source_deque.wrap_copy(source_deque.tail, orig_tail, tail_len);
+ } else {
+ source_deque.head = source_deque.wrap_add(drain_tail, head_len);
+ source_deque.wrap_copy(drain_tail, drain_head, head_len);
+ }
+ },
+ }
+ }
+ }
+
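+        // Panic-safety: each element is dropped under a fresh `DropGuard`. If
+        // `drop(item)` unwinds, the guard's own destructor exhausts the rest
+        // of the iterator and repairs the deque's head/tail; on the normal
+        // path the guard is forgotten so the cleanup runs exactly once, via
+        // the trailing `DropGuard(self)` below.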
+ while let Some(item) = self.next() {
+ let guard = DropGuard(self);
+ drop(item);
+ mem::forget(guard);
+ }
+
+ DropGuard(self);
+ }
+}
+
+#[stable(feature = "drain", since = "1.6.0")]
+impl<T, A: Allocator> Iterator for Drain<'_, T, A> {
+ type Item = T;
+
+ #[inline]
+ fn next(&mut self) -> Option<T> {
+ self.iter.next().map(|elt| unsafe { ptr::read(elt) })
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+}
+
+#[stable(feature = "drain", since = "1.6.0")]
+impl<T, A: Allocator> DoubleEndedIterator for Drain<'_, T, A> {
+ #[inline]
+ fn next_back(&mut self) -> Option<T> {
+ self.iter.next_back().map(|elt| unsafe { ptr::read(elt) })
+ }
+}
+
+#[stable(feature = "drain", since = "1.6.0")]
+impl<T, A: Allocator> ExactSizeIterator for Drain<'_, T, A> {}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T, A: Allocator> FusedIterator for Drain<'_, T, A> {}
diff --git a/library/alloc/src/collections/vec_deque/into_iter.rs b/library/alloc/src/collections/vec_deque/into_iter.rs
new file mode 100644
index 000000000..55f6138cd
--- /dev/null
+++ b/library/alloc/src/collections/vec_deque/into_iter.rs
@@ -0,0 +1,72 @@
+use core::fmt;
+use core::iter::{FusedIterator, TrustedLen};
+
+use crate::alloc::{Allocator, Global};
+
+use super::VecDeque;
+
+/// An owning iterator over the elements of a `VecDeque`.
+///
+/// This `struct` is created by the [`into_iter`] method on [`VecDeque`]
+/// (provided by the [`IntoIterator`] trait). See its documentation for more.
+///
+/// [`into_iter`]: VecDeque::into_iter
+/// [`IntoIterator`]: core::iter::IntoIterator
+#[derive(Clone)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct IntoIter<
+ T,
+ #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
+> {
+ inner: VecDeque<T, A>,
+}
+
+impl<T, A: Allocator> IntoIter<T, A> {
+ pub(super) fn new(inner: VecDeque<T, A>) -> Self {
+ IntoIter { inner }
+ }
+}
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<T: fmt::Debug, A: Allocator> fmt::Debug for IntoIter<T, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("IntoIter").field(&self.inner).finish()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator> Iterator for IntoIter<T, A> {
+ type Item = T;
+
+ #[inline]
+ fn next(&mut self) -> Option<T> {
+ self.inner.pop_front()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let len = self.inner.len();
+ (len, Some(len))
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
+ #[inline]
+ fn next_back(&mut self) -> Option<T> {
+ self.inner.pop_back()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator> ExactSizeIterator for IntoIter<T, A> {
+ fn is_empty(&self) -> bool {
+ self.inner.is_empty()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T, A: Allocator> FusedIterator for IntoIter<T, A> {}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<T, A: Allocator> TrustedLen for IntoIter<T, A> {}
diff --git a/library/alloc/src/collections/vec_deque/iter.rs b/library/alloc/src/collections/vec_deque/iter.rs
new file mode 100644
index 000000000..e696d7ed6
--- /dev/null
+++ b/library/alloc/src/collections/vec_deque/iter.rs
@@ -0,0 +1,219 @@
+use core::fmt;
+use core::iter::{FusedIterator, TrustedLen, TrustedRandomAccess, TrustedRandomAccessNoCoerce};
+use core::mem::MaybeUninit;
+use core::ops::Try;
+
+use super::{count, wrap_index, RingSlices};
+
+/// An iterator over the elements of a `VecDeque`.
+///
+/// This `struct` is created by the [`iter`] method on [`super::VecDeque`]. See its
+/// documentation for more.
+///
+/// [`iter`]: super::VecDeque::iter
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Iter<'a, T: 'a> {
+ ring: &'a [MaybeUninit<T>],
+ tail: usize,
+ head: usize,
+}
+
+impl<'a, T> Iter<'a, T> {
+ pub(super) fn new(ring: &'a [MaybeUninit<T>], tail: usize, head: usize) -> Self {
+ Iter { ring, tail, head }
+ }
+}
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
+ // Safety:
+ // - `self.head` and `self.tail` in a ring buffer are always valid indices.
+ // - `RingSlices::ring_slices` guarantees that the slices split according to `self.head` and `self.tail` are initialized.
+ unsafe {
+ f.debug_tuple("Iter")
+ .field(&MaybeUninit::slice_assume_init_ref(front))
+ .field(&MaybeUninit::slice_assume_init_ref(back))
+ .finish()
+ }
+ }
+}
+
+// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Clone for Iter<'_, T> {
+ fn clone(&self) -> Self {
+ Iter { ring: self.ring, tail: self.tail, head: self.head }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> Iterator for Iter<'a, T> {
+ type Item = &'a T;
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a T> {
+ if self.tail == self.head {
+ return None;
+ }
+ let tail = self.tail;
+ self.tail = wrap_index(self.tail.wrapping_add(1), self.ring.len());
+ // Safety:
+ // - `self.tail` in a ring buffer is always a valid index.
+ // - `self.head` and `self.tail` equality is checked above.
+ unsafe { Some(self.ring.get_unchecked(tail).assume_init_ref()) }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let len = count(self.tail, self.head, self.ring.len());
+ (len, Some(len))
+ }
+
+ fn fold<Acc, F>(self, mut accum: Acc, mut f: F) -> Acc
+ where
+ F: FnMut(Acc, Self::Item) -> Acc,
+ {
+ let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
+ // Safety:
+ // - `self.head` and `self.tail` in a ring buffer are always valid indices.
+ // - `RingSlices::ring_slices` guarantees that the slices split according to `self.head` and `self.tail` are initialized.
+ unsafe {
+ accum = MaybeUninit::slice_assume_init_ref(front).iter().fold(accum, &mut f);
+ MaybeUninit::slice_assume_init_ref(back).iter().fold(accum, &mut f)
+ }
+ }
+
+ fn try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> R,
+ R: Try<Output = B>,
+ {
+ let (mut iter, final_res);
+ if self.tail <= self.head {
+ // Safety: single slice self.ring[self.tail..self.head] is initialized.
+ iter = unsafe { MaybeUninit::slice_assume_init_ref(&self.ring[self.tail..self.head]) }
+ .iter();
+ final_res = iter.try_fold(init, &mut f);
+ } else {
+ // Safety: two slices: self.ring[self.tail..], self.ring[..self.head] both are initialized.
+ let (front, back) = self.ring.split_at(self.tail);
+
+ let mut back_iter = unsafe { MaybeUninit::slice_assume_init_ref(back).iter() };
+ let res = back_iter.try_fold(init, &mut f);
+ let len = self.ring.len();
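+            // The ring capacity is always a power of two, so `& (len - 1)`
+            // re-wraps the recomputed tail index without a division.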
+ self.tail = (self.ring.len() - back_iter.len()) & (len - 1);
+ iter = unsafe { MaybeUninit::slice_assume_init_ref(&front[..self.head]).iter() };
+ final_res = iter.try_fold(res?, &mut f);
+ }
+ self.tail = self.head - iter.len();
+ final_res
+ }
+
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ if n >= count(self.tail, self.head, self.ring.len()) {
+ self.tail = self.head;
+ None
+ } else {
+ self.tail = wrap_index(self.tail.wrapping_add(n), self.ring.len());
+ self.next()
+ }
+ }
+
+ #[inline]
+ fn last(mut self) -> Option<&'a T> {
+ self.next_back()
+ }
+
+ #[inline]
+ unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item {
+ // Safety: The TrustedRandomAccess contract requires that callers only pass an index
+ // that is in bounds.
+ unsafe {
+ let idx = wrap_index(self.tail.wrapping_add(idx), self.ring.len());
+ self.ring.get_unchecked(idx).assume_init_ref()
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> DoubleEndedIterator for Iter<'a, T> {
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a T> {
+ if self.tail == self.head {
+ return None;
+ }
+ self.head = wrap_index(self.head.wrapping_sub(1), self.ring.len());
+ // Safety:
+ // - `self.head` in a ring buffer is always a valid index.
+ // - `self.head` and `self.tail` equality is checked above.
+ unsafe { Some(self.ring.get_unchecked(self.head).assume_init_ref()) }
+ }
+
+ fn rfold<Acc, F>(self, mut accum: Acc, mut f: F) -> Acc
+ where
+ F: FnMut(Acc, Self::Item) -> Acc,
+ {
+ let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
+ // Safety:
+ // - `self.head` and `self.tail` in a ring buffer are always valid indices.
+ // - `RingSlices::ring_slices` guarantees that the slices split according to `self.head` and `self.tail` are initialized.
+ unsafe {
+ accum = MaybeUninit::slice_assume_init_ref(back).iter().rfold(accum, &mut f);
+ MaybeUninit::slice_assume_init_ref(front).iter().rfold(accum, &mut f)
+ }
+ }
+
+ fn try_rfold<B, F, R>(&mut self, init: B, mut f: F) -> R
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> R,
+ R: Try<Output = B>,
+ {
+ let (mut iter, final_res);
+ if self.tail <= self.head {
+ // Safety: single slice self.ring[self.tail..self.head] is initialized.
+ iter = unsafe {
+ MaybeUninit::slice_assume_init_ref(&self.ring[self.tail..self.head]).iter()
+ };
+ final_res = iter.try_rfold(init, &mut f);
+ } else {
+ // Safety: two slices: self.ring[self.tail..], self.ring[..self.head] both are initialized.
+ let (front, back) = self.ring.split_at(self.tail);
+
+ let mut front_iter =
+ unsafe { MaybeUninit::slice_assume_init_ref(&front[..self.head]).iter() };
+ let res = front_iter.try_rfold(init, &mut f);
+ self.head = front_iter.len();
+ iter = unsafe { MaybeUninit::slice_assume_init_ref(back).iter() };
+ final_res = iter.try_rfold(res?, &mut f);
+ }
+ self.head = self.tail + iter.len();
+ final_res
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> ExactSizeIterator for Iter<'_, T> {
+ fn is_empty(&self) -> bool {
+ self.head == self.tail
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T> FusedIterator for Iter<'_, T> {}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<T> TrustedLen for Iter<'_, T> {}
+
+#[doc(hidden)]
+#[unstable(feature = "trusted_random_access", issue = "none")]
+unsafe impl<T> TrustedRandomAccess for Iter<'_, T> {}
+
+#[doc(hidden)]
+#[unstable(feature = "trusted_random_access", issue = "none")]
+unsafe impl<T> TrustedRandomAccessNoCoerce for Iter<'_, T> {
+ const MAY_HAVE_SIDE_EFFECT: bool = false;
+}
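+
+// A quick sanity sketch (hedged) of the wrapping arithmetic this iterator
+// leans on; it mirrors the `wrap_index`/`count` helpers from the parent module
+// rather than calling them.
+#[cfg(test)]
+fn _ring_index_sketch() {
+    let cap = 8usize; // ring capacities are always powers of two
+    // tail = 6, head = 2 describes four elements occupying slots 6, 7, 0, 1.
+    let (tail, head) = (6usize, 2usize);
+    assert_eq!(head.wrapping_sub(tail) & (cap - 1), 4); // what `count` computes
+    assert_eq!(tail.wrapping_add(3) & (cap - 1), 1); // what `wrap_index` computes
+}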
diff --git a/library/alloc/src/collections/vec_deque/iter_mut.rs b/library/alloc/src/collections/vec_deque/iter_mut.rs
new file mode 100644
index 000000000..b78c0d5e1
--- /dev/null
+++ b/library/alloc/src/collections/vec_deque/iter_mut.rs
@@ -0,0 +1,162 @@
+use core::fmt;
+use core::iter::{FusedIterator, TrustedLen, TrustedRandomAccess, TrustedRandomAccessNoCoerce};
+use core::marker::PhantomData;
+
+use super::{count, wrap_index, RingSlices};
+
+/// A mutable iterator over the elements of a `VecDeque`.
+///
+/// This `struct` is created by the [`iter_mut`] method on [`super::VecDeque`]. See its
+/// documentation for more.
+///
+/// [`iter_mut`]: super::VecDeque::iter_mut
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct IterMut<'a, T: 'a> {
+ // Internal safety invariant: the entire slice is dereferenceable.
+ ring: *mut [T],
+ tail: usize,
+ head: usize,
+ phantom: PhantomData<&'a mut [T]>,
+}
+
+impl<'a, T> IterMut<'a, T> {
+ pub(super) unsafe fn new(
+ ring: *mut [T],
+ tail: usize,
+ head: usize,
+ phantom: PhantomData<&'a mut [T]>,
+ ) -> Self {
+ IterMut { ring, tail, head, phantom }
+ }
+}
+
+// SAFETY: we do nothing thread-local and there is no interior mutability,
+// so the usual structural `Send`/`Sync` apply.
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<T: Send> Send for IterMut<'_, T> {}
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<T: Sync> Sync for IterMut<'_, T> {}
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<T: fmt::Debug> fmt::Debug for IterMut<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
+ // SAFETY: these are the elements we have not handed out yet, so aliasing is fine.
+ // The `IterMut` invariant also ensures everything is dereferenceable.
+ let (front, back) = unsafe { (&*front, &*back) };
+ f.debug_tuple("IterMut").field(&front).field(&back).finish()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> Iterator for IterMut<'a, T> {
+ type Item = &'a mut T;
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a mut T> {
+ if self.tail == self.head {
+ return None;
+ }
+ let tail = self.tail;
+ self.tail = wrap_index(self.tail.wrapping_add(1), self.ring.len());
+
+ unsafe {
+ let elem = self.ring.get_unchecked_mut(tail);
+ Some(&mut *elem)
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let len = count(self.tail, self.head, self.ring.len());
+ (len, Some(len))
+ }
+
+ fn fold<Acc, F>(self, mut accum: Acc, mut f: F) -> Acc
+ where
+ F: FnMut(Acc, Self::Item) -> Acc,
+ {
+ let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
+ // SAFETY: these are the elements we have not handed out yet, so aliasing is fine.
+ // The `IterMut` invariant also ensures everything is dereferenceable.
+ let (front, back) = unsafe { (&mut *front, &mut *back) };
+ accum = front.iter_mut().fold(accum, &mut f);
+ back.iter_mut().fold(accum, &mut f)
+ }
+
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ if n >= count(self.tail, self.head, self.ring.len()) {
+ self.tail = self.head;
+ None
+ } else {
+ self.tail = wrap_index(self.tail.wrapping_add(n), self.ring.len());
+ self.next()
+ }
+ }
+
+ #[inline]
+ fn last(mut self) -> Option<&'a mut T> {
+ self.next_back()
+ }
+
+ #[inline]
+ unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item {
+ // Safety: The TrustedRandomAccess contract requires that callers only pass an index
+ // that is in bounds.
+ unsafe {
+ let idx = wrap_index(self.tail.wrapping_add(idx), self.ring.len());
+ &mut *self.ring.get_unchecked_mut(idx)
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> DoubleEndedIterator for IterMut<'a, T> {
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a mut T> {
+ if self.tail == self.head {
+ return None;
+ }
+ self.head = wrap_index(self.head.wrapping_sub(1), self.ring.len());
+
+ unsafe {
+ let elem = self.ring.get_unchecked_mut(self.head);
+ Some(&mut *elem)
+ }
+ }
+
+ fn rfold<Acc, F>(self, mut accum: Acc, mut f: F) -> Acc
+ where
+ F: FnMut(Acc, Self::Item) -> Acc,
+ {
+ let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
+ // SAFETY: these are the elements we have not handed out yet, so aliasing is fine.
+ // The `IterMut` invariant also ensures everything is dereferenceable.
+ let (front, back) = unsafe { (&mut *front, &mut *back) };
+ accum = back.iter_mut().rfold(accum, &mut f);
+ front.iter_mut().rfold(accum, &mut f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> ExactSizeIterator for IterMut<'_, T> {
+ fn is_empty(&self) -> bool {
+ self.head == self.tail
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T> FusedIterator for IterMut<'_, T> {}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<T> TrustedLen for IterMut<'_, T> {}
+
+#[doc(hidden)]
+#[unstable(feature = "trusted_random_access", issue = "none")]
+unsafe impl<T> TrustedRandomAccess for IterMut<'_, T> {}
+
+#[doc(hidden)]
+#[unstable(feature = "trusted_random_access", issue = "none")]
+unsafe impl<T> TrustedRandomAccessNoCoerce for IterMut<'_, T> {
+ const MAY_HAVE_SIDE_EFFECT: bool = false;
+}
diff --git a/library/alloc/src/collections/vec_deque/macros.rs b/library/alloc/src/collections/vec_deque/macros.rs
new file mode 100644
index 000000000..5c7913073
--- /dev/null
+++ b/library/alloc/src/collections/vec_deque/macros.rs
@@ -0,0 +1,19 @@
+macro_rules! __impl_slice_eq1 {
+ ([$($vars:tt)*] $lhs:ty, $rhs:ty, $($constraints:tt)*) => {
+ #[stable(feature = "vec_deque_partial_eq_slice", since = "1.17.0")]
+ impl<T, U, A: Allocator, $($vars)*> PartialEq<$rhs> for $lhs
+ where
+ T: PartialEq<U>,
+ $($constraints)*
+ {
+ fn eq(&self, other: &$rhs) -> bool {
+ if self.len() != other.len() {
+ return false;
+ }
+ let (sa, sb) = self.as_slices();
+ let (oa, ob) = other[..].split_at(sa.len());
+ sa == oa && sb == ob
+ }
+ }
+ }
+}
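+
+// The strategy above in concrete terms (a small sketch, not part of the
+// macro): a `push_front` on a fresh deque wraps the tail, so the elements live
+// in two slices, and equality is checked piecewise against the flat side.
+#[cfg(test)]
+fn _slice_eq_sketch() {
+    use crate::collections::VecDeque;
+    let mut deq: VecDeque<i32> = VecDeque::new();
+    deq.push_back(1);
+    deq.push_back(2);
+    deq.push_front(0); // tail wraps to the end of the buffer
+    assert_eq!(deq.as_slices(), (&[0][..], &[1, 2][..]));
+    assert_eq!(deq, [0, 1, 2]); // front slice first, then back slice
+}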
diff --git a/library/alloc/src/collections/vec_deque/mod.rs b/library/alloc/src/collections/vec_deque/mod.rs
new file mode 100644
index 000000000..4d895d837
--- /dev/null
+++ b/library/alloc/src/collections/vec_deque/mod.rs
@@ -0,0 +1,3137 @@
+//! A double-ended queue (deque) implemented with a growable ring buffer.
+//!
+//! This queue has *O*(1) amortized inserts and removals from both ends of the
+//! container. It also has *O*(1) indexing like a vector. The contained elements
+//! are not required to be copyable, and the queue will be sendable if the
+//! contained type is sendable.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use core::cmp::{self, Ordering};
+use core::fmt;
+use core::hash::{Hash, Hasher};
+use core::iter::{repeat_with, FromIterator};
+use core::marker::PhantomData;
+use core::mem::{self, ManuallyDrop, MaybeUninit};
+use core::ops::{Index, IndexMut, Range, RangeBounds};
+use core::ptr::{self, NonNull};
+use core::slice;
+
+use crate::alloc::{Allocator, Global};
+use crate::collections::TryReserveError;
+use crate::collections::TryReserveErrorKind;
+use crate::raw_vec::RawVec;
+use crate::vec::Vec;
+
+#[macro_use]
+mod macros;
+
+#[stable(feature = "drain", since = "1.6.0")]
+pub use self::drain::Drain;
+
+mod drain;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::iter_mut::IterMut;
+
+mod iter_mut;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::into_iter::IntoIter;
+
+mod into_iter;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::iter::Iter;
+
+mod iter;
+
+use self::pair_slices::PairSlices;
+
+mod pair_slices;
+
+use self::ring_slices::RingSlices;
+
+mod ring_slices;
+
+use self::spec_extend::SpecExtend;
+
+mod spec_extend;
+
+#[cfg(test)]
+mod tests;
+
+const INITIAL_CAPACITY: usize = 7; // 2^3 - 1
+const MINIMUM_CAPACITY: usize = 1; // 2 - 1
+
+const MAXIMUM_ZST_CAPACITY: usize = 1 << (usize::BITS - 1); // Largest possible power of two
+
+/// A double-ended queue implemented with a growable ring buffer.
+///
+/// The "default" usage of this type as a queue is to use [`push_back`] to add to
+/// the queue, and [`pop_front`] to remove from the queue. [`extend`] and [`append`]
+/// push onto the back in this manner, and iterating over `VecDeque` goes front
+/// to back.
+///
+/// A `VecDeque` with a known list of items can be initialized from an array:
+///
+/// ```
+/// use std::collections::VecDeque;
+///
+/// let deq = VecDeque::from([-1, 0, 1]);
+/// ```
+///
+/// Since `VecDeque` is a ring buffer, its elements are not necessarily contiguous
+/// in memory. If you want to access the elements as a single slice, such as for
+/// efficient sorting, you can use [`make_contiguous`]. It rotates the `VecDeque`
+/// so that its elements do not wrap, and returns a mutable slice to the
+/// now-contiguous element sequence.
+///
+/// [`push_back`]: VecDeque::push_back
+/// [`pop_front`]: VecDeque::pop_front
+/// [`extend`]: VecDeque::extend
+/// [`append`]: VecDeque::append
+/// [`make_contiguous`]: VecDeque::make_contiguous
+#[cfg_attr(not(test), rustc_diagnostic_item = "VecDeque")]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_insignificant_dtor]
+pub struct VecDeque<
+ T,
+ #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
+> {
+ // tail and head are pointers into the buffer. Tail always points
+ // to the first element that could be read, Head always points
+ // to where data should be written.
+ // If tail == head the buffer is empty. The length of the ringbuffer
+ // is defined as the distance between the two.
+ tail: usize,
+ head: usize,
+ buf: RawVec<T, A>,
+}
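+
+// Concretely: with `cap = 8`, `tail = 6`, `head = 2`, the buffer holds four
+// elements at slots 6, 7, 0 and 1, and `len == (head - tail) & (cap - 1) == 4`.
+// Since `tail == head` encodes "empty", one slot always stays unused: a full
+// buffer has `len == cap() - 1` (see `is_full` below).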
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Clone, A: Allocator + Clone> Clone for VecDeque<T, A> {
+ fn clone(&self) -> Self {
+ let mut deq = Self::with_capacity_in(self.len(), self.allocator().clone());
+ deq.extend(self.iter().cloned());
+ deq
+ }
+
+ fn clone_from(&mut self, other: &Self) {
+ self.truncate(other.len());
+
+ let mut iter = PairSlices::from(self, other);
+ while let Some((dst, src)) = iter.next() {
+ dst.clone_from_slice(&src);
+ }
+
+ if iter.has_remainder() {
+ for remainder in iter.remainder() {
+ self.extend(remainder.iter().cloned());
+ }
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<#[may_dangle] T, A: Allocator> Drop for VecDeque<T, A> {
+ fn drop(&mut self) {
+ /// Runs the destructor for all items in the slice when it gets dropped (normally or
+ /// during unwinding).
+ struct Dropper<'a, T>(&'a mut [T]);
+
+ impl<'a, T> Drop for Dropper<'a, T> {
+ fn drop(&mut self) {
+ unsafe {
+ ptr::drop_in_place(self.0);
+ }
+ }
+ }
+
+ let (front, back) = self.as_mut_slices();
+ unsafe {
+ let _back_dropper = Dropper(back);
+ // use drop for [T]
+ ptr::drop_in_place(front);
+ }
+ // RawVec handles deallocation
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Default for VecDeque<T> {
+ /// Creates an empty deque.
+ #[inline]
+ fn default() -> VecDeque<T> {
+ VecDeque::new()
+ }
+}
+
+impl<T, A: Allocator> VecDeque<T, A> {
+ /// Marginally more convenient
+ #[inline]
+ fn ptr(&self) -> *mut T {
+ self.buf.ptr()
+ }
+
+ /// Marginally more convenient
+ #[inline]
+ fn cap(&self) -> usize {
+ if mem::size_of::<T>() == 0 {
+ // For zero sized types, we are always at maximum capacity
+ MAXIMUM_ZST_CAPACITY
+ } else {
+ self.buf.capacity()
+ }
+ }
+
+    /// Turns `ptr` into a slice. Since the elements of the backing buffer may be
+    /// uninitialized, we return a slice of [`MaybeUninit<T>`].
+ ///
+ /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and
+ /// incorrect usage of this method.
+ ///
+ /// [zeroed]: mem::MaybeUninit::zeroed
+ #[inline]
+ unsafe fn buffer_as_slice(&self) -> &[MaybeUninit<T>] {
+ unsafe { slice::from_raw_parts(self.ptr() as *mut MaybeUninit<T>, self.cap()) }
+ }
+
+    /// Turns `ptr` into a mutable slice. Since the elements of the backing buffer
+    /// may be uninitialized, we return a slice of [`MaybeUninit<T>`].
+ ///
+ /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and
+ /// incorrect usage of this method.
+ ///
+ /// [zeroed]: mem::MaybeUninit::zeroed
+ #[inline]
+ unsafe fn buffer_as_mut_slice(&mut self) -> &mut [MaybeUninit<T>] {
+ unsafe { slice::from_raw_parts_mut(self.ptr() as *mut MaybeUninit<T>, self.cap()) }
+ }
+
+ /// Moves an element out of the buffer
+ #[inline]
+ unsafe fn buffer_read(&mut self, off: usize) -> T {
+ unsafe { ptr::read(self.ptr().add(off)) }
+ }
+
+ /// Writes an element into the buffer, moving it.
+ #[inline]
+ unsafe fn buffer_write(&mut self, off: usize, value: T) {
+ unsafe {
+ ptr::write(self.ptr().add(off), value);
+ }
+ }
+
+ /// Returns `true` if the buffer is at full capacity.
+ #[inline]
+ fn is_full(&self) -> bool {
+ self.cap() - self.len() == 1
+ }
+
+ /// Returns the index in the underlying buffer for a given logical element
+ /// index.
+ #[inline]
+ fn wrap_index(&self, idx: usize) -> usize {
+ wrap_index(idx, self.cap())
+ }
+
+ /// Returns the index in the underlying buffer for a given logical element
+ /// index + addend.
+ #[inline]
+ fn wrap_add(&self, idx: usize, addend: usize) -> usize {
+ wrap_index(idx.wrapping_add(addend), self.cap())
+ }
+
+ /// Returns the index in the underlying buffer for a given logical element
+ /// index - subtrahend.
+ #[inline]
+ fn wrap_sub(&self, idx: usize, subtrahend: usize) -> usize {
+ wrap_index(idx.wrapping_sub(subtrahend), self.cap())
+ }
+
+    /// Copies a contiguous block of memory `len` long from `src` to `dst`.
+ #[inline]
+ unsafe fn copy(&self, dst: usize, src: usize, len: usize) {
+ debug_assert!(
+ dst + len <= self.cap(),
+ "cpy dst={} src={} len={} cap={}",
+ dst,
+ src,
+ len,
+ self.cap()
+ );
+ debug_assert!(
+ src + len <= self.cap(),
+ "cpy dst={} src={} len={} cap={}",
+ dst,
+ src,
+ len,
+ self.cap()
+ );
+ unsafe {
+ ptr::copy(self.ptr().add(src), self.ptr().add(dst), len);
+ }
+ }
+
+    /// Copies a contiguous block of memory `len` long from `src` to `dst`.
+ #[inline]
+ unsafe fn copy_nonoverlapping(&self, dst: usize, src: usize, len: usize) {
+ debug_assert!(
+ dst + len <= self.cap(),
+ "cno dst={} src={} len={} cap={}",
+ dst,
+ src,
+ len,
+ self.cap()
+ );
+ debug_assert!(
+ src + len <= self.cap(),
+ "cno dst={} src={} len={} cap={}",
+ dst,
+ src,
+ len,
+ self.cap()
+ );
+ unsafe {
+ ptr::copy_nonoverlapping(self.ptr().add(src), self.ptr().add(dst), len);
+ }
+ }
+
+    /// Copies a potentially wrapping block of memory `len` long from `src` to `dst`.
+    /// `abs(dst - src) + len` must be no larger than `cap()` (there must be at
+    /// most one continuous overlapping region between `src` and `dst`).
+ unsafe fn wrap_copy(&self, dst: usize, src: usize, len: usize) {
+ #[allow(dead_code)]
+ fn diff(a: usize, b: usize) -> usize {
+ if a <= b { b - a } else { a - b }
+ }
+ debug_assert!(
+ cmp::min(diff(dst, src), self.cap() - diff(dst, src)) + len <= self.cap(),
+ "wrc dst={} src={} len={} cap={}",
+ dst,
+ src,
+ len,
+ self.cap()
+ );
+
+ if src == dst || len == 0 {
+ return;
+ }
+
+ let dst_after_src = self.wrap_sub(dst, src) < len;
+
+ let src_pre_wrap_len = self.cap() - src;
+ let dst_pre_wrap_len = self.cap() - dst;
+ let src_wraps = src_pre_wrap_len < len;
+ let dst_wraps = dst_pre_wrap_len < len;
+
+ match (dst_after_src, src_wraps, dst_wraps) {
+ (_, false, false) => {
+ // src doesn't wrap, dst doesn't wrap
+ //
+ // S . . .
+ // 1 [_ _ A A B B C C _]
+ // 2 [_ _ A A A A B B _]
+ // D . . .
+ //
+ unsafe {
+ self.copy(dst, src, len);
+ }
+ }
+ (false, false, true) => {
+ // dst before src, src doesn't wrap, dst wraps
+ //
+ // S . . .
+ // 1 [A A B B _ _ _ C C]
+ // 2 [A A B B _ _ _ A A]
+ // 3 [B B B B _ _ _ A A]
+ // . . D .
+ //
+ unsafe {
+ self.copy(dst, src, dst_pre_wrap_len);
+ self.copy(0, src + dst_pre_wrap_len, len - dst_pre_wrap_len);
+ }
+ }
+ (true, false, true) => {
+ // src before dst, src doesn't wrap, dst wraps
+ //
+ // S . . .
+ // 1 [C C _ _ _ A A B B]
+ // 2 [B B _ _ _ A A B B]
+ // 3 [B B _ _ _ A A A A]
+ // . . D .
+ //
+ unsafe {
+ self.copy(0, src + dst_pre_wrap_len, len - dst_pre_wrap_len);
+ self.copy(dst, src, dst_pre_wrap_len);
+ }
+ }
+ (false, true, false) => {
+ // dst before src, src wraps, dst doesn't wrap
+ //
+ // . . S .
+ // 1 [C C _ _ _ A A B B]
+ // 2 [C C _ _ _ B B B B]
+ // 3 [C C _ _ _ B B C C]
+ // D . . .
+ //
+ unsafe {
+ self.copy(dst, src, src_pre_wrap_len);
+ self.copy(dst + src_pre_wrap_len, 0, len - src_pre_wrap_len);
+ }
+ }
+ (true, true, false) => {
+ // src before dst, src wraps, dst doesn't wrap
+ //
+ // . . S .
+ // 1 [A A B B _ _ _ C C]
+ // 2 [A A A A _ _ _ C C]
+ // 3 [C C A A _ _ _ C C]
+ // D . . .
+ //
+ unsafe {
+ self.copy(dst + src_pre_wrap_len, 0, len - src_pre_wrap_len);
+ self.copy(dst, src, src_pre_wrap_len);
+ }
+ }
+ (false, true, true) => {
+ // dst before src, src wraps, dst wraps
+ //
+ // . . . S .
+ // 1 [A B C D _ E F G H]
+ // 2 [A B C D _ E G H H]
+ // 3 [A B C D _ E G H A]
+ // 4 [B C C D _ E G H A]
+ // . . D . .
+ //
+ debug_assert!(dst_pre_wrap_len > src_pre_wrap_len);
+ let delta = dst_pre_wrap_len - src_pre_wrap_len;
+ unsafe {
+ self.copy(dst, src, src_pre_wrap_len);
+ self.copy(dst + src_pre_wrap_len, 0, delta);
+ self.copy(0, delta, len - dst_pre_wrap_len);
+ }
+ }
+ (true, true, true) => {
+ // src before dst, src wraps, dst wraps
+ //
+ // . . S . .
+ // 1 [A B C D _ E F G H]
+ // 2 [A A B D _ E F G H]
+ // 3 [H A B D _ E F G H]
+ // 4 [H A B D _ E F F G]
+ // . . . D .
+ //
+ debug_assert!(src_pre_wrap_len > dst_pre_wrap_len);
+ let delta = src_pre_wrap_len - dst_pre_wrap_len;
+ unsafe {
+ self.copy(delta, 0, len - src_pre_wrap_len);
+ self.copy(0, self.cap() - delta, delta);
+ self.copy(dst, src, dst_pre_wrap_len);
+ }
+ }
+ }
+ }
+
+ /// Copies all values from `src` to `dst`, wrapping around if needed.
+ /// Assumes capacity is sufficient.
+ #[inline]
+ unsafe fn copy_slice(&mut self, dst: usize, src: &[T]) {
+ debug_assert!(src.len() <= self.cap());
+ let head_room = self.cap() - dst;
+ if src.len() <= head_room {
+ unsafe {
+ ptr::copy_nonoverlapping(src.as_ptr(), self.ptr().add(dst), src.len());
+ }
+ } else {
+ let (left, right) = src.split_at(head_room);
+ unsafe {
+ ptr::copy_nonoverlapping(left.as_ptr(), self.ptr().add(dst), left.len());
+ ptr::copy_nonoverlapping(right.as_ptr(), self.ptr(), right.len());
+ }
+ }
+ }
+
+ /// Writes all values from `iter` to `dst`.
+ ///
+ /// # Safety
+ ///
+ /// Assumes no wrapping around happens.
+ /// Assumes capacity is sufficient.
+ #[inline]
+ unsafe fn write_iter(
+ &mut self,
+ dst: usize,
+ iter: impl Iterator<Item = T>,
+ written: &mut usize,
+ ) {
+ iter.enumerate().for_each(|(i, element)| unsafe {
+ self.buffer_write(dst + i, element);
+ *written += 1;
+ });
+ }
+
+ /// Frobs the head and tail sections around to handle the fact that we
+    /// just reallocated. Unsafe because it trusts `old_capacity`.
+ #[inline]
+ unsafe fn handle_capacity_increase(&mut self, old_capacity: usize) {
+ let new_capacity = self.cap();
+
+ // Move the shortest contiguous section of the ring buffer
+ // T H
+ // [o o o o o o o . ]
+ // T H
+ // A [o o o o o o o . . . . . . . . . ]
+ // H T
+ // [o o . o o o o o ]
+ // T H
+ // B [. . . o o o o o o o . . . . . . ]
+ // H T
+ // [o o o o o . o o ]
+ // H T
+ // C [o o o o o . . . . . . . . . o o ]
+
+ if self.tail <= self.head {
+ // A
+ // Nop
+ } else if self.head < old_capacity - self.tail {
+ // B
+ unsafe {
+ self.copy_nonoverlapping(old_capacity, 0, self.head);
+ }
+ self.head += old_capacity;
+ debug_assert!(self.head > self.tail);
+ } else {
+ // C
+ let new_tail = new_capacity - (old_capacity - self.tail);
+ unsafe {
+ self.copy_nonoverlapping(new_tail, self.tail, old_capacity - self.tail);
+ }
+ self.tail = new_tail;
+ debug_assert!(self.head < self.tail);
+ }
+ debug_assert!(self.head < self.cap());
+ debug_assert!(self.tail < self.cap());
+ debug_assert!(self.cap().count_ones() == 1);
+ }
+}
+
+impl<T> VecDeque<T> {
+ /// Creates an empty deque.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let deque: VecDeque<u32> = VecDeque::new();
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ pub fn new() -> VecDeque<T> {
+ VecDeque::new_in(Global)
+ }
+
+ /// Creates an empty deque with space for at least `capacity` elements.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let deque: VecDeque<u32> = VecDeque::with_capacity(10);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ pub fn with_capacity(capacity: usize) -> VecDeque<T> {
+ Self::with_capacity_in(capacity, Global)
+ }
+}
+
+impl<T, A: Allocator> VecDeque<T, A> {
+ /// Creates an empty deque.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let deque: VecDeque<u32> = VecDeque::new();
+ /// ```
+ #[inline]
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ pub fn new_in(alloc: A) -> VecDeque<T, A> {
+ VecDeque::with_capacity_in(INITIAL_CAPACITY, alloc)
+ }
+
+ /// Creates an empty deque with space for at least `capacity` elements.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let deque: VecDeque<u32> = VecDeque::with_capacity(10);
+ /// ```
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ pub fn with_capacity_in(capacity: usize, alloc: A) -> VecDeque<T, A> {
+ assert!(capacity < 1_usize << usize::BITS - 1, "capacity overflow");
+ // +1 since the ringbuffer always leaves one space empty
+ let cap = cmp::max(capacity + 1, MINIMUM_CAPACITY + 1).next_power_of_two();
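+        // e.g. `with_capacity(10)` computes `max(11, 2).next_power_of_two() == 16`,
+        // leaving 15 usable slots, since `capacity()` reports `cap() - 1`.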
+
+ VecDeque { tail: 0, head: 0, buf: RawVec::with_capacity_in(cap, alloc) }
+ }
+
+ /// Provides a reference to the element at the given index.
+ ///
+ /// Element at index 0 is the front of the queue.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut buf = VecDeque::new();
+ /// buf.push_back(3);
+ /// buf.push_back(4);
+ /// buf.push_back(5);
+ /// assert_eq!(buf.get(1), Some(&4));
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn get(&self, index: usize) -> Option<&T> {
+ if index < self.len() {
+ let idx = self.wrap_add(self.tail, index);
+ unsafe { Some(&*self.ptr().add(idx)) }
+ } else {
+ None
+ }
+ }
+
+ /// Provides a mutable reference to the element at the given index.
+ ///
+ /// Element at index 0 is the front of the queue.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut buf = VecDeque::new();
+ /// buf.push_back(3);
+ /// buf.push_back(4);
+ /// buf.push_back(5);
+ /// if let Some(elem) = buf.get_mut(1) {
+ /// *elem = 7;
+ /// }
+ ///
+ /// assert_eq!(buf[1], 7);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn get_mut(&mut self, index: usize) -> Option<&mut T> {
+ if index < self.len() {
+ let idx = self.wrap_add(self.tail, index);
+ unsafe { Some(&mut *self.ptr().add(idx)) }
+ } else {
+ None
+ }
+ }
+
+ /// Swaps elements at indices `i` and `j`.
+ ///
+ /// `i` and `j` may be equal.
+ ///
+ /// Element at index 0 is the front of the queue.
+ ///
+ /// # Panics
+ ///
+ /// Panics if either index is out of bounds.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut buf = VecDeque::new();
+ /// buf.push_back(3);
+ /// buf.push_back(4);
+ /// buf.push_back(5);
+ /// assert_eq!(buf, [3, 4, 5]);
+ /// buf.swap(0, 2);
+ /// assert_eq!(buf, [5, 4, 3]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn swap(&mut self, i: usize, j: usize) {
+ assert!(i < self.len());
+ assert!(j < self.len());
+ let ri = self.wrap_add(self.tail, i);
+ let rj = self.wrap_add(self.tail, j);
+ unsafe { ptr::swap(self.ptr().add(ri), self.ptr().add(rj)) }
+ }
+
+ /// Returns the number of elements the deque can hold without
+ /// reallocating.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let buf: VecDeque<i32> = VecDeque::with_capacity(10);
+ /// assert!(buf.capacity() >= 10);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn capacity(&self) -> usize {
+ self.cap() - 1
+ }
+
+ /// Reserves the minimum capacity for at least `additional` more elements to be inserted in the
+ /// given deque. Does nothing if the capacity is already sufficient.
+ ///
+    /// Note that the allocator may give the collection more space than it requests. Therefore,
+    /// capacity cannot be relied upon to be precisely minimal. Prefer [`reserve`] if future
+ /// insertions are expected.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity overflows `usize`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut buf: VecDeque<i32> = [1].into();
+ /// buf.reserve_exact(10);
+ /// assert!(buf.capacity() >= 11);
+ /// ```
+ ///
+ /// [`reserve`]: VecDeque::reserve
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn reserve_exact(&mut self, additional: usize) {
+ self.reserve(additional);
+ }
+
+ /// Reserves capacity for at least `additional` more elements to be inserted in the given
+ /// deque. The collection may reserve more space to speculatively avoid frequent reallocations.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity overflows `usize`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut buf: VecDeque<i32> = [1].into();
+ /// buf.reserve(10);
+ /// assert!(buf.capacity() >= 11);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn reserve(&mut self, additional: usize) {
+ let old_cap = self.cap();
+ let used_cap = self.len() + 1;
+ let new_cap = used_cap
+ .checked_add(additional)
+ .and_then(|needed_cap| needed_cap.checked_next_power_of_two())
+ .expect("capacity overflow");
+
+ if new_cap > old_cap {
+ self.buf.reserve_exact(used_cap, new_cap - used_cap);
+ unsafe {
+ self.handle_capacity_increase(old_cap);
+ }
+ }
+ }
+
+ /// Tries to reserve the minimum capacity for at least `additional` more elements to
+ /// be inserted in the given deque. After calling `try_reserve_exact`,
+ /// capacity will be greater than or equal to `self.len() + additional` if
+ /// it returns `Ok(())`. Does nothing if the capacity is already sufficient.
+ ///
+ /// Note that the allocator may give the collection more space than it
+    /// requests. Therefore, capacity cannot be relied upon to be precisely
+ /// minimal. Prefer [`try_reserve`] if future insertions are expected.
+ ///
+ /// [`try_reserve`]: VecDeque::try_reserve
+ ///
+ /// # Errors
+ ///
+ /// If the capacity overflows `usize`, or the allocator reports a failure, then an error
+ /// is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::TryReserveError;
+ /// use std::collections::VecDeque;
+ ///
+ /// fn process_data(data: &[u32]) -> Result<VecDeque<u32>, TryReserveError> {
+ /// let mut output = VecDeque::new();
+ ///
+ /// // Pre-reserve the memory, exiting if we can't
+ /// output.try_reserve_exact(data.len())?;
+ ///
+    ///     // Now we know this can't OOM (out-of-memory) in the middle of our complex work
+ /// output.extend(data.iter().map(|&val| {
+ /// val * 2 + 5 // very complicated
+ /// }));
+ ///
+ /// Ok(output)
+ /// }
+ /// # process_data(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
+ /// ```
+ #[stable(feature = "try_reserve", since = "1.57.0")]
+ pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
+ self.try_reserve(additional)
+ }
+
+ /// Tries to reserve capacity for at least `additional` more elements to be inserted
+ /// in the given deque. The collection may reserve more space to speculatively avoid
+ /// frequent reallocations. After calling `try_reserve`, capacity will be
+ /// greater than or equal to `self.len() + additional` if it returns
+ /// `Ok(())`. Does nothing if capacity is already sufficient.
+ ///
+ /// # Errors
+ ///
+ /// If the capacity overflows `usize`, or the allocator reports a failure, then an error
+ /// is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::TryReserveError;
+ /// use std::collections::VecDeque;
+ ///
+ /// fn process_data(data: &[u32]) -> Result<VecDeque<u32>, TryReserveError> {
+ /// let mut output = VecDeque::new();
+ ///
+ /// // Pre-reserve the memory, exiting if we can't
+ /// output.try_reserve(data.len())?;
+ ///
+ /// // Now we know this can't OOM in the middle of our complex work
+ /// output.extend(data.iter().map(|&val| {
+ /// val * 2 + 5 // very complicated
+ /// }));
+ ///
+ /// Ok(output)
+ /// }
+ /// # process_data(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
+ /// ```
+ #[stable(feature = "try_reserve", since = "1.57.0")]
+ pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
+ let old_cap = self.cap();
+ let used_cap = self.len() + 1;
+ let new_cap = used_cap
+ .checked_add(additional)
+ .and_then(|needed_cap| needed_cap.checked_next_power_of_two())
+ .ok_or(TryReserveErrorKind::CapacityOverflow)?;
+
+ if new_cap > old_cap {
+ self.buf.try_reserve_exact(used_cap, new_cap - used_cap)?;
+ unsafe {
+ self.handle_capacity_increase(old_cap);
+ }
+ }
+ Ok(())
+ }
+
+ /// Shrinks the capacity of the deque as much as possible.
+ ///
+ /// It will drop down as close as possible to the length but the allocator may still inform the
+ /// deque that there is space for a few more elements.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut buf = VecDeque::with_capacity(15);
+ /// buf.extend(0..4);
+ /// assert_eq!(buf.capacity(), 15);
+ /// buf.shrink_to_fit();
+ /// assert!(buf.capacity() >= 4);
+ /// ```
+ #[stable(feature = "deque_extras_15", since = "1.5.0")]
+ pub fn shrink_to_fit(&mut self) {
+ self.shrink_to(0);
+ }
+
+ /// Shrinks the capacity of the deque with a lower bound.
+ ///
+ /// The capacity will remain at least as large as both the length
+ /// and the supplied value.
+ ///
+ /// If the current capacity is less than the lower limit, this is a no-op.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut buf = VecDeque::with_capacity(15);
+ /// buf.extend(0..4);
+ /// assert_eq!(buf.capacity(), 15);
+ /// buf.shrink_to(6);
+ /// assert!(buf.capacity() >= 6);
+ /// buf.shrink_to(0);
+ /// assert!(buf.capacity() >= 4);
+ /// ```
+ #[stable(feature = "shrink_to", since = "1.56.0")]
+ pub fn shrink_to(&mut self, min_capacity: usize) {
+ let min_capacity = cmp::min(min_capacity, self.capacity());
+ // We don't have to worry about an overflow as neither `self.len()` nor `self.capacity()`
+ // can ever be `usize::MAX`. +1 as the ringbuffer always leaves one space empty.
+ let target_cap = cmp::max(cmp::max(min_capacity, self.len()) + 1, MINIMUM_CAPACITY + 1)
+ .next_power_of_two();
+
+ if target_cap < self.cap() {
+ // There are three cases of interest:
+ // All elements are out of desired bounds
+ // Elements are contiguous, and head is out of desired bounds
+ // Elements are discontiguous, and tail is out of desired bounds
+ //
+ // At all other times, element positions are unaffected.
+ //
+ // Indicates that elements at the head should be moved.
+ let head_outside = self.head == 0 || self.head >= target_cap;
+ // Move elements from out of desired bounds (positions after target_cap)
+ if self.tail >= target_cap && head_outside {
+ // T H
+ // [. . . . . . . . o o o o o o o . ]
+ // T H
+ // [o o o o o o o . ]
+ unsafe {
+ self.copy_nonoverlapping(0, self.tail, self.len());
+ }
+ self.head = self.len();
+ self.tail = 0;
+ } else if self.tail != 0 && self.tail < target_cap && head_outside {
+ // T H
+ // [. . . o o o o o o o . . . . . . ]
+ // H T
+ // [o o . o o o o o ]
+ let len = self.wrap_sub(self.head, target_cap);
+ unsafe {
+ self.copy_nonoverlapping(0, target_cap, len);
+ }
+ self.head = len;
+ debug_assert!(self.head < self.tail);
+ } else if self.tail >= target_cap {
+ // H T
+ // [o o o o o . . . . . . . . . o o ]
+ // H T
+ // [o o o o o . o o ]
+ debug_assert!(self.wrap_sub(self.head, 1) < target_cap);
+ let len = self.cap() - self.tail;
+ let new_tail = target_cap - len;
+ unsafe {
+ self.copy_nonoverlapping(new_tail, self.tail, len);
+ }
+ self.tail = new_tail;
+ debug_assert!(self.head < self.tail);
+ }
+
+ self.buf.shrink_to_fit(target_cap);
+
+ debug_assert!(self.head < self.cap());
+ debug_assert!(self.tail < self.cap());
+ debug_assert!(self.cap().count_ones() == 1);
+ }
+ }
+
+ /// Shortens the deque, keeping the first `len` elements and dropping
+ /// the rest.
+ ///
+ /// If `len` is greater than the deque's current length, this has no
+ /// effect.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut buf = VecDeque::new();
+ /// buf.push_back(5);
+ /// buf.push_back(10);
+ /// buf.push_back(15);
+ /// assert_eq!(buf, [5, 10, 15]);
+ /// buf.truncate(1);
+ /// assert_eq!(buf, [5]);
+ /// ```
+ #[stable(feature = "deque_extras", since = "1.16.0")]
+ pub fn truncate(&mut self, len: usize) {
+ /// Runs the destructor for all items in the slice when it gets dropped (normally or
+ /// during unwinding).
+ struct Dropper<'a, T>(&'a mut [T]);
+
+ impl<'a, T> Drop for Dropper<'a, T> {
+ fn drop(&mut self) {
+ unsafe {
+ ptr::drop_in_place(self.0);
+ }
+ }
+ }
+
+ // Safe because:
+ //
+        // * Any slice passed to `drop_in_place` is valid: in the first case,
+        //   returning early on `len > self.len()` ensures `begin <= back.len()`,
+        //   and in the second case the branch condition gives `len <= front.len()`
+ // * The head of the VecDeque is moved before calling `drop_in_place`,
+ // so no value is dropped twice if `drop_in_place` panics
+ unsafe {
+ if len > self.len() {
+ return;
+ }
+ let num_dropped = self.len() - len;
+ let (front, back) = self.as_mut_slices();
+ if len > front.len() {
+ let begin = len - front.len();
+ let drop_back = back.get_unchecked_mut(begin..) as *mut _;
+ self.head = self.wrap_sub(self.head, num_dropped);
+ ptr::drop_in_place(drop_back);
+ } else {
+ let drop_back = back as *mut _;
+ let drop_front = front.get_unchecked_mut(len..) as *mut _;
+ self.head = self.wrap_sub(self.head, num_dropped);
+
+ // Make sure the second half is dropped even when a destructor
+ // in the first one panics.
+ let _back_dropper = Dropper(&mut *drop_back);
+ ptr::drop_in_place(drop_front);
+ }
+ }
+ }
+
+ /// Returns a reference to the underlying allocator.
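+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch; the unstable `allocator_api` feature is required.
+    ///
+    /// ```
+    /// #![feature(allocator_api)]
+    /// use std::collections::VecDeque;
+    ///
+    /// let deque: VecDeque<i32> = VecDeque::new();
+    /// let _allocator = deque.allocator();
+    /// ```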
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[inline]
+ pub fn allocator(&self) -> &A {
+ self.buf.allocator()
+ }
+
+ /// Returns a front-to-back iterator.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut buf = VecDeque::new();
+ /// buf.push_back(5);
+ /// buf.push_back(3);
+ /// buf.push_back(4);
+ /// let b: &[_] = &[&5, &3, &4];
+ /// let c: Vec<&i32> = buf.iter().collect();
+ /// assert_eq!(&c[..], b);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn iter(&self) -> Iter<'_, T> {
+ Iter::new(unsafe { self.buffer_as_slice() }, self.tail, self.head)
+ }
+
+ /// Returns a front-to-back iterator that returns mutable references.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut buf = VecDeque::new();
+ /// buf.push_back(5);
+ /// buf.push_back(3);
+ /// buf.push_back(4);
+ /// for num in buf.iter_mut() {
+ /// *num = *num - 2;
+ /// }
+ /// let b: &[_] = &[&mut 3, &mut 1, &mut 2];
+ /// assert_eq!(&buf.iter_mut().collect::<Vec<&mut i32>>()[..], b);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn iter_mut(&mut self) -> IterMut<'_, T> {
+ // SAFETY: The internal `IterMut` safety invariant is established because the
+ // `ring` we create is a dereferenceable slice for lifetime '_.
+ let ring = ptr::slice_from_raw_parts_mut(self.ptr(), self.cap());
+
+ unsafe { IterMut::new(ring, self.tail, self.head, PhantomData) }
+ }
+
+ /// Returns a pair of slices which contain, in order, the contents of the
+ /// deque.
+ ///
+ /// If [`make_contiguous`] was previously called, all elements of the
+ /// deque will be in the first slice and the second slice will be empty.
+ ///
+ /// [`make_contiguous`]: VecDeque::make_contiguous
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut deque = VecDeque::new();
+ ///
+ /// deque.push_back(0);
+ /// deque.push_back(1);
+ /// deque.push_back(2);
+ ///
+ /// assert_eq!(deque.as_slices(), (&[0, 1, 2][..], &[][..]));
+ ///
+ /// deque.push_front(10);
+ /// deque.push_front(9);
+ ///
+ /// assert_eq!(deque.as_slices(), (&[9, 10][..], &[0, 1, 2][..]));
+ /// ```
+ #[inline]
+ #[stable(feature = "deque_extras_15", since = "1.5.0")]
+ pub fn as_slices(&self) -> (&[T], &[T]) {
+ // Safety:
+ // - `self.head` and `self.tail` in a ring buffer are always valid indices.
+ // - `RingSlices::ring_slices` guarantees that the slices split according to `self.head` and `self.tail` are initialized.
+ unsafe {
+ let buf = self.buffer_as_slice();
+ let (front, back) = RingSlices::ring_slices(buf, self.head, self.tail);
+ (MaybeUninit::slice_assume_init_ref(front), MaybeUninit::slice_assume_init_ref(back))
+ }
+ }
+
+ /// Returns a pair of slices which contain, in order, the contents of the
+ /// deque.
+ ///
+ /// If [`make_contiguous`] was previously called, all elements of the
+ /// deque will be in the first slice and the second slice will be empty.
+ ///
+ /// [`make_contiguous`]: VecDeque::make_contiguous
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut deque = VecDeque::new();
+ ///
+ /// deque.push_back(0);
+ /// deque.push_back(1);
+ ///
+ /// deque.push_front(10);
+ /// deque.push_front(9);
+ ///
+ /// deque.as_mut_slices().0[0] = 42;
+ /// deque.as_mut_slices().1[0] = 24;
+ /// assert_eq!(deque.as_slices(), (&[42, 10][..], &[24, 1][..]));
+ /// ```
+ #[inline]
+ #[stable(feature = "deque_extras_15", since = "1.5.0")]
+ pub fn as_mut_slices(&mut self) -> (&mut [T], &mut [T]) {
+ // Safety:
+ // - `self.head` and `self.tail` in a ring buffer are always valid indices.
+ // - `RingSlices::ring_slices` guarantees that the slices split according to `self.head` and `self.tail` are initialized.
+ unsafe {
+ let head = self.head;
+ let tail = self.tail;
+ let buf = self.buffer_as_mut_slice();
+ let (front, back) = RingSlices::ring_slices(buf, head, tail);
+ (MaybeUninit::slice_assume_init_mut(front), MaybeUninit::slice_assume_init_mut(back))
+ }
+ }
+
+ /// Returns the number of elements in the deque.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut deque = VecDeque::new();
+ /// assert_eq!(deque.len(), 0);
+ /// deque.push_back(1);
+ /// assert_eq!(deque.len(), 1);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn len(&self) -> usize {
+ count(self.tail, self.head, self.cap())
+ }
+
+ /// Returns `true` if the deque is empty.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut deque = VecDeque::new();
+ /// assert!(deque.is_empty());
+ /// deque.push_front(1);
+ /// assert!(!deque.is_empty());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn is_empty(&self) -> bool {
+ self.tail == self.head
+ }
+
+ fn range_tail_head<R>(&self, range: R) -> (usize, usize)
+ where
+ R: RangeBounds<usize>,
+ {
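+        // Translate the logical element range into physical buffer indices by
+        // offsetting from `self.tail` and wrapping around the buffer.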
+ let Range { start, end } = slice::range(range, ..self.len());
+ let tail = self.wrap_add(self.tail, start);
+ let head = self.wrap_add(self.tail, end);
+ (tail, head)
+ }
+
+ /// Creates an iterator that covers the specified range in the deque.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the starting point is greater than the end point or if
+ /// the end point is greater than the length of the deque.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let deque: VecDeque<_> = [1, 2, 3].into();
+ /// let range = deque.range(2..).copied().collect::<VecDeque<_>>();
+ /// assert_eq!(range, [3]);
+ ///
+ /// // A full range covers all contents
+ /// let all = deque.range(..);
+ /// assert_eq!(all.len(), 3);
+ /// ```
+ #[inline]
+ #[stable(feature = "deque_range", since = "1.51.0")]
+ pub fn range<R>(&self, range: R) -> Iter<'_, T>
+ where
+ R: RangeBounds<usize>,
+ {
+ let (tail, head) = self.range_tail_head(range);
+ // The shared reference we have in &self is maintained in the '_ of Iter.
+ Iter::new(unsafe { self.buffer_as_slice() }, tail, head)
+ }
+
+ /// Creates an iterator that covers the specified mutable range in the deque.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the starting point is greater than the end point or if
+ /// the end point is greater than the length of the deque.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut deque: VecDeque<_> = [1, 2, 3].into();
+ /// for v in deque.range_mut(2..) {
+ /// *v *= 2;
+ /// }
+ /// assert_eq!(deque, [1, 2, 6]);
+ ///
+ /// // A full range covers all contents
+ /// for v in deque.range_mut(..) {
+ /// *v *= 2;
+ /// }
+ /// assert_eq!(deque, [2, 4, 12]);
+ /// ```
+ #[inline]
+ #[stable(feature = "deque_range", since = "1.51.0")]
+ pub fn range_mut<R>(&mut self, range: R) -> IterMut<'_, T>
+ where
+ R: RangeBounds<usize>,
+ {
+ let (tail, head) = self.range_tail_head(range);
+
+ // SAFETY: The internal `IterMut` safety invariant is established because the
+ // `ring` we create is a dereferenceable slice for lifetime '_.
+ let ring = ptr::slice_from_raw_parts_mut(self.ptr(), self.cap());
+
+ unsafe { IterMut::new(ring, tail, head, PhantomData) }
+ }
+
+ /// Removes the specified range from the deque in bulk, returning all
+ /// removed elements as an iterator. If the iterator is dropped before
+ /// being fully consumed, it drops the remaining removed elements.
+ ///
+ /// The returned iterator keeps a mutable borrow on the queue to optimize
+ /// its implementation.
+    ///
+ /// # Panics
+ ///
+ /// Panics if the starting point is greater than the end point or if
+ /// the end point is greater than the length of the deque.
+ ///
+ /// # Leaking
+ ///
+ /// If the returned iterator goes out of scope without being dropped (due to
+ /// [`mem::forget`], for example), the deque may have lost and leaked
+ /// elements arbitrarily, including elements outside the range.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut deque: VecDeque<_> = [1, 2, 3].into();
+ /// let drained = deque.drain(2..).collect::<VecDeque<_>>();
+ /// assert_eq!(drained, [3]);
+ /// assert_eq!(deque, [1, 2]);
+ ///
+ /// // A full range clears all contents, like `clear()` does
+ /// deque.drain(..);
+ /// assert!(deque.is_empty());
+ /// ```
+ #[inline]
+ #[stable(feature = "drain", since = "1.6.0")]
+ pub fn drain<R>(&mut self, range: R) -> Drain<'_, T, A>
+ where
+ R: RangeBounds<usize>,
+ {
+ // Memory safety
+ //
+ // When the Drain is first created, the source deque is shortened to
+ // make sure no uninitialized or moved-from elements are accessible at
+ // all if the Drain's destructor never gets to run.
+ //
+ // Drain will ptr::read out the values to remove.
+ // When finished, the remaining data will be copied back to cover the hole,
+ // and the head/tail values will be restored correctly.
+ //
+ let (drain_tail, drain_head) = self.range_tail_head(range);
+
+ // The deque's elements are parted into three segments:
+ // * self.tail -> drain_tail
+ // * drain_tail -> drain_head
+ // * drain_head -> self.head
+ //
+ // T = self.tail; H = self.head; t = drain_tail; h = drain_head
+ //
+ // We store drain_tail as self.head, and drain_head and self.head as
+ // after_tail and after_head respectively on the Drain. This also
+ // truncates the effective array such that if the Drain is leaked, we
+ // have forgotten about the potentially moved values after the start of
+ // the drain.
+ //
+ // T t h H
+ // [. . . o o x x o o . . .]
+ //
+ let head = self.head;
+
+ // "forget" about the values after the start of the drain until after
+ // the drain is complete and the Drain destructor is run.
+ self.head = drain_tail;
+
+ let deque = NonNull::from(&mut *self);
+ unsafe {
+ // Crucially, we only create shared references from `self` here and read from
+ // it. We do not write to `self` nor reborrow to a mutable reference.
+ // Hence the raw pointer we created above, for `deque`, remains valid.
+ let ring = self.buffer_as_slice();
+ let iter = Iter::new(ring, drain_tail, drain_head);
+
+ Drain::new(drain_head, head, iter, deque)
+ }
+ }
+
+ /// Clears the deque, removing all values.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut deque = VecDeque::new();
+ /// deque.push_back(1);
+ /// deque.clear();
+ /// assert!(deque.is_empty());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn clear(&mut self) {
+ self.truncate(0);
+ }
+
+ /// Returns `true` if the deque contains an element equal to the
+ /// given value.
+ ///
+ /// This operation is *O*(*n*).
+ ///
+ /// Note that if you have a sorted `VecDeque`, [`binary_search`] may be faster.
+ ///
+ /// [`binary_search`]: VecDeque::binary_search
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut deque: VecDeque<u32> = VecDeque::new();
+ ///
+ /// deque.push_back(0);
+ /// deque.push_back(1);
+ ///
+ /// assert_eq!(deque.contains(&1), true);
+ /// assert_eq!(deque.contains(&10), false);
+ /// ```
+ #[stable(feature = "vec_deque_contains", since = "1.12.0")]
+ pub fn contains(&self, x: &T) -> bool
+ where
+ T: PartialEq<T>,
+ {
+ let (a, b) = self.as_slices();
+ a.contains(x) || b.contains(x)
+ }
+
+ /// Provides a reference to the front element, or `None` if the deque is
+ /// empty.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut d = VecDeque::new();
+ /// assert_eq!(d.front(), None);
+ ///
+ /// d.push_back(1);
+ /// d.push_back(2);
+ /// assert_eq!(d.front(), Some(&1));
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn front(&self) -> Option<&T> {
+ self.get(0)
+ }
+
+ /// Provides a mutable reference to the front element, or `None` if the
+ /// deque is empty.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut d = VecDeque::new();
+ /// assert_eq!(d.front_mut(), None);
+ ///
+ /// d.push_back(1);
+ /// d.push_back(2);
+ /// match d.front_mut() {
+ /// Some(x) => *x = 9,
+ /// None => (),
+ /// }
+ /// assert_eq!(d.front(), Some(&9));
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn front_mut(&mut self) -> Option<&mut T> {
+ self.get_mut(0)
+ }
+
+ /// Provides a reference to the back element, or `None` if the deque is
+ /// empty.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut d = VecDeque::new();
+ /// assert_eq!(d.back(), None);
+ ///
+ /// d.push_back(1);
+ /// d.push_back(2);
+ /// assert_eq!(d.back(), Some(&2));
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn back(&self) -> Option<&T> {
+ self.get(self.len().wrapping_sub(1))
+ }
+
+ /// Provides a mutable reference to the back element, or `None` if the
+ /// deque is empty.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut d = VecDeque::new();
+ /// assert_eq!(d.back(), None);
+ ///
+ /// d.push_back(1);
+ /// d.push_back(2);
+ /// match d.back_mut() {
+ /// Some(x) => *x = 9,
+ /// None => (),
+ /// }
+ /// assert_eq!(d.back(), Some(&9));
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn back_mut(&mut self) -> Option<&mut T> {
+ self.get_mut(self.len().wrapping_sub(1))
+ }
+
+ /// Removes the first element and returns it, or `None` if the deque is
+ /// empty.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut d = VecDeque::new();
+ /// d.push_back(1);
+ /// d.push_back(2);
+ ///
+ /// assert_eq!(d.pop_front(), Some(1));
+ /// assert_eq!(d.pop_front(), Some(2));
+ /// assert_eq!(d.pop_front(), None);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn pop_front(&mut self) -> Option<T> {
+ if self.is_empty() {
+ None
+ } else {
+ let tail = self.tail;
+ self.tail = self.wrap_add(self.tail, 1);
+ unsafe { Some(self.buffer_read(tail)) }
+ }
+ }
+
+ /// Removes the last element from the deque and returns it, or `None` if
+ /// it is empty.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut buf = VecDeque::new();
+ /// assert_eq!(buf.pop_back(), None);
+ /// buf.push_back(1);
+ /// buf.push_back(3);
+ /// assert_eq!(buf.pop_back(), Some(3));
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn pop_back(&mut self) -> Option<T> {
+ if self.is_empty() {
+ None
+ } else {
+ self.head = self.wrap_sub(self.head, 1);
+ let head = self.head;
+ unsafe { Some(self.buffer_read(head)) }
+ }
+ }
+
+ /// Prepends an element to the deque.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut d = VecDeque::new();
+ /// d.push_front(1);
+ /// d.push_front(2);
+ /// assert_eq!(d.front(), Some(&2));
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn push_front(&mut self, value: T) {
+ if self.is_full() {
+ self.grow();
+ }
+
+ self.tail = self.wrap_sub(self.tail, 1);
+ let tail = self.tail;
+ unsafe {
+ self.buffer_write(tail, value);
+ }
+ }
+
+ /// Appends an element to the back of the deque.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut buf = VecDeque::new();
+ /// buf.push_back(1);
+ /// buf.push_back(3);
+ /// assert_eq!(3, *buf.back().unwrap());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn push_back(&mut self, value: T) {
+ if self.is_full() {
+ self.grow();
+ }
+
+ let head = self.head;
+ self.head = self.wrap_add(self.head, 1);
+ unsafe { self.buffer_write(head, value) }
+ }
+
+ #[inline]
+ fn is_contiguous(&self) -> bool {
+ // FIXME: Should we consider `head == 0` to mean
+ // that `self` is contiguous?
+ self.tail <= self.head
+ }
+
+ /// Removes an element from anywhere in the deque and returns it,
+ /// replacing it with the first element.
+ ///
+ /// This does not preserve ordering, but is *O*(1).
+ ///
+ /// Returns `None` if `index` is out of bounds.
+ ///
+ /// Element at index 0 is the front of the queue.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut buf = VecDeque::new();
+ /// assert_eq!(buf.swap_remove_front(0), None);
+ /// buf.push_back(1);
+ /// buf.push_back(2);
+ /// buf.push_back(3);
+ /// assert_eq!(buf, [1, 2, 3]);
+ ///
+ /// assert_eq!(buf.swap_remove_front(2), Some(3));
+ /// assert_eq!(buf, [2, 1]);
+ /// ```
+ #[stable(feature = "deque_extras_15", since = "1.5.0")]
+ pub fn swap_remove_front(&mut self, index: usize) -> Option<T> {
+ let length = self.len();
+ if length > 0 && index < length && index != 0 {
+ self.swap(index, 0);
+ } else if index >= length {
+ return None;
+ }
+ self.pop_front()
+ }
+
+ /// Removes an element from anywhere in the deque and returns it,
+ /// replacing it with the last element.
+ ///
+ /// This does not preserve ordering, but is *O*(1).
+ ///
+ /// Returns `None` if `index` is out of bounds.
+ ///
+ /// Element at index 0 is the front of the queue.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut buf = VecDeque::new();
+ /// assert_eq!(buf.swap_remove_back(0), None);
+ /// buf.push_back(1);
+ /// buf.push_back(2);
+ /// buf.push_back(3);
+ /// assert_eq!(buf, [1, 2, 3]);
+ ///
+ /// assert_eq!(buf.swap_remove_back(0), Some(1));
+ /// assert_eq!(buf, [3, 2]);
+ /// ```
+ #[stable(feature = "deque_extras_15", since = "1.5.0")]
+ pub fn swap_remove_back(&mut self, index: usize) -> Option<T> {
+ let length = self.len();
+ if length > 0 && index < length - 1 {
+ self.swap(index, length - 1);
+ } else if index >= length {
+ return None;
+ }
+ self.pop_back()
+ }
+
+ /// Inserts an element at `index` within the deque, shifting all elements
+ /// with indices greater than or equal to `index` towards the back.
+ ///
+ /// Element at index 0 is the front of the queue.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `index` is greater than deque's length
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut vec_deque = VecDeque::new();
+ /// vec_deque.push_back('a');
+ /// vec_deque.push_back('b');
+ /// vec_deque.push_back('c');
+ /// assert_eq!(vec_deque, &['a', 'b', 'c']);
+ ///
+ /// vec_deque.insert(1, 'd');
+ /// assert_eq!(vec_deque, &['a', 'd', 'b', 'c']);
+ /// ```
+ #[stable(feature = "deque_extras_15", since = "1.5.0")]
+ pub fn insert(&mut self, index: usize, value: T) {
+ assert!(index <= self.len(), "index out of bounds");
+ if self.is_full() {
+ self.grow();
+ }
+
+ // Move the least number of elements in the ring buffer and insert
+ // the given object
+ //
+ // At most len/2 - 1 elements will be moved. O(min(n, n-i))
+ //
+ // There are three main cases:
+ // Elements are contiguous
+ // - special case when tail is 0
+ // Elements are discontiguous and the insert is in the tail section
+ // Elements are discontiguous and the insert is in the head section
+ //
+ // For each of those there are two more cases:
+ // Insert is closer to tail
+ // Insert is closer to head
+ //
+ // Key: H - self.head
+ // T - self.tail
+ // o - Valid element
+ // I - Insertion element
+ // A - The element that should be after the insertion point
+ // M - Indicates element was moved
+
+ let idx = self.wrap_add(self.tail, index);
+
+ let distance_to_tail = index;
+ let distance_to_head = self.len() - index;
+
+ let contiguous = self.is_contiguous();
+
+ match (contiguous, distance_to_tail <= distance_to_head, idx >= self.tail) {
+ (true, true, _) if index == 0 => {
+ // push_front
+ //
+ // T
+ // I H
+ // [A o o o o o o . . . . . . . . .]
+ //
+ // H T
+ // [A o o o o o o o . . . . . I]
+ //
+
+ self.tail = self.wrap_sub(self.tail, 1);
+ }
+ (true, true, _) => {
+ unsafe {
+ // contiguous, insert closer to tail:
+ //
+ // T I H
+ // [. . . o o A o o o o . . . . . .]
+ //
+ // T H
+ // [. . o o I A o o o o . . . . . .]
+ // M M
+ //
+ // contiguous, insert closer to tail and tail is 0:
+ //
+ //
+ // T I H
+ // [o o A o o o o . . . . . . . . .]
+ //
+ // H T
+ // [o I A o o o o o . . . . . . . o]
+ // M M
+
+ let new_tail = self.wrap_sub(self.tail, 1);
+
+ self.copy(new_tail, self.tail, 1);
+ // Already moved the tail, so we only copy `index - 1` elements.
+ self.copy(self.tail, self.tail + 1, index - 1);
+
+ self.tail = new_tail;
+ }
+ }
+ (true, false, _) => {
+ unsafe {
+ // contiguous, insert closer to head:
+ //
+ // T I H
+ // [. . . o o o o A o o . . . . . .]
+ //
+ // T H
+ // [. . . o o o o I A o o . . . . .]
+ // M M M
+
+ self.copy(idx + 1, idx, self.head - idx);
+ self.head = self.wrap_add(self.head, 1);
+ }
+ }
+ (false, true, true) => {
+ unsafe {
+ // discontiguous, insert closer to tail, tail section:
+ //
+ // H T I
+ // [o o o o o o . . . . . o o A o o]
+ //
+ // H T
+ // [o o o o o o . . . . o o I A o o]
+ // M M
+
+ self.copy(self.tail - 1, self.tail, index);
+ self.tail -= 1;
+ }
+ }
+ (false, false, true) => {
+ unsafe {
+ // discontiguous, insert closer to head, tail section:
+ //
+ // H T I
+ // [o o . . . . . . . o o o o o A o]
+ //
+ // H T
+ // [o o o . . . . . . o o o o o I A]
+ // M M M M
+
+ // copy elements up to new head
+ self.copy(1, 0, self.head);
+
+ // copy last element into empty spot at bottom of buffer
+ self.copy(0, self.cap() - 1, 1);
+
+ // move elements from idx to end forward not including ^ element
+ self.copy(idx + 1, idx, self.cap() - 1 - idx);
+
+ self.head += 1;
+ }
+ }
+ (false, true, false) if idx == 0 => {
+ unsafe {
+ // discontiguous, insert is closer to tail, head section,
+ // and is at index zero in the internal buffer:
+ //
+ // I H T
+ // [A o o o o o o o o o . . . o o o]
+ //
+ // H T
+ // [A o o o o o o o o o . . o o o I]
+ // M M M
+
+ // copy elements up to new tail
+ self.copy(self.tail - 1, self.tail, self.cap() - self.tail);
+
+ // copy last element into empty spot at bottom of buffer
+ self.copy(self.cap() - 1, 0, 1);
+
+ self.tail -= 1;
+ }
+ }
+ (false, true, false) => {
+ unsafe {
+ // discontiguous, insert closer to tail, head section:
+ //
+ // I H T
+ // [o o o A o o o o o o . . . o o o]
+ //
+ // H T
+ // [o o I A o o o o o o . . o o o o]
+ // M M M M M M
+
+ // copy elements up to new tail
+ self.copy(self.tail - 1, self.tail, self.cap() - self.tail);
+
+ // copy last element into empty spot at bottom of buffer
+ self.copy(self.cap() - 1, 0, 1);
+
+ // move elements from idx-1 to end forward not including ^ element
+ self.copy(0, 1, idx - 1);
+
+ self.tail -= 1;
+ }
+ }
+ (false, false, false) => {
+ unsafe {
+ // discontiguous, insert closer to head, head section:
+ //
+ // I H T
+ // [o o o o A o o . . . . . . o o o]
+ //
+ // H T
+ // [o o o o I A o o . . . . . o o o]
+ // M M M
+
+ self.copy(idx + 1, idx, self.head - idx);
+ self.head += 1;
+ }
+ }
+ }
+
+ // tail might've been changed so we need to recalculate
+ let new_idx = self.wrap_add(self.tail, index);
+ unsafe {
+ self.buffer_write(new_idx, value);
+ }
+ }
+
+ /// Removes and returns the element at `index` from the deque.
+ /// Whichever end is closer to the removal point will be moved to make
+ /// room, and all the affected elements will be moved to new positions.
+ /// Returns `None` if `index` is out of bounds.
+ ///
+ /// Element at index 0 is the front of the queue.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut buf = VecDeque::new();
+ /// buf.push_back(1);
+ /// buf.push_back(2);
+ /// buf.push_back(3);
+ /// assert_eq!(buf, [1, 2, 3]);
+ ///
+ /// assert_eq!(buf.remove(1), Some(2));
+ /// assert_eq!(buf, [1, 3]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn remove(&mut self, index: usize) -> Option<T> {
+ if self.is_empty() || self.len() <= index {
+ return None;
+ }
+
+ // There are three main cases:
+ // Elements are contiguous
+ // Elements are discontiguous and the removal is in the tail section
+ // Elements are discontiguous and the removal is in the head section
+ // - special case when elements are technically contiguous,
+ // but self.head = 0
+ //
+ // For each of those there are two more cases:
+        //  Removal is closer to tail
+        //  Removal is closer to head
+ //
+ // Key: H - self.head
+ // T - self.tail
+ // o - Valid element
+ // x - Element marked for removal
+ // R - Indicates element that is being removed
+ // M - Indicates element was moved
+
+ let idx = self.wrap_add(self.tail, index);
+
+ let elem = unsafe { Some(self.buffer_read(idx)) };
+
+ let distance_to_tail = index;
+ let distance_to_head = self.len() - index;
+
+ let contiguous = self.is_contiguous();
+
+ match (contiguous, distance_to_tail <= distance_to_head, idx >= self.tail) {
+ (true, true, _) => {
+ unsafe {
+ // contiguous, remove closer to tail:
+ //
+ // T R H
+ // [. . . o o x o o o o . . . . . .]
+ //
+ // T H
+ // [. . . . o o o o o o . . . . . .]
+ // M M
+
+ self.copy(self.tail + 1, self.tail, index);
+ self.tail += 1;
+ }
+ }
+ (true, false, _) => {
+ unsafe {
+ // contiguous, remove closer to head:
+ //
+ // T R H
+ // [. . . o o o o x o o . . . . . .]
+ //
+ // T H
+ // [. . . o o o o o o . . . . . . .]
+ // M M
+
+ self.copy(idx, idx + 1, self.head - idx - 1);
+ self.head -= 1;
+ }
+ }
+ (false, true, true) => {
+ unsafe {
+ // discontiguous, remove closer to tail, tail section:
+ //
+ // H T R
+ // [o o o o o o . . . . . o o x o o]
+ //
+ // H T
+ // [o o o o o o . . . . . . o o o o]
+ // M M
+
+ self.copy(self.tail + 1, self.tail, index);
+ self.tail = self.wrap_add(self.tail, 1);
+ }
+ }
+ (false, false, false) => {
+ unsafe {
+ // discontiguous, remove closer to head, head section:
+ //
+ // R H T
+ // [o o o o x o o . . . . . . o o o]
+ //
+ // H T
+ // [o o o o o o . . . . . . . o o o]
+ // M M
+
+ self.copy(idx, idx + 1, self.head - idx - 1);
+ self.head -= 1;
+ }
+ }
+ (false, false, true) => {
+ unsafe {
+ // discontiguous, remove closer to head, tail section:
+ //
+ // H T R
+ // [o o o . . . . . . o o o o o x o]
+ //
+ // H T
+ // [o o . . . . . . . o o o o o o o]
+ // M M M M
+ //
+ // or quasi-discontiguous, remove next to head, tail section:
+ //
+ // H T R
+ // [. . . . . . . . . o o o o o x o]
+ //
+ // T H
+ // [. . . . . . . . . o o o o o o .]
+ // M
+
+ // draw in elements in the tail section
+ self.copy(idx, idx + 1, self.cap() - idx - 1);
+
+ // Prevents underflow.
+ if self.head != 0 {
+ // copy first element into empty spot
+ self.copy(self.cap() - 1, 0, 1);
+
+ // move elements in the head section backwards
+ self.copy(0, 1, self.head - 1);
+ }
+
+ self.head = self.wrap_sub(self.head, 1);
+ }
+ }
+ (false, true, false) => {
+ unsafe {
+ // discontiguous, remove closer to tail, head section:
+ //
+ // R H T
+ // [o o x o o o o o o o . . . o o o]
+ //
+ // H T
+ // [o o o o o o o o o o . . . . o o]
+ // M M M M M
+
+ // draw in elements up to idx
+ self.copy(1, 0, idx);
+
+ // copy last element into empty spot
+ self.copy(0, self.cap() - 1, 1);
+
+ // move elements from tail to end forward, excluding the last one
+ self.copy(self.tail + 1, self.tail, self.cap() - self.tail - 1);
+
+ self.tail = self.wrap_add(self.tail, 1);
+ }
+ }
+ }
+
+ elem
+ }
+
+ /// Splits the deque into two at the given index.
+ ///
+ /// Returns a newly allocated `VecDeque`. `self` contains elements `[0, at)`,
+ /// and the returned deque contains elements `[at, len)`.
+ ///
+ /// Note that the capacity of `self` does not change.
+ ///
+ /// Element at index 0 is the front of the queue.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `at > len`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut buf: VecDeque<_> = [1, 2, 3].into();
+ /// let buf2 = buf.split_off(1);
+ /// assert_eq!(buf, [1]);
+ /// assert_eq!(buf2, [2, 3]);
+ /// ```
+ #[inline]
+ #[must_use = "use `.truncate()` if you don't need the other half"]
+ #[stable(feature = "split_off", since = "1.4.0")]
+ pub fn split_off(&mut self, at: usize) -> Self
+ where
+ A: Clone,
+ {
+ let len = self.len();
+ assert!(at <= len, "`at` out of bounds");
+
+ let other_len = len - at;
+ let mut other = VecDeque::with_capacity_in(other_len, self.allocator().clone());
+
+ unsafe {
+ let (first_half, second_half) = self.as_slices();
+
+ let first_len = first_half.len();
+ let second_len = second_half.len();
+ if at < first_len {
+ // `at` lies in the first half.
+ let amount_in_first = first_len - at;
+
+ ptr::copy_nonoverlapping(first_half.as_ptr().add(at), other.ptr(), amount_in_first);
+
+ // just take all of the second half.
+ ptr::copy_nonoverlapping(
+ second_half.as_ptr(),
+ other.ptr().add(amount_in_first),
+ second_len,
+ );
+ } else {
+ // `at` lies in the second half, need to factor in the elements we skipped
+ // in the first half.
+ let offset = at - first_len;
+ let amount_in_second = second_len - offset;
+ ptr::copy_nonoverlapping(
+ second_half.as_ptr().add(offset),
+ other.ptr(),
+ amount_in_second,
+ );
+ }
+ }
+
+        // Fix up the head indices of both deques now that `other_len` elements
+        // have moved out of `self` and into `other`.
+ self.head = self.wrap_sub(self.head, other_len);
+ other.head = other.wrap_index(other_len);
+
+ other
+ }
+
+ /// Moves all the elements of `other` into `self`, leaving `other` empty.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new number of elements in self overflows a `usize`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut buf: VecDeque<_> = [1, 2].into();
+ /// let mut buf2: VecDeque<_> = [3, 4].into();
+ /// buf.append(&mut buf2);
+ /// assert_eq!(buf, [1, 2, 3, 4]);
+ /// assert_eq!(buf2, []);
+ /// ```
+ #[inline]
+ #[stable(feature = "append", since = "1.4.0")]
+ pub fn append(&mut self, other: &mut Self) {
+ self.reserve(other.len());
+ unsafe {
+ let (left, right) = other.as_slices();
+ self.copy_slice(self.head, left);
+ self.copy_slice(self.wrap_add(self.head, left.len()), right);
+ }
+        // SAFETY: Update the indices only after copying, so that a panic during
+        // the copy cannot leave both deques claiming ownership of the same values.
+ self.head = self.wrap_add(self.head, other.len());
+        // Logically empty `other` without running destructors: its values were
+        // moved into `self` above, so dropping them here would be a double drop.
+ other.tail = other.head;
+ }
+
+ /// Retains only the elements specified by the predicate.
+ ///
+ /// In other words, remove all elements `e` for which `f(&e)` returns false.
+ /// This method operates in place, visiting each element exactly once in the
+ /// original order, and preserves the order of the retained elements.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut buf = VecDeque::new();
+ /// buf.extend(1..5);
+ /// buf.retain(|&x| x % 2 == 0);
+ /// assert_eq!(buf, [2, 4]);
+ /// ```
+ ///
+ /// Because the elements are visited exactly once in the original order,
+ /// external state may be used to decide which elements to keep.
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut buf = VecDeque::new();
+ /// buf.extend(1..6);
+ ///
+ /// let keep = [false, true, true, false, true];
+ /// let mut iter = keep.iter();
+ /// buf.retain(|_| *iter.next().unwrap());
+ /// assert_eq!(buf, [2, 3, 5]);
+ /// ```
+ #[stable(feature = "vec_deque_retain", since = "1.4.0")]
+ pub fn retain<F>(&mut self, mut f: F)
+ where
+ F: FnMut(&T) -> bool,
+ {
+ self.retain_mut(|elem| f(elem));
+ }
+
+ /// Retains only the elements specified by the predicate.
+ ///
+ /// In other words, remove all elements `e` for which `f(&e)` returns false.
+ /// This method operates in place, visiting each element exactly once in the
+ /// original order, and preserves the order of the retained elements.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut buf = VecDeque::new();
+ /// buf.extend(1..5);
+ /// buf.retain_mut(|x| if *x % 2 == 0 {
+ /// *x += 1;
+ /// true
+ /// } else {
+ /// false
+ /// });
+ /// assert_eq!(buf, [3, 5]);
+ /// ```
+ #[stable(feature = "vec_retain_mut", since = "1.61.0")]
+ pub fn retain_mut<F>(&mut self, mut f: F)
+ where
+ F: FnMut(&mut T) -> bool,
+ {
+ let len = self.len();
+ let mut idx = 0;
+ let mut cur = 0;
+
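+        // `idx` is the write cursor for retained elements and `cur` is the read
+        // cursor; the slots in `idx..cur` only ever hold values that the final
+        // truncate will drop.
+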
+ // Stage 1: All values are retained.
+ while cur < len {
+ if !f(&mut self[cur]) {
+ cur += 1;
+ break;
+ }
+ cur += 1;
+ idx += 1;
+ }
+ // Stage 2: Swap retained value into current idx.
+ while cur < len {
+ if !f(&mut self[cur]) {
+ cur += 1;
+ continue;
+ }
+
+ self.swap(idx, cur);
+ cur += 1;
+ idx += 1;
+ }
+ // Stage 3: Truncate all values after idx.
+ if cur != idx {
+ self.truncate(idx);
+ }
+ }
+
+ // Double the buffer size. This method is inline(never), so we expect it to only
+ // be called in cold paths.
+ // This may panic or abort
+ #[inline(never)]
+ fn grow(&mut self) {
+ // Extend or possibly remove this assertion when valid use-cases for growing the
+ // buffer without it being full emerge
+ debug_assert!(self.is_full());
+ let old_cap = self.cap();
+ self.buf.reserve_exact(old_cap, old_cap);
+ assert!(self.cap() == old_cap * 2);
+ unsafe {
+ self.handle_capacity_increase(old_cap);
+ }
+ debug_assert!(!self.is_full());
+ }
+
+ /// Modifies the deque in-place so that `len()` is equal to `new_len`,
+ /// either by removing excess elements from the back or by appending
+ /// elements generated by calling `generator` to the back.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut buf = VecDeque::new();
+ /// buf.push_back(5);
+ /// buf.push_back(10);
+ /// buf.push_back(15);
+ /// assert_eq!(buf, [5, 10, 15]);
+ ///
+ /// buf.resize_with(5, Default::default);
+ /// assert_eq!(buf, [5, 10, 15, 0, 0]);
+ ///
+ /// buf.resize_with(2, || unreachable!());
+ /// assert_eq!(buf, [5, 10]);
+ ///
+ /// let mut state = 100;
+ /// buf.resize_with(5, || { state += 1; state });
+ /// assert_eq!(buf, [5, 10, 101, 102, 103]);
+ /// ```
+ #[stable(feature = "vec_resize_with", since = "1.33.0")]
+ pub fn resize_with(&mut self, new_len: usize, generator: impl FnMut() -> T) {
+ let len = self.len();
+
+ if new_len > len {
+ self.extend(repeat_with(generator).take(new_len - len))
+ } else {
+ self.truncate(new_len);
+ }
+ }
+
+ /// Rearranges the internal storage of this deque so it is one contiguous
+ /// slice, which is then returned.
+ ///
+ /// This method does not allocate and does not change the order of the
+ /// inserted elements. As it returns a mutable slice, this can be used to
+ /// sort a deque.
+ ///
+ /// Once the internal storage is contiguous, the [`as_slices`] and
+ /// [`as_mut_slices`] methods will return the entire contents of the
+ /// deque in a single slice.
+ ///
+ /// [`as_slices`]: VecDeque::as_slices
+ /// [`as_mut_slices`]: VecDeque::as_mut_slices
+ ///
+ /// # Examples
+ ///
+ /// Sorting the content of a deque.
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut buf = VecDeque::with_capacity(15);
+ ///
+ /// buf.push_back(2);
+ /// buf.push_back(1);
+ /// buf.push_front(3);
+ ///
+ /// // sorting the deque
+ /// buf.make_contiguous().sort();
+ /// assert_eq!(buf.as_slices(), (&[1, 2, 3] as &[_], &[] as &[_]));
+ ///
+ /// // sorting it in reverse order
+ /// buf.make_contiguous().sort_by(|a, b| b.cmp(a));
+ /// assert_eq!(buf.as_slices(), (&[3, 2, 1] as &[_], &[] as &[_]));
+ /// ```
+ ///
+ /// Getting immutable access to the contiguous slice.
+ ///
+ /// ```rust
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut buf = VecDeque::new();
+ ///
+ /// buf.push_back(2);
+ /// buf.push_back(1);
+ /// buf.push_front(3);
+ ///
+ /// buf.make_contiguous();
+ /// if let (slice, &[]) = buf.as_slices() {
+ /// // we can now be sure that `slice` contains all elements of the deque,
+ /// // while still having immutable access to `buf`.
+ /// assert_eq!(buf.len(), slice.len());
+ /// assert_eq!(slice, &[3, 2, 1] as &[_]);
+ /// }
+ /// ```
+ #[stable(feature = "deque_make_contiguous", since = "1.48.0")]
+ pub fn make_contiguous(&mut self) -> &mut [T] {
+ if self.is_contiguous() {
+ let tail = self.tail;
+ let head = self.head;
+ // Safety:
+ // - `self.head` and `self.tail` in a ring buffer are always valid indices.
+ // - `RingSlices::ring_slices` guarantees that the slices split according to `self.head` and `self.tail` are initialized.
+ return unsafe {
+ MaybeUninit::slice_assume_init_mut(
+ RingSlices::ring_slices(self.buffer_as_mut_slice(), head, tail).0,
+ )
+ };
+ }
+
+ let buf = self.buf.ptr();
+ let cap = self.cap();
+ let len = self.len();
+
+ let free = self.tail - self.head;
+ let tail_len = cap - self.tail;
+
+ if free >= tail_len {
+ // there is enough free space to copy the tail in one go,
+ // this means that we first shift the head backwards, and then
+ // copy the tail to the correct position.
+ //
+ // from: DEFGH....ABC
+ // to: ABCDEFGH....
+ unsafe {
+ ptr::copy(buf, buf.add(tail_len), self.head);
+ // ...DEFGH.ABC
+ ptr::copy_nonoverlapping(buf.add(self.tail), buf, tail_len);
+ // ABCDEFGH....
+
+ self.tail = 0;
+ self.head = len;
+ }
+ } else if free > self.head {
+ // FIXME: We currently do not consider ....ABCDEFGH
+ // to be contiguous because `head` would be `0` in this
+ // case. While we probably want to change this it
+ // isn't trivial as a few places expect `is_contiguous`
+ // to mean that we can just slice using `buf[tail..head]`.
+
+ // there is enough free space to copy the head in one go,
+ // this means that we first shift the tail forwards, and then
+ // copy the head to the correct position.
+ //
+ // from: FGH....ABCDE
+ // to: ...ABCDEFGH.
+ unsafe {
+ ptr::copy(buf.add(self.tail), buf.add(self.head), tail_len);
+ // FGHABCDE....
+ ptr::copy_nonoverlapping(buf, buf.add(self.head + tail_len), self.head);
+ // ...ABCDEFGH.
+
+ self.tail = self.head;
+ self.head = self.wrap_add(self.tail, len);
+ }
+ } else {
+ // free is smaller than both head and tail,
+ // this means we have to slowly "swap" the tail and the head.
+ //
+ // from: EFGHI...ABCD or HIJK.ABCDEFG
+ // to: ABCDEFGHI... or ABCDEFGHIJK.
+ let mut left_edge: usize = 0;
+ let mut right_edge: usize = self.tail;
+ unsafe {
+ // The general problem looks like this
+ // GHIJKLM...ABCDEF - before any swaps
+ // ABCDEFM...GHIJKL - after 1 pass of swaps
+ // ABCDEFGHIJM...KL - swap until the left edge reaches the temp store
+ // - then restart the algorithm with a new (smaller) store
+ // Sometimes the temp store is reached when the right edge is at the end
+ // of the buffer - this means we've hit the right order with fewer swaps!
+ // E.g
+ // EF..ABCD
+ // ABCDEF.. - after four only swaps we've finished
+ while left_edge < len && right_edge != cap {
+ let mut right_offset = 0;
+ for i in left_edge..right_edge {
+ right_offset = (i - left_edge) % (cap - right_edge);
+ let src: isize = (right_edge + right_offset) as isize;
+ ptr::swap(buf.add(i), buf.offset(src));
+ }
+ let n_ops = right_edge - left_edge;
+ left_edge += n_ops;
+ right_edge += right_offset + 1;
+ }
+
+ self.tail = 0;
+ self.head = len;
+ }
+ }
+
+ let tail = self.tail;
+ let head = self.head;
+ // Safety:
+ // - `self.head` and `self.tail` in a ring buffer are always valid indices.
+ // - `RingSlices::ring_slices` guarantees that the slices split according to `self.head` and `self.tail` are initialized.
+ unsafe {
+ MaybeUninit::slice_assume_init_mut(
+ RingSlices::ring_slices(self.buffer_as_mut_slice(), head, tail).0,
+ )
+ }
+ }
+
+ /// Rotates the double-ended queue `mid` places to the left.
+ ///
+ /// Equivalently,
+ /// - Rotates item `mid` into the first position.
+ /// - Pops the first `mid` items and pushes them to the end.
+ /// - Rotates `len() - mid` places to the right.
+ ///
+ /// # Panics
+ ///
+ /// If `mid` is greater than `len()`. Note that `mid == len()`
+ /// does _not_ panic and is a no-op rotation.
+ ///
+ /// # Complexity
+ ///
+    /// Takes *O*(min(mid, len() - mid)) time and no extra space.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut buf: VecDeque<_> = (0..10).collect();
+ ///
+ /// buf.rotate_left(3);
+ /// assert_eq!(buf, [3, 4, 5, 6, 7, 8, 9, 0, 1, 2]);
+ ///
+ /// for i in 1..10 {
+ /// assert_eq!(i * 3 % 10, buf[0]);
+ /// buf.rotate_left(3);
+ /// }
+ /// assert_eq!(buf, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
+ /// ```
+ #[stable(feature = "vecdeque_rotate", since = "1.36.0")]
+ pub fn rotate_left(&mut self, mid: usize) {
+ assert!(mid <= self.len());
+ let k = self.len() - mid;
+ if mid <= k {
+ unsafe { self.rotate_left_inner(mid) }
+ } else {
+ unsafe { self.rotate_right_inner(k) }
+ }
+ }
+
+ /// Rotates the double-ended queue `k` places to the right.
+ ///
+ /// Equivalently,
+ /// - Rotates the first item into position `k`.
+ /// - Pops the last `k` items and pushes them to the front.
+ /// - Rotates `len() - k` places to the left.
+ ///
+ /// # Panics
+ ///
+ /// If `k` is greater than `len()`. Note that `k == len()`
+ /// does _not_ panic and is a no-op rotation.
+ ///
+ /// # Complexity
+ ///
+    /// Takes *O*(min(k, len() - k)) time and no extra space.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut buf: VecDeque<_> = (0..10).collect();
+ ///
+ /// buf.rotate_right(3);
+ /// assert_eq!(buf, [7, 8, 9, 0, 1, 2, 3, 4, 5, 6]);
+ ///
+ /// for i in 1..10 {
+ /// assert_eq!(0, buf[i * 3 % 10]);
+ /// buf.rotate_right(3);
+ /// }
+ /// assert_eq!(buf, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
+ /// ```
+ #[stable(feature = "vecdeque_rotate", since = "1.36.0")]
+ pub fn rotate_right(&mut self, k: usize) {
+ assert!(k <= self.len());
+ let mid = self.len() - k;
+ if k <= mid {
+ unsafe { self.rotate_right_inner(k) }
+ } else {
+ unsafe { self.rotate_left_inner(mid) }
+ }
+ }
+
+ // SAFETY: the following two methods require that the rotation amount
+ // be less than half the length of the deque.
+ //
+ // `wrap_copy` requires that `min(x, cap() - x) + copy_len <= cap()`,
+    // but that `min` is never more than half the capacity, regardless of x,
+ // so it's sound to call here because we're calling with something
+ // less than half the length, which is never above half the capacity.
+
+ unsafe fn rotate_left_inner(&mut self, mid: usize) {
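+        // Duplicate the first `mid` logical elements into the free space just
+        // past `head`, then advance both indices by `mid`: the originals fall
+        // off the front while the copies become the new back.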
+ debug_assert!(mid * 2 <= self.len());
+ unsafe {
+ self.wrap_copy(self.head, self.tail, mid);
+ }
+ self.head = self.wrap_add(self.head, mid);
+ self.tail = self.wrap_add(self.tail, mid);
+ }
+
+ unsafe fn rotate_right_inner(&mut self, k: usize) {
+ debug_assert!(k * 2 <= self.len());
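+        // Step `head` and `tail` back by `k` first, then copy the last `k`
+        // logical elements into the space newly exposed at the front.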
+ self.head = self.wrap_sub(self.head, k);
+ self.tail = self.wrap_sub(self.tail, k);
+ unsafe {
+ self.wrap_copy(self.tail, self.head, k);
+ }
+ }
+
+ /// Binary searches this `VecDeque` for a given element.
+ /// This behaves similarly to [`contains`] if this `VecDeque` is sorted.
+ ///
+ /// If the value is found then [`Result::Ok`] is returned, containing the
+ /// index of the matching element. If there are multiple matches, then any
+ /// one of the matches could be returned. If the value is not found then
+ /// [`Result::Err`] is returned, containing the index where a matching
+ /// element could be inserted while maintaining sorted order.
+ ///
+ /// See also [`binary_search_by`], [`binary_search_by_key`], and [`partition_point`].
+ ///
+ /// [`contains`]: VecDeque::contains
+ /// [`binary_search_by`]: VecDeque::binary_search_by
+ /// [`binary_search_by_key`]: VecDeque::binary_search_by_key
+ /// [`partition_point`]: VecDeque::partition_point
+ ///
+ /// # Examples
+ ///
+ /// Looks up a series of four elements. The first is found, with a
+ /// uniquely determined position; the second and third are not
+ /// found; the fourth could match any position in `[1, 4]`.
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let deque: VecDeque<_> = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55].into();
+ ///
+ /// assert_eq!(deque.binary_search(&13), Ok(9));
+ /// assert_eq!(deque.binary_search(&4), Err(7));
+ /// assert_eq!(deque.binary_search(&100), Err(13));
+ /// let r = deque.binary_search(&1);
+ /// assert!(matches!(r, Ok(1..=4)));
+ /// ```
+ ///
+ /// If you want to insert an item to a sorted deque, while maintaining
+ /// sort order, consider using [`partition_point`]:
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut deque: VecDeque<_> = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55].into();
+ /// let num = 42;
+ /// let idx = deque.partition_point(|&x| x < num);
+ /// // The above is equivalent to `let idx = deque.binary_search(&num).unwrap_or_else(|x| x);`
+ /// deque.insert(idx, num);
+ /// assert_eq!(deque, &[0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 42, 55]);
+ /// ```
+ #[stable(feature = "vecdeque_binary_search", since = "1.54.0")]
+ #[inline]
+ pub fn binary_search(&self, x: &T) -> Result<usize, usize>
+ where
+ T: Ord,
+ {
+ self.binary_search_by(|e| e.cmp(x))
+ }
+
+ /// Binary searches this `VecDeque` with a comparator function.
+ /// This behaves similarly to [`contains`] if this `VecDeque` is sorted.
+ ///
+ /// The comparator function should implement an order consistent
+ /// with the sort order of the deque, returning an order code that
+ /// indicates whether its argument is `Less`, `Equal` or `Greater`
+ /// than the desired target.
+ ///
+ /// If the value is found then [`Result::Ok`] is returned, containing the
+ /// index of the matching element. If there are multiple matches, then any
+ /// one of the matches could be returned. If the value is not found then
+ /// [`Result::Err`] is returned, containing the index where a matching
+ /// element could be inserted while maintaining sorted order.
+ ///
+ /// See also [`binary_search`], [`binary_search_by_key`], and [`partition_point`].
+ ///
+ /// [`contains`]: VecDeque::contains
+ /// [`binary_search`]: VecDeque::binary_search
+ /// [`binary_search_by_key`]: VecDeque::binary_search_by_key
+ /// [`partition_point`]: VecDeque::partition_point
+ ///
+ /// # Examples
+ ///
+ /// Looks up a series of four elements. The first is found, with a
+ /// uniquely determined position; the second and third are not
+ /// found; the fourth could match any position in `[1, 4]`.
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let deque: VecDeque<_> = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55].into();
+ ///
+ /// assert_eq!(deque.binary_search_by(|x| x.cmp(&13)), Ok(9));
+ /// assert_eq!(deque.binary_search_by(|x| x.cmp(&4)), Err(7));
+ /// assert_eq!(deque.binary_search_by(|x| x.cmp(&100)), Err(13));
+ /// let r = deque.binary_search_by(|x| x.cmp(&1));
+ /// assert!(matches!(r, Ok(1..=4)));
+ /// ```
+ #[stable(feature = "vecdeque_binary_search", since = "1.54.0")]
+ pub fn binary_search_by<'a, F>(&'a self, mut f: F) -> Result<usize, usize>
+ where
+ F: FnMut(&'a T) -> Ordering,
+ {
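+        // The elements form at most two sorted contiguous slices. A single probe
+        // of the first element of `back` decides which slice must contain the
+        // target, so one ordinary slice binary search finishes the job.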
+ let (front, back) = self.as_slices();
+ let cmp_back = back.first().map(|elem| f(elem));
+
+ if let Some(Ordering::Equal) = cmp_back {
+ Ok(front.len())
+ } else if let Some(Ordering::Less) = cmp_back {
+ back.binary_search_by(f).map(|idx| idx + front.len()).map_err(|idx| idx + front.len())
+ } else {
+ front.binary_search_by(f)
+ }
+ }
+
+ /// Binary searches this `VecDeque` with a key extraction function.
+ /// This behaves similarly to [`contains`] if this `VecDeque` is sorted.
+ ///
+ /// Assumes that the deque is sorted by the key, for instance with
+ /// [`make_contiguous().sort_by_key()`] using the same key extraction function.
+ ///
+ /// If the value is found then [`Result::Ok`] is returned, containing the
+ /// index of the matching element. If there are multiple matches, then any
+ /// one of the matches could be returned. If the value is not found then
+ /// [`Result::Err`] is returned, containing the index where a matching
+ /// element could be inserted while maintaining sorted order.
+ ///
+ /// See also [`binary_search`], [`binary_search_by`], and [`partition_point`].
+ ///
+ /// [`contains`]: VecDeque::contains
+ /// [`make_contiguous().sort_by_key()`]: VecDeque::make_contiguous
+ /// [`binary_search`]: VecDeque::binary_search
+ /// [`binary_search_by`]: VecDeque::binary_search_by
+ /// [`partition_point`]: VecDeque::partition_point
+ ///
+ /// # Examples
+ ///
+ /// Looks up a series of four elements in a slice of pairs sorted by
+ /// their second elements. The first is found, with a uniquely
+ /// determined position; the second and third are not found; the
+ /// fourth could match any position in `[1, 4]`.
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let deque: VecDeque<_> = [(0, 0), (2, 1), (4, 1), (5, 1),
+ /// (3, 1), (1, 2), (2, 3), (4, 5), (5, 8), (3, 13),
+ /// (1, 21), (2, 34), (4, 55)].into();
+ ///
+ /// assert_eq!(deque.binary_search_by_key(&13, |&(a, b)| b), Ok(9));
+ /// assert_eq!(deque.binary_search_by_key(&4, |&(a, b)| b), Err(7));
+ /// assert_eq!(deque.binary_search_by_key(&100, |&(a, b)| b), Err(13));
+ /// let r = deque.binary_search_by_key(&1, |&(a, b)| b);
+ /// assert!(matches!(r, Ok(1..=4)));
+ /// ```
+ #[stable(feature = "vecdeque_binary_search", since = "1.54.0")]
+ #[inline]
+ pub fn binary_search_by_key<'a, B, F>(&'a self, b: &B, mut f: F) -> Result<usize, usize>
+ where
+ F: FnMut(&'a T) -> B,
+ B: Ord,
+ {
+ self.binary_search_by(|k| f(k).cmp(b))
+ }
+
+ /// Returns the index of the partition point according to the given predicate
+ /// (the index of the first element of the second partition).
+ ///
+ /// The deque is assumed to be partitioned according to the given predicate.
+ /// This means that all elements for which the predicate returns true are at the start of the deque
+ /// and all elements for which the predicate returns false are at the end.
+    /// For example, `[7, 15, 3, 5, 4, 12, 6]` is partitioned under the predicate `x % 2 != 0`
+ /// (all odd numbers are at the start, all even at the end).
+ ///
+ /// If the deque is not partitioned, the returned result is unspecified and meaningless,
+ /// as this method performs a kind of binary search.
+ ///
+ /// See also [`binary_search`], [`binary_search_by`], and [`binary_search_by_key`].
+ ///
+ /// [`binary_search`]: VecDeque::binary_search
+ /// [`binary_search_by`]: VecDeque::binary_search_by
+ /// [`binary_search_by_key`]: VecDeque::binary_search_by_key
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let deque: VecDeque<_> = [1, 2, 3, 3, 5, 6, 7].into();
+ /// let i = deque.partition_point(|&x| x < 5);
+ ///
+ /// assert_eq!(i, 4);
+ /// assert!(deque.iter().take(i).all(|&x| x < 5));
+ /// assert!(deque.iter().skip(i).all(|&x| !(x < 5)));
+ /// ```
+ ///
+ /// If you want to insert an item to a sorted deque, while maintaining
+ /// sort order:
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut deque: VecDeque<_> = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55].into();
+ /// let num = 42;
+ /// let idx = deque.partition_point(|&x| x < num);
+ /// deque.insert(idx, num);
+ /// assert_eq!(deque, &[0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 42, 55]);
+ /// ```
+ #[stable(feature = "vecdeque_binary_search", since = "1.54.0")]
+ pub fn partition_point<P>(&self, mut pred: P) -> usize
+ where
+ P: FnMut(&T) -> bool,
+ {
+ let (front, back) = self.as_slices();
+
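+        // If the predicate still holds at the start of `back`, it also holds for
+        // all of `front` (the deque is assumed partitioned), so the partition
+        // point must lie within `back`.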
+ if let Some(true) = back.first().map(|v| pred(v)) {
+ back.partition_point(pred) + front.len()
+ } else {
+ front.partition_point(pred)
+ }
+ }
+}
+
+impl<T: Clone, A: Allocator> VecDeque<T, A> {
+    /// Modifies the deque in-place so that `len()` is equal to `new_len`,
+ /// either by removing excess elements from the back or by appending clones of `value`
+ /// to the back.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut buf = VecDeque::new();
+ /// buf.push_back(5);
+ /// buf.push_back(10);
+ /// buf.push_back(15);
+ /// assert_eq!(buf, [5, 10, 15]);
+ ///
+ /// buf.resize(2, 0);
+ /// assert_eq!(buf, [5, 10]);
+ ///
+ /// buf.resize(5, 20);
+ /// assert_eq!(buf, [5, 10, 20, 20, 20]);
+ /// ```
+ #[stable(feature = "deque_extras", since = "1.16.0")]
+ pub fn resize(&mut self, new_len: usize, value: T) {
+ self.resize_with(new_len, || value.clone());
+ }
+}
+
+/// Returns the index in the underlying buffer for a given logical element index.
+#[inline]
+fn wrap_index(index: usize, size: usize) -> usize {
+ // size is always a power of 2
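+    // e.g. with `size == 8` the mask is `0b111`: index 9 wraps to 1 and index 7
+    // stays 7, computing `index % size` with a single AND.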
+ debug_assert!(size.is_power_of_two());
+ index & (size - 1)
+}
+
+/// Calculates the number of elements left to be read in the buffer.
+#[inline]
+fn count(tail: usize, head: usize, size: usize) -> usize {
+ // size is always a power of 2
+ (head.wrapping_sub(tail)) & (size - 1)
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: PartialEq, A: Allocator> PartialEq for VecDeque<T, A> {
+ fn eq(&self, other: &Self) -> bool {
+ if self.len() != other.len() {
+ return false;
+ }
+ let (sa, sb) = self.as_slices();
+ let (oa, ob) = other.as_slices();
+ if sa.len() == oa.len() {
+ sa == oa && sb == ob
+ } else if sa.len() < oa.len() {
+            // Always divisible into three sections, for example:
+ // self: [a b c|d e f]
+ // other: [0 1 2 3|4 5]
+ // front = 3, mid = 1,
+ // [a b c] == [0 1 2] && [d] == [3] && [e f] == [4 5]
+ let front = sa.len();
+ let mid = oa.len() - front;
+
+ let (oa_front, oa_mid) = oa.split_at(front);
+ let (sb_mid, sb_back) = sb.split_at(mid);
+ debug_assert_eq!(sa.len(), oa_front.len());
+ debug_assert_eq!(sb_mid.len(), oa_mid.len());
+ debug_assert_eq!(sb_back.len(), ob.len());
+ sa == oa_front && sb_mid == oa_mid && sb_back == ob
+ } else {
+ let front = oa.len();
+ let mid = sa.len() - front;
+
+ let (sa_front, sa_mid) = sa.split_at(front);
+ let (ob_mid, ob_back) = ob.split_at(mid);
+ debug_assert_eq!(sa_front.len(), oa.len());
+ debug_assert_eq!(sa_mid.len(), ob_mid.len());
+ debug_assert_eq!(sb.len(), ob_back.len());
+ sa_front == oa && sa_mid == ob_mid && sb == ob_back
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Eq, A: Allocator> Eq for VecDeque<T, A> {}
+
+__impl_slice_eq1! { [] VecDeque<T, A>, Vec<U, A>, }
+__impl_slice_eq1! { [] VecDeque<T, A>, &[U], }
+__impl_slice_eq1! { [] VecDeque<T, A>, &mut [U], }
+__impl_slice_eq1! { [const N: usize] VecDeque<T, A>, [U; N], }
+__impl_slice_eq1! { [const N: usize] VecDeque<T, A>, &[U; N], }
+__impl_slice_eq1! { [const N: usize] VecDeque<T, A>, &mut [U; N], }
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: PartialOrd, A: Allocator> PartialOrd for VecDeque<T, A> {
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ self.iter().partial_cmp(other.iter())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Ord, A: Allocator> Ord for VecDeque<T, A> {
+ #[inline]
+ fn cmp(&self, other: &Self) -> Ordering {
+ self.iter().cmp(other.iter())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Hash, A: Allocator> Hash for VecDeque<T, A> {
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ state.write_length_prefix(self.len());
+        // It's not possible to use Hash::hash_slice on the slices returned by
+        // the as_slices method, as their lengths can vary between otherwise
+        // identical deques.
+ //
+ // Hasher only guarantees equivalence for the exact same
+ // set of calls to its methods.
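+        //
+        // For example, two deques that both contain [1, 2, 3] may report
+        // (&[1, 2], &[3]) and (&[1, 2, 3], &[]) from as_slices; hashing
+        // element by element keeps their hashes equal anyway.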
+ self.iter().for_each(|elem| elem.hash(state));
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator> Index<usize> for VecDeque<T, A> {
+ type Output = T;
+
+ #[inline]
+ fn index(&self, index: usize) -> &T {
+ self.get(index).expect("Out of bounds access")
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator> IndexMut<usize> for VecDeque<T, A> {
+ #[inline]
+ fn index_mut(&mut self, index: usize) -> &mut T {
+ self.get_mut(index).expect("Out of bounds access")
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> FromIterator<T> for VecDeque<T> {
+ fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> VecDeque<T> {
+ let iterator = iter.into_iter();
+ let (lower, _) = iterator.size_hint();
+ let mut deq = VecDeque::with_capacity(lower);
+ deq.extend(iterator);
+ deq
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator> IntoIterator for VecDeque<T, A> {
+ type Item = T;
+ type IntoIter = IntoIter<T, A>;
+
+ /// Consumes the deque into a front-to-back iterator yielding elements by
+ /// value.
+ fn into_iter(self) -> IntoIter<T, A> {
+ IntoIter::new(self)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T, A: Allocator> IntoIterator for &'a VecDeque<T, A> {
+ type Item = &'a T;
+ type IntoIter = Iter<'a, T>;
+
+ fn into_iter(self) -> Iter<'a, T> {
+ self.iter()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T, A: Allocator> IntoIterator for &'a mut VecDeque<T, A> {
+ type Item = &'a mut T;
+ type IntoIter = IterMut<'a, T>;
+
+ fn into_iter(self) -> IterMut<'a, T> {
+ self.iter_mut()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator> Extend<T> for VecDeque<T, A> {
+ fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
+ <Self as SpecExtend<T, I::IntoIter>>::spec_extend(self, iter.into_iter());
+ }
+
+ #[inline]
+ fn extend_one(&mut self, elem: T) {
+ self.push_back(elem);
+ }
+
+ #[inline]
+ fn extend_reserve(&mut self, additional: usize) {
+ self.reserve(additional);
+ }
+}
+
+#[stable(feature = "extend_ref", since = "1.2.0")]
+impl<'a, T: 'a + Copy, A: Allocator> Extend<&'a T> for VecDeque<T, A> {
+ fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
+ self.spec_extend(iter.into_iter());
+ }
+
+ #[inline]
+ fn extend_one(&mut self, &elem: &T) {
+ self.push_back(elem);
+ }
+
+ #[inline]
+ fn extend_reserve(&mut self, additional: usize) {
+ self.reserve(additional);
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: fmt::Debug, A: Allocator> fmt::Debug for VecDeque<T, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self).finish()
+ }
+}
+
+#[stable(feature = "vecdeque_vec_conversions", since = "1.10.0")]
+impl<T, A: Allocator> From<Vec<T, A>> for VecDeque<T, A> {
+ /// Turn a [`Vec<T>`] into a [`VecDeque<T>`].
+ ///
+ /// [`Vec<T>`]: crate::vec::Vec
+ /// [`VecDeque<T>`]: crate::collections::VecDeque
+ ///
+ /// This avoids reallocating where possible, but the conditions for that are
+ /// strict, and subject to change, and so shouldn't be relied upon unless the
+ /// `Vec<T>` came from `From<VecDeque<T>>` and hasn't been reallocated.
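+    ///
+    /// # Examples
+    ///
+    /// A minimal usage sketch:
+    ///
+    /// ```
+    /// use std::collections::VecDeque;
+    ///
+    /// let vec = vec![1, 2, 3, 4];
+    /// let deque = VecDeque::from(vec);
+    /// assert_eq!(deque, [1, 2, 3, 4]);
+    /// ```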
+ fn from(mut other: Vec<T, A>) -> Self {
+ let len = other.len();
+ if mem::size_of::<T>() == 0 {
+            // For ZSTs there is no actual allocation, so capacity is not a
+            // concern, but `VecDeque` can't handle as much length as `Vec`.
+ assert!(len < MAXIMUM_ZST_CAPACITY, "capacity overflow");
+ } else {
+            // We need to resize if the capacity is not a power of two, is too small,
+            // or doesn't leave at least one slot free. We do this while the items are
+            // still in the `Vec` so they will be dropped on panic.
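+            // Illustrative numbers: with `len == 5` and `other.capacity() == 5`,
+            // `min_cap == 6` (assuming `MINIMUM_CAPACITY <= 5`) and `cap == 8`,
+            // so we reserve three extra slots before taking over the buffer.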
+ let min_cap = cmp::max(MINIMUM_CAPACITY, len) + 1;
+ let cap = cmp::max(min_cap, other.capacity()).next_power_of_two();
+ if other.capacity() != cap {
+ other.reserve_exact(cap - len);
+ }
+ }
+
+ unsafe {
+ let (other_buf, len, capacity, alloc) = other.into_raw_parts_with_alloc();
+ let buf = RawVec::from_raw_parts_in(other_buf, capacity, alloc);
+ VecDeque { tail: 0, head: len, buf }
+ }
+ }
+}
+
+#[stable(feature = "vecdeque_vec_conversions", since = "1.10.0")]
+impl<T, A: Allocator> From<VecDeque<T, A>> for Vec<T, A> {
+ /// Turn a [`VecDeque<T>`] into a [`Vec<T>`].
+ ///
+ /// [`Vec<T>`]: crate::vec::Vec
+ /// [`VecDeque<T>`]: crate::collections::VecDeque
+ ///
+ /// This never needs to re-allocate, but does need to do *O*(*n*) data movement if
+ /// the circular buffer doesn't happen to be at the beginning of the allocation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// // This one is *O*(1).
+ /// let deque: VecDeque<_> = (1..5).collect();
+ /// let ptr = deque.as_slices().0.as_ptr();
+ /// let vec = Vec::from(deque);
+ /// assert_eq!(vec, [1, 2, 3, 4]);
+ /// assert_eq!(vec.as_ptr(), ptr);
+ ///
+ /// // This one needs data rearranging.
+ /// let mut deque: VecDeque<_> = (1..5).collect();
+ /// deque.push_front(9);
+ /// deque.push_front(8);
+ /// let ptr = deque.as_slices().1.as_ptr();
+ /// let vec = Vec::from(deque);
+ /// assert_eq!(vec, [8, 9, 1, 2, 3, 4]);
+ /// assert_eq!(vec.as_ptr(), ptr);
+ /// ```
+ fn from(mut other: VecDeque<T, A>) -> Self {
+ other.make_contiguous();
+
+ unsafe {
+ let other = ManuallyDrop::new(other);
+ let buf = other.buf.ptr();
+ let len = other.len();
+ let cap = other.cap();
+ let alloc = ptr::read(other.allocator());
+
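+            // `make_contiguous` left the elements in a single run starting at
+            // `tail`; slide it to the start of the allocation so the `Vec`
+            // sees its data at offset 0.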
+ if other.tail != 0 {
+ ptr::copy(buf.add(other.tail), buf, len);
+ }
+ Vec::from_raw_parts_in(buf, len, cap, alloc)
+ }
+ }
+}
+
+#[stable(feature = "std_collections_from_array", since = "1.56.0")]
+impl<T, const N: usize> From<[T; N]> for VecDeque<T> {
+ /// Converts a `[T; N]` into a `VecDeque<T>`.
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let deq1 = VecDeque::from([1, 2, 3, 4]);
+ /// let deq2: VecDeque<_> = [1, 2, 3, 4].into();
+ /// assert_eq!(deq1, deq2);
+ /// ```
+ fn from(arr: [T; N]) -> Self {
+ let mut deq = VecDeque::with_capacity(N);
+ let arr = ManuallyDrop::new(arr);
+ if mem::size_of::<T>() != 0 {
+ // SAFETY: VecDeque::with_capacity ensures that there is enough capacity.
+ unsafe {
+ ptr::copy_nonoverlapping(arr.as_ptr(), deq.ptr(), N);
+ }
+ }
+ deq.tail = 0;
+ deq.head = N;
+ deq
+ }
+}
diff --git a/library/alloc/src/collections/vec_deque/pair_slices.rs b/library/alloc/src/collections/vec_deque/pair_slices.rs
new file mode 100644
index 000000000..6735424a3
--- /dev/null
+++ b/library/alloc/src/collections/vec_deque/pair_slices.rs
@@ -0,0 +1,67 @@
+use core::cmp::{self};
+use core::mem::replace;
+
+use crate::alloc::Allocator;
+
+use super::VecDeque;
+
+/// PairSlices pairs up equal-length slice parts of two deques.
+///
+/// For example, given deques "A" and "B" with the following division into slices:
+///
+/// A: [0 1 2] [3 4 5]
+/// B: [a b] [c d e]
+///
+/// It produces the following sequence of matching slices:
+///
+/// ([0 1], [a b])
+/// (\[2\], \[c\])
+/// ([3 4], [d e])
+///
+/// and the uneven remainder of either A or B is skipped.
+pub struct PairSlices<'a, 'b, T> {
+ a0: &'a mut [T],
+ a1: &'a mut [T],
+ b0: &'b [T],
+ b1: &'b [T],
+}
+
+impl<'a, 'b, T> PairSlices<'a, 'b, T> {
+ pub fn from<A: Allocator>(to: &'a mut VecDeque<T, A>, from: &'b VecDeque<T, A>) -> Self {
+ let (a0, a1) = to.as_mut_slices();
+ let (b0, b1) = from.as_slices();
+ PairSlices { a0, a1, b0, b1 }
+ }
+
+ pub fn has_remainder(&self) -> bool {
+ !self.b0.is_empty()
+ }
+
+ pub fn remainder(self) -> impl Iterator<Item = &'b [T]> {
+ IntoIterator::into_iter([self.b0, self.b1])
+ }
+}
+
+impl<'a, 'b, T> Iterator for PairSlices<'a, 'b, T> {
+ type Item = (&'a mut [T], &'b [T]);
+ fn next(&mut self) -> Option<Self::Item> {
+ // Get next part length
+ let part = cmp::min(self.a0.len(), self.b0.len());
+ if part == 0 {
+ return None;
+ }
+ let (p0, p1) = replace(&mut self.a0, &mut []).split_at_mut(part);
+ let (q0, q1) = self.b0.split_at(part);
+
+        // Advance a0 and b0 past the consumed parts; if a0 is now empty, move
+        // a1 into a0 (and likewise b1 into b0).
+ self.a0 = p1;
+ self.b0 = q1;
+ if self.a0.is_empty() {
+ self.a0 = replace(&mut self.a1, &mut []);
+ }
+ if self.b0.is_empty() {
+ self.b0 = replace(&mut self.b1, &[]);
+ }
+ Some((p0, q0))
+ }
+}
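+
+// Usage sketch (illustrative, not the exact library code): with hypothetical
+// deques `dst` and `src`, a caller such as `VecDeque::clone_from` can walk the
+// equal-length pairs and handle the remainder separately:
+//
+//     let mut pairs = PairSlices::from(&mut dst, &src);
+//     while let Some((a, b)) = pairs.next() {
+//         a.clone_from_slice(b); // same length by construction
+//     }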
diff --git a/library/alloc/src/collections/vec_deque/ring_slices.rs b/library/alloc/src/collections/vec_deque/ring_slices.rs
new file mode 100644
index 000000000..dd0fa7d60
--- /dev/null
+++ b/library/alloc/src/collections/vec_deque/ring_slices.rs
@@ -0,0 +1,56 @@
+use core::ptr::{self};
+
+/// A trait for obtaining the two slices that cover the `VecDeque`'s valid range.
+pub trait RingSlices: Sized {
+ fn slice(self, from: usize, to: usize) -> Self;
+ fn split_at(self, i: usize) -> (Self, Self);
+
+ fn ring_slices(buf: Self, head: usize, tail: usize) -> (Self, Self) {
+ let contiguous = tail <= head;
+ if contiguous {
+ let (empty, buf) = buf.split_at(0);
+ (buf.slice(tail, head), empty)
+ } else {
+ let (mid, right) = buf.split_at(tail);
+ let (left, _) = mid.split_at(head);
+ (right, left)
+ }
+ }
+}
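+
+// A sketch of the two cases on a slice of length 8 (indices illustrative):
+//
+//     let buf: &[i32] = &[0, 1, 2, 3, 4, 5, 6, 7];
+//     // contiguous: tail <= head, all live elements in one slice
+//     assert_eq!(RingSlices::ring_slices(buf, 6, 2), (&buf[2..6], &buf[..0]));
+//     // wrapped: tail > head, the live range continues at the start
+//     assert_eq!(RingSlices::ring_slices(buf, 2, 6), (&buf[6..], &buf[..2]));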
+
+impl<T> RingSlices for &[T] {
+ fn slice(self, from: usize, to: usize) -> Self {
+ &self[from..to]
+ }
+ fn split_at(self, i: usize) -> (Self, Self) {
+ (*self).split_at(i)
+ }
+}
+
+impl<T> RingSlices for &mut [T] {
+ fn slice(self, from: usize, to: usize) -> Self {
+ &mut self[from..to]
+ }
+ fn split_at(self, i: usize) -> (Self, Self) {
+ (*self).split_at_mut(i)
+ }
+}
+
+impl<T> RingSlices for *mut [T] {
+ fn slice(self, from: usize, to: usize) -> Self {
+ assert!(from <= to && to < self.len());
+ // Not using `get_unchecked_mut` to keep this a safe operation.
+ let len = to - from;
+ ptr::slice_from_raw_parts_mut(self.as_mut_ptr().wrapping_add(from), len)
+ }
+
+ fn split_at(self, mid: usize) -> (Self, Self) {
+ let len = self.len();
+ let ptr = self.as_mut_ptr();
+ assert!(mid <= len);
+ (
+ ptr::slice_from_raw_parts_mut(ptr, mid),
+ ptr::slice_from_raw_parts_mut(ptr.wrapping_add(mid), len - mid),
+ )
+ }
+}
diff --git a/library/alloc/src/collections/vec_deque/spec_extend.rs b/library/alloc/src/collections/vec_deque/spec_extend.rs
new file mode 100644
index 000000000..97ff8b765
--- /dev/null
+++ b/library/alloc/src/collections/vec_deque/spec_extend.rs
@@ -0,0 +1,132 @@
+use crate::alloc::Allocator;
+use crate::vec;
+use core::iter::{ByRefSized, TrustedLen};
+use core::slice;
+
+use super::VecDeque;
+
+// Specialization trait used for VecDeque::extend
+pub(super) trait SpecExtend<T, I> {
+ fn spec_extend(&mut self, iter: I);
+}
+
+impl<T, I, A: Allocator> SpecExtend<T, I> for VecDeque<T, A>
+where
+ I: Iterator<Item = T>,
+{
+ default fn spec_extend(&mut self, mut iter: I) {
+ // This function should be the moral equivalent of:
+ //
+ // for item in iter {
+ // self.push_back(item);
+ // }
+ while let Some(element) = iter.next() {
+ if self.len() == self.capacity() {
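+                // Reserve at least the iterator's remaining lower bound plus
+                // one (for the element already in hand) to amortize growth.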
+ let (lower, _) = iter.size_hint();
+ self.reserve(lower.saturating_add(1));
+ }
+
+ let head = self.head;
+ self.head = self.wrap_add(self.head, 1);
+ unsafe {
+ self.buffer_write(head, element);
+ }
+ }
+ }
+}
+
+impl<T, I, A: Allocator> SpecExtend<T, I> for VecDeque<T, A>
+where
+ I: TrustedLen<Item = T>,
+{
+ default fn spec_extend(&mut self, mut iter: I) {
+ // This is the case for a TrustedLen iterator.
+ let (low, high) = iter.size_hint();
+ if let Some(additional) = high {
+ debug_assert_eq!(
+ low,
+ additional,
+ "TrustedLen iterator's size hint is not exact: {:?}",
+ (low, high)
+ );
+ self.reserve(additional);
+
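+            // Drop guard: even if `write_iter` below panics partway through,
+            // `head` is still advanced by the number of elements actually
+            // written, so only initialized elements are considered live.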
+ struct WrapAddOnDrop<'a, T, A: Allocator> {
+ vec_deque: &'a mut VecDeque<T, A>,
+ written: usize,
+ }
+
+ impl<'a, T, A: Allocator> Drop for WrapAddOnDrop<'a, T, A> {
+ fn drop(&mut self) {
+ self.vec_deque.head =
+ self.vec_deque.wrap_add(self.vec_deque.head, self.written);
+ }
+ }
+
+ let mut wrapper = WrapAddOnDrop { vec_deque: self, written: 0 };
+
+ let head_room = wrapper.vec_deque.cap() - wrapper.vec_deque.head;
+ unsafe {
+ wrapper.vec_deque.write_iter(
+ wrapper.vec_deque.head,
+ ByRefSized(&mut iter).take(head_room),
+ &mut wrapper.written,
+ );
+
+ if additional > head_room {
+ wrapper.vec_deque.write_iter(0, iter, &mut wrapper.written);
+ }
+ }
+
+ debug_assert_eq!(
+ additional, wrapper.written,
+ "The number of items written to VecDeque doesn't match the TrustedLen size hint"
+ );
+ } else {
+ // Per TrustedLen contract a `None` upper bound means that the iterator length
+ // truly exceeds usize::MAX, which would eventually lead to a capacity overflow anyway.
+ // Since the other branch already panics eagerly (via `reserve()`) we do the same here.
+ // This avoids additional codegen for a fallback code path which would eventually
+ // panic anyway.
+ panic!("capacity overflow");
+ }
+ }
+}
+
+impl<T, A: Allocator> SpecExtend<T, vec::IntoIter<T>> for VecDeque<T, A> {
+ fn spec_extend(&mut self, mut iterator: vec::IntoIter<T>) {
+ let slice = iterator.as_slice();
+ self.reserve(slice.len());
+
+ unsafe {
+ self.copy_slice(self.head, slice);
+ self.head = self.wrap_add(self.head, slice.len());
+ }
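+        // The elements were moved out bitwise by `copy_slice`; forget them in
+        // the iterator so they aren't dropped a second time.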
+ iterator.forget_remaining_elements();
+ }
+}
+
+impl<'a, T: 'a, I, A: Allocator> SpecExtend<&'a T, I> for VecDeque<T, A>
+where
+ I: Iterator<Item = &'a T>,
+ T: Copy,
+{
+ default fn spec_extend(&mut self, iterator: I) {
+ self.spec_extend(iterator.copied())
+ }
+}
+
+impl<'a, T: 'a, A: Allocator> SpecExtend<&'a T, slice::Iter<'a, T>> for VecDeque<T, A>
+where
+ T: Copy,
+{
+ fn spec_extend(&mut self, iterator: slice::Iter<'a, T>) {
+ let slice = iterator.as_slice();
+ self.reserve(slice.len());
+
+ unsafe {
+ self.copy_slice(self.head, slice);
+ self.head = self.wrap_add(self.head, slice.len());
+ }
+ }
+}
diff --git a/library/alloc/src/collections/vec_deque/tests.rs b/library/alloc/src/collections/vec_deque/tests.rs
new file mode 100644
index 000000000..1f2daef21
--- /dev/null
+++ b/library/alloc/src/collections/vec_deque/tests.rs
@@ -0,0 +1,1110 @@
+use core::iter::TrustedLen;
+
+use super::*;
+
+#[bench]
+#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
+fn bench_push_back_100(b: &mut test::Bencher) {
+ let mut deq = VecDeque::with_capacity(101);
+ b.iter(|| {
+ for i in 0..100 {
+ deq.push_back(i);
+ }
+ deq.head = 0;
+ deq.tail = 0;
+ })
+}
+
+#[bench]
+#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
+fn bench_push_front_100(b: &mut test::Bencher) {
+ let mut deq = VecDeque::with_capacity(101);
+ b.iter(|| {
+ for i in 0..100 {
+ deq.push_front(i);
+ }
+ deq.head = 0;
+ deq.tail = 0;
+ })
+}
+
+#[bench]
+#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
+fn bench_pop_back_100(b: &mut test::Bencher) {
+ let mut deq = VecDeque::<i32>::with_capacity(101);
+
+ b.iter(|| {
+ deq.head = 100;
+ deq.tail = 0;
+ while !deq.is_empty() {
+ test::black_box(deq.pop_back());
+ }
+ })
+}
+
+#[bench]
+#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
+fn bench_retain_whole_10000(b: &mut test::Bencher) {
+ let v = (1..100000).collect::<VecDeque<u32>>();
+
+ b.iter(|| {
+ let mut v = v.clone();
+ v.retain(|x| *x > 0)
+ })
+}
+
+#[bench]
+#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
+fn bench_retain_odd_10000(b: &mut test::Bencher) {
+ let v = (1..100000).collect::<VecDeque<u32>>();
+
+ b.iter(|| {
+ let mut v = v.clone();
+ v.retain(|x| x & 1 == 0)
+ })
+}
+
+#[bench]
+#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
+fn bench_retain_half_10000(b: &mut test::Bencher) {
+ let v = (1..100000).collect::<VecDeque<u32>>();
+
+ b.iter(|| {
+ let mut v = v.clone();
+ v.retain(|x| *x > 50000)
+ })
+}
+
+#[bench]
+#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
+fn bench_pop_front_100(b: &mut test::Bencher) {
+ let mut deq = VecDeque::<i32>::with_capacity(101);
+
+ b.iter(|| {
+ deq.head = 100;
+ deq.tail = 0;
+ while !deq.is_empty() {
+ test::black_box(deq.pop_front());
+ }
+ })
+}
+
+#[test]
+fn test_swap_front_back_remove() {
+ fn test(back: bool) {
+        // This test exercises every single combination of tail position and length.
+ // Capacity 15 should be large enough to cover every case.
+ let mut tester = VecDeque::with_capacity(15);
+ let usable_cap = tester.capacity();
+ let final_len = usable_cap / 2;
+
+ for len in 0..final_len {
+ let expected: VecDeque<_> =
+ if back { (0..len).collect() } else { (0..len).rev().collect() };
+ for tail_pos in 0..usable_cap {
+ tester.tail = tail_pos;
+ tester.head = tail_pos;
+ if back {
+ for i in 0..len * 2 {
+ tester.push_front(i);
+ }
+ for i in 0..len {
+ assert_eq!(tester.swap_remove_back(i), Some(len * 2 - 1 - i));
+ }
+ } else {
+ for i in 0..len * 2 {
+ tester.push_back(i);
+ }
+ for i in 0..len {
+ let idx = tester.len() - 1 - i;
+ assert_eq!(tester.swap_remove_front(idx), Some(len * 2 - 1 - i));
+ }
+ }
+ assert!(tester.tail < tester.cap());
+ assert!(tester.head < tester.cap());
+ assert_eq!(tester, expected);
+ }
+ }
+ }
+ test(true);
+ test(false);
+}
+
+#[test]
+fn test_insert() {
+    // This test exercises every single combination of tail position, length, and
+    // insertion position. Capacity 15 should be large enough to cover every case.
+
+ let mut tester = VecDeque::with_capacity(15);
+    // We can't guarantee we got exactly 15, so we have to use what we got.
+    // 15 would be great, but we will definitely get 2^k - 1 for some k >= 4, or
+    // else this test isn't covering what it wants to cover.
+ let cap = tester.capacity();
+
+ // len is the length *after* insertion
+ let minlen = if cfg!(miri) { cap - 1 } else { 1 }; // Miri is too slow
+ for len in minlen..cap {
+ // 0, 1, 2, .., len - 1
+ let expected = (0..).take(len).collect::<VecDeque<_>>();
+ for tail_pos in 0..cap {
+ for to_insert in 0..len {
+ tester.tail = tail_pos;
+ tester.head = tail_pos;
+ for i in 0..len {
+ if i != to_insert {
+ tester.push_back(i);
+ }
+ }
+ tester.insert(to_insert, to_insert);
+ assert!(tester.tail < tester.cap());
+ assert!(tester.head < tester.cap());
+ assert_eq!(tester, expected);
+ }
+ }
+ }
+}
+
+#[test]
+fn test_get() {
+ let mut tester = VecDeque::new();
+ tester.push_back(1);
+ tester.push_back(2);
+ tester.push_back(3);
+
+ assert_eq!(tester.len(), 3);
+
+ assert_eq!(tester.get(1), Some(&2));
+ assert_eq!(tester.get(2), Some(&3));
+ assert_eq!(tester.get(0), Some(&1));
+ assert_eq!(tester.get(3), None);
+
+ tester.remove(0);
+
+ assert_eq!(tester.len(), 2);
+ assert_eq!(tester.get(0), Some(&2));
+ assert_eq!(tester.get(1), Some(&3));
+ assert_eq!(tester.get(2), None);
+}
+
+#[test]
+fn test_get_mut() {
+ let mut tester = VecDeque::new();
+ tester.push_back(1);
+ tester.push_back(2);
+ tester.push_back(3);
+
+ assert_eq!(tester.len(), 3);
+
+ if let Some(elem) = tester.get_mut(0) {
+ assert_eq!(*elem, 1);
+ *elem = 10;
+ }
+
+ if let Some(elem) = tester.get_mut(2) {
+ assert_eq!(*elem, 3);
+ *elem = 30;
+ }
+
+ assert_eq!(tester.get(0), Some(&10));
+ assert_eq!(tester.get(2), Some(&30));
+ assert_eq!(tester.get_mut(3), None);
+
+ tester.remove(2);
+
+ assert_eq!(tester.len(), 2);
+ assert_eq!(tester.get(0), Some(&10));
+ assert_eq!(tester.get(1), Some(&2));
+ assert_eq!(tester.get(2), None);
+}
+
+#[test]
+fn test_swap() {
+ let mut tester = VecDeque::new();
+ tester.push_back(1);
+ tester.push_back(2);
+ tester.push_back(3);
+
+ assert_eq!(tester, [1, 2, 3]);
+
+ tester.swap(0, 0);
+ assert_eq!(tester, [1, 2, 3]);
+ tester.swap(0, 1);
+ assert_eq!(tester, [2, 1, 3]);
+ tester.swap(2, 1);
+ assert_eq!(tester, [2, 3, 1]);
+ tester.swap(1, 2);
+ assert_eq!(tester, [2, 1, 3]);
+ tester.swap(0, 2);
+ assert_eq!(tester, [3, 1, 2]);
+ tester.swap(2, 2);
+ assert_eq!(tester, [3, 1, 2]);
+}
+
+#[test]
+#[should_panic = "assertion failed: j < self.len()"]
+fn test_swap_panic() {
+ let mut tester = VecDeque::new();
+ tester.push_back(1);
+ tester.push_back(2);
+ tester.push_back(3);
+ tester.swap(2, 3);
+}
+
+#[test]
+fn test_reserve_exact() {
+ let mut tester: VecDeque<i32> = VecDeque::with_capacity(1);
+ assert!(tester.capacity() == 1);
+ tester.reserve_exact(50);
+ assert!(tester.capacity() >= 51);
+ tester.reserve_exact(40);
+ assert!(tester.capacity() >= 51);
+ tester.reserve_exact(200);
+ assert!(tester.capacity() >= 200);
+}
+
+#[test]
+#[should_panic = "capacity overflow"]
+fn test_reserve_exact_panic() {
+ let mut tester: VecDeque<i32> = VecDeque::new();
+ tester.reserve_exact(usize::MAX);
+}
+
+#[test]
+fn test_try_reserve_exact() {
+ let mut tester: VecDeque<i32> = VecDeque::with_capacity(1);
+ assert!(tester.capacity() == 1);
+ assert_eq!(tester.try_reserve_exact(100), Ok(()));
+ assert!(tester.capacity() >= 100);
+ assert_eq!(tester.try_reserve_exact(50), Ok(()));
+ assert!(tester.capacity() >= 100);
+ assert_eq!(tester.try_reserve_exact(200), Ok(()));
+ assert!(tester.capacity() >= 200);
+ assert_eq!(tester.try_reserve_exact(0), Ok(()));
+ assert!(tester.capacity() >= 200);
+ assert!(tester.try_reserve_exact(usize::MAX).is_err());
+}
+
+#[test]
+fn test_try_reserve() {
+ let mut tester: VecDeque<i32> = VecDeque::with_capacity(1);
+ assert!(tester.capacity() == 1);
+ assert_eq!(tester.try_reserve(100), Ok(()));
+ assert!(tester.capacity() >= 100);
+ assert_eq!(tester.try_reserve(50), Ok(()));
+ assert!(tester.capacity() >= 100);
+ assert_eq!(tester.try_reserve(200), Ok(()));
+ assert!(tester.capacity() >= 200);
+ assert_eq!(tester.try_reserve(0), Ok(()));
+ assert!(tester.capacity() >= 200);
+ assert!(tester.try_reserve(usize::MAX).is_err());
+}
+
+#[test]
+fn test_contains() {
+ let mut tester = VecDeque::new();
+ tester.push_back(1);
+ tester.push_back(2);
+ tester.push_back(3);
+
+ assert!(tester.contains(&1));
+ assert!(tester.contains(&3));
+ assert!(!tester.contains(&0));
+ assert!(!tester.contains(&4));
+ tester.remove(0);
+ assert!(!tester.contains(&1));
+ assert!(tester.contains(&2));
+ assert!(tester.contains(&3));
+}
+
+#[test]
+fn test_rotate_left_right() {
+ let mut tester: VecDeque<_> = (1..=10).collect();
+
+ assert_eq!(tester.len(), 10);
+
+ tester.rotate_left(0);
+ assert_eq!(tester, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
+
+ tester.rotate_right(0);
+ assert_eq!(tester, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
+
+ tester.rotate_left(3);
+ assert_eq!(tester, [4, 5, 6, 7, 8, 9, 10, 1, 2, 3]);
+
+ tester.rotate_right(5);
+ assert_eq!(tester, [9, 10, 1, 2, 3, 4, 5, 6, 7, 8]);
+
+ tester.rotate_left(tester.len());
+ assert_eq!(tester, [9, 10, 1, 2, 3, 4, 5, 6, 7, 8]);
+
+ tester.rotate_right(tester.len());
+ assert_eq!(tester, [9, 10, 1, 2, 3, 4, 5, 6, 7, 8]);
+
+ tester.rotate_left(1);
+ assert_eq!(tester, [10, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
+}
+
+#[test]
+#[should_panic = "assertion failed: mid <= self.len()"]
+fn test_rotate_left_panic() {
+ let mut tester: VecDeque<_> = (1..=10).collect();
+ tester.rotate_left(tester.len() + 1);
+}
+
+#[test]
+#[should_panic = "assertion failed: k <= self.len()"]
+fn test_rotate_right_panic() {
+ let mut tester: VecDeque<_> = (1..=10).collect();
+ tester.rotate_right(tester.len() + 1);
+}
+
+#[test]
+fn test_binary_search() {
+    // If the given VecDeque is not sorted, the returned result is unspecified and meaningless,
+ // as this method performs a binary search.
+
+ let tester: VecDeque<_> = [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55].into();
+
+ assert_eq!(tester.binary_search(&0), Ok(0));
+ assert_eq!(tester.binary_search(&5), Ok(5));
+ assert_eq!(tester.binary_search(&55), Ok(10));
+ assert_eq!(tester.binary_search(&4), Err(5));
+ assert_eq!(tester.binary_search(&-1), Err(0));
+ assert!(matches!(tester.binary_search(&1), Ok(1..=2)));
+
+ let tester: VecDeque<_> = [1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3].into();
+ assert_eq!(tester.binary_search(&1), Ok(0));
+ assert!(matches!(tester.binary_search(&2), Ok(1..=4)));
+ assert!(matches!(tester.binary_search(&3), Ok(5..=13)));
+ assert_eq!(tester.binary_search(&-2), Err(0));
+ assert_eq!(tester.binary_search(&0), Err(0));
+ assert_eq!(tester.binary_search(&4), Err(14));
+ assert_eq!(tester.binary_search(&5), Err(14));
+}
+
+#[test]
+fn test_binary_search_by() {
+    // If the given VecDeque is not sorted, the returned result is unspecified and meaningless,
+ // as this method performs a binary search.
+
+ let tester: VecDeque<_> = [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55].into();
+
+ assert_eq!(tester.binary_search_by(|x| x.cmp(&0)), Ok(0));
+ assert_eq!(tester.binary_search_by(|x| x.cmp(&5)), Ok(5));
+ assert_eq!(tester.binary_search_by(|x| x.cmp(&55)), Ok(10));
+ assert_eq!(tester.binary_search_by(|x| x.cmp(&4)), Err(5));
+ assert_eq!(tester.binary_search_by(|x| x.cmp(&-1)), Err(0));
+ assert!(matches!(tester.binary_search_by(|x| x.cmp(&1)), Ok(1..=2)));
+}
+
+#[test]
+fn test_binary_search_key() {
+    // If the given VecDeque is not sorted, the returned result is unspecified and meaningless,
+ // as this method performs a binary search.
+
+ let tester: VecDeque<_> = [
+ (-1, 0),
+ (2, 10),
+ (6, 5),
+ (7, 1),
+ (8, 10),
+ (10, 2),
+ (20, 3),
+ (24, 5),
+ (25, 18),
+ (28, 13),
+ (31, 21),
+ (32, 4),
+ (54, 25),
+ ]
+ .into();
+
+ assert_eq!(tester.binary_search_by_key(&-1, |&(a, _b)| a), Ok(0));
+ assert_eq!(tester.binary_search_by_key(&8, |&(a, _b)| a), Ok(4));
+ assert_eq!(tester.binary_search_by_key(&25, |&(a, _b)| a), Ok(8));
+ assert_eq!(tester.binary_search_by_key(&54, |&(a, _b)| a), Ok(12));
+ assert_eq!(tester.binary_search_by_key(&-2, |&(a, _b)| a), Err(0));
+ assert_eq!(tester.binary_search_by_key(&1, |&(a, _b)| a), Err(1));
+ assert_eq!(tester.binary_search_by_key(&4, |&(a, _b)| a), Err(2));
+ assert_eq!(tester.binary_search_by_key(&13, |&(a, _b)| a), Err(6));
+ assert_eq!(tester.binary_search_by_key(&55, |&(a, _b)| a), Err(13));
+ assert_eq!(tester.binary_search_by_key(&100, |&(a, _b)| a), Err(13));
+
+ let tester: VecDeque<_> = [
+ (0, 0),
+ (2, 1),
+ (6, 1),
+ (5, 1),
+ (3, 1),
+ (1, 2),
+ (2, 3),
+ (4, 5),
+ (5, 8),
+ (8, 13),
+ (1, 21),
+ (2, 34),
+ (4, 55),
+ ]
+ .into();
+
+ assert_eq!(tester.binary_search_by_key(&0, |&(_a, b)| b), Ok(0));
+ assert!(matches!(tester.binary_search_by_key(&1, |&(_a, b)| b), Ok(1..=4)));
+ assert_eq!(tester.binary_search_by_key(&8, |&(_a, b)| b), Ok(8));
+ assert_eq!(tester.binary_search_by_key(&13, |&(_a, b)| b), Ok(9));
+ assert_eq!(tester.binary_search_by_key(&55, |&(_a, b)| b), Ok(12));
+ assert_eq!(tester.binary_search_by_key(&-1, |&(_a, b)| b), Err(0));
+ assert_eq!(tester.binary_search_by_key(&4, |&(_a, b)| b), Err(7));
+ assert_eq!(tester.binary_search_by_key(&56, |&(_a, b)| b), Err(13));
+ assert_eq!(tester.binary_search_by_key(&100, |&(_a, b)| b), Err(13));
+}
+
+#[test]
+fn make_contiguous_big_tail() {
+ let mut tester = VecDeque::with_capacity(15);
+
+ for i in 0..3 {
+ tester.push_back(i);
+ }
+
+ for i in 3..10 {
+ tester.push_front(i);
+ }
+
+ // 012......9876543
+ assert_eq!(tester.capacity(), 15);
+ assert_eq!((&[9, 8, 7, 6, 5, 4, 3] as &[_], &[0, 1, 2] as &[_]), tester.as_slices());
+
+ let expected_start = tester.head;
+ tester.make_contiguous();
+ assert_eq!(tester.tail, expected_start);
+ assert_eq!((&[9, 8, 7, 6, 5, 4, 3, 0, 1, 2] as &[_], &[] as &[_]), tester.as_slices());
+}
+
+#[test]
+fn make_contiguous_big_head() {
+ let mut tester = VecDeque::with_capacity(15);
+
+ for i in 0..8 {
+ tester.push_back(i);
+ }
+
+ for i in 8..10 {
+ tester.push_front(i);
+ }
+
+ // 01234567......98
+ let expected_start = 0;
+ tester.make_contiguous();
+ assert_eq!(tester.tail, expected_start);
+ assert_eq!((&[9, 8, 0, 1, 2, 3, 4, 5, 6, 7] as &[_], &[] as &[_]), tester.as_slices());
+}
+
+#[test]
+fn make_contiguous_small_free() {
+ let mut tester = VecDeque::with_capacity(15);
+
+ for i in 'A' as u8..'I' as u8 {
+ tester.push_back(i as char);
+ }
+
+ for i in 'I' as u8..'N' as u8 {
+ tester.push_front(i as char);
+ }
+
+ // ABCDEFGH...MLKJI
+ let expected_start = 0;
+ tester.make_contiguous();
+ assert_eq!(tester.tail, expected_start);
+ assert_eq!(
+ (&['M', 'L', 'K', 'J', 'I', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'] as &[_], &[] as &[_]),
+ tester.as_slices()
+ );
+
+ tester.clear();
+ for i in 'I' as u8..'N' as u8 {
+ tester.push_back(i as char);
+ }
+
+ for i in 'A' as u8..'I' as u8 {
+ tester.push_front(i as char);
+ }
+
+ // IJKLM...HGFEDCBA
+ let expected_start = 0;
+ tester.make_contiguous();
+ assert_eq!(tester.tail, expected_start);
+ assert_eq!(
+ (&['H', 'G', 'F', 'E', 'D', 'C', 'B', 'A', 'I', 'J', 'K', 'L', 'M'] as &[_], &[] as &[_]),
+ tester.as_slices()
+ );
+}
+
+#[test]
+fn make_contiguous_head_to_end() {
+ let mut dq = VecDeque::with_capacity(3);
+ dq.push_front('B');
+ dq.push_front('A');
+ dq.push_back('C');
+ dq.make_contiguous();
+ let expected_tail = 0;
+ let expected_head = 3;
+ assert_eq!(expected_tail, dq.tail);
+ assert_eq!(expected_head, dq.head);
+ assert_eq!((&['A', 'B', 'C'] as &[_], &[] as &[_]), dq.as_slices());
+}
+
+#[test]
+fn make_contiguous_head_to_end_2() {
+ // Another test case for #79808, taken from #80293.
+
+ let mut dq = VecDeque::from_iter(0..6);
+ dq.pop_front();
+ dq.pop_front();
+ dq.push_back(6);
+ dq.push_back(7);
+ dq.push_back(8);
+ dq.make_contiguous();
+ let collected: Vec<_> = dq.iter().copied().collect();
+ assert_eq!(dq.as_slices(), (&collected[..], &[] as &[_]));
+}
+
+#[test]
+fn test_remove() {
+    // This test exercises every single combination of tail position, length, and
+    // removal position. Capacity 15 should be large enough to cover every case.
+
+ let mut tester = VecDeque::with_capacity(15);
+    // We can't guarantee we got exactly 15, so we have to use what we got.
+    // 15 would be great, but we will definitely get 2^k - 1 for some k >= 4, or
+    // else this test isn't covering what it wants to cover.
+ let cap = tester.capacity();
+
+ // len is the length *after* removal
+ let minlen = if cfg!(miri) { cap - 2 } else { 0 }; // Miri is too slow
+ for len in minlen..cap - 1 {
+ // 0, 1, 2, .., len - 1
+ let expected = (0..).take(len).collect::<VecDeque<_>>();
+ for tail_pos in 0..cap {
+ for to_remove in 0..=len {
+ tester.tail = tail_pos;
+ tester.head = tail_pos;
+ for i in 0..len {
+ if i == to_remove {
+ tester.push_back(1234);
+ }
+ tester.push_back(i);
+ }
+ if to_remove == len {
+ tester.push_back(1234);
+ }
+ tester.remove(to_remove);
+ assert!(tester.tail < tester.cap());
+ assert!(tester.head < tester.cap());
+ assert_eq!(tester, expected);
+ }
+ }
+ }
+}
+
+#[test]
+fn test_range() {
+ let mut tester: VecDeque<usize> = VecDeque::with_capacity(7);
+
+ let cap = tester.capacity();
+ let minlen = if cfg!(miri) { cap - 1 } else { 0 }; // Miri is too slow
+ for len in minlen..=cap {
+ for tail in 0..=cap {
+ for start in 0..=len {
+ for end in start..=len {
+ tester.tail = tail;
+ tester.head = tail;
+ for i in 0..len {
+ tester.push_back(i);
+ }
+
+ // Check that we iterate over the correct values
+ let range: VecDeque<_> = tester.range(start..end).copied().collect();
+ let expected: VecDeque<_> = (start..end).collect();
+ assert_eq!(range, expected);
+ }
+ }
+ }
+ }
+}
+
+#[test]
+fn test_range_mut() {
+ let mut tester: VecDeque<usize> = VecDeque::with_capacity(7);
+
+ let cap = tester.capacity();
+ for len in 0..=cap {
+ for tail in 0..=cap {
+ for start in 0..=len {
+ for end in start..=len {
+ tester.tail = tail;
+ tester.head = tail;
+ for i in 0..len {
+ tester.push_back(i);
+ }
+
+ let head_was = tester.head;
+ let tail_was = tester.tail;
+
+ // Check that we iterate over the correct values
+ let range: VecDeque<_> = tester.range_mut(start..end).map(|v| *v).collect();
+ let expected: VecDeque<_> = (start..end).collect();
+ assert_eq!(range, expected);
+
+ // We shouldn't have changed the capacity or made the
+ // head or tail out of bounds
+ assert_eq!(tester.capacity(), cap);
+ assert_eq!(tester.tail, tail_was);
+ assert_eq!(tester.head, head_was);
+ }
+ }
+ }
+ }
+}
+
+#[test]
+fn test_drain() {
+ let mut tester: VecDeque<usize> = VecDeque::with_capacity(7);
+
+ let cap = tester.capacity();
+ for len in 0..=cap {
+ for tail in 0..=cap {
+ for drain_start in 0..=len {
+ for drain_end in drain_start..=len {
+ tester.tail = tail;
+ tester.head = tail;
+ for i in 0..len {
+ tester.push_back(i);
+ }
+
+ // Check that we drain the correct values
+ let drained: VecDeque<_> = tester.drain(drain_start..drain_end).collect();
+ let drained_expected: VecDeque<_> = (drain_start..drain_end).collect();
+ assert_eq!(drained, drained_expected);
+
+ // We shouldn't have changed the capacity or made the
+ // head or tail out of bounds
+ assert_eq!(tester.capacity(), cap);
+ assert!(tester.tail < tester.cap());
+ assert!(tester.head < tester.cap());
+
+ // We should see the correct values in the VecDeque
+ let expected: VecDeque<_> = (0..drain_start).chain(drain_end..len).collect();
+ assert_eq!(expected, tester);
+ }
+ }
+ }
+ }
+}
+
+#[test]
+fn test_shrink_to_fit() {
+    // This test exercises every single combination of head and tail position.
+    // Capacity 15 should be large enough to cover every case.
+
+ let mut tester = VecDeque::with_capacity(15);
+    // We can't guarantee we got exactly 15, so we have to use what we got.
+    // 15 would be great, but we will definitely get 2^k - 1 for some k >= 4, or
+    // else this test isn't covering what it wants to cover.
+ let cap = tester.capacity();
+ tester.reserve(63);
+ let max_cap = tester.capacity();
+
+ for len in 0..=cap {
+ // 0, 1, 2, .., len - 1
+ let expected = (0..).take(len).collect::<VecDeque<_>>();
+ for tail_pos in 0..=max_cap {
+ tester.tail = tail_pos;
+ tester.head = tail_pos;
+ tester.reserve(63);
+ for i in 0..len {
+ tester.push_back(i);
+ }
+ tester.shrink_to_fit();
+ assert!(tester.capacity() <= cap);
+ assert!(tester.tail < tester.cap());
+ assert!(tester.head < tester.cap());
+ assert_eq!(tester, expected);
+ }
+ }
+}
+
+#[test]
+fn test_split_off() {
+    // This test exercises every single combination of tail position, length, and
+    // split position. Capacity 15 should be large enough to cover every case.
+
+ let mut tester = VecDeque::with_capacity(15);
+    // We can't guarantee we got exactly 15, so we have to use what we got.
+    // 15 would be great, but we will definitely get 2^k - 1 for some k >= 4, or
+    // else this test isn't covering what it wants to cover.
+ let cap = tester.capacity();
+
+ // len is the length *before* splitting
+ let minlen = if cfg!(miri) { cap - 1 } else { 0 }; // Miri is too slow
+ for len in minlen..cap {
+ // index to split at
+ for at in 0..=len {
+ // 0, 1, 2, .., at - 1 (may be empty)
+ let expected_self = (0..).take(at).collect::<VecDeque<_>>();
+ // at, at + 1, .., len - 1 (may be empty)
+ let expected_other = (at..).take(len - at).collect::<VecDeque<_>>();
+
+ for tail_pos in 0..cap {
+ tester.tail = tail_pos;
+ tester.head = tail_pos;
+ for i in 0..len {
+ tester.push_back(i);
+ }
+ let result = tester.split_off(at);
+ assert!(tester.tail < tester.cap());
+ assert!(tester.head < tester.cap());
+ assert!(result.tail < result.cap());
+ assert!(result.head < result.cap());
+ assert_eq!(tester, expected_self);
+ assert_eq!(result, expected_other);
+ }
+ }
+ }
+}
+
+#[test]
+fn test_from_vec() {
+ use crate::vec::Vec;
+ for cap in 0..35 {
+ for len in 0..=cap {
+ let mut vec = Vec::with_capacity(cap);
+ vec.extend(0..len);
+
+ let vd = VecDeque::from(vec.clone());
+ assert!(vd.cap().is_power_of_two());
+ assert_eq!(vd.len(), vec.len());
+ assert!(vd.into_iter().eq(vec));
+ }
+ }
+
+ let vec = Vec::from([(); MAXIMUM_ZST_CAPACITY - 1]);
+ let vd = VecDeque::from(vec.clone());
+ assert!(vd.cap().is_power_of_two());
+ assert_eq!(vd.len(), vec.len());
+}
+
+#[test]
+fn test_extend_basic() {
+ test_extend_impl(false);
+}
+
+#[test]
+fn test_extend_trusted_len() {
+ test_extend_impl(true);
+}
+
+fn test_extend_impl(trusted_len: bool) {
+ struct VecDequeTester {
+ test: VecDeque<usize>,
+ expected: VecDeque<usize>,
+ trusted_len: bool,
+ }
+
+ impl VecDequeTester {
+ fn new(trusted_len: bool) -> Self {
+ Self { test: VecDeque::new(), expected: VecDeque::new(), trusted_len }
+ }
+
+ fn test_extend<I>(&mut self, iter: I)
+ where
+ I: Iterator<Item = usize> + TrustedLen + Clone,
+ {
+ struct BasicIterator<I>(I);
+ impl<I> Iterator for BasicIterator<I>
+ where
+ I: Iterator<Item = usize>,
+ {
+ type Item = usize;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ self.0.next()
+ }
+ }
+
+ if self.trusted_len {
+ self.test.extend(iter.clone());
+ } else {
+ self.test.extend(BasicIterator(iter.clone()));
+ }
+
+ for item in iter {
+ self.expected.push_back(item)
+ }
+
+ assert_eq!(self.test, self.expected);
+ let (a1, b1) = self.test.as_slices();
+ let (a2, b2) = self.expected.as_slices();
+ assert_eq!(a1, a2);
+ assert_eq!(b1, b2);
+ }
+
+ fn drain<R: RangeBounds<usize> + Clone>(&mut self, range: R) {
+ self.test.drain(range.clone());
+ self.expected.drain(range);
+
+ assert_eq!(self.test, self.expected);
+ }
+
+ fn clear(&mut self) {
+ self.test.clear();
+ self.expected.clear();
+ }
+
+ fn remaining_capacity(&self) -> usize {
+ self.test.capacity() - self.test.len()
+ }
+ }
+
+ let mut tester = VecDequeTester::new(trusted_len);
+
+ // Initial capacity
+ tester.test_extend(0..tester.remaining_capacity() - 1);
+
+ // Grow
+ tester.test_extend(1024..2048);
+
+ // Wrap around
+ tester.drain(..128);
+
+ tester.test_extend(0..tester.remaining_capacity() - 1);
+
+ // Continue
+ tester.drain(256..);
+ tester.test_extend(4096..8196);
+
+ tester.clear();
+
+ // Start again
+ tester.test_extend(0..32);
+}
+
+#[test]
+#[should_panic = "capacity overflow"]
+fn test_from_vec_zst_overflow() {
+ use crate::vec::Vec;
+ let vec = Vec::from([(); MAXIMUM_ZST_CAPACITY]);
+ let vd = VecDeque::from(vec.clone()); // no room for +1
+ assert!(vd.cap().is_power_of_two());
+ assert_eq!(vd.len(), vec.len());
+}
+
+#[test]
+fn test_from_array() {
+ fn test<const N: usize>() {
+ let mut array: [usize; N] = [0; N];
+
+ for i in 0..N {
+ array[i] = i;
+ }
+
+ let deq: VecDeque<_> = array.into();
+
+ for i in 0..N {
+ assert_eq!(deq[i], i);
+ }
+
+ assert!(deq.cap().is_power_of_two());
+ assert_eq!(deq.len(), N);
+ }
+ test::<0>();
+ test::<1>();
+ test::<2>();
+ test::<32>();
+ test::<35>();
+
+ let array = [(); MAXIMUM_ZST_CAPACITY - 1];
+ let deq = VecDeque::from(array);
+ assert!(deq.cap().is_power_of_two());
+ assert_eq!(deq.len(), MAXIMUM_ZST_CAPACITY - 1);
+}
+
+#[test]
+fn test_vec_from_vecdeque() {
+ use crate::vec::Vec;
+
+ fn create_vec_and_test_convert(capacity: usize, offset: usize, len: usize) {
+ let mut vd = VecDeque::with_capacity(capacity);
+ for _ in 0..offset {
+ vd.push_back(0);
+ vd.pop_front();
+ }
+ vd.extend(0..len);
+
+ let vec: Vec<_> = Vec::from(vd.clone());
+ assert_eq!(vec.len(), vd.len());
+ assert!(vec.into_iter().eq(vd));
+ }
+
+ // Miri is too slow
+ let max_pwr = if cfg!(miri) { 5 } else { 7 };
+
+ for cap_pwr in 0..max_pwr {
+ // Make capacity as a (2^x)-1, so that the ring size is 2^x
+ let cap = (2i32.pow(cap_pwr) - 1) as usize;
+
+ // In these cases there is enough free space to solve it with copies
+ for len in 0..((cap + 1) / 2) {
+ // Test contiguous cases
+ for offset in 0..(cap - len) {
+ create_vec_and_test_convert(cap, offset, len)
+ }
+
+ // Test cases where block at end of buffer is bigger than block at start
+ for offset in (cap - len)..(cap - (len / 2)) {
+ create_vec_and_test_convert(cap, offset, len)
+ }
+
+ // Test cases where block at start of buffer is bigger than block at end
+ for offset in (cap - (len / 2))..cap {
+ create_vec_and_test_convert(cap, offset, len)
+ }
+ }
+
+        // Now there's not (necessarily) space to straighten the ring with simple copies;
+        // the ring will use swapping when:
+        // (cap + 1 - offset) > (cap + 1 - len) && (len - (cap + 1 - offset)) > (cap + 1 - len)
+ // right block size > free space && left block size > free space
+ for len in ((cap + 1) / 2)..cap {
+ // Test contiguous cases
+ for offset in 0..(cap - len) {
+ create_vec_and_test_convert(cap, offset, len)
+ }
+
+ // Test cases where block at end of buffer is bigger than block at start
+ for offset in (cap - len)..(cap - (len / 2)) {
+ create_vec_and_test_convert(cap, offset, len)
+ }
+
+ // Test cases where block at start of buffer is bigger than block at end
+ for offset in (cap - (len / 2))..cap {
+ create_vec_and_test_convert(cap, offset, len)
+ }
+ }
+ }
+}
+
+#[test]
+fn test_clone_from() {
+ let m = vec![1; 8];
+ let n = vec![2; 12];
+ let limit = if cfg!(miri) { 4 } else { 8 }; // Miri is too slow
+ for pfv in 0..limit {
+ for pfu in 0..limit {
+ for longer in 0..2 {
+ let (vr, ur) = if longer == 0 { (&m, &n) } else { (&n, &m) };
+ let mut v = VecDeque::from(vr.clone());
+ for _ in 0..pfv {
+ v.push_front(1);
+ }
+ let mut u = VecDeque::from(ur.clone());
+ for _ in 0..pfu {
+ u.push_front(2);
+ }
+ v.clone_from(&u);
+ assert_eq!(&v, &u);
+ }
+ }
+ }
+}
+
+#[test]
+fn test_vec_deque_truncate_drop() {
+ static mut DROPS: u32 = 0;
+ #[derive(Clone)]
+ struct Elem(i32);
+ impl Drop for Elem {
+ fn drop(&mut self) {
+ unsafe {
+ DROPS += 1;
+ }
+ }
+ }
+
+ let v = vec![Elem(1), Elem(2), Elem(3), Elem(4), Elem(5)];
+ for push_front in 0..=v.len() {
+ let v = v.clone();
+ let mut tester = VecDeque::with_capacity(5);
+ for (index, elem) in v.into_iter().enumerate() {
+ if index < push_front {
+ tester.push_front(elem);
+ } else {
+ tester.push_back(elem);
+ }
+ }
+ assert_eq!(unsafe { DROPS }, 0);
+ tester.truncate(3);
+ assert_eq!(unsafe { DROPS }, 2);
+ tester.truncate(0);
+ assert_eq!(unsafe { DROPS }, 5);
+ unsafe {
+ DROPS = 0;
+ }
+ }
+}
+
+#[test]
+fn issue_53529() {
+ use crate::boxed::Box;
+
+ let mut dst = VecDeque::new();
+ dst.push_front(Box::new(1));
+ dst.push_front(Box::new(2));
+ assert_eq!(*dst.pop_back().unwrap(), 1);
+
+ let mut src = VecDeque::new();
+ src.push_front(Box::new(2));
+ dst.append(&mut src);
+ for a in dst {
+ assert_eq!(*a, 2);
+ }
+}
+
+#[test]
+fn issue_80303() {
+ use core::iter;
+ use core::num::Wrapping;
+
+ // This is a valid, albeit rather bad hash function implementation.
+ struct SimpleHasher(Wrapping<u64>);
+
+ impl Hasher for SimpleHasher {
+ fn finish(&self) -> u64 {
+ self.0.0
+ }
+
+ fn write(&mut self, bytes: &[u8]) {
+ // This particular implementation hashes value 24 in addition to bytes.
+ // Such an implementation is valid as Hasher only guarantees equivalence
+ // for the exact same set of calls to its methods.
+ for &v in iter::once(&24).chain(bytes) {
+ self.0 = Wrapping(31) * self.0 + Wrapping(u64::from(v));
+ }
+ }
+ }
+
+ fn hash_code(value: impl Hash) -> u64 {
+ let mut hasher = SimpleHasher(Wrapping(1));
+ value.hash(&mut hasher);
+ hasher.finish()
+ }
+
+ // This creates two deques for which values returned by as_slices
+ // method differ.
+ let vda: VecDeque<u8> = (0..10).collect();
+ let mut vdb = VecDeque::with_capacity(10);
+ vdb.extend(5..10);
+ (0..5).rev().for_each(|elem| vdb.push_front(elem));
+ assert_ne!(vda.as_slices(), vdb.as_slices());
+ assert_eq!(vda, vdb);
+ assert_eq!(hash_code(vda), hash_code(vdb));
+}