path: root/library/alloc

Diffstat
-rw-r--r--  library/alloc/Cargo.toml  4
-rw-r--r--  library/alloc/benches/slice.rs  4
-rw-r--r--  library/alloc/src/alloc.rs  30
-rw-r--r--  library/alloc/src/boxed.rs  34
-rw-r--r--  library/alloc/src/boxed/thin.rs  47
-rw-r--r--  library/alloc/src/collections/binary_heap.rs  1721
-rw-r--r--  library/alloc/src/collections/binary_heap/mod.rs  1766
-rw-r--r--  library/alloc/src/collections/binary_heap/tests.rs  121
-rw-r--r--  library/alloc/src/collections/btree/map/tests.rs  6
-rw-r--r--  library/alloc/src/collections/btree/mod.rs  3
-rw-r--r--  library/alloc/src/collections/btree/node.rs  28
-rw-r--r--  library/alloc/src/collections/btree/set/tests.rs  4
-rw-r--r--  library/alloc/src/collections/linked_list/tests.rs  69
-rw-r--r--  library/alloc/src/collections/vec_deque/into_iter.rs  4
-rw-r--r--  library/alloc/src/collections/vec_deque/mod.rs  186
-rw-r--r--  library/alloc/src/collections/vec_deque/spec_from_iter.rs  33
-rw-r--r--  library/alloc/src/collections/vec_deque/tests.rs  42
-rw-r--r--  library/alloc/src/fmt.rs  4
-rw-r--r--  library/alloc/src/lib.rs  30
-rw-r--r--  library/alloc/src/rc.rs  8
-rw-r--r--  library/alloc/src/slice.rs  353
-rw-r--r--  library/alloc/src/slice/tests.rs  359
-rw-r--r--  library/alloc/src/str.rs  14
-rw-r--r--  library/alloc/src/string.rs  13
-rw-r--r--  library/alloc/src/sync.rs  14
-rw-r--r--  library/alloc/src/testing/crash_test.rs (renamed from library/alloc/src/collections/btree/testing/crash_test.rs)  0
-rw-r--r--  library/alloc/src/testing/mod.rs (renamed from library/alloc/src/collections/btree/testing/mod.rs)  0
-rw-r--r--  library/alloc/src/testing/ord_chaos.rs (renamed from library/alloc/src/collections/btree/testing/ord_chaos.rs)  0
-rw-r--r--  library/alloc/src/testing/rng.rs (renamed from library/alloc/src/collections/btree/testing/rng.rs)  0
-rw-r--r--  library/alloc/src/vec/drain.rs  6
-rw-r--r--  library/alloc/src/vec/into_iter.rs  52
-rw-r--r--  library/alloc/src/vec/is_zero.rs  22
-rw-r--r--  library/alloc/src/vec/mod.rs  15
-rw-r--r--  library/alloc/src/vec/splice.rs  6
-rw-r--r--  library/alloc/tests/autotraits.rs  2
-rw-r--r--  library/alloc/tests/lib.rs  1
-rw-r--r--  library/alloc/tests/slice.rs  349
-rw-r--r--  library/alloc/tests/vec.rs  31
-rw-r--r--  library/alloc/tests/vec_deque.rs  50
39 files changed, 2782 insertions, 2649 deletions
diff --git a/library/alloc/Cargo.toml b/library/alloc/Cargo.toml
index 265020209..95c07abf7 100644
--- a/library/alloc/Cargo.toml
+++ b/library/alloc/Cargo.toml
@@ -13,8 +13,8 @@ core = { path = "../core" }
compiler_builtins = { version = "0.1.40", features = ['rustc-dep-of-std'] }
[dev-dependencies]
-rand = "0.7"
-rand_xorshift = "0.2"
+rand = { version = "0.8.5", default-features = false, features = ["alloc"] }
+rand_xorshift = "0.3.0"
[[test]]
name = "collectionstests"
diff --git a/library/alloc/benches/slice.rs b/library/alloc/benches/slice.rs
index bd6f38f2f..b62be9d39 100644
--- a/library/alloc/benches/slice.rs
+++ b/library/alloc/benches/slice.rs
@@ -1,6 +1,6 @@
use std::{mem, ptr};
-use rand::distributions::{Alphanumeric, Standard};
+use rand::distributions::{Alphanumeric, DistString, Standard};
use rand::Rng;
use test::{black_box, Bencher};
@@ -218,7 +218,7 @@ fn gen_strings(len: usize) -> Vec<String> {
let mut v = vec![];
for _ in 0..len {
let n = rng.gen::<usize>() % 20 + 1;
- v.push((&mut rng).sample_iter(&Alphanumeric).take(n).collect());
+ v.push(Alphanumeric.sample_string(&mut rng, n));
}
v
}
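
The two hunks above go together: rand 0.8 (pinned in Cargo.toml above) changed `Alphanumeric` to sample `u8` instead of `char`, so the old `sample_iter(&Alphanumeric).take(n).collect::<String>()` pattern no longer produces a `String`, and the bench switches to `DistString::sample_string`. A minimal standalone sketch of that API, assuming rand 0.8 with its default features enabled (the benchmark itself keeps using the `rng` it already had in scope):

```rust
use rand::distributions::{Alphanumeric, DistString};
use rand::Rng;

fn gen_ascii_strings(count: usize, max_len: usize) -> Vec<String> {
    let mut rng = rand::thread_rng();
    (0..count)
        .map(|_| {
            let n = rng.gen_range(1..=max_len);
            // DistString builds the whole String in one call; no per-char collect needed.
            Alphanumeric.sample_string(&mut rng, n)
        })
        .collect()
}
```
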
diff --git a/library/alloc/src/alloc.rs b/library/alloc/src/alloc.rs
index e5fbfc557..3a797bd5e 100644
--- a/library/alloc/src/alloc.rs
+++ b/library/alloc/src/alloc.rs
@@ -20,10 +20,10 @@ use core::marker::Destruct;
mod tests;
extern "Rust" {
- // These are the magic symbols to call the global allocator. rustc generates
+ // These are the magic symbols to call the global allocator. rustc generates
// them to call `__rg_alloc` etc. if there is a `#[global_allocator]` attribute
// (the code expanding that attribute macro generates those functions), or to call
- // the default implementations in libstd (`__rdl_alloc` etc. in `library/std/src/alloc.rs`)
+ // the default implementations in std (`__rdl_alloc` etc. in `library/std/src/alloc.rs`)
// otherwise.
// The rustc fork of LLVM 14 and earlier also special-cases these function names to be able to optimize them
// like `malloc`, `realloc`, and `free`, respectively.
@@ -353,7 +353,7 @@ pub(crate) const unsafe fn box_free<T: ?Sized, A: ~const Allocator + ~const Dest
#[cfg(not(no_global_oom_handling))]
extern "Rust" {
- // This is the magic symbol to call the global alloc error handler. rustc generates
+ // This is the magic symbol to call the global alloc error handler. rustc generates
// it to call `__rg_oom` if there is a `#[alloc_error_handler]`, or to call the
// default implementations below (`__rdl_oom`) otherwise.
fn __rust_alloc_error_handler(size: usize, align: usize) -> !;
@@ -402,20 +402,20 @@ pub mod __alloc_error_handler {
// `#[alloc_error_handler]`.
#[rustc_std_internal_symbol]
pub unsafe fn __rdl_oom(size: usize, _align: usize) -> ! {
- panic!("memory allocation of {size} bytes failed")
- }
-
- #[cfg(bootstrap)]
- #[rustc_std_internal_symbol]
- pub unsafe fn __rg_oom(size: usize, align: usize) -> ! {
- use crate::alloc::Layout;
-
- let layout = unsafe { Layout::from_size_align_unchecked(size, align) };
extern "Rust" {
- #[lang = "oom"]
- fn oom_impl(layout: Layout) -> !;
+ // This symbol is emitted by rustc next to __rust_alloc_error_handler.
+ // Its value depends on the -Zoom={panic,abort} compiler option.
+ static __rust_alloc_error_handler_should_panic: u8;
+ }
+
+ #[allow(unused_unsafe)]
+ if unsafe { __rust_alloc_error_handler_should_panic != 0 } {
+ panic!("memory allocation of {size} bytes failed")
+ } else {
+ core::panicking::panic_nounwind_fmt(format_args!(
+ "memory allocation of {size} bytes failed"
+ ))
}
- unsafe { oom_impl(layout) }
}
}
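
After this hunk the default handler itself chooses, via the rustc-emitted `__rust_alloc_error_handler_should_panic` flag, between an unwinding panic and a non-unwinding one, replacing the old bootstrap-only `__rg_oom` shim that went through the `oom` lang item. For context, user code normally reaches this machinery through the stable `handle_alloc_error` entry point; the following is an illustrative sketch, not part of the change:

```rust
use std::alloc::{alloc, dealloc, handle_alloc_error, Layout};

/// Allocate `size` bytes (size > 0) or divert into the global alloc-error handler.
fn allocate_or_die(size: usize) -> (*mut u8, Layout) {
    let layout = Layout::from_size_align(size, 8).expect("invalid layout");
    // SAFETY: `layout` has a non-zero size because the caller passes size > 0.
    let ptr = unsafe { alloc(layout) };
    if ptr.is_null() {
        // Diverges via `__rust_alloc_error_handler`, i.e. the default handler in the
        // hunk above unless the program registers its own `#[alloc_error_handler]`.
        handle_alloc_error(layout);
    }
    (ptr, layout)
}

fn main() {
    let (ptr, layout) = allocate_or_die(64);
    // SAFETY: `ptr` was returned by `alloc` with this exact layout.
    unsafe { dealloc(ptr, layout) };
}
```
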
diff --git a/library/alloc/src/boxed.rs b/library/alloc/src/boxed.rs
index e5f6b0c0c..a563b2587 100644
--- a/library/alloc/src/boxed.rs
+++ b/library/alloc/src/boxed.rs
@@ -158,7 +158,6 @@ use core::hash::{Hash, Hasher};
#[cfg(not(no_global_oom_handling))]
use core::iter::FromIterator;
use core::iter::{FusedIterator, Iterator};
-#[cfg(not(bootstrap))]
use core::marker::Tuple;
use core::marker::{Destruct, Unpin, Unsize};
use core::mem;
@@ -954,7 +953,7 @@ impl<T: ?Sized> Box<T> {
/// [`Layout`]: crate::Layout
#[stable(feature = "box_raw", since = "1.4.0")]
#[inline]
- #[must_use = "call `drop(from_raw(ptr))` if you intend to drop the `Box`"]
+ #[must_use = "call `drop(Box::from_raw(ptr))` if you intend to drop the `Box`"]
pub unsafe fn from_raw(raw: *mut T) -> Self {
unsafe { Self::from_raw_in(raw, Global) }
}
@@ -1981,17 +1980,6 @@ impl<I: ExactSizeIterator + ?Sized, A: Allocator> ExactSizeIterator for Box<I, A
#[stable(feature = "fused", since = "1.26.0")]
impl<I: FusedIterator + ?Sized, A: Allocator> FusedIterator for Box<I, A> {}
-#[cfg(bootstrap)]
-#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
-impl<Args, F: FnOnce<Args> + ?Sized, A: Allocator> FnOnce<Args> for Box<F, A> {
- type Output = <F as FnOnce<Args>>::Output;
-
- extern "rust-call" fn call_once(self, args: Args) -> Self::Output {
- <F as FnOnce<Args>>::call_once(*self, args)
- }
-}
-
-#[cfg(not(bootstrap))]
#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
impl<Args: Tuple, F: FnOnce<Args> + ?Sized, A: Allocator> FnOnce<Args> for Box<F, A> {
type Output = <F as FnOnce<Args>>::Output;
@@ -2001,15 +1989,6 @@ impl<Args: Tuple, F: FnOnce<Args> + ?Sized, A: Allocator> FnOnce<Args> for Box<F
}
}
-#[cfg(bootstrap)]
-#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
-impl<Args, F: FnMut<Args> + ?Sized, A: Allocator> FnMut<Args> for Box<F, A> {
- extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output {
- <F as FnMut<Args>>::call_mut(self, args)
- }
-}
-
-#[cfg(not(bootstrap))]
#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
impl<Args: Tuple, F: FnMut<Args> + ?Sized, A: Allocator> FnMut<Args> for Box<F, A> {
extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output {
@@ -2017,15 +1996,6 @@ impl<Args: Tuple, F: FnMut<Args> + ?Sized, A: Allocator> FnMut<Args> for Box<F,
}
}
-#[cfg(bootstrap)]
-#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
-impl<Args, F: Fn<Args> + ?Sized, A: Allocator> Fn<Args> for Box<F, A> {
- extern "rust-call" fn call(&self, args: Args) -> Self::Output {
- <F as Fn<Args>>::call(self, args)
- }
-}
-
-#[cfg(not(bootstrap))]
#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
impl<Args: Tuple, F: Fn<Args> + ?Sized, A: Allocator> Fn<Args> for Box<F, A> {
extern "rust-call" fn call(&self, args: Args) -> Self::Output {
@@ -2033,7 +2003,7 @@ impl<Args: Tuple, F: Fn<Args> + ?Sized, A: Allocator> Fn<Args> for Box<F, A> {
}
}
-#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<T: ?Sized + Unsize<U>, U: ?Sized, A: Allocator> CoerceUnsized<Box<U, A>> for Box<T, A> {}
#[unstable(feature = "dispatch_from_dyn", issue = "none")]
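
The remaining `Fn`/`FnMut`/`FnOnce` impls now require `Args: Tuple` unconditionally, and the duplicated `#[cfg(bootstrap)]` versions are gone. What the impls provide is unchanged and is plain stable Rust: a boxed closure can be called directly. A small illustration (not taken from the diff):

```rust
fn main() {
    // FnMut: the box forwards `call_mut` to the closure it owns.
    let mut counter = 0u32;
    let mut tick: Box<dyn FnMut() -> u32> = Box::new(move || {
        counter += 1;
        counter
    });
    assert_eq!(tick(), 1);
    assert_eq!(tick(), 2);

    // Fn: shared calls work the same way.
    let add_one: Box<dyn Fn(i32) -> i32> = Box::new(|x| x + 1);
    assert_eq!(add_one(41), 42);

    // FnOnce: calling consumes the box (and the closure inside it).
    let consume: Box<dyn FnOnce() -> String> = Box::new(|| String::from("done"));
    assert_eq!(consume(), "done");
}
```
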
diff --git a/library/alloc/src/boxed/thin.rs b/library/alloc/src/boxed/thin.rs
index c477c4490..c1a82e452 100644
--- a/library/alloc/src/boxed/thin.rs
+++ b/library/alloc/src/boxed/thin.rs
@@ -226,24 +226,45 @@ impl<H> WithHeader<H> {
// - Assumes that either `value` can be dereferenced, or is the
// `NonNull::dangling()` we use when both `T` and `H` are ZSTs.
unsafe fn drop<T: ?Sized>(&self, value: *mut T) {
+ struct DropGuard<H> {
+ ptr: NonNull<u8>,
+ value_layout: Layout,
+ _marker: PhantomData<H>,
+ }
+
+ impl<H> Drop for DropGuard<H> {
+ fn drop(&mut self) {
+ unsafe {
+ // SAFETY: Layout must have been computable if we're in drop
+ let (layout, value_offset) =
+ WithHeader::<H>::alloc_layout(self.value_layout).unwrap_unchecked();
+
+ // Note: Don't deallocate if the layout size is zero, because the pointer
+ // didn't come from the allocator.
+ if layout.size() != 0 {
+ alloc::dealloc(self.ptr.as_ptr().sub(value_offset), layout);
+ } else {
+ debug_assert!(
+ value_offset == 0
+ && mem::size_of::<H>() == 0
+ && self.value_layout.size() == 0
+ );
+ }
+ }
+ }
+ }
+
unsafe {
- let value_layout = Layout::for_value_raw(value);
- // SAFETY: Layout must have been computable if we're in drop
- let (layout, value_offset) = Self::alloc_layout(value_layout).unwrap_unchecked();
+ // `_guard` will deallocate the memory when dropped, even if `drop_in_place` unwinds.
+ let _guard = DropGuard {
+ ptr: self.0,
+ value_layout: Layout::for_value_raw(value),
+ _marker: PhantomData::<H>,
+ };
// We only drop the value because the Pointee trait requires that the metadata is copy
// aka trivially droppable.
ptr::drop_in_place::<T>(value);
-
- // Note: Don't deallocate if the layout size is zero, because the pointer
- // didn't come from the allocator.
- if layout.size() != 0 {
- alloc::dealloc(self.0.as_ptr().sub(value_offset), layout);
- } else {
- debug_assert!(
- value_offset == 0 && mem::size_of::<H>() == 0 && value_layout.size() == 0
- );
- }
}
}
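
The rewritten `WithHeader::drop` moves deallocation into a `DropGuard` so the backing allocation is released even if `ptr::drop_in_place` panics and unwinds. The same guard pattern, sketched outside the allocator internals with a plain `Vec` log standing in for the real cleanup:

```rust
use std::panic::{catch_unwind, AssertUnwindSafe};

struct Cleanup<'a> {
    log: &'a mut Vec<&'static str>,
}

impl Drop for Cleanup<'_> {
    fn drop(&mut self) {
        // Runs on normal exit *and* while unwinding from a panic.
        self.log.push("cleaned up");
    }
}

fn do_work(log: &mut Vec<&'static str>, fail: bool) {
    let _guard = Cleanup { log };
    if fail {
        panic!("work failed"); // the guard's Drop still records the cleanup
    }
    // Normal path: the guard drops here and records the cleanup too.
}

fn main() {
    let mut log = Vec::new();
    let _ = catch_unwind(AssertUnwindSafe(|| do_work(&mut log, true)));
    assert_eq!(log, ["cleaned up"]);
}
```
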
diff --git a/library/alloc/src/collections/binary_heap.rs b/library/alloc/src/collections/binary_heap.rs
deleted file mode 100644
index 4583bc9a1..000000000
--- a/library/alloc/src/collections/binary_heap.rs
+++ /dev/null
@@ -1,1721 +0,0 @@
-//! A priority queue implemented with a binary heap.
-//!
-//! Insertion and popping the largest element have *O*(log(*n*)) time complexity.
-//! Checking the largest element is *O*(1). Converting a vector to a binary heap
-//! can be done in-place, and has *O*(*n*) complexity. A binary heap can also be
-//! converted to a sorted vector in-place, allowing it to be used for an *O*(*n* * log(*n*))
-//! in-place heapsort.
-//!
-//! # Examples
-//!
-//! This is a larger example that implements [Dijkstra's algorithm][dijkstra]
-//! to solve the [shortest path problem][sssp] on a [directed graph][dir_graph].
-//! It shows how to use [`BinaryHeap`] with custom types.
-//!
-//! [dijkstra]: https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm
-//! [sssp]: https://en.wikipedia.org/wiki/Shortest_path_problem
-//! [dir_graph]: https://en.wikipedia.org/wiki/Directed_graph
-//!
-//! ```
-//! use std::cmp::Ordering;
-//! use std::collections::BinaryHeap;
-//!
-//! #[derive(Copy, Clone, Eq, PartialEq)]
-//! struct State {
-//! cost: usize,
-//! position: usize,
-//! }
-//!
-//! // The priority queue depends on `Ord`.
-//! // Explicitly implement the trait so the queue becomes a min-heap
-//! // instead of a max-heap.
-//! impl Ord for State {
-//! fn cmp(&self, other: &Self) -> Ordering {
-//! // Notice that we flip the ordering on costs.
-//! // In case of a tie we compare positions - this step is necessary
-//! // to make implementations of `PartialEq` and `Ord` consistent.
-//! other.cost.cmp(&self.cost)
-//! .then_with(|| self.position.cmp(&other.position))
-//! }
-//! }
-//!
-//! // `PartialOrd` needs to be implemented as well.
-//! impl PartialOrd for State {
-//! fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
-//! Some(self.cmp(other))
-//! }
-//! }
-//!
-//! // Each node is represented as a `usize`, for a shorter implementation.
-//! struct Edge {
-//! node: usize,
-//! cost: usize,
-//! }
-//!
-//! // Dijkstra's shortest path algorithm.
-//!
-//! // Start at `start` and use `dist` to track the current shortest distance
-//! // to each node. This implementation isn't memory-efficient as it may leave duplicate
-//! // nodes in the queue. It also uses `usize::MAX` as a sentinel value,
-//! // for a simpler implementation.
-//! fn shortest_path(adj_list: &Vec<Vec<Edge>>, start: usize, goal: usize) -> Option<usize> {
-//! // dist[node] = current shortest distance from `start` to `node`
-//! let mut dist: Vec<_> = (0..adj_list.len()).map(|_| usize::MAX).collect();
-//!
-//! let mut heap = BinaryHeap::new();
-//!
-//! // We're at `start`, with a zero cost
-//! dist[start] = 0;
-//! heap.push(State { cost: 0, position: start });
-//!
-//! // Examine the frontier with lower cost nodes first (min-heap)
-//! while let Some(State { cost, position }) = heap.pop() {
-//! // Alternatively we could have continued to find all shortest paths
-//! if position == goal { return Some(cost); }
-//!
-//! // Important as we may have already found a better way
-//! if cost > dist[position] { continue; }
-//!
-//! // For each node we can reach, see if we can find a way with
-//! // a lower cost going through this node
-//! for edge in &adj_list[position] {
-//! let next = State { cost: cost + edge.cost, position: edge.node };
-//!
-//! // If so, add it to the frontier and continue
-//! if next.cost < dist[next.position] {
-//! heap.push(next);
-//! // Relaxation, we have now found a better way
-//! dist[next.position] = next.cost;
-//! }
-//! }
-//! }
-//!
-//! // Goal not reachable
-//! None
-//! }
-//!
-//! fn main() {
-//! // This is the directed graph we're going to use.
-//! // The node numbers correspond to the different states,
-//! // and the edge weights symbolize the cost of moving
-//! // from one node to another.
-//! // Note that the edges are one-way.
-//! //
-//! // 7
-//! // +-----------------+
-//! // | |
-//! // v 1 2 | 2
-//! // 0 -----> 1 -----> 3 ---> 4
-//! // | ^ ^ ^
-//! // | | 1 | |
-//! // | | | 3 | 1
-//! // +------> 2 -------+ |
-//! // 10 | |
-//! // +---------------+
-//! //
-//! // The graph is represented as an adjacency list where each index,
-//! // corresponding to a node value, has a list of outgoing edges.
-//! // Chosen for its efficiency.
-//! let graph = vec![
-//! // Node 0
-//! vec![Edge { node: 2, cost: 10 },
-//! Edge { node: 1, cost: 1 }],
-//! // Node 1
-//! vec![Edge { node: 3, cost: 2 }],
-//! // Node 2
-//! vec![Edge { node: 1, cost: 1 },
-//! Edge { node: 3, cost: 3 },
-//! Edge { node: 4, cost: 1 }],
-//! // Node 3
-//! vec![Edge { node: 0, cost: 7 },
-//! Edge { node: 4, cost: 2 }],
-//! // Node 4
-//! vec![]];
-//!
-//! assert_eq!(shortest_path(&graph, 0, 1), Some(1));
-//! assert_eq!(shortest_path(&graph, 0, 3), Some(3));
-//! assert_eq!(shortest_path(&graph, 3, 0), Some(7));
-//! assert_eq!(shortest_path(&graph, 0, 4), Some(5));
-//! assert_eq!(shortest_path(&graph, 4, 0), None);
-//! }
-//! ```
-
-#![allow(missing_docs)]
-#![stable(feature = "rust1", since = "1.0.0")]
-
-use core::fmt;
-use core::iter::{FromIterator, FusedIterator, InPlaceIterable, SourceIter, TrustedLen};
-use core::mem::{self, swap, ManuallyDrop};
-use core::ops::{Deref, DerefMut};
-use core::ptr;
-
-use crate::collections::TryReserveError;
-use crate::slice;
-use crate::vec::{self, AsVecIntoIter, Vec};
-
-use super::SpecExtend;
-
-#[cfg(test)]
-mod tests;
-
-/// A priority queue implemented with a binary heap.
-///
-/// This will be a max-heap.
-///
-/// It is a logic error for an item to be modified in such a way that the
-/// item's ordering relative to any other item, as determined by the [`Ord`]
-/// trait, changes while it is in the heap. This is normally only possible
-/// through [`Cell`], [`RefCell`], global state, I/O, or unsafe code. The
-/// behavior resulting from such a logic error is not specified, but will
-/// be encapsulated to the `BinaryHeap` that observed the logic error and not
-/// result in undefined behavior. This could include panics, incorrect results,
-/// aborts, memory leaks, and non-termination.
-///
-/// # Examples
-///
-/// ```
-/// use std::collections::BinaryHeap;
-///
-/// // Type inference lets us omit an explicit type signature (which
-/// // would be `BinaryHeap<i32>` in this example).
-/// let mut heap = BinaryHeap::new();
-///
-/// // We can use peek to look at the next item in the heap. In this case,
-/// // there's no items in there yet so we get None.
-/// assert_eq!(heap.peek(), None);
-///
-/// // Let's add some scores...
-/// heap.push(1);
-/// heap.push(5);
-/// heap.push(2);
-///
-/// // Now peek shows the most important item in the heap.
-/// assert_eq!(heap.peek(), Some(&5));
-///
-/// // We can check the length of a heap.
-/// assert_eq!(heap.len(), 3);
-///
-/// // We can iterate over the items in the heap, although they are returned in
-/// // a random order.
-/// for x in &heap {
-/// println!("{x}");
-/// }
-///
-/// // If we instead pop these scores, they should come back in order.
-/// assert_eq!(heap.pop(), Some(5));
-/// assert_eq!(heap.pop(), Some(2));
-/// assert_eq!(heap.pop(), Some(1));
-/// assert_eq!(heap.pop(), None);
-///
-/// // We can clear the heap of any remaining items.
-/// heap.clear();
-///
-/// // The heap should now be empty.
-/// assert!(heap.is_empty())
-/// ```
-///
-/// A `BinaryHeap` with a known list of items can be initialized from an array:
-///
-/// ```
-/// use std::collections::BinaryHeap;
-///
-/// let heap = BinaryHeap::from([1, 5, 2]);
-/// ```
-///
-/// ## Min-heap
-///
-/// Either [`core::cmp::Reverse`] or a custom [`Ord`] implementation can be used to
-/// make `BinaryHeap` a min-heap. This makes `heap.pop()` return the smallest
-/// value instead of the greatest one.
-///
-/// ```
-/// use std::collections::BinaryHeap;
-/// use std::cmp::Reverse;
-///
-/// let mut heap = BinaryHeap::new();
-///
-/// // Wrap values in `Reverse`
-/// heap.push(Reverse(1));
-/// heap.push(Reverse(5));
-/// heap.push(Reverse(2));
-///
-/// // If we pop these scores now, they should come back in the reverse order.
-/// assert_eq!(heap.pop(), Some(Reverse(1)));
-/// assert_eq!(heap.pop(), Some(Reverse(2)));
-/// assert_eq!(heap.pop(), Some(Reverse(5)));
-/// assert_eq!(heap.pop(), None);
-/// ```
-///
-/// # Time complexity
-///
-/// | [push] | [pop] | [peek]/[peek\_mut] |
-/// |---------|---------------|--------------------|
-/// | *O*(1)~ | *O*(log(*n*)) | *O*(1) |
-///
-/// The value for `push` is an expected cost; the method documentation gives a
-/// more detailed analysis.
-///
-/// [`core::cmp::Reverse`]: core::cmp::Reverse
-/// [`Ord`]: core::cmp::Ord
-/// [`Cell`]: core::cell::Cell
-/// [`RefCell`]: core::cell::RefCell
-/// [push]: BinaryHeap::push
-/// [pop]: BinaryHeap::pop
-/// [peek]: BinaryHeap::peek
-/// [peek\_mut]: BinaryHeap::peek_mut
-#[stable(feature = "rust1", since = "1.0.0")]
-#[cfg_attr(not(test), rustc_diagnostic_item = "BinaryHeap")]
-pub struct BinaryHeap<T> {
- data: Vec<T>,
-}
-
-/// Structure wrapping a mutable reference to the greatest item on a
-/// `BinaryHeap`.
-///
-/// This `struct` is created by the [`peek_mut`] method on [`BinaryHeap`]. See
-/// its documentation for more.
-///
-/// [`peek_mut`]: BinaryHeap::peek_mut
-#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
-pub struct PeekMut<'a, T: 'a + Ord> {
- heap: &'a mut BinaryHeap<T>,
- sift: bool,
-}
-
-#[stable(feature = "collection_debug", since = "1.17.0")]
-impl<T: Ord + fmt::Debug> fmt::Debug for PeekMut<'_, T> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_tuple("PeekMut").field(&self.heap.data[0]).finish()
- }
-}
-
-#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
-impl<T: Ord> Drop for PeekMut<'_, T> {
- fn drop(&mut self) {
- if self.sift {
- // SAFETY: PeekMut is only instantiated for non-empty heaps.
- unsafe { self.heap.sift_down(0) };
- }
- }
-}
-
-#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
-impl<T: Ord> Deref for PeekMut<'_, T> {
- type Target = T;
- fn deref(&self) -> &T {
- debug_assert!(!self.heap.is_empty());
- // SAFE: PeekMut is only instantiated for non-empty heaps
- unsafe { self.heap.data.get_unchecked(0) }
- }
-}
-
-#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
-impl<T: Ord> DerefMut for PeekMut<'_, T> {
- fn deref_mut(&mut self) -> &mut T {
- debug_assert!(!self.heap.is_empty());
- self.sift = true;
- // SAFE: PeekMut is only instantiated for non-empty heaps
- unsafe { self.heap.data.get_unchecked_mut(0) }
- }
-}
-
-impl<'a, T: Ord> PeekMut<'a, T> {
- /// Removes the peeked value from the heap and returns it.
- #[stable(feature = "binary_heap_peek_mut_pop", since = "1.18.0")]
- pub fn pop(mut this: PeekMut<'a, T>) -> T {
- let value = this.heap.pop().unwrap();
- this.sift = false;
- value
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: Clone> Clone for BinaryHeap<T> {
- fn clone(&self) -> Self {
- BinaryHeap { data: self.data.clone() }
- }
-
- fn clone_from(&mut self, source: &Self) {
- self.data.clone_from(&source.data);
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: Ord> Default for BinaryHeap<T> {
- /// Creates an empty `BinaryHeap<T>`.
- #[inline]
- fn default() -> BinaryHeap<T> {
- BinaryHeap::new()
- }
-}
-
-#[stable(feature = "binaryheap_debug", since = "1.4.0")]
-impl<T: fmt::Debug> fmt::Debug for BinaryHeap<T> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_list().entries(self.iter()).finish()
- }
-}
-
-impl<T: Ord> BinaryHeap<T> {
- /// Creates an empty `BinaryHeap` as a max-heap.
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// use std::collections::BinaryHeap;
- /// let mut heap = BinaryHeap::new();
- /// heap.push(4);
- /// ```
- #[stable(feature = "rust1", since = "1.0.0")]
- #[must_use]
- pub fn new() -> BinaryHeap<T> {
- BinaryHeap { data: vec![] }
- }
-
- /// Creates an empty `BinaryHeap` with at least the specified capacity.
- ///
- /// The binary heap will be able to hold at least `capacity` elements without
- /// reallocating. This method is allowed to allocate for more elements than
- /// `capacity`. If `capacity` is 0, the binary heap will not allocate.
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// use std::collections::BinaryHeap;
- /// let mut heap = BinaryHeap::with_capacity(10);
- /// heap.push(4);
- /// ```
- #[stable(feature = "rust1", since = "1.0.0")]
- #[must_use]
- pub fn with_capacity(capacity: usize) -> BinaryHeap<T> {
- BinaryHeap { data: Vec::with_capacity(capacity) }
- }
-
- /// Returns a mutable reference to the greatest item in the binary heap, or
- /// `None` if it is empty.
- ///
- /// Note: If the `PeekMut` value is leaked, the heap may be in an
- /// inconsistent state.
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// use std::collections::BinaryHeap;
- /// let mut heap = BinaryHeap::new();
- /// assert!(heap.peek_mut().is_none());
- ///
- /// heap.push(1);
- /// heap.push(5);
- /// heap.push(2);
- /// {
- /// let mut val = heap.peek_mut().unwrap();
- /// *val = 0;
- /// }
- /// assert_eq!(heap.peek(), Some(&2));
- /// ```
- ///
- /// # Time complexity
- ///
- /// If the item is modified then the worst case time complexity is *O*(log(*n*)),
- /// otherwise it's *O*(1).
- #[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
- pub fn peek_mut(&mut self) -> Option<PeekMut<'_, T>> {
- if self.is_empty() { None } else { Some(PeekMut { heap: self, sift: false }) }
- }
-
- /// Removes the greatest item from the binary heap and returns it, or `None` if it
- /// is empty.
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// use std::collections::BinaryHeap;
- /// let mut heap = BinaryHeap::from([1, 3]);
- ///
- /// assert_eq!(heap.pop(), Some(3));
- /// assert_eq!(heap.pop(), Some(1));
- /// assert_eq!(heap.pop(), None);
- /// ```
- ///
- /// # Time complexity
- ///
- /// The worst case cost of `pop` on a heap containing *n* elements is *O*(log(*n*)).
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn pop(&mut self) -> Option<T> {
- self.data.pop().map(|mut item| {
- if !self.is_empty() {
- swap(&mut item, &mut self.data[0]);
- // SAFETY: !self.is_empty() means that self.len() > 0
- unsafe { self.sift_down_to_bottom(0) };
- }
- item
- })
- }
-
- /// Pushes an item onto the binary heap.
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// use std::collections::BinaryHeap;
- /// let mut heap = BinaryHeap::new();
- /// heap.push(3);
- /// heap.push(5);
- /// heap.push(1);
- ///
- /// assert_eq!(heap.len(), 3);
- /// assert_eq!(heap.peek(), Some(&5));
- /// ```
- ///
- /// # Time complexity
- ///
- /// The expected cost of `push`, averaged over every possible ordering of
- /// the elements being pushed, and over a sufficiently large number of
- /// pushes, is *O*(1). This is the most meaningful cost metric when pushing
- /// elements that are *not* already in any sorted pattern.
- ///
- /// The time complexity degrades if elements are pushed in predominantly
- /// ascending order. In the worst case, elements are pushed in ascending
- /// sorted order and the amortized cost per push is *O*(log(*n*)) against a heap
- /// containing *n* elements.
- ///
- /// The worst case cost of a *single* call to `push` is *O*(*n*). The worst case
- /// occurs when capacity is exhausted and needs a resize. The resize cost
- /// has been amortized in the previous figures.
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn push(&mut self, item: T) {
- let old_len = self.len();
- self.data.push(item);
- // SAFETY: Since we pushed a new item it means that
- // old_len = self.len() - 1 < self.len()
- unsafe { self.sift_up(0, old_len) };
- }
-
- /// Consumes the `BinaryHeap` and returns a vector in sorted
- /// (ascending) order.
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// use std::collections::BinaryHeap;
- ///
- /// let mut heap = BinaryHeap::from([1, 2, 4, 5, 7]);
- /// heap.push(6);
- /// heap.push(3);
- ///
- /// let vec = heap.into_sorted_vec();
- /// assert_eq!(vec, [1, 2, 3, 4, 5, 6, 7]);
- /// ```
- #[must_use = "`self` will be dropped if the result is not used"]
- #[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
- pub fn into_sorted_vec(mut self) -> Vec<T> {
- let mut end = self.len();
- while end > 1 {
- end -= 1;
- // SAFETY: `end` goes from `self.len() - 1` to 1 (both included),
- // so it's always a valid index to access.
- // It is safe to access index 0 (i.e. `ptr`), because
- // 1 <= end < self.len(), which means self.len() >= 2.
- unsafe {
- let ptr = self.data.as_mut_ptr();
- ptr::swap(ptr, ptr.add(end));
- }
- // SAFETY: `end` goes from `self.len() - 1` to 1 (both included) so:
- // 0 < 1 <= end <= self.len() - 1 < self.len()
- // Which means 0 < end and end < self.len().
- unsafe { self.sift_down_range(0, end) };
- }
- self.into_vec()
- }
-
- // The implementations of sift_up and sift_down use unsafe blocks in
- // order to move an element out of the vector (leaving behind a
- // hole), shift along the others and move the removed element back into the
- // vector at the final location of the hole.
- // The `Hole` type is used to represent this, and make sure
- // the hole is filled back at the end of its scope, even on panic.
- // Using a hole reduces the constant factor compared to using swaps,
- // which involves twice as many moves.
-
- /// # Safety
- ///
- /// The caller must guarantee that `pos < self.len()`.
- unsafe fn sift_up(&mut self, start: usize, pos: usize) -> usize {
- // Take out the value at `pos` and create a hole.
- // SAFETY: The caller guarantees that pos < self.len()
- let mut hole = unsafe { Hole::new(&mut self.data, pos) };
-
- while hole.pos() > start {
- let parent = (hole.pos() - 1) / 2;
-
- // SAFETY: hole.pos() > start >= 0, which means hole.pos() > 0
- // and so hole.pos() - 1 can't underflow.
- // This guarantees that parent < hole.pos() so
- // it's a valid index and also != hole.pos().
- if hole.element() <= unsafe { hole.get(parent) } {
- break;
- }
-
- // SAFETY: Same as above
- unsafe { hole.move_to(parent) };
- }
-
- hole.pos()
- }
-
- /// Take an element at `pos` and move it down the heap,
- /// while its children are larger.
- ///
- /// # Safety
- ///
- /// The caller must guarantee that `pos < end <= self.len()`.
- unsafe fn sift_down_range(&mut self, pos: usize, end: usize) {
- // SAFETY: The caller guarantees that pos < end <= self.len().
- let mut hole = unsafe { Hole::new(&mut self.data, pos) };
- let mut child = 2 * hole.pos() + 1;
-
- // Loop invariant: child == 2 * hole.pos() + 1.
- while child <= end.saturating_sub(2) {
- // compare with the greater of the two children
- // SAFETY: child < end - 1 < self.len() and
- // child + 1 < end <= self.len(), so they're valid indexes.
- // child == 2 * hole.pos() + 1 != hole.pos() and
- // child + 1 == 2 * hole.pos() + 2 != hole.pos().
- // FIXME: 2 * hole.pos() + 1 or 2 * hole.pos() + 2 could overflow
- // if T is a ZST
- child += unsafe { hole.get(child) <= hole.get(child + 1) } as usize;
-
- // if we are already in order, stop.
- // SAFETY: child is now either the old child or the old child+1
- // We already proven that both are < self.len() and != hole.pos()
- if hole.element() >= unsafe { hole.get(child) } {
- return;
- }
-
- // SAFETY: same as above.
- unsafe { hole.move_to(child) };
- child = 2 * hole.pos() + 1;
- }
-
- // SAFETY: && short circuit, which means that in the
- // second condition it's already true that child == end - 1 < self.len().
- if child == end - 1 && hole.element() < unsafe { hole.get(child) } {
- // SAFETY: child is already proven to be a valid index and
- // child == 2 * hole.pos() + 1 != hole.pos().
- unsafe { hole.move_to(child) };
- }
- }
-
- /// # Safety
- ///
- /// The caller must guarantee that `pos < self.len()`.
- unsafe fn sift_down(&mut self, pos: usize) {
- let len = self.len();
- // SAFETY: pos < len is guaranteed by the caller and
- // obviously len = self.len() <= self.len().
- unsafe { self.sift_down_range(pos, len) };
- }
-
- /// Take an element at `pos` and move it all the way down the heap,
- /// then sift it up to its position.
- ///
- /// Note: This is faster when the element is known to be large / should
- /// be closer to the bottom.
- ///
- /// # Safety
- ///
- /// The caller must guarantee that `pos < self.len()`.
- unsafe fn sift_down_to_bottom(&mut self, mut pos: usize) {
- let end = self.len();
- let start = pos;
-
- // SAFETY: The caller guarantees that pos < self.len().
- let mut hole = unsafe { Hole::new(&mut self.data, pos) };
- let mut child = 2 * hole.pos() + 1;
-
- // Loop invariant: child == 2 * hole.pos() + 1.
- while child <= end.saturating_sub(2) {
- // SAFETY: child < end - 1 < self.len() and
- // child + 1 < end <= self.len(), so they're valid indexes.
- // child == 2 * hole.pos() + 1 != hole.pos() and
- // child + 1 == 2 * hole.pos() + 2 != hole.pos().
- // FIXME: 2 * hole.pos() + 1 or 2 * hole.pos() + 2 could overflow
- // if T is a ZST
- child += unsafe { hole.get(child) <= hole.get(child + 1) } as usize;
-
- // SAFETY: Same as above
- unsafe { hole.move_to(child) };
- child = 2 * hole.pos() + 1;
- }
-
- if child == end - 1 {
- // SAFETY: child == end - 1 < self.len(), so it's a valid index
- // and child == 2 * hole.pos() + 1 != hole.pos().
- unsafe { hole.move_to(child) };
- }
- pos = hole.pos();
- drop(hole);
-
- // SAFETY: pos is the position in the hole and was already proven
- // to be a valid index.
- unsafe { self.sift_up(start, pos) };
- }
-
- /// Rebuild assuming data[0..start] is still a proper heap.
- fn rebuild_tail(&mut self, start: usize) {
- if start == self.len() {
- return;
- }
-
- let tail_len = self.len() - start;
-
- #[inline(always)]
- fn log2_fast(x: usize) -> usize {
- (usize::BITS - x.leading_zeros() - 1) as usize
- }
-
- // `rebuild` takes O(self.len()) operations
- // and about 2 * self.len() comparisons in the worst case
- // while repeating `sift_up` takes O(tail_len * log(start)) operations
- // and about 1 * tail_len * log_2(start) comparisons in the worst case,
- // assuming start >= tail_len. For larger heaps, the crossover point
- // no longer follows this reasoning and was determined empirically.
- let better_to_rebuild = if start < tail_len {
- true
- } else if self.len() <= 2048 {
- 2 * self.len() < tail_len * log2_fast(start)
- } else {
- 2 * self.len() < tail_len * 11
- };
-
- if better_to_rebuild {
- self.rebuild();
- } else {
- for i in start..self.len() {
- // SAFETY: The index `i` is always less than self.len().
- unsafe { self.sift_up(0, i) };
- }
- }
- }
-
- fn rebuild(&mut self) {
- let mut n = self.len() / 2;
- while n > 0 {
- n -= 1;
- // SAFETY: n starts from self.len() / 2 and goes down to 0.
- // The only case when !(n < self.len()) is if
- // self.len() == 0, but it's ruled out by the loop condition.
- unsafe { self.sift_down(n) };
- }
- }
-
- /// Moves all the elements of `other` into `self`, leaving `other` empty.
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// use std::collections::BinaryHeap;
- ///
- /// let mut a = BinaryHeap::from([-10, 1, 2, 3, 3]);
- /// let mut b = BinaryHeap::from([-20, 5, 43]);
- ///
- /// a.append(&mut b);
- ///
- /// assert_eq!(a.into_sorted_vec(), [-20, -10, 1, 2, 3, 3, 5, 43]);
- /// assert!(b.is_empty());
- /// ```
- #[stable(feature = "binary_heap_append", since = "1.11.0")]
- pub fn append(&mut self, other: &mut Self) {
- if self.len() < other.len() {
- swap(self, other);
- }
-
- let start = self.data.len();
-
- self.data.append(&mut other.data);
-
- self.rebuild_tail(start);
- }
-
- /// Clears the binary heap, returning an iterator over the removed elements
- /// in heap order. If the iterator is dropped before being fully consumed,
- /// it drops the remaining elements in heap order.
- ///
- /// The returned iterator keeps a mutable borrow on the heap to optimize
- /// its implementation.
- ///
- /// Note:
- /// * `.drain_sorted()` is *O*(*n* \* log(*n*)); much slower than `.drain()`.
- /// You should use the latter for most cases.
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// #![feature(binary_heap_drain_sorted)]
- /// use std::collections::BinaryHeap;
- ///
- /// let mut heap = BinaryHeap::from([1, 2, 3, 4, 5]);
- /// assert_eq!(heap.len(), 5);
- ///
- /// drop(heap.drain_sorted()); // removes all elements in heap order
- /// assert_eq!(heap.len(), 0);
- /// ```
- #[inline]
- #[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
- pub fn drain_sorted(&mut self) -> DrainSorted<'_, T> {
- DrainSorted { inner: self }
- }
-
- /// Retains only the elements specified by the predicate.
- ///
- /// In other words, remove all elements `e` for which `f(&e)` returns
- /// `false`. The elements are visited in unsorted (and unspecified) order.
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// #![feature(binary_heap_retain)]
- /// use std::collections::BinaryHeap;
- ///
- /// let mut heap = BinaryHeap::from([-10, -5, 1, 2, 4, 13]);
- ///
- /// heap.retain(|x| x % 2 == 0); // only keep even numbers
- ///
- /// assert_eq!(heap.into_sorted_vec(), [-10, 2, 4])
- /// ```
- #[unstable(feature = "binary_heap_retain", issue = "71503")]
- pub fn retain<F>(&mut self, mut f: F)
- where
- F: FnMut(&T) -> bool,
- {
- let mut first_removed = self.len();
- let mut i = 0;
- self.data.retain(|e| {
- let keep = f(e);
- if !keep && i < first_removed {
- first_removed = i;
- }
- i += 1;
- keep
- });
- // data[0..first_removed] is untouched, so we only need to rebuild the tail:
- self.rebuild_tail(first_removed);
- }
-}
-
-impl<T> BinaryHeap<T> {
- /// Returns an iterator visiting all values in the underlying vector, in
- /// arbitrary order.
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// use std::collections::BinaryHeap;
- /// let heap = BinaryHeap::from([1, 2, 3, 4]);
- ///
- /// // Print 1, 2, 3, 4 in arbitrary order
- /// for x in heap.iter() {
- /// println!("{x}");
- /// }
- /// ```
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn iter(&self) -> Iter<'_, T> {
- Iter { iter: self.data.iter() }
- }
-
- /// Returns an iterator which retrieves elements in heap order.
- /// This method consumes the original heap.
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// #![feature(binary_heap_into_iter_sorted)]
- /// use std::collections::BinaryHeap;
- /// let heap = BinaryHeap::from([1, 2, 3, 4, 5]);
- ///
- /// assert_eq!(heap.into_iter_sorted().take(2).collect::<Vec<_>>(), [5, 4]);
- /// ```
- #[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
- pub fn into_iter_sorted(self) -> IntoIterSorted<T> {
- IntoIterSorted { inner: self }
- }
-
- /// Returns the greatest item in the binary heap, or `None` if it is empty.
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// use std::collections::BinaryHeap;
- /// let mut heap = BinaryHeap::new();
- /// assert_eq!(heap.peek(), None);
- ///
- /// heap.push(1);
- /// heap.push(5);
- /// heap.push(2);
- /// assert_eq!(heap.peek(), Some(&5));
- ///
- /// ```
- ///
- /// # Time complexity
- ///
- /// Cost is *O*(1) in the worst case.
- #[must_use]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn peek(&self) -> Option<&T> {
- self.data.get(0)
- }
-
- /// Returns the number of elements the binary heap can hold without reallocating.
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// use std::collections::BinaryHeap;
- /// let mut heap = BinaryHeap::with_capacity(100);
- /// assert!(heap.capacity() >= 100);
- /// heap.push(4);
- /// ```
- #[must_use]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn capacity(&self) -> usize {
- self.data.capacity()
- }
-
- /// Reserves the minimum capacity for at least `additional` elements more than
- /// the current length. Unlike [`reserve`], this will not
- /// deliberately over-allocate to speculatively avoid frequent allocations.
- /// After calling `reserve_exact`, capacity will be greater than or equal to
- /// `self.len() + additional`. Does nothing if the capacity is already
- /// sufficient.
- ///
- /// [`reserve`]: BinaryHeap::reserve
- ///
- /// # Panics
- ///
- /// Panics if the new capacity overflows [`usize`].
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// use std::collections::BinaryHeap;
- /// let mut heap = BinaryHeap::new();
- /// heap.reserve_exact(100);
- /// assert!(heap.capacity() >= 100);
- /// heap.push(4);
- /// ```
- ///
- /// [`reserve`]: BinaryHeap::reserve
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn reserve_exact(&mut self, additional: usize) {
- self.data.reserve_exact(additional);
- }
-
- /// Reserves capacity for at least `additional` elements more than the
- /// current length. The allocator may reserve more space to speculatively
- /// avoid frequent allocations. After calling `reserve`,
- /// capacity will be greater than or equal to `self.len() + additional`.
- /// Does nothing if capacity is already sufficient.
- ///
- /// # Panics
- ///
- /// Panics if the new capacity overflows [`usize`].
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// use std::collections::BinaryHeap;
- /// let mut heap = BinaryHeap::new();
- /// heap.reserve(100);
- /// assert!(heap.capacity() >= 100);
- /// heap.push(4);
- /// ```
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn reserve(&mut self, additional: usize) {
- self.data.reserve(additional);
- }
-
- /// Tries to reserve the minimum capacity for at least `additional` elements
- /// more than the current length. Unlike [`try_reserve`], this will not
- /// deliberately over-allocate to speculatively avoid frequent allocations.
- /// After calling `try_reserve_exact`, capacity will be greater than or
- /// equal to `self.len() + additional` if it returns `Ok(())`.
- /// Does nothing if the capacity is already sufficient.
- ///
- /// Note that the allocator may give the collection more space than it
- /// requests. Therefore, capacity can not be relied upon to be precisely
- /// minimal. Prefer [`try_reserve`] if future insertions are expected.
- ///
- /// [`try_reserve`]: BinaryHeap::try_reserve
- ///
- /// # Errors
- ///
- /// If the capacity overflows, or the allocator reports a failure, then an error
- /// is returned.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::collections::BinaryHeap;
- /// use std::collections::TryReserveError;
- ///
- /// fn find_max_slow(data: &[u32]) -> Result<Option<u32>, TryReserveError> {
- /// let mut heap = BinaryHeap::new();
- ///
- /// // Pre-reserve the memory, exiting if we can't
- /// heap.try_reserve_exact(data.len())?;
- ///
- /// // Now we know this can't OOM in the middle of our complex work
- /// heap.extend(data.iter());
- ///
- /// Ok(heap.pop())
- /// }
- /// # find_max_slow(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
- /// ```
- #[stable(feature = "try_reserve_2", since = "1.63.0")]
- pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
- self.data.try_reserve_exact(additional)
- }
-
- /// Tries to reserve capacity for at least `additional` elements more than the
- /// current length. The allocator may reserve more space to speculatively
- /// avoid frequent allocations. After calling `try_reserve`, capacity will be
- /// greater than or equal to `self.len() + additional` if it returns
- /// `Ok(())`. Does nothing if capacity is already sufficient. This method
- /// preserves the contents even if an error occurs.
- ///
- /// # Errors
- ///
- /// If the capacity overflows, or the allocator reports a failure, then an error
- /// is returned.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::collections::BinaryHeap;
- /// use std::collections::TryReserveError;
- ///
- /// fn find_max_slow(data: &[u32]) -> Result<Option<u32>, TryReserveError> {
- /// let mut heap = BinaryHeap::new();
- ///
- /// // Pre-reserve the memory, exiting if we can't
- /// heap.try_reserve(data.len())?;
- ///
- /// // Now we know this can't OOM in the middle of our complex work
- /// heap.extend(data.iter());
- ///
- /// Ok(heap.pop())
- /// }
- /// # find_max_slow(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
- /// ```
- #[stable(feature = "try_reserve_2", since = "1.63.0")]
- pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
- self.data.try_reserve(additional)
- }
-
- /// Discards as much additional capacity as possible.
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// use std::collections::BinaryHeap;
- /// let mut heap: BinaryHeap<i32> = BinaryHeap::with_capacity(100);
- ///
- /// assert!(heap.capacity() >= 100);
- /// heap.shrink_to_fit();
- /// assert!(heap.capacity() == 0);
- /// ```
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn shrink_to_fit(&mut self) {
- self.data.shrink_to_fit();
- }
-
- /// Discards capacity with a lower bound.
- ///
- /// The capacity will remain at least as large as both the length
- /// and the supplied value.
- ///
- /// If the current capacity is less than the lower limit, this is a no-op.
- ///
- /// # Examples
- ///
- /// ```
- /// use std::collections::BinaryHeap;
- /// let mut heap: BinaryHeap<i32> = BinaryHeap::with_capacity(100);
- ///
- /// assert!(heap.capacity() >= 100);
- /// heap.shrink_to(10);
- /// assert!(heap.capacity() >= 10);
- /// ```
- #[inline]
- #[stable(feature = "shrink_to", since = "1.56.0")]
- pub fn shrink_to(&mut self, min_capacity: usize) {
- self.data.shrink_to(min_capacity)
- }
-
- /// Returns a slice of all values in the underlying vector, in arbitrary
- /// order.
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// #![feature(binary_heap_as_slice)]
- /// use std::collections::BinaryHeap;
- /// use std::io::{self, Write};
- ///
- /// let heap = BinaryHeap::from([1, 2, 3, 4, 5, 6, 7]);
- ///
- /// io::sink().write(heap.as_slice()).unwrap();
- /// ```
- #[must_use]
- #[unstable(feature = "binary_heap_as_slice", issue = "83659")]
- pub fn as_slice(&self) -> &[T] {
- self.data.as_slice()
- }
-
- /// Consumes the `BinaryHeap` and returns the underlying vector
- /// in arbitrary order.
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// use std::collections::BinaryHeap;
- /// let heap = BinaryHeap::from([1, 2, 3, 4, 5, 6, 7]);
- /// let vec = heap.into_vec();
- ///
- /// // Will print in some order
- /// for x in vec {
- /// println!("{x}");
- /// }
- /// ```
- #[must_use = "`self` will be dropped if the result is not used"]
- #[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
- pub fn into_vec(self) -> Vec<T> {
- self.into()
- }
-
- /// Returns the length of the binary heap.
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// use std::collections::BinaryHeap;
- /// let heap = BinaryHeap::from([1, 3]);
- ///
- /// assert_eq!(heap.len(), 2);
- /// ```
- #[must_use]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn len(&self) -> usize {
- self.data.len()
- }
-
- /// Checks if the binary heap is empty.
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// use std::collections::BinaryHeap;
- /// let mut heap = BinaryHeap::new();
- ///
- /// assert!(heap.is_empty());
- ///
- /// heap.push(3);
- /// heap.push(5);
- /// heap.push(1);
- ///
- /// assert!(!heap.is_empty());
- /// ```
- #[must_use]
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn is_empty(&self) -> bool {
- self.len() == 0
- }
-
- /// Clears the binary heap, returning an iterator over the removed elements
- /// in arbitrary order. If the iterator is dropped before being fully
- /// consumed, it drops the remaining elements in arbitrary order.
- ///
- /// The returned iterator keeps a mutable borrow on the heap to optimize
- /// its implementation.
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// use std::collections::BinaryHeap;
- /// let mut heap = BinaryHeap::from([1, 3]);
- ///
- /// assert!(!heap.is_empty());
- ///
- /// for x in heap.drain() {
- /// println!("{x}");
- /// }
- ///
- /// assert!(heap.is_empty());
- /// ```
- #[inline]
- #[stable(feature = "drain", since = "1.6.0")]
- pub fn drain(&mut self) -> Drain<'_, T> {
- Drain { iter: self.data.drain(..) }
- }
-
- /// Drops all items from the binary heap.
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// use std::collections::BinaryHeap;
- /// let mut heap = BinaryHeap::from([1, 3]);
- ///
- /// assert!(!heap.is_empty());
- ///
- /// heap.clear();
- ///
- /// assert!(heap.is_empty());
- /// ```
- #[stable(feature = "rust1", since = "1.0.0")]
- pub fn clear(&mut self) {
- self.drain();
- }
-}
-
-/// Hole represents a hole in a slice i.e., an index without valid value
-/// (because it was moved from or duplicated).
-/// In drop, `Hole` will restore the slice by filling the hole
-/// position with the value that was originally removed.
-struct Hole<'a, T: 'a> {
- data: &'a mut [T],
- elt: ManuallyDrop<T>,
- pos: usize,
-}
-
-impl<'a, T> Hole<'a, T> {
- /// Create a new `Hole` at index `pos`.
- ///
- /// Unsafe because pos must be within the data slice.
- #[inline]
- unsafe fn new(data: &'a mut [T], pos: usize) -> Self {
- debug_assert!(pos < data.len());
- // SAFE: pos should be inside the slice
- let elt = unsafe { ptr::read(data.get_unchecked(pos)) };
- Hole { data, elt: ManuallyDrop::new(elt), pos }
- }
-
- #[inline]
- fn pos(&self) -> usize {
- self.pos
- }
-
- /// Returns a reference to the element removed.
- #[inline]
- fn element(&self) -> &T {
- &self.elt
- }
-
- /// Returns a reference to the element at `index`.
- ///
- /// Unsafe because index must be within the data slice and not equal to pos.
- #[inline]
- unsafe fn get(&self, index: usize) -> &T {
- debug_assert!(index != self.pos);
- debug_assert!(index < self.data.len());
- unsafe { self.data.get_unchecked(index) }
- }
-
- /// Move hole to new location
- ///
- /// Unsafe because index must be within the data slice and not equal to pos.
- #[inline]
- unsafe fn move_to(&mut self, index: usize) {
- debug_assert!(index != self.pos);
- debug_assert!(index < self.data.len());
- unsafe {
- let ptr = self.data.as_mut_ptr();
- let index_ptr: *const _ = ptr.add(index);
- let hole_ptr = ptr.add(self.pos);
- ptr::copy_nonoverlapping(index_ptr, hole_ptr, 1);
- }
- self.pos = index;
- }
-}
-
-impl<T> Drop for Hole<'_, T> {
- #[inline]
- fn drop(&mut self) {
- // fill the hole again
- unsafe {
- let pos = self.pos;
- ptr::copy_nonoverlapping(&*self.elt, self.data.get_unchecked_mut(pos), 1);
- }
- }
-}
-
-/// An iterator over the elements of a `BinaryHeap`.
-///
-/// This `struct` is created by [`BinaryHeap::iter()`]. See its
-/// documentation for more.
-///
-/// [`iter`]: BinaryHeap::iter
-#[must_use = "iterators are lazy and do nothing unless consumed"]
-#[stable(feature = "rust1", since = "1.0.0")]
-pub struct Iter<'a, T: 'a> {
- iter: slice::Iter<'a, T>,
-}
-
-#[stable(feature = "collection_debug", since = "1.17.0")]
-impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_tuple("Iter").field(&self.iter.as_slice()).finish()
- }
-}
-
-// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T> Clone for Iter<'_, T> {
- fn clone(&self) -> Self {
- Iter { iter: self.iter.clone() }
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<'a, T> Iterator for Iter<'a, T> {
- type Item = &'a T;
-
- #[inline]
- fn next(&mut self) -> Option<&'a T> {
- self.iter.next()
- }
-
- #[inline]
- fn size_hint(&self) -> (usize, Option<usize>) {
- self.iter.size_hint()
- }
-
- #[inline]
- fn last(self) -> Option<&'a T> {
- self.iter.last()
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<'a, T> DoubleEndedIterator for Iter<'a, T> {
- #[inline]
- fn next_back(&mut self) -> Option<&'a T> {
- self.iter.next_back()
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T> ExactSizeIterator for Iter<'_, T> {
- fn is_empty(&self) -> bool {
- self.iter.is_empty()
- }
-}
-
-#[stable(feature = "fused", since = "1.26.0")]
-impl<T> FusedIterator for Iter<'_, T> {}
-
-/// An owning iterator over the elements of a `BinaryHeap`.
-///
-/// This `struct` is created by [`BinaryHeap::into_iter()`]
-/// (provided by the [`IntoIterator`] trait). See its documentation for more.
-///
-/// [`into_iter`]: BinaryHeap::into_iter
-/// [`IntoIterator`]: core::iter::IntoIterator
-#[stable(feature = "rust1", since = "1.0.0")]
-#[derive(Clone)]
-pub struct IntoIter<T> {
- iter: vec::IntoIter<T>,
-}
-
-#[stable(feature = "collection_debug", since = "1.17.0")]
-impl<T: fmt::Debug> fmt::Debug for IntoIter<T> {
- fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
- f.debug_tuple("IntoIter").field(&self.iter.as_slice()).finish()
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T> Iterator for IntoIter<T> {
- type Item = T;
-
- #[inline]
- fn next(&mut self) -> Option<T> {
- self.iter.next()
- }
-
- #[inline]
- fn size_hint(&self) -> (usize, Option<usize>) {
- self.iter.size_hint()
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T> DoubleEndedIterator for IntoIter<T> {
- #[inline]
- fn next_back(&mut self) -> Option<T> {
- self.iter.next_back()
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T> ExactSizeIterator for IntoIter<T> {
- fn is_empty(&self) -> bool {
- self.iter.is_empty()
- }
-}
-
-#[stable(feature = "fused", since = "1.26.0")]
-impl<T> FusedIterator for IntoIter<T> {}
-
-// In addition to the SAFETY invariants of the following three unsafe traits
-// also refer to the vec::in_place_collect module documentation to get an overview
-#[unstable(issue = "none", feature = "inplace_iteration")]
-#[doc(hidden)]
-unsafe impl<T> SourceIter for IntoIter<T> {
- type Source = IntoIter<T>;
-
- #[inline]
- unsafe fn as_inner(&mut self) -> &mut Self::Source {
- self
- }
-}
-
-#[unstable(issue = "none", feature = "inplace_iteration")]
-#[doc(hidden)]
-unsafe impl<I> InPlaceIterable for IntoIter<I> {}
-
-unsafe impl<I> AsVecIntoIter for IntoIter<I> {
- type Item = I;
-
- fn as_into_iter(&mut self) -> &mut vec::IntoIter<Self::Item> {
- &mut self.iter
- }
-}
-
-#[must_use = "iterators are lazy and do nothing unless consumed"]
-#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
-#[derive(Clone, Debug)]
-pub struct IntoIterSorted<T> {
- inner: BinaryHeap<T>,
-}
-
-#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
-impl<T: Ord> Iterator for IntoIterSorted<T> {
- type Item = T;
-
- #[inline]
- fn next(&mut self) -> Option<T> {
- self.inner.pop()
- }
-
- #[inline]
- fn size_hint(&self) -> (usize, Option<usize>) {
- let exact = self.inner.len();
- (exact, Some(exact))
- }
-}
-
-#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
-impl<T: Ord> ExactSizeIterator for IntoIterSorted<T> {}
-
-#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
-impl<T: Ord> FusedIterator for IntoIterSorted<T> {}
-
-#[unstable(feature = "trusted_len", issue = "37572")]
-unsafe impl<T: Ord> TrustedLen for IntoIterSorted<T> {}
-
-/// A draining iterator over the elements of a `BinaryHeap`.
-///
-/// This `struct` is created by [`BinaryHeap::drain()`]. See its
-/// documentation for more.
-///
-/// [`drain`]: BinaryHeap::drain
-#[stable(feature = "drain", since = "1.6.0")]
-#[derive(Debug)]
-pub struct Drain<'a, T: 'a> {
- iter: vec::Drain<'a, T>,
-}
-
-#[stable(feature = "drain", since = "1.6.0")]
-impl<T> Iterator for Drain<'_, T> {
- type Item = T;
-
- #[inline]
- fn next(&mut self) -> Option<T> {
- self.iter.next()
- }
-
- #[inline]
- fn size_hint(&self) -> (usize, Option<usize>) {
- self.iter.size_hint()
- }
-}
-
-#[stable(feature = "drain", since = "1.6.0")]
-impl<T> DoubleEndedIterator for Drain<'_, T> {
- #[inline]
- fn next_back(&mut self) -> Option<T> {
- self.iter.next_back()
- }
-}
-
-#[stable(feature = "drain", since = "1.6.0")]
-impl<T> ExactSizeIterator for Drain<'_, T> {
- fn is_empty(&self) -> bool {
- self.iter.is_empty()
- }
-}
-
-#[stable(feature = "fused", since = "1.26.0")]
-impl<T> FusedIterator for Drain<'_, T> {}
-
-/// A draining iterator over the elements of a `BinaryHeap`.
-///
-/// This `struct` is created by [`BinaryHeap::drain_sorted()`]. See its
-/// documentation for more.
-///
-/// [`drain_sorted`]: BinaryHeap::drain_sorted
-#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
-#[derive(Debug)]
-pub struct DrainSorted<'a, T: Ord> {
- inner: &'a mut BinaryHeap<T>,
-}
-
-#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
-impl<'a, T: Ord> Drop for DrainSorted<'a, T> {
- /// Removes heap elements in heap order.
- fn drop(&mut self) {
- struct DropGuard<'r, 'a, T: Ord>(&'r mut DrainSorted<'a, T>);
-
- impl<'r, 'a, T: Ord> Drop for DropGuard<'r, 'a, T> {
- fn drop(&mut self) {
- while self.0.inner.pop().is_some() {}
- }
- }
-
- while let Some(item) = self.inner.pop() {
- let guard = DropGuard(self);
- drop(item);
- mem::forget(guard);
- }
- }
-}
-
-#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
-impl<T: Ord> Iterator for DrainSorted<'_, T> {
- type Item = T;
-
- #[inline]
- fn next(&mut self) -> Option<T> {
- self.inner.pop()
- }
-
- #[inline]
- fn size_hint(&self) -> (usize, Option<usize>) {
- let exact = self.inner.len();
- (exact, Some(exact))
- }
-}
-
-#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
-impl<T: Ord> ExactSizeIterator for DrainSorted<'_, T> {}
-
-#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
-impl<T: Ord> FusedIterator for DrainSorted<'_, T> {}
-
-#[unstable(feature = "trusted_len", issue = "37572")]
-unsafe impl<T: Ord> TrustedLen for DrainSorted<'_, T> {}
-
-#[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
-impl<T: Ord> From<Vec<T>> for BinaryHeap<T> {
- /// Converts a `Vec<T>` into a `BinaryHeap<T>`.
- ///
- /// This conversion happens in-place, and has *O*(*n*) time complexity.
- fn from(vec: Vec<T>) -> BinaryHeap<T> {
- let mut heap = BinaryHeap { data: vec };
- heap.rebuild();
- heap
- }
-}
-
-#[stable(feature = "std_collections_from_array", since = "1.56.0")]
-impl<T: Ord, const N: usize> From<[T; N]> for BinaryHeap<T> {
- /// ```
- /// use std::collections::BinaryHeap;
- ///
- /// let mut h1 = BinaryHeap::from([1, 4, 2, 3]);
- /// let mut h2: BinaryHeap<_> = [1, 4, 2, 3].into();
- /// while let Some((a, b)) = h1.pop().zip(h2.pop()) {
- /// assert_eq!(a, b);
- /// }
- /// ```
- fn from(arr: [T; N]) -> Self {
- Self::from_iter(arr)
- }
-}
-
-#[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
-impl<T> From<BinaryHeap<T>> for Vec<T> {
- /// Converts a `BinaryHeap<T>` into a `Vec<T>`.
- ///
- /// This conversion requires no data movement or allocation, and has
- /// constant time complexity.
- fn from(heap: BinaryHeap<T>) -> Vec<T> {
- heap.data
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: Ord> FromIterator<T> for BinaryHeap<T> {
- fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> BinaryHeap<T> {
- BinaryHeap::from(iter.into_iter().collect::<Vec<_>>())
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T> IntoIterator for BinaryHeap<T> {
- type Item = T;
- type IntoIter = IntoIter<T>;
-
- /// Creates a consuming iterator, that is, one that moves each value out of
- /// the binary heap in arbitrary order. The binary heap cannot be used
- /// after calling this.
- ///
- /// # Examples
- ///
- /// Basic usage:
- ///
- /// ```
- /// use std::collections::BinaryHeap;
- /// let heap = BinaryHeap::from([1, 2, 3, 4]);
- ///
- /// // Print 1, 2, 3, 4 in arbitrary order
- /// for x in heap.into_iter() {
- /// // x has type i32, not &i32
- /// println!("{x}");
- /// }
- /// ```
- fn into_iter(self) -> IntoIter<T> {
- IntoIter { iter: self.data.into_iter() }
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<'a, T> IntoIterator for &'a BinaryHeap<T> {
- type Item = &'a T;
- type IntoIter = Iter<'a, T>;
-
- fn into_iter(self) -> Iter<'a, T> {
- self.iter()
- }
-}
-
-#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: Ord> Extend<T> for BinaryHeap<T> {
- #[inline]
- fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
- <Self as SpecExtend<I>>::spec_extend(self, iter);
- }
-
- #[inline]
- fn extend_one(&mut self, item: T) {
- self.push(item);
- }
-
- #[inline]
- fn extend_reserve(&mut self, additional: usize) {
- self.reserve(additional);
- }
-}
-
-impl<T: Ord, I: IntoIterator<Item = T>> SpecExtend<I> for BinaryHeap<T> {
- default fn spec_extend(&mut self, iter: I) {
- self.extend_desugared(iter.into_iter());
- }
-}
-
-impl<T: Ord> SpecExtend<Vec<T>> for BinaryHeap<T> {
- fn spec_extend(&mut self, ref mut other: Vec<T>) {
- let start = self.data.len();
- self.data.append(other);
- self.rebuild_tail(start);
- }
-}
-
-impl<T: Ord> SpecExtend<BinaryHeap<T>> for BinaryHeap<T> {
- fn spec_extend(&mut self, ref mut other: BinaryHeap<T>) {
- self.append(other);
- }
-}
-
-impl<T: Ord> BinaryHeap<T> {
- fn extend_desugared<I: IntoIterator<Item = T>>(&mut self, iter: I) {
- let iterator = iter.into_iter();
- let (lower, _) = iterator.size_hint();
-
- self.reserve(lower);
-
- iterator.for_each(move |elem| self.push(elem));
- }
-}
-
-#[stable(feature = "extend_ref", since = "1.2.0")]
-impl<'a, T: 'a + Ord + Copy> Extend<&'a T> for BinaryHeap<T> {
- fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
- self.extend(iter.into_iter().cloned());
- }
-
- #[inline]
- fn extend_one(&mut self, &item: &'a T) {
- self.push(item);
- }
-
- #[inline]
- fn extend_reserve(&mut self, additional: usize) {
- self.reserve(additional);
- }
-}
diff --git a/library/alloc/src/collections/binary_heap/mod.rs b/library/alloc/src/collections/binary_heap/mod.rs
new file mode 100644
index 000000000..0b73b1af4
--- /dev/null
+++ b/library/alloc/src/collections/binary_heap/mod.rs
@@ -0,0 +1,1766 @@
+//! A priority queue implemented with a binary heap.
+//!
+//! Insertion and popping the largest element have *O*(log(*n*)) time complexity.
+//! Checking the largest element is *O*(1). Converting a vector to a binary heap
+//! can be done in-place, and has *O*(*n*) complexity. A binary heap can also be
+//! converted to a sorted vector in-place, allowing it to be used for an *O*(*n* * log(*n*))
+//! in-place heapsort.
+//!
+//! # Examples
+//!
+//! This is a larger example that implements [Dijkstra's algorithm][dijkstra]
+//! to solve the [shortest path problem][sssp] on a [directed graph][dir_graph].
+//! It shows how to use [`BinaryHeap`] with custom types.
+//!
+//! [dijkstra]: https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm
+//! [sssp]: https://en.wikipedia.org/wiki/Shortest_path_problem
+//! [dir_graph]: https://en.wikipedia.org/wiki/Directed_graph
+//!
+//! ```
+//! use std::cmp::Ordering;
+//! use std::collections::BinaryHeap;
+//!
+//! #[derive(Copy, Clone, Eq, PartialEq)]
+//! struct State {
+//! cost: usize,
+//! position: usize,
+//! }
+//!
+//! // The priority queue depends on `Ord`.
+//! // Explicitly implement the trait so the queue becomes a min-heap
+//! // instead of a max-heap.
+//! impl Ord for State {
+//! fn cmp(&self, other: &Self) -> Ordering {
+//!         // Notice that we flip the ordering on costs.
+//! // In case of a tie we compare positions - this step is necessary
+//! // to make implementations of `PartialEq` and `Ord` consistent.
+//! other.cost.cmp(&self.cost)
+//! .then_with(|| self.position.cmp(&other.position))
+//! }
+//! }
+//!
+//! // `PartialOrd` needs to be implemented as well.
+//! impl PartialOrd for State {
+//! fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+//! Some(self.cmp(other))
+//! }
+//! }
+//!
+//! // Each node is represented as a `usize`, for a shorter implementation.
+//! struct Edge {
+//! node: usize,
+//! cost: usize,
+//! }
+//!
+//! // Dijkstra's shortest path algorithm.
+//!
+//! // Start at `start` and use `dist` to track the current shortest distance
+//! // to each node. This implementation isn't memory-efficient as it may leave duplicate
+//! // nodes in the queue. It also uses `usize::MAX` as a sentinel value,
+//! // for a simpler implementation.
+//! fn shortest_path(adj_list: &Vec<Vec<Edge>>, start: usize, goal: usize) -> Option<usize> {
+//! // dist[node] = current shortest distance from `start` to `node`
+//! let mut dist: Vec<_> = (0..adj_list.len()).map(|_| usize::MAX).collect();
+//!
+//! let mut heap = BinaryHeap::new();
+//!
+//! // We're at `start`, with a zero cost
+//! dist[start] = 0;
+//! heap.push(State { cost: 0, position: start });
+//!
+//! // Examine the frontier with lower cost nodes first (min-heap)
+//! while let Some(State { cost, position }) = heap.pop() {
+//! // Alternatively we could have continued to find all shortest paths
+//! if position == goal { return Some(cost); }
+//!
+//! // Important as we may have already found a better way
+//! if cost > dist[position] { continue; }
+//!
+//! // For each node we can reach, see if we can find a way with
+//! // a lower cost going through this node
+//! for edge in &adj_list[position] {
+//! let next = State { cost: cost + edge.cost, position: edge.node };
+//!
+//! // If so, add it to the frontier and continue
+//! if next.cost < dist[next.position] {
+//! heap.push(next);
+//! // Relaxation, we have now found a better way
+//! dist[next.position] = next.cost;
+//! }
+//! }
+//! }
+//!
+//! // Goal not reachable
+//! None
+//! }
+//!
+//! fn main() {
+//! // This is the directed graph we're going to use.
+//! // The node numbers correspond to the different states,
+//! // and the edge weights symbolize the cost of moving
+//! // from one node to another.
+//! // Note that the edges are one-way.
+//! //
+//! // 7
+//! // +-----------------+
+//! // | |
+//! // v 1 2 | 2
+//! // 0 -----> 1 -----> 3 ---> 4
+//! // | ^ ^ ^
+//! // | | 1 | |
+//! // | | | 3 | 1
+//! // +------> 2 -------+ |
+//! // 10 | |
+//! // +---------------+
+//! //
+//! // The graph is represented as an adjacency list where each index,
+//! // corresponding to a node value, has a list of outgoing edges.
+//! // Chosen for its efficiency.
+//! let graph = vec![
+//! // Node 0
+//! vec![Edge { node: 2, cost: 10 },
+//! Edge { node: 1, cost: 1 }],
+//! // Node 1
+//! vec![Edge { node: 3, cost: 2 }],
+//! // Node 2
+//! vec![Edge { node: 1, cost: 1 },
+//! Edge { node: 3, cost: 3 },
+//! Edge { node: 4, cost: 1 }],
+//! // Node 3
+//! vec![Edge { node: 0, cost: 7 },
+//! Edge { node: 4, cost: 2 }],
+//! // Node 4
+//! vec![]];
+//!
+//! assert_eq!(shortest_path(&graph, 0, 1), Some(1));
+//! assert_eq!(shortest_path(&graph, 0, 3), Some(3));
+//! assert_eq!(shortest_path(&graph, 3, 0), Some(7));
+//! assert_eq!(shortest_path(&graph, 0, 4), Some(5));
+//! assert_eq!(shortest_path(&graph, 4, 0), None);
+//! }
+//! ```
+
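The module docs above mention using the heap for an *O*(*n* log(*n*)) in-place heapsort; a minimal sketch of that idea in terms of the stable `BinaryHeap` API (the `heapsort` helper is an illustrative name, not part of this module):

```rust
use std::collections::BinaryHeap;

// Build the heap in O(n) via `From<Vec<T>>`, then let `into_sorted_vec`
// repeatedly move the current maximum to the end of the backing vector.
fn heapsort<T: Ord>(v: Vec<T>) -> Vec<T> {
    BinaryHeap::from(v).into_sorted_vec()
}

fn main() {
    assert_eq!(heapsort(vec![3, 1, 4, 1, 5]), vec![1, 1, 3, 4, 5]);
}
```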
+#![allow(missing_docs)]
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use core::fmt;
+use core::iter::{FromIterator, FusedIterator, InPlaceIterable, SourceIter, TrustedLen};
+use core::mem::{self, swap, ManuallyDrop};
+use core::num::NonZeroUsize;
+use core::ops::{Deref, DerefMut};
+use core::ptr;
+
+use crate::collections::TryReserveError;
+use crate::slice;
+use crate::vec::{self, AsVecIntoIter, Vec};
+
+use super::SpecExtend;
+
+#[cfg(test)]
+mod tests;
+
+/// A priority queue implemented with a binary heap.
+///
+/// This will be a max-heap.
+///
+/// It is a logic error for an item to be modified in such a way that the
+/// item's ordering relative to any other item, as determined by the [`Ord`]
+/// trait, changes while it is in the heap. This is normally only possible
+/// through interior mutability, global state, I/O, or unsafe code. The
+/// behavior resulting from such a logic error is not specified, but will
+/// be encapsulated to the `BinaryHeap` that observed the logic error and not
+/// result in undefined behavior. This could include panics, incorrect results,
+/// aborts, memory leaks, and non-termination.
+///
+/// As long as no elements change their relative order while being in the heap
+/// as described above, the API of `BinaryHeap` guarantees that the heap
+/// invariant remains intact, i.e. its methods all behave as documented. For
+/// example, if a method is documented as iterating in sorted order, that's
+/// guaranteed to work as long as elements in the heap have not changed order,
+/// even in the presence of closures being unwound out of, iterators getting
+/// leaked, and similar foolishness.
+///
+/// # Examples
+///
+/// ```
+/// use std::collections::BinaryHeap;
+///
+/// // Type inference lets us omit an explicit type signature (which
+/// // would be `BinaryHeap<i32>` in this example).
+/// let mut heap = BinaryHeap::new();
+///
+/// // We can use peek to look at the next item in the heap. In this case,
+/// // there are no items in there yet, so we get None.
+/// assert_eq!(heap.peek(), None);
+///
+/// // Let's add some scores...
+/// heap.push(1);
+/// heap.push(5);
+/// heap.push(2);
+///
+/// // Now peek shows the most important item in the heap.
+/// assert_eq!(heap.peek(), Some(&5));
+///
+/// // We can check the length of a heap.
+/// assert_eq!(heap.len(), 3);
+///
+/// // We can iterate over the items in the heap, although they are returned in
+/// // an arbitrary order.
+/// for x in &heap {
+/// println!("{x}");
+/// }
+///
+/// // If we instead pop these scores, they should come back in order.
+/// assert_eq!(heap.pop(), Some(5));
+/// assert_eq!(heap.pop(), Some(2));
+/// assert_eq!(heap.pop(), Some(1));
+/// assert_eq!(heap.pop(), None);
+///
+/// // We can clear the heap of any remaining items.
+/// heap.clear();
+///
+/// // The heap should now be empty.
+/// assert!(heap.is_empty())
+/// ```
+///
+/// A `BinaryHeap` with a known list of items can be initialized from an array:
+///
+/// ```
+/// use std::collections::BinaryHeap;
+///
+/// let heap = BinaryHeap::from([1, 5, 2]);
+/// ```
+///
+/// ## Min-heap
+///
+/// Either [`core::cmp::Reverse`] or a custom [`Ord`] implementation can be used to
+/// make `BinaryHeap` a min-heap. This makes `heap.pop()` return the smallest
+/// value instead of the greatest one.
+///
+/// ```
+/// use std::collections::BinaryHeap;
+/// use std::cmp::Reverse;
+///
+/// let mut heap = BinaryHeap::new();
+///
+/// // Wrap values in `Reverse`
+/// heap.push(Reverse(1));
+/// heap.push(Reverse(5));
+/// heap.push(Reverse(2));
+///
+/// // If we pop these scores now, they should come back in the reverse order.
+/// assert_eq!(heap.pop(), Some(Reverse(1)));
+/// assert_eq!(heap.pop(), Some(Reverse(2)));
+/// assert_eq!(heap.pop(), Some(Reverse(5)));
+/// assert_eq!(heap.pop(), None);
+/// ```
+///
+/// # Time complexity
+///
+/// | [push] | [pop] | [peek]/[peek\_mut] |
+/// |---------|---------------|--------------------|
+/// | *O*(1)~ | *O*(log(*n*)) | *O*(1) |
+///
+/// The value for `push` is an expected cost; the method documentation gives a
+/// more detailed analysis.
+///
+/// [`core::cmp::Reverse`]: core::cmp::Reverse
+/// [`Ord`]: core::cmp::Ord
+/// [`Cell`]: core::cell::Cell
+/// [`RefCell`]: core::cell::RefCell
+/// [push]: BinaryHeap::push
+/// [pop]: BinaryHeap::pop
+/// [peek]: BinaryHeap::peek
+/// [peek\_mut]: BinaryHeap::peek_mut
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "BinaryHeap")]
+pub struct BinaryHeap<T> {
+ data: Vec<T>,
+}
+
+/// Structure wrapping a mutable reference to the greatest item on a
+/// `BinaryHeap`.
+///
+/// This `struct` is created by the [`peek_mut`] method on [`BinaryHeap`]. See
+/// its documentation for more.
+///
+/// [`peek_mut`]: BinaryHeap::peek_mut
+#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
+pub struct PeekMut<'a, T: 'a + Ord> {
+ heap: &'a mut BinaryHeap<T>,
+ // If a set_len + sift_down are required, this is Some. If a &mut T has not
+ // yet been exposed to peek_mut()'s caller, it's None.
+ original_len: Option<NonZeroUsize>,
+}
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<T: Ord + fmt::Debug> fmt::Debug for PeekMut<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("PeekMut").field(&self.heap.data[0]).finish()
+ }
+}
+
+#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
+impl<T: Ord> Drop for PeekMut<'_, T> {
+ fn drop(&mut self) {
+ if let Some(original_len) = self.original_len {
+ // SAFETY: That's how many elements were in the Vec at the time of
+ // the PeekMut::deref_mut call, and therefore also at the time of
+ // the BinaryHeap::peek_mut call. Since the PeekMut did not end up
+ // getting leaked, we are now undoing the leak amplification that
+ // the DerefMut prepared for.
+ unsafe { self.heap.data.set_len(original_len.get()) };
+
+ // SAFETY: PeekMut is only instantiated for non-empty heaps.
+ unsafe { self.heap.sift_down(0) };
+ }
+ }
+}
+
+#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
+impl<T: Ord> Deref for PeekMut<'_, T> {
+ type Target = T;
+ fn deref(&self) -> &T {
+ debug_assert!(!self.heap.is_empty());
+ // SAFE: PeekMut is only instantiated for non-empty heaps
+ unsafe { self.heap.data.get_unchecked(0) }
+ }
+}
+
+#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
+impl<T: Ord> DerefMut for PeekMut<'_, T> {
+ fn deref_mut(&mut self) -> &mut T {
+ debug_assert!(!self.heap.is_empty());
+
+ let len = self.heap.len();
+ if len > 1 {
+ // Here we preemptively leak all the rest of the underlying vector
+            // after the current maximum element. If the caller mutates the &mut T
+ // we're about to give them, and then leaks the PeekMut, all these
+ // elements will remain leaked. If they don't leak the PeekMut, then
+ // either Drop or PeekMut::pop will un-leak the vector elements.
+ //
+            // This technique is described in several other places in
+ // the standard library as "leak amplification".
+ unsafe {
+ // SAFETY: len > 1 so len != 0.
+ self.original_len = Some(NonZeroUsize::new_unchecked(len));
+ // SAFETY: len > 1 so all this does for now is leak elements,
+ // which is safe.
+ self.heap.data.set_len(1);
+ }
+ }
+
+ // SAFE: PeekMut is only instantiated for non-empty heaps
+ unsafe { self.heap.data.get_unchecked_mut(0) }
+ }
+}
+
+impl<'a, T: Ord> PeekMut<'a, T> {
+ /// Removes the peeked value from the heap and returns it.
+ #[stable(feature = "binary_heap_peek_mut_pop", since = "1.18.0")]
+ pub fn pop(mut this: PeekMut<'a, T>) -> T {
+ if let Some(original_len) = this.original_len.take() {
+ // SAFETY: This is how many elements were in the Vec at the time of
+ // the BinaryHeap::peek_mut call.
+ unsafe { this.heap.data.set_len(original_len.get()) };
+
+ // Unlike in Drop, here we don't also need to do a sift_down even if
+ // the caller could've mutated the element. It is removed from the
+ // heap on the next line and pop() is not sensitive to its value.
+ }
+ this.heap.pop().unwrap()
+ }
+}
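`PeekMut::pop` has no doctest of its own; a small usage sketch (the threshold check is invented for illustration):

```rust
use std::collections::binary_heap::PeekMut;
use std::collections::BinaryHeap;

fn main() {
    let mut heap = BinaryHeap::from([1, 5, 2]);
    if let Some(top) = heap.peek_mut() {
        // Remove the maximum only if it crosses a threshold, without a
        // separate peek()/pop() pair. `pop` is an associated function
        // rather than a method, so it cannot shadow methods of T
        // reachable through Deref.
        if *top > 3 {
            assert_eq!(PeekMut::pop(top), 5);
        }
    }
    assert_eq!(heap.peek(), Some(&2));
}
```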
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Clone> Clone for BinaryHeap<T> {
+ fn clone(&self) -> Self {
+ BinaryHeap { data: self.data.clone() }
+ }
+
+ fn clone_from(&mut self, source: &Self) {
+ self.data.clone_from(&source.data);
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Ord> Default for BinaryHeap<T> {
+ /// Creates an empty `BinaryHeap<T>`.
+ #[inline]
+ fn default() -> BinaryHeap<T> {
+ BinaryHeap::new()
+ }
+}
+
+#[stable(feature = "binaryheap_debug", since = "1.4.0")]
+impl<T: fmt::Debug> fmt::Debug for BinaryHeap<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.iter()).finish()
+ }
+}
+
+impl<T: Ord> BinaryHeap<T> {
+ /// Creates an empty `BinaryHeap` as a max-heap.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap = BinaryHeap::new();
+ /// heap.push(4);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ pub fn new() -> BinaryHeap<T> {
+ BinaryHeap { data: vec![] }
+ }
+
+ /// Creates an empty `BinaryHeap` with at least the specified capacity.
+ ///
+ /// The binary heap will be able to hold at least `capacity` elements without
+ /// reallocating. This method is allowed to allocate for more elements than
+ /// `capacity`. If `capacity` is 0, the binary heap will not allocate.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap = BinaryHeap::with_capacity(10);
+ /// heap.push(4);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ pub fn with_capacity(capacity: usize) -> BinaryHeap<T> {
+ BinaryHeap { data: Vec::with_capacity(capacity) }
+ }
+
+ /// Returns a mutable reference to the greatest item in the binary heap, or
+ /// `None` if it is empty.
+ ///
+ /// Note: If the `PeekMut` value is leaked, some heap elements might get
+ /// leaked along with it, but the remaining elements will remain a valid
+ /// heap.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap = BinaryHeap::new();
+ /// assert!(heap.peek_mut().is_none());
+ ///
+ /// heap.push(1);
+ /// heap.push(5);
+ /// heap.push(2);
+ /// {
+ /// let mut val = heap.peek_mut().unwrap();
+ /// *val = 0;
+ /// }
+ /// assert_eq!(heap.peek(), Some(&2));
+ /// ```
+ ///
+ /// # Time complexity
+ ///
+ /// If the item is modified then the worst case time complexity is *O*(log(*n*)),
+ /// otherwise it's *O*(1).
+ #[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
+ pub fn peek_mut(&mut self) -> Option<PeekMut<'_, T>> {
+ if self.is_empty() { None } else { Some(PeekMut { heap: self, original_len: None }) }
+ }
+
+ /// Removes the greatest item from the binary heap and returns it, or `None` if it
+ /// is empty.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap = BinaryHeap::from([1, 3]);
+ ///
+ /// assert_eq!(heap.pop(), Some(3));
+ /// assert_eq!(heap.pop(), Some(1));
+ /// assert_eq!(heap.pop(), None);
+ /// ```
+ ///
+ /// # Time complexity
+ ///
+ /// The worst case cost of `pop` on a heap containing *n* elements is *O*(log(*n*)).
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn pop(&mut self) -> Option<T> {
+ self.data.pop().map(|mut item| {
+ if !self.is_empty() {
+ swap(&mut item, &mut self.data[0]);
+ // SAFETY: !self.is_empty() means that self.len() > 0
+ unsafe { self.sift_down_to_bottom(0) };
+ }
+ item
+ })
+ }
+
+ /// Pushes an item onto the binary heap.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap = BinaryHeap::new();
+ /// heap.push(3);
+ /// heap.push(5);
+ /// heap.push(1);
+ ///
+ /// assert_eq!(heap.len(), 3);
+ /// assert_eq!(heap.peek(), Some(&5));
+ /// ```
+ ///
+ /// # Time complexity
+ ///
+ /// The expected cost of `push`, averaged over every possible ordering of
+ /// the elements being pushed, and over a sufficiently large number of
+ /// pushes, is *O*(1). This is the most meaningful cost metric when pushing
+ /// elements that are *not* already in any sorted pattern.
+ ///
+ /// The time complexity degrades if elements are pushed in predominantly
+ /// ascending order. In the worst case, elements are pushed in ascending
+ /// sorted order and the amortized cost per push is *O*(log(*n*)) against a heap
+ /// containing *n* elements.
+ ///
+ /// The worst case cost of a *single* call to `push` is *O*(*n*). The worst case
+ /// occurs when capacity is exhausted and needs a resize. The resize cost
+ /// has been amortized in the previous figures.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn push(&mut self, item: T) {
+ let old_len = self.len();
+ self.data.push(item);
+ // SAFETY: Since we pushed a new item it means that
+ // old_len = self.len() - 1 < self.len()
+ unsafe { self.sift_up(0, old_len) };
+ }
+
+ /// Consumes the `BinaryHeap` and returns a vector in sorted
+ /// (ascending) order.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ ///
+ /// let mut heap = BinaryHeap::from([1, 2, 4, 5, 7]);
+ /// heap.push(6);
+ /// heap.push(3);
+ ///
+ /// let vec = heap.into_sorted_vec();
+ /// assert_eq!(vec, [1, 2, 3, 4, 5, 6, 7]);
+ /// ```
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
+ pub fn into_sorted_vec(mut self) -> Vec<T> {
+ let mut end = self.len();
+ while end > 1 {
+ end -= 1;
+ // SAFETY: `end` goes from `self.len() - 1` to 1 (both included),
+ // so it's always a valid index to access.
+ // It is safe to access index 0 (i.e. `ptr`), because
+ // 1 <= end < self.len(), which means self.len() >= 2.
+ unsafe {
+ let ptr = self.data.as_mut_ptr();
+ ptr::swap(ptr, ptr.add(end));
+ }
+ // SAFETY: `end` goes from `self.len() - 1` to 1 (both included) so:
+ // 0 < 1 <= end <= self.len() - 1 < self.len()
+ // Which means 0 < end and end < self.len().
+ unsafe { self.sift_down_range(0, end) };
+ }
+ self.into_vec()
+ }
+
+ // The implementations of sift_up and sift_down use unsafe blocks in
+ // order to move an element out of the vector (leaving behind a
+ // hole), shift along the others and move the removed element back into the
+ // vector at the final location of the hole.
+ // The `Hole` type is used to represent this, and make sure
+ // the hole is filled back at the end of its scope, even on panic.
+ // Using a hole reduces the constant factor compared to using swaps,
+ // which involves twice as many moves.
+
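As a rough move count behind the comment above (an informal estimate, not something asserted by the code): sifting an element across *d* levels with swaps costs about 3 * *d* moves, three per swap, while the hole technique costs about *d* + 2, namely one move out, *d* shifts of displaced elements into the hole, and one move back in. For the shallow sifts that dominate in practice this is roughly the factor of two mentioned above.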
+ /// # Safety
+ ///
+ /// The caller must guarantee that `pos < self.len()`.
+ unsafe fn sift_up(&mut self, start: usize, pos: usize) -> usize {
+ // Take out the value at `pos` and create a hole.
+ // SAFETY: The caller guarantees that pos < self.len()
+ let mut hole = unsafe { Hole::new(&mut self.data, pos) };
+
+ while hole.pos() > start {
+ let parent = (hole.pos() - 1) / 2;
+
+ // SAFETY: hole.pos() > start >= 0, which means hole.pos() > 0
+ // and so hole.pos() - 1 can't underflow.
+ // This guarantees that parent < hole.pos() so
+ // it's a valid index and also != hole.pos().
+ if hole.element() <= unsafe { hole.get(parent) } {
+ break;
+ }
+
+ // SAFETY: Same as above
+ unsafe { hole.move_to(parent) };
+ }
+
+ hole.pos()
+ }
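The index arithmetic in `sift_up` and the `sift_down_*` methods follows the usual implicit-binary-tree layout over the backing vector; a tiny self-contained check of those formulas:

```rust
// For a node stored at index i of the backing Vec:
//   parent(i) = (i - 1) / 2 for i > 0, children sit at 2*i + 1 and 2*i + 2.
fn main() {
    let parent = |i: usize| (i - 1) / 2;
    let left = |i: usize| 2 * i + 1;
    let right = |i: usize| 2 * i + 2;

    assert_eq!((left(0), right(0)), (1, 2));
    assert_eq!(parent(left(3)), 3);
    assert_eq!(parent(right(3)), 3);
}
```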
+
+ /// Take an element at `pos` and move it down the heap,
+ /// while its children are larger.
+ ///
+ /// # Safety
+ ///
+ /// The caller must guarantee that `pos < end <= self.len()`.
+ unsafe fn sift_down_range(&mut self, pos: usize, end: usize) {
+ // SAFETY: The caller guarantees that pos < end <= self.len().
+ let mut hole = unsafe { Hole::new(&mut self.data, pos) };
+ let mut child = 2 * hole.pos() + 1;
+
+ // Loop invariant: child == 2 * hole.pos() + 1.
+ while child <= end.saturating_sub(2) {
+ // compare with the greater of the two children
+ // SAFETY: child < end - 1 < self.len() and
+ // child + 1 < end <= self.len(), so they're valid indexes.
+ // child == 2 * hole.pos() + 1 != hole.pos() and
+ // child + 1 == 2 * hole.pos() + 2 != hole.pos().
+ // FIXME: 2 * hole.pos() + 1 or 2 * hole.pos() + 2 could overflow
+ // if T is a ZST
+ child += unsafe { hole.get(child) <= hole.get(child + 1) } as usize;
+
+ // if we are already in order, stop.
+ // SAFETY: child is now either the old child or the old child+1
+            // We have already proven that both are < self.len() and != hole.pos()
+ if hole.element() >= unsafe { hole.get(child) } {
+ return;
+ }
+
+ // SAFETY: same as above.
+ unsafe { hole.move_to(child) };
+ child = 2 * hole.pos() + 1;
+ }
+
+        // SAFETY: `&&` short-circuits, so the second condition is only
+        // evaluated when child == end - 1 < self.len() already holds.
+ if child == end - 1 && hole.element() < unsafe { hole.get(child) } {
+ // SAFETY: child is already proven to be a valid index and
+ // child == 2 * hole.pos() + 1 != hole.pos().
+ unsafe { hole.move_to(child) };
+ }
+ }
+
+ /// # Safety
+ ///
+ /// The caller must guarantee that `pos < self.len()`.
+ unsafe fn sift_down(&mut self, pos: usize) {
+ let len = self.len();
+ // SAFETY: pos < len is guaranteed by the caller and
+ // obviously len = self.len() <= self.len().
+ unsafe { self.sift_down_range(pos, len) };
+ }
+
+ /// Take an element at `pos` and move it all the way down the heap,
+ /// then sift it up to its position.
+ ///
+ /// Note: This is faster when the element is known to be large / should
+ /// be closer to the bottom.
+ ///
+ /// # Safety
+ ///
+ /// The caller must guarantee that `pos < self.len()`.
+ unsafe fn sift_down_to_bottom(&mut self, mut pos: usize) {
+ let end = self.len();
+ let start = pos;
+
+ // SAFETY: The caller guarantees that pos < self.len().
+ let mut hole = unsafe { Hole::new(&mut self.data, pos) };
+ let mut child = 2 * hole.pos() + 1;
+
+ // Loop invariant: child == 2 * hole.pos() + 1.
+ while child <= end.saturating_sub(2) {
+ // SAFETY: child < end - 1 < self.len() and
+ // child + 1 < end <= self.len(), so they're valid indexes.
+ // child == 2 * hole.pos() + 1 != hole.pos() and
+ // child + 1 == 2 * hole.pos() + 2 != hole.pos().
+ // FIXME: 2 * hole.pos() + 1 or 2 * hole.pos() + 2 could overflow
+ // if T is a ZST
+ child += unsafe { hole.get(child) <= hole.get(child + 1) } as usize;
+
+ // SAFETY: Same as above
+ unsafe { hole.move_to(child) };
+ child = 2 * hole.pos() + 1;
+ }
+
+ if child == end - 1 {
+ // SAFETY: child == end - 1 < self.len(), so it's a valid index
+ // and child == 2 * hole.pos() + 1 != hole.pos().
+ unsafe { hole.move_to(child) };
+ }
+ pos = hole.pos();
+ drop(hole);
+
+ // SAFETY: pos is the position in the hole and was already proven
+ // to be a valid index.
+ unsafe { self.sift_up(start, pos) };
+ }
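Why the bottom-then-up strategy helps `pop` (a heuristic argument, not a bound stated by this code): a plain `sift_down` spends roughly two comparisons per level, one to pick the larger child and one against the moved element, while sifting straight to the bottom spends roughly one per level plus a usually short `sift_up`; the element `pop` moves is a former leaf, so it tends to belong near the bottom anyway.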
+
+ /// Rebuild assuming data[0..start] is still a proper heap.
+ fn rebuild_tail(&mut self, start: usize) {
+ if start == self.len() {
+ return;
+ }
+
+ let tail_len = self.len() - start;
+
+ #[inline(always)]
+ fn log2_fast(x: usize) -> usize {
+ (usize::BITS - x.leading_zeros() - 1) as usize
+ }
+
+ // `rebuild` takes O(self.len()) operations
+ // and about 2 * self.len() comparisons in the worst case
+ // while repeating `sift_up` takes O(tail_len * log(start)) operations
+ // and about 1 * tail_len * log_2(start) comparisons in the worst case,
+ // assuming start >= tail_len. For larger heaps, the crossover point
+ // no longer follows this reasoning and was determined empirically.
+ let better_to_rebuild = if start < tail_len {
+ true
+ } else if self.len() <= 2048 {
+ 2 * self.len() < tail_len * log2_fast(start)
+ } else {
+ 2 * self.len() < tail_len * 11
+ };
+
+ if better_to_rebuild {
+ self.rebuild();
+ } else {
+ for i in start..self.len() {
+ // SAFETY: The index `i` is always less than self.len().
+ unsafe { self.sift_up(0, i) };
+ }
+ }
+ }
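To put rough numbers on the heuristic above: appending a tail of `tail_len = 64` onto a valid heap of `start = 1024` elements gives a total length of 1088, so the `<= 2048` branch applies, and the comparison is 2 * 1088 = 2176 for a full rebuild against 64 * log2(1024) = 640 for sifting the tail, so the tail is sifted up; with `start = 64` and `tail_len = 1024` the first branch (`start < tail_len`) fires and a full rebuild is chosen instead.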
+
+ fn rebuild(&mut self) {
+ let mut n = self.len() / 2;
+ while n > 0 {
+ n -= 1;
+ // SAFETY: n starts from self.len() / 2 and goes down to 0.
+ // The only case when !(n < self.len()) is if
+ // self.len() == 0, but it's ruled out by the loop condition.
+ unsafe { self.sift_down(n) };
+ }
+ }
+
+ /// Moves all the elements of `other` into `self`, leaving `other` empty.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ ///
+ /// let mut a = BinaryHeap::from([-10, 1, 2, 3, 3]);
+ /// let mut b = BinaryHeap::from([-20, 5, 43]);
+ ///
+ /// a.append(&mut b);
+ ///
+ /// assert_eq!(a.into_sorted_vec(), [-20, -10, 1, 2, 3, 3, 5, 43]);
+ /// assert!(b.is_empty());
+ /// ```
+ #[stable(feature = "binary_heap_append", since = "1.11.0")]
+ pub fn append(&mut self, other: &mut Self) {
+ if self.len() < other.len() {
+ swap(self, other);
+ }
+
+ let start = self.data.len();
+
+ self.data.append(&mut other.data);
+
+ self.rebuild_tail(start);
+ }
+
+ /// Clears the binary heap, returning an iterator over the removed elements
+ /// in heap order. If the iterator is dropped before being fully consumed,
+ /// it drops the remaining elements in heap order.
+ ///
+ /// The returned iterator keeps a mutable borrow on the heap to optimize
+ /// its implementation.
+ ///
+ /// Note:
+ /// * `.drain_sorted()` is *O*(*n* \* log(*n*)); much slower than `.drain()`.
+ /// You should use the latter for most cases.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(binary_heap_drain_sorted)]
+ /// use std::collections::BinaryHeap;
+ ///
+ /// let mut heap = BinaryHeap::from([1, 2, 3, 4, 5]);
+ /// assert_eq!(heap.len(), 5);
+ ///
+ /// drop(heap.drain_sorted()); // removes all elements in heap order
+ /// assert_eq!(heap.len(), 0);
+ /// ```
+ #[inline]
+ #[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
+ pub fn drain_sorted(&mut self) -> DrainSorted<'_, T> {
+ DrainSorted { inner: self }
+ }
+
+ /// Retains only the elements specified by the predicate.
+ ///
+ /// In other words, remove all elements `e` for which `f(&e)` returns
+ /// `false`. The elements are visited in unsorted (and unspecified) order.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(binary_heap_retain)]
+ /// use std::collections::BinaryHeap;
+ ///
+ /// let mut heap = BinaryHeap::from([-10, -5, 1, 2, 4, 13]);
+ ///
+ /// heap.retain(|x| x % 2 == 0); // only keep even numbers
+ ///
+ /// assert_eq!(heap.into_sorted_vec(), [-10, 2, 4])
+ /// ```
+ #[unstable(feature = "binary_heap_retain", issue = "71503")]
+ pub fn retain<F>(&mut self, mut f: F)
+ where
+ F: FnMut(&T) -> bool,
+ {
+ let mut first_removed = self.len();
+ let mut i = 0;
+ self.data.retain(|e| {
+ let keep = f(e);
+ if !keep && i < first_removed {
+ first_removed = i;
+ }
+ i += 1;
+ keep
+ });
+ // data[0..first_removed] is untouched, so we only need to rebuild the tail:
+ self.rebuild_tail(first_removed);
+ }
+}
+
+impl<T> BinaryHeap<T> {
+ /// Returns an iterator visiting all values in the underlying vector, in
+ /// arbitrary order.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let heap = BinaryHeap::from([1, 2, 3, 4]);
+ ///
+ /// // Print 1, 2, 3, 4 in arbitrary order
+ /// for x in heap.iter() {
+ /// println!("{x}");
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn iter(&self) -> Iter<'_, T> {
+ Iter { iter: self.data.iter() }
+ }
+
+ /// Returns an iterator which retrieves elements in heap order.
+ /// This method consumes the original heap.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(binary_heap_into_iter_sorted)]
+ /// use std::collections::BinaryHeap;
+ /// let heap = BinaryHeap::from([1, 2, 3, 4, 5]);
+ ///
+ /// assert_eq!(heap.into_iter_sorted().take(2).collect::<Vec<_>>(), [5, 4]);
+ /// ```
+ #[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
+ pub fn into_iter_sorted(self) -> IntoIterSorted<T> {
+ IntoIterSorted { inner: self }
+ }
+
+ /// Returns the greatest item in the binary heap, or `None` if it is empty.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap = BinaryHeap::new();
+ /// assert_eq!(heap.peek(), None);
+ ///
+ /// heap.push(1);
+ /// heap.push(5);
+ /// heap.push(2);
+ /// assert_eq!(heap.peek(), Some(&5));
+ ///
+ /// ```
+ ///
+ /// # Time complexity
+ ///
+ /// Cost is *O*(1) in the worst case.
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn peek(&self) -> Option<&T> {
+ self.data.get(0)
+ }
+
+ /// Returns the number of elements the binary heap can hold without reallocating.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap = BinaryHeap::with_capacity(100);
+ /// assert!(heap.capacity() >= 100);
+ /// heap.push(4);
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn capacity(&self) -> usize {
+ self.data.capacity()
+ }
+
+ /// Reserves the minimum capacity for at least `additional` elements more than
+ /// the current length. Unlike [`reserve`], this will not
+ /// deliberately over-allocate to speculatively avoid frequent allocations.
+ /// After calling `reserve_exact`, capacity will be greater than or equal to
+ /// `self.len() + additional`. Does nothing if the capacity is already
+ /// sufficient.
+ ///
+ /// [`reserve`]: BinaryHeap::reserve
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity overflows [`usize`].
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap = BinaryHeap::new();
+ /// heap.reserve_exact(100);
+ /// assert!(heap.capacity() >= 100);
+ /// heap.push(4);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn reserve_exact(&mut self, additional: usize) {
+ self.data.reserve_exact(additional);
+ }
+
+ /// Reserves capacity for at least `additional` elements more than the
+ /// current length. The allocator may reserve more space to speculatively
+ /// avoid frequent allocations. After calling `reserve`,
+ /// capacity will be greater than or equal to `self.len() + additional`.
+ /// Does nothing if capacity is already sufficient.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity overflows [`usize`].
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap = BinaryHeap::new();
+ /// heap.reserve(100);
+ /// assert!(heap.capacity() >= 100);
+ /// heap.push(4);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn reserve(&mut self, additional: usize) {
+ self.data.reserve(additional);
+ }
+
+ /// Tries to reserve the minimum capacity for at least `additional` elements
+ /// more than the current length. Unlike [`try_reserve`], this will not
+ /// deliberately over-allocate to speculatively avoid frequent allocations.
+ /// After calling `try_reserve_exact`, capacity will be greater than or
+ /// equal to `self.len() + additional` if it returns `Ok(())`.
+ /// Does nothing if the capacity is already sufficient.
+ ///
+ /// Note that the allocator may give the collection more space than it
+ /// requests. Therefore, capacity can not be relied upon to be precisely
+ /// minimal. Prefer [`try_reserve`] if future insertions are expected.
+ ///
+ /// [`try_reserve`]: BinaryHeap::try_reserve
+ ///
+ /// # Errors
+ ///
+ /// If the capacity overflows, or the allocator reports a failure, then an error
+ /// is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// use std::collections::TryReserveError;
+ ///
+ /// fn find_max_slow(data: &[u32]) -> Result<Option<u32>, TryReserveError> {
+ /// let mut heap = BinaryHeap::new();
+ ///
+ /// // Pre-reserve the memory, exiting if we can't
+ /// heap.try_reserve_exact(data.len())?;
+ ///
+ /// // Now we know this can't OOM in the middle of our complex work
+ /// heap.extend(data.iter());
+ ///
+ /// Ok(heap.pop())
+ /// }
+ /// # find_max_slow(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
+ /// ```
+ #[stable(feature = "try_reserve_2", since = "1.63.0")]
+ pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
+ self.data.try_reserve_exact(additional)
+ }
+
+ /// Tries to reserve capacity for at least `additional` elements more than the
+ /// current length. The allocator may reserve more space to speculatively
+ /// avoid frequent allocations. After calling `try_reserve`, capacity will be
+ /// greater than or equal to `self.len() + additional` if it returns
+ /// `Ok(())`. Does nothing if capacity is already sufficient. This method
+ /// preserves the contents even if an error occurs.
+ ///
+ /// # Errors
+ ///
+ /// If the capacity overflows, or the allocator reports a failure, then an error
+ /// is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// use std::collections::TryReserveError;
+ ///
+ /// fn find_max_slow(data: &[u32]) -> Result<Option<u32>, TryReserveError> {
+ /// let mut heap = BinaryHeap::new();
+ ///
+ /// // Pre-reserve the memory, exiting if we can't
+ /// heap.try_reserve(data.len())?;
+ ///
+ /// // Now we know this can't OOM in the middle of our complex work
+ /// heap.extend(data.iter());
+ ///
+ /// Ok(heap.pop())
+ /// }
+ /// # find_max_slow(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
+ /// ```
+ #[stable(feature = "try_reserve_2", since = "1.63.0")]
+ pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
+ self.data.try_reserve(additional)
+ }
+
+ /// Discards as much additional capacity as possible.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap: BinaryHeap<i32> = BinaryHeap::with_capacity(100);
+ ///
+ /// assert!(heap.capacity() >= 100);
+ /// heap.shrink_to_fit();
+ /// assert!(heap.capacity() == 0);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn shrink_to_fit(&mut self) {
+ self.data.shrink_to_fit();
+ }
+
+ /// Discards capacity with a lower bound.
+ ///
+ /// The capacity will remain at least as large as both the length
+ /// and the supplied value.
+ ///
+ /// If the current capacity is less than the lower limit, this is a no-op.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap: BinaryHeap<i32> = BinaryHeap::with_capacity(100);
+ ///
+ /// assert!(heap.capacity() >= 100);
+ /// heap.shrink_to(10);
+ /// assert!(heap.capacity() >= 10);
+ /// ```
+ #[inline]
+ #[stable(feature = "shrink_to", since = "1.56.0")]
+ pub fn shrink_to(&mut self, min_capacity: usize) {
+ self.data.shrink_to(min_capacity)
+ }
+
+ /// Returns a slice of all values in the underlying vector, in arbitrary
+ /// order.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(binary_heap_as_slice)]
+ /// use std::collections::BinaryHeap;
+ /// use std::io::{self, Write};
+ ///
+ /// let heap = BinaryHeap::from([1, 2, 3, 4, 5, 6, 7]);
+ ///
+ /// io::sink().write(heap.as_slice()).unwrap();
+ /// ```
+ #[must_use]
+ #[unstable(feature = "binary_heap_as_slice", issue = "83659")]
+ pub fn as_slice(&self) -> &[T] {
+ self.data.as_slice()
+ }
+
+ /// Consumes the `BinaryHeap` and returns the underlying vector
+ /// in arbitrary order.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let heap = BinaryHeap::from([1, 2, 3, 4, 5, 6, 7]);
+ /// let vec = heap.into_vec();
+ ///
+ /// // Will print in some order
+ /// for x in vec {
+ /// println!("{x}");
+ /// }
+ /// ```
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
+ pub fn into_vec(self) -> Vec<T> {
+ self.into()
+ }
+
+ /// Returns the length of the binary heap.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let heap = BinaryHeap::from([1, 3]);
+ ///
+ /// assert_eq!(heap.len(), 2);
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn len(&self) -> usize {
+ self.data.len()
+ }
+
+ /// Checks if the binary heap is empty.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap = BinaryHeap::new();
+ ///
+ /// assert!(heap.is_empty());
+ ///
+ /// heap.push(3);
+ /// heap.push(5);
+ /// heap.push(1);
+ ///
+ /// assert!(!heap.is_empty());
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+
+ /// Clears the binary heap, returning an iterator over the removed elements
+ /// in arbitrary order. If the iterator is dropped before being fully
+ /// consumed, it drops the remaining elements in arbitrary order.
+ ///
+ /// The returned iterator keeps a mutable borrow on the heap to optimize
+ /// its implementation.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap = BinaryHeap::from([1, 3]);
+ ///
+ /// assert!(!heap.is_empty());
+ ///
+ /// for x in heap.drain() {
+ /// println!("{x}");
+ /// }
+ ///
+ /// assert!(heap.is_empty());
+ /// ```
+ #[inline]
+ #[stable(feature = "drain", since = "1.6.0")]
+ pub fn drain(&mut self) -> Drain<'_, T> {
+ Drain { iter: self.data.drain(..) }
+ }
+
+ /// Drops all items from the binary heap.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap = BinaryHeap::from([1, 3]);
+ ///
+ /// assert!(!heap.is_empty());
+ ///
+ /// heap.clear();
+ ///
+ /// assert!(heap.is_empty());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn clear(&mut self) {
+ self.drain();
+ }
+}
+
+/// Hole represents a hole in a slice, i.e., an index without a valid value
+/// (because it was moved from or duplicated).
+/// In drop, `Hole` will restore the slice by filling the hole
+/// position with the value that was originally removed.
+struct Hole<'a, T: 'a> {
+ data: &'a mut [T],
+ elt: ManuallyDrop<T>,
+ pos: usize,
+}
+
+impl<'a, T> Hole<'a, T> {
+ /// Create a new `Hole` at index `pos`.
+ ///
+ /// Unsafe because pos must be within the data slice.
+ #[inline]
+ unsafe fn new(data: &'a mut [T], pos: usize) -> Self {
+ debug_assert!(pos < data.len());
+ // SAFE: pos should be inside the slice
+ let elt = unsafe { ptr::read(data.get_unchecked(pos)) };
+ Hole { data, elt: ManuallyDrop::new(elt), pos }
+ }
+
+ #[inline]
+ fn pos(&self) -> usize {
+ self.pos
+ }
+
+ /// Returns a reference to the element removed.
+ #[inline]
+ fn element(&self) -> &T {
+ &self.elt
+ }
+
+ /// Returns a reference to the element at `index`.
+ ///
+ /// Unsafe because index must be within the data slice and not equal to pos.
+ #[inline]
+ unsafe fn get(&self, index: usize) -> &T {
+ debug_assert!(index != self.pos);
+ debug_assert!(index < self.data.len());
+ unsafe { self.data.get_unchecked(index) }
+ }
+
+ /// Move hole to new location
+ ///
+ /// Unsafe because index must be within the data slice and not equal to pos.
+ #[inline]
+ unsafe fn move_to(&mut self, index: usize) {
+ debug_assert!(index != self.pos);
+ debug_assert!(index < self.data.len());
+ unsafe {
+ let ptr = self.data.as_mut_ptr();
+ let index_ptr: *const _ = ptr.add(index);
+ let hole_ptr = ptr.add(self.pos);
+ ptr::copy_nonoverlapping(index_ptr, hole_ptr, 1);
+ }
+ self.pos = index;
+ }
+}
+
+impl<T> Drop for Hole<'_, T> {
+ #[inline]
+ fn drop(&mut self) {
+ // fill the hole again
+ unsafe {
+ let pos = self.pos;
+ ptr::copy_nonoverlapping(&*self.elt, self.data.get_unchecked_mut(pos), 1);
+ }
+ }
+}
+
+/// An iterator over the elements of a `BinaryHeap`.
+///
+/// This `struct` is created by [`BinaryHeap::iter()`]. See its
+/// documentation for more.
+///
+/// [`iter`]: BinaryHeap::iter
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Iter<'a, T: 'a> {
+ iter: slice::Iter<'a, T>,
+}
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("Iter").field(&self.iter.as_slice()).finish()
+ }
+}
+
+// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Clone for Iter<'_, T> {
+ fn clone(&self) -> Self {
+ Iter { iter: self.iter.clone() }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> Iterator for Iter<'a, T> {
+ type Item = &'a T;
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a T> {
+ self.iter.next()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+
+ #[inline]
+ fn last(self) -> Option<&'a T> {
+ self.iter.last()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> DoubleEndedIterator for Iter<'a, T> {
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a T> {
+ self.iter.next_back()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> ExactSizeIterator for Iter<'_, T> {
+ fn is_empty(&self) -> bool {
+ self.iter.is_empty()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T> FusedIterator for Iter<'_, T> {}
+
+/// An owning iterator over the elements of a `BinaryHeap`.
+///
+/// This `struct` is created by [`BinaryHeap::into_iter()`]
+/// (provided by the [`IntoIterator`] trait). See its documentation for more.
+///
+/// [`into_iter`]: BinaryHeap::into_iter
+/// [`IntoIterator`]: core::iter::IntoIterator
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(Clone)]
+pub struct IntoIter<T> {
+ iter: vec::IntoIter<T>,
+}
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<T: fmt::Debug> fmt::Debug for IntoIter<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("IntoIter").field(&self.iter.as_slice()).finish()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Iterator for IntoIter<T> {
+ type Item = T;
+
+ #[inline]
+ fn next(&mut self) -> Option<T> {
+ self.iter.next()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> DoubleEndedIterator for IntoIter<T> {
+ #[inline]
+ fn next_back(&mut self) -> Option<T> {
+ self.iter.next_back()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> ExactSizeIterator for IntoIter<T> {
+ fn is_empty(&self) -> bool {
+ self.iter.is_empty()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T> FusedIterator for IntoIter<T> {}
+
+// In addition to the SAFETY invariants of the following three unsafe traits,
+// also refer to the vec::in_place_collect module documentation for an overview.
+#[unstable(issue = "none", feature = "inplace_iteration")]
+#[doc(hidden)]
+unsafe impl<T> SourceIter for IntoIter<T> {
+ type Source = IntoIter<T>;
+
+ #[inline]
+ unsafe fn as_inner(&mut self) -> &mut Self::Source {
+ self
+ }
+}
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+#[doc(hidden)]
+unsafe impl<I> InPlaceIterable for IntoIter<I> {}
+
+unsafe impl<I> AsVecIntoIter for IntoIter<I> {
+ type Item = I;
+
+ fn as_into_iter(&mut self) -> &mut vec::IntoIter<Self::Item> {
+ &mut self.iter
+ }
+}
+
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
+#[derive(Clone, Debug)]
+pub struct IntoIterSorted<T> {
+ inner: BinaryHeap<T>,
+}
+
+#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
+impl<T: Ord> Iterator for IntoIterSorted<T> {
+ type Item = T;
+
+ #[inline]
+ fn next(&mut self) -> Option<T> {
+ self.inner.pop()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let exact = self.inner.len();
+ (exact, Some(exact))
+ }
+}
+
+#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
+impl<T: Ord> ExactSizeIterator for IntoIterSorted<T> {}
+
+#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
+impl<T: Ord> FusedIterator for IntoIterSorted<T> {}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<T: Ord> TrustedLen for IntoIterSorted<T> {}
+
+/// A draining iterator over the elements of a `BinaryHeap`.
+///
+/// This `struct` is created by [`BinaryHeap::drain()`]. See its
+/// documentation for more.
+///
+/// [`drain`]: BinaryHeap::drain
+#[stable(feature = "drain", since = "1.6.0")]
+#[derive(Debug)]
+pub struct Drain<'a, T: 'a> {
+ iter: vec::Drain<'a, T>,
+}
+
+#[stable(feature = "drain", since = "1.6.0")]
+impl<T> Iterator for Drain<'_, T> {
+ type Item = T;
+
+ #[inline]
+ fn next(&mut self) -> Option<T> {
+ self.iter.next()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+}
+
+#[stable(feature = "drain", since = "1.6.0")]
+impl<T> DoubleEndedIterator for Drain<'_, T> {
+ #[inline]
+ fn next_back(&mut self) -> Option<T> {
+ self.iter.next_back()
+ }
+}
+
+#[stable(feature = "drain", since = "1.6.0")]
+impl<T> ExactSizeIterator for Drain<'_, T> {
+ fn is_empty(&self) -> bool {
+ self.iter.is_empty()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T> FusedIterator for Drain<'_, T> {}
+
+/// A draining iterator over the elements of a `BinaryHeap`.
+///
+/// This `struct` is created by [`BinaryHeap::drain_sorted()`]. See its
+/// documentation for more.
+///
+/// [`drain_sorted`]: BinaryHeap::drain_sorted
+#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
+#[derive(Debug)]
+pub struct DrainSorted<'a, T: Ord> {
+ inner: &'a mut BinaryHeap<T>,
+}
+
+#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
+impl<'a, T: Ord> Drop for DrainSorted<'a, T> {
+ /// Removes heap elements in heap order.
+ fn drop(&mut self) {
+ struct DropGuard<'r, 'a, T: Ord>(&'r mut DrainSorted<'a, T>);
+
+ impl<'r, 'a, T: Ord> Drop for DropGuard<'r, 'a, T> {
+ fn drop(&mut self) {
+ while self.0.inner.pop().is_some() {}
+ }
+ }
+
+ while let Some(item) = self.inner.pop() {
+ let guard = DropGuard(self);
+ drop(item);
+ mem::forget(guard);
+ }
+ }
+}
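The `DropGuard` above is a reusable pattern: arm a guard whose destructor finishes the cleanup, then disarm it with `mem::forget` on the happy path, so that a panic in one element's destructor still drops the rest. A stripped-down sketch of the same idea on a plain `Vec` (the `ClearOnDrop` and `drain_all` names are invented for the example):

```rust
struct ClearOnDrop<'a, T>(&'a mut Vec<T>);

impl<T> Drop for ClearOnDrop<'_, T> {
    fn drop(&mut self) {
        // Only reached during unwinding, because the guard is otherwise
        // disarmed with mem::forget below.
        self.0.clear();
    }
}

fn drain_all<T>(v: &mut Vec<T>) {
    while let Some(item) = v.pop() {
        let guard = ClearOnDrop(&mut *v); // armed while `item` drops
        drop(item);                       // may panic in T's destructor
        std::mem::forget(guard);          // disarm on the happy path
    }
}
```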
+
+#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
+impl<T: Ord> Iterator for DrainSorted<'_, T> {
+ type Item = T;
+
+ #[inline]
+ fn next(&mut self) -> Option<T> {
+ self.inner.pop()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let exact = self.inner.len();
+ (exact, Some(exact))
+ }
+}
+
+#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
+impl<T: Ord> ExactSizeIterator for DrainSorted<'_, T> {}
+
+#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
+impl<T: Ord> FusedIterator for DrainSorted<'_, T> {}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<T: Ord> TrustedLen for DrainSorted<'_, T> {}
+
+#[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
+impl<T: Ord> From<Vec<T>> for BinaryHeap<T> {
+ /// Converts a `Vec<T>` into a `BinaryHeap<T>`.
+ ///
+ /// This conversion happens in-place, and has *O*(*n*) time complexity.
+ fn from(vec: Vec<T>) -> BinaryHeap<T> {
+ let mut heap = BinaryHeap { data: vec };
+ heap.rebuild();
+ heap
+ }
+}
+
+#[stable(feature = "std_collections_from_array", since = "1.56.0")]
+impl<T: Ord, const N: usize> From<[T; N]> for BinaryHeap<T> {
+ /// ```
+ /// use std::collections::BinaryHeap;
+ ///
+ /// let mut h1 = BinaryHeap::from([1, 4, 2, 3]);
+ /// let mut h2: BinaryHeap<_> = [1, 4, 2, 3].into();
+ /// while let Some((a, b)) = h1.pop().zip(h2.pop()) {
+ /// assert_eq!(a, b);
+ /// }
+ /// ```
+ fn from(arr: [T; N]) -> Self {
+ Self::from_iter(arr)
+ }
+}
+
+#[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
+impl<T> From<BinaryHeap<T>> for Vec<T> {
+ /// Converts a `BinaryHeap<T>` into a `Vec<T>`.
+ ///
+ /// This conversion requires no data movement or allocation, and has
+ /// constant time complexity.
+ fn from(heap: BinaryHeap<T>) -> Vec<T> {
+ heap.data
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Ord> FromIterator<T> for BinaryHeap<T> {
+ fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> BinaryHeap<T> {
+ BinaryHeap::from(iter.into_iter().collect::<Vec<_>>())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> IntoIterator for BinaryHeap<T> {
+ type Item = T;
+ type IntoIter = IntoIter<T>;
+
+ /// Creates a consuming iterator, that is, one that moves each value out of
+ /// the binary heap in arbitrary order. The binary heap cannot be used
+ /// after calling this.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let heap = BinaryHeap::from([1, 2, 3, 4]);
+ ///
+ /// // Print 1, 2, 3, 4 in arbitrary order
+ /// for x in heap.into_iter() {
+ /// // x has type i32, not &i32
+ /// println!("{x}");
+ /// }
+ /// ```
+ fn into_iter(self) -> IntoIter<T> {
+ IntoIter { iter: self.data.into_iter() }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> IntoIterator for &'a BinaryHeap<T> {
+ type Item = &'a T;
+ type IntoIter = Iter<'a, T>;
+
+ fn into_iter(self) -> Iter<'a, T> {
+ self.iter()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Ord> Extend<T> for BinaryHeap<T> {
+ #[inline]
+ fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
+ <Self as SpecExtend<I>>::spec_extend(self, iter);
+ }
+
+ #[inline]
+ fn extend_one(&mut self, item: T) {
+ self.push(item);
+ }
+
+ #[inline]
+ fn extend_reserve(&mut self, additional: usize) {
+ self.reserve(additional);
+ }
+}
+
+impl<T: Ord, I: IntoIterator<Item = T>> SpecExtend<I> for BinaryHeap<T> {
+ default fn spec_extend(&mut self, iter: I) {
+ self.extend_desugared(iter.into_iter());
+ }
+}
+
+impl<T: Ord> SpecExtend<Vec<T>> for BinaryHeap<T> {
+ fn spec_extend(&mut self, ref mut other: Vec<T>) {
+ let start = self.data.len();
+ self.data.append(other);
+ self.rebuild_tail(start);
+ }
+}
+
+impl<T: Ord> SpecExtend<BinaryHeap<T>> for BinaryHeap<T> {
+ fn spec_extend(&mut self, ref mut other: BinaryHeap<T>) {
+ self.append(other);
+ }
+}
+
+impl<T: Ord> BinaryHeap<T> {
+ fn extend_desugared<I: IntoIterator<Item = T>>(&mut self, iter: I) {
+ let iterator = iter.into_iter();
+ let (lower, _) = iterator.size_hint();
+
+ self.reserve(lower);
+
+ iterator.for_each(move |elem| self.push(elem));
+ }
+}
+
+#[stable(feature = "extend_ref", since = "1.2.0")]
+impl<'a, T: 'a + Ord + Copy> Extend<&'a T> for BinaryHeap<T> {
+ fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
+ self.extend(iter.into_iter().cloned());
+ }
+
+ #[inline]
+ fn extend_one(&mut self, &item: &'a T) {
+ self.push(item);
+ }
+
+ #[inline]
+ fn extend_reserve(&mut self, additional: usize) {
+ self.reserve(additional);
+ }
+}
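A short usage sketch of the `Extend` paths above; the specializations are purely an optimization, so the observable result is the same whichever impl is selected:

```rust
use std::collections::BinaryHeap;

fn main() {
    let mut heap = BinaryHeap::from([5, 3, 8]);

    // Generic path: elements are pushed one by one via extend_desugared.
    heap.extend([1, 9]);

    // Vec<T> specialization: the vector's storage is appended and only
    // the new tail of the heap is re-heapified.
    heap.extend(vec![7, 2]);

    assert_eq!(heap.into_sorted_vec(), [1, 2, 3, 5, 7, 8, 9]);
}
```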
diff --git a/library/alloc/src/collections/binary_heap/tests.rs b/library/alloc/src/collections/binary_heap/tests.rs
index 5a05215ae..ffbb6c80a 100644
--- a/library/alloc/src/collections/binary_heap/tests.rs
+++ b/library/alloc/src/collections/binary_heap/tests.rs
@@ -1,8 +1,9 @@
use super::*;
use crate::boxed::Box;
+use crate::testing::crash_test::{CrashTestDummy, Panic};
+use core::mem;
use std::iter::TrustedLen;
use std::panic::{catch_unwind, AssertUnwindSafe};
-use std::sync::atomic::{AtomicU32, Ordering};
#[test]
fn test_iterator() {
@@ -147,6 +148,24 @@ fn test_peek_mut() {
}
#[test]
+fn test_peek_mut_leek() {
+ let data = vec![4, 2, 7];
+ let mut heap = BinaryHeap::from(data);
+ let mut max = heap.peek_mut().unwrap();
+ *max = -1;
+
+ // The PeekMut object's Drop impl would have been responsible for moving the
+ // -1 out of the max position of the BinaryHeap, but we don't run it.
+ mem::forget(max);
+
+ // Absent some mitigation like leak amplification, the -1 would incorrectly
+ // end up in the last position of the returned Vec, with the rest of the
+ // heap's original contents in front of it in sorted order.
+ let sorted_vec = heap.into_sorted_vec();
+ assert!(sorted_vec.is_sorted(), "{:?}", sorted_vec);
+}
+
+#[test]
fn test_peek_mut_pop() {
let data = vec![2, 4, 6, 2, 1, 8, 10, 3, 5, 7, 0, 9, 1];
let mut heap = BinaryHeap::from(data);
@@ -291,33 +310,83 @@ fn test_drain_sorted() {
#[test]
fn test_drain_sorted_leak() {
- static DROPS: AtomicU32 = AtomicU32::new(0);
-
- #[derive(Clone, PartialEq, Eq, PartialOrd, Ord)]
- struct D(u32, bool);
-
- impl Drop for D {
- fn drop(&mut self) {
- DROPS.fetch_add(1, Ordering::SeqCst);
-
- if self.1 {
- panic!("panic in `drop`");
- }
- }
- }
-
+ let d0 = CrashTestDummy::new(0);
+ let d1 = CrashTestDummy::new(1);
+ let d2 = CrashTestDummy::new(2);
+ let d3 = CrashTestDummy::new(3);
+ let d4 = CrashTestDummy::new(4);
+ let d5 = CrashTestDummy::new(5);
let mut q = BinaryHeap::from(vec![
- D(0, false),
- D(1, false),
- D(2, false),
- D(3, true),
- D(4, false),
- D(5, false),
+ d0.spawn(Panic::Never),
+ d1.spawn(Panic::Never),
+ d2.spawn(Panic::Never),
+ d3.spawn(Panic::InDrop),
+ d4.spawn(Panic::Never),
+ d5.spawn(Panic::Never),
]);
- catch_unwind(AssertUnwindSafe(|| drop(q.drain_sorted()))).ok();
+ catch_unwind(AssertUnwindSafe(|| drop(q.drain_sorted()))).unwrap_err();
+
+ assert_eq!(d0.dropped(), 1);
+ assert_eq!(d1.dropped(), 1);
+ assert_eq!(d2.dropped(), 1);
+ assert_eq!(d3.dropped(), 1);
+ assert_eq!(d4.dropped(), 1);
+ assert_eq!(d5.dropped(), 1);
+ assert!(q.is_empty());
+}
+
+#[test]
+fn test_drain_forget() {
+ let a = CrashTestDummy::new(0);
+ let b = CrashTestDummy::new(1);
+ let c = CrashTestDummy::new(2);
+ let mut q =
+ BinaryHeap::from(vec![a.spawn(Panic::Never), b.spawn(Panic::Never), c.spawn(Panic::Never)]);
+
+ catch_unwind(AssertUnwindSafe(|| {
+ let mut it = q.drain();
+ it.next();
+ mem::forget(it);
+ }))
+ .unwrap();
+ // Behaviour after leaking is explicitly unspecified and order is arbitrary,
+ // so it's fine if these start failing, but probably worth knowing.
+ assert!(q.is_empty());
+ assert_eq!(a.dropped() + b.dropped() + c.dropped(), 1);
+ assert_eq!(a.dropped(), 0);
+ assert_eq!(b.dropped(), 0);
+ assert_eq!(c.dropped(), 1);
+ drop(q);
+ assert_eq!(a.dropped(), 0);
+ assert_eq!(b.dropped(), 0);
+ assert_eq!(c.dropped(), 1);
+}
- assert_eq!(DROPS.load(Ordering::SeqCst), 6);
+#[test]
+fn test_drain_sorted_forget() {
+ let a = CrashTestDummy::new(0);
+ let b = CrashTestDummy::new(1);
+ let c = CrashTestDummy::new(2);
+ let mut q =
+ BinaryHeap::from(vec![a.spawn(Panic::Never), b.spawn(Panic::Never), c.spawn(Panic::Never)]);
+
+ catch_unwind(AssertUnwindSafe(|| {
+ let mut it = q.drain_sorted();
+ it.next();
+ mem::forget(it);
+ }))
+ .unwrap();
+ // Behaviour after leaking is explicitly unspecified,
+ // so it's fine if these start failing, but probably worth knowing.
+ assert_eq!(q.len(), 2);
+ assert_eq!(a.dropped(), 0);
+ assert_eq!(b.dropped(), 0);
+ assert_eq!(c.dropped(), 1);
+ drop(q);
+ assert_eq!(a.dropped(), 1);
+ assert_eq!(b.dropped(), 1);
+ assert_eq!(c.dropped(), 1);
}
#[test]
@@ -415,7 +484,7 @@ fn test_retain() {
#[test]
#[cfg(not(target_os = "emscripten"))]
fn panic_safe() {
- use rand::{seq::SliceRandom, thread_rng};
+ use rand::seq::SliceRandom;
use std::cmp;
use std::panic::{self, AssertUnwindSafe};
use std::sync::atomic::{AtomicUsize, Ordering};
@@ -440,7 +509,7 @@ fn panic_safe() {
self.0.partial_cmp(&other.0)
}
}
- let mut rng = thread_rng();
+ let mut rng = crate::test_helpers::test_rng();
const DATASZ: usize = 32;
// Miri is too slow
let ntest = if cfg!(miri) { 1 } else { 10 };
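The `CrashTestDummy`/`Panic` helpers used above come from the relocated `crate::testing` module. As a rough standalone illustration of the same drop-counting pattern (hypothetical types, not the stdlib helper):

```rust
use std::cell::Cell;
use std::panic::{catch_unwind, AssertUnwindSafe};
use std::rc::Rc;

// Each element shares a counter with the test and may panic from `Drop`,
// which is how these tests check that a panicking drop leaks nothing.
#[derive(Clone, Default)]
struct Counter(Rc<Cell<u32>>);

struct Tracked {
    counter: Counter,
    panic_in_drop: bool,
}

impl Drop for Tracked {
    fn drop(&mut self) {
        self.counter.0.set(self.counter.0.get() + 1);
        if self.panic_in_drop {
            panic!("panic in drop");
        }
    }
}

fn main() {
    let counter = Counter::default();
    let items = vec![
        Tracked { counter: counter.clone(), panic_in_drop: false },
        Tracked { counter: counter.clone(), panic_in_drop: true },
        Tracked { counter: counter.clone(), panic_in_drop: false },
    ];
    // Dropping the collection panics part-way through, but every element
    // must still have been dropped exactly once afterwards.
    catch_unwind(AssertUnwindSafe(|| drop(items))).unwrap_err();
    assert_eq!(counter.0.get(), 3);
}
```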
diff --git a/library/alloc/src/collections/btree/map/tests.rs b/library/alloc/src/collections/btree/map/tests.rs
index 4c372b1d6..700b1463b 100644
--- a/library/alloc/src/collections/btree/map/tests.rs
+++ b/library/alloc/src/collections/btree/map/tests.rs
@@ -1,12 +1,12 @@
-use super::super::testing::crash_test::{CrashTestDummy, Panic};
-use super::super::testing::ord_chaos::{Cyclic3, Governed, Governor};
-use super::super::testing::rng::DeterministicRng;
use super::Entry::{Occupied, Vacant};
use super::*;
use crate::boxed::Box;
use crate::fmt::Debug;
use crate::rc::Rc;
use crate::string::{String, ToString};
+use crate::testing::crash_test::{CrashTestDummy, Panic};
+use crate::testing::ord_chaos::{Cyclic3, Governed, Governor};
+use crate::testing::rng::DeterministicRng;
use crate::vec::Vec;
use std::cmp::Ordering;
use std::convert::TryFrom;
diff --git a/library/alloc/src/collections/btree/mod.rs b/library/alloc/src/collections/btree/mod.rs
index 9d43ac5c5..7552f2fc0 100644
--- a/library/alloc/src/collections/btree/mod.rs
+++ b/library/alloc/src/collections/btree/mod.rs
@@ -21,6 +21,3 @@ trait Recover<Q: ?Sized> {
fn take(&mut self, key: &Q) -> Option<Self::Key>;
fn replace(&mut self, key: Self::Key) -> Option<Self::Key>;
}
-
-#[cfg(test)]
-mod testing;
diff --git a/library/alloc/src/collections/btree/node.rs b/library/alloc/src/collections/btree/node.rs
index da766b67a..691246644 100644
--- a/library/alloc/src/collections/btree/node.rs
+++ b/library/alloc/src/collections/btree/node.rs
@@ -318,7 +318,10 @@ impl<BorrowType: marker::BorrowType, K, V, Type> NodeRef<BorrowType, K, V, Type>
pub fn ascend(
self,
) -> Result<Handle<NodeRef<BorrowType, K, V, marker::Internal>, marker::Edge>, Self> {
- let _ = BorrowType::TRAVERSAL_PERMIT;
+ const {
+ assert!(BorrowType::TRAVERSAL_PERMIT);
+ }
+
// We need to use raw pointers to nodes because, if BorrowType is marker::ValMut,
// there might be outstanding mutable references to values that we must not invalidate.
let leaf_ptr: *const _ = Self::as_leaf_ptr(&self);
@@ -1003,7 +1006,10 @@ impl<BorrowType: marker::BorrowType, K, V>
/// `edge.descend().ascend().unwrap()` and `node.ascend().unwrap().descend()` should
/// both, upon success, do nothing.
pub fn descend(self) -> NodeRef<BorrowType, K, V, marker::LeafOrInternal> {
- let _ = BorrowType::TRAVERSAL_PERMIT;
+ const {
+ assert!(BorrowType::TRAVERSAL_PERMIT);
+ }
+
// We need to use raw pointers to nodes because, if BorrowType is
// marker::ValMut, there might be outstanding mutable references to
// values that we must not invalidate. There's no worry accessing the
@@ -1666,17 +1672,17 @@ pub mod marker {
pub struct ValMut<'a>(PhantomData<&'a mut ()>);
pub trait BorrowType {
- // If node references of this borrow type allow traversing to other
- // nodes in the tree, this constant can be evaluated. Thus reading it
- // serves as a compile-time assertion.
- const TRAVERSAL_PERMIT: () = ();
+ /// If node references of this borrow type allow traversing to other
+ /// nodes in the tree, this constant is set to `true`. It can be used
+ /// for a compile-time assertion.
+ const TRAVERSAL_PERMIT: bool = true;
}
impl BorrowType for Owned {
- // Reject evaluation, because traversal isn't needed. Instead traversal
- // happens using the result of `borrow_mut`.
- // By disabling traversal, and only creating new references to roots,
- // we know that every reference of the `Owned` type is to a root node.
- const TRAVERSAL_PERMIT: () = panic!();
+ /// Reject traversal, because it isn't needed. Instead traversal
+ /// happens using the result of `borrow_mut`.
+ /// By disabling traversal, and only creating new references to roots,
+ /// we know that every reference of the `Owned` type is to a root node.
+ const TRAVERSAL_PERMIT: bool = false;
}
impl BorrowType for Dying {}
impl<'a> BorrowType for Immut<'a> {}
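The `const { assert!(...) }` blocks above turn `TRAVERSAL_PERMIT` into a monomorphization-time check: instantiating `ascend`/`descend` with an `Owned` borrow now fails to compile instead of evaluating a panicking constant. A standalone sketch of the pattern with hypothetical names (const blocks compile on recent stable compilers; inside the library they still sit behind `inline_const`):

```rust
trait BorrowKind {
    // `true` when references of this kind may traverse to other nodes.
    const TRAVERSAL_PERMIT: bool = true;
}

#[allow(dead_code)]
struct Owned;
#[allow(dead_code)]
struct Immut;

impl BorrowKind for Owned {
    // Traversal is rejected for owned references.
    const TRAVERSAL_PERMIT: bool = false;
}
impl BorrowKind for Immut {}

fn ascend<B: BorrowKind>() {
    // Evaluated when `ascend::<B>` is monomorphized: `ascend::<Immut>`
    // compiles, `ascend::<Owned>` is a compile-time error.
    const {
        assert!(B::TRAVERSAL_PERMIT);
    }
    // ...traversal would happen here...
}

fn main() {
    ascend::<Immut>();
    // ascend::<Owned>(); // rejected at compile time
}
```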
diff --git a/library/alloc/src/collections/btree/set/tests.rs b/library/alloc/src/collections/btree/set/tests.rs
index 502d3e1d1..7b8d41a60 100644
--- a/library/alloc/src/collections/btree/set/tests.rs
+++ b/library/alloc/src/collections/btree/set/tests.rs
@@ -1,6 +1,6 @@
-use super::super::testing::crash_test::{CrashTestDummy, Panic};
-use super::super::testing::rng::DeterministicRng;
use super::*;
+use crate::testing::crash_test::{CrashTestDummy, Panic};
+use crate::testing::rng::DeterministicRng;
use crate::vec::Vec;
use std::cmp::Ordering;
use std::hash::{Hash, Hasher};
diff --git a/library/alloc/src/collections/linked_list/tests.rs b/library/alloc/src/collections/linked_list/tests.rs
index f8fbfa1bf..04594d55b 100644
--- a/library/alloc/src/collections/linked_list/tests.rs
+++ b/library/alloc/src/collections/linked_list/tests.rs
@@ -1,10 +1,11 @@
use super::*;
+use crate::testing::crash_test::{CrashTestDummy, Panic};
use crate::vec::Vec;
use std::panic::{catch_unwind, AssertUnwindSafe};
use std::thread;
-use rand::{thread_rng, RngCore};
+use rand::RngCore;
#[test]
fn test_basic() {
@@ -480,12 +481,12 @@ fn test_split_off_2() {
}
}
-fn fuzz_test(sz: i32) {
+fn fuzz_test(sz: i32, rng: &mut impl RngCore) {
let mut m: LinkedList<_> = LinkedList::new();
let mut v = vec![];
for i in 0..sz {
check_links(&m);
- let r: u8 = thread_rng().next_u32() as u8;
+ let r: u8 = rng.next_u32() as u8;
match r % 6 {
0 => {
m.pop_back();
@@ -520,11 +521,12 @@ fn fuzz_test(sz: i32) {
#[test]
fn test_fuzz() {
+ let mut rng = crate::test_helpers::test_rng();
for _ in 0..25 {
- fuzz_test(3);
- fuzz_test(16);
+ fuzz_test(3, &mut rng);
+ fuzz_test(16, &mut rng);
#[cfg(not(miri))] // Miri is too slow
- fuzz_test(189);
+ fuzz_test(189, &mut rng);
}
}
@@ -984,35 +986,34 @@ fn drain_filter_complex() {
#[test]
fn drain_filter_drop_panic_leak() {
- static mut DROPS: i32 = 0;
-
- struct D(bool);
-
- impl Drop for D {
- fn drop(&mut self) {
- unsafe {
- DROPS += 1;
- }
-
- if self.0 {
- panic!("panic in `drop`");
- }
- }
- }
-
+ let d0 = CrashTestDummy::new(0);
+ let d1 = CrashTestDummy::new(1);
+ let d2 = CrashTestDummy::new(2);
+ let d3 = CrashTestDummy::new(3);
+ let d4 = CrashTestDummy::new(4);
+ let d5 = CrashTestDummy::new(5);
+ let d6 = CrashTestDummy::new(6);
+ let d7 = CrashTestDummy::new(7);
let mut q = LinkedList::new();
- q.push_back(D(false));
- q.push_back(D(false));
- q.push_back(D(false));
- q.push_back(D(false));
- q.push_back(D(false));
- q.push_front(D(false));
- q.push_front(D(true));
- q.push_front(D(false));
-
- catch_unwind(AssertUnwindSafe(|| drop(q.drain_filter(|_| true)))).ok();
-
- assert_eq!(unsafe { DROPS }, 8);
+ q.push_back(d3.spawn(Panic::Never));
+ q.push_back(d4.spawn(Panic::Never));
+ q.push_back(d5.spawn(Panic::Never));
+ q.push_back(d6.spawn(Panic::Never));
+ q.push_back(d7.spawn(Panic::Never));
+ q.push_front(d2.spawn(Panic::Never));
+ q.push_front(d1.spawn(Panic::InDrop));
+ q.push_front(d0.spawn(Panic::Never));
+
+ catch_unwind(AssertUnwindSafe(|| drop(q.drain_filter(|_| true)))).unwrap_err();
+
+ assert_eq!(d0.dropped(), 1);
+ assert_eq!(d1.dropped(), 1);
+ assert_eq!(d2.dropped(), 1);
+ assert_eq!(d3.dropped(), 1);
+ assert_eq!(d4.dropped(), 1);
+ assert_eq!(d5.dropped(), 1);
+ assert_eq!(d6.dropped(), 1);
+ assert_eq!(d7.dropped(), 1);
assert!(q.is_empty());
}
diff --git a/library/alloc/src/collections/vec_deque/into_iter.rs b/library/alloc/src/collections/vec_deque/into_iter.rs
index 55f6138cd..e54880e86 100644
--- a/library/alloc/src/collections/vec_deque/into_iter.rs
+++ b/library/alloc/src/collections/vec_deque/into_iter.rs
@@ -25,6 +25,10 @@ impl<T, A: Allocator> IntoIter<T, A> {
pub(super) fn new(inner: VecDeque<T, A>) -> Self {
IntoIter { inner }
}
+
+ pub(super) fn into_vecdeque(self) -> VecDeque<T, A> {
+ self.inner
+ }
}
#[stable(feature = "collection_debug", since = "1.17.0")]
diff --git a/library/alloc/src/collections/vec_deque/mod.rs b/library/alloc/src/collections/vec_deque/mod.rs
index 4866c53e7..8317ac431 100644
--- a/library/alloc/src/collections/vec_deque/mod.rs
+++ b/library/alloc/src/collections/vec_deque/mod.rs
@@ -55,6 +55,10 @@ use self::spec_extend::SpecExtend;
mod spec_extend;
+use self::spec_from_iter::SpecFromIter;
+
+mod spec_from_iter;
+
#[cfg(test)]
mod tests;
@@ -531,12 +535,13 @@ impl<T> VecDeque<T> {
///
/// let deque: VecDeque<u32> = VecDeque::new();
/// ```
- // FIXME: This should probably be const
#[inline]
#[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_stable(feature = "const_vec_deque_new", since = "1.68.0")]
#[must_use]
- pub fn new() -> VecDeque<T> {
- VecDeque::new_in(Global)
+ pub const fn new() -> VecDeque<T> {
+ // FIXME: This should just be `VecDeque::new_in(Global)` once that hits stable.
+ VecDeque { head: 0, len: 0, buf: RawVec::NEW }
}
/// Creates an empty deque with space for at least `capacity` elements.
@@ -586,6 +591,38 @@ impl<T, A: Allocator> VecDeque<T, A> {
VecDeque { head: 0, len: 0, buf: RawVec::with_capacity_in(capacity, alloc) }
}
+ /// Creates a `VecDeque` from a raw allocation, when the initialized
+ /// part of that allocation forms a *contiguous* subslice thereof.
+ ///
+ /// For use by `vec::IntoIter::into_vecdeque`
+ ///
+ /// # Safety
+ ///
+ /// All the usual requirements on the allocated memory like in
+ /// `Vec::from_raw_parts_in`, but takes a *range* of elements that are
+ /// initialized rather than only supporting `0..len`. Requires that
+ /// `initialized.start` ≤ `initialized.end` ≤ `capacity`.
+ #[inline]
+ pub(crate) unsafe fn from_contiguous_raw_parts_in(
+ ptr: *mut T,
+ initialized: Range<usize>,
+ capacity: usize,
+ alloc: A,
+ ) -> Self {
+ debug_assert!(initialized.start <= initialized.end);
+ debug_assert!(initialized.end <= capacity);
+
+ // SAFETY: Our safety precondition guarantees the range length won't wrap,
+ // and that the allocation is valid for use in `RawVec`.
+ unsafe {
+ VecDeque {
+ head: initialized.start,
+ len: initialized.end.unchecked_sub(initialized.start),
+ buf: RawVec::from_raw_parts_in(ptr, capacity, alloc),
+ }
+ }
+ }
+
/// Provides a reference to the element at the given index.
///
/// Element at index 0 is the front of the queue.
@@ -599,6 +636,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// buf.push_back(3);
/// buf.push_back(4);
/// buf.push_back(5);
+ /// buf.push_back(6);
/// assert_eq!(buf.get(1), Some(&4));
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
@@ -624,10 +662,11 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// buf.push_back(3);
/// buf.push_back(4);
/// buf.push_back(5);
+ /// buf.push_back(6);
+ /// assert_eq!(buf[1], 4);
/// if let Some(elem) = buf.get_mut(1) {
/// *elem = 7;
/// }
- ///
/// assert_eq!(buf[1], 7);
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
@@ -905,65 +944,72 @@ impl<T, A: Allocator> VecDeque<T, A> {
return;
}
- if target_cap < self.capacity() {
- // There are three cases of interest:
- // All elements are out of desired bounds
- // Elements are contiguous, and head is out of desired bounds
- // Elements are discontiguous, and tail is out of desired bounds
+ // There are three cases of interest:
+ // All elements are out of desired bounds
+ // Elements are contiguous, and tail is out of desired bounds
+ // Elements are discontiguous
+ //
+ // At all other times, element positions are unaffected.
+
+ // `head` and `len` are at most `isize::MAX` and `target_cap < self.capacity()`, so nothing can
+ // overflow.
+ let tail_outside = (target_cap + 1..=self.capacity()).contains(&(self.head + self.len));
+
+ if self.len == 0 {
+ self.head = 0;
+ } else if self.head >= target_cap && tail_outside {
+ // Head and tail are both out of bounds, so copy all of them to the front.
//
- // At all other times, element positions are unaffected.
+ // H := head
+ // L := last element
+ // H L
+ // [. . . . . . . . o o o o o o o . ]
+ // H L
+ // [o o o o o o o . ]
+ unsafe {
+ // nonoverlapping because `self.head >= target_cap >= self.len`.
+ self.copy_nonoverlapping(self.head, 0, self.len);
+ }
+ self.head = 0;
+ } else if self.head < target_cap && tail_outside {
+ // Head is in bounds, tail is out of bounds.
+ // Copy the overflowing part to the beginning of the
+ // buffer. This won't overlap because `target_cap >= self.len`.
//
- // Indicates that elements at the head should be moved.
-
- let tail_outside = (target_cap + 1..=self.capacity()).contains(&(self.head + self.len));
- // Move elements from out of desired bounds (positions after target_cap)
- if self.len == 0 {
- self.head = 0;
- } else if self.head >= target_cap && tail_outside {
- // H := head
- // L := last element
- // H L
- // [. . . . . . . . o o o o o o o . ]
- // H L
- // [o o o o o o o . ]
- unsafe {
- // nonoverlapping because self.head >= target_cap >= self.len
- self.copy_nonoverlapping(self.head, 0, self.len);
- }
- self.head = 0;
- } else if self.head < target_cap && tail_outside {
- // H := head
- // L := last element
- // H L
- // [. . . o o o o o o o . . . . . . ]
- // L H
- // [o o . o o o o o ]
- let len = self.head + self.len - target_cap;
- unsafe {
- self.copy_nonoverlapping(target_cap, 0, len);
- }
- } else if self.head >= target_cap {
- // H := head
- // L := last element
- // L H
- // [o o o o o . . . . . . . . . o o ]
- // L H
- // [o o o o o . o o ]
- let len = self.capacity() - self.head;
- let new_head = target_cap - len;
- unsafe {
- // can't use copy_nonoverlapping here for the same reason
- // as in `handle_capacity_increase()`
- self.copy(self.head, new_head, len);
- }
- self.head = new_head;
+ // H := head
+ // L := last element
+ // H L
+ // [. . . o o o o o o o . . . . . . ]
+ // L H
+ // [o o . o o o o o ]
+ let len = self.head + self.len - target_cap;
+ unsafe {
+ self.copy_nonoverlapping(target_cap, 0, len);
}
-
- self.buf.shrink_to_fit(target_cap);
-
- debug_assert!(self.head < self.capacity() || self.capacity() == 0);
- debug_assert!(self.len <= self.capacity());
+ } else if !self.is_contiguous() {
+ // The head slice is at least partially out of bounds, tail is in bounds.
+ // Copy the head backwards so it lines up with the target capacity.
+ // This won't overlap because `target_cap >= self.len`.
+ //
+ // H := head
+ // L := last element
+ // L H
+ // [o o o o o . . . . . . . . . o o ]
+ // L H
+ // [o o o o o . o o ]
+ let head_len = self.capacity() - self.head;
+ let new_head = target_cap - head_len;
+ unsafe {
+ // can't use `copy_nonoverlapping()` here because the new and old
+ // regions for the head might overlap.
+ self.copy(self.head, new_head, head_len);
+ }
+ self.head = new_head;
}
+ self.buf.shrink_to_fit(target_cap);
+
+ debug_assert!(self.head < self.capacity() || self.capacity() == 0);
+ debug_assert!(self.len <= self.capacity());
}
/// Shortens the deque, keeping the first `len` elements and dropping
@@ -1878,7 +1924,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
#[stable(feature = "append", since = "1.4.0")]
pub fn append(&mut self, other: &mut Self) {
if T::IS_ZST {
- self.len += other.len;
+ self.len = self.len.checked_add(other.len).expect("capacity overflow");
other.len = 0;
other.head = 0;
return;
@@ -2505,7 +2551,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// The deque is assumed to be partitioned according to the given predicate.
/// This means that all elements for which the predicate returns true are at the start of the deque
/// and all elements for which the predicate returns false are at the end.
- /// For example, [7, 15, 3, 5, 4, 12, 6] is a partitioned under the predicate x % 2 != 0
+ /// For example, `[7, 15, 3, 5, 4, 12, 6]` is partitioned under the predicate `x % 2 != 0`
/// (all odd numbers are at the start, all even at the end).
///
/// If the deque is not partitioned, the returned result is unspecified and meaningless,
@@ -2699,18 +2745,8 @@ impl<T, A: Allocator> IndexMut<usize> for VecDeque<T, A> {
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> FromIterator<T> for VecDeque<T> {
- #[inline]
fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> VecDeque<T> {
- // Since converting is O(1) now, might as well re-use that logic
- // (including things like the `vec::IntoIter`→`Vec` specialization)
- // especially as that could save us some monomorphiziation work
- // if one uses the same iterators (like slice ones) with both.
- return from_iter_via_vec(iter.into_iter());
-
- #[inline]
- fn from_iter_via_vec<U>(iter: impl Iterator<Item = U>) -> VecDeque<U> {
- Vec::from_iter(iter).into()
- }
+ SpecFromIter::spec_from_iter(iter.into_iter())
}
}
@@ -2794,9 +2830,9 @@ impl<T, A: Allocator> From<Vec<T, A>> for VecDeque<T, A> {
/// [`Vec<T>`]: crate::vec::Vec
/// [`VecDeque<T>`]: crate::collections::VecDeque
///
- /// In its current implementation, this is a very cheap
- /// conversion. This isn't yet a guarantee though, and
- /// shouldn't be relied on.
+ /// This conversion is guaranteed to run in *O*(1) time
+ /// and to not re-allocate the `Vec`'s buffer or allocate
+ /// any additional memory.
#[inline]
fn from(other: Vec<T, A>) -> Self {
let (ptr, len, cap, alloc) = other.into_raw_parts_with_alloc();
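Two user-visible effects of the `VecDeque` changes above, shown as a standalone example (not part of the patch): `VecDeque::new` is now `const`, and the `Vec` → `VecDeque` conversion is guaranteed not to reallocate:

```rust
use std::collections::VecDeque;

// `VecDeque::new` is `const` as of this change, so it can initialize statics.
static EMPTY: VecDeque<i32> = VecDeque::new();

fn main() {
    assert!(EMPTY.is_empty());

    // The conversion reuses the Vec's buffer: O(1), no copy, no reallocation.
    let v = vec![1, 2, 3, 4];
    let buf = v.as_ptr();
    let dq = VecDeque::from(v);
    assert_eq!(dq.as_slices().0.as_ptr(), buf);
    assert!(dq.iter().eq(&[1, 2, 3, 4]));
}
```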
diff --git a/library/alloc/src/collections/vec_deque/spec_from_iter.rs b/library/alloc/src/collections/vec_deque/spec_from_iter.rs
new file mode 100644
index 000000000..7650492eb
--- /dev/null
+++ b/library/alloc/src/collections/vec_deque/spec_from_iter.rs
@@ -0,0 +1,33 @@
+use super::{IntoIter, VecDeque};
+
+/// Specialization trait used for `VecDeque::from_iter`
+pub(super) trait SpecFromIter<T, I> {
+ fn spec_from_iter(iter: I) -> Self;
+}
+
+impl<T, I> SpecFromIter<T, I> for VecDeque<T>
+where
+ I: Iterator<Item = T>,
+{
+ default fn spec_from_iter(iterator: I) -> Self {
+ // Since converting is O(1) now, just re-use the `Vec` logic for
+ // anything where we can't do something extra-special for `VecDeque`,
+ // especially as that could save us some monomorphization work
+ // if one uses the same iterators (like slice ones) with both.
+ crate::vec::Vec::from_iter(iterator).into()
+ }
+}
+
+impl<T> SpecFromIter<T, crate::vec::IntoIter<T>> for VecDeque<T> {
+ #[inline]
+ fn spec_from_iter(iterator: crate::vec::IntoIter<T>) -> Self {
+ iterator.into_vecdeque()
+ }
+}
+
+impl<T> SpecFromIter<T, IntoIter<T>> for VecDeque<T> {
+ #[inline]
+ fn spec_from_iter(iterator: IntoIter<T>) -> Self {
+ iterator.into_vecdeque()
+ }
+}
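From the caller's side, the specializations above mean that collecting a `vec::IntoIter` or a `vec_deque::IntoIter` into a `VecDeque` can reuse the existing allocation rather than copying. A small usage example:

```rust
use std::collections::VecDeque;

fn main() {
    // `Vec`'s iterator collects into a `VecDeque` by taking over its buffer.
    let dq: VecDeque<_> = vec![1, 2, 3].into_iter().collect();
    assert!(dq.iter().eq(&[1, 2, 3]));

    // Round-tripping a `VecDeque` through its own `IntoIter` is likewise cheap.
    let dq2: VecDeque<_> = dq.into_iter().collect();
    assert!(dq2.iter().eq(&[1, 2, 3]));
}
```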
diff --git a/library/alloc/src/collections/vec_deque/tests.rs b/library/alloc/src/collections/vec_deque/tests.rs
index 220ad71be..205a8ff3c 100644
--- a/library/alloc/src/collections/vec_deque/tests.rs
+++ b/library/alloc/src/collections/vec_deque/tests.rs
@@ -749,6 +749,48 @@ fn test_drain() {
}
#[test]
+fn issue_108453() {
+ let mut deque = VecDeque::with_capacity(10);
+
+ deque.push_back(1u8);
+ deque.push_back(2);
+ deque.push_back(3);
+
+ deque.push_front(10);
+ deque.push_front(9);
+
+ deque.shrink_to(9);
+
+ assert_eq!(deque.into_iter().collect::<Vec<_>>(), vec![9, 10, 1, 2, 3]);
+}
+
+#[test]
+fn test_shrink_to() {
+ // test deques with capacity 16 with all possible head positions, lengths and target capacities.
+ let cap = 16;
+
+ for len in 0..cap {
+ for head in 0..cap {
+ let expected = (1..=len).collect::<VecDeque<_>>();
+
+ for target_cap in len..cap {
+ let mut deque = VecDeque::with_capacity(cap);
+ // currently, `with_capacity` always allocates the exact capacity if it's greater than 8.
+ assert_eq!(deque.capacity(), cap);
+
+ // we can let the head point anywhere in the buffer since the deque is empty.
+ deque.head = head;
+ deque.extend(1..=len);
+
+ deque.shrink_to(target_cap);
+
+ assert_eq!(deque, expected);
+ }
+ }
+ }
+}
+
+#[test]
fn test_shrink_to_fit() {
// This test checks that every single combination of head and tail position,
// is tested. Capacity 15 should be large enough to cover every case.
diff --git a/library/alloc/src/fmt.rs b/library/alloc/src/fmt.rs
index 799ce9d5d..eadb35cb9 100644
--- a/library/alloc/src/fmt.rs
+++ b/library/alloc/src/fmt.rs
@@ -419,7 +419,7 @@
//! // documentation for details, and the function `pad` can be used
//! // to pad strings.
//! let decimals = f.precision().unwrap_or(3);
-//! let string = format!("{:.*}", decimals, magnitude);
+//! let string = format!("{magnitude:.decimals$}");
//! f.pad_integral(true, "", &string)
//! }
//! }
@@ -518,7 +518,7 @@
//! write!(&mut some_writer, "{}", format_args!("print with a {}", "macro"));
//!
//! fn my_fmt_fn(args: fmt::Arguments) {
-//! write!(&mut io::stdout(), "{}", args);
+//! write!(&mut io::stdout(), "{args}");
//! }
//! my_fmt_fn(format_args!(", or a {} too", "function"));
//! ```
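Both doc fixes above switch to captured identifiers in format strings; precision arguments written as `name$` are captured from the surrounding scope the same way. A small standalone example:

```rust
fn main() {
    let magnitude = 27.659_f64;
    let decimals = 2usize;
    // `magnitude` and `decimals` are both captured from the enclosing scope.
    assert_eq!(format!("{magnitude:.decimals$}"), "27.66");
}
```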
diff --git a/library/alloc/src/lib.rs b/library/alloc/src/lib.rs
index 96960d43f..ca75c3895 100644
--- a/library/alloc/src/lib.rs
+++ b/library/alloc/src/lib.rs
@@ -3,7 +3,7 @@
//! This library provides smart pointers and collections for managing
//! heap-allocated values.
//!
-//! This library, like libcore, normally doesn’t need to be used directly
+//! This library, like core, normally doesn’t need to be used directly
//! since its contents are re-exported in the [`std` crate](../std/index.html).
//! Crates that use the `#![no_std]` attribute however will typically
//! not depend on `std`, so they’d use this crate instead.
@@ -75,7 +75,7 @@
))]
#![no_std]
#![needs_allocator]
-// To run liballoc tests without x.py without ending up with two copies of liballoc, Miri needs to be
+// To run alloc tests without x.py without ending up with two copies of alloc, Miri needs to be
// able to "empty" this crate. See <https://github.com/rust-lang/miri-test-libstd/issues/4>.
// rustc itself never sets the feature, so this line has no effect there.
#![cfg(any(not(feature = "miri-test-libstd"), test, doctest))]
@@ -106,10 +106,12 @@
#![feature(const_size_of_val)]
#![feature(const_align_of_val)]
#![feature(const_ptr_read)]
+#![feature(const_maybe_uninit_zeroed)]
#![feature(const_maybe_uninit_write)]
#![feature(const_maybe_uninit_as_mut_ptr)]
#![feature(const_refs_to_cell)]
#![feature(core_intrinsics)]
+#![feature(core_panic)]
#![feature(const_eval_select)]
#![feature(const_pin)]
#![feature(const_waker)]
@@ -122,7 +124,9 @@
#![feature(fmt_internals)]
#![feature(fn_traits)]
#![feature(hasher_prefixfree_extras)]
+#![feature(inline_const)]
#![feature(inplace_iteration)]
+#![cfg_attr(test, feature(is_sorted))]
#![feature(iter_advance_by)]
#![feature(iter_next_chunk)]
#![feature(iter_repeat_n)]
@@ -152,7 +156,7 @@
#![feature(trusted_len)]
#![feature(trusted_random_access)]
#![feature(try_trait_v2)]
-#![cfg_attr(not(bootstrap), feature(tuple_trait))]
+#![feature(tuple_trait)]
#![feature(unchecked_math)]
#![feature(unicode_internals)]
#![feature(unsize)]
@@ -190,6 +194,7 @@
#![feature(unsized_fn_params)]
#![feature(c_unwind)]
#![feature(with_negative_coherence)]
+#![cfg_attr(test, feature(panic_update_hook))]
//
// Rustdoc features:
#![feature(doc_cfg)]
@@ -206,6 +211,8 @@
extern crate std;
#[cfg(test)]
extern crate test;
+#[cfg(test)]
+mod testing;
// Module with internal macros used by other modules (needs to be included before other modules).
#[macro_use]
@@ -251,3 +258,20 @@ pub mod vec;
pub mod __export {
pub use core::format_args;
}
+
+#[cfg(test)]
+#[allow(dead_code)] // Not used in all configurations
+pub(crate) mod test_helpers {
+ /// Copied from `std::test_helpers::test_rng`, since these tests rely on the
+ /// seed not being the same for every RNG invocation too.
+ pub(crate) fn test_rng() -> rand_xorshift::XorShiftRng {
+ use std::hash::{BuildHasher, Hash, Hasher};
+ let mut hasher = std::collections::hash_map::RandomState::new().build_hasher();
+ std::panic::Location::caller().hash(&mut hasher);
+ let hc64 = hasher.finish();
+ let seed_vec =
+ hc64.to_le_bytes().into_iter().chain(0u8..8).collect::<crate::vec::Vec<u8>>();
+ let seed: [u8; 16] = seed_vec.as_slice().try_into().unwrap();
+ rand::SeedableRng::from_seed(seed)
+ }
+}
diff --git a/library/alloc/src/rc.rs b/library/alloc/src/rc.rs
index 38e31b180..c9aa23fc4 100644
--- a/library/alloc/src/rc.rs
+++ b/library/alloc/src/rc.rs
@@ -336,7 +336,7 @@ impl<T: RefUnwindSafe + ?Sized> UnwindSafe for Rc<T> {}
#[stable(feature = "rc_ref_unwind_safe", since = "1.58.0")]
impl<T: RefUnwindSafe + ?Sized> RefUnwindSafe for Rc<T> {}
-#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Rc<U>> for Rc<T> {}
#[unstable(feature = "dispatch_from_dyn", issue = "none")]
@@ -2179,7 +2179,7 @@ pub struct Weak<T: ?Sized> {
// This is a `NonNull` to allow optimizing the size of this type in enums,
// but it is not necessarily a valid pointer.
// `Weak::new` sets this to `usize::MAX` so that it doesn’t need
- // to allocate space on the heap. That's not a value a real pointer
+ // to allocate space on the heap. That's not a value a real pointer
// will ever have because RcBox has alignment at least 2.
// This is only possible when `T: Sized`; unsized `T` never dangle.
ptr: NonNull<RcBox<T>>,
@@ -2190,7 +2190,7 @@ impl<T: ?Sized> !marker::Send for Weak<T> {}
#[stable(feature = "rc_weak", since = "1.4.0")]
impl<T: ?Sized> !marker::Sync for Weak<T> {}
-#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Weak<U>> for Weak<T> {}
#[unstable(feature = "dispatch_from_dyn", issue = "none")]
@@ -2561,7 +2561,7 @@ impl<T: ?Sized> Clone for Weak<T> {
}
#[stable(feature = "rc_weak", since = "1.4.0")]
-impl<T: ?Sized + fmt::Debug> fmt::Debug for Weak<T> {
+impl<T: ?Sized> fmt::Debug for Weak<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "(Weak)")
}
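With the relaxed bound above, `Weak<T>` is `Debug` even when `T` is not; the impl only ever prints `(Weak)`. A standalone example:

```rust
use std::rc::{Rc, Weak};

struct NotDebug; // deliberately does not implement `Debug`

fn main() {
    let strong = Rc::new(NotDebug);
    let weak: Weak<NotDebug> = Rc::downgrade(&strong);
    println!("{weak:?}"); // prints "(Weak)"
}
```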
diff --git a/library/alloc/src/slice.rs b/library/alloc/src/slice.rs
index 1b61ede34..fecacc2bb 100644
--- a/library/alloc/src/slice.rs
+++ b/library/alloc/src/slice.rs
@@ -19,15 +19,20 @@ use core::cmp::Ordering::{self, Less};
use core::mem::{self, SizedTypeProperties};
#[cfg(not(no_global_oom_handling))]
use core::ptr;
+#[cfg(not(no_global_oom_handling))]
+use core::slice::sort;
use crate::alloc::Allocator;
#[cfg(not(no_global_oom_handling))]
-use crate::alloc::Global;
+use crate::alloc::{self, Global};
#[cfg(not(no_global_oom_handling))]
use crate::borrow::ToOwned;
use crate::boxed::Box;
use crate::vec::Vec;
+#[cfg(test)]
+mod tests;
+
#[unstable(feature = "slice_range", issue = "76393")]
pub use core::slice::range;
#[unstable(feature = "array_chunks", issue = "74985")]
@@ -203,7 +208,7 @@ impl<T> [T] {
where
T: Ord,
{
- merge_sort(self, T::lt);
+ stable_sort(self, T::lt);
}
/// Sorts the slice with a comparator function.
@@ -259,7 +264,7 @@ impl<T> [T] {
where
F: FnMut(&T, &T) -> Ordering,
{
- merge_sort(self, |a, b| compare(a, b) == Less);
+ stable_sort(self, |a, b| compare(a, b) == Less);
}
/// Sorts the slice with a key extraction function.
@@ -302,7 +307,7 @@ impl<T> [T] {
F: FnMut(&T) -> K,
K: Ord,
{
- merge_sort(self, |a, b| f(a).lt(&f(b)));
+ stable_sort(self, |a, b| f(a).lt(&f(b)));
}
/// Sorts the slice with a key extraction function.
@@ -653,7 +658,7 @@ impl [u8] {
///
/// ```error
/// error[E0207]: the type parameter `T` is not constrained by the impl trait, self type, or predica
-/// --> src/liballoc/slice.rs:608:6
+/// --> library/alloc/src/slice.rs:608:6
/// |
/// 608 | impl<T: Clone, V: Borrow<[T]>> Concat for [V] {
/// | ^ unconstrained type parameter
@@ -809,324 +814,52 @@ impl<T: Clone> ToOwned for [T] {
// Sorting
////////////////////////////////////////////////////////////////////////////////
-/// Inserts `v[0]` into pre-sorted sequence `v[1..]` so that whole `v[..]` becomes sorted.
-///
-/// This is the integral subroutine of insertion sort.
-#[cfg(not(no_global_oom_handling))]
-fn insert_head<T, F>(v: &mut [T], is_less: &mut F)
-where
- F: FnMut(&T, &T) -> bool,
-{
- if v.len() >= 2 && is_less(&v[1], &v[0]) {
- unsafe {
- // There are three ways to implement insertion here:
- //
- // 1. Swap adjacent elements until the first one gets to its final destination.
- // However, this way we copy data around more than is necessary. If elements are big
- // structures (costly to copy), this method will be slow.
- //
- // 2. Iterate until the right place for the first element is found. Then shift the
- // elements succeeding it to make room for it and finally place it into the
- // remaining hole. This is a good method.
- //
- // 3. Copy the first element into a temporary variable. Iterate until the right place
- // for it is found. As we go along, copy every traversed element into the slot
- // preceding it. Finally, copy data from the temporary variable into the remaining
- // hole. This method is very good. Benchmarks demonstrated slightly better
- // performance than with the 2nd method.
- //
- // All methods were benchmarked, and the 3rd showed best results. So we chose that one.
- let tmp = mem::ManuallyDrop::new(ptr::read(&v[0]));
-
- // Intermediate state of the insertion process is always tracked by `hole`, which
- // serves two purposes:
- // 1. Protects integrity of `v` from panics in `is_less`.
- // 2. Fills the remaining hole in `v` in the end.
- //
- // Panic safety:
- //
- // If `is_less` panics at any point during the process, `hole` will get dropped and
- // fill the hole in `v` with `tmp`, thus ensuring that `v` still holds every object it
- // initially held exactly once.
- let mut hole = InsertionHole { src: &*tmp, dest: &mut v[1] };
- ptr::copy_nonoverlapping(&v[1], &mut v[0], 1);
-
- for i in 2..v.len() {
- if !is_less(&v[i], &*tmp) {
- break;
- }
- ptr::copy_nonoverlapping(&v[i], &mut v[i - 1], 1);
- hole.dest = &mut v[i];
- }
- // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`.
- }
- }
-
- // When dropped, copies from `src` into `dest`.
- struct InsertionHole<T> {
- src: *const T,
- dest: *mut T,
- }
-
- impl<T> Drop for InsertionHole<T> {
- fn drop(&mut self) {
- unsafe {
- ptr::copy_nonoverlapping(self.src, self.dest, 1);
- }
- }
- }
-}
-
-/// Merges non-decreasing runs `v[..mid]` and `v[mid..]` using `buf` as temporary storage, and
-/// stores the result into `v[..]`.
-///
-/// # Safety
-///
-/// The two slices must be non-empty and `mid` must be in bounds. Buffer `buf` must be long enough
-/// to hold a copy of the shorter slice. Also, `T` must not be a zero-sized type.
-#[cfg(not(no_global_oom_handling))]
-unsafe fn merge<T, F>(v: &mut [T], mid: usize, buf: *mut T, is_less: &mut F)
-where
- F: FnMut(&T, &T) -> bool,
-{
- let len = v.len();
- let v = v.as_mut_ptr();
- let (v_mid, v_end) = unsafe { (v.add(mid), v.add(len)) };
-
- // The merge process first copies the shorter run into `buf`. Then it traces the newly copied
- // run and the longer run forwards (or backwards), comparing their next unconsumed elements and
- // copying the lesser (or greater) one into `v`.
- //
- // As soon as the shorter run is fully consumed, the process is done. If the longer run gets
- // consumed first, then we must copy whatever is left of the shorter run into the remaining
- // hole in `v`.
- //
- // Intermediate state of the process is always tracked by `hole`, which serves two purposes:
- // 1. Protects integrity of `v` from panics in `is_less`.
- // 2. Fills the remaining hole in `v` if the longer run gets consumed first.
- //
- // Panic safety:
- //
- // If `is_less` panics at any point during the process, `hole` will get dropped and fill the
- // hole in `v` with the unconsumed range in `buf`, thus ensuring that `v` still holds every
- // object it initially held exactly once.
- let mut hole;
-
- if mid <= len - mid {
- // The left run is shorter.
- unsafe {
- ptr::copy_nonoverlapping(v, buf, mid);
- hole = MergeHole { start: buf, end: buf.add(mid), dest: v };
- }
-
- // Initially, these pointers point to the beginnings of their arrays.
- let left = &mut hole.start;
- let mut right = v_mid;
- let out = &mut hole.dest;
-
- while *left < hole.end && right < v_end {
- // Consume the lesser side.
- // If equal, prefer the left run to maintain stability.
- unsafe {
- let to_copy = if is_less(&*right, &**left) {
- get_and_increment(&mut right)
- } else {
- get_and_increment(left)
- };
- ptr::copy_nonoverlapping(to_copy, get_and_increment(out), 1);
- }
- }
- } else {
- // The right run is shorter.
- unsafe {
- ptr::copy_nonoverlapping(v_mid, buf, len - mid);
- hole = MergeHole { start: buf, end: buf.add(len - mid), dest: v_mid };
- }
-
- // Initially, these pointers point past the ends of their arrays.
- let left = &mut hole.dest;
- let right = &mut hole.end;
- let mut out = v_end;
-
- while v < *left && buf < *right {
- // Consume the greater side.
- // If equal, prefer the right run to maintain stability.
- unsafe {
- let to_copy = if is_less(&*right.sub(1), &*left.sub(1)) {
- decrement_and_get(left)
- } else {
- decrement_and_get(right)
- };
- ptr::copy_nonoverlapping(to_copy, decrement_and_get(&mut out), 1);
- }
- }
- }
- // Finally, `hole` gets dropped. If the shorter run was not fully consumed, whatever remains of
- // it will now be copied into the hole in `v`.
-
- unsafe fn get_and_increment<T>(ptr: &mut *mut T) -> *mut T {
- let old = *ptr;
- *ptr = unsafe { ptr.add(1) };
- old
- }
-
- unsafe fn decrement_and_get<T>(ptr: &mut *mut T) -> *mut T {
- *ptr = unsafe { ptr.sub(1) };
- *ptr
- }
-
- // When dropped, copies the range `start..end` into `dest..`.
- struct MergeHole<T> {
- start: *mut T,
- end: *mut T,
- dest: *mut T,
- }
-
- impl<T> Drop for MergeHole<T> {
- fn drop(&mut self) {
- // `T` is not a zero-sized type, and these are pointers into a slice's elements.
- unsafe {
- let len = self.end.sub_ptr(self.start);
- ptr::copy_nonoverlapping(self.start, self.dest, len);
- }
- }
- }
-}
-
-/// This merge sort borrows some (but not all) ideas from TimSort, which is described in detail
-/// [here](https://github.com/python/cpython/blob/main/Objects/listsort.txt).
-///
-/// The algorithm identifies strictly descending and non-descending subsequences, which are called
-/// natural runs. There is a stack of pending runs yet to be merged. Each newly found run is pushed
-/// onto the stack, and then some pairs of adjacent runs are merged until these two invariants are
-/// satisfied:
-///
-/// 1. for every `i` in `1..runs.len()`: `runs[i - 1].len > runs[i].len`
-/// 2. for every `i` in `2..runs.len()`: `runs[i - 2].len > runs[i - 1].len + runs[i].len`
-///
-/// The invariants ensure that the total running time is *O*(*n* \* log(*n*)) worst-case.
+#[inline]
#[cfg(not(no_global_oom_handling))]
-fn merge_sort<T, F>(v: &mut [T], mut is_less: F)
+fn stable_sort<T, F>(v: &mut [T], mut is_less: F)
where
F: FnMut(&T, &T) -> bool,
{
- // Slices of up to this length get sorted using insertion sort.
- const MAX_INSERTION: usize = 20;
- // Very short runs are extended using insertion sort to span at least this many elements.
- const MIN_RUN: usize = 10;
-
- // Sorting has no meaningful behavior on zero-sized types.
if T::IS_ZST {
+ // Sorting has no meaningful behavior on zero-sized types. Do nothing.
return;
}
- let len = v.len();
+ let elem_alloc_fn = |len: usize| -> *mut T {
+ // SAFETY: Creating the layout is safe as long as merge_sort never calls this with len >
+ // v.len(). Alloc in general will only be used as 'shadow-region' to store temporary swap
+ // elements.
+ unsafe { alloc::alloc(alloc::Layout::array::<T>(len).unwrap_unchecked()) as *mut T }
+ };
- // Short arrays get sorted in-place via insertion sort to avoid allocations.
- if len <= MAX_INSERTION {
- if len >= 2 {
- for i in (0..len - 1).rev() {
- insert_head(&mut v[i..], &mut is_less);
- }
- }
- return;
- }
-
- // Allocate a buffer to use as scratch memory. We keep the length 0 so we can keep in it
- // shallow copies of the contents of `v` without risking the dtors running on copies if
- // `is_less` panics. When merging two sorted runs, this buffer holds a copy of the shorter run,
- // which will always have length at most `len / 2`.
- let mut buf = Vec::with_capacity(len / 2);
-
- // In order to identify natural runs in `v`, we traverse it backwards. That might seem like a
- // strange decision, but consider the fact that merges more often go in the opposite direction
- // (forwards). According to benchmarks, merging forwards is slightly faster than merging
- // backwards. To conclude, identifying runs by traversing backwards improves performance.
- let mut runs = vec![];
- let mut end = len;
- while end > 0 {
- // Find the next natural run, and reverse it if it's strictly descending.
- let mut start = end - 1;
- if start > 0 {
- start -= 1;
- unsafe {
- if is_less(v.get_unchecked(start + 1), v.get_unchecked(start)) {
- while start > 0 && is_less(v.get_unchecked(start), v.get_unchecked(start - 1)) {
- start -= 1;
- }
- v[start..end].reverse();
- } else {
- while start > 0 && !is_less(v.get_unchecked(start), v.get_unchecked(start - 1))
- {
- start -= 1;
- }
- }
- }
- }
-
- // Insert some more elements into the run if it's too short. Insertion sort is faster than
- // merge sort on short sequences, so this significantly improves performance.
- while start > 0 && end - start < MIN_RUN {
- start -= 1;
- insert_head(&mut v[start..end], &mut is_less);
+ let elem_dealloc_fn = |buf_ptr: *mut T, len: usize| {
+ // SAFETY: Creating the layout is safe as long as merge_sort never calls this with len >
+ // v.len(). The caller must ensure that buf_ptr was created by elem_alloc_fn with the same
+ // len.
+ unsafe {
+ alloc::dealloc(buf_ptr as *mut u8, alloc::Layout::array::<T>(len).unwrap_unchecked());
}
+ };
- // Push this run onto the stack.
- runs.push(Run { start, len: end - start });
- end = start;
-
- // Merge some pairs of adjacent runs to satisfy the invariants.
- while let Some(r) = collapse(&runs) {
- let left = runs[r + 1];
- let right = runs[r];
- unsafe {
- merge(
- &mut v[left.start..right.start + right.len],
- left.len,
- buf.as_mut_ptr(),
- &mut is_less,
- );
- }
- runs[r] = Run { start: left.start, len: left.len + right.len };
- runs.remove(r + 1);
+ let run_alloc_fn = |len: usize| -> *mut sort::TimSortRun {
+ // SAFETY: Creating the layout is safe as long as merge_sort never calls this with an
+ // obscene length or 0.
+ unsafe {
+ alloc::alloc(alloc::Layout::array::<sort::TimSortRun>(len).unwrap_unchecked())
+ as *mut sort::TimSortRun
}
- }
-
- // Finally, exactly one run must remain in the stack.
- debug_assert!(runs.len() == 1 && runs[0].start == 0 && runs[0].len == len);
+ };
- // Examines the stack of runs and identifies the next pair of runs to merge. More specifically,
- // if `Some(r)` is returned, that means `runs[r]` and `runs[r + 1]` must be merged next. If the
- // algorithm should continue building a new run instead, `None` is returned.
- //
- // TimSort is infamous for its buggy implementations, as described here:
- // http://envisage-project.eu/timsort-specification-and-verification/
- //
- // The gist of the story is: we must enforce the invariants on the top four runs on the stack.
- // Enforcing them on just top three is not sufficient to ensure that the invariants will still
- // hold for *all* runs in the stack.
- //
- // This function correctly checks invariants for the top four runs. Additionally, if the top
- // run starts at index 0, it will always demand a merge operation until the stack is fully
- // collapsed, in order to complete the sort.
- #[inline]
- fn collapse(runs: &[Run]) -> Option<usize> {
- let n = runs.len();
- if n >= 2
- && (runs[n - 1].start == 0
- || runs[n - 2].len <= runs[n - 1].len
- || (n >= 3 && runs[n - 3].len <= runs[n - 2].len + runs[n - 1].len)
- || (n >= 4 && runs[n - 4].len <= runs[n - 3].len + runs[n - 2].len))
- {
- if n >= 3 && runs[n - 3].len < runs[n - 1].len { Some(n - 3) } else { Some(n - 2) }
- } else {
- None
+ let run_dealloc_fn = |buf_ptr: *mut sort::TimSortRun, len: usize| {
+ // SAFETY: The caller must ensure that buf_ptr was created by run_alloc_fn with the same
+ // len.
+ unsafe {
+ alloc::dealloc(
+ buf_ptr as *mut u8,
+ alloc::Layout::array::<sort::TimSortRun>(len).unwrap_unchecked(),
+ );
}
- }
+ };
- #[derive(Clone, Copy)]
- struct Run {
- start: usize,
- len: usize,
- }
+ sort::merge_sort(v, &mut is_less, elem_alloc_fn, elem_dealloc_fn, run_alloc_fn, run_dealloc_fn);
}
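The closures above feed raw allocations into `core::slice::sort::merge_sort`. As a standalone sketch (an assumed helper, not the stdlib's code) of the invariant they rely on — allocation and deallocation paired on the same `Layout::array`, and zero-sized layouts never reaching `alloc`:

```rust
use std::alloc::{alloc, dealloc, Layout};
use std::mem;

// Allocate a scratch buffer of `len` elements, hand it to `f`, then free it
// with the exact same layout that was used to allocate it.
fn with_scratch<T>(len: usize, f: impl FnOnce(*mut T)) {
    assert!(mem::size_of::<T>() > 0 && len > 0, "zero-sized layouts must not reach alloc");
    let layout = Layout::array::<T>(len).expect("layout overflow");
    unsafe {
        let ptr = alloc(layout) as *mut T;
        assert!(!ptr.is_null(), "allocation failure");
        f(ptr);
        dealloc(ptr as *mut u8, layout);
    }
}

fn main() {
    with_scratch::<u64>(16, |_scratch| {
        // a sort would stash temporary copies of elements in `_scratch` here
    });
}
```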
diff --git a/library/alloc/src/slice/tests.rs b/library/alloc/src/slice/tests.rs
new file mode 100644
index 000000000..f674530aa
--- /dev/null
+++ b/library/alloc/src/slice/tests.rs
@@ -0,0 +1,359 @@
+use crate::borrow::ToOwned;
+use crate::rc::Rc;
+use crate::string::ToString;
+use crate::test_helpers::test_rng;
+use crate::vec::Vec;
+
+use core::cell::Cell;
+use core::cmp::Ordering::{self, Equal, Greater, Less};
+use core::convert::identity;
+use core::fmt;
+use core::mem;
+use core::sync::atomic::{AtomicUsize, Ordering::Relaxed};
+use rand::{distributions::Standard, prelude::*, Rng, RngCore};
+use std::panic;
+
+macro_rules! do_test {
+ ($input:ident, $func:ident) => {
+ let len = $input.len();
+
+ // Work out the total number of comparisons required to sort
+ // this array...
+ let mut count = 0usize;
+ $input.to_owned().$func(|a, b| {
+ count += 1;
+ a.cmp(b)
+ });
+
+ // ... and then panic on each and every single one.
+ for panic_countdown in 0..count {
+ // Refresh the counters.
+ VERSIONS.store(0, Relaxed);
+ for i in 0..len {
+ DROP_COUNTS[i].store(0, Relaxed);
+ }
+
+ let v = $input.to_owned();
+ let _ = std::panic::catch_unwind(move || {
+ let mut v = v;
+ let mut panic_countdown = panic_countdown;
+ v.$func(|a, b| {
+ if panic_countdown == 0 {
+ SILENCE_PANIC.with(|s| s.set(true));
+ panic!();
+ }
+ panic_countdown -= 1;
+ a.cmp(b)
+ })
+ });
+
+ // Check that the number of things dropped is exactly
+ // what we expect (i.e., the contents of `v`).
+ for (i, c) in DROP_COUNTS.iter().enumerate().take(len) {
+ let count = c.load(Relaxed);
+ assert!(count == 1, "found drop count == {} for i == {}, len == {}", count, i, len);
+ }
+
+ // Check that the most recent versions of values were dropped.
+ assert_eq!(VERSIONS.load(Relaxed), 0);
+ }
+ };
+}
+
+const MAX_LEN: usize = 80;
+
+static DROP_COUNTS: [AtomicUsize; MAX_LEN] = [
+ // FIXME(RFC 1109): AtomicUsize is not Copy.
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+];
+
+static VERSIONS: AtomicUsize = AtomicUsize::new(0);
+
+#[derive(Clone, Eq)]
+struct DropCounter {
+ x: u32,
+ id: usize,
+ version: Cell<usize>,
+}
+
+impl PartialEq for DropCounter {
+ fn eq(&self, other: &Self) -> bool {
+ self.partial_cmp(other) == Some(Ordering::Equal)
+ }
+}
+
+impl PartialOrd for DropCounter {
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ self.version.set(self.version.get() + 1);
+ other.version.set(other.version.get() + 1);
+ VERSIONS.fetch_add(2, Relaxed);
+ self.x.partial_cmp(&other.x)
+ }
+}
+
+impl Ord for DropCounter {
+ fn cmp(&self, other: &Self) -> Ordering {
+ self.partial_cmp(other).unwrap()
+ }
+}
+
+impl Drop for DropCounter {
+ fn drop(&mut self) {
+ DROP_COUNTS[self.id].fetch_add(1, Relaxed);
+ VERSIONS.fetch_sub(self.version.get(), Relaxed);
+ }
+}
+
+std::thread_local!(static SILENCE_PANIC: Cell<bool> = Cell::new(false));
+
+#[test]
+#[cfg_attr(target_os = "emscripten", ignore)] // no threads
+fn panic_safe() {
+ panic::update_hook(move |prev, info| {
+ if !SILENCE_PANIC.with(|s| s.get()) {
+ prev(info);
+ }
+ });
+
+ let mut rng = test_rng();
+
+ // Miri is too slow (but still need to `chain` to make the types match)
+ let lens = if cfg!(miri) { (1..10).chain(0..0) } else { (1..20).chain(70..MAX_LEN) };
+ let moduli: &[u32] = if cfg!(miri) { &[5] } else { &[5, 20, 50] };
+
+ for len in lens {
+ for &modulus in moduli {
+ for &has_runs in &[false, true] {
+ let mut input = (0..len)
+ .map(|id| DropCounter {
+ x: rng.next_u32() % modulus,
+ id: id,
+ version: Cell::new(0),
+ })
+ .collect::<Vec<_>>();
+
+ if has_runs {
+ for c in &mut input {
+ c.x = c.id as u32;
+ }
+
+ for _ in 0..5 {
+ let a = rng.gen::<usize>() % len;
+ let b = rng.gen::<usize>() % len;
+ if a < b {
+ input[a..b].reverse();
+ } else {
+ input.swap(a, b);
+ }
+ }
+ }
+
+ do_test!(input, sort_by);
+ do_test!(input, sort_unstable_by);
+ }
+ }
+ }
+
+ // Set default panic hook again.
+ drop(panic::take_hook());
+}
+
+#[test]
+#[cfg_attr(miri, ignore)] // Miri is too slow
+fn test_sort() {
+ let mut rng = test_rng();
+
+ for len in (2..25).chain(500..510) {
+ for &modulus in &[5, 10, 100, 1000] {
+ for _ in 0..10 {
+ let orig: Vec<_> = (&mut rng)
+ .sample_iter::<i32, _>(&Standard)
+ .map(|x| x % modulus)
+ .take(len)
+ .collect();
+
+ // Sort in default order.
+ let mut v = orig.clone();
+ v.sort();
+ assert!(v.windows(2).all(|w| w[0] <= w[1]));
+
+ // Sort in ascending order.
+ let mut v = orig.clone();
+ v.sort_by(|a, b| a.cmp(b));
+ assert!(v.windows(2).all(|w| w[0] <= w[1]));
+
+ // Sort in descending order.
+ let mut v = orig.clone();
+ v.sort_by(|a, b| b.cmp(a));
+ assert!(v.windows(2).all(|w| w[0] >= w[1]));
+
+ // Sort in lexicographic order.
+ let mut v1 = orig.clone();
+ let mut v2 = orig.clone();
+ v1.sort_by_key(|x| x.to_string());
+ v2.sort_by_cached_key(|x| x.to_string());
+ assert!(v1.windows(2).all(|w| w[0].to_string() <= w[1].to_string()));
+ assert!(v1 == v2);
+
+ // Sort with many pre-sorted runs.
+ let mut v = orig.clone();
+ v.sort();
+ v.reverse();
+ for _ in 0..5 {
+ let a = rng.gen::<usize>() % len;
+ let b = rng.gen::<usize>() % len;
+ if a < b {
+ v[a..b].reverse();
+ } else {
+ v.swap(a, b);
+ }
+ }
+ v.sort();
+ assert!(v.windows(2).all(|w| w[0] <= w[1]));
+ }
+ }
+ }
+
+ // Sort using a completely random comparison function.
+ // This will reorder the elements *somehow*, but won't panic.
+ let mut v = [0; 500];
+ for i in 0..v.len() {
+ v[i] = i as i32;
+ }
+ v.sort_by(|_, _| *[Less, Equal, Greater].choose(&mut rng).unwrap());
+ v.sort();
+ for i in 0..v.len() {
+ assert_eq!(v[i], i as i32);
+ }
+
+ // Should not panic.
+ [0i32; 0].sort();
+ [(); 10].sort();
+ [(); 100].sort();
+
+ let mut v = [0xDEADBEEFu64];
+ v.sort();
+ assert!(v == [0xDEADBEEF]);
+}
+
+#[test]
+fn test_sort_stability() {
+ // Miri is too slow
+ let large_range = if cfg!(miri) { 0..0 } else { 500..510 };
+ let rounds = if cfg!(miri) { 1 } else { 10 };
+
+ let mut rng = test_rng();
+ for len in (2..25).chain(large_range) {
+ for _ in 0..rounds {
+ let mut counts = [0; 10];
+
+ // create a vector like [(6, 1), (5, 1), (6, 2), ...],
+ // where the first item of each tuple is random, but
+ // the second item represents which occurrence of that
+ // number this element is, i.e., the second elements
+ // will occur in sorted order.
+ let orig: Vec<_> = (0..len)
+ .map(|_| {
+ let n = rng.gen::<usize>() % 10;
+ counts[n] += 1;
+ (n, counts[n])
+ })
+ .collect();
+
+ let mut v = orig.clone();
+ // Only sort on the first element, so an unstable sort
+ // may mix up the counts.
+ v.sort_by(|&(a, _), &(b, _)| a.cmp(&b));
+
+ // This comparison includes the count (the second item
+ // of the tuple), so elements with equal first items
+ // will need to be ordered with increasing
+ // counts... i.e., exactly asserting that this sort is
+ // stable.
+ assert!(v.windows(2).all(|w| w[0] <= w[1]));
+
+ let mut v = orig.clone();
+ v.sort_by_cached_key(|&(x, _)| x);
+ assert!(v.windows(2).all(|w| w[0] <= w[1]));
+ }
+ }
+}
diff --git a/library/alloc/src/str.rs b/library/alloc/src/str.rs
index b28d20cda..afbe5cfaf 100644
--- a/library/alloc/src/str.rs
+++ b/library/alloc/src/str.rs
@@ -559,10 +559,9 @@ impl str {
#[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
#[inline]
pub fn to_ascii_uppercase(&self) -> String {
- let mut bytes = self.as_bytes().to_vec();
- bytes.make_ascii_uppercase();
- // make_ascii_uppercase() preserves the UTF-8 invariant.
- unsafe { String::from_utf8_unchecked(bytes) }
+ let mut s = self.to_owned();
+ s.make_ascii_uppercase();
+ s
}
/// Returns a copy of this string where each character is mapped to its
@@ -592,10 +591,9 @@ impl str {
#[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
#[inline]
pub fn to_ascii_lowercase(&self) -> String {
- let mut bytes = self.as_bytes().to_vec();
- bytes.make_ascii_lowercase();
- // make_ascii_lowercase() preserves the UTF-8 invariant.
- unsafe { String::from_utf8_unchecked(bytes) }
+ let mut s = self.to_owned();
+ s.make_ascii_lowercase();
+ s
}
}
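The behaviour of both methods is unchanged by the rewrite above; it merely drops the manual `from_utf8_unchecked` round-trip, since only ASCII bytes are touched. For example:

```rust
fn main() {
    // Only ASCII letters are changed; other characters pass through untouched.
    assert_eq!("grüße, jürgen ❤".to_ascii_uppercase(), "GRüßE, JüRGEN ❤");
    assert_eq!("GRÜßE, JÜRGEN ❤".to_ascii_lowercase(), "grÜße, jÜrgen ❤");
}
```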
diff --git a/library/alloc/src/string.rs b/library/alloc/src/string.rs
index 7a8e6f088..ca182c810 100644
--- a/library/alloc/src/string.rs
+++ b/library/alloc/src/string.rs
@@ -363,7 +363,7 @@ use crate::vec::Vec;
/// [`as_str()`]: String::as_str
#[derive(PartialOrd, Eq, Ord)]
#[stable(feature = "rust1", since = "1.0.0")]
-#[cfg_attr(all(not(bootstrap), not(test)), lang = "String")]
+#[cfg_attr(not(test), lang = "String")]
pub struct String {
vec: Vec<u8>,
}
@@ -2549,6 +2549,15 @@ impl ToString for char {
}
#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "bool_to_string_specialization", since = "1.68.0")]
+impl ToString for bool {
+ #[inline]
+ fn to_string(&self) -> String {
+ String::from(if *self { "true" } else { "false" })
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
#[stable(feature = "u8_to_string_specialization", since = "1.54.0")]
impl ToString for u8 {
#[inline]
@@ -2678,7 +2687,7 @@ impl From<&String> for String {
}
}
-// note: test pulls in libstd, which causes errors here
+// note: test pulls in std, which causes errors here
#[cfg(not(test))]
#[stable(feature = "string_from_box", since = "1.18.0")]
impl From<Box<str>> for String {
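The new `ToString for bool` specialization returns the same strings as the `Display`-based blanket impl, just without going through the formatting machinery:

```rust
fn main() {
    assert_eq!(true.to_string(), "true");
    assert_eq!(false.to_string(), "false");
}
```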
diff --git a/library/alloc/src/sync.rs b/library/alloc/src/sync.rs
index f7dc4d109..bab7f5f53 100644
--- a/library/alloc/src/sync.rs
+++ b/library/alloc/src/sync.rs
@@ -254,7 +254,7 @@ unsafe impl<T: ?Sized + Sync + Send> Sync for Arc<T> {}
#[stable(feature = "catch_unwind", since = "1.9.0")]
impl<T: RefUnwindSafe + ?Sized> UnwindSafe for Arc<T> {}
-#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Arc<U>> for Arc<T> {}
#[unstable(feature = "dispatch_from_dyn", issue = "none")]
@@ -295,7 +295,7 @@ pub struct Weak<T: ?Sized> {
// This is a `NonNull` to allow optimizing the size of this type in enums,
// but it is not necessarily a valid pointer.
// `Weak::new` sets this to `usize::MAX` so that it doesn’t need
- // to allocate space on the heap. That's not a value a real pointer
+ // to allocate space on the heap. That's not a value a real pointer
// will ever have because RcBox has alignment at least 2.
// This is only possible when `T: Sized`; unsized `T` never dangle.
ptr: NonNull<ArcInner<T>>,
@@ -306,13 +306,13 @@ unsafe impl<T: ?Sized + Sync + Send> Send for Weak<T> {}
#[stable(feature = "arc_weak", since = "1.4.0")]
unsafe impl<T: ?Sized + Sync + Send> Sync for Weak<T> {}
-#[unstable(feature = "coerce_unsized", issue = "27732")]
+#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Weak<U>> for Weak<T> {}
#[unstable(feature = "dispatch_from_dyn", issue = "none")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Weak<U>> for Weak<T> {}
#[stable(feature = "arc_weak", since = "1.4.0")]
-impl<T: ?Sized + fmt::Debug> fmt::Debug for Weak<T> {
+impl<T: ?Sized> fmt::Debug for Weak<T> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "(Weak)")
}
@@ -1656,7 +1656,7 @@ impl<T: ?Sized> Arc<T> {
//
// The acquire label here ensures a happens-before relationship with any
// writes to `strong` (in particular in `Weak::upgrade`) prior to decrements
- // of the `weak` count (via `Weak::drop`, which uses release). If the upgraded
+ // of the `weak` count (via `Weak::drop`, which uses release). If the upgraded
// weak ref was never dropped, the CAS here will fail so we do not care to synchronize.
if self.inner().weak.compare_exchange(1, usize::MAX, Acquire, Relaxed).is_ok() {
// This needs to be an `Acquire` to synchronize with the decrement of the `strong`
@@ -1712,7 +1712,7 @@ unsafe impl<#[may_dangle] T: ?Sized> Drop for Arc<T> {
}
// This fence is needed to prevent reordering of use of the data and
- // deletion of the data. Because it is marked `Release`, the decreasing
+ // deletion of the data. Because it is marked `Release`, the decreasing
// of the reference count synchronizes with this `Acquire` fence. This
// means that use of the data happens before decreasing the reference
// count, which happens before this fence, which happens before the
@@ -2172,7 +2172,7 @@ impl<T: ?Sized> Clone for Weak<T> {
} else {
return Weak { ptr: self.ptr };
};
- // See comments in Arc::clone() for why this is relaxed. This can use a
+ // See comments in Arc::clone() for why this is relaxed. This can use a
// fetch_add (ignoring the lock) because the weak count is only locked
// when there are *no other* weak pointers in existence. (So we can't be
// running this code in that case).
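
With the `Debug` bound on `T` dropped, `Weak<T>` is formattable even when `T` itself is not `Debug`; the output is always the literal `(Weak)`. A small sketch assuming that relaxed bound is in effect:

    use std::sync::Weak;

    struct NotDebug; // deliberately has no Debug impl

    fn main() {
        let w: Weak<NotDebug> = Weak::new();
        // Prints "(Weak)"; the pointee is never inspected.
        println!("{w:?}");
    }
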
diff --git a/library/alloc/src/collections/btree/testing/crash_test.rs b/library/alloc/src/testing/crash_test.rs
index bcf5f5f72..bcf5f5f72 100644
--- a/library/alloc/src/collections/btree/testing/crash_test.rs
+++ b/library/alloc/src/testing/crash_test.rs
diff --git a/library/alloc/src/collections/btree/testing/mod.rs b/library/alloc/src/testing/mod.rs
index 7a094f8a5..7a094f8a5 100644
--- a/library/alloc/src/collections/btree/testing/mod.rs
+++ b/library/alloc/src/testing/mod.rs
diff --git a/library/alloc/src/collections/btree/testing/ord_chaos.rs b/library/alloc/src/testing/ord_chaos.rs
index 96ce7c157..96ce7c157 100644
--- a/library/alloc/src/collections/btree/testing/ord_chaos.rs
+++ b/library/alloc/src/testing/ord_chaos.rs
diff --git a/library/alloc/src/collections/btree/testing/rng.rs b/library/alloc/src/testing/rng.rs
index ecf543bee..ecf543bee 100644
--- a/library/alloc/src/collections/btree/testing/rng.rs
+++ b/library/alloc/src/testing/rng.rs
diff --git a/library/alloc/src/vec/drain.rs b/library/alloc/src/vec/drain.rs
index 541f99bcf..2b1a787cc 100644
--- a/library/alloc/src/vec/drain.rs
+++ b/library/alloc/src/vec/drain.rs
@@ -223,9 +223,9 @@ impl<T, A: Allocator> Drop for Drain<'_, T, A> {
}
// as_slice() must only be called when iter.len() is > 0 because
- // vec::Splice modifies vec::Drain fields and may grow the vec which would invalidate
- // the iterator's internal pointers. Creating a reference to deallocated memory
- // is invalid even when it is zero-length
+ // it also gets touched by vec::Splice which may turn it into a dangling pointer
+ // which would make it and the vec pointer point to different allocations which would
+ // lead to invalid pointer arithmetic below.
let drop_ptr = iter.as_slice().as_ptr();
unsafe {
diff --git a/library/alloc/src/vec/into_iter.rs b/library/alloc/src/vec/into_iter.rs
index 02cc7691a..37966007e 100644
--- a/library/alloc/src/vec/into_iter.rs
+++ b/library/alloc/src/vec/into_iter.rs
@@ -1,6 +1,8 @@
#[cfg(not(no_global_oom_handling))]
use super::AsVecIntoIter;
use crate::alloc::{Allocator, Global};
+#[cfg(not(no_global_oom_handling))]
+use crate::collections::VecDeque;
use crate::raw_vec::RawVec;
use core::array;
use core::fmt;
@@ -38,7 +40,9 @@ pub struct IntoIter<
// to avoid dropping the allocator twice we need to wrap it into ManuallyDrop
pub(super) alloc: ManuallyDrop<A>,
pub(super) ptr: *const T,
- pub(super) end: *const T,
+ pub(super) end: *const T, // If T is a ZST, this is actually ptr+len. This encoding is picked so that
+ // ptr == end is a quick test for the Iterator being empty, that works
+ // for both ZST and non-ZST.
}
#[stable(feature = "vec_intoiter_debug", since = "1.13.0")]
@@ -130,7 +134,36 @@ impl<T, A: Allocator> IntoIter<T, A> {
/// Forgets to Drop the remaining elements while still allowing the backing allocation to be freed.
pub(crate) fn forget_remaining_elements(&mut self) {
- self.ptr = self.end;
+ // For the ZST case, it is crucial that we mutate `end` here, not `ptr`.
+ // `ptr` must stay aligned, while `end` may be unaligned.
+ self.end = self.ptr;
+ }
+
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ pub(crate) fn into_vecdeque(self) -> VecDeque<T, A> {
+ // Keep our `Drop` impl from dropping the elements and the allocator
+ let mut this = ManuallyDrop::new(self);
+
+ // SAFETY: This allocation originally came from a `Vec`, so it passes
+ // all those checks. We have `this.buf` ≤ `this.ptr` ≤ `this.end`,
+ // so the `sub_ptr`s below cannot wrap, and will produce a well-formed
+ // range. `end` ≤ `buf + cap`, so the range will be in-bounds.
+ // Taking `alloc` is ok because nothing else is going to look at it,
+ // since our `Drop` impl isn't going to run so there's no more code.
+ unsafe {
+ let buf = this.buf.as_ptr();
+ let initialized = if T::IS_ZST {
+ // All the pointers are the same for ZSTs, so it's fine to
+ // say that they're all at the beginning of the "allocation".
+ 0..this.len()
+ } else {
+ this.ptr.sub_ptr(buf)..this.end.sub_ptr(buf)
+ };
+ let cap = this.cap;
+ let alloc = ManuallyDrop::take(&mut this.alloc);
+ VecDeque::from_contiguous_raw_parts_in(buf, initialized, cap, alloc)
+ }
}
}
@@ -155,10 +188,9 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
if self.ptr == self.end {
None
} else if T::IS_ZST {
- // purposefully don't use 'ptr.offset' because for
- // vectors with 0-size elements this would return the
- // same pointer.
- self.ptr = self.ptr.wrapping_byte_add(1);
+ // `ptr` has to stay where it is to remain aligned, so we reduce the length by 1 by
+ // reducing the `end`.
+ self.end = self.end.wrapping_byte_sub(1);
// Make up a value of this ZST.
Some(unsafe { mem::zeroed() })
@@ -185,10 +217,8 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
let step_size = self.len().min(n);
let to_drop = ptr::slice_from_raw_parts_mut(self.ptr as *mut T, step_size);
if T::IS_ZST {
- // SAFETY: due to unchecked casts of unsigned amounts to signed offsets the wraparound
- // effectively results in unsigned pointers representing positions 0..usize::MAX,
- // which is valid for ZSTs.
- self.ptr = self.ptr.wrapping_byte_add(step_size);
+ // See `next` for why we sub `end` here.
+ self.end = self.end.wrapping_byte_sub(step_size);
} else {
// SAFETY: the min() above ensures that step_size is in bounds
self.ptr = unsafe { self.ptr.add(step_size) };
@@ -221,7 +251,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
return Err(unsafe { array::IntoIter::new_unchecked(raw_ary, 0..len) });
}
- self.ptr = self.ptr.wrapping_byte_add(N);
+ self.end = self.end.wrapping_byte_sub(N);
// Safety: ditto
return Ok(unsafe { raw_ary.transpose().assume_init() });
}
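
For zero-sized `T` the iterator now shrinks `end` (reinterpreted as `ptr + len` in bytes) instead of bumping `ptr`, so `ptr` stays aligned for the whole iteration while `ptr == end` still detects exhaustion. A minimal standalone model of that bookkeeping, using plain integers rather than the real raw pointers:

    struct ZstLenAsEnd {
        ptr: usize, // stands in for the (never-moving, aligned) element pointer
        end: usize, // for a ZST this encodes ptr + remaining length
    }

    impl Iterator for ZstLenAsEnd {
        type Item = ();
        fn next(&mut self) -> Option<()> {
            if self.ptr == self.end {
                None
            } else {
                // Shrink from the `end` side, mirroring the hunks above.
                self.end -= 1;
                Some(())
            }
        }
    }

    fn main() {
        let it = ZstLenAsEnd { ptr: 0x1000, end: 0x1000 + 3 };
        assert_eq!(it.count(), 3);
    }
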
diff --git a/library/alloc/src/vec/is_zero.rs b/library/alloc/src/vec/is_zero.rs
index 8e652d676..cb9adf05c 100644
--- a/library/alloc/src/vec/is_zero.rs
+++ b/library/alloc/src/vec/is_zero.rs
@@ -4,7 +4,8 @@ use crate::boxed::Box;
#[rustc_specialization_trait]
pub(super) unsafe trait IsZero {
- /// Whether this value's representation is all zeros
+ /// Whether this value's representation is all zeros,
+ /// or can be represented with all zeroes.
fn is_zero(&self) -> bool;
}
@@ -57,7 +58,7 @@ unsafe impl<T: IsZero, const N: usize> IsZero for [T; N] {
#[inline]
fn is_zero(&self) -> bool {
// Because this is generated as a runtime check, it's not obvious that
- // it's worth doing if the array is really long. The threshold here
+ // it's worth doing if the array is really long. The threshold here
// is largely arbitrary, but was picked because as of 2022-07-01 LLVM
// fails to const-fold the check in `vec![[1; 32]; n]`
// See https://github.com/rust-lang/rust/pull/97581#issuecomment-1166628022
@@ -147,6 +148,23 @@ impl_is_zero_option_of_nonzero!(
NonZeroIsize,
);
+macro_rules! impl_is_zero_option_of_num {
+ ($($t:ty,)+) => {$(
+ unsafe impl IsZero for Option<$t> {
+ #[inline]
+ fn is_zero(&self) -> bool {
+ const {
+ let none: Self = unsafe { core::mem::MaybeUninit::zeroed().assume_init() };
+ assert!(none.is_none());
+ }
+ self.is_none()
+ }
+ }
+ )+};
+}
+
+impl_is_zero_option_of_num!(u8, u16, u32, u64, u128, i8, i16, i32, i64, i128, usize, isize,);
+
unsafe impl<T: IsZero> IsZero for Wrapping<T> {
#[inline]
fn is_zero(&self) -> bool {
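
The `const` block in the new macro verifies at compile time that the all-zero bit pattern of `Option<$t>` really decodes as `None`; only then is it sound for `vec![None; n]` to take the zeroed-allocation fast path. A runtime version of the same check (a standalone sketch, not std code):

    use std::mem::MaybeUninit;

    fn main() {
        // Same layout assumption the macro asserts: all-zero bytes are `None`.
        let none: Option<u32> = unsafe { MaybeUninit::<Option<u32>>::zeroed().assume_init() };
        assert!(none.is_none());

        // With the IsZero impl in place, this allocation can be requested
        // pre-zeroed from the allocator instead of being filled element by element.
        let v: Vec<Option<u32>> = vec![None; 1 << 16];
        assert!(v.iter().all(Option::is_none));
    }
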
diff --git a/library/alloc/src/vec/mod.rs b/library/alloc/src/vec/mod.rs
index ba34ab680..36b0b3c9e 100644
--- a/library/alloc/src/vec/mod.rs
+++ b/library/alloc/src/vec/mod.rs
@@ -166,7 +166,7 @@ mod spec_extend;
/// vec[0] = 7;
/// assert_eq!(vec[0], 7);
///
-/// vec.extend([1, 2, 3].iter().copied());
+/// vec.extend([1, 2, 3]);
///
/// for x in &vec {
/// println!("{x}");
@@ -490,6 +490,8 @@ impl<T> Vec<T> {
/// This is highly unsafe, due to the number of invariants that aren't
/// checked:
///
+ /// * `ptr` must have been allocated using the global allocator, such as via
+ /// the [`alloc::alloc`] function.
/// * `T` needs to have the same alignment as what `ptr` was allocated with.
/// (`T` having a less strict alignment is not sufficient, the alignment really
/// needs to be equal to satisfy the [`dealloc`] requirement that memory must be
@@ -526,6 +528,7 @@ impl<T> Vec<T> {
/// function.
///
/// [`String`]: crate::string::String
+ /// [`alloc::alloc`]: crate::alloc::alloc
/// [`dealloc`]: crate::alloc::GlobalAlloc::dealloc
///
/// # Examples
@@ -681,6 +684,7 @@ impl<T, A: Allocator> Vec<T, A> {
/// This is highly unsafe, due to the number of invariants that aren't
/// checked:
///
+ /// * `ptr` must be [*currently allocated*] via the given allocator `alloc`.
/// * `T` needs to have the same alignment as what `ptr` was allocated with.
/// (`T` having a less strict alignment is not sufficient, the alignment really
/// needs to be equal to satisfy the [`dealloc`] requirement that memory must be
@@ -714,6 +718,7 @@ impl<T, A: Allocator> Vec<T, A> {
///
/// [`String`]: crate::string::String
/// [`dealloc`]: crate::alloc::GlobalAlloc::dealloc
+ /// [*currently allocated*]: crate::alloc::Allocator#currently-allocated-memory
/// [*fit*]: crate::alloc::Allocator#memory-fitting
///
/// # Examples
@@ -2424,7 +2429,7 @@ impl<T: Clone, A: Allocator> Vec<T, A> {
self.reserve(range.len());
// SAFETY:
- // - `slice::range` guarantees that the given range is valid for indexing self
+ // - `slice::range` guarantees that the given range is valid for indexing self
unsafe {
self.spec_extend_from_within(range);
}
@@ -2681,7 +2686,7 @@ impl<T: Clone, A: Allocator + Clone> Clone for Vec<T, A> {
// HACK(japaric): with cfg(test) the inherent `[T]::to_vec` method, which is
// required for this method definition, is not available. Instead use the
- // `slice::to_vec` function which is only available with cfg(test)
+ // `slice::to_vec` function which is only available with cfg(test)
// NB see the slice::hack module in slice.rs for more information
#[cfg(test)]
fn clone(&self) -> Self {
@@ -3191,7 +3196,7 @@ where
}
}
-// note: test pulls in libstd, which causes errors here
+// note: test pulls in std, which causes errors here
#[cfg(not(test))]
#[stable(feature = "vec_from_box", since = "1.18.0")]
impl<T, A: Allocator> From<Box<[T], A>> for Vec<T, A> {
@@ -3209,7 +3214,7 @@ impl<T, A: Allocator> From<Box<[T], A>> for Vec<T, A> {
}
}
-// note: test pulls in libstd, which causes errors here
+// note: test pulls in std, which causes errors here
#[cfg(not(no_global_oom_handling))]
#[cfg(not(test))]
#[stable(feature = "box_from_vec", since = "1.20.0")]
diff --git a/library/alloc/src/vec/splice.rs b/library/alloc/src/vec/splice.rs
index bad765c7f..1861147fe 100644
--- a/library/alloc/src/vec/splice.rs
+++ b/library/alloc/src/vec/splice.rs
@@ -54,6 +54,12 @@ impl<I: Iterator, A: Allocator> ExactSizeIterator for Splice<'_, I, A> {}
impl<I: Iterator, A: Allocator> Drop for Splice<'_, I, A> {
fn drop(&mut self) {
self.drain.by_ref().for_each(drop);
+ // At this point draining is done and the only remaining tasks are splicing
+ // and moving things into the final place.
+ // Which means we can replace the slice::Iter with pointers that won't point to deallocated
+ // memory, so that Drain::drop is still allowed to call iter.len(), otherwise it would break
+ // the ptr.sub_ptr contract.
+ self.drain.iter = (&[]).iter();
unsafe {
if self.drain.tail_len == 0 {
diff --git a/library/alloc/tests/autotraits.rs b/library/alloc/tests/autotraits.rs
index 8ff5f0abe..879e32b3f 100644
--- a/library/alloc/tests/autotraits.rs
+++ b/library/alloc/tests/autotraits.rs
@@ -32,7 +32,7 @@ fn test_btree_map() {
// spawn(f());
// }
//
- // where with some unintentionally overconstrained Send impls in liballoc's
+ // where with some unintentionally overconstrained Send impls in alloc's
// internals, the future might incorrectly not be Send even though every
// single type involved in the program is Send and Sync.
require_send_sync(async {
diff --git a/library/alloc/tests/lib.rs b/library/alloc/tests/lib.rs
index d6d2b055b..2a93a242d 100644
--- a/library/alloc/tests/lib.rs
+++ b/library/alloc/tests/lib.rs
@@ -1,7 +1,6 @@
#![feature(allocator_api)]
#![feature(alloc_layout_extra)]
#![feature(assert_matches)]
-#![feature(box_syntax)]
#![feature(btree_drain_filter)]
#![feature(cow_is_borrowed)]
#![feature(const_box)]
diff --git a/library/alloc/tests/slice.rs b/library/alloc/tests/slice.rs
index 21f894343..0693beb48 100644
--- a/library/alloc/tests/slice.rs
+++ b/library/alloc/tests/slice.rs
@@ -1,15 +1,9 @@
-use std::cell::Cell;
-use std::cmp::Ordering::{self, Equal, Greater, Less};
+use std::cmp::Ordering::{Equal, Greater, Less};
use std::convert::identity;
use std::fmt;
use std::mem;
use std::panic;
use std::rc::Rc;
-use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};
-
-use rand::distributions::Standard;
-use rand::seq::SliceRandom;
-use rand::{thread_rng, Rng, RngCore};
fn square(n: usize) -> usize {
n * n
@@ -389,123 +383,6 @@ fn test_reverse() {
}
#[test]
-#[cfg_attr(miri, ignore)] // Miri is too slow
-fn test_sort() {
- let mut rng = thread_rng();
-
- for len in (2..25).chain(500..510) {
- for &modulus in &[5, 10, 100, 1000] {
- for _ in 0..10 {
- let orig: Vec<_> =
- rng.sample_iter::<i32, _>(&Standard).map(|x| x % modulus).take(len).collect();
-
- // Sort in default order.
- let mut v = orig.clone();
- v.sort();
- assert!(v.windows(2).all(|w| w[0] <= w[1]));
-
- // Sort in ascending order.
- let mut v = orig.clone();
- v.sort_by(|a, b| a.cmp(b));
- assert!(v.windows(2).all(|w| w[0] <= w[1]));
-
- // Sort in descending order.
- let mut v = orig.clone();
- v.sort_by(|a, b| b.cmp(a));
- assert!(v.windows(2).all(|w| w[0] >= w[1]));
-
- // Sort in lexicographic order.
- let mut v1 = orig.clone();
- let mut v2 = orig.clone();
- v1.sort_by_key(|x| x.to_string());
- v2.sort_by_cached_key(|x| x.to_string());
- assert!(v1.windows(2).all(|w| w[0].to_string() <= w[1].to_string()));
- assert!(v1 == v2);
-
- // Sort with many pre-sorted runs.
- let mut v = orig.clone();
- v.sort();
- v.reverse();
- for _ in 0..5 {
- let a = rng.gen::<usize>() % len;
- let b = rng.gen::<usize>() % len;
- if a < b {
- v[a..b].reverse();
- } else {
- v.swap(a, b);
- }
- }
- v.sort();
- assert!(v.windows(2).all(|w| w[0] <= w[1]));
- }
- }
- }
-
- // Sort using a completely random comparison function.
- // This will reorder the elements *somehow*, but won't panic.
- let mut v = [0; 500];
- for i in 0..v.len() {
- v[i] = i as i32;
- }
- v.sort_by(|_, _| *[Less, Equal, Greater].choose(&mut rng).unwrap());
- v.sort();
- for i in 0..v.len() {
- assert_eq!(v[i], i as i32);
- }
-
- // Should not panic.
- [0i32; 0].sort();
- [(); 10].sort();
- [(); 100].sort();
-
- let mut v = [0xDEADBEEFu64];
- v.sort();
- assert!(v == [0xDEADBEEF]);
-}
-
-#[test]
-fn test_sort_stability() {
- // Miri is too slow
- let large_range = if cfg!(miri) { 0..0 } else { 500..510 };
- let rounds = if cfg!(miri) { 1 } else { 10 };
-
- for len in (2..25).chain(large_range) {
- for _ in 0..rounds {
- let mut counts = [0; 10];
-
- // create a vector like [(6, 1), (5, 1), (6, 2), ...],
- // where the first item of each tuple is random, but
- // the second item represents which occurrence of that
- // number this element is, i.e., the second elements
- // will occur in sorted order.
- let orig: Vec<_> = (0..len)
- .map(|_| {
- let n = thread_rng().gen::<usize>() % 10;
- counts[n] += 1;
- (n, counts[n])
- })
- .collect();
-
- let mut v = orig.clone();
- // Only sort on the first element, so an unstable sort
- // may mix up the counts.
- v.sort_by(|&(a, _), &(b, _)| a.cmp(&b));
-
- // This comparison includes the count (the second item
- // of the tuple), so elements with equal first items
- // will need to be ordered with increasing
- // counts... i.e., exactly asserting that this sort is
- // stable.
- assert!(v.windows(2).all(|w| w[0] <= w[1]));
-
- let mut v = orig.clone();
- v.sort_by_cached_key(|&(x, _)| x);
- assert!(v.windows(2).all(|w| w[0] <= w[1]));
- }
- }
-}
-
-#[test]
fn test_rotate_left() {
let expected: Vec<_> = (0..13).collect();
let mut v = Vec::new();
@@ -1608,230 +1485,6 @@ fn test_copy_from_slice_dst_shorter() {
dst.copy_from_slice(&src);
}
-const MAX_LEN: usize = 80;
-
-static DROP_COUNTS: [AtomicUsize; MAX_LEN] = [
- // FIXME(RFC 1109): AtomicUsize is not Copy.
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
- AtomicUsize::new(0),
-];
-
-static VERSIONS: AtomicUsize = AtomicUsize::new(0);
-
-#[derive(Clone, Eq)]
-struct DropCounter {
- x: u32,
- id: usize,
- version: Cell<usize>,
-}
-
-impl PartialEq for DropCounter {
- fn eq(&self, other: &Self) -> bool {
- self.partial_cmp(other) == Some(Ordering::Equal)
- }
-}
-
-impl PartialOrd for DropCounter {
- fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
- self.version.set(self.version.get() + 1);
- other.version.set(other.version.get() + 1);
- VERSIONS.fetch_add(2, Relaxed);
- self.x.partial_cmp(&other.x)
- }
-}
-
-impl Ord for DropCounter {
- fn cmp(&self, other: &Self) -> Ordering {
- self.partial_cmp(other).unwrap()
- }
-}
-
-impl Drop for DropCounter {
- fn drop(&mut self) {
- DROP_COUNTS[self.id].fetch_add(1, Relaxed);
- VERSIONS.fetch_sub(self.version.get(), Relaxed);
- }
-}
-
-macro_rules! test {
- ($input:ident, $func:ident) => {
- let len = $input.len();
-
- // Work out the total number of comparisons required to sort
- // this array...
- let mut count = 0usize;
- $input.to_owned().$func(|a, b| {
- count += 1;
- a.cmp(b)
- });
-
- // ... and then panic on each and every single one.
- for panic_countdown in 0..count {
- // Refresh the counters.
- VERSIONS.store(0, Relaxed);
- for i in 0..len {
- DROP_COUNTS[i].store(0, Relaxed);
- }
-
- let v = $input.to_owned();
- let _ = std::panic::catch_unwind(move || {
- let mut v = v;
- let mut panic_countdown = panic_countdown;
- v.$func(|a, b| {
- if panic_countdown == 0 {
- SILENCE_PANIC.with(|s| s.set(true));
- panic!();
- }
- panic_countdown -= 1;
- a.cmp(b)
- })
- });
-
- // Check that the number of things dropped is exactly
- // what we expect (i.e., the contents of `v`).
- for (i, c) in DROP_COUNTS.iter().enumerate().take(len) {
- let count = c.load(Relaxed);
- assert!(count == 1, "found drop count == {} for i == {}, len == {}", count, i, len);
- }
-
- // Check that the most recent versions of values were dropped.
- assert_eq!(VERSIONS.load(Relaxed), 0);
- }
- };
-}
-
-thread_local!(static SILENCE_PANIC: Cell<bool> = Cell::new(false));
-
-#[test]
-#[cfg_attr(target_os = "emscripten", ignore)] // no threads
-fn panic_safe() {
- panic::update_hook(move |prev, info| {
- if !SILENCE_PANIC.with(|s| s.get()) {
- prev(info);
- }
- });
-
- let mut rng = thread_rng();
-
- // Miri is too slow (but still need to `chain` to make the types match)
- let lens = if cfg!(miri) { (1..10).chain(0..0) } else { (1..20).chain(70..MAX_LEN) };
- let moduli: &[u32] = if cfg!(miri) { &[5] } else { &[5, 20, 50] };
-
- for len in lens {
- for &modulus in moduli {
- for &has_runs in &[false, true] {
- let mut input = (0..len)
- .map(|id| DropCounter {
- x: rng.next_u32() % modulus,
- id: id,
- version: Cell::new(0),
- })
- .collect::<Vec<_>>();
-
- if has_runs {
- for c in &mut input {
- c.x = c.id as u32;
- }
-
- for _ in 0..5 {
- let a = rng.gen::<usize>() % len;
- let b = rng.gen::<usize>() % len;
- if a < b {
- input[a..b].reverse();
- } else {
- input.swap(a, b);
- }
- }
- }
-
- test!(input, sort_by);
- test!(input, sort_unstable_by);
- }
- }
- }
-
- // Set default panic hook again.
- drop(panic::take_hook());
-}
-
#[test]
fn repeat_generic_slice() {
assert_eq!([1, 2].repeat(2), vec![1, 2, 1, 2]);
diff --git a/library/alloc/tests/vec.rs b/library/alloc/tests/vec.rs
index 7ebed0d5c..2f07c2911 100644
--- a/library/alloc/tests/vec.rs
+++ b/library/alloc/tests/vec.rs
@@ -7,7 +7,9 @@ use std::borrow::Cow;
use std::cell::Cell;
use std::collections::TryReserveErrorKind::*;
use std::fmt::Debug;
+use std::hint;
use std::iter::InPlaceIterable;
+use std::mem;
use std::mem::{size_of, swap};
use std::ops::Bound::*;
use std::panic::{catch_unwind, AssertUnwindSafe};
@@ -1107,8 +1109,31 @@ fn test_into_iter_drop_allocator() {
#[test]
fn test_into_iter_zst() {
- for _ in vec![[0u64; 0]].into_iter() {}
- for _ in vec![[0u64; 0]; 5].into_iter().rev() {}
+ #[derive(Debug, Clone)]
+ struct AlignedZstWithDrop([u64; 0]);
+ impl Drop for AlignedZstWithDrop {
+ fn drop(&mut self) {
+ let addr = self as *mut _ as usize;
+ assert!(hint::black_box(addr) % mem::align_of::<u64>() == 0);
+ }
+ }
+
+ const C: AlignedZstWithDrop = AlignedZstWithDrop([0u64; 0]);
+
+ for _ in vec![C].into_iter() {}
+ for _ in vec![C; 5].into_iter().rev() {}
+
+ let mut it = vec![C, C].into_iter();
+ it.advance_by(1).unwrap();
+ drop(it);
+
+ let mut it = vec![C, C].into_iter();
+ it.next_chunk::<1>().unwrap();
+ drop(it);
+
+ let mut it = vec![C, C].into_iter();
+ it.next_chunk::<4>().unwrap_err();
+ drop(it);
}
#[test]
@@ -1824,7 +1849,7 @@ fn test_stable_pointers() {
}
// Test that, if we reserved enough space, adding and removing elements does not
- // invalidate references into the vector (such as `v0`). This test also
+ // invalidate references into the vector (such as `v0`). This test also
// runs in Miri, which would detect such problems.
// Note that this test does *not* constitute a stable guarantee that all these functions do not
// reallocate! Only what is explicitly documented at
diff --git a/library/alloc/tests/vec_deque.rs b/library/alloc/tests/vec_deque.rs
index d04de5a07..5a0b852e8 100644
--- a/library/alloc/tests/vec_deque.rs
+++ b/library/alloc/tests/vec_deque.rs
@@ -1046,6 +1046,20 @@ fn test_append_double_drop() {
}
#[test]
+#[should_panic]
+fn test_append_zst_capacity_overflow() {
+ let mut v = Vec::with_capacity(usize::MAX);
+ // note: using resize instead of set_len here would
+ // be *extremely* slow in unoptimized builds.
+ // SAFETY: `v` has capacity `usize::MAX`, and no initialization
+ // is needed for empty tuples.
+ unsafe { v.set_len(usize::MAX) };
+ let mut v = VecDeque::from(v);
+ let mut w = vec![()].into();
+ v.append(&mut w);
+}
+
+#[test]
fn test_retain() {
let mut buf = VecDeque::new();
buf.extend(1..5);
@@ -1736,3 +1750,39 @@ fn test_resize_keeps_reserved_space_from_item() {
d.resize(1, v);
assert_eq!(d[0].capacity(), 1234);
}
+
+#[test]
+fn test_collect_from_into_iter_keeps_allocation() {
+ let mut v = Vec::with_capacity(13);
+ v.extend(0..7);
+ check(v.as_ptr(), v.last().unwrap(), v.into_iter());
+
+ let mut v = VecDeque::with_capacity(13);
+ v.extend(0..7);
+ check(&v[0], &v[v.len() - 1], v.into_iter());
+
+ fn check(buf: *const i32, last: *const i32, mut it: impl Iterator<Item = i32>) {
+ assert_eq!(it.next(), Some(0));
+ assert_eq!(it.next(), Some(1));
+
+ let mut v: VecDeque<i32> = it.collect();
+ assert_eq!(v.capacity(), 13);
+ assert_eq!(v.as_slices().0.as_ptr(), buf.wrapping_add(2));
+ assert_eq!(&v[v.len() - 1] as *const _, last);
+
+ assert_eq!(v.as_slices(), ([2, 3, 4, 5, 6].as_slice(), [].as_slice()));
+ v.push_front(7);
+ assert_eq!(v.as_slices(), ([7, 2, 3, 4, 5, 6].as_slice(), [].as_slice()));
+ v.push_front(8);
+ assert_eq!(v.as_slices(), ([8, 7, 2, 3, 4, 5, 6].as_slice(), [].as_slice()));
+
+ // Now that we've added things in place of the two that we removed from
+ // the front of the iterator, we're back to matching the buffer pointer.
+ assert_eq!(v.as_slices().0.as_ptr(), buf);
+ assert_eq!(&v[v.len() - 1] as *const _, last);
+
+ v.push_front(9);
+ assert_eq!(v.as_slices(), ([9].as_slice(), [8, 7, 2, 3, 4, 5, 6].as_slice()));
+ assert_eq!(v.capacity(), 13);
+ }
+}
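
The test above pins down the user-visible effect of the new `IntoIter::into_vecdeque` path: collecting a (possibly partially consumed) `vec::IntoIter` into a `VecDeque` reuses the original buffer instead of reallocating. In plain usage that looks like the following, assuming the specialization applies:

    use std::collections::VecDeque;

    fn main() {
        let mut v = Vec::with_capacity(32);
        v.extend(0..10);
        let buf = v.as_ptr();

        let dq: VecDeque<i32> = v.into_iter().collect();
        // Capacity and buffer address carry over from the Vec.
        assert_eq!(dq.capacity(), 32);
        assert_eq!(dq.as_slices().0.as_ptr(), buf);
    }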