author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:11:28 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:11:28 +0000
commit    94a0819fe3a0d679c3042a77bfe6a2afc505daea (patch)
tree      2b827afe6a05f3538db3f7803a88c4587fe85648 /library/alloc/src/collections
parent    Adding upstream version 1.64.0+dfsg1. (diff)
Adding upstream version 1.66.0+dfsg1. (tag: upstream/1.66.0+dfsg1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'library/alloc/src/collections')
 library/alloc/src/collections/binary_heap.rs              |  3
 library/alloc/src/collections/btree/dedup_sorted_iter.rs  |  4
 library/alloc/src/collections/btree/map.rs                | 41
 library/alloc/src/collections/btree/map/entry.rs          | 10
 library/alloc/src/collections/btree/node.rs               | 22
 library/alloc/src/collections/btree/set.rs                | 26
 library/alloc/src/collections/linked_list.rs              |  4
 library/alloc/src/collections/mod.rs                      |  3
 library/alloc/src/collections/vec_deque/drain.rs          | 44
 library/alloc/src/collections/vec_deque/mod.rs            | 24
 10 files changed, 120 insertions(+), 61 deletions(-)
diff --git a/library/alloc/src/collections/binary_heap.rs b/library/alloc/src/collections/binary_heap.rs
index 197e7aaac..4583bc9a1 100644
--- a/library/alloc/src/collections/binary_heap.rs
+++ b/library/alloc/src/collections/binary_heap.rs
@@ -1010,7 +1010,8 @@ impl<T> BinaryHeap<T> {
/// current length. The allocator may reserve more space to speculatively
/// avoid frequent allocations. After calling `try_reserve`, capacity will be
/// greater than or equal to `self.len() + additional` if it returns
- /// `Ok(())`. Does nothing if capacity is already sufficient.
+ /// `Ok(())`. Does nothing if capacity is already sufficient. This method
+ /// preserves the contents even if an error occurs.
///
/// # Errors
///
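The added sentence guarantees that a failed `try_reserve` leaves the heap intact. A minimal sketch of the intended usage pattern (the helper function is ours, not part of the diff):

```rust
use std::collections::{BinaryHeap, TryReserveError};

// Pre-allocate before a bulk insert, falling back gracefully.
// On `Err`, the heap is untouched and remains fully usable.
fn push_all(heap: &mut BinaryHeap<u32>, data: &[u32]) -> Result<(), TryReserveError> {
    heap.try_reserve(data.len())?;
    heap.extend(data.iter().copied());
    Ok(())
}
```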
diff --git a/library/alloc/src/collections/btree/dedup_sorted_iter.rs b/library/alloc/src/collections/btree/dedup_sorted_iter.rs
index 60bf83b83..17ee78045 100644
--- a/library/alloc/src/collections/btree/dedup_sorted_iter.rs
+++ b/library/alloc/src/collections/btree/dedup_sorted_iter.rs
@@ -3,7 +3,9 @@ use core::iter::Peekable;
/// An iterator for deduping the keys of a sorted iterator.
/// When encountering the duplicated key, only the last key-value pair is yielded.
///
-/// Used by [`BTreeMap::bulk_build_from_sorted_iter`].
+/// Used by [`BTreeMap::bulk_build_from_sorted_iter`][1].
+///
+/// [1]: crate::collections::BTreeMap::bulk_build_from_sorted_iter
pub struct DedupSortedIter<K, V, I>
where
I: Iterator<Item = (K, V)>,
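For illustration, the keep-the-last-duplicate behavior described above can be sketched with a plain `Peekable` loop (a standalone re-implementation, not the stdlib type):

```rust
// Standalone sketch of "keep only the last pair per key" over sorted input.
fn dedup_last<K: PartialEq, V>(pairs: Vec<(K, V)>) -> Vec<(K, V)> {
    let mut iter = pairs.into_iter().peekable();
    let mut out = Vec::new();
    while let Some(pair) = iter.next() {
        // Skip this pair if the next one repeats the same key.
        if iter.peek().map_or(true, |next| next.0 != pair.0) {
            out.push(pair);
        }
    }
    out
}

fn main() {
    let deduped = dedup_last(vec![(1, "a"), (1, "b"), (2, "c")]);
    assert_eq!(deduped, vec![(1, "b"), (2, "c")]);
}
```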
diff --git a/library/alloc/src/collections/btree/map.rs b/library/alloc/src/collections/btree/map.rs
index cacbd54b6..8a7719347 100644
--- a/library/alloc/src/collections/btree/map.rs
+++ b/library/alloc/src/collections/btree/map.rs
@@ -580,7 +580,7 @@ impl<K, V> BTreeMap<K, V> {
/// map.insert(1, "a");
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
- #[rustc_const_unstable(feature = "const_btree_new", issue = "71835")]
+ #[rustc_const_stable(feature = "const_btree_new", since = "1.66.0")]
#[must_use]
pub const fn new() -> BTreeMap<K, V> {
BTreeMap { root: None, length: 0, alloc: ManuallyDrop::new(Global), _marker: PhantomData }
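With `new` now a stable `const fn` (likewise for `BTreeSet`, further down in this commit), an empty collection can live in a `static` without lazy initialization. A small sketch:

```rust
use std::collections::{BTreeMap, BTreeSet};

// Possible on stable since 1.66: `new` is a stable `const fn` for both types.
static TABLE: BTreeMap<u32, &str> = BTreeMap::new();
static IDS: BTreeSet<u32> = BTreeSet::new();

fn main() {
    assert!(TABLE.is_empty() && IDS.is_empty());
}
```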
@@ -703,7 +703,6 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
/// Basic usage:
///
/// ```
- /// #![feature(map_first_last)]
/// use std::collections::BTreeMap;
///
/// let mut map = BTreeMap::new();
@@ -712,7 +711,7 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
/// map.insert(2, "a");
/// assert_eq!(map.first_key_value(), Some((&1, &"b")));
/// ```
- #[unstable(feature = "map_first_last", issue = "62924")]
+ #[stable(feature = "map_first_last", since = "1.66.0")]
pub fn first_key_value(&self) -> Option<(&K, &V)>
where
K: Ord,
@@ -727,7 +726,6 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
/// # Examples
///
/// ```
- /// #![feature(map_first_last)]
/// use std::collections::BTreeMap;
///
/// let mut map = BTreeMap::new();
@@ -741,7 +739,7 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
/// assert_eq!(*map.get(&1).unwrap(), "first");
/// assert_eq!(*map.get(&2).unwrap(), "b");
/// ```
- #[unstable(feature = "map_first_last", issue = "62924")]
+ #[stable(feature = "map_first_last", since = "1.66.0")]
pub fn first_entry(&mut self) -> Option<OccupiedEntry<'_, K, V, A>>
where
K: Ord,
@@ -765,7 +763,6 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
/// Draining elements in ascending order, while keeping a usable map each iteration.
///
/// ```
- /// #![feature(map_first_last)]
/// use std::collections::BTreeMap;
///
/// let mut map = BTreeMap::new();
@@ -776,7 +773,7 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
/// }
/// assert!(map.is_empty());
/// ```
- #[unstable(feature = "map_first_last", issue = "62924")]
+ #[stable(feature = "map_first_last", since = "1.66.0")]
pub fn pop_first(&mut self) -> Option<(K, V)>
where
K: Ord,
@@ -792,7 +789,6 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
/// Basic usage:
///
/// ```
- /// #![feature(map_first_last)]
/// use std::collections::BTreeMap;
///
/// let mut map = BTreeMap::new();
@@ -800,7 +796,7 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
/// map.insert(2, "a");
/// assert_eq!(map.last_key_value(), Some((&2, &"a")));
/// ```
- #[unstable(feature = "map_first_last", issue = "62924")]
+ #[stable(feature = "map_first_last", since = "1.66.0")]
pub fn last_key_value(&self) -> Option<(&K, &V)>
where
K: Ord,
@@ -815,7 +811,6 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
/// # Examples
///
/// ```
- /// #![feature(map_first_last)]
/// use std::collections::BTreeMap;
///
/// let mut map = BTreeMap::new();
@@ -829,7 +824,7 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
/// assert_eq!(*map.get(&1).unwrap(), "a");
/// assert_eq!(*map.get(&2).unwrap(), "last");
/// ```
- #[unstable(feature = "map_first_last", issue = "62924")]
+ #[stable(feature = "map_first_last", since = "1.66.0")]
pub fn last_entry(&mut self) -> Option<OccupiedEntry<'_, K, V, A>>
where
K: Ord,
@@ -853,7 +848,6 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
/// Draining elements in descending order, while keeping a usable map each iteration.
///
/// ```
- /// #![feature(map_first_last)]
/// use std::collections::BTreeMap;
///
/// let mut map = BTreeMap::new();
@@ -864,7 +858,7 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
/// }
/// assert!(map.is_empty());
/// ```
- #[unstable(feature = "map_first_last", issue = "62924")]
+ #[stable(feature = "map_first_last", since = "1.66.0")]
pub fn pop_last(&mut self) -> Option<(K, V)>
where
K: Ord,
@@ -1099,6 +1093,9 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
/// Moves all elements from `other` into `self`, leaving `other` empty.
///
+ /// If a key from `other` is already present in `self`, the respective
+ /// value from `self` will be overwritten with the respective value from `other`.
+ ///
/// # Examples
///
/// ```
@@ -1107,10 +1104,10 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
/// let mut a = BTreeMap::new();
/// a.insert(1, "a");
/// a.insert(2, "b");
- /// a.insert(3, "c");
+ /// a.insert(3, "c"); // Note: Key (3) also present in b.
///
/// let mut b = BTreeMap::new();
- /// b.insert(3, "d");
+ /// b.insert(3, "d"); // Note: Key (3) also present in a.
/// b.insert(4, "e");
/// b.insert(5, "f");
///
@@ -1121,7 +1118,7 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
///
/// assert_eq!(a[&1], "a");
/// assert_eq!(a[&2], "b");
- /// assert_eq!(a[&3], "d");
+ /// assert_eq!(a[&3], "d"); // Note: "c" has been overwritten.
/// assert_eq!(a[&4], "e");
/// assert_eq!(a[&5], "f");
/// ```
@@ -2392,7 +2389,11 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
/// ```
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
- #[rustc_const_unstable(feature = "const_btree_new", issue = "71835")]
+ #[rustc_const_unstable(
+ feature = "const_btree_len",
+ issue = "71835",
+ implied_by = "const_btree_new"
+ )]
pub const fn len(&self) -> usize {
self.length
}
@@ -2413,7 +2414,11 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
/// ```
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
- #[rustc_const_unstable(feature = "const_btree_new", issue = "71835")]
+ #[rustc_const_unstable(
+ feature = "const_btree_len",
+ issue = "71835",
+ implied_by = "const_btree_new"
+ )]
pub const fn is_empty(&self) -> bool {
self.len() == 0
}
diff --git a/library/alloc/src/collections/btree/map/entry.rs b/library/alloc/src/collections/btree/map/entry.rs
index b6eecf9b0..370b58864 100644
--- a/library/alloc/src/collections/btree/map/entry.rs
+++ b/library/alloc/src/collections/btree/map/entry.rs
@@ -133,6 +133,16 @@ impl<'a, K: Debug + Ord, V: Debug, A: Allocator + Clone> fmt::Display
}
}
+#[unstable(feature = "map_try_insert", issue = "82766")]
+impl<'a, K: core::fmt::Debug + Ord, V: core::fmt::Debug> core::error::Error
+ for crate::collections::btree_map::OccupiedError<'a, K, V>
+{
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ "key already exists"
+ }
+}
+
impl<'a, K: Ord, V, A: Allocator + Clone> Entry<'a, K, V, A> {
/// Ensures a value is in the entry by inserting the default if empty, and returns
/// a mutable reference to the value in the entry.
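A nightly-only sketch of what the new `Error` impl buys: `map_try_insert` is still unstable, but its `OccupiedError` now plugs into generic `Error`-based reporting.

```rust
#![feature(map_try_insert)]

use std::collections::BTreeMap;
use std::error::Error;

fn main() {
    let mut map = BTreeMap::new();
    map.try_insert(1, "a").unwrap();
    let err = map.try_insert(1, "b").unwrap_err();
    // `OccupiedError` implements `Display` and `Debug`, and now `Error` too,
    // so it can be passed to anything that accepts `&dyn Error`.
    println!("{err}");
    let _ = &err as &dyn Error;
}
```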
diff --git a/library/alloc/src/collections/btree/node.rs b/library/alloc/src/collections/btree/node.rs
index d831161bc..da766b67a 100644
--- a/library/alloc/src/collections/btree/node.rs
+++ b/library/alloc/src/collections/btree/node.rs
@@ -206,9 +206,9 @@ impl<'a, K: 'a, V: 'a, Type> Clone for NodeRef<marker::Immut<'a>, K, V, Type> {
unsafe impl<BorrowType, K: Sync, V: Sync, Type> Sync for NodeRef<BorrowType, K, V, Type> {}
-unsafe impl<'a, K: Sync + 'a, V: Sync + 'a, Type> Send for NodeRef<marker::Immut<'a>, K, V, Type> {}
-unsafe impl<'a, K: Send + 'a, V: Send + 'a, Type> Send for NodeRef<marker::Mut<'a>, K, V, Type> {}
-unsafe impl<'a, K: Send + 'a, V: Send + 'a, Type> Send for NodeRef<marker::ValMut<'a>, K, V, Type> {}
+unsafe impl<K: Sync, V: Sync, Type> Send for NodeRef<marker::Immut<'_>, K, V, Type> {}
+unsafe impl<K: Send, V: Send, Type> Send for NodeRef<marker::Mut<'_>, K, V, Type> {}
+unsafe impl<K: Send, V: Send, Type> Send for NodeRef<marker::ValMut<'_>, K, V, Type> {}
unsafe impl<K: Send, V: Send, Type> Send for NodeRef<marker::Owned, K, V, Type> {}
unsafe impl<K: Send, V: Send, Type> Send for NodeRef<marker::Dying, K, V, Type> {}
@@ -318,7 +318,7 @@ impl<BorrowType: marker::BorrowType, K, V, Type> NodeRef<BorrowType, K, V, Type>
pub fn ascend(
self,
) -> Result<Handle<NodeRef<BorrowType, K, V, marker::Internal>, marker::Edge>, Self> {
- assert!(BorrowType::PERMITS_TRAVERSAL);
+ let _ = BorrowType::TRAVERSAL_PERMIT;
// We need to use raw pointers to nodes because, if BorrowType is marker::ValMut,
// there might be outstanding mutable references to values that we must not invalidate.
let leaf_ptr: *const _ = Self::as_leaf_ptr(&self);
@@ -1003,7 +1003,7 @@ impl<BorrowType: marker::BorrowType, K, V>
/// `edge.descend().ascend().unwrap()` and `node.ascend().unwrap().descend()` should
/// both, upon success, do nothing.
pub fn descend(self) -> NodeRef<BorrowType, K, V, marker::LeafOrInternal> {
- assert!(BorrowType::PERMITS_TRAVERSAL);
+ let _ = BorrowType::TRAVERSAL_PERMIT;
// We need to use raw pointers to nodes because, if BorrowType is
// marker::ValMut, there might be outstanding mutable references to
// values that we must not invalidate. There's no worry accessing the
@@ -1666,15 +1666,17 @@ pub mod marker {
pub struct ValMut<'a>(PhantomData<&'a mut ()>);
pub trait BorrowType {
- // Whether node references of this borrow type allow traversing
- // to other nodes in the tree.
- const PERMITS_TRAVERSAL: bool = true;
+ // If node references of this borrow type allow traversing to other
+ // nodes in the tree, this constant can be evaluated. Thus reading it
+ // serves as a compile-time assertion.
+ const TRAVERSAL_PERMIT: () = ();
}
impl BorrowType for Owned {
- // Traversal isn't needed, it happens using the result of `borrow_mut`.
+ // Reject evaluation, because traversal isn't needed. Instead traversal
+ // happens using the result of `borrow_mut`.
// By disabling traversal, and only creating new references to roots,
// we know that every reference of the `Owned` type is to a root node.
- const PERMITS_TRAVERSAL: bool = false;
+ const TRAVERSAL_PERMIT: () = panic!();
}
impl BorrowType for Dying {}
impl<'a> BorrowType for Immut<'a> {}
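The replacement trick generalizes: an associated `const` whose initializer panics is only an error if something actually evaluates it, so reading it acts as a per-impl compile-time assertion. A standalone sketch of the pattern (names ours):

```rust
trait Mode {
    // Evaluates fine by default; overridden to `panic!()` where forbidden.
    const TRAVERSAL_PERMIT: () = ();
}

struct Walkable;
struct Pinned;

impl Mode for Walkable {}
impl Mode for Pinned {
    const TRAVERSAL_PERMIT: () = panic!("traversal is not permitted here");
}

fn traverse<M: Mode>() {
    // Forces const evaluation of the assertion for the chosen `M`.
    let _ = M::TRAVERSAL_PERMIT;
}

fn main() {
    traverse::<Walkable>(); // ok
    // traverse::<Pinned>(); // post-monomorphization compile error
}
```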
diff --git a/library/alloc/src/collections/btree/set.rs b/library/alloc/src/collections/btree/set.rs
index 2cfc08074..4ddb21192 100644
--- a/library/alloc/src/collections/btree/set.rs
+++ b/library/alloc/src/collections/btree/set.rs
@@ -343,7 +343,7 @@ impl<T> BTreeSet<T> {
/// let mut set: BTreeSet<i32> = BTreeSet::new();
/// ```
#[stable(feature = "rust1", since = "1.0.0")]
- #[rustc_const_unstable(feature = "const_btree_new", issue = "71835")]
+ #[rustc_const_stable(feature = "const_btree_new", since = "1.66.0")]
#[must_use]
pub const fn new() -> BTreeSet<T> {
BTreeSet { map: BTreeMap::new() }
@@ -786,7 +786,6 @@ impl<T, A: Allocator + Clone> BTreeSet<T, A> {
/// Basic usage:
///
/// ```
- /// #![feature(map_first_last)]
/// use std::collections::BTreeSet;
///
/// let mut set = BTreeSet::new();
@@ -797,7 +796,7 @@ impl<T, A: Allocator + Clone> BTreeSet<T, A> {
/// assert_eq!(set.first(), Some(&1));
/// ```
#[must_use]
- #[unstable(feature = "map_first_last", issue = "62924")]
+ #[stable(feature = "map_first_last", since = "1.66.0")]
pub fn first(&self) -> Option<&T>
where
T: Ord,
@@ -813,7 +812,6 @@ impl<T, A: Allocator + Clone> BTreeSet<T, A> {
/// Basic usage:
///
/// ```
- /// #![feature(map_first_last)]
/// use std::collections::BTreeSet;
///
/// let mut set = BTreeSet::new();
@@ -824,7 +822,7 @@ impl<T, A: Allocator + Clone> BTreeSet<T, A> {
/// assert_eq!(set.last(), Some(&2));
/// ```
#[must_use]
- #[unstable(feature = "map_first_last", issue = "62924")]
+ #[stable(feature = "map_first_last", since = "1.66.0")]
pub fn last(&self) -> Option<&T>
where
T: Ord,
@@ -838,7 +836,6 @@ impl<T, A: Allocator + Clone> BTreeSet<T, A> {
/// # Examples
///
/// ```
- /// #![feature(map_first_last)]
/// use std::collections::BTreeSet;
///
/// let mut set = BTreeSet::new();
@@ -849,7 +846,7 @@ impl<T, A: Allocator + Clone> BTreeSet<T, A> {
/// }
/// assert!(set.is_empty());
/// ```
- #[unstable(feature = "map_first_last", issue = "62924")]
+ #[stable(feature = "map_first_last", since = "1.66.0")]
pub fn pop_first(&mut self) -> Option<T>
where
T: Ord,
@@ -863,7 +860,6 @@ impl<T, A: Allocator + Clone> BTreeSet<T, A> {
/// # Examples
///
/// ```
- /// #![feature(map_first_last)]
/// use std::collections::BTreeSet;
///
/// let mut set = BTreeSet::new();
@@ -874,7 +870,7 @@ impl<T, A: Allocator + Clone> BTreeSet<T, A> {
/// }
/// assert!(set.is_empty());
/// ```
- #[unstable(feature = "map_first_last", issue = "62924")]
+ #[stable(feature = "map_first_last", since = "1.66.0")]
pub fn pop_last(&mut self) -> Option<T>
where
T: Ord,
@@ -1174,7 +1170,11 @@ impl<T, A: Allocator + Clone> BTreeSet<T, A> {
/// ```
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
- #[rustc_const_unstable(feature = "const_btree_new", issue = "71835")]
+ #[rustc_const_unstable(
+ feature = "const_btree_len",
+ issue = "71835",
+ implied_by = "const_btree_new"
+ )]
pub const fn len(&self) -> usize {
self.map.len()
}
@@ -1193,7 +1193,11 @@ impl<T, A: Allocator + Clone> BTreeSet<T, A> {
/// ```
#[must_use]
#[stable(feature = "rust1", since = "1.0.0")]
- #[rustc_const_unstable(feature = "const_btree_new", issue = "71835")]
+ #[rustc_const_unstable(
+ feature = "const_btree_len",
+ issue = "71835",
+ implied_by = "const_btree_new"
+ )]
pub const fn is_empty(&self) -> bool {
self.len() == 0
}
diff --git a/library/alloc/src/collections/linked_list.rs b/library/alloc/src/collections/linked_list.rs
index e21c8aa3b..f2f5dffc2 100644
--- a/library/alloc/src/collections/linked_list.rs
+++ b/library/alloc/src/collections/linked_list.rs
@@ -1570,7 +1570,7 @@ impl<'a, T> CursorMut<'a, T> {
/// that the cursor points to is unchanged, even if it is the "ghost" node.
///
/// This operation should compute in *O*(1) time.
- // `push_front` continues to point to "ghost" when it addes a node to mimic
+ // `push_front` continues to point to "ghost" when it adds a node to mimic
// the behavior of `insert_before` on an empty list.
#[unstable(feature = "linked_list_cursors", issue = "58533")]
pub fn push_front(&mut self, elt: T) {
@@ -1613,7 +1613,7 @@ impl<'a, T> CursorMut<'a, T> {
None
} else {
// We can't point to the node that we pop. Copying the behavior of
- // `remove_current`, we move on the the next node in the sequence.
+ // `remove_current`, we move on to the next node in the sequence.
// If the list is of length 1 then we end pointing to the "ghost"
// node at index 0, which is expected.
if self.list.head == self.current {
diff --git a/library/alloc/src/collections/mod.rs b/library/alloc/src/collections/mod.rs
index 628a5b155..161a37573 100644
--- a/library/alloc/src/collections/mod.rs
+++ b/library/alloc/src/collections/mod.rs
@@ -152,3 +152,6 @@ trait SpecExtend<I: IntoIterator> {
/// Extends `self` with the contents of the given iterator.
fn spec_extend(&mut self, iter: I);
}
+
+#[stable(feature = "try_reserve", since = "1.57.0")]
+impl core::error::Error for TryReserveError {}
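With `Error` implemented for `TryReserveError` in `alloc` itself, allocation failures ride the usual `?`-to-`Box<dyn Error>` conversion. A minimal stable-Rust sketch (the function is ours):

```rust
use std::error::Error;

// `TryReserveError: Error` lets `?` convert it into `Box<dyn Error>`.
fn grow(buf: &mut Vec<u8>, extra: usize) -> Result<(), Box<dyn Error>> {
    buf.try_reserve(extra)?;
    Ok(())
}

fn main() -> Result<(), Box<dyn Error>> {
    let mut buf = Vec::new();
    grow(&mut buf, 1024)?;
    Ok(())
}
```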
diff --git a/library/alloc/src/collections/vec_deque/drain.rs b/library/alloc/src/collections/vec_deque/drain.rs
index 05f94da6d..41baa7102 100644
--- a/library/alloc/src/collections/vec_deque/drain.rs
+++ b/library/alloc/src/collections/vec_deque/drain.rs
@@ -1,10 +1,12 @@
+use core::fmt;
use core::iter::FusedIterator;
+use core::marker::PhantomData;
+use core::mem::{self, MaybeUninit};
use core::ptr::{self, NonNull};
-use core::{fmt, mem};
use crate::alloc::{Allocator, Global};
-use super::{count, Iter, VecDeque};
+use super::{count, wrap_index, VecDeque};
/// A draining iterator over the elements of a `VecDeque`.
///
@@ -20,18 +22,24 @@ pub struct Drain<
> {
after_tail: usize,
after_head: usize,
- iter: Iter<'a, T>,
+ ring: NonNull<[T]>,
+ tail: usize,
+ head: usize,
deque: NonNull<VecDeque<T, A>>,
+ _phantom: PhantomData<&'a T>,
}
impl<'a, T, A: Allocator> Drain<'a, T, A> {
pub(super) unsafe fn new(
after_tail: usize,
after_head: usize,
- iter: Iter<'a, T>,
+ ring: &'a [MaybeUninit<T>],
+ tail: usize,
+ head: usize,
deque: NonNull<VecDeque<T, A>>,
) -> Self {
- Drain { after_tail, after_head, iter, deque }
+ let ring = unsafe { NonNull::new_unchecked(ring as *const [MaybeUninit<T>] as *mut _) };
+ Drain { after_tail, after_head, ring, tail, head, deque, _phantom: PhantomData }
}
}
@@ -41,7 +49,9 @@ impl<T: fmt::Debug, A: Allocator> fmt::Debug for Drain<'_, T, A> {
f.debug_tuple("Drain")
.field(&self.after_tail)
.field(&self.after_head)
- .field(&self.iter)
+ .field(&self.ring)
+ .field(&self.tail)
+ .field(&self.head)
.finish()
}
}
@@ -118,12 +128,21 @@ impl<T, A: Allocator> Iterator for Drain<'_, T, A> {
#[inline]
fn next(&mut self) -> Option<T> {
- self.iter.next().map(|elt| unsafe { ptr::read(elt) })
+ if self.tail == self.head {
+ return None;
+ }
+ let tail = self.tail;
+ self.tail = wrap_index(self.tail.wrapping_add(1), self.ring.len());
+ // Safety:
+ // - `self.tail` in a ring buffer is always a valid index.
+ // - `self.head` and `self.tail` equality is checked above.
+ unsafe { Some(ptr::read(self.ring.as_ptr().get_unchecked_mut(tail))) }
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
- self.iter.size_hint()
+ let len = count(self.tail, self.head, self.ring.len());
+ (len, Some(len))
}
}
@@ -131,7 +150,14 @@ impl<T, A: Allocator> Iterator for Drain<'_, T, A> {
impl<T, A: Allocator> DoubleEndedIterator for Drain<'_, T, A> {
#[inline]
fn next_back(&mut self) -> Option<T> {
- self.iter.next_back().map(|elt| unsafe { ptr::read(elt) })
+ if self.tail == self.head {
+ return None;
+ }
+ self.head = wrap_index(self.head.wrapping_sub(1), self.ring.len());
+ // Safety:
+ // - `self.head` in a ring buffer is always a valid index.
+ // - `self.head` and `self.tail` equality is checked above.
+ unsafe { Some(ptr::read(self.ring.as_ptr().get_unchecked_mut(self.head))) }
}
}
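For context: in this (pre-1.67) `VecDeque` layout the ring size is always a power of two, and `wrap_index` masks the index, so the drain above can step `tail` forward and `head` backward with plain wrapping arithmetic. A standalone sketch of that math:

```rust
// Standalone sketch of the wrapping index math used by `Drain` above.
fn wrap_index(index: usize, size: usize) -> usize {
    debug_assert!(size.is_power_of_two());
    index & (size - 1)
}

fn main() {
    let size = 8;
    assert_eq!(wrap_index(7usize.wrapping_add(1), size), 0); // forward wrap
    assert_eq!(wrap_index(0usize.wrapping_sub(1), size), 7); // backward wrap
}
```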
diff --git a/library/alloc/src/collections/vec_deque/mod.rs b/library/alloc/src/collections/vec_deque/mod.rs
index 4d895d837..2a57dad89 100644
--- a/library/alloc/src/collections/vec_deque/mod.rs
+++ b/library/alloc/src/collections/vec_deque/mod.rs
@@ -12,11 +12,17 @@ use core::fmt;
use core::hash::{Hash, Hasher};
use core::iter::{repeat_with, FromIterator};
use core::marker::PhantomData;
-use core::mem::{self, ManuallyDrop, MaybeUninit};
+use core::mem::{ManuallyDrop, MaybeUninit, SizedTypeProperties};
use core::ops::{Index, IndexMut, Range, RangeBounds};
use core::ptr::{self, NonNull};
use core::slice;
+// This is used in a bunch of intra-doc links.
+// FIXME: For some reason, `#[cfg(doc)]` wasn't sufficient, resulting in
+// failures in linkchecker even though rustdoc built the docs just fine.
+#[allow(unused_imports)]
+use core::mem;
+
use crate::alloc::{Allocator, Global};
use crate::collections::TryReserveError;
use crate::collections::TryReserveErrorKind;
@@ -177,7 +183,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// Marginally more convenient
#[inline]
fn cap(&self) -> usize {
- if mem::size_of::<T>() == 0 {
+ if T::IS_ZST {
// For zero sized types, we are always at maximum capacity
MAXIMUM_ZST_CAPACITY
} else {
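`SizedTypeProperties` is an internal `core` helper; its `IS_ZST` is essentially a const shorthand for the check it replaces here. The equivalent on stable Rust:

```rust
use std::mem;

// Stable-Rust equivalent of the internal `T::IS_ZST` shorthand used above.
const fn is_zst<T>() -> bool {
    mem::size_of::<T>() == 0
}

fn main() {
    assert!(is_zst::<()>());
    assert!(!is_zst::<u64>());
}
```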
@@ -794,7 +800,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// in the given deque. The collection may reserve more space to speculatively avoid
/// frequent reallocations. After calling `try_reserve`, capacity will be
/// greater than or equal to `self.len() + additional` if it returns
- /// `Ok(())`. Does nothing if capacity is already sufficient.
+ /// `Ok(())`. Does nothing if capacity is already sufficient. This method
+ /// preserves the contents even if an error occurs.
///
/// # Errors
///
@@ -1333,9 +1340,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
// it. We do not write to `self` nor reborrow to a mutable reference.
// Hence the raw pointer we created above, for `deque`, remains valid.
let ring = self.buffer_as_slice();
- let iter = Iter::new(ring, drain_tail, drain_head);
- Drain::new(drain_head, head, iter, deque)
+ Drain::new(drain_head, head, ring, drain_tail, drain_head, deque)
}
}
@@ -2447,8 +2453,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
let mut right_offset = 0;
for i in left_edge..right_edge {
right_offset = (i - left_edge) % (cap - right_edge);
- let src: isize = (right_edge + right_offset) as isize;
- ptr::swap(buf.add(i), buf.offset(src));
+ let src = right_edge + right_offset;
+ ptr::swap(buf.add(i), buf.add(src));
}
let n_ops = right_edge - left_edge;
left_edge += n_ops;
@@ -3038,7 +3044,7 @@ impl<T, A: Allocator> From<Vec<T, A>> for VecDeque<T, A> {
/// `Vec<T>` came from `From<VecDeque<T>>` and hasn't been reallocated.
fn from(mut other: Vec<T, A>) -> Self {
let len = other.len();
- if mem::size_of::<T>() == 0 {
+ if T::IS_ZST {
// There's no actual allocation for ZSTs to worry about capacity,
// but `VecDeque` can't handle as much length as `Vec`.
assert!(len < MAXIMUM_ZST_CAPACITY, "capacity overflow");
@@ -3124,7 +3130,7 @@ impl<T, const N: usize> From<[T; N]> for VecDeque<T> {
fn from(arr: [T; N]) -> Self {
let mut deq = VecDeque::with_capacity(N);
let arr = ManuallyDrop::new(arr);
- if mem::size_of::<T>() != 0 {
+ if !<T>::IS_ZST {
// SAFETY: VecDeque::with_capacity ensures that there is enough capacity.
unsafe {
ptr::copy_nonoverlapping(arr.as_ptr(), deq.ptr(), N);