author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:06:37 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:06:37 +0000
commit     246f239d9f40f633160f0c18f87a20922d4e77bb (patch)
tree       5a88572663584b3d4d28e5a20e10abab1be40884 /library/alloc/src/collections
parent     Releasing progress-linux version 1.64.0+dfsg1-1~progress7.99u1. (diff)
Merging debian version 1.65.0+dfsg1-2.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat
-rw-r--r--  library/alloc/src/collections/binary_heap.rs              |  3
-rw-r--r--  library/alloc/src/collections/btree/dedup_sorted_iter.rs  |  4
-rw-r--r--  library/alloc/src/collections/btree/map/entry.rs          | 11
-rw-r--r--  library/alloc/src/collections/btree/node.rs               | 16
-rw-r--r--  library/alloc/src/collections/linked_list.rs              |  2
-rw-r--r--  library/alloc/src/collections/mod.rs                      |  4
-rw-r--r--  library/alloc/src/collections/vec_deque/drain.rs          | 44
-rw-r--r--  library/alloc/src/collections/vec_deque/mod.rs            | 10
8 files changed, 70 insertions, 24 deletions
diff --git a/library/alloc/src/collections/binary_heap.rs b/library/alloc/src/collections/binary_heap.rs
index 197e7aaac..4583bc9a1 100644
--- a/library/alloc/src/collections/binary_heap.rs
+++ b/library/alloc/src/collections/binary_heap.rs
@@ -1010,7 +1010,8 @@ impl<T> BinaryHeap<T> {
     /// current length. The allocator may reserve more space to speculatively
     /// avoid frequent allocations. After calling `try_reserve`, capacity will be
     /// greater than or equal to `self.len() + additional` if it returns
-    /// `Ok(())`. Does nothing if capacity is already sufficient.
+    /// `Ok(())`. Does nothing if capacity is already sufficient. This method
+    /// preserves the contents even if an error occurs.
     ///
     /// # Errors
     ///
diff --git a/library/alloc/src/collections/btree/dedup_sorted_iter.rs b/library/alloc/src/collections/btree/dedup_sorted_iter.rs
index 60bf83b83..17ee78045 100644
--- a/library/alloc/src/collections/btree/dedup_sorted_iter.rs
+++ b/library/alloc/src/collections/btree/dedup_sorted_iter.rs
@@ -3,7 +3,9 @@ use core::iter::Peekable;
 /// A iterator for deduping the key of a sorted iterator.
 /// When encountering the duplicated key, only the last key-value pair is yielded.
 ///
-/// Used by [`BTreeMap::bulk_build_from_sorted_iter`].
+/// Used by [`BTreeMap::bulk_build_from_sorted_iter`][1].
+///
+/// [1]: crate::collections::BTreeMap::bulk_build_from_sorted_iter
 pub struct DedupSortedIter<K, V, I>
 where
     I: Iterator<Item = (K, V)>,
diff --git a/library/alloc/src/collections/btree/map/entry.rs b/library/alloc/src/collections/btree/map/entry.rs
index b6eecf9b0..cd7cdc192 100644
--- a/library/alloc/src/collections/btree/map/entry.rs
+++ b/library/alloc/src/collections/btree/map/entry.rs
@@ -133,6 +133,17 @@ impl<'a, K: Debug + Ord, V: Debug, A: Allocator + Clone> fmt::Display
     }
 }
 
+#[cfg(not(bootstrap))]
+#[unstable(feature = "map_try_insert", issue = "82766")]
+impl<'a, K: core::fmt::Debug + Ord, V: core::fmt::Debug> core::error::Error
+    for crate::collections::btree_map::OccupiedError<'a, K, V>
+{
+    #[allow(deprecated)]
+    fn description(&self) -> &str {
+        "key already exists"
+    }
+}
+
 impl<'a, K: Ord, V, A: Allocator + Clone> Entry<'a, K, V, A> {
     /// Ensures a value is in the entry by inserting the default if empty, and returns
     /// a mutable reference to the value in the entry.
diff --git a/library/alloc/src/collections/btree/node.rs b/library/alloc/src/collections/btree/node.rs
index d831161bc..f1d2d3b30 100644
--- a/library/alloc/src/collections/btree/node.rs
+++ b/library/alloc/src/collections/btree/node.rs
@@ -318,7 +318,7 @@ impl<BorrowType: marker::BorrowType, K, V, Type> NodeRef<BorrowType, K, V, Type>
     pub fn ascend(
         self,
     ) -> Result<Handle<NodeRef<BorrowType, K, V, marker::Internal>, marker::Edge>, Self> {
-        assert!(BorrowType::PERMITS_TRAVERSAL);
+        let _ = BorrowType::TRAVERSAL_PERMIT;
         // We need to use raw pointers to nodes because, if BorrowType is marker::ValMut,
         // there might be outstanding mutable references to values that we must not invalidate.
         let leaf_ptr: *const _ = Self::as_leaf_ptr(&self);
@@ -1003,7 +1003,7 @@ impl<BorrowType: marker::BorrowType, K, V>
     /// `edge.descend().ascend().unwrap()` and `node.ascend().unwrap().descend()` should
     /// both, upon success, do nothing.
     pub fn descend(self) -> NodeRef<BorrowType, K, V, marker::LeafOrInternal> {
-        assert!(BorrowType::PERMITS_TRAVERSAL);
+        let _ = BorrowType::TRAVERSAL_PERMIT;
         // We need to use raw pointers to nodes because, if BorrowType is
         // marker::ValMut, there might be outstanding mutable references to
         // values that we must not invalidate. There's no worry accessing the
@@ -1666,15 +1666,17 @@ pub mod marker {
     pub struct ValMut<'a>(PhantomData<&'a mut ()>);
 
     pub trait BorrowType {
-        // Whether node references of this borrow type allow traversing
-        // to other nodes in the tree.
-        const PERMITS_TRAVERSAL: bool = true;
+        // If node references of this borrow type allow traversing to other
+        // nodes in the tree, this constant can be evaluated. Thus reading it
+        // serves as a compile-time assertion.
+        const TRAVERSAL_PERMIT: () = ();
     }
     impl BorrowType for Owned {
-        // Traversal isn't needed, it happens using the result of `borrow_mut`.
+        // Reject evaluation, because traversal isn't needed. Instead traversal
+        // happens using the result of `borrow_mut`.
         // By disabling traversal, and only creating new references to roots,
         // we know that every reference of the `Owned` type is to a root node.
-        const PERMITS_TRAVERSAL: bool = false;
+        const TRAVERSAL_PERMIT: () = panic!();
     }
     impl BorrowType for Dying {}
     impl<'a> BorrowType for Immut<'a> {}
diff --git a/library/alloc/src/collections/linked_list.rs b/library/alloc/src/collections/linked_list.rs
index e21c8aa3b..6480fcaf9 100644
--- a/library/alloc/src/collections/linked_list.rs
+++ b/library/alloc/src/collections/linked_list.rs
@@ -1570,7 +1570,7 @@ impl<'a, T> CursorMut<'a, T> {
     /// that the cursor points to is unchanged, even if it is the "ghost" node.
     ///
     /// This operation should compute in *O*(1) time.
-    // `push_front` continues to point to "ghost" when it addes a node to mimic
+    // `push_front` continues to point to "ghost" when it adds a node to mimic
     // the behavior of `insert_before` on an empty list.
     #[unstable(feature = "linked_list_cursors", issue = "58533")]
     pub fn push_front(&mut self, elt: T) {
diff --git a/library/alloc/src/collections/mod.rs b/library/alloc/src/collections/mod.rs
index 628a5b155..21d0def08 100644
--- a/library/alloc/src/collections/mod.rs
+++ b/library/alloc/src/collections/mod.rs
@@ -152,3 +152,7 @@ trait SpecExtend<I: IntoIterator> {
     /// Extends `self` with the contents of the given iterator.
     fn spec_extend(&mut self, iter: I);
 }
+
+#[cfg(not(bootstrap))]
+#[stable(feature = "try_reserve", since = "1.57.0")]
+impl core::error::Error for TryReserveError {}
diff --git a/library/alloc/src/collections/vec_deque/drain.rs b/library/alloc/src/collections/vec_deque/drain.rs
index 05f94da6d..41baa7102 100644
--- a/library/alloc/src/collections/vec_deque/drain.rs
+++ b/library/alloc/src/collections/vec_deque/drain.rs
@@ -1,10 +1,12 @@
+use core::fmt;
 use core::iter::FusedIterator;
+use core::marker::PhantomData;
+use core::mem::{self, MaybeUninit};
 use core::ptr::{self, NonNull};
-use core::{fmt, mem};
 
 use crate::alloc::{Allocator, Global};
 
-use super::{count, Iter, VecDeque};
+use super::{count, wrap_index, VecDeque};
 
 /// A draining iterator over the elements of a `VecDeque`.
 ///
@@ -20,18 +22,24 @@ pub struct Drain<
 > {
     after_tail: usize,
     after_head: usize,
-    iter: Iter<'a, T>,
+    ring: NonNull<[T]>,
+    tail: usize,
+    head: usize,
     deque: NonNull<VecDeque<T, A>>,
+    _phantom: PhantomData<&'a T>,
 }
 
 impl<'a, T, A: Allocator> Drain<'a, T, A> {
     pub(super) unsafe fn new(
         after_tail: usize,
         after_head: usize,
-        iter: Iter<'a, T>,
+        ring: &'a [MaybeUninit<T>],
+        tail: usize,
+        head: usize,
         deque: NonNull<VecDeque<T, A>>,
     ) -> Self {
-        Drain { after_tail, after_head, iter, deque }
+        let ring = unsafe { NonNull::new_unchecked(ring as *const [MaybeUninit<T>] as *mut _) };
+        Drain { after_tail, after_head, ring, tail, head, deque, _phantom: PhantomData }
     }
 }
 
@@ -41,7 +49,9 @@ impl<T: fmt::Debug, A: Allocator> fmt::Debug for Drain<'_, T, A> {
         f.debug_tuple("Drain")
             .field(&self.after_tail)
             .field(&self.after_head)
-            .field(&self.iter)
+            .field(&self.ring)
+            .field(&self.tail)
+            .field(&self.head)
             .finish()
     }
 }
@@ -118,12 +128,21 @@ impl<T, A: Allocator> Iterator for Drain<'_, T, A> {
 
     #[inline]
     fn next(&mut self) -> Option<T> {
-        self.iter.next().map(|elt| unsafe { ptr::read(elt) })
+        if self.tail == self.head {
+            return None;
+        }
+        let tail = self.tail;
+        self.tail = wrap_index(self.tail.wrapping_add(1), self.ring.len());
+        // Safety:
+        // - `self.tail` in a ring buffer is always a valid index.
+        // - `self.head` and `self.tail` equality is checked above.
+        unsafe { Some(ptr::read(self.ring.as_ptr().get_unchecked_mut(tail))) }
     }
 
     #[inline]
     fn size_hint(&self) -> (usize, Option<usize>) {
-        self.iter.size_hint()
+        let len = count(self.tail, self.head, self.ring.len());
+        (len, Some(len))
     }
 }
 
@@ -131,7 +150,14 @@ impl<T, A: Allocator> Iterator for Drain<'_, T, A> {
 impl<T, A: Allocator> DoubleEndedIterator for Drain<'_, T, A> {
     #[inline]
     fn next_back(&mut self) -> Option<T> {
-        self.iter.next_back().map(|elt| unsafe { ptr::read(elt) })
+        if self.tail == self.head {
+            return None;
+        }
+        self.head = wrap_index(self.head.wrapping_sub(1), self.ring.len());
+        // Safety:
+        // - `self.head` in a ring buffer is always a valid index.
+        // - `self.head` and `self.tail` equality is checked above.
+        unsafe { Some(ptr::read(self.ring.as_ptr().get_unchecked_mut(self.head))) }
     }
 }
diff --git a/library/alloc/src/collections/vec_deque/mod.rs b/library/alloc/src/collections/vec_deque/mod.rs
index 4d895d837..e3f4deb08 100644
--- a/library/alloc/src/collections/vec_deque/mod.rs
+++ b/library/alloc/src/collections/vec_deque/mod.rs
@@ -794,7 +794,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
     /// in the given deque. The collection may reserve more space to speculatively avoid
     /// frequent reallocations. After calling `try_reserve`, capacity will be
     /// greater than or equal to `self.len() + additional` if it returns
-    /// `Ok(())`. Does nothing if capacity is already sufficient.
+    /// `Ok(())`. Does nothing if capacity is already sufficient. This method
+    /// preserves the contents even if an error occurs.
     ///
     /// # Errors
     ///
@@ -1333,9 +1334,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
             // it. We do not write to `self` nor reborrow to a mutable reference.
             // Hence the raw pointer we created above, for `deque`, remains valid.
             let ring = self.buffer_as_slice();
-            let iter = Iter::new(ring, drain_tail, drain_head);
 
-            Drain::new(drain_head, head, iter, deque)
+            Drain::new(drain_head, head, ring, drain_tail, drain_head, deque)
         }
     }
 
@@ -2447,8 +2447,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
             let mut right_offset = 0;
            for i in left_edge..right_edge {
                 right_offset = (i - left_edge) % (cap - right_edge);
-                let src: isize = (right_edge + right_offset) as isize;
-                ptr::swap(buf.add(i), buf.offset(src));
+                let src = right_edge + right_offset;
+                ptr::swap(buf.add(i), buf.add(src));
             }
             let n_ops = right_edge - left_edge;
             left_edge += n_ops;
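The `try_reserve` documentation touched in binary_heap.rs and vec_deque/mod.rs above now states that the collection keeps its contents when reservation fails. A minimal sketch of how a caller can lean on that guarantee; `push_all` is a hypothetical helper, not part of this diff:

    use std::collections::{TryReserveError, VecDeque};

    // Hypothetical helper: reserve up front so that an allocation failure
    // surfaces as an `Err` while `dst` keeps its existing contents.
    fn push_all(dst: &mut VecDeque<u32>, src: &[u32]) -> Result<(), TryReserveError> {
        dst.try_reserve(src.len())?;
        dst.extend(src.iter().copied());
        Ok(())
    }

    fn main() {
        let mut q = VecDeque::new();
        push_all(&mut q, &[1, 2, 3]).expect("reservation failed");
        assert_eq!(q.len(), 3);
    }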
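The new `Error` impl for `OccupiedError` in btree/map/entry.rs lets a failed `BTreeMap::try_insert` flow through ordinary error plumbing. A sketch, assuming a nightly toolchain with the unstable `map_try_insert` feature (issue #82766); `insert_new` is an illustrative helper:

    #![feature(map_try_insert)] // unstable; nightly only

    use std::collections::BTreeMap;
    use std::error::Error;

    // Illustrative helper: the `?` conversion to `Box<dyn Error>` only
    // compiles because `OccupiedError` now implements `Error`.
    fn insert_new<'m>(
        map: &'m mut BTreeMap<u32, &'static str>,
        key: u32,
        value: &'static str,
    ) -> Result<(), Box<dyn Error + 'm>> {
        map.try_insert(key, value)?;
        Ok(())
    }

    fn main() {
        let mut map = BTreeMap::new();
        insert_new(&mut map, 1, "one").unwrap();
        let err = insert_new(&mut map, 1, "uno").unwrap_err();
        println!("second insert failed: {err}");
    }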
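The node.rs change swaps a runtime `assert!(BorrowType::PERMITS_TRAVERSAL)` for an associated `()` constant whose evaluation panics for `Owned`, so an illegal traversal is rejected when the offending code is compiled (a post-monomorphization error) rather than when it runs. A standalone sketch of the pattern with made-up names (`Permit`, `Allowed`, `Forbidden` are not the types from node.rs):

    trait Permit {
        // Evaluating this constant is the assertion: for types that opt out,
        // const evaluation hits the `panic!` below and compilation of the
        // offending instantiation fails.
        const TRAVERSAL_PERMIT: () = ();
    }

    struct Allowed;
    struct Forbidden;

    impl Permit for Allowed {}
    impl Permit for Forbidden {
        const TRAVERSAL_PERMIT: () = panic!("traversal is not permitted here");
    }

    fn traverse<P: Permit>() {
        // Reading the constant forces it to be evaluated for the concrete `P`.
        let _ = P::TRAVERSAL_PERMIT;
        // ... the actual traversal would go here ...
    }

    fn main() {
        traverse::<Allowed>();
        // traverse::<Forbidden>(); // uncommenting this makes the build fail
    }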
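In vec_deque/drain.rs the borrowed `Iter` field is replaced by a raw `NonNull<[T]>` plus tail/head indices, and a `PhantomData<&'a T>` field keeps the lifetime of the deque the drain logically borrows in the type. A small sketch of that pattern under assumed names (`RawView` is not a std type):

    use std::marker::PhantomData;
    use std::ptr::NonNull;

    // The raw `NonNull` erases the lifetime, so `PhantomData<&'a T>` reattaches
    // the borrow for the borrow checker, variance, and auto traits.
    struct RawView<'a, T> {
        data: NonNull<[T]>,
        _marker: PhantomData<&'a T>,
    }

    impl<'a, T> RawView<'a, T> {
        fn new(slice: &'a [T]) -> Self {
            RawView { data: NonNull::from(slice), _marker: PhantomData }
        }

        fn first(&self) -> Option<&'a T> {
            // Safety: `data` came from a live `&'a [T]`, and `_marker` records
            // that the original borrow is still in force for `'a`.
            unsafe { self.data.as_ref().first() }
        }
    }

    fn main() {
        let xs = [10, 20, 30];
        let view = RawView::new(&xs);
        assert_eq!(view.first(), Some(&10));
    }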
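The rewritten `Drain::next`/`next_back` above step through the ring buffer with the deque's private `wrap_index` and `count` helpers. A simplified re-implementation (assuming, as the real code does at this point, a power-of-two capacity) to show what those calls compute:

    // Simplified stand-ins for the helpers in vec_deque/mod.rs.
    fn wrap_index(index: usize, size: usize) -> usize {
        debug_assert!(size.is_power_of_two());
        index & (size - 1)
    }

    fn count(tail: usize, head: usize, size: usize) -> usize {
        // Number of live elements from `tail` (inclusive) to `head`
        // (exclusive), taking wrap-around into account.
        head.wrapping_sub(tail) & (size - 1)
    }

    fn main() {
        let size = 8;
        // `head` has wrapped past the end of the buffer: logical length is 3.
        let (tail, head) = (6, 1);
        assert_eq!(count(tail, head, size), 3);
        assert_eq!(wrap_index(tail + 1, size), 7);
        assert_eq!(wrap_index(tail + 2, size), 0);
    }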