author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:20:29 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:20:29 +0000
commit     631cd5845e8de329d0e227aaa707d7ea228b8f8f (patch)
tree       a1b87c8f8cad01cf18f7c5f57a08f102771ed303 /library/alloc/src
parent     Adding debian version 1.69.0+dfsg1-1. (diff)
Merging upstream version 1.70.0+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'library/alloc/src')
-rw-r--r--  library/alloc/src/alloc.rs                            |   8
-rw-r--r--  library/alloc/src/borrow.rs                           |   5
-rw-r--r--  library/alloc/src/boxed.rs                            |  50
-rw-r--r--  library/alloc/src/collections/binary_heap/mod.rs      |  91
-rw-r--r--  library/alloc/src/collections/btree/map.rs            | 128
-rw-r--r--  library/alloc/src/collections/btree/map/tests.rs      |  67
-rw-r--r--  library/alloc/src/collections/btree/mod.rs            |   1
-rw-r--r--  library/alloc/src/collections/btree/navigate.rs       |  12
-rw-r--r--  library/alloc/src/collections/btree/set.rs            |  55
-rw-r--r--  library/alloc/src/collections/btree/set/tests.rs      |   1
-rw-r--r--  library/alloc/src/collections/linked_list.rs          |  38
-rw-r--r--  library/alloc/src/collections/vec_deque/drain.rs      |  40
-rw-r--r--  library/alloc/src/collections/vec_deque/into_iter.rs  |  30
-rw-r--r--  library/alloc/src/collections/vec_deque/iter.rs       |  33
-rw-r--r--  library/alloc/src/collections/vec_deque/iter_mut.rs   |  32
-rw-r--r--  library/alloc/src/collections/vec_deque/mod.rs        |  49
-rw-r--r--  library/alloc/src/lib.rs                              |  49
-rw-r--r--  library/alloc/src/raw_vec.rs                          |   1
-rw-r--r--  library/alloc/src/rc.rs                               |  44
-rw-r--r--  library/alloc/src/rc/tests.rs                         |  15
-rw-r--r--  library/alloc/src/str.rs                              |   4
-rw-r--r--  library/alloc/src/string.rs                           |  14
-rw-r--r--  library/alloc/src/sync.rs                             |  60
-rw-r--r--  library/alloc/src/tests.rs                            |  24
-rw-r--r--  library/alloc/src/vec/cow.rs                          |   1
-rw-r--r--  library/alloc/src/vec/into_iter.rs                    |  35
-rw-r--r--  library/alloc/src/vec/mod.rs                          |  20
27 files changed, 572 insertions, 335 deletions
diff --git a/library/alloc/src/alloc.rs b/library/alloc/src/alloc.rs
index 3a797bd5e..6f2ba957b 100644
--- a/library/alloc/src/alloc.rs
+++ b/library/alloc/src/alloc.rs
@@ -14,8 +14,6 @@ use core::ptr::{self, NonNull};
#[doc(inline)]
pub use core::alloc::*;
-use core::marker::Destruct;
-
#[cfg(test)]
mod tests;
@@ -331,16 +329,12 @@ unsafe fn exchange_malloc(size: usize, align: usize) -> *mut u8 {
#[cfg_attr(not(test), lang = "box_free")]
#[inline]
-#[rustc_const_unstable(feature = "const_box", issue = "92521")]
// This signature has to be the same as `Box`, otherwise an ICE will happen.
// When an additional parameter to `Box` is added (like `A: Allocator`), this has to be added here as
// well.
// For example if `Box` is changed to `struct Box<T: ?Sized, A: Allocator>(Unique<T>, A)`,
// this function has to be changed to `fn box_free<T: ?Sized, A: Allocator>(Unique<T>, A)` as well.
-pub(crate) const unsafe fn box_free<T: ?Sized, A: ~const Allocator + ~const Destruct>(
- ptr: Unique<T>,
- alloc: A,
-) {
+pub(crate) unsafe fn box_free<T: ?Sized, A: Allocator>(ptr: Unique<T>, alloc: A) {
unsafe {
let size = size_of_val(ptr.as_ref());
let align = min_align_of_val(ptr.as_ref());
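Dropping the `~const` bounds above reverts `box_free` to a plain generic function; the runtime behavior is unchanged. As a rough sketch of what the function does (not the actual lang-item wiring; `free_value` is a hypothetical stand-in, and the unstable `allocator_api` names are assumed):

```
#![feature(allocator_api)]
use std::alloc::{Allocator, Layout};
use std::mem::{align_of_val, size_of_val};
use std::ptr::NonNull;

// Compute the layout of the (possibly unsized) value behind `ptr`
// and hand the block back to the allocator that owns it.
unsafe fn free_value<T: ?Sized, A: Allocator>(ptr: NonNull<T>, alloc: A) {
    let size = size_of_val(ptr.as_ref());
    let align = align_of_val(ptr.as_ref());
    let layout = Layout::from_size_align_unchecked(size, align);
    alloc.deallocate(ptr.cast(), layout);
}
```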
diff --git a/library/alloc/src/borrow.rs b/library/alloc/src/borrow.rs
index 83a138559..0c8c796ae 100644
--- a/library/alloc/src/borrow.rs
+++ b/library/alloc/src/borrow.rs
@@ -328,10 +328,9 @@ impl<B: ?Sized + ToOwned> Cow<'_, B> {
}
#[stable(feature = "rust1", since = "1.0.0")]
-#[rustc_const_unstable(feature = "const_deref", issue = "88955")]
-impl<B: ?Sized + ToOwned> const Deref for Cow<'_, B>
+impl<B: ?Sized + ToOwned> Deref for Cow<'_, B>
where
- B::Owned: ~const Borrow<B>,
+ B::Owned: Borrow<B>,
{
type Target = B;
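The change above only removes the unstable `const Deref` impl; in non-const code, `Cow` still derefs to the borrowed form of either variant. A small usage example on stable:

```
use std::borrow::Cow;

// Deref lets both variants be used as &str transparently.
fn len_of(s: Cow<'_, str>) -> usize {
    s.len()
}

fn main() {
    assert_eq!(len_of(Cow::Borrowed("abc")), 3);
    assert_eq!(len_of(Cow::Owned(String::from("abcd"))), 4);
}
```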
diff --git a/library/alloc/src/boxed.rs b/library/alloc/src/boxed.rs
index 44a378990..7f88327bf 100644
--- a/library/alloc/src/boxed.rs
+++ b/library/alloc/src/boxed.rs
@@ -150,16 +150,13 @@ use core::any::Any;
use core::async_iter::AsyncIterator;
use core::borrow;
use core::cmp::Ordering;
-use core::convert::{From, TryFrom};
use core::error::Error;
use core::fmt;
use core::future::Future;
use core::hash::{Hash, Hasher};
-#[cfg(not(no_global_oom_handling))]
-use core::iter::FromIterator;
-use core::iter::{FusedIterator, Iterator};
+use core::iter::FusedIterator;
use core::marker::Tuple;
-use core::marker::{Destruct, Unpin, Unsize};
+use core::marker::Unsize;
use core::mem;
use core::ops::{
CoerceUnsized, Deref, DerefMut, DispatchFromDyn, Generator, GeneratorState, Receiver,
@@ -214,6 +211,7 @@ impl<T> Box<T> {
#[inline(always)]
#[stable(feature = "rust1", since = "1.0.0")]
#[must_use]
+ #[rustc_diagnostic_item = "box_new"]
pub fn new(x: T) -> Self {
#[rustc_box]
Box::new(x)
@@ -375,12 +373,11 @@ impl<T, A: Allocator> Box<T, A> {
/// ```
#[cfg(not(no_global_oom_handling))]
#[unstable(feature = "allocator_api", issue = "32838")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[must_use]
#[inline]
- pub const fn new_in(x: T, alloc: A) -> Self
+ pub fn new_in(x: T, alloc: A) -> Self
where
- A: ~const Allocator + ~const Destruct,
+ A: Allocator,
{
let mut boxed = Self::new_uninit_in(alloc);
unsafe {
@@ -405,12 +402,10 @@ impl<T, A: Allocator> Box<T, A> {
/// # Ok::<(), std::alloc::AllocError>(())
/// ```
#[unstable(feature = "allocator_api", issue = "32838")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[inline]
- pub const fn try_new_in(x: T, alloc: A) -> Result<Self, AllocError>
+ pub fn try_new_in(x: T, alloc: A) -> Result<Self, AllocError>
where
- T: ~const Destruct,
- A: ~const Allocator + ~const Destruct,
+ A: Allocator,
{
let mut boxed = Self::try_new_uninit_in(alloc)?;
unsafe {
@@ -440,13 +435,12 @@ impl<T, A: Allocator> Box<T, A> {
/// assert_eq!(*five, 5)
/// ```
#[unstable(feature = "allocator_api", issue = "32838")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[cfg(not(no_global_oom_handling))]
#[must_use]
// #[unstable(feature = "new_uninit", issue = "63291")]
- pub const fn new_uninit_in(alloc: A) -> Box<mem::MaybeUninit<T>, A>
+ pub fn new_uninit_in(alloc: A) -> Box<mem::MaybeUninit<T>, A>
where
- A: ~const Allocator + ~const Destruct,
+ A: Allocator,
{
let layout = Layout::new::<mem::MaybeUninit<T>>();
// NOTE: Prefer match over unwrap_or_else since closure sometimes not inlineable.
@@ -481,10 +475,9 @@ impl<T, A: Allocator> Box<T, A> {
/// ```
#[unstable(feature = "allocator_api", issue = "32838")]
// #[unstable(feature = "new_uninit", issue = "63291")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
- pub const fn try_new_uninit_in(alloc: A) -> Result<Box<mem::MaybeUninit<T>, A>, AllocError>
+ pub fn try_new_uninit_in(alloc: A) -> Result<Box<mem::MaybeUninit<T>, A>, AllocError>
where
- A: ~const Allocator + ~const Destruct,
+ A: Allocator,
{
let layout = Layout::new::<mem::MaybeUninit<T>>();
let ptr = alloc.allocate(layout)?.cast();
@@ -512,13 +505,12 @@ impl<T, A: Allocator> Box<T, A> {
///
/// [zeroed]: mem::MaybeUninit::zeroed
#[unstable(feature = "allocator_api", issue = "32838")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[cfg(not(no_global_oom_handling))]
// #[unstable(feature = "new_uninit", issue = "63291")]
#[must_use]
- pub const fn new_zeroed_in(alloc: A) -> Box<mem::MaybeUninit<T>, A>
+ pub fn new_zeroed_in(alloc: A) -> Box<mem::MaybeUninit<T>, A>
where
- A: ~const Allocator + ~const Destruct,
+ A: Allocator,
{
let layout = Layout::new::<mem::MaybeUninit<T>>();
// NOTE: Prefer match over unwrap_or_else since closure sometimes not inlineable.
@@ -553,10 +545,9 @@ impl<T, A: Allocator> Box<T, A> {
/// [zeroed]: mem::MaybeUninit::zeroed
#[unstable(feature = "allocator_api", issue = "32838")]
// #[unstable(feature = "new_uninit", issue = "63291")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
- pub const fn try_new_zeroed_in(alloc: A) -> Result<Box<mem::MaybeUninit<T>, A>, AllocError>
+ pub fn try_new_zeroed_in(alloc: A) -> Result<Box<mem::MaybeUninit<T>, A>, AllocError>
where
- A: ~const Allocator + ~const Destruct,
+ A: Allocator,
{
let layout = Layout::new::<mem::MaybeUninit<T>>();
let ptr = alloc.allocate_zeroed(layout)?.cast();
@@ -572,12 +563,11 @@ impl<T, A: Allocator> Box<T, A> {
/// construct a (pinned) `Box` in a different way than with [`Box::new_in`].
#[cfg(not(no_global_oom_handling))]
#[unstable(feature = "allocator_api", issue = "32838")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[must_use]
#[inline(always)]
- pub const fn pin_in(x: T, alloc: A) -> Pin<Self>
+ pub fn pin_in(x: T, alloc: A) -> Pin<Self>
where
- A: 'static + ~const Allocator + ~const Destruct,
+ A: 'static + Allocator,
{
Self::into_pin(Self::new_in(x, alloc))
}
@@ -604,12 +594,8 @@ impl<T, A: Allocator> Box<T, A> {
/// assert_eq!(Box::into_inner(c), 5);
/// ```
#[unstable(feature = "box_into_inner", issue = "80437")]
- #[rustc_const_unstable(feature = "const_box", issue = "92521")]
#[inline]
- pub const fn into_inner(boxed: Self) -> T
- where
- Self: ~const Destruct,
- {
+ pub fn into_inner(boxed: Self) -> T {
*boxed
}
}
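Likewise, these `Box` constructors lose their `const` qualifier but keep their (still unstable) `allocator_api` surface. A minimal nightly sketch of the `_in` constructors:

```
#![feature(allocator_api)]
use std::alloc::Global;

fn main() {
    // Allocate directly in an explicit allocator.
    let b = Box::new_in(5, Global);
    assert_eq!(*b, 5);

    // Fallible variant: surfaces allocation failure instead of aborting.
    let t = Box::try_new_in(6, Global).expect("allocation failed");
    assert_eq!(*t, 6);
}
```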
diff --git a/library/alloc/src/collections/binary_heap/mod.rs b/library/alloc/src/collections/binary_heap/mod.rs
index f1d0a305d..2c089bb31 100644
--- a/library/alloc/src/collections/binary_heap/mod.rs
+++ b/library/alloc/src/collections/binary_heap/mod.rs
@@ -144,7 +144,7 @@
#![stable(feature = "rust1", since = "1.0.0")]
use core::fmt;
-use core::iter::{FromIterator, FusedIterator, InPlaceIterable, SourceIter, TrustedLen};
+use core::iter::{FusedIterator, InPlaceIterable, SourceIter, TrustedLen};
use core::mem::{self, swap, ManuallyDrop};
use core::num::NonZeroUsize;
use core::ops::{Deref, DerefMut};
@@ -154,8 +154,6 @@ use crate::collections::TryReserveError;
use crate::slice;
use crate::vec::{self, AsVecIntoIter, Vec};
-use super::SpecExtend;
-
#[cfg(test)]
mod tests;
@@ -265,7 +263,6 @@ mod tests;
/// more detailed analysis.
///
/// [`core::cmp::Reverse`]: core::cmp::Reverse
-/// [`Ord`]: core::cmp::Ord
/// [`Cell`]: core::cell::Cell
/// [`RefCell`]: core::cell::RefCell
/// [push]: BinaryHeap::push
@@ -400,6 +397,17 @@ impl<T: fmt::Debug> fmt::Debug for BinaryHeap<T> {
}
}
+struct RebuildOnDrop<'a, T: Ord> {
+ heap: &'a mut BinaryHeap<T>,
+ rebuild_from: usize,
+}
+
+impl<'a, T: Ord> Drop for RebuildOnDrop<'a, T> {
+ fn drop(&mut self) {
+ self.heap.rebuild_tail(self.rebuild_from);
+ }
+}
+
impl<T: Ord> BinaryHeap<T> {
/// Creates an empty `BinaryHeap` as a max-heap.
///
@@ -837,7 +845,6 @@ impl<T: Ord> BinaryHeap<T> {
/// Basic usage:
///
/// ```
- /// #![feature(binary_heap_retain)]
/// use std::collections::BinaryHeap;
///
/// let mut heap = BinaryHeap::from([-10, -5, 1, 2, 4, 13]);
@@ -846,35 +853,24 @@ impl<T: Ord> BinaryHeap<T> {
///
/// assert_eq!(heap.into_sorted_vec(), [-10, 2, 4])
/// ```
- #[unstable(feature = "binary_heap_retain", issue = "71503")]
+ #[stable(feature = "binary_heap_retain", since = "1.70.0")]
pub fn retain<F>(&mut self, mut f: F)
where
F: FnMut(&T) -> bool,
{
- struct RebuildOnDrop<'a, T: Ord> {
- heap: &'a mut BinaryHeap<T>,
- first_removed: usize,
- }
-
- let mut guard = RebuildOnDrop { first_removed: self.len(), heap: self };
-
+ // `rebuild_from` will be updated to the first touched element below, and the rebuild will
+ // only be done for the tail.
+ let mut guard = RebuildOnDrop { rebuild_from: self.len(), heap: self };
let mut i = 0;
+
guard.heap.data.retain(|e| {
let keep = f(e);
- if !keep && i < guard.first_removed {
- guard.first_removed = i;
+ if !keep && i < guard.rebuild_from {
+ guard.rebuild_from = i;
}
i += 1;
keep
});
-
- impl<'a, T: Ord> Drop for RebuildOnDrop<'a, T> {
- fn drop(&mut self) {
- // data[..first_removed] is untouched, so we only need to
- // rebuild the tail:
- self.heap.rebuild_tail(self.first_removed);
- }
- }
}
}
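Hoisting `RebuildOnDrop` out of `retain` is a standard panic-safety pattern: the fix-up in `Drop` runs whether `retain` returns normally or the user predicate unwinds, so the heap invariant is never left broken. A self-contained sketch of the same drop-guard idea, applied to a hypothetical type that caches the sum of its elements:

```
struct Summed {
    data: Vec<i32>,
    sum: i32, // invariant: sum == data.iter().sum()
}

impl Summed {
    fn retain(&mut self, f: impl Fn(&i32) -> bool) {
        struct FixSum<'a>(&'a mut Summed);
        impl Drop for FixSum<'_> {
            fn drop(&mut self) {
                // Runs on normal exit *and* if `f` unwinds,
                // so the invariant is always restored.
                self.0.sum = self.0.data.iter().sum();
            }
        }
        let guard = FixSum(self);
        guard.0.data.retain(|x| f(x));
    }
}
```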
@@ -1421,7 +1417,6 @@ impl<T> FusedIterator for Iter<'_, T> {}
/// (provided by the [`IntoIterator`] trait). See its documentation for more.
///
/// [`into_iter`]: BinaryHeap::into_iter
-/// [`IntoIterator`]: core::iter::IntoIterator
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Clone)]
pub struct IntoIter<T> {
@@ -1468,6 +1463,20 @@ impl<T> ExactSizeIterator for IntoIter<T> {
#[stable(feature = "fused", since = "1.26.0")]
impl<T> FusedIterator for IntoIter<T> {}
+#[stable(feature = "default_iters", since = "1.70.0")]
+impl<T> Default for IntoIter<T> {
+ /// Creates an empty `binary_heap::IntoIter`.
+ ///
+ /// ```
+ /// # use std::collections::binary_heap;
+ /// let iter: binary_heap::IntoIter<u8> = Default::default();
+ /// assert_eq!(iter.len(), 0);
+ /// ```
+ fn default() -> Self {
+ IntoIter { iter: Default::default() }
+ }
+}
+
// In addition to the SAFETY invariants of the following three unsafe traits
// also refer to the vec::in_place_collect module documentation to get an overview
#[unstable(issue = "none", feature = "inplace_iteration")]
@@ -1715,7 +1724,8 @@ impl<'a, T> IntoIterator for &'a BinaryHeap<T> {
impl<T: Ord> Extend<T> for BinaryHeap<T> {
#[inline]
fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
- <Self as SpecExtend<I>>::spec_extend(self, iter);
+ let guard = RebuildOnDrop { rebuild_from: self.len(), heap: self };
+ guard.heap.data.extend(iter);
}
#[inline]
@@ -1729,37 +1739,6 @@ impl<T: Ord> Extend<T> for BinaryHeap<T> {
}
}
-impl<T: Ord, I: IntoIterator<Item = T>> SpecExtend<I> for BinaryHeap<T> {
- default fn spec_extend(&mut self, iter: I) {
- self.extend_desugared(iter.into_iter());
- }
-}
-
-impl<T: Ord> SpecExtend<Vec<T>> for BinaryHeap<T> {
- fn spec_extend(&mut self, ref mut other: Vec<T>) {
- let start = self.data.len();
- self.data.append(other);
- self.rebuild_tail(start);
- }
-}
-
-impl<T: Ord> SpecExtend<BinaryHeap<T>> for BinaryHeap<T> {
- fn spec_extend(&mut self, ref mut other: BinaryHeap<T>) {
- self.append(other);
- }
-}
-
-impl<T: Ord> BinaryHeap<T> {
- fn extend_desugared<I: IntoIterator<Item = T>>(&mut self, iter: I) {
- let iterator = iter.into_iter();
- let (lower, _) = iterator.size_hint();
-
- self.reserve(lower);
-
- iterator.for_each(move |elem| self.push(elem));
- }
-}
-
#[stable(feature = "extend_ref", since = "1.2.0")]
impl<'a, T: 'a + Ord + Copy> Extend<&'a T> for BinaryHeap<T> {
fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
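With `SpecExtend` gone, `Extend` now bulk-appends into the backing `Vec` and lets the same guard rebuild the tail once, also on unwind. The observable behavior is unchanged:

```
use std::collections::BinaryHeap;

fn main() {
    let mut heap = BinaryHeap::from([3, 1]);
    heap.extend([5, 2, 4]); // one tail rebuild after the bulk append
    assert_eq!(heap.into_sorted_vec(), [1, 2, 3, 4, 5]);
}
```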
diff --git a/library/alloc/src/collections/btree/map.rs b/library/alloc/src/collections/btree/map.rs
index 386cd1a16..afdc99817 100644
--- a/library/alloc/src/collections/btree/map.rs
+++ b/library/alloc/src/collections/btree/map.rs
@@ -3,7 +3,7 @@ use core::borrow::Borrow;
use core::cmp::Ordering;
use core::fmt::{self, Debug};
use core::hash::{Hash, Hasher};
-use core::iter::{FromIterator, FusedIterator};
+use core::iter::FusedIterator;
use core::marker::PhantomData;
use core::mem::{self, ManuallyDrop};
use core::ops::{Bound, Index, RangeBounds};
@@ -362,6 +362,20 @@ impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for Iter<'_, K, V> {
}
}
+#[stable(feature = "default_iters", since = "1.70.0")]
+impl<'a, K: 'a, V: 'a> Default for Iter<'a, K, V> {
+ /// Creates an empty `btree_map::Iter`.
+ ///
+ /// ```
+ /// # use std::collections::btree_map;
+ /// let iter: btree_map::Iter<'_, u8, u8> = Default::default();
+ /// assert_eq!(iter.len(), 0);
+ /// ```
+ fn default() -> Self {
+ Iter { range: Default::default(), length: 0 }
+ }
+}
+
/// A mutable iterator over the entries of a `BTreeMap`.
///
/// This `struct` is created by the [`iter_mut`] method on [`BTreeMap`]. See its
@@ -386,13 +400,26 @@ impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for IterMut<'_, K, V> {
}
}
+#[stable(feature = "default_iters", since = "1.70.0")]
+impl<'a, K: 'a, V: 'a> Default for IterMut<'a, K, V> {
+ /// Creates an empty `btree_map::IterMut`.
+ ///
+ /// ```
+ /// # use std::collections::btree_map;
+ /// let iter: btree_map::IterMut<'_, u8, u8> = Default::default();
+ /// assert_eq!(iter.len(), 0);
+ /// ```
+ fn default() -> Self {
+ IterMut { range: Default::default(), length: 0, _marker: PhantomData {} }
+ }
+}
+
/// An owning iterator over the entries of a `BTreeMap`.
///
/// This `struct` is created by the [`into_iter`] method on [`BTreeMap`]
/// (provided by the [`IntoIterator`] trait). See its documentation for more.
///
/// [`into_iter`]: IntoIterator::into_iter
-/// [`IntoIterator`]: core::iter::IntoIterator
#[stable(feature = "rust1", since = "1.0.0")]
#[rustc_insignificant_dtor]
pub struct IntoIter<
@@ -421,6 +448,23 @@ impl<K: Debug, V: Debug, A: Allocator + Clone> Debug for IntoIter<K, V, A> {
}
}
+#[stable(feature = "default_iters", since = "1.70.0")]
+impl<K, V, A> Default for IntoIter<K, V, A>
+where
+ A: Allocator + Default + Clone,
+{
+ /// Creates an empty `btree_map::IntoIter`.
+ ///
+ /// ```
+ /// # use std::collections::btree_map;
+ /// let iter: btree_map::IntoIter<u8, u8> = Default::default();
+ /// assert_eq!(iter.len(), 0);
+ /// ```
+ fn default() -> Self {
+ IntoIter { range: Default::default(), length: 0, alloc: Default::default() }
+ }
+}
+
/// An iterator over the keys of a `BTreeMap`.
///
/// This `struct` is created by the [`keys`] method on [`BTreeMap`]. See its
@@ -605,7 +649,7 @@ impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
#[stable(feature = "rust1", since = "1.0.0")]
pub fn clear(&mut self) {
// avoid moving the allocator
- mem::drop(BTreeMap {
+ drop(BTreeMap {
root: mem::replace(&mut self.root, None),
length: mem::replace(&mut self.length, 0),
alloc: self.alloc.clone(),
@@ -1768,6 +1812,20 @@ impl<K, V> Clone for Keys<'_, K, V> {
}
}
+#[stable(feature = "default_iters", since = "1.70.0")]
+impl<K, V> Default for Keys<'_, K, V> {
+ /// Creates an empty `btree_map::Keys`.
+ ///
+ /// ```
+ /// # use std::collections::btree_map;
+ /// let iter: btree_map::Keys<'_, u8, u8> = Default::default();
+ /// assert_eq!(iter.len(), 0);
+ /// ```
+ fn default() -> Self {
+ Keys { inner: Default::default() }
+ }
+}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, K, V> Iterator for Values<'a, K, V> {
type Item = &'a V;
@@ -1809,6 +1867,20 @@ impl<K, V> Clone for Values<'_, K, V> {
}
}
+#[stable(feature = "default_iters", since = "1.70.0")]
+impl<K, V> Default for Values<'_, K, V> {
+ /// Creates an empty `btree_map::Values`.
+ ///
+ /// ```
+ /// # use std::collections::btree_map;
+ /// let iter: btree_map::Values<'_, u8, u8> = Default::default();
+ /// assert_eq!(iter.len(), 0);
+ /// ```
+ fn default() -> Self {
+ Values { inner: Default::default() }
+ }
+}
+
/// An iterator produced by calling `drain_filter` on BTreeMap.
#[unstable(feature = "btree_drain_filter", issue = "70530")]
pub struct DrainFilter<
@@ -1945,6 +2017,20 @@ impl<'a, K, V> Iterator for Range<'a, K, V> {
}
}
+#[stable(feature = "default_iters", since = "1.70.0")]
+impl<K, V> Default for Range<'_, K, V> {
+ /// Creates an empty `btree_map::Range`.
+ ///
+ /// ```
+ /// # use std::collections::btree_map;
+ /// let iter: btree_map::Range<'_, u8, u8> = Default::default();
+ /// assert_eq!(iter.count(), 0);
+ /// ```
+ fn default() -> Self {
+ Range { inner: Default::default() }
+ }
+}
+
#[stable(feature = "map_values_mut", since = "1.10.0")]
impl<'a, K, V> Iterator for ValuesMut<'a, K, V> {
type Item = &'a mut V;
@@ -2021,6 +2107,23 @@ impl<K, V, A: Allocator + Clone> ExactSizeIterator for IntoKeys<K, V, A> {
#[stable(feature = "map_into_keys_values", since = "1.54.0")]
impl<K, V, A: Allocator + Clone> FusedIterator for IntoKeys<K, V, A> {}
+#[stable(feature = "default_iters", since = "1.70.0")]
+impl<K, V, A> Default for IntoKeys<K, V, A>
+where
+ A: Allocator + Default + Clone,
+{
+ /// Creates an empty `btree_map::IntoKeys`.
+ ///
+ /// ```
+ /// # use std::collections::btree_map;
+ /// let iter: btree_map::IntoKeys<u8, u8> = Default::default();
+ /// assert_eq!(iter.len(), 0);
+ /// ```
+ fn default() -> Self {
+ IntoKeys { inner: Default::default() }
+ }
+}
+
#[stable(feature = "map_into_keys_values", since = "1.54.0")]
impl<K, V, A: Allocator + Clone> Iterator for IntoValues<K, V, A> {
type Item = V;
@@ -2055,6 +2158,23 @@ impl<K, V, A: Allocator + Clone> ExactSizeIterator for IntoValues<K, V, A> {
#[stable(feature = "map_into_keys_values", since = "1.54.0")]
impl<K, V, A: Allocator + Clone> FusedIterator for IntoValues<K, V, A> {}
+#[stable(feature = "default_iters", since = "1.70.0")]
+impl<K, V, A> Default for IntoValues<K, V, A>
+where
+ A: Allocator + Default + Clone,
+{
+ /// Creates an empty `btree_map::IntoValues`.
+ ///
+ /// ```
+ /// # use std::collections::btree_map;
+ /// let iter: btree_map::IntoValues<u8, u8> = Default::default();
+ /// assert_eq!(iter.len(), 0);
+ /// ```
+ fn default() -> Self {
+ IntoValues { inner: Default::default() }
+ }
+}
+
#[stable(feature = "btree_range", since = "1.17.0")]
impl<'a, K, V> DoubleEndedIterator for Range<'a, K, V> {
fn next_back(&mut self) -> Option<(&'a K, &'a V)> {
@@ -3062,7 +3182,7 @@ impl<'a, K: Ord, V, A: Allocator + Clone> CursorMut<'a, K, V, A> {
panic!("key must be ordered above the current element");
}
}
- if let Some((next, _)) = self.peek_prev() {
+ if let Some((next, _)) = self.peek_next() {
if &key >= next {
panic!("key must be ordered below the next element");
}
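The batch of `Default` impls above (stabilized as `default_iters` in 1.70) makes these iterator types usable where a value must exist before any map does, for example as derivable struct fields. A small sketch with a hypothetical `Pending` struct:

```
use std::collections::btree_map;

// The struct can derive Default because btree_map::Range now does.
#[derive(Default)]
struct Pending<'a> {
    items: btree_map::Range<'a, u8, u8>,
}

fn main() {
    let p = Pending::default();
    assert_eq!(p.items.count(), 0);
}
```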
diff --git a/library/alloc/src/collections/btree/map/tests.rs b/library/alloc/src/collections/btree/map/tests.rs
index 76c2f27b4..da00d83bd 100644
--- a/library/alloc/src/collections/btree/map/tests.rs
+++ b/library/alloc/src/collections/btree/map/tests.rs
@@ -9,8 +9,7 @@ use crate::testing::ord_chaos::{Cyclic3, Governed, Governor};
use crate::testing::rng::DeterministicRng;
use crate::vec::Vec;
use std::cmp::Ordering;
-use std::convert::TryFrom;
-use std::iter::{self, FromIterator};
+use std::iter;
use std::mem;
use std::ops::Bound::{self, Excluded, Included, Unbounded};
use std::ops::RangeBounds;
@@ -2385,3 +2384,67 @@ fn test_cursor_mut() {
assert_eq!(cur.key(), Some(&4));
assert_eq!(map, BTreeMap::from([(0, '?'), (1, 'a'), (3, 'c'), (4, 'd')]));
}
+
+#[should_panic(expected = "key must be ordered above the previous element")]
+#[test]
+fn test_cursor_mut_insert_before_1() {
+ let mut map = BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c')]);
+ let mut cur = map.upper_bound_mut(Bound::Included(&2));
+ cur.insert_before(0, 'd');
+}
+
+#[should_panic(expected = "key must be ordered above the previous element")]
+#[test]
+fn test_cursor_mut_insert_before_2() {
+ let mut map = BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c')]);
+ let mut cur = map.upper_bound_mut(Bound::Included(&2));
+ cur.insert_before(1, 'd');
+}
+
+#[should_panic(expected = "key must be ordered below the current element")]
+#[test]
+fn test_cursor_mut_insert_before_3() {
+ let mut map = BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c')]);
+ let mut cur = map.upper_bound_mut(Bound::Included(&2));
+ cur.insert_before(2, 'd');
+}
+
+#[should_panic(expected = "key must be ordered below the current element")]
+#[test]
+fn test_cursor_mut_insert_before_4() {
+ let mut map = BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c')]);
+ let mut cur = map.upper_bound_mut(Bound::Included(&2));
+ cur.insert_before(3, 'd');
+}
+
+#[should_panic(expected = "key must be ordered above the current element")]
+#[test]
+fn test_cursor_mut_insert_after_1() {
+ let mut map = BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c')]);
+ let mut cur = map.upper_bound_mut(Bound::Included(&2));
+ cur.insert_after(1, 'd');
+}
+
+#[should_panic(expected = "key must be ordered above the current element")]
+#[test]
+fn test_cursor_mut_insert_after_2() {
+ let mut map = BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c')]);
+ let mut cur = map.upper_bound_mut(Bound::Included(&2));
+ cur.insert_after(2, 'd');
+}
+
+#[should_panic(expected = "key must be ordered below the next element")]
+#[test]
+fn test_cursor_mut_insert_after_3() {
+ let mut map = BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c')]);
+ let mut cur = map.upper_bound_mut(Bound::Included(&2));
+ cur.insert_after(3, 'd');
+}
+
+#[should_panic(expected = "key must be ordered below the next element")]
+#[test]
+fn test_cursor_mut_insert_after_4() {
+ let mut map = BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c')]);
+ let mut cur = map.upper_bound_mut(Bound::Included(&2));
+ cur.insert_after(4, 'd');
+}
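These tests pin down the ordering checks of the cursor API, including the `peek_prev` → `peek_next` fix above. A sketch of the happy path, assuming the unstable `btree_cursors` feature as it exists in this version:

```
#![feature(btree_cursors)]
use std::collections::BTreeMap;
use std::ops::Bound;

fn main() {
    let mut map = BTreeMap::from([(1, 'a'), (3, 'c')]);
    let mut cur = map.upper_bound_mut(Bound::Included(&1)); // points at key 1
    cur.insert_after(2, 'b'); // 1 < 2 < 3, so this is accepted
    assert_eq!(map, BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c')]));
}
```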
diff --git a/library/alloc/src/collections/btree/mod.rs b/library/alloc/src/collections/btree/mod.rs
index 7552f2fc0..c7d0144de 100644
--- a/library/alloc/src/collections/btree/mod.rs
+++ b/library/alloc/src/collections/btree/mod.rs
@@ -13,7 +13,6 @@ pub mod set;
mod set_val;
mod split;
-#[doc(hidden)]
trait Recover<Q: ?Sized> {
type Key;
diff --git a/library/alloc/src/collections/btree/navigate.rs b/library/alloc/src/collections/btree/navigate.rs
index b890717e5..a85a31624 100644
--- a/library/alloc/src/collections/btree/navigate.rs
+++ b/library/alloc/src/collections/btree/navigate.rs
@@ -19,6 +19,12 @@ impl<'a, K: 'a, V: 'a> Clone for LeafRange<marker::Immut<'a>, K, V> {
}
}
+impl<B, K, V> Default for LeafRange<B, K, V> {
+ fn default() -> Self {
+ LeafRange { front: None, back: None }
+ }
+}
+
impl<BorrowType, K, V> LeafRange<BorrowType, K, V> {
pub fn none() -> Self {
LeafRange { front: None, back: None }
@@ -124,6 +130,12 @@ pub struct LazyLeafRange<BorrowType, K, V> {
back: Option<LazyLeafHandle<BorrowType, K, V>>,
}
+impl<B, K, V> Default for LazyLeafRange<B, K, V> {
+ fn default() -> Self {
+ LazyLeafRange { front: None, back: None }
+ }
+}
+
impl<'a, K: 'a, V: 'a> Clone for LazyLeafRange<marker::Immut<'a>, K, V> {
fn clone(&self) -> Self {
LazyLeafRange { front: self.front.clone(), back: self.back.clone() }
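The impls here are written by hand because `#[derive(Default)]` would demand `B: Default`, `K: Default`, and `V: Default` even though the fields only need `None`. The general pattern, shown on a hypothetical type:

```
struct Ends<B, K, V> {
    front: Option<(B, K)>,
    back: Option<(B, V)>,
}

// Manual impl: no bounds on B, K, V, since `None` requires none.
impl<B, K, V> Default for Ends<B, K, V> {
    fn default() -> Self {
        Ends { front: None, back: None }
    }
}
```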
diff --git a/library/alloc/src/collections/btree/set.rs b/library/alloc/src/collections/btree/set.rs
index 4ddb21192..da952a13f 100644
--- a/library/alloc/src/collections/btree/set.rs
+++ b/library/alloc/src/collections/btree/set.rs
@@ -1,13 +1,10 @@
-// This is pretty much entirely stolen from TreeSet, since BTreeMap has an identical interface
-// to TreeMap
-
use crate::vec::Vec;
use core::borrow::Borrow;
use core::cmp::Ordering::{self, Equal, Greater, Less};
use core::cmp::{max, min};
use core::fmt::{self, Debug};
use core::hash::{Hash, Hasher};
-use core::iter::{FromIterator, FusedIterator, Peekable};
+use core::iter::{FusedIterator, Peekable};
use core::mem::ManuallyDrop;
use core::ops::{BitAnd, BitOr, BitXor, RangeBounds, Sub};
@@ -18,8 +15,6 @@ use super::Recover;
use crate::alloc::{Allocator, Global};
-// FIXME(conventions): implement bounded iterators
-
/// An ordered set based on a B-Tree.
///
/// See [`BTreeMap`]'s documentation for a detailed discussion of this collection's performance
@@ -35,7 +30,6 @@ use crate::alloc::{Allocator, Global};
/// Iterators returned by [`BTreeSet::iter`] produce their items in order, and take worst-case
/// logarithmic and amortized constant time per item returned.
///
-/// [`Ord`]: core::cmp::Ord
/// [`Cell`]: core::cell::Cell
/// [`RefCell`]: core::cell::RefCell
///
@@ -152,7 +146,6 @@ impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> {
/// (provided by the [`IntoIterator`] trait). See its documentation for more.
///
/// [`into_iter`]: BTreeSet#method.into_iter
-/// [`IntoIterator`]: core::iter::IntoIterator
#[stable(feature = "rust1", since = "1.0.0")]
#[derive(Debug)]
pub struct IntoIter<
@@ -1544,6 +1537,21 @@ impl<T, A: Allocator + Clone> Iterator for IntoIter<T, A> {
self.iter.size_hint()
}
}
+
+#[stable(feature = "default_iters", since = "1.70.0")]
+impl<T> Default for Iter<'_, T> {
+ /// Creates an empty `btree_set::Iter`.
+ ///
+ /// ```
+ /// # use std::collections::btree_set;
+ /// let iter: btree_set::Iter<'_, u8> = Default::default();
+ /// assert_eq!(iter.len(), 0);
+ /// ```
+ fn default() -> Self {
+ Iter { iter: Default::default() }
+ }
+}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl<T, A: Allocator + Clone> DoubleEndedIterator for IntoIter<T, A> {
fn next_back(&mut self) -> Option<T> {
@@ -1560,6 +1568,23 @@ impl<T, A: Allocator + Clone> ExactSizeIterator for IntoIter<T, A> {
#[stable(feature = "fused", since = "1.26.0")]
impl<T, A: Allocator + Clone> FusedIterator for IntoIter<T, A> {}
+#[stable(feature = "default_iters", since = "1.70.0")]
+impl<T, A> Default for IntoIter<T, A>
+where
+ A: Allocator + Default + Clone,
+{
+ /// Creates an empty `btree_set::IntoIter`.
+ ///
+ /// ```
+ /// # use std::collections::btree_set;
+ /// let iter: btree_set::IntoIter<u8> = Default::default();
+ /// assert_eq!(iter.len(), 0);
+ /// ```
+ fn default() -> Self {
+ IntoIter { iter: Default::default() }
+ }
+}
+
#[stable(feature = "btree_range", since = "1.17.0")]
impl<T> Clone for Range<'_, T> {
fn clone(&self) -> Self {
@@ -1598,6 +1623,20 @@ impl<'a, T> DoubleEndedIterator for Range<'a, T> {
#[stable(feature = "fused", since = "1.26.0")]
impl<T> FusedIterator for Range<'_, T> {}
+#[stable(feature = "default_iters", since = "1.70.0")]
+impl<T> Default for Range<'_, T> {
+ /// Creates an empty `btree_set::Range`.
+ ///
+ /// ```
+ /// # use std::collections::btree_set;
+ /// let iter: btree_set::Range<'_, u8> = Default::default();
+ /// assert_eq!(iter.count(), 0);
+ /// ```
+ fn default() -> Self {
+ Range { iter: Default::default() }
+ }
+}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl<T, A: Allocator + Clone> Clone for Difference<'_, T, A> {
fn clone(&self) -> Self {
diff --git a/library/alloc/src/collections/btree/set/tests.rs b/library/alloc/src/collections/btree/set/tests.rs
index 7b8d41a60..a7c839d77 100644
--- a/library/alloc/src/collections/btree/set/tests.rs
+++ b/library/alloc/src/collections/btree/set/tests.rs
@@ -4,7 +4,6 @@ use crate::testing::rng::DeterministicRng;
use crate::vec::Vec;
use std::cmp::Ordering;
use std::hash::{Hash, Hasher};
-use std::iter::FromIterator;
use std::ops::Bound::{Excluded, Included};
use std::panic::{catch_unwind, AssertUnwindSafe};
diff --git a/library/alloc/src/collections/linked_list.rs b/library/alloc/src/collections/linked_list.rs
index f2f5dffc2..106d05c57 100644
--- a/library/alloc/src/collections/linked_list.rs
+++ b/library/alloc/src/collections/linked_list.rs
@@ -15,7 +15,7 @@
use core::cmp::Ordering;
use core::fmt;
use core::hash::{Hash, Hasher};
-use core::iter::{FromIterator, FusedIterator};
+use core::iter::FusedIterator;
use core::marker::PhantomData;
use core::mem;
use core::ptr::NonNull;
@@ -130,7 +130,6 @@ impl<T: fmt::Debug> fmt::Debug for IterMut<'_, T> {
/// (provided by the [`IntoIterator`] trait). See its documentation for more.
///
/// [`into_iter`]: LinkedList::into_iter
-/// [`IntoIterator`]: core::iter::IntoIterator
#[derive(Clone)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct IntoIter<T> {
@@ -1075,6 +1074,20 @@ impl<T> ExactSizeIterator for Iter<'_, T> {}
#[stable(feature = "fused", since = "1.26.0")]
impl<T> FusedIterator for Iter<'_, T> {}
+#[stable(feature = "default_iters", since = "1.70.0")]
+impl<T> Default for Iter<'_, T> {
+ /// Creates an empty `linked_list::Iter`.
+ ///
+ /// ```
+ /// # use std::collections::linked_list;
+ /// let iter: linked_list::Iter<'_, u8> = Default::default();
+ /// assert_eq!(iter.len(), 0);
+ /// ```
+ fn default() -> Self {
+ Iter { head: None, tail: None, len: 0, marker: Default::default() }
+ }
+}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl<'a, T> Iterator for IterMut<'a, T> {
type Item = &'a mut T;
@@ -1129,6 +1142,13 @@ impl<T> ExactSizeIterator for IterMut<'_, T> {}
#[stable(feature = "fused", since = "1.26.0")]
impl<T> FusedIterator for IterMut<'_, T> {}
+#[stable(feature = "default_iters", since = "1.70.0")]
+impl<T> Default for IterMut<'_, T> {
+ fn default() -> Self {
+ IterMut { head: None, tail: None, len: 0, marker: Default::default() }
+ }
+}
+
/// A cursor over a `LinkedList`.
///
/// A `Cursor` is like an iterator, except that it can freely seek back-and-forth.
@@ -1808,6 +1828,20 @@ impl<T> ExactSizeIterator for IntoIter<T> {}
#[stable(feature = "fused", since = "1.26.0")]
impl<T> FusedIterator for IntoIter<T> {}
+#[stable(feature = "default_iters", since = "1.70.0")]
+impl<T> Default for IntoIter<T> {
+ /// Creates an empty `linked_list::IntoIter`.
+ ///
+ /// ```
+ /// # use std::collections::linked_list;
+ /// let iter: linked_list::IntoIter<u8> = Default::default();
+ /// assert_eq!(iter.len(), 0);
+ /// ```
+ fn default() -> Self {
+ LinkedList::new().into_iter()
+ }
+}
+
#[stable(feature = "rust1", since = "1.0.0")]
impl<T> FromIterator<T> for LinkedList<T> {
fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
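Note the design choice above: `IntoIter::default()` is defined as `LinkedList::new().into_iter()` rather than by spelling out private fields, which keeps it correct if the iterator's representation ever changes. One thing the new impl enables, sketched on stable 1.70:

```
use std::collections::linked_list::IntoIter;
use std::collections::LinkedList;

// Generic code can now default an owned iterator to "empty".
fn total(it: Option<IntoIter<u32>>) -> u32 {
    it.unwrap_or_default().sum()
}

fn main() {
    let list: LinkedList<u32> = [1, 2, 3].into_iter().collect();
    assert_eq!(total(Some(list.into_iter())), 6);
    assert_eq!(total(None), 0);
}
```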
diff --git a/library/alloc/src/collections/vec_deque/drain.rs b/library/alloc/src/collections/vec_deque/drain.rs
index 89feb361d..0be274a38 100644
--- a/library/alloc/src/collections/vec_deque/drain.rs
+++ b/library/alloc/src/collections/vec_deque/drain.rs
@@ -52,36 +52,22 @@ impl<'a, T, A: Allocator> Drain<'a, T, A> {
}
}
- // Only returns pointers to the slices, as that's
- // all we need to drop them. May only be called if `self.remaining != 0`.
+ // Only returns pointers to the slices, as that's all we need
+ // to drop them. May only be called if `self.remaining != 0`.
unsafe fn as_slices(&self) -> (*mut [T], *mut [T]) {
unsafe {
let deque = self.deque.as_ref();
- // FIXME: This is doing almost exactly the same thing as the else branch in `VecDeque::slice_ranges`.
- // Unfortunately, we can't just call `slice_ranges` here, as the deque's `len` is currently
- // just `drain_start`, so the range check would (almost) always panic. Between temporarily
- // adjusting the deques `len` to call `slice_ranges`, and just copy pasting the `slice_ranges`
- // implementation, this seemed like the less hacky solution, though it might be good to
- // find a better one in the future.
-
- // because `self.remaining != 0`, we know that `self.idx < deque.original_len`, so it's a valid
- // logical index.
- let wrapped_start = deque.to_physical_idx(self.idx);
-
- let head_len = deque.capacity() - wrapped_start;
-
- let (a_range, b_range) = if head_len >= self.remaining {
- (wrapped_start..wrapped_start + self.remaining, 0..0)
- } else {
- let tail_len = self.remaining - head_len;
- (wrapped_start..deque.capacity(), 0..tail_len)
- };
-
- // SAFETY: the range `self.idx..self.idx+self.remaining` lies strictly inside
- // the range `0..deque.original_len`. because of this, and because of the fact
- // that we acquire `a_range` and `b_range` exactly like `slice_ranges` would,
- // it's guaranteed that `a_range` and `b_range` represent valid ranges into
- // the deques buffer.
+
+ // We know that `self.idx + self.remaining <= deque.len <= usize::MAX`, so this won't overflow.
+ let logical_remaining_range = self.idx..self.idx + self.remaining;
+
+ // SAFETY: `logical_remaining_range` represents the
+ // range into the logical buffer of elements that
+ // haven't been drained yet, so they're all initialized,
+ // and `slice::range(start..end, end) == start..end`,
+ // so the preconditions for `slice_ranges` are met.
+ let (a_range, b_range) =
+ deque.slice_ranges(logical_remaining_range.clone(), logical_remaining_range.end);
(deque.buffer_range(a_range), deque.buffer_range(b_range))
}
}
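The rewrite routes `Drain::as_slices` through `slice_ranges` (with the explicit `len` parameter added in `mod.rs` below) instead of duplicating its arithmetic. The user-visible contract of `drain` is unchanged:

```
use std::collections::VecDeque;

fn main() {
    let mut dq: VecDeque<i32> = (0..6).collect();
    // Internally, the remaining elements may span two physical slices.
    let drained: Vec<i32> = dq.drain(2..4).collect();
    assert_eq!(drained, vec![2, 3]);
    assert_eq!(dq.into_iter().collect::<Vec<_>>(), vec![0, 1, 4, 5]);
}
```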
diff --git a/library/alloc/src/collections/vec_deque/into_iter.rs b/library/alloc/src/collections/vec_deque/into_iter.rs
index 34bc0ce91..d9e274df0 100644
--- a/library/alloc/src/collections/vec_deque/into_iter.rs
+++ b/library/alloc/src/collections/vec_deque/into_iter.rs
@@ -1,4 +1,5 @@
use core::iter::{FusedIterator, TrustedLen};
+use core::num::NonZeroUsize;
use core::{array, fmt, mem::MaybeUninit, ops::Try, ptr};
use crate::alloc::{Allocator, Global};
@@ -11,7 +12,6 @@ use super::VecDeque;
/// (provided by the [`IntoIterator`] trait). See its documentation for more.
///
/// [`into_iter`]: VecDeque::into_iter
-/// [`IntoIterator`]: core::iter::IntoIterator
#[derive(Clone)]
#[stable(feature = "rust1", since = "1.0.0")]
pub struct IntoIter<
@@ -54,15 +54,16 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
}
#[inline]
- fn advance_by(&mut self, n: usize) -> Result<(), usize> {
- if self.inner.len < n {
- let len = self.inner.len;
+ fn advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
+ let len = self.inner.len;
+ let rem = if len < n {
self.inner.clear();
- Err(len)
+ n - len
} else {
self.inner.drain(..n);
- Ok(())
- }
+ 0
+ };
+ NonZeroUsize::new(rem).map_or(Ok(()), Err)
}
#[inline]
@@ -182,15 +183,16 @@ impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
}
#[inline]
- fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+ fn advance_back_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
let len = self.inner.len;
- if len >= n {
- self.inner.truncate(len - n);
- Ok(())
- } else {
+ let rem = if len < n {
self.inner.clear();
- Err(len)
- }
+ n - len
+ } else {
+ self.inner.truncate(len - n);
+ 0
+ };
+ NonZeroUsize::new(rem).map_or(Ok(()), Err)
}
fn try_rfold<B, F, R>(&mut self, mut init: B, mut f: F) -> R
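This `advance_by`/`advance_back_by` change, which recurs in `iter.rs` and `iter_mut.rs` below, tracks a libs-api revision of the unstable `iter_advance_by` feature: `Err` now carries the number of steps that could not be taken as a `NonZeroUsize`, instead of the number that were taken. A nightly sketch of the new contract:

```
#![feature(iter_advance_by)]
use std::num::NonZeroUsize;

fn main() {
    let mut it = [1, 2, 3].into_iter();
    assert_eq!(it.advance_by(2), Ok(()));
    // Only 1 element was left, so 4 of the 5 requested steps remain.
    assert_eq!(it.advance_by(5), Err(NonZeroUsize::new(4).unwrap()));
}
```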
diff --git a/library/alloc/src/collections/vec_deque/iter.rs b/library/alloc/src/collections/vec_deque/iter.rs
index d9f393714..646a2a991 100644
--- a/library/alloc/src/collections/vec_deque/iter.rs
+++ b/library/alloc/src/collections/vec_deque/iter.rs
@@ -1,4 +1,5 @@
use core::iter::{FusedIterator, TrustedLen, TrustedRandomAccess, TrustedRandomAccessNoCoerce};
+use core::num::NonZeroUsize;
use core::ops::Try;
use core::{fmt, mem, slice};
@@ -55,13 +56,15 @@ impl<'a, T> Iterator for Iter<'a, T> {
}
}
- fn advance_by(&mut self, n: usize) -> Result<(), usize> {
- let m = match self.i1.advance_by(n) {
- Ok(_) => return Ok(()),
- Err(m) => m,
- };
- mem::swap(&mut self.i1, &mut self.i2);
- self.i1.advance_by(n - m).map_err(|o| o + m)
+ fn advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
+ let remaining = self.i1.advance_by(n);
+ match remaining {
+ Ok(()) => return Ok(()),
+ Err(n) => {
+ mem::swap(&mut self.i1, &mut self.i2);
+ self.i1.advance_by(n.get())
+ }
+ }
}
#[inline]
@@ -125,14 +128,14 @@ impl<'a, T> DoubleEndedIterator for Iter<'a, T> {
}
}
- fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
- let m = match self.i2.advance_back_by(n) {
- Ok(_) => return Ok(()),
- Err(m) => m,
- };
-
- mem::swap(&mut self.i1, &mut self.i2);
- self.i2.advance_back_by(n - m).map_err(|o| m + o)
+ fn advance_back_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
+ match self.i2.advance_back_by(n) {
+ Ok(()) => return Ok(()),
+ Err(n) => {
+ mem::swap(&mut self.i1, &mut self.i2);
+ self.i2.advance_back_by(n.get())
+ }
+ }
}
fn rfold<Acc, F>(self, accum: Acc, mut f: F) -> Acc
diff --git a/library/alloc/src/collections/vec_deque/iter_mut.rs b/library/alloc/src/collections/vec_deque/iter_mut.rs
index 2c59d95cd..7defbb109 100644
--- a/library/alloc/src/collections/vec_deque/iter_mut.rs
+++ b/library/alloc/src/collections/vec_deque/iter_mut.rs
@@ -1,4 +1,5 @@
use core::iter::{FusedIterator, TrustedLen, TrustedRandomAccess, TrustedRandomAccessNoCoerce};
+use core::num::NonZeroUsize;
use core::ops::Try;
use core::{fmt, mem, slice};
@@ -47,13 +48,14 @@ impl<'a, T> Iterator for IterMut<'a, T> {
}
}
- fn advance_by(&mut self, n: usize) -> Result<(), usize> {
- let m = match self.i1.advance_by(n) {
- Ok(_) => return Ok(()),
- Err(m) => m,
- };
- mem::swap(&mut self.i1, &mut self.i2);
- self.i1.advance_by(n - m).map_err(|o| o + m)
+ fn advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
+ match self.i1.advance_by(n) {
+ Ok(()) => return Ok(()),
+ Err(remaining) => {
+ mem::swap(&mut self.i1, &mut self.i2);
+ self.i1.advance_by(remaining.get())
+ }
+ }
}
#[inline]
@@ -117,14 +119,14 @@ impl<'a, T> DoubleEndedIterator for IterMut<'a, T> {
}
}
- fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
- let m = match self.i2.advance_back_by(n) {
- Ok(_) => return Ok(()),
- Err(m) => m,
- };
-
- mem::swap(&mut self.i1, &mut self.i2);
- self.i2.advance_back_by(n - m).map_err(|o| m + o)
+ fn advance_back_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
+ match self.i2.advance_back_by(n) {
+ Ok(()) => return Ok(()),
+ Err(remaining) => {
+ mem::swap(&mut self.i1, &mut self.i2);
+ self.i2.advance_back_by(remaining.get())
+ }
+ }
}
fn rfold<Acc, F>(self, accum: Acc, mut f: F) -> Acc
diff --git a/library/alloc/src/collections/vec_deque/mod.rs b/library/alloc/src/collections/vec_deque/mod.rs
index 8317ac431..8916b42ed 100644
--- a/library/alloc/src/collections/vec_deque/mod.rs
+++ b/library/alloc/src/collections/vec_deque/mod.rs
@@ -10,7 +10,7 @@
use core::cmp::{self, Ordering};
use core::fmt;
use core::hash::{Hash, Hasher};
-use core::iter::{repeat_n, repeat_with, ByRefSized, FromIterator};
+use core::iter::{repeat_n, repeat_with, ByRefSized};
use core::mem::{ManuallyDrop, SizedTypeProperties};
use core::ops::{Index, IndexMut, Range, RangeBounds};
use core::ptr;
@@ -1156,7 +1156,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
#[inline]
#[stable(feature = "deque_extras_15", since = "1.5.0")]
pub fn as_slices(&self) -> (&[T], &[T]) {
- let (a_range, b_range) = self.slice_ranges(..);
+ let (a_range, b_range) = self.slice_ranges(.., self.len);
// SAFETY: `slice_ranges` always returns valid ranges into
// the physical buffer.
unsafe { (&*self.buffer_range(a_range), &*self.buffer_range(b_range)) }
@@ -1190,7 +1190,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
#[inline]
#[stable(feature = "deque_extras_15", since = "1.5.0")]
pub fn as_mut_slices(&mut self) -> (&mut [T], &mut [T]) {
- let (a_range, b_range) = self.slice_ranges(..);
+ let (a_range, b_range) = self.slice_ranges(.., self.len);
// SAFETY: `slice_ranges` always returns valid ranges into
// the physical buffer.
unsafe { (&mut *self.buffer_range(a_range), &mut *self.buffer_range(b_range)) }
@@ -1232,19 +1232,28 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// Given a range into the logical buffer of the deque, this function
/// return two ranges into the physical buffer that correspond to
- /// the given range.
- fn slice_ranges<R>(&self, range: R) -> (Range<usize>, Range<usize>)
+ /// the given range. The `len` parameter should usually just be `self.len`;
+ /// the reason it's passed explicitly is that if the deque is wrapped in
+ /// a `Drain`, then `self.len` is not actually the length of the deque.
+ ///
+ /// # Safety
+ ///
+ /// This function is always safe to call. For the resulting ranges to be valid
+ /// ranges into the physical buffer, the caller must ensure that the result of
+ /// calling `slice::range(range, ..len)` represents a valid range into the
+ /// logical buffer, and that all elements in that range are initialized.
+ fn slice_ranges<R>(&self, range: R, len: usize) -> (Range<usize>, Range<usize>)
where
R: RangeBounds<usize>,
{
- let Range { start, end } = slice::range(range, ..self.len);
+ let Range { start, end } = slice::range(range, ..len);
let len = end - start;
if len == 0 {
(0..0, 0..0)
} else {
- // `slice::range` guarantees that `start <= end <= self.len`.
- // because `len != 0`, we know that `start < end`, so `start < self.len`
+ // `slice::range` guarantees that `start <= end <= len`.
+ // because `len != 0`, we know that `start < end`, so `start < len`
// and the indexing is valid.
let wrapped_start = self.to_physical_idx(start);
@@ -1290,7 +1299,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
where
R: RangeBounds<usize>,
{
- let (a_range, b_range) = self.slice_ranges(range);
+ let (a_range, b_range) = self.slice_ranges(range, self.len);
// SAFETY: The ranges returned by `slice_ranges`
// are valid ranges into the physical buffer, so
// it's ok to pass them to `buffer_range` and
@@ -1330,7 +1339,7 @@ impl<T, A: Allocator> VecDeque<T, A> {
where
R: RangeBounds<usize>,
{
- let (a_range, b_range) = self.slice_ranges(range);
+ let (a_range, b_range) = self.slice_ranges(range, self.len);
// SAFETY: The ranges returned by `slice_ranges`
// are valid ranges into the physical buffer, so
// it's ok to pass them to `buffer_range` and
@@ -2385,7 +2394,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
}
/// Binary searches this `VecDeque` for a given element.
- /// This behaves similarly to [`contains`] if this `VecDeque` is sorted.
+ /// If the `VecDeque` is not sorted, the returned result is unspecified and
+ /// meaningless.
///
/// If the value is found then [`Result::Ok`] is returned, containing the
/// index of the matching element. If there are multiple matches, then any
@@ -2395,7 +2405,6 @@ impl<T, A: Allocator> VecDeque<T, A> {
///
/// See also [`binary_search_by`], [`binary_search_by_key`], and [`partition_point`].
///
- /// [`contains`]: VecDeque::contains
/// [`binary_search_by`]: VecDeque::binary_search_by
/// [`binary_search_by_key`]: VecDeque::binary_search_by_key
/// [`partition_point`]: VecDeque::partition_point
@@ -2441,12 +2450,13 @@ impl<T, A: Allocator> VecDeque<T, A> {
}
/// Binary searches this `VecDeque` with a comparator function.
- /// This behaves similarly to [`contains`] if this `VecDeque` is sorted.
///
- /// The comparator function should implement an order consistent
- /// with the sort order of the deque, returning an order code that
- /// indicates whether its argument is `Less`, `Equal` or `Greater`
- /// than the desired target.
+ /// The comparator function should return an order code that indicates
+ /// whether its argument is `Less`, `Equal` or `Greater` than the desired
+ /// target.
+ /// If the `VecDeque` is not sorted or if the comparator function does not
+ /// implement an order consistent with the sort order of the underlying
+ /// `VecDeque`, the returned result is unspecified and meaningless.
///
/// If the value is found then [`Result::Ok`] is returned, containing the
/// index of the matching element. If there are multiple matches, then any
@@ -2456,7 +2466,6 @@ impl<T, A: Allocator> VecDeque<T, A> {
///
/// See also [`binary_search`], [`binary_search_by_key`], and [`partition_point`].
///
- /// [`contains`]: VecDeque::contains
/// [`binary_search`]: VecDeque::binary_search
/// [`binary_search_by_key`]: VecDeque::binary_search_by_key
/// [`partition_point`]: VecDeque::partition_point
@@ -2496,10 +2505,11 @@ impl<T, A: Allocator> VecDeque<T, A> {
}
/// Binary searches this `VecDeque` with a key extraction function.
- /// This behaves similarly to [`contains`] if this `VecDeque` is sorted.
///
/// Assumes that the deque is sorted by the key, for instance with
/// [`make_contiguous().sort_by_key()`] using the same key extraction function.
+ /// If the deque is not sorted by the key, the returned result is
+ /// unspecified and meaningless.
///
/// If the value is found then [`Result::Ok`] is returned, containing the
/// index of the matching element. If there are multiple matches, then any
@@ -2509,7 +2519,6 @@ impl<T, A: Allocator> VecDeque<T, A> {
///
/// See also [`binary_search`], [`binary_search_by`], and [`partition_point`].
///
- /// [`contains`]: VecDeque::contains
/// [`make_contiguous().sort_by_key()`]: VecDeque::make_contiguous
/// [`binary_search`]: VecDeque::binary_search
/// [`binary_search_by`]: VecDeque::binary_search_by
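The doc changes drop the misleading "behaves similarly to `contains`" phrasing and state the actual precondition: on an unsorted deque, the result is unspecified. For reference, the stable behavior on a sorted deque:

```
use std::collections::VecDeque;

fn main() {
    let dq: VecDeque<i32> = [1, 3, 5, 7].into_iter().collect();
    assert_eq!(dq.binary_search(&5), Ok(2));
    // On a miss, Err holds the index at which the value could be
    // inserted while keeping the deque sorted.
    assert_eq!(dq.binary_search(&4), Err(2));
}
```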
diff --git a/library/alloc/src/lib.rs b/library/alloc/src/lib.rs
index e9cc3875f..aa240c37e 100644
--- a/library/alloc/src/lib.rs
+++ b/library/alloc/src/lib.rs
@@ -87,9 +87,14 @@
#![warn(missing_debug_implementations)]
#![warn(missing_docs)]
#![allow(explicit_outlives_requirements)]
-#![cfg_attr(not(bootstrap), warn(multiple_supertrait_upcastable))]
+#![warn(multiple_supertrait_upcastable)]
//
// Library features:
+// tidy-alphabetical-start
+#![cfg_attr(not(no_global_oom_handling), feature(const_alloc_error))]
+#![cfg_attr(not(no_global_oom_handling), feature(const_btree_len))]
+#![cfg_attr(test, feature(is_sorted))]
+#![cfg_attr(test, feature(new_uninit))]
#![feature(alloc_layout_extra)]
#![feature(allocator_api)]
#![feature(array_chunks)]
@@ -99,23 +104,21 @@
#![feature(assert_matches)]
#![feature(async_iterator)]
#![feature(coerce_unsized)]
-#![cfg_attr(not(no_global_oom_handling), feature(const_alloc_error))]
+#![feature(const_align_of_val)]
#![feature(const_box)]
-#![cfg_attr(not(no_global_oom_handling), feature(const_btree_len))]
-#![feature(const_cow_is_borrowed)]
#![feature(const_convert)]
-#![feature(const_size_of_val)]
-#![feature(const_align_of_val)]
-#![feature(const_ptr_read)]
-#![feature(const_maybe_uninit_zeroed)]
-#![feature(const_maybe_uninit_write)]
+#![feature(const_cow_is_borrowed)]
+#![feature(const_eval_select)]
#![feature(const_maybe_uninit_as_mut_ptr)]
+#![feature(const_maybe_uninit_write)]
+#![feature(const_maybe_uninit_zeroed)]
+#![feature(const_pin)]
+#![feature(const_ptr_read)]
#![feature(const_refs_to_cell)]
+#![feature(const_size_of_val)]
+#![feature(const_waker)]
#![feature(core_intrinsics)]
#![feature(core_panic)]
-#![feature(const_eval_select)]
-#![feature(const_pin)]
-#![feature(const_waker)]
#![feature(dispatch_from_dyn)]
#![feature(error_generic_member_access)]
#![feature(error_in_core)]
@@ -126,7 +129,6 @@
#![feature(hasher_prefixfree_extras)]
#![feature(inline_const)]
#![feature(inplace_iteration)]
-#![cfg_attr(test, feature(is_sorted))]
#![feature(iter_advance_by)]
#![feature(iter_next_chunk)]
#![feature(iter_repeat_n)]
@@ -134,8 +136,6 @@
#![feature(maybe_uninit_slice)]
#![feature(maybe_uninit_uninit_array)]
#![feature(maybe_uninit_uninit_array_transpose)]
-#![cfg_attr(test, feature(new_uninit))]
-#![feature(nonnull_slice_from_raw_parts)]
#![feature(pattern)]
#![feature(pointer_byte_offsets)]
#![feature(provide_any)]
@@ -151,6 +151,7 @@
#![feature(slice_ptr_get)]
#![feature(slice_ptr_len)]
#![feature(slice_range)]
+#![feature(std_internals)]
#![feature(str_internals)]
#![feature(strict_provenance)]
#![feature(trusted_len)]
@@ -161,41 +162,43 @@
#![feature(unicode_internals)]
#![feature(unsize)]
#![feature(utf8_chunks)]
-#![feature(std_internals)]
+// tidy-alphabetical-end
//
// Language features:
+// tidy-alphabetical-start
+#![cfg_attr(not(test), feature(generator_trait))]
+#![cfg_attr(test, feature(panic_update_hook))]
+#![cfg_attr(test, feature(test))]
#![feature(allocator_internals)]
#![feature(allow_internal_unstable)]
#![feature(associated_type_bounds)]
+#![feature(c_unwind)]
#![feature(cfg_sanitize)]
#![feature(const_deref)]
#![feature(const_mut_refs)]
-#![feature(const_ptr_write)]
#![feature(const_precise_live_drops)]
+#![feature(const_ptr_write)]
#![feature(const_trait_impl)]
#![feature(const_try)]
#![feature(dropck_eyepatch)]
#![feature(exclusive_range_pattern)]
#![feature(fundamental)]
-#![cfg_attr(not(test), feature(generator_trait))]
#![feature(hashmap_internals)]
#![feature(lang_items)]
#![feature(min_specialization)]
+#![feature(multiple_supertrait_upcastable)]
#![feature(negative_impls)]
#![feature(never_type)]
+#![feature(pointer_is_aligned)]
#![feature(rustc_allow_const_fn_unstable)]
#![feature(rustc_attrs)]
-#![feature(pointer_is_aligned)]
#![feature(slice_internals)]
#![feature(staged_api)]
#![feature(stmt_expr_attributes)]
-#![cfg_attr(test, feature(test))]
#![feature(unboxed_closures)]
#![feature(unsized_fn_params)]
-#![feature(c_unwind)]
#![feature(with_negative_coherence)]
-#![cfg_attr(test, feature(panic_update_hook))]
-#![cfg_attr(not(bootstrap), feature(multiple_supertrait_upcastable))]
+// tidy-alphabetical-end
//
// Rustdoc features:
#![feature(doc_cfg)]
diff --git a/library/alloc/src/raw_vec.rs b/library/alloc/src/raw_vec.rs
index 3751f2a24..dfd30d99c 100644
--- a/library/alloc/src/raw_vec.rs
+++ b/library/alloc/src/raw_vec.rs
@@ -4,7 +4,6 @@ use core::alloc::LayoutError;
use core::cmp;
use core::intrinsics;
use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties};
-use core::ops::Drop;
use core::ptr::{self, NonNull, Unique};
use core::slice;
diff --git a/library/alloc/src/rc.rs b/library/alloc/src/rc.rs
index 932a537c5..ba035fb06 100644
--- a/library/alloc/src/rc.rs
+++ b/library/alloc/src/rc.rs
@@ -15,7 +15,7 @@
//!
//! [`Rc`] uses non-atomic reference counting. This means that overhead is very
//! low, but an [`Rc`] cannot be sent between threads, and consequently [`Rc`]
-//! does not implement [`Send`][send]. As a result, the Rust compiler
+//! does not implement [`Send`]. As a result, the Rust compiler
//! will check *at compile time* that you are not sending [`Rc`]s between
//! threads. If you need multi-threaded, atomic reference counting, use
//! [`sync::Arc`][arc].
@@ -232,7 +232,6 @@
//! [clone]: Clone::clone
//! [`Cell`]: core::cell::Cell
//! [`RefCell`]: core::cell::RefCell
-//! [send]: core::marker::Send
//! [arc]: crate::sync::Arc
//! [`Deref`]: core::ops::Deref
//! [downgrade]: Rc::downgrade
@@ -251,13 +250,12 @@ use core::any::Any;
use core::borrow;
use core::cell::Cell;
use core::cmp::Ordering;
-use core::convert::{From, TryFrom};
use core::fmt;
use core::hash::{Hash, Hasher};
use core::intrinsics::abort;
#[cfg(not(no_global_oom_handling))]
use core::iter;
-use core::marker::{self, PhantomData, Unpin, Unsize};
+use core::marker::{PhantomData, Unsize};
#[cfg(not(no_global_oom_handling))]
use core::mem::size_of_val;
use core::mem::{self, align_of_val_raw, forget};
@@ -321,7 +319,7 @@ pub struct Rc<T: ?Sized> {
}
#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized> !marker::Send for Rc<T> {}
+impl<T: ?Sized> !Send for Rc<T> {}
// Note that this negative impl isn't strictly necessary for correctness,
// as `Rc` transitively contains a `Cell`, which is itself `!Sync`.
@@ -329,7 +327,7 @@ impl<T: ?Sized> !marker::Send for Rc<T> {}
// having an explicit negative impl is nice for documentation purposes
// and results in nicer error messages.
#[stable(feature = "rust1", since = "1.0.0")]
-impl<T: ?Sized> !marker::Sync for Rc<T> {}
+impl<T: ?Sized> !Sync for Rc<T> {}
#[stable(feature = "catch_unwind", since = "1.9.0")]
impl<T: RefUnwindSafe + ?Sized> UnwindSafe for Rc<T> {}
@@ -681,6 +679,24 @@ impl<T> Rc<T> {
Err(this)
}
}
+
+ /// Returns the inner value, if the `Rc` has exactly one strong reference.
+ ///
+ /// Otherwise, [`None`] is returned and the `Rc` is dropped.
+ ///
+ /// This will succeed even if there are outstanding weak references.
+ ///
+ /// If `Rc::into_inner` is called on every clone of this `Rc`,
+ /// it is guaranteed that exactly one of the calls returns the inner value.
+ /// This means in particular that the inner value is not dropped.
+ ///
+ /// This is equivalent to `Rc::try_unwrap(this).ok()`. (Note that these are not equivalent for
+ /// [`Arc`](crate::sync::Arc), due to race conditions that do not apply to `Rc`.)
+ #[inline]
+ #[stable(feature = "rc_into_inner", since = "1.70.0")]
+ pub fn into_inner(this: Self) -> Option<T> {
+ Rc::try_unwrap(this).ok()
+ }
}
impl<T> Rc<[T]> {
@@ -1042,7 +1058,7 @@ impl<T: ?Sized> Rc<T> {
#[inline]
#[stable(feature = "rc_mutate_strong_count", since = "1.53.0")]
pub unsafe fn decrement_strong_count(ptr: *const T) {
- unsafe { mem::drop(Rc::from_raw(ptr)) };
+ unsafe { drop(Rc::from_raw(ptr)) };
}
/// Returns `true` if there are no other `Rc` or [`Weak`] pointers to
@@ -1478,7 +1494,7 @@ impl<T> Rc<[T]> {
///
/// Behavior is undefined should the size be wrong.
#[cfg(not(no_global_oom_handling))]
- unsafe fn from_iter_exact(iter: impl iter::Iterator<Item = T>, len: usize) -> Rc<[T]> {
+ unsafe fn from_iter_exact(iter: impl Iterator<Item = T>, len: usize) -> Rc<[T]> {
// Panic guard while cloning T elements.
// In the event of a panic, elements that have been written
// into the new RcBox will be dropped, then the memory freed.
@@ -1720,11 +1736,11 @@ impl<T: ?Sized + PartialEq> PartialEq for Rc<T> {
/// Inequality for two `Rc`s.
///
- /// Two `Rc`s are unequal if their inner values are unequal.
+ /// Two `Rc`s are not equal if their inner values are not equal.
///
/// If `T` also implements `Eq` (implying reflexivity of equality),
/// two `Rc`s that point to the same allocation are
- /// never unequal.
+ /// always equal.
///
/// # Examples
///
@@ -2070,7 +2086,7 @@ impl<T, const N: usize> TryFrom<Rc<[T]>> for Rc<[T; N]> {
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "shared_from_iter", since = "1.37.0")]
-impl<T> iter::FromIterator<T> for Rc<[T]> {
+impl<T> FromIterator<T> for Rc<[T]> {
/// Takes each element in the `Iterator` and collects it into an `Rc<[T]>`.
///
/// # Performance characteristics
@@ -2109,7 +2125,7 @@ impl<T> iter::FromIterator<T> for Rc<[T]> {
/// let evens: Rc<[u8]> = (0..10).collect(); // Just a single allocation happens here.
/// # assert_eq!(&*evens, &*(0..10).collect::<Vec<_>>());
/// ```
- fn from_iter<I: iter::IntoIterator<Item = T>>(iter: I) -> Self {
+ fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
ToRcSlice::to_rc_slice(iter.into_iter())
}
}
@@ -2186,9 +2202,9 @@ pub struct Weak<T: ?Sized> {
}
#[stable(feature = "rc_weak", since = "1.4.0")]
-impl<T: ?Sized> !marker::Send for Weak<T> {}
+impl<T: ?Sized> !Send for Weak<T> {}
#[stable(feature = "rc_weak", since = "1.4.0")]
-impl<T: ?Sized> !marker::Sync for Weak<T> {}
+impl<T: ?Sized> !Sync for Weak<T> {}
#[unstable(feature = "coerce_unsized", issue = "18598")]
impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Weak<U>> for Weak<T> {}
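Besides the import cleanups, the substantive addition to `rc.rs` is `Rc::into_inner`, stabilized in 1.70. Unlike `Rc::try_unwrap`, it never hands the `Rc` back; for `Rc` it is exactly `Rc::try_unwrap(this).ok()`:

```
use std::rc::Rc;

fn main() {
    let x = Rc::new(String::from("hello"));
    let y = Rc::clone(&x);
    assert_eq!(Rc::into_inner(x), None); // `y` still holds a strong ref
    assert_eq!(Rc::into_inner(y), Some(String::from("hello")));
}
```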
diff --git a/library/alloc/src/rc/tests.rs b/library/alloc/src/rc/tests.rs
index 32433cfbd..2784108e0 100644
--- a/library/alloc/src/rc/tests.rs
+++ b/library/alloc/src/rc/tests.rs
@@ -152,6 +152,21 @@ fn try_unwrap() {
}
#[test]
+fn into_inner() {
+ let x = Rc::new(3);
+ assert_eq!(Rc::into_inner(x), Some(3));
+
+ let x = Rc::new(4);
+ let y = Rc::clone(&x);
+ assert_eq!(Rc::into_inner(x), None);
+ assert_eq!(Rc::into_inner(y), Some(4));
+
+ let x = Rc::new(5);
+ let _w = Rc::downgrade(&x);
+ assert_eq!(Rc::into_inner(x), Some(5));
+}
+
+#[test]
fn into_from_raw() {
let x = Rc::new(Box::new("hello"));
let y = x.clone();
diff --git a/library/alloc/src/str.rs b/library/alloc/src/str.rs
index afbe5cfaf..b87ef59f6 100644
--- a/library/alloc/src/str.rs
+++ b/library/alloc/src/str.rs
@@ -256,7 +256,7 @@ impl str {
/// assert_eq!("than an old", s.replace("is", "an"));
/// ```
///
- /// When the pattern doesn't match:
+ /// When the pattern doesn't match, it returns this string slice as [`String`]:
///
/// ```
/// let s = "this is old";
@@ -297,7 +297,7 @@ impl str {
/// assert_eq!("foo foo new23 foo", s.replacen(char::is_numeric, "new", 1));
/// ```
///
- /// When the pattern doesn't match:
+ /// When the pattern doesn't match, it returns this string slice as [`String`]:
///
/// ```
/// let s = "this is old";
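The clarified wording is worth a standalone example: on a miss, `replace` still allocates and returns an owned copy of the entire slice.

    fn main() {
        let s = "this is old";
        // No match: the original contents come back as a fresh `String`.
        let t = s.replace("cookie monster", "little lamb");
        assert_eq!(t, s);
        assert_eq!(t, String::from("this is old"));
    }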
diff --git a/library/alloc/src/string.rs b/library/alloc/src/string.rs
index c7e7ed3e9..be41919b9 100644
--- a/library/alloc/src/string.rs
+++ b/library/alloc/src/string.rs
@@ -45,9 +45,9 @@
use core::error::Error;
use core::fmt;
use core::hash;
-use core::iter::FusedIterator;
#[cfg(not(no_global_oom_handling))]
-use core::iter::{from_fn, FromIterator};
+use core::iter::from_fn;
+use core::iter::FusedIterator;
#[cfg(not(no_global_oom_handling))]
use core::ops::Add;
#[cfg(not(no_global_oom_handling))]
@@ -359,7 +359,7 @@ use crate::vec::Vec;
/// [Deref]: core::ops::Deref "ops::Deref"
/// [`Deref`]: core::ops::Deref "ops::Deref"
/// [`as_str()`]: String::as_str
-#[derive(PartialOrd, Eq, Ord)]
+#[derive(PartialEq, PartialOrd, Eq, Ord)]
#[stable(feature = "rust1", since = "1.0.0")]
#[cfg_attr(not(test), lang = "String")]
pub struct String {
@@ -2207,14 +2207,6 @@ impl<'a, 'b> Pattern<'a> for &'b String {
}
}
-#[stable(feature = "rust1", since = "1.0.0")]
-impl PartialEq for String {
- #[inline]
- fn eq(&self, other: &String) -> bool {
- PartialEq::eq(&self[..], &other[..])
- }
-}
-
macro_rules! impl_eq {
($lhs:ty, $rhs: ty) => {
#[stable(feature = "rust1", since = "1.0.0")]
diff --git a/library/alloc/src/sync.rs b/library/alloc/src/sync.rs
index fdd341a06..24849d52d 100644
--- a/library/alloc/src/sync.rs
+++ b/library/alloc/src/sync.rs
@@ -11,14 +11,13 @@
use core::any::Any;
use core::borrow;
use core::cmp::Ordering;
-use core::convert::{From, TryFrom};
use core::fmt;
use core::hash::{Hash, Hasher};
use core::hint;
use core::intrinsics::abort;
#[cfg(not(no_global_oom_handling))]
use core::iter;
-use core::marker::{PhantomData, Unpin, Unsize};
+use core::marker::{PhantomData, Unsize};
#[cfg(not(no_global_oom_handling))]
use core::mem::size_of_val;
use core::mem::{self, align_of_val_raw};
@@ -51,8 +50,16 @@ mod tests;
///
/// Going above this limit will abort your program (although not necessarily
/// at _exactly_ `MAX_REFCOUNT + 1` references).
+/// Trying to go above it might `panic`, even if the count never actually exceeds the limit.
+///
+/// This is a global invariant, and also applies when using a compare-exchange loop.
+///
+/// See comment in `Arc::clone`.
const MAX_REFCOUNT: usize = (isize::MAX) as usize;
+/// The error message used when either counter would exceed `MAX_REFCOUNT` on a path where a safe `panic` is possible.
+const INTERNAL_OVERFLOW_ERROR: &str = "Arc counter overflow";
+
#[cfg(not(sanitize = "thread"))]
macro_rules! acquire {
($x:expr) => {
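A toy sketch of the compare-exchange pattern the new invariant text refers to (illustrative only, not the real `Arc` internals): the limit is checked before the new value is committed, so a `panic` is safe here and the counter can never pass `MAX_REFCOUNT` through this path.

    use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};

    const MAX_REFCOUNT: usize = isize::MAX as usize;

    // Validate first, commit second: the count never exceeds the limit
    // via this path, so panicking on violation is safe.
    fn checked_increment(count: &AtomicUsize) -> Option<usize> {
        count
            .fetch_update(Relaxed, Relaxed, |n| {
                assert!(n <= MAX_REFCOUNT, "Arc counter overflow");
                Some(n + 1)
            })
            .ok()
            .map(|old| old + 1)
    }

    fn main() {
        let count = AtomicUsize::new(1);
        assert_eq!(checked_increment(&count), Some(2));
    }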
@@ -180,8 +187,6 @@ macro_rules! acquire {
/// [mutex]: ../../std/sync/struct.Mutex.html
/// [rwlock]: ../../std/sync/struct.RwLock.html
/// [atomic]: core::sync::atomic
-/// [`Send`]: core::marker::Send
-/// [`Sync`]: core::marker::Sync
/// [deref]: core::ops::Deref
/// [downgrade]: Arc::downgrade
/// [upgrade]: Weak::upgrade
@@ -654,20 +659,17 @@ impl<T> Arc<T> {
///
/// This will succeed even if there are outstanding weak references.
///
- // FIXME: when `Arc::into_inner` is stabilized, add this paragraph:
- /*
/// It is strongly recommended to use [`Arc::into_inner`] instead if you don't
/// want to keep the `Arc` in the [`Err`] case.
/// Immediately dropping the [`Err`] payload, like in the expression
/// `Arc::try_unwrap(this).ok()`, can still cause the strong count to
/// drop to zero and the inner value of the `Arc` to be dropped:
- /// For instance if two threads execute this expression in parallel, then
+ /// For instance if two threads each execute this expression in parallel, then
/// there is a race condition. The threads could first both check whether they
/// have the last clone of their `Arc` via `Arc::try_unwrap`, and then
/// both drop their `Arc` in the call to [`ok`][`Result::ok`],
/// taking the strong count from two down to zero.
///
- */
/// # Examples
///
/// ```
@@ -711,20 +713,13 @@ impl<T> Arc<T> {
/// This means in particular that the inner value is not dropped.
///
/// The similar expression `Arc::try_unwrap(this).ok()` does not
- /// offer such a guarantee. See the last example below.
- //
- // FIXME: when `Arc::into_inner` is stabilized, add this to end
- // of the previous sentence:
- /*
+ /// offer such a guarantee. See the last example below
/// and the documentation of [`Arc::try_unwrap`].
- */
///
/// # Examples
///
/// Minimal example demonstrating the guarantee that `Arc::into_inner` gives.
/// ```
- /// #![feature(arc_into_inner)]
- ///
/// use std::sync::Arc;
///
/// let x = Arc::new(3);
@@ -748,8 +743,6 @@ impl<T> Arc<T> {
///
/// A more practical example demonstrating the need for `Arc::into_inner`:
/// ```
- /// #![feature(arc_into_inner)]
- ///
/// use std::sync::Arc;
///
/// // Definition of a simple singly linked list using `Arc`:
@@ -799,13 +792,8 @@ impl<T> Arc<T> {
/// x_thread.join().unwrap();
/// y_thread.join().unwrap();
/// ```
-
- // FIXME: when `Arc::into_inner` is stabilized, adjust above documentation
- // and the documentation of `Arc::try_unwrap` according to the `FIXME`s. Also
- // open an issue on rust-lang/rust-clippy, asking for a lint against
- // `Arc::try_unwrap(...).ok()`.
#[inline]
- #[unstable(feature = "arc_into_inner", issue = "106894")]
+ #[stable(feature = "arc_into_inner", since = "1.70.0")]
pub fn into_inner(this: Self) -> Option<T> {
// Make sure that the ordinary `Drop` implementation isn’t called as well
let mut this = mem::ManuallyDrop::new(this);
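The newly stabilized guarantee is easiest to see with two racing calls. In this standalone sketch exactly one side receives the value, whereas `Arc::try_unwrap(...).ok()` could let both sides fail and drop it unobserved.

    use std::sync::Arc;
    use std::thread;

    fn main() {
        let x = Arc::new(String::from("hello"));
        let y = Arc::clone(&x);

        let handle = thread::spawn(move || Arc::into_inner(y));
        let a = Arc::into_inner(x);
        let b = handle.join().unwrap();

        // Exactly one call returns the value; it is never dropped unseen.
        assert!(matches!((a, b), (Some(_), None) | (None, Some(_))));
    }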
@@ -1104,6 +1092,9 @@ impl<T: ?Sized> Arc<T> {
continue;
}
+ // We can't allow the refcount to increase much past `MAX_REFCOUNT`.
+ assert!(cur <= MAX_REFCOUNT, "{}", INTERNAL_OVERFLOW_ERROR);
+
// NOTE: this code currently ignores the possibility of overflow
// into usize::MAX; in general both Rc and Arc need to be adjusted
// to deal with overflow.
@@ -1247,7 +1238,7 @@ impl<T: ?Sized> Arc<T> {
#[inline]
#[stable(feature = "arc_mutate_strong_count", since = "1.51.0")]
pub unsafe fn decrement_strong_count(ptr: *const T) {
- unsafe { mem::drop(Arc::from_raw(ptr)) };
+ unsafe { drop(Arc::from_raw(ptr)) };
}
#[inline]
@@ -1410,7 +1401,7 @@ impl<T> Arc<[T]> {
///
/// Behavior is undefined should the size be wrong.
#[cfg(not(no_global_oom_handling))]
- unsafe fn from_iter_exact(iter: impl iter::Iterator<Item = T>, len: usize) -> Arc<[T]> {
+ unsafe fn from_iter_exact(iter: impl Iterator<Item = T>, len: usize) -> Arc<[T]> {
// Panic guard while cloning T elements.
// In the event of a panic, elements that have been written
// into the new ArcInner will be dropped, then the memory freed.
@@ -1519,6 +1510,11 @@ impl<T: ?Sized> Clone for Arc<T> {
// the worst already happened and we actually do overflow the `usize` counter. However, that
// requires the counter to grow from `isize::MAX` to `usize::MAX` between the increment
// above and the `abort` below, which seems exceedingly unlikely.
+ //
+ // This is a global invariant, and also applies when using a compare-exchange loop to increment
+ // counters in other methods.
+ // Otherwise, the counter could be brought to an almost-overflow using a compare-exchange loop,
+ // and then overflow using a few `fetch_add`s.
if old_size > MAX_REFCOUNT {
abort();
}
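For contrast with the checked compare-exchange path, a toy sketch of the `Clone` fast path described above (again not the real internals): the increment is unconditional and the check happens afterwards, which the comment argues is sound because the counter would have to race all the way from `isize::MAX` to `usize::MAX` before the `abort` fires.

    use std::process::abort;
    use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};

    const MAX_REFCOUNT: usize = isize::MAX as usize;

    // Increment first, check second; `abort` (not `panic`) because the
    // count is already past the limit and must not keep growing.
    fn clone_like(count: &AtomicUsize) {
        let old = count.fetch_add(1, Relaxed);
        if old > MAX_REFCOUNT {
            abort();
        }
    }

    fn main() {
        let count = AtomicUsize::new(1);
        clone_like(&count);
        assert_eq!(count.load(Relaxed), 2);
    }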
@@ -2180,9 +2176,7 @@ impl<T: ?Sized> Weak<T> {
return None;
}
// See comments in `Arc::clone` for why we do this (for `mem::forget`).
- if n > MAX_REFCOUNT {
- abort();
- }
+ assert!(n <= MAX_REFCOUNT, "{}", INTERNAL_OVERFLOW_ERROR);
Some(n + 1)
})
.ok()
@@ -2461,10 +2455,10 @@ impl<T: ?Sized + PartialEq> PartialEq for Arc<T> {
/// Inequality for two `Arc`s.
///
- /// Two `Arc`s are unequal if their inner values are unequal.
+ /// Two `Arc`s are not equal if their inner values are not equal.
///
/// If `T` also implements `Eq` (implying reflexivity of equality),
- /// two `Arc`s that point to the same value are never unequal.
+ /// two `Arc`s that point to the same value are always equal.
///
/// # Examples
///
@@ -2821,7 +2815,7 @@ impl<T, const N: usize> TryFrom<Arc<[T]>> for Arc<[T; N]> {
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "shared_from_iter", since = "1.37.0")]
-impl<T> iter::FromIterator<T> for Arc<[T]> {
+impl<T> FromIterator<T> for Arc<[T]> {
/// Takes each element in the `Iterator` and collects it into an `Arc<[T]>`.
///
/// # Performance characteristics
@@ -2860,7 +2854,7 @@ impl<T> iter::FromIterator<T> for Arc<[T]> {
/// let evens: Arc<[u8]> = (0..10).collect(); // Just a single allocation happens here.
/// # assert_eq!(&*evens, &*(0..10).collect::<Vec<_>>());
/// ```
- fn from_iter<I: iter::IntoIterator<Item = T>>(iter: I) -> Self {
+ fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
ToArcSlice::to_arc_slice(iter.into_iter())
}
}
diff --git a/library/alloc/src/tests.rs b/library/alloc/src/tests.rs
index 299ed156a..b1d3a9fa8 100644
--- a/library/alloc/src/tests.rs
+++ b/library/alloc/src/tests.rs
@@ -4,7 +4,6 @@ use core::any::Any;
use core::clone::Clone;
use core::convert::TryInto;
use core::ops::Deref;
-use core::result::Result::{Err, Ok};
use std::boxed::Box;
@@ -15,7 +14,7 @@ fn test_owned_clone() {
assert!(a == b);
}
-#[derive(PartialEq, Eq)]
+#[derive(Debug, PartialEq, Eq)]
struct Test;
#[test]
@@ -23,24 +22,17 @@ fn any_move() {
let a = Box::new(8) as Box<dyn Any>;
let b = Box::new(Test) as Box<dyn Any>;
- match a.downcast::<i32>() {
- Ok(a) => {
- assert!(a == Box::new(8));
- }
- Err(..) => panic!(),
- }
- match b.downcast::<Test>() {
- Ok(a) => {
- assert!(a == Box::new(Test));
- }
- Err(..) => panic!(),
- }
+ let a: Box<i32> = a.downcast::<i32>().unwrap();
+ assert_eq!(*a, 8);
+
+ let b: Box<Test> = b.downcast::<Test>().unwrap();
+ assert_eq!(*b, Test);
let a = Box::new(8) as Box<dyn Any>;
let b = Box::new(Test) as Box<dyn Any>;
- assert!(a.downcast::<Box<Test>>().is_err());
- assert!(b.downcast::<Box<i32>>().is_err());
+ assert!(a.downcast::<Box<i32>>().is_err());
+ assert!(b.downcast::<Box<Test>>().is_err());
}
#[test]
diff --git a/library/alloc/src/vec/cow.rs b/library/alloc/src/vec/cow.rs
index 64943a273..2c799605b 100644
--- a/library/alloc/src/vec/cow.rs
+++ b/library/alloc/src/vec/cow.rs
@@ -1,5 +1,4 @@
use crate::borrow::Cow;
-use core::iter::FromIterator;
use super::Vec;
diff --git a/library/alloc/src/vec/into_iter.rs b/library/alloc/src/vec/into_iter.rs
index 37966007e..b2db2fdfd 100644
--- a/library/alloc/src/vec/into_iter.rs
+++ b/library/alloc/src/vec/into_iter.rs
@@ -11,6 +11,7 @@ use core::iter::{
};
use core::marker::PhantomData;
use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties};
+use core::num::NonZeroUsize;
#[cfg(not(no_global_oom_handling))]
use core::ops::Deref;
use core::ptr::{self, NonNull};
@@ -107,7 +108,7 @@ impl<T, A: Allocator> IntoIter<T, A> {
/// ```
/// # let mut into_iter = Vec::<u8>::with_capacity(10).into_iter();
/// let mut into_iter = std::mem::replace(&mut into_iter, Vec::new().into_iter());
- /// (&mut into_iter).for_each(core::mem::drop);
+ /// (&mut into_iter).for_each(drop);
/// std::mem::forget(into_iter);
/// ```
///
@@ -213,7 +214,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
}
#[inline]
- fn advance_by(&mut self, n: usize) -> Result<(), usize> {
+ fn advance_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
let step_size = self.len().min(n);
let to_drop = ptr::slice_from_raw_parts_mut(self.ptr as *mut T, step_size);
if T::IS_ZST {
@@ -227,10 +228,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
unsafe {
ptr::drop_in_place(to_drop);
}
- if step_size < n {
- return Err(step_size);
- }
- Ok(())
+ NonZeroUsize::new(n - step_size).map_or(Ok(()), Err)
}
#[inline]
@@ -313,7 +311,7 @@ impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
}
#[inline]
- fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+ fn advance_back_by(&mut self, n: usize) -> Result<(), NonZeroUsize> {
let step_size = self.len().min(n);
if T::IS_ZST {
// SAFETY: same as for advance_by()
@@ -327,10 +325,7 @@ impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
unsafe {
ptr::drop_in_place(to_drop);
}
- if step_size < n {
- return Err(step_size);
- }
- Ok(())
+ NonZeroUsize::new(n - step_size).map_or(Ok(()), Err)
}
}
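A nightly-only sketch of the new signature (`Iterator::advance_by` is still unstable behind the `iter_advance_by` feature gate): on failure the error now carries the number of steps that could not be taken, as a `NonZeroUsize`.

    // Requires a nightly toolchain; `advance_by` is unstable.
    #![feature(iter_advance_by)]

    use std::num::NonZeroUsize;

    fn main() {
        let mut it = vec![1, 2, 3].into_iter();
        // Enough elements remain, so advancing by 2 succeeds.
        assert_eq!(it.advance_by(2), Ok(()));
        assert_eq!(it.next(), Some(3));
        // The iterator is empty: all 5 requested steps are left undone,
        // and the error reports that count.
        assert_eq!(it.advance_by(5), Err(NonZeroUsize::new(5).unwrap()));
    }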
@@ -347,6 +342,24 @@ impl<T, A: Allocator> FusedIterator for IntoIter<T, A> {}
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T, A: Allocator> TrustedLen for IntoIter<T, A> {}
+#[stable(feature = "default_iters", since = "1.70.0")]
+impl<T, A> Default for IntoIter<T, A>
+where
+ A: Allocator + Default,
+{
+ /// Creates an empty `vec::IntoIter`.
+ ///
+ /// ```
+ /// # use std::vec;
+ /// let iter: vec::IntoIter<u8> = Default::default();
+ /// assert_eq!(iter.len(), 0);
+ /// assert_eq!(iter.as_slice(), &[]);
+ /// ```
+ fn default() -> Self {
+ super::Vec::new_in(Default::default()).into_iter()
+ }
+}
+
#[doc(hidden)]
#[unstable(issue = "none", feature = "std_internals")]
#[rustc_unsafe_specialization_marker]
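Beyond `Default::default()` itself, the practical payoff of this impl is that `mem::take` and `mem::replace` now work on a stored `vec::IntoIter`, for example when draining an iterator field behind `&mut self`. A short standalone sketch:

    use std::mem;
    use std::vec;

    fn main() {
        let mut it: vec::IntoIter<i32> = vec![1, 2, 3].into_iter();
        // `mem::take` requires `Default`; it swaps in an empty iterator.
        let taken = mem::take(&mut it);
        assert_eq!(taken.collect::<Vec<_>>(), [1, 2, 3]);
        assert_eq!(it.len(), 0);
    }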
diff --git a/library/alloc/src/vec/mod.rs b/library/alloc/src/vec/mod.rs
index f2aa30f18..3736a6e0b 100644
--- a/library/alloc/src/vec/mod.rs
+++ b/library/alloc/src/vec/mod.rs
@@ -56,13 +56,9 @@
#[cfg(not(no_global_oom_handling))]
use core::cmp;
use core::cmp::Ordering;
-use core::convert::TryFrom;
use core::fmt;
use core::hash::{Hash, Hasher};
-use core::intrinsics::assume;
use core::iter;
-#[cfg(not(no_global_oom_handling))]
-use core::iter::FromIterator;
use core::marker::PhantomData;
use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties};
use core::ops::{self, Index, IndexMut, Range, RangeBounds};
@@ -1240,11 +1236,7 @@ impl<T, A: Allocator> Vec<T, A> {
pub fn as_ptr(&self) -> *const T {
// We shadow the slice method of the same name to avoid going through
// `deref`, which creates an intermediate reference.
- let ptr = self.buf.ptr();
- unsafe {
- assume(!ptr.is_null());
- }
- ptr
+ self.buf.ptr()
}
/// Returns an unsafe mutable pointer to the vector's buffer, or a dangling
@@ -1277,11 +1269,7 @@ impl<T, A: Allocator> Vec<T, A> {
pub fn as_mut_ptr(&mut self) -> *mut T {
// We shadow the slice method of the same name to avoid going through
// `deref_mut`, which creates an intermediate reference.
- let ptr = self.buf.ptr();
- unsafe {
- assume(!ptr.is_null());
- }
- ptr
+ self.buf.ptr()
}
/// Returns a reference to the underlying allocator.
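Dropping the `assume(!ptr.is_null())` hint does not change the observable guarantee, since the pointer comes from the buffer's `NonNull`-based storage: even an empty, never-allocated vector hands out a well-aligned, dangling, non-null pointer. A standalone check:

    fn main() {
        let v: Vec<u8> = Vec::new();
        // No allocation has happened, yet the pointer is a non-null,
        // well-aligned dangling sentinel rather than null.
        assert!(!v.as_ptr().is_null());
    }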
@@ -2999,7 +2987,7 @@ impl<'a, T: Copy + 'a, A: Allocator + 'a> Extend<&'a T> for Vec<T, A> {
}
}
-/// Implements comparison of vectors, [lexicographically](core::cmp::Ord#lexicographical-comparison).
+/// Implements comparison of vectors, [lexicographically](Ord#lexicographical-comparison).
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: PartialOrd, A: Allocator> PartialOrd for Vec<T, A> {
#[inline]
@@ -3011,7 +2999,7 @@ impl<T: PartialOrd, A: Allocator> PartialOrd for Vec<T, A> {
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Eq, A: Allocator> Eq for Vec<T, A> {}
-/// Implements ordering of vectors, [lexicographically](core::cmp::Ord#lexicographical-comparison).
+/// Implements ordering of vectors, [lexicographically](Ord#lexicographical-comparison).
#[stable(feature = "rust1", since = "1.0.0")]
impl<T: Ord, A: Allocator> Ord for Vec<T, A> {
#[inline]