Diffstat (limited to 'library/alloc')
-rw-r--r--  library/alloc/src/alloc.rs                                |   6
-rw-r--r--  library/alloc/src/alloc/tests.rs                          |   2
-rw-r--r--  library/alloc/src/boxed.rs                                | 310
-rw-r--r--  library/alloc/src/boxed/thin.rs                           |  10
-rw-r--r--  library/alloc/src/collections/binary_heap.rs              |   3
-rw-r--r--  library/alloc/src/collections/btree/dedup_sorted_iter.rs  |   4
-rw-r--r--  library/alloc/src/collections/btree/map/entry.rs          |  11
-rw-r--r--  library/alloc/src/collections/btree/node.rs               |  16
-rw-r--r--  library/alloc/src/collections/linked_list.rs              |   2
-rw-r--r--  library/alloc/src/collections/mod.rs                      |   4
-rw-r--r--  library/alloc/src/collections/vec_deque/drain.rs          |  44
-rw-r--r--  library/alloc/src/collections/vec_deque/mod.rs            |  10
-rw-r--r--  library/alloc/src/ffi/c_str.rs                            |  32
-rw-r--r--  library/alloc/src/lib.rs                                  |  16
-rw-r--r--  library/alloc/src/macros.rs                               |   2
-rw-r--r--  library/alloc/src/rc.rs                                   |   2
-rw-r--r--  library/alloc/src/slice.rs                                |  86
-rw-r--r--  library/alloc/src/str.rs                                  |  24
-rw-r--r--  library/alloc/src/string.rs                               |  39
-rw-r--r--  library/alloc/src/sync.rs                                 |  22
-rw-r--r--  library/alloc/src/sync/tests.rs                           |  19
-rw-r--r--  library/alloc/src/vec/drain.rs                            |  73
-rw-r--r--  library/alloc/src/vec/drain_filter.rs                     |  60
-rw-r--r--  library/alloc/src/vec/in_place_collect.rs                 |   2
-rw-r--r--  library/alloc/src/vec/into_iter.rs                        |  21
-rw-r--r--  library/alloc/src/vec/is_zero.rs                          |  16
-rw-r--r--  library/alloc/src/vec/mod.rs                              |  27
-rw-r--r--  library/alloc/src/vec/spec_extend.rs                      |   2
-rw-r--r--  library/alloc/tests/lib.rs                                |   2
-rw-r--r--  library/alloc/tests/str.rs                                |  10
-rw-r--r--  library/alloc/tests/string.rs                             | 126
-rw-r--r--  library/alloc/tests/thin_box.rs                           |   8
-rw-r--r--  library/alloc/tests/vec.rs                                | 276
-rw-r--r--  library/alloc/tests/vec_deque.rs                          | 161
34 files changed, 952 insertions, 496 deletions
diff --git a/library/alloc/src/alloc.rs b/library/alloc/src/alloc.rs
index cc8da7bcc..80b067812 100644
--- a/library/alloc/src/alloc.rs
+++ b/library/alloc/src/alloc.rs
@@ -30,13 +30,13 @@ extern "Rust" {
#[rustc_allocator]
#[rustc_allocator_nounwind]
fn __rust_alloc(size: usize, align: usize) -> *mut u8;
- #[cfg_attr(not(bootstrap), rustc_deallocator)]
+ #[rustc_deallocator]
#[rustc_allocator_nounwind]
fn __rust_dealloc(ptr: *mut u8, size: usize, align: usize);
- #[cfg_attr(not(bootstrap), rustc_reallocator)]
+ #[rustc_reallocator]
#[rustc_allocator_nounwind]
fn __rust_realloc(ptr: *mut u8, old_size: usize, align: usize, new_size: usize) -> *mut u8;
- #[cfg_attr(not(bootstrap), rustc_allocator_zeroed)]
+ #[rustc_allocator_zeroed]
#[rustc_allocator_nounwind]
fn __rust_alloc_zeroed(size: usize, align: usize) -> *mut u8;
}
diff --git a/library/alloc/src/alloc/tests.rs b/library/alloc/src/alloc/tests.rs
index 7d560964d..b2f019459 100644
--- a/library/alloc/src/alloc/tests.rs
+++ b/library/alloc/src/alloc/tests.rs
@@ -15,7 +15,7 @@ fn allocate_zeroed() {
let end = i.add(layout.size());
while i < end {
assert_eq!(*i, 0);
- i = i.offset(1);
+ i = i.add(1);
}
Global.deallocate(ptr.as_non_null_ptr(), layout);
}
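
A recurring theme in this commit is replacing signed pointer arithmetic (`offset` with an `as isize` cast) by the unsigned `add`/`sub` methods, as in the test above. A minimal sketch (not part of the diff) of the equivalence:

    // For in-bounds offsets, `p.add(n)` behaves like `p.offset(n as isize)`
    // but takes a `usize`, so the cast disappears at the call site.
    fn main() {
        let buf = [10u8, 20, 30];
        let p = buf.as_ptr();
        unsafe {
            assert_eq!(*p.add(2), *p.offset(2)); // both read buf[2]
            assert_eq!(*p.add(2).sub(1), *p.offset(1)); // and back one element
        }
    }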
diff --git a/library/alloc/src/boxed.rs b/library/alloc/src/boxed.rs
index c1ceeb0de..65e323c9e 100644
--- a/library/alloc/src/boxed.rs
+++ b/library/alloc/src/boxed.rs
@@ -1,4 +1,4 @@
-//! A pointer type for heap allocation.
+//! The `Box<T>` type for heap allocation.
//!
//! [`Box<T>`], casually referred to as a 'box', provides the simplest form of
//! heap allocation in Rust. Boxes provide ownership for this allocation, and
@@ -151,6 +151,8 @@ use core::async_iter::AsyncIterator;
use core::borrow;
use core::cmp::Ordering;
use core::convert::{From, TryFrom};
+#[cfg(not(bootstrap))]
+use core::error::Error;
use core::fmt;
use core::future::Future;
use core::hash::{Hash, Hasher};
@@ -174,6 +176,9 @@ use crate::borrow::Cow;
use crate::raw_vec::RawVec;
#[cfg(not(no_global_oom_handling))]
use crate::str::from_boxed_utf8_unchecked;
+#[cfg(not(bootstrap))]
+#[cfg(not(no_global_oom_handling))]
+use crate::string::String;
#[cfg(not(no_global_oom_handling))]
use crate::vec::Vec;
@@ -1480,7 +1485,7 @@ impl<T: Copy> From<&[T]> for Box<[T]> {
/// Converts a `&[T]` into a `Box<[T]>`
///
/// This conversion allocates on the heap
- /// and performs a copy of `slice`.
+ /// and performs a copy of `slice` and its contents.
///
/// # Examples
/// ```rust
@@ -2085,3 +2090,304 @@ impl<S: ?Sized + AsyncIterator + Unpin> AsyncIterator for Box<S> {
(**self).size_hint()
}
}
+
+#[cfg(not(bootstrap))]
+impl dyn Error {
+ #[inline]
+ #[stable(feature = "error_downcast", since = "1.3.0")]
+ #[rustc_allow_incoherent_impl]
+ /// Attempts to downcast the box to a concrete type.
+ pub fn downcast<T: Error + 'static>(self: Box<Self>) -> Result<Box<T>, Box<dyn Error>> {
+ if self.is::<T>() {
+ unsafe {
+ let raw: *mut dyn Error = Box::into_raw(self);
+ Ok(Box::from_raw(raw as *mut T))
+ }
+ } else {
+ Err(self)
+ }
+ }
+}
+
+#[cfg(not(bootstrap))]
+impl dyn Error + Send {
+ #[inline]
+ #[stable(feature = "error_downcast", since = "1.3.0")]
+ #[rustc_allow_incoherent_impl]
+ /// Attempts to downcast the box to a concrete type.
+ pub fn downcast<T: Error + 'static>(self: Box<Self>) -> Result<Box<T>, Box<dyn Error + Send>> {
+ let err: Box<dyn Error> = self;
+ <dyn Error>::downcast(err).map_err(|s| unsafe {
+ // Reapply the `Send` marker.
+ mem::transmute::<Box<dyn Error>, Box<dyn Error + Send>>(s)
+ })
+ }
+}
+
+#[cfg(not(bootstrap))]
+impl dyn Error + Send + Sync {
+ #[inline]
+ #[stable(feature = "error_downcast", since = "1.3.0")]
+ #[rustc_allow_incoherent_impl]
+ /// Attempts to downcast the box to a concrete type.
+ pub fn downcast<T: Error + 'static>(self: Box<Self>) -> Result<Box<T>, Box<Self>> {
+ let err: Box<dyn Error> = self;
+ <dyn Error>::downcast(err).map_err(|s| unsafe {
+ // Reapply the `Send + Sync` marker.
+ mem::transmute::<Box<dyn Error>, Box<dyn Error + Send + Sync>>(s)
+ })
+ }
+}
+
+#[cfg(not(bootstrap))]
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, E: Error + 'a> From<E> for Box<dyn Error + 'a> {
+ /// Converts a type of [`Error`] into a box of dyn [`Error`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::error::Error;
+ /// use std::fmt;
+ /// use std::mem;
+ ///
+ /// #[derive(Debug)]
+ /// struct AnError;
+ ///
+ /// impl fmt::Display for AnError {
+ /// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// write!(f, "An error")
+ /// }
+ /// }
+ ///
+ /// impl Error for AnError {}
+ ///
+ /// let an_error = AnError;
+ /// assert!(0 == mem::size_of_val(&an_error));
+ /// let a_boxed_error = Box::<dyn Error>::from(an_error);
+ /// assert!(mem::size_of::<Box<dyn Error>>() == mem::size_of_val(&a_boxed_error))
+ /// ```
+ fn from(err: E) -> Box<dyn Error + 'a> {
+ Box::new(err)
+ }
+}
+
+#[cfg(not(bootstrap))]
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, E: Error + Send + Sync + 'a> From<E> for Box<dyn Error + Send + Sync + 'a> {
+ /// Converts a type of [`Error`] + [`Send`] + [`Sync`] into a box of
+ /// dyn [`Error`] + [`Send`] + [`Sync`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::error::Error;
+ /// use std::fmt;
+ /// use std::mem;
+ ///
+ /// #[derive(Debug)]
+ /// struct AnError;
+ ///
+ /// impl fmt::Display for AnError {
+ /// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// write!(f, "An error")
+ /// }
+ /// }
+ ///
+ /// impl Error for AnError {}
+ ///
+ /// unsafe impl Send for AnError {}
+ ///
+ /// unsafe impl Sync for AnError {}
+ ///
+ /// let an_error = AnError;
+ /// assert!(0 == mem::size_of_val(&an_error));
+ /// let a_boxed_error = Box::<dyn Error + Send + Sync>::from(an_error);
+ /// assert!(
+ /// mem::size_of::<Box<dyn Error + Send + Sync>>() == mem::size_of_val(&a_boxed_error))
+ /// ```
+ fn from(err: E) -> Box<dyn Error + Send + Sync + 'a> {
+ Box::new(err)
+ }
+}
+
+#[cfg(not(bootstrap))]
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl From<String> for Box<dyn Error + Send + Sync> {
+ /// Converts a [`String`] into a box of dyn [`Error`] + [`Send`] + [`Sync`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::error::Error;
+ /// use std::mem;
+ ///
+ /// let a_string_error = "a string error".to_string();
+ /// let a_boxed_error = Box::<dyn Error + Send + Sync>::from(a_string_error);
+ /// assert!(
+ /// mem::size_of::<Box<dyn Error + Send + Sync>>() == mem::size_of_val(&a_boxed_error))
+ /// ```
+ #[inline]
+ fn from(err: String) -> Box<dyn Error + Send + Sync> {
+ struct StringError(String);
+
+ impl Error for StringError {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ &self.0
+ }
+ }
+
+ impl fmt::Display for StringError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(&self.0, f)
+ }
+ }
+
+ // Purposefully skip printing "StringError(..)"
+ impl fmt::Debug for StringError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&self.0, f)
+ }
+ }
+
+ Box::new(StringError(err))
+ }
+}
+
+#[cfg(not(bootstrap))]
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "string_box_error", since = "1.6.0")]
+impl From<String> for Box<dyn Error> {
+ /// Converts a [`String`] into a box of dyn [`Error`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::error::Error;
+ /// use std::mem;
+ ///
+ /// let a_string_error = "a string error".to_string();
+ /// let a_boxed_error = Box::<dyn Error>::from(a_string_error);
+ /// assert!(mem::size_of::<Box<dyn Error>>() == mem::size_of_val(&a_boxed_error))
+ /// ```
+ fn from(str_err: String) -> Box<dyn Error> {
+ let err1: Box<dyn Error + Send + Sync> = From::from(str_err);
+ let err2: Box<dyn Error> = err1;
+ err2
+ }
+}
+
+#[cfg(not(bootstrap))]
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a> From<&str> for Box<dyn Error + Send + Sync + 'a> {
+ /// Converts a [`str`] into a box of dyn [`Error`] + [`Send`] + [`Sync`].
+ ///
+ /// [`str`]: prim@str
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::error::Error;
+ /// use std::mem;
+ ///
+ /// let a_str_error = "a str error";
+ /// let a_boxed_error = Box::<dyn Error + Send + Sync>::from(a_str_error);
+ /// assert!(
+ /// mem::size_of::<Box<dyn Error + Send + Sync>>() == mem::size_of_val(&a_boxed_error))
+ /// ```
+ #[inline]
+ fn from(err: &str) -> Box<dyn Error + Send + Sync + 'a> {
+ From::from(String::from(err))
+ }
+}
+
+#[cfg(not(bootstrap))]
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "string_box_error", since = "1.6.0")]
+impl From<&str> for Box<dyn Error> {
+ /// Converts a [`str`] into a box of dyn [`Error`].
+ ///
+ /// [`str`]: prim@str
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::error::Error;
+ /// use std::mem;
+ ///
+ /// let a_str_error = "a str error";
+ /// let a_boxed_error = Box::<dyn Error>::from(a_str_error);
+ /// assert!(mem::size_of::<Box<dyn Error>>() == mem::size_of_val(&a_boxed_error))
+ /// ```
+ fn from(err: &str) -> Box<dyn Error> {
+ From::from(String::from(err))
+ }
+}
+
+#[cfg(not(bootstrap))]
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "cow_box_error", since = "1.22.0")]
+impl<'a, 'b> From<Cow<'b, str>> for Box<dyn Error + Send + Sync + 'a> {
+ /// Converts a [`Cow`] into a box of dyn [`Error`] + [`Send`] + [`Sync`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::error::Error;
+ /// use std::mem;
+ /// use std::borrow::Cow;
+ ///
+ /// let a_cow_str_error = Cow::from("a str error");
+ /// let a_boxed_error = Box::<dyn Error + Send + Sync>::from(a_cow_str_error);
+ /// assert!(
+ /// mem::size_of::<Box<dyn Error + Send + Sync>>() == mem::size_of_val(&a_boxed_error))
+ /// ```
+ fn from(err: Cow<'b, str>) -> Box<dyn Error + Send + Sync + 'a> {
+ From::from(String::from(err))
+ }
+}
+
+#[cfg(not(bootstrap))]
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "cow_box_error", since = "1.22.0")]
+impl<'a> From<Cow<'a, str>> for Box<dyn Error> {
+ /// Converts a [`Cow`] into a box of dyn [`Error`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::error::Error;
+ /// use std::mem;
+ /// use std::borrow::Cow;
+ ///
+ /// let a_cow_str_error = Cow::from("a str error");
+ /// let a_boxed_error = Box::<dyn Error>::from(a_cow_str_error);
+ /// assert!(mem::size_of::<Box<dyn Error>>() == mem::size_of_val(&a_boxed_error))
+ /// ```
+ fn from(err: Cow<'a, str>) -> Box<dyn Error> {
+ From::from(String::from(err))
+ }
+}
+
+#[cfg(not(bootstrap))]
+#[stable(feature = "box_error", since = "1.8.0")]
+impl<T: core::error::Error> core::error::Error for Box<T> {
+ #[allow(deprecated, deprecated_in_future)]
+ fn description(&self) -> &str {
+ core::error::Error::description(&**self)
+ }
+
+ #[allow(deprecated)]
+ fn cause(&self) -> Option<&dyn core::error::Error> {
+ core::error::Error::cause(&**self)
+ }
+
+ fn source(&self) -> Option<&(dyn core::error::Error + 'static)> {
+ core::error::Error::source(&**self)
+ }
+}
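
The `downcast` impls added above move the familiar `Box<dyn Error>` downcasting into `alloc`. A minimal usage sketch (not part of the diff), using `std::io::Error` as the concrete type:

    use std::error::Error;
    use std::io;

    fn main() {
        let boxed: Box<dyn Error> = Box::new(io::Error::new(io::ErrorKind::Other, "oops"));
        // On success we get the concrete `Box<io::Error>` back; on failure
        // the original `Box<dyn Error>` is returned unchanged.
        match boxed.downcast::<io::Error>() {
            Ok(io_err) => println!("io::Error: {io_err}"),
            Err(other) => println!("some other error: {other}"),
        }
    }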
diff --git a/library/alloc/src/boxed/thin.rs b/library/alloc/src/boxed/thin.rs
index 649ccfcaa..0a20c74b0 100644
--- a/library/alloc/src/boxed/thin.rs
+++ b/library/alloc/src/boxed/thin.rs
@@ -2,6 +2,8 @@
// https://github.com/matthieu-m/rfc2580/blob/b58d1d3cba0d4b5e859d3617ea2d0943aaa31329/examples/thin.rs
// by matthieu-m
use crate::alloc::{self, Layout, LayoutError};
+#[cfg(not(bootstrap))]
+use core::error::Error;
use core::fmt::{self, Debug, Display, Formatter};
use core::marker::PhantomData;
#[cfg(not(no_global_oom_handling))]
@@ -271,3 +273,11 @@ impl<H> WithHeader<H> {
Layout::new::<H>().extend(value_layout)
}
}
+
+#[cfg(not(bootstrap))]
+#[unstable(feature = "thin_box", issue = "92791")]
+impl<T: ?Sized + Error> Error for ThinBox<T> {
+ fn source(&self) -> Option<&(dyn Error + 'static)> {
+ self.deref().source()
+ }
+}
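
With this impl, a `ThinBox<dyn Error>` can itself be used as an `Error`, with `source` delegating to the pointee. A hedged sketch (nightly only; `thin_box` is unstable, tracking issue #92791):

    #![feature(thin_box)]
    use std::boxed::ThinBox;
    use std::error::Error;
    use std::fmt;

    #[derive(Debug)]
    struct MyError;

    impl fmt::Display for MyError {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            write!(f, "my error")
        }
    }

    impl Error for MyError {}

    fn main() {
        // `ThinBox` stores the metadata next to the value, so this handle is
        // one pointer wide even though the pointee is a trait object.
        let thin: ThinBox<dyn Error> = ThinBox::new_unsize(MyError);
        assert!(thin.source().is_none()); // delegates to `MyError::source`
    }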
diff --git a/library/alloc/src/collections/binary_heap.rs b/library/alloc/src/collections/binary_heap.rs
index 197e7aaac..4583bc9a1 100644
--- a/library/alloc/src/collections/binary_heap.rs
+++ b/library/alloc/src/collections/binary_heap.rs
@@ -1010,7 +1010,8 @@ impl<T> BinaryHeap<T> {
/// current length. The allocator may reserve more space to speculatively
/// avoid frequent allocations. After calling `try_reserve`, capacity will be
/// greater than or equal to `self.len() + additional` if it returns
- /// `Ok(())`. Does nothing if capacity is already sufficient.
+ /// `Ok(())`. Does nothing if capacity is already sufficient. This method
+ /// preserves the contents even if an error occurs.
///
/// # Errors
///
diff --git a/library/alloc/src/collections/btree/dedup_sorted_iter.rs b/library/alloc/src/collections/btree/dedup_sorted_iter.rs
index 60bf83b83..17ee78045 100644
--- a/library/alloc/src/collections/btree/dedup_sorted_iter.rs
+++ b/library/alloc/src/collections/btree/dedup_sorted_iter.rs
@@ -3,7 +3,9 @@ use core::iter::Peekable;
/// An iterator for deduping the key of a sorted iterator.
/// When encountering the duplicated key, only the last key-value pair is yielded.
///
-/// Used by [`BTreeMap::bulk_build_from_sorted_iter`].
+/// Used by [`BTreeMap::bulk_build_from_sorted_iter`][1].
+///
+/// [1]: crate::collections::BTreeMap::bulk_build_from_sorted_iter
pub struct DedupSortedIter<K, V, I>
where
I: Iterator<Item = (K, V)>,
diff --git a/library/alloc/src/collections/btree/map/entry.rs b/library/alloc/src/collections/btree/map/entry.rs
index b6eecf9b0..cd7cdc192 100644
--- a/library/alloc/src/collections/btree/map/entry.rs
+++ b/library/alloc/src/collections/btree/map/entry.rs
@@ -133,6 +133,17 @@ impl<'a, K: Debug + Ord, V: Debug, A: Allocator + Clone> fmt::Display
}
}
+#[cfg(not(bootstrap))]
+#[unstable(feature = "map_try_insert", issue = "82766")]
+impl<'a, K: core::fmt::Debug + Ord, V: core::fmt::Debug> core::error::Error
+ for crate::collections::btree_map::OccupiedError<'a, K, V>
+{
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ "key already exists"
+ }
+}
+
impl<'a, K: Ord, V, A: Allocator + Clone> Entry<'a, K, V, A> {
/// Ensures a value is in the entry by inserting the default if empty, and returns
/// a mutable reference to the value in the entry.
diff --git a/library/alloc/src/collections/btree/node.rs b/library/alloc/src/collections/btree/node.rs
index d831161bc..f1d2d3b30 100644
--- a/library/alloc/src/collections/btree/node.rs
+++ b/library/alloc/src/collections/btree/node.rs
@@ -318,7 +318,7 @@ impl<BorrowType: marker::BorrowType, K, V, Type> NodeRef<BorrowType, K, V, Type>
pub fn ascend(
self,
) -> Result<Handle<NodeRef<BorrowType, K, V, marker::Internal>, marker::Edge>, Self> {
- assert!(BorrowType::PERMITS_TRAVERSAL);
+ let _ = BorrowType::TRAVERSAL_PERMIT;
// We need to use raw pointers to nodes because, if BorrowType is marker::ValMut,
// there might be outstanding mutable references to values that we must not invalidate.
let leaf_ptr: *const _ = Self::as_leaf_ptr(&self);
@@ -1003,7 +1003,7 @@ impl<BorrowType: marker::BorrowType, K, V>
/// `edge.descend().ascend().unwrap()` and `node.ascend().unwrap().descend()` should
/// both, upon success, do nothing.
pub fn descend(self) -> NodeRef<BorrowType, K, V, marker::LeafOrInternal> {
- assert!(BorrowType::PERMITS_TRAVERSAL);
+ let _ = BorrowType::TRAVERSAL_PERMIT;
// We need to use raw pointers to nodes because, if BorrowType is
// marker::ValMut, there might be outstanding mutable references to
// values that we must not invalidate. There's no worry accessing the
@@ -1666,15 +1666,17 @@ pub mod marker {
pub struct ValMut<'a>(PhantomData<&'a mut ()>);
pub trait BorrowType {
- // Whether node references of this borrow type allow traversing
- // to other nodes in the tree.
- const PERMITS_TRAVERSAL: bool = true;
+ // If node references of this borrow type allow traversing to other
+ // nodes in the tree, this constant can be evaluated. Thus reading it
+ // serves as a compile-time assertion.
+ const TRAVERSAL_PERMIT: () = ();
}
impl BorrowType for Owned {
- // Traversal isn't needed, it happens using the result of `borrow_mut`.
+ // Reject evaluation, because traversal isn't needed. Instead traversal
+ // happens using the result of `borrow_mut`.
// By disabling traversal, and only creating new references to roots,
// we know that every reference of the `Owned` type is to a root node.
- const PERMITS_TRAVERSAL: bool = false;
+ const TRAVERSAL_PERMIT: () = panic!();
}
impl BorrowType for Dying {}
impl<'a> BorrowType for Immut<'a> {}
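
The `TRAVERSAL_PERMIT` change turns what used to be a runtime `assert!` into a compile-time check: evaluating an associated `const` whose initializer panics fails during const evaluation, so a forbidden `ascend`/`descend` through an `Owned` reference no longer compiles instead of panicking when reached. A standalone sketch of the pattern (hypothetical names, not from the diff):

    trait BorrowKind {
        // Evaluating this constant is the "permission check".
        const TRAVERSAL_PERMIT: () = ();
    }

    struct Traversable;
    struct NotTraversable;

    impl BorrowKind for Traversable {}

    impl BorrowKind for NotTraversable {
        // Any attempt to evaluate this constant is rejected at compile time.
        const TRAVERSAL_PERMIT: () = panic!("traversal is not permitted");
    }

    fn traverse<B: BorrowKind>() {
        let _ = B::TRAVERSAL_PERMIT; // forces const evaluation for `B`
    }

    fn main() {
        traverse::<Traversable>(); // compiles and runs fine
        // traverse::<NotTraversable>(); // uncommenting fails at const eval
    }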
diff --git a/library/alloc/src/collections/linked_list.rs b/library/alloc/src/collections/linked_list.rs
index e21c8aa3b..6480fcaf9 100644
--- a/library/alloc/src/collections/linked_list.rs
+++ b/library/alloc/src/collections/linked_list.rs
@@ -1570,7 +1570,7 @@ impl<'a, T> CursorMut<'a, T> {
/// that the cursor points to is unchanged, even if it is the "ghost" node.
///
/// This operation should compute in *O*(1) time.
- // `push_front` continues to point to "ghost" when it addes a node to mimic
+ // `push_front` continues to point to "ghost" when it adds a node to mimic
// the behavior of `insert_before` on an empty list.
#[unstable(feature = "linked_list_cursors", issue = "58533")]
pub fn push_front(&mut self, elt: T) {
diff --git a/library/alloc/src/collections/mod.rs b/library/alloc/src/collections/mod.rs
index 628a5b155..21d0def08 100644
--- a/library/alloc/src/collections/mod.rs
+++ b/library/alloc/src/collections/mod.rs
@@ -152,3 +152,7 @@ trait SpecExtend<I: IntoIterator> {
/// Extends `self` with the contents of the given iterator.
fn spec_extend(&mut self, iter: I);
}
+
+#[cfg(not(bootstrap))]
+#[stable(feature = "try_reserve", since = "1.57.0")]
+impl core::error::Error for TryReserveError {}
diff --git a/library/alloc/src/collections/vec_deque/drain.rs b/library/alloc/src/collections/vec_deque/drain.rs
index 05f94da6d..41baa7102 100644
--- a/library/alloc/src/collections/vec_deque/drain.rs
+++ b/library/alloc/src/collections/vec_deque/drain.rs
@@ -1,10 +1,12 @@
+use core::fmt;
use core::iter::FusedIterator;
+use core::marker::PhantomData;
+use core::mem::{self, MaybeUninit};
use core::ptr::{self, NonNull};
-use core::{fmt, mem};
use crate::alloc::{Allocator, Global};
-use super::{count, Iter, VecDeque};
+use super::{count, wrap_index, VecDeque};
/// A draining iterator over the elements of a `VecDeque`.
///
@@ -20,18 +22,24 @@ pub struct Drain<
> {
after_tail: usize,
after_head: usize,
- iter: Iter<'a, T>,
+ ring: NonNull<[T]>,
+ tail: usize,
+ head: usize,
deque: NonNull<VecDeque<T, A>>,
+ _phantom: PhantomData<&'a T>,
}
impl<'a, T, A: Allocator> Drain<'a, T, A> {
pub(super) unsafe fn new(
after_tail: usize,
after_head: usize,
- iter: Iter<'a, T>,
+ ring: &'a [MaybeUninit<T>],
+ tail: usize,
+ head: usize,
deque: NonNull<VecDeque<T, A>>,
) -> Self {
- Drain { after_tail, after_head, iter, deque }
+ let ring = unsafe { NonNull::new_unchecked(ring as *const [MaybeUninit<T>] as *mut _) };
+ Drain { after_tail, after_head, ring, tail, head, deque, _phantom: PhantomData }
}
}
@@ -41,7 +49,9 @@ impl<T: fmt::Debug, A: Allocator> fmt::Debug for Drain<'_, T, A> {
f.debug_tuple("Drain")
.field(&self.after_tail)
.field(&self.after_head)
- .field(&self.iter)
+ .field(&self.ring)
+ .field(&self.tail)
+ .field(&self.head)
.finish()
}
}
@@ -118,12 +128,21 @@ impl<T, A: Allocator> Iterator for Drain<'_, T, A> {
#[inline]
fn next(&mut self) -> Option<T> {
- self.iter.next().map(|elt| unsafe { ptr::read(elt) })
+ if self.tail == self.head {
+ return None;
+ }
+ let tail = self.tail;
+ self.tail = wrap_index(self.tail.wrapping_add(1), self.ring.len());
+ // Safety:
+ // - `self.tail` in a ring buffer is always a valid index.
+ // - `self.head` and `self.tail` equality is checked above.
+ unsafe { Some(ptr::read(self.ring.as_ptr().get_unchecked_mut(tail))) }
}
#[inline]
fn size_hint(&self) -> (usize, Option<usize>) {
- self.iter.size_hint()
+ let len = count(self.tail, self.head, self.ring.len());
+ (len, Some(len))
}
}
@@ -131,7 +150,14 @@ impl<T, A: Allocator> Iterator for Drain<'_, T, A> {
impl<T, A: Allocator> DoubleEndedIterator for Drain<'_, T, A> {
#[inline]
fn next_back(&mut self) -> Option<T> {
- self.iter.next_back().map(|elt| unsafe { ptr::read(elt) })
+ if self.tail == self.head {
+ return None;
+ }
+ self.head = wrap_index(self.head.wrapping_sub(1), self.ring.len());
+ // Safety:
+ // - `self.head` in a ring buffer is always a valid index.
+ // - `self.head` and `self.tail` equality is checked above.
+ unsafe { Some(ptr::read(self.ring.as_ptr().get_unchecked_mut(self.head))) }
}
}
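
The rewritten `Drain` walks the ring buffer directly with `tail`/`head` indices and `wrap_index` instead of borrowing `Iter`. In this version of `VecDeque` the ring capacity is always a power of two, so wrapping reduces to a mask; a sketch mirroring the internal `wrap_index` helper:

    fn wrap_index(index: usize, size: usize) -> usize {
        // Holds for this `VecDeque`: the ring capacity is a power of two.
        debug_assert!(size.is_power_of_two());
        index & (size - 1)
    }

    fn main() {
        let cap = 8;
        assert_eq!(wrap_index(7 + 1, cap), 0); // wraps forward past the end
        assert_eq!(wrap_index(0usize.wrapping_sub(1), cap), 7); // wraps backward
    }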
diff --git a/library/alloc/src/collections/vec_deque/mod.rs b/library/alloc/src/collections/vec_deque/mod.rs
index 4d895d837..e3f4deb08 100644
--- a/library/alloc/src/collections/vec_deque/mod.rs
+++ b/library/alloc/src/collections/vec_deque/mod.rs
@@ -794,7 +794,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
/// in the given deque. The collection may reserve more space to speculatively avoid
/// frequent reallocations. After calling `try_reserve`, capacity will be
/// greater than or equal to `self.len() + additional` if it returns
- /// `Ok(())`. Does nothing if capacity is already sufficient.
+ /// `Ok(())`. Does nothing if capacity is already sufficient. This method
+ /// preserves the contents even if an error occurs.
///
/// # Errors
///
@@ -1333,9 +1334,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
// it. We do not write to `self` nor reborrow to a mutable reference.
// Hence the raw pointer we created above, for `deque`, remains valid.
let ring = self.buffer_as_slice();
- let iter = Iter::new(ring, drain_tail, drain_head);
- Drain::new(drain_head, head, iter, deque)
+ Drain::new(drain_head, head, ring, drain_tail, drain_head, deque)
}
}
@@ -2447,8 +2447,8 @@ impl<T, A: Allocator> VecDeque<T, A> {
let mut right_offset = 0;
for i in left_edge..right_edge {
right_offset = (i - left_edge) % (cap - right_edge);
- let src: isize = (right_edge + right_offset) as isize;
- ptr::swap(buf.add(i), buf.offset(src));
+ let src = right_edge + right_offset;
+ ptr::swap(buf.add(i), buf.add(src));
}
let n_ops = right_edge - left_edge;
left_edge += n_ops;
diff --git a/library/alloc/src/ffi/c_str.rs b/library/alloc/src/ffi/c_str.rs
index ae61b1f1e..aede6d54c 100644
--- a/library/alloc/src/ffi/c_str.rs
+++ b/library/alloc/src/ffi/c_str.rs
@@ -436,9 +436,9 @@ impl CString {
///
/// unsafe {
/// assert_eq!(b'f', *ptr as u8);
- /// assert_eq!(b'o', *ptr.offset(1) as u8);
- /// assert_eq!(b'o', *ptr.offset(2) as u8);
- /// assert_eq!(b'\0', *ptr.offset(3) as u8);
+ /// assert_eq!(b'o', *ptr.add(1) as u8);
+ /// assert_eq!(b'o', *ptr.add(2) as u8);
+ /// assert_eq!(b'\0', *ptr.add(3) as u8);
///
/// // retake pointer to free memory
/// let _ = CString::from_raw(ptr);
@@ -1121,3 +1121,29 @@ impl CStr {
CString::from(self)
}
}
+
+#[cfg(not(bootstrap))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl core::error::Error for NulError {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ "nul byte found in data"
+ }
+}
+
+#[cfg(not(bootstrap))]
+#[stable(feature = "cstring_from_vec_with_nul", since = "1.58.0")]
+impl core::error::Error for FromVecWithNulError {}
+
+#[cfg(not(bootstrap))]
+#[stable(feature = "cstring_into", since = "1.7.0")]
+impl core::error::Error for IntoStringError {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ "C string contained non-utf8 bytes"
+ }
+
+ fn source(&self) -> Option<&(dyn core::error::Error + 'static)> {
+ Some(self.__source())
+ }
+}
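
The error types gaining `core::error::Error` impls above are produced by the `CString` conversions; a minimal sketch (not from the diff) of how each arises:

    use std::ffi::CString;

    fn main() {
        // `NulError`: the input contains an interior nul byte.
        assert!(CString::new("ab\0cd").is_err());

        // `IntoStringError`: the C string holds non-UTF-8 bytes.
        let c = CString::new(vec![0xffu8, 0xfe]).unwrap();
        assert!(c.into_string().is_err());
    }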
diff --git a/library/alloc/src/lib.rs b/library/alloc/src/lib.rs
index 8b6f40548..8619467c2 100644
--- a/library/alloc/src/lib.rs
+++ b/library/alloc/src/lib.rs
@@ -56,10 +56,6 @@
//! [`Rc`]: rc
//! [`RefCell`]: core::cell
-// To run liballoc tests without x.py without ending up with two copies of liballoc, Miri needs to be
-// able to "empty" this crate. See <https://github.com/rust-lang/miri-test-libstd/issues/4>.
-// rustc itself never sets the feature, so this line has no effect there.
-#![cfg(any(not(feature = "miri-test-libstd"), test, doctest))]
#![allow(unused_attributes)]
#![stable(feature = "alloc", since = "1.36.0")]
#![doc(
@@ -77,6 +73,10 @@
))]
#![no_std]
#![needs_allocator]
+// To run liballoc tests without x.py without ending up with two copies of liballoc, Miri needs to be
+// able to "empty" this crate. See <https://github.com/rust-lang/miri-test-libstd/issues/4>.
+// rustc itself never sets the feature, so this line has no effect there.
+#![cfg(any(not(feature = "miri-test-libstd"), test, doctest))]
//
// Lints:
#![deny(unsafe_op_in_unsafe_fn)]
@@ -111,6 +111,8 @@
#![feature(const_pin)]
#![feature(cstr_from_bytes_until_nul)]
#![feature(dispatch_from_dyn)]
+#![cfg_attr(not(bootstrap), feature(error_generic_member_access))]
+#![cfg_attr(not(bootstrap), feature(error_in_core))]
#![feature(exact_size_is_empty)]
#![feature(extend_one)]
#![feature(fmt_internals)]
@@ -127,10 +129,12 @@
#![feature(nonnull_slice_from_raw_parts)]
#![feature(pattern)]
#![feature(pointer_byte_offsets)]
+#![cfg_attr(not(bootstrap), feature(provide_any))]
#![feature(ptr_internals)]
#![feature(ptr_metadata)]
#![feature(ptr_sub_ptr)]
#![feature(receiver_trait)]
+#![feature(saturating_int_impl)]
#![feature(set_ptr_value)]
#![feature(slice_from_ptr_range)]
#![feature(slice_group_by)]
@@ -145,6 +149,7 @@
#![feature(unchecked_math)]
#![feature(unicode_internals)]
#![feature(unsize)]
+#![feature(utf8_chunks)]
#![feature(std_internals)]
//
// Language features:
@@ -164,7 +169,7 @@
#![cfg_attr(not(test), feature(generator_trait))]
#![feature(hashmap_internals)]
#![feature(lang_items)]
-#![feature(let_else)]
+#![cfg_attr(bootstrap, feature(let_else))]
#![feature(min_specialization)]
#![feature(negative_impls)]
#![feature(never_type)]
@@ -178,6 +183,7 @@
#![feature(unboxed_closures)]
#![feature(unsized_fn_params)]
#![feature(c_unwind)]
+#![feature(with_negative_coherence)]
//
// Rustdoc features:
#![feature(doc_cfg)]
diff --git a/library/alloc/src/macros.rs b/library/alloc/src/macros.rs
index 88eb6aa7a..5198bf297 100644
--- a/library/alloc/src/macros.rs
+++ b/library/alloc/src/macros.rs
@@ -107,6 +107,8 @@ macro_rules! vec {
/// format!("test");
/// format!("hello {}", "world!");
/// format!("x = {}, y = {y}", 10, y = 30);
+/// let (x, y) = (1, 2);
+/// format!("{x} + {y} = 3");
/// ```
#[macro_export]
#[stable(feature = "rust1", since = "1.0.0")]
diff --git a/library/alloc/src/rc.rs b/library/alloc/src/rc.rs
index b89b03683..6d247681c 100644
--- a/library/alloc/src/rc.rs
+++ b/library/alloc/src/rc.rs
@@ -1142,7 +1142,7 @@ impl<T: Clone> Rc<T> {
/// be cloned.
///
/// See also [`get_mut`], which will fail rather than cloning the inner value
- /// or diassociating [`Weak`] pointers.
+ /// or disassociating [`Weak`] pointers.
///
/// [`clone`]: Clone::clone
/// [`get_mut`]: Rc::get_mut
diff --git a/library/alloc/src/slice.rs b/library/alloc/src/slice.rs
index 63d4d9452..bcd3f49e2 100644
--- a/library/alloc/src/slice.rs
+++ b/library/alloc/src/slice.rs
@@ -1,82 +1,12 @@
-//! A dynamically-sized view into a contiguous sequence, `[T]`.
+//! Utilities for the slice primitive type.
//!
//! *[See also the slice primitive type](slice).*
//!
-//! Slices are a view into a block of memory represented as a pointer and a
-//! length.
+//! Most of the structs in this module are iterator types which can only be created
+//! using a certain function. For example, `slice.iter()` yields an [`Iter`].
//!
-//! ```
-//! // slicing a Vec
-//! let vec = vec![1, 2, 3];
-//! let int_slice = &vec[..];
-//! // coercing an array to a slice
-//! let str_slice: &[&str] = &["one", "two", "three"];
-//! ```
-//!
-//! Slices are either mutable or shared. The shared slice type is `&[T]`,
-//! while the mutable slice type is `&mut [T]`, where `T` represents the element
-//! type. For example, you can mutate the block of memory that a mutable slice
-//! points to:
-//!
-//! ```
-//! let x = &mut [1, 2, 3];
-//! x[1] = 7;
-//! assert_eq!(x, &[1, 7, 3]);
-//! ```
-//!
-//! Here are some of the things this module contains:
-//!
-//! ## Structs
-//!
-//! There are several structs that are useful for slices, such as [`Iter`], which
-//! represents iteration over a slice.
-//!
-//! ## Trait Implementations
-//!
-//! There are several implementations of common traits for slices. Some examples
-//! include:
-//!
-//! * [`Clone`]
-//! * [`Eq`], [`Ord`] - for slices whose element type are [`Eq`] or [`Ord`].
-//! * [`Hash`] - for slices whose element type is [`Hash`].
-//!
-//! ## Iteration
-//!
-//! The slices implement `IntoIterator`. The iterator yields references to the
-//! slice elements.
-//!
-//! ```
-//! let numbers = &[0, 1, 2];
-//! for n in numbers {
-//! println!("{n} is a number!");
-//! }
-//! ```
-//!
-//! The mutable slice yields mutable references to the elements:
-//!
-//! ```
-//! let mut scores = [7, 8, 9];
-//! for score in &mut scores[..] {
-//! *score += 1;
-//! }
-//! ```
-//!
-//! This iterator yields mutable references to the slice's elements, so while
-//! the element type of the slice is `i32`, the element type of the iterator is
-//! `&mut i32`.
-//!
-//! * [`.iter`] and [`.iter_mut`] are the explicit methods to return the default
-//! iterators.
-//! * Further methods that return iterators are [`.split`], [`.splitn`],
-//! [`.chunks`], [`.windows`] and more.
-//!
-//! [`Hash`]: core::hash::Hash
-//! [`.iter`]: slice::iter
-//! [`.iter_mut`]: slice::iter_mut
-//! [`.split`]: slice::split
-//! [`.splitn`]: slice::splitn
-//! [`.chunks`]: slice::chunks
-//! [`.windows`]: slice::windows
+//! A few functions are provided to create a slice from a value reference
+//! or from a raw pointer.
#![stable(feature = "rust1", since = "1.0.0")]
// Many of the usings in this module are only used in the test configuration.
// It's cleaner to just turn off the unused_imports warning than to fix them.
@@ -1024,7 +954,7 @@ where
// Consume the greater side.
// If equal, prefer the right run to maintain stability.
unsafe {
- let to_copy = if is_less(&*right.offset(-1), &*left.offset(-1)) {
+ let to_copy = if is_less(&*right.sub(1), &*left.sub(1)) {
decrement_and_get(left)
} else {
decrement_and_get(right)
@@ -1038,12 +968,12 @@ where
unsafe fn get_and_increment<T>(ptr: &mut *mut T) -> *mut T {
let old = *ptr;
- *ptr = unsafe { ptr.offset(1) };
+ *ptr = unsafe { ptr.add(1) };
old
}
unsafe fn decrement_and_get<T>(ptr: &mut *mut T) -> *mut T {
- *ptr = unsafe { ptr.offset(-1) };
+ *ptr = unsafe { ptr.sub(1) };
*ptr
}
diff --git a/library/alloc/src/str.rs b/library/alloc/src/str.rs
index d5ed2c4ad..b28d20cda 100644
--- a/library/alloc/src/str.rs
+++ b/library/alloc/src/str.rs
@@ -1,26 +1,6 @@
-//! Unicode string slices.
+//! Utilities for the `str` primitive type.
//!
//! *[See also the `str` primitive type](str).*
-//!
-//! The `&str` type is one of the two main string types, the other being `String`.
-//! Unlike its `String` counterpart, its contents are borrowed.
-//!
-//! # Basic Usage
-//!
-//! A basic string declaration of `&str` type:
-//!
-//! ```
-//! let hello_world = "Hello, World!";
-//! ```
-//!
-//! Here we have declared a string literal, also known as a string slice.
-//! String literals have a static lifetime, which means the string `hello_world`
-//! is guaranteed to be valid for the duration of the entire program.
-//! We can explicitly specify `hello_world`'s lifetime as well:
-//!
-//! ```
-//! let hello_world: &'static str = "Hello, world!";
-//! ```
#![stable(feature = "rust1", since = "1.0.0")]
// Many of the usings in this module are only used in the test configuration.
@@ -71,6 +51,8 @@ pub use core::str::{RSplit, Split};
pub use core::str::{RSplitN, SplitN};
#[stable(feature = "rust1", since = "1.0.0")]
pub use core::str::{RSplitTerminator, SplitTerminator};
+#[unstable(feature = "utf8_chunks", issue = "99543")]
+pub use core::str::{Utf8Chunk, Utf8Chunks};
/// Note: `str` in `Concat<str>` is not meaningful here.
/// This type parameter of the trait only exists to enable another impl.
diff --git a/library/alloc/src/string.rs b/library/alloc/src/string.rs
index a5118e533..f2448396c 100644
--- a/library/alloc/src/string.rs
+++ b/library/alloc/src/string.rs
@@ -44,6 +44,8 @@
#[cfg(not(no_global_oom_handling))]
use core::char::{decode_utf16, REPLACEMENT_CHARACTER};
+#[cfg(not(bootstrap))]
+use core::error::Error;
use core::fmt;
use core::hash;
use core::iter::FusedIterator;
@@ -58,9 +60,9 @@ use core::ops::Bound::{Excluded, Included, Unbounded};
use core::ops::{self, Index, IndexMut, Range, RangeBounds};
use core::ptr;
use core::slice;
-#[cfg(not(no_global_oom_handling))]
-use core::str::lossy;
use core::str::pattern::Pattern;
+#[cfg(not(no_global_oom_handling))]
+use core::str::Utf8Chunks;
#[cfg(not(no_global_oom_handling))]
use crate::borrow::{Cow, ToOwned};
@@ -628,11 +630,11 @@ impl String {
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
pub fn from_utf8_lossy(v: &[u8]) -> Cow<'_, str> {
- let mut iter = lossy::Utf8Lossy::from_bytes(v).chunks();
+ let mut iter = Utf8Chunks::new(v);
let first_valid = if let Some(chunk) = iter.next() {
- let lossy::Utf8LossyChunk { valid, broken } = chunk;
- if broken.is_empty() {
+ let valid = chunk.valid();
+ if chunk.invalid().is_empty() {
debug_assert_eq!(valid.len(), v.len());
return Cow::Borrowed(valid);
}
@@ -647,9 +649,9 @@ impl String {
res.push_str(first_valid);
res.push_str(REPLACEMENT);
- for lossy::Utf8LossyChunk { valid, broken } in iter {
- res.push_str(valid);
- if !broken.is_empty() {
+ for chunk in iter {
+ res.push_str(chunk.valid());
+ if !chunk.invalid().is_empty() {
res.push_str(REPLACEMENT);
}
}
@@ -1080,7 +1082,8 @@ impl String {
/// current length. The allocator may reserve more space to speculatively
/// avoid frequent allocations. After calling `try_reserve`, capacity will be
/// greater than or equal to `self.len() + additional` if it returns
- /// `Ok(())`. Does nothing if capacity is already sufficient.
+ /// `Ok(())`. Does nothing if capacity is already sufficient. This method
+ /// preserves the contents even if an error occurs.
///
/// # Errors
///
@@ -1938,6 +1941,24 @@ impl fmt::Display for FromUtf16Error {
}
}
+#[cfg(not(bootstrap))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Error for FromUtf8Error {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ "invalid utf-8"
+ }
+}
+
+#[cfg(not(bootstrap))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Error for FromUtf16Error {
+ #[allow(deprecated)]
+ fn description(&self) -> &str {
+ "invalid utf-16"
+ }
+}
+
#[cfg(not(no_global_oom_handling))]
#[stable(feature = "rust1", since = "1.0.0")]
impl Clone for String {
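
`from_utf8_lossy` now drives `core::str::Utf8Chunks`, which splits a byte slice into alternating valid/invalid runs. A hedged sketch of the iterator (nightly only; `utf8_chunks` is unstable, tracking issue #99543):

    #![feature(utf8_chunks)]
    use core::str::Utf8Chunks;

    fn main() {
        // 0xF1 starts a multi-byte sequence that never completes, so it
        // forms an invalid chunk between the two valid runs.
        for chunk in Utf8Chunks::new(b"foo\xF1bar") {
            println!("valid: {:?}, invalid: {:?}", chunk.valid(), chunk.invalid());
        }
        // valid: "foo", invalid: [241]
        // valid: "bar", invalid: []
    }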
diff --git a/library/alloc/src/sync.rs b/library/alloc/src/sync.rs
index 4c03cc3ed..4377edeee 100644
--- a/library/alloc/src/sync.rs
+++ b/library/alloc/src/sync.rs
@@ -2763,3 +2763,25 @@ fn data_offset_align(align: usize) -> usize {
let layout = Layout::new::<ArcInner<()>>();
layout.size() + layout.padding_needed_for(align)
}
+
+#[cfg(not(bootstrap))]
+#[stable(feature = "arc_error", since = "1.52.0")]
+impl<T: core::error::Error + ?Sized> core::error::Error for Arc<T> {
+ #[allow(deprecated, deprecated_in_future)]
+ fn description(&self) -> &str {
+ core::error::Error::description(&**self)
+ }
+
+ #[allow(deprecated)]
+ fn cause(&self) -> Option<&dyn core::error::Error> {
+ core::error::Error::cause(&**self)
+ }
+
+ fn source(&self) -> Option<&(dyn core::error::Error + 'static)> {
+ core::error::Error::source(&**self)
+ }
+
+ fn provide<'a>(&'a self, req: &mut core::any::Demand<'a>) {
+ core::error::Error::provide(&**self, req);
+ }
+}
diff --git a/library/alloc/src/sync/tests.rs b/library/alloc/src/sync/tests.rs
index 202d0e7f0..0fae8953a 100644
--- a/library/alloc/src/sync/tests.rs
+++ b/library/alloc/src/sync/tests.rs
@@ -618,3 +618,22 @@ fn test_arc_cyclic_two_refs() {
assert_eq!(Arc::strong_count(&two_refs), 3);
assert_eq!(Arc::weak_count(&two_refs), 2);
}
+
+/// Test for Arc::drop bug (https://github.com/rust-lang/rust/issues/55005)
+#[test]
+#[cfg(miri)] // relies on Stacked Borrows in Miri
+fn arc_drop_dereferenceable_race() {
+ // The bug seems to take up to 700 iterations to reproduce with most seeds (tested 0-9).
+ for _ in 0..750 {
+ let arc_1 = Arc::new(());
+ let arc_2 = arc_1.clone();
+ let thread = thread::spawn(|| drop(arc_2));
+ // Spin a bit; makes the race more likely to appear
+ let mut i = 0;
+ while i < 256 {
+ i += 1;
+ }
+ drop(arc_1);
+ thread.join().unwrap();
+ }
+}
diff --git a/library/alloc/src/vec/drain.rs b/library/alloc/src/vec/drain.rs
index 5cdee0bd4..5b73906a1 100644
--- a/library/alloc/src/vec/drain.rs
+++ b/library/alloc/src/vec/drain.rs
@@ -1,7 +1,7 @@
use crate::alloc::{Allocator, Global};
use core::fmt;
use core::iter::{FusedIterator, TrustedLen};
-use core::mem;
+use core::mem::{self, ManuallyDrop};
use core::ptr::{self, NonNull};
use core::slice::{self};
@@ -65,6 +65,77 @@ impl<'a, T, A: Allocator> Drain<'a, T, A> {
pub fn allocator(&self) -> &A {
unsafe { self.vec.as_ref().allocator() }
}
+
+ /// Keep unyielded elements in the source `Vec`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(drain_keep_rest)]
+ ///
+ /// let mut vec = vec!['a', 'b', 'c'];
+ /// let mut drain = vec.drain(..);
+ ///
+ /// assert_eq!(drain.next().unwrap(), 'a');
+ ///
+ /// // This call keeps 'b' and 'c' in the vec.
+ /// drain.keep_rest();
+ ///
+ /// // Had we not called `keep_rest()`,
+ /// // `vec` would be empty.
+ /// assert_eq!(vec, ['b', 'c']);
+ /// ```
+ #[unstable(feature = "drain_keep_rest", issue = "101122")]
+ pub fn keep_rest(self) {
+ // At this moment layout looks like this:
+ //
+ // [head] [yielded by next] [unyielded] [yielded by next_back] [tail]
+ // ^-- start \_________/-- unyielded_len \____/-- self.tail_len
+ // ^-- unyielded_ptr ^-- tail
+ //
+ // Normally `Drop` impl would drop [unyielded] and then move [tail] to the `start`.
+ // Here we want to
+ // 1. Move [unyielded] to `start`
+ // 2. Move [tail] to a new start at `start + len(unyielded)`
+ // 3. Update length of the original vec to `len(head) + len(unyielded) + len(tail)`
+ // a. In case of ZST, this is the only thing we want to do
+ // 4. Do *not* drop self, as everything is put in a consistent state already, there is nothing to do
+ let mut this = ManuallyDrop::new(self);
+
+ unsafe {
+ let source_vec = this.vec.as_mut();
+
+ let start = source_vec.len();
+ let tail = this.tail_start;
+
+ let unyielded_len = this.iter.len();
+ let unyielded_ptr = this.iter.as_slice().as_ptr();
+
+ // ZSTs have no identity, so we don't need to move them around.
+ let needs_move = mem::size_of::<T>() != 0;
+
+ if needs_move {
+ let start_ptr = source_vec.as_mut_ptr().add(start);
+
+ // memmove back unyielded elements
+ if unyielded_ptr != start_ptr {
+ let src = unyielded_ptr;
+ let dst = start_ptr;
+
+ ptr::copy(src, dst, unyielded_len);
+ }
+
+ // memmove back untouched tail
+ if tail != (start + unyielded_len) {
+ let src = source_vec.as_ptr().add(tail);
+ let dst = start_ptr.add(unyielded_len);
+ ptr::copy(src, dst, this.tail_len);
+ }
+ }
+
+ source_vec.set_len(start + unyielded_len + this.tail_len);
+ }
+ }
}
#[stable(feature = "vec_drain_as_slice", since = "1.46.0")]
diff --git a/library/alloc/src/vec/drain_filter.rs b/library/alloc/src/vec/drain_filter.rs
index 3c37c92ae..8c03f1692 100644
--- a/library/alloc/src/vec/drain_filter.rs
+++ b/library/alloc/src/vec/drain_filter.rs
@@ -1,6 +1,7 @@
use crate::alloc::{Allocator, Global};
-use core::ptr::{self};
-use core::slice::{self};
+use core::mem::{self, ManuallyDrop};
+use core::ptr;
+use core::slice;
use super::Vec;
@@ -54,6 +55,61 @@ where
pub fn allocator(&self) -> &A {
self.vec.allocator()
}
+
+ /// Keep unyielded elements in the source `Vec`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(drain_filter)]
+ /// #![feature(drain_keep_rest)]
+ ///
+ /// let mut vec = vec!['a', 'b', 'c'];
+ /// let mut drain = vec.drain_filter(|_| true);
+ ///
+ /// assert_eq!(drain.next().unwrap(), 'a');
+ ///
+ /// // This call keeps 'b' and 'c' in the vec.
+ /// drain.keep_rest();
+ ///
+ /// // Had we not called `keep_rest()`,
+ /// // `vec` would be empty.
+ /// assert_eq!(vec, ['b', 'c']);
+ /// ```
+ #[unstable(feature = "drain_keep_rest", issue = "101122")]
+ pub fn keep_rest(self) {
+ // At this moment layout looks like this:
+ //
+ // _____________________/-- old_len
+ // / \
+ // [kept] [yielded] [tail]
+ // \_______/ ^-- idx
+ // \-- del
+ //
+ // Normally `Drop` impl would drop [tail] (via .for_each(drop), ie still calling `pred`)
+ //
+ // 1. Move [tail] after [kept]
+ // 2. Update length of the original vec to `old_len - del`
+ // a. In case of ZST, this is the only thing we want to do
+ // 3. Do *not* drop self, as everything is put in a consistent state already, there is nothing to do
+ let mut this = ManuallyDrop::new(self);
+
+ unsafe {
+ // ZSTs have no identity, so we don't need to move them around.
+ let needs_move = mem::size_of::<T>() != 0;
+
+ if needs_move && this.idx < this.old_len && this.del > 0 {
+ let ptr = this.vec.as_mut_ptr();
+ let src = ptr.add(this.idx);
+ let dst = src.sub(this.del);
+ let tail_len = this.old_len - this.idx;
+ src.copy_to(dst, tail_len);
+ }
+
+ let new_len = this.old_len - this.del;
+ this.vec.set_len(new_len);
+ }
+ }
}
#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
diff --git a/library/alloc/src/vec/in_place_collect.rs b/library/alloc/src/vec/in_place_collect.rs
index 55dcb84ad..b211421b2 100644
--- a/library/alloc/src/vec/in_place_collect.rs
+++ b/library/alloc/src/vec/in_place_collect.rs
@@ -267,7 +267,7 @@ where
// one slot in the underlying storage will have been freed up and we can immediately
// write back the result.
unsafe {
- let dst = dst_buf.offset(i as isize);
+ let dst = dst_buf.add(i);
debug_assert!(dst as *const _ <= end, "InPlaceIterable contract violation");
ptr::write(dst, self.__iterator_get_unchecked(i));
// Since this executes user code which can panic we have to bump the pointer
diff --git a/library/alloc/src/vec/into_iter.rs b/library/alloc/src/vec/into_iter.rs
index 1b483e3fc..b4157fd58 100644
--- a/library/alloc/src/vec/into_iter.rs
+++ b/library/alloc/src/vec/into_iter.rs
@@ -4,7 +4,6 @@ use crate::alloc::{Allocator, Global};
use crate::raw_vec::RawVec;
use core::array;
use core::fmt;
-use core::intrinsics::arith_offset;
use core::iter::{
FusedIterator, InPlaceIterable, SourceIter, TrustedLen, TrustedRandomAccessNoCoerce,
};
@@ -148,19 +147,19 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
#[inline]
fn next(&mut self) -> Option<T> {
- if self.ptr as *const _ == self.end {
+ if self.ptr == self.end {
None
} else if mem::size_of::<T>() == 0 {
// purposefully don't use 'ptr.offset' because for
// vectors with 0-size elements this would return the
// same pointer.
- self.ptr = unsafe { arith_offset(self.ptr as *const i8, 1) as *mut T };
+ self.ptr = self.ptr.wrapping_byte_add(1);
// Make up a value of this ZST.
Some(unsafe { mem::zeroed() })
} else {
let old = self.ptr;
- self.ptr = unsafe { self.ptr.offset(1) };
+ self.ptr = unsafe { self.ptr.add(1) };
Some(unsafe { ptr::read(old) })
}
@@ -184,7 +183,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
// SAFETY: due to unchecked casts of unsigned amounts to signed offsets the wraparound
// effectively results in unsigned pointers representing positions 0..usize::MAX,
// which is valid for ZSTs.
- self.ptr = unsafe { arith_offset(self.ptr as *const i8, step_size as isize) as *mut T }
+ self.ptr = self.ptr.wrapping_byte_add(step_size);
} else {
// SAFETY: the min() above ensures that step_size is in bounds
self.ptr = unsafe { self.ptr.add(step_size) };
@@ -217,7 +216,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
return Err(unsafe { array::IntoIter::new_unchecked(raw_ary, 0..len) });
}
- self.ptr = unsafe { arith_offset(self.ptr as *const i8, N as isize) as *mut T };
+ self.ptr = self.ptr.wrapping_byte_add(N);
// Safety: ditto
return Ok(unsafe { MaybeUninit::array_assume_init(raw_ary) });
}
@@ -267,12 +266,12 @@ impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
None
} else if mem::size_of::<T>() == 0 {
// See above for why 'ptr.offset' isn't used
- self.end = unsafe { arith_offset(self.end as *const i8, -1) as *mut T };
+ self.end = self.end.wrapping_byte_sub(1);
// Make up a value of this ZST.
Some(unsafe { mem::zeroed() })
} else {
- self.end = unsafe { self.end.offset(-1) };
+ self.end = unsafe { self.end.sub(1) };
Some(unsafe { ptr::read(self.end) })
}
@@ -283,12 +282,10 @@ impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
let step_size = self.len().min(n);
if mem::size_of::<T>() == 0 {
// SAFETY: same as for advance_by()
- self.end = unsafe {
- arith_offset(self.end as *const i8, step_size.wrapping_neg() as isize) as *mut T
- }
+ self.end = self.end.wrapping_byte_sub(step_size);
} else {
// SAFETY: same as for advance_by()
- self.end = unsafe { self.end.offset(step_size.wrapping_neg() as isize) };
+ self.end = unsafe { self.end.sub(step_size) };
}
let to_drop = ptr::slice_from_raw_parts_mut(self.end as *mut T, step_size);
// SAFETY: same as for advance_by()
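
For zero-sized element types the iterator's `ptr`/`end` fields never address real storage; they act as a counter, which is why the old code went through `arith_offset` on `*const i8`. The `wrapping_byte_add`/`wrapping_byte_sub` methods (behind the `pointer_byte_offsets` feature enabled in lib.rs above, unstable at the time of this commit) express the same wrapping byte arithmetic without the casts. A sketch (not from the diff):

    fn main() {
        struct Zst;
        assert_eq!(core::mem::size_of::<Zst>(), 0);

        // A dangling pointer is fine here: it is only ever used as a
        // counter and never dereferenced.
        let start: *const Zst = core::ptr::NonNull::<Zst>::dangling().as_ptr();
        let mut p = start;
        for _ in 0..3 {
            p = p.wrapping_byte_add(1); // one "element" per iteration
        }
        assert_eq!(p as usize - start as usize, 3);
    }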
diff --git a/library/alloc/src/vec/is_zero.rs b/library/alloc/src/vec/is_zero.rs
index 92a32779b..2e025c8a4 100644
--- a/library/alloc/src/vec/is_zero.rs
+++ b/library/alloc/src/vec/is_zero.rs
@@ -1,3 +1,5 @@
+use core::num::{Saturating, Wrapping};
+
use crate::boxed::Box;
#[rustc_specialization_trait]
@@ -144,3 +146,17 @@ impl_is_zero_option_of_nonzero!(
NonZeroUsize,
NonZeroIsize,
);
+
+unsafe impl<T: IsZero> IsZero for Wrapping<T> {
+ #[inline]
+ fn is_zero(&self) -> bool {
+ self.0.is_zero()
+ }
+}
+
+unsafe impl<T: IsZero> IsZero for Saturating<T> {
+ #[inline]
+ fn is_zero(&self) -> bool {
+ self.0.is_zero()
+ }
+}
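
`IsZero` is the specialization trait behind the zeroed-allocation fast path for `vec![x; n]`; the two impls above extend it through the `Wrapping` and `Saturating` wrappers. A hedged sketch of what now qualifies for that path (`Saturating` was still unstable at this point, hence the `saturating_int_impl` feature added in lib.rs):

    #![feature(saturating_int_impl)] // not needed on later toolchains
    use core::num::{Saturating, Wrapping};

    fn main() {
        // Both of these can now be serviced by `alloc_zeroed` instead of
        // allocating and then writing each element in a loop.
        let w = vec![Wrapping(0u32); 1024];
        let s = vec![Saturating(0u8); 1024];
        assert!(w.iter().all(|x| x.0 == 0));
        assert!(s.iter().all(|x| x.0 == 0));
    }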
diff --git a/library/alloc/src/vec/mod.rs b/library/alloc/src/vec/mod.rs
index fa9f2131c..60b36af5e 100644
--- a/library/alloc/src/vec/mod.rs
+++ b/library/alloc/src/vec/mod.rs
@@ -59,7 +59,7 @@ use core::cmp::Ordering;
use core::convert::TryFrom;
use core::fmt;
use core::hash::{Hash, Hasher};
-use core::intrinsics::{arith_offset, assume};
+use core::intrinsics::assume;
use core::iter;
#[cfg(not(no_global_oom_handling))]
use core::iter::FromIterator;
@@ -436,7 +436,7 @@ impl<T> Vec<T> {
/// an explanation of the difference between length and capacity, see
/// *[Capacity and reallocation]*.
///
- /// If it is imporant to know the exact allocated capacity of a `Vec`,
+ /// If it is important to know the exact allocated capacity of a `Vec`,
/// always use the [`capacity`] method after construction.
///
/// For `Vec<T>` where `T` is a zero-sized type, there will be no allocation
@@ -542,8 +542,8 @@ impl<T> Vec<T> {
///
/// unsafe {
/// // Overwrite memory with 4, 5, 6
- /// for i in 0..len as isize {
- /// ptr::write(p.offset(i), 4 + i);
+ /// for i in 0..len {
+ /// ptr::write(p.add(i), 4 + i);
/// }
///
/// // Put everything back together into a Vec
@@ -591,7 +591,7 @@ impl<T, A: Allocator> Vec<T, A> {
/// an explanation of the difference between length and capacity, see
/// *[Capacity and reallocation]*.
///
- /// If it is imporant to know the exact allocated capacity of a `Vec`,
+ /// If it is important to know the exact allocated capacity of a `Vec`,
/// always use the [`capacity`] method after construction.
///
/// For `Vec<T, A>` where `T` is a zero-sized type, there will be no allocation
@@ -702,8 +702,8 @@ impl<T, A: Allocator> Vec<T, A> {
///
/// unsafe {
/// // Overwrite memory with 4, 5, 6
- /// for i in 0..len as isize {
- /// ptr::write(p.offset(i), 4 + i);
+ /// for i in 0..len {
+ /// ptr::write(p.add(i), 4 + i);
/// }
///
/// // Put everything back together into a Vec
@@ -875,7 +875,8 @@ impl<T, A: Allocator> Vec<T, A> {
/// in the given `Vec<T>`. The collection may reserve more space to speculatively avoid
/// frequent reallocations. After calling `try_reserve`, capacity will be
/// greater than or equal to `self.len() + additional` if it returns
- /// `Ok(())`. Does nothing if capacity is already sufficient.
+ /// `Ok(())`. Does nothing if capacity is already sufficient. This method
+ /// preserves the contents even if an error occurs.
///
/// # Errors
///
@@ -1393,7 +1394,7 @@ impl<T, A: Allocator> Vec<T, A> {
if index < len {
// Shift everything over to make space. (Duplicating the
// `index`th element into two consecutive places.)
- ptr::copy(p, p.offset(1), len - index);
+ ptr::copy(p, p.add(1), len - index);
} else if index == len {
// No elements need shifting.
} else {
@@ -1455,7 +1456,7 @@ impl<T, A: Allocator> Vec<T, A> {
ret = ptr::read(ptr);
// Shift everything down to fill in that spot.
- ptr::copy(ptr.offset(1), ptr, len - index - 1);
+ ptr::copy(ptr.add(1), ptr, len - index - 1);
}
self.set_len(len - 1);
ret
@@ -2408,7 +2409,7 @@ impl<T, A: Allocator> Vec<T, A> {
// Write all elements except the last one
for _ in 1..n {
ptr::write(ptr, value.next());
- ptr = ptr.offset(1);
+ ptr = ptr.add(1);
// Increment the length in every step in case next() panics
local_len.increment_len(1);
}
@@ -2677,7 +2678,7 @@ impl<T, A: Allocator> IntoIterator for Vec<T, A> {
let alloc = ManuallyDrop::new(ptr::read(me.allocator()));
let begin = me.as_mut_ptr();
let end = if mem::size_of::<T>() == 0 {
- arith_offset(begin as *const i8, me.len() as isize) as *const T
+ begin.wrapping_byte_add(me.len())
} else {
begin.add(me.len()) as *const T
};
@@ -2927,6 +2928,8 @@ unsafe impl<#[may_dangle] T, A: Allocator> Drop for Vec<T, A> {
#[rustc_const_unstable(feature = "const_default_impls", issue = "87864")]
impl<T> const Default for Vec<T> {
/// Creates an empty `Vec<T>`.
+ ///
+ /// The vector will not allocate until elements are pushed onto it.
fn default() -> Vec<T> {
Vec::new()
}
diff --git a/library/alloc/src/vec/spec_extend.rs b/library/alloc/src/vec/spec_extend.rs
index 506ee0ecf..1ea9c827a 100644
--- a/library/alloc/src/vec/spec_extend.rs
+++ b/library/alloc/src/vec/spec_extend.rs
@@ -39,7 +39,7 @@ where
let mut local_len = SetLenOnDrop::new(&mut self.len);
iterator.for_each(move |element| {
ptr::write(ptr, element);
- ptr = ptr.offset(1);
+ ptr = ptr.add(1);
// Since the loop executes user code which can panic we have to bump the pointer
// after each step.
// NB can't overflow since we would have had to alloc the address space
diff --git a/library/alloc/tests/lib.rs b/library/alloc/tests/lib.rs
index d83cd29dd..490c0d8f7 100644
--- a/library/alloc/tests/lib.rs
+++ b/library/alloc/tests/lib.rs
@@ -38,11 +38,13 @@
#![feature(const_str_from_utf8)]
#![feature(nonnull_slice_from_raw_parts)]
#![feature(panic_update_hook)]
+#![feature(pointer_is_aligned)]
#![feature(slice_flatten)]
#![feature(thin_box)]
#![feature(bench_black_box)]
#![feature(strict_provenance)]
#![feature(once_cell)]
+#![feature(drain_keep_rest)]
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
diff --git a/library/alloc/tests/str.rs b/library/alloc/tests/str.rs
index 7379569dd..e30329aa1 100644
--- a/library/alloc/tests/str.rs
+++ b/library/alloc/tests/str.rs
@@ -1010,11 +1010,11 @@ fn test_as_bytes_fail() {
fn test_as_ptr() {
let buf = "hello".as_ptr();
unsafe {
- assert_eq!(*buf.offset(0), b'h');
- assert_eq!(*buf.offset(1), b'e');
- assert_eq!(*buf.offset(2), b'l');
- assert_eq!(*buf.offset(3), b'l');
- assert_eq!(*buf.offset(4), b'o');
+ assert_eq!(*buf.add(0), b'h');
+ assert_eq!(*buf.add(1), b'e');
+ assert_eq!(*buf.add(2), b'l');
+ assert_eq!(*buf.add(3), b'l');
+ assert_eq!(*buf.add(4), b'o');
}
}
diff --git a/library/alloc/tests/string.rs b/library/alloc/tests/string.rs
index b6836fdc8..99d1296a4 100644
--- a/library/alloc/tests/string.rs
+++ b/library/alloc/tests/string.rs
@@ -693,12 +693,6 @@ fn test_try_reserve() {
const MAX_CAP: usize = isize::MAX as usize;
const MAX_USIZE: usize = usize::MAX;
- // On 16/32-bit, we check that allocations don't exceed isize::MAX,
- // on 64-bit, we assume the OS will give an OOM for such a ridiculous size.
- // Any platform that succeeds for these requests is technically broken with
- // ptr::offset because LLVM is the worst.
- let guards_against_isize = usize::BITS < 64;
-
{
// Note: basic stuff is checked by test_reserve
let mut empty_string: String = String::new();
@@ -712,35 +706,19 @@ fn test_try_reserve() {
panic!("isize::MAX shouldn't trigger an overflow!");
}
- if guards_against_isize {
- // Check isize::MAX + 1 does count as overflow
- assert_matches!(
- empty_string.try_reserve(MAX_CAP + 1).map_err(|e| e.kind()),
- Err(CapacityOverflow),
- "isize::MAX + 1 should trigger an overflow!"
- );
-
- // Check usize::MAX does count as overflow
- assert_matches!(
- empty_string.try_reserve(MAX_USIZE).map_err(|e| e.kind()),
- Err(CapacityOverflow),
- "usize::MAX should trigger an overflow!"
- );
- } else {
- // Check isize::MAX + 1 is an OOM
- assert_matches!(
- empty_string.try_reserve(MAX_CAP + 1).map_err(|e| e.kind()),
- Err(AllocError { .. }),
- "isize::MAX + 1 should trigger an OOM!"
- );
-
- // Check usize::MAX is an OOM
- assert_matches!(
- empty_string.try_reserve(MAX_USIZE).map_err(|e| e.kind()),
- Err(AllocError { .. }),
- "usize::MAX should trigger an OOM!"
- );
- }
+ // Check isize::MAX + 1 does count as overflow
+ assert_matches!(
+ empty_string.try_reserve(MAX_CAP + 1).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "isize::MAX + 1 should trigger an overflow!"
+ );
+
+ // Check usize::MAX does count as overflow
+ assert_matches!(
+ empty_string.try_reserve(MAX_USIZE).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "usize::MAX should trigger an overflow!"
+ );
}
{
@@ -753,19 +731,13 @@ fn test_try_reserve() {
if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10).map_err(|e| e.kind()) {
panic!("isize::MAX shouldn't trigger an overflow!");
}
- if guards_against_isize {
- assert_matches!(
- ten_bytes.try_reserve(MAX_CAP - 9).map_err(|e| e.kind()),
- Err(CapacityOverflow),
- "isize::MAX + 1 should trigger an overflow!"
- );
- } else {
- assert_matches!(
- ten_bytes.try_reserve(MAX_CAP - 9).map_err(|e| e.kind()),
- Err(AllocError { .. }),
- "isize::MAX + 1 should trigger an OOM!"
- );
- }
+
+ assert_matches!(
+ ten_bytes.try_reserve(MAX_CAP - 9).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "isize::MAX + 1 should trigger an overflow!"
+ );
+
// Should always overflow in the add-to-len
assert_matches!(
ten_bytes.try_reserve(MAX_USIZE).map_err(|e| e.kind()),
@@ -785,8 +757,6 @@ fn test_try_reserve_exact() {
const MAX_CAP: usize = isize::MAX as usize;
const MAX_USIZE: usize = usize::MAX;
- let guards_against_isize = usize::BITS < 64;
-
{
let mut empty_string: String = String::new();
@@ -799,31 +769,17 @@ fn test_try_reserve_exact() {
panic!("isize::MAX shouldn't trigger an overflow!");
}
- if guards_against_isize {
- assert_matches!(
- empty_string.try_reserve_exact(MAX_CAP + 1).map_err(|e| e.kind()),
- Err(CapacityOverflow),
- "isize::MAX + 1 should trigger an overflow!"
- );
-
- assert_matches!(
- empty_string.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind()),
- Err(CapacityOverflow),
- "usize::MAX should trigger an overflow!"
- );
- } else {
- assert_matches!(
- empty_string.try_reserve_exact(MAX_CAP + 1).map_err(|e| e.kind()),
- Err(AllocError { .. }),
- "isize::MAX + 1 should trigger an OOM!"
- );
-
- assert_matches!(
- empty_string.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind()),
- Err(AllocError { .. }),
- "usize::MAX should trigger an OOM!"
- );
- }
+ assert_matches!(
+ empty_string.try_reserve_exact(MAX_CAP + 1).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "isize::MAX + 1 should trigger an overflow!"
+ );
+
+ assert_matches!(
+ empty_string.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "usize::MAX should trigger an overflow!"
+ );
}
{
@@ -839,19 +795,13 @@ fn test_try_reserve_exact() {
{
panic!("isize::MAX shouldn't trigger an overflow!");
}
- if guards_against_isize {
- assert_matches!(
- ten_bytes.try_reserve_exact(MAX_CAP - 9).map_err(|e| e.kind()),
- Err(CapacityOverflow),
- "isize::MAX + 1 should trigger an overflow!"
- );
- } else {
- assert_matches!(
- ten_bytes.try_reserve_exact(MAX_CAP - 9).map_err(|e| e.kind()),
- Err(AllocError { .. }),
- "isize::MAX + 1 should trigger an OOM!"
- );
- }
+
+ assert_matches!(
+ ten_bytes.try_reserve_exact(MAX_CAP - 9).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "isize::MAX + 1 should trigger an overflow!"
+ );
+
assert_matches!(
ten_bytes.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind()),
Err(CapacityOverflow),
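The rewritten assertions above encode the new behaviour: requests above isize::MAX now report CapacityOverflow on every pointer width, instead of being left to the allocator to OOM on 64-bit targets. A minimal sketch of the observable contract on stable Rust, where only the failure itself (not the unstable TryReserveErrorKind) can be inspected:

fn main() {
    let mut s = String::new();
    let max_cap = isize::MAX as usize;
    // Reserving more than isize::MAX bytes fails without allocating...
    assert!(s.try_reserve(max_cap + 1).is_err());
    // ...and so does the pathological usize::MAX request.
    assert!(s.try_reserve_exact(usize::MAX).is_err());
}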
diff --git a/library/alloc/tests/thin_box.rs b/library/alloc/tests/thin_box.rs
index 368aa564f..e008b0cc3 100644
--- a/library/alloc/tests/thin_box.rs
+++ b/library/alloc/tests/thin_box.rs
@@ -48,11 +48,11 @@ fn verify_aligned<T>(ptr: *const T) {
// practice these checks are mostly just smoke-detectors for an extremely
// broken `ThinBox` impl, since it's an extremely subtle piece of code.
let ptr = core::hint::black_box(ptr);
- let align = core::mem::align_of::<T>();
assert!(
- (ptr.addr() & (align - 1)) == 0 && !ptr.is_null(),
- "misaligned ThinBox data; valid pointers to `{}` should be aligned to {align}: {ptr:p}",
- core::any::type_name::<T>(),
+ ptr.is_aligned() && !ptr.is_null(),
+ "misaligned ThinBox data; valid pointers to `{ty}` should be aligned to {align}: {ptr:p}",
+ ty = core::any::type_name::<T>(),
+ align = core::mem::align_of::<T>(),
);
}
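The updated assertion swaps the manual mask arithmetic for `pointer::is_aligned`. A minimal standalone sketch of the same check, assuming a toolchain where `is_aligned` is available (it was still gated behind the `pointer_is_aligned` feature when this diff was written):

fn verify<T>(ptr: *const T) {
    assert!(
        ptr.is_aligned() && !ptr.is_null(),
        "pointer to `{ty}` should be aligned to {align}: {ptr:p}",
        ty = core::any::type_name::<T>(),
        align = core::mem::align_of::<T>(),
    );
}

fn main() {
    let x = 42u64;
    verify(&x as *const u64);
}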
diff --git a/library/alloc/tests/vec.rs b/library/alloc/tests/vec.rs
index b797e2375..f140fc414 100644
--- a/library/alloc/tests/vec.rs
+++ b/library/alloc/tests/vec.rs
@@ -294,6 +294,22 @@ fn test_retain() {
}
#[test]
+fn test_retain_predicate_order() {
+ for to_keep in [true, false] {
+ let mut number_of_executions = 0;
+ let mut vec = vec![1, 2, 3, 4];
+ let mut next_expected = 1;
+ vec.retain(|&x| {
+ assert_eq!(next_expected, x);
+ next_expected += 1;
+ number_of_executions += 1;
+ to_keep
+ });
+ assert_eq!(number_of_executions, 4);
+ }
+}
+
+#[test]
fn test_retain_pred_panic_with_hole() {
let v = (0..5).map(Rc::new).collect::<Vec<_>>();
catch_unwind(AssertUnwindSafe(|| {
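test_retain_predicate_order pins down the documented guarantee that `Vec::retain` calls the predicate on every element exactly once, in the original order, whether or not the elements are kept. A minimal illustration of that contract:

fn main() {
    let mut v = vec![1, 2, 3, 4];
    let mut visited = Vec::new();
    v.retain(|&x| {
        visited.push(x); // record the visit order
        x % 2 == 0
    });
    assert_eq!(visited, [1, 2, 3, 4]); // every element, in order, once
    assert_eq!(v, [2, 4]);
}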
@@ -355,6 +371,35 @@ fn test_retain_drop_panic() {
}
#[test]
+fn test_retain_maybeuninits() {
+ // This test is intended to be run under Miri.
+ use core::mem::MaybeUninit;
+ let mut vec: Vec<_> = [1i32, 2, 3, 4].map(|v| MaybeUninit::new(vec![v])).into();
+ vec.retain(|x| {
+ // SAFETY: `retain` visits every element of the Vec exactly once, in the
+ // original order, and every value was initialized when the Vec was created.
+ let v = unsafe { x.assume_init_ref()[0] };
+ if v & 1 == 0 {
+ return true;
+ }
+ // SAFETY: The value is initialized, and it won't be dropped by
+ // `Vec::retain` itself because `MaybeUninit` never drops its contents,
+ // so the drop below is the only one this value gets.
+ drop(unsafe { x.assume_init_read() });
+ false
+ });
+ let vec: Vec<i32> = vec
+ .into_iter()
+ .map(|x| unsafe {
+ // SAFETY: Every value dropped in the retain predicate was removed
+ // by `Vec::retain`; the values remaining here are still initialized.
+ x.assume_init()[0]
+ })
+ .collect();
+ assert_eq!(vec, [2, 4]);
+}
+
+#[test]
fn test_dedup() {
fn case(a: Vec<i32>, b: Vec<i32>) {
let mut v = a;
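The MaybeUninit test above leans on one property worth spelling out: `MaybeUninit<T>` has no drop glue, so a value moved out with `assume_init_read` is dropped exactly once, by whoever read it out. A minimal sketch of that property (Miri-clean under the stated assumption):

use core::mem::MaybeUninit;

fn main() {
    let slot = MaybeUninit::new(String::from("hello"));
    // Move the String out; the caller now owns the only copy.
    let s = unsafe { slot.assume_init_read() };
    drop(s);
    // Dropping `slot` itself runs no destructor: MaybeUninit never
    // drops its contents, so there is no double free here.
}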
@@ -795,6 +840,36 @@ fn test_drain_leak() {
}
#[test]
+fn test_drain_keep_rest() {
+ let mut v = vec![0, 1, 2, 3, 4, 5, 6];
+ let mut drain = v.drain(1..6);
+ assert_eq!(drain.next(), Some(1));
+ assert_eq!(drain.next_back(), Some(5));
+ assert_eq!(drain.next(), Some(2));
+
+ drain.keep_rest();
+ assert_eq!(v, &[0, 3, 4, 6]);
+}
+
+#[test]
+fn test_drain_keep_rest_all() {
+ let mut v = vec![0, 1, 2, 3, 4, 5, 6];
+ v.drain(1..6).keep_rest();
+ assert_eq!(v, &[0, 1, 2, 3, 4, 5, 6]);
+}
+
+#[test]
+fn test_drain_keep_rest_none() {
+ let mut v = vec![0, 1, 2, 3, 4, 5, 6];
+ let mut drain = v.drain(1..6);
+
+ drain.by_ref().for_each(drop);
+
+ drain.keep_rest();
+ assert_eq!(v, &[0, 6]);
+}
+
+#[test]
fn test_splice() {
let mut v = vec![1, 2, 3, 4, 5];
let a = [10, 11, 12];
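The three keep_rest tests fix the intended semantics: elements already yielded by the Drain (from either end) are removed, and everything still unyielded stays in the vector. A minimal sketch, assuming a nightly toolchain with the unstable drain_keep_rest feature these tests exercise:

#![feature(drain_keep_rest)]

fn main() {
    let mut v = vec![0, 1, 2, 3, 4, 5, 6];
    let mut d = v.drain(1..6);
    assert_eq!(d.next(), Some(1));      // yielded -> removed
    assert_eq!(d.next_back(), Some(5)); // yielded -> removed
    d.keep_rest();                      // 2, 3, 4 stay in place
    assert_eq!(v, [0, 2, 3, 4, 6]);
}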
@@ -1030,6 +1105,12 @@ fn test_into_iter_drop_allocator() {
}
#[test]
+fn test_into_iter_zst() {
+ for _ in vec![[0u64; 0]].into_iter() {}
+ for _ in vec![[0u64; 0]; 5].into_iter().rev() {}
+}
+
+#[test]
fn test_from_iter_specialization() {
let src: Vec<usize> = vec![0usize; 1];
let srcptr = src.as_ptr();
@@ -1489,6 +1570,35 @@ fn drain_filter_unconsumed() {
}
#[test]
+fn test_drain_filter_keep_rest() {
+ let mut v = vec![0, 1, 2, 3, 4, 5, 6];
+ let mut drain = v.drain_filter(|&mut x| x % 2 == 0);
+ assert_eq!(drain.next(), Some(0));
+ assert_eq!(drain.next(), Some(2));
+
+ drain.keep_rest();
+ assert_eq!(v, &[1, 3, 4, 5, 6]);
+}
+
+#[test]
+fn test_drain_filter_keep_rest_all() {
+ let mut v = vec![0, 1, 2, 3, 4, 5, 6];
+ v.drain_filter(|_| true).keep_rest();
+ assert_eq!(v, &[0, 1, 2, 3, 4, 5, 6]);
+}
+
+#[test]
+fn test_drain_filter_keep_rest_none() {
+ let mut v = vec![0, 1, 2, 3, 4, 5, 6];
+ let mut drain = v.drain_filter(|_| true);
+
+ drain.by_ref().for_each(drop);
+
+ drain.keep_rest();
+ assert_eq!(v, &[]);
+}
+
+#[test]
fn test_reserve_exact() {
// This is all the same as test_reserve
@@ -1527,12 +1637,6 @@ fn test_try_reserve() {
const MAX_CAP: usize = isize::MAX as usize;
const MAX_USIZE: usize = usize::MAX;
- // On 16/32-bit, we check that allocations don't exceed isize::MAX,
- // on 64-bit, we assume the OS will give an OOM for such a ridiculous size.
- // Any platform that succeeds for these requests is technically broken with
- // ptr::offset because LLVM is the worst.
- let guards_against_isize = usize::BITS < 64;
-
{
// Note: basic stuff is checked by test_reserve
let mut empty_bytes: Vec<u8> = Vec::new();
@@ -1546,35 +1650,19 @@ fn test_try_reserve() {
panic!("isize::MAX shouldn't trigger an overflow!");
}
- if guards_against_isize {
- // Check isize::MAX + 1 does count as overflow
- assert_matches!(
- empty_bytes.try_reserve(MAX_CAP + 1).map_err(|e| e.kind()),
- Err(CapacityOverflow),
- "isize::MAX + 1 should trigger an overflow!"
- );
-
- // Check usize::MAX does count as overflow
- assert_matches!(
- empty_bytes.try_reserve(MAX_USIZE).map_err(|e| e.kind()),
- Err(CapacityOverflow),
- "usize::MAX should trigger an overflow!"
- );
- } else {
- // Check isize::MAX + 1 is an OOM
- assert_matches!(
- empty_bytes.try_reserve(MAX_CAP + 1).map_err(|e| e.kind()),
- Err(AllocError { .. }),
- "isize::MAX + 1 should trigger an OOM!"
- );
-
- // Check usize::MAX is an OOM
- assert_matches!(
- empty_bytes.try_reserve(MAX_USIZE).map_err(|e| e.kind()),
- Err(AllocError { .. }),
- "usize::MAX should trigger an OOM!"
- );
- }
+ // Check isize::MAX + 1 does count as overflow
+ assert_matches!(
+ empty_bytes.try_reserve(MAX_CAP + 1).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "isize::MAX + 1 should trigger an overflow!"
+ );
+
+ // Check usize::MAX does count as overflow
+ assert_matches!(
+ empty_bytes.try_reserve(MAX_USIZE).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "usize::MAX should trigger an overflow!"
+ );
}
{
@@ -1587,19 +1675,13 @@ fn test_try_reserve() {
if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10).map_err(|e| e.kind()) {
panic!("isize::MAX shouldn't trigger an overflow!");
}
- if guards_against_isize {
- assert_matches!(
- ten_bytes.try_reserve(MAX_CAP - 9).map_err(|e| e.kind()),
- Err(CapacityOverflow),
- "isize::MAX + 1 should trigger an overflow!"
- );
- } else {
- assert_matches!(
- ten_bytes.try_reserve(MAX_CAP - 9).map_err(|e| e.kind()),
- Err(AllocError { .. }),
- "isize::MAX + 1 should trigger an OOM!"
- );
- }
+
+ assert_matches!(
+ ten_bytes.try_reserve(MAX_CAP - 9).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "isize::MAX + 1 should trigger an overflow!"
+ );
+
// Should always overflow in the add-to-len
assert_matches!(
ten_bytes.try_reserve(MAX_USIZE).map_err(|e| e.kind()),
@@ -1620,19 +1702,13 @@ fn test_try_reserve() {
{
panic!("isize::MAX shouldn't trigger an overflow!");
}
- if guards_against_isize {
- assert_matches!(
- ten_u32s.try_reserve(MAX_CAP / 4 - 9).map_err(|e| e.kind()),
- Err(CapacityOverflow),
- "isize::MAX + 1 should trigger an overflow!"
- );
- } else {
- assert_matches!(
- ten_u32s.try_reserve(MAX_CAP / 4 - 9).map_err(|e| e.kind()),
- Err(AllocError { .. }),
- "isize::MAX + 1 should trigger an OOM!"
- );
- }
+
+ assert_matches!(
+ ten_u32s.try_reserve(MAX_CAP / 4 - 9).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "isize::MAX + 1 should trigger an overflow!"
+ );
+
// Should fail in the mul-by-size
assert_matches!(
ten_u32s.try_reserve(MAX_USIZE - 20).map_err(|e| e.kind()),
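The two remaining assertions rely on plain usize arithmetic rather than platform behaviour: adding the request to the current length overflows usize for ten_bytes, and converting the element count to a byte count overflows for ten_u32s. A worked check of both overflows:

fn main() {
    // "add-to-len": 10 existing elements plus a usize::MAX request
    // cannot be represented as a usize at all.
    assert!(10usize.checked_add(usize::MAX).is_none());
    // "mul-by-size": the element count fits in usize, but the byte
    // size (count * size_of::<u32>()) does not.
    let count = usize::MAX - 20;
    assert!(count.checked_mul(core::mem::size_of::<u32>()).is_none());
}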
@@ -1652,8 +1728,6 @@ fn test_try_reserve_exact() {
const MAX_CAP: usize = isize::MAX as usize;
const MAX_USIZE: usize = usize::MAX;
- let guards_against_isize = size_of::<usize>() < 8;
-
{
let mut empty_bytes: Vec<u8> = Vec::new();
@@ -1666,31 +1740,17 @@ fn test_try_reserve_exact() {
panic!("isize::MAX shouldn't trigger an overflow!");
}
- if guards_against_isize {
- assert_matches!(
- empty_bytes.try_reserve_exact(MAX_CAP + 1).map_err(|e| e.kind()),
- Err(CapacityOverflow),
- "isize::MAX + 1 should trigger an overflow!"
- );
-
- assert_matches!(
- empty_bytes.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind()),
- Err(CapacityOverflow),
- "usize::MAX should trigger an overflow!"
- );
- } else {
- assert_matches!(
- empty_bytes.try_reserve_exact(MAX_CAP + 1).map_err(|e| e.kind()),
- Err(AllocError { .. }),
- "isize::MAX + 1 should trigger an OOM!"
- );
-
- assert_matches!(
- empty_bytes.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind()),
- Err(AllocError { .. }),
- "usize::MAX should trigger an OOM!"
- );
- }
+ assert_matches!(
+ empty_bytes.try_reserve_exact(MAX_CAP + 1).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "isize::MAX + 1 should trigger an overflow!"
+ );
+
+ assert_matches!(
+ empty_bytes.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "usize::MAX should trigger an overflow!"
+ );
}
{
@@ -1706,19 +1766,13 @@ fn test_try_reserve_exact() {
{
panic!("isize::MAX shouldn't trigger an overflow!");
}
- if guards_against_isize {
- assert_matches!(
- ten_bytes.try_reserve_exact(MAX_CAP - 9).map_err(|e| e.kind()),
- Err(CapacityOverflow),
- "isize::MAX + 1 should trigger an overflow!"
- );
- } else {
- assert_matches!(
- ten_bytes.try_reserve_exact(MAX_CAP - 9).map_err(|e| e.kind()),
- Err(AllocError { .. }),
- "isize::MAX + 1 should trigger an OOM!"
- );
- }
+
+ assert_matches!(
+ ten_bytes.try_reserve_exact(MAX_CAP - 9).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "isize::MAX + 1 should trigger an overflow!"
+ );
+
assert_matches!(
ten_bytes.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind()),
Err(CapacityOverflow),
@@ -1739,19 +1793,13 @@ fn test_try_reserve_exact() {
{
panic!("isize::MAX shouldn't trigger an overflow!");
}
- if guards_against_isize {
- assert_matches!(
- ten_u32s.try_reserve_exact(MAX_CAP / 4 - 9).map_err(|e| e.kind()),
- Err(CapacityOverflow),
- "isize::MAX + 1 should trigger an overflow!"
- );
- } else {
- assert_matches!(
- ten_u32s.try_reserve_exact(MAX_CAP / 4 - 9).map_err(|e| e.kind()),
- Err(AllocError { .. }),
- "isize::MAX + 1 should trigger an OOM!"
- );
- }
+
+ assert_matches!(
+ ten_u32s.try_reserve_exact(MAX_CAP / 4 - 9).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "isize::MAX + 1 should trigger an overflow!"
+ );
+
assert_matches!(
ten_u32s.try_reserve_exact(MAX_USIZE - 20).map_err(|e| e.kind()),
Err(CapacityOverflow),
diff --git a/library/alloc/tests/vec_deque.rs b/library/alloc/tests/vec_deque.rs
index 89cc7f905..019d73c0b 100644
--- a/library/alloc/tests/vec_deque.rs
+++ b/library/alloc/tests/vec_deque.rs
@@ -2,7 +2,6 @@ use std::assert_matches::assert_matches;
use std::collections::TryReserveErrorKind::*;
use std::collections::{vec_deque::Drain, VecDeque};
use std::fmt::Debug;
-use std::mem::size_of;
use std::ops::Bound::*;
use std::panic::{catch_unwind, AssertUnwindSafe};
@@ -1161,12 +1160,6 @@ fn test_try_reserve() {
const MAX_CAP: usize = (isize::MAX as usize + 1) / 2 - 1;
const MAX_USIZE: usize = usize::MAX;
- // On 16/32-bit, we check that allocations don't exceed isize::MAX,
- // on 64-bit, we assume the OS will give an OOM for such a ridiculous size.
- // Any platform that succeeds for these requests is technically broken with
- // ptr::offset because LLVM is the worst.
- let guards_against_isize = size_of::<usize>() < 8;
-
{
// Note: basic stuff is checked by test_reserve
let mut empty_bytes: VecDeque<u8> = VecDeque::new();
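MAX_CAP here encodes the deque's old growth rule, described in the comment removed below: the ring buffer kept one slot empty and rounded its allocation up to a power of two, so (isize::MAX as usize + 1) / 2 - 1 is the largest capacity whose rounded-up allocation still fits under isize::MAX. A worked check of that arithmetic (the concrete powers assume a 64-bit target):

fn main() {
    let max_cap = (isize::MAX as usize + 1) / 2 - 1;
    // isize::MAX + 1 == 2^63, halved == 2^62, minus the spare slot.
    assert_eq!(max_cap, (1usize << (usize::BITS - 2)) - 1);
    // max_cap needs max_cap + 1 == 2^62 slots: already a power of two.
    assert!((max_cap + 1).is_power_of_two());
    // One more element would round up to 2^63 slots, past isize::MAX.
    assert!(2 * (max_cap + 1) > isize::MAX as usize);
}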
@@ -1180,31 +1173,19 @@ fn test_try_reserve() {
panic!("isize::MAX shouldn't trigger an overflow!");
}
- if guards_against_isize {
- // Check isize::MAX + 1 does count as overflow
- assert_matches!(
- empty_bytes.try_reserve(MAX_CAP + 1).map_err(|e| e.kind()),
- Err(CapacityOverflow),
- "isize::MAX + 1 should trigger an overflow!"
- );
-
- // Check usize::MAX does count as overflow
- assert_matches!(
- empty_bytes.try_reserve(MAX_USIZE).map_err(|e| e.kind()),
- Err(CapacityOverflow),
- "usize::MAX should trigger an overflow!"
- );
- } else {
- // Check isize::MAX is an OOM
- // VecDeque starts with capacity 7, always adds 1 to the capacity
- // and also rounds the number to next power of 2 so this is the
- // furthest we can go without triggering CapacityOverflow
- assert_matches!(
- empty_bytes.try_reserve(MAX_CAP).map_err(|e| e.kind()),
- Err(AllocError { .. }),
- "isize::MAX + 1 should trigger an OOM!"
- );
- }
+ // Check isize::MAX + 1 does count as overflow
+ assert_matches!(
+ empty_bytes.try_reserve(MAX_CAP + 1).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "isize::MAX + 1 should trigger an overflow!"
+ );
+
+ // Check usize::MAX does count as overflow
+ assert_matches!(
+ empty_bytes.try_reserve(MAX_USIZE).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "usize::MAX should trigger an overflow!"
+ );
}
{
@@ -1217,19 +1198,13 @@ fn test_try_reserve() {
if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10).map_err(|e| e.kind()) {
panic!("isize::MAX shouldn't trigger an overflow!");
}
- if guards_against_isize {
- assert_matches!(
- ten_bytes.try_reserve(MAX_CAP - 9).map_err(|e| e.kind()),
- Err(CapacityOverflow),
- "isize::MAX + 1 should trigger an overflow!"
- );
- } else {
- assert_matches!(
- ten_bytes.try_reserve(MAX_CAP - 9).map_err(|e| e.kind()),
- Err(AllocError { .. }),
- "isize::MAX + 1 should trigger an OOM!"
- );
- }
+
+ assert_matches!(
+ ten_bytes.try_reserve(MAX_CAP - 9).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "isize::MAX + 1 should trigger an overflow!"
+ );
+
// Should always overflow in the add-to-len
assert_matches!(
ten_bytes.try_reserve(MAX_USIZE).map_err(|e| e.kind()),
@@ -1250,19 +1225,13 @@ fn test_try_reserve() {
{
panic!("isize::MAX shouldn't trigger an overflow!");
}
- if guards_against_isize {
- assert_matches!(
- ten_u32s.try_reserve(MAX_CAP / 4 - 9).map_err(|e| e.kind()),
- Err(CapacityOverflow),
- "isize::MAX + 1 should trigger an overflow!"
- );
- } else {
- assert_matches!(
- ten_u32s.try_reserve(MAX_CAP / 4 - 9).map_err(|e| e.kind()),
- Err(AllocError { .. }),
- "isize::MAX + 1 should trigger an OOM!"
- );
- }
+
+ assert_matches!(
+ ten_u32s.try_reserve(MAX_CAP / 4 - 9).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "isize::MAX + 1 should trigger an overflow!"
+ );
+
// Should fail in the mul-by-size
assert_matches!(
ten_u32s.try_reserve(MAX_USIZE - 20).map_err(|e| e.kind()),
@@ -1282,8 +1251,6 @@ fn test_try_reserve_exact() {
const MAX_CAP: usize = (isize::MAX as usize + 1) / 2 - 1;
const MAX_USIZE: usize = usize::MAX;
- let guards_against_isize = size_of::<usize>() < 8;
-
{
let mut empty_bytes: VecDeque<u8> = VecDeque::new();
@@ -1296,29 +1263,17 @@ fn test_try_reserve_exact() {
panic!("isize::MAX shouldn't trigger an overflow!");
}
- if guards_against_isize {
- assert_matches!(
- empty_bytes.try_reserve_exact(MAX_CAP + 1).map_err(|e| e.kind()),
- Err(CapacityOverflow),
- "isize::MAX + 1 should trigger an overflow!"
- );
-
- assert_matches!(
- empty_bytes.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind()),
- Err(CapacityOverflow),
- "usize::MAX should trigger an overflow!"
- );
- } else {
- // Check isize::MAX is an OOM
- // VecDeque starts with capacity 7, always adds 1 to the capacity
- // and also rounds the number to next power of 2 so this is the
- // furthest we can go without triggering CapacityOverflow
- assert_matches!(
- empty_bytes.try_reserve_exact(MAX_CAP).map_err(|e| e.kind()),
- Err(AllocError { .. }),
- "isize::MAX + 1 should trigger an OOM!"
- );
- }
+ assert_matches!(
+ empty_bytes.try_reserve_exact(MAX_CAP + 1).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "isize::MAX + 1 should trigger an overflow!"
+ );
+
+ assert_matches!(
+ empty_bytes.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "usize::MAX should trigger an overflow!"
+ );
}
{
@@ -1334,19 +1289,13 @@ fn test_try_reserve_exact() {
{
panic!("isize::MAX shouldn't trigger an overflow!");
}
- if guards_against_isize {
- assert_matches!(
- ten_bytes.try_reserve_exact(MAX_CAP - 9).map_err(|e| e.kind()),
- Err(CapacityOverflow),
- "isize::MAX + 1 should trigger an overflow!"
- );
- } else {
- assert_matches!(
- ten_bytes.try_reserve_exact(MAX_CAP - 9).map_err(|e| e.kind()),
- Err(AllocError { .. }),
- "isize::MAX + 1 should trigger an OOM!"
- );
- }
+
+ assert_matches!(
+ ten_bytes.try_reserve_exact(MAX_CAP - 9).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "isize::MAX + 1 should trigger an overflow!"
+ );
+
assert_matches!(
ten_bytes.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind()),
Err(CapacityOverflow),
@@ -1367,19 +1316,13 @@ fn test_try_reserve_exact() {
{
panic!("isize::MAX shouldn't trigger an overflow!");
}
- if guards_against_isize {
- assert_matches!(
- ten_u32s.try_reserve_exact(MAX_CAP / 4 - 9).map_err(|e| e.kind()),
- Err(CapacityOverflow),
- "isize::MAX + 1 should trigger an overflow!"
- );
- } else {
- assert_matches!(
- ten_u32s.try_reserve_exact(MAX_CAP / 4 - 9).map_err(|e| e.kind()),
- Err(AllocError { .. }),
- "isize::MAX + 1 should trigger an OOM!"
- );
- }
+
+ assert_matches!(
+ ten_u32s.try_reserve_exact(MAX_CAP / 4 - 9).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "isize::MAX + 1 should trigger an overflow!"
+ );
+
assert_matches!(
ten_u32s.try_reserve_exact(MAX_USIZE - 20).map_err(|e| e.kind()),
Err(CapacityOverflow),