path: root/library/alloc/src
author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-19 09:26:03 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-19 09:26:03 +0000
commit     9918693037dce8aa4bb6f08741b6812923486c18 (patch)
tree       21d2b40bec7e6a7ea664acee056eb3d08e15a1cf /library/alloc/src
parent     Releasing progress-linux version 1.75.0+dfsg1-5~progress7.99u1. (diff)
download   rustc-9918693037dce8aa4bb6f08741b6812923486c18.tar.xz
           rustc-9918693037dce8aa4bb6f08741b6812923486c18.zip
Merging upstream version 1.76.0+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'library/alloc/src')
-rw-r--r--  library/alloc/src/alloc.rs                          |   3
-rw-r--r--  library/alloc/src/boxed.rs                          |  14
-rw-r--r--  library/alloc/src/boxed/thin.rs                     |   1
-rw-r--r--  library/alloc/src/collections/binary_heap/mod.rs    |  11
-rw-r--r--  library/alloc/src/collections/binary_heap/tests.rs  |   2
-rw-r--r--  library/alloc/src/collections/btree/map/tests.rs    |   7
-rw-r--r--  library/alloc/src/collections/btree/set/tests.rs    |   3
-rw-r--r--  library/alloc/src/collections/linked_list.rs        |  93
-rw-r--r--  library/alloc/src/collections/mod.rs                |   1
-rw-r--r--  library/alloc/src/ffi/c_str.rs                      |   6
-rw-r--r--  library/alloc/src/ffi/c_str/tests.rs                |   2
-rw-r--r--  library/alloc/src/lib.rs                            |   8
-rw-r--r--  library/alloc/src/raw_vec.rs                        |  56
-rw-r--r--  library/alloc/src/raw_vec/tests.rs                  |   9
-rw-r--r--  library/alloc/src/rc.rs                             |   4
-rw-r--r--  library/alloc/src/rc/tests.rs                       |   5
-rw-r--r--  library/alloc/src/sync.rs                           |  12
-rw-r--r--  library/alloc/src/sync/tests.rs                     |  13
-rw-r--r--  library/alloc/src/tests.rs                          |   2
-rw-r--r--  library/alloc/src/vec/in_place_collect.rs           | 164
-rw-r--r--  library/alloc/src/vec/in_place_drop.rs              |  26
-rw-r--r--  library/alloc/src/vec/into_iter.rs                  |  16
-rw-r--r--  library/alloc/src/vec/mod.rs                        |  66
-rw-r--r--  library/alloc/src/vec/spec_from_elem.rs             |  23
-rw-r--r--  library/alloc/src/vec/spec_from_iter.rs             |  14
25 files changed, 428 insertions, 133 deletions
diff --git a/library/alloc/src/alloc.rs b/library/alloc/src/alloc.rs
index 2499f1053..1663aa849 100644
--- a/library/alloc/src/alloc.rs
+++ b/library/alloc/src/alloc.rs
@@ -423,12 +423,14 @@ pub mod __alloc_error_handler {
}
}
+#[cfg(not(no_global_oom_handling))]
/// Specialize clones into pre-allocated, uninitialized memory.
/// Used by `Box::clone` and `Rc`/`Arc::make_mut`.
pub(crate) trait WriteCloneIntoRaw: Sized {
unsafe fn write_clone_into_raw(&self, target: *mut Self);
}
+#[cfg(not(no_global_oom_handling))]
impl<T: Clone> WriteCloneIntoRaw for T {
#[inline]
default unsafe fn write_clone_into_raw(&self, target: *mut Self) {
@@ -438,6 +440,7 @@ impl<T: Clone> WriteCloneIntoRaw for T {
}
}
+#[cfg(not(no_global_oom_handling))]
impl<T: Copy> WriteCloneIntoRaw for T {
#[inline]
unsafe fn write_clone_into_raw(&self, target: *mut Self) {
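
The newly gated `WriteCloneIntoRaw` trait abstracts the pattern of cloning a value directly into uninitialized storage (used by `Box::clone` and `Rc`/`Arc::make_mut`). As a user-level illustration of that pattern (not code from this commit), the same thing can be done with `MaybeUninit`:

```rust
use std::mem::MaybeUninit;

fn main() {
    let source = String::from("hello");
    let mut slot = MaybeUninit::<String>::uninit();
    // Write the clone straight into the uninitialized slot, which is what the
    // specialized `write_clone_into_raw` does for `Box::clone`.
    unsafe { slot.as_mut_ptr().write(source.clone()) };
    let copy = unsafe { slot.assume_init() };
    assert_eq!(copy, "hello");
}
```
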
diff --git a/library/alloc/src/boxed.rs b/library/alloc/src/boxed.rs
index 25c63b425..fdf5e134f 100644
--- a/library/alloc/src/boxed.rs
+++ b/library/alloc/src/boxed.rs
@@ -1038,10 +1038,18 @@ impl<T: ?Sized, A: Allocator> Box<T, A> {
/// use std::ptr;
///
/// let x = Box::new(String::from("Hello"));
- /// let p = Box::into_raw(x);
+ /// let ptr = Box::into_raw(x);
+ /// unsafe {
+ /// ptr::drop_in_place(ptr);
+ /// dealloc(ptr as *mut u8, Layout::new::<String>());
+ /// }
+ /// ```
+ /// Note: This is equivalent to the following:
+ /// ```
+ /// let x = Box::new(String::from("Hello"));
+ /// let ptr = Box::into_raw(x);
/// unsafe {
- /// ptr::drop_in_place(p);
- /// dealloc(p as *mut u8, Layout::new::<String>());
+ /// drop(Box::from_raw(ptr));
/// }
/// ```
///
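
For reference, here is a self-contained version of the two cleanup paths the updated `Box::into_raw` docs compare, with the `use` lines that the rustdoc example assumes from its elided context spelled out:

```rust
use std::alloc::{dealloc, Layout};
use std::ptr;

fn main() {
    // Manual cleanup: run the destructor, then free the heap allocation.
    let x = Box::new(String::from("Hello"));
    let ptr = Box::into_raw(x);
    unsafe {
        ptr::drop_in_place(ptr);
        dealloc(ptr as *mut u8, Layout::new::<String>());
    }

    // Equivalent and simpler: rebuild the Box and let it drop normally.
    let y = Box::new(String::from("Hello"));
    let ptr = Box::into_raw(y);
    unsafe { drop(Box::from_raw(ptr)) };
}
```
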
diff --git a/library/alloc/src/boxed/thin.rs b/library/alloc/src/boxed/thin.rs
index f83c8f83c..a8005b706 100644
--- a/library/alloc/src/boxed/thin.rs
+++ b/library/alloc/src/boxed/thin.rs
@@ -171,6 +171,7 @@ struct WithHeader<H>(NonNull<u8>, PhantomData<H>);
/// An opaque representation of `WithHeader<H>` to avoid the
/// projection invariance of `<T as Pointee>::Metadata`.
#[repr(transparent)]
+#[allow(unused_tuple_struct_fields)] // Field only used through `WithHeader` type above.
struct WithOpaqueHeader(NonNull<u8>);
impl WithOpaqueHeader {
diff --git a/library/alloc/src/collections/binary_heap/mod.rs b/library/alloc/src/collections/binary_heap/mod.rs
index 61c5950b0..00a101541 100644
--- a/library/alloc/src/collections/binary_heap/mod.rs
+++ b/library/alloc/src/collections/binary_heap/mod.rs
@@ -145,7 +145,7 @@
use core::alloc::Allocator;
use core::fmt;
-use core::iter::{FusedIterator, InPlaceIterable, SourceIter, TrustedLen};
+use core::iter::{FusedIterator, InPlaceIterable, SourceIter, TrustedFused, TrustedLen};
use core::mem::{self, swap, ManuallyDrop};
use core::num::NonZeroUsize;
use core::ops::{Deref, DerefMut};
@@ -1542,6 +1542,10 @@ impl<T, A: Allocator> ExactSizeIterator for IntoIter<T, A> {
#[stable(feature = "fused", since = "1.26.0")]
impl<T, A: Allocator> FusedIterator for IntoIter<T, A> {}
+#[doc(hidden)]
+#[unstable(issue = "none", feature = "trusted_fused")]
+unsafe impl<T, A: Allocator> TrustedFused for IntoIter<T, A> {}
+
#[stable(feature = "default_iters", since = "1.70.0")]
impl<T> Default for IntoIter<T> {
/// Creates an empty `binary_heap::IntoIter`.
@@ -1571,7 +1575,10 @@ unsafe impl<T, A: Allocator> SourceIter for IntoIter<T, A> {
#[unstable(issue = "none", feature = "inplace_iteration")]
#[doc(hidden)]
-unsafe impl<I, A: Allocator> InPlaceIterable for IntoIter<I, A> {}
+unsafe impl<I, A: Allocator> InPlaceIterable for IntoIter<I, A> {
+ const EXPAND_BY: Option<NonZeroUsize> = NonZeroUsize::new(1);
+ const MERGE_BY: Option<NonZeroUsize> = NonZeroUsize::new(1);
+}
unsafe impl<I> AsVecIntoIter for IntoIter<I> {
type Item = I;
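
The new `EXPAND_BY`/`MERGE_BY` constants declare that `binary_heap::IntoIter` consumes exactly one item per item it yields, keeping it eligible for the in-place `collect` specialization. A hedged usage sketch (the allocation reuse is an internal optimization, not an observable guarantee):

```rust
use std::collections::BinaryHeap;

fn main() {
    let heap: BinaryHeap<u32> = (0..1024).collect();
    // `binary_heap::IntoIter` implements SourceIter + InPlaceIterable, so this
    // pipeline is a candidate for reusing the heap's buffer for the Vec.
    let doubled: Vec<u32> = heap.into_iter().map(|x| x * 2).collect();
    assert_eq!(doubled.len(), 1024);
}
```
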
diff --git a/library/alloc/src/collections/binary_heap/tests.rs b/library/alloc/src/collections/binary_heap/tests.rs
index 565a7b797..d4bc6226a 100644
--- a/library/alloc/src/collections/binary_heap/tests.rs
+++ b/library/alloc/src/collections/binary_heap/tests.rs
@@ -1,8 +1,6 @@
use super::*;
use crate::boxed::Box;
use crate::testing::crash_test::{CrashTestDummy, Panic};
-use core::mem;
-use std::iter::TrustedLen;
use std::panic::{catch_unwind, AssertUnwindSafe};
#[test]
diff --git a/library/alloc/src/collections/btree/map/tests.rs b/library/alloc/src/collections/btree/map/tests.rs
index 8681cfcd6..a1b7cfe6b 100644
--- a/library/alloc/src/collections/btree/map/tests.rs
+++ b/library/alloc/src/collections/btree/map/tests.rs
@@ -1,4 +1,3 @@
-use super::Entry::{Occupied, Vacant};
use super::*;
use crate::boxed::Box;
use crate::fmt::Debug;
@@ -7,13 +6,9 @@ use crate::string::{String, ToString};
use crate::testing::crash_test::{CrashTestDummy, Panic};
use crate::testing::ord_chaos::{Cyclic3, Governed, Governor};
use crate::testing::rng::DeterministicRng;
-use crate::vec::Vec;
use core::assert_matches::assert_matches;
-use std::cmp::Ordering;
use std::iter;
-use std::mem;
-use std::ops::Bound::{self, Excluded, Included, Unbounded};
-use std::ops::RangeBounds;
+use std::ops::Bound::{Excluded, Included, Unbounded};
use std::panic::{catch_unwind, AssertUnwindSafe};
use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};
diff --git a/library/alloc/src/collections/btree/set/tests.rs b/library/alloc/src/collections/btree/set/tests.rs
index e05bf0e20..8726c5bfe 100644
--- a/library/alloc/src/collections/btree/set/tests.rs
+++ b/library/alloc/src/collections/btree/set/tests.rs
@@ -1,9 +1,6 @@
use super::*;
use crate::testing::crash_test::{CrashTestDummy, Panic};
use crate::testing::rng::DeterministicRng;
-use crate::vec::Vec;
-use std::cmp::Ordering;
-use std::hash::{Hash, Hasher};
use std::ops::Bound::{Excluded, Included};
use std::panic::{catch_unwind, AssertUnwindSafe};
diff --git a/library/alloc/src/collections/linked_list.rs b/library/alloc/src/collections/linked_list.rs
index 2c26f9e03..9e109feb3 100644
--- a/library/alloc/src/collections/linked_list.rs
+++ b/library/alloc/src/collections/linked_list.rs
@@ -1026,6 +1026,99 @@ impl<T, A: Allocator> LinkedList<T, A> {
}
}
+ /// Retains only the elements specified by the predicate.
+ ///
+ /// In other words, remove all elements `e` for which `f(&e)` returns false.
+ /// This method operates in place, visiting each element exactly once in the
+ /// original order, and preserves the order of the retained elements.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(linked_list_retain)]
+ /// use std::collections::LinkedList;
+ ///
+ /// let mut d = LinkedList::new();
+ ///
+ /// d.push_front(1);
+ /// d.push_front(2);
+ /// d.push_front(3);
+ ///
+ /// d.retain(|&x| x % 2 == 0);
+ ///
+ /// assert_eq!(d.pop_front(), Some(2));
+ /// assert_eq!(d.pop_front(), None);
+ /// ```
+ ///
+ /// Because the elements are visited exactly once in the original order,
+ /// external state may be used to decide which elements to keep.
+ ///
+ /// ```
+ /// #![feature(linked_list_retain)]
+ /// use std::collections::LinkedList;
+ ///
+ /// let mut d = LinkedList::new();
+ ///
+ /// d.push_front(1);
+ /// d.push_front(2);
+ /// d.push_front(3);
+ ///
+ /// let keep = [false, true, false];
+ /// let mut iter = keep.iter();
+ /// d.retain(|_| *iter.next().unwrap());
+ /// assert_eq!(d.pop_front(), Some(2));
+ /// assert_eq!(d.pop_front(), None);
+ /// ```
+ #[unstable(feature = "linked_list_retain", issue = "114135")]
+ pub fn retain<F>(&mut self, mut f: F)
+ where
+ F: FnMut(&T) -> bool,
+ {
+ self.retain_mut(|elem| f(elem));
+ }
+
+ /// Retains only the elements specified by the predicate.
+ ///
+ /// In other words, remove all elements `e` for which `f(&e)` returns false.
+ /// This method operates in place, visiting each element exactly once in the
+ /// original order, and preserves the order of the retained elements.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(linked_list_retain)]
+ /// use std::collections::LinkedList;
+ ///
+ /// let mut d = LinkedList::new();
+ ///
+ /// d.push_front(1);
+ /// d.push_front(2);
+ /// d.push_front(3);
+ ///
+ /// d.retain_mut(|x| if *x % 2 == 0 {
+ /// *x += 1;
+ /// true
+ /// } else {
+ /// false
+ /// });
+ /// assert_eq!(d.pop_front(), Some(3));
+ /// assert_eq!(d.pop_front(), None);
+ /// ```
+ #[unstable(feature = "linked_list_retain", issue = "114135")]
+ pub fn retain_mut<F>(&mut self, mut f: F)
+ where
+ F: FnMut(&mut T) -> bool,
+ {
+ let mut cursor = self.cursor_front_mut();
+ while let Some(node) = cursor.current() {
+ if !f(node) {
+ cursor.remove_current().unwrap();
+ } else {
+ cursor.move_next();
+ }
+ }
+ }
+
/// Creates an iterator which uses a closure to determine if an element should be removed.
///
/// If the closure returns true, then the element is removed and yielded.
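
`linked_list_retain` is still unstable (tracking issue #114135). On stable Rust, a roughly equivalent (but reallocating) sketch rebuilds the list through an iterator filter:

```rust
use std::collections::LinkedList;

fn main() {
    let mut d: LinkedList<i32> = LinkedList::new();
    d.push_front(1);
    d.push_front(2);
    d.push_front(3);

    // Stable workaround: filter into a fresh list instead of retaining in place.
    d = d.into_iter().filter(|&x| x % 2 == 0).collect();
    assert_eq!(d.pop_front(), Some(2));
    assert_eq!(d.pop_front(), None);
}
```
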
diff --git a/library/alloc/src/collections/mod.rs b/library/alloc/src/collections/mod.rs
index 3e0b0f735..705b81535 100644
--- a/library/alloc/src/collections/mod.rs
+++ b/library/alloc/src/collections/mod.rs
@@ -148,6 +148,7 @@ impl Display for TryReserveError {
/// An intermediate trait for specialization of `Extend`.
#[doc(hidden)]
+#[cfg(not(no_global_oom_handling))]
trait SpecExtend<I: IntoIterator> {
/// Extends `self` with the contents of the given iterator.
fn spec_extend(&mut self, iter: I);
diff --git a/library/alloc/src/ffi/c_str.rs b/library/alloc/src/ffi/c_str.rs
index 62856fc9a..4a4a3abd4 100644
--- a/library/alloc/src/ffi/c_str.rs
+++ b/library/alloc/src/ffi/c_str.rs
@@ -421,7 +421,7 @@ impl CString {
/// Failure to call [`CString::from_raw`] will lead to a memory leak.
///
/// The C side must **not** modify the length of the string (by writing a
- /// `null` somewhere inside the string or removing the final one) before
+ /// nul byte somewhere inside the string or removing the final one) before
/// it makes it back into Rust using [`CString::from_raw`]. See the safety section
/// in [`CString::from_raw`].
///
@@ -797,7 +797,7 @@ impl From<Box<CStr>> for CString {
#[stable(feature = "cstring_from_vec_of_nonzerou8", since = "1.43.0")]
impl From<Vec<NonZeroU8>> for CString {
/// Converts a <code>[Vec]<[NonZeroU8]></code> into a [`CString`] without
- /// copying nor checking for inner null bytes.
+ /// copying nor checking for inner nul bytes.
#[inline]
fn from(v: Vec<NonZeroU8>) -> CString {
unsafe {
@@ -809,7 +809,7 @@ impl From<Vec<NonZeroU8>> for CString {
let (ptr, len, cap): (*mut NonZeroU8, _, _) = Vec::into_raw_parts(v);
Vec::from_raw_parts(ptr.cast::<u8>(), len, cap)
};
- // SAFETY: `v` cannot contain null bytes, given the type-level
+ // SAFETY: `v` cannot contain nul bytes, given the type-level
// invariant of `NonZeroU8`.
Self::_from_vec_unchecked(v)
}
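
The wording now consistently says "nul" for the C string terminator byte. A short round trip showing the `into_raw`/`from_raw` contract these docs describe:

```rust
use std::ffi::CString;

fn main() {
    let s = CString::new("hello").unwrap();
    // Hand the buffer to C; the C side must not move the terminating nul
    // or write an interior nul byte.
    let raw = s.into_raw();
    // ... pass `raw` across the FFI boundary and receive it back ...
    let s = unsafe { CString::from_raw(raw) };
    assert_eq!(s.to_str().unwrap(), "hello");
}
```
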
diff --git a/library/alloc/src/ffi/c_str/tests.rs b/library/alloc/src/ffi/c_str/tests.rs
index 0b7476d5c..9f51e17a4 100644
--- a/library/alloc/src/ffi/c_str/tests.rs
+++ b/library/alloc/src/ffi/c_str/tests.rs
@@ -1,6 +1,4 @@
use super::*;
-use crate::rc::Rc;
-use crate::sync::Arc;
use core::assert_matches::assert_matches;
use core::ffi::FromBytesUntilNulError;
use core::hash::{Hash, Hasher};
diff --git a/library/alloc/src/lib.rs b/library/alloc/src/lib.rs
index d33c4418e..0af3ac38e 100644
--- a/library/alloc/src/lib.rs
+++ b/library/alloc/src/lib.rs
@@ -78,8 +78,8 @@
not(no_sync),
target_has_atomic = "ptr"
))]
-#![cfg_attr(not(bootstrap), doc(rust_logo))]
-#![cfg_attr(not(bootstrap), feature(rustdoc_internals))]
+#![doc(rust_logo)]
+#![feature(rustdoc_internals)]
#![no_std]
#![needs_allocator]
// Lints:
@@ -140,7 +140,6 @@
#![feature(maybe_uninit_uninit_array)]
#![feature(maybe_uninit_uninit_array_transpose)]
#![feature(pattern)]
-#![feature(ptr_addr_eq)]
#![feature(ptr_internals)]
#![feature(ptr_metadata)]
#![feature(ptr_sub_ptr)]
@@ -155,6 +154,7 @@
#![feature(std_internals)]
#![feature(str_internals)]
#![feature(strict_provenance)]
+#![feature(trusted_fused)]
#![feature(trusted_len)]
#![feature(trusted_random_access)]
#![feature(try_trait_v2)]
@@ -270,7 +270,7 @@ pub(crate) mod test_helpers {
/// seed not being the same for every RNG invocation too.
pub(crate) fn test_rng() -> rand_xorshift::XorShiftRng {
use std::hash::{BuildHasher, Hash, Hasher};
- let mut hasher = std::collections::hash_map::RandomState::new().build_hasher();
+ let mut hasher = std::hash::RandomState::new().build_hasher();
std::panic::Location::caller().hash(&mut hasher);
let hc64 = hasher.finish();
let seed_vec =
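
The `test_rng` tweak relies on `RandomState` now being re-exported from `std::hash` in this 1.76 merge. A small stand-alone example of the new path:

```rust
use std::hash::{BuildHasher, RandomState};

fn main() {
    // Shorter path than std::collections::hash_map::RandomState.
    let s = RandomState::new();
    // hash_one hashes a single value with this hasher state.
    assert_eq!(s.hash_one("seed"), s.hash_one("seed"));
}
```
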
diff --git a/library/alloc/src/raw_vec.rs b/library/alloc/src/raw_vec.rs
index 817b93720..99ec68f5a 100644
--- a/library/alloc/src/raw_vec.rs
+++ b/library/alloc/src/raw_vec.rs
@@ -25,6 +25,16 @@ enum AllocInit {
Zeroed,
}
+#[repr(transparent)]
+#[cfg_attr(target_pointer_width = "16", rustc_layout_scalar_valid_range_end(0x7fff))]
+#[cfg_attr(target_pointer_width = "32", rustc_layout_scalar_valid_range_end(0x7fff_ffff))]
+#[cfg_attr(target_pointer_width = "64", rustc_layout_scalar_valid_range_end(0x7fff_ffff_ffff_ffff))]
+struct Cap(usize);
+
+impl Cap {
+ const ZERO: Cap = unsafe { Cap(0) };
+}
+
/// A low-level utility for more ergonomically allocating, reallocating, and deallocating
/// a buffer of memory on the heap without having to worry about all the corner cases
/// involved. This type is excellent for building your own data structures like Vec and VecDeque.
@@ -50,7 +60,12 @@ enum AllocInit {
#[allow(missing_debug_implementations)]
pub(crate) struct RawVec<T, A: Allocator = Global> {
ptr: Unique<T>,
- cap: usize,
+ /// Never used for ZSTs; it's `capacity()`'s responsibility to return usize::MAX in that case.
+ ///
+ /// # Safety
+ ///
+ /// `cap` must be in the `0..=isize::MAX` range.
+ cap: Cap,
alloc: A,
}
@@ -119,7 +134,7 @@ impl<T, A: Allocator> RawVec<T, A> {
/// the returned `RawVec`.
pub const fn new_in(alloc: A) -> Self {
// `cap: 0` means "unallocated". zero-sized types are ignored.
- Self { ptr: Unique::dangling(), cap: 0, alloc }
+ Self { ptr: Unique::dangling(), cap: Cap::ZERO, alloc }
}
/// Like `with_capacity`, but parameterized over the choice of
@@ -194,7 +209,7 @@ impl<T, A: Allocator> RawVec<T, A> {
// here should change to `ptr.len() / mem::size_of::<T>()`.
Self {
ptr: unsafe { Unique::new_unchecked(ptr.cast().as_ptr()) },
- cap: capacity,
+ cap: unsafe { Cap(capacity) },
alloc,
}
}
@@ -207,12 +222,13 @@ impl<T, A: Allocator> RawVec<T, A> {
/// The `ptr` must be allocated (via the given allocator `alloc`), and with the given
/// `capacity`.
/// The `capacity` cannot exceed `isize::MAX` for sized types. (only a concern on 32-bit
- /// systems). ZST vectors may have a capacity up to `usize::MAX`.
+ /// systems). For ZSTs capacity is ignored.
/// If the `ptr` and `capacity` come from a `RawVec` created via `alloc`, then this is
/// guaranteed.
#[inline]
pub unsafe fn from_raw_parts_in(ptr: *mut T, capacity: usize, alloc: A) -> Self {
- Self { ptr: unsafe { Unique::new_unchecked(ptr) }, cap: capacity, alloc }
+ let cap = if T::IS_ZST { Cap::ZERO } else { unsafe { Cap(capacity) } };
+ Self { ptr: unsafe { Unique::new_unchecked(ptr) }, cap, alloc }
}
/// Gets a raw pointer to the start of the allocation. Note that this is
@@ -228,7 +244,7 @@ impl<T, A: Allocator> RawVec<T, A> {
/// This will always be `usize::MAX` if `T` is zero-sized.
#[inline(always)]
pub fn capacity(&self) -> usize {
- if T::IS_ZST { usize::MAX } else { self.cap }
+ if T::IS_ZST { usize::MAX } else { self.cap.0 }
}
/// Returns a shared reference to the allocator backing this `RawVec`.
@@ -237,7 +253,7 @@ impl<T, A: Allocator> RawVec<T, A> {
}
fn current_memory(&self) -> Option<(NonNull<u8>, Layout)> {
- if T::IS_ZST || self.cap == 0 {
+ if T::IS_ZST || self.cap.0 == 0 {
None
} else {
// We could use Layout::array here which ensures the absence of isize and usize overflows
@@ -247,7 +263,7 @@ impl<T, A: Allocator> RawVec<T, A> {
let _: () = const { assert!(mem::size_of::<T>() % mem::align_of::<T>() == 0) };
unsafe {
let align = mem::align_of::<T>();
- let size = mem::size_of::<T>().unchecked_mul(self.cap);
+ let size = mem::size_of::<T>().unchecked_mul(self.cap.0);
let layout = Layout::from_size_align_unchecked(size, align);
Some((self.ptr.cast().into(), layout))
}
@@ -375,12 +391,15 @@ impl<T, A: Allocator> RawVec<T, A> {
additional > self.capacity().wrapping_sub(len)
}
- fn set_ptr_and_cap(&mut self, ptr: NonNull<[u8]>, cap: usize) {
+ /// # Safety:
+ ///
+ /// `cap` must not exceed `isize::MAX`.
+ unsafe fn set_ptr_and_cap(&mut self, ptr: NonNull<[u8]>, cap: usize) {
// Allocators currently return a `NonNull<[u8]>` whose length matches
// the size requested. If that ever changes, the capacity here should
// change to `ptr.len() / mem::size_of::<T>()`.
self.ptr = unsafe { Unique::new_unchecked(ptr.cast().as_ptr()) };
- self.cap = cap;
+ self.cap = unsafe { Cap(cap) };
}
// This method is usually instantiated many times. So we want it to be as
@@ -405,14 +424,15 @@ impl<T, A: Allocator> RawVec<T, A> {
// This guarantees exponential growth. The doubling cannot overflow
// because `cap <= isize::MAX` and the type of `cap` is `usize`.
- let cap = cmp::max(self.cap * 2, required_cap);
+ let cap = cmp::max(self.cap.0 * 2, required_cap);
let cap = cmp::max(Self::MIN_NON_ZERO_CAP, cap);
let new_layout = Layout::array::<T>(cap);
// `finish_grow` is non-generic over `T`.
let ptr = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?;
- self.set_ptr_and_cap(ptr, cap);
+ // SAFETY: finish_grow would have resulted in a capacity overflow if we tried to allocate more than isize::MAX items
+ unsafe { self.set_ptr_and_cap(ptr, cap) };
Ok(())
}
@@ -431,7 +451,10 @@ impl<T, A: Allocator> RawVec<T, A> {
// `finish_grow` is non-generic over `T`.
let ptr = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?;
- self.set_ptr_and_cap(ptr, cap);
+ // SAFETY: finish_grow would have resulted in a capacity overflow if we tried to allocate more than isize::MAX items
+ unsafe {
+ self.set_ptr_and_cap(ptr, cap);
+ }
Ok(())
}
@@ -449,7 +472,7 @@ impl<T, A: Allocator> RawVec<T, A> {
if cap == 0 {
unsafe { self.alloc.deallocate(ptr, layout) };
self.ptr = Unique::dangling();
- self.cap = 0;
+ self.cap = Cap::ZERO;
} else {
let ptr = unsafe {
// `Layout::array` cannot overflow here because it would have
@@ -460,7 +483,10 @@ impl<T, A: Allocator> RawVec<T, A> {
.shrink(ptr, layout, new_layout)
.map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })?
};
- self.set_ptr_and_cap(ptr, cap);
+ // SAFETY: if the allocation is valid, then the capacity is too
+ unsafe {
+ self.set_ptr_and_cap(ptr, cap);
+ }
}
Ok(())
}
diff --git a/library/alloc/src/raw_vec/tests.rs b/library/alloc/src/raw_vec/tests.rs
index ff322f0da..f8cada01c 100644
--- a/library/alloc/src/raw_vec/tests.rs
+++ b/library/alloc/src/raw_vec/tests.rs
@@ -1,4 +1,5 @@
use super::*;
+use core::mem::size_of;
use std::cell::Cell;
#[test]
@@ -161,3 +162,11 @@ fn zst_reserve_exact_panic() {
v.reserve_exact(101, usize::MAX - 100);
}
+
+#[test]
+fn niches() {
+ let baseline = size_of::<RawVec<u8>>();
+ assert_eq!(size_of::<Option<RawVec<u8>>>(), baseline);
+ assert_eq!(size_of::<Option<Option<RawVec<u8>>>>(), baseline);
+ assert_eq!(size_of::<Option<Option<Option<RawVec<u8>>>>>(), baseline);
+}
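
`Cap` relies on the internal `rustc_layout_scalar_valid_range_end` attribute, which is not available outside the standard library; the same niche effect that the `niches` test checks is visible through the stable `NonZero*` types:

```rust
use std::mem::size_of;
use std::num::NonZeroUsize;

fn main() {
    // The forbidden value (0 here, anything above isize::MAX for `Cap`)
    // becomes a free encoding for `None`, so the Option adds no size.
    assert_eq!(size_of::<Option<NonZeroUsize>>(), size_of::<usize>());
    assert_eq!(size_of::<Option<&u8>>(), size_of::<*const u8>());
}
```
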
diff --git a/library/alloc/src/rc.rs b/library/alloc/src/rc.rs
index dd7876bed..59f3a50dd 100644
--- a/library/alloc/src/rc.rs
+++ b/library/alloc/src/rc.rs
@@ -1748,7 +1748,6 @@ impl<T: Clone, A: Allocator + Clone> Rc<T, A> {
/// # Examples
///
/// ```
- /// #![feature(arc_unwrap_or_clone)]
/// # use std::{ptr, rc::Rc};
/// let inner = String::from("test");
/// let ptr = inner.as_ptr();
@@ -1769,7 +1768,7 @@ impl<T: Clone, A: Allocator + Clone> Rc<T, A> {
/// assert!(ptr::eq(ptr, inner.as_ptr()));
/// ```
#[inline]
- #[unstable(feature = "arc_unwrap_or_clone", issue = "93610")]
+ #[stable(feature = "arc_unwrap_or_clone", since = "1.76.0")]
pub fn unwrap_or_clone(this: Self) -> T {
Rc::try_unwrap(this).unwrap_or_else(|rc| (*rc).clone())
}
@@ -2023,6 +2022,7 @@ impl<T, A: Allocator> Rc<[T], A> {
}
}
+#[cfg(not(no_global_oom_handling))]
/// Specialization trait used for `From<&[T]>`.
trait RcFromSlice<T> {
fn from_slice(slice: &[T]) -> Self;
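
`Rc::unwrap_or_clone` (and the matching `Arc` method below) is marked stable as of 1.76. A brief sketch of its two cases:

```rust
use std::rc::Rc;

fn main() {
    // Sole owner: the String is moved out, no clone happens.
    let unique = Rc::new(String::from("only"));
    assert_eq!(Rc::unwrap_or_clone(unique), "only");

    // Shared: the inner value is cloned and the other Rc keeps the original.
    let shared = Rc::new(String::from("shared"));
    let other = Rc::clone(&shared);
    assert_eq!(Rc::unwrap_or_clone(shared), "shared");
    assert_eq!(*other, "shared");
}
```
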
diff --git a/library/alloc/src/rc/tests.rs b/library/alloc/src/rc/tests.rs
index 1f221b86f..c8a40603d 100644
--- a/library/alloc/src/rc/tests.rs
+++ b/library/alloc/src/rc/tests.rs
@@ -1,12 +1,7 @@
use super::*;
-use std::boxed::Box;
use std::cell::RefCell;
use std::clone::Clone;
-use std::convert::{From, TryInto};
-use std::mem::drop;
-use std::option::Option::{self, None, Some};
-use std::result::Result::{Err, Ok};
#[test]
fn test_clone() {
diff --git a/library/alloc/src/sync.rs b/library/alloc/src/sync.rs
index 351e6c1a4..85df49163 100644
--- a/library/alloc/src/sync.rs
+++ b/library/alloc/src/sync.rs
@@ -2174,7 +2174,6 @@ impl<T: Clone, A: Allocator + Clone> Arc<T, A> {
/// # Examples
///
/// ```
- /// #![feature(arc_unwrap_or_clone)]
/// # use std::{ptr, sync::Arc};
/// let inner = String::from("test");
/// let ptr = inner.as_ptr();
@@ -2195,7 +2194,7 @@ impl<T: Clone, A: Allocator + Clone> Arc<T, A> {
/// assert!(ptr::eq(ptr, inner.as_ptr()));
/// ```
#[inline]
- #[unstable(feature = "arc_unwrap_or_clone", issue = "93610")]
+ #[stable(feature = "arc_unwrap_or_clone", since = "1.76.0")]
pub fn unwrap_or_clone(this: Self) -> T {
Arc::try_unwrap(this).unwrap_or_else(|arc| (*arc).clone())
}
@@ -2843,16 +2842,14 @@ impl<T: ?Sized, A: Allocator> Weak<T, A> {
/// (i.e., when this `Weak` was created by `Weak::new`).
#[inline]
fn inner(&self) -> Option<WeakInner<'_>> {
- if is_dangling(self.ptr.as_ptr()) {
+ let ptr = self.ptr.as_ptr();
+ if is_dangling(ptr) {
None
} else {
// We are careful to *not* create a reference covering the "data" field, as
// the field may be mutated concurrently (for example, if the last `Arc`
// is dropped, the data field will be dropped in-place).
- Some(unsafe {
- let ptr = self.ptr.as_ptr();
- WeakInner { strong: &(*ptr).strong, weak: &(*ptr).weak }
- })
+ Some(unsafe { WeakInner { strong: &(*ptr).strong, weak: &(*ptr).weak } })
}
}
@@ -3503,6 +3500,7 @@ impl<T> FromIterator<T> for Arc<[T]> {
}
}
+#[cfg(not(no_global_oom_handling))]
/// Specialization trait used for collecting into `Arc<[T]>`.
trait ToArcSlice<T>: Iterator<Item = T> + Sized {
fn to_arc_slice(self) -> Arc<[T]>;
diff --git a/library/alloc/src/sync/tests.rs b/library/alloc/src/sync/tests.rs
index 863d58bdf..d37e45569 100644
--- a/library/alloc/src/sync/tests.rs
+++ b/library/alloc/src/sync/tests.rs
@@ -1,21 +1,12 @@
use super::*;
-use std::boxed::Box;
use std::clone::Clone;
-use std::convert::{From, TryInto};
-use std::mem::drop;
-use std::ops::Drop;
-use std::option::Option::{self, None, Some};
-use std::sync::atomic::{
- self,
- Ordering::{Acquire, SeqCst},
-};
+use std::option::Option::None;
+use std::sync::atomic::Ordering::SeqCst;
use std::sync::mpsc::channel;
use std::sync::Mutex;
use std::thread;
-use crate::vec::Vec;
-
struct Canary(*mut atomic::AtomicUsize);
impl Drop for Canary {
diff --git a/library/alloc/src/tests.rs b/library/alloc/src/tests.rs
index b1d3a9fa8..ab256ceae 100644
--- a/library/alloc/src/tests.rs
+++ b/library/alloc/src/tests.rs
@@ -1,8 +1,6 @@
//! Test for `boxed` mod.
use core::any::Any;
-use core::clone::Clone;
-use core::convert::TryInto;
use core::ops::Deref;
use std::boxed::Box;
diff --git a/library/alloc/src/vec/in_place_collect.rs b/library/alloc/src/vec/in_place_collect.rs
index 5ecd04799..e68cce04c 100644
--- a/library/alloc/src/vec/in_place_collect.rs
+++ b/library/alloc/src/vec/in_place_collect.rs
@@ -6,11 +6,11 @@
//! The specialization in this module applies to iterators in the shape of
//! `source.adapter().adapter().adapter().collect::<Vec<U>>()`
//! where `source` is an owning iterator obtained from [`Vec<T>`], [`Box<[T]>`][box] (by conversion to `Vec`)
-//! or [`BinaryHeap<T>`], the adapters each consume one or more items per step
-//! (represented by [`InPlaceIterable`]), provide transitive access to `source` (via [`SourceIter`])
-//! and thus the underlying allocation. And finally the layouts of `T` and `U` must
-//! have the same size and alignment, this is currently ensured via const eval instead of trait bounds
-//! in the specialized [`SpecFromIter`] implementation.
+//! or [`BinaryHeap<T>`], the adapters guarantee to consume enough items per step to make room
+//! for the results (represented by [`InPlaceIterable`]), provide transitive access to `source`
+//! (via [`SourceIter`]) and thus the underlying allocation.
+//! And finally there are alignment and size constraints to consider; this is currently ensured via
+//! const eval instead of trait bounds in the specialized [`SpecFromIter`] implementation.
//!
//! [`BinaryHeap<T>`]: crate::collections::BinaryHeap
//! [box]: crate::boxed::Box
@@ -35,11 +35,28 @@
//! the step of reading a value and getting a reference to write to. Instead raw pointers must be
//! used on the reader and writer side.
//!
-//! That writes never clobber a yet-to-be-read item is ensured by the [`InPlaceIterable`] requirements.
+//! That writes never clobber yet-to-be-read items is ensured by the [`InPlaceIterable`] requirements.
//!
//! # Layout constraints
//!
-//! [`Allocator`] requires that `allocate()` and `deallocate()` have matching alignment and size.
+//! When recycling an allocation between different types we must uphold the [`Allocator`] contract
+//! which means that the input and output Layouts have to "fit".
+//!
+//! To complicate things further `InPlaceIterable` supports splitting or merging items into smaller/
+//! larger ones to enable (de)aggregation of arrays.
+//!
+//! Ultimately each step of the iterator must free up enough *bytes* in the source to make room
+//! for the next output item.
+//! If `T` and `U` have the same size no fixup is needed.
+//! If `T`'s size is a multiple of `U`'s we can compensate by multiplying the capacity accordingly.
+//! Otherwise the input capacity (and thus layout) in bytes may not be representable by the output
+//! `Vec<U>`. In that case `alloc.shrink()` is used to update the allocation's layout.
+//!
+//! Alignments of `T` must be the same or larger than `U`. Since alignments are always a power
+//! of two _larger_ implies _is a multiple of_.
+//!
+//! See `in_place_collectible()` for the current conditions.
+//!
//! Additionally this specialization doesn't make sense for ZSTs as there is no reallocation to
//! avoid and it would make pointer arithmetic more difficult.
//!
@@ -55,7 +72,7 @@
//! This is handled by the [`InPlaceDrop`] guard for sink items (`U`) and by
//! [`vec::IntoIter::forget_allocation_drop_remaining()`] for remaining source items (`T`).
//!
-//! If dropping any remaining source item (`T`) panics then [`InPlaceDstBufDrop`] will handle dropping
+//! If dropping any remaining source item (`T`) panics then [`InPlaceDstDataSrcBufDrop`] will handle dropping
//! the already collected sink items (`U`) and freeing the allocation.
//!
//! [`vec::IntoIter::forget_allocation_drop_remaining()`]: super::IntoIter::forget_allocation_drop_remaining()
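
A concrete instance of the size/alignment arithmetic described in the rewritten module docs, using the `flatten` case mentioned in the comments (`[u8; 4]` and `u8` share an alignment and have a 4:1 size ratio). Whether the in-place path actually fires depends on the adapters' `InPlaceIterable` impls, so this only illustrates the constraint, not a guarantee:

```rust
fn main() {
    let chunks: Vec<[u8; 4]> = vec![[1, 2, 3, 4], [5, 6, 7, 8]];
    // size_of::<[u8; 4]>() == 4 * size_of::<u8>() and the alignments match,
    // so the destination capacity is simply the source capacity times four
    // and no realloc/shrink is needed for the byte layout to "fit".
    let flat: Vec<u8> = chunks.into_iter().flatten().collect();
    assert_eq!(flat, [1, 2, 3, 4, 5, 6, 7, 8]);
}
```
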
@@ -137,44 +154,98 @@
//! }
//! vec.truncate(write_idx);
//! ```
+use crate::alloc::{handle_alloc_error, Global};
+use core::alloc::Allocator;
+use core::alloc::Layout;
use core::iter::{InPlaceIterable, SourceIter, TrustedRandomAccessNoCoerce};
+use core::marker::PhantomData;
use core::mem::{self, ManuallyDrop, SizedTypeProperties};
-use core::ptr::{self};
+use core::num::NonZeroUsize;
+use core::ptr::{self, NonNull};
+
+use super::{InPlaceDrop, InPlaceDstDataSrcBufDrop, SpecFromIter, SpecFromIterNested, Vec};
+
+const fn in_place_collectible<DEST, SRC>(
+ step_merge: Option<NonZeroUsize>,
+ step_expand: Option<NonZeroUsize>,
+) -> bool {
+ // Require matching alignments because an alignment-changing realloc is inefficient on many
+ // system allocators and better implementations would require the unstable Allocator trait.
+ if const { SRC::IS_ZST || DEST::IS_ZST || mem::align_of::<SRC>() != mem::align_of::<DEST>() } {
+ return false;
+ }
-use super::{InPlaceDrop, InPlaceDstBufDrop, SpecFromIter, SpecFromIterNested, Vec};
+ match (step_merge, step_expand) {
+ (Some(step_merge), Some(step_expand)) => {
+ // At least N merged source items -> at most M expanded destination items
+ // e.g.
+ // - 1 x [u8; 4] -> 4x u8, via flatten
+ // - 4 x u8 -> 1x [u8; 4], via array_chunks
+ mem::size_of::<SRC>() * step_merge.get() >= mem::size_of::<DEST>() * step_expand.get()
+ }
+ // Fall back to other from_iter impls if an overflow occurred in the step merge/expansion
+ // tracking.
+ _ => false,
+ }
+}
+
+const fn needs_realloc<SRC, DEST>(src_cap: usize, dst_cap: usize) -> bool {
+ if const { mem::align_of::<SRC>() != mem::align_of::<DEST>() } {
+ // FIXME: use unreachable! once that works in const
+ panic!("in_place_collectible() prevents this");
+ }
-/// Specialization marker for collecting an iterator pipeline into a Vec while reusing the
-/// source allocation, i.e. executing the pipeline in place.
-#[rustc_unsafe_specialization_marker]
-pub(super) trait InPlaceIterableMarker {}
+ // If src type size is an integer multiple of the destination type size then
+ // the caller will have calculated a `dst_cap` that is an integer multiple of
+ // `src_cap` without remainder.
+ if const {
+ let src_sz = mem::size_of::<SRC>();
+ let dest_sz = mem::size_of::<DEST>();
+ dest_sz != 0 && src_sz % dest_sz == 0
+ } {
+ return false;
+ }
-impl<T> InPlaceIterableMarker for T where T: InPlaceIterable {}
+ // type layouts don't guarantee a fit, so do a runtime check to see if
+ // the allocations happen to match
+ return src_cap > 0 && src_cap * mem::size_of::<SRC>() != dst_cap * mem::size_of::<DEST>();
+}
+
+/// This provides a shorthand for the source type since local type aliases aren't a thing.
+#[rustc_specialization_trait]
+trait InPlaceCollect: SourceIter<Source: AsVecIntoIter> + InPlaceIterable {
+ type Src;
+}
+
+impl<T> InPlaceCollect for T
+where
+ T: SourceIter<Source: AsVecIntoIter> + InPlaceIterable,
+{
+ type Src = <<T as SourceIter>::Source as AsVecIntoIter>::Item;
+}
impl<T, I> SpecFromIter<T, I> for Vec<T>
where
- I: Iterator<Item = T> + SourceIter<Source: AsVecIntoIter> + InPlaceIterableMarker,
+ I: Iterator<Item = T> + InPlaceCollect,
+ <I as SourceIter>::Source: AsVecIntoIter,
{
default fn from_iter(mut iterator: I) -> Self {
// See "Layout constraints" section in the module documentation. We rely on const
// optimization here since these conditions currently cannot be expressed as trait bounds
- if T::IS_ZST
- || mem::size_of::<T>()
- != mem::size_of::<<<I as SourceIter>::Source as AsVecIntoIter>::Item>()
- || mem::align_of::<T>()
- != mem::align_of::<<<I as SourceIter>::Source as AsVecIntoIter>::Item>()
- {
+ if const { !in_place_collectible::<T, I::Src>(I::MERGE_BY, I::EXPAND_BY) } {
// fallback to more generic implementations
return SpecFromIterNested::from_iter(iterator);
}
- let (src_buf, src_ptr, dst_buf, dst_end, cap) = unsafe {
+ let (src_buf, src_ptr, src_cap, mut dst_buf, dst_end, dst_cap) = unsafe {
let inner = iterator.as_inner().as_into_iter();
(
inner.buf.as_ptr(),
inner.ptr,
+ inner.cap,
inner.buf.as_ptr() as *mut T,
inner.end as *const T,
- inner.cap,
+ inner.cap * mem::size_of::<I::Src>() / mem::size_of::<T>(),
)
};
@@ -195,19 +266,56 @@ where
);
}
- // The ownership of the allocation and the new `T` values is temporarily moved into `dst_guard`.
- // This is safe because `forget_allocation_drop_remaining` immediately forgets the allocation
+ // The ownership of the source allocation and the new `T` values is temporarily moved into `dst_guard`.
+ // This is safe because
+ // * `forget_allocation_drop_remaining` immediately forgets the allocation
// before any panic can occur in order to avoid any double free, and then proceeds to drop
// any remaining values at the tail of the source.
+ // * the shrink either panics without invalidating the allocation, aborts or
+ // succeeds. In the last case we disarm the guard.
//
// Note: This access to the source wouldn't be allowed by the TrustedRandomIteratorNoCoerce
// contract (used by SpecInPlaceCollect below). But see the "O(1) collect" section in the
// module documentation why this is ok anyway.
- let dst_guard = InPlaceDstBufDrop { ptr: dst_buf, len, cap };
+ let dst_guard =
+ InPlaceDstDataSrcBufDrop { ptr: dst_buf, len, src_cap, src: PhantomData::<I::Src> };
src.forget_allocation_drop_remaining();
+
+ // Adjust the allocation if the source had a capacity in bytes that wasn't a multiple
+ // of the destination type size.
+ // Since the discrepancy should generally be small this should only result in some
+ // bookkeeping updates and no memmove.
+ if needs_realloc::<I::Src, T>(src_cap, dst_cap) {
+ let alloc = Global;
+ debug_assert_ne!(src_cap, 0);
+ debug_assert_ne!(dst_cap, 0);
+ unsafe {
+ // The old allocation exists, therefore it must have a valid layout.
+ let src_align = mem::align_of::<I::Src>();
+ let src_size = mem::size_of::<I::Src>().unchecked_mul(src_cap);
+ let old_layout = Layout::from_size_align_unchecked(src_size, src_align);
+
+ // The allocation must be equal or smaller for in-place iteration to be possible
+ // therefore the new layout must be ≤ the old one and therefore valid.
+ let dst_align = mem::align_of::<T>();
+ let dst_size = mem::size_of::<T>().unchecked_mul(dst_cap);
+ let new_layout = Layout::from_size_align_unchecked(dst_size, dst_align);
+
+ let result = alloc.shrink(
+ NonNull::new_unchecked(dst_buf as *mut u8),
+ old_layout,
+ new_layout,
+ );
+ let Ok(reallocated) = result else { handle_alloc_error(new_layout) };
+ dst_buf = reallocated.as_ptr() as *mut T;
+ }
+ } else {
+ debug_assert_eq!(src_cap * mem::size_of::<I::Src>(), dst_cap * mem::size_of::<T>());
+ }
+
mem::forget(dst_guard);
- let vec = unsafe { Vec::from_raw_parts(dst_buf, len, cap) };
+ let vec = unsafe { Vec::from_raw_parts(dst_buf, len, dst_cap) };
vec
}
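
The net effect of this machinery can sometimes be observed (though it is not a documented guarantee) by comparing buffer addresses before and after a same-size, same-alignment collect:

```rust
fn main() {
    let src: Vec<u32> = (0..1024).collect();
    let before = src.as_ptr() as usize;
    // Same item size and alignment, one item consumed per item produced:
    // the specialization may hand the source buffer straight to the result.
    let dst: Vec<u32> = src.into_iter().map(|x| x.wrapping_add(1)).collect();
    let after = dst.as_ptr() as usize;
    println!("allocation reused: {}", before == after);
}
```
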
diff --git a/library/alloc/src/vec/in_place_drop.rs b/library/alloc/src/vec/in_place_drop.rs
index 25ca33c6a..40a540b57 100644
--- a/library/alloc/src/vec/in_place_drop.rs
+++ b/library/alloc/src/vec/in_place_drop.rs
@@ -1,6 +1,10 @@
-use core::ptr::{self};
+use core::marker::PhantomData;
+use core::ptr::{self, drop_in_place};
use core::slice::{self};
+use crate::alloc::Global;
+use crate::raw_vec::RawVec;
+
// A helper struct for in-place iteration that drops the destination slice of iteration,
// i.e. the head. The source slice (the tail) is dropped by IntoIter.
pub(super) struct InPlaceDrop<T> {
@@ -23,17 +27,23 @@ impl<T> Drop for InPlaceDrop<T> {
}
}
-// A helper struct for in-place collection that drops the destination allocation and elements,
-// to avoid leaking them if some other destructor panics.
-pub(super) struct InPlaceDstBufDrop<T> {
- pub(super) ptr: *mut T,
+// A helper struct for in-place collection that drops the destination items together with
+// the source allocation - i.e. before the reallocation happened - to avoid leaking them
+// if some other destructor panics.
+pub(super) struct InPlaceDstDataSrcBufDrop<Src, Dest> {
+ pub(super) ptr: *mut Dest,
pub(super) len: usize,
- pub(super) cap: usize,
+ pub(super) src_cap: usize,
+ pub(super) src: PhantomData<Src>,
}
-impl<T> Drop for InPlaceDstBufDrop<T> {
+impl<Src, Dest> Drop for InPlaceDstDataSrcBufDrop<Src, Dest> {
#[inline]
fn drop(&mut self) {
- unsafe { super::Vec::from_raw_parts(self.ptr, self.len, self.cap) };
+ unsafe {
+ let _drop_allocation =
+ RawVec::<Src>::from_raw_parts_in(self.ptr.cast::<Src>(), self.src_cap, Global);
+ drop_in_place(core::ptr::slice_from_raw_parts_mut::<Dest>(self.ptr, self.len));
+ };
}
}
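
The renamed guard now does two jobs: it drops the destination items already written and frees the *source* allocation if a later step panics. A stripped-down sketch of just the item-dropping half of that guard pattern (hypothetical `PartialSliceDropGuard` name, not the type above):

```rust
use std::ptr;

// Drops a prefix of already-initialized items unless the guard is disarmed
// with mem::forget before it goes out of scope.
struct PartialSliceDropGuard<T> {
    ptr: *mut T,
    len: usize,
}

impl<T> Drop for PartialSliceDropGuard<T> {
    fn drop(&mut self) {
        unsafe { ptr::drop_in_place(ptr::slice_from_raw_parts_mut(self.ptr, self.len)) };
    }
}

fn main() {
    let mut items = vec![String::from("a"), String::from("b")];
    // Guard over a prefix of length 0: nothing has been written yet.
    let guard = PartialSliceDropGuard { ptr: items.as_mut_ptr(), len: 0 };
    // ... every fallible step succeeded, so disarm the guard:
    std::mem::forget(guard);
}
```
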
diff --git a/library/alloc/src/vec/into_iter.rs b/library/alloc/src/vec/into_iter.rs
index b2db2fdfd..b03e04b7c 100644
--- a/library/alloc/src/vec/into_iter.rs
+++ b/library/alloc/src/vec/into_iter.rs
@@ -7,7 +7,8 @@ use crate::raw_vec::RawVec;
use core::array;
use core::fmt;
use core::iter::{
- FusedIterator, InPlaceIterable, SourceIter, TrustedLen, TrustedRandomAccessNoCoerce,
+ FusedIterator, InPlaceIterable, SourceIter, TrustedFused, TrustedLen,
+ TrustedRandomAccessNoCoerce,
};
use core::marker::PhantomData;
use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties};
@@ -285,9 +286,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
// Also note the implementation of `Self: TrustedRandomAccess` requires
// that `T: Copy` so reading elements from the buffer doesn't invalidate
// them for `Drop`.
- unsafe {
- if T::IS_ZST { mem::zeroed() } else { ptr::read(self.ptr.add(i)) }
- }
+ unsafe { if T::IS_ZST { mem::zeroed() } else { ptr::read(self.ptr.add(i)) } }
}
}
@@ -339,6 +338,10 @@ impl<T, A: Allocator> ExactSizeIterator for IntoIter<T, A> {
#[stable(feature = "fused", since = "1.26.0")]
impl<T, A: Allocator> FusedIterator for IntoIter<T, A> {}
+#[doc(hidden)]
+#[unstable(issue = "none", feature = "trusted_fused")]
+unsafe impl<T, A: Allocator> TrustedFused for IntoIter<T, A> {}
+
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T, A: Allocator> TrustedLen for IntoIter<T, A> {}
@@ -423,7 +426,10 @@ unsafe impl<#[may_dangle] T, A: Allocator> Drop for IntoIter<T, A> {
// also refer to the vec::in_place_collect module documentation to get an overview
#[unstable(issue = "none", feature = "inplace_iteration")]
#[doc(hidden)]
-unsafe impl<T, A: Allocator> InPlaceIterable for IntoIter<T, A> {}
+unsafe impl<T, A: Allocator> InPlaceIterable for IntoIter<T, A> {
+ const EXPAND_BY: Option<NonZeroUsize> = NonZeroUsize::new(1);
+ const MERGE_BY: Option<NonZeroUsize> = NonZeroUsize::new(1);
+}
#[unstable(issue = "none", feature = "inplace_iteration")]
#[doc(hidden)]
diff --git a/library/alloc/src/vec/mod.rs b/library/alloc/src/vec/mod.rs
index 6c78d65f1..b2e920397 100644
--- a/library/alloc/src/vec/mod.rs
+++ b/library/alloc/src/vec/mod.rs
@@ -58,6 +58,7 @@ use core::cmp;
use core::cmp::Ordering;
use core::fmt;
use core::hash::{Hash, Hasher};
+#[cfg(not(no_global_oom_handling))]
use core::iter;
use core::marker::PhantomData;
use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties};
@@ -101,6 +102,7 @@ mod into_iter;
#[cfg(not(no_global_oom_handling))]
use self::is_zero::IsZero;
+#[cfg(not(no_global_oom_handling))]
mod is_zero;
#[cfg(not(no_global_oom_handling))]
@@ -121,7 +123,7 @@ use self::set_len_on_drop::SetLenOnDrop;
mod set_len_on_drop;
#[cfg(not(no_global_oom_handling))]
-use self::in_place_drop::{InPlaceDrop, InPlaceDstBufDrop};
+use self::in_place_drop::{InPlaceDrop, InPlaceDstDataSrcBufDrop};
#[cfg(not(no_global_oom_handling))]
mod in_place_drop;
@@ -1775,7 +1777,32 @@ impl<T, A: Allocator> Vec<T, A> {
return;
}
- /* INVARIANT: vec.len() > read >= write > write-1 >= 0 */
+ // Check if we ever want to remove anything.
+ // This allows using `copy_nonoverlapping` in the next cycle,
+ // and avoids any memory writes if we don't need to remove anything.
+ let mut first_duplicate_idx: usize = 1;
+ let start = self.as_mut_ptr();
+ while first_duplicate_idx != len {
+ let found_duplicate = unsafe {
+ // SAFETY: first_duplicate always in range [1..len)
+ // Note that we start iteration from 1 so we never overflow.
+ let prev = start.add(first_duplicate_idx.wrapping_sub(1));
+ let current = start.add(first_duplicate_idx);
+ // We explicitly say in docs that references are reversed.
+ same_bucket(&mut *current, &mut *prev)
+ };
+ if found_duplicate {
+ break;
+ }
+ first_duplicate_idx += 1;
+ }
+ // Don't need to remove anything.
+ // We cannot get bigger than len.
+ if first_duplicate_idx == len {
+ return;
+ }
+
+ /* INVARIANT: vec.len() > read > write > write-1 >= 0 */
struct FillGapOnDrop<'a, T, A: core::alloc::Allocator> {
/* Offset of the element we want to check if it is duplicate */
read: usize,
@@ -1821,31 +1848,39 @@ impl<T, A: Allocator> Vec<T, A> {
}
}
- let mut gap = FillGapOnDrop { read: 1, write: 1, vec: self };
- let ptr = gap.vec.as_mut_ptr();
-
/* Drop items while going through Vec, it should be more efficient than
* doing slice partition_dedup + truncate */
+ // Construct gap first and then drop item to avoid memory corruption if `T::drop` panics.
+ let mut gap =
+ FillGapOnDrop { read: first_duplicate_idx + 1, write: first_duplicate_idx, vec: self };
+ unsafe {
+ // SAFETY: we checked that first_duplicate_idx in bounds before.
+ // If drop panics, `gap` would remove this item without drop.
+ ptr::drop_in_place(start.add(first_duplicate_idx));
+ }
+
/* SAFETY: Because of the invariant, read_ptr, prev_ptr and write_ptr
* are always in-bounds and read_ptr never aliases prev_ptr */
unsafe {
while gap.read < len {
- let read_ptr = ptr.add(gap.read);
- let prev_ptr = ptr.add(gap.write.wrapping_sub(1));
+ let read_ptr = start.add(gap.read);
+ let prev_ptr = start.add(gap.write.wrapping_sub(1));
- if same_bucket(&mut *read_ptr, &mut *prev_ptr) {
+ // We explicitly say in docs that references are reversed.
+ let found_duplicate = same_bucket(&mut *read_ptr, &mut *prev_ptr);
+ if found_duplicate {
// Increase `gap.read` now since the drop may panic.
gap.read += 1;
/* We have found duplicate, drop it in-place */
ptr::drop_in_place(read_ptr);
} else {
- let write_ptr = ptr.add(gap.write);
+ let write_ptr = start.add(gap.write);
- /* Because `read_ptr` can be equal to `write_ptr`, we either
- * have to use `copy` or conditional `copy_nonoverlapping`.
- * Looks like the first option is faster. */
- ptr::copy(read_ptr, write_ptr, 1);
+ /* read_ptr cannot be equal to write_ptr because at this point
+ * we are guaranteed to have skipped at least one element (before the loop starts).
+ */
+ ptr::copy_nonoverlapping(read_ptr, write_ptr, 1);
/* We have filled that place, so go further */
gap.write += 1;
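
As the new comments note, `same_bucket` receives the current candidate first and the previously retained element second. A short `dedup_by` usage example for context (the pre-scan for the first duplicate is purely an internal fast path and does not change observable behaviour):

```rust
fn main() {
    let mut v = vec![1, 1, 2, 3, 3, 3, 4];
    // `curr` is the candidate duplicate, `prev` the last kept element.
    v.dedup_by(|curr, prev| *curr == *prev);
    assert_eq!(v, [1, 2, 3, 4]);
}
```
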
@@ -2599,6 +2634,7 @@ pub fn from_elem_in<T: Clone, A: Allocator>(elem: T, n: usize, alloc: A) -> Vec<
<T as SpecFromElem>::from_elem(elem, n, alloc)
}
+#[cfg(not(no_global_oom_handling))]
trait ExtendFromWithinSpec {
/// # Safety
///
@@ -2607,6 +2643,7 @@ trait ExtendFromWithinSpec {
unsafe fn spec_extend_from_within(&mut self, src: Range<usize>);
}
+#[cfg(not(no_global_oom_handling))]
impl<T: Clone, A: Allocator> ExtendFromWithinSpec for Vec<T, A> {
default unsafe fn spec_extend_from_within(&mut self, src: Range<usize>) {
// SAFETY:
@@ -2626,6 +2663,7 @@ impl<T: Clone, A: Allocator> ExtendFromWithinSpec for Vec<T, A> {
}
}
+#[cfg(not(no_global_oom_handling))]
impl<T: Copy, A: Allocator> ExtendFromWithinSpec for Vec<T, A> {
unsafe fn spec_extend_from_within(&mut self, src: Range<usize>) {
let count = src.len();
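
The (now cfg-gated) `Copy` specialization backs the stable `Vec::extend_from_within`, which can append the requested range with a single non-overlapping copy. A usage example:

```rust
fn main() {
    let mut v = vec![1, 2, 3, 4];
    // Append a copy of elements 1..3 (the values 2 and 3) to the end.
    v.extend_from_within(1..3);
    assert_eq!(v, [1, 2, 3, 4, 2, 3]);
}
```
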
@@ -2706,7 +2744,7 @@ impl<T: Clone, A: Allocator + Clone> Clone for Vec<T, A> {
/// ```
/// use std::hash::BuildHasher;
///
-/// let b = std::collections::hash_map::RandomState::new();
+/// let b = std::hash::RandomState::new();
/// let v: Vec<u8> = vec![0xa8, 0x3c, 0x09];
/// let s: &[u8] = &[0xa8, 0x3c, 0x09];
/// assert_eq!(b.hash_one(v), b.hash_one(s));
diff --git a/library/alloc/src/vec/spec_from_elem.rs b/library/alloc/src/vec/spec_from_elem.rs
index da43d17bf..01a6db144 100644
--- a/library/alloc/src/vec/spec_from_elem.rs
+++ b/library/alloc/src/vec/spec_from_elem.rs
@@ -36,12 +36,12 @@ impl SpecFromElem for i8 {
if elem == 0 {
return Vec { buf: RawVec::with_capacity_zeroed_in(n, alloc), len: n };
}
+ let mut v = Vec::with_capacity_in(n, alloc);
unsafe {
- let mut v = Vec::with_capacity_in(n, alloc);
ptr::write_bytes(v.as_mut_ptr(), elem as u8, n);
v.set_len(n);
- v
}
+ v
}
}
@@ -51,11 +51,26 @@ impl SpecFromElem for u8 {
if elem == 0 {
return Vec { buf: RawVec::with_capacity_zeroed_in(n, alloc), len: n };
}
+ let mut v = Vec::with_capacity_in(n, alloc);
unsafe {
- let mut v = Vec::with_capacity_in(n, alloc);
ptr::write_bytes(v.as_mut_ptr(), elem, n);
v.set_len(n);
- v
}
+ v
+ }
+}
+
+// A better way would be to implement this for all ZSTs which are `Copy` and have trivial `Clone`
+// but the latter cannot be detected currently
+impl SpecFromElem for () {
+ #[inline]
+ fn from_elem<A: Allocator>(_elem: (), n: usize, alloc: A) -> Vec<(), A> {
+ let mut v = Vec::with_capacity_in(n, alloc);
+ // SAFETY: the capacity has just been set to `n`
+ // and `()` is a ZST with trivial `Clone` implementation
+ unsafe {
+ v.set_len(n);
+ }
+ v
}
}
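
With the new `()` specialization, `from_elem` only has to set the length, since zero-sized values occupy no storage. An illustrative use:

```rust
fn main() {
    // No memory is written for the elements; only the length is recorded.
    let v = vec![(); 1_000_000];
    assert_eq!(v.len(), 1_000_000);
}
```
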
diff --git a/library/alloc/src/vec/spec_from_iter.rs b/library/alloc/src/vec/spec_from_iter.rs
index efa686847..2ddfdde59 100644
--- a/library/alloc/src/vec/spec_from_iter.rs
+++ b/library/alloc/src/vec/spec_from_iter.rs
@@ -13,13 +13,13 @@ use super::{IntoIter, SpecExtend, SpecFromIterNested, Vec};
/// +-+-----------+
/// |
/// v
-/// +-+-------------------------------+ +---------------------+
-/// |SpecFromIter +---->+SpecFromIterNested |
-/// |where I: | | |where I: |
-/// | Iterator (default)----------+ | | Iterator (default) |
-/// | vec::IntoIter | | | TrustedLen |
-/// | SourceIterMarker---fallback-+ | +---------------------+
-/// +---------------------------------+
+/// +-+---------------------------------+ +---------------------+
+/// |SpecFromIter +---->+SpecFromIterNested |
+/// |where I: | | |where I: |
+/// | Iterator (default)------------+ | | Iterator (default) |
+/// | vec::IntoIter | | | TrustedLen |
+/// | InPlaceCollect--(fallback to)-+ | +---------------------+
+/// +-----------------------------------+
/// ```
pub(super) trait SpecFromIter<T, I> {
fn from_iter(iter: I) -> Self;