Diffstat (limited to 'rust/alloc')
-rw-r--r--  rust/alloc/alloc.rs            12
-rw-r--r--  rust/alloc/boxed.rs            34
-rw-r--r--  rust/alloc/collections/mod.rs   1
-rw-r--r--  rust/alloc/lib.rs               9
-rw-r--r--  rust/alloc/raw_vec.rs          77
-rw-r--r--  rust/alloc/vec/into_iter.rs    16
-rw-r--r--  rust/alloc/vec/mod.rs          81
7 files changed, 169 insertions(+), 61 deletions(-)
diff --git a/rust/alloc/alloc.rs b/rust/alloc/alloc.rs
index 150e13750f..abb791cc23 100644
--- a/rust/alloc/alloc.rs
+++ b/rust/alloc/alloc.rs
@@ -379,13 +379,20 @@ pub const fn handle_alloc_error(layout: Layout) -> ! {
panic!("allocation failed");
}
+ #[inline]
fn rt_error(layout: Layout) -> ! {
unsafe {
__rust_alloc_error_handler(layout.size(), layout.align());
}
}
- unsafe { core::intrinsics::const_eval_select((layout,), ct_error, rt_error) }
+ #[cfg(not(feature = "panic_immediate_abort"))]
+ unsafe {
+ core::intrinsics::const_eval_select((layout,), ct_error, rt_error)
+ }
+
+ #[cfg(feature = "panic_immediate_abort")]
+ ct_error(layout)
}
// For alloc test `std::alloc::handle_alloc_error` can be used directly.
@@ -418,12 +425,14 @@ pub mod __alloc_error_handler {
}
}
+#[cfg(not(no_global_oom_handling))]
/// Specialize clones into pre-allocated, uninitialized memory.
/// Used by `Box::clone` and `Rc`/`Arc::make_mut`.
pub(crate) trait WriteCloneIntoRaw: Sized {
unsafe fn write_clone_into_raw(&self, target: *mut Self);
}
+#[cfg(not(no_global_oom_handling))]
impl<T: Clone> WriteCloneIntoRaw for T {
#[inline]
default unsafe fn write_clone_into_raw(&self, target: *mut Self) {
@@ -433,6 +442,7 @@ impl<T: Clone> WriteCloneIntoRaw for T {
}
}
+#[cfg(not(no_global_oom_handling))]
impl<T: Copy> WriteCloneIntoRaw for T {
#[inline]
unsafe fn write_clone_into_raw(&self, target: *mut Self) {
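The first hunk above routes allocation errors straight to `ct_error` when the `panic_immediate_abort` feature is enabled, so the abort happens without the panic formatting and unwinding machinery. A minimal sketch of the same cfg-gating pattern, assuming a crate that declares a `panic_immediate_abort` Cargo feature and using a hypothetical `oom_error` helper:

#[cfg(not(feature = "panic_immediate_abort"))]
fn oom_error() -> ! {
    // Normal build: panic with a message; this pulls in formatting machinery.
    panic!("allocation failed");
}

#[cfg(feature = "panic_immediate_abort")]
fn oom_error() -> ! {
    // Size-optimized build: no message, no unwinding, just an immediate abort.
    std::process::abort();
}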
diff --git a/rust/alloc/boxed.rs b/rust/alloc/boxed.rs
index 9620eba172..c93a22a5c9 100644
--- a/rust/alloc/boxed.rs
+++ b/rust/alloc/boxed.rs
@@ -161,7 +161,7 @@ use core::marker::Tuple;
use core::marker::Unsize;
use core::mem::{self, SizedTypeProperties};
use core::ops::{
- CoerceUnsized, Deref, DerefMut, DispatchFromDyn, Generator, GeneratorState, Receiver,
+ CoerceUnsized, Coroutine, CoroutineState, Deref, DerefMut, DispatchFromDyn, Receiver,
};
use core::pin::Pin;
use core::ptr::{self, NonNull, Unique};
@@ -211,7 +211,7 @@ impl<T> Box<T> {
/// ```
/// let five = Box::new(5);
/// ```
- #[cfg(all(not(no_global_oom_handling)))]
+ #[cfg(not(no_global_oom_handling))]
#[inline(always)]
#[stable(feature = "rust1", since = "1.0.0")]
#[must_use]
@@ -1042,10 +1042,18 @@ impl<T: ?Sized, A: Allocator> Box<T, A> {
/// use std::ptr;
///
/// let x = Box::new(String::from("Hello"));
- /// let p = Box::into_raw(x);
+ /// let ptr = Box::into_raw(x);
/// unsafe {
- /// ptr::drop_in_place(p);
- /// dealloc(p as *mut u8, Layout::new::<String>());
+ /// ptr::drop_in_place(ptr);
+ /// dealloc(ptr as *mut u8, Layout::new::<String>());
+ /// }
+ /// ```
+ /// Note: This is equivalent to the following:
+ /// ```
+ /// let x = Box::new(String::from("Hello"));
+ /// let ptr = Box::into_raw(x);
+ /// unsafe {
+ /// drop(Box::from_raw(ptr));
/// }
/// ```
///
@@ -2110,28 +2118,28 @@ impl<T: ?Sized, A: Allocator> AsMut<T> for Box<T, A> {
#[stable(feature = "pin", since = "1.33.0")]
impl<T: ?Sized, A: Allocator> Unpin for Box<T, A> where A: 'static {}
-#[unstable(feature = "generator_trait", issue = "43122")]
-impl<G: ?Sized + Generator<R> + Unpin, R, A: Allocator> Generator<R> for Box<G, A>
+#[unstable(feature = "coroutine_trait", issue = "43122")]
+impl<G: ?Sized + Coroutine<R> + Unpin, R, A: Allocator> Coroutine<R> for Box<G, A>
where
A: 'static,
{
type Yield = G::Yield;
type Return = G::Return;
- fn resume(mut self: Pin<&mut Self>, arg: R) -> GeneratorState<Self::Yield, Self::Return> {
+ fn resume(mut self: Pin<&mut Self>, arg: R) -> CoroutineState<Self::Yield, Self::Return> {
G::resume(Pin::new(&mut *self), arg)
}
}
-#[unstable(feature = "generator_trait", issue = "43122")]
-impl<G: ?Sized + Generator<R>, R, A: Allocator> Generator<R> for Pin<Box<G, A>>
+#[unstable(feature = "coroutine_trait", issue = "43122")]
+impl<G: ?Sized + Coroutine<R>, R, A: Allocator> Coroutine<R> for Pin<Box<G, A>>
where
A: 'static,
{
type Yield = G::Yield;
type Return = G::Return;
- fn resume(mut self: Pin<&mut Self>, arg: R) -> GeneratorState<Self::Yield, Self::Return> {
+ fn resume(mut self: Pin<&mut Self>, arg: R) -> CoroutineState<Self::Yield, Self::Return> {
G::resume((*self).as_mut(), arg)
}
}
@@ -2448,4 +2456,8 @@ impl<T: core::error::Error> core::error::Error for Box<T> {
fn source(&self) -> Option<&(dyn core::error::Error + 'static)> {
core::error::Error::source(&**self)
}
+
+ fn provide<'b>(&'b self, request: &mut core::error::Request<'b>) {
+ core::error::Error::provide(&**self, request);
+ }
}
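The `Generator` to `Coroutine` rename tracks the upstream trait rename (still unstable under tracking issue #43122). A nightly-only sketch of driving a boxed coroutine through the `Coroutine for Pin<Box<G, A>>` impl above; note that newer nightlies additionally require a `#[coroutine]` attribute on the closure:

#![feature(coroutines, coroutine_trait)]

use std::ops::{Coroutine, CoroutineState};

fn main() {
    // A coroutine closure, heap-allocated and pinned.
    let mut co = Box::pin(|| {
        yield 1;
        "done"
    });
    assert!(matches!(co.as_mut().resume(()), CoroutineState::Yielded(1)));
    assert!(matches!(co.as_mut().resume(()), CoroutineState::Complete("done")));
}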
diff --git a/rust/alloc/collections/mod.rs b/rust/alloc/collections/mod.rs
index 2506065d15..00ffb3b973 100644
--- a/rust/alloc/collections/mod.rs
+++ b/rust/alloc/collections/mod.rs
@@ -150,6 +150,7 @@ impl Display for TryReserveError {
/// An intermediate trait for specialization of `Extend`.
#[doc(hidden)]
+#[cfg(not(no_global_oom_handling))]
trait SpecExtend<I: IntoIterator> {
/// Extends `self` with the contents of the given iterator.
fn spec_extend(&mut self, iter: I);
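`SpecExtend` is a specialization hook: a blanket impl provides a generic fallback, and more specific impls override it where a faster path exists. A nightly-only sketch of that pattern with a hypothetical `SpecLen` trait (using the incomplete `specialization` feature; alloc itself builds with `min_specialization`):

#![feature(specialization)]
#![allow(incomplete_features)]

trait SpecLen {
    fn spec_len(&self) -> Option<usize>;
}

impl<I: Iterator> SpecLen for I {
    // Fallback: the length of an arbitrary iterator is unknown.
    default fn spec_len(&self) -> Option<usize> {
        None
    }
}

impl<I: ExactSizeIterator> SpecLen for I {
    // Specialized: exact-size iterators can report their length.
    fn spec_len(&self) -> Option<usize> {
        Some(self.len())
    }
}

fn main() {
    assert_eq!((0..5).spec_len(), Some(5));
    assert_eq!((0..).take_while(|&x| x < 5).spec_len(), None);
}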
diff --git a/rust/alloc/lib.rs b/rust/alloc/lib.rs
index 9c7ea73da1..36f79c0755 100644
--- a/rust/alloc/lib.rs
+++ b/rust/alloc/lib.rs
@@ -80,6 +80,8 @@
not(no_sync),
target_has_atomic = "ptr"
))]
+#![doc(rust_logo)]
+#![feature(rustdoc_internals)]
#![no_std]
#![needs_allocator]
// Lints:
@@ -115,7 +117,6 @@
#![feature(const_eval_select)]
#![feature(const_maybe_uninit_as_mut_ptr)]
#![feature(const_maybe_uninit_write)]
-#![feature(const_maybe_uninit_zeroed)]
#![feature(const_pin)]
#![feature(const_refs_to_cell)]
#![feature(const_size_of_val)]
@@ -141,7 +142,6 @@
#![feature(maybe_uninit_uninit_array)]
#![feature(maybe_uninit_uninit_array_transpose)]
#![feature(pattern)]
-#![feature(pointer_byte_offsets)]
#![feature(ptr_internals)]
#![feature(ptr_metadata)]
#![feature(ptr_sub_ptr)]
@@ -156,6 +156,7 @@
#![feature(std_internals)]
#![feature(str_internals)]
#![feature(strict_provenance)]
+#![feature(trusted_fused)]
#![feature(trusted_len)]
#![feature(trusted_random_access)]
#![feature(try_trait_v2)]
@@ -168,7 +169,7 @@
//
// Language features:
// tidy-alphabetical-start
-#![cfg_attr(not(test), feature(generator_trait))]
+#![cfg_attr(not(test), feature(coroutine_trait))]
#![cfg_attr(test, feature(panic_update_hook))]
#![cfg_attr(test, feature(test))]
#![feature(allocator_internals)]
@@ -276,7 +277,7 @@ pub(crate) mod test_helpers {
/// seed not being the same for every RNG invocation too.
pub(crate) fn test_rng() -> rand_xorshift::XorShiftRng {
use std::hash::{BuildHasher, Hash, Hasher};
- let mut hasher = std::collections::hash_map::RandomState::new().build_hasher();
+ let mut hasher = std::hash::RandomState::new().build_hasher();
std::panic::Location::caller().hash(&mut hasher);
let hc64 = hasher.finish();
let seed_vec =
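The `RandomState` edit follows its re-export at `std::hash::RandomState`; the old `std::collections::hash_map` path still works. A small stable-Rust illustration of the hashing pattern `test_rng` relies on:

use std::hash::{BuildHasher, Hash, Hasher, RandomState};

fn main() {
    let state = RandomState::new();
    // One-shot hashing via `BuildHasher::hash_one`...
    let one_shot = state.hash_one("hello");
    // ...is equivalent to building a `Hasher` and feeding it manually.
    let mut hasher = state.build_hasher();
    "hello".hash(&mut hasher);
    assert_eq!(one_shot, hasher.finish());
}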
diff --git a/rust/alloc/raw_vec.rs b/rust/alloc/raw_vec.rs
index a7425582a3..98b6abf30a 100644
--- a/rust/alloc/raw_vec.rs
+++ b/rust/alloc/raw_vec.rs
@@ -27,6 +27,16 @@ enum AllocInit {
Zeroed,
}
+#[repr(transparent)]
+#[cfg_attr(target_pointer_width = "16", rustc_layout_scalar_valid_range_end(0x7fff))]
+#[cfg_attr(target_pointer_width = "32", rustc_layout_scalar_valid_range_end(0x7fff_ffff))]
+#[cfg_attr(target_pointer_width = "64", rustc_layout_scalar_valid_range_end(0x7fff_ffff_ffff_ffff))]
+struct Cap(usize);
+
+impl Cap {
+ const ZERO: Cap = unsafe { Cap(0) };
+}
+
/// A low-level utility for more ergonomically allocating, reallocating, and deallocating
/// a buffer of memory on the heap without having to worry about all the corner cases
/// involved. This type is excellent for building your own data structures like Vec and VecDeque.
@@ -52,7 +62,12 @@ enum AllocInit {
#[allow(missing_debug_implementations)]
pub(crate) struct RawVec<T, A: Allocator = Global> {
ptr: Unique<T>,
- cap: usize,
+ /// Never used for ZSTs; it's `capacity()`'s responsibility to return usize::MAX in that case.
+ ///
+ /// # Safety
+ ///
+ /// `cap` must be in the `0..=isize::MAX` range.
+ cap: Cap,
alloc: A,
}
@@ -121,7 +136,7 @@ impl<T, A: Allocator> RawVec<T, A> {
/// the returned `RawVec`.
pub const fn new_in(alloc: A) -> Self {
// `cap: 0` means "unallocated". zero-sized types are ignored.
- Self { ptr: Unique::dangling(), cap: 0, alloc }
+ Self { ptr: Unique::dangling(), cap: Cap::ZERO, alloc }
}
/// Like `with_capacity`, but parameterized over the choice of
@@ -203,7 +218,7 @@ impl<T, A: Allocator> RawVec<T, A> {
// here should change to `ptr.len() / mem::size_of::<T>()`.
Self {
ptr: unsafe { Unique::new_unchecked(ptr.cast().as_ptr()) },
- cap: capacity,
+ cap: unsafe { Cap(capacity) },
alloc,
}
}
@@ -228,7 +243,7 @@ impl<T, A: Allocator> RawVec<T, A> {
// here should change to `ptr.len() / mem::size_of::<T>()`.
Ok(Self {
ptr: unsafe { Unique::new_unchecked(ptr.cast().as_ptr()) },
- cap: capacity,
+ cap: unsafe { Cap(capacity) },
alloc,
})
}
@@ -240,12 +255,13 @@ impl<T, A: Allocator> RawVec<T, A> {
/// The `ptr` must be allocated (via the given allocator `alloc`), and with the given
/// `capacity`.
/// The `capacity` cannot exceed `isize::MAX` for sized types. (only a concern on 32-bit
- /// systems). ZST vectors may have a capacity up to `usize::MAX`.
+ /// systems). For ZSTs, the capacity is ignored.
/// If the `ptr` and `capacity` come from a `RawVec` created via `alloc`, then this is
/// guaranteed.
#[inline]
pub unsafe fn from_raw_parts_in(ptr: *mut T, capacity: usize, alloc: A) -> Self {
- Self { ptr: unsafe { Unique::new_unchecked(ptr) }, cap: capacity, alloc }
+ let cap = if T::IS_ZST { Cap::ZERO } else { unsafe { Cap(capacity) } };
+ Self { ptr: unsafe { Unique::new_unchecked(ptr) }, cap, alloc }
}
/// Gets a raw pointer to the start of the allocation. Note that this is
@@ -261,7 +277,7 @@ impl<T, A: Allocator> RawVec<T, A> {
/// This will always be `usize::MAX` if `T` is zero-sized.
#[inline(always)]
pub fn capacity(&self) -> usize {
- if T::IS_ZST { usize::MAX } else { self.cap }
+ if T::IS_ZST { usize::MAX } else { self.cap.0 }
}
/// Returns a shared reference to the allocator backing this `RawVec`.
@@ -270,7 +286,7 @@ impl<T, A: Allocator> RawVec<T, A> {
}
fn current_memory(&self) -> Option<(NonNull<u8>, Layout)> {
- if T::IS_ZST || self.cap == 0 {
+ if T::IS_ZST || self.cap.0 == 0 {
None
} else {
// We could use Layout::array here which ensures the absence of isize and usize overflows
@@ -280,7 +296,7 @@ impl<T, A: Allocator> RawVec<T, A> {
let _: () = const { assert!(mem::size_of::<T>() % mem::align_of::<T>() == 0) };
unsafe {
let align = mem::align_of::<T>();
- let size = mem::size_of::<T>().unchecked_mul(self.cap);
+ let size = mem::size_of::<T>().unchecked_mul(self.cap.0);
let layout = Layout::from_size_align_unchecked(size, align);
Some((self.ptr.cast().into(), layout))
}
@@ -338,10 +354,13 @@ impl<T, A: Allocator> RawVec<T, A> {
/// The same as `reserve`, but returns on errors instead of panicking or aborting.
pub fn try_reserve(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
if self.needs_to_grow(len, additional) {
- self.grow_amortized(len, additional)
- } else {
- Ok(())
+ self.grow_amortized(len, additional)?;
}
+ unsafe {
+ // Inform the optimizer that the reservation has succeeded or wasn't needed
+ core::intrinsics::assume(!self.needs_to_grow(len, additional));
+ }
+ Ok(())
}
/// The same as `reserve_for_push`, but returns on errors instead of panicking or aborting.
@@ -378,7 +397,14 @@ impl<T, A: Allocator> RawVec<T, A> {
len: usize,
additional: usize,
) -> Result<(), TryReserveError> {
- if self.needs_to_grow(len, additional) { self.grow_exact(len, additional) } else { Ok(()) }
+ if self.needs_to_grow(len, additional) {
+ self.grow_exact(len, additional)?;
+ }
+ unsafe {
+ // Inform the optimizer that the reservation has succeeded or wasn't needed
+ core::intrinsics::assume(!self.needs_to_grow(len, additional));
+ }
+ Ok(())
}
/// Shrinks the buffer down to the specified capacity. If the given amount
@@ -404,12 +430,15 @@ impl<T, A: Allocator> RawVec<T, A> {
additional > self.capacity().wrapping_sub(len)
}
- fn set_ptr_and_cap(&mut self, ptr: NonNull<[u8]>, cap: usize) {
+ /// # Safety:
+ ///
+ /// `cap` must not exceed `isize::MAX`.
+ unsafe fn set_ptr_and_cap(&mut self, ptr: NonNull<[u8]>, cap: usize) {
// Allocators currently return a `NonNull<[u8]>` whose length matches
// the size requested. If that ever changes, the capacity here should
// change to `ptr.len() / mem::size_of::<T>()`.
self.ptr = unsafe { Unique::new_unchecked(ptr.cast().as_ptr()) };
- self.cap = cap;
+ self.cap = unsafe { Cap(cap) };
}
// This method is usually instantiated many times. So we want it to be as
@@ -434,14 +463,15 @@ impl<T, A: Allocator> RawVec<T, A> {
// This guarantees exponential growth. The doubling cannot overflow
// because `cap <= isize::MAX` and the type of `cap` is `usize`.
- let cap = cmp::max(self.cap * 2, required_cap);
+ let cap = cmp::max(self.cap.0 * 2, required_cap);
let cap = cmp::max(Self::MIN_NON_ZERO_CAP, cap);
let new_layout = Layout::array::<T>(cap);
// `finish_grow` is non-generic over `T`.
let ptr = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?;
- self.set_ptr_and_cap(ptr, cap);
+ // SAFETY: finish_grow would have resulted in a capacity overflow if we tried to allocate more than isize::MAX items
+ unsafe { self.set_ptr_and_cap(ptr, cap) };
Ok(())
}
@@ -460,7 +490,10 @@ impl<T, A: Allocator> RawVec<T, A> {
// `finish_grow` is non-generic over `T`.
let ptr = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?;
- self.set_ptr_and_cap(ptr, cap);
+ // SAFETY: finish_grow would have resulted in a capacity overflow if we tried to allocate more than isize::MAX items
+ unsafe {
+ self.set_ptr_and_cap(ptr, cap);
+ }
Ok(())
}
@@ -478,7 +511,7 @@ impl<T, A: Allocator> RawVec<T, A> {
if cap == 0 {
unsafe { self.alloc.deallocate(ptr, layout) };
self.ptr = Unique::dangling();
- self.cap = 0;
+ self.cap = Cap::ZERO;
} else {
let ptr = unsafe {
// `Layout::array` cannot overflow here because it would have
@@ -489,7 +522,10 @@ impl<T, A: Allocator> RawVec<T, A> {
.shrink(ptr, layout, new_layout)
.map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })?
};
- self.set_ptr_and_cap(ptr, cap);
+ // SAFETY: if the allocation is valid, then the capacity is too
+ unsafe {
+ self.set_ptr_and_cap(ptr, cap);
+ }
}
Ok(())
}
@@ -569,6 +605,7 @@ fn alloc_guard(alloc_size: usize) -> Result<(), TryReserveError> {
// ensure that the code generation related to these panics is minimal as there's
// only one location which panics rather than a bunch throughout the module.
#[cfg(not(no_global_oom_handling))]
+#[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
fn capacity_overflow() -> ! {
panic!("capacity overflow");
}
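The new `Cap` wrapper bakes the `cap <= isize::MAX` invariant into the type's layout via `rustc_layout_scalar_valid_range_end`, a perma-unstable internal attribute. The payoff is a large niche in the capacity field that the layout algorithm can spend on enum discriminants. A quick check of the effect as observed on recent 64-bit rustc (exact layouts are not a language guarantee):

use std::mem::size_of;

fn main() {
    println!("{}", size_of::<Vec<u8>>());                 // 24 on 64-bit
    // The non-null pointer niche already folds one Option away...
    println!("{}", size_of::<Option<Vec<u8>>>());         // 24
    // ...and the capacity niche leaves room for further layers.
    println!("{}", size_of::<Option<Option<Vec<u8>>>>()); // 24 observed
}

The `assume` calls added to `try_reserve` and `try_reserve_exact` tell the optimizer that a successful reservation really made room, so callers' capacity checks can fold away. A stable-Rust analogue of the same hint, sketched with `std::hint::assert_unchecked` (stable since 1.81) rather than the internal intrinsic:

use std::collections::TryReserveError;

fn reserve_for(v: &mut Vec<u8>, additional: usize) -> Result<(), TryReserveError> {
    v.try_reserve(additional)?;
    // SAFETY: `try_reserve` returned Ok, so the spare capacity covers `additional`.
    unsafe { std::hint::assert_unchecked(v.capacity() - v.len() >= additional) };
    Ok(())
}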
diff --git a/rust/alloc/vec/into_iter.rs b/rust/alloc/vec/into_iter.rs
index aac0ec16ae..136bfe94af 100644
--- a/rust/alloc/vec/into_iter.rs
+++ b/rust/alloc/vec/into_iter.rs
@@ -9,7 +9,8 @@ use crate::raw_vec::RawVec;
use core::array;
use core::fmt;
use core::iter::{
- FusedIterator, InPlaceIterable, SourceIter, TrustedLen, TrustedRandomAccessNoCoerce,
+ FusedIterator, InPlaceIterable, SourceIter, TrustedFused, TrustedLen,
+ TrustedRandomAccessNoCoerce,
};
use core::marker::PhantomData;
use core::mem::{self, ManuallyDrop, MaybeUninit, SizedTypeProperties};
@@ -287,9 +288,7 @@ impl<T, A: Allocator> Iterator for IntoIter<T, A> {
// Also note the implementation of `Self: TrustedRandomAccess` requires
// that `T: Copy` so reading elements from the buffer doesn't invalidate
// them for `Drop`.
- unsafe {
- if T::IS_ZST { mem::zeroed() } else { ptr::read(self.ptr.add(i)) }
- }
+ unsafe { if T::IS_ZST { mem::zeroed() } else { ptr::read(self.ptr.add(i)) } }
}
}
@@ -341,6 +340,10 @@ impl<T, A: Allocator> ExactSizeIterator for IntoIter<T, A> {
#[stable(feature = "fused", since = "1.26.0")]
impl<T, A: Allocator> FusedIterator for IntoIter<T, A> {}
+#[doc(hidden)]
+#[unstable(issue = "none", feature = "trusted_fused")]
+unsafe impl<T, A: Allocator> TrustedFused for IntoIter<T, A> {}
+
#[unstable(feature = "trusted_len", issue = "37572")]
unsafe impl<T, A: Allocator> TrustedLen for IntoIter<T, A> {}
@@ -425,7 +428,10 @@ unsafe impl<#[may_dangle] T, A: Allocator> Drop for IntoIter<T, A> {
// also refer to the vec::in_place_collect module documentation to get an overview
#[unstable(issue = "none", feature = "inplace_iteration")]
#[doc(hidden)]
-unsafe impl<T, A: Allocator> InPlaceIterable for IntoIter<T, A> {}
+unsafe impl<T, A: Allocator> InPlaceIterable for IntoIter<T, A> {
+ const EXPAND_BY: Option<NonZeroUsize> = NonZeroUsize::new(1);
+ const MERGE_BY: Option<NonZeroUsize> = NonZeroUsize::new(1);
+}
#[unstable(issue = "none", feature = "inplace_iteration")]
#[doc(hidden)]
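`EXPAND_BY` and `MERGE_BY` bound how many output items an in-place-collectible pipeline may produce per input item; both are 1 for a plain `Vec` iterator. The user-visible effect of `InPlaceIterable` is that `collect` may reuse the source allocation. A small observation of that on current rustc (buffer reuse is an optimization, not a guarantee):

fn main() {
    let v = vec![1u32, 2, 3];
    let src = v.as_ptr();
    // Same item size and alignment, so in-place collection can apply.
    let w: Vec<u32> = v.into_iter().map(|x| x * 2).collect();
    println!("buffer reused: {}", std::ptr::eq(src, w.as_ptr()));
}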
diff --git a/rust/alloc/vec/mod.rs b/rust/alloc/vec/mod.rs
index 41ca71805e..220fb9d6f4 100644
--- a/rust/alloc/vec/mod.rs
+++ b/rust/alloc/vec/mod.rs
@@ -105,6 +105,7 @@ mod into_iter;
#[cfg(not(no_global_oom_handling))]
use self::is_zero::IsZero;
+#[cfg(not(no_global_oom_handling))]
mod is_zero;
#[cfg(not(no_global_oom_handling))]
@@ -123,7 +124,7 @@ use self::set_len_on_drop::SetLenOnDrop;
mod set_len_on_drop;
#[cfg(not(no_global_oom_handling))]
-use self::in_place_drop::{InPlaceDrop, InPlaceDstBufDrop};
+use self::in_place_drop::{InPlaceDrop, InPlaceDstDataSrcBufDrop};
#[cfg(not(no_global_oom_handling))]
mod in_place_drop;
@@ -1376,7 +1377,7 @@ impl<T, A: Allocator> Vec<T, A> {
/// [`as_mut_ptr`]: Vec::as_mut_ptr
/// [`as_ptr`]: Vec::as_ptr
#[stable(feature = "vec_as_ptr", since = "1.37.0")]
- #[cfg_attr(not(bootstrap), rustc_never_returns_null_ptr)]
+ #[rustc_never_returns_null_ptr]
#[inline]
pub fn as_ptr(&self) -> *const T {
// We shadow the slice method of the same name to avoid going through
@@ -1436,7 +1437,7 @@ impl<T, A: Allocator> Vec<T, A> {
/// [`as_mut_ptr`]: Vec::as_mut_ptr
/// [`as_ptr`]: Vec::as_ptr
#[stable(feature = "vec_as_ptr", since = "1.37.0")]
- #[cfg_attr(not(bootstrap), rustc_never_returns_null_ptr)]
+ #[rustc_never_returns_null_ptr]
#[inline]
pub fn as_mut_ptr(&mut self) -> *mut T {
// We shadow the slice method of the same name to avoid going through
@@ -1565,7 +1566,8 @@ impl<T, A: Allocator> Vec<T, A> {
#[stable(feature = "rust1", since = "1.0.0")]
pub fn swap_remove(&mut self, index: usize) -> T {
#[cold]
- #[inline(never)]
+ #[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
+ #[track_caller]
fn assert_failed(index: usize, len: usize) -> ! {
panic!("swap_remove index (is {index}) should be < len (is {len})");
}
@@ -1606,7 +1608,8 @@ impl<T, A: Allocator> Vec<T, A> {
#[stable(feature = "rust1", since = "1.0.0")]
pub fn insert(&mut self, index: usize, element: T) {
#[cold]
- #[inline(never)]
+ #[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
+ #[track_caller]
fn assert_failed(index: usize, len: usize) -> ! {
panic!("insertion index (is {index}) should be <= len (is {len})");
}
@@ -1667,7 +1670,7 @@ impl<T, A: Allocator> Vec<T, A> {
#[track_caller]
pub fn remove(&mut self, index: usize) -> T {
#[cold]
- #[inline(never)]
+ #[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
#[track_caller]
fn assert_failed(index: usize, len: usize) -> ! {
panic!("removal index (is {index}) should be < len (is {len})");
@@ -1891,7 +1894,32 @@ impl<T, A: Allocator> Vec<T, A> {
return;
}
- /* INVARIANT: vec.len() > read >= write > write-1 >= 0 */
+ // Check if we ever want to remove anything.
+ // This allows using copy_nonoverlapping in the next cycle,
+ // and avoids any memory writes if nothing needs to be removed.
+ let mut first_duplicate_idx: usize = 1;
+ let start = self.as_mut_ptr();
+ while first_duplicate_idx != len {
+ let found_duplicate = unsafe {
+ // SAFETY: first_duplicate_idx is always in the range [1..len).
+ // Iteration starts at 1, so the subtraction below never overflows.
+ let prev = start.add(first_duplicate_idx.wrapping_sub(1));
+ let current = start.add(first_duplicate_idx);
+ // The docs explicitly state that the two references are passed in reverse order.
+ same_bucket(&mut *current, &mut *prev)
+ };
+ if found_duplicate {
+ break;
+ }
+ first_duplicate_idx += 1;
+ }
+ // No duplicates found, so nothing needs to be removed.
+ // The index can equal len but never exceed it.
+ if first_duplicate_idx == len {
+ return;
+ }
+
+ /* INVARIANT: vec.len() > read > write > write-1 >= 0 */
struct FillGapOnDrop<'a, T, A: core::alloc::Allocator> {
/* Offset of the element we want to check if it is duplicate */
read: usize,
@@ -1937,31 +1965,39 @@ impl<T, A: Allocator> Vec<T, A> {
}
}
- let mut gap = FillGapOnDrop { read: 1, write: 1, vec: self };
- let ptr = gap.vec.as_mut_ptr();
-
/* Drop items while going through Vec, it should be more efficient than
* doing slice partition_dedup + truncate */
+ // Construct the gap first and only then drop the item, to avoid memory corruption if `T::drop` panics.
+ let mut gap =
+ FillGapOnDrop { read: first_duplicate_idx + 1, write: first_duplicate_idx, vec: self };
+ unsafe {
+ // SAFETY: we checked above that first_duplicate_idx is in bounds.
+ // If drop panics, `gap` would remove this item without drop.
+ ptr::drop_in_place(start.add(first_duplicate_idx));
+ }
+
/* SAFETY: Because of the invariant, read_ptr, prev_ptr and write_ptr
* are always in-bounds and read_ptr never aliases prev_ptr */
unsafe {
while gap.read < len {
- let read_ptr = ptr.add(gap.read);
- let prev_ptr = ptr.add(gap.write.wrapping_sub(1));
+ let read_ptr = start.add(gap.read);
+ let prev_ptr = start.add(gap.write.wrapping_sub(1));
- if same_bucket(&mut *read_ptr, &mut *prev_ptr) {
+ // The docs explicitly state that the two references are passed in reverse order.
+ let found_duplicate = same_bucket(&mut *read_ptr, &mut *prev_ptr);
+ if found_duplicate {
// Increase `gap.read` now since the drop may panic.
gap.read += 1;
/* We have found duplicate, drop it in-place */
ptr::drop_in_place(read_ptr);
} else {
- let write_ptr = ptr.add(gap.write);
+ let write_ptr = start.add(gap.write);
- /* Because `read_ptr` can be equal to `write_ptr`, we either
- * have to use `copy` or conditional `copy_nonoverlapping`.
- * Looks like the first option is faster. */
- ptr::copy(read_ptr, write_ptr, 1);
+ /* read_ptr cannot be equal to write_ptr because at this point
+ * we are guaranteed to have skipped at least one element
+ * before the loop starts. */
+ ptr::copy_nonoverlapping(read_ptr, write_ptr, 1);
/* We have filled that place, so go further */
gap.write += 1;
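The rewritten `dedup_by` first scans for the first duplicate, so a vector with nothing to remove is never written to, and the main loop can use `copy_nonoverlapping`. The public API is unchanged; note the argument order that the comments above call out (current element first, previously retained element second):

fn main() {
    let mut v = vec![1, 1, 2, 3, 3, 3, 4];
    v.dedup_by(|current, previous| current == previous);
    assert_eq!(v, [1, 2, 3, 4]);
}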
@@ -2097,6 +2133,7 @@ impl<T, A: Allocator> Vec<T, A> {
} else {
unsafe {
self.len -= 1;
+ core::intrinsics::assume(self.len < self.capacity());
Some(ptr::read(self.as_ptr().add(self.len())))
}
}
@@ -2299,7 +2336,8 @@ impl<T, A: Allocator> Vec<T, A> {
A: Clone,
{
#[cold]
- #[inline(never)]
+ #[cfg_attr(not(feature = "panic_immediate_abort"), inline(never))]
+ #[track_caller]
fn assert_failed(at: usize, len: usize) -> ! {
panic!("`at` split index (is {at}) should be <= len (is {len})");
}
@@ -2840,6 +2878,7 @@ pub fn from_elem_in<T: Clone, A: Allocator>(elem: T, n: usize, alloc: A) -> Vec<
<T as SpecFromElem>::from_elem(elem, n, alloc)
}
+#[cfg(not(no_global_oom_handling))]
trait ExtendFromWithinSpec {
/// # Safety
///
@@ -2848,6 +2887,7 @@ trait ExtendFromWithinSpec {
unsafe fn spec_extend_from_within(&mut self, src: Range<usize>);
}
+#[cfg(not(no_global_oom_handling))]
impl<T: Clone, A: Allocator> ExtendFromWithinSpec for Vec<T, A> {
default unsafe fn spec_extend_from_within(&mut self, src: Range<usize>) {
// SAFETY:
@@ -2867,6 +2907,7 @@ impl<T: Clone, A: Allocator> ExtendFromWithinSpec for Vec<T, A> {
}
}
+#[cfg(not(no_global_oom_handling))]
impl<T: Copy, A: Allocator> ExtendFromWithinSpec for Vec<T, A> {
unsafe fn spec_extend_from_within(&mut self, src: Range<usize>) {
let count = src.len();
@@ -2947,7 +2988,7 @@ impl<T: Clone, A: Allocator + Clone> Clone for Vec<T, A> {
/// ```
/// use std::hash::BuildHasher;
///
-/// let b = std::collections::hash_map::RandomState::new();
+/// let b = std::hash::RandomState::new();
/// let v: Vec<u8> = vec![0xa8, 0x3c, 0x09];
/// let s: &[u8] = &[0xa8, 0x3c, 0x09];
/// assert_eq!(b.hash_one(v), b.hash_one(s));