author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-15 03:35:49 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-15 03:35:49 +0000
commit    d8bbc7858622b6d9c278469aab701ca0b609cddf (patch)
tree      eff41dc61d9f714852212739e6b3738b82a2af87 /third_party/rust/bumpalo/src
parent    Releasing progress-linux version 125.0.3-1~progress7.99u1. (diff)
Merging upstream version 126.0.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/rust/bumpalo/src')
-rw-r--r--               third_party/rust/bumpalo/src/alloc.rs                 2
-rw-r--r--               third_party/rust/bumpalo/src/collections/raw_vec.rs  86
-rw-r--r--               third_party/rust/bumpalo/src/collections/string.rs   17
-rw-r--r--               third_party/rust/bumpalo/src/collections/vec.rs     148
-rwxr-xr-x [-rw-r--r--]  third_party/rust/bumpalo/src/lib.rs                  192
5 files changed, 353 insertions, 92 deletions
diff --git a/third_party/rust/bumpalo/src/alloc.rs b/third_party/rust/bumpalo/src/alloc.rs
index 0bcc21f22c..6947e2a6cf 100644
--- a/third_party/rust/bumpalo/src/alloc.rs
+++ b/third_party/rust/bumpalo/src/alloc.rs
@@ -752,7 +752,7 @@ pub unsafe trait Alloc {
match (Layout::array::<T>(n_old), Layout::array::<T>(n_new)) {
(Ok(ref k_old), Ok(ref k_new)) if k_old.size() > 0 && k_new.size() > 0 => {
debug_assert!(k_old.align() == k_new.align());
- self.realloc(ptr.cast(), k_old.clone(), k_new.size())
+ self.realloc(ptr.cast(), *k_old, k_new.size())
.map(NonNull::cast)
}
_ => Err(AllocErr),
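
Note: `Layout` implements `Copy`, so the hunk above simply replaces a redundant `clone()` with a plain dereference. An illustrative standalone check (not part of the diff):

```
use core::alloc::Layout;

fn main() {
    let k_old = &Layout::new::<u64>();
    // `Layout` is `Copy`: `*k_old` is a bitwise copy and `k_old.clone()`
    // yields the same value, so dropping the `clone()` changes nothing
    // semantically (this is the pattern clippy flags as `clone_on_copy`).
    assert_eq!(*k_old, k_old.clone());
}
```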
diff --git a/third_party/rust/bumpalo/src/collections/raw_vec.rs b/third_party/rust/bumpalo/src/collections/raw_vec.rs
index ac3bd0758c..456829d447 100644
--- a/third_party/rust/bumpalo/src/collections/raw_vec.rs
+++ b/third_party/rust/bumpalo/src/collections/raw_vec.rs
@@ -319,7 +319,7 @@ impl<'a, T> RawVec<'a, T> {
used_cap: usize,
needed_extra_cap: usize,
) -> Result<(), CollectionAllocErr> {
- self.reserve_internal(used_cap, needed_extra_cap, Fallible, Exact)
+ self.fallible_reserve_internal(used_cap, needed_extra_cap, Exact)
}
/// Ensures that the buffer contains at least enough space to hold
@@ -343,11 +343,7 @@ impl<'a, T> RawVec<'a, T> {
///
/// Aborts on OOM
pub fn reserve_exact(&mut self, used_cap: usize, needed_extra_cap: usize) {
- match self.reserve_internal(used_cap, needed_extra_cap, Infallible, Exact) {
- Err(CapacityOverflow) => capacity_overflow(),
- Err(AllocErr) => unreachable!(),
- Ok(()) => { /* yay */ }
- }
+ self.infallible_reserve_internal(used_cap, needed_extra_cap, Exact)
}
/// Calculates the buffer's new size given that it'll hold `used_cap +
@@ -374,7 +370,7 @@ impl<'a, T> RawVec<'a, T> {
used_cap: usize,
needed_extra_cap: usize,
) -> Result<(), CollectionAllocErr> {
- self.reserve_internal(used_cap, needed_extra_cap, Fallible, Amortized)
+ self.fallible_reserve_internal(used_cap, needed_extra_cap, Amortized)
}
/// Ensures that the buffer contains at least enough space to hold
@@ -429,13 +425,11 @@ impl<'a, T> RawVec<'a, T> {
/// # vector.push_all(&[1, 3, 5, 7, 9]);
/// # }
/// ```
+ #[inline(always)]
pub fn reserve(&mut self, used_cap: usize, needed_extra_cap: usize) {
- match self.reserve_internal(used_cap, needed_extra_cap, Infallible, Amortized) {
- Err(CapacityOverflow) => capacity_overflow(),
- Err(AllocErr) => unreachable!(),
- Ok(()) => { /* yay */ }
- }
+ self.infallible_reserve_internal(used_cap, needed_extra_cap, Amortized)
}
+
/// Attempts to ensure that the buffer contains at least enough space to hold
/// `used_cap + needed_extra_cap` elements. If it doesn't already have
/// enough capacity, will reallocate in place enough space plus comfortable slack
@@ -593,6 +587,68 @@ enum ReserveStrategy {
use self::ReserveStrategy::*;
impl<'a, T> RawVec<'a, T> {
+ #[inline(always)]
+ fn fallible_reserve_internal(
+ &mut self,
+ used_cap: usize,
+ needed_extra_cap: usize,
+ strategy: ReserveStrategy,
+ ) -> Result<(), CollectionAllocErr> {
+ // This portion of the method should always be inlined.
+ if self.cap().wrapping_sub(used_cap) >= needed_extra_cap {
+ return Ok(());
+ }
+ // This portion of the method should never be inlined, and will only be called when
+ // the check above has confirmed that it is necessary.
+ self.reserve_internal_or_error(used_cap, needed_extra_cap, Fallible, strategy)
+ }
+
+ #[inline(always)]
+ fn infallible_reserve_internal(
+ &mut self,
+ used_cap: usize,
+ needed_extra_cap: usize,
+ strategy: ReserveStrategy,
+ ) {
+ // This portion of the method should always be inlined.
+ if self.cap().wrapping_sub(used_cap) >= needed_extra_cap {
+ return;
+ }
+ // This portion of the method should never be inlined, and will only be called when
+ // the check above has confirmed that it is necessary.
+ self.reserve_internal_or_panic(used_cap, needed_extra_cap, strategy)
+ }
+
+ #[inline(never)]
+ fn reserve_internal_or_panic(
+ &mut self,
+ used_cap: usize,
+ needed_extra_cap: usize,
+ strategy: ReserveStrategy,
+ ) {
+ // Delegates the call to `reserve_internal_or_error` and panics in the event of an error.
+ // This allows the method to have a return type of `()`, simplifying the assembly at the
+ // call site.
+ match self.reserve_internal(used_cap, needed_extra_cap, Infallible, strategy) {
+ Err(CapacityOverflow) => capacity_overflow(),
+ Err(AllocErr) => unreachable!(),
+ Ok(()) => { /* yay */ }
+ }
+ }
+
+ #[inline(never)]
+ fn reserve_internal_or_error(
+ &mut self,
+ used_cap: usize,
+ needed_extra_cap: usize,
+ fallibility: Fallibility,
+ strategy: ReserveStrategy,
+ ) -> Result<(), CollectionAllocErr> {
+ // Delegates the call to `reserve_internal`, which can be inlined.
+ self.reserve_internal(used_cap, needed_extra_cap, fallibility, strategy)
+ }
+
+ /// Helper method to reserve additional space, reallocating the backing memory.
+ /// The caller is responsible for confirming that there is not already enough space available.
fn reserve_internal(
&mut self,
used_cap: usize,
@@ -608,12 +664,6 @@ impl<'a, T> RawVec<'a, T> {
// If we make it past the first branch then we are guaranteed to
// panic.
- // Don't actually need any more capacity.
- // Wrapping in case they gave a bad `used_cap`.
- if self.cap().wrapping_sub(used_cap) >= needed_extra_cap {
- return Ok(());
- }
-
// Nothing we can really do about these checks :(
let new_cap = match strategy {
Exact => used_cap
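
The raw_vec.rs hunks above split reservation into an `#[inline(always)]` capacity check that is duplicated at every call site and an `#[inline(never)]` slow path that handles growth and errors out-of-line. A standalone sketch of the same pattern, with illustrative names rather than bumpalo's actual API:

```
struct Buf {
    cap: usize,
}

impl Buf {
    #[inline(always)]
    fn reserve(&mut self, used: usize, extra: usize) {
        // Hot path: a single comparison the compiler inlines everywhere.
        // `wrapping_sub` keeps the check cheap even for a bogus `used`.
        if self.cap.wrapping_sub(used) >= extra {
            return;
        }
        self.reserve_slow(used, extra);
    }

    #[inline(never)]
    #[cold]
    fn reserve_slow(&mut self, used: usize, extra: usize) {
        // Cold path: growth and error handling stay out-of-line so they
        // do not bloat the inlined caller.
        self.cap = used.checked_add(extra).expect("capacity overflow");
    }
}

fn main() {
    let mut b = Buf { cap: 4 };
    b.reserve(2, 2); // fast path: enough capacity already
    b.reserve(4, 8); // slow path: must grow
    assert!(b.cap >= 12);
}
```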
diff --git a/third_party/rust/bumpalo/src/collections/string.rs b/third_party/rust/bumpalo/src/collections/string.rs
index ffd1db92de..e9fafbf204 100644
--- a/third_party/rust/bumpalo/src/collections/string.rs
+++ b/third_party/rust/bumpalo/src/collections/string.rs
@@ -680,8 +680,19 @@ impl<'bump> String<'bump> {
/// assert_eq!(s, "hello");
/// ```
pub fn from_str_in(s: &str, bump: &'bump Bump) -> String<'bump> {
- let mut t = String::with_capacity_in(s.len(), bump);
- t.push_str(s);
+ let len = s.len();
+ let mut t = String::with_capacity_in(len, bump);
+ // SAFETY:
+ // * `src` is valid for reads of `s.len()` bytes by virtue of being an allocated `&str`.
+ // * `dst` is valid for writes of `s.len()` bytes as `String::with_capacity_in(s.len(), bump)`
+ // above guarantees that.
+ // * Alignment is not relevant as `u8` has no alignment requirements.
+ // * Source and destination ranges cannot overlap as we just reserved the destination
+ // range from the bump.
+ unsafe { ptr::copy_nonoverlapping(s.as_ptr(), t.vec.as_mut_ptr(), len) };
+ // SAFETY: We reserved sufficient capacity for the string above.
+ // The elements at `0..len` were initialized by `copy_nonoverlapping` above.
+ unsafe { t.vec.set_len(len) };
t
}
@@ -925,7 +936,7 @@ impl<'bump> String<'bump> {
/// ```
#[inline]
pub fn push_str(&mut self, string: &str) {
- self.vec.extend_from_slice(string.as_bytes())
+ self.vec.extend_from_slice_copy(string.as_bytes())
}
/// Returns this `String`'s capacity, in bytes.
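
The rewritten `from_str_in` is an instance of the common reserve / `copy_nonoverlapping` / `set_len` pattern. A self-contained sketch of the same steps, shown on std's `Vec<u8>` for simplicity:

```
// Reserve exact capacity, copy the raw bytes, then publish the length.
fn bytes_from_str(s: &str) -> Vec<u8> {
    let len = s.len();
    let mut v: Vec<u8> = Vec::with_capacity(len);
    // SAFETY: `v` has capacity for `len` bytes, the freshly allocated
    // destination cannot overlap `s`, and `u8` has no alignment needs.
    unsafe {
        std::ptr::copy_nonoverlapping(s.as_ptr(), v.as_mut_ptr(), len);
        // All `len` bytes are now initialized, so the length may be set.
        v.set_len(len);
    }
    v
}

fn main() {
    assert_eq!(bytes_from_str("hello"), b"hello");
}
```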
diff --git a/third_party/rust/bumpalo/src/collections/vec.rs b/third_party/rust/bumpalo/src/collections/vec.rs
index 312aa055b9..0dab700727 100644
--- a/third_party/rust/bumpalo/src/collections/vec.rs
+++ b/third_party/rust/bumpalo/src/collections/vec.rs
@@ -104,6 +104,8 @@ use core::ops::{Index, IndexMut, RangeBounds};
use core::ptr;
use core::ptr::NonNull;
use core::slice;
+#[cfg(feature = "std")]
+use std::io;
unsafe fn arith_offset<T>(p: *const T, offset: isize) -> *const T {
p.offset(offset)
@@ -1775,6 +1777,132 @@ impl<'bump, T: 'bump + Clone> Vec<'bump, T> {
}
}
+impl<'bump, T: 'bump + Copy> Vec<'bump, T> {
+ /// Helper method to copy all of the items in `other` and append them to the end of `self`.
+ ///
+ /// SAFETY:
+ /// * The caller is responsible for:
+ /// * calling [`reserve`](Self::reserve) beforehand to guarantee that there is enough
+ /// capacity to store `other.len()` more items.
+ /// * guaranteeing that `self` and `other` do not overlap.
+ unsafe fn extend_from_slice_copy_unchecked(&mut self, other: &[T]) {
+ let old_len = self.len();
+ debug_assert!(old_len + other.len() <= self.capacity());
+
+ // SAFETY:
+ // * `src` is valid for reads of `other.len()` values by virtue of being a `&[T]`.
+ // * `dst` is valid for writes of `other.len()` values because the caller of this
+ // method is required to `reserve` capacity to store at least `other.len()` items
+ // beforehand.
+ // * Because `src` is a `&[T]` and `dst` points into the `Vec<T>`'s buffer of `T`,
+ // `copy_nonoverlapping`'s alignment requirements are met.
+ // * The caller is required to guarantee that the source and destination ranges do not overlap.
+ unsafe {
+ let src = other.as_ptr();
+ let dst = self.as_mut_ptr().add(old_len);
+ ptr::copy_nonoverlapping(src, dst, other.len());
+ self.set_len(old_len + other.len());
+ }
+ }
+
+ /// Copies all elements in the slice `other` and appends them to the `Vec`.
+ ///
+ /// Note that this function is the same as [`extend_from_slice`] except that it is optimized for
+ /// slices of types that implement the `Copy` trait. If and when Rust gets specialization
+ /// this function will likely be deprecated (but still available).
+ ///
+ /// To copy and append the data from multiple source slices at once, see
+ /// [`extend_from_slices_copy`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bumpalo::{Bump, collections::Vec};
+ ///
+ /// let b = Bump::new();
+ ///
+ /// let mut vec = bumpalo::vec![in &b; 1];
+ /// vec.extend_from_slice_copy(&[2, 3, 4]);
+ /// assert_eq!(vec, [1, 2, 3, 4]);
+ /// ```
+ ///
+ /// ```
+ /// use bumpalo::{Bump, collections::Vec};
+ ///
+ /// let b = Bump::new();
+ ///
+ /// let mut vec = bumpalo::vec![in &b; 'H' as u8];
+ /// vec.extend_from_slice_copy("ello, world!".as_bytes());
+ /// assert_eq!(vec, "Hello, world!".as_bytes());
+ /// ```
+ ///
+ /// [`extend_from_slice`]: #method.extend_from_slice
+ /// [`extend_from_slices_copy`]: #method.extend_from_slices_copy
+ pub fn extend_from_slice_copy(&mut self, other: &[T]) {
+ // Reserve space in the Vec for the values to be added
+ self.reserve(other.len());
+
+ // Copy values into the space that was just reserved
+ // SAFETY:
+ // * `self` has enough capacity to store `other.len()` more items as `self.reserve(other.len())`
+ // above guarantees that.
+ // * Source and destination data ranges cannot overlap as we just reserved the destination
+ // range from the bump.
+ unsafe {
+ self.extend_from_slice_copy_unchecked(other);
+ }
+ }
+
+ /// For each slice in `slices`, copies all elements in the slice and appends them to the `Vec`.
+ ///
+ /// This method is equivalent to calling [`extend_from_slice_copy`] in a loop, but is able
+ /// to precompute the total amount of space to reserve in advance. This reduces the potential
+ /// maximum number of reallocations needed from one-per-slice to just one.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bumpalo::{Bump, collections::Vec};
+ ///
+ /// let b = Bump::new();
+ ///
+ /// let mut vec = bumpalo::vec![in &b; 1];
+ /// vec.extend_from_slices_copy(&[&[2, 3], &[], &[4]]);
+ /// assert_eq!(vec, [1, 2, 3, 4]);
+ /// ```
+ ///
+ /// ```
+ /// use bumpalo::{Bump, collections::Vec};
+ ///
+ /// let b = Bump::new();
+ ///
+ /// let mut vec = bumpalo::vec![in &b; 'H' as u8];
+ /// vec.extend_from_slices_copy(&["ello,".as_bytes(), &[], " world!".as_bytes()]);
+ /// assert_eq!(vec, "Hello, world!".as_bytes());
+ /// ```
+ ///
+ /// [`extend_from_slice_copy`]: #method.extend_from_slice_copy
+ pub fn extend_from_slices_copy(&mut self, slices: &[&[T]]) {
+ // Reserve the total amount of capacity we'll need to safely append the aggregated contents
+ // of each slice in `slices`.
+ let capacity_to_reserve: usize = slices.iter().map(|slice| slice.len()).sum();
+ self.reserve(capacity_to_reserve);
+
+ // SAFETY:
+ // * `dst` is valid for writes of `capacity_to_reserve` items as
+ // `self.reserve(capacity_to_reserve)` above guarantees that.
+ // * Source and destination ranges cannot overlap as we just reserved the destination
+ // range from the bump.
+ unsafe {
+ // Copy the contents of each slice onto the end of `self`
+ slices.iter().for_each(|slice| {
+ self.extend_from_slice_copy_unchecked(slice);
+ });
+ }
+ }
+}
+
// This code generalises `extend_with_{element,default}`.
trait ExtendWith<T> {
fn next(&mut self) -> T;
@@ -2612,3 +2740,23 @@ where
}
}
}
+
+#[cfg(feature = "std")]
+impl<'bump> io::Write for Vec<'bump, u8> {
+ #[inline]
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ self.extend_from_slice_copy(buf);
+ Ok(buf.len())
+ }
+
+ #[inline]
+ fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
+ self.extend_from_slice_copy(buf);
+ Ok(())
+ }
+
+ #[inline]
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
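
With the `std` feature enabled, the new `io::Write` impl lets a bump-allocated byte vector be used anywhere a writer is expected. A small usage sketch:

```
use std::io::Write;

use bumpalo::{collections::Vec, Bump};

fn main() {
    let b = Bump::new();
    let mut buf: Vec<u8> = Vec::new_in(&b);
    // `write!` goes through the `io::Write` impl added above; every
    // `write` appends via `extend_from_slice_copy` and cannot fail
    // short of an allocation failure aborting.
    write!(buf, "{} + {} = {}", 1, 2, 1 + 2).unwrap();
    assert_eq!(&buf[..], &b"1 + 2 = 3"[..]);
}
```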
diff --git a/third_party/rust/bumpalo/src/lib.rs b/third_party/rust/bumpalo/src/lib.rs
index 74dfcd4361..b23cfeabc8 100644..100755
--- a/third_party/rust/bumpalo/src/lib.rs
+++ b/third_party/rust/bumpalo/src/lib.rs
@@ -1,11 +1,8 @@
#![doc = include_str!("../README.md")]
#![deny(missing_debug_implementations)]
#![deny(missing_docs)]
-#![no_std]
-#![cfg_attr(
- feature = "allocator_api",
- feature(allocator_api, nonnull_slice_from_raw_parts)
-)]
+#![cfg_attr(not(feature = "std"), no_std)]
+#![cfg_attr(feature = "allocator_api", feature(allocator_api))]
#[doc(hidden)]
pub extern crate alloc as core_alloc;
@@ -26,9 +23,13 @@ use core::ptr::{self, NonNull};
use core::slice;
use core::str;
use core_alloc::alloc::{alloc, dealloc, Layout};
+
#[cfg(feature = "allocator_api")]
use core_alloc::alloc::{AllocError, Allocator};
+#[cfg(all(feature = "allocator-api2", not(feature = "allocator_api")))]
+use allocator_api2::alloc::{AllocError, Allocator};
+
pub use alloc::AllocErr;
/// An error returned from [`Bump::try_alloc_try_with`].
@@ -354,7 +355,7 @@ static EMPTY_CHUNK: EmptyChunkFooter = EmptyChunkFooter(ChunkFooter {
impl EmptyChunkFooter {
fn get(&'static self) -> NonNull<ChunkFooter> {
- unsafe { NonNull::new_unchecked(&self.0 as *const ChunkFooter as *mut ChunkFooter) }
+ NonNull::from(&self.0)
}
}
@@ -406,6 +407,15 @@ unsafe fn dealloc_chunk_list(mut footer: NonNull<ChunkFooter>) {
unsafe impl Send for Bump {}
#[inline]
+fn is_pointer_aligned_to<T>(pointer: *mut T, align: usize) -> bool {
+ debug_assert!(align.is_power_of_two());
+
+ let pointer = pointer as usize;
+ let pointer_aligned = round_down_to(pointer, align);
+ pointer == pointer_aligned
+}
+
+#[inline]
pub(crate) fn round_up_to(n: usize, divisor: usize) -> Option<usize> {
debug_assert!(divisor > 0);
debug_assert!(divisor.is_power_of_two());
@@ -419,6 +429,14 @@ pub(crate) fn round_down_to(n: usize, divisor: usize) -> usize {
n & !(divisor - 1)
}
+/// Same as `round_down_to` but preserves pointer provenance.
+#[inline]
+pub(crate) fn round_mut_ptr_down_to(ptr: *mut u8, divisor: usize) -> *mut u8 {
+ debug_assert!(divisor > 0);
+ debug_assert!(divisor.is_power_of_two());
+ ptr.wrapping_sub(ptr as usize & (divisor - 1))
+}
+
// After this point, we try to hit page boundaries instead of powers of 2
const PAGE_STRATEGY_CUTOFF: usize = 0x1000;
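
The new `round_mut_ptr_down_to` masks off the pointer's low bits with `wrapping_sub`, so the result stays derived from the original pointer (preserving provenance) instead of being reconstructed from a bare `usize`. A standalone restatement of the same computation, with illustrative names:

```
fn round_ptr_down(ptr: *mut u8, align: usize) -> *mut u8 {
    debug_assert!(align.is_power_of_two());
    // Subtracting the low bits keeps the result tied to `ptr`, unlike
    // `((ptr as usize) & !(align - 1)) as *mut u8`.
    ptr.wrapping_sub(ptr as usize & (align - 1))
}

fn main() {
    let mut buf = [0u8; 64];
    let p = unsafe { buf.as_mut_ptr().add(13) };
    let aligned = round_ptr_down(p, 8);
    assert_eq!(aligned as usize % 8, 0);
    assert!(aligned as usize <= p as usize);
}
```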
@@ -463,12 +481,8 @@ struct NewChunkMemoryDetails {
/// Wrapper around `Layout::from_size_align` that adds debug assertions.
#[inline]
-unsafe fn layout_from_size_align(size: usize, align: usize) -> Layout {
- if cfg!(debug_assertions) {
- Layout::from_size_align(size, align).unwrap()
- } else {
- Layout::from_size_align_unchecked(size, align)
- }
+fn layout_from_size_align(size: usize, align: usize) -> Result<Layout, AllocErr> {
+ Layout::from_size_align(size, align).map_err(|_| AllocErr)
}
#[inline(never)]
@@ -476,12 +490,6 @@ fn allocation_size_overflow<T>() -> T {
panic!("requested allocation size overflowed")
}
-// This can be migrated to directly use `usize::abs_diff` when the MSRV
-// reaches `1.60`
-fn abs_diff(a: usize, b: usize) -> usize {
- usize::max(a, b) - usize::min(a, b)
-}
-
impl Bump {
/// Construct a new arena to bump allocate into.
///
@@ -535,7 +543,7 @@ impl Bump {
});
}
- let layout = unsafe { layout_from_size_align(capacity, 1) };
+ let layout = layout_from_size_align(capacity, 1)?;
let chunk_footer = unsafe {
Self::new_chunk(
@@ -589,7 +597,7 @@ impl Bump {
/// assert!(bump.try_alloc(5).is_err());
/// ```
pub fn set_allocation_limit(&self, limit: Option<usize>) {
- self.allocation_limit.set(limit)
+ self.allocation_limit.set(limit);
}
/// How much headroom an arena has before it hits its allocation
@@ -600,7 +608,7 @@ impl Bump {
if allocated_bytes > allocation_limit {
None
} else {
- Some(abs_diff(allocation_limit, allocated_bytes))
+ Some(usize::abs_diff(allocation_limit, allocated_bytes))
}
})
}
@@ -682,7 +690,7 @@ impl Bump {
size,
} = new_chunk_memory_details;
- let layout = layout_from_size_align(size, align);
+ let layout = layout_from_size_align(size, align).ok()?;
debug_assert!(size >= requested_layout.size());
@@ -801,7 +809,6 @@ impl Bump {
/// assert_eq!(*x, "hello");
/// ```
#[inline(always)]
- #[allow(clippy::mut_from_ref)]
pub fn alloc<T>(&self, val: T) -> &mut T {
self.alloc_with(|| val)
}
@@ -821,7 +828,6 @@ impl Bump {
/// assert_eq!(x, Ok(&mut "hello"));
/// ```
#[inline(always)]
- #[allow(clippy::mut_from_ref)]
pub fn try_alloc<T>(&self, val: T) -> Result<&mut T, AllocErr> {
self.try_alloc_with(|| val)
}
@@ -846,7 +852,6 @@ impl Bump {
/// assert_eq!(*x, "hello");
/// ```
#[inline(always)]
- #[allow(clippy::mut_from_ref)]
pub fn alloc_with<F, T>(&self, f: F) -> &mut T
where
F: FnOnce() -> T,
@@ -866,7 +871,7 @@ impl Bump {
// directly into the heap instead. It seems we get it to realize
// this most consistently if we put this critical line into its
// own function instead of inlining it into the surrounding code.
- ptr::write(ptr, f())
+ ptr::write(ptr, f());
}
let layout = Layout::new::<T>();
@@ -899,7 +904,6 @@ impl Bump {
/// assert_eq!(x, Ok(&mut "hello"));
/// ```
#[inline(always)]
- #[allow(clippy::mut_from_ref)]
pub fn try_alloc_with<F, T>(&self, f: F) -> Result<&mut T, AllocErr>
where
F: FnOnce() -> T,
@@ -919,7 +923,7 @@ impl Bump {
// directly into the heap instead. It seems we get it to realize
// this most consistently if we put this critical line into its
// own function instead of inlining it into the surrounding code.
- ptr::write(ptr, f())
+ ptr::write(ptr, f());
}
// SAFETY: Self-contained:
@@ -971,7 +975,6 @@ impl Bump {
/// # Result::<_, ()>::Ok(())
/// ```
#[inline(always)]
- #[allow(clippy::mut_from_ref)]
pub fn alloc_try_with<F, T, E>(&self, f: F) -> Result<&mut T, E>
where
F: FnOnce() -> Result<T, E>,
@@ -1080,7 +1083,6 @@ impl Bump {
/// # Result::<_, bumpalo::AllocOrInitError<()>>::Ok(())
/// ```
#[inline(always)]
- #[allow(clippy::mut_from_ref)]
pub fn try_alloc_try_with<F, T, E>(&self, f: F) -> Result<&mut T, AllocOrInitError<E>>
where
F: FnOnce() -> Result<T, E>,
@@ -1165,7 +1167,6 @@ impl Bump {
/// assert_eq!(x, &[1, 2, 3]);
/// ```
#[inline(always)]
- #[allow(clippy::mut_from_ref)]
pub fn alloc_slice_copy<T>(&self, src: &[T]) -> &mut [T]
where
T: Copy,
@@ -1205,7 +1206,6 @@ impl Bump {
/// assert_eq!(originals, clones);
/// ```
#[inline(always)]
- #[allow(clippy::mut_from_ref)]
pub fn alloc_slice_clone<T>(&self, src: &[T]) -> &mut [T]
where
T: Clone,
@@ -1236,7 +1236,6 @@ impl Bump {
/// assert_eq!("hello world", hello);
/// ```
#[inline(always)]
- #[allow(clippy::mut_from_ref)]
pub fn alloc_str(&self, src: &str) -> &mut str {
let buffer = self.alloc_slice_copy(src.as_bytes());
unsafe {
@@ -1263,7 +1262,6 @@ impl Bump {
/// assert_eq!(x, &[5, 10, 15, 20, 25]);
/// ```
#[inline(always)]
- #[allow(clippy::mut_from_ref)]
pub fn alloc_slice_fill_with<T, F>(&self, len: usize, mut f: F) -> &mut [T]
where
F: FnMut(usize) -> T,
@@ -1299,7 +1297,6 @@ impl Bump {
/// assert_eq!(x, &[42, 42, 42, 42, 42]);
/// ```
#[inline(always)]
- #[allow(clippy::mut_from_ref)]
pub fn alloc_slice_fill_copy<T: Copy>(&self, len: usize, value: T) -> &mut [T] {
self.alloc_slice_fill_with(len, |_| value)
}
@@ -1324,7 +1321,6 @@ impl Bump {
/// assert_eq!(&x[1], &s);
/// ```
#[inline(always)]
- #[allow(clippy::mut_from_ref)]
pub fn alloc_slice_fill_clone<T: Clone>(&self, len: usize, value: &T) -> &mut [T] {
self.alloc_slice_fill_with(len, |_| value.clone())
}
@@ -1347,7 +1343,6 @@ impl Bump {
/// assert_eq!(x, [4, 9, 25]);
/// ```
#[inline(always)]
- #[allow(clippy::mut_from_ref)]
pub fn alloc_slice_fill_iter<T, I>(&self, iter: I) -> &mut [T]
where
I: IntoIterator<Item = T>,
@@ -1378,7 +1373,6 @@ impl Bump {
/// assert_eq!(x, &[0, 0, 0, 0, 0]);
/// ```
#[inline(always)]
- #[allow(clippy::mut_from_ref)]
pub fn alloc_slice_fill_default<T: Default>(&self, len: usize) -> &mut [T] {
self.alloc_slice_fill_with(len, |_| T::default())
}
@@ -1435,11 +1429,10 @@ impl Bump {
}
let ptr = ptr.wrapping_sub(layout.size());
- let rem = ptr as usize % layout.align();
- let aligned_ptr = ptr.wrapping_sub(rem);
+ let aligned_ptr = round_mut_ptr_down_to(ptr, layout.align());
if aligned_ptr >= start {
- let aligned_ptr = NonNull::new_unchecked(aligned_ptr as *mut u8);
+ let aligned_ptr = NonNull::new_unchecked(aligned_ptr);
footer.ptr.set(aligned_ptr);
Some(aligned_ptr)
} else {
@@ -1464,12 +1457,13 @@ impl Bump {
let current_footer = self.current_chunk_footer.get();
let current_footer = unsafe { current_footer.as_ref() };
- current_footer as *const _ as usize - current_footer.data.as_ptr() as usize
+ current_footer.ptr.get().as_ptr() as usize - current_footer.data.as_ptr() as usize
}
/// Slow path allocation for when we need to allocate a new chunk from the
/// parent bump set because there isn't enough room in our current chunk.
#[inline(never)]
+ #[cold]
fn alloc_layout_slow(&self, layout: Layout) -> Option<NonNull<u8>> {
unsafe {
let size = layout.size();
@@ -1488,21 +1482,14 @@ impl Bump {
.checked_mul(2)?
.max(min_new_chunk_size);
let chunk_memory_details = iter::from_fn(|| {
- let bypass_min_chunk_size_for_small_limits = match self.allocation_limit() {
- Some(limit)
- if layout.size() < limit
+ let bypass_min_chunk_size_for_small_limits = matches!(self.allocation_limit(), Some(limit) if layout.size() < limit
&& base_size >= layout.size()
&& limit < DEFAULT_CHUNK_SIZE_WITHOUT_FOOTER
- && self.allocated_bytes() == 0 =>
- {
- true
- }
- _ => false,
- };
+ && self.allocated_bytes() == 0);
if base_size >= min_new_chunk_size || bypass_min_chunk_size_for_small_limits {
let size = base_size;
- base_size = base_size / 2;
+ base_size /= 2;
Bump::new_chunk_memory_details(Some(size), layout)
} else {
None
@@ -1537,14 +1524,14 @@ impl Bump {
// at least the requested size.
let mut ptr = new_footer.ptr.get().as_ptr().sub(size);
// Round the pointer down to the requested alignment.
- ptr = ptr.sub(ptr as usize % layout.align());
+ ptr = round_mut_ptr_down_to(ptr, layout.align());
debug_assert!(
ptr as *const _ <= new_footer,
"{:p} <= {:p}",
ptr,
new_footer
);
- let ptr = NonNull::new_unchecked(ptr as *mut u8);
+ let ptr = NonNull::new_unchecked(ptr);
new_footer.ptr.set(ptr);
// Return a pointer to the freshly allocated region in this chunk.
@@ -1696,6 +1683,16 @@ impl Bump {
unsafe { footer.as_ref().allocated_bytes }
}
+ /// Calculates the number of bytes requested from the Rust allocator for this `Bump`.
+ ///
+ /// This number is equal to [`allocated_bytes()`](Self::allocated_bytes) plus
+ /// the size of the bump metadata.
+ pub fn allocated_bytes_including_metadata(&self) -> usize {
+ let metadata_size =
+ unsafe { self.iter_allocated_chunks_raw().count() * mem::size_of::<ChunkFooter>() };
+ self.allocated_bytes() + metadata_size
+ }
+
#[inline]
unsafe fn is_last_allocation(&self, ptr: NonNull<u8>) -> bool {
let footer = self.current_chunk_footer.get();
@@ -1720,13 +1717,31 @@ impl Bump {
old_layout: Layout,
new_layout: Layout,
) -> Result<NonNull<u8>, AllocErr> {
+ // If the new layout demands greater alignment than the old layout has,
+ // then either
+ //
+ // 1. the pointer happens to satisfy the new layout's alignment, so we
+ // got lucky and can return the pointer as-is, or
+ //
+ // 2. the pointer is not aligned to the new layout's demanded alignment,
+ // and we are unlucky.
+ //
+ // In the case of (2), to successfully "shrink" the allocation, we would
+ // have to allocate a whole new region for the new layout, without being
+ // able to free the old region. That is unacceptable, so simply return
+ // an allocation failure error instead.
+ if old_layout.align() < new_layout.align() {
+ if is_pointer_aligned_to(ptr.as_ptr(), new_layout.align()) {
+ return Ok(ptr);
+ } else {
+ return Err(AllocErr);
+ }
+ }
+
+ debug_assert!(is_pointer_aligned_to(ptr.as_ptr(), new_layout.align()));
+
let old_size = old_layout.size();
let new_size = new_layout.size();
- let align_is_compatible = old_layout.align() >= new_layout.align();
-
- if !align_is_compatible {
- return Err(AllocErr);
- }
// This is how much space we would *actually* reclaim while satisfying
// the requested alignment.
@@ -1736,7 +1751,32 @@ impl Bump {
// Only reclaim the excess space (which requires a copy) if it
// is worth it: we are actually going to recover "enough" space
// and we can do a non-overlapping copy.
- && delta >= old_size / 2
+ //
+ // We do `(old_size + 1) / 2` so division rounds up rather than
+ // down. Consider when:
+ //
+ // old_size = 5
+ // new_size = 3
+ //
+ // If we do not take care to round up, this will result in:
+ //
+ // delta = 2
+ // (old_size / 2) = (5 / 2) = 2
+ //
+ // And then the check will succeed even though we have
+ // overlapping ranges:
+ //
+ // |--------old-allocation-------|
+ // |------from-------|
+ // |-------to--------|
+ // +-----+-----+-----+-----+-----+
+ // | a | b | c | . | . |
+ // +-----+-----+-----+-----+-----+
+ //
+ // But we MUST NOT have overlapping ranges because we use
+ // `copy_nonoverlapping` below! Therefore, we round the division
+ // up to avoid this issue.
+ && delta >= (old_size + 1) / 2
{
let footer = self.current_chunk_footer.get();
let footer = footer.as_ref();
@@ -1751,9 +1791,11 @@ impl Bump {
ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr(), new_size);
return Ok(new_ptr);
- } else {
- return Ok(ptr);
}
+
+ // If this wasn't the last allocation, or shrinking wasn't worth it,
+ // simply return the old pointer as-is.
+ Ok(ptr)
}
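
The rounding guard above can be restated as a tiny predicate to sanity-check the arithmetic in isolation. This is a simplified sketch: the real code additionally requires this to be the last allocation and accounts for alignment when computing `delta`.

```
// Simplified restatement of the `delta >= (old_size + 1) / 2` guard.
fn guard_allows_copy(old_size: usize, delta: usize) -> bool {
    delta >= (old_size + 1) / 2
}

fn main() {
    // old_size = 5, new_size = 3, delta = 2: the old `old_size / 2`
    // bound (2) would have allowed an overlapping copy; the rounded-up
    // bound (3) rejects it.
    assert!(!guard_allows_copy(5, 2));
    // A clearly disjoint shrink (reclaiming most of the block) passes.
    assert!(guard_allows_copy(8, 5));
}
```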
#[inline]
@@ -1772,7 +1814,7 @@ impl Bump {
// reuse the currently allocated space.
let delta = new_size - old_size;
if let Some(p) =
- self.try_alloc_layout_fast(layout_from_size_align(delta, old_layout.align()))
+ self.try_alloc_layout_fast(layout_from_size_align(delta, old_layout.align())?)
{
ptr::copy(ptr.as_ptr(), p.as_ptr(), old_size);
return Ok(p);
@@ -1867,7 +1909,7 @@ unsafe impl<'a> alloc::Alloc for &'a Bump {
#[inline]
unsafe fn dealloc(&mut self, ptr: NonNull<u8>, layout: Layout) {
- Bump::dealloc(self, ptr, layout)
+ Bump::dealloc(self, ptr, layout);
}
#[inline]
@@ -1883,7 +1925,7 @@ unsafe impl<'a> alloc::Alloc for &'a Bump {
return self.try_alloc_layout(layout);
}
- let new_layout = layout_from_size_align(new_size, layout.align());
+ let new_layout = layout_from_size_align(new_size, layout.align())?;
if new_size <= old_size {
self.shrink(ptr, layout, new_layout)
} else {
@@ -1892,18 +1934,23 @@ unsafe impl<'a> alloc::Alloc for &'a Bump {
}
}
-#[cfg(feature = "allocator_api")]
+#[cfg(any(feature = "allocator_api", feature = "allocator-api2"))]
unsafe impl<'a> Allocator for &'a Bump {
+ #[inline]
fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
self.try_alloc_layout(layout)
- .map(|p| NonNull::slice_from_raw_parts(p, layout.size()))
+ .map(|p| unsafe {
+ NonNull::new_unchecked(ptr::slice_from_raw_parts_mut(p.as_ptr(), layout.size()))
+ })
.map_err(|_| AllocError)
}
+ #[inline]
unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
Bump::dealloc(self, ptr, layout)
}
+ #[inline]
unsafe fn shrink(
&self,
ptr: NonNull<u8>,
@@ -1911,10 +1958,13 @@ unsafe impl<'a> Allocator for &'a Bump {
new_layout: Layout,
) -> Result<NonNull<[u8]>, AllocError> {
Bump::shrink(self, ptr, old_layout, new_layout)
- .map(|p| NonNull::slice_from_raw_parts(p, new_layout.size()))
+ .map(|p| unsafe {
+ NonNull::new_unchecked(ptr::slice_from_raw_parts_mut(p.as_ptr(), new_layout.size()))
+ })
.map_err(|_| AllocError)
}
+ #[inline]
unsafe fn grow(
&self,
ptr: NonNull<u8>,
@@ -1922,10 +1972,13 @@ unsafe impl<'a> Allocator for &'a Bump {
new_layout: Layout,
) -> Result<NonNull<[u8]>, AllocError> {
Bump::grow(self, ptr, old_layout, new_layout)
- .map(|p| NonNull::slice_from_raw_parts(p, new_layout.size()))
+ .map(|p| unsafe {
+ NonNull::new_unchecked(ptr::slice_from_raw_parts_mut(p.as_ptr(), new_layout.size()))
+ })
.map_err(|_| AllocError)
}
+ #[inline]
unsafe fn grow_zeroed(
&self,
ptr: NonNull<u8>,
@@ -1953,7 +2006,6 @@ mod tests {
// Uses private `alloc` module.
#[test]
- #[allow(clippy::cognitive_complexity)]
fn test_realloc() {
use crate::alloc::Alloc;
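
Finally, a sketch of driving `&Bump` through the `Allocator` trait, whose impl the lib.rs hunks above extend to the stable `allocator-api2` shim. This assumes a nightly compiler with the crate's `allocator_api` feature enabled:

```
#![feature(allocator_api)]

use core::alloc::{Allocator, Layout};

use bumpalo::Bump;

fn main() {
    let bump = Bump::new();
    let layout = Layout::from_size_align(16, 8).unwrap();
    // `allocate` returns a `NonNull<[u8]>` spanning exactly
    // `layout.size()` bytes, built with `slice_from_raw_parts_mut`
    // as in the hunks above.
    let block = (&bump).allocate(layout).expect("bump allocation failed");
    assert_eq!(unsafe { block.as_ref().len() }, 16);
    // No `deallocate` needed: the arena frees everything on drop.
}
```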