Diffstat (limited to 'third_party/rust/bumpalo')
 -rw-r--r--   third_party/rust/bumpalo/.cargo-checksum.json           2
 -rw-r--r--   third_party/rust/bumpalo/CHANGELOG.md                 124
 -rw-r--r--   third_party/rust/bumpalo/Cargo.toml                    11
 -rw-r--r--   third_party/rust/bumpalo/README.md                     23
 -rw-r--r--   third_party/rust/bumpalo/src/alloc.rs                   2
 -rw-r--r--   third_party/rust/bumpalo/src/collections/raw_vec.rs    86
 -rw-r--r--   third_party/rust/bumpalo/src/collections/string.rs     17
 -rw-r--r--   third_party/rust/bumpalo/src/collections/vec.rs       148
 -rwxr-xr-x   third_party/rust/bumpalo/src/lib.rs (was -rw-r--r--)  192
 9 files changed, 504 insertions, 101 deletions
diff --git a/third_party/rust/bumpalo/.cargo-checksum.json b/third_party/rust/bumpalo/.cargo-checksum.json
index d747ed0ac6..c9c971e9d8 100644
--- a/third_party/rust/bumpalo/.cargo-checksum.json
+++ b/third_party/rust/bumpalo/.cargo-checksum.json
@@ -1 +1 @@
-{"files":{"CHANGELOG.md":"8b5a7a49c720ba2678c07184f50b3608e2165fbf6704da494fba23c864e691e0","Cargo.toml":"8d5fd21d2b3ed1d7149e864d43f843fd469ccdcd9893ac3c2bef8518294a61dd","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"65f94e99ddaf4f5d1782a6dae23f35d4293a9a01444a13135a6887017d353cee","README.md":"00c9224790248ec71d1505615429699fd685b0290a0c2b6d7c0df0214e7f80eb","src/alloc.rs":"ab0f23fa11c26efdd8f0596ebdf0e3faa75d097881fb59639b0fb23340c106bc","src/boxed.rs":"5fc935f8e1a7bc1b8f6a39b2bcc4355a2be4743f2308fe3ffd557455a3a27cb2","src/collections/collect_in.rs":"0588a4ff3967a4323abb4218bbd615af4b123639ab4fae9130c6590c258b3d15","src/collections/mod.rs":"d58dc46eb4f9fcdde574f09bc5b8646f53e42d49c169561d98e0c23e5b36848a","src/collections/raw_vec.rs":"8829cc9a693fde38aa93e47a7bbbc2dac247620d07f60519f2e6cb44f5494bc5","src/collections/str/lossy.rs":"c5d62b16e01071e2a574ae41ef6693ad12f1e6c786c5d38f7a13ebd6cb23c088","src/collections/str/mod.rs":"d82a8bd417fbf52a589d89a16ea2a0ac4f6ac920c3976ab1f5b6ac0c8493c4f2","src/collections/string.rs":"388d39b999788baf5c14ccc3f5cb57da728060ea3295ddfc28f0f2e1ca5858ec","src/collections/vec.rs":"2eaf52e085e6d04767e97b224e82688dd0debd231c6536d6034f431376aa8bf0","src/lib.rs":"9eb2bdb8359b368a6f3091a66b3a5eb1216672ec1605cb18d5da28292c381cb9"},"package":"0d261e256854913907f67ed06efbc3338dfe6179796deefc1ff763fc1aee5535"} \ No newline at end of file
+{"files":{"CHANGELOG.md":"78962e256666b2ca4b3ad5393da51da94decbc465e4d283a882ffdb0400973b8","Cargo.toml":"480d1eff4ff1840deaedf5670ff0cec6d5cfad8e818545072942a0a72ddba8c0","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"65f94e99ddaf4f5d1782a6dae23f35d4293a9a01444a13135a6887017d353cee","README.md":"19edaf495926291be237a1d8f958dd736940a2bbb75181ffefaeca3d2ce81046","src/alloc.rs":"3a9645d9e8db1f2a8549ee928cafa5263a828f25c88ce4d2b07996ecc14bfa81","src/boxed.rs":"5fc935f8e1a7bc1b8f6a39b2bcc4355a2be4743f2308fe3ffd557455a3a27cb2","src/collections/collect_in.rs":"0588a4ff3967a4323abb4218bbd615af4b123639ab4fae9130c6590c258b3d15","src/collections/mod.rs":"d58dc46eb4f9fcdde574f09bc5b8646f53e42d49c169561d98e0c23e5b36848a","src/collections/raw_vec.rs":"a37069763ff1434bb12356318d0a00cc25a273f0c2fc0bfea35615785808d1c6","src/collections/str/lossy.rs":"c5d62b16e01071e2a574ae41ef6693ad12f1e6c786c5d38f7a13ebd6cb23c088","src/collections/str/mod.rs":"d82a8bd417fbf52a589d89a16ea2a0ac4f6ac920c3976ab1f5b6ac0c8493c4f2","src/collections/string.rs":"39b2a94b552a82066fa4996d65d1dea4073e2a6724b5c237d530ec46e16bc222","src/collections/vec.rs":"a224894cd743954a90f275a5f19e7127414694bfa1d49c4647ebf789aab1721a","src/lib.rs":"c71735f5eac817d378fa47d9013056cb9feb55d15eb8247e50607bfb4ea4cdbd"},"package":"7ff69b9dd49fd426c69a0db9fc04dd934cdb6645ff000864d98f7e2af8830eaa"} \ No newline at end of file
diff --git a/third_party/rust/bumpalo/CHANGELOG.md b/third_party/rust/bumpalo/CHANGELOG.md
index afc142eb90..3f9e366032 100644
--- a/third_party/rust/bumpalo/CHANGELOG.md
+++ b/third_party/rust/bumpalo/CHANGELOG.md
@@ -28,6 +28,126 @@ Released YYYY-MM-DD.
--------------------------------------------------------------------------------
+## 3.15.4
+
+Released 2024-03-07.
+
+### Added
+
+* Added the `bumpalo::collections::Vec::extend_from_slices_copy` method which,
+  for `Copy` element types, is a faster way to extend a vec from multiple
+  slices than calling `extend_from_slice_copy` N times.
+
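A minimal usage sketch, assuming the `collections` cargo feature is enabled:

```rust
use bumpalo::{collections::Vec, Bump};

let bump = Bump::new();
let mut v = Vec::new_in(&bump);
v.push(1);

// One up-front reservation covers every slice; each slice is then copied in turn.
v.extend_from_slices_copy(&[&[2, 3], &[], &[4, 5]]);
assert_eq!(v, [1, 2, 3, 4, 5]);
```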
+--------------------------------------------------------------------------------
+
+## 3.15.3
+
+Released 2024-02-22.
+
+### Added
+
+* Added additional performance improvements to `bumpalo::collections::Vec`
+ related to reserving capacity.
+
+--------------------------------------------------------------------------------
+
+## 3.15.2
+
+Released 2024-02-21.
+
+### Added
+
+* Add a `bumpalo::collections::Vec::extend_from_slice_copy` method. This doesn't
+  exist on the standard library's `Vec` because the standard library can rely on
+  specialization, so its regular `extend_from_slice` is already specialized for
+  `Copy` types. Using this new method for `Copy` types is a ~80x performance
+  improvement over the plain `extend_from_slice` method.
+
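A short usage sketch, again assuming the `collections` cargo feature (the ~80x figure is the changelog's own claim, not re-measured here):

```rust
use bumpalo::{collections::Vec, Bump};

let bump = Bump::new();
let mut buf = Vec::new_in(&bump);

// `u8` is `Copy`, so the bulk-copy fast path applies.
buf.extend_from_slice_copy(b"hello ");
buf.extend_from_slice_copy(b"world");
assert_eq!(buf, "hello world".as_bytes());
```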
+--------------------------------------------------------------------------------
+
+## 3.15.1
+
+Released 2024-02-20.
+
+### Fixed
+
+* Fixed the MSRV listed in `Cargo.toml`, which was not updated when the MSRV
+  was bumped in release 3.15.0.
+
+--------------------------------------------------------------------------------
+
+## 3.15.0
+
+Released 2024-02-15.
+
+### Changed
+
+* The minimum supported Rust version (MSRV) is now 1.73.0.
+* `bumpalo::collections::String::push_str` and
+ `bumpalo::collections::String::from_str_in` received significant performance
+ improvements.
+* Allocator trait methods are now marked `#[inline]`, increasing performance for
+ some callers.
+
+### Fixed
+
+* Fixed an edge-case bug in the `Allocator::shrink` method.
+
+--------------------------------------------------------------------------------
+
+## 3.14.0
+
+Released 2023-09-14.
+
+### Added
+
+* Added the `std` cargo feature, which enables implementations of `std` traits
+ for various things. Right now that is just `std::io::Write` for
+ `bumpalo::collections::Vec`, but could be more in the future.
+
+--------------------------------------------------------------------------------
+
+## 3.13.0
+
+Released 2023-05-22.
+
+### Added
+
+* New `"allocator-api2"` feature enables the use of the allocator API on
+ stable. This feature uses a crate that mirrors the API of the unstable Rust
+ `allocator_api` feature. If the feature is enabled, references to `Bump` will
+ implement `allocator_api2::Allocator`. This allows `Bump` to be used as an
+ allocator for collection types from `allocator-api2` and any other crates that
+ support `allocator-api2`.
+
+### Changed
+
+* The minimum supported Rust version (MSRV) is now 1.63.0.
+
+--------------------------------------------------------------------------------
+
+## 3.12.2
+
+Released 2023-05-09.
+
+### Changed
+
+* Added `rust-version` metadata to `Cargo.toml` which helps `cargo` with version
+ resolution.
+
+--------------------------------------------------------------------------------
+
+## 3.12.1
+
+Released 2023-04-21.
+
+### Fixed
+
+* Fixed a bug where `Bump::try_with_capacity(n)` with `n > isize::MAX` could
+ lead to attempts to create invalid `Layout`s.
+
+--------------------------------------------------------------------------------
+
## 3.12.0
Released 2023-01-17.
@@ -489,7 +609,7 @@ Released 2019-12-20.
from the allocated chunks are slightly different from the old
`each_allocated_chunk`: only up to 16-byte alignment is supported now. If you
allocate anything with greater alignment than that into the bump arena, there
- might be uninitilized padding inserted in the chunks, and therefore it is no
+ might be uninitialized padding inserted in the chunks, and therefore it is no
longer safe to read them via `MaybeUninit::assume_init`. See also the note
about bump direction in the "changed" section; if you're iterating chunks,
you're likely affected by that change!
@@ -528,7 +648,7 @@ Released 2019-05-20.
* Fixed a bug where chunks were always deallocated with the default chunk
layout, not the layout that the chunk was actually allocated with (i.e. if we
- started growing largers chunks with larger layouts, we would deallocate those
+ started growing larger chunks with larger layouts, we would deallocate those
chunks with an incorrect layout).
--------------------------------------------------------------------------------
diff --git a/third_party/rust/bumpalo/Cargo.toml b/third_party/rust/bumpalo/Cargo.toml
index 02ec679c2b..53fc06bb97 100644
--- a/third_party/rust/bumpalo/Cargo.toml
+++ b/third_party/rust/bumpalo/Cargo.toml
@@ -11,8 +11,9 @@
[package]
edition = "2021"
+rust-version = "1.73.0"
name = "bumpalo"
-version = "3.12.0"
+version = "3.15.4"
authors = ["Nick Fitzgerald <fitzgen@gmail.com>"]
exclude = [
"/.github/*",
@@ -29,7 +30,7 @@ categories = [
"rust-patterns",
"no-std",
]
-license = "MIT/Apache-2.0"
+license = "MIT OR Apache-2.0"
repository = "https://github.com/fitzgen/bumpalo"
[package.metadata.docs.rs]
@@ -50,6 +51,11 @@ path = "benches/benches.rs"
harness = false
required-features = ["collections"]
+[dependencies.allocator-api2]
+version = "0.2.8"
+optional = true
+default-features = false
+
[dev-dependencies.criterion]
version = "0.3.6"
@@ -64,3 +70,4 @@ allocator_api = []
boxed = []
collections = []
default = []
+std = []
diff --git a/third_party/rust/bumpalo/README.md b/third_party/rust/bumpalo/README.md
index 3d73e2967e..d0da14c87d 100644
--- a/third_party/rust/bumpalo/README.md
+++ b/third_party/rust/bumpalo/README.md
@@ -155,7 +155,14 @@ in its space itself.
### `#![no_std]` Support
-Bumpalo is a `no_std` crate. It depends only on the `alloc` and `core` crates.
+Bumpalo is a `no_std` crate by default. It depends only on the `alloc` and `core` crates.
+
+### `std` Support
+
+You can optionally enable the `std` feature to get implementations of some
+`std`-only traits for some collections:
+
+* `std::io::Write` for `Vec<'bump, u8>`
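A minimal sketch of what this enables, assuming both the `collections` and `std` features are turned on:

```rust
use std::io::Write;

use bumpalo::{collections::Vec, Bump};

let bump = Bump::new();
let mut buf = Vec::<u8>::new_in(&bump);

// `Vec<'bump, u8>` implements `std::io::Write`, so `write!` and friends work.
write!(buf, "x = {}", 42).unwrap();
assert_eq!(buf, "x = 42".as_bytes());
```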
### Thread support
@@ -179,7 +186,7 @@ First, enable the `allocator_api` feature in your `Cargo.toml`:
```toml
[dependencies]
-bumpalo = { version = "3.9", features = ["allocator_api"] }
+bumpalo = { version = "3", features = ["allocator_api"] }
```
Next, enable the `allocator_api` nightly Rust feature in your `src/lib.rs` or
@@ -207,9 +214,17 @@ v.push(2);
[`Allocator`]: https://doc.rust-lang.org/std/alloc/trait.Allocator.html
-#### Minimum Supported Rust Version (MSRV)
+### Using the `Allocator` API on Stable Rust
+
+You can enable the `allocator-api2` Cargo feature and `bumpalo` will use [the
+`allocator-api2` crate](https://crates.io/crates/allocator-api2) to implement
+the unstable, nightly-only `Allocator` API on stable Rust. This means that
+`bumpalo::Bump` will be usable with any collection that is generic over
+`allocator_api2::Allocator`.
+
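A sketch of what this looks like in practice, assuming bumpalo's `allocator-api2` feature is enabled and that the `allocator-api2` crate's default `alloc` feature (which provides its own `Vec`) is available:

```rust
use allocator_api2::vec::Vec;
use bumpalo::Bump;

let bump = Bump::new();

// `&Bump` implements `allocator_api2::alloc::Allocator`, so it can back any
// collection that is generic over that trait.
let mut v: Vec<u32, &Bump> = Vec::new_in(&bump);
v.push(1);
v.push(2);
assert_eq!(v.as_slice(), &[1, 2]);
```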
+### Minimum Supported Rust Version (MSRV)
-This crate is guaranteed to compile on stable Rust **1.56** and up. It might
+This crate is guaranteed to compile on stable Rust **1.73** and up. It might
compile with older versions but that may change in any new patch release.
We reserve the right to increment the MSRV on minor releases, however we will
diff --git a/third_party/rust/bumpalo/src/alloc.rs b/third_party/rust/bumpalo/src/alloc.rs
index 0bcc21f22c..6947e2a6cf 100644
--- a/third_party/rust/bumpalo/src/alloc.rs
+++ b/third_party/rust/bumpalo/src/alloc.rs
@@ -752,7 +752,7 @@ pub unsafe trait Alloc {
match (Layout::array::<T>(n_old), Layout::array::<T>(n_new)) {
(Ok(ref k_old), Ok(ref k_new)) if k_old.size() > 0 && k_new.size() > 0 => {
debug_assert!(k_old.align() == k_new.align());
- self.realloc(ptr.cast(), k_old.clone(), k_new.size())
+ self.realloc(ptr.cast(), *k_old, k_new.size())
.map(NonNull::cast)
}
_ => Err(AllocErr),
diff --git a/third_party/rust/bumpalo/src/collections/raw_vec.rs b/third_party/rust/bumpalo/src/collections/raw_vec.rs
index ac3bd0758c..456829d447 100644
--- a/third_party/rust/bumpalo/src/collections/raw_vec.rs
+++ b/third_party/rust/bumpalo/src/collections/raw_vec.rs
@@ -319,7 +319,7 @@ impl<'a, T> RawVec<'a, T> {
used_cap: usize,
needed_extra_cap: usize,
) -> Result<(), CollectionAllocErr> {
- self.reserve_internal(used_cap, needed_extra_cap, Fallible, Exact)
+ self.fallible_reserve_internal(used_cap, needed_extra_cap, Exact)
}
/// Ensures that the buffer contains at least enough space to hold
@@ -343,11 +343,7 @@ impl<'a, T> RawVec<'a, T> {
///
/// Aborts on OOM
pub fn reserve_exact(&mut self, used_cap: usize, needed_extra_cap: usize) {
- match self.reserve_internal(used_cap, needed_extra_cap, Infallible, Exact) {
- Err(CapacityOverflow) => capacity_overflow(),
- Err(AllocErr) => unreachable!(),
- Ok(()) => { /* yay */ }
- }
+ self.infallible_reserve_internal(used_cap, needed_extra_cap, Exact)
}
/// Calculates the buffer's new size given that it'll hold `used_cap +
@@ -374,7 +370,7 @@ impl<'a, T> RawVec<'a, T> {
used_cap: usize,
needed_extra_cap: usize,
) -> Result<(), CollectionAllocErr> {
- self.reserve_internal(used_cap, needed_extra_cap, Fallible, Amortized)
+ self.fallible_reserve_internal(used_cap, needed_extra_cap, Amortized)
}
/// Ensures that the buffer contains at least enough space to hold
@@ -429,13 +425,11 @@ impl<'a, T> RawVec<'a, T> {
/// # vector.push_all(&[1, 3, 5, 7, 9]);
/// # }
/// ```
+ #[inline(always)]
pub fn reserve(&mut self, used_cap: usize, needed_extra_cap: usize) {
- match self.reserve_internal(used_cap, needed_extra_cap, Infallible, Amortized) {
- Err(CapacityOverflow) => capacity_overflow(),
- Err(AllocErr) => unreachable!(),
- Ok(()) => { /* yay */ }
- }
+ self.infallible_reserve_internal(used_cap, needed_extra_cap, Amortized)
}
+
/// Attempts to ensure that the buffer contains at least enough space to hold
/// `used_cap + needed_extra_cap` elements. If it doesn't already have
/// enough capacity, will reallocate in place enough space plus comfortable slack
@@ -593,6 +587,68 @@ enum ReserveStrategy {
use self::ReserveStrategy::*;
impl<'a, T> RawVec<'a, T> {
+ #[inline(always)]
+ fn fallible_reserve_internal(
+ &mut self,
+ used_cap: usize,
+ needed_extra_cap: usize,
+ strategy: ReserveStrategy,
+ ) -> Result<(), CollectionAllocErr> {
+ // This portion of the method should always be inlined.
+ if self.cap().wrapping_sub(used_cap) >= needed_extra_cap {
+ return Ok(());
+ }
+ // This portion of the method should never be inlined, and will only be called when
+ // the check above has confirmed that it is necessary.
+ self.reserve_internal_or_error(used_cap, needed_extra_cap, Fallible, strategy)
+ }
+
+ #[inline(always)]
+ fn infallible_reserve_internal(
+ &mut self,
+ used_cap: usize,
+ needed_extra_cap: usize,
+ strategy: ReserveStrategy,
+ ) {
+ // This portion of the method should always be inlined.
+ if self.cap().wrapping_sub(used_cap) >= needed_extra_cap {
+ return;
+ }
+ // This portion of the method should never be inlined, and will only be called when
+ // the check above has confirmed that it is necessary.
+ self.reserve_internal_or_panic(used_cap, needed_extra_cap, strategy)
+ }
+
+ #[inline(never)]
+ fn reserve_internal_or_panic(
+ &mut self,
+ used_cap: usize,
+ needed_extra_cap: usize,
+ strategy: ReserveStrategy,
+ ) {
+ // Delegates the call to `reserve_internal_or_error` and panics in the event of an error.
+ // This allows the method to have a return type of `()`, simplifying the assembly at the
+ // call site.
+ match self.reserve_internal(used_cap, needed_extra_cap, Infallible, strategy) {
+ Err(CapacityOverflow) => capacity_overflow(),
+ Err(AllocErr) => unreachable!(),
+ Ok(()) => { /* yay */ }
+ }
+ }
+
+ #[inline(never)]
+ fn reserve_internal_or_error(
+ &mut self,
+ used_cap: usize,
+ needed_extra_cap: usize,
+ fallibility: Fallibility,
+        strategy: ReserveStrategy,
+    ) -> Result<(), CollectionAllocErr> {
+ // Delegates the call to `reserve_internal`, which can be inlined.
+ self.reserve_internal(used_cap, needed_extra_cap, fallibility, strategy)
+ }
+
+ /// Helper method to reserve additional space, reallocating the backing memory.
+ /// The caller is responsible for confirming that there is not already enough space available.
fn reserve_internal(
&mut self,
used_cap: usize,
@@ -608,12 +664,6 @@ impl<'a, T> RawVec<'a, T> {
// If we make it past the first branch then we are guaranteed to
// panic.
- // Don't actually need any more capacity.
- // Wrapping in case they gave a bad `used_cap`.
- if self.cap().wrapping_sub(used_cap) >= needed_extra_cap {
- return Ok(());
- }
-
// Nothing we can really do about these checks :(
let new_cap = match strategy {
Exact => used_cap
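The restructuring above is a hot/cold split: the cheap capacity check is forced inline at every call site, while the rarely taken reallocation path is kept out of line so it does not bloat callers. A minimal standalone sketch of the same pattern (illustrative only, using a hypothetical `Buf` type rather than bumpalo's actual `RawVec`):

```rust
struct Buf {
    len: usize,
    cap: usize,
}

impl Buf {
    /// Hot path: a cheap check the compiler can inline into every caller.
    #[inline(always)]
    fn reserve(&mut self, extra: usize) {
        // `wrapping_sub` keeps the fast path a single compare even if `len` is
        // inconsistent; the slow path re-validates everything.
        if self.cap.wrapping_sub(self.len) >= extra {
            return;
        }
        self.reserve_slow(extra);
    }

    /// Cold path: the expensive growth logic stays out of line.
    #[inline(never)]
    #[cold]
    fn reserve_slow(&mut self, extra: usize) {
        let needed = self.len.checked_add(extra).expect("capacity overflow");
        // A real implementation would reallocate the backing storage here.
        self.cap = needed.next_power_of_two();
    }
}
```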
diff --git a/third_party/rust/bumpalo/src/collections/string.rs b/third_party/rust/bumpalo/src/collections/string.rs
index ffd1db92de..e9fafbf204 100644
--- a/third_party/rust/bumpalo/src/collections/string.rs
+++ b/third_party/rust/bumpalo/src/collections/string.rs
@@ -680,8 +680,19 @@ impl<'bump> String<'bump> {
/// assert_eq!(s, "hello");
/// ```
pub fn from_str_in(s: &str, bump: &'bump Bump) -> String<'bump> {
- let mut t = String::with_capacity_in(s.len(), bump);
- t.push_str(s);
+ let len = s.len();
+ let mut t = String::with_capacity_in(len, bump);
+ // SAFETY:
+ // * `src` is valid for reads of `s.len()` bytes by virtue of being an allocated `&str`.
+ // * `dst` is valid for writes of `s.len()` bytes as `String::with_capacity_in(s.len(), bump)`
+ // above guarantees that.
+ // * Alignment is not relevant as `u8` has no alignment requirements.
+ // * Source and destination ranges cannot overlap as we just reserved the destination
+ // range from the bump.
+ unsafe { ptr::copy_nonoverlapping(s.as_ptr(), t.vec.as_mut_ptr(), len) };
+        // SAFETY: We reserved sufficient capacity for the string above.
+ // The elements at `0..len` were initialized by `copy_nonoverlapping` above.
+ unsafe { t.vec.set_len(len) };
t
}
@@ -925,7 +936,7 @@ impl<'bump> String<'bump> {
/// ```
#[inline]
pub fn push_str(&mut self, string: &str) {
- self.vec.extend_from_slice(string.as_bytes())
+ self.vec.extend_from_slice_copy(string.as_bytes())
}
/// Returns this `String`'s capacity, in bytes.
diff --git a/third_party/rust/bumpalo/src/collections/vec.rs b/third_party/rust/bumpalo/src/collections/vec.rs
index 312aa055b9..0dab700727 100644
--- a/third_party/rust/bumpalo/src/collections/vec.rs
+++ b/third_party/rust/bumpalo/src/collections/vec.rs
@@ -104,6 +104,8 @@ use core::ops::{Index, IndexMut, RangeBounds};
use core::ptr;
use core::ptr::NonNull;
use core::slice;
+#[cfg(feature = "std")]
+use std::io;
unsafe fn arith_offset<T>(p: *const T, offset: isize) -> *const T {
p.offset(offset)
@@ -1775,6 +1777,132 @@ impl<'bump, T: 'bump + Clone> Vec<'bump, T> {
}
}
+impl<'bump, T: 'bump + Copy> Vec<'bump, T> {
+ /// Helper method to copy all of the items in `other` and append them to the end of `self`.
+ ///
+ /// SAFETY:
+ /// * The caller is responsible for:
+ /// * calling [`reserve`](Self::reserve) beforehand to guarantee that there is enough
+ /// capacity to store `other.len()` more items.
+ /// * guaranteeing that `self` and `other` do not overlap.
+ unsafe fn extend_from_slice_copy_unchecked(&mut self, other: &[T]) {
+ let old_len = self.len();
+ debug_assert!(old_len + other.len() <= self.capacity());
+
+ // SAFETY:
+ // * `src` is valid for reads of `other.len()` values by virtue of being a `&[T]`.
+        // * `dst` is valid for writes of `other.len()` values because the caller of this
+ // method is required to `reserve` capacity to store at least `other.len()` items
+ // beforehand.
+        // * Because `src` is a `&[T]` and `dst` points into the `Vec<T>`'s buffer, both
+        //   are properly aligned for `T`, so `copy_nonoverlapping`'s alignment requirements are met.
+ // * Caller is required to guarantee that the source and destination ranges cannot overlap
+ unsafe {
+ let src = other.as_ptr();
+ let dst = self.as_mut_ptr().add(old_len);
+ ptr::copy_nonoverlapping(src, dst, other.len());
+ self.set_len(old_len + other.len());
+ }
+ }
+
+
+ /// Copies all elements in the slice `other` and appends them to the `Vec`.
+ ///
+    /// Note that this function is the same as [`extend_from_slice`] except that it is optimized for
+ /// slices of types that implement the `Copy` trait. If and when Rust gets specialization
+ /// this function will likely be deprecated (but still available).
+ ///
+ /// To copy and append the data from multiple source slices at once, see
+ /// [`extend_from_slices_copy`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bumpalo::{Bump, collections::Vec};
+ ///
+ /// let b = Bump::new();
+ ///
+ /// let mut vec = bumpalo::vec![in &b; 1];
+ /// vec.extend_from_slice_copy(&[2, 3, 4]);
+ /// assert_eq!(vec, [1, 2, 3, 4]);
+ /// ```
+ ///
+ /// ```
+ /// use bumpalo::{Bump, collections::Vec};
+ ///
+ /// let b = Bump::new();
+ ///
+ /// let mut vec = bumpalo::vec![in &b; 'H' as u8];
+ /// vec.extend_from_slice_copy("ello, world!".as_bytes());
+ /// assert_eq!(vec, "Hello, world!".as_bytes());
+ /// ```
+ ///
+ /// [`extend_from_slice`]: #method.extend_from_slice
+    /// [`extend_from_slices_copy`]: #method.extend_from_slices_copy
+ pub fn extend_from_slice_copy(&mut self, other: &[T]) {
+ // Reserve space in the Vec for the values to be added
+ self.reserve(other.len());
+
+ // Copy values into the space that was just reserved
+ // SAFETY:
+ // * `self` has enough capacity to store `other.len()` more items as `self.reserve(other.len())`
+ // above guarantees that.
+ // * Source and destination data ranges cannot overlap as we just reserved the destination
+ // range from the bump.
+ unsafe {
+ self.extend_from_slice_copy_unchecked(other);
+ }
+ }
+
+ /// For each slice in `slices`, copies all elements in the slice and appends them to the `Vec`.
+ ///
+ /// This method is equivalent to calling [`extend_from_slice_copy`] in a loop, but is able
+ /// to precompute the total amount of space to reserve in advance. This reduces the potential
+ /// maximum number of reallocations needed from one-per-slice to just one.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use bumpalo::{Bump, collections::Vec};
+ ///
+ /// let b = Bump::new();
+ ///
+ /// let mut vec = bumpalo::vec![in &b; 1];
+ /// vec.extend_from_slices_copy(&[&[2, 3], &[], &[4]]);
+ /// assert_eq!(vec, [1, 2, 3, 4]);
+ /// ```
+ ///
+ /// ```
+ /// use bumpalo::{Bump, collections::Vec};
+ ///
+ /// let b = Bump::new();
+ ///
+ /// let mut vec = bumpalo::vec![in &b; 'H' as u8];
+ /// vec.extend_from_slices_copy(&["ello,".as_bytes(), &[], " world!".as_bytes()]);
+ /// assert_eq!(vec, "Hello, world!".as_bytes());
+ /// ```
+ ///
+ /// [`extend_from_slice_copy`]: #method.extend_from_slice_copy
+ pub fn extend_from_slices_copy(&mut self, slices: &[&[T]]) {
+ // Reserve the total amount of capacity we'll need to safely append the aggregated contents
+ // of each slice in `slices`.
+ let capacity_to_reserve: usize = slices.iter().map(|slice| slice.len()).sum();
+ self.reserve(capacity_to_reserve);
+
+ // SAFETY:
+ // * `dst` is valid for writes of `capacity_to_reserve` items as
+ // `self.reserve(capacity_to_reserve)` above guarantees that.
+ // * Source and destination ranges cannot overlap as we just reserved the destination
+ // range from the bump.
+ unsafe {
+ // Copy the contents of each slice onto the end of `self`
+ slices.iter().for_each(|slice| {
+ self.extend_from_slice_copy_unchecked(slice);
+ });
+ }
+ }
+}
+
// This code generalises `extend_with_{element,default}`.
trait ExtendWith<T> {
fn next(&mut self) -> T;
@@ -2612,3 +2740,23 @@ where
}
}
}
+
+#[cfg(feature = "std")]
+impl<'bump> io::Write for Vec<'bump, u8> {
+ #[inline]
+ fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
+ self.extend_from_slice_copy(buf);
+ Ok(buf.len())
+ }
+
+ #[inline]
+ fn write_all(&mut self, buf: &[u8]) -> io::Result<()> {
+ self.extend_from_slice_copy(buf);
+ Ok(())
+ }
+
+ #[inline]
+ fn flush(&mut self) -> io::Result<()> {
+ Ok(())
+ }
+}
diff --git a/third_party/rust/bumpalo/src/lib.rs b/third_party/rust/bumpalo/src/lib.rs
index 74dfcd4361..b23cfeabc8 100644..100755
--- a/third_party/rust/bumpalo/src/lib.rs
+++ b/third_party/rust/bumpalo/src/lib.rs
@@ -1,11 +1,8 @@
#![doc = include_str!("../README.md")]
#![deny(missing_debug_implementations)]
#![deny(missing_docs)]
-#![no_std]
-#![cfg_attr(
- feature = "allocator_api",
- feature(allocator_api, nonnull_slice_from_raw_parts)
-)]
+#![cfg_attr(not(feature = "std"), no_std)]
+#![cfg_attr(feature = "allocator_api", feature(allocator_api))]
#[doc(hidden)]
pub extern crate alloc as core_alloc;
@@ -26,9 +23,13 @@ use core::ptr::{self, NonNull};
use core::slice;
use core::str;
use core_alloc::alloc::{alloc, dealloc, Layout};
+
#[cfg(feature = "allocator_api")]
use core_alloc::alloc::{AllocError, Allocator};
+#[cfg(all(feature = "allocator-api2", not(feature = "allocator_api")))]
+use allocator_api2::alloc::{AllocError, Allocator};
+
pub use alloc::AllocErr;
/// An error returned from [`Bump::try_alloc_try_with`].
@@ -354,7 +355,7 @@ static EMPTY_CHUNK: EmptyChunkFooter = EmptyChunkFooter(ChunkFooter {
impl EmptyChunkFooter {
fn get(&'static self) -> NonNull<ChunkFooter> {
- unsafe { NonNull::new_unchecked(&self.0 as *const ChunkFooter as *mut ChunkFooter) }
+ NonNull::from(&self.0)
}
}
@@ -406,6 +407,15 @@ unsafe fn dealloc_chunk_list(mut footer: NonNull<ChunkFooter>) {
unsafe impl Send for Bump {}
#[inline]
+fn is_pointer_aligned_to<T>(pointer: *mut T, align: usize) -> bool {
+ debug_assert!(align.is_power_of_two());
+
+ let pointer = pointer as usize;
+ let pointer_aligned = round_down_to(pointer, align);
+ pointer == pointer_aligned
+}
+
+#[inline]
pub(crate) fn round_up_to(n: usize, divisor: usize) -> Option<usize> {
debug_assert!(divisor > 0);
debug_assert!(divisor.is_power_of_two());
@@ -419,6 +429,14 @@ pub(crate) fn round_down_to(n: usize, divisor: usize) -> usize {
n & !(divisor - 1)
}
+/// Same as `round_down_to` but preserves pointer provenance.
+#[inline]
+pub(crate) fn round_mut_ptr_down_to(ptr: *mut u8, divisor: usize) -> *mut u8 {
+ debug_assert!(divisor > 0);
+ debug_assert!(divisor.is_power_of_two());
+ ptr.wrapping_sub(ptr as usize & (divisor - 1))
+}
+
// After this point, we try to hit page boundaries instead of powers of 2
const PAGE_STRATEGY_CUTOFF: usize = 0x1000;
@@ -463,12 +481,8 @@ struct NewChunkMemoryDetails {
/// Wrapper around `Layout::from_size_align` that adds debug assertions.
#[inline]
-unsafe fn layout_from_size_align(size: usize, align: usize) -> Layout {
- if cfg!(debug_assertions) {
- Layout::from_size_align(size, align).unwrap()
- } else {
- Layout::from_size_align_unchecked(size, align)
- }
+fn layout_from_size_align(size: usize, align: usize) -> Result<Layout, AllocErr> {
+ Layout::from_size_align(size, align).map_err(|_| AllocErr)
}
#[inline(never)]
@@ -476,12 +490,6 @@ fn allocation_size_overflow<T>() -> T {
panic!("requested allocation size overflowed")
}
-// This can be migrated to directly use `usize::abs_diff` when the MSRV
-// reaches `1.60`
-fn abs_diff(a: usize, b: usize) -> usize {
- usize::max(a, b) - usize::min(a, b)
-}
-
impl Bump {
/// Construct a new arena to bump allocate into.
///
@@ -535,7 +543,7 @@ impl Bump {
});
}
- let layout = unsafe { layout_from_size_align(capacity, 1) };
+ let layout = layout_from_size_align(capacity, 1)?;
let chunk_footer = unsafe {
Self::new_chunk(
@@ -589,7 +597,7 @@ impl Bump {
/// assert!(bump.try_alloc(5).is_err());
/// ```
pub fn set_allocation_limit(&self, limit: Option<usize>) {
- self.allocation_limit.set(limit)
+ self.allocation_limit.set(limit);
}
/// How much headroom an arena has before it hits its allocation
@@ -600,7 +608,7 @@ impl Bump {
if allocated_bytes > allocation_limit {
None
} else {
- Some(abs_diff(allocation_limit, allocated_bytes))
+ Some(usize::abs_diff(allocation_limit, allocated_bytes))
}
})
}
@@ -682,7 +690,7 @@ impl Bump {
size,
} = new_chunk_memory_details;
- let layout = layout_from_size_align(size, align);
+ let layout = layout_from_size_align(size, align).ok()?;
debug_assert!(size >= requested_layout.size());
@@ -801,7 +809,6 @@ impl Bump {
/// assert_eq!(*x, "hello");
/// ```
#[inline(always)]
- #[allow(clippy::mut_from_ref)]
pub fn alloc<T>(&self, val: T) -> &mut T {
self.alloc_with(|| val)
}
@@ -821,7 +828,6 @@ impl Bump {
/// assert_eq!(x, Ok(&mut "hello"));
/// ```
#[inline(always)]
- #[allow(clippy::mut_from_ref)]
pub fn try_alloc<T>(&self, val: T) -> Result<&mut T, AllocErr> {
self.try_alloc_with(|| val)
}
@@ -846,7 +852,6 @@ impl Bump {
/// assert_eq!(*x, "hello");
/// ```
#[inline(always)]
- #[allow(clippy::mut_from_ref)]
pub fn alloc_with<F, T>(&self, f: F) -> &mut T
where
F: FnOnce() -> T,
@@ -866,7 +871,7 @@ impl Bump {
// directly into the heap instead. It seems we get it to realize
// this most consistently if we put this critical line into it's
// own function instead of inlining it into the surrounding code.
- ptr::write(ptr, f())
+ ptr::write(ptr, f());
}
let layout = Layout::new::<T>();
@@ -899,7 +904,6 @@ impl Bump {
/// assert_eq!(x, Ok(&mut "hello"));
/// ```
#[inline(always)]
- #[allow(clippy::mut_from_ref)]
pub fn try_alloc_with<F, T>(&self, f: F) -> Result<&mut T, AllocErr>
where
F: FnOnce() -> T,
@@ -919,7 +923,7 @@ impl Bump {
// directly into the heap instead. It seems we get it to realize
// this most consistently if we put this critical line into it's
// own function instead of inlining it into the surrounding code.
- ptr::write(ptr, f())
+ ptr::write(ptr, f());
}
//SAFETY: Self-contained:
@@ -971,7 +975,6 @@ impl Bump {
/// # Result::<_, ()>::Ok(())
/// ```
#[inline(always)]
- #[allow(clippy::mut_from_ref)]
pub fn alloc_try_with<F, T, E>(&self, f: F) -> Result<&mut T, E>
where
F: FnOnce() -> Result<T, E>,
@@ -1080,7 +1083,6 @@ impl Bump {
/// # Result::<_, bumpalo::AllocOrInitError<()>>::Ok(())
/// ```
#[inline(always)]
- #[allow(clippy::mut_from_ref)]
pub fn try_alloc_try_with<F, T, E>(&self, f: F) -> Result<&mut T, AllocOrInitError<E>>
where
F: FnOnce() -> Result<T, E>,
@@ -1165,7 +1167,6 @@ impl Bump {
/// assert_eq!(x, &[1, 2, 3]);
/// ```
#[inline(always)]
- #[allow(clippy::mut_from_ref)]
pub fn alloc_slice_copy<T>(&self, src: &[T]) -> &mut [T]
where
T: Copy,
@@ -1205,7 +1206,6 @@ impl Bump {
/// assert_eq!(originals, clones);
/// ```
#[inline(always)]
- #[allow(clippy::mut_from_ref)]
pub fn alloc_slice_clone<T>(&self, src: &[T]) -> &mut [T]
where
T: Clone,
@@ -1236,7 +1236,6 @@ impl Bump {
/// assert_eq!("hello world", hello);
/// ```
#[inline(always)]
- #[allow(clippy::mut_from_ref)]
pub fn alloc_str(&self, src: &str) -> &mut str {
let buffer = self.alloc_slice_copy(src.as_bytes());
unsafe {
@@ -1263,7 +1262,6 @@ impl Bump {
/// assert_eq!(x, &[5, 10, 15, 20, 25]);
/// ```
#[inline(always)]
- #[allow(clippy::mut_from_ref)]
pub fn alloc_slice_fill_with<T, F>(&self, len: usize, mut f: F) -> &mut [T]
where
F: FnMut(usize) -> T,
@@ -1299,7 +1297,6 @@ impl Bump {
/// assert_eq!(x, &[42, 42, 42, 42, 42]);
/// ```
#[inline(always)]
- #[allow(clippy::mut_from_ref)]
pub fn alloc_slice_fill_copy<T: Copy>(&self, len: usize, value: T) -> &mut [T] {
self.alloc_slice_fill_with(len, |_| value)
}
@@ -1324,7 +1321,6 @@ impl Bump {
/// assert_eq!(&x[1], &s);
/// ```
#[inline(always)]
- #[allow(clippy::mut_from_ref)]
pub fn alloc_slice_fill_clone<T: Clone>(&self, len: usize, value: &T) -> &mut [T] {
self.alloc_slice_fill_with(len, |_| value.clone())
}
@@ -1347,7 +1343,6 @@ impl Bump {
/// assert_eq!(x, [4, 9, 25]);
/// ```
#[inline(always)]
- #[allow(clippy::mut_from_ref)]
pub fn alloc_slice_fill_iter<T, I>(&self, iter: I) -> &mut [T]
where
I: IntoIterator<Item = T>,
@@ -1378,7 +1373,6 @@ impl Bump {
/// assert_eq!(x, &[0, 0, 0, 0, 0]);
/// ```
#[inline(always)]
- #[allow(clippy::mut_from_ref)]
pub fn alloc_slice_fill_default<T: Default>(&self, len: usize) -> &mut [T] {
self.alloc_slice_fill_with(len, |_| T::default())
}
@@ -1435,11 +1429,10 @@ impl Bump {
}
let ptr = ptr.wrapping_sub(layout.size());
- let rem = ptr as usize % layout.align();
- let aligned_ptr = ptr.wrapping_sub(rem);
+ let aligned_ptr = round_mut_ptr_down_to(ptr, layout.align());
if aligned_ptr >= start {
- let aligned_ptr = NonNull::new_unchecked(aligned_ptr as *mut u8);
+ let aligned_ptr = NonNull::new_unchecked(aligned_ptr);
footer.ptr.set(aligned_ptr);
Some(aligned_ptr)
} else {
@@ -1464,12 +1457,13 @@ impl Bump {
let current_footer = self.current_chunk_footer.get();
let current_footer = unsafe { current_footer.as_ref() };
- current_footer as *const _ as usize - current_footer.data.as_ptr() as usize
+ current_footer.ptr.get().as_ptr() as usize - current_footer.data.as_ptr() as usize
}
/// Slow path allocation for when we need to allocate a new chunk from the
/// parent bump set because there isn't enough room in our current chunk.
#[inline(never)]
+ #[cold]
fn alloc_layout_slow(&self, layout: Layout) -> Option<NonNull<u8>> {
unsafe {
let size = layout.size();
@@ -1488,21 +1482,14 @@ impl Bump {
.checked_mul(2)?
.max(min_new_chunk_size);
let chunk_memory_details = iter::from_fn(|| {
- let bypass_min_chunk_size_for_small_limits = match self.allocation_limit() {
- Some(limit)
- if layout.size() < limit
+ let bypass_min_chunk_size_for_small_limits = matches!(self.allocation_limit(), Some(limit) if layout.size() < limit
&& base_size >= layout.size()
&& limit < DEFAULT_CHUNK_SIZE_WITHOUT_FOOTER
- && self.allocated_bytes() == 0 =>
- {
- true
- }
- _ => false,
- };
+ && self.allocated_bytes() == 0);
if base_size >= min_new_chunk_size || bypass_min_chunk_size_for_small_limits {
let size = base_size;
- base_size = base_size / 2;
+ base_size /= 2;
Bump::new_chunk_memory_details(Some(size), layout)
} else {
None
@@ -1537,14 +1524,14 @@ impl Bump {
// at least the requested size.
let mut ptr = new_footer.ptr.get().as_ptr().sub(size);
// Round the pointer down to the requested alignment.
- ptr = ptr.sub(ptr as usize % layout.align());
+ ptr = round_mut_ptr_down_to(ptr, layout.align());
debug_assert!(
ptr as *const _ <= new_footer,
"{:p} <= {:p}",
ptr,
new_footer
);
- let ptr = NonNull::new_unchecked(ptr as *mut u8);
+ let ptr = NonNull::new_unchecked(ptr);
new_footer.ptr.set(ptr);
// Return a pointer to the freshly allocated region in this chunk.
@@ -1696,6 +1683,16 @@ impl Bump {
unsafe { footer.as_ref().allocated_bytes }
}
+ /// Calculates the number of bytes requested from the Rust allocator for this `Bump`.
+ ///
+ /// This number is equal to the [`allocated_bytes()`](Self::allocated_bytes) plus
+ /// the size of the bump metadata.
+ pub fn allocated_bytes_including_metadata(&self) -> usize {
+ let metadata_size =
+ unsafe { self.iter_allocated_chunks_raw().count() * mem::size_of::<ChunkFooter>() };
+ self.allocated_bytes() + metadata_size
+ }
+
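A small sketch of how the new counter relates to the existing `allocated_bytes()` (assuming at least one real allocation so that a chunk and its footer exist):

```rust
use bumpalo::Bump;

let bump = Bump::new();
bump.alloc([0u8; 1024]);

// The metadata-inclusive figure adds one `ChunkFooter` per allocated chunk on
// top of what `allocated_bytes()` reports.
assert!(bump.allocated_bytes_including_metadata() >= bump.allocated_bytes());
```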
#[inline]
unsafe fn is_last_allocation(&self, ptr: NonNull<u8>) -> bool {
let footer = self.current_chunk_footer.get();
@@ -1720,13 +1717,31 @@ impl Bump {
old_layout: Layout,
new_layout: Layout,
) -> Result<NonNull<u8>, AllocErr> {
+ // If the new layout demands greater alignment than the old layout has,
+ // then either
+ //
+ // 1. the pointer happens to satisfy the new layout's alignment, so we
+ // got lucky and can return the pointer as-is, or
+ //
+ // 2. the pointer is not aligned to the new layout's demanded alignment,
+ // and we are unlucky.
+ //
+ // In the case of (2), to successfully "shrink" the allocation, we would
+ // have to allocate a whole new region for the new layout, without being
+ // able to free the old region. That is unacceptable, so simply return
+ // an allocation failure error instead.
+ if old_layout.align() < new_layout.align() {
+ if is_pointer_aligned_to(ptr.as_ptr(), new_layout.align()) {
+ return Ok(ptr);
+ } else {
+ return Err(AllocErr);
+ }
+ }
+
+ debug_assert!(is_pointer_aligned_to(ptr.as_ptr(), new_layout.align()));
+
let old_size = old_layout.size();
let new_size = new_layout.size();
- let align_is_compatible = old_layout.align() >= new_layout.align();
-
- if !align_is_compatible {
- return Err(AllocErr);
- }
// This is how much space we would *actually* reclaim while satisfying
// the requested alignment.
@@ -1736,7 +1751,32 @@ impl Bump {
// Only reclaim the excess space (which requires a copy) if it
// is worth it: we are actually going to recover "enough" space
// and we can do a non-overlapping copy.
- && delta >= old_size / 2
+ //
+ // We do `(old_size + 1) / 2` so division rounds up rather than
+ // down. Consider when:
+ //
+ // old_size = 5
+ // new_size = 3
+ //
+ // If we do not take care to round up, this will result in:
+ //
+ // delta = 2
+ // (old_size / 2) = (5 / 2) = 2
+ //
+            // And the check will succeed even though we have
+ // overlapping ranges:
+ //
+ // |--------old-allocation-------|
+ // |------from-------|
+ // |-------to--------|
+ // +-----+-----+-----+-----+-----+
+ // | a | b | c | . | . |
+ // +-----+-----+-----+-----+-----+
+ //
+ // But we MUST NOT have overlapping ranges because we use
+ // `copy_nonoverlapping` below! Therefore, we round the division
+ // up to avoid this issue.
+ && delta >= (old_size + 1) / 2
{
let footer = self.current_chunk_footer.get();
let footer = footer.as_ref();
@@ -1751,9 +1791,11 @@ impl Bump {
ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr(), new_size);
return Ok(new_ptr);
- } else {
- return Ok(ptr);
}
+
+ // If this wasn't the last allocation, or shrinking wasn't worth it,
+ // simply return the old pointer as-is.
+ Ok(ptr)
}
#[inline]
@@ -1772,7 +1814,7 @@ impl Bump {
// reuse the currently allocated space.
let delta = new_size - old_size;
if let Some(p) =
- self.try_alloc_layout_fast(layout_from_size_align(delta, old_layout.align()))
+ self.try_alloc_layout_fast(layout_from_size_align(delta, old_layout.align())?)
{
ptr::copy(ptr.as_ptr(), p.as_ptr(), old_size);
return Ok(p);
@@ -1867,7 +1909,7 @@ unsafe impl<'a> alloc::Alloc for &'a Bump {
#[inline]
unsafe fn dealloc(&mut self, ptr: NonNull<u8>, layout: Layout) {
- Bump::dealloc(self, ptr, layout)
+ Bump::dealloc(self, ptr, layout);
}
#[inline]
@@ -1883,7 +1925,7 @@ unsafe impl<'a> alloc::Alloc for &'a Bump {
return self.try_alloc_layout(layout);
}
- let new_layout = layout_from_size_align(new_size, layout.align());
+ let new_layout = layout_from_size_align(new_size, layout.align())?;
if new_size <= old_size {
self.shrink(ptr, layout, new_layout)
} else {
@@ -1892,18 +1934,23 @@ unsafe impl<'a> alloc::Alloc for &'a Bump {
}
}
-#[cfg(feature = "allocator_api")]
+#[cfg(any(feature = "allocator_api", feature = "allocator-api2"))]
unsafe impl<'a> Allocator for &'a Bump {
+ #[inline]
fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
self.try_alloc_layout(layout)
- .map(|p| NonNull::slice_from_raw_parts(p, layout.size()))
+ .map(|p| unsafe {
+ NonNull::new_unchecked(ptr::slice_from_raw_parts_mut(p.as_ptr(), layout.size()))
+ })
.map_err(|_| AllocError)
}
+ #[inline]
unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
Bump::dealloc(self, ptr, layout)
}
+ #[inline]
unsafe fn shrink(
&self,
ptr: NonNull<u8>,
@@ -1911,10 +1958,13 @@ unsafe impl<'a> Allocator for &'a Bump {
new_layout: Layout,
) -> Result<NonNull<[u8]>, AllocError> {
Bump::shrink(self, ptr, old_layout, new_layout)
- .map(|p| NonNull::slice_from_raw_parts(p, new_layout.size()))
+ .map(|p| unsafe {
+ NonNull::new_unchecked(ptr::slice_from_raw_parts_mut(p.as_ptr(), new_layout.size()))
+ })
.map_err(|_| AllocError)
}
+ #[inline]
unsafe fn grow(
&self,
ptr: NonNull<u8>,
@@ -1922,10 +1972,13 @@ unsafe impl<'a> Allocator for &'a Bump {
new_layout: Layout,
) -> Result<NonNull<[u8]>, AllocError> {
Bump::grow(self, ptr, old_layout, new_layout)
- .map(|p| NonNull::slice_from_raw_parts(p, new_layout.size()))
+ .map(|p| unsafe {
+ NonNull::new_unchecked(ptr::slice_from_raw_parts_mut(p.as_ptr(), new_layout.size()))
+ })
.map_err(|_| AllocError)
}
+ #[inline]
unsafe fn grow_zeroed(
&self,
ptr: NonNull<u8>,
@@ -1953,7 +2006,6 @@ mod tests {
// Uses private `alloc` module.
#[test]
- #[allow(clippy::cognitive_complexity)]
fn test_realloc() {
use crate::alloc::Alloc;