author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-07 05:48:48 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-07 05:48:48 +0000
commit     ef24de24a82fe681581cc130f342363c47c0969a (patch)
tree       0d494f7e1a38b95c92426f58fe6eaa877303a86c /compiler/rustc_arena/src
parent     Releasing progress-linux version 1.74.1+dfsg1-1~progress7.99u1. (diff)
Merging upstream version 1.75.0+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_arena/src')
-rw-r--r--  compiler/rustc_arena/src/lib.rs | 256
1 file changed, 95 insertions, 161 deletions
diff --git a/compiler/rustc_arena/src/lib.rs b/compiler/rustc_arena/src/lib.rs
index 23fdd272f..a53fd4ae9 100644
--- a/compiler/rustc_arena/src/lib.rs
+++ b/compiler/rustc_arena/src/lib.rs
@@ -11,13 +11,13 @@
html_root_url = "https://doc.rust-lang.org/nightly/nightly-rustc/",
test(no_crate_inject, attr(deny(warnings)))
)]
+#![cfg_attr(not(bootstrap), doc(rust_logo))]
+#![cfg_attr(not(bootstrap), feature(rustdoc_internals))]
#![feature(core_intrinsics)]
#![feature(dropck_eyepatch)]
#![feature(new_uninit)]
#![feature(maybe_uninit_slice)]
-#![feature(min_specialization)]
#![feature(decl_macro)]
-#![feature(pointer_byte_offsets)]
#![feature(rustc_attrs)]
#![cfg_attr(test, feature(test))]
#![feature(strict_provenance)]
@@ -44,23 +44,6 @@ fn outline<F: FnOnce() -> R, R>(f: F) -> R {
f()
}
-/// An arena that can hold objects of only one type.
-pub struct TypedArena<T> {
- /// A pointer to the next object to be allocated.
- ptr: Cell<*mut T>,
-
- /// A pointer to the end of the allocated area. When this pointer is
- /// reached, a new chunk is allocated.
- end: Cell<*mut T>,
-
- /// A vector of arena chunks.
- chunks: RefCell<Vec<ArenaChunk<T>>>,
-
- /// Marker indicating that dropping the arena causes its owned
- /// instances of `T` to be dropped.
- _own: PhantomData<T>,
-}
-
struct ArenaChunk<T = u8> {
/// The raw storage for the arena chunk.
storage: NonNull<[MaybeUninit<T>]>,
@@ -130,6 +113,23 @@ impl<T> ArenaChunk<T> {
const PAGE: usize = 4096;
const HUGE_PAGE: usize = 2 * 1024 * 1024;
+/// An arena that can hold objects of only one type.
+pub struct TypedArena<T> {
+ /// A pointer to the next object to be allocated.
+ ptr: Cell<*mut T>,
+
+ /// A pointer to the end of the allocated area. When this pointer is
+ /// reached, a new chunk is allocated.
+ end: Cell<*mut T>,
+
+ /// A vector of arena chunks.
+ chunks: RefCell<Vec<ArenaChunk<T>>>,
+
+ /// Marker indicating that dropping the arena causes its owned
+ /// instances of `T` to be dropped.
+ _own: PhantomData<T>,
+}
+
impl<T> Default for TypedArena<T> {
/// Creates a new `TypedArena`.
fn default() -> TypedArena<T> {
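For context, a minimal usage sketch of the relocated `TypedArena` (illustrative only, and assuming `rustc_arena` is available as a dependency, which in practice it is only inside the compiler): the arena hands out references that live as long as the arena itself, and the `_own: PhantomData<T>` marker is what records that dropping the arena drops every `T` it allocated.

use rustc_arena::TypedArena;

fn main() {
    // Every value allocated here lives until `arena` is dropped.
    let arena: TypedArena<String> = TypedArena::default();
    let one: &mut String = arena.alloc(String::from("hello"));
    one.push_str(", world");
    let many: &mut [String] = arena.alloc_from_iter((0..3).map(|i| i.to_string()));
    assert_eq!(many.len(), 3);
    assert_eq!(one.as_str(), "hello, world");
}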
@@ -144,77 +144,6 @@ impl<T> Default for TypedArena<T> {
}
}
-trait IterExt<T> {
- fn alloc_from_iter(self, arena: &TypedArena<T>) -> &mut [T];
-}
-
-impl<I, T> IterExt<T> for I
-where
- I: IntoIterator<Item = T>,
-{
- // This default collects into a `SmallVec` and then allocates by copying
- // from it. The specializations below for types like `Vec` are more
- // efficient, copying directly without the intermediate collecting step.
- // This default could be made more efficient, like
- // `DroplessArena::alloc_from_iter`, but it's not hot enough to bother.
- #[inline]
- default fn alloc_from_iter(self, arena: &TypedArena<T>) -> &mut [T] {
- let vec: SmallVec<[_; 8]> = self.into_iter().collect();
- vec.alloc_from_iter(arena)
- }
-}
-
-impl<T, const N: usize> IterExt<T> for std::array::IntoIter<T, N> {
- #[inline]
- fn alloc_from_iter(self, arena: &TypedArena<T>) -> &mut [T] {
- let len = self.len();
- if len == 0 {
- return &mut [];
- }
- // Move the content to the arena by copying and then forgetting it.
- unsafe {
- let start_ptr = arena.alloc_raw_slice(len);
- self.as_slice().as_ptr().copy_to_nonoverlapping(start_ptr, len);
- mem::forget(self);
- slice::from_raw_parts_mut(start_ptr, len)
- }
- }
-}
-
-impl<T> IterExt<T> for Vec<T> {
- #[inline]
- fn alloc_from_iter(mut self, arena: &TypedArena<T>) -> &mut [T] {
- let len = self.len();
- if len == 0 {
- return &mut [];
- }
- // Move the content to the arena by copying and then forgetting it.
- unsafe {
- let start_ptr = arena.alloc_raw_slice(len);
- self.as_ptr().copy_to_nonoverlapping(start_ptr, len);
- self.set_len(0);
- slice::from_raw_parts_mut(start_ptr, len)
- }
- }
-}
-
-impl<A: smallvec::Array> IterExt<A::Item> for SmallVec<A> {
- #[inline]
- fn alloc_from_iter(mut self, arena: &TypedArena<A::Item>) -> &mut [A::Item] {
- let len = self.len();
- if len == 0 {
- return &mut [];
- }
- // Move the content to the arena by copying and then forgetting it.
- unsafe {
- let start_ptr = arena.alloc_raw_slice(len);
- self.as_ptr().copy_to_nonoverlapping(start_ptr, len);
- self.set_len(0);
- slice::from_raw_parts_mut(start_ptr, len)
- }
- }
-}
-
impl<T> TypedArena<T> {
/// Allocates an object in the `TypedArena`, returning a reference to it.
#[inline]
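Connecting two parts of this diff: the `IterExt` trait deleted above was the crate's only user of specialization, via the `default fn` in its blanket impl, which is what lets the first hunk drop `#![feature(min_specialization)]`. As a reminder of that pattern, a minimal nightly-only sketch with illustrative names not taken from this file:

#![feature(min_specialization)]

trait Describe {
    fn describe(&self) -> &'static str;
}

impl<T> Describe for T {
    // `default` marks the method as overridable by more specific impls.
    default fn describe(&self) -> &'static str {
        "some value"
    }
}

impl Describe for u32 {
    fn describe(&self) -> &'static str {
        "a u32"
    }
}

fn main() {
    assert_eq!(1u32.describe(), "a u32");
    assert_eq!('x'.describe(), "some value");
}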
@@ -250,33 +179,55 @@ impl<T> TypedArena<T> {
available_bytes >= additional_bytes
}
- /// Ensures there's enough space in the current chunk to fit `len` objects.
#[inline]
- fn ensure_capacity(&self, additional: usize) {
- if !self.can_allocate(additional) {
- self.grow(additional);
- debug_assert!(self.can_allocate(additional));
- }
- }
-
- #[inline]
- unsafe fn alloc_raw_slice(&self, len: usize) -> *mut T {
+ fn alloc_raw_slice(&self, len: usize) -> *mut T {
assert!(mem::size_of::<T>() != 0);
assert!(len != 0);
- self.ensure_capacity(len);
+ // Ensure the current chunk can fit `len` objects.
+ if !self.can_allocate(len) {
+ self.grow(len);
+ debug_assert!(self.can_allocate(len));
+ }
let start_ptr = self.ptr.get();
- // SAFETY: `self.ensure_capacity` makes sure that there is enough space
- // for `len` elements.
+ // SAFETY: `can_allocate`/`grow` ensures that there is enough space for
+ // `len` elements.
unsafe { self.ptr.set(start_ptr.add(len)) };
start_ptr
}
#[inline]
pub fn alloc_from_iter<I: IntoIterator<Item = T>>(&self, iter: I) -> &mut [T] {
+ // This implementation is entirely separate to
+ // `DroplessArena::alloc_from_iter`, even though conceptually they
+ // are the same.
+ //
+ // `DroplessArena` (in the fast case) writes elements from the
+ // iterator one at a time into the allocated memory. That's easy
+ // because the elements don't implement `Drop`. But for `TypedArena`
+ // they do implement `Drop`, which means that if the iterator panics we
+ // could end up with some allocated-but-uninitialized elements, which
+ // will then cause UB in `TypedArena::drop`.
+ //
+ // Instead we use an approach where any iterator panic will occur
+ // before the memory is allocated. This function is much less hot than
+ // `DroplessArena::alloc_from_iter`, so it doesn't need to be
+ // hyper-optimized.
assert!(mem::size_of::<T>() != 0);
- iter.alloc_from_iter(self)
+
+ let mut vec: SmallVec<[_; 8]> = iter.into_iter().collect();
+ if vec.is_empty() {
+ return &mut [];
+ }
+ // Move the content to the arena by copying and then forgetting it.
+ let len = vec.len();
+ let start_ptr = self.alloc_raw_slice(len);
+ unsafe {
+ vec.as_ptr().copy_to_nonoverlapping(start_ptr, len);
+ vec.set_len(0);
+ slice::from_raw_parts_mut(start_ptr, len)
+ }
}
/// Grows the arena.
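The panic-safety reasoning in the new comment hinges on the fact that `TypedArena` runs destructors for everything it handed out, so every slot it has allocated must hold an initialized `T` by the time the arena is dropped. A small illustration of that ownership behaviour (the `Loud` type is purely illustrative):

use rustc_arena::TypedArena;

struct Loud(u32);

impl Drop for Loud {
    fn drop(&mut self) {
        println!("dropping Loud({})", self.0);
    }
}

fn main() {
    {
        let arena: TypedArena<Loud> = TypedArena::default();
        let xs = arena.alloc_from_iter((0..3).map(Loud));
        println!("allocated {} values; nothing dropped yet", xs.len());
    } // arena goes out of scope here and all three `Loud` destructors run
    println!("arena gone");
}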
@@ -407,6 +358,8 @@ impl Default for DroplessArena {
#[inline]
fn default() -> DroplessArena {
DroplessArena {
+ // We set both `start` and `end` to 0 so that the first call to
+ // alloc() will trigger a grow().
start: Cell::new(ptr::null_mut()),
end: Cell::new(ptr::null_mut()),
chunks: Default::default(),
@@ -415,9 +368,11 @@ impl Default for DroplessArena {
}
impl DroplessArena {
+ #[inline(never)]
+ #[cold]
fn grow(&self, layout: Layout) {
// Add some padding so we can align `self.end` while
- // stilling fitting in a `layout` allocation.
+ // still fitting in a `layout` allocation.
let additional = layout.size() + cmp::max(DROPLESS_ALIGNMENT, layout.align()) - 1;
unsafe {
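A quick worked example of the padding bound computed above, taking a layout of size 20 and alignment 16, and an 8-byte `DROPLESS_ALIGNMENT`, purely as assumed illustration values (the constant's real definition is elsewhere in lib.rs):

fn main() {
    // additional = layout.size() + max(DROPLESS_ALIGNMENT, layout.align()) - 1
    let (size, align, dropless_alignment) = (20usize, 16usize, 8usize); // assumed example values
    let additional = size + std::cmp::max(dropless_alignment, align) - 1;
    assert_eq!(additional, 35);
    // Worst case: up to 15 padding bytes to reach a 16-aligned address, and the
    // 20 payload bytes must still fit after it, so 35 bytes always suffice.
    println!("grow the chunk by at least {additional} bytes");
}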
@@ -441,7 +396,7 @@ impl DroplessArena {
let mut chunk = ArenaChunk::new(align_up(new_cap, PAGE));
self.start.set(chunk.start());
- // Align the end to DROPLESS_ALIGNMENT
+ // Align the end to DROPLESS_ALIGNMENT.
let end = align_down(chunk.end().addr(), DROPLESS_ALIGNMENT);
// Make sure we don't go past `start`. This should not happen since the allocation
@@ -454,55 +409,40 @@ impl DroplessArena {
}
}
- #[inline(never)]
- #[cold]
- fn grow_and_alloc_raw(&self, layout: Layout) -> *mut u8 {
- self.grow(layout);
- self.alloc_raw_without_grow(layout).unwrap()
- }
-
- #[inline(never)]
- #[cold]
- fn grow_and_alloc<T>(&self) -> *mut u8 {
- self.grow_and_alloc_raw(Layout::new::<T>())
- }
-
- /// Allocates a byte slice with specified layout from the current memory
- /// chunk. Returns `None` if there is no free space left to satisfy the
- /// request.
- #[inline]
- fn alloc_raw_without_grow(&self, layout: Layout) -> Option<*mut u8> {
- let start = self.start.get().addr();
- let old_end = self.end.get();
- let end = old_end.addr();
-
- // Align allocated bytes so that `self.end` stays aligned to DROPLESS_ALIGNMENT
- let bytes = align_up(layout.size(), DROPLESS_ALIGNMENT);
-
- // Tell LLVM that `end` is aligned to DROPLESS_ALIGNMENT
- unsafe { intrinsics::assume(end == align_down(end, DROPLESS_ALIGNMENT)) };
-
- let new_end = align_down(end.checked_sub(bytes)?, layout.align());
- if start <= new_end {
- let new_end = old_end.with_addr(new_end);
- // `new_end` is aligned to DROPLESS_ALIGNMENT as `align_down` preserves alignment
- // as both `end` and `bytes` are already aligned to DROPLESS_ALIGNMENT.
- self.end.set(new_end);
- Some(new_end)
- } else {
- None
- }
- }
-
#[inline]
pub fn alloc_raw(&self, layout: Layout) -> *mut u8 {
assert!(layout.size() != 0);
- if let Some(a) = self.alloc_raw_without_grow(layout) {
- return a;
+
+ // This loop executes once or twice: if allocation fails the first
+ // time, the `grow` ensures it will succeed the second time.
+ loop {
+ let start = self.start.get().addr();
+ let old_end = self.end.get();
+ let end = old_end.addr();
+
+ // Align allocated bytes so that `self.end` stays aligned to
+ // DROPLESS_ALIGNMENT.
+ let bytes = align_up(layout.size(), DROPLESS_ALIGNMENT);
+
+ // Tell LLVM that `end` is aligned to DROPLESS_ALIGNMENT.
+ unsafe { intrinsics::assume(end == align_down(end, DROPLESS_ALIGNMENT)) };
+
+ if let Some(sub) = end.checked_sub(bytes) {
+ let new_end = align_down(sub, layout.align());
+ if start <= new_end {
+ let new_end = old_end.with_addr(new_end);
+ // `new_end` is aligned to DROPLESS_ALIGNMENT as `align_down`
+ // preserves alignment as both `end` and `bytes` are already
+ // aligned to DROPLESS_ALIGNMENT.
+ self.end.set(new_end);
+ return new_end;
+ }
+ }
+
+ // No free space left. Allocate a new chunk to satisfy the request.
+ // On failure the grow will panic or abort.
+ self.grow(layout);
}
- // No free space left. Allocate a new chunk to satisfy the request.
- // On failure the grow will panic or abort.
- self.grow_and_alloc_raw(layout)
}
#[inline]
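To make the new single-loop `alloc_raw` easier to follow, here is a simplified, pointer-free model of its bump-downward arithmetic using plain `usize` addresses; `DROPLESS_ALIGNMENT` is taken to be 8 purely as an example, and `align_up`/`align_down` are sketched here with the usual power-of-two bit tricks (their real definitions live elsewhere in lib.rs):

// Sketch of the power-of-two helpers used by `alloc_raw` (illustrative copies).
fn align_up(val: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    (val + align - 1) & !(align - 1)
}

fn align_down(val: usize, align: usize) -> usize {
    debug_assert!(align.is_power_of_two());
    val & !(align - 1)
}

/// Models one iteration of the loop: try to carve `size` bytes (with the given
/// alignment) off the *end* of a chunk spanning `start..end`. Returns the new
/// end address on success, or `None` when the caller would fall through to `grow`.
fn bump_down(start: usize, end: usize, size: usize, align: usize) -> Option<usize> {
    const DROPLESS_ALIGNMENT: usize = 8; // assumed example value
    let bytes = align_up(size, DROPLESS_ALIGNMENT); // keep `end` DROPLESS_ALIGNMENT-aligned
    let new_end = align_down(end.checked_sub(bytes)?, align);
    (start <= new_end).then_some(new_end)
}

fn main() {
    // 20 bytes round up to 24; 0x2000 - 24 = 0x1fe8; aligned down to 16 gives 0x1fe0.
    assert_eq!(bump_down(0x1000, 0x2000, 20, 16), Some(0x1fe0));
    // A request larger than the chunk fails, which is when `alloc_raw` calls `grow`.
    assert_eq!(bump_down(0x1000, 0x2000, 0x2000, 16), None);
}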
@@ -510,13 +450,7 @@ impl DroplessArena {
assert!(!mem::needs_drop::<T>());
assert!(mem::size_of::<T>() != 0);
- let mem = if let Some(a) = self.alloc_raw_without_grow(Layout::for_value::<T>(&object)) {
- a
- } else {
- // No free space left. Allocate a new chunk to satisfy the request.
- // On failure the grow will panic or abort.
- self.grow_and_alloc::<T>()
- } as *mut T;
+ let mem = self.alloc_raw(Layout::new::<T>()) as *mut T;
unsafe {
// Write into uninitialized memory.
@@ -713,10 +647,10 @@ pub macro declare_arena([$($a:tt $name:ident: $ty:ty,)*]) {
}
#[allow(clippy::mut_from_ref)]
- pub fn alloc_from_iter<'a, T: ArenaAllocatable<'tcx, C>, C>(
- &'a self,
+ pub fn alloc_from_iter<T: ArenaAllocatable<'tcx, C>, C>(
+ &self,
iter: impl ::std::iter::IntoIterator<Item = T>,
- ) -> &'a mut [T] {
+ ) -> &mut [T] {
T::allocate_from_iter(self, iter)
}
}