author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 02:49:50 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 02:49:50 +0000
commit     9835e2ae736235810b4ea1c162ca5e65c547e770 (patch)
tree       3fcebf40ed70e581d776a8a4c65923e8ec20e026 /compiler/rustc_arena
parent     Releasing progress-linux version 1.70.0+dfsg2-1~progress7.99u1. (diff)
Merging upstream version 1.71.1+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'compiler/rustc_arena')
-rw-r--r--  compiler/rustc_arena/src/lib.rs | 48 ++++++++++++++++++++++++++++++--------------
1 file changed, 34 insertions(+), 14 deletions(-)
diff --git a/compiler/rustc_arena/src/lib.rs b/compiler/rustc_arena/src/lib.rs
index 345e058e1..6e15f06a7 100644
--- a/compiler/rustc_arena/src/lib.rs
+++ b/compiler/rustc_arena/src/lib.rs
@@ -20,6 +20,7 @@
 #![feature(rustc_attrs)]
 #![cfg_attr(test, feature(test))]
 #![feature(strict_provenance)]
+#![deny(unsafe_op_in_unsafe_fn)]
 #![deny(rustc::untranslatable_diagnostic)]
 #![deny(rustc::diagnostic_outside_of_impl)]
 #![allow(clippy::mut_from_ref)] // Arena allocators are one of the places where this pattern is fine.
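
This one-line addition drives most of the patch: with `unsafe_op_in_unsafe_fn` denied, the body of an `unsafe fn` is no longer treated as one big implicit `unsafe` block, so each unsafe operation needs its own explicit `unsafe {}` with a SAFETY justification. A minimal standalone sketch of what the lint enforces (not part of the patch):

```rust
#![deny(unsafe_op_in_unsafe_fn)]

/// # Safety
///
/// `ptr` must be non-null, aligned, and point to a live `u32`.
unsafe fn read_value(ptr: *const u32) -> u32 {
    // Without the explicit block below, `ptr.read()` would be rejected:
    // the unsafe fn's body no longer grants blanket unsafe permission.
    // SAFETY: guaranteed by this function's documented contract.
    unsafe { ptr.read() }
}

fn main() {
    let x = 7u32;
    // SAFETY: `&x` is a valid, aligned pointer to a live `u32`.
    assert_eq!(unsafe { read_value(&x) }, 7);
}
```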
@@ -74,19 +75,27 @@ impl<T> ArenaChunk<T> {
     #[inline]
     unsafe fn new(capacity: usize) -> ArenaChunk<T> {
         ArenaChunk {
-            storage: NonNull::new(Box::into_raw(Box::new_uninit_slice(capacity))).unwrap(),
+            storage: NonNull::from(Box::leak(Box::new_uninit_slice(capacity))),
             entries: 0,
         }
     }
 
     /// Destroys this arena chunk.
+    ///
+    /// # Safety
+    ///
+    /// The caller must ensure that `len` elements of this chunk have been initialized.
     #[inline]
     unsafe fn destroy(&mut self, len: usize) {
         // The branch on needs_drop() is an -O1 performance optimization.
-        // Without the branch, dropping TypedArena<u8> takes linear time.
+        // Without the branch, dropping TypedArena<T> takes linear time.
         if mem::needs_drop::<T>() {
-            let slice = &mut *(self.storage.as_mut());
-            ptr::drop_in_place(MaybeUninit::slice_assume_init_mut(&mut slice[..len]));
+            // SAFETY: The caller must ensure that `len` elements of this chunk have
+            // been initialized.
+            unsafe {
+                let slice = self.storage.as_mut();
+                ptr::drop_in_place(MaybeUninit::slice_assume_init_mut(&mut slice[..len]));
+            }
         }
     }
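
Two separate improvements land in this hunk. First, `NonNull::new(...).unwrap()` becomes `NonNull::from(Box::leak(...))`: `Box::leak` yields a `&mut [MaybeUninit<T>]`, which is non-null by construction, so the runtime `unwrap` disappears. Second, `destroy` gains a written safety contract and an explicit `unsafe` block that cites it. A standalone sketch of the two equivalent `NonNull` constructions (`Box::new_uninit_slice` is feature-gated in this 1.71-era tree but stable since Rust 1.82):

```rust
use std::mem::MaybeUninit;
use std::ptr::NonNull;

fn main() {
    // Old style: raw pointer plus a runtime null check, even though
    // Box::into_raw can never return null.
    let a: NonNull<[MaybeUninit<u8>]> =
        NonNull::new(Box::into_raw(Box::new_uninit_slice(16))).unwrap();

    // New style: Box::leak returns &mut [MaybeUninit<u8>], and references
    // are statically non-null, so no unwrap is needed.
    let b: NonNull<[MaybeUninit<u8>]> = NonNull::from(Box::leak(Box::new_uninit_slice(16)));

    assert_eq!(a.len(), b.len());

    // Both slices were leaked above; reclaim them to avoid leaking for real.
    // SAFETY: both pointers came from Box allocations and are owned uniquely.
    unsafe {
        drop(Box::from_raw(a.as_ptr()));
        drop(Box::from_raw(b.as_ptr()));
    }
}
```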
@@ -104,7 +113,7 @@ impl<T> ArenaChunk<T> {
                 // A pointer as large as possible for zero-sized elements.
                 ptr::invalid_mut(!0)
             } else {
-                self.start().add((*self.storage.as_ptr()).len())
+                self.start().add(self.storage.len())
             }
         }
     }
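
`NonNull::<[T]>::len` (stable since Rust 1.63) reads the length from the fat pointer's metadata without dereferencing, so the old `(*self.storage.as_ptr()).len()` dance inside `unsafe` collapses into a safe call. A standalone sketch:

```rust
use std::ptr::NonNull;

fn main() {
    let mut data = [1u8, 2, 3, 4];
    let p: NonNull<[u8]> = NonNull::from(&mut data[..]);

    // len() inspects only the pointer metadata and never dereferences,
    // so it is safe even for slice pointers that dangle.
    assert_eq!(p.len(), 4);
}
```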
@@ -255,7 +264,9 @@ impl<T> TypedArena<T> {
         self.ensure_capacity(len);
 
         let start_ptr = self.ptr.get();
-        self.ptr.set(start_ptr.add(len));
+        // SAFETY: `self.ensure_capacity` makes sure that there is enough space
+        // for `len` elements.
+        unsafe { self.ptr.set(start_ptr.add(len)) };
         start_ptr
     }
@@ -288,7 +299,7 @@ impl<T> TypedArena<T> {
                 // If the previous chunk's len is less than HUGE_PAGE
                 // bytes, then this chunk will be least double the previous
                 // chunk's size.
-                new_cap = (*last_chunk.storage.as_ptr()).len().min(HUGE_PAGE / elem_size / 2);
+                new_cap = last_chunk.storage.len().min(HUGE_PAGE / elem_size / 2);
                 new_cap *= 2;
             } else {
                 new_cap = PAGE / elem_size;
@@ -396,7 +407,7 @@ impl DroplessArena {
                 // If the previous chunk's len is less than HUGE_PAGE
                 // bytes, then this chunk will be least double the previous
                 // chunk's size.
-                new_cap = (*last_chunk.storage.as_ptr()).len().min(HUGE_PAGE / 2);
+                new_cap = last_chunk.storage.len().min(HUGE_PAGE / 2);
                 new_cap *= 2;
             } else {
                 new_cap = PAGE;
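
Both `grow` paths (this hunk and the `TypedArena` one above) share the same growth policy: each chunk is double the previous one, but the doubling input is clamped to `HUGE_PAGE / 2` so no chunk ever exceeds `HUGE_PAGE`; `TypedArena` additionally divides by the element size. A quick check of the arithmetic using the crate's constants (`PAGE` = 4096 bytes, `HUGE_PAGE` = 2 MiB), in byte terms as in `DroplessArena`:

```rust
const PAGE: usize = 4096; // first chunk: one page
const HUGE_PAGE: usize = 2 * 1024 * 1024; // chunk-size ceiling

fn next_cap(prev_bytes: Option<usize>) -> usize {
    match prev_bytes {
        // Clamp the doubling input to HUGE_PAGE / 2 so the result
        // never exceeds HUGE_PAGE.
        Some(prev) => prev.min(HUGE_PAGE / 2) * 2,
        None => PAGE,
    }
}

fn main() {
    // 4 KiB, 8 KiB, 16 KiB, ... up to the 2 MiB ceiling, then steady.
    let mut cap = next_cap(None);
    let mut sizes = vec![cap];
    for _ in 0..12 {
        cap = next_cap(Some(cap));
        sizes.push(cap);
    }
    assert_eq!(sizes[0], 4096);
    assert_eq!(sizes[1], 8192);
    assert_eq!(*sizes.last().unwrap(), HUGE_PAGE);
}
```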
@@ -483,6 +494,10 @@ impl DroplessArena {
         }
     }
 
+    /// # Safety
+    ///
+    /// The caller must ensure that `mem` is valid for writes up to
+    /// `size_of::<T>() * len`.
     #[inline]
     unsafe fn write_from_iter<T, I: Iterator<Item = T>>(
         &self,
@@ -494,13 +509,18 @@
         // Use a manual loop since LLVM manages to optimize it better for
         // slice iterators
         loop {
-            let value = iter.next();
-            if i >= len || value.is_none() {
-                // We only return as many items as the iterator gave us, even
-                // though it was supposed to give us `len`
-                return slice::from_raw_parts_mut(mem, i);
+            // SAFETY: The caller must ensure that `mem` is valid for writes up to
+            // `size_of::<T>() * len`.
+            unsafe {
+                match iter.next() {
+                    Some(value) if i < len => mem.add(i).write(value),
+                    Some(_) | None => {
+                        // We only return as many items as the iterator gave us, even
+                        // though it was supposed to give us `len`
+                        return slice::from_raw_parts_mut(mem, i);
+                    }
+                }
             }
-            ptr::write(mem.add(i), value.unwrap());
             i += 1;
         }
     }
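
The rewrite folds the old `i >= len || value.is_none()` test and the trailing `value.unwrap()` into a single `match` with a guard: the `Some` payload moves straight into `mem.add(i).write(value)`, no unwrap can panic, and the whole match sits inside the one `unsafe` block the lint now requires. A standalone sketch of the same fill-from-iterator pattern (a hypothetical free function, not the arena's method):

```rust
use std::slice;

/// Writes up to `len` items from `iter` into `mem`, returning the filled prefix.
///
/// # Safety
///
/// `mem` must be valid for writes of `len` elements of type `T`.
unsafe fn write_from_iter<'a, T, I: Iterator<Item = T>>(
    mut iter: I,
    len: usize,
    mem: *mut T,
) -> &'a mut [T] {
    let mut i = 0;
    loop {
        // SAFETY: the match guard checks `i < len`, and the caller
        // guarantees that `mem` is valid for `len` writes.
        unsafe {
            match iter.next() {
                Some(value) if i < len => mem.add(i).write(value),
                // Iterator exhausted (or overlong): return only what was written.
                Some(_) | None => return slice::from_raw_parts_mut(mem, i),
            }
        }
        i += 1;
    }
}

fn main() {
    let mut buf = [0u32; 4];
    // SAFETY: `buf` provides valid storage for four u32 writes.
    let filled = unsafe { write_from_iter(1..=3u32, buf.len(), buf.as_mut_ptr()) };
    assert_eq!(filled, &[1, 2, 3]);
}
```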