author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:02:58 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:02:58 +0000
commit    698f8c2f01ea549d77d7dc3338a12e04c11057b9 (patch)
tree      173a775858bd501c378080a10dca74132f05bc50 /library/core/src/alloc
parent    Initial commit. (diff)
download  rustc-698f8c2f01ea549d77d7dc3338a12e04c11057b9.tar.xz
          rustc-698f8c2f01ea549d77d7dc3338a12e04c11057b9.zip

Adding upstream version 1.64.0+dfsg1. (upstream/1.64.0+dfsg1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'library/core/src/alloc')
-rw-r--r--  library/core/src/alloc/global.rs  275
-rw-r--r--  library/core/src/alloc/layout.rs  443
-rw-r--r--  library/core/src/alloc/mod.rs     410
3 files changed, 1128 insertions, 0 deletions
diff --git a/library/core/src/alloc/global.rs b/library/core/src/alloc/global.rs
new file mode 100644
index 000000000..887246c60
--- /dev/null
+++ b/library/core/src/alloc/global.rs
@@ -0,0 +1,275 @@
+use crate::alloc::Layout;
+use crate::cmp;
+use crate::ptr;
+
+/// A memory allocator that can be registered as the standard library’s default
+/// through the `#[global_allocator]` attribute.
+///
+/// Some of the methods require that a memory block be *currently
+/// allocated* via an allocator. This means that:
+///
+/// * the starting address for that memory block was returned by a
+/// previous call to an allocation method such as `alloc`, and
+///
+/// * the memory block has not been subsequently deallocated, where
+/// blocks are deallocated either by being passed to a deallocation
+/// method such as `dealloc` or by being
+/// passed to a reallocation method that returns a non-null pointer.
+///
+/// # Example
+///
+/// ```
+/// use std::alloc::{GlobalAlloc, Layout};
+/// use std::cell::UnsafeCell;
+/// use std::ptr::null_mut;
+/// use std::sync::atomic::{
+/// AtomicUsize,
+/// Ordering::{Acquire, SeqCst},
+/// };
+///
+/// const ARENA_SIZE: usize = 128 * 1024;
+/// const MAX_SUPPORTED_ALIGN: usize = 4096;
+/// #[repr(C, align(4096))] // 4096 == MAX_SUPPORTED_ALIGN
+/// struct SimpleAllocator {
+/// arena: UnsafeCell<[u8; ARENA_SIZE]>,
+/// remaining: AtomicUsize, // we allocate from the top, counting down
+/// }
+///
+/// #[global_allocator]
+/// static ALLOCATOR: SimpleAllocator = SimpleAllocator {
+/// arena: UnsafeCell::new([0x55; ARENA_SIZE]),
+/// remaining: AtomicUsize::new(ARENA_SIZE),
+/// };
+///
+/// unsafe impl Sync for SimpleAllocator {}
+///
+/// unsafe impl GlobalAlloc for SimpleAllocator {
+/// unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
+/// let size = layout.size();
+/// let align = layout.align();
+///
+/// // `Layout` contract forbids making a `Layout` with align=0, or align not power of 2.
+/// // So we can safely use a mask to ensure alignment without worrying about UB.
+/// let align_mask_to_round_down = !(align - 1);
+///
+/// if align > MAX_SUPPORTED_ALIGN {
+/// return null_mut();
+/// }
+///
+/// let mut allocated = 0;
+/// if self
+/// .remaining
+/// .fetch_update(SeqCst, SeqCst, |mut remaining| {
+/// if size > remaining {
+/// return None;
+/// }
+/// remaining -= size;
+/// remaining &= align_mask_to_round_down;
+/// allocated = remaining;
+/// Some(remaining)
+/// })
+/// .is_err()
+/// {
+/// return null_mut();
+/// };
+/// (self.arena.get() as *mut u8).add(allocated)
+/// }
+/// unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {}
+/// }
+///
+/// fn main() {
+/// let _s = format!("allocating a string!");
+/// let currently = ALLOCATOR.remaining.load(Acquire);
+/// println!("allocated so far: {}", ARENA_SIZE - currently);
+/// }
+/// ```
+///
+/// # Safety
+///
+/// The `GlobalAlloc` trait is an `unsafe` trait for a number of reasons, and
+/// implementors must ensure that they adhere to these contracts:
+///
+/// * It's undefined behavior if global allocators unwind. This restriction may
+/// be lifted in the future, but currently a panic from any of these
+/// functions may lead to memory unsafety.
+///
+/// * `Layout` queries and calculations in general must be correct. Callers of
+/// this trait are allowed to rely on the contracts defined on each method,
+/// and implementors must ensure such contracts remain true.
+///
+/// * You must not rely on allocations actually happening, even if there are explicit
+/// heap allocations in the source. The optimizer may detect unused allocations that it can either
+/// eliminate entirely or move to the stack and thus never invoke the allocator. The
+/// optimizer may further assume that allocation is infallible, so code that used to fail due
+/// to allocator failures may now suddenly work because the optimizer worked around the
+/// need for an allocation. More concretely, the following code example is unsound, irrespective
+/// of whether your custom allocator allows counting how many allocations have happened.
+///
+/// ```rust,ignore (unsound and has placeholders)
+/// drop(Box::new(42));
+/// let number_of_heap_allocs = /* call private allocator API */;
+/// unsafe { std::intrinsics::assume(number_of_heap_allocs > 0); }
+/// ```
+///
+/// Note that the optimizations mentioned above are not the only
+/// optimizations that can be applied. You may generally not rely on heap allocations
+/// happening if they can be removed without changing program behavior.
+/// Whether allocations happen or not is not part of the program behavior, even if it
+/// could be detected via an allocator that tracks allocations by printing or otherwise
+/// having side effects.
+#[stable(feature = "global_alloc", since = "1.28.0")]
+pub unsafe trait GlobalAlloc {
+ /// Allocate memory as described by the given `layout`.
+ ///
+ /// Returns a pointer to newly-allocated memory,
+ /// or null to indicate allocation failure.
+ ///
+ /// # Safety
+ ///
+ /// This function is unsafe because undefined behavior can result
+ /// if the caller does not ensure that `layout` has non-zero size.
+ ///
+ /// (Extension subtraits might provide more specific bounds on
+ /// behavior, e.g., guarantee a sentinel address or a null pointer
+ /// in response to a zero-size allocation request.)
+ ///
+ /// The allocated block of memory may or may not be initialized.
+ ///
+ /// # Errors
+ ///
+ /// Returning a null pointer indicates that either memory is exhausted
+ /// or `layout` does not meet this allocator's size or alignment constraints.
+ ///
+ /// Implementations are encouraged to return null on memory
+ /// exhaustion rather than aborting, but this is not
+ /// a strict requirement. (Specifically: it is *legal* to
+ /// implement this trait atop an underlying native allocation
+ /// library that aborts on memory exhaustion.)
+ ///
+ /// Clients wishing to abort computation in response to an
+ /// allocation error are encouraged to call the [`handle_alloc_error`] function,
+ /// rather than directly invoking `panic!` or similar.
+ ///
+ /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
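+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch of allocating and freeing one `u64` through the
+ /// `System` allocator from `std`, shown here purely for illustration:
+ ///
+ /// ```
+ /// use std::alloc::{GlobalAlloc, Layout, System};
+ ///
+ /// unsafe {
+ ///     let layout = Layout::new::<u64>();
+ ///     let ptr = System.alloc(layout);
+ ///     if !ptr.is_null() {
+ ///         // The block is uninitialized, so write before reading.
+ ///         ptr.cast::<u64>().write(42);
+ ///         assert_eq!(ptr.cast::<u64>().read(), 42);
+ ///         System.dealloc(ptr, layout);
+ ///     }
+ /// }
+ /// ```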
+ #[stable(feature = "global_alloc", since = "1.28.0")]
+ unsafe fn alloc(&self, layout: Layout) -> *mut u8;
+
+ /// Deallocate the block of memory at the given `ptr` pointer with the given `layout`.
+ ///
+ /// # Safety
+ ///
+ /// This function is unsafe because undefined behavior can result
+ /// if the caller does not ensure all of the following:
+ ///
+ /// * `ptr` must denote a block of memory currently allocated via
+ /// this allocator,
+ ///
+ /// * `layout` must be the same layout that was used
+ /// to allocate that block of memory.
+ #[stable(feature = "global_alloc", since = "1.28.0")]
+ unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout);
+
+ /// Behaves like `alloc`, but also ensures that the contents
+ /// are set to zero before being returned.
+ ///
+ /// # Safety
+ ///
+ /// This function is unsafe for the same reasons that `alloc` is.
+ /// However, the allocated block of memory is guaranteed to be zero-initialized.
+ ///
+ /// # Errors
+ ///
+ /// Returning a null pointer indicates that either memory is exhausted
+ /// or `layout` does not meet this allocator's size or alignment constraints,
+ /// just as in `alloc`.
+ ///
+ /// Clients wishing to abort computation in response to an
+ /// allocation error are encouraged to call the [`handle_alloc_error`] function,
+ /// rather than directly invoking `panic!` or similar.
+ ///
+ /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
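+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch using the `System` allocator from `std`, shown here
+ /// purely for illustration:
+ ///
+ /// ```
+ /// use std::alloc::{GlobalAlloc, Layout, System};
+ ///
+ /// unsafe {
+ ///     let layout = Layout::array::<u8>(16).unwrap();
+ ///     let ptr = System.alloc_zeroed(layout);
+ ///     if !ptr.is_null() {
+ ///         // Every byte of the returned block is zero.
+ ///         assert!(std::slice::from_raw_parts(ptr, 16).iter().all(|&b| b == 0));
+ ///         System.dealloc(ptr, layout);
+ ///     }
+ /// }
+ /// ```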
+ #[stable(feature = "global_alloc", since = "1.28.0")]
+ unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
+ let size = layout.size();
+ // SAFETY: the safety contract for `alloc` must be upheld by the caller.
+ let ptr = unsafe { self.alloc(layout) };
+ if !ptr.is_null() {
+ // SAFETY: as allocation succeeded, the region from `ptr`
+ // of size `size` is guaranteed to be valid for writes.
+ unsafe { ptr::write_bytes(ptr, 0, size) };
+ }
+ ptr
+ }
+
+ /// Shrink or grow a block of memory to the given `new_size`.
+ /// The block is described by the given `ptr` pointer and `layout`.
+ ///
+ /// If this returns a non-null pointer, then ownership of the memory block
+ /// referenced by `ptr` has been transferred to this allocator.
+ /// The memory may or may not have been deallocated, and should be
+ /// considered unusable. The new memory block is allocated with `layout`,
+ /// but with the `size` updated to `new_size`. This new layout should be
+ /// used when deallocating the new memory block with `dealloc`. The range
+ /// `0..min(layout.size(), new_size)` of the new memory block is
+ /// guaranteed to have the same values as the original block.
+ ///
+ /// If this method returns null, then ownership of the memory
+ /// block has not been transferred to this allocator, and the
+ /// contents of the memory block are unaltered.
+ ///
+ /// # Safety
+ ///
+ /// This function is unsafe because undefined behavior can result
+ /// if the caller does not ensure all of the following:
+ ///
+ /// * `ptr` must be currently allocated via this allocator,
+ ///
+ /// * `layout` must be the same layout that was used
+ /// to allocate that block of memory,
+ ///
+ /// * `new_size` must be greater than zero.
+ ///
+ /// * `new_size`, when rounded up to the nearest multiple of `layout.align()`,
+ /// must not overflow (i.e., the rounded value must be less than or equal to `usize::MAX`).
+ ///
+ /// (Extension subtraits might provide more specific bounds on
+ /// behavior, e.g., guarantee a sentinel address or a null pointer
+ /// in response to a zero-size allocation request.)
+ ///
+ /// # Errors
+ ///
+ /// Returns null if the new layout does not meet the size
+ /// and alignment constraints of the allocator, or if reallocation
+ /// otherwise fails.
+ ///
+ /// Implementations are encouraged to return null on memory
+ /// exhaustion rather than panicking or aborting, but this is not
+ /// a strict requirement. (Specifically: it is *legal* to
+ /// implement this trait atop an underlying native allocation
+ /// library that aborts on memory exhaustion.)
+ ///
+ /// Clients wishing to abort computation in response to a
+ /// reallocation error are encouraged to call the [`handle_alloc_error`] function,
+ /// rather than directly invoking `panic!` or similar.
+ ///
+ /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
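+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch of growing a block through the `System` allocator from
+ /// `std`, shown here purely for illustration:
+ ///
+ /// ```
+ /// use std::alloc::{GlobalAlloc, Layout, System};
+ ///
+ /// unsafe {
+ ///     let layout = Layout::array::<u8>(4).unwrap();
+ ///     let ptr = System.alloc(layout);
+ ///     if !ptr.is_null() {
+ ///         ptr.write_bytes(7, 4);
+ ///         let new_ptr = System.realloc(ptr, layout, 8);
+ ///         if new_ptr.is_null() {
+ ///             // Reallocation failed; the original block is still valid.
+ ///             System.dealloc(ptr, layout);
+ ///         } else {
+ ///             // The first four bytes were copied over.
+ ///             assert!(std::slice::from_raw_parts(new_ptr, 4).iter().all(|&b| b == 7));
+ ///             System.dealloc(new_ptr, Layout::array::<u8>(8).unwrap());
+ ///         }
+ ///     }
+ /// }
+ /// ```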
+ #[stable(feature = "global_alloc", since = "1.28.0")]
+ unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
+ // SAFETY: the caller must ensure that the `new_size` does not overflow.
+ // `layout.align()` comes from a `Layout` and is thus guaranteed to be valid.
+ let new_layout = unsafe { Layout::from_size_align_unchecked(new_size, layout.align()) };
+ // SAFETY: the caller must ensure that `new_size` is greater than zero, so `new_layout` has non-zero size.
+ let new_ptr = unsafe { self.alloc(new_layout) };
+ if !new_ptr.is_null() {
+ // SAFETY: the previously allocated block cannot overlap the newly allocated block.
+ // The safety contract for `dealloc` must be upheld by the caller.
+ unsafe {
+ ptr::copy_nonoverlapping(ptr, new_ptr, cmp::min(layout.size(), new_size));
+ self.dealloc(ptr, layout);
+ }
+ }
+ new_ptr
+ }
+}
diff --git a/library/core/src/alloc/layout.rs b/library/core/src/alloc/layout.rs
new file mode 100644
index 000000000..2f378836c
--- /dev/null
+++ b/library/core/src/alloc/layout.rs
@@ -0,0 +1,443 @@
+use crate::cmp;
+use crate::fmt;
+use crate::mem::{self, ValidAlign};
+use crate::ptr::NonNull;
+
+// While this function is used in one place and its implementation
+// could be inlined, the previous attempts to do so made rustc
+// slower:
+//
+// * https://github.com/rust-lang/rust/pull/72189
+// * https://github.com/rust-lang/rust/pull/79827
+const fn size_align<T>() -> (usize, usize) {
+ (mem::size_of::<T>(), mem::align_of::<T>())
+}
+
+/// Layout of a block of memory.
+///
+/// An instance of `Layout` describes a particular layout of memory.
+/// You build a `Layout` up as an input to give to an allocator.
+///
+/// All layouts have an associated size and a power-of-two alignment.
+///
+/// (Note that layouts are *not* required to have non-zero size,
+/// even though `GlobalAlloc` requires that all memory requests
+/// be non-zero in size. A caller must either ensure that conditions
+/// like this are met, use specific allocators with looser
+/// requirements, or use the more lenient `Allocator` interface.)
+#[stable(feature = "alloc_layout", since = "1.28.0")]
+#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
+#[lang = "alloc_layout"]
+pub struct Layout {
+ // size of the requested block of memory, measured in bytes.
+ size: usize,
+
+ // alignment of the requested block of memory, measured in bytes.
+ // we ensure that this is always a power of two, because APIs
+ // like `posix_memalign` require it and it is a reasonable
+ // constraint to impose on Layout constructors.
+ //
+ // (However, we do not analogously require `align >= sizeof(void*)`,
+ // even though that is *also* a requirement of `posix_memalign`.)
+ align: ValidAlign,
+}
+
+impl Layout {
+ /// Constructs a `Layout` from a given `size` and `align`,
+ /// or returns `LayoutError` if any of the following conditions
+ /// are not met:
+ ///
+ /// * `align` must not be zero,
+ ///
+ /// * `align` must be a power of two,
+ ///
+ /// * `size`, when rounded up to the nearest multiple of `align`,
+ /// must not overflow (i.e., the rounded value must be less than
+ /// or equal to `usize::MAX`).
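+ ///
+ /// # Examples
+ ///
+ /// A short illustration of the checks listed above:
+ ///
+ /// ```
+ /// use std::alloc::Layout;
+ ///
+ /// // A valid size/alignment pair.
+ /// let layout = Layout::from_size_align(64, 8).unwrap();
+ /// assert_eq!((layout.size(), layout.align()), (64, 8));
+ ///
+ /// // `align` must be a non-zero power of two.
+ /// assert!(Layout::from_size_align(64, 0).is_err());
+ /// assert!(Layout::from_size_align(64, 3).is_err());
+ ///
+ /// // `size` rounded up to a multiple of `align` must not overflow.
+ /// assert!(Layout::from_size_align(usize::MAX, 16).is_err());
+ /// ```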
+ #[stable(feature = "alloc_layout", since = "1.28.0")]
+ #[rustc_const_stable(feature = "const_alloc_layout_size_align", since = "1.50.0")]
+ #[inline]
+ pub const fn from_size_align(size: usize, align: usize) -> Result<Self, LayoutError> {
+ if !align.is_power_of_two() {
+ return Err(LayoutError);
+ }
+
+ // (power-of-two implies align != 0.)
+
+ // Rounded up size is:
+ // size_rounded_up = (size + align - 1) & !(align - 1);
+ //
+ // We know from above that align != 0. If adding (align - 1)
+ // does not overflow, then rounding up will be fine.
+ //
+ // Conversely, &-masking with !(align - 1) will subtract off
+ // only low-order-bits. Thus if overflow occurs with the sum,
+ // the &-mask cannot subtract enough to undo that overflow.
+ //
+ // Above implies that checking for summation overflow is both
+ // necessary and sufficient.
+ if size > usize::MAX - (align - 1) {
+ return Err(LayoutError);
+ }
+
+ // SAFETY: the conditions for `from_size_align_unchecked` have been
+ // checked above.
+ unsafe { Ok(Layout::from_size_align_unchecked(size, align)) }
+ }
+
+ /// Creates a layout, bypassing all checks.
+ ///
+ /// # Safety
+ ///
+ /// This function is unsafe as it does not verify the preconditions from
+ /// [`Layout::from_size_align`].
+ #[stable(feature = "alloc_layout", since = "1.28.0")]
+ #[rustc_const_stable(feature = "const_alloc_layout_unchecked", since = "1.36.0")]
+ #[must_use]
+ #[inline]
+ pub const unsafe fn from_size_align_unchecked(size: usize, align: usize) -> Self {
+ // SAFETY: the caller must ensure that `align` is a power of two.
+ Layout { size, align: unsafe { ValidAlign::new_unchecked(align) } }
+ }
+
+ /// The minimum size in bytes for a memory block of this layout.
+ #[stable(feature = "alloc_layout", since = "1.28.0")]
+ #[rustc_const_stable(feature = "const_alloc_layout_size_align", since = "1.50.0")]
+ #[must_use]
+ #[inline]
+ pub const fn size(&self) -> usize {
+ self.size
+ }
+
+ /// The minimum byte alignment for a memory block of this layout.
+ #[stable(feature = "alloc_layout", since = "1.28.0")]
+ #[rustc_const_stable(feature = "const_alloc_layout_size_align", since = "1.50.0")]
+ #[must_use = "this returns the minimum alignment, \
+ without modifying the layout"]
+ #[inline]
+ pub const fn align(&self) -> usize {
+ self.align.as_nonzero().get()
+ }
+
+ /// Constructs a `Layout` suitable for holding a value of type `T`.
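+ ///
+ /// # Examples
+ ///
+ /// A minimal example of inspecting the resulting layout:
+ ///
+ /// ```
+ /// use std::alloc::Layout;
+ /// use std::mem;
+ ///
+ /// let layout = Layout::new::<u32>();
+ /// assert_eq!(layout.size(), mem::size_of::<u32>());
+ /// assert_eq!(layout.align(), mem::align_of::<u32>());
+ /// ```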
+ #[stable(feature = "alloc_layout", since = "1.28.0")]
+ #[rustc_const_stable(feature = "alloc_layout_const_new", since = "1.42.0")]
+ #[must_use]
+ #[inline]
+ pub const fn new<T>() -> Self {
+ let (size, align) = size_align::<T>();
+ // SAFETY: the align is guaranteed by Rust to be a power of two and
+ // the size+align combo is guaranteed to fit in our address space. As a
+ // result use the unchecked constructor here to avoid inserting code
+ // that panics if it isn't optimized well enough.
+ unsafe { Layout::from_size_align_unchecked(size, align) }
+ }
+
+ /// Produces a layout describing a record that could be used to
+ /// allocate a backing structure for `T` (which could be a trait
+ /// object or other unsized type like a slice).
+ #[stable(feature = "alloc_layout", since = "1.28.0")]
+ #[must_use]
+ #[inline]
+ pub fn for_value<T: ?Sized>(t: &T) -> Self {
+ let (size, align) = (mem::size_of_val(t), mem::align_of_val(t));
+ debug_assert!(Layout::from_size_align(size, align).is_ok());
+ // SAFETY: see rationale in `new` for why this is using the unsafe variant
+ unsafe { Layout::from_size_align_unchecked(size, align) }
+ }
+
+ /// Produces a layout describing a record that could be used to
+ /// allocate a backing structure for `T` (which could be a trait
+ /// object or other unsized type like a slice).
+ ///
+ /// # Safety
+ ///
+ /// This function is only safe to call if the following conditions hold:
+ ///
+ /// - If `T` is `Sized`, this function is always safe to call.
+ /// - If the unsized tail of `T` is:
+ /// - a [slice], then the length of the slice tail must be an initialized
+ /// integer, and the size of the *entire value*
+ /// (dynamic tail length + statically sized prefix) must fit in `isize`.
+ /// - a [trait object], then the vtable part of the pointer must point
+ /// to a valid vtable for the type `T` acquired by an unsizing coercion,
+ /// and the size of the *entire value*
+ /// (dynamic tail length + statically sized prefix) must fit in `isize`.
+ /// - an (unstable) [extern type], then this function is always safe to
+ /// call, but may panic or otherwise return the wrong value, as the
+ /// extern type's layout is not known. This is the same behavior as
+ /// [`Layout::for_value`] on a reference to an extern type tail.
+ /// - otherwise, it is conservatively not allowed to call this function.
+ ///
+ /// [trait object]: ../../book/ch17-02-trait-objects.html
+ /// [extern type]: ../../unstable-book/language-features/extern-types.html
+ #[unstable(feature = "layout_for_ptr", issue = "69835")]
+ #[must_use]
+ pub unsafe fn for_value_raw<T: ?Sized>(t: *const T) -> Self {
+ // SAFETY: we pass along the prerequisites of these functions to the caller
+ let (size, align) = unsafe { (mem::size_of_val_raw(t), mem::align_of_val_raw(t)) };
+ debug_assert!(Layout::from_size_align(size, align).is_ok());
+ // SAFETY: see rationale in `new` for why this is using the unsafe variant
+ unsafe { Layout::from_size_align_unchecked(size, align) }
+ }
+
+ /// Creates a `NonNull` that is dangling, but well-aligned for this Layout.
+ ///
+ /// Note that the pointer value may potentially represent a valid pointer,
+ /// which means this must not be used as a "not yet initialized"
+ /// sentinel value. Types that lazily allocate must track initialization by
+ /// some other means.
+ #[unstable(feature = "alloc_layout_extra", issue = "55724")]
+ #[rustc_const_unstable(feature = "alloc_layout_extra", issue = "55724")]
+ #[must_use]
+ #[inline]
+ pub const fn dangling(&self) -> NonNull<u8> {
+ // SAFETY: align is guaranteed to be non-zero
+ unsafe { NonNull::new_unchecked(crate::ptr::invalid_mut::<u8>(self.align())) }
+ }
+
+ /// Creates a layout describing the record that can hold a value
+ /// of the same layout as `self`, but that also is aligned to
+ /// alignment `align` (measured in bytes).
+ ///
+ /// If `self` already meets the prescribed alignment, then returns
+ /// `self`.
+ ///
+ /// Note that this method does not add any padding to the overall
+ /// size, regardless of whether the returned layout has a different
+ /// alignment. In other words, if `K` has size 16, `K.align_to(32)`
+ /// will *still* have size 16.
+ ///
+ /// Returns an error if the combination of `self.size()` and the given
+ /// `align` violates the conditions listed in [`Layout::from_size_align`].
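+ ///
+ /// # Examples
+ ///
+ /// A short illustration of the size-preserving behavior described above:
+ ///
+ /// ```
+ /// use std::alloc::Layout;
+ ///
+ /// let layout = Layout::from_size_align(9, 4).unwrap();
+ ///
+ /// // Raising the alignment does not change the size.
+ /// let raised = layout.align_to(16).unwrap();
+ /// assert_eq!((raised.size(), raised.align()), (9, 16));
+ ///
+ /// // An alignment at or below the current one leaves the layout unchanged.
+ /// let unchanged = layout.align_to(2).unwrap();
+ /// assert_eq!((unchanged.size(), unchanged.align()), (9, 4));
+ /// ```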
+ #[stable(feature = "alloc_layout_manipulation", since = "1.44.0")]
+ #[inline]
+ pub fn align_to(&self, align: usize) -> Result<Self, LayoutError> {
+ Layout::from_size_align(self.size(), cmp::max(self.align(), align))
+ }
+
+ /// Returns the amount of padding we must insert after `self`
+ /// to ensure that the following address will satisfy `align`
+ /// (measured in bytes).
+ ///
+ /// e.g., if `self.size()` is 9, then `self.padding_needed_for(4)`
+ /// returns 3, because that is the minimum number of bytes of
+ /// padding required to get a 4-aligned address (assuming that the
+ /// corresponding memory block starts at a 4-aligned address).
+ ///
+ /// The return value of this function has no meaning if `align` is
+ /// not a power-of-two.
+ ///
+ /// Note that the utility of the returned value requires `align`
+ /// to be less than or equal to the alignment of the starting
+ /// address for the whole allocated block of memory. One way to
+ /// satisfy this constraint is to ensure `align <= self.align()`.
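+ ///
+ /// # Examples
+ ///
+ /// A small sketch (nightly-only, since `padding_needed_for` is gated behind
+ /// the unstable `alloc_layout_extra` feature):
+ ///
+ /// ```
+ /// #![feature(alloc_layout_extra)]
+ /// use std::alloc::Layout;
+ ///
+ /// let layout = Layout::from_size_align(9, 4).unwrap();
+ /// // Three bytes of padding round 9 up to the next multiple of 4.
+ /// assert_eq!(layout.padding_needed_for(4), 3);
+ /// assert_eq!(layout.padding_needed_for(1), 0);
+ /// ```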
+ #[unstable(feature = "alloc_layout_extra", issue = "55724")]
+ #[rustc_const_unstable(feature = "const_alloc_layout", issue = "67521")]
+ #[must_use = "this returns the padding needed, \
+ without modifying the `Layout`"]
+ #[inline]
+ pub const fn padding_needed_for(&self, align: usize) -> usize {
+ let len = self.size();
+
+ // Rounded up value is:
+ // len_rounded_up = (len + align - 1) & !(align - 1);
+ // and then we return the padding difference: `len_rounded_up - len`.
+ //
+ // We use modular arithmetic throughout:
+ //
+ // 1. align is guaranteed to be > 0, so align - 1 is always
+ // valid.
+ //
+ // 2. `len + align - 1` can overflow by at most `align - 1`,
+ // so the &-mask with `!(align - 1)` will ensure that in the
+ // case of overflow, `len_rounded_up` will itself be 0.
+ // Thus the returned padding, when added to `len`, yields 0,
+ // which trivially satisfies the alignment `align`.
+ //
+ // (Of course, attempts to allocate blocks of memory whose
+ // size and padding overflow in the above manner should cause
+ // the allocator to yield an error anyway.)
+
+ let len_rounded_up = len.wrapping_add(align).wrapping_sub(1) & !align.wrapping_sub(1);
+ len_rounded_up.wrapping_sub(len)
+ }
+
+ /// Creates a layout by rounding the size of this layout up to a multiple
+ /// of the layout's alignment.
+ ///
+ /// This is equivalent to adding the result of `padding_needed_for`
+ /// to the layout's current size.
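+ ///
+ /// # Examples
+ ///
+ /// For instance, a 5-byte layout with alignment 4 is padded up to 8 bytes:
+ ///
+ /// ```
+ /// use std::alloc::Layout;
+ ///
+ /// let layout = Layout::from_size_align(5, 4).unwrap();
+ /// let padded = layout.pad_to_align();
+ /// assert_eq!((padded.size(), padded.align()), (8, 4));
+ /// ```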
+ #[stable(feature = "alloc_layout_manipulation", since = "1.44.0")]
+ #[must_use = "this returns a new `Layout`, \
+ without modifying the original"]
+ #[inline]
+ pub fn pad_to_align(&self) -> Layout {
+ let pad = self.padding_needed_for(self.align());
+ // This cannot overflow. Quoting from the invariant of Layout:
+ // > `size`, when rounded up to the nearest multiple of `align`,
+ // > must not overflow (i.e., the rounded value must be less than
+ // > or equal to `usize::MAX`)
+ let new_size = self.size() + pad;
+
+ // SAFETY: self.align is already known to be valid and new_size has been
+ // padded already.
+ unsafe { Layout::from_size_align_unchecked(new_size, self.align()) }
+ }
+
+ /// Creates a layout describing the record for `n` instances of
+ /// `self`, with a suitable amount of padding between each to
+ /// ensure that each instance is given its requested size and
+ /// alignment. On success, returns `(k, offs)` where `k` is the
+ /// layout of the array and `offs` is the distance between the start
+ /// of each element in the array.
+ ///
+ /// On arithmetic overflow, returns `LayoutError`.
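+ ///
+ /// # Examples
+ ///
+ /// A small sketch (nightly-only, since `repeat` is gated behind the unstable
+ /// `alloc_layout_extra` feature):
+ ///
+ /// ```
+ /// #![feature(alloc_layout_extra)]
+ /// use std::alloc::Layout;
+ ///
+ /// let layout = Layout::from_size_align(6, 4).unwrap();
+ /// let (array, stride) = layout.repeat(3).unwrap();
+ /// // Each element is padded from 6 to 8 bytes, so 3 elements take 24 bytes.
+ /// assert_eq!(stride, 8);
+ /// assert_eq!((array.size(), array.align()), (24, 4));
+ /// ```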
+ #[unstable(feature = "alloc_layout_extra", issue = "55724")]
+ #[inline]
+ pub fn repeat(&self, n: usize) -> Result<(Self, usize), LayoutError> {
+ // This cannot overflow. Quoting from the invariant of Layout:
+ // > `size`, when rounded up to the nearest multiple of `align`,
+ // > must not overflow (i.e., the rounded value must be less than
+ // > or equal to `usize::MAX`)
+ let padded_size = self.size() + self.padding_needed_for(self.align());
+ let alloc_size = padded_size.checked_mul(n).ok_or(LayoutError)?;
+
+ // SAFETY: self.align is already known to be valid and alloc_size has been
+ // padded already.
+ unsafe { Ok((Layout::from_size_align_unchecked(alloc_size, self.align()), padded_size)) }
+ }
+
+ /// Creates a layout describing the record for `self` followed by
+ /// `next`, including any necessary padding to ensure that `next`
+ /// will be properly aligned, but *no trailing padding*.
+ ///
+ /// In order to match C representation layout `repr(C)`, you should
+ /// call `pad_to_align` after extending the layout with all fields.
+ /// (There is no way to match the default Rust representation
+ /// layout `repr(Rust)`, as it is unspecified.)
+ ///
+ /// Note that the alignment of the resulting layout will be the maximum of
+ /// those of `self` and `next`, in order to ensure alignment of both parts.
+ ///
+ /// Returns `Ok((k, offset))`, where `k` is layout of the concatenated
+ /// record and `offset` is the relative location, in bytes, of the
+ /// start of the `next` embedded within the concatenated record
+ /// (assuming that the record itself starts at offset 0).
+ ///
+ /// On arithmetic overflow, returns `LayoutError`.
+ ///
+ /// # Examples
+ ///
+ /// To calculate the layout of a `#[repr(C)]` structure and the offsets of
+ /// the fields from its fields' layouts:
+ ///
+ /// ```rust
+ /// # use std::alloc::{Layout, LayoutError};
+ /// pub fn repr_c(fields: &[Layout]) -> Result<(Layout, Vec<usize>), LayoutError> {
+ /// let mut offsets = Vec::new();
+ /// let mut layout = Layout::from_size_align(0, 1)?;
+ /// for &field in fields {
+ /// let (new_layout, offset) = layout.extend(field)?;
+ /// layout = new_layout;
+ /// offsets.push(offset);
+ /// }
+ /// // Remember to finalize with `pad_to_align`!
+ /// Ok((layout.pad_to_align(), offsets))
+ /// }
+ /// # // test that it works
+ /// # #[repr(C)] struct S { a: u64, b: u32, c: u16, d: u32 }
+ /// # let s = Layout::new::<S>();
+ /// # let u16 = Layout::new::<u16>();
+ /// # let u32 = Layout::new::<u32>();
+ /// # let u64 = Layout::new::<u64>();
+ /// # assert_eq!(repr_c(&[u64, u32, u16, u32]), Ok((s, vec![0, 8, 12, 16])));
+ /// ```
+ #[stable(feature = "alloc_layout_manipulation", since = "1.44.0")]
+ #[inline]
+ pub fn extend(&self, next: Self) -> Result<(Self, usize), LayoutError> {
+ let new_align = cmp::max(self.align(), next.align());
+ let pad = self.padding_needed_for(next.align());
+
+ let offset = self.size().checked_add(pad).ok_or(LayoutError)?;
+ let new_size = offset.checked_add(next.size()).ok_or(LayoutError)?;
+
+ let layout = Layout::from_size_align(new_size, new_align)?;
+ Ok((layout, offset))
+ }
+
+ /// Creates a layout describing the record for `n` instances of
+ /// `self`, with no padding between each instance.
+ ///
+ /// Note that, unlike `repeat`, `repeat_packed` does not guarantee
+ /// that the repeated instances of `self` will be properly
+ /// aligned, even if a given instance of `self` is properly
+ /// aligned. In other words, if the layout returned by
+ /// `repeat_packed` is used to allocate an array, it is not
+ /// guaranteed that all elements in the array will be properly
+ /// aligned.
+ ///
+ /// On arithmetic overflow, returns `LayoutError`.
+ #[unstable(feature = "alloc_layout_extra", issue = "55724")]
+ #[inline]
+ pub fn repeat_packed(&self, n: usize) -> Result<Self, LayoutError> {
+ let size = self.size().checked_mul(n).ok_or(LayoutError)?;
+ Layout::from_size_align(size, self.align())
+ }
+
+ /// Creates a layout describing the record for `self` followed by
+ /// `next` with no additional padding between the two. Since no
+ /// padding is inserted, the alignment of `next` is irrelevant,
+ /// and is not incorporated *at all* into the resulting layout.
+ ///
+ /// On arithmetic overflow, returns `LayoutError`.
+ #[unstable(feature = "alloc_layout_extra", issue = "55724")]
+ #[inline]
+ pub fn extend_packed(&self, next: Self) -> Result<Self, LayoutError> {
+ let new_size = self.size().checked_add(next.size()).ok_or(LayoutError)?;
+ Layout::from_size_align(new_size, self.align())
+ }
+
+ /// Creates a layout describing the record for a `[T; n]`.
+ ///
+ /// On arithmetic overflow, returns `LayoutError`.
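+ ///
+ /// # Examples
+ ///
+ /// For example:
+ ///
+ /// ```
+ /// use std::alloc::Layout;
+ /// use std::mem;
+ ///
+ /// let layout = Layout::array::<u16>(8).unwrap();
+ /// assert_eq!(layout.size(), 8 * mem::size_of::<u16>());
+ /// assert_eq!(layout.align(), mem::align_of::<u16>());
+ ///
+ /// // The total size is checked for overflow.
+ /// assert!(Layout::array::<u16>(usize::MAX).is_err());
+ /// ```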
+ #[stable(feature = "alloc_layout_manipulation", since = "1.44.0")]
+ #[inline]
+ pub fn array<T>(n: usize) -> Result<Self, LayoutError> {
+ let array_size = mem::size_of::<T>().checked_mul(n).ok_or(LayoutError)?;
+
+ // SAFETY:
+ // - Size: `array_size` cannot be too big because `size_of::<T>()` must
+ // be a multiple of `align_of::<T>()`. Therefore, `array_size`
+ // rounded up to the nearest multiple of `align_of::<T>()` is just
+ // `array_size`. And `array_size` cannot be too big because it was
+ // just checked by the `checked_mul()`.
+ // - Alignment: `align_of::<T>()` will always give an acceptable
+ // (non-zero, power of two) alignment.
+ Ok(unsafe { Layout::from_size_align_unchecked(array_size, mem::align_of::<T>()) })
+ }
+}
+
+#[stable(feature = "alloc_layout", since = "1.28.0")]
+#[deprecated(
+ since = "1.52.0",
+ note = "Name does not follow std convention, use LayoutError",
+ suggestion = "LayoutError"
+)]
+pub type LayoutErr = LayoutError;
+
+/// The parameters given to `Layout::from_size_align`
+/// or some other `Layout` constructor
+/// do not satisfy its documented constraints.
+#[stable(feature = "alloc_layout_error", since = "1.50.0")]
+#[non_exhaustive]
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub struct LayoutError;
+
+// (we need this for downstream impl of trait Error)
+#[stable(feature = "alloc_layout", since = "1.28.0")]
+impl fmt::Display for LayoutError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str("invalid parameters to Layout::from_size_align")
+ }
+}
diff --git a/library/core/src/alloc/mod.rs b/library/core/src/alloc/mod.rs
new file mode 100644
index 000000000..6cc6e359e
--- /dev/null
+++ b/library/core/src/alloc/mod.rs
@@ -0,0 +1,410 @@
+//! Memory allocation APIs
+
+#![stable(feature = "alloc_module", since = "1.28.0")]
+
+mod global;
+mod layout;
+
+#[stable(feature = "global_alloc", since = "1.28.0")]
+pub use self::global::GlobalAlloc;
+#[stable(feature = "alloc_layout", since = "1.28.0")]
+pub use self::layout::Layout;
+#[stable(feature = "alloc_layout", since = "1.28.0")]
+#[deprecated(
+ since = "1.52.0",
+ note = "Name does not follow std convention, use LayoutError",
+ suggestion = "LayoutError"
+)]
+#[allow(deprecated, deprecated_in_future)]
+pub use self::layout::LayoutErr;
+
+#[stable(feature = "alloc_layout_error", since = "1.50.0")]
+pub use self::layout::LayoutError;
+
+use crate::fmt;
+use crate::ptr::{self, NonNull};
+
+/// The `AllocError` error indicates an allocation failure
+/// that may be due to resource exhaustion or to
+/// something wrong when combining the given input arguments with this
+/// allocator.
+#[unstable(feature = "allocator_api", issue = "32838")]
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub struct AllocError;
+
+// (we need this for downstream impl of trait Error)
+#[unstable(feature = "allocator_api", issue = "32838")]
+impl fmt::Display for AllocError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str("memory allocation failed")
+ }
+}
+
+/// An implementation of `Allocator` can allocate, grow, shrink, and deallocate arbitrary blocks of
+/// data described via [`Layout`][].
+///
+/// `Allocator` is designed to be implemented on ZSTs, references, or smart pointers because an
+/// allocator like `MyAlloc([u8; N])` cannot be moved without updating the pointers to the
+/// allocated memory.
+///
+/// Unlike [`GlobalAlloc`][], zero-sized allocations are allowed in `Allocator`. If an underlying
+/// allocator does not support this (like jemalloc) or returns a null pointer for them (as
+/// `libc::malloc` may), the implementation must handle that case itself.
+///
+/// ### Currently allocated memory
+///
+/// Some of the methods require that a memory block be *currently allocated* via an allocator. This
+/// means that:
+///
+/// * the starting address for that memory block was previously returned by [`allocate`], [`grow`], or
+/// [`shrink`], and
+///
+/// * the memory block has not been subsequently deallocated, where blocks are either deallocated
+/// directly by being passed to [`deallocate`] or were changed by being passed to [`grow`] or
+/// [`shrink`] that returns `Ok`. If `grow` or `shrink` have returned `Err`, the passed pointer
+/// remains valid.
+///
+/// [`allocate`]: Allocator::allocate
+/// [`grow`]: Allocator::grow
+/// [`shrink`]: Allocator::shrink
+/// [`deallocate`]: Allocator::deallocate
+///
+/// ### Memory fitting
+///
+/// Some of the methods require that a layout *fit* a memory block. For a layout to "fit" a memory
+/// block (or equivalently, for a memory block to "fit" a layout), the following conditions must
+/// hold:
+///
+/// * The block must be allocated with the same alignment as [`layout.align()`], and
+///
+/// * The provided [`layout.size()`] must fall in the range `min ..= max`, where:
+/// - `min` is the size of the layout most recently used to allocate the block, and
+/// - `max` is the latest actual size returned from [`allocate`], [`grow`], or [`shrink`].
+///
+/// [`layout.align()`]: Layout::align
+/// [`layout.size()`]: Layout::size
+///
+/// # Safety
+///
+/// * Memory blocks returned from an allocator must point to valid memory and retain their validity
+/// until the instance and all of its clones are dropped,
+///
+/// * cloning or moving the allocator must not invalidate memory blocks returned from this
+/// allocator. A cloned allocator must behave like the same allocator, and
+///
+/// * any pointer to a memory block which is [*currently allocated*] may be passed to any other
+/// method of the allocator.
+///
+/// [*currently allocated*]: #currently-allocated-memory
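+///
+/// # Examples
+///
+/// A minimal sketch of allocating and deallocating through an `Allocator`
+/// (nightly-only, using the unstable `allocator_api` feature and the `Global`
+/// allocator, which live outside this module):
+///
+/// ```
+/// #![feature(allocator_api)]
+/// use std::alloc::{Allocator, Global, Layout};
+///
+/// let layout = Layout::array::<u32>(4).unwrap();
+/// let block = Global.allocate(layout).expect("allocation failed");
+/// // `block` is a `NonNull<[u8]>`; its pointer is suitably aligned for `u32`.
+/// assert_eq!(block.cast::<u32>().as_ptr() as usize % layout.align(), 0);
+/// unsafe { Global.deallocate(block.cast(), layout) };
+/// ```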
+#[unstable(feature = "allocator_api", issue = "32838")]
+pub unsafe trait Allocator {
+ /// Attempts to allocate a block of memory.
+ ///
+ /// On success, returns a [`NonNull<[u8]>`][NonNull] meeting the size and alignment guarantees of `layout`.
+ ///
+ /// The returned block may have a larger size than specified by `layout.size()`, and may or may
+ /// not have its contents initialized.
+ ///
+ /// # Errors
+ ///
+ /// Returning `Err` indicates that either memory is exhausted or `layout` does not meet
+ /// allocator's size or alignment constraints.
+ ///
+ /// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or
+ /// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement
+ /// this trait atop an underlying native allocation library that aborts on memory exhaustion.)
+ ///
+ /// Clients wishing to abort computation in response to an allocation error are encouraged to
+ /// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar.
+ ///
+ /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
+ fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError>;
+
+ /// Behaves like `allocate`, but also ensures that the returned memory is zero-initialized.
+ ///
+ /// # Errors
+ ///
+ /// Returning `Err` indicates that either memory is exhausted or `layout` does not meet
+ /// allocator's size or alignment constraints.
+ ///
+ /// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or
+ /// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement
+ /// this trait atop an underlying native allocation library that aborts on memory exhaustion.)
+ ///
+ /// Clients wishing to abort computation in response to an allocation error are encouraged to
+ /// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar.
+ ///
+ /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
+ fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
+ let ptr = self.allocate(layout)?;
+ // SAFETY: `allocate` returned a valid memory block of `ptr.len()` bytes
+ unsafe { ptr.as_non_null_ptr().as_ptr().write_bytes(0, ptr.len()) }
+ Ok(ptr)
+ }
+
+ /// Deallocates the memory referenced by `ptr`.
+ ///
+ /// # Safety
+ ///
+ /// * `ptr` must denote a block of memory [*currently allocated*] via this allocator, and
+ /// * `layout` must [*fit*] that block of memory.
+ ///
+ /// [*currently allocated*]: #currently-allocated-memory
+ /// [*fit*]: #memory-fitting
+ unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout);
+
+ /// Attempts to extend the memory block.
+ ///
+ /// Returns a new [`NonNull<[u8]>`][NonNull] containing a pointer and the actual size of the allocated
+ /// memory. The pointer is suitable for holding data described by `new_layout`. To accomplish
+ /// this, the allocator may extend the allocation referenced by `ptr` to fit the new layout.
+ ///
+ /// If this returns `Ok`, then ownership of the memory block referenced by `ptr` has been
+ /// transferred to this allocator. The memory may or may not have been freed, and should be
+ /// considered unusable.
+ ///
+ /// If this method returns `Err`, then ownership of the memory block has not been transferred to
+ /// this allocator, and the contents of the memory block are unaltered.
+ ///
+ /// # Safety
+ ///
+ /// * `ptr` must denote a block of memory [*currently allocated*] via this allocator.
+ /// * `old_layout` must [*fit*] that block of memory (The `new_layout` argument need not fit it.).
+ /// * `new_layout.size()` must be greater than or equal to `old_layout.size()`.
+ ///
+ /// Note that `new_layout.align()` need not be the same as `old_layout.align()`.
+ ///
+ /// [*currently allocated*]: #currently-allocated-memory
+ /// [*fit*]: #memory-fitting
+ ///
+ /// # Errors
+ ///
+ /// Returns `Err` if the new layout does not meet the allocator's size and alignment
+ /// constraints, or if growing otherwise fails.
+ ///
+ /// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or
+ /// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement
+ /// this trait atop an underlying native allocation library that aborts on memory exhaustion.)
+ ///
+ /// Clients wishing to abort computation in response to an allocation error are encouraged to
+ /// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar.
+ ///
+ /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
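+ ///
+ /// # Examples
+ ///
+ /// A sketch of growing an allocation through the `Global` allocator
+ /// (nightly-only, using the unstable `allocator_api` feature):
+ ///
+ /// ```
+ /// #![feature(allocator_api)]
+ /// use std::alloc::{Allocator, Global, Layout};
+ ///
+ /// let old_layout = Layout::array::<u8>(4).unwrap();
+ /// let new_layout = Layout::array::<u8>(16).unwrap();
+ ///
+ /// let block = Global.allocate(old_layout).expect("allocation failed");
+ /// unsafe {
+ ///     block.cast::<u8>().as_ptr().write_bytes(9, old_layout.size());
+ ///     let grown = Global
+ ///         .grow(block.cast(), old_layout, new_layout)
+ ///         .expect("grow failed");
+ ///     // The first `old_layout.size()` bytes are preserved.
+ ///     let prefix = std::slice::from_raw_parts(grown.cast::<u8>().as_ptr(), old_layout.size());
+ ///     assert!(prefix.iter().all(|&b| b == 9));
+ ///     Global.deallocate(grown.cast(), new_layout);
+ /// }
+ /// ```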
+ unsafe fn grow(
+ &self,
+ ptr: NonNull<u8>,
+ old_layout: Layout,
+ new_layout: Layout,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ debug_assert!(
+ new_layout.size() >= old_layout.size(),
+ "`new_layout.size()` must be greater than or equal to `old_layout.size()`"
+ );
+
+ let new_ptr = self.allocate(new_layout)?;
+
+ // SAFETY: because `new_layout.size()` must be greater than or equal to
+ // `old_layout.size()`, both the old and new memory allocation are valid for reads and
+ // writes for `old_layout.size()` bytes. Also, because the old allocation wasn't yet
+ // deallocated, it cannot overlap `new_ptr`. Thus, the call to `copy_nonoverlapping` is
+ // safe. The safety contract for `deallocate` must be upheld by the caller.
+ unsafe {
+ ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_mut_ptr(), old_layout.size());
+ self.deallocate(ptr, old_layout);
+ }
+
+ Ok(new_ptr)
+ }
+
+ /// Behaves like `grow`, but also ensures that the new contents are set to zero before being
+ /// returned.
+ ///
+ /// The memory block will contain the following contents after a successful call to
+ /// `grow_zeroed`:
+ /// * Bytes `0..old_layout.size()` are preserved from the original allocation.
+ /// * Bytes `old_layout.size()..old_size` will either be preserved or zeroed, depending on
+ /// the allocator implementation. `old_size` refers to the size of the memory block prior
+ /// to the `grow_zeroed` call, which may be larger than the size that was originally
+ /// requested when it was allocated.
+ /// * Bytes `old_size..new_size` are zeroed. `new_size` refers to the size of the memory
+ /// block returned by the `grow_zeroed` call.
+ ///
+ /// # Safety
+ ///
+ /// * `ptr` must denote a block of memory [*currently allocated*] via this allocator.
+ /// * `old_layout` must [*fit*] that block of memory (The `new_layout` argument need not fit it.).
+ /// * `new_layout.size()` must be greater than or equal to `old_layout.size()`.
+ ///
+ /// Note that `new_layout.align()` need not be the same as `old_layout.align()`.
+ ///
+ /// [*currently allocated*]: #currently-allocated-memory
+ /// [*fit*]: #memory-fitting
+ ///
+ /// # Errors
+ ///
+ /// Returns `Err` if the new layout does not meet the allocator's size and alignment
+ /// constraints, or if growing otherwise fails.
+ ///
+ /// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or
+ /// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement
+ /// this trait atop an underlying native allocation library that aborts on memory exhaustion.)
+ ///
+ /// Clients wishing to abort computation in response to an allocation error are encouraged to
+ /// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar.
+ ///
+ /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
+ unsafe fn grow_zeroed(
+ &self,
+ ptr: NonNull<u8>,
+ old_layout: Layout,
+ new_layout: Layout,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ debug_assert!(
+ new_layout.size() >= old_layout.size(),
+ "`new_layout.size()` must be greater than or equal to `old_layout.size()`"
+ );
+
+ let new_ptr = self.allocate_zeroed(new_layout)?;
+
+ // SAFETY: because `new_layout.size()` must be greater than or equal to
+ // `old_layout.size()`, both the old and new memory allocation are valid for reads and
+ // writes for `old_layout.size()` bytes. Also, because the old allocation wasn't yet
+ // deallocated, it cannot overlap `new_ptr`. Thus, the call to `copy_nonoverlapping` is
+ // safe. The safety contract for `deallocate` must be upheld by the caller.
+ unsafe {
+ ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_mut_ptr(), old_layout.size());
+ self.deallocate(ptr, old_layout);
+ }
+
+ Ok(new_ptr)
+ }
+
+ /// Attempts to shrink the memory block.
+ ///
+ /// Returns a new [`NonNull<[u8]>`][NonNull] containing a pointer and the actual size of the allocated
+ /// memory. The pointer is suitable for holding data described by `new_layout`. To accomplish
+ /// this, the allocator may shrink the allocation referenced by `ptr` to fit the new layout.
+ ///
+ /// If this returns `Ok`, then ownership of the memory block referenced by `ptr` has been
+ /// transferred to this allocator. The memory may or may not have been freed, and should be
+ /// considered unusable.
+ ///
+ /// If this method returns `Err`, then ownership of the memory block has not been transferred to
+ /// this allocator, and the contents of the memory block are unaltered.
+ ///
+ /// # Safety
+ ///
+ /// * `ptr` must denote a block of memory [*currently allocated*] via this allocator.
+ /// * `old_layout` must [*fit*] that block of memory (The `new_layout` argument need not fit it.).
+ /// * `new_layout.size()` must be smaller than or equal to `old_layout.size()`.
+ ///
+ /// Note that `new_layout.align()` need not be the same as `old_layout.align()`.
+ ///
+ /// [*currently allocated*]: #currently-allocated-memory
+ /// [*fit*]: #memory-fitting
+ ///
+ /// # Errors
+ ///
+ /// Returns `Err` if the new layout does not meet the allocator's size and alignment
+ /// constraints, or if shrinking otherwise fails.
+ ///
+ /// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or
+ /// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement
+ /// this trait atop an underlying native allocation library that aborts on memory exhaustion.)
+ ///
+ /// Clients wishing to abort computation in response to an allocation error are encouraged to
+ /// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar.
+ ///
+ /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
+ unsafe fn shrink(
+ &self,
+ ptr: NonNull<u8>,
+ old_layout: Layout,
+ new_layout: Layout,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ debug_assert!(
+ new_layout.size() <= old_layout.size(),
+ "`new_layout.size()` must be smaller than or equal to `old_layout.size()`"
+ );
+
+ let new_ptr = self.allocate(new_layout)?;
+
+ // SAFETY: because `new_layout.size()` must be lower than or equal to
+ // `old_layout.size()`, both the old and new memory allocation are valid for reads and
+ // writes for `new_layout.size()` bytes. Also, because the old allocation wasn't yet
+ // deallocated, it cannot overlap `new_ptr`. Thus, the call to `copy_nonoverlapping` is
+ // safe. The safety contract for `deallocate` must be upheld by the caller.
+ unsafe {
+ ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_mut_ptr(), new_layout.size());
+ self.deallocate(ptr, old_layout);
+ }
+
+ Ok(new_ptr)
+ }
+
+ /// Creates a "by reference" adapter for this instance of `Allocator`.
+ ///
+ /// The returned adapter also implements `Allocator` and will simply borrow this.
+ #[inline(always)]
+ fn by_ref(&self) -> &Self
+ where
+ Self: Sized,
+ {
+ self
+ }
+}
+
+#[unstable(feature = "allocator_api", issue = "32838")]
+unsafe impl<A> Allocator for &A
+where
+ A: Allocator + ?Sized,
+{
+ #[inline]
+ fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
+ (**self).allocate(layout)
+ }
+
+ #[inline]
+ fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
+ (**self).allocate_zeroed(layout)
+ }
+
+ #[inline]
+ unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
+ // SAFETY: the safety contract must be upheld by the caller
+ unsafe { (**self).deallocate(ptr, layout) }
+ }
+
+ #[inline]
+ unsafe fn grow(
+ &self,
+ ptr: NonNull<u8>,
+ old_layout: Layout,
+ new_layout: Layout,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ // SAFETY: the safety contract must be upheld by the caller
+ unsafe { (**self).grow(ptr, old_layout, new_layout) }
+ }
+
+ #[inline]
+ unsafe fn grow_zeroed(
+ &self,
+ ptr: NonNull<u8>,
+ old_layout: Layout,
+ new_layout: Layout,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ // SAFETY: the safety contract must be upheld by the caller
+ unsafe { (**self).grow_zeroed(ptr, old_layout, new_layout) }
+ }
+
+ #[inline]
+ unsafe fn shrink(
+ &self,
+ ptr: NonNull<u8>,
+ old_layout: Layout,
+ new_layout: Layout,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ // SAFETY: the safety contract must be upheld by the caller
+ unsafe { (**self).shrink(ptr, old_layout, new_layout) }
+ }
+}