author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-30 03:57:31 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-30 03:57:31 +0000
commit     dc0db358abe19481e475e10c32149b53370f1a1c (patch)
tree       ab8ce99c4b255ce46f99ef402c27916055b899ee /vendor/allocator-api2
parent     Releasing progress-linux version 1.71.1+dfsg1-2~progress7.99u1. (diff)
Merging upstream version 1.72.1+dfsg1.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'vendor/allocator-api2')
-rw-r--r--  vendor/allocator-api2/.cargo-checksum.json             |    1
-rw-r--r--  vendor/allocator-api2/CHANGELOG.md                     |    7
-rw-r--r--  vendor/allocator-api2/COPYING                          |    6
-rw-r--r--  vendor/allocator-api2/Cargo.toml                       |   32
-rw-r--r--  vendor/allocator-api2/README.md                        |   53
-rw-r--r--  vendor/allocator-api2/license/APACHE                   |   13
-rw-r--r--  vendor/allocator-api2/license/MIT                      |   25
-rw-r--r--  vendor/allocator-api2/src/lib.rs                       |   19
-rw-r--r--  vendor/allocator-api2/src/nightly.rs                   |    5
-rw-r--r--  vendor/allocator-api2/src/stable/alloc/global.rs       |  188
-rw-r--r--  vendor/allocator-api2/src/stable/alloc/mod.rs          |  416
-rw-r--r--  vendor/allocator-api2/src/stable/alloc/system.rs       |  172
-rw-r--r--  vendor/allocator-api2/src/stable/boxed.rs              | 2154
-rw-r--r--  vendor/allocator-api2/src/stable/macros.rs             |   83
-rw-r--r--  vendor/allocator-api2/src/stable/mod.rs                |   62
-rw-r--r--  vendor/allocator-api2/src/stable/raw_vec.rs            |  642
-rw-r--r--  vendor/allocator-api2/src/stable/slice.rs              |  171
-rw-r--r--  vendor/allocator-api2/src/stable/vec/drain.rs          |  242
-rw-r--r--  vendor/allocator-api2/src/stable/vec/into_iter.rs      |  198
-rw-r--r--  vendor/allocator-api2/src/stable/vec/mod.rs            | 3253
-rw-r--r--  vendor/allocator-api2/src/stable/vec/partial_eq.rs     |   43
-rw-r--r--  vendor/allocator-api2/src/stable/vec/set_len_on_drop.rs |   31
-rw-r--r--  vendor/allocator-api2/src/stable/vec/splice.rs         |  135
23 files changed, 7951 insertions, 0 deletions
diff --git a/vendor/allocator-api2/.cargo-checksum.json b/vendor/allocator-api2/.cargo-checksum.json
new file mode 100644
index 000000000..8e595a183
--- /dev/null
+++ b/vendor/allocator-api2/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"CHANGELOG.md":"b4d01c4b8a790e435dc0ab67a1ef8b6d8e39f87bec233540e247ef313737d855","COPYING":"aacc8f585552509941b8531442e43a8e3e1aabc7d92f1ff0736250b80f65361c","Cargo.toml":"c4665044d4cd9194eee4e8c1f2a6d71b98ea88ea228325cd54b72dcb1ab6b70f","README.md":"85cecaf786f948c26510911416d7e0ab4c4f10367d963cad011589648084a986","license/APACHE":"65071d88cda37097d5579c272cf0db48b23acc4e2fe3ad16a5985cd714753cbc","license/MIT":"74d0d1e38a980edecb7c71d33f2056456e2cb6c37c16bd05a882d714b5e56661","src/lib.rs":"fc1294b60cbf4d9ca3f61a43b86aac9533cd6b5b87729a9bd32f7992186d1a49","src/nightly.rs":"fc84f98e2014bef66bd54671d8ec98db973fb46b80fb271d6783eb00d1f95228","src/stable/alloc/global.rs":"411208558701915ff0f7cf7ef6c64b8a3bc932944416c26fd832d03d10a76502","src/stable/alloc/mod.rs":"63db909472169a70ad5332f33f67b88e9ea361c13725c65540d7003c83d8d226","src/stable/alloc/system.rs":"7c9145f594869c3cb934e97d3eda1b0b8ed6bd8ba89b1aea7435fc6680465b6b","src/stable/boxed.rs":"a0da23504d1715ccf1e2af25b7e503d2bb1bed6cdaa7d825e63b8e2c78c7dc5a","src/stable/macros.rs":"ce3915ce7ee003d8790c695d70a4e77b1e63a908a5ae0825169270d0f4ab5941","src/stable/mod.rs":"fc44985d0d999e2bd52693a49bb1796451c0a9a2e6d4f7565629392a38ca54e1","src/stable/raw_vec.rs":"bc1cb45b661ae5786912d625351e6e0d33aac8e4edaf36873874184a136cd89c","src/stable/slice.rs":"14d6eb35e3557b5f78feb48fd4bea343f037e8f1f2d2707089db4dbed438b558","src/stable/vec/drain.rs":"f8209cbd76a57823f6583a84fee285727b6c00189ec299acc9f97a0829f0742f","src/stable/vec/into_iter.rs":"9b0e58c8cd6c34b3c706696cb9508c977cbfaa0eeb32d13f799a82520b5cd490","src/stable/vec/mod.rs":"19a6772a4e3053c55c83dd774d3b0154852080bb58734dd49052a4175f0b4df1","src/stable/vec/partial_eq.rs":"cb88615747b4413f26dcab206e026bbd50150bf7d97d8df174384e86151d875e","src/stable/vec/set_len_on_drop.rs":"36f2e8fdc9b0a838eb443d74bec0291d389e52bfe4f617e391d977f15e6893b5","src/stable/vec/splice.rs":"7ce9fa74764c36ab9043f7339548e96b0b68f7d1a16769c9cb066b9a538dcb14"},"package":"56fc6cf8dc8c4158eed8649f9b8b0ea1518eb62b544fe9490d66fa0b349eafe9"} \ No newline at end of file
diff --git a/vendor/allocator-api2/CHANGELOG.md b/vendor/allocator-api2/CHANGELOG.md
new file mode 100644
index 000000000..d94e3d6c6
--- /dev/null
+++ b/vendor/allocator-api2/CHANGELOG.md
@@ -0,0 +1,7 @@
+# Changelog
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
+and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## [Unreleased]
diff --git a/vendor/allocator-api2/COPYING b/vendor/allocator-api2/COPYING
new file mode 100644
index 000000000..4d3f91283
--- /dev/null
+++ b/vendor/allocator-api2/COPYING
@@ -0,0 +1,6 @@
+Copyright 2023 The allocator-api2 Project Developers
+
+Licensed under the Apache License, Version 2.0, <license/LICENSE-APACHE or
+http://apache.org/licenses/LICENSE-2.0> or the MIT license <license/LICENSE-MIT or
+http://opensource.org/licenses/MIT>, at your option. This file may not be
+copied, modified, or distributed except according to those terms.
diff --git a/vendor/allocator-api2/Cargo.toml b/vendor/allocator-api2/Cargo.toml
new file mode 100644
index 000000000..35c610cb6
--- /dev/null
+++ b/vendor/allocator-api2/Cargo.toml
@@ -0,0 +1,32 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2018"
+name = "allocator-api2"
+version = "0.2.15"
+authors = ["Zakarum <zaq.dev@icloud.com>"]
+description = "Mirror of Rust's allocator API"
+homepage = "https://github.com/zakarumych/allocator-api2"
+documentation = "https://docs.rs/allocator-api2"
+readme = "README.md"
+license = "MIT OR Apache-2.0"
+repository = "https://github.com/zakarumych/allocator-api2"
+
+[dependencies.serde]
+version = "1.0"
+optional = true
+
+[features]
+alloc = []
+default = ["std"]
+nightly = []
+std = ["alloc"]
diff --git a/vendor/allocator-api2/README.md b/vendor/allocator-api2/README.md
new file mode 100644
index 000000000..d06b8d32b
--- /dev/null
+++ b/vendor/allocator-api2/README.md
@@ -0,0 +1,53 @@
+# allocator-api2
+
+[![crates](https://img.shields.io/crates/v/allocator-api2.svg?style=for-the-badge&label=allocator-api2)](https://crates.io/crates/allocator-api2)
+[![docs](https://img.shields.io/badge/docs.rs-allocator--api2-66c2a5?style=for-the-badge&labelColor=555555&logoColor=white)](https://docs.rs/allocator-api2)
+[![actions](https://img.shields.io/github/actions/workflow/status/zakarumych/allocator-api2/badge.yml?branch=main&style=for-the-badge)](https://github.com/zakarumych/allocator-api2/actions/workflows/badge.yml)
+[![MIT/Apache](https://img.shields.io/badge/license-MIT%2FApache-blue.svg?style=for-the-badge)](COPYING)
+![loc](https://img.shields.io/tokei/lines/github/zakarumych/allocator-api2?style=for-the-badge)
+
+This crate mirrors the types and traits from Rust's unstable [`allocator_api`]
+feature. It is intended to serve as a substitute for the real thing in
+libraries built on the stable and beta channels.
+The target users are library authors who implement allocators or collection
+types that use allocators, and anyone else who wants to use [`allocator_api`].
+
+The crate is updated frequently, with a minor version bump for each update.
+Once [`allocator_api`] is stabilized, this crate will release version `1.0`
+and simply re-export from `core`, `alloc`, and `std`.
+
+The code is mostly a verbatim copy from the Rust repository, with unstable
+attributes removed.
+
+## Usage
+
+This section describes how to use this crate correctly to ensure
+compatibility and interoperability on both stable and nightly channels.
+
+If you are writing a library that interacts with the allocator API, you can
+add this crate as a dependency and use the types and traits from this
+crate instead of the ones in `core` or `alloc`.
+This will allow your library to compile on stable and beta channels.
+
+Your library *MAY* provide a feature that enables "allocator-api2/nightly".
+When that feature is enabled, your library *MUST* also enable the unstable
+`#![feature(allocator_api)]`, or it may not compile.
+If no such feature is provided, your library may be incompatible with the
+rest of the ecosystem and cause compilation errors on the nightly channel
+when some other crate enables the "allocator-api2/nightly" feature.
+
+## License
+
+Licensed under either of
+
+* Apache License, Version 2.0, ([license/APACHE](license/APACHE) or http://www.apache.org/licenses/LICENSE-2.0)
+* MIT license ([license/MIT](license/MIT) or http://opensource.org/licenses/MIT)
+
+at your option.
+
+## Contributions
+
+Unless you explicitly state otherwise, any contribution intentionally submitted for inclusion in the work by you, as defined in the Apache-2.0 license, shall be dual licensed as above, without any additional terms or conditions.
+
+
+[`allocator_api`]: https://doc.rust-lang.org/unstable-book/library-features/allocator-api.html \ No newline at end of file
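
As a concrete illustration of the feature forwarding described in the README's Usage section, a downstream library might be wired up as in the following sketch. The crate name `my_lib` and its `nightly` feature are hypothetical; only the `allocator-api2` names come from the vendored crate.

    // lib.rs of a hypothetical downstream library (`my_lib`).
    //
    // Cargo.toml of `my_lib` would declare (assumed):
    //   [features]
    //   nightly = ["allocator-api2/nightly"]
    //
    // Per the README, enabling the library's `nightly` feature MUST also
    // enable the unstable allocator API, or the build may fail:
    #![cfg_attr(feature = "nightly", feature(allocator_api))]

    // On stable/beta these resolve to the polyfill in `src/stable/`;
    // with the `nightly` feature they resolve to the real std items.
    pub use allocator_api2::alloc::{AllocError, Allocator, Global, Layout};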
diff --git a/vendor/allocator-api2/license/APACHE b/vendor/allocator-api2/license/APACHE
new file mode 100644
index 000000000..83b33707f
--- /dev/null
+++ b/vendor/allocator-api2/license/APACHE
@@ -0,0 +1,13 @@
+Copyright 2023 The allocator-api2 project developers
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License. \ No newline at end of file
diff --git a/vendor/allocator-api2/license/MIT b/vendor/allocator-api2/license/MIT
new file mode 100644
index 000000000..e8658e9b0
--- /dev/null
+++ b/vendor/allocator-api2/license/MIT
@@ -0,0 +1,25 @@
+Copyright (c) 2023 The allocator-api2 project developers
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE. \ No newline at end of file
diff --git a/vendor/allocator-api2/src/lib.rs b/vendor/allocator-api2/src/lib.rs
new file mode 100644
index 000000000..8cdf14af1
--- /dev/null
+++ b/vendor/allocator-api2/src/lib.rs
@@ -0,0 +1,19 @@
+//!
+//! allocator-api2 crate.
+//!
+#![cfg_attr(not(feature = "std"), no_std)]
+
+#[cfg(feature = "alloc")]
+extern crate alloc as alloc_crate;
+
+#[cfg(not(feature = "nightly"))]
+mod stable;
+
+#[cfg(feature = "nightly")]
+mod nightly;
+
+#[cfg(not(feature = "nightly"))]
+pub use self::stable::*;
+
+#[cfg(feature = "nightly")]
+pub use self::nightly::*;
diff --git a/vendor/allocator-api2/src/nightly.rs b/vendor/allocator-api2/src/nightly.rs
new file mode 100644
index 000000000..7c4698dc9
--- /dev/null
+++ b/vendor/allocator-api2/src/nightly.rs
@@ -0,0 +1,5 @@
+#[cfg(not(feature = "alloc"))]
+pub use core::alloc;
+
+#[cfg(feature = "alloc")]
+pub use alloc_crate::{alloc, boxed, vec};
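
Taken together, `lib.rs` and `nightly.rs` implement the channel switch: with the `nightly` feature off, `allocator_api2::alloc` is the stable polyfill below; with it on, it is a re-export of the real `core`/`alloc` items. A minimal round-trip that compiles unchanged under either configuration (illustrative sketch, not part of the vendored sources):

    use allocator_api2::alloc::{AllocError, Allocator, Global, Layout};

    fn round_trip() -> Result<(), AllocError> {
        let layout = Layout::array::<u64>(16).map_err(|_| AllocError)?;
        let block = Global.allocate(layout)?;
        // SAFETY: `block` was just allocated by `Global` with `layout`.
        unsafe { Global.deallocate(block.cast(), layout) };
        Ok(())
    }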
diff --git a/vendor/allocator-api2/src/stable/alloc/global.rs b/vendor/allocator-api2/src/stable/alloc/global.rs
new file mode 100644
index 000000000..e2dc27fa0
--- /dev/null
+++ b/vendor/allocator-api2/src/stable/alloc/global.rs
@@ -0,0 +1,188 @@
+use core::ptr::NonNull;
+
+#[doc(inline)]
+pub use alloc_crate::alloc::{alloc, alloc_zeroed, dealloc, handle_alloc_error, realloc};
+
+use crate::stable::{assume, invalid_mut};
+
+use super::{AllocError, Allocator, Layout};
+
+/// The global memory allocator.
+///
+/// This type implements the [`Allocator`] trait by forwarding calls
+/// to the allocator registered with the `#[global_allocator]` attribute
+/// if there is one, or the `std` crate’s default.
+///
+/// Note: while this type is unstable, the functionality it provides can be
+/// accessed through the [free functions in `alloc`](crate#functions).
+#[derive(Copy, Clone, Default, Debug)]
+pub struct Global;
+
+impl Global {
+ #[inline(always)]
+ fn alloc_impl(&self, layout: Layout, zeroed: bool) -> Result<NonNull<[u8]>, AllocError> {
+ match layout.size() {
+ 0 => Ok(unsafe {
+ NonNull::new_unchecked(core::ptr::slice_from_raw_parts_mut(
+ invalid_mut(layout.align()),
+ 0,
+ ))
+ }),
+ // SAFETY: `layout` is non-zero in size,
+ size => unsafe {
+ let raw_ptr = if zeroed {
+ alloc_zeroed(layout)
+ } else {
+ alloc(layout)
+ };
+ let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
+ Ok(NonNull::new_unchecked(core::ptr::slice_from_raw_parts_mut(
+ ptr.as_ptr(),
+ size,
+ )))
+ },
+ }
+ }
+
+ // SAFETY: Same as `Allocator::grow`
+ #[inline(always)]
+ unsafe fn grow_impl(
+ &self,
+ ptr: NonNull<u8>,
+ old_layout: Layout,
+ new_layout: Layout,
+ zeroed: bool,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ debug_assert!(
+ new_layout.size() >= old_layout.size(),
+ "`new_layout.size()` must be greater than or equal to `old_layout.size()`"
+ );
+
+ match old_layout.size() {
+ 0 => self.alloc_impl(new_layout, zeroed),
+
+ // SAFETY: `new_size` is non-zero, as the safety conditions require `new_size >= old_size`
+ // and `old_size` is non-zero in this arm. Other conditions must be upheld by the caller
+ old_size if old_layout.align() == new_layout.align() => unsafe {
+ let new_size = new_layout.size();
+
+ // `realloc` probably checks for `new_size >= old_layout.size()` or something similar.
+ assume(new_size >= old_layout.size());
+
+ let raw_ptr = realloc(ptr.as_ptr(), old_layout, new_size);
+ let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
+ if zeroed {
+ raw_ptr.add(old_size).write_bytes(0, new_size - old_size);
+ }
+ Ok(NonNull::new_unchecked(core::ptr::slice_from_raw_parts_mut(
+ ptr.as_ptr(),
+ new_size,
+ )))
+ },
+
+ // SAFETY: because `new_layout.size()` must be greater than or equal to `old_size`,
+ // both the old and new memory allocation are valid for reads and writes for `old_size`
+ // bytes. Also, because the old allocation wasn't yet deallocated, it cannot overlap
+ // `new_ptr`. Thus, the call to `copy_nonoverlapping` is safe. The safety contract
+ // for `dealloc` must be upheld by the caller.
+ old_size => unsafe {
+ let new_ptr = self.alloc_impl(new_layout, zeroed)?;
+ core::ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr().cast(), old_size);
+ self.deallocate(ptr, old_layout);
+ Ok(new_ptr)
+ },
+ }
+ }
+}
+
+unsafe impl Allocator for Global {
+ #[inline(always)]
+ fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
+ self.alloc_impl(layout, false)
+ }
+
+ #[inline(always)]
+ fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
+ self.alloc_impl(layout, true)
+ }
+
+ #[inline(always)]
+ unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
+ if layout.size() != 0 {
+ // SAFETY: `layout` is non-zero in size,
+ // other conditions must be upheld by the caller
+ unsafe { dealloc(ptr.as_ptr(), layout) }
+ }
+ }
+
+ #[inline(always)]
+ unsafe fn grow(
+ &self,
+ ptr: NonNull<u8>,
+ old_layout: Layout,
+ new_layout: Layout,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ // SAFETY: all conditions must be upheld by the caller
+ unsafe { self.grow_impl(ptr, old_layout, new_layout, false) }
+ }
+
+ #[inline(always)]
+ unsafe fn grow_zeroed(
+ &self,
+ ptr: NonNull<u8>,
+ old_layout: Layout,
+ new_layout: Layout,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ // SAFETY: all conditions must be upheld by the caller
+ unsafe { self.grow_impl(ptr, old_layout, new_layout, true) }
+ }
+
+ #[inline(always)]
+ unsafe fn shrink(
+ &self,
+ ptr: NonNull<u8>,
+ old_layout: Layout,
+ new_layout: Layout,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ debug_assert!(
+ new_layout.size() <= old_layout.size(),
+ "`new_layout.size()` must be smaller than or equal to `old_layout.size()`"
+ );
+
+ match new_layout.size() {
+ // SAFETY: conditions must be upheld by the caller
+ 0 => unsafe {
+ self.deallocate(ptr, old_layout);
+ Ok(NonNull::new_unchecked(core::ptr::slice_from_raw_parts_mut(
+ invalid_mut(new_layout.align()),
+ 0,
+ )))
+ },
+
+ // SAFETY: `new_size` is non-zero. Other conditions must be upheld by the caller
+ new_size if old_layout.align() == new_layout.align() => unsafe {
+ // `realloc` probably checks for `new_size <= old_layout.size()` or something similar.
+ assume(new_size <= old_layout.size());
+
+ let raw_ptr = realloc(ptr.as_ptr(), old_layout, new_size);
+ let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
+ Ok(NonNull::new_unchecked(core::ptr::slice_from_raw_parts_mut(
+ ptr.as_ptr(),
+ new_size,
+ )))
+ },
+
+ // SAFETY: because `new_size` must be smaller than or equal to `old_layout.size()`,
+ // both the old and new memory allocation are valid for reads and writes for `new_size`
+ // bytes. Also, because the old allocation wasn't yet deallocated, it cannot overlap
+ // `new_ptr`. Thus, the call to `copy_nonoverlapping` is safe. The safety contract
+ // for `dealloc` must be upheld by the caller.
+ new_size => unsafe {
+ let new_ptr = self.allocate(new_layout)?;
+ core::ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr().cast(), new_size);
+ self.deallocate(ptr, old_layout);
+ Ok(new_ptr)
+ },
+ }
+ }
+}
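
Note how `alloc_impl` above never calls the underlying allocator for zero-sized layouts: it returns a dangling pointer with the layout's alignment, and `deallocate` correspondingly does nothing for zero-sized blocks. A small sketch exercising that path (illustrative, assumes the default stable configuration):

    use allocator_api2::alloc::{Allocator, Global, Layout};

    fn main() {
        // Zero-sized layout: no real allocation takes place.
        let layout = Layout::from_size_align(0, 8).unwrap();
        let block = Global.allocate(layout).unwrap();

        // The returned pointer is dangling but non-null and aligned.
        let ptr: *mut u8 = block.cast::<u8>().as_ptr();
        assert!(!ptr.is_null());
        assert_eq!(ptr as usize % 8, 0);

        // SAFETY: `block` came from `Global` with `layout`; for a
        // zero-sized layout this is a no-op.
        unsafe { Global.deallocate(block.cast(), layout) };
    }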
diff --git a/vendor/allocator-api2/src/stable/alloc/mod.rs b/vendor/allocator-api2/src/stable/alloc/mod.rs
new file mode 100644
index 000000000..6a50b4344
--- /dev/null
+++ b/vendor/allocator-api2/src/stable/alloc/mod.rs
@@ -0,0 +1,416 @@
+//! Memory allocation APIs
+
+use core::{
+ fmt,
+ ptr::{self, NonNull},
+};
+
+#[cfg(feature = "alloc")]
+mod global;
+
+#[cfg(feature = "std")]
+mod system;
+
+pub use core::alloc::{GlobalAlloc, Layout, LayoutError};
+
+#[cfg(feature = "alloc")]
+pub use self::global::Global;
+
+#[cfg(feature = "std")]
+pub use self::system::System;
+
+#[cfg(feature = "alloc")]
+pub use alloc_crate::alloc::{alloc, alloc_zeroed, dealloc, realloc};
+
+#[cfg(all(feature = "alloc", not(no_global_oom_handling)))]
+pub use alloc_crate::alloc::handle_alloc_error;
+
+/// The `AllocError` error indicates an allocation failure
+/// that may be due to resource exhaustion or to
+/// something wrong when combining the given input arguments with this
+/// allocator.
+#[derive(Copy, Clone, PartialEq, Eq, Debug)]
+pub struct AllocError;
+
+#[cfg(feature = "std")]
+impl std::error::Error for AllocError {}
+
+// (we need this for downstream impl of trait Error)
+impl fmt::Display for AllocError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str("memory allocation failed")
+ }
+}
+
+/// An implementation of `Allocator` can allocate, grow, shrink, and deallocate arbitrary blocks of
+/// data described via [`Layout`][].
+///
+/// `Allocator` is designed to be implemented on ZSTs, references, or smart pointers because an
+/// allocator such as `MyAlloc([u8; N])` cannot be moved without updating the pointers to the
+/// allocated memory.
+///
+/// Unlike [`GlobalAlloc`][], zero-sized allocations are allowed in `Allocator`. If an underlying
+/// allocator does not support this (like jemalloc) or returns a null pointer (such as
+/// `libc::malloc`), this must be caught by the implementation.
+///
+/// ### Currently allocated memory
+///
+/// Some of the methods require that a memory block be *currently allocated* via an allocator. This
+/// means that:
+///
+/// * the starting address for that memory block was previously returned by [`allocate`], [`grow`], or
+/// [`shrink`], and
+///
+/// * the memory block has not been subsequently deallocated, where blocks are either deallocated
+/// directly by being passed to [`deallocate`] or were changed by being passed to [`grow`] or
+/// [`shrink`] that returns `Ok`. If `grow` or `shrink` have returned `Err`, the passed pointer
+/// remains valid.
+///
+/// [`allocate`]: Allocator::allocate
+/// [`grow`]: Allocator::grow
+/// [`shrink`]: Allocator::shrink
+/// [`deallocate`]: Allocator::deallocate
+///
+/// ### Memory fitting
+///
+/// Some of the methods require that a layout *fit* a memory block. What it means for a layout to
+/// "fit" a memory block (or equivalently, for a memory block to "fit" a layout) is that the
+/// following conditions must hold:
+///
+/// * The block must be allocated with the same alignment as [`layout.align()`], and
+///
+/// * The provided [`layout.size()`] must fall in the range `min ..= max`, where:
+/// - `min` is the size of the layout most recently used to allocate the block, and
+/// - `max` is the latest actual size returned from [`allocate`], [`grow`], or [`shrink`].
+///
+/// [`layout.align()`]: Layout::align
+/// [`layout.size()`]: Layout::size
+///
+/// # Safety
+///
+/// * Memory blocks returned from an allocator must point to valid memory and retain their validity
+/// until the instance and all of its clones are dropped,
+///
+/// * cloning or moving the allocator must not invalidate memory blocks returned from this
+/// allocator. A cloned allocator must behave like the same allocator, and
+///
+/// * any pointer to a memory block which is [*currently allocated*] may be passed to any other
+/// method of the allocator.
+///
+/// [*currently allocated*]: #currently-allocated-memory
+pub unsafe trait Allocator {
+ /// Attempts to allocate a block of memory.
+ ///
+ /// On success, returns a [`NonNull<[u8]>`][NonNull] meeting the size and alignment guarantees of `layout`.
+ ///
+ /// The returned block may have a larger size than specified by `layout.size()`, and may or may
+ /// not have its contents initialized.
+ ///
+ /// # Errors
+ ///
+ /// Returning `Err` indicates that either memory is exhausted or `layout` does not meet
+ /// allocator's size or alignment constraints.
+ ///
+ /// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or
+ /// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement
+ /// this trait atop an underlying native allocation library that aborts on memory exhaustion.)
+ ///
+ /// Clients wishing to abort computation in response to an allocation error are encouraged to
+ /// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar.
+ ///
+ /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
+ fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError>;
+
+ /// Behaves like `allocate`, but also ensures that the returned memory is zero-initialized.
+ ///
+ /// # Errors
+ ///
+ /// Returning `Err` indicates that either memory is exhausted or `layout` does not meet
+ /// allocator's size or alignment constraints.
+ ///
+ /// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or
+ /// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement
+ /// this trait atop an underlying native allocation library that aborts on memory exhaustion.)
+ ///
+ /// Clients wishing to abort computation in response to an allocation error are encouraged to
+ /// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar.
+ ///
+ /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
+ #[inline(always)]
+ fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
+ let ptr = self.allocate(layout)?;
+ // SAFETY: `alloc` returns a valid memory block
+ unsafe { ptr.cast::<u8>().as_ptr().write_bytes(0, ptr.len()) }
+ Ok(ptr)
+ }
+
+ /// Deallocates the memory referenced by `ptr`.
+ ///
+ /// # Safety
+ ///
+ /// * `ptr` must denote a block of memory [*currently allocated*] via this allocator, and
+ /// * `layout` must [*fit*] that block of memory.
+ ///
+ /// [*currently allocated*]: #currently-allocated-memory
+ /// [*fit*]: #memory-fitting
+ unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout);
+
+ /// Attempts to extend the memory block.
+ ///
+ /// Returns a new [`NonNull<[u8]>`][NonNull] containing a pointer and the actual size of the allocated
+ /// memory. The pointer is suitable for holding data described by `new_layout`. To accomplish
+ /// this, the allocator may extend the allocation referenced by `ptr` to fit the new layout.
+ ///
+ /// If this returns `Ok`, then ownership of the memory block referenced by `ptr` has been
+ /// transferred to this allocator. Any access to the old `ptr` is Undefined Behavior, even if the
+ /// allocation was grown in-place. The newly returned pointer is the only valid pointer
+ /// for accessing this memory now.
+ ///
+ /// If this method returns `Err`, then ownership of the memory block has not been transferred to
+ /// this allocator, and the contents of the memory block are unaltered.
+ ///
+ /// # Safety
+ ///
+ /// * `ptr` must denote a block of memory [*currently allocated*] via this allocator.
+ /// * `old_layout` must [*fit*] that block of memory (The `new_layout` argument need not fit it.).
+ /// * `new_layout.size()` must be greater than or equal to `old_layout.size()`.
+ ///
+ /// Note that `new_layout.align()` need not be the same as `old_layout.align()`.
+ ///
+ /// [*currently allocated*]: #currently-allocated-memory
+ /// [*fit*]: #memory-fitting
+ ///
+ /// # Errors
+ ///
+ /// Returns `Err` if the new layout does not meet the size and alignment
+ /// constraints of the allocator, or if growing otherwise fails.
+ ///
+ /// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or
+ /// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement
+ /// this trait atop an underlying native allocation library that aborts on memory exhaustion.)
+ ///
+ /// Clients wishing to abort computation in response to an allocation error are encouraged to
+ /// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar.
+ ///
+ /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
+ #[inline(always)]
+ unsafe fn grow(
+ &self,
+ ptr: NonNull<u8>,
+ old_layout: Layout,
+ new_layout: Layout,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ debug_assert!(
+ new_layout.size() >= old_layout.size(),
+ "`new_layout.size()` must be greater than or equal to `old_layout.size()`"
+ );
+
+ let new_ptr = self.allocate(new_layout)?;
+
+ // SAFETY: because `new_layout.size()` must be greater than or equal to
+ // `old_layout.size()`, both the old and new memory allocation are valid for reads and
+ // writes for `old_layout.size()` bytes. Also, because the old allocation wasn't yet
+ // deallocated, it cannot overlap `new_ptr`. Thus, the call to `copy_nonoverlapping` is
+ // safe. The safety contract for `dealloc` must be upheld by the caller.
+ unsafe {
+ ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr().cast(), old_layout.size());
+ self.deallocate(ptr, old_layout);
+ }
+
+ Ok(new_ptr)
+ }
+
+ /// Behaves like `grow`, but also ensures that the new contents are set to zero before being
+ /// returned.
+ ///
+ /// The memory block will contain the following contents after a successful call to
+ /// `grow_zeroed`:
+ /// * Bytes `0..old_layout.size()` are preserved from the original allocation.
+ /// * Bytes `old_layout.size()..old_size` will either be preserved or zeroed, depending on
+ /// the allocator implementation. `old_size` refers to the size of the memory block prior
+ /// to the `grow_zeroed` call, which may be larger than the size that was originally
+ /// requested when it was allocated.
+ /// * Bytes `old_size..new_size` are zeroed. `new_size` refers to the size of the memory
+ /// block returned by the `grow_zeroed` call.
+ ///
+ /// # Safety
+ ///
+ /// * `ptr` must denote a block of memory [*currently allocated*] via this allocator.
+ /// * `old_layout` must [*fit*] that block of memory (The `new_layout` argument need not fit it.).
+ /// * `new_layout.size()` must be greater than or equal to `old_layout.size()`.
+ ///
+ /// Note that `new_layout.align()` need not be the same as `old_layout.align()`.
+ ///
+ /// [*currently allocated*]: #currently-allocated-memory
+ /// [*fit*]: #memory-fitting
+ ///
+ /// # Errors
+ ///
+ /// Returns `Err` if the new layout does not meet the size and alignment
+ /// constraints of the allocator, or if growing otherwise fails.
+ ///
+ /// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or
+ /// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement
+ /// this trait atop an underlying native allocation library that aborts on memory exhaustion.)
+ ///
+ /// Clients wishing to abort computation in response to an allocation error are encouraged to
+ /// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar.
+ ///
+ /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
+ #[inline(always)]
+ unsafe fn grow_zeroed(
+ &self,
+ ptr: NonNull<u8>,
+ old_layout: Layout,
+ new_layout: Layout,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ debug_assert!(
+ new_layout.size() >= old_layout.size(),
+ "`new_layout.size()` must be greater than or equal to `old_layout.size()`"
+ );
+
+ let new_ptr = self.allocate_zeroed(new_layout)?;
+
+ // SAFETY: because `new_layout.size()` must be greater than or equal to
+ // `old_layout.size()`, both the old and new memory allocation are valid for reads and
+ // writes for `old_layout.size()` bytes. Also, because the old allocation wasn't yet
+ // deallocated, it cannot overlap `new_ptr`. Thus, the call to `copy_nonoverlapping` is
+ // safe. The safety contract for `dealloc` must be upheld by the caller.
+ unsafe {
+ ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr().cast(), old_layout.size());
+ self.deallocate(ptr, old_layout);
+ }
+
+ Ok(new_ptr)
+ }
+
+ /// Attempts to shrink the memory block.
+ ///
+ /// Returns a new [`NonNull<[u8]>`][NonNull] containing a pointer and the actual size of the allocated
+ /// memory. The pointer is suitable for holding data described by `new_layout`. To accomplish
+ /// this, the allocator may shrink the allocation referenced by `ptr` to fit the new layout.
+ ///
+ /// If this returns `Ok`, then ownership of the memory block referenced by `ptr` has been
+ /// transferred to this allocator. Any access to the old `ptr` is Undefined Behavior, even if the
+ /// allocation was shrunk in-place. The newly returned pointer is the only valid pointer
+ /// for accessing this memory now.
+ ///
+ /// If this method returns `Err`, then ownership of the memory block has not been transferred to
+ /// this allocator, and the contents of the memory block are unaltered.
+ ///
+ /// # Safety
+ ///
+ /// * `ptr` must denote a block of memory [*currently allocated*] via this allocator.
+ /// * `old_layout` must [*fit*] that block of memory (The `new_layout` argument need not fit it.).
+ /// * `new_layout.size()` must be smaller than or equal to `old_layout.size()`.
+ ///
+ /// Note that `new_layout.align()` need not be the same as `old_layout.align()`.
+ ///
+ /// [*currently allocated*]: #currently-allocated-memory
+ /// [*fit*]: #memory-fitting
+ ///
+ /// # Errors
+ ///
+ /// Returns `Err` if the new layout does not meet the size and alignment
+ /// constraints of the allocator, or if shrinking otherwise fails.
+ ///
+ /// Implementations are encouraged to return `Err` on memory exhaustion rather than panicking or
+ /// aborting, but this is not a strict requirement. (Specifically: it is *legal* to implement
+ /// this trait atop an underlying native allocation library that aborts on memory exhaustion.)
+ ///
+ /// Clients wishing to abort computation in response to an allocation error are encouraged to
+ /// call the [`handle_alloc_error`] function, rather than directly invoking `panic!` or similar.
+ ///
+ /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
+ #[inline(always)]
+ unsafe fn shrink(
+ &self,
+ ptr: NonNull<u8>,
+ old_layout: Layout,
+ new_layout: Layout,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ debug_assert!(
+ new_layout.size() <= old_layout.size(),
+ "`new_layout.size()` must be smaller than or equal to `old_layout.size()`"
+ );
+
+ let new_ptr = self.allocate(new_layout)?;
+
+ // SAFETY: because `new_layout.size()` must be lower than or equal to
+ // `old_layout.size()`, both the old and new memory allocation are valid for reads and
+ // writes for `new_layout.size()` bytes. Also, because the old allocation wasn't yet
+ // deallocated, it cannot overlap `new_ptr`. Thus, the call to `copy_nonoverlapping` is
+ // safe. The safety contract for `dealloc` must be upheld by the caller.
+ unsafe {
+ ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr().cast(), new_layout.size());
+ self.deallocate(ptr, old_layout);
+ }
+
+ Ok(new_ptr)
+ }
+
+ /// Creates a "by reference" adapter for this instance of `Allocator`.
+ ///
+ /// The returned adapter also implements `Allocator` and will simply borrow this.
+ #[inline(always)]
+ fn by_ref(&self) -> &Self
+ where
+ Self: Sized,
+ {
+ self
+ }
+}
+
+unsafe impl<A> Allocator for &A
+where
+ A: Allocator + ?Sized,
+{
+ #[inline(always)]
+ fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
+ (**self).allocate(layout)
+ }
+
+ #[inline(always)]
+ fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
+ (**self).allocate_zeroed(layout)
+ }
+
+ #[inline(always)]
+ unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
+ // SAFETY: the safety contract must be upheld by the caller
+ unsafe { (**self).deallocate(ptr, layout) }
+ }
+
+ #[inline(always)]
+ unsafe fn grow(
+ &self,
+ ptr: NonNull<u8>,
+ old_layout: Layout,
+ new_layout: Layout,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ // SAFETY: the safety contract must be upheld by the caller
+ unsafe { (**self).grow(ptr, old_layout, new_layout) }
+ }
+
+ #[inline(always)]
+ unsafe fn grow_zeroed(
+ &self,
+ ptr: NonNull<u8>,
+ old_layout: Layout,
+ new_layout: Layout,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ // SAFETY: the safety contract must be upheld by the caller
+ unsafe { (**self).grow_zeroed(ptr, old_layout, new_layout) }
+ }
+
+ #[inline(always)]
+ unsafe fn shrink(
+ &self,
+ ptr: NonNull<u8>,
+ old_layout: Layout,
+ new_layout: Layout,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ // SAFETY: the safety contract must be upheld by the caller
+ unsafe { (**self).shrink(ptr, old_layout, new_layout) }
+ }
+}
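
To make the `Allocator` contract above concrete, here is a minimal sketch of a tracking allocator that delegates to `Global` and counts live allocations. The type `Counting` is hypothetical and for illustration only; the default `grow`/`shrink` implementations route through `allocate`/`deallocate`, so the count stays accurate without overriding them.

    use core::ptr::NonNull;
    use core::sync::atomic::{AtomicUsize, Ordering};

    use allocator_api2::alloc::{AllocError, Allocator, Global, Layout};

    /// Hypothetical allocator: delegates to `Global`, counts live blocks.
    #[derive(Default)]
    struct Counting {
        live: AtomicUsize,
    }

    // SAFETY: all real work is forwarded to `Global`, which upholds the
    // `Allocator` contract; the counter does not touch returned blocks.
    unsafe impl Allocator for Counting {
        fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
            let block = Global.allocate(layout)?;
            self.live.fetch_add(1, Ordering::Relaxed);
            Ok(block)
        }

        unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
            self.live.fetch_sub(1, Ordering::Relaxed);
            // SAFETY: the caller guarantees `ptr` is currently allocated
            // via this allocator (hence via `Global`) and `layout` fits it.
            Global.deallocate(ptr, layout);
        }
    }

    fn main() {
        let a = Counting::default();
        let layout = Layout::new::<[u8; 64]>();
        let block = a.allocate(layout).unwrap();
        assert_eq!(a.live.load(Ordering::Relaxed), 1);
        // SAFETY: `block` was allocated by `a` with `layout`.
        unsafe { a.deallocate(block.cast(), layout) };
        assert_eq!(a.live.load(Ordering::Relaxed), 0);
    }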
diff --git a/vendor/allocator-api2/src/stable/alloc/system.rs b/vendor/allocator-api2/src/stable/alloc/system.rs
new file mode 100644
index 000000000..e733d0f7f
--- /dev/null
+++ b/vendor/allocator-api2/src/stable/alloc/system.rs
@@ -0,0 +1,172 @@
+use core::ptr::NonNull;
+pub use std::alloc::System;
+
+use crate::stable::{assume, invalid_mut};
+
+use super::{AllocError, Allocator, GlobalAlloc as _, Layout};
+
+unsafe impl Allocator for System {
+ #[inline(always)]
+ fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
+ alloc_impl(layout, false)
+ }
+
+ #[inline(always)]
+ fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
+ alloc_impl(layout, true)
+ }
+
+ #[inline(always)]
+ unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
+ if layout.size() != 0 {
+ // SAFETY: `layout` is non-zero in size,
+ // other conditions must be upheld by the caller
+ unsafe { System.dealloc(ptr.as_ptr(), layout) }
+ }
+ }
+
+ #[inline(always)]
+ unsafe fn grow(
+ &self,
+ ptr: NonNull<u8>,
+ old_layout: Layout,
+ new_layout: Layout,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ // SAFETY: all conditions must be upheld by the caller
+ unsafe { grow_impl(ptr, old_layout, new_layout, false) }
+ }
+
+ #[inline(always)]
+ unsafe fn grow_zeroed(
+ &self,
+ ptr: NonNull<u8>,
+ old_layout: Layout,
+ new_layout: Layout,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ // SAFETY: all conditions must be upheld by the caller
+ unsafe { grow_impl(ptr, old_layout, new_layout, true) }
+ }
+
+ #[inline(always)]
+ unsafe fn shrink(
+ &self,
+ ptr: NonNull<u8>,
+ old_layout: Layout,
+ new_layout: Layout,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ debug_assert!(
+ new_layout.size() <= old_layout.size(),
+ "`new_layout.size()` must be smaller than or equal to `old_layout.size()`"
+ );
+
+ match new_layout.size() {
+ // SAFETY: conditions must be upheld by the caller
+ 0 => unsafe {
+ self.deallocate(ptr, old_layout);
+ Ok(NonNull::new_unchecked(core::ptr::slice_from_raw_parts_mut(
+ invalid_mut(new_layout.align()),
+ 0,
+ )))
+ },
+
+ // SAFETY: `new_size` is non-zero. Other conditions must be upheld by the caller
+ new_size if old_layout.align() == new_layout.align() => unsafe {
+ // `realloc` probably checks for `new_size <= old_layout.size()` or something similar.
+ assume(new_size <= old_layout.size());
+
+ let raw_ptr = System.realloc(ptr.as_ptr(), old_layout, new_size);
+ let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
+ Ok(NonNull::new_unchecked(core::ptr::slice_from_raw_parts_mut(
+ ptr.as_ptr(),
+ new_size,
+ )))
+ },
+
+ // SAFETY: because `new_size` must be smaller than or equal to `old_layout.size()`,
+ // both the old and new memory allocation are valid for reads and writes for `new_size`
+ // bytes. Also, because the old allocation wasn't yet deallocated, it cannot overlap
+ // `new_ptr`. Thus, the call to `copy_nonoverlapping` is safe. The safety contract
+ // for `dealloc` must be upheld by the caller.
+ new_size => unsafe {
+ let new_ptr = self.allocate(new_layout)?;
+ core::ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr().cast(), new_size);
+ self.deallocate(ptr, old_layout);
+ Ok(new_ptr)
+ },
+ }
+ }
+}
+
+#[inline(always)]
+fn alloc_impl(layout: Layout, zeroed: bool) -> Result<NonNull<[u8]>, AllocError> {
+ match layout.size() {
+ 0 => Ok(unsafe {
+ NonNull::new_unchecked(core::ptr::slice_from_raw_parts_mut(
+ invalid_mut(layout.align()),
+ 0,
+ ))
+ }),
+ // SAFETY: `layout` is non-zero in size,
+ size => unsafe {
+ let raw_ptr = if zeroed {
+ System.alloc_zeroed(layout)
+ } else {
+ System.alloc(layout)
+ };
+ let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
+ Ok(NonNull::new_unchecked(core::ptr::slice_from_raw_parts_mut(
+ ptr.as_ptr(),
+ size,
+ )))
+ },
+ }
+}
+
+// SAFETY: Same as `Allocator::grow`
+#[inline(always)]
+unsafe fn grow_impl(
+ ptr: NonNull<u8>,
+ old_layout: Layout,
+ new_layout: Layout,
+ zeroed: bool,
+) -> Result<NonNull<[u8]>, AllocError> {
+ debug_assert!(
+ new_layout.size() >= old_layout.size(),
+ "`new_layout.size()` must be greater than or equal to `old_layout.size()`"
+ );
+
+ match old_layout.size() {
+ 0 => alloc_impl(new_layout, zeroed),
+
+ // SAFETY: `new_size` is non-zero, as the safety conditions require `new_size >= old_size`
+ // and `old_size` is non-zero in this arm. Other conditions must be upheld by the caller
+ old_size if old_layout.align() == new_layout.align() => unsafe {
+ let new_size = new_layout.size();
+
+ // `realloc` probably checks for `new_size >= old_layout.size()` or something similar.
+ assume(new_size >= old_layout.size());
+
+ let raw_ptr = System.realloc(ptr.as_ptr(), old_layout, new_size);
+ let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
+ if zeroed {
+ raw_ptr.add(old_size).write_bytes(0, new_size - old_size);
+ }
+ Ok(NonNull::new_unchecked(core::ptr::slice_from_raw_parts_mut(
+ ptr.as_ptr(),
+ new_size,
+ )))
+ },
+
+ // SAFETY: because `new_layout.size()` must be greater than or equal to `old_size`,
+ // both the old and new memory allocation are valid for reads and writes for `old_size`
+ // bytes. Also, because the old allocation wasn't yet deallocated, it cannot overlap
+ // `new_ptr`. Thus, the call to `copy_nonoverlapping` is safe. The safety contract
+ // for `dealloc` must be upheld by the caller.
+ old_size => unsafe {
+ let new_ptr = alloc_impl(new_layout, zeroed)?;
+ core::ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_ptr().cast(), old_size);
+ System.deallocate(ptr, old_layout);
+ Ok(new_ptr)
+ },
+ }
+}
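
The same trait is implemented for `System`, so the system allocator can be used anywhere an `Allocator` is expected. The sketch below (illustrative, and assuming the default stable configuration, since the nightly re-exports do not include `System`) shows the `grow` contract in action: once `grow` returns `Ok`, only the returned pointer may be used, even if the block was extended in place.

    use allocator_api2::alloc::{Allocator, Layout, System};

    fn main() {
        let old_layout = Layout::array::<u8>(16).unwrap();
        let new_layout = Layout::array::<u8>(64).unwrap();

        let block = System.allocate(old_layout).unwrap();

        // SAFETY: `block` is currently allocated with `old_layout`, and
        // `new_layout.size() >= old_layout.size()`.
        let grown = unsafe {
            System.grow(block.cast(), old_layout, new_layout).unwrap()
        };
        // `block` must no longer be touched; `grown` is the live block.

        // SAFETY: `grown` is currently allocated with `new_layout`.
        unsafe { System.deallocate(grown.cast(), new_layout) };
    }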
diff --git a/vendor/allocator-api2/src/stable/boxed.rs b/vendor/allocator-api2/src/stable/boxed.rs
new file mode 100644
index 000000000..3c342c54a
--- /dev/null
+++ b/vendor/allocator-api2/src/stable/boxed.rs
@@ -0,0 +1,2154 @@
+//! The `Box<T>` type for heap allocation.
+//!
+//! [`Box<T>`], casually referred to as a 'box', provides the simplest form of
+//! heap allocation in Rust. Boxes provide ownership for this allocation, and
+//! drop their contents when they go out of scope. Boxes also ensure that they
+//! never allocate more than `isize::MAX` bytes.
+//!
+//! # Examples
+//!
+//! Move a value from the stack to the heap by creating a [`Box`]:
+//!
+//! ```
+//! let val: u8 = 5;
+//! let boxed: Box<u8> = Box::new(val);
+//! ```
+//!
+//! Move a value from a [`Box`] back to the stack by [dereferencing]:
+//!
+//! ```
+//! let boxed: Box<u8> = Box::new(5);
+//! let val: u8 = *boxed;
+//! ```
+//!
+//! Creating a recursive data structure:
+//!
+//! ```
+//! #[derive(Debug)]
+//! enum List<T> {
+//! Cons(T, Box<List<T>>),
+//! Nil,
+//! }
+//!
+//! let list: List<i32> = List::Cons(1, Box::new(List::Cons(2, Box::new(List::Nil))));
+//! println!("{list:?}");
+//! ```
+//!
+//! This will print `Cons(1, Cons(2, Nil))`.
+//!
+//! Recursive structures must be boxed, because if the definition of `Cons`
+//! looked like this:
+//!
+//! ```compile_fail,E0072
+//! # enum List<T> {
+//! Cons(T, List<T>),
+//! # }
+//! ```
+//!
+//! It wouldn't work. This is because the size of a `List` depends on how many
+//! elements are in the list, and so we don't know how much memory to allocate
+//! for a `Cons`. By introducing a [`Box<T>`], which has a defined size, we know how
+//! big `Cons` needs to be.
+//!
+//! # Memory layout
+//!
+//! For non-zero-sized values, a [`Box`] will use the [`Global`] allocator for
+//! its allocation. It is valid to convert both ways between a [`Box`] and a
+//! raw pointer allocated with the [`Global`] allocator, given that the
+//! [`Layout`] used with the allocator is correct for the type. More precisely,
+//! a `value: *mut T` that has been allocated with the [`Global`] allocator
+//! with `Layout::for_value(&*value)` may be converted into a box using
+//! [`Box::<T>::from_raw(value)`]. Conversely, the memory backing a `value: *mut
+//! T` obtained from [`Box::<T>::into_raw`] may be deallocated using the
+//! [`Global`] allocator with [`Layout::for_value(&*value)`].
+//!
+//! For zero-sized values, the `Box` pointer still has to be [valid] for reads
+//! and writes and sufficiently aligned. In particular, casting any aligned
+//! non-zero integer literal to a raw pointer produces a valid pointer, but a
+//! pointer pointing into previously allocated memory that since got freed is
+//! not valid. The recommended way to build a Box to a ZST if `Box::new` cannot
+//! be used is to use [`ptr::NonNull::dangling`].
+//!
+//! So long as `T: Sized`, a `Box<T>` is guaranteed to be represented
+//! as a single pointer and is also ABI-compatible with C pointers
+//! (i.e. the C type `T*`). This means that if you have extern "C"
+//! Rust functions that will be called from C, you can define those
+//! Rust functions using `Box<T>` types, and use `T*` as corresponding
+//! type on the C side. As an example, consider this C header which
+//! declares functions that create and destroy some kind of `Foo`
+//! value:
+//!
+//! ```c
+//! /* C header */
+//!
+//! /* Returns ownership to the caller */
+//! struct Foo* foo_new(void);
+//!
+//! /* Takes ownership from the caller; no-op when invoked with null */
+//! void foo_delete(struct Foo*);
+//! ```
+//!
+//! These two functions might be implemented in Rust as follows. Here, the
+//! `struct Foo*` type from C is translated to `Box<Foo>`, which captures
+//! the ownership constraints. Note also that the nullable argument to
+//! `foo_delete` is represented in Rust as `Option<Box<Foo>>`, since `Box<Foo>`
+//! cannot be null.
+//!
+//! ```
+//! #[repr(C)]
+//! pub struct Foo;
+//!
+//! #[no_mangle]
+//! pub extern "C" fn foo_new() -> Box<Foo> {
+//! Box::new(Foo)
+//! }
+//!
+//! #[no_mangle]
+//! pub extern "C" fn foo_delete(_: Option<Box<Foo>>) {}
+//! ```
+//!
+//! Even though `Box<T>` has the same representation and C ABI as a C pointer,
+//! this does not mean that you can convert an arbitrary `T*` into a `Box<T>`
+//! and expect things to work. `Box<T>` values will always be fully aligned,
+//! non-null pointers. Moreover, the destructor for `Box<T>` will attempt to
+//! free the value with the global allocator. In general, the best practice
+//! is to only use `Box<T>` for pointers that originated from the global
+//! allocator.
+//!
+//! **Important.** At least at present, you should avoid using
+//! `Box<T>` types for functions that are defined in C but invoked
+//! from Rust. In those cases, you should directly mirror the C types
+//! as closely as possible. Using types like `Box<T>` where the C
+//! definition is just using `T*` can lead to undefined behavior, as
+//! described in [rust-lang/unsafe-code-guidelines#198][ucg#198].
+//!
+//! # Considerations for unsafe code
+//!
+//! **Warning: This section is not normative and is subject to change, possibly
+//! being relaxed in the future! It is a simplified summary of the rules
+//! currently implemented in the compiler.**
+//!
+//! The aliasing rules for `Box<T>` are the same as for `&mut T`. `Box<T>`
+//! asserts uniqueness over its content. Using raw pointers derived from a box
+//! after that box has been mutated through, moved or borrowed as `&mut T`
+//! is not allowed. For more guidance on working with box from unsafe code, see
+//! [rust-lang/unsafe-code-guidelines#326][ucg#326].
+//!
+//!
+//! [ucg#198]: https://github.com/rust-lang/unsafe-code-guidelines/issues/198
+//! [ucg#326]: https://github.com/rust-lang/unsafe-code-guidelines/issues/326
+//! [dereferencing]: core::ops::Deref
+//! [`Box::<T>::from_raw(value)`]: Box::from_raw
+//! [`Global`]: crate::alloc::Global
+//! [`Layout`]: crate::alloc::Layout
+//! [`Layout::for_value(&*value)`]: crate::alloc::Layout::for_value
+//! [valid]: ptr#safety
+
+use core::any::Any;
+use core::borrow;
+use core::cmp::Ordering;
+use core::convert::{From, TryFrom};
+
+// use core::error::Error;
+use core::fmt;
+use core::future::Future;
+use core::hash::{Hash, Hasher};
+#[cfg(not(no_global_oom_handling))]
+use core::iter::FromIterator;
+use core::iter::{FusedIterator, Iterator};
+use core::marker::Unpin;
+use core::mem;
+use core::ops::{Deref, DerefMut};
+use core::pin::Pin;
+use core::ptr::{self, NonNull};
+use core::task::{Context, Poll};
+
+use super::alloc::{AllocError, Allocator, Global, Layout};
+use super::raw_vec::RawVec;
+#[cfg(not(no_global_oom_handling))]
+use super::vec::Vec;
+#[cfg(not(no_global_oom_handling))]
+use alloc_crate::alloc::handle_alloc_error;
+
+/// A pointer type for heap allocation.
+///
+/// See the [module-level documentation](../../std/boxed/index.html) for more.
+pub struct Box<T: ?Sized, A: Allocator = Global>(NonNull<T>, A);
+
+// Safety: Box owns both T and A, so sending is safe if
+// sending is safe for T and A.
+unsafe impl<T: ?Sized, A: Allocator> Send for Box<T, A>
+where
+ T: Send,
+ A: Send,
+{
+}
+
+// Safety: Box owns both T and A, so sharing is safe if
+// sharing is safe for T and A.
+unsafe impl<T: ?Sized, A: Allocator> Sync for Box<T, A>
+where
+ T: Sync,
+ A: Sync,
+{
+}
+
+impl<T> Box<T> {
+ /// Allocates memory on the heap and then places `x` into it.
+ ///
+ /// This doesn't actually allocate if `T` is zero-sized.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let five = Box::new(5);
+ /// ```
+ #[cfg(all(not(no_global_oom_handling)))]
+ #[inline(always)]
+ #[must_use]
+ pub fn new(x: T) -> Self {
+ Self::new_in(x, Global)
+ }
+
+ /// Constructs a new box with uninitialized contents.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ ///
+ /// let mut five = Box::<u32>::new_uninit();
+ ///
+ /// let five = unsafe {
+ /// // Deferred initialization:
+ /// five.as_mut_ptr().write(5);
+ ///
+ /// five.assume_init()
+ /// };
+ ///
+ /// assert_eq!(*five, 5)
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[must_use]
+ #[inline(always)]
+ pub fn new_uninit() -> Box<mem::MaybeUninit<T>> {
+ Self::new_uninit_in(Global)
+ }
+
+ /// Constructs a new `Box` with uninitialized contents, with the memory
+ /// being filled with `0` bytes.
+ ///
+ /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
+ /// of this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ ///
+ /// let zero = Box::<u32>::new_zeroed();
+ /// let zero = unsafe { zero.assume_init() };
+ ///
+ /// assert_eq!(*zero, 0)
+ /// ```
+ ///
+ /// [zeroed]: mem::MaybeUninit::zeroed
+ #[cfg(not(no_global_oom_handling))]
+ #[must_use]
+ #[inline(always)]
+ pub fn new_zeroed() -> Box<mem::MaybeUninit<T>> {
+ Self::new_zeroed_in(Global)
+ }
+
+ /// Constructs a new `Pin<Box<T>>`. If `T` does not implement [`Unpin`], then
+ /// `x` will be pinned in memory and unable to be moved.
+ ///
+ /// Constructing and pinning of the `Box` can also be done in two steps: `Box::pin(x)`
+ /// does the same as <code>[Box::into_pin]\([Box::new]\(x))</code>. Consider using
+ /// [`into_pin`](Box::into_pin) if you already have a `Box<T>`, or if you want to
+ /// construct a (pinned) `Box` in a different way than with [`Box::new`].
+ #[cfg(not(no_global_oom_handling))]
+ #[must_use]
+ #[inline(always)]
+ pub fn pin(x: T) -> Pin<Box<T>> {
+ Box::new(x).into()
+ }
+
+ /// Allocates memory on the heap then places `x` into it,
+ /// returning an error if the allocation fails
+ ///
+ /// This doesn't actually allocate if `T` is zero-sized.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api)]
+ ///
+ /// let five = Box::try_new(5)?;
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ #[inline(always)]
+ pub fn try_new(x: T) -> Result<Self, AllocError> {
+ Self::try_new_in(x, Global)
+ }
+
+ /// Constructs a new box with uninitialized contents on the heap,
+ /// returning an error if the allocation fails
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api, new_uninit)]
+ ///
+ /// let mut five = Box::<u32>::try_new_uninit()?;
+ ///
+ /// let five = unsafe {
+ /// // Deferred initialization:
+ /// five.as_mut_ptr().write(5);
+ ///
+ /// five.assume_init()
+ /// };
+ ///
+ /// assert_eq!(*five, 5);
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ #[inline(always)]
+ pub fn try_new_uninit() -> Result<Box<mem::MaybeUninit<T>>, AllocError> {
+ Box::try_new_uninit_in(Global)
+ }
+
+ /// Constructs a new `Box` with uninitialized contents, with the memory
+ /// being filled with `0` bytes on the heap
+ ///
+ /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
+ /// of this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api, new_uninit)]
+ ///
+ /// let zero = Box::<u32>::try_new_zeroed()?;
+ /// let zero = unsafe { zero.assume_init() };
+ ///
+ /// assert_eq!(*zero, 0);
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ ///
+ /// [zeroed]: mem::MaybeUninit::zeroed
+ #[inline(always)]
+ pub fn try_new_zeroed() -> Result<Box<mem::MaybeUninit<T>>, AllocError> {
+ Box::try_new_zeroed_in(Global)
+ }
+}
+
+impl<T, A: Allocator> Box<T, A> {
+ /// Allocates memory in the given allocator then places `x` into it.
+ ///
+ /// This doesn't actually allocate if `T` is zero-sized.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let five = Box::new_in(5, System);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[must_use]
+ #[inline(always)]
+ pub fn new_in(x: T, alloc: A) -> Self
+ where
+ A: Allocator,
+ {
+ let mut boxed = Self::new_uninit_in(alloc);
+ unsafe {
+ boxed.as_mut_ptr().write(x);
+ boxed.assume_init()
+ }
+ }
+
+ /// Allocates memory in the given allocator then places `x` into it,
+ /// returning an error if the allocation fails
+ ///
+ /// This doesn't actually allocate if `T` is zero-sized.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let five = Box::try_new_in(5, System)?;
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ #[inline(always)]
+ pub fn try_new_in(x: T, alloc: A) -> Result<Self, AllocError>
+ where
+ A: Allocator,
+ {
+ let mut boxed = Self::try_new_uninit_in(alloc)?;
+ unsafe {
+ boxed.as_mut_ptr().write(x);
+ Ok(boxed.assume_init())
+ }
+ }
+
+ /// Constructs a new box with uninitialized contents in the provided allocator.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api, new_uninit)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let mut five = Box::<u32, _>::new_uninit_in(System);
+ ///
+ /// let five = unsafe {
+ /// // Deferred initialization:
+ /// five.as_mut_ptr().write(5);
+ ///
+ /// five.assume_init()
+ /// };
+ ///
+ /// assert_eq!(*five, 5)
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[must_use]
+ // #[unstable(feature = "new_uninit", issue = "63291")]
+ #[inline(always)]
+ pub fn new_uninit_in(alloc: A) -> Box<mem::MaybeUninit<T>, A>
+ where
+ A: Allocator,
+ {
+ let layout = Layout::new::<mem::MaybeUninit<T>>();
+ // NOTE: Prefer match over unwrap_or_else since the closure is sometimes not
+ // inlinable, which would make the code size bigger.
+ match Box::try_new_uninit_in(alloc) {
+ Ok(m) => m,
+ Err(_) => handle_alloc_error(layout),
+ }
+ }
+
+ /// Constructs a new box with uninitialized contents in the provided allocator,
+ /// returning an error if the allocation fails
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api, new_uninit)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let mut five = Box::<u32, _>::try_new_uninit_in(System)?;
+ ///
+ /// let five = unsafe {
+ /// // Deferred initialization:
+ /// five.as_mut_ptr().write(5);
+ ///
+ /// five.assume_init()
+ /// };
+ ///
+ /// assert_eq!(*five, 5);
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ #[inline(always)]
+ pub fn try_new_uninit_in(alloc: A) -> Result<Box<mem::MaybeUninit<T>, A>, AllocError>
+ where
+ A: Allocator,
+ {
+ let layout = Layout::new::<mem::MaybeUninit<T>>();
+ let ptr = alloc.allocate(layout)?.cast();
+ unsafe { Ok(Box::from_raw_in(ptr.as_ptr(), alloc)) }
+ }
+
+ /// Constructs a new `Box` with uninitialized contents, with the memory
+ /// being filled with `0` bytes in the provided allocator.
+ ///
+ /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
+ /// of this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api, new_uninit)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let zero = Box::<u32, _>::new_zeroed_in(System);
+ /// let zero = unsafe { zero.assume_init() };
+ ///
+ /// assert_eq!(*zero, 0)
+ /// ```
+ ///
+ /// [zeroed]: mem::MaybeUninit::zeroed
+ #[cfg(not(no_global_oom_handling))]
+ // #[unstable(feature = "new_uninit", issue = "63291")]
+ #[must_use]
+ #[inline(always)]
+ pub fn new_zeroed_in(alloc: A) -> Box<mem::MaybeUninit<T>, A>
+ where
+ A: Allocator,
+ {
+ let layout = Layout::new::<mem::MaybeUninit<T>>();
+ // NOTE: Prefer match over unwrap_or_else since the closure is sometimes not
+ // inlinable, which would make the code size bigger.
+ match Box::try_new_zeroed_in(alloc) {
+ Ok(m) => m,
+ Err(_) => handle_alloc_error(layout),
+ }
+ }
+
+ /// Constructs a new `Box` with uninitialized contents, with the memory
+ /// being filled with `0` bytes in the provided allocator,
+    /// returning an error if the allocation fails.
+ ///
+ /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
+ /// of this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api, new_uninit)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let zero = Box::<u32, _>::try_new_zeroed_in(System)?;
+ /// let zero = unsafe { zero.assume_init() };
+ ///
+ /// assert_eq!(*zero, 0);
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ ///
+ /// [zeroed]: mem::MaybeUninit::zeroed
+ #[inline(always)]
+ pub fn try_new_zeroed_in(alloc: A) -> Result<Box<mem::MaybeUninit<T>, A>, AllocError>
+ where
+ A: Allocator,
+ {
+ let layout = Layout::new::<mem::MaybeUninit<T>>();
+ let ptr = alloc.allocate_zeroed(layout)?.cast();
+ unsafe { Ok(Box::from_raw_in(ptr.as_ptr(), alloc)) }
+ }
+
+ /// Constructs a new `Pin<Box<T, A>>`. If `T` does not implement [`Unpin`], then
+ /// `x` will be pinned in memory and unable to be moved.
+ ///
+ /// Constructing and pinning of the `Box` can also be done in two steps: `Box::pin_in(x, alloc)`
+ /// does the same as <code>[Box::into_pin]\([Box::new_in]\(x, alloc))</code>. Consider using
+ /// [`into_pin`](Box::into_pin) if you already have a `Box<T, A>`, or if you want to
+ /// construct a (pinned) `Box` in a different way than with [`Box::new_in`].
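+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch, mirroring the other `allocator_api` examples in this module:
+    ///
+    /// ```
+    /// #![feature(allocator_api)]
+    ///
+    /// use std::alloc::System;
+    ///
+    /// let pinned = Box::pin_in(5, System);
+    /// assert_eq!(*pinned, 5);
+    /// ```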
+ #[cfg(not(no_global_oom_handling))]
+ #[must_use]
+ #[inline(always)]
+ pub fn pin_in(x: T, alloc: A) -> Pin<Self>
+ where
+ A: 'static + Allocator,
+ {
+ Self::into_pin(Self::new_in(x, alloc))
+ }
+
+    /// Converts a `Box<T>` into a `Box<[T]>` of length 1.
+ ///
+ /// This conversion does not allocate on the heap and happens in place.
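+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch; the resulting slice always has length 1:
+    ///
+    /// ```
+    /// # use allocator_api2::boxed::Box;
+    /// let boxed = Box::new(5);
+    /// let slice: Box<[i32]> = Box::into_boxed_slice(boxed);
+    /// assert_eq!(*slice, [5]);
+    /// ```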
+ #[inline(always)]
+ pub fn into_boxed_slice(boxed: Self) -> Box<[T], A> {
+ let (raw, alloc) = Box::into_raw_with_allocator(boxed);
+ unsafe { Box::from_raw_in(raw as *mut [T; 1], alloc) }
+ }
+
+ /// Consumes the `Box`, returning the wrapped value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(box_into_inner)]
+ ///
+ /// let c = Box::new(5);
+ ///
+ /// assert_eq!(Box::into_inner(c), 5);
+ /// ```
+    #[inline(always)]
+    pub fn into_inner(boxed: Self) -> T {
+        // Take ownership of the pointer and allocator first so that the
+        // `Drop` impl does not run on `boxed` and free the allocation twice.
+        let (ptr, alloc) = Box::into_non_null(boxed);
+        let unboxed = unsafe { ptr.as_ptr().read() };
+        unsafe { alloc.deallocate(ptr.cast(), Layout::new::<T>()) };
+        unboxed
+    }
+}
+
+impl<T> Box<[T]> {
+ /// Constructs a new boxed slice with uninitialized contents.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ ///
+ /// let mut values = Box::<[u32]>::new_uninit_slice(3);
+ ///
+ /// let values = unsafe {
+ /// // Deferred initialization:
+ /// values[0].as_mut_ptr().write(1);
+ /// values[1].as_mut_ptr().write(2);
+ /// values[2].as_mut_ptr().write(3);
+ ///
+ /// values.assume_init()
+ /// };
+ ///
+ /// assert_eq!(*values, [1, 2, 3])
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[must_use]
+ #[inline(always)]
+ pub fn new_uninit_slice(len: usize) -> Box<[mem::MaybeUninit<T>]> {
+ unsafe { RawVec::with_capacity(len).into_box(len) }
+ }
+
+ /// Constructs a new boxed slice with uninitialized contents, with the memory
+ /// being filled with `0` bytes.
+ ///
+ /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
+ /// of this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ ///
+ /// let values = Box::<[u32]>::new_zeroed_slice(3);
+ /// let values = unsafe { values.assume_init() };
+ ///
+ /// assert_eq!(*values, [0, 0, 0])
+ /// ```
+ ///
+ /// [zeroed]: mem::MaybeUninit::zeroed
+ #[cfg(not(no_global_oom_handling))]
+ #[must_use]
+ #[inline(always)]
+ pub fn new_zeroed_slice(len: usize) -> Box<[mem::MaybeUninit<T>]> {
+ unsafe { RawVec::with_capacity_zeroed(len).into_box(len) }
+ }
+
+ /// Constructs a new boxed slice with uninitialized contents. Returns an error if
+    /// the allocation fails.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api, new_uninit)]
+ ///
+ /// let mut values = Box::<[u32]>::try_new_uninit_slice(3)?;
+ /// let values = unsafe {
+ /// // Deferred initialization:
+ /// values[0].as_mut_ptr().write(1);
+ /// values[1].as_mut_ptr().write(2);
+ /// values[2].as_mut_ptr().write(3);
+ /// values.assume_init()
+ /// };
+ ///
+ /// assert_eq!(*values, [1, 2, 3]);
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ #[inline(always)]
+ pub fn try_new_uninit_slice(len: usize) -> Result<Box<[mem::MaybeUninit<T>]>, AllocError> {
+ unsafe {
+ let layout = match Layout::array::<mem::MaybeUninit<T>>(len) {
+ Ok(l) => l,
+ Err(_) => return Err(AllocError),
+ };
+ let ptr = Global.allocate(layout)?;
+ Ok(RawVec::from_raw_parts_in(ptr.as_ptr() as *mut _, len, Global).into_box(len))
+ }
+ }
+
+ /// Constructs a new boxed slice with uninitialized contents, with the memory
+    /// being filled with `0` bytes. Returns an error if the allocation fails.
+ ///
+ /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
+ /// of this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api, new_uninit)]
+ ///
+ /// let values = Box::<[u32]>::try_new_zeroed_slice(3)?;
+ /// let values = unsafe { values.assume_init() };
+ ///
+ /// assert_eq!(*values, [0, 0, 0]);
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ ///
+ /// [zeroed]: mem::MaybeUninit::zeroed
+ #[inline(always)]
+ pub fn try_new_zeroed_slice(len: usize) -> Result<Box<[mem::MaybeUninit<T>]>, AllocError> {
+ unsafe {
+ let layout = match Layout::array::<mem::MaybeUninit<T>>(len) {
+ Ok(l) => l,
+ Err(_) => return Err(AllocError),
+ };
+ let ptr = Global.allocate_zeroed(layout)?;
+ Ok(RawVec::from_raw_parts_in(ptr.as_ptr() as *mut _, len, Global).into_box(len))
+ }
+ }
+}
+
+impl<T, A: Allocator> Box<[T], A> {
+ /// Constructs a new boxed slice with uninitialized contents in the provided allocator.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api, new_uninit)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let mut values = Box::<[u32], _>::new_uninit_slice_in(3, System);
+ ///
+ /// let values = unsafe {
+ /// // Deferred initialization:
+ /// values[0].as_mut_ptr().write(1);
+ /// values[1].as_mut_ptr().write(2);
+ /// values[2].as_mut_ptr().write(3);
+ ///
+ /// values.assume_init()
+ /// };
+ ///
+ /// assert_eq!(*values, [1, 2, 3])
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[must_use]
+ #[inline(always)]
+ pub fn new_uninit_slice_in(len: usize, alloc: A) -> Box<[mem::MaybeUninit<T>], A> {
+ unsafe { RawVec::with_capacity_in(len, alloc).into_box(len) }
+ }
+
+ /// Constructs a new boxed slice with uninitialized contents in the provided allocator,
+ /// with the memory being filled with `0` bytes.
+ ///
+ /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
+ /// of this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api, new_uninit)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let values = Box::<[u32], _>::new_zeroed_slice_in(3, System);
+ /// let values = unsafe { values.assume_init() };
+ ///
+ /// assert_eq!(*values, [0, 0, 0])
+ /// ```
+ ///
+ /// [zeroed]: mem::MaybeUninit::zeroed
+ #[cfg(not(no_global_oom_handling))]
+ #[must_use]
+ #[inline(always)]
+ pub fn new_zeroed_slice_in(len: usize, alloc: A) -> Box<[mem::MaybeUninit<T>], A> {
+ unsafe { RawVec::with_capacity_zeroed_in(len, alloc).into_box(len) }
+ }
+
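+    /// Converts the boxed slice into a [`Vec`](crate::vec::Vec), reusing the
+    /// existing allocation in place.
+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch with the global allocator:
+    ///
+    /// ```
+    /// # use allocator_api2::boxed::Box;
+    /// let boxed: Box<[i32]> = Box::from([1, 2, 3]);
+    /// let vec = boxed.into_vec();
+    /// assert_eq!(*vec, [1, 2, 3]);
+    /// ```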
+ pub fn into_vec(self) -> Vec<T, A>
+ where
+ A: Allocator,
+ {
+ unsafe {
+ let len = self.len();
+ let (b, alloc) = Box::into_raw_with_allocator(self);
+ Vec::from_raw_parts_in(b as *mut T, len, len, alloc)
+ }
+ }
+}
+
+impl<T, A: Allocator> Box<mem::MaybeUninit<T>, A> {
+ /// Converts to `Box<T, A>`.
+ ///
+ /// # Safety
+ ///
+ /// As with [`MaybeUninit::assume_init`],
+ /// it is up to the caller to guarantee that the value
+ /// really is in an initialized state.
+ /// Calling this when the content is not yet fully initialized
+ /// causes immediate undefined behavior.
+ ///
+ /// [`MaybeUninit::assume_init`]: mem::MaybeUninit::assume_init
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ ///
+ /// let mut five = Box::<u32>::new_uninit();
+ ///
+ /// let five: Box<u32> = unsafe {
+ /// // Deferred initialization:
+ /// five.as_mut_ptr().write(5);
+ ///
+ /// five.assume_init()
+ /// };
+ ///
+ /// assert_eq!(*five, 5)
+ /// ```
+ #[inline(always)]
+ pub unsafe fn assume_init(self) -> Box<T, A> {
+ let (raw, alloc) = Box::into_raw_with_allocator(self);
+ unsafe { Box::from_raw_in(raw as *mut T, alloc) }
+ }
+
+ /// Writes the value and converts to `Box<T, A>`.
+ ///
+    /// This method converts the box similarly to [`Box::assume_init`] but
+    /// writes `value` into it before the conversion, thus guaranteeing safety.
+    /// In some scenarios this may improve performance because the compiler
+    /// may be able to optimize away the copy from the stack.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ ///
+ /// let big_box = Box::<[usize; 1024]>::new_uninit();
+ ///
+ /// let mut array = [0; 1024];
+ /// for (i, place) in array.iter_mut().enumerate() {
+ /// *place = i;
+ /// }
+ ///
+    /// // The optimizer may be able to elide this copy, so the previous code
+    /// // writes directly to the heap.
+ /// let big_box = Box::write(big_box, array);
+ ///
+ /// for (i, x) in big_box.iter().enumerate() {
+ /// assert_eq!(*x, i);
+ /// }
+ /// ```
+ #[inline(always)]
+ pub fn write(mut boxed: Self, value: T) -> Box<T, A> {
+ unsafe {
+ (*boxed).write(value);
+ boxed.assume_init()
+ }
+ }
+}
+
+impl<T, A: Allocator> Box<[mem::MaybeUninit<T>], A> {
+ /// Converts to `Box<[T], A>`.
+ ///
+ /// # Safety
+ ///
+ /// As with [`MaybeUninit::assume_init`],
+ /// it is up to the caller to guarantee that the values
+ /// really are in an initialized state.
+ /// Calling this when the content is not yet fully initialized
+ /// causes immediate undefined behavior.
+ ///
+ /// [`MaybeUninit::assume_init`]: mem::MaybeUninit::assume_init
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ ///
+ /// let mut values = Box::<[u32]>::new_uninit_slice(3);
+ ///
+ /// let values = unsafe {
+ /// // Deferred initialization:
+ /// values[0].as_mut_ptr().write(1);
+ /// values[1].as_mut_ptr().write(2);
+ /// values[2].as_mut_ptr().write(3);
+ ///
+ /// values.assume_init()
+ /// };
+ ///
+ /// assert_eq!(*values, [1, 2, 3])
+ /// ```
+ #[inline(always)]
+ pub unsafe fn assume_init(self) -> Box<[T], A> {
+ let (raw, alloc) = Box::into_raw_with_allocator(self);
+ unsafe { Box::from_raw_in(raw as *mut [T], alloc) }
+ }
+}
+
+impl<T: ?Sized> Box<T> {
+ /// Constructs a box from a raw pointer.
+ ///
+ /// After calling this function, the raw pointer is owned by the
+ /// resulting `Box`. Specifically, the `Box` destructor will call
+ /// the destructor of `T` and free the allocated memory. For this
+ /// to be safe, the memory must have been allocated in accordance
+    /// with the [memory layout] used by `Box`.
+ ///
+ /// # Safety
+ ///
+ /// This function is unsafe because improper use may lead to
+ /// memory problems. For example, a double-free may occur if the
+ /// function is called twice on the same raw pointer.
+ ///
+ /// The safety conditions are described in the [memory layout] section.
+ ///
+ /// # Examples
+ ///
+ /// Recreate a `Box` which was previously converted to a raw pointer
+ /// using [`Box::into_raw`]:
+ /// ```
+ /// let x = Box::new(5);
+ /// let ptr = Box::into_raw(x);
+ /// let x = unsafe { Box::from_raw(ptr) };
+ /// ```
+ /// Manually create a `Box` from scratch by using the global allocator:
+ /// ```
+ /// use std::alloc::{alloc, Layout};
+ ///
+ /// unsafe {
+ /// let ptr = alloc(Layout::new::<i32>()) as *mut i32;
+ /// // In general .write is required to avoid attempting to destruct
+ /// // the (uninitialized) previous contents of `ptr`, though for this
+ /// // simple example `*ptr = 5` would have worked as well.
+ /// ptr.write(5);
+ /// let x = Box::from_raw(ptr);
+ /// }
+ /// ```
+ ///
+ /// [memory layout]: self#memory-layout
+ /// [`Layout`]: crate::Layout
+ #[must_use = "call `drop(from_raw(ptr))` if you intend to drop the `Box`"]
+ #[inline(always)]
+ pub unsafe fn from_raw(raw: *mut T) -> Self {
+ unsafe { Self::from_raw_in(raw, Global) }
+ }
+}
+
+impl<T: ?Sized, A: Allocator> Box<T, A> {
+ /// Constructs a box from a raw pointer in the given allocator.
+ ///
+ /// After calling this function, the raw pointer is owned by the
+ /// resulting `Box`. Specifically, the `Box` destructor will call
+ /// the destructor of `T` and free the allocated memory. For this
+ /// to be safe, the memory must have been allocated in accordance
+    /// with the [memory layout] used by `Box`.
+ ///
+ /// # Safety
+ ///
+ /// This function is unsafe because improper use may lead to
+ /// memory problems. For example, a double-free may occur if the
+ /// function is called twice on the same raw pointer.
+    ///
+ /// # Examples
+ ///
+ /// Recreate a `Box` which was previously converted to a raw pointer
+ /// using [`Box::into_raw_with_allocator`]:
+ /// ```
+ /// use std::alloc::System;
+ /// # use allocator_api2::boxed::Box;
+ ///
+ /// let x = Box::new_in(5, System);
+ /// let (ptr, alloc) = Box::into_raw_with_allocator(x);
+ /// let x = unsafe { Box::from_raw_in(ptr, alloc) };
+ /// ```
+ /// Manually create a `Box` from scratch by using the system allocator:
+ /// ```
+ /// use allocator_api2::alloc::{Allocator, Layout, System};
+ /// # use allocator_api2::boxed::Box;
+ ///
+ /// unsafe {
+ /// let ptr = System.allocate(Layout::new::<i32>())?.as_ptr().cast::<i32>();
+ /// // In general .write is required to avoid attempting to destruct
+ /// // the (uninitialized) previous contents of `ptr`, though for this
+ /// // simple example `*ptr = 5` would have worked as well.
+ /// ptr.write(5);
+ /// let x = Box::from_raw_in(ptr, System);
+ /// }
+ /// # Ok::<(), allocator_api2::alloc::AllocError>(())
+ /// ```
+ ///
+ /// [memory layout]: self#memory-layout
+ /// [`Layout`]: crate::Layout
+ #[inline(always)]
+ pub const unsafe fn from_raw_in(raw: *mut T, alloc: A) -> Self {
+ Box(unsafe { NonNull::new_unchecked(raw) }, alloc)
+ }
+
+ /// Consumes the `Box`, returning a wrapped raw pointer.
+ ///
+ /// The pointer will be properly aligned and non-null.
+ ///
+ /// After calling this function, the caller is responsible for the
+ /// memory previously managed by the `Box`. In particular, the
+ /// caller should properly destroy `T` and release the memory, taking
+ /// into account the [memory layout] used by `Box`. The easiest way to
+ /// do this is to convert the raw pointer back into a `Box` with the
+ /// [`Box::from_raw`] function, allowing the `Box` destructor to perform
+ /// the cleanup.
+ ///
+ /// Note: this is an associated function, which means that you have
+ /// to call it as `Box::into_raw(b)` instead of `b.into_raw()`. This
+ /// is so that there is no conflict with a method on the inner type.
+ ///
+ /// # Examples
+ /// Converting the raw pointer back into a `Box` with [`Box::from_raw`]
+ /// for automatic cleanup:
+ /// ```
+ /// let x = Box::new(String::from("Hello"));
+ /// let ptr = Box::into_raw(x);
+ /// let x = unsafe { Box::from_raw(ptr) };
+ /// ```
+ /// Manual cleanup by explicitly running the destructor and deallocating
+ /// the memory:
+ /// ```
+ /// use std::alloc::{dealloc, Layout};
+ /// use std::ptr;
+ ///
+ /// let x = Box::new(String::from("Hello"));
+ /// let p = Box::into_raw(x);
+ /// unsafe {
+ /// ptr::drop_in_place(p);
+ /// dealloc(p as *mut u8, Layout::new::<String>());
+ /// }
+ /// ```
+ ///
+ /// [memory layout]: self#memory-layout
+ #[inline(always)]
+ pub fn into_raw(b: Self) -> *mut T {
+ Self::into_raw_with_allocator(b).0
+ }
+
+ /// Consumes the `Box`, returning a wrapped raw pointer and the allocator.
+ ///
+ /// The pointer will be properly aligned and non-null.
+ ///
+ /// After calling this function, the caller is responsible for the
+ /// memory previously managed by the `Box`. In particular, the
+ /// caller should properly destroy `T` and release the memory, taking
+ /// into account the [memory layout] used by `Box`. The easiest way to
+ /// do this is to convert the raw pointer back into a `Box` with the
+ /// [`Box::from_raw_in`] function, allowing the `Box` destructor to perform
+ /// the cleanup.
+ ///
+ /// Note: this is an associated function, which means that you have
+ /// to call it as `Box::into_raw_with_allocator(b)` instead of `b.into_raw_with_allocator()`. This
+ /// is so that there is no conflict with a method on the inner type.
+ ///
+ /// # Examples
+ /// Converting the raw pointer back into a `Box` with [`Box::from_raw_in`]
+ /// for automatic cleanup:
+ /// ```
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let x = Box::new_in(String::from("Hello"), System);
+ /// let (ptr, alloc) = Box::into_raw_with_allocator(x);
+ /// let x = unsafe { Box::from_raw_in(ptr, alloc) };
+ /// ```
+ /// Manual cleanup by explicitly running the destructor and deallocating
+ /// the memory:
+ /// ```
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::alloc::{Allocator, Layout, System};
+ /// use std::ptr::{self, NonNull};
+ ///
+ /// let x = Box::new_in(String::from("Hello"), System);
+ /// let (ptr, alloc) = Box::into_raw_with_allocator(x);
+ /// unsafe {
+ /// ptr::drop_in_place(ptr);
+ /// let non_null = NonNull::new_unchecked(ptr);
+ /// alloc.deallocate(non_null.cast(), Layout::new::<String>());
+ /// }
+ /// ```
+ ///
+ /// [memory layout]: self#memory-layout
+ #[inline(always)]
+ pub fn into_raw_with_allocator(b: Self) -> (*mut T, A) {
+ let (leaked, alloc) = Box::into_non_null(b);
+ (leaked.as_ptr(), alloc)
+ }
+
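+    /// Consumes the `Box`, returning a wrapped [`NonNull`] pointer and the allocator.
+    ///
+    /// After calling this function, the caller is responsible for the memory
+    /// previously managed by the `Box`, exactly as with
+    /// [`Box::into_raw_with_allocator`].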
+ #[inline(always)]
+ pub fn into_non_null(b: Self) -> (NonNull<T>, A) {
+ // Box is recognized as a "unique pointer" by Stacked Borrows, but internally it is a
+ // raw pointer for the type system. Turning it directly into a raw pointer would not be
+ // recognized as "releasing" the unique pointer to permit aliased raw accesses,
+ // so all raw pointer methods have to go through `Box::leak`. Turning *that* to a raw pointer
+ // behaves correctly.
+ let alloc = unsafe { ptr::read(&b.1) };
+ (NonNull::from(Box::leak(b)), alloc)
+ }
+
+ /// Returns a reference to the underlying allocator.
+ ///
+ /// Note: this is an associated function, which means that you have
+ /// to call it as `Box::allocator(&b)` instead of `b.allocator()`. This
+ /// is so that there is no conflict with a method on the inner type.
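+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch:
+    ///
+    /// ```
+    /// # use allocator_api2::{alloc::Global, boxed::Box};
+    /// let b = Box::new_in(7, Global);
+    /// let _alloc: &Global = Box::allocator(&b);
+    /// ```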
+ #[inline(always)]
+ pub const fn allocator(b: &Self) -> &A {
+ &b.1
+ }
+
+ /// Consumes and leaks the `Box`, returning a mutable reference,
+ /// `&'a mut T`. Note that the type `T` must outlive the chosen lifetime
+ /// `'a`. If the type has only static references, or none at all, then this
+ /// may be chosen to be `'static`.
+ ///
+ /// This function is mainly useful for data that lives for the remainder of
+ /// the program's life. Dropping the returned reference will cause a memory
+ /// leak. If this is not acceptable, the reference should first be wrapped
+ /// with the [`Box::from_raw`] function producing a `Box`. This `Box` can
+ /// then be dropped which will properly destroy `T` and release the
+ /// allocated memory.
+ ///
+ /// Note: this is an associated function, which means that you have
+ /// to call it as `Box::leak(b)` instead of `b.leak()`. This
+ /// is so that there is no conflict with a method on the inner type.
+ ///
+ /// # Examples
+ ///
+ /// Simple usage:
+ ///
+ /// ```
+ /// let x = Box::new(41);
+ /// let static_ref: &'static mut usize = Box::leak(x);
+ /// *static_ref += 1;
+ /// assert_eq!(*static_ref, 42);
+ /// ```
+ ///
+ /// Unsized data:
+ ///
+ /// ```
+ /// let x = vec![1, 2, 3].into_boxed_slice();
+ /// let static_ref = Box::leak(x);
+ /// static_ref[0] = 4;
+ /// assert_eq!(*static_ref, [4, 2, 3]);
+ /// ```
+ #[inline(always)]
+    pub fn leak<'a>(b: Self) -> &'a mut T
+ where
+ A: 'a,
+ {
+ unsafe { &mut *mem::ManuallyDrop::new(b).0.as_ptr() }
+ }
+
+ /// Converts a `Box<T>` into a `Pin<Box<T>>`. If `T` does not implement [`Unpin`], then
+ /// `*boxed` will be pinned in memory and unable to be moved.
+ ///
+ /// This conversion does not allocate on the heap and happens in place.
+ ///
+ /// This is also available via [`From`].
+ ///
+ /// Constructing and pinning a `Box` with <code>Box::into_pin([Box::new]\(x))</code>
+ /// can also be written more concisely using <code>[Box::pin]\(x)</code>.
+ /// This `into_pin` method is useful if you already have a `Box<T>`, or you are
+ /// constructing a (pinned) `Box` in a different way than with [`Box::new`].
+ ///
+ /// # Notes
+ ///
+ /// It's not recommended that crates add an impl like `From<Box<T>> for Pin<T>`,
+ /// as it'll introduce an ambiguity when calling `Pin::from`.
+ /// A demonstration of such a poor impl is shown below.
+ ///
+ /// ```compile_fail
+ /// # use std::pin::Pin;
+ /// struct Foo; // A type defined in this crate.
+ /// impl From<Box<()>> for Pin<Foo> {
+ /// fn from(_: Box<()>) -> Pin<Foo> {
+ /// Pin::new(Foo)
+ /// }
+ /// }
+ ///
+ /// let foo = Box::new(());
+ /// let bar = Pin::from(foo);
+ /// ```
+ #[inline(always)]
+ pub fn into_pin(boxed: Self) -> Pin<Self>
+ where
+ A: 'static,
+ {
+ // It's not possible to move or replace the insides of a `Pin<Box<T>>`
+ // when `T: !Unpin`, so it's safe to pin it directly without any
+ // additional requirements.
+ unsafe { Pin::new_unchecked(boxed) }
+ }
+}
+
+impl<T: ?Sized, A: Allocator> Drop for Box<T, A> {
+    #[inline(always)]
+    fn drop(&mut self) {
+        let layout = Layout::for_value::<T>(&**self);
+        unsafe {
+            // Drop the boxed value itself before releasing its allocation.
+            ptr::drop_in_place(self.0.as_ptr());
+            self.1.deallocate(self.0.cast(), layout);
+        }
+    }
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T: Default> Default for Box<T> {
+ /// Creates a `Box<T>`, with the `Default` value for T.
+ #[inline(always)]
+ fn default() -> Self {
+ Box::new(T::default())
+ }
+}
+
+impl<T, A: Allocator + Default> Default for Box<[T], A> {
+ #[inline(always)]
+ fn default() -> Self {
+ let ptr: NonNull<[T]> = NonNull::<[T; 0]>::dangling();
+ Box(ptr, A::default())
+ }
+}
+
+impl<A: Allocator + Default> Default for Box<str, A> {
+ #[inline(always)]
+ fn default() -> Self {
+        // SAFETY: This is the same as `NonNull::cast::<U>` but with an unsized `U = str`.
+ let ptr: NonNull<str> = unsafe {
+ let bytes: NonNull<[u8]> = NonNull::<[u8; 0]>::dangling();
+ NonNull::new_unchecked(bytes.as_ptr() as *mut str)
+ };
+ Box(ptr, A::default())
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T: Clone, A: Allocator + Clone> Clone for Box<T, A> {
+ /// Returns a new box with a `clone()` of this box's contents.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = Box::new(5);
+ /// let y = x.clone();
+ ///
+ /// // The value is the same
+ /// assert_eq!(x, y);
+ ///
+ /// // But they are unique objects
+ /// assert_ne!(&*x as *const i32, &*y as *const i32);
+ /// ```
+ #[inline(always)]
+ fn clone(&self) -> Self {
+ // Pre-allocate memory to allow writing the cloned value directly.
+ let mut boxed = Self::new_uninit_in(self.1.clone());
+ unsafe {
+ boxed.write((**self).clone());
+ boxed.assume_init()
+ }
+ }
+
+ /// Copies `source`'s contents into `self` without creating a new allocation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = Box::new(5);
+ /// let mut y = Box::new(10);
+ /// let yp: *const i32 = &*y;
+ ///
+ /// y.clone_from(&x);
+ ///
+ /// // The value is the same
+ /// assert_eq!(x, y);
+ ///
+ /// // And no allocation occurred
+ /// assert_eq!(yp, &*y);
+ /// ```
+ #[inline(always)]
+ fn clone_from(&mut self, source: &Self) {
+ (**self).clone_from(&(**source));
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl Clone for Box<str> {
+ #[inline(always)]
+ fn clone(&self) -> Self {
+ // this makes a copy of the data
+ let buf: Box<[u8]> = self.as_bytes().into();
+ unsafe { Box::from_raw(Box::into_raw(buf) as *mut str) }
+ }
+}
+
+impl<T: ?Sized + PartialEq, A: Allocator> PartialEq for Box<T, A> {
+ #[inline(always)]
+ fn eq(&self, other: &Self) -> bool {
+ PartialEq::eq(&**self, &**other)
+ }
+ #[inline(always)]
+ fn ne(&self, other: &Self) -> bool {
+ PartialEq::ne(&**self, &**other)
+ }
+}
+
+impl<T: ?Sized + PartialOrd, A: Allocator> PartialOrd for Box<T, A> {
+ #[inline(always)]
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ PartialOrd::partial_cmp(&**self, &**other)
+ }
+ #[inline(always)]
+ fn lt(&self, other: &Self) -> bool {
+ PartialOrd::lt(&**self, &**other)
+ }
+ #[inline(always)]
+ fn le(&self, other: &Self) -> bool {
+ PartialOrd::le(&**self, &**other)
+ }
+ #[inline(always)]
+ fn ge(&self, other: &Self) -> bool {
+ PartialOrd::ge(&**self, &**other)
+ }
+ #[inline(always)]
+ fn gt(&self, other: &Self) -> bool {
+ PartialOrd::gt(&**self, &**other)
+ }
+}
+
+impl<T: ?Sized + Ord, A: Allocator> Ord for Box<T, A> {
+ #[inline(always)]
+ fn cmp(&self, other: &Self) -> Ordering {
+ Ord::cmp(&**self, &**other)
+ }
+}
+
+impl<T: ?Sized + Eq, A: Allocator> Eq for Box<T, A> {}
+
+impl<T: ?Sized + Hash, A: Allocator> Hash for Box<T, A> {
+ #[inline(always)]
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ (**self).hash(state);
+ }
+}
+
+impl<T: ?Sized + Hasher, A: Allocator> Hasher for Box<T, A> {
+ #[inline(always)]
+ fn finish(&self) -> u64 {
+ (**self).finish()
+ }
+ #[inline(always)]
+ fn write(&mut self, bytes: &[u8]) {
+ (**self).write(bytes)
+ }
+ #[inline(always)]
+ fn write_u8(&mut self, i: u8) {
+ (**self).write_u8(i)
+ }
+ #[inline(always)]
+ fn write_u16(&mut self, i: u16) {
+ (**self).write_u16(i)
+ }
+ #[inline(always)]
+ fn write_u32(&mut self, i: u32) {
+ (**self).write_u32(i)
+ }
+ #[inline(always)]
+ fn write_u64(&mut self, i: u64) {
+ (**self).write_u64(i)
+ }
+ #[inline(always)]
+ fn write_u128(&mut self, i: u128) {
+ (**self).write_u128(i)
+ }
+ #[inline(always)]
+ fn write_usize(&mut self, i: usize) {
+ (**self).write_usize(i)
+ }
+ #[inline(always)]
+ fn write_i8(&mut self, i: i8) {
+ (**self).write_i8(i)
+ }
+ #[inline(always)]
+ fn write_i16(&mut self, i: i16) {
+ (**self).write_i16(i)
+ }
+ #[inline(always)]
+ fn write_i32(&mut self, i: i32) {
+ (**self).write_i32(i)
+ }
+ #[inline(always)]
+ fn write_i64(&mut self, i: i64) {
+ (**self).write_i64(i)
+ }
+ #[inline(always)]
+ fn write_i128(&mut self, i: i128) {
+ (**self).write_i128(i)
+ }
+ #[inline(always)]
+ fn write_isize(&mut self, i: isize) {
+ (**self).write_isize(i)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T> From<T> for Box<T> {
+    /// Converts a `T` into a `Box<T>`.
+ ///
+ /// The conversion allocates on the heap and moves `t`
+ /// from the stack into it.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// let x = 5;
+ /// let boxed = Box::new(5);
+ ///
+ /// assert_eq!(Box::from(x), boxed);
+ /// ```
+ #[inline(always)]
+ fn from(t: T) -> Self {
+ Box::new(t)
+ }
+}
+
+impl<T: ?Sized, A: Allocator> From<Box<T, A>> for Pin<Box<T, A>>
+where
+ A: 'static,
+{
+ /// Converts a `Box<T>` into a `Pin<Box<T>>`. If `T` does not implement [`Unpin`], then
+ /// `*boxed` will be pinned in memory and unable to be moved.
+ ///
+ /// This conversion does not allocate on the heap and happens in place.
+ ///
+ /// This is also available via [`Box::into_pin`].
+ ///
+ /// Constructing and pinning a `Box` with <code><Pin<Box\<T>>>::from([Box::new]\(x))</code>
+ /// can also be written more concisely using <code>[Box::pin]\(x)</code>.
+ /// This `From` implementation is useful if you already have a `Box<T>`, or you are
+ /// constructing a (pinned) `Box` in a different way than with [`Box::new`].
+ #[inline(always)]
+ fn from(boxed: Box<T, A>) -> Self {
+ Box::into_pin(boxed)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T: Copy, A: Allocator + Default> From<&[T]> for Box<[T], A> {
+    /// Converts a `&[T]` into a `Box<[T]>`.
+ ///
+ /// This conversion allocates on the heap
+ /// and performs a copy of `slice` and its contents.
+ ///
+ /// # Examples
+ /// ```rust
+ /// // create a &[u8] which will be used to create a Box<[u8]>
+ /// let slice: &[u8] = &[104, 101, 108, 108, 111];
+ /// let boxed_slice: Box<[u8]> = Box::from(slice);
+ ///
+ /// println!("{boxed_slice:?}");
+ /// ```
+ #[inline(always)]
+ fn from(slice: &[T]) -> Box<[T], A> {
+ let len = slice.len();
+ let buf = RawVec::with_capacity_in(len, A::default());
+ unsafe {
+ ptr::copy_nonoverlapping(slice.as_ptr(), buf.ptr(), len);
+ buf.into_box(slice.len()).assume_init()
+ }
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<A: Allocator + Default> From<&str> for Box<str, A> {
+    /// Converts a `&str` into a `Box<str>`.
+ ///
+ /// This conversion allocates on the heap
+ /// and performs a copy of `s`.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// let boxed: Box<str> = Box::from("hello");
+ /// println!("{boxed}");
+ /// ```
+ #[inline(always)]
+ fn from(s: &str) -> Box<str, A> {
+ let (raw, alloc) = Box::into_raw_with_allocator(Box::<[u8], A>::from(s.as_bytes()));
+ unsafe { Box::from_raw_in(raw as *mut str, alloc) }
+ }
+}
+
+impl<A: Allocator> From<Box<str, A>> for Box<[u8], A> {
+    /// Converts a `Box<str>` into a `Box<[u8]>`.
+ ///
+ /// This conversion does not allocate on the heap and happens in place.
+ ///
+ /// # Examples
+ /// ```rust
+ /// // create a Box<str> which will be used to create a Box<[u8]>
+ /// let boxed: Box<str> = Box::from("hello");
+ /// let boxed_str: Box<[u8]> = Box::from(boxed);
+ ///
+ /// // create a &[u8] which will be used to create a Box<[u8]>
+ /// let slice: &[u8] = &[104, 101, 108, 108, 111];
+ /// let boxed_slice = Box::from(slice);
+ ///
+ /// assert_eq!(boxed_slice, boxed_str);
+ /// ```
+ #[inline(always)]
+ fn from(s: Box<str, A>) -> Self {
+ let (raw, alloc) = Box::into_raw_with_allocator(s);
+ unsafe { Box::from_raw_in(raw as *mut [u8], alloc) }
+ }
+}
+
+impl<T, A: Allocator, const N: usize> Box<[T; N], A> {
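+    /// Converts the boxed array into a boxed slice (`Box<[T], A>`) in place,
+    /// erasing the compile-time length.
+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch:
+    ///
+    /// ```
+    /// # use allocator_api2::boxed::Box;
+    /// let arr: Box<[i32; 3]> = Box::new([1, 2, 3]);
+    /// let slice: Box<[i32]> = Box::slice(arr);
+    /// assert_eq!(*slice, [1, 2, 3]);
+    /// ```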
+ #[inline(always)]
+ pub fn slice(b: Self) -> Box<[T], A> {
+ let (ptr, alloc) = Box::into_raw_with_allocator(b);
+ unsafe { Box::from_raw_in(ptr, alloc) }
+ }
+
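+    /// Converts the boxed array into a [`Vec`](crate::vec::Vec) of length `N`,
+    /// reusing the existing allocation in place.
+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch:
+    ///
+    /// ```
+    /// # use allocator_api2::boxed::Box;
+    /// let vec = Box::new([1, 2, 3]).into_vec();
+    /// assert_eq!(*vec, [1, 2, 3]);
+    /// ```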
+ pub fn into_vec(self) -> Vec<T, A>
+ where
+ A: Allocator,
+ {
+ unsafe {
+ let (b, alloc) = Box::into_raw_with_allocator(self);
+ Vec::from_raw_parts_in(b as *mut T, N, N, alloc)
+ }
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T, const N: usize> From<[T; N]> for Box<[T]> {
+    /// Converts a `[T; N]` into a `Box<[T]>`.
+ ///
+ /// This conversion moves the array to newly heap-allocated memory.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// let boxed: Box<[u8]> = Box::from([4, 2]);
+ /// println!("{boxed:?}");
+ /// ```
+ #[inline(always)]
+ fn from(array: [T; N]) -> Box<[T]> {
+ Box::slice(Box::new(array))
+ }
+}
+
+impl<T, A: Allocator, const N: usize> TryFrom<Box<[T], A>> for Box<[T; N], A> {
+ type Error = Box<[T], A>;
+
+ /// Attempts to convert a `Box<[T]>` into a `Box<[T; N]>`.
+ ///
+ /// The conversion occurs in-place and does not require a
+ /// new memory allocation.
+ ///
+ /// # Errors
+ ///
+ /// Returns the old `Box<[T]>` in the `Err` variant if
+ /// `boxed_slice.len()` does not equal `N`.
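+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch:
+    ///
+    /// ```
+    /// # use core::convert::TryFrom;
+    /// # use allocator_api2::boxed::Box;
+    /// let boxed: Box<[i32]> = Box::from([1, 2, 3]);
+    /// let arr: Box<[i32; 3]> = Box::try_from(boxed).unwrap();
+    /// assert_eq!(*arr, [1, 2, 3]);
+    /// ```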
+ #[inline(always)]
+ fn try_from(boxed_slice: Box<[T], A>) -> Result<Self, Self::Error> {
+ if boxed_slice.len() == N {
+ let (ptr, alloc) = Box::into_raw_with_allocator(boxed_slice);
+ Ok(unsafe { Box::from_raw_in(ptr as *mut [T; N], alloc) })
+ } else {
+ Err(boxed_slice)
+ }
+ }
+}
+
+impl<A: Allocator> Box<dyn Any, A> {
+ /// Attempt to downcast the box to a concrete type.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::any::Any;
+ ///
+ /// fn print_if_string(value: Box<dyn Any>) {
+ /// if let Ok(string) = value.downcast::<String>() {
+ /// println!("String ({}): {}", string.len(), string);
+ /// }
+ /// }
+ ///
+ /// let my_string = "Hello World".to_string();
+ /// print_if_string(Box::new(my_string));
+ /// print_if_string(Box::new(0i8));
+ /// ```
+ #[inline(always)]
+ pub fn downcast<T: Any>(self) -> Result<Box<T, A>, Self> {
+ if self.is::<T>() {
+ unsafe { Ok(self.downcast_unchecked::<T>()) }
+ } else {
+ Err(self)
+ }
+ }
+
+ /// Downcasts the box to a concrete type.
+ ///
+ /// For a safe alternative see [`downcast`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(downcast_unchecked)]
+ ///
+ /// use std::any::Any;
+ ///
+ /// let x: Box<dyn Any> = Box::new(1_usize);
+ ///
+ /// unsafe {
+ /// assert_eq!(*x.downcast_unchecked::<usize>(), 1);
+ /// }
+ /// ```
+ ///
+ /// # Safety
+ ///
+ /// The contained value must be of type `T`. Calling this method
+ /// with the incorrect type is *undefined behavior*.
+ ///
+ /// [`downcast`]: Self::downcast
+ #[inline(always)]
+ pub unsafe fn downcast_unchecked<T: Any>(self) -> Box<T, A> {
+ debug_assert!(self.is::<T>());
+ unsafe {
+ let (raw, alloc): (*mut dyn Any, _) = Box::into_raw_with_allocator(self);
+ Box::from_raw_in(raw as *mut T, alloc)
+ }
+ }
+}
+
+impl<A: Allocator> Box<dyn Any + Send, A> {
+ /// Attempt to downcast the box to a concrete type.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::any::Any;
+ ///
+ /// fn print_if_string(value: Box<dyn Any + Send>) {
+ /// if let Ok(string) = value.downcast::<String>() {
+ /// println!("String ({}): {}", string.len(), string);
+ /// }
+ /// }
+ ///
+ /// let my_string = "Hello World".to_string();
+ /// print_if_string(Box::new(my_string));
+ /// print_if_string(Box::new(0i8));
+ /// ```
+ #[inline(always)]
+ pub fn downcast<T: Any>(self) -> Result<Box<T, A>, Self> {
+ if self.is::<T>() {
+ unsafe { Ok(self.downcast_unchecked::<T>()) }
+ } else {
+ Err(self)
+ }
+ }
+
+ /// Downcasts the box to a concrete type.
+ ///
+ /// For a safe alternative see [`downcast`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(downcast_unchecked)]
+ ///
+ /// use std::any::Any;
+ ///
+ /// let x: Box<dyn Any + Send> = Box::new(1_usize);
+ ///
+ /// unsafe {
+ /// assert_eq!(*x.downcast_unchecked::<usize>(), 1);
+ /// }
+ /// ```
+ ///
+ /// # Safety
+ ///
+ /// The contained value must be of type `T`. Calling this method
+ /// with the incorrect type is *undefined behavior*.
+ ///
+ /// [`downcast`]: Self::downcast
+ #[inline(always)]
+ pub unsafe fn downcast_unchecked<T: Any>(self) -> Box<T, A> {
+ debug_assert!(self.is::<T>());
+ unsafe {
+ let (raw, alloc): (*mut (dyn Any + Send), _) = Box::into_raw_with_allocator(self);
+ Box::from_raw_in(raw as *mut T, alloc)
+ }
+ }
+}
+
+impl<A: Allocator> Box<dyn Any + Send + Sync, A> {
+ /// Attempt to downcast the box to a concrete type.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::any::Any;
+ ///
+ /// fn print_if_string(value: Box<dyn Any + Send + Sync>) {
+ /// if let Ok(string) = value.downcast::<String>() {
+ /// println!("String ({}): {}", string.len(), string);
+ /// }
+ /// }
+ ///
+ /// let my_string = "Hello World".to_string();
+ /// print_if_string(Box::new(my_string));
+ /// print_if_string(Box::new(0i8));
+ /// ```
+ #[inline(always)]
+ pub fn downcast<T: Any>(self) -> Result<Box<T, A>, Self> {
+ if self.is::<T>() {
+ unsafe { Ok(self.downcast_unchecked::<T>()) }
+ } else {
+ Err(self)
+ }
+ }
+
+ /// Downcasts the box to a concrete type.
+ ///
+ /// For a safe alternative see [`downcast`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(downcast_unchecked)]
+ ///
+ /// use std::any::Any;
+ ///
+ /// let x: Box<dyn Any + Send + Sync> = Box::new(1_usize);
+ ///
+ /// unsafe {
+ /// assert_eq!(*x.downcast_unchecked::<usize>(), 1);
+ /// }
+ /// ```
+ ///
+ /// # Safety
+ ///
+ /// The contained value must be of type `T`. Calling this method
+ /// with the incorrect type is *undefined behavior*.
+ ///
+ /// [`downcast`]: Self::downcast
+ #[inline(always)]
+ pub unsafe fn downcast_unchecked<T: Any>(self) -> Box<T, A> {
+ debug_assert!(self.is::<T>());
+ unsafe {
+ let (raw, alloc): (*mut (dyn Any + Send + Sync), _) =
+ Box::into_raw_with_allocator(self);
+ Box::from_raw_in(raw as *mut T, alloc)
+ }
+ }
+}
+
+impl<T: fmt::Display + ?Sized, A: Allocator> fmt::Display for Box<T, A> {
+ #[inline(always)]
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(&**self, f)
+ }
+}
+
+impl<T: fmt::Debug + ?Sized, A: Allocator> fmt::Debug for Box<T, A> {
+ #[inline(always)]
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&**self, f)
+ }
+}
+
+impl<T: ?Sized, A: Allocator> fmt::Pointer for Box<T, A> {
+ #[inline(always)]
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        // It's not possible to extract the inner pointer directly from the
+        // `Box`; instead we cast it to a `*const`, which aliases the inner
+        // `NonNull`.
+ let ptr: *const T = &**self;
+ fmt::Pointer::fmt(&ptr, f)
+ }
+}
+
+impl<T: ?Sized, A: Allocator> Deref for Box<T, A> {
+ type Target = T;
+
+ #[inline(always)]
+ fn deref(&self) -> &T {
+ unsafe { self.0.as_ref() }
+ }
+}
+
+impl<T: ?Sized, A: Allocator> DerefMut for Box<T, A> {
+ #[inline(always)]
+ fn deref_mut(&mut self) -> &mut T {
+ unsafe { self.0.as_mut() }
+ }
+}
+
+impl<I: Iterator + ?Sized, A: Allocator> Iterator for Box<I, A> {
+ type Item = I::Item;
+
+ #[inline(always)]
+ fn next(&mut self) -> Option<I::Item> {
+ (**self).next()
+ }
+
+ #[inline(always)]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (**self).size_hint()
+ }
+
+ #[inline(always)]
+ fn nth(&mut self, n: usize) -> Option<I::Item> {
+ (**self).nth(n)
+ }
+
+ #[inline(always)]
+ fn last(self) -> Option<I::Item> {
+ BoxIter::last(self)
+ }
+}
+
+trait BoxIter {
+ type Item;
+ fn last(self) -> Option<Self::Item>;
+}
+
+impl<I: Iterator + ?Sized, A: Allocator> BoxIter for Box<I, A> {
+ type Item = I::Item;
+
+ #[inline(always)]
+ fn last(self) -> Option<I::Item> {
+ #[inline(always)]
+ fn some<T>(_: Option<T>, x: T) -> Option<T> {
+ Some(x)
+ }
+
+ self.fold(None, some)
+ }
+}
+
+impl<I: DoubleEndedIterator + ?Sized, A: Allocator> DoubleEndedIterator for Box<I, A> {
+ #[inline(always)]
+ fn next_back(&mut self) -> Option<I::Item> {
+ (**self).next_back()
+ }
+ #[inline(always)]
+ fn nth_back(&mut self, n: usize) -> Option<I::Item> {
+ (**self).nth_back(n)
+ }
+}
+
+impl<I: ExactSizeIterator + ?Sized, A: Allocator> ExactSizeIterator for Box<I, A> {
+ #[inline(always)]
+ fn len(&self) -> usize {
+ (**self).len()
+ }
+}
+
+impl<I: FusedIterator + ?Sized, A: Allocator> FusedIterator for Box<I, A> {}
+
+#[cfg(not(no_global_oom_handling))]
+impl<I> FromIterator<I> for Box<[I]> {
+ #[inline(always)]
+ fn from_iter<T: IntoIterator<Item = I>>(iter: T) -> Self {
+ iter.into_iter().collect::<Vec<_>>().into_boxed_slice()
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T: Clone, A: Allocator + Clone> Clone for Box<[T], A> {
+ #[inline(always)]
+ fn clone(&self) -> Self {
+ let alloc = Box::allocator(self).clone();
+ let mut vec = Vec::with_capacity_in(self.len(), alloc);
+ vec.extend_from_slice(self);
+ vec.into_boxed_slice()
+ }
+
+ #[inline(always)]
+ fn clone_from(&mut self, other: &Self) {
+ if self.len() == other.len() {
+ self.clone_from_slice(other);
+ } else {
+ *self = other.clone();
+ }
+ }
+}
+
+impl<T: ?Sized, A: Allocator> borrow::Borrow<T> for Box<T, A> {
+ #[inline(always)]
+ fn borrow(&self) -> &T {
+ self
+ }
+}
+
+impl<T: ?Sized, A: Allocator> borrow::BorrowMut<T> for Box<T, A> {
+ #[inline(always)]
+ fn borrow_mut(&mut self) -> &mut T {
+ self
+ }
+}
+
+impl<T: ?Sized, A: Allocator> AsRef<T> for Box<T, A> {
+ #[inline(always)]
+ fn as_ref(&self) -> &T {
+ self
+ }
+}
+
+impl<T: ?Sized, A: Allocator> AsMut<T> for Box<T, A> {
+ #[inline(always)]
+ fn as_mut(&mut self) -> &mut T {
+ self
+ }
+}
+
+/* Nota bene
+ *
+ * We could have chosen not to add this impl, and instead have written a
+ * function of Pin<Box<T>> to Pin<T>. Such a function would not be sound,
+ * because Box<T> implements Unpin even when T does not, as a result of
+ * this impl.
+ *
+ * We chose this API instead of the alternative for a few reasons:
+ * - Logically, it is helpful to understand pinning in regard to the
+ * memory region being pointed to. For this reason none of the
+ * standard library pointer types support projecting through a pin
+ * (Box<T> is the only pointer type in std for which this would be
+ * safe.)
+ * - It is in practice very useful to have Box<T> be unconditionally
+ * Unpin because of trait objects, for which the structural auto
+ * trait functionality does not apply (e.g., Box<dyn Foo> would
+ * otherwise not be Unpin).
+ *
+ * Another type with the same semantics as Box but only a conditional
+ * implementation of `Unpin` (where `T: Unpin`) would be valid/safe, and
+ * could have a method to project a Pin<T> from it.
+ */
+impl<T: ?Sized, A: Allocator> Unpin for Box<T, A> where A: 'static {}
+
+impl<F: ?Sized + Future + Unpin, A: Allocator> Future for Box<F, A>
+where
+ A: 'static,
+{
+ type Output = F::Output;
+
+ #[inline(always)]
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ F::poll(Pin::new(&mut *self), cx)
+ }
+}
+
+#[cfg(feature = "std")]
+mod error {
+ use std::error::Error;
+
+ use super::Box;
+
+ #[cfg(not(no_global_oom_handling))]
+ impl<'a, E: Error + 'a> From<E> for Box<dyn Error + 'a> {
+ /// Converts a type of [`Error`] into a box of dyn [`Error`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::error::Error;
+ /// use std::fmt;
+ /// use std::mem;
+ ///
+ /// #[derive(Debug)]
+ /// struct AnError;
+ ///
+ /// impl fmt::Display for AnError {
+ /// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// write!(f, "An error")
+ /// }
+ /// }
+ ///
+ /// impl Error for AnError {}
+ ///
+ /// let an_error = AnError;
+ /// assert!(0 == mem::size_of_val(&an_error));
+ /// let a_boxed_error = Box::<dyn Error>::from(an_error);
+ /// assert!(mem::size_of::<Box<dyn Error>>() == mem::size_of_val(&a_boxed_error))
+ /// ```
+ #[inline(always)]
+ fn from(err: E) -> Box<dyn Error + 'a> {
+ unsafe { Box::from_raw(Box::leak(Box::new(err))) }
+ }
+ }
+
+ #[cfg(not(no_global_oom_handling))]
+ impl<'a, E: Error + Send + Sync + 'a> From<E> for Box<dyn Error + Send + Sync + 'a> {
+ /// Converts a type of [`Error`] + [`Send`] + [`Sync`] into a box of
+ /// dyn [`Error`] + [`Send`] + [`Sync`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::error::Error;
+ /// use std::fmt;
+ /// use std::mem;
+ ///
+ /// #[derive(Debug)]
+ /// struct AnError;
+ ///
+ /// impl fmt::Display for AnError {
+ /// fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ /// write!(f, "An error")
+ /// }
+ /// }
+ ///
+ /// impl Error for AnError {}
+ ///
+ /// unsafe impl Send for AnError {}
+ ///
+ /// unsafe impl Sync for AnError {}
+ ///
+ /// let an_error = AnError;
+ /// assert!(0 == mem::size_of_val(&an_error));
+ /// let a_boxed_error = Box::<dyn Error + Send + Sync>::from(an_error);
+ /// assert!(
+ /// mem::size_of::<Box<dyn Error + Send + Sync>>() == mem::size_of_val(&a_boxed_error))
+ /// ```
+ #[inline(always)]
+ fn from(err: E) -> Box<dyn Error + Send + Sync + 'a> {
+ unsafe { Box::from_raw(Box::leak(Box::new(err))) }
+ }
+ }
+
+ impl<T: Error> Error for Box<T> {
+ #[inline(always)]
+ fn source(&self) -> Option<&(dyn Error + 'static)> {
+ Error::source(&**self)
+ }
+ }
+}
+
+#[cfg(feature = "std")]
+impl<R: std::io::Read + ?Sized, A: Allocator> std::io::Read for Box<R, A> {
+ #[inline]
+ fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
+ (**self).read(buf)
+ }
+
+ #[inline]
+ fn read_to_end(&mut self, buf: &mut std::vec::Vec<u8>) -> std::io::Result<usize> {
+ (**self).read_to_end(buf)
+ }
+
+ #[inline]
+ fn read_to_string(&mut self, buf: &mut String) -> std::io::Result<usize> {
+ (**self).read_to_string(buf)
+ }
+
+ #[inline]
+ fn read_exact(&mut self, buf: &mut [u8]) -> std::io::Result<()> {
+ (**self).read_exact(buf)
+ }
+}
+
+#[cfg(feature = "std")]
+impl<W: std::io::Write + ?Sized, A: Allocator> std::io::Write for Box<W, A> {
+ #[inline]
+ fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
+ (**self).write(buf)
+ }
+
+ #[inline]
+ fn flush(&mut self) -> std::io::Result<()> {
+ (**self).flush()
+ }
+
+ #[inline]
+ fn write_all(&mut self, buf: &[u8]) -> std::io::Result<()> {
+ (**self).write_all(buf)
+ }
+
+ #[inline]
+ fn write_fmt(&mut self, fmt: fmt::Arguments<'_>) -> std::io::Result<()> {
+ (**self).write_fmt(fmt)
+ }
+}
+
+#[cfg(feature = "std")]
+impl<S: std::io::Seek + ?Sized, A: Allocator> std::io::Seek for Box<S, A> {
+ #[inline]
+ fn seek(&mut self, pos: std::io::SeekFrom) -> std::io::Result<u64> {
+ (**self).seek(pos)
+ }
+
+ #[inline]
+ fn stream_position(&mut self) -> std::io::Result<u64> {
+ (**self).stream_position()
+ }
+}
+
+#[cfg(feature = "std")]
+impl<B: std::io::BufRead + ?Sized, A: Allocator> std::io::BufRead for Box<B, A> {
+ #[inline]
+ fn fill_buf(&mut self) -> std::io::Result<&[u8]> {
+ (**self).fill_buf()
+ }
+
+ #[inline]
+ fn consume(&mut self, amt: usize) {
+ (**self).consume(amt)
+ }
+
+ #[inline]
+ fn read_until(&mut self, byte: u8, buf: &mut std::vec::Vec<u8>) -> std::io::Result<usize> {
+ (**self).read_until(byte, buf)
+ }
+
+ #[inline]
+ fn read_line(&mut self, buf: &mut std::string::String) -> std::io::Result<usize> {
+ (**self).read_line(buf)
+ }
+}
+
+#[cfg(feature = "alloc")]
+impl<A: Allocator> Extend<Box<str, A>> for alloc_crate::string::String {
+ fn extend<I: IntoIterator<Item = Box<str, A>>>(&mut self, iter: I) {
+ iter.into_iter().for_each(move |s| self.push_str(&s));
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl Clone for Box<core::ffi::CStr> {
+ #[inline]
+ fn clone(&self) -> Self {
+ (**self).into()
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl From<&core::ffi::CStr> for Box<core::ffi::CStr> {
+ /// Converts a `&CStr` into a `Box<CStr>`,
+ /// by copying the contents into a newly allocated [`Box`].
+ fn from(s: &core::ffi::CStr) -> Box<core::ffi::CStr> {
+ let boxed: Box<[u8]> = Box::from(s.to_bytes_with_nul());
+ unsafe { Box::from_raw(Box::into_raw(boxed) as *mut core::ffi::CStr) }
+ }
+}
+
+#[cfg(feature = "serde")]
+impl<T, A> serde::Serialize for Box<T, A>
+where
+ T: serde::Serialize,
+ A: Allocator,
+{
+ #[inline(always)]
+ fn serialize<S: serde::ser::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
+ (**self).serialize(serializer)
+ }
+}
+
+#[cfg(feature = "serde")]
+impl<'de, T, A> serde::Deserialize<'de> for Box<T, A>
+where
+ T: serde::Deserialize<'de>,
+ A: Allocator + Default,
+{
+ #[inline(always)]
+ fn deserialize<D: serde::de::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
+ let value = T::deserialize(deserializer)?;
+ Ok(Box::new_in(value, A::default()))
+ }
+}
diff --git a/vendor/allocator-api2/src/stable/macros.rs b/vendor/allocator-api2/src/stable/macros.rs
new file mode 100644
index 000000000..29e59c696
--- /dev/null
+++ b/vendor/allocator-api2/src/stable/macros.rs
@@ -0,0 +1,83 @@
+/// Creates a [`Vec`] containing the arguments.
+///
+/// `vec!` allows `Vec`s to be defined with the same syntax as array expressions.
+/// There are two forms of this macro:
+///
+/// - Create a [`Vec`] containing a given list of elements:
+///
+/// ```
+/// use allocator_api2::vec;
+/// let v = vec![1, 2, 3];
+/// assert_eq!(v[0], 1);
+/// assert_eq!(v[1], 2);
+/// assert_eq!(v[2], 3);
+/// ```
+///
+///   An allocator can be supplied with the `in` syntax:
+///
+/// ```
+/// use allocator_api2::{vec, alloc::Global};
+/// let v = vec![in Global; 1, 2, 3];
+/// assert_eq!(v[0], 1);
+/// assert_eq!(v[1], 2);
+/// assert_eq!(v[2], 3);
+/// ```
+///
+/// - Create a [`Vec`] from a given element and size:
+///
+/// ```
+/// use allocator_api2::vec;
+/// let v = vec![1; 3];
+/// assert_eq!(v, [1, 1, 1]);
+/// ```
+///
+///   Or, with an explicit allocator:
+///
+/// ```
+/// use allocator_api2::{vec, alloc::Global};
+/// let v = vec![in Global; 1; 3];
+/// assert_eq!(v, [1, 1, 1]);
+/// ```
+///
+/// Note that unlike array expressions this syntax supports all elements
+/// which implement [`Clone`] and the number of elements doesn't have to be
+/// a constant.
+///
+/// This will use `clone` to duplicate an expression, so one should be careful
+/// using this with types having a nonstandard `Clone` implementation. For
+/// example, `vec![Rc::new(1); 5]` will create a vector of five references
+/// to the same boxed integer value, not five references pointing to independently
+/// boxed integers.
+///
+/// Also, note that `vec![expr; 0]` is allowed, and produces an empty vector.
+/// This will still evaluate `expr`, however, and immediately drop the resulting value, so
+/// be mindful of side effects.
+///
+/// [`Vec`]: crate::vec::Vec
+#[cfg(not(no_global_oom_handling))]
+#[macro_export]
+macro_rules! vec {
+ (in $alloc:expr $(;)?) => (
+        $crate::vec::Vec::new_in($alloc)
+ );
+ (in $alloc:expr; $elem:expr; $n:expr) => (
+ $crate::vec::from_elem_in($elem, $n, $alloc)
+ );
+ (in $alloc:expr; $($x:expr),+ $(,)?) => (
+ $crate::boxed::Box::<[_]>::into_vec(
+ $crate::boxed::Box::slice(
+ $crate::boxed::Box::new_in([$($x),+], $alloc)
+ )
+ )
+ );
+ () => (
+ $crate::vec::Vec::new()
+ );
+ ($elem:expr; $n:expr) => (
+ $crate::vec::from_elem($elem, $n)
+ );
+ ($($x:expr),+ $(,)?) => (
+ $crate::boxed::Box::<[_]>::into_vec(
+ $crate::boxed::Box::slice(
+ $crate::boxed::Box::new([$($x),+])
+ )
+ )
+ );
+}
diff --git a/vendor/allocator-api2/src/stable/mod.rs b/vendor/allocator-api2/src/stable/mod.rs
new file mode 100644
index 000000000..709014d00
--- /dev/null
+++ b/vendor/allocator-api2/src/stable/mod.rs
@@ -0,0 +1,62 @@
+#![deny(unsafe_op_in_unsafe_fn)]
+#![allow(clippy::needless_doctest_main, clippy::partialeq_ne_impl)]
+
+#[cfg(feature = "alloc")]
+pub use self::slice::SliceExt;
+
+pub mod alloc;
+
+#[cfg(feature = "alloc")]
+pub mod boxed;
+
+#[cfg(feature = "alloc")]
+mod raw_vec;
+
+#[cfg(feature = "alloc")]
+pub mod vec;
+
+#[cfg(feature = "alloc")]
+mod macros;
+
+#[cfg(feature = "alloc")]
+mod slice;
+
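+// Hints to the optimizer that `v` is always `true`. The debug variant below
+// panics if the hint is violated; the release variant turns a false `v` into
+// immediate undefined behavior via `unreachable_unchecked`.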
+#[cfg(feature = "alloc")]
+#[track_caller]
+#[inline(always)]
+#[cfg(debug_assertions)]
+unsafe fn assume(v: bool) {
+ if !v {
+ core::unreachable!()
+ }
+}
+
+#[cfg(feature = "alloc")]
+#[track_caller]
+#[inline(always)]
+#[cfg(not(debug_assertions))]
+unsafe fn assume(v: bool) {
+ if !v {
+ unsafe {
+ core::hint::unreachable_unchecked();
+ }
+ }
+}
+
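+// Stable polyfill for the strict-provenance `pointer::addr` method:
+// extracts the address portion of a pointer via `transmute`.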
+#[cfg(feature = "alloc")]
+#[inline(always)]
+fn addr<T>(x: *const T) -> usize {
+ #[allow(clippy::useless_transmute, clippy::transmutes_expressible_as_ptr_casts)]
+ unsafe {
+ core::mem::transmute(x)
+ }
+}
+
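+// Stable polyfill for the strict-provenance `core::ptr::invalid_mut`:
+// constructs a pointer from a bare address via `transmute`.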
+#[cfg(feature = "alloc")]
+#[inline(always)]
+fn invalid_mut<T>(addr: usize) -> *mut T {
+ #[allow(clippy::useless_transmute, clippy::transmutes_expressible_as_ptr_casts)]
+ unsafe {
+ core::mem::transmute(addr)
+ }
+}
diff --git a/vendor/allocator-api2/src/stable/raw_vec.rs b/vendor/allocator-api2/src/stable/raw_vec.rs
new file mode 100644
index 000000000..984de7f4f
--- /dev/null
+++ b/vendor/allocator-api2/src/stable/raw_vec.rs
@@ -0,0 +1,642 @@
+use core::alloc::LayoutError;
+use core::mem::{self, ManuallyDrop, MaybeUninit};
+use core::ops::Drop;
+use core::ptr::{self, NonNull};
+use core::slice;
+use core::{cmp, fmt};
+
+use super::{
+ alloc::{Allocator, Global, Layout},
+ assume,
+ boxed::Box,
+};
+
+#[cfg(not(no_global_oom_handling))]
+use super::alloc::handle_alloc_error;
+
+/// The error type for `try_reserve` methods.
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub struct TryReserveError {
+ kind: TryReserveErrorKind,
+}
+
+impl TryReserveError {
+    /// Details about the allocation that caused the error.
+ pub fn kind(&self) -> TryReserveErrorKind {
+ self.kind.clone()
+ }
+}
+
+/// Details of the allocation that caused a `TryReserveError`.
+#[derive(Clone, PartialEq, Eq, Debug)]
+pub enum TryReserveErrorKind {
+ /// Error due to the computed capacity exceeding the collection's maximum
+ /// (usually `isize::MAX` bytes).
+ CapacityOverflow,
+
+    /// The memory allocator returned an error.
+    AllocError {
+        /// The layout of the allocation request that failed.
+        layout: Layout,
+
+ #[doc(hidden)]
+ non_exhaustive: (),
+ },
+}
+
+use TryReserveErrorKind::*;
+
+impl From<TryReserveErrorKind> for TryReserveError {
+ #[inline(always)]
+ fn from(kind: TryReserveErrorKind) -> Self {
+ Self { kind }
+ }
+}
+
+impl From<LayoutError> for TryReserveErrorKind {
+ /// Always evaluates to [`TryReserveErrorKind::CapacityOverflow`].
+ #[inline(always)]
+ fn from(_: LayoutError) -> Self {
+ TryReserveErrorKind::CapacityOverflow
+ }
+}
+
+impl fmt::Display for TryReserveError {
+ fn fmt(
+ &self,
+ fmt: &mut core::fmt::Formatter<'_>,
+ ) -> core::result::Result<(), core::fmt::Error> {
+ fmt.write_str("memory allocation failed")?;
+ let reason = match self.kind {
+ TryReserveErrorKind::CapacityOverflow => {
+ " because the computed capacity exceeded the collection's maximum"
+ }
+ TryReserveErrorKind::AllocError { .. } => {
+ " because the memory allocator returned an error"
+ }
+ };
+ fmt.write_str(reason)
+ }
+}
+
+#[cfg(feature = "std")]
+impl std::error::Error for TryReserveError {}
+
+#[cfg(not(no_global_oom_handling))]
+enum AllocInit {
+ /// The contents of the new memory are uninitialized.
+ Uninitialized,
+ /// The new memory is guaranteed to be zeroed.
+ Zeroed,
+}
+
+/// A low-level utility for more ergonomically allocating, reallocating, and deallocating
+/// a buffer of memory on the heap without having to worry about all the corner cases
+/// involved. This type is excellent for building your own data structures like Vec and VecDeque.
+/// In particular:
+///
+/// * Produces `NonNull::dangling()` on zero-sized types.
+/// * Produces `NonNull::dangling()` on zero-length allocations.
+/// * Avoids freeing `NonNull::dangling()`.
+/// * Catches all overflows in capacity computations (promotes them to "capacity overflow" panics).
+/// * Guards against 32-bit systems allocating more than isize::MAX bytes.
+/// * Guards against overflowing your length.
+/// * Calls `handle_alloc_error` for fallible allocations.
+/// * Contains a `ptr::NonNull` and thus endows the user with all related benefits.
+/// * Uses the excess returned from the allocator to use the largest available capacity.
+///
+/// This type does not in any way inspect the memory that it manages. When dropped it *will*
+/// free its memory, but it *won't* try to drop its contents. It is up to the user of `RawVec`
+/// to handle the actual things *stored* inside of a `RawVec`.
+///
+/// Note that the excess of a zero-sized type is always infinite, so `capacity()` always returns
+/// `usize::MAX`. This means that you need to be careful when round-tripping this type with a
+/// `Box<[T]>`, since `capacity()` won't yield the length.
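+///
+/// A crate-internal usage sketch (illustrative only, since `RawVec` is
+/// `pub(crate)` and never exposed to downstream users):
+///
+/// ```ignore
+/// let buf: RawVec<u32> = RawVec::with_capacity(1);
+/// unsafe {
+///     buf.ptr().write(7);
+///     let boxed = buf.into_box(1).assume_init();
+///     assert_eq!(*boxed, [7]);
+/// }
+/// ```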
+#[allow(missing_debug_implementations)]
+pub(crate) struct RawVec<T, A: Allocator = Global> {
+ ptr: NonNull<T>,
+ cap: usize,
+ alloc: A,
+}
+
+// Safety: RawVec owns both T and A, so sending is safe if
+// sending is safe for T and A.
+unsafe impl<T, A: Allocator> Send for RawVec<T, A>
+where
+ T: Send,
+ A: Send,
+{
+}
+
+// Safety: RawVec owns both T and A, so sharing is safe if
+// sharing is safe for T and A.
+unsafe impl<T, A: Allocator> Sync for RawVec<T, A>
+where
+ T: Sync,
+ A: Sync,
+{
+}
+
+impl<T> RawVec<T, Global> {
+ /// Creates the biggest possible `RawVec` (on the system heap)
+ /// without allocating. If `T` has positive size, then this makes a
+ /// `RawVec` with capacity `0`. If `T` is zero-sized, then it makes a
+ /// `RawVec` with capacity `usize::MAX`. Useful for implementing
+ /// delayed allocation.
+ #[must_use]
+ pub const fn new() -> Self {
+ Self::new_in(Global)
+ }
+
+ /// Creates a `RawVec` (on the system heap) with exactly the
+ /// capacity and alignment requirements for a `[T; capacity]`. This is
+ /// equivalent to calling `RawVec::new` when `capacity` is `0` or `T` is
+ /// zero-sized. Note that if `T` is zero-sized this means you will
+ /// *not* get a `RawVec` with the requested capacity.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the requested capacity exceeds `isize::MAX` bytes.
+ ///
+ /// # Aborts
+ ///
+ /// Aborts on OOM.
+ #[cfg(not(no_global_oom_handling))]
+ #[must_use]
+ #[inline(always)]
+ pub fn with_capacity(capacity: usize) -> Self {
+ Self::with_capacity_in(capacity, Global)
+ }
+
+ /// Like `with_capacity`, but guarantees the buffer is zeroed.
+ #[cfg(not(no_global_oom_handling))]
+ #[must_use]
+ #[inline(always)]
+ pub fn with_capacity_zeroed(capacity: usize) -> Self {
+ Self::with_capacity_zeroed_in(capacity, Global)
+ }
+}
+
+impl<T, A: Allocator> RawVec<T, A> {
+ // Tiny Vecs are dumb. Skip to:
+    // - 8 if the element size is 1, because any heap allocator is likely
+ // to round up a request of less than 8 bytes to at least 8 bytes.
+ // - 4 if elements are moderate-sized (<= 1 KiB).
+ // - 1 otherwise, to avoid wasting too much space for very short Vecs.
+ pub(crate) const MIN_NON_ZERO_CAP: usize = if mem::size_of::<T>() == 1 {
+ 8
+ } else if mem::size_of::<T>() <= 1024 {
+ 4
+ } else {
+ 1
+ };
+
+ /// Like `new`, but parameterized over the choice of allocator for
+ /// the returned `RawVec`.
+ #[inline(always)]
+ pub const fn new_in(alloc: A) -> Self {
+ // `cap: 0` means "unallocated". zero-sized types are ignored.
+ Self {
+ ptr: NonNull::dangling(),
+ cap: 0,
+ alloc,
+ }
+ }
+
+ /// Like `with_capacity`, but parameterized over the choice of
+ /// allocator for the returned `RawVec`.
+ #[cfg(not(no_global_oom_handling))]
+ #[inline(always)]
+ pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
+ Self::allocate_in(capacity, AllocInit::Uninitialized, alloc)
+ }
+
+ /// Like `with_capacity_zeroed`, but parameterized over the choice
+ /// of allocator for the returned `RawVec`.
+ #[cfg(not(no_global_oom_handling))]
+ #[inline(always)]
+ pub fn with_capacity_zeroed_in(capacity: usize, alloc: A) -> Self {
+ Self::allocate_in(capacity, AllocInit::Zeroed, alloc)
+ }
+
+ /// Converts the entire buffer into `Box<[MaybeUninit<T>]>` with the specified `len`.
+ ///
+ /// Note that this will correctly reconstitute any `cap` changes
+ /// that may have been performed. (See description of type for details.)
+ ///
+ /// # Safety
+ ///
+ /// * `len` must be greater than or equal to the most recently requested capacity, and
+ /// * `len` must be less than or equal to `self.capacity()`.
+ ///
+ /// Note that the requested capacity and `self.capacity()` may differ, as
+ /// an allocator may over-allocate and return a larger memory block than requested.
+ #[inline(always)]
+ pub unsafe fn into_box(self, len: usize) -> Box<[MaybeUninit<T>], A> {
+ // Sanity-check one half of the safety requirement (we cannot check the other half).
+ debug_assert!(
+ len <= self.capacity(),
+ "`len` must be smaller than or equal to `self.capacity()`"
+ );
+
+ let me = ManuallyDrop::new(self);
+ unsafe {
+ let slice = slice::from_raw_parts_mut(me.ptr() as *mut MaybeUninit<T>, len);
+ Box::from_raw_in(slice, ptr::read(&me.alloc))
+ }
+ }
+
+ #[cfg(not(no_global_oom_handling))]
+ #[inline(always)]
+ fn allocate_in(capacity: usize, init: AllocInit, alloc: A) -> Self {
+ // Don't allocate here because `Drop` will not deallocate when `capacity` is 0.
+ if mem::size_of::<T>() == 0 || capacity == 0 {
+ Self::new_in(alloc)
+ } else {
+ // We avoid `unwrap_or_else` here because it bloats the amount of
+ // LLVM IR generated.
+ let layout = match Layout::array::<T>(capacity) {
+ Ok(layout) => layout,
+ Err(_) => capacity_overflow(),
+ };
+ match alloc_guard(layout.size()) {
+ Ok(_) => {}
+ Err(_) => capacity_overflow(),
+ }
+ let result = match init {
+ AllocInit::Uninitialized => alloc.allocate(layout),
+ AllocInit::Zeroed => alloc.allocate_zeroed(layout),
+ };
+ let ptr = match result {
+ Ok(ptr) => ptr,
+ Err(_) => handle_alloc_error(layout),
+ };
+
+ // Allocators currently return a `NonNull<[u8]>` whose length
+ // matches the size requested. If that ever changes, the capacity
+ // here should change to `ptr.len() / mem::size_of::<T>()`.
+ Self {
+ ptr: unsafe { NonNull::new_unchecked(ptr.cast().as_ptr()) },
+ cap: capacity,
+ alloc,
+ }
+ }
+ }
+
+ /// Reconstitutes a `RawVec` from a pointer, capacity, and allocator.
+ ///
+ /// # Safety
+ ///
+ /// The `ptr` must be allocated (via the given allocator `alloc`), and with the given
+ /// `capacity`.
+ /// The `capacity` cannot exceed `isize::MAX` for sized types (only a concern on 32-bit
+ /// systems); ZST vectors may have a capacity of up to `usize::MAX`.
+ /// If the `ptr` and `capacity` come from a `RawVec` created via `alloc`, then this is
+ /// guaranteed (a round-trip sketch follows this function).
+ #[inline(always)]
+ pub unsafe fn from_raw_parts_in(ptr: *mut T, capacity: usize, alloc: A) -> Self {
+ Self {
+ ptr: unsafe { NonNull::new_unchecked(ptr) },
+ cap: capacity,
+ alloc,
+ }
+ }
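+
+ // The round-trip mentioned in the safety section above, as a hedged sketch
+ // (crate-internal only, since `RawVec` is `pub(crate)`):
+ //
+ //     let v: RawVec<u32> = RawVec::with_capacity(4);
+ //     let (ptr, cap) = (v.ptr(), v.capacity());
+ //     core::mem::forget(v);
+ //     // Safety: `ptr`/`cap` come from a `RawVec` allocated in `Global`.
+ //     let _v = unsafe { RawVec::<u32>::from_raw_parts_in(ptr, cap, Global) };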
+
+ /// Gets a raw pointer to the start of the allocation. Note that this is
+ /// `NonNull::dangling()` if `capacity == 0` or `T` is zero-sized. In the former case, you must
+ /// be careful.
+ #[inline(always)]
+ pub fn ptr(&self) -> *mut T {
+ self.ptr.as_ptr()
+ }
+
+ /// Gets the capacity of the allocation.
+ ///
+ /// This will always be `usize::MAX` if `T` is zero-sized.
+ #[inline(always)]
+ pub fn capacity(&self) -> usize {
+ if mem::size_of::<T>() == 0 {
+ usize::MAX
+ } else {
+ self.cap
+ }
+ }
+
+ /// Returns a shared reference to the allocator backing this `RawVec`.
+ #[inline(always)]
+ pub fn allocator(&self) -> &A {
+ &self.alloc
+ }
+
+ #[inline(always)]
+ fn current_memory(&self) -> Option<(NonNull<u8>, Layout)> {
+ if mem::size_of::<T>() == 0 || self.cap == 0 {
+ None
+ } else {
+ // We have an allocated chunk of memory, so we can bypass runtime
+ // checks to get our current layout.
+ unsafe {
+ let layout = Layout::array::<T>(self.cap).unwrap_unchecked();
+ Some((self.ptr.cast(), layout))
+ }
+ }
+ }
+
+ /// Ensures that the buffer contains at least enough space to hold `len +
+ /// additional` elements. If it doesn't already have enough capacity, it will
+ /// reallocate enough space plus comfortable slack space to get amortized
+ /// *O*(1) behavior. It will limit this behavior if it would needlessly cause
+ /// itself to panic.
+ ///
+ /// If `len` exceeds `self.capacity()`, this may fail to actually allocate
+ /// the requested space. This is not really unsafe, but the unsafe
+ /// code *you* write that relies on the behavior of this function may break.
+ ///
+ /// This is ideal for implementing a bulk-push operation like `extend`.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity exceeds `isize::MAX` bytes.
+ ///
+ /// # Aborts
+ ///
+ /// Aborts on OOM.
+ #[cfg(not(no_global_oom_handling))]
+ #[inline(always)]
+ pub fn reserve(&mut self, len: usize, additional: usize) {
+ // Callers expect this function to be very cheap when there is already sufficient capacity.
+ // Therefore, we move all the resizing and error-handling logic from grow_amortized and
+ // handle_reserve behind a call, while making sure that this function is likely to be
+ // inlined as just a comparison and a call if the comparison fails.
+ #[cold]
+ #[inline(always)]
+ fn do_reserve_and_handle<T, A: Allocator>(
+ slf: &mut RawVec<T, A>,
+ len: usize,
+ additional: usize,
+ ) {
+ handle_reserve(slf.grow_amortized(len, additional));
+ }
+
+ if self.needs_to_grow(len, additional) {
+ do_reserve_and_handle(self, len, additional);
+ }
+ }
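+
+ // Hedged usage sketch: this crate's `Vec::reserve` forwards here as
+ // `buf.reserve(self.len, additional)`, so a bulk insertion can pre-grow once
+ // and then push without further reallocation:
+ //
+ //     let mut v: allocator_api2::vec::Vec<u32> = allocator_api2::vec::Vec::new();
+ //     v.reserve(100);                 // one amortized growth up front
+ //     let cap = v.capacity();
+ //     for i in 0..100 { v.push(i); }
+ //     assert_eq!(v.capacity(), cap);  // no reallocation during the pushes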
+
+ /// A specialized version of `reserve()` used only by the hot and
+ /// oft-instantiated `Vec::push()`, which does its own capacity check.
+ #[cfg(not(no_global_oom_handling))]
+ #[inline(always)]
+ pub fn reserve_for_push(&mut self, len: usize) {
+ handle_reserve(self.grow_amortized(len, 1));
+ }
+
+ /// The same as `reserve`, but returns on errors instead of panicking or aborting.
+ #[inline(always)]
+ pub fn try_reserve(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
+ if self.needs_to_grow(len, additional) {
+ self.grow_amortized(len, additional)
+ } else {
+ Ok(())
+ }
+ }
+
+ /// Ensures that the buffer contains at least enough space to hold `len +
+ /// additional` elements. If it doesn't already, it will reallocate the
+ /// minimum possible amount of memory necessary. Generally this will be
+ /// exactly the amount of memory necessary, but in principle the allocator
+ /// is free to give back more than we asked for.
+ ///
+ /// If `len` exceeds `self.capacity()`, this may fail to actually allocate
+ /// the requested space. This is not really unsafe, but the unsafe code
+ /// *you* write that relies on the behavior of this function may break.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity exceeds `isize::MAX` bytes.
+ ///
+ /// # Aborts
+ ///
+ /// Aborts on OOM.
+ #[cfg(not(no_global_oom_handling))]
+ #[inline(always)]
+ pub fn reserve_exact(&mut self, len: usize, additional: usize) {
+ handle_reserve(self.try_reserve_exact(len, additional));
+ }
+
+ /// The same as `reserve_exact`, but returns on errors instead of panicking or aborting.
+ #[inline(always)]
+ pub fn try_reserve_exact(
+ &mut self,
+ len: usize,
+ additional: usize,
+ ) -> Result<(), TryReserveError> {
+ if self.needs_to_grow(len, additional) {
+ self.grow_exact(len, additional)
+ } else {
+ Ok(())
+ }
+ }
+
+ /// Shrinks the buffer down to the specified capacity. If the given amount
+ /// is 0, it deallocates completely.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the given amount is *larger* than the current capacity.
+ ///
+ /// # Aborts
+ ///
+ /// Aborts on OOM.
+ #[cfg(not(no_global_oom_handling))]
+ #[inline(always)]
+ pub fn shrink_to_fit(&mut self, cap: usize) {
+ handle_reserve(self.shrink(cap));
+ }
+}
+
+impl<T, A: Allocator> RawVec<T, A> {
+ /// Returns whether the buffer needs to grow to fulfill the needed extra capacity.
+ /// Mainly used to make inlining reserve-calls possible without inlining `grow`.
+ #[inline(always)]
+ fn needs_to_grow(&self, len: usize, additional: usize) -> bool {
+ additional > self.capacity().wrapping_sub(len)
+ }
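+
+ // Editorial note: the `wrapping_sub` above is what backs the documented caveat
+ // on `reserve`: if a caller passes `len > capacity()`, the subtraction wraps to
+ // a huge value and `needs_to_grow` returns `false`, so nothing is allocated.
+ //
+ //     // cap = 4, len = 10, additional = 1:
+ //     // 1 > 4usize.wrapping_sub(10)  ==>  1 > usize::MAX - 5  ==>  false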
+
+ #[inline(always)]
+ fn set_ptr_and_cap(&mut self, ptr: NonNull<[u8]>, cap: usize) {
+ // Allocators currently return a `NonNull<[u8]>` whose length matches
+ // the size requested. If that ever changes, the capacity here should
+ // change to `ptr.len() / mem::size_of::<T>()`.
+ self.ptr = unsafe { NonNull::new_unchecked(ptr.cast().as_ptr()) };
+ self.cap = cap;
+ }
+
+ // This method is usually instantiated many times. So we want it to be as
+ // small as possible, to improve compile times. But we also want as much of
+ // its contents to be statically computable as possible, to make the
+ // generated code run faster. Therefore, this method is carefully written
+ // so that all of the code that depends on `T` is within it, while as much
+ // of the code that doesn't depend on `T` as possible is in functions that
+ // are non-generic over `T`.
+ #[inline(always)]
+ fn grow_amortized(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
+ // This is ensured by the calling contexts.
+ debug_assert!(additional > 0);
+
+ if mem::size_of::<T>() == 0 {
+ // Since we return a capacity of `usize::MAX` when the element size is
+ // 0, getting to here necessarily means the `RawVec` is overfull.
+ return Err(CapacityOverflow.into());
+ }
+
+ // Nothing we can really do about these checks, sadly.
+ let required_cap = len.checked_add(additional).ok_or(CapacityOverflow)?;
+
+ // This guarantees exponential growth. The doubling cannot overflow
+ // because `cap <= isize::MAX` and the type of `cap` is `usize`.
+ let cap = cmp::max(self.cap * 2, required_cap);
+ let cap = cmp::max(Self::MIN_NON_ZERO_CAP, cap);
+
+ let new_layout = Layout::array::<T>(cap);
+
+ // `finish_grow` is non-generic over `T`.
+ let ptr = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?;
+ self.set_ptr_and_cap(ptr, cap);
+ Ok(())
+ }
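+
+ // Worked example (hedged, assuming the code above as written): a `RawVec<u32>`
+ // with `cap = 4` growing from `len = 4` by `additional = 1` computes
+ // `required_cap = 5`, then `cap = max(4 * 2, 5) = 8`, then
+ // `cap = max(MIN_NON_ZERO_CAP, 8) = max(4, 8) = 8`, i.e. classic doubling.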
+
+ // The constraints on this method are much the same as those on
+ // `grow_amortized`, but this method is usually instantiated less often so
+ // it's less critical.
+ #[inline(always)]
+ fn grow_exact(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
+ if mem::size_of::<T>() == 0 {
+ // Since we return a capacity of `usize::MAX` when the type size is
+ // 0, getting to here necessarily means the `RawVec` is overfull.
+ return Err(CapacityOverflow.into());
+ }
+
+ let cap = len.checked_add(additional).ok_or(CapacityOverflow)?;
+ let new_layout = Layout::array::<T>(cap);
+
+ // `finish_grow` is non-generic over `T`.
+ let ptr = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?;
+ self.set_ptr_and_cap(ptr, cap);
+ Ok(())
+ }
+
+ #[cfg(not(no_global_oom_handling))]
+ #[inline(always)]
+ fn shrink(&mut self, cap: usize) -> Result<(), TryReserveError> {
+ assert!(
+ cap <= self.capacity(),
+ "Tried to shrink to a larger capacity"
+ );
+
+ let (ptr, layout) = if let Some(mem) = self.current_memory() {
+ mem
+ } else {
+ return Ok(());
+ };
+
+ let ptr = unsafe {
+ // `Layout::array` cannot overflow here because it would have
+ // overflowed earlier when capacity was larger.
+ let new_layout = Layout::array::<T>(cap).unwrap_unchecked();
+ self.alloc
+ .shrink(ptr, layout, new_layout)
+ .map_err(|_| AllocError {
+ layout: new_layout,
+ non_exhaustive: (),
+ })?
+ };
+ self.set_ptr_and_cap(ptr, cap);
+ Ok(())
+ }
+}
+
+// This function is outside `RawVec` to minimize compile times. See the comment
+// above `RawVec::grow_amortized` for details. (The `A` parameter isn't
+// significant, because the number of different `A` types seen in practice is
+// much smaller than the number of `T` types.)
+#[inline(always)]
+fn finish_grow<A>(
+ new_layout: Result<Layout, LayoutError>,
+ current_memory: Option<(NonNull<u8>, Layout)>,
+ alloc: &mut A,
+) -> Result<NonNull<[u8]>, TryReserveError>
+where
+ A: Allocator,
+{
+ // Check for the error here to minimize the size of `RawVec::grow_*`.
+ let new_layout = new_layout.map_err(|_| CapacityOverflow)?;
+
+ alloc_guard(new_layout.size())?;
+
+ let memory = if let Some((ptr, old_layout)) = current_memory {
+ debug_assert_eq!(old_layout.align(), new_layout.align());
+ unsafe {
+ // The allocator checks for alignment equality
+ assume(old_layout.align() == new_layout.align());
+ alloc.grow(ptr, old_layout, new_layout)
+ }
+ } else {
+ alloc.allocate(new_layout)
+ };
+
+ memory.map_err(|_| {
+ AllocError {
+ layout: new_layout,
+ non_exhaustive: (),
+ }
+ .into()
+ })
+}
+
+impl<T, A: Allocator> Drop for RawVec<T, A> {
+ /// Frees the memory owned by the `RawVec` *without* trying to drop its contents.
+ #[inline(always)]
+ fn drop(&mut self) {
+ if let Some((ptr, layout)) = self.current_memory() {
+ unsafe { self.alloc.deallocate(ptr, layout) }
+ }
+ }
+}
+
+// Central function for reserve error handling.
+#[cfg(not(no_global_oom_handling))]
+#[inline(always)]
+fn handle_reserve(result: Result<(), TryReserveError>) {
+ match result.map_err(|e| e.kind()) {
+ Err(CapacityOverflow) => capacity_overflow(),
+ Err(AllocError { layout, .. }) => handle_alloc_error(layout),
+ Ok(()) => { /* yay */ }
+ }
+}
+
+// We need to guarantee the following:
+// * We don't ever allocate `> isize::MAX` byte-size objects.
+// * We don't overflow `usize::MAX` and actually allocate too little.
+//
+// On 64-bit we just need to check for overflow since trying to allocate
+// `> isize::MAX` bytes will surely fail. On 32-bit and 16-bit we need to add
+// an extra guard for this in case we're running on a platform which can use
+// all 4GB in user-space, e.g., PAE or x32.
+
+#[inline(always)]
+fn alloc_guard(alloc_size: usize) -> Result<(), TryReserveError> {
+ if usize::BITS < 64 && alloc_size > isize::MAX as usize {
+ Err(CapacityOverflow.into())
+ } else {
+ Ok(())
+ }
+}
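+
+// For example, on a 32-bit target `alloc_guard((isize::MAX as usize) + 1)` returns
+// `Err(CapacityOverflow)`, while on 64-bit targets the function is a no-op and the
+// allocator itself is relied on to reject such absurd sizes.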
+
+// One central function responsible for reporting capacity overflows. This'll
+// ensure that the code generation related to these panics is minimal as there's
+// only one location which panics rather than a bunch throughout the module.
+#[cfg(not(no_global_oom_handling))]
+fn capacity_overflow() -> ! {
+ panic!("capacity overflow");
+}
diff --git a/vendor/allocator-api2/src/stable/slice.rs b/vendor/allocator-api2/src/stable/slice.rs
new file mode 100644
index 000000000..0883b72ad
--- /dev/null
+++ b/vendor/allocator-api2/src/stable/slice.rs
@@ -0,0 +1,171 @@
+use crate::{
+ alloc::{Allocator, Global},
+ vec::Vec,
+};
+
+/// Slice methods that use `Box` and `Vec` from this crate.
+pub trait SliceExt<T> {
+ /// Copies `self` into a new `Vec`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let s = [10, 40, 30];
+ /// let x = s.to_vec();
+ /// // Here, `s` and `x` can be modified independently.
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline(always)]
+ fn to_vec(&self) -> Vec<T, Global>
+ where
+ T: Clone,
+ {
+ self.to_vec_in(Global)
+ }
+
+ /// Copies `self` into a new `Vec` with an allocator.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let s = [10, 40, 30];
+ /// let x = s.to_vec_in(System);
+ /// // Here, `s` and `x` can be modified independently.
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ fn to_vec_in<A: Allocator>(&self, alloc: A) -> Vec<T, A>
+ where
+ T: Clone;
+
+ /// Creates a vector by copying a slice `n` times.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if the capacity would overflow.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert_eq!([1, 2].repeat(3), vec![1, 2, 1, 2, 1, 2]);
+ /// ```
+ ///
+ /// A panic upon overflow:
+ ///
+ /// ```should_panic
+ /// // this will panic at runtime
+ /// b"0123456789abcdef".repeat(usize::MAX);
+ /// ```
+ fn repeat(&self, n: usize) -> Vec<T, Global>
+ where
+ T: Copy;
+}
+
+impl<T> SliceExt<T> for [T] {
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ fn to_vec_in<A: Allocator>(&self, alloc: A) -> Vec<T, A>
+ where
+ T: Clone,
+ {
+ struct DropGuard<'a, T, A: Allocator> {
+ vec: &'a mut Vec<T, A>,
+ num_init: usize,
+ }
+ impl<'a, T, A: Allocator> Drop for DropGuard<'a, T, A> {
+ #[inline]
+ fn drop(&mut self) {
+ // SAFETY:
+ // items were marked initialized in the loop below
+ unsafe {
+ self.vec.set_len(self.num_init);
+ }
+ }
+ }
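+
+ // Hedged note: if `b.clone()` panics mid-loop below, this guard's `Drop`
+ // runs during unwinding and truncates `len` to the `num_init` elements
+ // already written, so only initialized items are ever dropped.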
+
+ let mut vec = Vec::with_capacity_in(self.len(), alloc);
+ let mut guard = DropGuard {
+ vec: &mut vec,
+ num_init: 0,
+ };
+ let slots = guard.vec.spare_capacity_mut();
+ // .take(slots.len()) is necessary for LLVM to remove bounds checks
+ // and has better codegen than zip.
+ for (i, b) in self.iter().enumerate().take(slots.len()) {
+ guard.num_init = i;
+ slots[i].write(b.clone());
+ }
+ core::mem::forget(guard);
+ // SAFETY:
+ // the vec was allocated and initialized above to at least this length.
+ unsafe {
+ vec.set_len(self.len());
+ }
+ vec
+ }
+
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ fn repeat(&self, n: usize) -> Vec<T, Global>
+ where
+ T: Copy,
+ {
+ if n == 0 {
+ return Vec::new();
+ }
+
+ // If `n` is larger than zero, it can be split as
+ // `n = 2^expn + rem (2^expn > rem, expn >= 0, rem >= 0)`.
+ // `2^expn` is the number represented by the leftmost '1' bit of `n`,
+ // and `rem` is the remaining part of `n`.
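+ //
+ // Worked example: for `n = 5` and a two-element slice, `5 = 2^2 + 1`, so
+ // `buf` is doubled twice (2 -> 4 -> 8 elements) and then the first
+ // `rem * self.len() = 2` elements are copied once more, giving 10.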
+
+ // Using `Vec` to access `set_len()`.
+ let capacity = self.len().checked_mul(n).expect("capacity overflow");
+ let mut buf = Vec::with_capacity(capacity);
+
+ // `2^expn` repetition is done by doubling `buf` `expn`-times.
+ buf.extend(self);
+ {
+ let mut m = n >> 1;
+ // If `m > 0`, there are remaining bits up to the leftmost '1'.
+ while m > 0 {
+ // `buf.extend(buf)`:
+ unsafe {
+ core::ptr::copy_nonoverlapping(
+ buf.as_ptr(),
+ (buf.as_mut_ptr() as *mut T).add(buf.len()),
+ buf.len(),
+ );
+ // `buf` has capacity of `self.len() * n`.
+ let buf_len = buf.len();
+ buf.set_len(buf_len * 2);
+ }
+
+ m >>= 1;
+ }
+ }
+
+ // `rem` (`= n - 2^expn`) repetition is done by copying
+ // first `rem` repetitions from `buf` itself.
+ let rem_len = capacity - buf.len(); // `self.len() * rem`
+ if rem_len > 0 {
+ // `buf.extend(buf[0 .. rem_len])`:
+ unsafe {
+ // This is non-overlapping since `2^expn > rem`.
+ core::ptr::copy_nonoverlapping(
+ buf.as_ptr(),
+ (buf.as_mut_ptr() as *mut T).add(buf.len()),
+ rem_len,
+ );
+ // `buf.len() + rem_len` is equal to `buf.capacity()` (`= self.len() * n`).
+ buf.set_len(capacity);
+ }
+ }
+ buf
+ }
+}
diff --git a/vendor/allocator-api2/src/stable/vec/drain.rs b/vendor/allocator-api2/src/stable/vec/drain.rs
new file mode 100644
index 000000000..de7e3906c
--- /dev/null
+++ b/vendor/allocator-api2/src/stable/vec/drain.rs
@@ -0,0 +1,242 @@
+use core::fmt;
+use core::iter::FusedIterator;
+use core::mem::{self, size_of, ManuallyDrop};
+use core::ptr::{self, NonNull};
+use core::slice::{self};
+
+use crate::stable::alloc::{Allocator, Global};
+
+use super::Vec;
+
+/// A draining iterator for `Vec<T>`.
+///
+/// This `struct` is created by [`Vec::drain`].
+/// See its documentation for more.
+///
+/// # Example
+///
+/// ```
+/// let mut v = vec![0, 1, 2];
+/// let iter: std::vec::Drain<_> = v.drain(..);
+/// ```
+pub struct Drain<'a, T: 'a, A: Allocator + 'a = Global> {
+ /// Index of tail to preserve
+ pub(super) tail_start: usize,
+ /// Length of tail
+ pub(super) tail_len: usize,
+ /// Current remaining range to remove
+ pub(super) iter: slice::Iter<'a, T>,
+ pub(super) vec: NonNull<Vec<T, A>>,
+}
+
+impl<T: fmt::Debug, A: Allocator> fmt::Debug for Drain<'_, T, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("Drain").field(&self.iter.as_slice()).finish()
+ }
+}
+
+impl<'a, T, A: Allocator> Drain<'a, T, A> {
+ /// Returns the remaining items of this iterator as a slice.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec!['a', 'b', 'c'];
+ /// let mut drain = vec.drain(..);
+ /// assert_eq!(drain.as_slice(), &['a', 'b', 'c']);
+ /// let _ = drain.next().unwrap();
+ /// assert_eq!(drain.as_slice(), &['b', 'c']);
+ /// ```
+ #[must_use]
+ #[inline(always)]
+ pub fn as_slice(&self) -> &[T] {
+ self.iter.as_slice()
+ }
+
+ /// Returns a reference to the underlying allocator.
+ #[must_use]
+ #[inline(always)]
+ pub fn allocator(&self) -> &A {
+ unsafe { self.vec.as_ref().allocator() }
+ }
+
+ /// Keep unyielded elements in the source `Vec`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(drain_keep_rest)]
+ ///
+ /// let mut vec = vec!['a', 'b', 'c'];
+ /// let mut drain = vec.drain(..);
+ ///
+ /// assert_eq!(drain.next().unwrap(), 'a');
+ ///
+ /// // This call keeps 'b' and 'c' in the vec.
+ /// drain.keep_rest();
+ ///
+ /// // If we hadn't called `keep_rest()`,
+ /// // `vec` would be empty.
+ /// assert_eq!(vec, ['b', 'c']);
+ /// ```
+ #[inline(always)]
+ pub fn keep_rest(self) {
+ // At this moment layout looks like this:
+ //
+ // [head] [yielded by next] [unyielded] [yielded by next_back] [tail]
+ // ^-- start \_________/-- unyielded_len \____/-- self.tail_len
+ // ^-- unyielded_ptr ^-- tail
+ //
+ // Normally `Drop` impl would drop [unyielded] and then move [tail] to the `start`.
+ // Here we want to
+ // 1. Move [unyielded] to `start`
+ // 2. Move [tail] to a new start at `start + len(unyielded)`
+ // 3. Update length of the original vec to `len(head) + len(unyielded) + len(tail)`
+ // a. In case of ZST, this is the only thing we want to do
+ // 4. Do *not* drop `self`; everything is already in a consistent state, so there is nothing to do
+ let mut this = ManuallyDrop::new(self);
+
+ unsafe {
+ let source_vec = this.vec.as_mut();
+
+ let start = source_vec.len();
+ let tail = this.tail_start;
+
+ let unyielded_len = this.iter.len();
+ let unyielded_ptr = this.iter.as_slice().as_ptr();
+
+ // ZSTs have no identity, so we don't need to move them around.
+ let needs_move = mem::size_of::<T>() != 0;
+
+ if needs_move {
+ let start_ptr = source_vec.as_mut_ptr().add(start);
+
+ // memmove back unyielded elements
+ if unyielded_ptr != start_ptr {
+ let src = unyielded_ptr;
+ let dst = start_ptr;
+
+ ptr::copy(src, dst, unyielded_len);
+ }
+
+ // memmove back untouched tail
+ if tail != (start + unyielded_len) {
+ let src = source_vec.as_ptr().add(tail);
+ let dst = start_ptr.add(unyielded_len);
+ ptr::copy(src, dst, this.tail_len);
+ }
+ }
+
+ source_vec.set_len(start + unyielded_len + this.tail_len);
+ }
+ }
+}
+
+impl<'a, T, A: Allocator> AsRef<[T]> for Drain<'a, T, A> {
+ #[inline(always)]
+ fn as_ref(&self) -> &[T] {
+ self.as_slice()
+ }
+}
+
+unsafe impl<T: Sync, A: Sync + Allocator> Sync for Drain<'_, T, A> {}
+
+unsafe impl<T: Send, A: Send + Allocator> Send for Drain<'_, T, A> {}
+
+impl<T, A: Allocator> Iterator for Drain<'_, T, A> {
+ type Item = T;
+
+ #[inline(always)]
+ fn next(&mut self) -> Option<T> {
+ self.iter
+ .next()
+ .map(|elt| unsafe { ptr::read(elt as *const _) })
+ }
+
+ #[inline(always)]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+}
+
+impl<T, A: Allocator> DoubleEndedIterator for Drain<'_, T, A> {
+ #[inline(always)]
+ fn next_back(&mut self) -> Option<T> {
+ self.iter
+ .next_back()
+ .map(|elt| unsafe { ptr::read(elt as *const _) })
+ }
+}
+
+impl<T, A: Allocator> Drop for Drain<'_, T, A> {
+ #[inline]
+ fn drop(&mut self) {
+ /// Moves back the un-`Drain`ed elements to restore the original `Vec`.
+ struct DropGuard<'r, 'a, T, A: Allocator>(&'r mut Drain<'a, T, A>);
+
+ impl<'r, 'a, T, A: Allocator> Drop for DropGuard<'r, 'a, T, A> {
+ fn drop(&mut self) {
+ if self.0.tail_len > 0 {
+ unsafe {
+ let source_vec = self.0.vec.as_mut();
+ // memmove back untouched tail, update to new length
+ let start = source_vec.len();
+ let tail = self.0.tail_start;
+ if tail != start {
+ let src = source_vec.as_ptr().add(tail);
+ let dst = source_vec.as_mut_ptr().add(start);
+ ptr::copy(src, dst, self.0.tail_len);
+ }
+ source_vec.set_len(start + self.0.tail_len);
+ }
+ }
+ }
+ }
+
+ let iter = mem::replace(&mut self.iter, [].iter());
+ let drop_len = iter.len();
+
+ let mut vec = self.vec;
+
+ if size_of::<T>() == 0 {
+ // ZSTs have no identity, so we don't need to move them around; we only need to drop the correct amount.
+ // This can be achieved by manipulating the Vec length instead of moving values out of `iter`.
+ unsafe {
+ let vec = vec.as_mut();
+ let old_len = vec.len();
+ vec.set_len(old_len + drop_len + self.tail_len);
+ vec.truncate(old_len + self.tail_len);
+ }
+
+ return;
+ }
+
+ // ensure elements are moved back into their appropriate places, even when drop_in_place panics
+ let _guard = DropGuard(self);
+
+ if drop_len == 0 {
+ return;
+ }
+
+ // as_slice() must only be called when iter.len() > 0, because
+ // vec::Splice modifies vec::Drain fields and may grow the vec, which would invalidate
+ // the iterator's internal pointers. Creating a reference to deallocated memory
+ // is invalid even when it is zero-length.
+ let drop_ptr = iter.as_slice().as_ptr();
+
+ unsafe {
+ // drop_ptr comes from a slice::Iter which only gives us a &[T] but for drop_in_place
+ // a pointer with mutable provenance is necessary. Therefore we must reconstruct
+ // it from the original vec but also avoid creating a &mut to the front since that could
+ // invalidate raw pointers to it which some unsafe code might rely on.
+ let vec_ptr = vec.as_mut().as_mut_ptr();
+ let drop_offset = drop_ptr.offset_from(vec_ptr) as usize;
+ let to_drop = ptr::slice_from_raw_parts_mut(vec_ptr.add(drop_offset), drop_len);
+ ptr::drop_in_place(to_drop);
+ }
+ }
+}
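+
+// Hedged note on the guard above: if an element's destructor panics inside
+// `drop_in_place`, unwinding still runs `DropGuard::drop`, which memmoves the
+// untouched tail back into place and fixes the source `Vec`'s length, leaving the
+// `Vec` valid (just shorter) instead of containing a hole of dropped elements.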
+
+impl<T, A: Allocator> ExactSizeIterator for Drain<'_, T, A> {}
+
+impl<T, A: Allocator> FusedIterator for Drain<'_, T, A> {}
diff --git a/vendor/allocator-api2/src/stable/vec/into_iter.rs b/vendor/allocator-api2/src/stable/vec/into_iter.rs
new file mode 100644
index 000000000..464702afd
--- /dev/null
+++ b/vendor/allocator-api2/src/stable/vec/into_iter.rs
@@ -0,0 +1,198 @@
+use core::fmt;
+use core::iter::FusedIterator;
+use core::marker::PhantomData;
+use core::mem::{self, size_of, ManuallyDrop};
+
+use core::ptr::{self, NonNull};
+use core::slice::{self};
+
+use crate::stable::addr;
+
+use super::{Allocator, Global, RawVec};
+
+#[cfg(not(no_global_oom_handling))]
+use super::Vec;
+
+/// An iterator that moves out of a vector.
+///
+/// This `struct` is created by the `into_iter` method on [`Vec`](super::Vec)
+/// (provided by the [`IntoIterator`] trait).
+///
+/// # Example
+///
+/// ```
+/// let v = vec![0, 1, 2];
+/// let iter: std::vec::IntoIter<_> = v.into_iter();
+/// ```
+pub struct IntoIter<T, A: Allocator = Global> {
+ pub(super) buf: NonNull<T>,
+ pub(super) phantom: PhantomData<T>,
+ pub(super) cap: usize,
+ // the drop impl reconstructs a RawVec from buf, cap and alloc
+ // to avoid dropping the allocator twice we need to wrap it into ManuallyDrop
+ pub(super) alloc: ManuallyDrop<A>,
+ pub(super) ptr: *const T,
+ pub(super) end: *const T,
+}
+
+impl<T: fmt::Debug, A: Allocator> fmt::Debug for IntoIter<T, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("IntoIter").field(&self.as_slice()).finish()
+ }
+}
+
+impl<T, A: Allocator> IntoIter<T, A> {
+ /// Returns the remaining items of this iterator as a slice.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let vec = vec!['a', 'b', 'c'];
+ /// let mut into_iter = vec.into_iter();
+ /// assert_eq!(into_iter.as_slice(), &['a', 'b', 'c']);
+ /// let _ = into_iter.next().unwrap();
+ /// assert_eq!(into_iter.as_slice(), &['b', 'c']);
+ /// ```
+ pub fn as_slice(&self) -> &[T] {
+ unsafe { slice::from_raw_parts(self.ptr, self.len()) }
+ }
+
+ /// Returns the remaining items of this iterator as a mutable slice.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let vec = vec!['a', 'b', 'c'];
+ /// let mut into_iter = vec.into_iter();
+ /// assert_eq!(into_iter.as_slice(), &['a', 'b', 'c']);
+ /// into_iter.as_mut_slice()[2] = 'z';
+ /// assert_eq!(into_iter.next().unwrap(), 'a');
+ /// assert_eq!(into_iter.next().unwrap(), 'b');
+ /// assert_eq!(into_iter.next().unwrap(), 'z');
+ /// ```
+ pub fn as_mut_slice(&mut self) -> &mut [T] {
+ unsafe { &mut *self.as_raw_mut_slice() }
+ }
+
+ /// Returns a reference to the underlying allocator.
+ #[inline(always)]
+ pub fn allocator(&self) -> &A {
+ &self.alloc
+ }
+
+ fn as_raw_mut_slice(&mut self) -> *mut [T] {
+ ptr::slice_from_raw_parts_mut(self.ptr as *mut T, self.len())
+ }
+}
+
+impl<T, A: Allocator> AsRef<[T]> for IntoIter<T, A> {
+ fn as_ref(&self) -> &[T] {
+ self.as_slice()
+ }
+}
+
+unsafe impl<T: Send, A: Allocator + Send> Send for IntoIter<T, A> {}
+
+unsafe impl<T: Sync, A: Allocator + Sync> Sync for IntoIter<T, A> {}
+
+impl<T, A: Allocator> Iterator for IntoIter<T, A> {
+ type Item = T;
+
+ #[inline(always)]
+ fn next(&mut self) -> Option<T> {
+ if self.ptr == self.end {
+ None
+ } else if size_of::<T>() == 0 {
+ // purposefully don't use 'ptr.offset' because for
+ // vectors with 0-size elements this would return the
+ // same pointer.
+ self.ptr = self.ptr.cast::<u8>().wrapping_add(1).cast();
+
+ // Make up a value of this ZST.
+ Some(unsafe { mem::zeroed() })
+ } else {
+ let old = self.ptr;
+ self.ptr = unsafe { self.ptr.add(1) };
+
+ Some(unsafe { ptr::read(old) })
+ }
+ }
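+
+ // Hedged note on the ZST branch above: for zero-sized `T`, `ptr` is only a
+ // counter, never a real address of data. Casting to `*const u8` and using
+ // `wrapping_add(1)` bumps the address by one byte per yielded item, so the
+ // `end - ptr` distance computed in `size_hint` still tracks the remaining length.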
+
+ #[inline(always)]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let exact = if size_of::<T>() == 0 {
+ addr(self.end).wrapping_sub(addr(self.ptr))
+ } else {
+ unsafe { self.end.offset_from(self.ptr) as usize }
+ };
+ (exact, Some(exact))
+ }
+
+ #[inline(always)]
+ fn count(self) -> usize {
+ self.len()
+ }
+}
+
+impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
+ #[inline(always)]
+ fn next_back(&mut self) -> Option<T> {
+ if self.end == self.ptr {
+ None
+ } else if size_of::<T>() == 0 {
+ // See above for why 'ptr.offset' isn't used
+ self.end = self.end.cast::<u8>().wrapping_add(1).cast();
+
+ // Make up a value of this ZST.
+ Some(unsafe { mem::zeroed() })
+ } else {
+ self.end = unsafe { self.end.sub(1) };
+
+ Some(unsafe { ptr::read(self.end) })
+ }
+ }
+}
+
+impl<T, A: Allocator> ExactSizeIterator for IntoIter<T, A> {}
+
+impl<T, A: Allocator> FusedIterator for IntoIter<T, A> {}
+
+#[doc(hidden)]
+pub trait NonDrop {}
+
+// `T: Copy` is used as an approximation for `!Drop`, since `get_unchecked` does not
+// advance `self.ptr` and thus we can't implement drop handling.
+impl<T: Copy> NonDrop for T {}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T: Clone, A: Allocator + Clone> Clone for IntoIter<T, A> {
+ fn clone(&self) -> Self {
+ let mut vec = Vec::<T, A>::with_capacity_in(self.len(), (*self.alloc).clone());
+ vec.extend(self.as_slice().iter().cloned());
+ vec.into_iter()
+ }
+}
+
+impl<T, A: Allocator> Drop for IntoIter<T, A> {
+ fn drop(&mut self) {
+ struct DropGuard<'a, T, A: Allocator>(&'a mut IntoIter<T, A>);
+
+ impl<T, A: Allocator> Drop for DropGuard<'_, T, A> {
+ fn drop(&mut self) {
+ unsafe {
+ // `IntoIter::alloc` is not used anymore after this and will be dropped by RawVec
+ let alloc = ManuallyDrop::take(&mut self.0.alloc);
+ // RawVec handles deallocation
+ let _ = RawVec::from_raw_parts_in(self.0.buf.as_ptr(), self.0.cap, alloc);
+ }
+ }
+ }
+
+ let guard = DropGuard(self);
+ // destroy the remaining elements
+ unsafe {
+ ptr::drop_in_place(guard.0.as_raw_mut_slice());
+ }
+ // now `guard` will be dropped and do the rest
+ }
+}
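+
+// The same guard pattern as in `Drain::drop`: even if dropping one of the
+// remaining elements panics, `DropGuard::drop` runs during unwinding and rebuilds
+// a `RawVec` from `buf`/`cap`/`alloc`, so the backing allocation is freed exactly once.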
diff --git a/vendor/allocator-api2/src/stable/vec/mod.rs b/vendor/allocator-api2/src/stable/vec/mod.rs
new file mode 100644
index 000000000..8b7ab4b12
--- /dev/null
+++ b/vendor/allocator-api2/src/stable/vec/mod.rs
@@ -0,0 +1,3253 @@
+//! A contiguous growable array type with heap-allocated contents, written
+//! `Vec<T>`.
+//!
+//! Vectors have *O*(1) indexing, amortized *O*(1) push (to the end) and
+//! *O*(1) pop (from the end).
+//!
+//! Vectors ensure they never allocate more than `isize::MAX` bytes.
+//!
+//! # Examples
+//!
+//! You can explicitly create a [`Vec`] with [`Vec::new`]:
+//!
+//! ```
+//! let v: Vec<i32> = Vec::new();
+//! ```
+//!
+//! ...or by using the [`vec!`] macro:
+//!
+//! ```
+//! let v: Vec<i32> = vec![];
+//!
+//! let v = vec![1, 2, 3, 4, 5];
+//!
+//! let v = vec![0; 10]; // ten zeroes
+//! ```
+//!
+//! You can [`push`] values onto the end of a vector (which will grow the vector
+//! as needed):
+//!
+//! ```
+//! let mut v = vec![1, 2];
+//!
+//! v.push(3);
+//! ```
+//!
+//! Popping values works in much the same way:
+//!
+//! ```
+//! let mut v = vec![1, 2];
+//!
+//! let two = v.pop();
+//! ```
+//!
+//! Vectors also support indexing (through the [`Index`] and [`IndexMut`] traits):
+//!
+//! ```
+//! let mut v = vec![1, 2, 3];
+//! let three = v[2];
+//! v[1] = v[1] + 5;
+//! ```
+//!
+//! [`push`]: Vec::push
+
+#[cfg(not(no_global_oom_handling))]
+use core::cmp;
+use core::cmp::Ordering;
+use core::convert::TryFrom;
+use core::fmt;
+use core::hash::{Hash, Hasher};
+#[cfg(not(no_global_oom_handling))]
+use core::iter;
+#[cfg(not(no_global_oom_handling))]
+use core::iter::FromIterator;
+use core::marker::PhantomData;
+use core::mem::{self, size_of, ManuallyDrop, MaybeUninit};
+use core::ops::{self, Bound, Index, IndexMut, Range, RangeBounds};
+use core::ptr::{self, NonNull};
+use core::slice::{self, SliceIndex};
+
+use super::{
+ alloc::{Allocator, Global},
+ assume,
+ boxed::Box,
+ raw_vec::{RawVec, TryReserveError},
+};
+
+#[cfg(not(no_global_oom_handling))]
+pub use self::splice::Splice;
+
+#[cfg(not(no_global_oom_handling))]
+mod splice;
+
+pub use self::drain::Drain;
+
+mod drain;
+
+pub use self::into_iter::IntoIter;
+
+mod into_iter;
+
+mod partial_eq;
+
+#[cfg(not(no_global_oom_handling))]
+mod set_len_on_drop;
+
+#[cfg(not(no_global_oom_handling))]
+use self::set_len_on_drop::SetLenOnDrop;
+
+/// A contiguous growable array type, written as `Vec<T>`, short for 'vector'.
+///
+/// # Examples
+///
+/// ```
+/// let mut vec = Vec::new();
+/// vec.push(1);
+/// vec.push(2);
+///
+/// assert_eq!(vec.len(), 2);
+/// assert_eq!(vec[0], 1);
+///
+/// assert_eq!(vec.pop(), Some(2));
+/// assert_eq!(vec.len(), 1);
+///
+/// vec[0] = 7;
+/// assert_eq!(vec[0], 7);
+///
+/// vec.extend([1, 2, 3].iter().copied());
+///
+/// for x in &vec {
+/// println!("{x}");
+/// }
+/// assert_eq!(vec, [7, 1, 2, 3]);
+/// ```
+///
+/// The [`vec!`] macro is provided for convenient initialization:
+///
+/// ```
+/// let mut vec1 = vec![1, 2, 3];
+/// vec1.push(4);
+/// let vec2 = Vec::from([1, 2, 3, 4]);
+/// assert_eq!(vec1, vec2);
+/// ```
+///
+/// It can also initialize each element of a `Vec<T>` with a given value.
+/// This may be more efficient than performing allocation and initialization
+/// in separate steps, especially when initializing a vector of zeros:
+///
+/// ```
+/// let vec = vec![0; 5];
+/// assert_eq!(vec, [0, 0, 0, 0, 0]);
+///
+/// // The following is equivalent, but potentially slower:
+/// let mut vec = Vec::with_capacity(5);
+/// vec.resize(5, 0);
+/// assert_eq!(vec, [0, 0, 0, 0, 0]);
+/// ```
+///
+/// For more information, see
+/// [Capacity and Reallocation](#capacity-and-reallocation).
+///
+/// Use a `Vec<T>` as an efficient stack:
+///
+/// ```
+/// let mut stack = Vec::new();
+///
+/// stack.push(1);
+/// stack.push(2);
+/// stack.push(3);
+///
+/// while let Some(top) = stack.pop() {
+/// // Prints 3, 2, 1
+/// println!("{top}");
+/// }
+/// ```
+///
+/// # Indexing
+///
+/// The `Vec` type allows access to values by index, because it implements the
+/// [`Index`] trait. An example makes this explicit:
+///
+/// ```
+/// let v = vec![0, 2, 4, 6];
+/// println!("{}", v[1]); // it will display '2'
+/// ```
+///
+/// However, be careful: if you try to access an index which isn't in the `Vec`,
+/// your software will panic! You cannot do this:
+///
+/// ```should_panic
+/// let v = vec![0, 2, 4, 6];
+/// println!("{}", v[6]); // it will panic!
+/// ```
+///
+/// Use [`get`] and [`get_mut`] if you want to check whether the index is in
+/// the `Vec`.
+///
+/// # Slicing
+///
+/// A `Vec` can be mutable. Slices, on the other hand, are read-only objects.
+/// To get a [slice][prim@slice], use [`&`]. Example:
+///
+/// ```
+/// fn read_slice(slice: &[usize]) {
+/// // ...
+/// }
+///
+/// let v = vec![0, 1];
+/// read_slice(&v);
+///
+/// // ... and that's all!
+/// // you can also do it like this:
+/// let u: &[usize] = &v;
+/// // or like this:
+/// let u: &[_] = &v;
+/// ```
+///
+/// In Rust, it's more common to pass slices as arguments rather than vectors
+/// when you just want to provide read access. The same goes for [`String`] and
+/// [`&str`].
+///
+/// # Capacity and reallocation
+///
+/// The capacity of a vector is the amount of space allocated for any future
+/// elements that will be added onto the vector. This is not to be confused with
+/// the *length* of a vector, which specifies the number of actual elements
+/// within the vector. If a vector's length would exceed its capacity, its capacity
+/// will automatically be increased, but its elements will have to be
+/// reallocated.
+///
+/// For example, a vector with capacity 10 and length 0 would be an empty vector
+/// with space for 10 more elements. Pushing 10 or fewer elements onto the
+/// vector will not change its capacity or cause reallocation to occur. However,
+/// if the vector's length is increased to 11, it will have to reallocate, which
+/// can be slow. For this reason, it is recommended to use [`Vec::with_capacity`]
+/// whenever possible to specify how big the vector is expected to get.
+///
+/// # Guarantees
+///
+/// Due to its incredibly fundamental nature, `Vec` makes a lot of guarantees
+/// about its design. This ensures that it's as low-overhead as possible in
+/// the general case, and can be correctly manipulated in primitive ways
+/// by unsafe code. Note that these guarantees refer to an unqualified `Vec<T>`.
+/// If additional type parameters are added (e.g., to support custom allocators),
+/// overriding their defaults may change the behavior.
+///
+/// Most fundamentally, `Vec` is and always will be a (pointer, capacity, length)
+/// triplet. No more, no less. The order of these fields is completely
+/// unspecified, and you should use the appropriate methods to modify these.
+/// The pointer will never be null, so this type is null-pointer-optimized.
+///
+/// However, the pointer might not actually point to allocated memory. In particular,
+/// if you construct a `Vec` with capacity 0 via [`Vec::new`], [`vec![]`][`vec!`],
+/// [`Vec::with_capacity(0)`][`Vec::with_capacity`], or by calling [`shrink_to_fit`]
+/// on an empty Vec, it will not allocate memory. Similarly, if you store zero-sized
+/// types inside a `Vec`, it will not allocate space for them. *Note that in this case
+/// the `Vec` might not report a [`capacity`] of 0*. `Vec` will allocate if and only
+/// if <code>[mem::size_of::\<T>]\() * [capacity]\() > 0</code>. In general, `Vec`'s allocation
+/// details are very subtle --- if you intend to allocate memory using a `Vec`
+/// and use it for something else (either to pass to unsafe code, or to build your
+/// own memory-backed collection), be sure to deallocate this memory by using
+/// `from_raw_parts` to recover the `Vec` and then dropping it.
+///
+/// If a `Vec` *has* allocated memory, then the memory it points to is on the heap
+/// (as defined by the allocator Rust is configured to use by default), and its
+/// pointer points to [`len`] initialized, contiguous elements in order (what
+/// you would see if you coerced it to a slice), followed by <code>[capacity] - [len]</code>
+/// logically uninitialized, contiguous elements.
+///
+/// A vector containing the elements `'a'` and `'b'` with capacity 4 can be
+/// visualized as below. The top part is the `Vec` struct, it contains a
+/// pointer to the head of the allocation in the heap, length and capacity.
+/// The bottom part is the allocation on the heap, a contiguous memory block.
+///
+/// ```text
+/// ptr len capacity
+/// +--------+--------+--------+
+/// | 0x0123 | 2 | 4 |
+/// +--------+--------+--------+
+/// |
+/// v
+/// Heap +--------+--------+--------+--------+
+/// | 'a' | 'b' | uninit | uninit |
+/// +--------+--------+--------+--------+
+/// ```
+///
+/// - **uninit** represents memory that is not initialized, see [`MaybeUninit`].
+/// - Note: the ABI is not stable and `Vec` makes no guarantees about its memory
+/// layout (including the order of fields).
+///
+/// `Vec` will never perform a "small optimization" where elements are actually
+/// stored on the stack for two reasons:
+///
+/// * It would make it more difficult for unsafe code to correctly manipulate
+/// a `Vec`. The contents of a `Vec` wouldn't have a stable address if it were
+/// only moved, and it would be more difficult to determine if a `Vec` had
+/// actually allocated memory.
+///
+/// * It would penalize the general case, incurring an additional branch
+/// on every access.
+///
+/// `Vec` will never automatically shrink itself, even if completely empty. This
+/// ensures no unnecessary allocations or deallocations occur. Emptying a `Vec`
+/// and then filling it back up to the same [`len`] should incur no calls to
+/// the allocator. If you wish to free up unused memory, use
+/// [`shrink_to_fit`] or [`shrink_to`].
+///
+/// [`push`] and [`insert`] will never (re)allocate if the reported capacity is
+/// sufficient. [`push`] and [`insert`] *will* (re)allocate if
+/// <code>[len] == [capacity]</code>. That is, the reported capacity is completely
+/// accurate, and can be relied on. It can even be used to manually free the memory
+/// allocated by a `Vec` if desired. Bulk insertion methods *may* reallocate, even
+/// when not necessary.
+///
+/// `Vec` does not guarantee any particular growth strategy when reallocating
+/// when full, nor when [`reserve`] is called. The current strategy is basic
+/// and it may prove desirable to use a non-constant growth factor. Whatever
+/// strategy is used will of course guarantee *O*(1) amortized [`push`].
+///
+/// `vec![x; n]`, `vec![a, b, c, d]`, and
+/// [`Vec::with_capacity(n)`][`Vec::with_capacity`], will all produce a `Vec`
+/// with exactly the requested capacity. If <code>[len] == [capacity]</code>,
+/// (as is the case for the [`vec!`] macro), then a `Vec<T>` can be converted to
+/// and from a [`Box<[T]>`][owned slice] without reallocating or moving the elements.
+///
+/// `Vec` will not specifically overwrite any data that is removed from it,
+/// but also won't specifically preserve it. Its uninitialized memory is
+/// scratch space that it may use however it wants. It will generally just do
+/// whatever is most efficient or otherwise easy to implement. Do not rely on
+/// removed data to be erased for security purposes. Even if you drop a `Vec`, its
+/// buffer may simply be reused by another allocation. Even if you zero a `Vec`'s memory
+/// first, that might not actually happen because the optimizer does not consider
+/// this a side-effect that must be preserved. There is one case which we will
+/// not break, however: using `unsafe` code to write to the excess capacity,
+/// and then increasing the length to match, is always valid (a short sketch of
+/// this pattern follows the struct definition below).
+///
+/// Currently, `Vec` does not guarantee the order in which elements are dropped.
+/// The order has changed in the past and may change again.
+///
+/// [`get`]: ../../std/vec/struct.Vec.html#method.get
+/// [`get_mut`]: ../../std/vec/struct.Vec.html#method.get_mut
+/// [`String`]: alloc_crate::string::String
+/// [`&str`]: type@str
+/// [`shrink_to_fit`]: Vec::shrink_to_fit
+/// [`shrink_to`]: Vec::shrink_to
+/// [capacity]: Vec::capacity
+/// [`capacity`]: Vec::capacity
+/// [mem::size_of::\<T>]: core::mem::size_of
+/// [len]: Vec::len
+/// [`len`]: Vec::len
+/// [`push`]: Vec::push
+/// [`insert`]: Vec::insert
+/// [`reserve`]: Vec::reserve
+/// [`MaybeUninit`]: core::mem::MaybeUninit
+/// [owned slice]: Box
+pub struct Vec<T, A: Allocator = Global> {
+ buf: RawVec<T, A>,
+ len: usize,
+}
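+
+// A hedged sketch of the documented "write to excess capacity, then set the
+// length" pattern from the Guarantees section above:
+//
+//     let mut v: Vec<u8> = Vec::with_capacity(4);
+//     unsafe {
+//         v.as_mut_ptr().write_bytes(7, 4); // fill the spare capacity
+//         v.set_len(4);                     // then claim it as initialized
+//     }
+//     assert_eq!(v, [7, 7, 7, 7]);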
+
+////////////////////////////////////////////////////////////////////////////////
+// Inherent methods
+////////////////////////////////////////////////////////////////////////////////
+
+impl<T> Vec<T> {
+ /// Constructs a new, empty `Vec<T>`.
+ ///
+ /// The vector will not allocate until elements are pushed onto it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #![allow(unused_mut)]
+ /// let mut vec: Vec<i32> = Vec::new();
+ /// ```
+ #[inline(always)]
+ #[must_use]
+ pub const fn new() -> Self {
+ Vec {
+ buf: RawVec::new(),
+ len: 0,
+ }
+ }
+
+ /// Constructs a new, empty `Vec<T>` with at least the specified capacity.
+ ///
+ /// The vector will be able to hold at least `capacity` elements without
+ /// reallocating. This method is allowed to allocate for more elements than
+ /// `capacity`. If `capacity` is 0, the vector will not allocate.
+ ///
+ /// It is important to note that although the returned vector has the
+ /// minimum *capacity* specified, the vector will have a zero *length*. For
+ /// an explanation of the difference between length and capacity, see
+ /// *[Capacity and reallocation]*.
+ ///
+ /// If it is important to know the exact allocated capacity of a `Vec`,
+ /// always use the [`capacity`] method after construction.
+ ///
+ /// For `Vec<T>` where `T` is a zero-sized type, there will be no allocation
+ /// and the capacity will always be `usize::MAX`.
+ ///
+ /// [Capacity and reallocation]: #capacity-and-reallocation
+ /// [`capacity`]: Vec::capacity
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity exceeds `isize::MAX` bytes.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = Vec::with_capacity(10);
+ ///
+ /// // The vector contains no items, even though it has capacity for more
+ /// assert_eq!(vec.len(), 0);
+ /// assert!(vec.capacity() >= 10);
+ ///
+ /// // These are all done without reallocating...
+ /// for i in 0..10 {
+ /// vec.push(i);
+ /// }
+ /// assert_eq!(vec.len(), 10);
+ /// assert!(vec.capacity() >= 10);
+ ///
+ /// // ...but this may make the vector reallocate
+ /// vec.push(11);
+ /// assert_eq!(vec.len(), 11);
+ /// assert!(vec.capacity() >= 11);
+ ///
+ /// // A vector of a zero-sized type will always over-allocate, since no
+ /// // allocation is necessary
+ /// let vec_units = Vec::<()>::with_capacity(10);
+ /// assert_eq!(vec_units.capacity(), usize::MAX);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline(always)]
+ #[must_use]
+ pub fn with_capacity(capacity: usize) -> Self {
+ Self::with_capacity_in(capacity, Global)
+ }
+
+ /// Creates a `Vec<T>` directly from a pointer, a capacity, and a length.
+ ///
+ /// # Safety
+ ///
+ /// This is highly unsafe, due to the number of invariants that aren't
+ /// checked:
+ ///
+ /// * `T` needs to have the same alignment as what `ptr` was allocated with.
+ /// (`T` having a less strict alignment is not sufficient, the alignment really
+ /// needs to be equal to satisfy the [`dealloc`] requirement that memory must be
+ /// allocated and deallocated with the same layout.)
+ /// * The size of `T` times the `capacity` (i.e., the allocated size in bytes) needs
+ /// to be the same size as the pointer was allocated with. (Because similar to
+ /// alignment, [`dealloc`] must be called with the same layout `size`.)
+ /// * `length` needs to be less than or equal to `capacity`.
+ /// * The first `length` values must be properly initialized values of type `T`.
+ /// * `capacity` needs to be the capacity that the pointer was allocated with.
+ /// * The allocated size in bytes must be no larger than `isize::MAX`.
+ /// See the safety documentation of [`pointer::offset`](https://doc.rust-lang.org/nightly/std/primitive.pointer.html#method.offset).
+ ///
+ /// These requirements are always upheld by any `ptr` that has been allocated
+ /// via `Vec<T>`. Other allocation sources are allowed if the invariants are
+ /// upheld.
+ ///
+ /// Violating these may cause problems like corrupting the allocator's
+ /// internal data structures. For example, it is normally **not** safe
+ /// to build a `Vec<u8>` from a pointer to a C `char` array with length
+ /// `size_t`, doing so is only safe if the array was initially allocated by
+ /// a `Vec` or `String`.
+ /// It's also not safe to build one from a `Vec<u16>` and its length, because
+ /// the allocator cares about the alignment, and these two types have different
+ /// alignments. The buffer was allocated with alignment 2 (for `u16`), but after
+ /// turning it into a `Vec<u8>` it'll be deallocated with alignment 1. To avoid
+ /// these issues, it is often preferable to do casting/transmuting using
+ /// [`slice::from_raw_parts`] instead.
+ ///
+ /// The ownership of `ptr` is effectively transferred to the
+ /// `Vec<T>` which may then deallocate, reallocate or change the
+ /// contents of memory pointed to by the pointer at will. Ensure
+ /// that nothing else uses the pointer after calling this
+ /// function.
+ ///
+ /// [`String`]: alloc_crate::string::String
+ /// [`dealloc`]: crate::alloc::GlobalAlloc::dealloc
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ptr;
+ /// use std::mem;
+ ///
+ /// let v = vec![1, 2, 3];
+ ///
+ // FIXME Update this when vec_into_raw_parts is stabilized
+ /// // Prevent running `v`'s destructor so we are in complete control
+ /// // of the allocation.
+ /// let mut v = mem::ManuallyDrop::new(v);
+ ///
+ /// // Pull out the various important pieces of information about `v`
+ /// let p = v.as_mut_ptr();
+ /// let len = v.len();
+ /// let cap = v.capacity();
+ ///
+ /// unsafe {
+ /// // Overwrite memory with 4, 5, 6
+ /// for i in 0..len {
+ /// ptr::write(p.add(i), 4 + i);
+ /// }
+ ///
+ /// // Put everything back together into a Vec
+ /// let rebuilt = Vec::from_raw_parts(p, len, cap);
+ /// assert_eq!(rebuilt, [4, 5, 6]);
+ /// }
+ /// ```
+ ///
+ /// Using memory that was allocated elsewhere:
+ ///
+ /// ```rust
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::alloc::{AllocError, Allocator, Global, Layout};
+ ///
+ /// fn main() {
+ /// let layout = Layout::array::<u32>(16).expect("overflow cannot happen");
+ ///
+ /// let vec = unsafe {
+ /// let mem = match Global.allocate(layout) {
+ /// Ok(mem) => mem.cast::<u32>().as_ptr(),
+ /// Err(AllocError) => return,
+ /// };
+ ///
+ /// mem.write(1_000_000);
+ ///
+ /// Vec::from_raw_parts_in(mem, 1, 16, Global)
+ /// };
+ ///
+ /// assert_eq!(vec, &[1_000_000]);
+ /// assert_eq!(vec.capacity(), 16);
+ /// }
+ /// ```
+ #[inline(always)]
+ pub unsafe fn from_raw_parts(ptr: *mut T, length: usize, capacity: usize) -> Self {
+ unsafe { Self::from_raw_parts_in(ptr, length, capacity, Global) }
+ }
+}
+
+impl<T, A: Allocator> Vec<T, A> {
+ /// Constructs a new, empty `Vec<T, A>`.
+ ///
+ /// The vector will not allocate until elements are pushed onto it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::alloc::System;
+ ///
+ /// # #[allow(unused_mut)]
+ /// let mut vec: Vec<i32, _> = Vec::new_in(System);
+ /// ```
+ #[inline(always)]
+ pub const fn new_in(alloc: A) -> Self {
+ Vec {
+ buf: RawVec::new_in(alloc),
+ len: 0,
+ }
+ }
+
+ /// Constructs a new, empty `Vec<T, A>` with at least the specified capacity
+ /// with the provided allocator.
+ ///
+ /// The vector will be able to hold at least `capacity` elements without
+ /// reallocating. This method is allowed to allocate for more elements than
+ /// `capacity`. If `capacity` is 0, the vector will not allocate.
+ ///
+ /// It is important to note that although the returned vector has the
+ /// minimum *capacity* specified, the vector will have a zero *length*. For
+ /// an explanation of the difference between length and capacity, see
+ /// *[Capacity and reallocation]*.
+ ///
+ /// If it is important to know the exact allocated capacity of a `Vec`,
+ /// always use the [`capacity`] method after construction.
+ ///
+ /// For `Vec<T, A>` where `T` is a zero-sized type, there will be no allocation
+ /// and the capacity will always be `usize::MAX`.
+ ///
+ /// [Capacity and reallocation]: #capacity-and-reallocation
+ /// [`capacity`]: Vec::capacity
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity exceeds `isize::MAX` bytes.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::alloc::System;
+ ///
+ /// let mut vec = Vec::with_capacity_in(10, System);
+ ///
+ /// // The vector contains no items, even though it has capacity for more
+ /// assert_eq!(vec.len(), 0);
+ /// assert_eq!(vec.capacity(), 10);
+ ///
+ /// // These are all done without reallocating...
+ /// for i in 0..10 {
+ /// vec.push(i);
+ /// }
+ /// assert_eq!(vec.len(), 10);
+ /// assert_eq!(vec.capacity(), 10);
+ ///
+ /// // ...but this may make the vector reallocate
+ /// vec.push(11);
+ /// assert_eq!(vec.len(), 11);
+ /// assert!(vec.capacity() >= 11);
+ ///
+ /// // A vector of a zero-sized type will always over-allocate, since no
+ /// // allocation is necessary
+ /// let vec_units = Vec::<(), System>::with_capacity_in(10, System);
+ /// assert_eq!(vec_units.capacity(), usize::MAX);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline(always)]
+ pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
+ Vec {
+ buf: RawVec::with_capacity_in(capacity, alloc),
+ len: 0,
+ }
+ }
+
+ /// Creates a `Vec<T, A>` directly from a pointer, a capacity, a length,
+ /// and an allocator.
+ ///
+ /// # Safety
+ ///
+ /// This is highly unsafe, due to the number of invariants that aren't
+ /// checked:
+ ///
+ /// * `T` needs to have the same alignment as what `ptr` was allocated with.
+ /// (`T` having a less strict alignment is not sufficient, the alignment really
+ /// needs to be equal to satisfy the [`dealloc`] requirement that memory must be
+ /// allocated and deallocated with the same layout.)
+ /// * The size of `T` times the `capacity` (i.e., the allocated size in bytes) needs
+ /// to be the same size as the pointer was allocated with. (Because similar to
+ /// alignment, [`dealloc`] must be called with the same layout `size`.)
+ /// * `length` needs to be less than or equal to `capacity`.
+ /// * The first `length` values must be properly initialized values of type `T`.
+ /// * `capacity` needs to [*fit*] the layout size that the pointer was allocated with.
+ /// * The allocated size in bytes must be no larger than `isize::MAX`.
+ /// See the safety documentation of [`pointer::offset`](https://doc.rust-lang.org/nightly/std/primitive.pointer.html#method.offset).
+ ///
+ /// These requirements are always upheld by any `ptr` that has been allocated
+ /// via `Vec<T, A>`. Other allocation sources are allowed if the invariants are
+ /// upheld.
+ ///
+ /// Violating these may cause problems like corrupting the allocator's
+ /// internal data structures. For example, it is **not** safe
+ /// to build a `Vec<u8>` from a pointer to a C `char` array with length `size_t`.
+ /// It's also not safe to build one from a `Vec<u16>` and its length, because
+ /// the allocator cares about the alignment, and these two types have different
+ /// alignments. The buffer was allocated with alignment 2 (for `u16`), but after
+ /// turning it into a `Vec<u8>` it'll be deallocated with alignment 1.
+ ///
+ /// The ownership of `ptr` is effectively transferred to the
+ /// `Vec<T>` which may then deallocate, reallocate or change the
+ /// contents of memory pointed to by the pointer at will. Ensure
+ /// that nothing else uses the pointer after calling this
+ /// function.
+ ///
+ /// [`String`]: alloc_crate::string::String
+ /// [`dealloc`]: crate::alloc::GlobalAlloc::dealloc
+ /// [*fit*]: crate::alloc::Allocator#memory-fitting
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::alloc::System;
+ ///
+ /// use std::ptr;
+ /// use std::mem;
+ ///
+ ///
+ /// # use allocator_api2::vec::Vec;
+ /// let mut v = Vec::with_capacity_in(3, System);
+ /// v.push(1);
+ /// v.push(2);
+ /// v.push(3);
+ ///
+ // FIXME Update this when vec_into_raw_parts is stabilized
+ /// // Prevent running `v`'s destructor so we are in complete control
+ /// // of the allocation.
+ /// let mut v = mem::ManuallyDrop::new(v);
+ ///
+ /// // Pull out the various important pieces of information about `v`
+ /// let p = v.as_mut_ptr();
+ /// let len = v.len();
+ /// let cap = v.capacity();
+ /// let alloc = v.allocator();
+ ///
+ /// unsafe {
+ /// // Overwrite memory with 4, 5, 6
+ /// for i in 0..len {
+ /// ptr::write(p.add(i), 4 + i);
+ /// }
+ ///
+ /// // Put everything back together into a Vec
+ /// let rebuilt = Vec::from_raw_parts_in(p, len, cap, alloc.clone());
+ /// assert_eq!(rebuilt, [4, 5, 6]);
+ /// }
+ /// ```
+ ///
+ /// Using memory that was allocated elsewhere:
+ ///
+ /// ```rust
+ /// use std::alloc::{alloc, Layout};
+ ///
+ /// fn main() {
+ /// let layout = Layout::array::<u32>(16).expect("overflow cannot happen");
+ /// let vec = unsafe {
+ /// let mem = alloc(layout).cast::<u32>();
+ /// if mem.is_null() {
+ /// return;
+ /// }
+ ///
+ /// mem.write(1_000_000);
+ ///
+ /// Vec::from_raw_parts(mem, 1, 16)
+ /// };
+ ///
+ /// assert_eq!(vec, &[1_000_000]);
+ /// assert_eq!(vec.capacity(), 16);
+ /// }
+ /// ```
+ #[inline(always)]
+ pub unsafe fn from_raw_parts_in(ptr: *mut T, length: usize, capacity: usize, alloc: A) -> Self {
+ unsafe {
+ Vec {
+ buf: RawVec::from_raw_parts_in(ptr, capacity, alloc),
+ len: length,
+ }
+ }
+ }
+
+ /// Decomposes a `Vec<T>` into its raw components.
+ ///
+ /// Returns the raw pointer to the underlying data, the length of
+ /// the vector (in elements), and the allocated capacity of the
+ /// data (in elements). These are the same arguments in the same
+ /// order as the arguments to [`from_raw_parts`].
+ ///
+ /// After calling this function, the caller is responsible for the
+ /// memory previously managed by the `Vec`. The only way to do
+ /// this is to convert the raw pointer, length, and capacity back
+ /// into a `Vec` with the [`from_raw_parts`] function, allowing
+ /// the destructor to perform the cleanup.
+ ///
+ /// [`from_raw_parts`]: Vec::from_raw_parts
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(vec_into_raw_parts)]
+ /// let v: Vec<i32> = vec![-1, 0, 1];
+ ///
+ /// let (ptr, len, cap) = v.into_raw_parts();
+ ///
+ /// let rebuilt = unsafe {
+ /// // We can now make changes to the components, such as
+ /// // transmuting the raw pointer to a compatible type.
+ /// let ptr = ptr as *mut u32;
+ ///
+ /// Vec::from_raw_parts(ptr, len, cap)
+ /// };
+ /// assert_eq!(rebuilt, [4294967295, 0, 1]);
+ /// ```
+ pub fn into_raw_parts(self) -> (*mut T, usize, usize) {
+ let mut me = ManuallyDrop::new(self);
+ (me.as_mut_ptr(), me.len(), me.capacity())
+ }
+
+ /// Decomposes a `Vec<T>` into its raw components.
+ ///
+ /// Returns the raw pointer to the underlying data, the length of the vector (in elements),
+ /// the allocated capacity of the data (in elements), and the allocator. These are the same
+ /// arguments in the same order as the arguments to [`from_raw_parts_in`].
+ ///
+ /// After calling this function, the caller is responsible for the
+ /// memory previously managed by the `Vec`. The only way to do
+ /// this is to convert the raw pointer, length, and capacity back
+ /// into a `Vec` with the [`from_raw_parts_in`] function, allowing
+ /// the destructor to perform the cleanup.
+ ///
+ /// [`from_raw_parts_in`]: Vec::from_raw_parts_in
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api, vec_into_raw_parts)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let mut v: Vec<i32, System> = Vec::new_in(System);
+ /// v.push(-1);
+ /// v.push(0);
+ /// v.push(1);
+ ///
+ /// let (ptr, len, cap, alloc) = v.into_raw_parts_with_alloc();
+ ///
+ /// let rebuilt = unsafe {
+ /// // We can now make changes to the components, such as
+ /// // transmuting the raw pointer to a compatible type.
+ /// let ptr = ptr as *mut u32;
+ ///
+ /// Vec::from_raw_parts_in(ptr, len, cap, alloc)
+ /// };
+ /// assert_eq!(rebuilt, [4294967295, 0, 1]);
+ /// ```
+ // #[unstable(feature = "vec_into_raw_parts", reason = "new API", issue = "65816")]
+ pub fn into_raw_parts_with_alloc(self) -> (*mut T, usize, usize, A) {
+ let mut me = ManuallyDrop::new(self);
+ let len = me.len();
+ let capacity = me.capacity();
+ let ptr = me.as_mut_ptr();
+ let alloc = unsafe { ptr::read(me.allocator()) };
+ (ptr, len, capacity, alloc)
+ }
+
+ /// Returns the total number of elements the vector can hold without
+ /// reallocating.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec: Vec<i32> = Vec::with_capacity(10);
+ /// vec.push(42);
+ /// assert_eq!(vec.capacity(), 10);
+ /// ```
+ #[inline(always)]
+ pub fn capacity(&self) -> usize {
+ self.buf.capacity()
+ }
+
+ /// Reserves capacity for at least `additional` more elements to be inserted
+ /// in the given `Vec<T>`. The collection may reserve more space to
+ /// speculatively avoid frequent reallocations. After calling `reserve`,
+ /// capacity will be greater than or equal to `self.len() + additional`.
+ /// Does nothing if capacity is already sufficient.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity exceeds `isize::MAX` bytes.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![1];
+ /// vec.reserve(10);
+ /// assert!(vec.capacity() >= 11);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline(always)]
+ pub fn reserve(&mut self, additional: usize) {
+ self.buf.reserve(self.len, additional);
+ }
+
+ /// Reserves the minimum capacity for at least `additional` more elements to
+ /// be inserted in the given `Vec<T>`. Unlike [`reserve`], this will not
+ /// deliberately over-allocate to speculatively avoid frequent allocations.
+ /// After calling `reserve_exact`, capacity will be greater than or equal to
+ /// `self.len() + additional`. Does nothing if the capacity is already
+ /// sufficient.
+ ///
+ /// Note that the allocator may give the collection more space than it
+ /// requests. Therefore, capacity can not be relied upon to be precisely
+ /// minimal. Prefer [`reserve`] if future insertions are expected.
+ ///
+ /// [`reserve`]: Vec::reserve
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity exceeds `isize::MAX` bytes.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![1];
+ /// vec.reserve_exact(10);
+ /// assert!(vec.capacity() >= 11);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline(always)]
+ pub fn reserve_exact(&mut self, additional: usize) {
+ self.buf.reserve_exact(self.len, additional);
+ }
+
+ /// Tries to reserve capacity for at least `additional` more elements to be inserted
+ /// in the given `Vec<T>`. The collection may reserve more space to speculatively avoid
+ /// frequent reallocations. After calling `try_reserve`, capacity will be
+ /// greater than or equal to `self.len() + additional` if it returns
+ /// `Ok(())`. Does nothing if capacity is already sufficient. This method
+ /// preserves the contents even if an error occurs.
+ ///
+ /// # Errors
+ ///
+ /// If the capacity overflows, or the allocator reports a failure, then an error
+ /// is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::TryReserveError;
+ ///
+ /// fn process_data(data: &[u32]) -> Result<Vec<u32>, TryReserveError> {
+ /// let mut output = Vec::new();
+ ///
+ /// // Pre-reserve the memory, exiting if we can't
+ /// output.try_reserve(data.len())?;
+ ///
+ /// // Now we know this can't OOM in the middle of our complex work
+ /// output.extend(data.iter().map(|&val| {
+ /// val * 2 + 5 // very complicated
+ /// }));
+ ///
+ /// Ok(output)
+ /// }
+ /// # process_data(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
+ /// ```
+ #[inline(always)]
+ pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
+ self.buf.try_reserve(self.len, additional)
+ }
+
+ /// Tries to reserve the minimum capacity for at least `additional`
+ /// elements to be inserted in the given `Vec<T>`. Unlike [`try_reserve`],
+ /// this will not deliberately over-allocate to speculatively avoid frequent
+ /// allocations. After calling `try_reserve_exact`, capacity will be greater
+ /// than or equal to `self.len() + additional` if it returns `Ok(())`.
+ /// Does nothing if the capacity is already sufficient.
+ ///
+ /// Note that the allocator may give the collection more space than it
+ /// requests. Therefore, capacity can not be relied upon to be precisely
+ /// minimal. Prefer [`try_reserve`] if future insertions are expected.
+ ///
+ /// [`try_reserve`]: Vec::try_reserve
+ ///
+ /// # Errors
+ ///
+ /// If the capacity overflows, or the allocator reports a failure, then an error
+ /// is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::TryReserveError;
+ ///
+ /// fn process_data(data: &[u32]) -> Result<Vec<u32>, TryReserveError> {
+ /// let mut output = Vec::new();
+ ///
+ /// // Pre-reserve the memory, exiting if we can't
+ /// output.try_reserve_exact(data.len())?;
+ ///
+ /// // Now we know this can't OOM in the middle of our complex work
+ /// output.extend(data.iter().map(|&val| {
+ /// val * 2 + 5 // very complicated
+ /// }));
+ ///
+ /// Ok(output)
+ /// }
+ /// # process_data(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
+ /// ```
+ #[inline(always)]
+ pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
+ self.buf.try_reserve_exact(self.len, additional)
+ }
+
+ /// Shrinks the capacity of the vector as much as possible.
+ ///
+ /// It will drop down as close as possible to the length but the allocator
+ /// may still inform the vector that there is space for a few more elements.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = Vec::with_capacity(10);
+ /// vec.extend([1, 2, 3]);
+ /// assert_eq!(vec.capacity(), 10);
+ /// vec.shrink_to_fit();
+ /// assert!(vec.capacity() >= 3);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline(always)]
+ pub fn shrink_to_fit(&mut self) {
+ // The capacity is never less than the length, and there's nothing to do when
+ // they are equal, so we can avoid the panic case in `RawVec::shrink_to_fit`
+ // by only calling it with a greater capacity.
+ if self.capacity() > self.len {
+ self.buf.shrink_to_fit(self.len);
+ }
+ }
+
+ /// Shrinks the capacity of the vector with a lower bound.
+ ///
+ /// The capacity will remain at least as large as both the length
+ /// and the supplied value.
+ ///
+ /// If the current capacity is less than the lower limit, this is a no-op.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = Vec::with_capacity(10);
+ /// vec.extend([1, 2, 3]);
+ /// assert_eq!(vec.capacity(), 10);
+ /// vec.shrink_to(4);
+ /// assert!(vec.capacity() >= 4);
+ /// vec.shrink_to(0);
+ /// assert!(vec.capacity() >= 3);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline(always)]
+ pub fn shrink_to(&mut self, min_capacity: usize) {
+ if self.capacity() > min_capacity {
+ self.buf.shrink_to_fit(cmp::max(self.len, min_capacity));
+ }
+ }
+
+ /// Converts the vector into [`Box<[T]>`][owned slice].
+ ///
+ /// If the vector has excess capacity, its items will be moved into a
+ /// newly-allocated buffer with exactly the right capacity.
+ ///
+ /// [owned slice]: Box
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let v = vec![1, 2, 3];
+ ///
+ /// let slice = v.into_boxed_slice();
+ /// ```
+ ///
+ /// Any excess capacity is removed:
+ ///
+ /// ```
+ /// let mut vec = Vec::with_capacity(10);
+ /// vec.extend([1, 2, 3]);
+ ///
+ /// assert_eq!(vec.capacity(), 10);
+ /// let slice = vec.into_boxed_slice();
+ /// assert_eq!(slice.into_vec().capacity(), 3);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline(always)]
+ pub fn into_boxed_slice(mut self) -> Box<[T], A> {
+ unsafe {
+ self.shrink_to_fit();
+ let me = ManuallyDrop::new(self);
+ let buf = ptr::read(&me.buf);
+ let len = me.len();
+ buf.into_box(len).assume_init()
+ }
+ }
+
+ /// Shortens the vector, keeping the first `len` elements and dropping
+ /// the rest.
+ ///
+ /// If `len` is greater than the vector's current length, this has no
+ /// effect.
+ ///
+ /// The [`drain`] method can emulate `truncate`, but causes the excess
+ /// elements to be returned instead of dropped.
+ ///
+ /// Note that this method has no effect on the allocated capacity
+ /// of the vector.
+ ///
+ /// # Examples
+ ///
+ /// Truncating a five element vector to two elements:
+ ///
+ /// ```
+ /// let mut vec = vec![1, 2, 3, 4, 5];
+ /// vec.truncate(2);
+ /// assert_eq!(vec, [1, 2]);
+ /// ```
+ ///
+ /// No truncation occurs when `len` is greater than the vector's current
+ /// length:
+ ///
+ /// ```
+ /// let mut vec = vec![1, 2, 3];
+ /// vec.truncate(8);
+ /// assert_eq!(vec, [1, 2, 3]);
+ /// ```
+ ///
+ /// Truncating when `len == 0` is equivalent to calling the [`clear`]
+ /// method.
+ ///
+ /// ```
+ /// let mut vec = vec![1, 2, 3];
+ /// vec.truncate(0);
+ /// assert_eq!(vec, []);
+ /// ```
+ ///
+ /// [`clear`]: Vec::clear
+ /// [`drain`]: Vec::drain
+ #[inline(always)]
+ pub fn truncate(&mut self, len: usize) {
+ // This is safe because:
+ //
+ // * the slice passed to `drop_in_place` is valid; the `len > self.len`
+ // case avoids creating an invalid slice, and
+ // * the `len` of the vector is shrunk before calling `drop_in_place`,
+ // such that no value will be dropped twice in case `drop_in_place`
+ // were to panic once (if it panics twice, the program aborts).
+ unsafe {
+ // Note: It's intentional that this is `>` and not `>=`.
+ // Changing it to `>=` has negative performance
+ // implications in some cases. See #78884 for more.
+ if len > self.len {
+ return;
+ }
+ let remaining_len = self.len - len;
+ let s = ptr::slice_from_raw_parts_mut(self.as_mut_ptr().add(len), remaining_len);
+ self.len = len;
+ ptr::drop_in_place(s);
+ }
+ }
+
+ /// Extracts a slice containing the entire vector.
+ ///
+ /// Equivalent to `&s[..]`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::io::{self, Write};
+ /// let buffer = vec![1, 2, 3, 5, 8];
+ /// io::sink().write(buffer.as_slice()).unwrap();
+ /// ```
+ #[inline(always)]
+ pub fn as_slice(&self) -> &[T] {
+ self
+ }
+
+ /// Extracts a mutable slice of the entire vector.
+ ///
+ /// Equivalent to `&mut s[..]`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::io::{self, Read};
+ /// let mut buffer = vec![0; 3];
+ /// io::repeat(0b101).read_exact(buffer.as_mut_slice()).unwrap();
+ /// ```
+ #[inline(always)]
+ pub fn as_mut_slice(&mut self) -> &mut [T] {
+ self
+ }
+
+ /// Returns a raw pointer to the vector's buffer, or a dangling raw pointer
+ /// valid for zero sized reads if the vector didn't allocate.
+ ///
+ /// The caller must ensure that the vector outlives the pointer this
+ /// function returns, or else it will end up pointing to garbage.
+ /// Modifying the vector may cause its buffer to be reallocated,
+ /// which would also make any pointers to it invalid.
+ ///
+ /// The caller must also ensure that the memory the pointer (non-transitively) points to
+ /// is never written to (except inside an `UnsafeCell`) using this pointer or any pointer
+ /// derived from it. If you need to mutate the contents of the slice, use [`as_mut_ptr`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = vec![1, 2, 4];
+ /// let x_ptr = x.as_ptr();
+ ///
+ /// unsafe {
+ /// for i in 0..x.len() {
+ /// assert_eq!(*x_ptr.add(i), 1 << i);
+ /// }
+ /// }
+ /// ```
+ ///
+ /// [`as_mut_ptr`]: Vec::as_mut_ptr
+ #[inline(always)]
+ pub fn as_ptr(&self) -> *const T {
+ // We shadow the slice method of the same name to avoid going through
+ // `deref`, which creates an intermediate reference.
+ let ptr = self.buf.ptr();
+ unsafe {
+ assume(!ptr.is_null());
+ }
+ ptr
+ }
+
+ /// Returns an unsafe mutable pointer to the vector's buffer, or a dangling
+ /// raw pointer valid for zero sized reads if the vector didn't allocate.
+ ///
+ /// The caller must ensure that the vector outlives the pointer this
+ /// function returns, or else it will end up pointing to garbage.
+ /// Modifying the vector may cause its buffer to be reallocated,
+ /// which would also make any pointers to it invalid.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// // Allocate vector big enough for 4 elements.
+ /// let size = 4;
+ /// let mut x: Vec<i32> = Vec::with_capacity(size);
+ /// let x_ptr = x.as_mut_ptr();
+ ///
+ /// // Initialize elements via raw pointer writes, then set length.
+ /// unsafe {
+ /// for i in 0..size {
+ /// *x_ptr.add(i) = i as i32;
+ /// }
+ /// x.set_len(size);
+ /// }
+ /// assert_eq!(&*x, &[0, 1, 2, 3]);
+ /// ```
+ #[inline(always)]
+ pub fn as_mut_ptr(&mut self) -> *mut T {
+ // We shadow the slice method of the same name to avoid going through
+ // `deref_mut`, which creates an intermediate reference.
+ let ptr = self.buf.ptr();
+ unsafe {
+ assume(!ptr.is_null());
+ }
+ ptr
+ }
+
+ /// Returns a reference to the underlying allocator.
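+ ///
+ /// # Examples
+ ///
+ /// A minimal sketch, reusing the `System` allocator from the examples
+ /// above:
+ ///
+ /// ```
+ /// use std::alloc::System;
+ /// # use allocator_api2::vec::Vec;
+ /// let v = Vec::<i32, System>::with_capacity_in(4, System);
+ /// let _alloc: &System = v.allocator();
+ /// ```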
+ #[inline(always)]
+ pub fn allocator(&self) -> &A {
+ self.buf.allocator()
+ }
+
+ /// Forces the length of the vector to `new_len`.
+ ///
+ /// This is a low-level operation that maintains none of the normal
+ /// invariants of the type. Normally changing the length of a vector
+ /// is done using one of the safe operations instead, such as
+ /// [`truncate`], [`resize`], [`extend`], or [`clear`].
+ ///
+ /// [`truncate`]: Vec::truncate
+ /// [`resize`]: Vec::resize
+ /// [`extend`]: Extend::extend
+ /// [`clear`]: Vec::clear
+ ///
+ /// # Safety
+ ///
+ /// - `new_len` must be less than or equal to [`capacity()`].
+ /// - The elements at `old_len..new_len` must be initialized.
+ ///
+ /// [`capacity()`]: Vec::capacity
+ ///
+ /// # Examples
+ ///
+ /// This method can be useful for situations in which the vector
+ /// is serving as a buffer for other code, particularly over FFI:
+ ///
+ /// ```no_run
+ /// # #![allow(dead_code)]
+ /// # // This is just a minimal skeleton for the doc example;
+ /// # // don't use this as a starting point for a real library.
+ /// # pub struct StreamWrapper { strm: *mut std::ffi::c_void }
+ /// # const Z_OK: i32 = 0;
+ /// # extern "C" {
+ /// # fn deflateGetDictionary(
+ /// # strm: *mut std::ffi::c_void,
+ /// # dictionary: *mut u8,
+ /// # dictLength: *mut usize,
+ /// # ) -> i32;
+ /// # }
+ /// # impl StreamWrapper {
+ /// pub fn get_dictionary(&self) -> Option<Vec<u8>> {
+ /// // Per the FFI method's docs, "32768 bytes is always enough".
+ /// let mut dict = Vec::with_capacity(32_768);
+ /// let mut dict_length = 0;
+ /// // SAFETY: When `deflateGetDictionary` returns `Z_OK`, it holds that:
+ /// // 1. `dict_length` elements were initialized.
+ /// // 2. `dict_length` <= the capacity (32_768)
+ /// // which makes `set_len` safe to call.
+ /// unsafe {
+ /// // Make the FFI call...
+ /// let r = deflateGetDictionary(self.strm, dict.as_mut_ptr(), &mut dict_length);
+ /// if r == Z_OK {
+ /// // ...and update the length to what was initialized.
+ /// dict.set_len(dict_length);
+ /// Some(dict)
+ /// } else {
+ /// None
+ /// }
+ /// }
+ /// }
+ /// # }
+ /// ```
+ ///
+ /// While the following example is sound, there is a memory leak since
+ /// the inner vectors were not freed prior to the `set_len` call:
+ ///
+ /// ```
+ /// let mut vec = vec![vec![1, 0, 0],
+ /// vec![0, 1, 0],
+ /// vec![0, 0, 1]];
+ /// // SAFETY:
+ /// // 1. `old_len..0` is empty so no elements need to be initialized.
+ /// // 2. `0 <= capacity` always holds whatever `capacity` is.
+ /// unsafe {
+ /// vec.set_len(0);
+ /// }
+ /// ```
+ ///
+ /// Normally, here, one would use [`clear`] instead to correctly drop
+ /// the contents and thus not leak memory.
+ #[inline(always)]
+ pub unsafe fn set_len(&mut self, new_len: usize) {
+ debug_assert!(new_len <= self.capacity());
+
+ self.len = new_len;
+ }
+
+ /// Removes an element from the vector and returns it.
+ ///
+ /// The removed element is replaced by the last element of the vector.
+ ///
+ /// This does not preserve ordering, but is *O*(1).
+ /// If you need to preserve the element order, use [`remove`] instead.
+ ///
+ /// [`remove`]: Vec::remove
+ ///
+ /// # Panics
+ ///
+ /// Panics if `index` is out of bounds.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = vec!["foo", "bar", "baz", "qux"];
+ ///
+ /// assert_eq!(v.swap_remove(1), "bar");
+ /// assert_eq!(v, ["foo", "qux", "baz"]);
+ ///
+ /// assert_eq!(v.swap_remove(0), "foo");
+ /// assert_eq!(v, ["baz", "qux"]);
+ /// ```
+ #[inline(always)]
+ pub fn swap_remove(&mut self, index: usize) -> T {
+ #[cold]
+ #[inline(never)]
+ fn assert_failed(index: usize, len: usize) -> ! {
+ panic!(
+ "swap_remove index (is {}) should be < len (is {})",
+ index, len
+ );
+ }
+
+ let len = self.len();
+ if index >= len {
+ assert_failed(index, len);
+ }
+ unsafe {
+ // We replace self[index] with the last element. Note that if the
+ // bounds check above succeeds there must be a last element (which
+ // can be self[index] itself).
+ let value = ptr::read(self.as_ptr().add(index));
+ let base_ptr = self.as_mut_ptr();
+ ptr::copy(base_ptr.add(len - 1), base_ptr.add(index), 1);
+ self.set_len(len - 1);
+ value
+ }
+ }
+
+ /// Inserts an element at position `index` within the vector, shifting all
+ /// elements after it to the right.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `index > len`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![1, 2, 3];
+ /// vec.insert(1, 4);
+ /// assert_eq!(vec, [1, 4, 2, 3]);
+ /// vec.insert(4, 5);
+ /// assert_eq!(vec, [1, 4, 2, 3, 5]);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ pub fn insert(&mut self, index: usize, element: T) {
+ #[cold]
+ #[inline(never)]
+ fn assert_failed(index: usize, len: usize) -> ! {
+ panic!(
+ "insertion index (is {}) should be <= len (is {})",
+ index, len
+ );
+ }
+
+ let len = self.len();
+
+ // space for the new element
+ if len == self.buf.capacity() {
+ self.reserve(1);
+ }
+
+ unsafe {
+ // infallible
+ // The spot to put the new value
+ {
+ let p = self.as_mut_ptr().add(index);
+ match cmp::Ord::cmp(&index, &len) {
+ Ordering::Less => {
+ // Shift everything over to make space. (Duplicating the
+ // `index`th element into two consecutive places.)
+ ptr::copy(p, p.add(1), len - index);
+ }
+ Ordering::Equal => {
+ // No elements need shifting.
+ }
+ Ordering::Greater => {
+ assert_failed(index, len);
+ }
+ }
+ // Write it in, overwriting the first copy of the `index`th
+ // element.
+ ptr::write(p, element);
+ }
+ self.set_len(len + 1);
+ }
+ }
+
+ /// Removes and returns the element at position `index` within the vector,
+ /// shifting all elements after it to the left.
+ ///
+ /// Note: Because this shifts over the remaining elements, it has a
+ /// worst-case performance of *O*(*n*). If you don't need the order of elements
+ /// to be preserved, use [`swap_remove`] instead. If you'd like to remove
+ /// elements from the beginning of the `Vec`, consider using
+ /// [`VecDeque::pop_front`] instead.
+ ///
+ /// [`swap_remove`]: Vec::swap_remove
+ /// [`VecDeque::pop_front`]: alloc_crate::collections::VecDeque::pop_front
+ ///
+ /// # Panics
+ ///
+ /// Panics if `index` is out of bounds.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = vec![1, 2, 3];
+ /// assert_eq!(v.remove(1), 2);
+ /// assert_eq!(v, [1, 3]);
+ /// ```
+ #[track_caller]
+ #[inline(always)]
+ pub fn remove(&mut self, index: usize) -> T {
+ #[cold]
+ #[inline(never)]
+ #[track_caller]
+ fn assert_failed(index: usize, len: usize) -> ! {
+ panic!("removal index (is {}) should be < len (is {})", index, len);
+ }
+
+ let len = self.len();
+ if index >= len {
+ assert_failed(index, len);
+ }
+ unsafe {
+ // infallible
+ let ret;
+ {
+ // the place we are taking from.
+ let ptr = self.as_mut_ptr().add(index);
+ // copy it out, unsafely having a copy of the value on
+ // the stack and in the vector at the same time.
+ ret = ptr::read(ptr);
+
+ // Shift everything down to fill in that spot.
+ ptr::copy(ptr.add(1), ptr, len - index - 1);
+ }
+ self.set_len(len - 1);
+ ret
+ }
+ }
+
+ /// Retains only the elements specified by the predicate.
+ ///
+ /// In other words, remove all elements `e` for which `f(&e)` returns `false`.
+ /// This method operates in place, visiting each element exactly once in the
+ /// original order, and preserves the order of the retained elements.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![1, 2, 3, 4];
+ /// vec.retain(|&x| x % 2 == 0);
+ /// assert_eq!(vec, [2, 4]);
+ /// ```
+ ///
+ /// Because the elements are visited exactly once in the original order,
+ /// external state may be used to decide which elements to keep.
+ ///
+ /// ```
+ /// let mut vec = vec![1, 2, 3, 4, 5];
+ /// let keep = [false, true, true, false, true];
+ /// let mut iter = keep.iter();
+ /// vec.retain(|_| *iter.next().unwrap());
+ /// assert_eq!(vec, [2, 3, 5]);
+ /// ```
+ #[inline(always)]
+ pub fn retain<F>(&mut self, mut f: F)
+ where
+ F: FnMut(&T) -> bool,
+ {
+ self.retain_mut(|elem| f(elem));
+ }
+
+ /// Retains only the elements specified by the predicate, passing a mutable reference to it.
+ ///
+ /// In other words, remove all elements `e` such that `f(&mut e)` returns `false`.
+ /// This method operates in place, visiting each element exactly once in the
+ /// original order, and preserves the order of the retained elements.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![1, 2, 3, 4];
+ /// vec.retain_mut(|x| if *x <= 3 {
+ /// *x += 1;
+ /// true
+ /// } else {
+ /// false
+ /// });
+ /// assert_eq!(vec, [2, 3, 4]);
+ /// ```
+ #[inline]
+ pub fn retain_mut<F>(&mut self, mut f: F)
+ where
+ F: FnMut(&mut T) -> bool,
+ {
+ let original_len = self.len();
+ // Avoid double drop if the drop guard is not executed,
+ // since we may make some holes during the process.
+ unsafe { self.set_len(0) };
+
+ // Vec: [Kept, Kept, Hole, Hole, Hole, Hole, Unchecked, Unchecked]
+ // |<- processed len ->| ^- next to check
+ // |<- deleted cnt ->|
+ // |<- original_len ->|
+ // Kept: Elements for which the predicate returns `true`.
+ // Hole: Moved or dropped element slot.
+ // Unchecked: Unchecked valid elements.
+ //
+ // This drop guard will be invoked if the predicate or an element's `drop` panics.
+ // It shifts unchecked elements to cover the holes and calls `set_len` with the correct length.
+ // When neither the predicate nor `drop` ever panics, it is optimized out.
+ struct BackshiftOnDrop<'a, T, A: Allocator> {
+ v: &'a mut Vec<T, A>,
+ processed_len: usize,
+ deleted_cnt: usize,
+ original_len: usize,
+ }
+
+ impl<T, A: Allocator> Drop for BackshiftOnDrop<'_, T, A> {
+ fn drop(&mut self) {
+ if self.deleted_cnt > 0 {
+ // SAFETY: Trailing unchecked items must be valid since we never touch them.
+ unsafe {
+ ptr::copy(
+ self.v.as_ptr().add(self.processed_len),
+ self.v
+ .as_mut_ptr()
+ .add(self.processed_len - self.deleted_cnt),
+ self.original_len - self.processed_len,
+ );
+ }
+ }
+ // SAFETY: After filling holes, all items are in contiguous memory.
+ unsafe {
+ self.v.set_len(self.original_len - self.deleted_cnt);
+ }
+ }
+ }
+
+ let mut g = BackshiftOnDrop {
+ v: self,
+ processed_len: 0,
+ deleted_cnt: 0,
+ original_len,
+ };
+
+ fn process_loop<F, T, A: Allocator, const DELETED: bool>(
+ original_len: usize,
+ f: &mut F,
+ g: &mut BackshiftOnDrop<'_, T, A>,
+ ) where
+ F: FnMut(&mut T) -> bool,
+ {
+ while g.processed_len != original_len {
+ // SAFETY: Unchecked element must be valid.
+ let cur = unsafe { &mut *g.v.as_mut_ptr().add(g.processed_len) };
+ if !f(cur) {
+ // Advance early to avoid double drop if `drop_in_place` panicked.
+ g.processed_len += 1;
+ g.deleted_cnt += 1;
+ // SAFETY: We never touch this element again after it is dropped.
+ unsafe { ptr::drop_in_place(cur) };
+ // We already advanced the counter.
+ if DELETED {
+ continue;
+ } else {
+ break;
+ }
+ }
+ if DELETED {
+ // SAFETY: `deleted_cnt` > 0, so the hole slot must not overlap with the current element.
+ // We use copy for move, and never touch this element again.
+ unsafe {
+ let hole_slot = g.v.as_mut_ptr().add(g.processed_len - g.deleted_cnt);
+ ptr::copy_nonoverlapping(cur, hole_slot, 1);
+ }
+ }
+ g.processed_len += 1;
+ }
+ }
+
+ // Stage 1: Nothing was deleted.
+ process_loop::<F, T, A, false>(original_len, &mut f, &mut g);
+
+ // Stage 2: Some elements were deleted.
+ process_loop::<F, T, A, true>(original_len, &mut f, &mut g);
+
+ // All items are processed. This can be optimized to `set_len` by LLVM.
+ drop(g);
+ }
+
+ /// Removes all but the first of consecutive elements in the vector that resolve to the same
+ /// key.
+ ///
+ /// If the vector is sorted, this removes all duplicates.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![10, 20, 21, 30, 20];
+ ///
+ /// vec.dedup_by_key(|i| *i / 10);
+ ///
+ /// assert_eq!(vec, [10, 20, 30, 20]);
+ /// ```
+ #[inline(always)]
+ pub fn dedup_by_key<F, K>(&mut self, mut key: F)
+ where
+ F: FnMut(&mut T) -> K,
+ K: PartialEq,
+ {
+ self.dedup_by(|a, b| key(a) == key(b))
+ }
+
+ /// Removes all but the first of consecutive elements in the vector satisfying a given equality
+ /// relation.
+ ///
+ /// The `same_bucket` function is passed references to two elements from the vector and
+ /// must determine if the elements compare equal. The elements are passed in opposite order
+ /// from their order in the slice, so if `same_bucket(a, b)` returns `true`, `a` is removed.
+ ///
+ /// If the vector is sorted, this removes all duplicates.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec!["foo", "bar", "Bar", "baz", "bar"];
+ ///
+ /// vec.dedup_by(|a, b| a.eq_ignore_ascii_case(b));
+ ///
+ /// assert_eq!(vec, ["foo", "bar", "baz", "bar"]);
+ /// ```
+ #[inline]
+ pub fn dedup_by<F>(&mut self, mut same_bucket: F)
+ where
+ F: FnMut(&mut T, &mut T) -> bool,
+ {
+ let len = self.len();
+ if len <= 1 {
+ return;
+ }
+
+ /* INVARIANT: vec.len() > read >= write > write-1 >= 0 */
+ struct FillGapOnDrop<'a, T, A: Allocator> {
+ /* Offset of the element we want to check if it is duplicate */
+ read: usize,
+
+ /* Offset of the place where we want to place the non-duplicate
+ * when we find it. */
+ write: usize,
+
+ /* The Vec that would need correction if `same_bucket` panicked */
+ vec: &'a mut Vec<T, A>,
+ }
+
+ impl<'a, T, A: Allocator> Drop for FillGapOnDrop<'a, T, A> {
+ fn drop(&mut self) {
+ /* This code gets executed when `same_bucket` panics */
+
+ /* SAFETY: invariant guarantees that `read - write`
+ * and `len - read` never overflow and that the copy is always
+ * in-bounds. */
+ unsafe {
+ let ptr = self.vec.as_mut_ptr();
+ let len = self.vec.len();
+
+ /* How many items were left when `same_bucket` panicked.
+ * Basically vec[read..].len() */
+ let items_left = len.wrapping_sub(self.read);
+
+ /* Pointer to first item in vec[write..write+items_left] slice */
+ let dropped_ptr = ptr.add(self.write);
+ /* Pointer to first item in vec[read..] slice */
+ let valid_ptr = ptr.add(self.read);
+
+ /* Copy `vec[read..]` to `vec[write..write+items_left]`.
+ * The slices can overlap, so `copy_nonoverlapping` cannot be used */
+ ptr::copy(valid_ptr, dropped_ptr, items_left);
+
+ /* How many items have been already dropped
+ * Basically vec[read..write].len() */
+ let dropped = self.read.wrapping_sub(self.write);
+
+ self.vec.set_len(len - dropped);
+ }
+ }
+ }
+
+ let mut gap = FillGapOnDrop {
+ read: 1,
+ write: 1,
+ vec: self,
+ };
+ let ptr = gap.vec.as_mut_ptr();
+
+ /* Drop items while going through the Vec; this should be more efficient
+ * than doing slice partition_dedup + truncate */
+
+ /* SAFETY: Because of the invariant, read_ptr, prev_ptr and write_ptr
+ * are always in-bounds and read_ptr never aliases prev_ptr */
+ unsafe {
+ while gap.read < len {
+ let read_ptr = ptr.add(gap.read);
+ let prev_ptr = ptr.add(gap.write.wrapping_sub(1));
+
+ if same_bucket(&mut *read_ptr, &mut *prev_ptr) {
+ // Increase `gap.read` now since the drop may panic.
+ gap.read += 1;
+ /* We have found a duplicate, drop it in-place */
+ ptr::drop_in_place(read_ptr);
+ } else {
+ let write_ptr = ptr.add(gap.write);
+
+ /* Because `read_ptr` can be equal to `write_ptr`, we either
+ * have to use `copy` or conditional `copy_nonoverlapping`.
+ * Looks like the first option is faster. */
+ ptr::copy(read_ptr, write_ptr, 1);
+
+ /* We have filled that place, so go further */
+ gap.write += 1;
+ gap.read += 1;
+ }
+ }
+
+ /* Technically we could let `gap` clean up with its Drop, but
+ * when `same_bucket` is guaranteed not to panic, that bloats the
+ * codegen a little, so we just do it manually */
+ gap.vec.set_len(gap.write);
+ mem::forget(gap);
+ }
+ }
+
+ /// Appends an element to the back of a collection.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity exceeds `isize::MAX` bytes.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![1, 2];
+ /// vec.push(3);
+ /// assert_eq!(vec, [1, 2, 3]);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline(always)]
+ pub fn push(&mut self, value: T) {
+ // This will panic or abort if we would allocate > isize::MAX bytes
+ // or if the length increment would overflow for zero-sized types.
+ if self.len == self.buf.capacity() {
+ self.buf.reserve_for_push(self.len);
+ }
+ unsafe {
+ let end = self.as_mut_ptr().add(self.len);
+ ptr::write(end, value);
+ self.len += 1;
+ }
+ }
+
+ /// Appends an element if there is sufficient spare capacity, otherwise an error is returned
+ /// with the element.
+ ///
+ /// Unlike [`push`] this method will not reallocate when there's insufficient capacity.
+ /// The caller should use [`reserve`] or [`try_reserve`] to ensure that there is enough capacity.
+ ///
+ /// [`push`]: Vec::push
+ /// [`reserve`]: Vec::reserve
+ /// [`try_reserve`]: Vec::try_reserve
+ ///
+ /// # Examples
+ ///
+ /// A manual, panic-free alternative to [`FromIterator`]:
+ ///
+ /// ```
+ /// #![feature(vec_push_within_capacity)]
+ ///
+ /// use std::collections::TryReserveError;
+ /// fn from_iter_fallible<T>(iter: impl Iterator<Item=T>) -> Result<Vec<T>, TryReserveError> {
+ /// let mut vec = Vec::new();
+ /// for value in iter {
+ /// if let Err(value) = vec.push_within_capacity(value) {
+ /// vec.try_reserve(1)?;
+ /// // this cannot fail, the previous line either returned or added at least 1 free slot
+ /// let _ = vec.push_within_capacity(value);
+ /// }
+ /// }
+ /// Ok(vec)
+ /// }
+ /// assert_eq!(from_iter_fallible(0..100), Ok(Vec::from_iter(0..100)));
+ /// ```
+ #[inline(always)]
+ pub fn push_within_capacity(&mut self, value: T) -> Result<(), T> {
+ if self.len == self.buf.capacity() {
+ return Err(value);
+ }
+ unsafe {
+ let end = self.as_mut_ptr().add(self.len);
+ ptr::write(end, value);
+ self.len += 1;
+ }
+ Ok(())
+ }
+
+ /// Removes the last element from a vector and returns it, or [`None`] if it
+ /// is empty.
+ ///
+ /// If you'd like to pop the first element, consider using
+ /// [`VecDeque::pop_front`] instead.
+ ///
+ /// [`VecDeque::pop_front`]: alloc_crate::collections::VecDeque::pop_front
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![1, 2, 3];
+ /// assert_eq!(vec.pop(), Some(3));
+ /// assert_eq!(vec, [1, 2]);
+ /// ```
+ #[inline(always)]
+ pub fn pop(&mut self) -> Option<T> {
+ if self.len == 0 {
+ None
+ } else {
+ unsafe {
+ self.len -= 1;
+ Some(ptr::read(self.as_ptr().add(self.len())))
+ }
+ }
+ }
+
+ /// Moves all the elements of `other` into `self`, leaving `other` empty.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity exceeds `isize::MAX` bytes.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![1, 2, 3];
+ /// let mut vec2 = vec![4, 5, 6];
+ /// vec.append(&mut vec2);
+ /// assert_eq!(vec, [1, 2, 3, 4, 5, 6]);
+ /// assert_eq!(vec2, []);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline(always)]
+ pub fn append(&mut self, other: &mut Self) {
+ unsafe {
+ self.append_elements(other.as_slice() as _);
+ other.set_len(0);
+ }
+ }
+
+ /// Appends elements to `self` from another buffer.
+ #[cfg(not(no_global_oom_handling))]
+ #[inline(always)]
+ unsafe fn append_elements(&mut self, other: *const [T]) {
+ let count = unsafe { (*other).len() };
+ self.reserve(count);
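+ // `reserve` has ensured at least `count` spare slots past `len`, so the
+ // `copy_nonoverlapping` below stays within the allocation.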
+ let len = self.len();
+ unsafe { ptr::copy_nonoverlapping(other as *const T, self.as_mut_ptr().add(len), count) };
+ self.len += count;
+ }
+
+ /// Removes the specified range from the vector in bulk, returning all
+ /// removed elements as an iterator. If the iterator is dropped before
+ /// being fully consumed, it drops the remaining removed elements.
+ ///
+ /// The returned iterator keeps a mutable borrow on the vector to optimize
+ /// its implementation.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the starting point is greater than the end point or if
+ /// the end point is greater than the length of the vector.
+ ///
+ /// # Leaking
+ ///
+ /// If the returned iterator goes out of scope without being dropped (due to
+ /// [`mem::forget`], for example), the vector may have lost and leaked
+ /// elements arbitrarily, including elements outside the range.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = vec![1, 2, 3];
+ /// let u: Vec<_> = v.drain(1..).collect();
+ /// assert_eq!(v, &[1]);
+ /// assert_eq!(u, &[2, 3]);
+ ///
+ /// // A full range clears the vector, like `clear()` does
+ /// v.drain(..);
+ /// assert_eq!(v, &[]);
+ /// ```
+ #[inline(always)]
+ pub fn drain<R>(&mut self, range: R) -> Drain<'_, T, A>
+ where
+ R: RangeBounds<usize>,
+ {
+ // Memory safety
+ //
+ // When the Drain is first created, it shortens the length of
+ // the source vector to make sure no uninitialized or moved-from elements
+ // are accessible at all if the Drain's destructor never gets to run.
+ //
+ // Drain will ptr::read out the values to remove.
+ // When finished, remaining tail of the vec is copied back to cover
+ // the hole, and the vector length is restored to the new length.
+ //
+ let len = self.len();
+
+ // Replaced by code below
+ // let Range { start, end } = slice::range(range, ..len);
+
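+ // Indexing with the raw `(Bound, Bound)` pair below performs the same
+ // bounds checks as the unstable `slice::range` it replaces.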
+ // Panics if range is out of bounds
+ let _ = &self.as_slice()[(range.start_bound().cloned(), range.end_bound().cloned())];
+
+ let start = match range.start_bound() {
+ Bound::Included(&n) => n,
+ Bound::Excluded(&n) => n + 1,
+ Bound::Unbounded => 0,
+ };
+ let end = match range.end_bound() {
+ Bound::Included(&n) => n + 1,
+ Bound::Excluded(&n) => n,
+ Bound::Unbounded => len,
+ };
+
+ unsafe {
+ // set the vec's length to `start`, to be safe in case the Drain is leaked
+ self.set_len(start);
+ let range_slice = slice::from_raw_parts(self.as_ptr().add(start), end - start);
+ Drain {
+ tail_start: end,
+ tail_len: len - end,
+ iter: range_slice.iter(),
+ vec: NonNull::from(self),
+ }
+ }
+ }
+
+ /// Clears the vector, removing all values.
+ ///
+ /// Note that this method has no effect on the allocated capacity
+ /// of the vector.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = vec![1, 2, 3];
+ ///
+ /// v.clear();
+ ///
+ /// assert!(v.is_empty());
+ /// ```
+ #[inline(always)]
+ pub fn clear(&mut self) {
+ let elems: *mut [T] = self.as_mut_slice();
+
+ // SAFETY:
+ // - `elems` comes directly from `as_mut_slice` and is therefore valid.
+ // - Setting `self.len` before calling `drop_in_place` means that,
+ // if an element's `Drop` impl panics, the vector's `Drop` impl will
+ // do nothing (leaking the rest of the elements) instead of dropping
+ // some twice.
+ unsafe {
+ self.len = 0;
+ ptr::drop_in_place(elems);
+ }
+ }
+
+ /// Returns the number of elements in the vector, also referred to
+ /// as its 'length'.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let a = vec![1, 2, 3];
+ /// assert_eq!(a.len(), 3);
+ /// ```
+ #[inline(always)]
+ pub fn len(&self) -> usize {
+ self.len
+ }
+
+ /// Returns `true` if the vector contains no elements.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = Vec::new();
+ /// assert!(v.is_empty());
+ ///
+ /// v.push(1);
+ /// assert!(!v.is_empty());
+ /// ```
+ #[inline(always)]
+ pub fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+
+ /// Splits the collection into two at the given index.
+ ///
+ /// Returns a newly allocated vector containing the elements in the range
+ /// `[at, len)`. After the call, the original vector will be left containing
+ /// the elements `[0, at)` with its previous capacity unchanged.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `at > len`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![1, 2, 3];
+ /// let vec2 = vec.split_off(1);
+ /// assert_eq!(vec, [1]);
+ /// assert_eq!(vec2, [2, 3]);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline(always)]
+ #[must_use = "use `.truncate()` if you don't need the other half"]
+ pub fn split_off(&mut self, at: usize) -> Self
+ where
+ A: Clone,
+ {
+ #[cold]
+ #[inline(never)]
+ fn assert_failed(at: usize, len: usize) -> ! {
+ panic!("`at` split index (is {}) should be <= len (is {})", at, len);
+ }
+
+ if at > self.len() {
+ assert_failed(at, self.len());
+ }
+
+ if at == 0 {
+ // the new vector can take over the original buffer and avoid the copy
+ return mem::replace(
+ self,
+ Vec::with_capacity_in(self.capacity(), self.allocator().clone()),
+ );
+ }
+
+ let other_len = self.len - at;
+ let mut other = Vec::with_capacity_in(other_len, self.allocator().clone());
+
+ // Unsafely `set_len` and copy items to `other`.
+ unsafe {
+ self.set_len(at);
+ other.set_len(other_len);
+
+ ptr::copy_nonoverlapping(self.as_ptr().add(at), other.as_mut_ptr(), other.len());
+ }
+ other
+ }
+
+ /// Resizes the `Vec` in-place so that `len` is equal to `new_len`.
+ ///
+ /// If `new_len` is greater than `len`, the `Vec` is extended by the
+ /// difference, with each additional slot filled with the result of
+ /// calling the closure `f`. The return values from `f` will end up
+ /// in the `Vec` in the order they have been generated.
+ ///
+ /// If `new_len` is less than `len`, the `Vec` is simply truncated.
+ ///
+ /// This method uses a closure to create new values on every push. If
+ /// you'd rather [`Clone`] a given value, use [`Vec::resize`]. If you
+ /// want to use the [`Default`] trait to generate values, you can
+ /// pass [`Default::default`] as the second argument.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![1, 2, 3];
+ /// vec.resize_with(5, Default::default);
+ /// assert_eq!(vec, [1, 2, 3, 0, 0]);
+ ///
+ /// let mut vec = vec![];
+ /// let mut p = 1;
+ /// vec.resize_with(4, || { p *= 2; p });
+ /// assert_eq!(vec, [2, 4, 8, 16]);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline(always)]
+ pub fn resize_with<F>(&mut self, new_len: usize, f: F)
+ where
+ F: FnMut() -> T,
+ {
+ let len = self.len();
+ if new_len > len {
+ self.extend(iter::repeat_with(f).take(new_len - len));
+ } else {
+ self.truncate(new_len);
+ }
+ }
+
+ /// Consumes and leaks the `Vec`, returning a mutable reference to the contents,
+ /// `&'a mut [T]`. Note that the type `T` must outlive the chosen lifetime
+ /// `'a`. If the type has only static references, or none at all, then this
+ /// may be chosen to be `'static`.
+ ///
+ /// As of Rust 1.57, this method does not reallocate or shrink the `Vec`,
+ /// so the leaked allocation may include unused capacity that is not part
+ /// of the returned slice.
+ ///
+ /// This function is mainly useful for data that lives for the remainder of
+ /// the program's life. Dropping the returned reference will cause a memory
+ /// leak.
+ ///
+ /// # Examples
+ ///
+ /// Simple usage:
+ ///
+ /// ```
+ /// let x = vec![1, 2, 3];
+ /// let static_ref: &'static mut [usize] = x.leak();
+ /// static_ref[0] += 1;
+ /// assert_eq!(static_ref, &[2, 2, 3]);
+ /// ```
+ #[inline(always)]
+ pub fn leak<'a>(self) -> &'a mut [T]
+ where
+ A: 'a,
+ {
+ let mut me = ManuallyDrop::new(self);
+ unsafe { slice::from_raw_parts_mut(me.as_mut_ptr(), me.len) }
+ }
+
+ /// Returns the remaining spare capacity of the vector as a slice of
+ /// `MaybeUninit<T>`.
+ ///
+ /// The returned slice can be used to fill the vector with data (e.g. by
+ /// reading from a file) before marking the data as initialized using the
+ /// [`set_len`] method.
+ ///
+ /// [`set_len`]: Vec::set_len
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// // Allocate vector big enough for 10 elements.
+ /// let mut v = Vec::with_capacity(10);
+ ///
+ /// // Fill in the first 3 elements.
+ /// let uninit = v.spare_capacity_mut();
+ /// uninit[0].write(0);
+ /// uninit[1].write(1);
+ /// uninit[2].write(2);
+ ///
+ /// // Mark the first 3 elements of the vector as being initialized.
+ /// unsafe {
+ /// v.set_len(3);
+ /// }
+ ///
+ /// assert_eq!(&v, &[0, 1, 2]);
+ /// ```
+ #[inline(always)]
+ pub fn spare_capacity_mut(&mut self) -> &mut [MaybeUninit<T>] {
+ // Note:
+ // This method is not implemented in terms of `split_at_spare_mut`,
+ // to prevent invalidation of pointers to the buffer.
+ unsafe {
+ slice::from_raw_parts_mut(
+ self.as_mut_ptr().add(self.len) as *mut MaybeUninit<T>,
+ self.buf.capacity() - self.len,
+ )
+ }
+ }
+
+ /// Returns vector content as a slice of `T`, along with the remaining spare
+ /// capacity of the vector as a slice of `MaybeUninit<T>`.
+ ///
+ /// The returned spare capacity slice can be used to fill the vector with data
+ /// (e.g. by reading from a file) before marking the data as initialized using
+ /// the [`set_len`] method.
+ ///
+ /// [`set_len`]: Vec::set_len
+ ///
+ /// Note that this is a low-level API, which should be used with care for
+ /// optimization purposes. If you need to append data to a `Vec`
+ /// you can use [`push`], [`extend`], [`extend_from_slice`],
+ /// [`extend_from_within`], [`insert`], [`append`], [`resize`] or
+ /// [`resize_with`], depending on your exact needs.
+ ///
+ /// [`push`]: Vec::push
+ /// [`extend`]: Vec::extend
+ /// [`extend_from_slice`]: Vec::extend_from_slice
+ /// [`extend_from_within`]: Vec::extend_from_within
+ /// [`insert`]: Vec::insert
+ /// [`append`]: Vec::append
+ /// [`resize`]: Vec::resize
+ /// [`resize_with`]: Vec::resize_with
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(vec_split_at_spare)]
+ ///
+ /// let mut v = vec![1, 1, 2];
+ ///
+ /// // Reserve additional space big enough for 10 elements.
+ /// v.reserve(10);
+ ///
+ /// let (init, uninit) = v.split_at_spare_mut();
+ /// let sum = init.iter().copied().sum::<u32>();
+ ///
+ /// // Fill in the next 4 elements.
+ /// uninit[0].write(sum);
+ /// uninit[1].write(sum * 2);
+ /// uninit[2].write(sum * 3);
+ /// uninit[3].write(sum * 4);
+ ///
+ /// // Mark the 4 elements of the vector as being initialized.
+ /// unsafe {
+ /// let len = v.len();
+ /// v.set_len(len + 4);
+ /// }
+ ///
+ /// assert_eq!(&v, &[1, 1, 2, 4, 8, 12, 16]);
+ /// ```
+ #[inline(always)]
+ pub fn split_at_spare_mut(&mut self) -> (&mut [T], &mut [MaybeUninit<T>]) {
+ // SAFETY:
+ // - len is ignored and so never changed
+ let (init, spare, _) = unsafe { self.split_at_spare_mut_with_len() };
+ (init, spare)
+ }
+
+ /// Safety: changing returned .2 (&mut usize) is considered the same as calling `.set_len(_)`.
+ ///
+ /// This method provides unique access to all vec parts at once in `extend_from_within`.
+ unsafe fn split_at_spare_mut_with_len(
+ &mut self,
+ ) -> (&mut [T], &mut [MaybeUninit<T>], &mut usize) {
+ let ptr = self.as_mut_ptr();
+ // SAFETY:
+ // - `ptr` is guaranteed to be valid for `self.len` elements
+ // - but the allocation extends out to `self.buf.capacity()` elements, possibly
+ // uninitialized
+ let spare_ptr = unsafe { ptr.add(self.len) };
+ let spare_ptr = spare_ptr.cast::<MaybeUninit<T>>();
+ let spare_len = self.buf.capacity() - self.len;
+
+ // SAFETY:
+ // - `ptr` is guaranteed to be valid for `self.len` elements
+ // - `spare_ptr` points just past the initialized elements, so it doesn't overlap with `initialized`
+ unsafe {
+ let initialized = slice::from_raw_parts_mut(ptr, self.len);
+ let spare = slice::from_raw_parts_mut(spare_ptr, spare_len);
+
+ (initialized, spare, &mut self.len)
+ }
+ }
+}
+
+impl<T: Clone, A: Allocator> Vec<T, A> {
+ /// Resizes the `Vec` in-place so that `len` is equal to `new_len`.
+ ///
+ /// If `new_len` is greater than `len`, the `Vec` is extended by the
+ /// difference, with each additional slot filled with `value`.
+ /// If `new_len` is less than `len`, the `Vec` is simply truncated.
+ ///
+ /// This method requires `T` to implement [`Clone`],
+ /// in order to be able to clone the passed value.
+ /// If you need more flexibility (or want to rely on [`Default`] instead of
+ /// [`Clone`]), use [`Vec::resize_with`].
+ /// If you only need to resize to a smaller size, use [`Vec::truncate`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec!["hello"];
+ /// vec.resize(3, "world");
+ /// assert_eq!(vec, ["hello", "world", "world"]);
+ ///
+ /// let mut vec = vec![1, 2, 3, 4];
+ /// vec.resize(2, 0);
+ /// assert_eq!(vec, [1, 2]);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline(always)]
+ pub fn resize(&mut self, new_len: usize, value: T) {
+ let len = self.len();
+
+ if new_len > len {
+ self.extend_with(new_len - len, ExtendElement(value))
+ } else {
+ self.truncate(new_len);
+ }
+ }
+
+ /// Clones and appends all elements in a slice to the `Vec`.
+ ///
+ /// Iterates over the slice `other`, clones each element, and then appends
+ /// it to this `Vec`. The `other` slice is traversed in-order.
+ ///
+ /// Note that this function is the same as [`extend`] except that it is
+ /// specialized to work with slices instead. If and when Rust gets
+ /// specialization this function will likely be deprecated (but still
+ /// available).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![1];
+ /// vec.extend_from_slice(&[2, 3, 4]);
+ /// assert_eq!(vec, [1, 2, 3, 4]);
+ /// ```
+ ///
+ /// [`extend`]: Vec::extend
+ #[cfg(not(no_global_oom_handling))]
+ #[inline(always)]
+ pub fn extend_from_slice(&mut self, other: &[T]) {
+ self.extend(other.iter().cloned())
+ }
+
+ /// Copies elements from `src` range to the end of the vector.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the starting point is greater than the end point or if
+ /// the end point is greater than the length of the vector.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![0, 1, 2, 3, 4];
+ ///
+ /// vec.extend_from_within(2..);
+ /// assert_eq!(vec, [0, 1, 2, 3, 4, 2, 3, 4]);
+ ///
+ /// vec.extend_from_within(..2);
+ /// assert_eq!(vec, [0, 1, 2, 3, 4, 2, 3, 4, 0, 1]);
+ ///
+ /// vec.extend_from_within(4..8);
+ /// assert_eq!(vec, [0, 1, 2, 3, 4, 2, 3, 4, 0, 1, 4, 2, 3, 4]);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline(always)]
+ pub fn extend_from_within<R>(&mut self, src: R)
+ where
+ R: RangeBounds<usize>,
+ {
+ // let range = slice::range(src, ..self.len());
+
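+ // As in `drain`, indexing with the raw bounds stands in for the unstable
+ // `slice::range` and performs the same panic checks.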
+ let _ = &self.as_slice()[(src.start_bound().cloned(), src.end_bound().cloned())];
+
+ let len = self.len();
+
+ let start: ops::Bound<&usize> = src.start_bound();
+ let start = match start {
+ ops::Bound::Included(&start) => start,
+ ops::Bound::Excluded(start) => start + 1,
+ ops::Bound::Unbounded => 0,
+ };
+
+ let end: ops::Bound<&usize> = src.end_bound();
+ let end = match end {
+ ops::Bound::Included(end) => end + 1,
+ ops::Bound::Excluded(&end) => end,
+ ops::Bound::Unbounded => len,
+ };
+
+ let range = start..end;
+
+ self.reserve(range.len());
+
+ // SAFETY:
+ // - len is increased only after initializing elements
+ let (this, spare, len) = unsafe { self.split_at_spare_mut_with_len() };
+
+ // SAFETY:
+ // - `range` was bounds-checked against the slice above
+ let to_clone = unsafe { this.get_unchecked(range) };
+
+ iter::zip(to_clone, spare)
+ .map(|(src, dst)| dst.write(src.clone()))
+ // Note:
+ // - Element was just initialized with `MaybeUninit::write`, so it's ok to increase len
+ // - len is increased after each element to prevent leaks (see issue #82533)
+ .for_each(|_| *len += 1);
+ }
+}
+
+impl<T, A: Allocator, const N: usize> Vec<[T; N], A> {
+ /// Takes a `Vec<[T; N]>` and flattens it into a `Vec<T>`.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the length of the resulting vector would overflow a `usize`.
+ ///
+ /// This is only possible when flattening a vector of arrays of zero-sized
+ /// types, and thus tends to be irrelevant in practice. If
+ /// `size_of::<T>() > 0`, this will never panic.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(slice_flatten)]
+ ///
+ /// let mut vec = vec![[1, 2, 3], [4, 5, 6], [7, 8, 9]];
+ /// assert_eq!(vec.pop(), Some([7, 8, 9]));
+ ///
+ /// let mut flattened = vec.into_flattened();
+ /// assert_eq!(flattened.pop(), Some(6));
+ /// ```
+ #[inline(always)]
+ pub fn into_flattened(self) -> Vec<T, A> {
+ let (ptr, len, cap, alloc) = self.into_raw_parts_with_alloc();
+ let (new_len, new_cap) = if size_of::<T>() == 0 {
+ (len.checked_mul(N).expect("vec len overflow"), usize::MAX)
+ } else {
+ // SAFETY:
+ // - `cap * N` cannot overflow because the allocation is already in
+ // the address space.
+ // - Each `[T; N]` has `N` valid elements, so there are `len * N`
+ // valid elements in the allocation.
+ (len * N, cap * N)
+ };
+ // SAFETY:
+ // - `ptr` was allocated by `self`
+ // - `ptr` is well-aligned because `[T; N]` has the same alignment as `T`.
+ // - `new_cap` refers to the same sized allocation as `cap` because
+ // `new_cap * size_of::<T>()` == `cap * size_of::<[T; N]>()`
+ // - `len` <= `cap`, so `len * N` <= `cap * N`.
+ unsafe { Vec::<T, A>::from_raw_parts_in(ptr.cast(), new_len, new_cap, alloc) }
+ }
+}
+
+// This trait generalizes the element generator used by `extend_with`.
+trait ExtendWith<T> {
+ fn next(&mut self) -> T;
+ fn last(self) -> T;
+}
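+// `next` yields one value per slot to fill; `last` consumes the generator for
+// the final slot, letting `ExtendElement` hand over its value without an
+// extra clone (see `extend_with` below).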
+
+struct ExtendElement<T>(T);
+impl<T: Clone> ExtendWith<T> for ExtendElement<T> {
+ #[inline(always)]
+ fn next(&mut self) -> T {
+ self.0.clone()
+ }
+
+ #[inline(always)]
+ fn last(self) -> T {
+ self.0
+ }
+}
+
+impl<T, A: Allocator> Vec<T, A> {
+ #[cfg(not(no_global_oom_handling))]
+ #[inline(always)]
+ /// Extend the vector by `n` values, using the given generator.
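+ ///
+ /// For example, `Vec::resize(new_len, value)` lands here as
+ /// `extend_with(new_len - len, ExtendElement(value))`.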
+ fn extend_with<E: ExtendWith<T>>(&mut self, n: usize, mut value: E) {
+ self.reserve(n);
+
+ unsafe {
+ let mut ptr = self.as_mut_ptr().add(self.len());
+ // Use SetLenOnDrop to work around a compiler bug: the compiler might
+ // not realize that the stores through `ptr` and the write to `self.len`
+ // in `set_len` don't alias.
+ let mut local_len = SetLenOnDrop::new(&mut self.len);
+
+ // Write all elements except the last one
+ for _ in 1..n {
+ ptr::write(ptr, value.next());
+ ptr = ptr.add(1);
+ // Increment the length in every step in case next() panics
+ local_len.increment_len(1);
+ }
+
+ if n > 0 {
+ // We can write the last element directly without cloning needlessly
+ ptr::write(ptr, value.last());
+ local_len.increment_len(1);
+ }
+
+ // len set by scope guard
+ }
+ }
+}
+
+impl<T: PartialEq, A: Allocator> Vec<T, A> {
+ /// Removes consecutive repeated elements in the vector according to the
+ /// [`PartialEq`] trait implementation.
+ ///
+ /// If the vector is sorted, this removes all duplicates.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![1, 2, 2, 3, 2];
+ ///
+ /// vec.dedup();
+ ///
+ /// assert_eq!(vec, [1, 2, 3, 2]);
+ /// ```
+ #[inline(always)]
+ pub fn dedup(&mut self) {
+ self.dedup_by(|a, b| a == b)
+ }
+}
+
+trait ExtendFromWithinSpec {
+ /// # Safety
+ ///
+ /// - `src` needs to be a valid index
+ /// - `self.capacity() - self.len()` must be `>= src.len()`
+ unsafe fn spec_extend_from_within(&mut self, src: Range<usize>);
+}
+
+// impl<T: Clone, A: Allocator> ExtendFromWithinSpec for Vec<T, A> {
+// default unsafe fn spec_extend_from_within(&mut self, src: Range<usize>) {
+// // SAFETY:
+// // - len is increased only after initializing elements
+// let (this, spare, len) = unsafe { self.split_at_spare_mut_with_len() };
+
+// // SAFETY:
+// // - caller guarantees that src is a valid index
+// let to_clone = unsafe { this.get_unchecked(src) };
+
+// iter::zip(to_clone, spare)
+// .map(|(src, dst)| dst.write(src.clone()))
+// // Note:
+// // - Element was just initialized with `MaybeUninit::write`, so it's ok to increase len
+// // - len is increased after each element to prevent leaks (see issue #82533)
+// .for_each(|_| *len += 1);
+// }
+// }
+
+impl<T: Copy, A: Allocator> ExtendFromWithinSpec for Vec<T, A> {
+ #[inline(always)]
+ unsafe fn spec_extend_from_within(&mut self, src: Range<usize>) {
+ let count = src.len();
+ {
+ let (init, spare) = self.split_at_spare_mut();
+
+ // SAFETY:
+ // - caller guarantees that `src` is a valid index
+ let source = unsafe { init.get_unchecked(src) };
+
+ // SAFETY:
+ // - Both pointers are created from unique slice references (`&mut [_]`)
+ // so they are valid and do not overlap.
+ // - Elements are `Copy`, so it's OK to copy them without doing
+ // anything with the original values
+ // - `count` is equal to the len of `source`, so source is valid for
+ // `count` reads
+ // - `.reserve(count)` guarantees that `spare.len() >= count` so spare
+ // is valid for `count` writes
+ unsafe { ptr::copy_nonoverlapping(source.as_ptr(), spare.as_mut_ptr() as _, count) };
+ }
+
+ // SAFETY:
+ // - The elements were just initialized by `copy_nonoverlapping`
+ self.len += count;
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Common trait implementations for Vec
+////////////////////////////////////////////////////////////////////////////////
+
+impl<T, A: Allocator> ops::Deref for Vec<T, A> {
+ type Target = [T];
+
+ #[inline(always)]
+ fn deref(&self) -> &[T] {
+ unsafe { slice::from_raw_parts(self.as_ptr(), self.len) }
+ }
+}
+
+impl<T, A: Allocator> ops::DerefMut for Vec<T, A> {
+ #[inline(always)]
+ fn deref_mut(&mut self) -> &mut [T] {
+ unsafe { slice::from_raw_parts_mut(self.as_mut_ptr(), self.len) }
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T: Clone, A: Allocator + Clone> Clone for Vec<T, A> {
+ #[inline(always)]
+ fn clone(&self) -> Self {
+ let alloc = self.allocator().clone();
+ let mut vec = Vec::with_capacity_in(self.len(), alloc);
+ vec.extend_from_slice(self);
+ vec
+ }
+
+ #[inline(always)]
+ fn clone_from(&mut self, other: &Self) {
+ // drop anything that will not be overwritten
+ self.truncate(other.len());
+
+ // self.len <= other.len due to the truncate above, so the
+ // slices here are always in-bounds.
+ let (init, tail) = other.split_at(self.len());
+
+ // reuse the contained values' allocations/resources.
+ self.clone_from_slice(init);
+ self.extend_from_slice(tail);
+ }
+}
+
+/// The hash of a vector is the same as that of the corresponding slice,
+/// as required by the `core::borrow::Borrow` implementation.
+///
+/// ```
+/// use std::hash::BuildHasher;
+///
+/// let b = std::collections::hash_map::RandomState::new();
+/// let v: Vec<u8> = vec![0xa8, 0x3c, 0x09];
+/// let s: &[u8] = &[0xa8, 0x3c, 0x09];
+/// assert_eq!(b.hash_one(v), b.hash_one(s));
+/// ```
+impl<T: Hash, A: Allocator> Hash for Vec<T, A> {
+ #[inline(always)]
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ Hash::hash(&**self, state)
+ }
+}
+
+impl<T, I: SliceIndex<[T]>, A: Allocator> Index<I> for Vec<T, A> {
+ type Output = I::Output;
+
+ #[inline(always)]
+ fn index(&self, index: I) -> &Self::Output {
+ Index::index(&**self, index)
+ }
+}
+
+impl<T, I: SliceIndex<[T]>, A: Allocator> IndexMut<I> for Vec<T, A> {
+ #[inline(always)]
+ fn index_mut(&mut self, index: I) -> &mut Self::Output {
+ IndexMut::index_mut(&mut **self, index)
+ }
+}
+
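+/// Collects an iterator into a new `Vec` by creating an empty vector and
+/// extending it.
+///
+/// # Examples
+///
+/// ```
+/// let v: Vec<i32> = (0..3).collect();
+/// assert_eq!(v, [0, 1, 2]);
+/// ```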
+#[cfg(not(no_global_oom_handling))]
+impl<T> FromIterator<T> for Vec<T> {
+ #[inline(always)]
+ fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Vec<T> {
+ let mut vec = Vec::new();
+ vec.extend(iter);
+ vec
+ }
+}
+
+impl<T, A: Allocator> IntoIterator for Vec<T, A> {
+ type Item = T;
+ type IntoIter = IntoIter<T, A>;
+
+ /// Creates a consuming iterator, that is, one that moves each value out of
+ /// the vector (from start to end). The vector cannot be used after calling
+ /// this.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let v = vec!["a".to_string(), "b".to_string()];
+ /// let mut v_iter = v.into_iter();
+ ///
+ /// let first_element: Option<String> = v_iter.next();
+ ///
+ /// assert_eq!(first_element, Some("a".to_string()));
+ /// assert_eq!(v_iter.next(), Some("b".to_string()));
+ /// assert_eq!(v_iter.next(), None);
+ /// ```
+ #[inline(always)]
+ fn into_iter(self) -> Self::IntoIter {
+ unsafe {
+ let mut me = ManuallyDrop::new(self);
+ let alloc = ManuallyDrop::new(ptr::read(me.allocator()));
+ let begin = me.as_mut_ptr();
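+            // For zero-sized types, `end` only encodes the element count as
+            // `begin + len` *bytes* (via `wrapping_add`): a ZST pointer is
+            // never dereferenced, and `add` on it would not advance at all.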
+ let end = if size_of::<T>() == 0 {
+ begin.cast::<u8>().wrapping_add(me.len()).cast()
+ } else {
+ begin.add(me.len()) as *const T
+ };
+ let cap = me.buf.capacity();
+ IntoIter {
+ buf: NonNull::new_unchecked(begin),
+ phantom: PhantomData,
+ cap,
+ alloc,
+ ptr: begin,
+ end,
+ }
+ }
+ }
+}
+
+impl<'a, T, A: Allocator> IntoIterator for &'a Vec<T, A> {
+ type Item = &'a T;
+ type IntoIter = slice::Iter<'a, T>;
+
+ #[inline(always)]
+ fn into_iter(self) -> Self::IntoIter {
+ self.iter()
+ }
+}
+
+impl<'a, T, A: Allocator> IntoIterator for &'a mut Vec<T, A> {
+ type Item = &'a mut T;
+ type IntoIter = slice::IterMut<'a, T>;
+
+ fn into_iter(self) -> Self::IntoIter {
+ self.iter_mut()
+ }
+}
+
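+/// Appends every item yielded by the iterator, reserving ahead using the
+/// iterator's `size_hint` whenever the buffer is full.
+///
+/// # Examples
+///
+/// ```
+/// let mut v = vec![1];
+/// v.extend([2, 3]);
+/// assert_eq!(v, [1, 2, 3]);
+/// ```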
+#[cfg(not(no_global_oom_handling))]
+impl<T, A: Allocator> Extend<T> for Vec<T, A> {
+ #[inline(always)]
+ fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
+        // This is the general-case implementation for an arbitrary iterator.
+ //
+ // This function should be the moral equivalent of:
+ //
+ // for item in iter {
+ // self.push(item);
+ // }
+
+ let mut iter = iter.into_iter();
+ while let Some(element) = iter.next() {
+ let len = self.len();
+ if len == self.capacity() {
+ let (lower, _) = iter.size_hint();
+ self.reserve(lower.saturating_add(1));
+ }
+ unsafe {
+ ptr::write(self.as_mut_ptr().add(len), element);
+ // Since next() executes user code which can panic we have to bump the length
+ // after each step.
+ // NB can't overflow since we would have had to alloc the address space
+ self.set_len(len + 1);
+ }
+ }
+ }
+}
+
+impl<T, A: Allocator> Vec<T, A> {
+ /// Creates a splicing iterator that replaces the specified range in the vector
+ /// with the given `replace_with` iterator and yields the removed items.
+ /// `replace_with` does not need to be the same length as `range`.
+ ///
+ /// `range` is removed even if the iterator is not consumed until the end.
+ ///
+ /// It is unspecified how many elements are removed from the vector
+ /// if the `Splice` value is leaked.
+ ///
+ /// The input iterator `replace_with` is only consumed when the `Splice` value is dropped.
+ ///
+ /// This is optimal if:
+ ///
+ /// * The tail (elements in the vector after `range`) is empty,
+    /// * or `replace_with` yields no more elements than `range`’s length,
+ /// * or the lower bound of its `size_hint()` is exact.
+ ///
+ /// Otherwise, a temporary vector is allocated and the tail is moved twice.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the starting point is greater than the end point or if
+ /// the end point is greater than the length of the vector.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = vec![1, 2, 3, 4];
+ /// let new = [7, 8, 9];
+ /// let u: Vec<_> = v.splice(1..3, new).collect();
+ /// assert_eq!(v, &[1, 7, 8, 9, 4]);
+ /// assert_eq!(u, &[2, 3]);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline(always)]
+ pub fn splice<R, I>(&mut self, range: R, replace_with: I) -> Splice<'_, I::IntoIter, A>
+ where
+ R: RangeBounds<usize>,
+ I: IntoIterator<Item = T>,
+ {
+ Splice {
+ drain: self.drain(range),
+ replace_with: replace_with.into_iter(),
+ }
+ }
+}
+
+/// Extend implementation that copies elements out of references before pushing them onto the Vec.
+///
+/// Unlike the standard library, this stable port does not specialize for
+/// slice iterators; elements are copied out and written one at a time,
+/// reserving more capacity whenever the buffer fills up.
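+///
+/// # Examples
+///
+/// ```
+/// let mut v: Vec<i32> = vec![1];
+/// v.extend(&[2, 3]);
+/// assert_eq!(v, [1, 2, 3]);
+/// ```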
+#[cfg(not(no_global_oom_handling))]
+impl<'a, T: Copy + 'a, A: Allocator + 'a> Extend<&'a T> for Vec<T, A> {
+ #[inline(always)]
+ fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
+ let mut iter = iter.into_iter();
+ while let Some(element) = iter.next() {
+ let len = self.len();
+ if len == self.capacity() {
+ let (lower, _) = iter.size_hint();
+ self.reserve(lower.saturating_add(1));
+ }
+ unsafe {
+ ptr::write(self.as_mut_ptr().add(len), *element);
+ // Since next() executes user code which can panic we have to bump the length
+ // after each step.
+ // NB can't overflow since we would have had to alloc the address space
+ self.set_len(len + 1);
+ }
+ }
+ }
+}
+
+/// Implements comparison of vectors, [lexicographically](core::cmp::Ord#lexicographical-comparison).
+impl<T: PartialOrd, A: Allocator> PartialOrd for Vec<T, A> {
+ #[inline(always)]
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ PartialOrd::partial_cmp(&**self, &**other)
+ }
+}
+
+impl<T: Eq, A: Allocator> Eq for Vec<T, A> {}
+
+/// Implements ordering of vectors, [lexicographically](core::cmp::Ord#lexicographical-comparison).
+impl<T: Ord, A: Allocator> Ord for Vec<T, A> {
+ #[inline(always)]
+ fn cmp(&self, other: &Self) -> Ordering {
+ Ord::cmp(&**self, &**other)
+ }
+}
+
+impl<T, A: Allocator> Drop for Vec<T, A> {
+ #[inline(always)]
+ fn drop(&mut self) {
+ unsafe {
+            // Use the drop glue for `[T]`: a raw slice (`*mut [T]`) is the
+            // weakest type necessary to refer to the elements, which avoids
+            // questions about reference validity in edge cases.
+ ptr::drop_in_place(ptr::slice_from_raw_parts_mut(self.as_mut_ptr(), self.len))
+ }
+ // RawVec handles deallocation
+ }
+}
+
+impl<T> Default for Vec<T> {
+ /// Creates an empty `Vec<T>`.
+ ///
+ /// The vector will not allocate until elements are pushed onto it.
+ #[inline(always)]
+ fn default() -> Vec<T> {
+ Vec::new()
+ }
+}
+
+impl<T: fmt::Debug, A: Allocator> fmt::Debug for Vec<T, A> {
+ #[inline(always)]
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&**self, f)
+ }
+}
+
+impl<T, A: Allocator> AsRef<Vec<T, A>> for Vec<T, A> {
+ #[inline(always)]
+ fn as_ref(&self) -> &Vec<T, A> {
+ self
+ }
+}
+
+impl<T, A: Allocator> AsMut<Vec<T, A>> for Vec<T, A> {
+ #[inline(always)]
+ fn as_mut(&mut self) -> &mut Vec<T, A> {
+ self
+ }
+}
+
+impl<T, A: Allocator> AsRef<[T]> for Vec<T, A> {
+ #[inline(always)]
+ fn as_ref(&self) -> &[T] {
+ self
+ }
+}
+
+impl<T, A: Allocator> AsMut<[T]> for Vec<T, A> {
+ #[inline(always)]
+ fn as_mut(&mut self) -> &mut [T] {
+ self
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T: Clone> From<&[T]> for Vec<T> {
+ /// Allocate a `Vec<T>` and fill it by cloning `s`'s items.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!(Vec::from(&[1, 2, 3][..]), vec![1, 2, 3]);
+ /// ```
+ #[inline(always)]
+ fn from(s: &[T]) -> Vec<T> {
+ let mut vec = Vec::with_capacity(s.len());
+ vec.extend_from_slice(s);
+ vec
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T: Clone> From<&mut [T]> for Vec<T> {
+ /// Allocate a `Vec<T>` and fill it by cloning `s`'s items.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!(Vec::from(&mut [1, 2, 3][..]), vec![1, 2, 3]);
+ /// ```
+ #[inline(always)]
+ fn from(s: &mut [T]) -> Vec<T> {
+ let mut vec = Vec::with_capacity(s.len());
+ vec.extend_from_slice(s);
+ vec
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T, const N: usize> From<[T; N]> for Vec<T> {
+ #[inline(always)]
+ fn from(s: [T; N]) -> Vec<T> {
+ Box::slice(Box::new(s)).into_vec()
+ }
+}
+
+impl<T, A: Allocator> From<Box<[T], A>> for Vec<T, A> {
+ /// Convert a boxed slice into a vector by transferring ownership of
+ /// the existing heap allocation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let b: Box<[i32]> = vec![1, 2, 3].into_boxed_slice();
+ /// assert_eq!(Vec::from(b), vec![1, 2, 3]);
+ /// ```
+ #[inline(always)]
+ fn from(s: Box<[T], A>) -> Self {
+ s.into_vec()
+ }
+}
+
+impl<T, A: Allocator, const N: usize> From<Box<[T; N], A>> for Vec<T, A> {
+ /// Convert a boxed array into a vector by transferring ownership of
+ /// the existing heap allocation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let b: Box<[i32; 3]> = Box::new([1, 2, 3]);
+ /// assert_eq!(Vec::from(b), vec![1, 2, 3]);
+ /// ```
+ #[inline(always)]
+ fn from(s: Box<[T; N], A>) -> Self {
+ s.into_vec()
+ }
+}
+
+// note: test pulls in libstd, which causes errors here
+#[cfg(not(no_global_oom_handling))]
+impl<T, A: Allocator> From<Vec<T, A>> for Box<[T], A> {
+ /// Convert a vector into a boxed slice.
+ ///
+ /// If `v` has excess capacity, its items will be moved into a
+ /// newly-allocated buffer with exactly the right capacity.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!(Box::from(vec![1, 2, 3]), vec![1, 2, 3].into_boxed_slice());
+ /// ```
+ ///
+ /// Any excess capacity is removed:
+ /// ```
+ /// let mut vec = Vec::with_capacity(10);
+ /// vec.extend([1, 2, 3]);
+ ///
+ /// assert_eq!(Box::from(vec), vec![1, 2, 3].into_boxed_slice());
+ /// ```
+ #[inline(always)]
+ fn from(v: Vec<T, A>) -> Self {
+ v.into_boxed_slice()
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl From<&str> for Vec<u8> {
+ /// Allocate a `Vec<u8>` and fill it with a UTF-8 string.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!(Vec::from("123"), vec![b'1', b'2', b'3']);
+ /// ```
+ #[inline(always)]
+ fn from(s: &str) -> Vec<u8> {
+ From::from(s.as_bytes())
+ }
+}
+
+impl<T, A: Allocator, const N: usize> TryFrom<Vec<T, A>> for [T; N] {
+ type Error = Vec<T, A>;
+
+ /// Gets the entire contents of the `Vec<T>` as an array,
+ /// if its size exactly matches that of the requested array.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!(vec![1, 2, 3].try_into(), Ok([1, 2, 3]));
+ /// assert_eq!(<Vec<i32>>::new().try_into(), Ok([]));
+ /// ```
+ ///
+ /// If the length doesn't match, the input comes back in `Err`:
+ /// ```
+ /// let r: Result<[i32; 4], _> = (0..10).collect::<Vec<_>>().try_into();
+ /// assert_eq!(r, Err(vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9]));
+ /// ```
+ ///
+ /// If you're fine with just getting a prefix of the `Vec<T>`,
+ /// you can call [`.truncate(N)`](Vec::truncate) first.
+ /// ```
+ /// let mut v = String::from("hello world").into_bytes();
+ /// v.sort();
+ /// v.truncate(2);
+ /// let [a, b]: [_; 2] = v.try_into().unwrap();
+ /// assert_eq!(a, b' ');
+ /// assert_eq!(b, b'd');
+ /// ```
+ #[inline(always)]
+ fn try_from(mut vec: Vec<T, A>) -> Result<[T; N], Vec<T, A>> {
+ if vec.len() != N {
+ return Err(vec);
+ }
+
+ // SAFETY: `.set_len(0)` is always sound.
+ unsafe { vec.set_len(0) };
+
+ // SAFETY: A `Vec`'s pointer is always aligned properly, and
+ // the alignment the array needs is the same as the items.
+ // We checked earlier that we have sufficient items.
+ // The items will not double-drop as the `set_len`
+ // tells the `Vec` not to also drop them.
+ let array = unsafe { ptr::read(vec.as_ptr() as *const [T; N]) };
+ Ok(array)
+ }
+}
+
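+// `from_elem` and `from_elem_in` are the `#[doc(hidden)]` helpers behind the
+// `vec![elem; n]` form of this crate's `vec!`-style macro (mirroring the
+// standard library), so `vec![0u8; 4]` is expected to expand to a
+// `from_elem(0u8, 4)` call.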
+#[inline(always)]
+#[cfg(not(no_global_oom_handling))]
+#[doc(hidden)]
+pub fn from_elem_in<T: Clone, A: Allocator>(elem: T, n: usize, alloc: A) -> Vec<T, A> {
+ let mut v = Vec::with_capacity_in(n, alloc);
+ v.extend_with(n, ExtendElement(elem));
+ v
+}
+
+#[inline(always)]
+#[cfg(not(no_global_oom_handling))]
+#[doc(hidden)]
+pub fn from_elem<T: Clone>(elem: T, n: usize) -> Vec<T> {
+ let mut v = Vec::with_capacity(n);
+ v.extend_with(n, ExtendElement(elem));
+ v
+}
+
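+// With the `serde` feature enabled, a `Vec<T, A>` serializes like a plain
+// sequence. A sketch with `serde_json` (not a dependency of this crate;
+// shown only for illustration):
+//
+//     let v: Vec<i32> = vec![1, 2, 3];
+//     assert_eq!(serde_json::to_string(&v).unwrap(), "[1,2,3]");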
+#[cfg(feature = "serde")]
+impl<T, A> serde::Serialize for Vec<T, A>
+where
+ T: serde::Serialize,
+ A: Allocator,
+{
+ #[inline(always)]
+ fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
+ where
+ S: serde::ser::Serializer,
+ {
+ serializer.collect_seq(self)
+ }
+}
+
+#[cfg(feature = "serde")]
+impl<'de, T, A> serde::de::Deserialize<'de> for Vec<T, A>
+where
+ T: serde::de::Deserialize<'de>,
+ A: Allocator + Default,
+{
+ #[inline(always)]
+ fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
+ where
+ D: serde::de::Deserializer<'de>,
+ {
+ struct VecVisitor<T, A> {
+ marker: PhantomData<(T, A)>,
+ }
+
+ impl<'de, T, A> serde::de::Visitor<'de> for VecVisitor<T, A>
+ where
+ T: serde::de::Deserialize<'de>,
+ A: Allocator + Default,
+ {
+ type Value = Vec<T, A>;
+
+ fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+ formatter.write_str("a sequence")
+ }
+
+ fn visit_seq<S>(self, mut seq: S) -> Result<Self::Value, S::Error>
+ where
+ S: serde::de::SeqAccess<'de>,
+ {
+ let mut values = Vec::with_capacity_in(cautious(seq.size_hint()), A::default());
+
+ while let Some(value) = seq.next_element()? {
+ values.push(value);
+ }
+
+ Ok(values)
+ }
+ }
+
+ let visitor = VecVisitor {
+ marker: PhantomData,
+ };
+ deserializer.deserialize_seq(visitor)
+ }
+
+ #[inline(always)]
+ fn deserialize_in_place<D>(deserializer: D, place: &mut Self) -> Result<(), D::Error>
+ where
+ D: serde::de::Deserializer<'de>,
+ {
+ struct VecInPlaceVisitor<'a, T: 'a, A: Allocator + 'a>(&'a mut Vec<T, A>);
+
+ impl<'a, 'de, T, A> serde::de::Visitor<'de> for VecInPlaceVisitor<'a, T, A>
+ where
+ T: serde::de::Deserialize<'de>,
+ A: Allocator + Default,
+ {
+ type Value = ();
+
+ fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+ formatter.write_str("a sequence")
+ }
+
+ fn visit_seq<S>(self, mut seq: S) -> Result<Self::Value, S::Error>
+ where
+ S: serde::de::SeqAccess<'de>,
+ {
+ let hint = cautious(seq.size_hint());
+ if let Some(additional) = hint.checked_sub(self.0.len()) {
+ self.0.reserve(additional);
+ }
+
+ for i in 0..self.0.len() {
+ let next = {
+ let next_place = InPlaceSeed(&mut self.0[i]);
+ seq.next_element_seed(next_place)?
+ };
+ if next.is_none() {
+ self.0.truncate(i);
+ return Ok(());
+ }
+ }
+
+ while let Some(value) = seq.next_element()? {
+ self.0.push(value);
+ }
+
+ Ok(())
+ }
+ }
+
+ deserializer.deserialize_seq(VecInPlaceVisitor(place))
+ }
+}
+
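+/// Caps an untrusted sequence `size_hint` at 4096 elements, so a malicious
+/// or buggy input cannot trigger a huge up-front allocation before any
+/// elements have actually been read.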
+#[cfg(feature = "serde")]
+pub fn cautious(hint: Option<usize>) -> usize {
+ cmp::min(hint.unwrap_or(0), 4096)
+}
+
+/// A DeserializeSeed helper for implementing deserialize_in_place Visitors.
+///
+/// Wraps a mutable reference and calls deserialize_in_place on it.
+#[cfg(feature = "serde")]
+pub struct InPlaceSeed<'a, T: 'a>(pub &'a mut T);
+
+#[cfg(feature = "serde")]
+impl<'a, 'de, T> serde::de::DeserializeSeed<'de> for InPlaceSeed<'a, T>
+where
+ T: serde::de::Deserialize<'de>,
+{
+ type Value = ();
+ fn deserialize<D>(self, deserializer: D) -> Result<Self::Value, D::Error>
+ where
+ D: serde::de::Deserializer<'de>,
+ {
+ T::deserialize_in_place(deserializer, self.0)
+ }
+}
diff --git a/vendor/allocator-api2/src/stable/vec/partial_eq.rs b/vendor/allocator-api2/src/stable/vec/partial_eq.rs
new file mode 100644
index 000000000..157d9c0b9
--- /dev/null
+++ b/vendor/allocator-api2/src/stable/vec/partial_eq.rs
@@ -0,0 +1,43 @@
+#[cfg(not(no_global_oom_handling))]
+use alloc_crate::borrow::Cow;
+
+use crate::stable::alloc::Allocator;
+
+use super::Vec;
+
+macro_rules! __impl_slice_eq1 {
+ ([$($vars:tt)*] $lhs:ty, $rhs:ty $(where $ty:ty: $bound:ident)?) => {
+ impl<T, U, $($vars)*> PartialEq<$rhs> for $lhs
+ where
+ T: PartialEq<U>,
+ $($ty: $bound)?
+ {
+ #[inline(always)]
+ fn eq(&self, other: &$rhs) -> bool { self[..] == other[..] }
+ #[inline(always)]
+ fn ne(&self, other: &$rhs) -> bool { self[..] != other[..] }
+ }
+ }
+}
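+
+// For example, `__impl_slice_eq1! { [A: Allocator] Vec<T, A>, &[U] }` expands
+// to roughly:
+//
+//     impl<T, U, A: Allocator> PartialEq<&[U]> for Vec<T, A>
+//     where
+//         T: PartialEq<U>,
+//     {
+//         fn eq(&self, other: &&[U]) -> bool { self[..] == other[..] }
+//         fn ne(&self, other: &&[U]) -> bool { self[..] != other[..] }
+//     }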
+
+__impl_slice_eq1! { [A1: Allocator, A2: Allocator] Vec<T, A1>, Vec<U, A2> }
+__impl_slice_eq1! { [A: Allocator] Vec<T, A>, &[U] }
+__impl_slice_eq1! { [A: Allocator] Vec<T, A>, &mut [U] }
+__impl_slice_eq1! { [A: Allocator] &[T], Vec<U, A> }
+__impl_slice_eq1! { [A: Allocator] &mut [T], Vec<U, A> }
+__impl_slice_eq1! { [A: Allocator] Vec<T, A>, [U] }
+__impl_slice_eq1! { [A: Allocator] [T], Vec<U, A> }
+#[cfg(not(no_global_oom_handling))]
+__impl_slice_eq1! { [A: Allocator] Cow<'_, [T]>, Vec<U, A> where T: Clone }
+__impl_slice_eq1! { [A: Allocator, const N: usize] Vec<T, A>, [U; N] }
+__impl_slice_eq1! { [A: Allocator, const N: usize] Vec<T, A>, &[U; N] }
+
+// NOTE: some less important impls are omitted to reduce code bloat
+// FIXME(Centril): Reconsider this?
+//__impl_slice_eq1! { [const N: usize] Vec<A>, &mut [B; N], }
+//__impl_slice_eq1! { [const N: usize] [A; N], Vec<B>, }
+//__impl_slice_eq1! { [const N: usize] &[A; N], Vec<B>, }
+//__impl_slice_eq1! { [const N: usize] &mut [A; N], Vec<B>, }
+//__impl_slice_eq1! { [const N: usize] Cow<'a, [A]>, [B; N], }
+//__impl_slice_eq1! { [const N: usize] Cow<'a, [A]>, &[B; N], }
+//__impl_slice_eq1! { [const N: usize] Cow<'a, [A]>, &mut [B; N], }
diff --git a/vendor/allocator-api2/src/stable/vec/set_len_on_drop.rs b/vendor/allocator-api2/src/stable/vec/set_len_on_drop.rs
new file mode 100644
index 000000000..c6f831321
--- /dev/null
+++ b/vendor/allocator-api2/src/stable/vec/set_len_on_drop.rs
@@ -0,0 +1,31 @@
+// Set the length of the vec when the `SetLenOnDrop` value goes out of scope.
+//
+// The idea is: The length field in SetLenOnDrop is a local variable
+// that the optimizer will see does not alias with any stores through the Vec's data
+// pointer. This is a workaround for alias analysis issue #32155
+pub(super) struct SetLenOnDrop<'a> {
+ len: &'a mut usize,
+ local_len: usize,
+}
+
+impl<'a> SetLenOnDrop<'a> {
+ #[inline(always)]
+ pub(super) fn new(len: &'a mut usize) -> Self {
+ SetLenOnDrop {
+ local_len: *len,
+ len,
+ }
+ }
+
+ #[inline(always)]
+ pub(super) fn increment_len(&mut self, increment: usize) {
+ self.local_len += increment;
+ }
+}
+
+impl Drop for SetLenOnDrop<'_> {
+ #[inline(always)]
+ fn drop(&mut self) {
+ *self.len = self.local_len;
+ }
+}
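+
+// Typical usage (see `Vec::extend_with` in vec/mod.rs): construct the guard
+// from `&mut vec.len` before writing elements, call `increment_len(1)` after
+// each write, and let `Drop` publish the final length, so the length stays
+// correct even if an element constructor panics mid-loop.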
diff --git a/vendor/allocator-api2/src/stable/vec/splice.rs b/vendor/allocator-api2/src/stable/vec/splice.rs
new file mode 100644
index 000000000..0f64c87af
--- /dev/null
+++ b/vendor/allocator-api2/src/stable/vec/splice.rs
@@ -0,0 +1,135 @@
+use core::ptr;
+use core::slice;
+
+use crate::stable::alloc::{Allocator, Global};
+
+use super::{Drain, Vec};
+
+/// A splicing iterator for `Vec`.
+///
+/// This struct is created by [`Vec::splice()`].
+/// See its documentation for more.
+///
+/// # Example
+///
+/// ```
+/// let mut v = vec![0, 1, 2];
+/// let new = [7, 8];
+/// let iter: std::vec::Splice<_> = v.splice(1.., new);
+/// ```
+#[derive(Debug)]
+pub struct Splice<'a, I: Iterator + 'a, A: Allocator + 'a = Global> {
+ pub(super) drain: Drain<'a, I::Item, A>,
+ pub(super) replace_with: I,
+}
+
+impl<I: Iterator, A: Allocator> Iterator for Splice<'_, I, A> {
+ type Item = I::Item;
+
+ #[inline(always)]
+ fn next(&mut self) -> Option<Self::Item> {
+ self.drain.next()
+ }
+
+ #[inline(always)]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.drain.size_hint()
+ }
+}
+
+impl<I: Iterator, A: Allocator> DoubleEndedIterator for Splice<'_, I, A> {
+ #[inline(always)]
+ fn next_back(&mut self) -> Option<Self::Item> {
+ self.drain.next_back()
+ }
+}
+
+impl<I: Iterator, A: Allocator> ExactSizeIterator for Splice<'_, I, A> {}
+
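+// Drop strategy: exhaust the drain, then (unless the tail is empty, where a
+// plain `extend` suffices) refill the drained gap from `replace_with`; if the
+// iterator still has items, grow the gap by its size-hint lower bound and
+// fill again, finally collecting any remainder into a temporary vector to
+// learn the exact count before the last tail move.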
+impl<I: Iterator, A: Allocator> Drop for Splice<'_, I, A> {
+ #[inline]
+ fn drop(&mut self) {
+ self.drain.by_ref().for_each(drop);
+
+ unsafe {
+ if self.drain.tail_len == 0 {
+ self.drain.vec.as_mut().extend(self.replace_with.by_ref());
+ return;
+ }
+
+ // First fill the range left by drain().
+ if !self.drain.fill(&mut self.replace_with) {
+ return;
+ }
+
+ // There may be more elements. Use the lower bound as an estimate.
+ // FIXME: Is the upper bound a better guess? Or something else?
+ let (lower_bound, _upper_bound) = self.replace_with.size_hint();
+ if lower_bound > 0 {
+ self.drain.move_tail(lower_bound);
+ if !self.drain.fill(&mut self.replace_with) {
+ return;
+ }
+ }
+
+ // Collect any remaining elements.
+ // This is a zero-length vector which does not allocate if `lower_bound` was exact.
+ let mut collected = self
+ .replace_with
+ .by_ref()
+ .collect::<Vec<I::Item>>()
+ .into_iter();
+ // Now we have an exact count.
+ if collected.len() > 0 {
+ self.drain.move_tail(collected.len());
+ let filled = self.drain.fill(&mut collected);
+ debug_assert!(filled);
+ debug_assert_eq!(collected.len(), 0);
+ }
+ }
+ // Let `Drain::drop` move the tail back if necessary and restore `vec.len`.
+ }
+}
+
+/// Private helper methods for `Splice::drop`
+impl<T, A: Allocator> Drain<'_, T, A> {
+ /// The range from `self.vec.len` to `self.tail_start` contains elements
+ /// that have been moved out.
+ /// Fill that range as much as possible with new elements from the `replace_with` iterator.
+ /// Returns `true` if we filled the entire range. (`replace_with.next()` didn’t return `None`.)
+ #[inline(always)]
+ unsafe fn fill<I: Iterator<Item = T>>(&mut self, replace_with: &mut I) -> bool {
+ let vec = unsafe { self.vec.as_mut() };
+ let range_start = vec.len;
+ let range_end = self.tail_start;
+ let range_slice = unsafe {
+ slice::from_raw_parts_mut(vec.as_mut_ptr().add(range_start), range_end - range_start)
+ };
+
+ for place in range_slice {
+ if let Some(new_item) = replace_with.next() {
+ unsafe { ptr::write(place, new_item) };
+ vec.len += 1;
+ } else {
+ return false;
+ }
+ }
+ true
+ }
+
+ /// Makes room for inserting more elements before the tail.
+ #[inline(always)]
+ unsafe fn move_tail(&mut self, additional: usize) {
+ let vec = unsafe { self.vec.as_mut() };
+ let len = self.tail_start + self.tail_len;
+ vec.buf.reserve(len, additional);
+
+ let new_tail_start = self.tail_start + additional;
+ unsafe {
+ let src = vec.as_ptr().add(self.tail_start);
+ let dst = vec.as_mut_ptr().add(new_tail_start);
+ ptr::copy(src, dst, self.tail_len);
+ }
+ self.tail_start = new_tail_start;
+ }
+}