summaryrefslogtreecommitdiffstats
path: root/third_party/rust/smallvec
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-19 00:47:55 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-19 00:47:55 +0000
commit26a029d407be480d791972afb5975cf62c9360a6 (patch)
treef435a8308119effd964b339f76abb83a57c29483 /third_party/rust/smallvec
parentInitial commit. (diff)
downloadfirefox-26a029d407be480d791972afb5975cf62c9360a6.tar.xz
firefox-26a029d407be480d791972afb5975cf62c9360a6.zip
Adding upstream version 124.0.1.upstream/124.0.1
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/rust/smallvec')
-rw-r--r--third_party/rust/smallvec/.cargo-checksum.json1
-rw-r--r--third_party/rust/smallvec/Cargo.toml72
-rw-r--r--third_party/rust/smallvec/LICENSE-APACHE201
-rw-r--r--third_party/rust/smallvec/LICENSE-MIT25
-rw-r--r--third_party/rust/smallvec/README.md26
-rw-r--r--third_party/rust/smallvec/benches/bench.rs314
-rw-r--r--third_party/rust/smallvec/debug_metadata/README.md111
-rw-r--r--third_party/rust/smallvec/debug_metadata/smallvec.natvis35
-rw-r--r--third_party/rust/smallvec/scripts/run_miri.sh24
-rw-r--r--third_party/rust/smallvec/src/arbitrary.rs19
-rw-r--r--third_party/rust/smallvec/src/lib.rs2463
-rw-r--r--third_party/rust/smallvec/src/specialization.rs19
-rw-r--r--third_party/rust/smallvec/src/tests.rs1016
-rw-r--r--third_party/rust/smallvec/tests/debugger_visualizer.rs68
-rw-r--r--third_party/rust/smallvec/tests/macro.rs24
15 files changed, 4418 insertions, 0 deletions
diff --git a/third_party/rust/smallvec/.cargo-checksum.json b/third_party/rust/smallvec/.cargo-checksum.json
new file mode 100644
index 0000000000..f8adf7532e
--- /dev/null
+++ b/third_party/rust/smallvec/.cargo-checksum.json
@@ -0,0 +1 @@
+{"files":{"Cargo.toml":"964f828a4ed019af9a728f6a0f63dc5860446c7b21abe2be1a0b92ddc82d1140","LICENSE-APACHE":"a60eea817514531668d7e00765731449fe14d059d3249e0bc93b36de45f759f2","LICENSE-MIT":"0b28172679e0009b655da42797c03fd163a3379d5cfa67ba1f1655e974a2a1a9","README.md":"a01127c37308457e8d396b176fb790846be0978c173be3f13260b62efcef011b","benches/bench.rs":"e2a235d68be20996014c00468b369887d2041ce95486625de3cef35b8f2e4acd","debug_metadata/README.md":"4d7f1c1b2c25ce2231ef71864d06e54323867459035b53bc9e00f66a0a44f82e","debug_metadata/smallvec.natvis":"3092ddebd8fffc3486536d7f27f8c5eae3a8a093d45cd8eeb3946ea2b0c35a15","scripts/run_miri.sh":"74a9f9adc43f986e81977b03846f7dd00122a0150bd8ec3fe4842a1a787e0f07","src/arbitrary.rs":"22e55cfbf60374945b30e6d0855129eff67cd8b878cef6fa997e1f4be67b9e3d","src/lib.rs":"aed3176e0c74d7eb1d405ee096a4d1027626ed5f1bb65da4c0ef89f83b8f66ed","src/specialization.rs":"46433586203399251cba496d67b88d34e1be3c2b591986b77463513da1c66471","src/tests.rs":"d0a70bb7b4e1d0a174f2a195c8ab55280a40589bab7028999afd787b3fff6eae","tests/debugger_visualizer.rs":"185456ad253957fc0c9e904ff8a1135397ac991c29fa3c60f75d8d81f7463022","tests/macro.rs":"22ad4f6f104a599fdcba19cad8834105b8656b212fb6c7573a427d447f5db14f"},"package":"942b4a808e05215192e39f4ab80813e599068285906cc91aa64f923db842bd5a"} \ No newline at end of file
diff --git a/third_party/rust/smallvec/Cargo.toml b/third_party/rust/smallvec/Cargo.toml
new file mode 100644
index 0000000000..4a69944339
--- /dev/null
+++ b/third_party/rust/smallvec/Cargo.toml
@@ -0,0 +1,72 @@
+# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
+#
+# When uploading crates to the registry Cargo will automatically
+# "normalize" Cargo.toml files for maximal compatibility
+# with all versions of Cargo and also rewrite `path` dependencies
+# to registry (e.g., crates.io) dependencies.
+#
+# If you are reading this file be aware that the original Cargo.toml
+# will likely look very different (and much more reasonable).
+# See Cargo.toml.orig for the original contents.
+
+[package]
+edition = "2018"
+name = "smallvec"
+version = "1.11.1"
+authors = ["The Servo Project Developers"]
+description = "'Small vector' optimization: store up to a small number of items on the stack"
+documentation = "https://docs.rs/smallvec/"
+readme = "README.md"
+keywords = [
+ "small",
+ "vec",
+ "vector",
+ "stack",
+ "no_std",
+]
+categories = ["data-structures"]
+license = "MIT OR Apache-2.0"
+repository = "https://github.com/servo/rust-smallvec"
+
+[package.metadata.docs.rs]
+all-features = true
+rustdoc-args = [
+ "--cfg",
+ "docsrs",
+ "--generate-link-to-definition",
+]
+
+[[test]]
+name = "debugger_visualizer"
+path = "tests/debugger_visualizer.rs"
+test = false
+required-features = ["debugger_visualizer"]
+
+[dependencies.arbitrary]
+version = "1"
+optional = true
+
+[dependencies.serde]
+version = "1"
+optional = true
+default-features = false
+
+[dev-dependencies.bincode]
+version = "1.0.1"
+
+[dev-dependencies.debugger_test]
+version = "0.1.0"
+
+[dev-dependencies.debugger_test_parser]
+version = "0.1.0"
+
+[features]
+const_generics = []
+const_new = ["const_generics"]
+debugger_visualizer = []
+drain_filter = []
+drain_keep_rest = ["drain_filter"]
+may_dangle = []
+specialization = []
+union = []
+write = []
diff --git a/third_party/rust/smallvec/LICENSE-APACHE b/third_party/rust/smallvec/LICENSE-APACHE
new file mode 100644
index 0000000000..16fe87b06e
--- /dev/null
+++ b/third_party/rust/smallvec/LICENSE-APACHE
@@ -0,0 +1,201 @@
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+Copyright [yyyy] [name of copyright owner]
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/third_party/rust/smallvec/LICENSE-MIT b/third_party/rust/smallvec/LICENSE-MIT
new file mode 100644
index 0000000000..9729c1284e
--- /dev/null
+++ b/third_party/rust/smallvec/LICENSE-MIT
@@ -0,0 +1,25 @@
+Copyright (c) 2018 The Servo Project Developers
+
+Permission is hereby granted, free of charge, to any
+person obtaining a copy of this software and associated
+documentation files (the "Software"), to deal in the
+Software without restriction, including without
+limitation the rights to use, copy, modify, merge,
+publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software
+is furnished to do so, subject to the following
+conditions:
+
+The above copyright notice and this permission notice
+shall be included in all copies or substantial portions
+of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
+ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
+TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
+PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
+SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
+IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
diff --git a/third_party/rust/smallvec/README.md b/third_party/rust/smallvec/README.md
new file mode 100644
index 0000000000..724637c6ec
--- /dev/null
+++ b/third_party/rust/smallvec/README.md
@@ -0,0 +1,26 @@
+rust-smallvec
+=============
+
+[Documentation](https://docs.rs/smallvec/)
+
+[Release notes](https://github.com/servo/rust-smallvec/releases)
+
+"Small vector" optimization for Rust: store up to a small number of items on the stack
+
+## Example
+
+```rust
+use smallvec::{SmallVec, smallvec};
+
+// This SmallVec can hold up to 4 items on the stack:
+let mut v: SmallVec<[i32; 4]> = smallvec![1, 2, 3, 4];
+
+// It will automatically move its contents to the heap if
+// contains more than four items:
+v.push(5);
+
+// SmallVec points to a slice, so you can use normal slice
+// indexing and other methods to access its contents:
+v[0] = v[1] + v[2];
+v.sort();
+```
diff --git a/third_party/rust/smallvec/benches/bench.rs b/third_party/rust/smallvec/benches/bench.rs
new file mode 100644
index 0000000000..b52ee15504
--- /dev/null
+++ b/third_party/rust/smallvec/benches/bench.rs
@@ -0,0 +1,314 @@
+#![feature(test)]
+#![allow(deprecated)]
+
+#[macro_use]
+extern crate smallvec;
+extern crate test;
+
+use self::test::Bencher;
+use smallvec::{ExtendFromSlice, SmallVec};
+
+const VEC_SIZE: usize = 16;
+const SPILLED_SIZE: usize = 100;
+
+trait Vector<T>: for<'a> From<&'a [T]> + Extend<T> + ExtendFromSlice<T> {
+ fn new() -> Self;
+ fn push(&mut self, val: T);
+ fn pop(&mut self) -> Option<T>;
+ fn remove(&mut self, p: usize) -> T;
+ fn insert(&mut self, n: usize, val: T);
+ fn from_elem(val: T, n: usize) -> Self;
+ fn from_elems(val: &[T]) -> Self;
+}
+
+impl<T: Copy> Vector<T> for Vec<T> {
+ fn new() -> Self {
+ Self::with_capacity(VEC_SIZE)
+ }
+
+ fn push(&mut self, val: T) {
+ self.push(val)
+ }
+
+ fn pop(&mut self) -> Option<T> {
+ self.pop()
+ }
+
+ fn remove(&mut self, p: usize) -> T {
+ self.remove(p)
+ }
+
+ fn insert(&mut self, n: usize, val: T) {
+ self.insert(n, val)
+ }
+
+ fn from_elem(val: T, n: usize) -> Self {
+ vec![val; n]
+ }
+
+ fn from_elems(val: &[T]) -> Self {
+ val.to_owned()
+ }
+}
+
+impl<T: Copy> Vector<T> for SmallVec<[T; VEC_SIZE]> {
+ fn new() -> Self {
+ Self::new()
+ }
+
+ fn push(&mut self, val: T) {
+ self.push(val)
+ }
+
+ fn pop(&mut self) -> Option<T> {
+ self.pop()
+ }
+
+ fn remove(&mut self, p: usize) -> T {
+ self.remove(p)
+ }
+
+ fn insert(&mut self, n: usize, val: T) {
+ self.insert(n, val)
+ }
+
+ fn from_elem(val: T, n: usize) -> Self {
+ smallvec![val; n]
+ }
+
+ fn from_elems(val: &[T]) -> Self {
+ SmallVec::from_slice(val)
+ }
+}
+
+macro_rules! make_benches {
+ ($typ:ty { $($b_name:ident => $g_name:ident($($args:expr),*),)* }) => {
+ $(
+ #[bench]
+ fn $b_name(b: &mut Bencher) {
+ $g_name::<$typ>($($args,)* b)
+ }
+ )*
+ }
+}
+
+make_benches! {
+ SmallVec<[u64; VEC_SIZE]> {
+ bench_push => gen_push(SPILLED_SIZE as _),
+ bench_push_small => gen_push(VEC_SIZE as _),
+ bench_insert_push => gen_insert_push(SPILLED_SIZE as _),
+ bench_insert_push_small => gen_insert_push(VEC_SIZE as _),
+ bench_insert => gen_insert(SPILLED_SIZE as _),
+ bench_insert_small => gen_insert(VEC_SIZE as _),
+ bench_remove => gen_remove(SPILLED_SIZE as _),
+ bench_remove_small => gen_remove(VEC_SIZE as _),
+ bench_extend => gen_extend(SPILLED_SIZE as _),
+ bench_extend_small => gen_extend(VEC_SIZE as _),
+ bench_from_iter => gen_from_iter(SPILLED_SIZE as _),
+ bench_from_iter_small => gen_from_iter(VEC_SIZE as _),
+ bench_from_slice => gen_from_slice(SPILLED_SIZE as _),
+ bench_from_slice_small => gen_from_slice(VEC_SIZE as _),
+ bench_extend_from_slice => gen_extend_from_slice(SPILLED_SIZE as _),
+ bench_extend_from_slice_small => gen_extend_from_slice(VEC_SIZE as _),
+ bench_macro_from_elem => gen_from_elem(SPILLED_SIZE as _),
+ bench_macro_from_elem_small => gen_from_elem(VEC_SIZE as _),
+ bench_pushpop => gen_pushpop(),
+ }
+}
+
+make_benches! {
+ Vec<u64> {
+ bench_push_vec => gen_push(SPILLED_SIZE as _),
+ bench_push_vec_small => gen_push(VEC_SIZE as _),
+ bench_insert_push_vec => gen_insert_push(SPILLED_SIZE as _),
+ bench_insert_push_vec_small => gen_insert_push(VEC_SIZE as _),
+ bench_insert_vec => gen_insert(SPILLED_SIZE as _),
+ bench_insert_vec_small => gen_insert(VEC_SIZE as _),
+ bench_remove_vec => gen_remove(SPILLED_SIZE as _),
+ bench_remove_vec_small => gen_remove(VEC_SIZE as _),
+ bench_extend_vec => gen_extend(SPILLED_SIZE as _),
+ bench_extend_vec_small => gen_extend(VEC_SIZE as _),
+ bench_from_iter_vec => gen_from_iter(SPILLED_SIZE as _),
+ bench_from_iter_vec_small => gen_from_iter(VEC_SIZE as _),
+ bench_from_slice_vec => gen_from_slice(SPILLED_SIZE as _),
+ bench_from_slice_vec_small => gen_from_slice(VEC_SIZE as _),
+ bench_extend_from_slice_vec => gen_extend_from_slice(SPILLED_SIZE as _),
+ bench_extend_from_slice_vec_small => gen_extend_from_slice(VEC_SIZE as _),
+ bench_macro_from_elem_vec => gen_from_elem(SPILLED_SIZE as _),
+ bench_macro_from_elem_vec_small => gen_from_elem(VEC_SIZE as _),
+ bench_pushpop_vec => gen_pushpop(),
+ }
+}
+
+fn gen_push<V: Vector<u64>>(n: u64, b: &mut Bencher) {
+ #[inline(never)]
+ fn push_noinline<V: Vector<u64>>(vec: &mut V, x: u64) {
+ vec.push(x);
+ }
+
+ b.iter(|| {
+ let mut vec = V::new();
+ for x in 0..n {
+ push_noinline(&mut vec, x);
+ }
+ vec
+ });
+}
+
+fn gen_insert_push<V: Vector<u64>>(n: u64, b: &mut Bencher) {
+ #[inline(never)]
+ fn insert_push_noinline<V: Vector<u64>>(vec: &mut V, x: u64) {
+ vec.insert(x as usize, x);
+ }
+
+ b.iter(|| {
+ let mut vec = V::new();
+ for x in 0..n {
+ insert_push_noinline(&mut vec, x);
+ }
+ vec
+ });
+}
+
+fn gen_insert<V: Vector<u64>>(n: u64, b: &mut Bencher) {
+ #[inline(never)]
+ fn insert_noinline<V: Vector<u64>>(vec: &mut V, p: usize, x: u64) {
+ vec.insert(p, x)
+ }
+
+ b.iter(|| {
+ let mut vec = V::new();
+ // Always insert at position 0 so that we are subject to shifts of
+ // many different lengths.
+ vec.push(0);
+ for x in 0..n {
+ insert_noinline(&mut vec, 0, x);
+ }
+ vec
+ });
+}
+
+fn gen_remove<V: Vector<u64>>(n: usize, b: &mut Bencher) {
+ #[inline(never)]
+ fn remove_noinline<V: Vector<u64>>(vec: &mut V, p: usize) -> u64 {
+ vec.remove(p)
+ }
+
+ b.iter(|| {
+ let mut vec = V::from_elem(0, n as _);
+
+ for _ in 0..n {
+ remove_noinline(&mut vec, 0);
+ }
+ });
+}
+
+fn gen_extend<V: Vector<u64>>(n: u64, b: &mut Bencher) {
+ b.iter(|| {
+ let mut vec = V::new();
+ vec.extend(0..n);
+ vec
+ });
+}
+
+fn gen_from_iter<V: Vector<u64>>(n: u64, b: &mut Bencher) {
+ let v: Vec<u64> = (0..n).collect();
+ b.iter(|| {
+ let vec = V::from(&v);
+ vec
+ });
+}
+
+fn gen_from_slice<V: Vector<u64>>(n: u64, b: &mut Bencher) {
+ let v: Vec<u64> = (0..n).collect();
+ b.iter(|| {
+ let vec = V::from_elems(&v);
+ vec
+ });
+}
+
+fn gen_extend_from_slice<V: Vector<u64>>(n: u64, b: &mut Bencher) {
+ let v: Vec<u64> = (0..n).collect();
+ b.iter(|| {
+ let mut vec = V::new();
+ vec.extend_from_slice(&v);
+ vec
+ });
+}
+
+fn gen_pushpop<V: Vector<u64>>(b: &mut Bencher) {
+ #[inline(never)]
+ fn pushpop_noinline<V: Vector<u64>>(vec: &mut V, x: u64) -> Option<u64> {
+ vec.push(x);
+ vec.pop()
+ }
+
+ b.iter(|| {
+ let mut vec = V::new();
+ for x in 0..SPILLED_SIZE as _ {
+ pushpop_noinline(&mut vec, x);
+ }
+ vec
+ });
+}
+
+fn gen_from_elem<V: Vector<u64>>(n: usize, b: &mut Bencher) {
+ b.iter(|| {
+ let vec = V::from_elem(42, n);
+ vec
+ });
+}
+
+#[bench]
+fn bench_insert_many(b: &mut Bencher) {
+ #[inline(never)]
+ fn insert_many_noinline<I: IntoIterator<Item = u64>>(
+ vec: &mut SmallVec<[u64; VEC_SIZE]>,
+ index: usize,
+ iterable: I,
+ ) {
+ vec.insert_many(index, iterable)
+ }
+
+ b.iter(|| {
+ let mut vec = SmallVec::<[u64; VEC_SIZE]>::new();
+ insert_many_noinline(&mut vec, 0, 0..SPILLED_SIZE as _);
+ insert_many_noinline(&mut vec, 0, 0..SPILLED_SIZE as _);
+ vec
+ });
+}
+
+#[bench]
+fn bench_insert_from_slice(b: &mut Bencher) {
+ let v: Vec<u64> = (0..SPILLED_SIZE as _).collect();
+ b.iter(|| {
+ let mut vec = SmallVec::<[u64; VEC_SIZE]>::new();
+ vec.insert_from_slice(0, &v);
+ vec.insert_from_slice(0, &v);
+ vec
+ });
+}
+
+#[bench]
+fn bench_macro_from_list(b: &mut Bencher) {
+ b.iter(|| {
+ let vec: SmallVec<[u64; 16]> = smallvec![
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20, 24, 32, 36, 0x40, 0x80,
+ 0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000, 0x10000, 0x20000, 0x40000,
+ 0x80000, 0x100000,
+ ];
+ vec
+ });
+}
+
+#[bench]
+fn bench_macro_from_list_vec(b: &mut Bencher) {
+ b.iter(|| {
+ let vec: Vec<u64> = vec![
+ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 20, 24, 32, 36, 0x40, 0x80,
+ 0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000, 0x10000, 0x20000, 0x40000,
+ 0x80000, 0x100000,
+ ];
+ vec
+ });
+}
diff --git a/third_party/rust/smallvec/debug_metadata/README.md b/third_party/rust/smallvec/debug_metadata/README.md
new file mode 100644
index 0000000000..9a5596ba0a
--- /dev/null
+++ b/third_party/rust/smallvec/debug_metadata/README.md
@@ -0,0 +1,111 @@
+## Debugger Visualizers
+
+Many languages and debuggers enable developers to control how a type is
+displayed in a debugger. These are called "debugger visualizations" or "debugger
+views".
+
+The Windows debuggers (WinDbg\CDB) support defining custom debugger visualizations using
+the `Natvis` framework. To use Natvis, developers write XML documents using the natvis
+schema that describe how debugger types should be displayed with the `.natvis` extension.
+(See: https://docs.microsoft.com/en-us/visualstudio/debugger/create-custom-views-of-native-objects?view=vs-2019)
+The Natvis files provide patterns which match type names a description of how to display
+those types.
+
+The Natvis schema can be found either online (See: https://code.visualstudio.com/docs/cpp/natvis#_schema)
+or locally at `<VS Installation Folder>\Xml\Schemas\1033\natvis.xsd`.
+
+The GNU debugger (GDB) supports defining custom debugger views using Pretty Printers.
+Pretty printers are written as python scripts that describe how a type should be displayed
+when loaded up in GDB/LLDB. (See: https://sourceware.org/gdb/onlinedocs/gdb/Pretty-Printing.html#Pretty-Printing)
+The pretty printers provide patterns, which match type names, and for matching
+types, describe how to display those types. (For writing a pretty printer, see: https://sourceware.org/gdb/onlinedocs/gdb/Writing-a-Pretty_002dPrinter.html#Writing-a-Pretty_002dPrinter).
+
+### Embedding Visualizers
+
+Through the use of the currently unstable `#[debugger_visualizer]` attribute, the `smallvec`
+crate can embed debugger visualizers into the crate metadata.
+
+Currently the two types of visualizers supported are Natvis and Pretty printers.
+
+For Natvis files, when linking an executable with a crate that includes Natvis files,
+the MSVC linker will embed the contents of all Natvis files into the generated `PDB`.
+
+For pretty printers, the compiler will encode the contents of the pretty printer
+in the `.debug_gdb_scripts` section of the `ELF` generated.
+
+### Testing Visualizers
+
+The `smallvec` crate supports testing debugger visualizers defined for this crate. The entry point for
+these tests are `tests/debugger_visualizer.rs`. These tests are defined using the `debugger_test` and
+`debugger_test_parser` crates. The `debugger_test` crate is a proc macro crate which defines a
+single proc macro attribute, `#[debugger_test]`. For more detailed information about this crate,
+see https://crates.io/crates/debugger_test. The CI pipeline for the `smallvec` crate has been updated
+to run the debugger visualizer tests to ensure debugger visualizers do not become broken/stale.
+
+The `#[debugger_test]` proc macro attribute may only be used on test functions and will run the
+function under the debugger specified by the `debugger` meta item.
+
+This proc macro attribute has 3 required values:
+
+1. The first required meta item, `debugger`, takes a string value which specifies the debugger to launch.
+2. The second required meta item, `commands`, takes a string of new line (`\n`) separated list of debugger
+commands to run.
+3. The third required meta item, `expected_statements`, takes a string of new line (`\n`) separated list of
+statements that must exist in the debugger output. Pattern matching through regular expressions is also
+supported by using the `pattern:` prefix for each expected statement.
+
+#### Example:
+
+```rust
+#[debugger_test(
+ debugger = "cdb",
+ commands = "command1\ncommand2\ncommand3",
+ expected_statements = "statement1\nstatement2\nstatement3")]
+fn test() {
+
+}
+```
+
+Using a multiline string is also supported, with a single debugger command/expected statement per line:
+
+```rust
+#[debugger_test(
+ debugger = "cdb",
+ commands = "
+command1
+command2
+command3",
+ expected_statements = "
+statement1
+pattern:statement[0-9]+
+statement3")]
+fn test() {
+
+}
+```
+
+In the example above, the second expected statement uses pattern matching through a regular expression
+by using the `pattern:` prefix.
+
+#### Testing Locally
+
+Currently, only Natvis visualizations have been defined for the `smallvec` crate via `debug_metadata/smallvec.natvis`,
+which means the `tests/debugger_visualizer.rs` tests need to be run on Windows using the `*-pc-windows-msvc` targets.
+To run these tests locally, first ensure the debugging tools for Windows are installed or install them following
+the steps listed here, [Debugging Tools for Windows](https://docs.microsoft.com/en-us/windows-hardware/drivers/debugger/).
+Once the debugging tools have been installed, the tests can be run in the same manner as they are in the CI
+pipeline.
+
+#### Note
+
+When running the debugger visualizer tests, `tests/debugger_visualizer.rs`, they need to be run consecutively
+and not in parallel. This can be achieved by passing the flag `--test-threads=1` to rustc. This is due to
+how the debugger tests are run. Each test marked with the `#[debugger_test]` attribute launches a debugger
+and attaches it to the current test process. If tests are running in parallel, the test will try to attach
+a debugger to the current process which may already have a debugger attached causing the test to fail.
+
+For example:
+
+```
+cargo test --test debugger_visualizer --features debugger_visualizer -- --test-threads=1
+```
diff --git a/third_party/rust/smallvec/debug_metadata/smallvec.natvis b/third_party/rust/smallvec/debug_metadata/smallvec.natvis
new file mode 100644
index 0000000000..8731860f48
--- /dev/null
+++ b/third_party/rust/smallvec/debug_metadata/smallvec.natvis
@@ -0,0 +1,35 @@
+<AutoVisualizer xmlns="http://schemas.microsoft.com/vstudio/debugger/natvis/2010">
+ <Type Name="smallvec::SmallVec&lt;array$&lt;*,*&gt;&gt;" Priority="Medium">
+ <Intrinsic Name="is_inline" Expression="$T2 &gt;= capacity" />
+ <Intrinsic Name="len" Expression="is_inline() ? capacity : data.variant1.value.len" />
+ <Intrinsic Name="data_ptr" Expression="is_inline() ? data.variant0.value.__0.value.value : data.variant1.value.ptr.pointer" />
+
+ <DisplayString>{{ len={len()} is_inline={is_inline()} }}</DisplayString>
+ <Expand>
+ <Item Name="[capacity]">is_inline() ? $T2 : capacity</Item>
+ <Item Name="[len]">len()</Item>
+ <Item Name="[data_ptr]">data_ptr()</Item>
+
+ <ArrayItems>
+ <Size>len()</Size>
+ <ValuePointer>data_ptr()</ValuePointer>
+ </ArrayItems>
+ </Expand>
+ </Type>
+
+ <Type Name="smallvec::SmallVec&lt;array$&lt;*,*&gt;&gt;" Priority="MediumLow">
+ <Intrinsic Name="is_inline" Expression="$T2 &gt;= capacity" />
+ <Intrinsic Name="len" Expression="is_inline() ? capacity : data.heap.__1" />
+ <Intrinsic Name="data_ptr" Expression="is_inline() ? data.inline.value.value.value : data.heap.__0.pointer" />
+ <DisplayString>{{ len={len()} is_inline={is_inline()} }}</DisplayString>
+ <Expand>
+ <Item Name="[capacity]">is_inline() ? $T2 : capacity</Item>
+ <Item Name="[len]">len()</Item>
+
+ <ArrayItems>
+ <Size>len()</Size>
+ <ValuePointer>data_ptr()</ValuePointer>
+ </ArrayItems>
+ </Expand>
+ </Type>
+</AutoVisualizer> \ No newline at end of file
diff --git a/third_party/rust/smallvec/scripts/run_miri.sh b/third_party/rust/smallvec/scripts/run_miri.sh
new file mode 100644
index 0000000000..010ceb06ec
--- /dev/null
+++ b/third_party/rust/smallvec/scripts/run_miri.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/bash
+
+set -ex
+
+# Clean out our target dir, which may have artifacts compiled by a version of
+# rust different from the one we're about to download.
+cargo clean
+
+# Install and run the latest version of nightly where miri built successfully.
+# Taken from: https://github.com/rust-lang/miri#running-miri-on-ci
+
+MIRI_NIGHTLY=nightly-$(curl -s https://rust-lang.github.io/rustup-components-history/x86_64-unknown-linux-gnu/miri)
+echo "Installing latest nightly with Miri: $MIRI_NIGHTLY"
+rustup override unset
+rustup default "$MIRI_NIGHTLY"
+
+rustup component add miri
+cargo miri setup
+
+cargo miri test --verbose
+cargo miri test --verbose --features union
+cargo miri test --verbose --all-features
+
+rustup override set nightly
diff --git a/third_party/rust/smallvec/src/arbitrary.rs b/third_party/rust/smallvec/src/arbitrary.rs
new file mode 100644
index 0000000000..cbdfcb0e42
--- /dev/null
+++ b/third_party/rust/smallvec/src/arbitrary.rs
@@ -0,0 +1,19 @@
+use crate::{Array, SmallVec};
+use arbitrary::{Arbitrary, Unstructured};
+
+impl<'a, A: Array> Arbitrary<'a> for SmallVec<A>
+where
+ <A as Array>::Item: Arbitrary<'a>,
+{
+ fn arbitrary(u: &mut Unstructured<'a>) -> arbitrary::Result<Self> {
+ u.arbitrary_iter()?.collect()
+ }
+
+ fn arbitrary_take_rest(u: Unstructured<'a>) -> arbitrary::Result<Self> {
+ u.arbitrary_take_rest_iter()?.collect()
+ }
+
+ fn size_hint(depth: usize) -> (usize, Option<usize>) {
+ arbitrary::size_hint::and(<usize as Arbitrary>::size_hint(depth), (0, None))
+ }
+}
diff --git a/third_party/rust/smallvec/src/lib.rs b/third_party/rust/smallvec/src/lib.rs
new file mode 100644
index 0000000000..8281fb1808
--- /dev/null
+++ b/third_party/rust/smallvec/src/lib.rs
@@ -0,0 +1,2463 @@
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Small vectors in various sizes. These store a certain number of elements inline, and fall back
+//! to the heap for larger allocations. This can be a useful optimization for improving cache
+//! locality and reducing allocator traffic for workloads that fit within the inline buffer.
+//!
+//! ## `no_std` support
+//!
+//! By default, `smallvec` does not depend on `std`. However, the optional
+//! `write` feature implements the `std::io::Write` trait for vectors of `u8`.
+//! When this feature is enabled, `smallvec` depends on `std`.
+//!
+//! ## Optional features
+//!
+//! ### `serde`
+//!
+//! When this optional dependency is enabled, `SmallVec` implements the `serde::Serialize` and
+//! `serde::Deserialize` traits.
+//!
+//! ### `write`
+//!
+//! When this feature is enabled, `SmallVec<[u8; _]>` implements the `std::io::Write` trait.
+//! This feature is not compatible with `#![no_std]` programs.
+//!
+//! ### `union`
+//!
+//! **This feature requires Rust 1.49.**
+//!
+//! When the `union` feature is enabled `smallvec` will track its state (inline or spilled)
+//! without the use of an enum tag, reducing the size of the `smallvec` by one machine word.
+//! This means that there is potentially no space overhead compared to `Vec`.
+//! Note that `smallvec` can still be larger than `Vec` if the inline buffer is larger than two
+//! machine words.
+//!
+//! To use this feature add `features = ["union"]` in the `smallvec` section of Cargo.toml.
+//! Note that this feature requires Rust 1.49.
+//!
+//! Tracking issue: [rust-lang/rust#55149](https://github.com/rust-lang/rust/issues/55149)
+//!
+//! ### `const_generics`
+//!
+//! **This feature requires Rust 1.51.**
+//!
+//! When this feature is enabled, `SmallVec` works with any arrays of any size, not just a fixed
+//! list of sizes.
+//!
+//! ### `const_new`
+//!
+//! **This feature requires Rust 1.51.**
+//!
+//! This feature exposes the functions [`SmallVec::new_const`], [`SmallVec::from_const`], and [`smallvec_inline`] which enables the `SmallVec` to be initialized from a const context.
+//! For details, see the
+//! [Rust Reference](https://doc.rust-lang.org/reference/const_eval.html#const-functions).
+//!
+//! ### `drain_filter`
+//!
+//! **This feature is unstable.** It may change to match the unstable `drain_filter` method in libstd.
+//!
+//! Enables the `drain_filter` method, which produces an iterator that calls a user-provided
+//! closure to determine which elements of the vector to remove and yield from the iterator.
+//!
+//! ### `drain_keep_rest`
+//!
+//! **This feature is unstable.** It may change to match the unstable `drain_keep_rest` method in libstd.
+//!
+//! Enables the `DrainFilter::keep_rest` method.
+//!
+//! ### `specialization`
+//!
+//! **This feature is unstable and requires a nightly build of the Rust toolchain.**
+//!
+//! When this feature is enabled, `SmallVec::from(slice)` has improved performance for slices
+//! of `Copy` types. (Without this feature, you can use `SmallVec::from_slice` to get optimal
+//! performance for `Copy` types.)
+//!
+//! Tracking issue: [rust-lang/rust#31844](https://github.com/rust-lang/rust/issues/31844)
+//!
+//! ### `may_dangle`
+//!
+//! **This feature is unstable and requires a nightly build of the Rust toolchain.**
+//!
+//! This feature makes the Rust compiler less strict about use of vectors that contain borrowed
+//! references. For details, see the
+//! [Rustonomicon](https://doc.rust-lang.org/1.42.0/nomicon/dropck.html#an-escape-hatch).
+//!
+//! Tracking issue: [rust-lang/rust#34761](https://github.com/rust-lang/rust/issues/34761)
+
+#![no_std]
+#![cfg_attr(docsrs, feature(doc_cfg))]
+#![cfg_attr(feature = "specialization", allow(incomplete_features))]
+#![cfg_attr(feature = "specialization", feature(specialization))]
+#![cfg_attr(feature = "may_dangle", feature(dropck_eyepatch))]
+#![cfg_attr(
+ feature = "debugger_visualizer",
+ feature(debugger_visualizer),
+ debugger_visualizer(natvis_file = "../debug_metadata/smallvec.natvis")
+)]
+#![deny(missing_docs)]
+
+#[doc(hidden)]
+pub extern crate alloc;
+
+#[cfg(any(test, feature = "write"))]
+extern crate std;
+
+#[cfg(test)]
+mod tests;
+
+#[allow(deprecated)]
+use alloc::alloc::{Layout, LayoutErr};
+use alloc::boxed::Box;
+use alloc::{vec, vec::Vec};
+use core::borrow::{Borrow, BorrowMut};
+use core::cmp;
+use core::fmt;
+use core::hash::{Hash, Hasher};
+use core::hint::unreachable_unchecked;
+use core::iter::{repeat, FromIterator, FusedIterator, IntoIterator};
+use core::mem;
+use core::mem::MaybeUninit;
+use core::ops::{self, Range, RangeBounds};
+use core::ptr::{self, NonNull};
+use core::slice::{self, SliceIndex};
+
+#[cfg(feature = "serde")]
+use serde::{
+ de::{Deserialize, Deserializer, SeqAccess, Visitor},
+ ser::{Serialize, SerializeSeq, Serializer},
+};
+
+#[cfg(feature = "serde")]
+use core::marker::PhantomData;
+
+#[cfg(feature = "write")]
+use std::io;
+
+#[cfg(feature = "drain_keep_rest")]
+use core::mem::ManuallyDrop;
+
+/// Creates a [`SmallVec`] containing the arguments.
+///
+/// `smallvec!` allows `SmallVec`s to be defined with the same syntax as array expressions.
+/// There are two forms of this macro:
+///
+/// - Create a [`SmallVec`] containing a given list of elements:
+///
+/// ```
+/// # #[macro_use] extern crate smallvec;
+/// # use smallvec::SmallVec;
+/// # fn main() {
+/// let v: SmallVec<[_; 128]> = smallvec![1, 2, 3];
+/// assert_eq!(v[0], 1);
+/// assert_eq!(v[1], 2);
+/// assert_eq!(v[2], 3);
+/// # }
+/// ```
+///
+/// - Create a [`SmallVec`] from a given element and size:
+///
+/// ```
+/// # #[macro_use] extern crate smallvec;
+/// # use smallvec::SmallVec;
+/// # fn main() {
+/// let v: SmallVec<[_; 0x8000]> = smallvec![1; 3];
+/// assert_eq!(v, SmallVec::from_buf([1, 1, 1]));
+/// # }
+/// ```
+///
+/// Note that unlike array expressions this syntax supports all elements
+/// which implement [`Clone`] and the number of elements doesn't have to be
+/// a constant.
+///
+/// This will use `clone` to duplicate an expression, so one should be careful
+/// using this with types having a nonstandard `Clone` implementation. For
+/// example, `smallvec![Rc::new(1); 5]` will create a vector of five references
+/// to the same boxed integer value, not five references pointing to independently
+/// boxed integers.
+
+#[macro_export]
+macro_rules! smallvec {
+ // count helper: transform any expression into 1
+ (@one $x:expr) => (1usize);
+ ($elem:expr; $n:expr) => ({
+ $crate::SmallVec::from_elem($elem, $n)
+ });
+ ($($x:expr),*$(,)*) => ({
+ let count = 0usize $(+ $crate::smallvec!(@one $x))*;
+ #[allow(unused_mut)]
+ let mut vec = $crate::SmallVec::new();
+ if count <= vec.inline_size() {
+ $(vec.push($x);)*
+ vec
+ } else {
+ $crate::SmallVec::from_vec($crate::alloc::vec![$($x,)*])
+ }
+ });
+}
+
+/// Creates an inline [`SmallVec`] containing the arguments. This macro is enabled by the feature `const_new`.
+///
+/// `smallvec_inline!` allows `SmallVec`s to be defined with the same syntax as array expressions in `const` contexts.
+/// The inline storage `A` will always be an array of the size specified by the arguments.
+/// There are two forms of this macro:
+///
+/// - Create a [`SmallVec`] containing a given list of elements:
+///
+/// ```
+/// # #[macro_use] extern crate smallvec;
+/// # use smallvec::SmallVec;
+/// # fn main() {
+/// const V: SmallVec<[i32; 3]> = smallvec_inline![1, 2, 3];
+/// assert_eq!(V[0], 1);
+/// assert_eq!(V[1], 2);
+/// assert_eq!(V[2], 3);
+/// # }
+/// ```
+///
+/// - Create a [`SmallVec`] from a given element and size:
+///
+/// ```
+/// # #[macro_use] extern crate smallvec;
+/// # use smallvec::SmallVec;
+/// # fn main() {
+/// const V: SmallVec<[i32; 3]> = smallvec_inline![1; 3];
+/// assert_eq!(V, SmallVec::from_buf([1, 1, 1]));
+/// # }
+/// ```
+///
+/// Note that the behavior mimics that of array expressions, in contrast to [`smallvec`].
+#[cfg(feature = "const_new")]
+#[cfg_attr(docsrs, doc(cfg(feature = "const_new")))]
+#[macro_export]
+macro_rules! smallvec_inline {
+ // count helper: transform any expression into 1
+ (@one $x:expr) => (1usize);
+ ($elem:expr; $n:expr) => ({
+ $crate::SmallVec::<[_; $n]>::from_const([$elem; $n])
+ });
+ ($($x:expr),+ $(,)?) => ({
+ const N: usize = 0usize $(+ $crate::smallvec_inline!(@one $x))*;
+ $crate::SmallVec::<[_; N]>::from_const([$($x,)*])
+ });
+}
+
+/// `panic!()` in debug builds, optimization hint in release.
+#[cfg(not(feature = "union"))]
+macro_rules! debug_unreachable {
+ () => {
+ debug_unreachable!("entered unreachable code")
+ };
+ ($e:expr) => {
+ if cfg!(debug_assertions) {
+ panic!($e);
+ } else {
+ unreachable_unchecked();
+ }
+ };
+}
+
+/// Trait to be implemented by a collection that can be extended from a slice
+///
+/// ## Example
+///
+/// ```rust
+/// use smallvec::{ExtendFromSlice, SmallVec};
+///
+/// fn initialize<V: ExtendFromSlice<u8>>(v: &mut V) {
+/// v.extend_from_slice(b"Test!");
+/// }
+///
+/// let mut vec = Vec::new();
+/// initialize(&mut vec);
+/// assert_eq!(&vec, b"Test!");
+///
+/// let mut small_vec = SmallVec::<[u8; 8]>::new();
+/// initialize(&mut small_vec);
+/// assert_eq!(&small_vec as &[_], b"Test!");
+/// ```
+#[doc(hidden)]
+#[deprecated]
+pub trait ExtendFromSlice<T> {
+ /// Extends a collection from a slice of its element type
+ fn extend_from_slice(&mut self, other: &[T]);
+}
+
+#[allow(deprecated)]
+impl<T: Clone> ExtendFromSlice<T> for Vec<T> {
+ fn extend_from_slice(&mut self, other: &[T]) {
+ Vec::extend_from_slice(self, other)
+ }
+}
+
+/// Error type for APIs with fallible heap allocation
+#[derive(Debug)]
+pub enum CollectionAllocErr {
+ /// Overflow `usize::MAX` or other error during size computation
+ CapacityOverflow,
+ /// The allocator return an error
+ AllocErr {
+ /// The layout that was passed to the allocator
+ layout: Layout,
+ },
+}
+
+impl fmt::Display for CollectionAllocErr {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "Allocation error: {:?}", self)
+ }
+}
+
+#[allow(deprecated)]
+impl From<LayoutErr> for CollectionAllocErr {
+ fn from(_: LayoutErr) -> Self {
+ CollectionAllocErr::CapacityOverflow
+ }
+}
+
+fn infallible<T>(result: Result<T, CollectionAllocErr>) -> T {
+ match result {
+ Ok(x) => x,
+ Err(CollectionAllocErr::CapacityOverflow) => panic!("capacity overflow"),
+ Err(CollectionAllocErr::AllocErr { layout }) => alloc::alloc::handle_alloc_error(layout),
+ }
+}
+
+/// FIXME: use `Layout::array` when we require a Rust version where it’s stable
+/// https://github.com/rust-lang/rust/issues/55724
+fn layout_array<T>(n: usize) -> Result<Layout, CollectionAllocErr> {
+ let size = mem::size_of::<T>()
+ .checked_mul(n)
+ .ok_or(CollectionAllocErr::CapacityOverflow)?;
+ let align = mem::align_of::<T>();
+ Layout::from_size_align(size, align).map_err(|_| CollectionAllocErr::CapacityOverflow)
+}
+
+unsafe fn deallocate<T>(ptr: NonNull<T>, capacity: usize) {
+ // This unwrap should succeed since the same did when allocating.
+ let layout = layout_array::<T>(capacity).unwrap();
+ alloc::alloc::dealloc(ptr.as_ptr() as *mut u8, layout)
+}
+
+/// An iterator that removes the items from a `SmallVec` and yields them by value.
+///
+/// Returned from [`SmallVec::drain`][1].
+///
+/// [1]: struct.SmallVec.html#method.drain
+pub struct Drain<'a, T: 'a + Array> {
+ tail_start: usize,
+ tail_len: usize,
+ iter: slice::Iter<'a, T::Item>,
+ vec: NonNull<SmallVec<T>>,
+}
+
+impl<'a, T: 'a + Array> fmt::Debug for Drain<'a, T>
+where
+ T::Item: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("Drain").field(&self.iter.as_slice()).finish()
+ }
+}
+
+unsafe impl<'a, T: Sync + Array> Sync for Drain<'a, T> {}
+unsafe impl<'a, T: Send + Array> Send for Drain<'a, T> {}
+
+impl<'a, T: 'a + Array> Iterator for Drain<'a, T> {
+ type Item = T::Item;
+
+ #[inline]
+ fn next(&mut self) -> Option<T::Item> {
+ self.iter
+ .next()
+ .map(|reference| unsafe { ptr::read(reference) })
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+}
+
+impl<'a, T: 'a + Array> DoubleEndedIterator for Drain<'a, T> {
+ #[inline]
+ fn next_back(&mut self) -> Option<T::Item> {
+ self.iter
+ .next_back()
+ .map(|reference| unsafe { ptr::read(reference) })
+ }
+}
+
+impl<'a, T: Array> ExactSizeIterator for Drain<'a, T> {
+ #[inline]
+ fn len(&self) -> usize {
+ self.iter.len()
+ }
+}
+
+impl<'a, T: Array> FusedIterator for Drain<'a, T> {}
+
+impl<'a, T: 'a + Array> Drop for Drain<'a, T> {
+ fn drop(&mut self) {
+ self.for_each(drop);
+
+ if self.tail_len > 0 {
+ unsafe {
+ let source_vec = self.vec.as_mut();
+
+ // memmove back untouched tail, update to new length
+ let start = source_vec.len();
+ let tail = self.tail_start;
+ if tail != start {
+ // as_mut_ptr creates a &mut, invalidating other pointers.
+ // This pattern avoids calling it with a pointer already present.
+ let ptr = source_vec.as_mut_ptr();
+ let src = ptr.add(tail);
+ let dst = ptr.add(start);
+ ptr::copy(src, dst, self.tail_len);
+ }
+ source_vec.set_len(start + self.tail_len);
+ }
+ }
+ }
+}
+
+#[cfg(feature = "drain_filter")]
+/// An iterator which uses a closure to determine if an element should be removed.
+///
+/// Returned from [`SmallVec::drain_filter`][1].
+///
+/// [1]: struct.SmallVec.html#method.drain_filter
+pub struct DrainFilter<'a, T, F>
+where
+ F: FnMut(&mut T::Item) -> bool,
+ T: Array,
+{
+ vec: &'a mut SmallVec<T>,
+ /// The index of the item that will be inspected by the next call to `next`.
+ idx: usize,
+ /// The number of items that have been drained (removed) thus far.
+ del: usize,
+ /// The original length of `vec` prior to draining.
+ old_len: usize,
+ /// The filter test predicate.
+ pred: F,
+ /// A flag that indicates a panic has occurred in the filter test predicate.
+ /// This is used as a hint in the drop implementation to prevent consumption
+ /// of the remainder of the `DrainFilter`. Any unprocessed items will be
+ /// backshifted in the `vec`, but no further items will be dropped or
+ /// tested by the filter predicate.
+ panic_flag: bool,
+}
+
+#[cfg(feature = "drain_filter")]
+impl <T, F> fmt::Debug for DrainFilter<'_, T, F>
+where
+ F: FnMut(&mut T::Item) -> bool,
+ T: Array,
+ T::Item: fmt::Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("DrainFilter").field(&self.vec.as_slice()).finish()
+ }
+}
+
+#[cfg(feature = "drain_filter")]
+impl <T, F> Iterator for DrainFilter<'_, T, F>
+where
+ F: FnMut(&mut T::Item) -> bool,
+ T: Array,
+{
+ type Item = T::Item;
+
+ fn next(&mut self) -> Option<T::Item>
+ {
+ unsafe {
+ while self.idx < self.old_len {
+ let i = self.idx;
+ let v = slice::from_raw_parts_mut(self.vec.as_mut_ptr(), self.old_len);
+ self.panic_flag = true;
+ let drained = (self.pred)(&mut v[i]);
+ self.panic_flag = false;
+ // Update the index *after* the predicate is called. If the index
+ // is updated prior and the predicate panics, the element at this
+ // index would be leaked.
+ self.idx += 1;
+ if drained {
+ self.del += 1;
+ return Some(ptr::read(&v[i]));
+ } else if self.del > 0 {
+ let del = self.del;
+ let src: *const Self::Item = &v[i];
+ let dst: *mut Self::Item = &mut v[i - del];
+ ptr::copy_nonoverlapping(src, dst, 1);
+ }
+ }
+ None
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (0, Some(self.old_len - self.idx))
+ }
+}
+
+#[cfg(feature = "drain_filter")]
+impl <T, F> Drop for DrainFilter<'_, T, F>
+where
+ F: FnMut(&mut T::Item) -> bool,
+ T: Array,
+{
+ fn drop(&mut self) {
+ struct BackshiftOnDrop<'a, 'b, T, F>
+ where
+ F: FnMut(&mut T::Item) -> bool,
+ T: Array
+ {
+ drain: &'b mut DrainFilter<'a, T, F>,
+ }
+
+ impl<'a, 'b, T, F> Drop for BackshiftOnDrop<'a, 'b, T, F>
+ where
+ F: FnMut(&mut T::Item) -> bool,
+ T: Array
+ {
+ fn drop(&mut self) {
+ unsafe {
+ if self.drain.idx < self.drain.old_len && self.drain.del > 0 {
+ // This is a pretty messed up state, and there isn't really an
+ // obviously right thing to do. We don't want to keep trying
+ // to execute `pred`, so we just backshift all the unprocessed
+ // elements and tell the vec that they still exist. The backshift
+ // is required to prevent a double-drop of the last successfully
+ // drained item prior to a panic in the predicate.
+ let ptr = self.drain.vec.as_mut_ptr();
+ let src = ptr.add(self.drain.idx);
+ let dst = src.sub(self.drain.del);
+ let tail_len = self.drain.old_len - self.drain.idx;
+ src.copy_to(dst, tail_len);
+ }
+ self.drain.vec.set_len(self.drain.old_len - self.drain.del);
+ }
+ }
+ }
+
+ let backshift = BackshiftOnDrop { drain: self };
+
+ // Attempt to consume any remaining elements if the filter predicate
+ // has not yet panicked. We'll backshift any remaining elements
+ // whether we've already panicked or if the consumption here panics.
+ if !backshift.drain.panic_flag {
+ backshift.drain.for_each(drop);
+ }
+ }
+}
+
+#[cfg(feature = "drain_keep_rest")]
+impl <T, F> DrainFilter<'_, T, F>
+where
+ F: FnMut(&mut T::Item) -> bool,
+ T: Array
+{
+ /// Keep unyielded elements in the source `Vec`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use smallvec::{smallvec, SmallVec};
+ ///
+ /// let mut vec: SmallVec<[char; 2]> = smallvec!['a', 'b', 'c'];
+ /// let mut drain = vec.drain_filter(|_| true);
+ ///
+ /// assert_eq!(drain.next().unwrap(), 'a');
+ ///
+ /// // This call keeps 'b' and 'c' in the vec.
+ /// drain.keep_rest();
+ ///
+ /// // If we wouldn't call `keep_rest()`,
+ /// // `vec` would be empty.
+ /// assert_eq!(vec, SmallVec::<[char; 2]>::from_slice(&['b', 'c']));
+ /// ```
+ pub fn keep_rest(self)
+ {
+ // At this moment layout looks like this:
+ //
+ // _____________________/-- old_len
+ // / \
+ // [kept] [yielded] [tail]
+ // \_______/ ^-- idx
+ // \-- del
+ //
+ // Normally `Drop` impl would drop [tail] (via .for_each(drop), ie still calling `pred`)
+ //
+ // 1. Move [tail] after [kept]
+ // 2. Update length of the original vec to `old_len - del`
+ // a. In case of ZST, this is the only thing we want to do
+ // 3. Do *not* drop self, as everything is put in a consistent state already, there is nothing to do
+ let mut this = ManuallyDrop::new(self);
+
+ unsafe {
+ // ZSTs have no identity, so we don't need to move them around.
+ let needs_move = mem::size_of::<T>() != 0;
+
+ if needs_move && this.idx < this.old_len && this.del > 0 {
+ let ptr = this.vec.as_mut_ptr();
+ let src = ptr.add(this.idx);
+ let dst = src.sub(this.del);
+ let tail_len = this.old_len - this.idx;
+ src.copy_to(dst, tail_len);
+ }
+
+ let new_len = this.old_len - this.del;
+ this.vec.set_len(new_len);
+ }
+ }
+}
+
+#[cfg(feature = "union")]
+union SmallVecData<A: Array> {
+ inline: core::mem::ManuallyDrop<MaybeUninit<A>>,
+ heap: (NonNull<A::Item>, usize),
+}
+
+#[cfg(all(feature = "union", feature = "const_new"))]
+impl<T, const N: usize> SmallVecData<[T; N]> {
+ #[cfg_attr(docsrs, doc(cfg(feature = "const_new")))]
+ #[inline]
+ const fn from_const(inline: MaybeUninit<[T; N]>) -> Self {
+ SmallVecData {
+ inline: core::mem::ManuallyDrop::new(inline),
+ }
+ }
+}
+
+#[cfg(feature = "union")]
+impl<A: Array> SmallVecData<A> {
+ #[inline]
+ unsafe fn inline(&self) -> ConstNonNull<A::Item> {
+ ConstNonNull::new(self.inline.as_ptr() as *const A::Item).unwrap()
+ }
+ #[inline]
+ unsafe fn inline_mut(&mut self) -> NonNull<A::Item> {
+ NonNull::new(self.inline.as_mut_ptr() as *mut A::Item).unwrap()
+ }
+ #[inline]
+ fn from_inline(inline: MaybeUninit<A>) -> SmallVecData<A> {
+ SmallVecData {
+ inline: core::mem::ManuallyDrop::new(inline),
+ }
+ }
+ #[inline]
+ unsafe fn into_inline(self) -> MaybeUninit<A> {
+ core::mem::ManuallyDrop::into_inner(self.inline)
+ }
+ #[inline]
+ unsafe fn heap(&self) -> (ConstNonNull<A::Item>, usize) {
+ (ConstNonNull(self.heap.0), self.heap.1)
+ }
+ #[inline]
+ unsafe fn heap_mut(&mut self) -> (NonNull<A::Item>, &mut usize) {
+ let h = &mut self.heap;
+ (h.0, &mut h.1)
+ }
+ #[inline]
+ fn from_heap(ptr: NonNull<A::Item>, len: usize) -> SmallVecData<A> {
+ SmallVecData { heap: (ptr, len) }
+ }
+}
+
+#[cfg(not(feature = "union"))]
+enum SmallVecData<A: Array> {
+ Inline(MaybeUninit<A>),
+    // Using NonNull and NonZero here allows us to reduce the size of `SmallVec`.
+ Heap {
+ // Since we never allocate on heap
+ // unless our capacity is bigger than inline capacity
+ // heap capacity cannot be less than 1.
+ // Therefore, pointer cannot be null too.
+ ptr: NonNull<A::Item>,
+ len: usize,
+ },
+}
+
+#[cfg(all(not(feature = "union"), feature = "const_new"))]
+impl<T, const N: usize> SmallVecData<[T; N]> {
+ #[cfg_attr(docsrs, doc(cfg(feature = "const_new")))]
+ #[inline]
+ const fn from_const(inline: MaybeUninit<[T; N]>) -> Self {
+ SmallVecData::Inline(inline)
+ }
+}
+
+#[cfg(not(feature = "union"))]
+impl<A: Array> SmallVecData<A> {
+ #[inline]
+ unsafe fn inline(&self) -> ConstNonNull<A::Item> {
+ match self {
+ SmallVecData::Inline(a) => ConstNonNull::new(a.as_ptr() as *const A::Item).unwrap(),
+ _ => debug_unreachable!(),
+ }
+ }
+ #[inline]
+ unsafe fn inline_mut(&mut self) -> NonNull<A::Item> {
+ match self {
+ SmallVecData::Inline(a) => NonNull::new(a.as_mut_ptr() as *mut A::Item).unwrap(),
+ _ => debug_unreachable!(),
+ }
+ }
+ #[inline]
+ fn from_inline(inline: MaybeUninit<A>) -> SmallVecData<A> {
+ SmallVecData::Inline(inline)
+ }
+ #[inline]
+ unsafe fn into_inline(self) -> MaybeUninit<A> {
+ match self {
+ SmallVecData::Inline(a) => a,
+ _ => debug_unreachable!(),
+ }
+ }
+ #[inline]
+ unsafe fn heap(&self) -> (ConstNonNull<A::Item>, usize) {
+ match self {
+ SmallVecData::Heap { ptr, len } => (ConstNonNull(*ptr), *len),
+ _ => debug_unreachable!(),
+ }
+ }
+ #[inline]
+ unsafe fn heap_mut(&mut self) -> (NonNull<A::Item>, &mut usize) {
+ match self {
+ SmallVecData::Heap { ptr, len } => (*ptr, len),
+ _ => debug_unreachable!(),
+ }
+ }
+ #[inline]
+ fn from_heap(ptr: NonNull<A::Item>, len: usize) -> SmallVecData<A> {
+ SmallVecData::Heap { ptr, len }
+ }
+}
+
+unsafe impl<A: Array + Send> Send for SmallVecData<A> {}
+unsafe impl<A: Array + Sync> Sync for SmallVecData<A> {}
+
+/// A `Vec`-like container that can store a small number of elements inline.
+///
+/// `SmallVec` acts like a vector, but can store a limited amount of data inline within the
+/// `SmallVec` struct rather than in a separate allocation. If the data exceeds this limit, the
+/// `SmallVec` will "spill" its data onto the heap, allocating a new buffer to hold it.
+///
+/// The amount of data that a `SmallVec` can store inline depends on its backing store. The backing
+/// store can be any type that implements the `Array` trait; usually it is a small fixed-sized
+/// array. For example a `SmallVec<[u64; 8]>` can hold up to eight 64-bit integers inline.
+///
+/// ## Example
+///
+/// ```rust
+/// use smallvec::SmallVec;
+/// let mut v = SmallVec::<[u8; 4]>::new(); // initialize an empty vector
+///
+/// // The vector can hold up to 4 items without spilling onto the heap.
+/// v.extend(0..4);
+/// assert_eq!(v.len(), 4);
+/// assert!(!v.spilled());
+///
+/// // Pushing another element will force the buffer to spill:
+/// v.push(4);
+/// assert_eq!(v.len(), 5);
+/// assert!(v.spilled());
+/// ```
+pub struct SmallVec<A: Array> {
+ // The capacity field is used to determine which of the storage variants is active:
+ // If capacity <= Self::inline_capacity() then the inline variant is used and capacity holds the current length of the vector (number of elements actually in use).
+ // If capacity > Self::inline_capacity() then the heap variant is used and capacity holds the size of the memory allocation.
+ capacity: usize,
+ data: SmallVecData<A>,
+}
+
+impl<A: Array> SmallVec<A> {
+ /// Construct an empty vector
+ #[inline]
+ pub fn new() -> SmallVec<A> {
+ // Try to detect invalid custom implementations of `Array`. Hopefully,
+ // this check should be optimized away entirely for valid ones.
+ assert!(
+ mem::size_of::<A>() == A::size() * mem::size_of::<A::Item>()
+ && mem::align_of::<A>() >= mem::align_of::<A::Item>()
+ );
+ SmallVec {
+ capacity: 0,
+ data: SmallVecData::from_inline(MaybeUninit::uninit()),
+ }
+ }
+
+ /// Construct an empty vector with enough capacity pre-allocated to store at least `n`
+ /// elements.
+ ///
+ /// Will create a heap allocation only if `n` is larger than the inline capacity.
+ ///
+ /// ```
+ /// # use smallvec::SmallVec;
+ ///
+ /// let v: SmallVec<[u8; 3]> = SmallVec::with_capacity(100);
+ ///
+ /// assert!(v.is_empty());
+ /// assert!(v.capacity() >= 100);
+ /// ```
+ #[inline]
+ pub fn with_capacity(n: usize) -> Self {
+ let mut v = SmallVec::new();
+ v.reserve_exact(n);
+ v
+ }
+
+ /// Construct a new `SmallVec` from a `Vec<A::Item>`.
+ ///
+ /// Elements will be copied to the inline buffer if vec.capacity() <= Self::inline_capacity().
+ ///
+ /// ```rust
+ /// use smallvec::SmallVec;
+ ///
+ /// let vec = vec![1, 2, 3, 4, 5];
+ /// let small_vec: SmallVec<[_; 3]> = SmallVec::from_vec(vec);
+ ///
+ /// assert_eq!(&*small_vec, &[1, 2, 3, 4, 5]);
+ /// ```
+ #[inline]
+ pub fn from_vec(mut vec: Vec<A::Item>) -> SmallVec<A> {
+ if vec.capacity() <= Self::inline_capacity() {
+ // Cannot use Vec with smaller capacity
+ // because we use value of `Self::capacity` field as indicator.
+ unsafe {
+ let mut data = SmallVecData::<A>::from_inline(MaybeUninit::uninit());
+ let len = vec.len();
+ vec.set_len(0);
+ ptr::copy_nonoverlapping(vec.as_ptr(), data.inline_mut().as_ptr(), len);
+
+ SmallVec {
+ capacity: len,
+ data,
+ }
+ }
+ } else {
+ let (ptr, cap, len) = (vec.as_mut_ptr(), vec.capacity(), vec.len());
+ mem::forget(vec);
+ let ptr = NonNull::new(ptr)
+ // See docs: https://doc.rust-lang.org/std/vec/struct.Vec.html#method.as_mut_ptr
+ .expect("Cannot be null by `Vec` invariant");
+
+ SmallVec {
+ capacity: cap,
+ data: SmallVecData::from_heap(ptr, len),
+ }
+ }
+ }
+
+ /// Constructs a new `SmallVec` on the stack from an `A` without
+ /// copying elements.
+ ///
+ /// ```rust
+ /// use smallvec::SmallVec;
+ ///
+ /// let buf = [1, 2, 3, 4, 5];
+ /// let small_vec: SmallVec<_> = SmallVec::from_buf(buf);
+ ///
+ /// assert_eq!(&*small_vec, &[1, 2, 3, 4, 5]);
+ /// ```
+ #[inline]
+ pub fn from_buf(buf: A) -> SmallVec<A> {
+ SmallVec {
+ capacity: A::size(),
+ data: SmallVecData::from_inline(MaybeUninit::new(buf)),
+ }
+ }
+
+ /// Constructs a new `SmallVec` on the stack from an `A` without
+ /// copying elements. Also sets the length, which must be less or
+ /// equal to the size of `buf`.
+ ///
+ /// ```rust
+ /// use smallvec::SmallVec;
+ ///
+ /// let buf = [1, 2, 3, 4, 5, 0, 0, 0];
+ /// let small_vec: SmallVec<_> = SmallVec::from_buf_and_len(buf, 5);
+ ///
+ /// assert_eq!(&*small_vec, &[1, 2, 3, 4, 5]);
+ /// ```
+ #[inline]
+ pub fn from_buf_and_len(buf: A, len: usize) -> SmallVec<A> {
+ assert!(len <= A::size());
+ unsafe { SmallVec::from_buf_and_len_unchecked(MaybeUninit::new(buf), len) }
+ }
+
+ /// Constructs a new `SmallVec` on the stack from an `A` without
+ /// copying elements. Also sets the length. The user is responsible
+ /// for ensuring that `len <= A::size()`.
+ ///
+ /// ```rust
+ /// use smallvec::SmallVec;
+ /// use std::mem::MaybeUninit;
+ ///
+ /// let buf = [1, 2, 3, 4, 5, 0, 0, 0];
+ /// let small_vec: SmallVec<_> = unsafe {
+ /// SmallVec::from_buf_and_len_unchecked(MaybeUninit::new(buf), 5)
+ /// };
+ ///
+ /// assert_eq!(&*small_vec, &[1, 2, 3, 4, 5]);
+ /// ```
+ #[inline]
+ pub unsafe fn from_buf_and_len_unchecked(buf: MaybeUninit<A>, len: usize) -> SmallVec<A> {
+ SmallVec {
+ capacity: len,
+ data: SmallVecData::from_inline(buf),
+ }
+ }
+
+ /// Sets the length of a vector.
+ ///
+ /// This will explicitly set the size of the vector, without actually
+ /// modifying its buffers, so it is up to the caller to ensure that the
+ /// vector is actually the specified size.
+ pub unsafe fn set_len(&mut self, new_len: usize) {
+ let (_, len_ptr, _) = self.triple_mut();
+ *len_ptr = new_len;
+ }
+
+ /// The maximum number of elements this vector can hold inline
+ #[inline]
+ fn inline_capacity() -> usize {
+ if mem::size_of::<A::Item>() > 0 {
+ A::size()
+ } else {
+ // For zero-size items code like `ptr.add(offset)` always returns the same pointer.
+ // Therefore all items are at the same address,
+ // and any array size has capacity for infinitely many items.
+ // The capacity is limited by the bit width of the length field.
+ //
+ // `Vec` also does this:
+ // https://github.com/rust-lang/rust/blob/1.44.0/src/liballoc/raw_vec.rs#L186
+ //
+ // In our case, this also ensures that a smallvec of zero-size items never spills,
+ // and we never try to allocate zero bytes which `std::alloc::alloc` disallows.
+ core::usize::MAX
+ }
+ }
+
    /// The maximum number of elements this vector can hold inline
    ///
    /// This is a `&self` convenience around the associated function
    /// `inline_capacity()`; it does not depend on the current contents.
    #[inline]
    pub fn inline_size(&self) -> usize {
        Self::inline_capacity()
    }
+
+ /// The number of elements stored in the vector
+ #[inline]
+ pub fn len(&self) -> usize {
+ self.triple().1
+ }
+
+ /// Returns `true` if the vector is empty
+ #[inline]
+ pub fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+
+ /// The number of items the vector can hold without reallocating
+ #[inline]
+ pub fn capacity(&self) -> usize {
+ self.triple().2
+ }
+
    /// Returns a tuple with (data ptr, len, capacity)
    /// Useful to get all SmallVec properties with a single check of the current storage variant.
    #[inline]
    fn triple(&self) -> (ConstNonNull<A::Item>, usize, usize) {
        unsafe {
            if self.spilled() {
                // Spilled: the heap variant stores (ptr, len); the true heap
                // capacity lives in `self.capacity`.
                let (ptr, len) = self.data.heap();
                (ptr, len, self.capacity)
            } else {
                // Inline: `self.capacity` doubles as the length, and the
                // capacity is the fixed inline capacity.
                (self.data.inline(), self.capacity, Self::inline_capacity())
            }
        }
    }
+
    /// Returns a tuple with (data ptr, len ptr, capacity)
    ///
    /// The middle component is a *pointer to* the length field, so callers
    /// can update the length regardless of which variant stores it.
    #[inline]
    fn triple_mut(&mut self) -> (NonNull<A::Item>, &mut usize, usize) {
        unsafe {
            if self.spilled() {
                let (ptr, len_ptr) = self.data.heap_mut();
                (ptr, len_ptr, self.capacity)
            } else {
                (
                    self.data.inline_mut(),
                    // Inline storage keeps its length in `capacity`.
                    &mut self.capacity,
                    Self::inline_capacity(),
                )
            }
        }
    }
+
+ /// Returns `true` if the data has spilled into a separate heap-allocated buffer.
+ #[inline]
+ pub fn spilled(&self) -> bool {
+ self.capacity > Self::inline_capacity()
+ }
+
    /// Creates a draining iterator that removes the specified range in the vector
    /// and yields the removed items.
    ///
    /// Note 1: The element range is removed even if the iterator is only
    /// partially consumed or not consumed at all.
    ///
    /// Note 2: It is unspecified how many elements are removed from the vector
    /// if the `Drain` value is leaked.
    ///
    /// # Panics
    ///
    /// Panics if the starting point is greater than the end point or if
    /// the end point is greater than the length of the vector.
    pub fn drain<R>(&mut self, range: R) -> Drain<'_, A>
    where
        R: RangeBounds<usize>,
    {
        use core::ops::Bound::*;

        let len = self.len();
        let start = match range.start_bound() {
            Included(&n) => n,
            Excluded(&n) => n.checked_add(1).expect("Range start out of bounds"),
            Unbounded => 0,
        };
        let end = match range.end_bound() {
            Included(&n) => n.checked_add(1).expect("Range end out of bounds"),
            Excluded(&n) => n,
            Unbounded => len,
        };

        assert!(start <= end);
        assert!(end <= len);

        unsafe {
            // Leak safety: shrink the visible length up front so that if the
            // returned `Drain` is leaked, the drained elements are never
            // observed (or dropped) twice.
            self.set_len(start);

            let range_slice = slice::from_raw_parts(self.as_ptr().add(start), end - start);

            Drain {
                tail_start: end,
                tail_len: len - end,
                iter: range_slice.iter(),
                // Since self is a &mut, passing it to a function would invalidate the slice iterator.
                vec: NonNull::new_unchecked(self as *mut _),
            }
        }
    }
+
+
    #[cfg(feature = "drain_filter")]
    /// Creates an iterator which uses a closure to determine if an element should be removed.
    ///
    /// If the closure returns true, the element is removed and yielded. If the closure returns
    /// false, the element will remain in the vector and will not be yielded by the iterator.
    ///
    /// Using this method is equivalent to the following code:
    /// ```
    /// # use smallvec::SmallVec;
    /// # let some_predicate = |x: &mut i32| { *x == 2 || *x == 3 || *x == 6 };
    /// # let mut vec: SmallVec<[i32; 8]> = SmallVec::from_slice(&[1i32, 2, 3, 4, 5, 6]);
    /// let mut i = 0;
    /// while i < vec.len() {
    ///     if some_predicate(&mut vec[i]) {
    ///         let val = vec.remove(i);
    ///         // your code here
    ///     } else {
    ///         i += 1;
    ///     }
    /// }
    ///
    /// # assert_eq!(vec, SmallVec::<[i32; 8]>::from_slice(&[1i32, 4, 5]));
    /// ```
    ///
    /// But `drain_filter` is easier to use. `drain_filter` is also more efficient,
    /// because it can backshift the elements of the array in bulk.
    ///
    /// Note that `drain_filter` also lets you mutate every element in the filter closure,
    /// regardless of whether you choose to keep or remove it.
    ///
    /// # Examples
    ///
    /// Splitting an array into evens and odds, reusing the original allocation:
    ///
    /// ```
    /// # use smallvec::SmallVec;
    /// let mut numbers: SmallVec<[i32; 16]> = SmallVec::from_slice(&[1i32, 2, 3, 4, 5, 6, 8, 9, 11, 13, 14, 15]);
    ///
    /// let evens = numbers.drain_filter(|x| *x % 2 == 0).collect::<SmallVec<[i32; 16]>>();
    /// let odds = numbers;
    ///
    /// assert_eq!(evens, SmallVec::<[i32; 16]>::from_slice(&[2i32, 4, 6, 8, 14]));
    /// assert_eq!(odds, SmallVec::<[i32; 16]>::from_slice(&[1i32, 3, 5, 9, 11, 13, 15]));
    /// ```
    pub fn drain_filter<F>(&mut self, filter: F) -> DrainFilter<'_, A, F,>
    where
        F: FnMut(&mut A::Item) -> bool,
    {
        let old_len = self.len();

        // Guard against us getting leaked (leak amplification)
        unsafe {
            self.set_len(0);
        }

        DrainFilter { vec: self, idx: 0, del: 0, old_len, pred: filter, panic_flag: false }
    }
+
    /// Append an item to the vector.
    #[inline]
    pub fn push(&mut self, value: A::Item) {
        unsafe {
            let (mut ptr, mut len, cap) = self.triple_mut();
            if *len == cap {
                // Growing invalidates the pointers from `triple_mut`; after
                // growing, the data lives on the heap (the inline capacity was
                // already full), so re-fetch from the heap variant.
                self.reserve_one_unchecked();
                let (heap_ptr, heap_len) = self.data.heap_mut();
                ptr = heap_ptr;
                len = heap_len;
            }
            ptr::write(ptr.as_ptr().add(*len), value);
            *len += 1;
        }
    }
+
    /// Remove an item from the end of the vector and return it, or None if empty.
    #[inline]
    pub fn pop(&mut self) -> Option<A::Item> {
        unsafe {
            let (ptr, len_ptr, _) = self.triple_mut();
            let ptr: *const _ = ptr.as_ptr();
            if *len_ptr == 0 {
                return None;
            }
            let last_index = *len_ptr - 1;
            // Shrink the length *before* reading the element out, so the slot
            // is never both "owned by the vector" and moved out.
            *len_ptr = last_index;
            Some(ptr::read(ptr.add(last_index)))
        }
    }
+
    /// Moves all the elements of `other` into `self`, leaving `other` empty.
    ///
    /// # Example
    ///
    /// ```
    /// # use smallvec::{SmallVec, smallvec};
    /// let mut v0: SmallVec<[u8; 16]> = smallvec![1, 2, 3];
    /// let mut v1: SmallVec<[u8; 32]> = smallvec![4, 5, 6];
    /// v0.append(&mut v1);
    /// assert_eq!(*v0, [1, 2, 3, 4, 5, 6]);
    /// assert_eq!(*v1, []);
    /// ```
    pub fn append<B>(&mut self, other: &mut SmallVec<B>)
    where
        B: Array<Item = A::Item>,
    {
        // `drain(..)` removes the elements from `other` even if `extend`
        // stops early, so `other` always ends up empty.
        self.extend(other.drain(..))
    }
+
    /// Re-allocate to set the capacity to `max(new_cap, inline_size())`.
    ///
    /// Panics if `new_cap` is less than the vector's length
    /// or if the capacity computation overflows `usize`.
    pub fn grow(&mut self, new_cap: usize) {
        // `infallible` converts any allocation error into a panic.
        infallible(self.try_grow(new_cap))
    }
+
    /// Re-allocate to set the capacity to `max(new_cap, inline_size())`.
    ///
    /// Panics if `new_cap` is less than the vector's length
    pub fn try_grow(&mut self, new_cap: usize) -> Result<(), CollectionAllocErr> {
        unsafe {
            let unspilled = !self.spilled();
            let (ptr, &mut len, cap) = self.triple_mut();
            assert!(new_cap >= len);
            if new_cap <= Self::inline_capacity() {
                if unspilled {
                    // Already inline and the request fits inline: nothing to do.
                    return Ok(());
                }
                // Shrinking back into inline storage: copy the elements in,
                // then free the old heap buffer.
                self.data = SmallVecData::from_inline(MaybeUninit::uninit());
                ptr::copy_nonoverlapping(ptr.as_ptr(), self.data.inline_mut().as_ptr(), len);
                self.capacity = len;
                deallocate(ptr, cap);
            } else if new_cap != cap {
                let layout = layout_array::<A::Item>(new_cap)?;
                debug_assert!(layout.size() > 0);
                let new_alloc;
                if unspilled {
                    // First spill: allocate fresh and copy out of the inline buffer.
                    new_alloc = NonNull::new(alloc::alloc::alloc(layout))
                        .ok_or(CollectionAllocErr::AllocErr { layout })?
                        .cast();
                    ptr::copy_nonoverlapping(ptr.as_ptr(), new_alloc.as_ptr(), len);
                } else {
                    // This should never fail since the same succeeded
                    // when previously allocating `ptr`.
                    let old_layout = layout_array::<A::Item>(cap)?;

                    let new_ptr =
                        alloc::alloc::realloc(ptr.as_ptr() as *mut u8, old_layout, layout.size());
                    new_alloc = NonNull::new(new_ptr)
                        .ok_or(CollectionAllocErr::AllocErr { layout })?
                        .cast();
                }
                self.data = SmallVecData::from_heap(new_alloc, len);
                self.capacity = new_cap;
            }
            Ok(())
        }
    }
+
    /// Reserve capacity for `additional` more elements to be inserted.
    ///
    /// May reserve more space to avoid frequent reallocations.
    ///
    /// Panics if the capacity computation overflows `usize`.
    #[inline]
    pub fn reserve(&mut self, additional: usize) {
        // `infallible` converts any allocation error into a panic.
        infallible(self.try_reserve(additional))
    }
+
    /// Internal method used to grow in push() and insert(), where we know already we have to grow.
    ///
    /// Marked `#[cold]` so the hot push/insert paths stay small.
    #[cold]
    fn reserve_one_unchecked(&mut self) {
        debug_assert_eq!(self.len(), self.capacity());
        // Power-of-two growth gives amortized O(1) appends.
        let new_cap = self.len()
            .checked_add(1)
            .and_then(usize::checked_next_power_of_two)
            .expect("capacity overflow");
        infallible(self.try_grow(new_cap))
    }
+
    /// Reserve capacity for `additional` more elements to be inserted.
    ///
    /// May reserve more space to avoid frequent reallocations.
    pub fn try_reserve(&mut self, additional: usize) -> Result<(), CollectionAllocErr> {
        // prefer triple_mut() even if triple() would work so that the optimizer removes duplicated
        // calls to it from callers.
        let (_, &mut len, cap) = self.triple_mut();
        if cap - len >= additional {
            return Ok(());
        }
        // Round the requested capacity up to a power of two (growth policy).
        let new_cap = len
            .checked_add(additional)
            .and_then(usize::checked_next_power_of_two)
            .ok_or(CollectionAllocErr::CapacityOverflow)?;
        self.try_grow(new_cap)
    }
+
    /// Reserve the minimum capacity for `additional` more elements to be inserted.
    ///
    /// Panics if the new capacity overflows `usize`.
    pub fn reserve_exact(&mut self, additional: usize) {
        // `infallible` converts any allocation error into a panic.
        infallible(self.try_reserve_exact(additional))
    }
+
    /// Reserve the minimum capacity for `additional` more elements to be inserted.
    ///
    /// Unlike `try_reserve`, this does not round the capacity up, so repeated
    /// calls may reallocate more often.
    pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), CollectionAllocErr> {
        let (_, &mut len, cap) = self.triple_mut();
        if cap - len >= additional {
            return Ok(());
        }
        let new_cap = len
            .checked_add(additional)
            .ok_or(CollectionAllocErr::CapacityOverflow)?;
        self.try_grow(new_cap)
    }
+
    /// Shrink the capacity of the vector as much as possible.
    ///
    /// When possible, this will move data from an external heap buffer to the vector's inline
    /// storage.
    pub fn shrink_to_fit(&mut self) {
        if !self.spilled() {
            // Inline storage is already minimal.
            return;
        }
        let len = self.len();
        if self.inline_size() >= len {
            // The contents fit inline: copy them back and free the heap buffer.
            unsafe {
                let (ptr, len) = self.data.heap();
                self.data = SmallVecData::from_inline(MaybeUninit::uninit());
                ptr::copy_nonoverlapping(ptr.as_ptr(), self.data.inline_mut().as_ptr(), len);
                deallocate(ptr.0, self.capacity);
                self.capacity = len;
            }
        } else if self.capacity() > len {
            // Still needs the heap, but the buffer can be reallocated smaller.
            self.grow(len);
        }
    }
+
    /// Shorten the vector, keeping the first `len` elements and dropping the rest.
    ///
    /// If `len` is greater than or equal to the vector's current length, this has no
    /// effect.
    ///
    /// This does not re-allocate. If you want the vector's capacity to shrink, call
    /// `shrink_to_fit` after truncating.
    pub fn truncate(&mut self, len: usize) {
        unsafe {
            let (ptr, len_ptr, _) = self.triple_mut();
            let ptr = ptr.as_ptr();
            while len < *len_ptr {
                let last_index = *len_ptr - 1;
                // Panic safety: decrement the length before dropping, so a
                // panicking destructor can't cause a double drop later.
                *len_ptr = last_index;
                ptr::drop_in_place(ptr.add(last_index));
            }
        }
    }
+
+ /// Extracts a slice containing the entire vector.
+ ///
+ /// Equivalent to `&s[..]`.
+ pub fn as_slice(&self) -> &[A::Item] {
+ self
+ }
+
+ /// Extracts a mutable slice of the entire vector.
+ ///
+ /// Equivalent to `&mut s[..]`.
+ pub fn as_mut_slice(&mut self) -> &mut [A::Item] {
+ self
+ }
+
    /// Remove the element at position `index`, replacing it with the last element.
    ///
    /// This does not preserve ordering, but is O(1).
    ///
    /// Panics if `index` is out of bounds.
    #[inline]
    pub fn swap_remove(&mut self, index: usize) -> A::Item {
        let len = self.len();
        // `swap` bounds-checks `index` (and panics on an empty vector), so
        // after it succeeds the vector is non-empty and `pop` must be `Some`.
        self.swap(len - 1, index);
        self.pop()
            .unwrap_or_else(|| unsafe { unreachable_unchecked() })
    }
+
    /// Remove all elements from the vector.
    ///
    /// Drops every element in place; the capacity (and any heap buffer) is kept.
    #[inline]
    pub fn clear(&mut self) {
        self.truncate(0);
    }
+
    /// Remove and return the element at position `index`, shifting all elements after it to the
    /// left.
    ///
    /// Panics if `index` is out of bounds.
    pub fn remove(&mut self, index: usize) -> A::Item {
        unsafe {
            let (ptr, len_ptr, _) = self.triple_mut();
            let len = *len_ptr;
            assert!(index < len);
            // Shrink the length first so a panic below can't expose the moved-out slot.
            *len_ptr = len - 1;
            let ptr = ptr.as_ptr().add(index);
            let item = ptr::read(ptr);
            // Close the gap by shifting the tail one slot to the left.
            ptr::copy(ptr.add(1), ptr, len - index - 1);
            item
        }
    }
+
+ /// Insert an element at position `index`, shifting all elements after it to the right.
+ ///
+ /// Panics if `index > len`.
+ pub fn insert(&mut self, index: usize, element: A::Item) {
+ unsafe {
+ let (mut ptr, mut len_ptr, cap) = self.triple_mut();
+ if *len_ptr == cap {
+ self.reserve_one_unchecked();
+ let (heap_ptr, heap_len_ptr) = self.data.heap_mut();
+ ptr = heap_ptr;
+ len_ptr = heap_len_ptr;
+ }
+ let mut ptr = ptr.as_ptr();
+ let len = *len_ptr;
+ ptr = ptr.add(index);
+ if index < len {
+ ptr::copy(ptr, ptr.add(1), len - index);
+ } else if index == len {
+ // No elements need shifting.
+ } else {
+ panic!("index exceeds length");
+ }
+ *len_ptr = len + 1;
+ ptr::write(ptr, element);
+ }
+ }
+
    /// Insert multiple elements at position `index`, shifting all following elements toward the
    /// back.
    ///
    /// Panics if `index` is out of bounds.
    pub fn insert_many<I: IntoIterator<Item = A::Item>>(&mut self, index: usize, iterable: I) {
        let mut iter = iterable.into_iter();
        if index == self.len() {
            // Appending at the end needs no shifting; `extend` handles it.
            return self.extend(iter);
        }

        let (lower_size_bound, _) = iter.size_hint();
        assert!(lower_size_bound <= core::isize::MAX as usize); // Ensure offset is indexable
        assert!(index + lower_size_bound >= index); // Protect against overflow

        let mut num_added = 0;
        let old_len = self.len();
        assert!(index <= old_len);

        unsafe {
            // Reserve space for `lower_size_bound` elements.
            self.reserve(lower_size_bound);
            let start = self.as_mut_ptr();
            let ptr = start.add(index);

            // Move the trailing elements.
            ptr::copy(ptr, ptr.add(lower_size_bound), old_len - index);

            // In case the iterator panics, don't double-drop the items we just copied above.
            self.set_len(0);
            let mut guard = DropOnPanic {
                start,
                skip: index..(index + lower_size_bound),
                len: old_len + lower_size_bound,
            };

            // The set_len above invalidates the previous pointers, so we must re-create them.
            let start = self.as_mut_ptr();
            let ptr = start.add(index);

            // Fill the gap from the iterator, narrowing the guard's skip
            // range as each slot becomes initialized.
            while num_added < lower_size_bound {
                let element = match iter.next() {
                    Some(x) => x,
                    None => break,
                };
                let cur = ptr.add(num_added);
                ptr::write(cur, element);
                guard.skip.start += 1;
                num_added += 1;
            }

            if num_added < lower_size_bound {
                // Iterator provided fewer elements than the hint. Move the tail backward.
                ptr::copy(
                    ptr.add(lower_size_bound),
                    ptr.add(num_added),
                    old_len - index,
                );
            }
            // There are no more duplicate or uninitialized slots, so the guard is not needed.
            self.set_len(old_len + num_added);
            mem::forget(guard);
        }

        // Insert any remaining elements one-by-one.
        for element in iter {
            self.insert(index + num_added, element);
            num_added += 1;
        }

        // Drops every initialized element except those in `skip` if the
        // filling loop above panics.
        struct DropOnPanic<T> {
            start: *mut T,
            skip: Range<usize>, // Space we copied-out-of, but haven't written-to yet.
            len: usize,
        }

        impl<T> Drop for DropOnPanic<T> {
            fn drop(&mut self) {
                for i in 0..self.len {
                    if !self.skip.contains(&i) {
                        unsafe {
                            ptr::drop_in_place(self.start.add(i));
                        }
                    }
                }
            }
        }
    }
+
    /// Convert a SmallVec to a Vec, without reallocating if the SmallVec has already spilled onto
    /// the heap.
    pub fn into_vec(mut self) -> Vec<A::Item> {
        if self.spilled() {
            unsafe {
                // Transfer ownership of the heap buffer to the Vec; `forget`
                // prevents our own Drop from freeing it a second time.
                let (ptr, &mut len) = self.data.heap_mut();
                let v = Vec::from_raw_parts(ptr.as_ptr(), len, self.capacity);
                mem::forget(self);
                v
            }
        } else {
            // Inline: the elements must be moved into a fresh allocation.
            self.into_iter().collect()
        }
    }
+
    /// Converts a `SmallVec` into a `Box<[T]>` without reallocating if the `SmallVec` has already spilled
    /// onto the heap.
    ///
    /// Note that this will drop any excess capacity.
    pub fn into_boxed_slice(self) -> Box<[A::Item]> {
        // `Vec::into_boxed_slice` shrinks to fit, dropping excess capacity.
        self.into_vec().into_boxed_slice()
    }
+
    /// Convert the SmallVec into an `A` if possible. Otherwise return `Err(Self)`.
    ///
    /// This method returns `Err(Self)` if the SmallVec is too short (and the `A` contains uninitialized elements),
    /// or if the SmallVec is too long (and all the elements were spilled to the heap).
    pub fn into_inner(self) -> Result<A, Self> {
        if self.spilled() || self.len() != A::size() {
            // Note: A::size, not Self::inline_capacity
            Err(self)
        } else {
            unsafe {
                // Move the inline storage out and suppress Drop so the
                // elements are not dropped twice.
                let data = ptr::read(&self.data);
                mem::forget(self);
                Ok(data.into_inline().assume_init())
            }
        }
    }
+
+ /// Retains only the elements specified by the predicate.
+ ///
+ /// In other words, remove all elements `e` such that `f(&e)` returns `false`.
+ /// This method operates in place and preserves the order of the retained
+ /// elements.
+ pub fn retain<F: FnMut(&mut A::Item) -> bool>(&mut self, mut f: F) {
+ let mut del = 0;
+ let len = self.len();
+ for i in 0..len {
+ if !f(&mut self[i]) {
+ del += 1;
+ } else if del > 0 {
+ self.swap(i - del, i);
+ }
+ }
+ self.truncate(len - del);
+ }
+
    /// Retains only the elements specified by the predicate.
    ///
    /// This method is identical in behaviour to [`retain`]; it is included only
    /// to maintain API compatibility with `std::Vec`, where the methods are
    /// separate for historical reasons.
    pub fn retain_mut<F: FnMut(&mut A::Item) -> bool>(&mut self, f: F) {
        self.retain(f)
    }
+
    /// Removes consecutive duplicate elements.
    ///
    /// Only adjacent duplicates are removed; sort first to remove all duplicates.
    pub fn dedup(&mut self)
    where
        A::Item: PartialEq<A::Item>,
    {
        self.dedup_by(|a, b| a == b);
    }
+
    /// Removes consecutive duplicate elements using the given equality relation.
    pub fn dedup_by<F>(&mut self, mut same_bucket: F)
    where
        F: FnMut(&mut A::Item, &mut A::Item) -> bool,
    {
        // See the implementation of Vec::dedup_by in the
        // standard library for an explanation of this algorithm.
        let len = self.len();
        if len <= 1 {
            return;
        }

        let ptr = self.as_mut_ptr();
        // `w` is the write cursor: elements [0, w) are the deduplicated prefix.
        let mut w: usize = 1;

        unsafe {
            for r in 1..len {
                let p_r = ptr.add(r);
                let p_wm1 = ptr.add(w - 1);
                if !same_bucket(&mut *p_r, &mut *p_wm1) {
                    if r != w {
                        let p_w = p_wm1.add(1);
                        // Swap (not overwrite) so the duplicates collect at the
                        // tail and are dropped exactly once by `truncate`.
                        mem::swap(&mut *p_r, &mut *p_w);
                    }
                    w += 1;
                }
            }
        }

        self.truncate(w);
    }
+
    /// Removes consecutive elements that map to the same key.
    ///
    /// `key` is invoked on both elements of every adjacent pair.
    pub fn dedup_by_key<F, K>(&mut self, mut key: F)
    where
        F: FnMut(&mut A::Item) -> K,
        K: PartialEq<K>,
    {
        self.dedup_by(|a, b| key(a) == key(b));
    }
+
    /// Resizes the `SmallVec` in-place so that `len` is equal to `new_len`.
    ///
    /// If `new_len` is greater than `len`, the `SmallVec` is extended by the difference, with each
    /// additional slot filled with the result of calling the closure `f`. The return values from `f`
    /// will end up in the `SmallVec` in the order they have been generated.
    ///
    /// If `new_len` is less than `len`, the `SmallVec` is simply truncated.
    ///
    /// This method uses a closure to create new values on every push. If you'd rather `Clone` a given
    /// value, use `resize`. If you want to use the `Default` trait to generate values, you can pass
    /// `Default::default()` as the second argument.
    ///
    /// Added for std::vec::Vec compatibility (added in Rust 1.33.0)
    ///
    /// ```
    /// # use smallvec::{smallvec, SmallVec};
    /// let mut vec : SmallVec<[_; 4]> = smallvec![1, 2, 3];
    /// vec.resize_with(5, Default::default);
    /// assert_eq!(&*vec, &[1, 2, 3, 0, 0]);
    ///
    /// let mut vec : SmallVec<[_; 4]> = smallvec![];
    /// let mut p = 1;
    /// vec.resize_with(4, || { p *= 2; p });
    /// assert_eq!(&*vec, &[2, 4, 8, 16]);
    /// ```
    pub fn resize_with<F>(&mut self, new_len: usize, f: F)
    where
        F: FnMut() -> A::Item,
    {
        let old_len = self.len();
        if old_len < new_len {
            let mut f = f;
            let additional = new_len - old_len;
            // Reserve once up front so the push loop never reallocates.
            self.reserve(additional);
            for _ in 0..additional {
                self.push(f());
            }
        } else if old_len > new_len {
            self.truncate(new_len);
        }
    }
+
    /// Creates a `SmallVec` directly from the raw components of another
    /// `SmallVec`.
    ///
    /// # Safety
    ///
    /// This is highly unsafe, due to the number of invariants that aren't
    /// checked:
    ///
    /// * `ptr` needs to have been previously allocated via `SmallVec` for its
    ///   spilled storage (at least, it's highly likely to be incorrect if it
    ///   wasn't).
    /// * `ptr`'s `A::Item` type needs to be the same size and alignment that
    ///   it was allocated with
    /// * `length` needs to be less than or equal to `capacity`.
    /// * `capacity` needs to be the capacity that the pointer was allocated
    ///   with.
    ///
    /// Violating these may cause problems like corrupting the allocator's
    /// internal data structures.
    ///
    /// Additionally, `capacity` must be greater than the amount of inline
    /// storage `A` has; that is, the new `SmallVec` must need to spill over
    /// into heap allocated storage. This condition is asserted against.
    ///
    /// The ownership of `ptr` is effectively transferred to the
    /// `SmallVec` which may then deallocate, reallocate or change the
    /// contents of memory pointed to by the pointer at will. Ensure
    /// that nothing else uses the pointer after calling this
    /// function.
    ///
    /// # Examples
    ///
    /// ```
    /// # #[macro_use] extern crate smallvec;
    /// # use smallvec::SmallVec;
    /// use std::mem;
    /// use std::ptr;
    ///
    /// fn main() {
    ///     let mut v: SmallVec<[_; 1]> = smallvec![1, 2, 3];
    ///
    ///     // Pull out the important parts of `v`.
    ///     let p = v.as_mut_ptr();
    ///     let len = v.len();
    ///     let cap = v.capacity();
    ///     let spilled = v.spilled();
    ///
    ///     unsafe {
    ///         // Forget all about `v`. The heap allocation that stored the
    ///         // three values won't be deallocated.
    ///         mem::forget(v);
    ///
    ///         // Overwrite memory with [4, 5, 6].
    ///         //
    ///         // This is only safe if `spilled` is true! Otherwise, we are
    ///         // writing into the old `SmallVec`'s inline storage on the
    ///         // stack.
    ///         assert!(spilled);
    ///         for i in 0..len {
    ///             ptr::write(p.add(i), 4 + i);
    ///         }
    ///
    ///         // Put everything back together into a SmallVec with a different
    ///         // amount of inline storage, but which is still less than `cap`.
    ///         let rebuilt = SmallVec::<[_; 2]>::from_raw_parts(p, len, cap);
    ///         assert_eq!(&*rebuilt, &[4, 5, 6]);
    ///     }
    /// }
    /// ```
    #[inline]
    pub unsafe fn from_raw_parts(ptr: *mut A::Item, length: usize, capacity: usize) -> SmallVec<A> {
        // SAFETY: We require caller to provide same ptr as we alloc
        // and we never alloc null pointer.
        let ptr = unsafe {
            debug_assert!(!ptr.is_null(), "Called `from_raw_parts` with null pointer.");
            NonNull::new_unchecked(ptr)
        };
        // The reconstructed vector must be spilled; an inline-range capacity
        // would make `spilled()` misclassify the storage.
        assert!(capacity > Self::inline_capacity());
        SmallVec {
            capacity,
            data: SmallVecData::from_heap(ptr, length),
        }
    }
+
+ /// Returns a raw pointer to the vector's buffer.
+ pub fn as_ptr(&self) -> *const A::Item {
+ // We shadow the slice method of the same name to avoid going through
+ // `deref`, which creates an intermediate reference that may place
+ // additional safety constraints on the contents of the slice.
+ self.triple().0.as_ptr()
+ }
+
+ /// Returns a raw mutable pointer to the vector's buffer.
+ pub fn as_mut_ptr(&mut self) -> *mut A::Item {
+ // We shadow the slice method of the same name to avoid going through
+ // `deref_mut`, which creates an intermediate reference that may place
+ // additional safety constraints on the contents of the slice.
+ self.triple_mut().0.as_ptr()
+ }
+}
+
impl<A: Array> SmallVec<A>
where
    A::Item: Copy,
{
    /// Copy the elements from a slice into a new `SmallVec`.
    ///
    /// For slices of `Copy` types, this is more efficient than `SmallVec::from(slice)`.
    pub fn from_slice(slice: &[A::Item]) -> Self {
        let len = slice.len();
        if len <= Self::inline_capacity() {
            SmallVec {
                capacity: len,
                data: SmallVecData::from_inline(unsafe {
                    // `Copy` items: a raw byte copy fully initializes the
                    // first `len` slots of the inline buffer.
                    let mut data: MaybeUninit<A> = MaybeUninit::uninit();
                    ptr::copy_nonoverlapping(
                        slice.as_ptr(),
                        data.as_mut_ptr() as *mut A::Item,
                        len,
                    );
                    data
                }),
            }
        } else {
            // Too large for inline storage: build a Vec and take ownership of
            // its heap buffer without copying again.
            let mut b = slice.to_vec();
            let cap = b.capacity();
            let ptr = NonNull::new(b.as_mut_ptr()).expect("Vec always contain non null pointers.");
            mem::forget(b);
            SmallVec {
                capacity: cap,
                data: SmallVecData::from_heap(ptr, len),
            }
        }
    }

    /// Copy elements from a slice into the vector at position `index`, shifting any following
    /// elements toward the back.
    ///
    /// For slices of `Copy` types, this is more efficient than `insert`.
    #[inline]
    pub fn insert_from_slice(&mut self, index: usize, slice: &[A::Item]) {
        self.reserve(slice.len());

        let len = self.len();
        assert!(index <= len);

        unsafe {
            let slice_ptr = slice.as_ptr();
            let ptr = self.as_mut_ptr().add(index);
            // Shift the tail right to open a gap, fill the gap from `slice`,
            // and only then publish the new length.
            ptr::copy(ptr, ptr.add(slice.len()), len - index);
            ptr::copy_nonoverlapping(slice_ptr, ptr, slice.len());
            self.set_len(len + slice.len());
        }
    }

    /// Copy elements from a slice and append them to the vector.
    ///
    /// For slices of `Copy` types, this is more efficient than `extend`.
    #[inline]
    pub fn extend_from_slice(&mut self, slice: &[A::Item]) {
        let len = self.len();
        self.insert_from_slice(len, slice);
    }
}
+
impl<A: Array> SmallVec<A>
where
    A::Item: Clone,
{
    /// Resizes the vector so that its length is equal to `len`.
    ///
    /// If `len` is less than the current length, the vector is simply truncated.
    ///
    /// If `len` is greater than the current length, `value` is appended to the
    /// vector until its length equals `len`.
    pub fn resize(&mut self, len: usize, value: A::Item) {
        let old_len = self.len();

        if len > old_len {
            self.extend(repeat(value).take(len - old_len));
        } else {
            self.truncate(len);
        }
    }

    /// Creates a `SmallVec` with `n` copies of `elem`.
    /// ```
    /// use smallvec::SmallVec;
    ///
    /// let v = SmallVec::<[char; 128]>::from_elem('d', 2);
    /// assert_eq!(v, SmallVec::from_buf(['d', 'd']));
    /// ```
    pub fn from_elem(elem: A::Item, n: usize) -> Self {
        if n > Self::inline_capacity() {
            // Doesn't fit inline: let `Vec` do the work and take its buffer.
            vec![elem; n].into()
        } else {
            let mut v = SmallVec::<A>::new();
            unsafe {
                let (ptr, len_ptr, _) = v.triple_mut();
                let ptr = ptr.as_ptr();
                // Panic safety: `SetLenOnDrop` commits the length even if a
                // `clone()` below panics, so written elements are dropped.
                let mut local_len = SetLenOnDrop::new(len_ptr);

                for i in 0..n {
                    ::core::ptr::write(ptr.add(i), elem.clone());
                    local_len.increment_len(1);
                }
            }
            v
        }
    }
}
+
impl<A: Array> ops::Deref for SmallVec<A> {
    type Target = [A::Item];
    #[inline]
    fn deref(&self) -> &[A::Item] {
        unsafe {
            // The first `len` elements are always initialized, for either
            // storage variant, so this slice is valid.
            let (ptr, len, _) = self.triple();
            slice::from_raw_parts(ptr.as_ptr(), len)
        }
    }
}
+
impl<A: Array> ops::DerefMut for SmallVec<A> {
    #[inline]
    fn deref_mut(&mut self) -> &mut [A::Item] {
        unsafe {
            // Same invariant as `deref`: the first `len` elements are initialized.
            let (ptr, &mut len, _) = self.triple_mut();
            slice::from_raw_parts_mut(ptr.as_ptr(), len)
        }
    }
}
+
+impl<A: Array> AsRef<[A::Item]> for SmallVec<A> {
+ #[inline]
+ fn as_ref(&self) -> &[A::Item] {
+ self
+ }
+}
+
+impl<A: Array> AsMut<[A::Item]> for SmallVec<A> {
+ #[inline]
+ fn as_mut(&mut self) -> &mut [A::Item] {
+ self
+ }
+}
+
+impl<A: Array> Borrow<[A::Item]> for SmallVec<A> {
+ #[inline]
+ fn borrow(&self) -> &[A::Item] {
+ self
+ }
+}
+
+impl<A: Array> BorrowMut<[A::Item]> for SmallVec<A> {
+ #[inline]
+ fn borrow_mut(&mut self) -> &mut [A::Item] {
+ self
+ }
+}
+
#[cfg(feature = "write")]
#[cfg_attr(docsrs, doc(cfg(feature = "write")))]
impl<A: Array<Item = u8>> io::Write for SmallVec<A> {
    /// Appends the whole buffer to the vector; always succeeds.
    #[inline]
    fn write(&mut self, data: &[u8]) -> io::Result<usize> {
        self.extend_from_slice(data);
        Ok(data.len())
    }

    /// Appends the whole buffer to the vector; always succeeds.
    #[inline]
    fn write_all(&mut self, data: &[u8]) -> io::Result<()> {
        self.extend_from_slice(data);
        Ok(())
    }

    /// Nothing is buffered outside the vector itself, so flushing is a no-op.
    #[inline]
    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }
}
+
#[cfg(feature = "serde")]
#[cfg_attr(docsrs, doc(cfg(feature = "serde")))]
impl<A: Array> Serialize for SmallVec<A>
where
    A::Item: Serialize,
{
    /// Serializes as a sequence, exactly like a slice or `Vec` would.
    fn serialize<S: Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        let mut seq = serializer.serialize_seq(Some(self.len()))?;
        for element in self.iter() {
            seq.serialize_element(element)?;
        }
        seq.end()
    }
}
+
#[cfg(feature = "serde")]
#[cfg_attr(docsrs, doc(cfg(feature = "serde")))]
impl<'de, A: Array> Deserialize<'de> for SmallVec<A>
where
    A::Item: Deserialize<'de>,
{
    fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
        // Delegate to a sequence visitor; see `SmallVecVisitor::visit_seq`.
        deserializer.deserialize_seq(SmallVecVisitor {
            phantom: PhantomData,
        })
    }
}
+
/// Serde visitor that builds a `SmallVec` from a serialized sequence.
#[cfg(feature = "serde")]
struct SmallVecVisitor<A> {
    // Ties the visitor to the target array type without storing a value of it.
    phantom: PhantomData<A>,
}
+
#[cfg(feature = "serde")]
impl<'de, A: Array> Visitor<'de> for SmallVecVisitor<A>
where
    A::Item: Deserialize<'de>,
{
    type Value = SmallVec<A>;

    fn expecting(&self, formatter: &mut fmt::Formatter<'_>) -> fmt::Result {
        formatter.write_str("a sequence")
    }

    fn visit_seq<B>(self, mut seq: B) -> Result<Self::Value, B::Error>
    where
        B: SeqAccess<'de>,
    {
        use serde::de::Error;
        // The size hint comes from untrusted input; `try_reserve` surfaces
        // allocation failure as a deserialization error instead of aborting.
        let len = seq.size_hint().unwrap_or(0);
        let mut values = SmallVec::new();
        values.try_reserve(len).map_err(B::Error::custom)?;

        while let Some(value) = seq.next_element()? {
            values.push(value);
        }

        Ok(values)
    }
}
+
/// Specialized slice-to-`SmallVec` conversion; allows a faster path to be
/// selected for `Copy` element types (see the `specialization` feature).
#[cfg(feature = "specialization")]
trait SpecFrom<A: Array, S> {
    fn spec_from(slice: S) -> SmallVec<A>;
}
+
+#[cfg(feature = "specialization")]
+mod specialization;
+
+#[cfg(feature = "arbitrary")]
+mod arbitrary;
+
#[cfg(feature = "specialization")]
impl<'a, A: Array> SpecFrom<A, &'a [A::Item]> for SmallVec<A>
where
    A::Item: Copy,
{
    #[inline]
    fn spec_from(slice: &'a [A::Item]) -> SmallVec<A> {
        // `Copy` items can use the memcpy-based `from_slice` fast path.
        SmallVec::from_slice(slice)
    }
}
+
impl<'a, A: Array> From<&'a [A::Item]> for SmallVec<A>
where
    A::Item: Clone,
{
    // Without specialization: clone element by element.
    #[cfg(not(feature = "specialization"))]
    #[inline]
    fn from(slice: &'a [A::Item]) -> SmallVec<A> {
        slice.iter().cloned().collect()
    }

    // With specialization: dispatch through `SpecFrom`, which picks a
    // memcpy-based path for `Copy` items.
    #[cfg(feature = "specialization")]
    #[inline]
    fn from(slice: &'a [A::Item]) -> SmallVec<A> {
        SmallVec::spec_from(slice)
    }
}
+
+impl<A: Array> From<Vec<A::Item>> for SmallVec<A> {
+ #[inline]
+ fn from(vec: Vec<A::Item>) -> SmallVec<A> {
+ SmallVec::from_vec(vec)
+ }
+}
+
+impl<A: Array> From<A> for SmallVec<A> {
+ #[inline]
+ fn from(array: A) -> SmallVec<A> {
+ SmallVec::from_buf(array)
+ }
+}
+
+impl<A: Array, I: SliceIndex<[A::Item]>> ops::Index<I> for SmallVec<A> {
+ type Output = I::Output;
+
+ fn index(&self, index: I) -> &I::Output {
+ &(**self)[index]
+ }
+}
+
+impl<A: Array, I: SliceIndex<[A::Item]>> ops::IndexMut<I> for SmallVec<A> {
+ fn index_mut(&mut self, index: I) -> &mut I::Output {
+ &mut (&mut **self)[index]
+ }
+}
+
// The trait itself is deprecated (hence the `allow`); this impl is kept for
// backward compatibility and simply forwards to the inherent method.
#[allow(deprecated)]
impl<A: Array> ExtendFromSlice<A::Item> for SmallVec<A>
where
    A::Item: Copy,
{
    fn extend_from_slice(&mut self, other: &[A::Item]) {
        SmallVec::extend_from_slice(self, other)
    }
}
+
+impl<A: Array> FromIterator<A::Item> for SmallVec<A> {
+ #[inline]
+ fn from_iter<I: IntoIterator<Item = A::Item>>(iterable: I) -> SmallVec<A> {
+ let mut v = SmallVec::new();
+ v.extend(iterable);
+ v
+ }
+}
+
impl<A: Array> Extend<A::Item> for SmallVec<A> {
    fn extend<I: IntoIterator<Item = A::Item>>(&mut self, iterable: I) {
        let mut iter = iterable.into_iter();
        // Reserve up-front based on the iterator's lower size bound.
        let (lower_size_bound, _) = iter.size_hint();
        self.reserve(lower_size_bound);

        unsafe {
            let (ptr, len_ptr, cap) = self.triple_mut();
            let ptr = ptr.as_ptr();
            // Panic safety: `SetLenOnDrop` commits the length even if
            // `iter.next()` panics, so written elements are dropped.
            let mut len = SetLenOnDrop::new(len_ptr);
            // Fast path: write directly into the reserved space while it lasts.
            while len.get() < cap {
                if let Some(out) = iter.next() {
                    ptr::write(ptr.add(len.get()), out);
                    len.increment_len(1);
                } else {
                    return;
                }
            }
        }

        // Slow path: the size hint undershot; push (and possibly grow) per item.
        for elem in iter {
            self.push(elem);
        }
    }
}
+
+// Debug-formats like a slice/`Vec`, e.g. `[1, 2, 3]`.
+impl<A: Array> fmt::Debug for SmallVec<A>
+where
+    A::Item: fmt::Debug,
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_list().entries(self.iter()).finish()
+    }
+}
+
+// The default value is an empty, inline (non-spilled) vector.
+impl<A: Array> Default for SmallVec<A> {
+    #[inline]
+    fn default() -> SmallVec<A> {
+        SmallVec::new()
+    }
+}
+
+// Variant of the `Drop` impl below with a `#[may_dangle]` eyepatch (nightly
+// `dropck` extension): promises the destructor only drops `A::Item` values
+// and never otherwise inspects them, allowing borrows of items to expire
+// before the vector itself.
+#[cfg(feature = "may_dangle")]
+unsafe impl<#[may_dangle] A: Array> Drop for SmallVec<A> {
+    fn drop(&mut self) {
+        unsafe {
+            if self.spilled() {
+                // Rebuild the owning `Vec` from the raw parts; dropping it
+                // frees both the elements and the heap allocation.
+                let (ptr, &mut len) = self.data.heap_mut();
+                Vec::from_raw_parts(ptr.as_ptr(), len, self.capacity);
+            } else {
+                // Inline storage: just drop the initialized elements.
+                ptr::drop_in_place(&mut self[..]);
+            }
+        }
+    }
+}
+
+#[cfg(not(feature = "may_dangle"))]
+impl<A: Array> Drop for SmallVec<A> {
+    fn drop(&mut self) {
+        unsafe {
+            if self.spilled() {
+                // Rebuild the owning `Vec` from the raw parts; dropping it
+                // frees both the elements and the heap allocation.
+                let (ptr, &mut len) = self.data.heap_mut();
+                drop(Vec::from_raw_parts(ptr.as_ptr(), len, self.capacity));
+            } else {
+                // Inline storage: just drop the initialized elements.
+                ptr::drop_in_place(&mut self[..]);
+            }
+        }
+    }
+}
+
+impl<A: Array> Clone for SmallVec<A>
+where
+    A::Item: Clone,
+{
+    #[inline]
+    fn clone(&self) -> SmallVec<A> {
+        // Goes through `From<&[A::Item]>`, which uses the specialized
+        // `from_slice` path for `Copy` items when specialization is enabled.
+        SmallVec::from(self.as_slice())
+    }
+
+    fn clone_from(&mut self, source: &Self) {
+        // Inspired from `impl Clone for Vec`.
+
+        // drop anything that will not be overwritten
+        self.truncate(source.len());
+
+        // self.len <= other.len due to the truncate above, so the
+        // slices here are always in-bounds.
+        let (init, tail) = source.split_at(self.len());
+
+        // reuse the contained values' allocations/resources.
+        self.clone_from_slice(init);
+        self.extend(tail.iter().cloned());
+    }
+}
+
+// Comparison, ordering, and hashing all delegate to the corresponding slice
+// impls via `Deref`, so a `SmallVec` compares/hashes exactly like `[A::Item]`
+// (and therefore like a `Vec` with the same contents).
+
+// Equality is allowed across different backing-array sizes (`A` vs `B`).
+impl<A: Array, B: Array> PartialEq<SmallVec<B>> for SmallVec<A>
+where
+    A::Item: PartialEq<B::Item>,
+{
+    #[inline]
+    fn eq(&self, other: &SmallVec<B>) -> bool {
+        self[..] == other[..]
+    }
+}
+
+impl<A: Array> Eq for SmallVec<A> where A::Item: Eq {}
+
+impl<A: Array> PartialOrd for SmallVec<A>
+where
+    A::Item: PartialOrd,
+{
+    #[inline]
+    fn partial_cmp(&self, other: &SmallVec<A>) -> Option<cmp::Ordering> {
+        PartialOrd::partial_cmp(&**self, &**other)
+    }
+}
+
+impl<A: Array> Ord for SmallVec<A>
+where
+    A::Item: Ord,
+{
+    #[inline]
+    fn cmp(&self, other: &SmallVec<A>) -> cmp::Ordering {
+        Ord::cmp(&**self, &**other)
+    }
+}
+
+impl<A: Array> Hash for SmallVec<A>
+where
+    A::Item: Hash,
+{
+    fn hash<H: Hasher>(&self, state: &mut H) {
+        (**self).hash(state)
+    }
+}
+
+// SAFETY: `SmallVec` owns its items (inline or heap), so it is `Send`
+// whenever the item type is `Send`. (`A` itself need not be `Send` since no
+// value of type `A` is ever observed directly.)
+unsafe impl<A: Array> Send for SmallVec<A> where A::Item: Send {}
+
+/// An iterator that consumes a `SmallVec` and yields its items by value.
+///
+/// Returned from [`SmallVec::into_iter`][1].
+///
+/// [1]: struct.SmallVec.html#method.into_iter
+pub struct IntoIter<A: Array> {
+    // The vector being consumed. Its `len` is set to 0 on construction; the
+    // live range of elements is tracked by `current..end` instead, and
+    // `IntoIter::drop` is responsible for dropping the unyielded elements.
+    data: SmallVec<A>,
+    // Index of the next element to yield from the front.
+    current: usize,
+    // One past the last element to yield (moves down via `next_back`).
+    end: usize,
+}
+
+// Debug shows only the remaining (unyielded) elements.
+impl<A: Array> fmt::Debug for IntoIter<A>
+where
+    A::Item: fmt::Debug,
+{
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        f.debug_tuple("IntoIter").field(&self.as_slice()).finish()
+    }
+}
+
+// Cloning copies only the remaining elements into a fresh `SmallVec`, so the
+// clone starts at the current position of the original iterator.
+impl<A: Array + Clone> Clone for IntoIter<A>
+where
+    A::Item: Clone,
+{
+    fn clone(&self) -> IntoIter<A> {
+        SmallVec::from(self.as_slice()).into_iter()
+    }
+}
+
+impl<A: Array> Drop for IntoIter<A> {
+    fn drop(&mut self) {
+        // Exhaust the iterator so every unyielded element is read out and
+        // dropped; the inner `SmallVec` (len 0) then frees the allocation.
+        for _ in self {}
+    }
+}
+
+impl<A: Array> Iterator for IntoIter<A> {
+    type Item = A::Item;
+
+    #[inline]
+    fn next(&mut self) -> Option<A::Item> {
+        if self.current == self.end {
+            None
+        } else {
+            unsafe {
+                // Move the element out with `ptr::read`; advancing `current`
+                // first marks it as no longer owned by the iterator, so it
+                // won't be dropped again by `IntoIter::drop`.
+                let current = self.current;
+                self.current += 1;
+                Some(ptr::read(self.data.as_ptr().add(current)))
+            }
+        }
+    }
+
+    #[inline]
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        // Exact: the remaining element count is always `end - current`.
+        let size = self.end - self.current;
+        (size, Some(size))
+    }
+}
+
+impl<A: Array> DoubleEndedIterator for IntoIter<A> {
+    #[inline]
+    fn next_back(&mut self) -> Option<A::Item> {
+        if self.current == self.end {
+            None
+        } else {
+            unsafe {
+                // Mirror of `next`: shrink `end` before reading so the
+                // element is excluded from the drop range on panic/drop.
+                self.end -= 1;
+                Some(ptr::read(self.data.as_ptr().add(self.end)))
+            }
+        }
+    }
+}
+
+// `size_hint` is exact, so `ExactSizeIterator` is sound; once exhausted the
+// iterator keeps returning `None`, satisfying `FusedIterator`.
+impl<A: Array> ExactSizeIterator for IntoIter<A> {}
+impl<A: Array> FusedIterator for IntoIter<A> {}
+
+impl<A: Array> IntoIter<A> {
+    /// Returns the remaining items of this iterator as a slice.
+    pub fn as_slice(&self) -> &[A::Item] {
+        // SAFETY: `current..end` is exactly the range of initialized,
+        // still-owned elements (see the struct's field comments).
+        let len = self.end - self.current;
+        unsafe { core::slice::from_raw_parts(self.data.as_ptr().add(self.current), len) }
+    }
+
+    /// Returns the remaining items of this iterator as a mutable slice.
+    pub fn as_mut_slice(&mut self) -> &mut [A::Item] {
+        // SAFETY: as above; `&mut self` guarantees exclusive access.
+        let len = self.end - self.current;
+        unsafe { core::slice::from_raw_parts_mut(self.data.as_mut_ptr().add(self.current), len) }
+    }
+}
+
+impl<A: Array> IntoIterator for SmallVec<A> {
+    type IntoIter = IntoIter<A>;
+    type Item = A::Item;
+    fn into_iter(mut self) -> Self::IntoIter {
+        unsafe {
+            // Set SmallVec len to zero as `IntoIter` drop handles dropping of the elements
+            let len = self.len();
+            self.set_len(0);
+            IntoIter {
+                data: self,
+                current: 0,
+                end: len,
+            }
+        }
+    }
+}
+
+// Borrowing iteration delegates to the slice iterators via `Deref`.
+impl<'a, A: Array> IntoIterator for &'a SmallVec<A> {
+    type IntoIter = slice::Iter<'a, A::Item>;
+    type Item = &'a A::Item;
+    fn into_iter(self) -> Self::IntoIter {
+        self.iter()
+    }
+}
+
+impl<'a, A: Array> IntoIterator for &'a mut SmallVec<A> {
+    type IntoIter = slice::IterMut<'a, A::Item>;
+    type Item = &'a mut A::Item;
+    fn into_iter(self) -> Self::IntoIter {
+        self.iter_mut()
+    }
+}
+
+/// Types that can be used as the backing store for a SmallVec
+///
+/// Marked `unsafe` because implementations must guarantee the type really is
+/// an array of `size()` contiguous `Item`s; the crate's unsafe code relies on
+/// that layout.
+pub unsafe trait Array {
+    /// The type of the array's elements.
+    type Item;
+    /// Returns the number of items the array can hold.
+    fn size() -> usize;
+}
+
+/// Set the length of the vec when the `SetLenOnDrop` value goes out of scope.
+///
+/// Copied from https://github.com/rust-lang/rust/pull/36355
+///
+/// This is a panic-safety tool: the length is tracked in `local_len` while
+/// elements are written, and committed back through the `len` pointer in
+/// `Drop` — so even if a user closure/iterator panics mid-write, the vector's
+/// length reflects exactly the elements that were fully initialized.
+struct SetLenOnDrop<'a> {
+    len: &'a mut usize,
+    local_len: usize,
+}
+
+impl<'a> SetLenOnDrop<'a> {
+    #[inline]
+    fn new(len: &'a mut usize) -> Self {
+        SetLenOnDrop {
+            local_len: *len,
+            len,
+        }
+    }
+
+    #[inline]
+    fn get(&self) -> usize {
+        self.local_len
+    }
+
+    #[inline]
+    fn increment_len(&mut self, increment: usize) {
+        self.local_len += increment;
+    }
+}
+
+impl<'a> Drop for SetLenOnDrop<'a> {
+    #[inline]
+    fn drop(&mut self) {
+        // Commit the locally tracked length (runs on both normal exit and
+        // unwind).
+        *self.len = self.local_len;
+    }
+}
+
+#[cfg(feature = "const_new")]
+impl<T, const N: usize> SmallVec<[T; N]> {
+    /// Construct an empty vector.
+    ///
+    /// This is a `const` version of [`SmallVec::new`] that is enabled by the feature `const_new`, with the limitation that it only works for arrays.
+    #[cfg_attr(docsrs, doc(cfg(feature = "const_new")))]
+    #[inline]
+    pub const fn new_const() -> Self {
+        SmallVec {
+            // `capacity` doubles as the length while inline (not spilled).
+            capacity: 0,
+            data: SmallVecData::from_const(MaybeUninit::uninit()),
+        }
+    }
+
+    /// The array passed as an argument is moved to be an inline version of `SmallVec`.
+    ///
+    /// This is a `const` version of [`SmallVec::from_buf`] that is enabled by the feature `const_new`, with the limitation that it only works for arrays.
+    #[cfg_attr(docsrs, doc(cfg(feature = "const_new")))]
+    #[inline]
+    pub const fn from_const(items: [T; N]) -> Self {
+        SmallVec {
+            // All N inline slots are initialized, so the inline length is N.
+            capacity: N,
+            data: SmallVecData::from_const(MaybeUninit::new(items)),
+        }
+    }
+}
+
+// With `const_generics`, a single blanket impl covers every array size.
+// (`not(doc)` so rustdoc shows the enumerated impls below instead.)
+#[cfg(all(feature = "const_generics", not(doc)))]
+#[cfg_attr(docsrs, doc(cfg(feature = "const_generics")))]
+unsafe impl<T, const N: usize> Array for [T; N] {
+    type Item = T;
+    #[inline]
+    fn size() -> usize {
+        N
+    }
+}
+
+// Without `const_generics`, `Array` is implemented for a fixed menu of
+// commonly useful sizes via this macro.
+#[cfg(any(not(feature = "const_generics"), doc))]
+macro_rules! impl_array(
+    ($($size:expr),+) => {
+        $(
+            unsafe impl<T> Array for [T; $size] {
+                type Item = T;
+                #[inline]
+                fn size() -> usize { $size }
+            }
+        )+
+    }
+);
+
+// Every size 0-32, plus selected larger powers-of-two-ish sizes up to 2^20.
+#[cfg(any(not(feature = "const_generics"), doc))]
+impl_array!(
+    0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
+    26, 27, 28, 29, 30, 31, 32, 36, 0x40, 0x60, 0x80, 0x100, 0x200, 0x400, 0x600, 0x800, 0x1000,
+    0x2000, 0x4000, 0x6000, 0x8000, 0x10000, 0x20000, 0x40000, 0x60000, 0x80000, 0x10_0000
+);
+
+/// Convenience trait for constructing a `SmallVec`
+pub trait ToSmallVec<A: Array> {
+    /// Construct a new `SmallVec` from a slice.
+    fn to_smallvec(&self) -> SmallVec<A>;
+}
+
+// Only provided for `Copy` items so the conversion can use the memcpy-based
+// `from_slice` fast path.
+impl<A: Array> ToSmallVec<A> for [A::Item]
+where
+    A::Item: Copy,
+{
+    #[inline]
+    fn to_smallvec(&self) -> SmallVec<A> {
+        SmallVec::from_slice(self)
+    }
+}
+
+// Immutable counterpart for `NonNull<T>`.
+//
+// Wraps a `NonNull<T>` but only exposes a `*const T`, documenting in the type
+// system that the pointee is never written through this pointer.
+// `repr(transparent)` keeps it layout-compatible with `NonNull<T>`.
+#[repr(transparent)]
+struct ConstNonNull<T>(NonNull<T>);
+
+impl<T> ConstNonNull<T> {
+    // Returns `None` if `ptr` is null, mirroring `NonNull::new`.
+    #[inline]
+    fn new(ptr: *const T) -> Option<Self> {
+        NonNull::new(ptr as *mut T).map(Self)
+    }
+    #[inline]
+    fn as_ptr(self) -> *const T {
+        self.0.as_ptr()
+    }
+}
+
+// Manual `Clone`/`Copy` (instead of derive) to avoid requiring `T: Copy`.
+impl<T> Clone for ConstNonNull<T> {
+    #[inline]
+    fn clone(&self) -> Self {
+        *self
+    }
+}
+
+impl<T> Copy for ConstNonNull<T> {}
diff --git a/third_party/rust/smallvec/src/specialization.rs b/third_party/rust/smallvec/src/specialization.rs
new file mode 100644
index 0000000000..658fa77a2a
--- /dev/null
+++ b/third_party/rust/smallvec/src/specialization.rs
@@ -0,0 +1,19 @@
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! Implementations that require `default fn`.
+
+use super::{Array, SmallVec, SpecFrom};
+
+// Default (unspecialized) slice conversion: clone element-by-element.
+// `lib.rs` provides a specialized override using `from_slice` (memcpy) when
+// `A::Item: Copy`; `default fn` is what makes that override legal.
+impl<'a, A: Array> SpecFrom<A, &'a [A::Item]> for SmallVec<A>
+where
+    A::Item: Clone,
+{
+    #[inline]
+    default fn spec_from(slice: &'a [A::Item]) -> SmallVec<A> {
+        slice.into_iter().cloned().collect()
+    }
+}
diff --git a/third_party/rust/smallvec/src/tests.rs b/third_party/rust/smallvec/src/tests.rs
new file mode 100644
index 0000000000..bb39ddeb31
--- /dev/null
+++ b/third_party/rust/smallvec/src/tests.rs
@@ -0,0 +1,1016 @@
+use crate::{smallvec, SmallVec};
+
+use std::iter::FromIterator;
+
+use alloc::borrow::ToOwned;
+use alloc::boxed::Box;
+use alloc::rc::Rc;
+use alloc::{vec, vec::Vec};
+
+// A zero-length inline buffer must spill to the heap on the very first push.
+#[test]
+pub fn test_zero() {
+    let mut v = SmallVec::<[_; 0]>::new();
+    assert!(!v.spilled());
+    v.push(0usize);
+    assert!(v.spilled());
+    assert_eq!(&*v, &[0]);
+}
+
+// We heap allocate all these strings so that double frees will show up under valgrind.
+
+// Pushes that stay within the inline capacity.
+#[test]
+pub fn test_inline() {
+    let mut v = SmallVec::<[_; 16]>::new();
+    v.push("hello".to_owned());
+    v.push("there".to_owned());
+    assert_eq!(&*v, &["hello".to_owned(), "there".to_owned(),][..]);
+}
+
+// Pushing past the inline capacity spills to the heap; earlier elements stay
+// readable across the spill.
+#[test]
+pub fn test_spill() {
+    let mut v = SmallVec::<[_; 2]>::new();
+    v.push("hello".to_owned());
+    assert_eq!(v[0], "hello");
+    v.push("there".to_owned());
+    v.push("burma".to_owned());
+    assert_eq!(v[0], "hello");
+    v.push("shave".to_owned());
+    assert_eq!(
+        &*v,
+        &[
+            "hello".to_owned(),
+            "there".to_owned(),
+            "burma".to_owned(),
+            "shave".to_owned(),
+        ][..]
+    );
+}
+
+// Forces a second heap reallocation (grow of an already-spilled vector).
+#[test]
+pub fn test_double_spill() {
+    let mut v = SmallVec::<[_; 2]>::new();
+    v.push("hello".to_owned());
+    v.push("there".to_owned());
+    v.push("burma".to_owned());
+    v.push("shave".to_owned());
+    v.push("hello".to_owned());
+    v.push("there".to_owned());
+    v.push("burma".to_owned());
+    v.push("shave".to_owned());
+    assert_eq!(
+        &*v,
+        &[
+            "hello".to_owned(),
+            "there".to_owned(),
+            "burma".to_owned(),
+            "shave".to_owned(),
+            "hello".to_owned(),
+            "there".to_owned(),
+            "burma".to_owned(),
+            "shave".to_owned(),
+        ][..]
+    );
+}
+
+/// https://github.com/servo/rust-smallvec/issues/4
+#[test]
+fn issue_4() {
+    SmallVec::<[Box<u32>; 2]>::new();
+}
+
+/// https://github.com/servo/rust-smallvec/issues/5
+#[test]
+fn issue_5() {
+    assert!(Some(SmallVec::<[&u32; 2]>::new()).is_some());
+}
+
+// `with_capacity` stays inline when the request fits the array, and
+// pre-spills to the heap when it doesn't.
+#[test]
+fn test_with_capacity() {
+    let v: SmallVec<[u8; 3]> = SmallVec::with_capacity(1);
+    assert!(v.is_empty());
+    assert!(!v.spilled());
+    assert_eq!(v.capacity(), 3);
+
+    let v: SmallVec<[u8; 3]> = SmallVec::with_capacity(10);
+    assert!(v.is_empty());
+    assert!(v.spilled());
+    assert_eq!(v.capacity(), 10);
+}
+
+#[test]
+fn drain() {
+    let mut v: SmallVec<[u8; 2]> = SmallVec::new();
+    v.push(3);
+    assert_eq!(v.drain(..).collect::<Vec<_>>(), &[3]);
+
+    // spilling the vec
+    v.push(3);
+    v.push(4);
+    v.push(5);
+    let old_capacity = v.capacity();
+    assert_eq!(v.drain(1..).collect::<Vec<_>>(), &[4, 5]);
+    // drain should not change the capacity
+    assert_eq!(v.capacity(), old_capacity);
+
+    // Exercise the tail-shifting code when in the inline state
+    // This has the potential to produce UB due to aliasing
+    let mut v: SmallVec<[u8; 2]> = SmallVec::new();
+    v.push(1);
+    v.push(2);
+    assert_eq!(v.drain(..1).collect::<Vec<_>>(), &[1]);
+}
+
+// Drain iterated from the back, inline and spilled.
+#[test]
+fn drain_rev() {
+    let mut v: SmallVec<[u8; 2]> = SmallVec::new();
+    v.push(3);
+    assert_eq!(v.drain(..).rev().collect::<Vec<_>>(), &[3]);
+
+    // spilling the vec
+    v.push(3);
+    v.push(4);
+    v.push(5);
+    assert_eq!(v.drain(..).rev().collect::<Vec<_>>(), &[5, 4, 3]);
+}
+
+// Leaking a `Drain` must leave the vector in a safe (truncated) state rather
+// than exposing the drained range.
+#[test]
+fn drain_forget() {
+    let mut v: SmallVec<[u8; 1]> = smallvec![0, 1, 2, 3, 4, 5, 6, 7];
+    std::mem::forget(v.drain(2..5));
+    assert_eq!(v.len(), 2);
+}
+
+#[test]
+fn into_iter() {
+    let mut v: SmallVec<[u8; 2]> = SmallVec::new();
+    v.push(3);
+    assert_eq!(v.into_iter().collect::<Vec<_>>(), &[3]);
+
+    // spilling the vec
+    let mut v: SmallVec<[u8; 2]> = SmallVec::new();
+    v.push(3);
+    v.push(4);
+    v.push(5);
+    assert_eq!(v.into_iter().collect::<Vec<_>>(), &[3, 4, 5]);
+}
+
+#[test]
+fn into_iter_rev() {
+    let mut v: SmallVec<[u8; 2]> = SmallVec::new();
+    v.push(3);
+    assert_eq!(v.into_iter().rev().collect::<Vec<_>>(), &[3]);
+
+    // spilling the vec
+    let mut v: SmallVec<[u8; 2]> = SmallVec::new();
+    v.push(3);
+    v.push(4);
+    v.push(5);
+    assert_eq!(v.into_iter().rev().collect::<Vec<_>>(), &[5, 4, 3]);
+}
+
+// Every element must be dropped exactly once, whether the `IntoIter` is
+// dropped unconsumed, partially consumed, or consumed from both ends.
+#[test]
+fn into_iter_drop() {
+    use std::cell::Cell;
+
+    struct DropCounter<'a>(&'a Cell<i32>);
+
+    impl<'a> Drop for DropCounter<'a> {
+        fn drop(&mut self) {
+            self.0.set(self.0.get() + 1);
+        }
+    }
+
+    {
+        let cell = Cell::new(0);
+        let mut v: SmallVec<[DropCounter<'_>; 2]> = SmallVec::new();
+        v.push(DropCounter(&cell));
+        v.into_iter();
+        assert_eq!(cell.get(), 1);
+    }
+
+    {
+        let cell = Cell::new(0);
+        let mut v: SmallVec<[DropCounter<'_>; 2]> = SmallVec::new();
+        v.push(DropCounter(&cell));
+        v.push(DropCounter(&cell));
+        assert!(v.into_iter().next().is_some());
+        assert_eq!(cell.get(), 2);
+    }
+
+    {
+        let cell = Cell::new(0);
+        let mut v: SmallVec<[DropCounter<'_>; 2]> = SmallVec::new();
+        v.push(DropCounter(&cell));
+        v.push(DropCounter(&cell));
+        v.push(DropCounter(&cell));
+        assert!(v.into_iter().next().is_some());
+        assert_eq!(cell.get(), 3);
+    }
+    {
+        let cell = Cell::new(0);
+        let mut v: SmallVec<[DropCounter<'_>; 2]> = SmallVec::new();
+        v.push(DropCounter(&cell));
+        v.push(DropCounter(&cell));
+        v.push(DropCounter(&cell));
+        {
+            let mut it = v.into_iter();
+            assert!(it.next().is_some());
+            assert!(it.next_back().is_some());
+        }
+        assert_eq!(cell.get(), 3);
+    }
+}
+
+// reserve / reserve_exact / shrink_to_fit capacity behavior.
+#[test]
+fn test_capacity() {
+    let mut v: SmallVec<[u8; 2]> = SmallVec::new();
+    v.reserve(1);
+    assert_eq!(v.capacity(), 2);
+    assert!(!v.spilled());
+
+    v.reserve_exact(0x100);
+    assert!(v.capacity() >= 0x100);
+
+    v.push(0);
+    v.push(1);
+    v.push(2);
+    v.push(3);
+
+    v.shrink_to_fit();
+    assert!(v.capacity() < 0x100);
+}
+
+// truncate / swap_remove / remove / insert on Box elements (heap-allocated so
+// double frees would be caught by valgrind/miri).
+#[test]
+fn test_truncate() {
+    let mut v: SmallVec<[Box<u8>; 8]> = SmallVec::new();
+
+    for x in 0..8 {
+        v.push(Box::new(x));
+    }
+    v.truncate(4);
+
+    assert_eq!(v.len(), 4);
+    assert!(!v.spilled());
+
+    assert_eq!(*v.swap_remove(1), 1);
+    assert_eq!(*v.remove(1), 3);
+    v.insert(1, Box::new(3));
+
+    assert_eq!(&v.iter().map(|v| **v).collect::<Vec<_>>(), &[0, 3, 2]);
+}
+
+// insert_many splices an iterator into the middle of the vector.
+#[test]
+fn test_insert_many() {
+    let mut v: SmallVec<[u8; 8]> = SmallVec::new();
+    for x in 0..4 {
+        v.push(x);
+    }
+    assert_eq!(v.len(), 4);
+    v.insert_many(1, [5, 6].iter().cloned());
+    assert_eq!(
+        &v.iter().map(|v| *v).collect::<Vec<_>>(),
+        &[0, 5, 6, 1, 2, 3]
+    );
+}
+
+// Iterator wrapper that lies about its lower size bound, to verify
+// `insert_many` is robust against inaccurate `size_hint`s.
+struct MockHintIter<T: Iterator> {
+    x: T,
+    hint: usize,
+}
+impl<T: Iterator> Iterator for MockHintIter<T> {
+    type Item = T::Item;
+    fn next(&mut self) -> Option<Self::Item> {
+        self.x.next()
+    }
+    fn size_hint(&self) -> (usize, Option<usize>) {
+        (self.hint, None)
+    }
+}
+
+// Hint larger than the actual yield count.
+#[test]
+fn test_insert_many_short_hint() {
+    let mut v: SmallVec<[u8; 8]> = SmallVec::new();
+    for x in 0..4 {
+        v.push(x);
+    }
+    assert_eq!(v.len(), 4);
+    v.insert_many(
+        1,
+        MockHintIter {
+            x: [5, 6].iter().cloned(),
+            hint: 5,
+        },
+    );
+    assert_eq!(
+        &v.iter().map(|v| *v).collect::<Vec<_>>(),
+        &[0, 5, 6, 1, 2, 3]
+    );
+}
+
+// Hint smaller than the actual yield count.
+#[test]
+fn test_insert_many_long_hint() {
+    let mut v: SmallVec<[u8; 8]> = SmallVec::new();
+    for x in 0..4 {
+        v.push(x);
+    }
+    assert_eq!(v.len(), 4);
+    v.insert_many(
+        1,
+        MockHintIter {
+            x: [5, 6].iter().cloned(),
+            hint: 1,
+        },
+    );
+    assert_eq!(
+        &v.iter().map(|v| *v).collect::<Vec<_>>(),
+        &[0, 5, 6, 1, 2, 3]
+    );
+}
+
+// https://github.com/servo/rust-smallvec/issues/96
+//
+// `insert_many` must not double-drop any element when the supplied iterator
+// panics, regardless of whether it panics before or after yielding as many
+// items as its size hint promised, and regardless of insertion position.
+mod insert_many_panic {
+    use crate::{smallvec, SmallVec};
+    use alloc::boxed::Box;
+
+    // Element type whose Drop asserts it runs at most once.
+    struct PanicOnDoubleDrop {
+        dropped: Box<bool>,
+    }
+
+    impl PanicOnDoubleDrop {
+        fn new() -> Self {
+            Self {
+                dropped: Box::new(false),
+            }
+        }
+    }
+
+    impl Drop for PanicOnDoubleDrop {
+        fn drop(&mut self) {
+            assert!(!*self.dropped, "already dropped");
+            *self.dropped = true;
+        }
+    }
+
+    /// Claims to yield `hint` items, but actually yields `count`, then panics.
+    struct BadIter {
+        hint: usize,
+        count: usize,
+    }
+
+    impl Iterator for BadIter {
+        type Item = PanicOnDoubleDrop;
+        fn size_hint(&self) -> (usize, Option<usize>) {
+            (self.hint, None)
+        }
+        fn next(&mut self) -> Option<Self::Item> {
+            if self.count == 0 {
+                panic!()
+            }
+            self.count -= 1;
+            Some(PanicOnDoubleDrop::new())
+        }
+    }
+
+    #[test]
+    fn panic_early_at_start() {
+        let mut vec: SmallVec<[PanicOnDoubleDrop; 0]> =
+            smallvec![PanicOnDoubleDrop::new(), PanicOnDoubleDrop::new(),];
+        let result = ::std::panic::catch_unwind(move || {
+            vec.insert_many(0, BadIter { hint: 1, count: 0 });
+        });
+        assert!(result.is_err());
+    }
+
+    #[test]
+    fn panic_early_in_middle() {
+        let mut vec: SmallVec<[PanicOnDoubleDrop; 0]> =
+            smallvec![PanicOnDoubleDrop::new(), PanicOnDoubleDrop::new(),];
+        let result = ::std::panic::catch_unwind(move || {
+            vec.insert_many(1, BadIter { hint: 4, count: 2 });
+        });
+        assert!(result.is_err());
+    }
+
+    #[test]
+    fn panic_early_at_end() {
+        let mut vec: SmallVec<[PanicOnDoubleDrop; 0]> =
+            smallvec![PanicOnDoubleDrop::new(), PanicOnDoubleDrop::new(),];
+        let result = ::std::panic::catch_unwind(move || {
+            vec.insert_many(2, BadIter { hint: 3, count: 1 });
+        });
+        assert!(result.is_err());
+    }
+
+    #[test]
+    fn panic_late_at_start() {
+        let mut vec: SmallVec<[PanicOnDoubleDrop; 0]> =
+            smallvec![PanicOnDoubleDrop::new(), PanicOnDoubleDrop::new(),];
+        let result = ::std::panic::catch_unwind(move || {
+            vec.insert_many(0, BadIter { hint: 3, count: 5 });
+        });
+        assert!(result.is_err());
+    }
+
+    #[test]
+    fn panic_late_at_end() {
+        let mut vec: SmallVec<[PanicOnDoubleDrop; 0]> =
+            smallvec![PanicOnDoubleDrop::new(), PanicOnDoubleDrop::new(),];
+        let result = ::std::panic::catch_unwind(move || {
+            vec.insert_many(2, BadIter { hint: 3, count: 5 });
+        });
+        assert!(result.is_err());
+    }
+}
+
+// `grow` below the current length must panic rather than truncate silently.
+#[test]
+#[should_panic]
+fn test_invalid_grow() {
+    let mut v: SmallVec<[u8; 8]> = SmallVec::new();
+    v.extend(0..8);
+    v.grow(5);
+}
+
+// A drain range whose inclusive end overflows usize must panic.
+#[test]
+#[should_panic]
+fn drain_overflow() {
+    let mut v: SmallVec<[u8; 8]> = smallvec![0];
+    v.drain(..=std::usize::MAX);
+}
+
+#[test]
+fn test_insert_from_slice() {
+    let mut v: SmallVec<[u8; 8]> = SmallVec::new();
+    for x in 0..4 {
+        v.push(x);
+    }
+    assert_eq!(v.len(), 4);
+    v.insert_from_slice(1, &[5, 6]);
+    assert_eq!(
+        &v.iter().map(|v| *v).collect::<Vec<_>>(),
+        &[0, 5, 6, 1, 2, 3]
+    );
+}
+
+#[test]
+fn test_extend_from_slice() {
+    let mut v: SmallVec<[u8; 8]> = SmallVec::new();
+    for x in 0..4 {
+        v.push(x);
+    }
+    assert_eq!(v.len(), 4);
+    v.extend_from_slice(&[5, 6]);
+    assert_eq!(
+        &v.iter().map(|v| *v).collect::<Vec<_>>(),
+        &[0, 1, 2, 3, 5, 6]
+    );
+}
+
+#[test]
+#[should_panic]
+fn test_drop_panic_smallvec() {
+    // This test should only panic once, and not double panic,
+    // which would mean a double drop
+    struct DropPanic;
+
+    impl Drop for DropPanic {
+        fn drop(&mut self) {
+            panic!("drop");
+        }
+    }
+
+    let mut v = SmallVec::<[_; 1]>::new();
+    v.push(DropPanic);
+}
+
+// Element-wise equality.
+#[test]
+fn test_eq() {
+    let mut a: SmallVec<[u32; 2]> = SmallVec::new();
+    let mut b: SmallVec<[u32; 2]> = SmallVec::new();
+    let mut c: SmallVec<[u32; 2]> = SmallVec::new();
+    // a = [1, 2]
+    a.push(1);
+    a.push(2);
+    // b = [1, 2]
+    b.push(1);
+    b.push(2);
+    // c = [3, 4]
+    c.push(3);
+    c.push(4);
+
+    assert!(a == b);
+    assert!(a != c);
+}
+
+// Lexicographic ordering, including the prefix-is-smaller case.
+#[test]
+fn test_ord() {
+    let mut a: SmallVec<[u32; 2]> = SmallVec::new();
+    let mut b: SmallVec<[u32; 2]> = SmallVec::new();
+    let mut c: SmallVec<[u32; 2]> = SmallVec::new();
+    // a = [1]
+    a.push(1);
+    // b = [1, 1]
+    b.push(1);
+    b.push(1);
+    // c = [1, 2]
+    c.push(1);
+    c.push(2);
+
+    assert!(a < b);
+    assert!(b > a);
+    assert!(b < c);
+    assert!(c > b);
+}
+
+// A `SmallVec` must hash identically to an array/slice with the same
+// contents (both delegate to the slice `Hash` impl).
+#[test]
+fn test_hash() {
+    use std::collections::hash_map::DefaultHasher;
+    use std::hash::{Hash, Hasher};
+
+    // Compute the actual hash value of `v` with a fresh hasher.
+    //
+    // NOTE: the previous version of this test asserted
+    // `a.hash(&mut hasher) == b.hash(&mut hasher)`, which compared the `()`
+    // return values of `Hash::hash` — vacuously true. Comparing
+    // `Hasher::finish()` outputs from independent hashers actually checks
+    // hash equality.
+    fn hash_of<T: Hash + ?Sized>(v: &T) -> u64 {
+        let mut hasher = DefaultHasher::new();
+        v.hash(&mut hasher);
+        hasher.finish()
+    }
+
+    {
+        // Inline state.
+        let mut a: SmallVec<[u32; 2]> = SmallVec::new();
+        let b = [1, 2];
+        a.extend(b.iter().cloned());
+        assert_eq!(hash_of(&a), hash_of(&b));
+    }
+    {
+        // Spilled state.
+        let mut a: SmallVec<[u32; 2]> = SmallVec::new();
+        let b = [1, 2, 11, 12];
+        a.extend(b.iter().cloned());
+        assert_eq!(hash_of(&a), hash_of(&b));
+    }
+}
+
+// AsRef<[T]> view, inline and spilled.
+#[test]
+fn test_as_ref() {
+    let mut a: SmallVec<[u32; 2]> = SmallVec::new();
+    a.push(1);
+    assert_eq!(a.as_ref(), [1]);
+    a.push(2);
+    assert_eq!(a.as_ref(), [1, 2]);
+    a.push(3);
+    assert_eq!(a.as_ref(), [1, 2, 3]);
+}
+
+// AsMut<[T]> view, including writing through it.
+#[test]
+fn test_as_mut() {
+    let mut a: SmallVec<[u32; 2]> = SmallVec::new();
+    a.push(1);
+    assert_eq!(a.as_mut(), [1]);
+    a.push(2);
+    assert_eq!(a.as_mut(), [1, 2]);
+    a.push(3);
+    assert_eq!(a.as_mut(), [1, 2, 3]);
+    a.as_mut()[1] = 4;
+    assert_eq!(a.as_mut(), [1, 4, 3]);
+}
+
+// Borrow<[T]> view.
+#[test]
+fn test_borrow() {
+    use std::borrow::Borrow;
+
+    let mut a: SmallVec<[u32; 2]> = SmallVec::new();
+    a.push(1);
+    assert_eq!(a.borrow(), [1]);
+    a.push(2);
+    assert_eq!(a.borrow(), [1, 2]);
+    a.push(3);
+    assert_eq!(a.borrow(), [1, 2, 3]);
+}
+
+// BorrowMut<[T]> view, including writing through it.
+#[test]
+fn test_borrow_mut() {
+    use std::borrow::BorrowMut;
+
+    let mut a: SmallVec<[u32; 2]> = SmallVec::new();
+    a.push(1);
+    assert_eq!(a.borrow_mut(), [1]);
+    a.push(2);
+    assert_eq!(a.borrow_mut(), [1, 2]);
+    a.push(3);
+    assert_eq!(a.borrow_mut(), [1, 2, 3]);
+    BorrowMut::<[u32]>::borrow_mut(&mut a)[1] = 4;
+    assert_eq!(a.borrow_mut(), [1, 4, 3]);
+}
+
+// All three `From` conversions (slice, Vec, array), inline and spilled.
+#[test]
+fn test_from() {
+    assert_eq!(&SmallVec::<[u32; 2]>::from(&[1][..])[..], [1]);
+    assert_eq!(&SmallVec::<[u32; 2]>::from(&[1, 2, 3][..])[..], [1, 2, 3]);
+
+    let vec = vec![];
+    let small_vec: SmallVec<[u8; 3]> = SmallVec::from(vec);
+    assert_eq!(&*small_vec, &[]);
+    drop(small_vec);
+
+    let vec = vec![1, 2, 3, 4, 5];
+    let small_vec: SmallVec<[u8; 3]> = SmallVec::from(vec);
+    assert_eq!(&*small_vec, &[1, 2, 3, 4, 5]);
+    drop(small_vec);
+
+    let vec = vec![1, 2, 3, 4, 5];
+    let small_vec: SmallVec<[u8; 1]> = SmallVec::from(vec);
+    assert_eq!(&*small_vec, &[1, 2, 3, 4, 5]);
+    drop(small_vec);
+
+    let array = [1];
+    let small_vec: SmallVec<[u8; 1]> = SmallVec::from(array);
+    assert_eq!(&*small_vec, &[1]);
+    drop(small_vec);
+
+    let array = [99; 128];
+    let small_vec: SmallVec<[u8; 128]> = SmallVec::from(array);
+    assert_eq!(&*small_vec, vec![99u8; 128].as_slice());
+    drop(small_vec);
+}
+
+#[test]
+fn test_from_slice() {
+    assert_eq!(&SmallVec::<[u32; 2]>::from_slice(&[1][..])[..], [1]);
+    assert_eq!(
+        &SmallVec::<[u32; 2]>::from_slice(&[1, 2, 3][..])[..],
+        [1, 2, 3]
+    );
+}
+
+// `len()` on IntoIter/Drain reflects remaining elements.
+#[test]
+fn test_exact_size_iterator() {
+    let mut vec = SmallVec::<[u32; 2]>::from(&[1, 2, 3][..]);
+    assert_eq!(vec.clone().into_iter().len(), 3);
+    assert_eq!(vec.drain(..2).len(), 2);
+    assert_eq!(vec.into_iter().len(), 1);
+}
+
+// `as_slice`/`as_mut_slice` track consumption from both ends.
+#[test]
+fn test_into_iter_as_slice() {
+    let vec = SmallVec::<[u32; 2]>::from(&[1, 2, 3][..]);
+    let mut iter = vec.clone().into_iter();
+    assert_eq!(iter.as_slice(), &[1, 2, 3]);
+    assert_eq!(iter.as_mut_slice(), &[1, 2, 3]);
+    iter.next();
+    assert_eq!(iter.as_slice(), &[2, 3]);
+    assert_eq!(iter.as_mut_slice(), &[2, 3]);
+    iter.next_back();
+    assert_eq!(iter.as_slice(), &[2]);
+    assert_eq!(iter.as_mut_slice(), &[2]);
+}
+
+#[test]
+fn test_into_iter_clone() {
+    // Test that the cloned iterator yields identical elements and that it owns its own copy
+    // (i.e. no use after move errors).
+    let mut iter = SmallVec::<[u8; 2]>::from_iter(0..3).into_iter();
+    let mut clone_iter = iter.clone();
+    while let Some(x) = iter.next() {
+        assert_eq!(x, clone_iter.next().unwrap());
+    }
+    assert_eq!(clone_iter.next(), None);
+}
+
+#[test]
+fn test_into_iter_clone_partially_consumed_iterator() {
+    // Test that the cloned iterator only contains the remaining elements of the original iterator.
+    let mut iter = SmallVec::<[u8; 2]>::from_iter(0..3).into_iter().skip(1);
+    let mut clone_iter = iter.clone();
+    while let Some(x) = iter.next() {
+        assert_eq!(x, clone_iter.next().unwrap());
+    }
+    assert_eq!(clone_iter.next(), None);
+}
+
+#[test]
+fn test_into_iter_clone_empty_smallvec() {
+    let mut iter = SmallVec::<[u8; 2]>::new().into_iter();
+    let mut clone_iter = iter.clone();
+    assert_eq!(iter.next(), None);
+    assert_eq!(clone_iter.next(), None);
+}
+
+#[test]
+fn shrink_to_fit_unspill() {
+    let mut vec = SmallVec::<[u8; 2]>::from_iter(0..3);
+    vec.pop();
+    assert!(vec.spilled());
+    vec.shrink_to_fit();
+    assert!(!vec.spilled(), "shrink_to_fit will un-spill if possible");
+}
+
+// Conversion back to `Vec`, from both inline and spilled states.
+#[test]
+fn test_into_vec() {
+    let vec = SmallVec::<[u8; 2]>::from_iter(0..2);
+    assert_eq!(vec.into_vec(), vec![0, 1]);
+
+    let vec = SmallVec::<[u8; 2]>::from_iter(0..3);
+    assert_eq!(vec.into_vec(), vec![0, 1, 2]);
+}
+
+// `into_inner` succeeds only when the length exactly fills the inline array.
+#[test]
+fn test_into_inner() {
+    let vec = SmallVec::<[u8; 2]>::from_iter(0..2);
+    assert_eq!(vec.into_inner(), Ok([0, 1]));
+
+    let vec = SmallVec::<[u8; 2]>::from_iter(0..1);
+    assert_eq!(vec.clone().into_inner(), Err(vec));
+
+    let vec = SmallVec::<[u8; 2]>::from_iter(0..3);
+    assert_eq!(vec.clone().into_inner(), Err(vec));
+}
+
+// `from_vec` across empty / smaller-than-inline / larger-than-inline sizes.
+#[test]
+fn test_from_vec() {
+    let vec = vec![];
+    let small_vec: SmallVec<[u8; 3]> = SmallVec::from_vec(vec);
+    assert_eq!(&*small_vec, &[]);
+    drop(small_vec);
+
+    let vec = vec![];
+    let small_vec: SmallVec<[u8; 1]> = SmallVec::from_vec(vec);
+    assert_eq!(&*small_vec, &[]);
+    drop(small_vec);
+
+    let vec = vec![1];
+    let small_vec: SmallVec<[u8; 3]> = SmallVec::from_vec(vec);
+    assert_eq!(&*small_vec, &[1]);
+    drop(small_vec);
+
+    let vec = vec![1, 2, 3];
+    let small_vec: SmallVec<[u8; 3]> = SmallVec::from_vec(vec);
+    assert_eq!(&*small_vec, &[1, 2, 3]);
+    drop(small_vec);
+
+    let vec = vec![1, 2, 3, 4, 5];
+    let small_vec: SmallVec<[u8; 3]> = SmallVec::from_vec(vec);
+    assert_eq!(&*small_vec, &[1, 2, 3, 4, 5]);
+    drop(small_vec);
+
+    let vec = vec![1, 2, 3, 4, 5];
+    let small_vec: SmallVec<[u8; 1]> = SmallVec::from_vec(vec);
+    assert_eq!(&*small_vec, &[1, 2, 3, 4, 5]);
+    drop(small_vec);
+}
+
+#[test]
+fn test_retain() {
+    // Test inline data storate
+    let mut sv: SmallVec<[i32; 5]> = SmallVec::from_slice(&[1, 2, 3, 3, 4]);
+    sv.retain(|&mut i| i != 3);
+    assert_eq!(sv.pop(), Some(4));
+    assert_eq!(sv.pop(), Some(2));
+    assert_eq!(sv.pop(), Some(1));
+    assert_eq!(sv.pop(), None);
+
+    // Test spilled data storage
+    let mut sv: SmallVec<[i32; 3]> = SmallVec::from_slice(&[1, 2, 3, 3, 4]);
+    sv.retain(|&mut i| i != 3);
+    assert_eq!(sv.pop(), Some(4));
+    assert_eq!(sv.pop(), Some(2));
+    assert_eq!(sv.pop(), Some(1));
+    assert_eq!(sv.pop(), None);
+
+    // Test that drop implementations are called for inline.
+    let one = Rc::new(1);
+    let mut sv: SmallVec<[Rc<i32>; 3]> = SmallVec::new();
+    sv.push(Rc::clone(&one));
+    assert_eq!(Rc::strong_count(&one), 2);
+    sv.retain(|_| false);
+    assert_eq!(Rc::strong_count(&one), 1);
+
+    // Test that drop implementations are called for spilled data.
+    let mut sv: SmallVec<[Rc<i32>; 1]> = SmallVec::new();
+    sv.push(Rc::clone(&one));
+    sv.push(Rc::new(2));
+    assert_eq!(Rc::strong_count(&one), 2);
+    sv.retain(|_| false);
+    assert_eq!(Rc::strong_count(&one), 1);
+}
+
+// dedup collapses consecutive duplicates only.
+#[test]
+fn test_dedup() {
+    let mut dupes: SmallVec<[i32; 5]> = SmallVec::from_slice(&[1, 1, 2, 3, 3]);
+    dupes.dedup();
+    assert_eq!(&*dupes, &[1, 2, 3]);
+
+    let mut empty: SmallVec<[i32; 5]> = SmallVec::new();
+    empty.dedup();
+    assert!(empty.is_empty());
+
+    let mut all_ones: SmallVec<[i32; 5]> = SmallVec::from_slice(&[1, 1, 1, 1, 1]);
+    all_ones.dedup();
+    assert_eq!(all_ones.len(), 1);
+
+    let mut no_dupes: SmallVec<[i32; 5]> = SmallVec::from_slice(&[1, 2, 3, 4, 5]);
+    no_dupes.dedup();
+    assert_eq!(no_dupes.len(), 5);
+}
+
+// resize grows with the fill value and shrinks by truncation.
+#[test]
+fn test_resize() {
+    let mut v: SmallVec<[i32; 8]> = SmallVec::new();
+    v.push(1);
+    v.resize(5, 0);
+    assert_eq!(v[..], [1, 0, 0, 0, 0][..]);
+
+    v.resize(2, -1);
+    assert_eq!(v[..], [1, 0][..]);
+}
+
+// `std::io::Write` impl (feature = "write"): both `write` and `write_all`.
+#[cfg(feature = "write")]
+#[test]
+fn test_write() {
+    use std::io::Write;
+
+    let data = [1, 2, 3, 4, 5];
+
+    let mut small_vec: SmallVec<[u8; 2]> = SmallVec::new();
+    let len = small_vec.write(&data[..]).unwrap();
+    assert_eq!(len, 5);
+    assert_eq!(small_vec.as_ref(), data.as_ref());
+
+    let mut small_vec: SmallVec<[u8; 2]> = SmallVec::new();
+    small_vec.write_all(&data[..]).unwrap();
+    assert_eq!(small_vec.as_ref(), data.as_ref());
+}
+
+#[cfg(feature = "serde")]
+extern crate bincode;
+
+// Serde round-trip via bincode, before and after spilling.
+#[cfg(feature = "serde")]
+#[test]
+fn test_serde() {
+    use self::bincode::{config, deserialize};
+    let mut small_vec: SmallVec<[i32; 2]> = SmallVec::new();
+    small_vec.push(1);
+    let encoded = config().limit(100).serialize(&small_vec).unwrap();
+    let decoded: SmallVec<[i32; 2]> = deserialize(&encoded).unwrap();
+    assert_eq!(small_vec, decoded);
+    small_vec.push(2);
+    // Spill the vec
+    small_vec.push(3);
+    small_vec.push(4);
+    // Check again after spilling.
+    let encoded = config().limit(100).serialize(&small_vec).unwrap();
+    let decoded: SmallVec<[i32; 2]> = deserialize(&encoded).unwrap();
+    assert_eq!(small_vec, decoded);
+}
+
+// `grow` to a capacity that fits inline un-spills an empty spilled vector.
+#[test]
+fn grow_to_shrink() {
+    let mut v: SmallVec<[u8; 2]> = SmallVec::new();
+    v.push(1);
+    v.push(2);
+    v.push(3);
+    assert!(v.spilled());
+    v.clear();
+    // Shrink to inline.
+    v.grow(2);
+    assert!(!v.spilled());
+    assert_eq!(v.capacity(), 2);
+    assert_eq!(v.len(), 0);
+    v.push(4);
+    assert_eq!(v[..], [4]);
+}
+
+// `extend` must stop at the first `None` even if the iterator could resume.
+#[test]
+fn resumable_extend() {
+    let s = "a b c";
+    // This iterator yields: (Some('a'), None, Some('b'), None, Some('c')), None
+    let it = s
+        .chars()
+        .scan(0, |_, ch| if ch.is_whitespace() { None } else { Some(ch) });
+    let mut v: SmallVec<[char; 4]> = SmallVec::new();
+    v.extend(it);
+    assert_eq!(v[..], ['a']);
+}
+
+// #139
+// An uninhabited item type must still allow constructing an empty vector.
+#[test]
+fn uninhabited() {
+    enum Void {}
+    let _sv = SmallVec::<[Void; 8]>::new();
+}
+
+#[test]
+fn grow_spilled_same_size() {
+    let mut v: SmallVec<[u8; 2]> = SmallVec::new();
+    v.push(0);
+    v.push(1);
+    v.push(2);
+    assert!(v.spilled());
+    assert_eq!(v.capacity(), 4);
+    // grow with the same capacity
+    v.grow(4);
+    assert_eq!(v.capacity(), 4);
+    assert_eq!(v[..], [0, 1, 2]);
+}
+
+// A size outside the macro-generated list only works with `const_generics`.
+#[cfg(feature = "const_generics")]
+#[test]
+fn const_generics() {
+    let _v = SmallVec::<[i32; 987]>::default();
+}
+
+// `new_const` / `smallvec_inline!` must be usable in `const fn` context.
+#[cfg(feature = "const_new")]
+#[test]
+fn const_new() {
+    let v = const_new_inner();
+    assert_eq!(v.capacity(), 4);
+    assert_eq!(v.len(), 0);
+    let v = const_new_inline_sized();
+    assert_eq!(v.capacity(), 4);
+    assert_eq!(v.len(), 4);
+    assert_eq!(v[0], 1);
+    let v = const_new_inline_args();
+    assert_eq!(v.capacity(), 2);
+    assert_eq!(v.len(), 2);
+    assert_eq!(v[0], 1);
+    assert_eq!(v[1], 4);
+}
+#[cfg(feature = "const_new")]
+const fn const_new_inner() -> SmallVec<[i32; 4]> {
+    SmallVec::<[i32; 4]>::new_const()
+}
+#[cfg(feature = "const_new")]
+const fn const_new_inline_sized() -> SmallVec<[i32; 4]> {
+    crate::smallvec_inline![1; 4]
+}
+#[cfg(feature = "const_new")]
+const fn const_new_inline_args() -> SmallVec<[i32; 2]> {
+    crate::smallvec_inline![1, 4]
+}
+
+// `smallvec![]` with no arguments must still infer and compile.
+#[test]
+fn empty_macro() {
+    let _v: SmallVec<[u8; 1]> = smallvec![];
+}
+
+// Zero-sized item types must not allocate or miscount.
+#[test]
+fn zero_size_items() {
+    SmallVec::<[(); 0]>::new().push(());
+}
+
+// `insert_many` with an iterator whose lower size bound is an underestimate.
+#[test]
+fn test_insert_many_overflow() {
+    let mut v: SmallVec<[u8; 1]> = SmallVec::new();
+    v.push(123);
+
+    // Prepare an iterator with small lower bound
+    let iter = (0u8..5).filter(|n| n % 2 == 0);
+    assert_eq!(iter.size_hint().0, 0);
+
+    v.insert_many(0, iter);
+    assert_eq!(&*v, &[0, 2, 4, 123]);
+}
+
+// `clone_from` both shrinking (a <- b) and growing (b <- c) the destination.
+#[test]
+fn test_clone_from() {
+    let mut a: SmallVec<[u8; 2]> = SmallVec::new();
+    a.push(1);
+    a.push(2);
+    a.push(3);
+
+    let mut b: SmallVec<[u8; 2]> = SmallVec::new();
+    b.push(10);
+
+    let mut c: SmallVec<[u8; 2]> = SmallVec::new();
+    c.push(20);
+    c.push(21);
+    c.push(22);
+
+    a.clone_from(&b);
+    assert_eq!(&*a, &[10]);
+
+    b.clone_from(&c);
+    assert_eq!(&*b, &[20, 21, 22]);
+}
+
+// Guard against accidental struct-layout growth (ptr + len + capacity words
+// on 64-bit for this element size).
+#[test]
+fn test_size() {
+    use core::mem::size_of;
+    assert_eq!(24, size_of::<SmallVec<[u8; 8]>>());
+}
+
+#[cfg(feature = "drain_filter")]
+#[test]
+fn drain_filter() {
+    let mut a: SmallVec<[u8; 2]> = smallvec![1u8, 2, 3, 4, 5, 6, 7, 8];
+
+    let b: SmallVec<[u8; 2]> = a.drain_filter(|x| *x % 3 == 0).collect();
+
+    assert_eq!(a, SmallVec::<[u8; 2]>::from_slice(&[1u8, 2, 4, 5, 7, 8]));
+    assert_eq!(b, SmallVec::<[u8; 2]>::from_slice(&[3u8, 6]));
+}
+
+// `keep_rest` stops draining and retains the not-yet-visited elements.
+#[cfg(feature = "drain_keep_rest")]
+#[test]
+fn drain_keep_rest() {
+    let mut a: SmallVec<[i32; 3]> = smallvec![1i32, 2, 3, 4, 5, 6, 7, 8];
+    let mut df = a.drain_filter(|x| *x % 2 == 0);
+
+    assert_eq!(df.next().unwrap(), 2);
+    assert_eq!(df.next().unwrap(), 4);
+
+    df.keep_rest();
+
+    assert_eq!(a, SmallVec::<[i32; 3]>::from_slice(&[1i32, 3, 5, 6, 7, 8]));
+}
diff --git a/third_party/rust/smallvec/tests/debugger_visualizer.rs b/third_party/rust/smallvec/tests/debugger_visualizer.rs
new file mode 100644
index 0000000000..b39aa9d981
--- /dev/null
+++ b/third_party/rust/smallvec/tests/debugger_visualizer.rs
@@ -0,0 +1,68 @@
+use debugger_test::debugger_test;
+use smallvec::{smallvec, SmallVec};
+
+#[inline(never)] // must survive as a real call in the binary so the debugger can break on it
+fn __break() {} // no-op anchor symbol: the CDB script's `g` commands run until each call to this function
+
+#[debugger_test( // drives CDB over this test and diffs its output against `expected_statements` (exercises the .natvis visualizer)
+ debugger = "cdb", // Windows console debugger; the raw strings below are its command script and expected transcript
+ commands = r#"
+.nvlist
+dx sv
+
+g
+
+dx sv
+
+g
+
+dx sv
+"#,
+ expected_statements = r#"
+sv : { len=0x2 is_inline=true } [Type: smallvec::SmallVec<array$<i32,4> >]
+ [<Raw View>] [Type: smallvec::SmallVec<array$<i32,4> >]
+ [capacity] : 4
+ [len] : 0x2 [Type: unsigned __int64]
+ [0] : 1 [Type: int]
+ [1] : 2 [Type: int]
+
+sv : { len=0x5 is_inline=false } [Type: smallvec::SmallVec<array$<i32,4> >]
+ [<Raw View>] [Type: smallvec::SmallVec<array$<i32,4> >]
+ [capacity] : 0x8 [Type: unsigned __int64]
+ [len] : 0x5 [Type: unsigned __int64]
+ [0] : 5 [Type: int]
+ [1] : 2 [Type: int]
+ [2] : 3 [Type: int]
+ [3] : 4 [Type: int]
+ [4] : 5 [Type: int]
+
+sv : { len=0x5 is_inline=false } [Type: smallvec::SmallVec<array$<i32,4> >]
+ [<Raw View>] [Type: smallvec::SmallVec<array$<i32,4> >]
+ [capacity] : 0x8 [Type: unsigned __int64]
+ [len] : 0x5 [Type: unsigned __int64]
+ [0] : 2 [Type: int]
+ [1] : 3 [Type: int]
+ [2] : 4 [Type: int]
+ [3] : 5 [Type: int]
+ [4] : 5 [Type: int]
+"#
+)]
+#[inline(never)] // keep a real stack frame so `sv` is inspectable at every __break() stop
+fn test_debugger_visualizer() { // each __break() below corresponds to one `dx sv` snapshot in expected_statements above
+ // This SmallVec can hold up to 4 items on the stack:
+ let mut sv: SmallVec<[i32; 4]> = smallvec![1, 2];
+ __break(); // snapshot 1: inline, len 2
+
+ // Overfill the SmallVec to move its contents to the heap
+ for i in 3..6 {
+ sv.push(i);
+ }
+
+ // Update the contents of the first value of the SmallVec.
+ sv[0] = sv[1] + sv[2]; // 2 + 3 = 5, matching snapshot 2's [0] : 5
+ __break(); // snapshot 2: spilled to heap, len 5, capacity 8
+
+ // Sort the SmallVec in place.
+ sv.sort();
+ __break(); // snapshot 3: same storage, elements now [2, 3, 4, 5, 5]
+}
diff --git a/third_party/rust/smallvec/tests/macro.rs b/third_party/rust/smallvec/tests/macro.rs
new file mode 100644
index 0000000000..fa52e79b9e
--- /dev/null
+++ b/third_party/rust/smallvec/tests/macro.rs
@@ -0,0 +1,24 @@
+/// This file tests `smallvec!` without actually having the macro in scope.
+/// This forces any recursion to use a `$crate` prefix to reliably find itself.
+
+#[test]
+fn smallvec() { // deliberately no `use smallvec::smallvec;` — all invocations go through the fully-qualified path
+ let mut vec: smallvec::SmallVec<[i32; 2]>;
+
+ macro_rules! check { // local helper: expands the same token tree through smallvec! and vec! and compares the results
+ ($init:tt) => {
+ vec = smallvec::smallvec! $init; // path-qualified call; internal recursion must use $crate to find itself
+ assert_eq!(*vec, *vec! $init); // std's vec! serves as the reference behavior
+ }
+ }
+
+ check!([0; 0]); // repeat form [elem; n], including the zero-count edge case
+ check!([1; 1]);
+ check!([2; 2]);
+ check!([3; 3]);
+
+ check!([]); // list form [a, b, ...], including the empty list
+ check!([1]);
+ check!([1, 2]);
+ check!([1, 2, 3]);
+}