author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:02:58 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-17 12:02:58 +0000
commit    698f8c2f01ea549d77d7dc3338a12e04c11057b9 (patch)
tree      173a775858bd501c378080a10dca74132f05bc50 /library/alloc
parent    Initial commit. (diff)
download  rustc-698f8c2f01ea549d77d7dc3338a12e04c11057b9.tar.xz
          rustc-698f8c2f01ea549d77d7dc3338a12e04c11057b9.zip

Adding upstream version 1.64.0+dfsg1. (upstream/1.64.0+dfsg1)

Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'library/alloc')
-rw-r--r--  library/alloc/Cargo.toml | 37
-rw-r--r--  library/alloc/benches/binary_heap.rs | 91
-rw-r--r--  library/alloc/benches/btree/map.rs | 585
-rw-r--r--  library/alloc/benches/btree/mod.rs | 2
-rw-r--r--  library/alloc/benches/btree/set.rs | 224
-rw-r--r--  library/alloc/benches/lib.rs | 28
-rw-r--r--  library/alloc/benches/linked_list.rs | 77
-rw-r--r--  library/alloc/benches/slice.rs | 379
-rw-r--r--  library/alloc/benches/str.rs | 299
-rw-r--r--  library/alloc/benches/string.rs | 164
-rw-r--r--  library/alloc/benches/vec.rs | 784
-rw-r--r--  library/alloc/benches/vec_deque.rs | 125
-rw-r--r--  library/alloc/benches/vec_deque_append.rs | 34
-rw-r--r--  library/alloc/src/alloc.rs | 444
-rw-r--r--  library/alloc/src/alloc/tests.rs | 30
-rw-r--r--  library/alloc/src/borrow.rs | 495
-rw-r--r--  library/alloc/src/boxed.rs | 2087
-rw-r--r--  library/alloc/src/boxed/thin.rs | 273
-rw-r--r--  library/alloc/src/collections/binary_heap.rs | 1720
-rw-r--r--  library/alloc/src/collections/binary_heap/tests.rs | 489
-rw-r--r--  library/alloc/src/collections/btree/append.rs | 107
-rw-r--r--  library/alloc/src/collections/btree/borrow.rs | 47
-rw-r--r--  library/alloc/src/collections/btree/borrow/tests.rs | 19
-rw-r--r--  library/alloc/src/collections/btree/dedup_sorted_iter.rs | 47
-rw-r--r--  library/alloc/src/collections/btree/fix.rs | 179
-rw-r--r--  library/alloc/src/collections/btree/map.rs | 2423
-rw-r--r--  library/alloc/src/collections/btree/map/entry.rs | 555
-rw-r--r--  library/alloc/src/collections/btree/map/tests.rs | 2338
-rw-r--r--  library/alloc/src/collections/btree/mem.rs | 35
-rw-r--r--  library/alloc/src/collections/btree/merge_iter.rs | 98
-rw-r--r--  library/alloc/src/collections/btree/mod.rs | 26
-rw-r--r--  library/alloc/src/collections/btree/navigate.rs | 719
-rw-r--r--  library/alloc/src/collections/btree/node.rs | 1753
-rw-r--r--  library/alloc/src/collections/btree/node/tests.rs | 102
-rw-r--r--  library/alloc/src/collections/btree/remove.rs | 95
-rw-r--r--  library/alloc/src/collections/btree/search.rs | 285
-rw-r--r--  library/alloc/src/collections/btree/set.rs | 1789
-rw-r--r--  library/alloc/src/collections/btree/set/tests.rs | 856
-rw-r--r--  library/alloc/src/collections/btree/set_val.rs | 29
-rw-r--r--  library/alloc/src/collections/btree/split.rs | 73
-rw-r--r--  library/alloc/src/collections/btree/testing/crash_test.rs | 119
-rw-r--r--  library/alloc/src/collections/btree/testing/mod.rs | 3
-rw-r--r--  library/alloc/src/collections/btree/testing/ord_chaos.rs | 81
-rw-r--r--  library/alloc/src/collections/btree/testing/rng.rs | 28
-rw-r--r--  library/alloc/src/collections/linked_list.rs | 2012
-rw-r--r--  library/alloc/src/collections/linked_list/tests.rs | 1156
-rw-r--r--  library/alloc/src/collections/mod.rs | 154
-rw-r--r--  library/alloc/src/collections/vec_deque/drain.rs | 142
-rw-r--r--  library/alloc/src/collections/vec_deque/into_iter.rs | 72
-rw-r--r--  library/alloc/src/collections/vec_deque/iter.rs | 219
-rw-r--r--  library/alloc/src/collections/vec_deque/iter_mut.rs | 162
-rw-r--r--  library/alloc/src/collections/vec_deque/macros.rs | 19
-rw-r--r--  library/alloc/src/collections/vec_deque/mod.rs | 3137
-rw-r--r--  library/alloc/src/collections/vec_deque/pair_slices.rs | 67
-rw-r--r--  library/alloc/src/collections/vec_deque/ring_slices.rs | 56
-rw-r--r--  library/alloc/src/collections/vec_deque/spec_extend.rs | 132
-rw-r--r--  library/alloc/src/collections/vec_deque/tests.rs | 1110
-rw-r--r--  library/alloc/src/ffi/c_str.rs | 1123
-rw-r--r--  library/alloc/src/ffi/c_str/tests.rs | 228
-rw-r--r--  library/alloc/src/ffi/mod.rs | 88
-rw-r--r--  library/alloc/src/fmt.rs | 617
-rw-r--r--  library/alloc/src/lib.rs | 240
-rw-r--r--  library/alloc/src/macros.rs | 129
-rw-r--r--  library/alloc/src/raw_vec.rs | 519
-rw-r--r--  library/alloc/src/raw_vec/tests.rs | 163
-rw-r--r--  library/alloc/src/rc.rs | 2700
-rw-r--r--  library/alloc/src/rc/tests.rs | 561
-rw-r--r--  library/alloc/src/slice.rs | 1204
-rw-r--r--  library/alloc/src/str.rs | 686
-rw-r--r--  library/alloc/src/string.rs | 2951
-rw-r--r--  library/alloc/src/sync.rs | 2765
-rw-r--r--  library/alloc/src/sync/tests.rs | 620
-rw-r--r--  library/alloc/src/task.rs | 147
-rw-r--r--  library/alloc/src/tests.rs | 151
-rw-r--r--  library/alloc/src/vec/cow.rs | 53
-rw-r--r--  library/alloc/src/vec/drain.rs | 184
-rw-r--r--  library/alloc/src/vec/drain_filter.rs | 143
-rw-r--r--  library/alloc/src/vec/in_place_collect.rs | 302
-rw-r--r--  library/alloc/src/vec/in_place_drop.rs | 24
-rw-r--r--  library/alloc/src/vec/into_iter.rs | 401
-rw-r--r--  library/alloc/src/vec/is_zero.rs | 146
-rw-r--r--  library/alloc/src/vec/mod.rs | 3157
-rw-r--r--  library/alloc/src/vec/partial_eq.rs | 47
-rw-r--r--  library/alloc/src/vec/set_len_on_drop.rs | 28
-rw-r--r--  library/alloc/src/vec/spec_extend.rs | 87
-rw-r--r--  library/alloc/src/vec/spec_from_elem.rs | 61
-rw-r--r--  library/alloc/src/vec/spec_from_iter.rs | 64
-rw-r--r--  library/alloc/src/vec/spec_from_iter_nested.rs | 65
-rw-r--r--  library/alloc/src/vec/splice.rs | 133
-rw-r--r--  library/alloc/tests/arc.rs | 212
-rw-r--r--  library/alloc/tests/borrow.rs | 60
-rw-r--r--  library/alloc/tests/boxed.rs | 167
-rw-r--r--  library/alloc/tests/btree_set_hash.rs | 29
-rw-r--r--  library/alloc/tests/c_str.rs | 19
-rw-r--r--  library/alloc/tests/const_fns.rs | 35
-rw-r--r--  library/alloc/tests/cow_str.rs | 144
-rw-r--r--  library/alloc/tests/fmt.rs | 323
-rw-r--r--  library/alloc/tests/heap.rs | 44
-rw-r--r--  library/alloc/tests/lib.rs | 88
-rw-r--r--  library/alloc/tests/linked_list.rs | 21
-rw-r--r--  library/alloc/tests/rc.rs | 208
-rw-r--r--  library/alloc/tests/slice.rs | 2018
-rw-r--r--  library/alloc/tests/str.rs | 2383
-rw-r--r--  library/alloc/tests/string.rs | 876
-rw-r--r--  library/alloc/tests/thin_box.rs | 262
-rw-r--r--  library/alloc/tests/vec.rs | 2436
-rw-r--r--  library/alloc/tests/vec_deque.rs | 1786
107 files changed, 59509 insertions, 0 deletions
diff --git a/library/alloc/Cargo.toml b/library/alloc/Cargo.toml
new file mode 100644
index 000000000..265020209
--- /dev/null
+++ b/library/alloc/Cargo.toml
@@ -0,0 +1,37 @@
+[package]
+name = "alloc"
+version = "0.0.0"
+license = "MIT OR Apache-2.0"
+repository = "https://github.com/rust-lang/rust.git"
+description = "The Rust core allocation and collections library"
+autotests = false
+autobenches = false
+edition = "2021"
+
+[dependencies]
+core = { path = "../core" }
+compiler_builtins = { version = "0.1.40", features = ['rustc-dep-of-std'] }
+
+[dev-dependencies]
+rand = "0.7"
+rand_xorshift = "0.2"
+
+[[test]]
+name = "collectionstests"
+path = "tests/lib.rs"
+
+[[bench]]
+name = "collectionsbenches"
+path = "benches/lib.rs"
+test = true
+
+[[bench]]
+name = "vec_deque_append_bench"
+path = "benches/vec_deque_append.rs"
+harness = false
+
+[features]
+compiler-builtins-mem = ['compiler_builtins/mem']
+compiler-builtins-c = ["compiler_builtins/c"]
+compiler-builtins-no-asm = ["compiler_builtins/no-asm"]
+compiler-builtins-mangled-names = ["compiler_builtins/mangled-names"]
diff --git a/library/alloc/benches/binary_heap.rs b/library/alloc/benches/binary_heap.rs
new file mode 100644
index 000000000..917e71f25
--- /dev/null
+++ b/library/alloc/benches/binary_heap.rs
@@ -0,0 +1,91 @@
+use std::collections::BinaryHeap;
+
+use rand::seq::SliceRandom;
+use test::{black_box, Bencher};
+
+#[bench]
+fn bench_find_smallest_1000(b: &mut Bencher) {
+ let mut rng = crate::bench_rng();
+ let mut vec: Vec<u32> = (0..100_000).collect();
+ vec.shuffle(&mut rng);
+
+ b.iter(|| {
+ let mut iter = vec.iter().copied();
+ let mut heap: BinaryHeap<_> = iter.by_ref().take(1000).collect();
+
+ for x in iter {
+ let mut max = heap.peek_mut().unwrap();
+ // This comparison should be true only 1% of the time.
+ // Unnecessary `sift_down`s will degrade performance
+ if x < *max {
+ *max = x;
+ }
+ }
+
+ heap
+ })
+}
+
+#[bench]
+fn bench_peek_mut_deref_mut(b: &mut Bencher) {
+ let mut bheap = BinaryHeap::from(vec![42]);
+ let vec: Vec<u32> = (0..1_000_000).collect();
+
+ b.iter(|| {
+ let vec = black_box(&vec);
+ let mut peek_mut = bheap.peek_mut().unwrap();
+ // The compiler shouldn't be able to optimize away the `sift_down`
+ // assignment in `PeekMut`'s `DerefMut` implementation since
+ // the loop might not run.
+ for &i in vec.iter() {
+ *peek_mut = i;
+ }
+ // Remove the already minimal overhead of the sift_down
+ std::mem::forget(peek_mut);
+ })
+}
+
+#[bench]
+fn bench_from_vec(b: &mut Bencher) {
+ let mut rng = crate::bench_rng();
+ let mut vec: Vec<u32> = (0..100_000).collect();
+ vec.shuffle(&mut rng);
+
+ b.iter(|| BinaryHeap::from(vec.clone()))
+}
+
+#[bench]
+fn bench_into_sorted_vec(b: &mut Bencher) {
+ let bheap: BinaryHeap<i32> = (0..10_000).collect();
+
+ b.iter(|| bheap.clone().into_sorted_vec())
+}
+
+#[bench]
+fn bench_push(b: &mut Bencher) {
+ let mut bheap = BinaryHeap::with_capacity(50_000);
+ let mut rng = crate::bench_rng();
+ let mut vec: Vec<u32> = (0..50_000).collect();
+ vec.shuffle(&mut rng);
+
+ b.iter(|| {
+ for &i in vec.iter() {
+ bheap.push(i);
+ }
+ black_box(&mut bheap);
+ bheap.clear();
+ })
+}
+
+#[bench]
+fn bench_pop(b: &mut Bencher) {
+ let mut bheap = BinaryHeap::with_capacity(10_000);
+
+ b.iter(|| {
+ bheap.extend((0..10_000).rev());
+ black_box(&mut bheap);
+ while let Some(elem) = bheap.pop() {
+ black_box(elem);
+ }
+ })
+}
diff --git a/library/alloc/benches/btree/map.rs b/library/alloc/benches/btree/map.rs
new file mode 100644
index 000000000..1f6b87fb0
--- /dev/null
+++ b/library/alloc/benches/btree/map.rs
@@ -0,0 +1,585 @@
+use std::collections::BTreeMap;
+use std::iter::Iterator;
+use std::ops::RangeBounds;
+use std::vec::Vec;
+
+use rand::{seq::SliceRandom, Rng};
+use test::{black_box, Bencher};
+
+macro_rules! map_insert_rand_bench {
+ ($name: ident, $n: expr, $map: ident) => {
+ #[bench]
+ pub fn $name(b: &mut Bencher) {
+ let n: usize = $n;
+ let mut map = $map::new();
+ // setup
+ let mut rng = crate::bench_rng();
+
+ for _ in 0..n {
+ let i = rng.gen::<usize>() % n;
+ map.insert(i, i);
+ }
+
+ // measure
+ b.iter(|| {
+ let k = rng.gen::<usize>() % n;
+ map.insert(k, k);
+ map.remove(&k);
+ });
+ black_box(map);
+ }
+ };
+}
+
+macro_rules! map_insert_seq_bench {
+ ($name: ident, $n: expr, $map: ident) => {
+ #[bench]
+ pub fn $name(b: &mut Bencher) {
+ let mut map = $map::new();
+ let n: usize = $n;
+ // setup
+ for i in 0..n {
+ map.insert(i * 2, i * 2);
+ }
+
+ // measure
+ let mut i = 1;
+ b.iter(|| {
+ map.insert(i, i);
+ map.remove(&i);
+ i = (i + 2) % n;
+ });
+ black_box(map);
+ }
+ };
+}
+
+macro_rules! map_from_iter_rand_bench {
+ ($name: ident, $n: expr, $map: ident) => {
+ #[bench]
+ pub fn $name(b: &mut Bencher) {
+ let n: usize = $n;
+ // setup
+ let mut rng = crate::bench_rng();
+ let mut vec = Vec::with_capacity(n);
+
+ for _ in 0..n {
+ let i = rng.gen::<usize>() % n;
+ vec.push((i, i));
+ }
+
+ // measure
+ b.iter(|| {
+ let map: $map<_, _> = vec.iter().copied().collect();
+ black_box(map);
+ });
+ }
+ };
+}
+
+macro_rules! map_from_iter_seq_bench {
+ ($name: ident, $n: expr, $map: ident) => {
+ #[bench]
+ pub fn $name(b: &mut Bencher) {
+ let n: usize = $n;
+ // setup
+ let mut vec = Vec::with_capacity(n);
+
+ for i in 0..n {
+ vec.push((i, i));
+ }
+
+ // measure
+ b.iter(|| {
+ let map: $map<_, _> = vec.iter().copied().collect();
+ black_box(map);
+ });
+ }
+ };
+}
+
+macro_rules! map_find_rand_bench {
+ ($name: ident, $n: expr, $map: ident) => {
+ #[bench]
+ pub fn $name(b: &mut Bencher) {
+ let mut map = $map::new();
+ let n: usize = $n;
+
+ // setup
+ let mut rng = crate::bench_rng();
+ let mut keys: Vec<_> = (0..n).map(|_| rng.gen::<usize>() % n).collect();
+
+ for &k in &keys {
+ map.insert(k, k);
+ }
+
+ keys.shuffle(&mut rng);
+
+ // measure
+ let mut i = 0;
+ b.iter(|| {
+ let t = map.get(&keys[i]);
+ i = (i + 1) % n;
+ black_box(t);
+ })
+ }
+ };
+}
+
+macro_rules! map_find_seq_bench {
+ ($name: ident, $n: expr, $map: ident) => {
+ #[bench]
+ pub fn $name(b: &mut Bencher) {
+ let mut map = $map::new();
+ let n: usize = $n;
+
+ // setup
+ for i in 0..n {
+ map.insert(i, i);
+ }
+
+ // measure
+ let mut i = 0;
+ b.iter(|| {
+ let x = map.get(&i);
+ i = (i + 1) % n;
+ black_box(x);
+ })
+ }
+ };
+}
+
+map_insert_rand_bench! {insert_rand_100, 100, BTreeMap}
+map_insert_rand_bench! {insert_rand_10_000, 10_000, BTreeMap}
+
+map_insert_seq_bench! {insert_seq_100, 100, BTreeMap}
+map_insert_seq_bench! {insert_seq_10_000, 10_000, BTreeMap}
+
+map_from_iter_rand_bench! {from_iter_rand_100, 100, BTreeMap}
+map_from_iter_rand_bench! {from_iter_rand_10_000, 10_000, BTreeMap}
+
+map_from_iter_seq_bench! {from_iter_seq_100, 100, BTreeMap}
+map_from_iter_seq_bench! {from_iter_seq_10_000, 10_000, BTreeMap}
+
+map_find_rand_bench! {find_rand_100, 100, BTreeMap}
+map_find_rand_bench! {find_rand_10_000, 10_000, BTreeMap}
+
+map_find_seq_bench! {find_seq_100, 100, BTreeMap}
+map_find_seq_bench! {find_seq_10_000, 10_000, BTreeMap}
+
+fn bench_iteration(b: &mut Bencher, size: i32) {
+ let mut map = BTreeMap::<i32, i32>::new();
+ let mut rng = crate::bench_rng();
+
+ for _ in 0..size {
+ map.insert(rng.gen(), rng.gen());
+ }
+
+ b.iter(|| {
+ for entry in &map {
+ black_box(entry);
+ }
+ });
+}
+
+#[bench]
+pub fn iteration_20(b: &mut Bencher) {
+ bench_iteration(b, 20);
+}
+
+#[bench]
+pub fn iteration_1000(b: &mut Bencher) {
+ bench_iteration(b, 1000);
+}
+
+#[bench]
+pub fn iteration_100000(b: &mut Bencher) {
+ bench_iteration(b, 100000);
+}
+
+fn bench_iteration_mut(b: &mut Bencher, size: i32) {
+ let mut map = BTreeMap::<i32, i32>::new();
+ let mut rng = crate::bench_rng();
+
+ for _ in 0..size {
+ map.insert(rng.gen(), rng.gen());
+ }
+
+ b.iter(|| {
+ for kv in map.iter_mut() {
+ black_box(kv);
+ }
+ });
+}
+
+#[bench]
+pub fn iteration_mut_20(b: &mut Bencher) {
+ bench_iteration_mut(b, 20);
+}
+
+#[bench]
+pub fn iteration_mut_1000(b: &mut Bencher) {
+ bench_iteration_mut(b, 1000);
+}
+
+#[bench]
+pub fn iteration_mut_100000(b: &mut Bencher) {
+ bench_iteration_mut(b, 100000);
+}
+
+fn bench_first_and_last_nightly(b: &mut Bencher, size: i32) {
+ let map: BTreeMap<_, _> = (0..size).map(|i| (i, i)).collect();
+ b.iter(|| {
+ for _ in 0..10 {
+ black_box(map.first_key_value());
+ black_box(map.last_key_value());
+ }
+ });
+}
+
+fn bench_first_and_last_stable(b: &mut Bencher, size: i32) {
+ let map: BTreeMap<_, _> = (0..size).map(|i| (i, i)).collect();
+ b.iter(|| {
+ for _ in 0..10 {
+ black_box(map.iter().next());
+ black_box(map.iter().next_back());
+ }
+ });
+}
+
+#[bench]
+pub fn first_and_last_0_nightly(b: &mut Bencher) {
+ bench_first_and_last_nightly(b, 0);
+}
+
+#[bench]
+pub fn first_and_last_0_stable(b: &mut Bencher) {
+ bench_first_and_last_stable(b, 0);
+}
+
+#[bench]
+pub fn first_and_last_100_nightly(b: &mut Bencher) {
+ bench_first_and_last_nightly(b, 100);
+}
+
+#[bench]
+pub fn first_and_last_100_stable(b: &mut Bencher) {
+ bench_first_and_last_stable(b, 100);
+}
+
+#[bench]
+pub fn first_and_last_10k_nightly(b: &mut Bencher) {
+ bench_first_and_last_nightly(b, 10_000);
+}
+
+#[bench]
+pub fn first_and_last_10k_stable(b: &mut Bencher) {
+ bench_first_and_last_stable(b, 10_000);
+}
+
+const BENCH_RANGE_SIZE: i32 = 145;
+const BENCH_RANGE_COUNT: i32 = BENCH_RANGE_SIZE * (BENCH_RANGE_SIZE - 1) / 2;
+
+fn bench_range<F, R>(b: &mut Bencher, f: F)
+where
+ F: Fn(i32, i32) -> R,
+ R: RangeBounds<i32>,
+{
+ let map: BTreeMap<_, _> = (0..BENCH_RANGE_SIZE).map(|i| (i, i)).collect();
+ b.iter(|| {
+ let mut c = 0;
+ for i in 0..BENCH_RANGE_SIZE {
+ for j in i + 1..BENCH_RANGE_SIZE {
+ let _ = black_box(map.range(f(i, j)));
+ c += 1;
+ }
+ }
+ debug_assert_eq!(c, BENCH_RANGE_COUNT);
+ });
+}
+
+#[bench]
+pub fn range_included_excluded(b: &mut Bencher) {
+ bench_range(b, |i, j| i..j);
+}
+
+#[bench]
+pub fn range_included_included(b: &mut Bencher) {
+ bench_range(b, |i, j| i..=j);
+}
+
+#[bench]
+pub fn range_included_unbounded(b: &mut Bencher) {
+ bench_range(b, |i, _| i..);
+}
+
+#[bench]
+pub fn range_unbounded_unbounded(b: &mut Bencher) {
+ bench_range(b, |_, _| ..);
+}
+
+fn bench_iter(b: &mut Bencher, repeats: i32, size: i32) {
+ let map: BTreeMap<_, _> = (0..size).map(|i| (i, i)).collect();
+ b.iter(|| {
+ for _ in 0..repeats {
+ let _ = black_box(map.iter());
+ }
+ });
+}
+
+/// Contrast range_unbounded_unbounded with `iter()`.
+#[bench]
+pub fn range_unbounded_vs_iter(b: &mut Bencher) {
+ bench_iter(b, BENCH_RANGE_COUNT, BENCH_RANGE_SIZE);
+}
+
+#[bench]
+pub fn iter_0(b: &mut Bencher) {
+ bench_iter(b, 1_000, 0);
+}
+
+#[bench]
+pub fn iter_1(b: &mut Bencher) {
+ bench_iter(b, 1_000, 1);
+}
+
+#[bench]
+pub fn iter_100(b: &mut Bencher) {
+ bench_iter(b, 1_000, 100);
+}
+
+#[bench]
+pub fn iter_10k(b: &mut Bencher) {
+ bench_iter(b, 1_000, 10_000);
+}
+
+#[bench]
+pub fn iter_1m(b: &mut Bencher) {
+ bench_iter(b, 1_000, 1_000_000);
+}
+
+const FAT: usize = 256;
+
+// The returned map has small keys and values.
+// Benchmarks on it have a counterpart in set.rs with the same keys and no values at all.
+fn slim_map(n: usize) -> BTreeMap<usize, usize> {
+ (0..n).map(|i| (i, i)).collect::<BTreeMap<_, _>>()
+}
+
+// The returned map has small keys and large values.
+fn fat_val_map(n: usize) -> BTreeMap<usize, [usize; FAT]> {
+ (0..n).map(|i| (i, [i; FAT])).collect::<BTreeMap<_, _>>()
+}
+
+#[bench]
+pub fn clone_slim_100(b: &mut Bencher) {
+ let src = slim_map(100);
+ b.iter(|| src.clone())
+}
+
+#[bench]
+pub fn clone_slim_100_and_clear(b: &mut Bencher) {
+ let src = slim_map(100);
+ b.iter(|| src.clone().clear())
+}
+
+#[bench]
+pub fn clone_slim_100_and_drain_all(b: &mut Bencher) {
+ let src = slim_map(100);
+ b.iter(|| src.clone().drain_filter(|_, _| true).count())
+}
+
+#[bench]
+pub fn clone_slim_100_and_drain_half(b: &mut Bencher) {
+ let src = slim_map(100);
+ b.iter(|| {
+ let mut map = src.clone();
+ assert_eq!(map.drain_filter(|i, _| i % 2 == 0).count(), 100 / 2);
+ assert_eq!(map.len(), 100 / 2);
+ })
+}
+
+#[bench]
+pub fn clone_slim_100_and_into_iter(b: &mut Bencher) {
+ let src = slim_map(100);
+ b.iter(|| src.clone().into_iter().count())
+}
+
+#[bench]
+pub fn clone_slim_100_and_pop_all(b: &mut Bencher) {
+ let src = slim_map(100);
+ b.iter(|| {
+ let mut map = src.clone();
+ while map.pop_first().is_some() {}
+ map
+ });
+}
+
+#[bench]
+pub fn clone_slim_100_and_remove_all(b: &mut Bencher) {
+ let src = slim_map(100);
+ b.iter(|| {
+ let mut map = src.clone();
+ while let Some(elt) = map.iter().map(|(&i, _)| i).next() {
+ let v = map.remove(&elt);
+ debug_assert!(v.is_some());
+ }
+ map
+ });
+}
+
+#[bench]
+pub fn clone_slim_100_and_remove_half(b: &mut Bencher) {
+ let src = slim_map(100);
+ b.iter(|| {
+ let mut map = src.clone();
+ for i in (0..100).step_by(2) {
+ let v = map.remove(&i);
+ debug_assert!(v.is_some());
+ }
+ assert_eq!(map.len(), 100 / 2);
+ map
+ })
+}
+
+#[bench]
+pub fn clone_slim_10k(b: &mut Bencher) {
+ let src = slim_map(10_000);
+ b.iter(|| src.clone())
+}
+
+#[bench]
+pub fn clone_slim_10k_and_clear(b: &mut Bencher) {
+ let src = slim_map(10_000);
+ b.iter(|| src.clone().clear())
+}
+
+#[bench]
+pub fn clone_slim_10k_and_drain_all(b: &mut Bencher) {
+ let src = slim_map(10_000);
+ b.iter(|| src.clone().drain_filter(|_, _| true).count())
+}
+
+#[bench]
+pub fn clone_slim_10k_and_drain_half(b: &mut Bencher) {
+ let src = slim_map(10_000);
+ b.iter(|| {
+ let mut map = src.clone();
+ assert_eq!(map.drain_filter(|i, _| i % 2 == 0).count(), 10_000 / 2);
+ assert_eq!(map.len(), 10_000 / 2);
+ })
+}
+
+#[bench]
+pub fn clone_slim_10k_and_into_iter(b: &mut Bencher) {
+ let src = slim_map(10_000);
+ b.iter(|| src.clone().into_iter().count())
+}
+
+#[bench]
+pub fn clone_slim_10k_and_pop_all(b: &mut Bencher) {
+ let src = slim_map(10_000);
+ b.iter(|| {
+ let mut map = src.clone();
+ while map.pop_first().is_some() {}
+ map
+ });
+}
+
+#[bench]
+pub fn clone_slim_10k_and_remove_all(b: &mut Bencher) {
+ let src = slim_map(10_000);
+ b.iter(|| {
+ let mut map = src.clone();
+ while let Some(elt) = map.iter().map(|(&i, _)| i).next() {
+ let v = map.remove(&elt);
+ debug_assert!(v.is_some());
+ }
+ map
+ });
+}
+
+#[bench]
+pub fn clone_slim_10k_and_remove_half(b: &mut Bencher) {
+ let src = slim_map(10_000);
+ b.iter(|| {
+ let mut map = src.clone();
+ for i in (0..10_000).step_by(2) {
+ let v = map.remove(&i);
+ debug_assert!(v.is_some());
+ }
+ assert_eq!(map.len(), 10_000 / 2);
+ map
+ })
+}
+
+#[bench]
+pub fn clone_fat_val_100(b: &mut Bencher) {
+ let src = fat_val_map(100);
+ b.iter(|| src.clone())
+}
+
+#[bench]
+pub fn clone_fat_val_100_and_clear(b: &mut Bencher) {
+ let src = fat_val_map(100);
+ b.iter(|| src.clone().clear())
+}
+
+#[bench]
+pub fn clone_fat_val_100_and_drain_all(b: &mut Bencher) {
+ let src = fat_val_map(100);
+ b.iter(|| src.clone().drain_filter(|_, _| true).count())
+}
+
+#[bench]
+pub fn clone_fat_val_100_and_drain_half(b: &mut Bencher) {
+ let src = fat_val_map(100);
+ b.iter(|| {
+ let mut map = src.clone();
+ assert_eq!(map.drain_filter(|i, _| i % 2 == 0).count(), 100 / 2);
+ assert_eq!(map.len(), 100 / 2);
+ })
+}
+
+#[bench]
+pub fn clone_fat_val_100_and_into_iter(b: &mut Bencher) {
+ let src = fat_val_map(100);
+ b.iter(|| src.clone().into_iter().count())
+}
+
+#[bench]
+pub fn clone_fat_val_100_and_pop_all(b: &mut Bencher) {
+ let src = fat_val_map(100);
+ b.iter(|| {
+ let mut map = src.clone();
+ while map.pop_first().is_some() {}
+ map
+ });
+}
+
+#[bench]
+pub fn clone_fat_val_100_and_remove_all(b: &mut Bencher) {
+ let src = fat_val_map(100);
+ b.iter(|| {
+ let mut map = src.clone();
+ while let Some(elt) = map.iter().map(|(&i, _)| i).next() {
+ let v = map.remove(&elt);
+ debug_assert!(v.is_some());
+ }
+ map
+ });
+}
+
+#[bench]
+pub fn clone_fat_val_100_and_remove_half(b: &mut Bencher) {
+ let src = fat_val_map(100);
+ b.iter(|| {
+ let mut map = src.clone();
+ for i in (0..100).step_by(2) {
+ let v = map.remove(&i);
+ debug_assert!(v.is_some());
+ }
+ assert_eq!(map.len(), 100 / 2);
+ map
+ })
+}
diff --git a/library/alloc/benches/btree/mod.rs b/library/alloc/benches/btree/mod.rs
new file mode 100644
index 000000000..095ca5dd2
--- /dev/null
+++ b/library/alloc/benches/btree/mod.rs
@@ -0,0 +1,2 @@
+mod map;
+mod set;
diff --git a/library/alloc/benches/btree/set.rs b/library/alloc/benches/btree/set.rs
new file mode 100644
index 000000000..3f4b0e0f1
--- /dev/null
+++ b/library/alloc/benches/btree/set.rs
@@ -0,0 +1,224 @@
+use std::collections::BTreeSet;
+
+use rand::Rng;
+use test::Bencher;
+
+fn random(n: usize) -> BTreeSet<usize> {
+ let mut rng = crate::bench_rng();
+ let mut set = BTreeSet::new();
+ while set.len() < n {
+ set.insert(rng.gen());
+ }
+ assert_eq!(set.len(), n);
+ set
+}
+
+fn neg(n: usize) -> BTreeSet<i32> {
+ let set: BTreeSet<i32> = (-(n as i32)..=-1).collect();
+ assert_eq!(set.len(), n);
+ set
+}
+
+fn pos(n: usize) -> BTreeSet<i32> {
+ let set: BTreeSet<i32> = (1..=(n as i32)).collect();
+ assert_eq!(set.len(), n);
+ set
+}
+
+fn stagger(n1: usize, factor: usize) -> [BTreeSet<u32>; 2] {
+ let n2 = n1 * factor;
+ let mut sets = [BTreeSet::new(), BTreeSet::new()];
+ for i in 0..(n1 + n2) {
+ let b = i % (factor + 1) != 0;
+ sets[b as usize].insert(i as u32);
+ }
+ assert_eq!(sets[0].len(), n1);
+ assert_eq!(sets[1].len(), n2);
+ sets
+}
+
+macro_rules! set_bench {
+ ($name: ident, $set_func: ident, $result_func: ident, $sets: expr) => {
+ #[bench]
+ pub fn $name(b: &mut Bencher) {
+ // setup
+ let sets = $sets;
+
+ // measure
+ b.iter(|| sets[0].$set_func(&sets[1]).$result_func())
+ }
+ };
+}
+
+fn slim_set(n: usize) -> BTreeSet<usize> {
+ (0..n).collect::<BTreeSet<_>>()
+}
+
+#[bench]
+pub fn clone_100(b: &mut Bencher) {
+ let src = slim_set(100);
+ b.iter(|| src.clone())
+}
+
+#[bench]
+pub fn clone_100_and_clear(b: &mut Bencher) {
+ let src = slim_set(100);
+ b.iter(|| src.clone().clear())
+}
+
+#[bench]
+pub fn clone_100_and_drain_all(b: &mut Bencher) {
+ let src = slim_set(100);
+ b.iter(|| src.clone().drain_filter(|_| true).count())
+}
+
+#[bench]
+pub fn clone_100_and_drain_half(b: &mut Bencher) {
+ let src = slim_set(100);
+ b.iter(|| {
+ let mut set = src.clone();
+ assert_eq!(set.drain_filter(|i| i % 2 == 0).count(), 100 / 2);
+ assert_eq!(set.len(), 100 / 2);
+ })
+}
+
+#[bench]
+pub fn clone_100_and_into_iter(b: &mut Bencher) {
+ let src = slim_set(100);
+ b.iter(|| src.clone().into_iter().count())
+}
+
+#[bench]
+pub fn clone_100_and_pop_all(b: &mut Bencher) {
+ let src = slim_set(100);
+ b.iter(|| {
+ let mut set = src.clone();
+ while set.pop_first().is_some() {}
+ set
+ });
+}
+
+#[bench]
+pub fn clone_100_and_remove_all(b: &mut Bencher) {
+ let src = slim_set(100);
+ b.iter(|| {
+ let mut set = src.clone();
+ while let Some(elt) = set.iter().copied().next() {
+ let ok = set.remove(&elt);
+ debug_assert!(ok);
+ }
+ set
+ });
+}
+
+#[bench]
+pub fn clone_100_and_remove_half(b: &mut Bencher) {
+ let src = slim_set(100);
+ b.iter(|| {
+ let mut set = src.clone();
+ for i in (0..100).step_by(2) {
+ let ok = set.remove(&i);
+ debug_assert!(ok);
+ }
+ assert_eq!(set.len(), 100 / 2);
+ set
+ })
+}
+
+#[bench]
+pub fn clone_10k(b: &mut Bencher) {
+ let src = slim_set(10_000);
+ b.iter(|| src.clone())
+}
+
+#[bench]
+pub fn clone_10k_and_clear(b: &mut Bencher) {
+ let src = slim_set(10_000);
+ b.iter(|| src.clone().clear())
+}
+
+#[bench]
+pub fn clone_10k_and_drain_all(b: &mut Bencher) {
+ let src = slim_set(10_000);
+ b.iter(|| src.clone().drain_filter(|_| true).count())
+}
+
+#[bench]
+pub fn clone_10k_and_drain_half(b: &mut Bencher) {
+ let src = slim_set(10_000);
+ b.iter(|| {
+ let mut set = src.clone();
+ assert_eq!(set.drain_filter(|i| i % 2 == 0).count(), 10_000 / 2);
+ assert_eq!(set.len(), 10_000 / 2);
+ })
+}
+
+#[bench]
+pub fn clone_10k_and_into_iter(b: &mut Bencher) {
+ let src = slim_set(10_000);
+ b.iter(|| src.clone().into_iter().count())
+}
+
+#[bench]
+pub fn clone_10k_and_pop_all(b: &mut Bencher) {
+ let src = slim_set(10_000);
+ b.iter(|| {
+ let mut set = src.clone();
+ while set.pop_first().is_some() {}
+ set
+ });
+}
+
+#[bench]
+pub fn clone_10k_and_remove_all(b: &mut Bencher) {
+ let src = slim_set(10_000);
+ b.iter(|| {
+ let mut set = src.clone();
+ while let Some(elt) = set.iter().copied().next() {
+ let ok = set.remove(&elt);
+ debug_assert!(ok);
+ }
+ set
+ });
+}
+
+#[bench]
+pub fn clone_10k_and_remove_half(b: &mut Bencher) {
+ let src = slim_set(10_000);
+ b.iter(|| {
+ let mut set = src.clone();
+ for i in (0..10_000).step_by(2) {
+ let ok = set.remove(&i);
+ debug_assert!(ok);
+ }
+ assert_eq!(set.len(), 10_000 / 2);
+ set
+ })
+}
+
+set_bench! {intersection_100_neg_vs_100_pos, intersection, count, [neg(100), pos(100)]}
+set_bench! {intersection_100_neg_vs_10k_pos, intersection, count, [neg(100), pos(10_000)]}
+set_bench! {intersection_100_pos_vs_100_neg, intersection, count, [pos(100), neg(100)]}
+set_bench! {intersection_100_pos_vs_10k_neg, intersection, count, [pos(100), neg(10_000)]}
+set_bench! {intersection_10k_neg_vs_100_pos, intersection, count, [neg(10_000), pos(100)]}
+set_bench! {intersection_10k_neg_vs_10k_pos, intersection, count, [neg(10_000), pos(10_000)]}
+set_bench! {intersection_10k_pos_vs_100_neg, intersection, count, [pos(10_000), neg(100)]}
+set_bench! {intersection_10k_pos_vs_10k_neg, intersection, count, [pos(10_000), neg(10_000)]}
+set_bench! {intersection_random_100_vs_100, intersection, count, [random(100), random(100)]}
+set_bench! {intersection_random_100_vs_10k, intersection, count, [random(100), random(10_000)]}
+set_bench! {intersection_random_10k_vs_100, intersection, count, [random(10_000), random(100)]}
+set_bench! {intersection_random_10k_vs_10k, intersection, count, [random(10_000), random(10_000)]}
+set_bench! {intersection_staggered_100_vs_100, intersection, count, stagger(100, 1)}
+set_bench! {intersection_staggered_10k_vs_10k, intersection, count, stagger(10_000, 1)}
+set_bench! {intersection_staggered_100_vs_10k, intersection, count, stagger(100, 100)}
+set_bench! {difference_random_100_vs_100, difference, count, [random(100), random(100)]}
+set_bench! {difference_random_100_vs_10k, difference, count, [random(100), random(10_000)]}
+set_bench! {difference_random_10k_vs_100, difference, count, [random(10_000), random(100)]}
+set_bench! {difference_random_10k_vs_10k, difference, count, [random(10_000), random(10_000)]}
+set_bench! {difference_staggered_100_vs_100, difference, count, stagger(100, 1)}
+set_bench! {difference_staggered_10k_vs_10k, difference, count, stagger(10_000, 1)}
+set_bench! {difference_staggered_100_vs_10k, difference, count, stagger(100, 100)}
+set_bench! {is_subset_100_vs_100, is_subset, clone, [pos(100), pos(100)]}
+set_bench! {is_subset_100_vs_10k, is_subset, clone, [pos(100), pos(10_000)]}
+set_bench! {is_subset_10k_vs_100, is_subset, clone, [pos(10_000), pos(100)]}
+set_bench! {is_subset_10k_vs_10k, is_subset, clone, [pos(10_000), pos(10_000)]}
diff --git a/library/alloc/benches/lib.rs b/library/alloc/benches/lib.rs
new file mode 100644
index 000000000..72ac897d4
--- /dev/null
+++ b/library/alloc/benches/lib.rs
@@ -0,0 +1,28 @@
+// Disabling on android for the time being
+// See https://github.com/rust-lang/rust/issues/73535#event-3477699747
+#![cfg(not(target_os = "android"))]
+#![feature(btree_drain_filter)]
+#![feature(iter_next_chunk)]
+#![feature(map_first_last)]
+#![feature(repr_simd)]
+#![feature(slice_partition_dedup)]
+#![feature(test)]
+
+extern crate test;
+
+mod binary_heap;
+mod btree;
+mod linked_list;
+mod slice;
+mod str;
+mod string;
+mod vec;
+mod vec_deque;
+
+/// Returns a `rand::Rng` seeded with a consistent seed.
+///
+/// This is done to avoid introducing nondeterminism in benchmark results.
+fn bench_rng() -> rand_xorshift::XorShiftRng {
+ const SEED: [u8; 16] = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15];
+ rand::SeedableRng::from_seed(SEED)
+}
diff --git a/library/alloc/benches/linked_list.rs b/library/alloc/benches/linked_list.rs
new file mode 100644
index 000000000..29c5ad2bc
--- /dev/null
+++ b/library/alloc/benches/linked_list.rs
@@ -0,0 +1,77 @@
+use std::collections::LinkedList;
+use test::Bencher;
+
+#[bench]
+fn bench_collect_into(b: &mut Bencher) {
+ let v = &[0; 64];
+ b.iter(|| {
+ let _: LinkedList<_> = v.iter().cloned().collect();
+ })
+}
+
+#[bench]
+fn bench_push_front(b: &mut Bencher) {
+ let mut m: LinkedList<_> = LinkedList::new();
+ b.iter(|| {
+ m.push_front(0);
+ })
+}
+
+#[bench]
+fn bench_push_back(b: &mut Bencher) {
+ let mut m: LinkedList<_> = LinkedList::new();
+ b.iter(|| {
+ m.push_back(0);
+ })
+}
+
+#[bench]
+fn bench_push_back_pop_back(b: &mut Bencher) {
+ let mut m: LinkedList<_> = LinkedList::new();
+ b.iter(|| {
+ m.push_back(0);
+ m.pop_back();
+ })
+}
+
+#[bench]
+fn bench_push_front_pop_front(b: &mut Bencher) {
+ let mut m: LinkedList<_> = LinkedList::new();
+ b.iter(|| {
+ m.push_front(0);
+ m.pop_front();
+ })
+}
+
+#[bench]
+fn bench_iter(b: &mut Bencher) {
+ let v = &[0; 128];
+ let m: LinkedList<_> = v.iter().cloned().collect();
+ b.iter(|| {
+ assert!(m.iter().count() == 128);
+ })
+}
+#[bench]
+fn bench_iter_mut(b: &mut Bencher) {
+ let v = &[0; 128];
+ let mut m: LinkedList<_> = v.iter().cloned().collect();
+ b.iter(|| {
+ assert!(m.iter_mut().count() == 128);
+ })
+}
+#[bench]
+fn bench_iter_rev(b: &mut Bencher) {
+ let v = &[0; 128];
+ let m: LinkedList<_> = v.iter().cloned().collect();
+ b.iter(|| {
+ assert!(m.iter().rev().count() == 128);
+ })
+}
+#[bench]
+fn bench_iter_mut_rev(b: &mut Bencher) {
+ let v = &[0; 128];
+ let mut m: LinkedList<_> = v.iter().cloned().collect();
+ b.iter(|| {
+ assert!(m.iter_mut().rev().count() == 128);
+ })
+}
diff --git a/library/alloc/benches/slice.rs b/library/alloc/benches/slice.rs
new file mode 100644
index 000000000..bd6f38f2f
--- /dev/null
+++ b/library/alloc/benches/slice.rs
@@ -0,0 +1,379 @@
+use std::{mem, ptr};
+
+use rand::distributions::{Alphanumeric, Standard};
+use rand::Rng;
+use test::{black_box, Bencher};
+
+#[bench]
+fn iterator(b: &mut Bencher) {
+ // peculiar numbers to stop LLVM from optimising the summation
+ // out.
+ let v: Vec<_> = (0..100).map(|i| i ^ (i << 1) ^ (i >> 1)).collect();
+
+ b.iter(|| {
+ let mut sum = 0;
+ for x in &v {
+ sum += *x;
+ }
+ // sum == 11806, to stop dead code elimination.
+ if sum == 0 {
+ panic!()
+ }
+ })
+}
+
+#[bench]
+fn mut_iterator(b: &mut Bencher) {
+ let mut v = vec![0; 100];
+
+ b.iter(|| {
+ let mut i = 0;
+ for x in &mut v {
+ *x = i;
+ i += 1;
+ }
+ })
+}
+
+#[bench]
+fn concat(b: &mut Bencher) {
+ let xss: Vec<Vec<i32>> = (0..100).map(|i| (0..i).collect()).collect();
+ b.iter(|| {
+ xss.concat();
+ });
+}
+
+#[bench]
+fn join(b: &mut Bencher) {
+ let xss: Vec<Vec<i32>> = (0..100).map(|i| (0..i).collect()).collect();
+ b.iter(|| xss.join(&0));
+}
+
+#[bench]
+fn push(b: &mut Bencher) {
+ let mut vec = Vec::<i32>::new();
+ b.iter(|| {
+ vec.push(0);
+ black_box(&vec);
+ });
+}
+
+#[bench]
+fn starts_with_same_vector(b: &mut Bencher) {
+ let vec: Vec<_> = (0..100).collect();
+ b.iter(|| vec.starts_with(&vec))
+}
+
+#[bench]
+fn starts_with_single_element(b: &mut Bencher) {
+ let vec: Vec<_> = vec![0];
+ b.iter(|| vec.starts_with(&vec))
+}
+
+#[bench]
+fn starts_with_diff_one_element_at_end(b: &mut Bencher) {
+ let vec: Vec<_> = (0..100).collect();
+ let mut match_vec: Vec<_> = (0..99).collect();
+ match_vec.push(0);
+ b.iter(|| vec.starts_with(&match_vec))
+}
+
+#[bench]
+fn ends_with_same_vector(b: &mut Bencher) {
+ let vec: Vec<_> = (0..100).collect();
+ b.iter(|| vec.ends_with(&vec))
+}
+
+#[bench]
+fn ends_with_single_element(b: &mut Bencher) {
+ let vec: Vec<_> = vec![0];
+ b.iter(|| vec.ends_with(&vec))
+}
+
+#[bench]
+fn ends_with_diff_one_element_at_beginning(b: &mut Bencher) {
+ let vec: Vec<_> = (0..100).collect();
+ let mut match_vec: Vec<_> = (0..100).collect();
+ match_vec[0] = 200;
+ b.iter(|| vec.starts_with(&match_vec))
+}
+
+#[bench]
+fn contains_last_element(b: &mut Bencher) {
+ let vec: Vec<_> = (0..100).collect();
+ b.iter(|| vec.contains(&99))
+}
+
+#[bench]
+fn zero_1kb_from_elem(b: &mut Bencher) {
+ b.iter(|| vec![0u8; 1024]);
+}
+
+#[bench]
+fn zero_1kb_set_memory(b: &mut Bencher) {
+ b.iter(|| {
+ let mut v = Vec::<u8>::with_capacity(1024);
+ unsafe {
+ let vp = v.as_mut_ptr();
+ ptr::write_bytes(vp, 0, 1024);
+ v.set_len(1024);
+ }
+ v
+ });
+}
+
+#[bench]
+fn zero_1kb_loop_set(b: &mut Bencher) {
+ b.iter(|| {
+ let mut v = Vec::<u8>::with_capacity(1024);
+ unsafe {
+ v.set_len(1024);
+ }
+ for i in 0..1024 {
+ v[i] = 0;
+ }
+ });
+}
+
+#[bench]
+fn zero_1kb_mut_iter(b: &mut Bencher) {
+ b.iter(|| {
+ let mut v = Vec::<u8>::with_capacity(1024);
+ unsafe {
+ v.set_len(1024);
+ }
+ for x in &mut v {
+ *x = 0;
+ }
+ v
+ });
+}
+
+#[bench]
+fn random_inserts(b: &mut Bencher) {
+ let mut rng = crate::bench_rng();
+ b.iter(|| {
+ let mut v = vec![(0, 0); 30];
+ for _ in 0..100 {
+ let l = v.len();
+ v.insert(rng.gen::<usize>() % (l + 1), (1, 1));
+ }
+ })
+}
+
+#[bench]
+fn random_removes(b: &mut Bencher) {
+ let mut rng = crate::bench_rng();
+ b.iter(|| {
+ let mut v = vec![(0, 0); 130];
+ for _ in 0..100 {
+ let l = v.len();
+ v.remove(rng.gen::<usize>() % l);
+ }
+ })
+}
+
+fn gen_ascending(len: usize) -> Vec<u64> {
+ (0..len as u64).collect()
+}
+
+fn gen_descending(len: usize) -> Vec<u64> {
+ (0..len as u64).rev().collect()
+}
+
+fn gen_random(len: usize) -> Vec<u64> {
+ let mut rng = crate::bench_rng();
+ (&mut rng).sample_iter(&Standard).take(len).collect()
+}
+
+fn gen_random_bytes(len: usize) -> Vec<u8> {
+ let mut rng = crate::bench_rng();
+ (&mut rng).sample_iter(&Standard).take(len).collect()
+}
+
+fn gen_mostly_ascending(len: usize) -> Vec<u64> {
+ let mut rng = crate::bench_rng();
+ let mut v = gen_ascending(len);
+ for _ in (0usize..).take_while(|x| x * x <= len) {
+ let x = rng.gen::<usize>() % len;
+ let y = rng.gen::<usize>() % len;
+ v.swap(x, y);
+ }
+ v
+}
+
+fn gen_mostly_descending(len: usize) -> Vec<u64> {
+ let mut rng = crate::bench_rng();
+ let mut v = gen_descending(len);
+ for _ in (0usize..).take_while(|x| x * x <= len) {
+ let x = rng.gen::<usize>() % len;
+ let y = rng.gen::<usize>() % len;
+ v.swap(x, y);
+ }
+ v
+}
+
+fn gen_strings(len: usize) -> Vec<String> {
+ let mut rng = crate::bench_rng();
+ let mut v = vec![];
+ for _ in 0..len {
+ let n = rng.gen::<usize>() % 20 + 1;
+ v.push((&mut rng).sample_iter(&Alphanumeric).take(n).collect());
+ }
+ v
+}
+
+fn gen_big_random(len: usize) -> Vec<[u64; 16]> {
+ let mut rng = crate::bench_rng();
+ (&mut rng).sample_iter(&Standard).map(|x| [x; 16]).take(len).collect()
+}
+
+macro_rules! sort {
+ ($f:ident, $name:ident, $gen:expr, $len:expr) => {
+ #[bench]
+ fn $name(b: &mut Bencher) {
+ let v = $gen($len);
+ b.iter(|| v.clone().$f());
+ b.bytes = $len * mem::size_of_val(&$gen(1)[0]) as u64;
+ }
+ };
+}
+
+macro_rules! sort_strings {
+ ($f:ident, $name:ident, $gen:expr, $len:expr) => {
+ #[bench]
+ fn $name(b: &mut Bencher) {
+ let v = $gen($len);
+ let v = v.iter().map(|s| &**s).collect::<Vec<&str>>();
+ b.iter(|| v.clone().$f());
+ b.bytes = $len * mem::size_of::<&str>() as u64;
+ }
+ };
+}
+
+macro_rules! sort_expensive {
+ ($f:ident, $name:ident, $gen:expr, $len:expr) => {
+ #[bench]
+ fn $name(b: &mut Bencher) {
+ let v = $gen($len);
+ b.iter(|| {
+ let mut v = v.clone();
+ let mut count = 0;
+ v.$f(|a: &u64, b: &u64| {
+ count += 1;
+ if count % 1_000_000_000 == 0 {
+ panic!("should not happen");
+ }
+ (*a as f64).cos().partial_cmp(&(*b as f64).cos()).unwrap()
+ });
+ black_box(count);
+ });
+ b.bytes = $len * mem::size_of_val(&$gen(1)[0]) as u64;
+ }
+ };
+}
+
+macro_rules! sort_lexicographic {
+ ($f:ident, $name:ident, $gen:expr, $len:expr) => {
+ #[bench]
+ fn $name(b: &mut Bencher) {
+ let v = $gen($len);
+ b.iter(|| v.clone().$f(|x| x.to_string()));
+ b.bytes = $len * mem::size_of_val(&$gen(1)[0]) as u64;
+ }
+ };
+}
+
+sort!(sort, sort_small_ascending, gen_ascending, 10);
+sort!(sort, sort_small_descending, gen_descending, 10);
+sort!(sort, sort_small_random, gen_random, 10);
+sort!(sort, sort_small_big, gen_big_random, 10);
+sort!(sort, sort_medium_random, gen_random, 100);
+sort!(sort, sort_large_ascending, gen_ascending, 10000);
+sort!(sort, sort_large_descending, gen_descending, 10000);
+sort!(sort, sort_large_mostly_ascending, gen_mostly_ascending, 10000);
+sort!(sort, sort_large_mostly_descending, gen_mostly_descending, 10000);
+sort!(sort, sort_large_random, gen_random, 10000);
+sort!(sort, sort_large_big, gen_big_random, 10000);
+sort_strings!(sort, sort_large_strings, gen_strings, 10000);
+sort_expensive!(sort_by, sort_large_expensive, gen_random, 10000);
+
+sort!(sort_unstable, sort_unstable_small_ascending, gen_ascending, 10);
+sort!(sort_unstable, sort_unstable_small_descending, gen_descending, 10);
+sort!(sort_unstable, sort_unstable_small_random, gen_random, 10);
+sort!(sort_unstable, sort_unstable_small_big, gen_big_random, 10);
+sort!(sort_unstable, sort_unstable_medium_random, gen_random, 100);
+sort!(sort_unstable, sort_unstable_large_ascending, gen_ascending, 10000);
+sort!(sort_unstable, sort_unstable_large_descending, gen_descending, 10000);
+sort!(sort_unstable, sort_unstable_large_mostly_ascending, gen_mostly_ascending, 10000);
+sort!(sort_unstable, sort_unstable_large_mostly_descending, gen_mostly_descending, 10000);
+sort!(sort_unstable, sort_unstable_large_random, gen_random, 10000);
+sort!(sort_unstable, sort_unstable_large_big, gen_big_random, 10000);
+sort_strings!(sort_unstable, sort_unstable_large_strings, gen_strings, 10000);
+sort_expensive!(sort_unstable_by, sort_unstable_large_expensive, gen_random, 10000);
+
+sort_lexicographic!(sort_by_key, sort_by_key_lexicographic, gen_random, 10000);
+sort_lexicographic!(sort_unstable_by_key, sort_unstable_by_key_lexicographic, gen_random, 10000);
+sort_lexicographic!(sort_by_cached_key, sort_by_cached_key_lexicographic, gen_random, 10000);
+
+macro_rules! reverse {
+ ($name:ident, $ty:ty, $f:expr) => {
+ #[bench]
+ fn $name(b: &mut Bencher) {
+ // odd length and offset by 1 to be as unaligned as possible
+ let n = 0xFFFFF;
+ let mut v: Vec<_> = (0..1 + (n / mem::size_of::<$ty>() as u64)).map($f).collect();
+ b.iter(|| black_box(&mut v[1..]).reverse());
+ b.bytes = n;
+ }
+ };
+}
+
+reverse!(reverse_u8, u8, |x| x as u8);
+reverse!(reverse_u16, u16, |x| x as u16);
+reverse!(reverse_u8x3, [u8; 3], |x| [x as u8, (x >> 8) as u8, (x >> 16) as u8]);
+reverse!(reverse_u32, u32, |x| x as u32);
+reverse!(reverse_u64, u64, |x| x as u64);
+reverse!(reverse_u128, u128, |x| x as u128);
+#[repr(simd)]
+struct F64x4(f64, f64, f64, f64);
+reverse!(reverse_simd_f64x4, F64x4, |x| {
+ let x = x as f64;
+ F64x4(x, x, x, x)
+});
+
+macro_rules! rotate {
+ ($name:ident, $gen:expr, $len:expr, $mid:expr) => {
+ #[bench]
+ fn $name(b: &mut Bencher) {
+ let size = mem::size_of_val(&$gen(1)[0]);
+ let mut v = $gen($len * 8 / size);
+ b.iter(|| black_box(&mut v).rotate_left(($mid * 8 + size - 1) / size));
+ b.bytes = (v.len() * size) as u64;
+ }
+ };
+}
+
+rotate!(rotate_tiny_by1, gen_random, 16, 1);
+rotate!(rotate_tiny_half, gen_random, 16, 16 / 2);
+rotate!(rotate_tiny_half_plus_one, gen_random, 16, 16 / 2 + 1);
+
+rotate!(rotate_medium_by1, gen_random, 9158, 1);
+rotate!(rotate_medium_by727_u64, gen_random, 9158, 727);
+rotate!(rotate_medium_by727_bytes, gen_random_bytes, 9158, 727);
+rotate!(rotate_medium_by727_strings, gen_strings, 9158, 727);
+rotate!(rotate_medium_half, gen_random, 9158, 9158 / 2);
+rotate!(rotate_medium_half_plus_one, gen_random, 9158, 9158 / 2 + 1);
+
+// Intended to use more RAM than the machine has cache
+rotate!(rotate_huge_by1, gen_random, 5 * 1024 * 1024, 1);
+rotate!(rotate_huge_by9199_u64, gen_random, 5 * 1024 * 1024, 9199);
+rotate!(rotate_huge_by9199_bytes, gen_random_bytes, 5 * 1024 * 1024, 9199);
+rotate!(rotate_huge_by9199_strings, gen_strings, 5 * 1024 * 1024, 9199);
+rotate!(rotate_huge_by9199_big, gen_big_random, 5 * 1024 * 1024, 9199);
+rotate!(rotate_huge_by1234577_u64, gen_random, 5 * 1024 * 1024, 1234577);
+rotate!(rotate_huge_by1234577_bytes, gen_random_bytes, 5 * 1024 * 1024, 1234577);
+rotate!(rotate_huge_by1234577_strings, gen_strings, 5 * 1024 * 1024, 1234577);
+rotate!(rotate_huge_by1234577_big, gen_big_random, 5 * 1024 * 1024, 1234577);
+rotate!(rotate_huge_half, gen_random, 5 * 1024 * 1024, 5 * 1024 * 1024 / 2);
+rotate!(rotate_huge_half_plus_one, gen_random, 5 * 1024 * 1024, 5 * 1024 * 1024 / 2 + 1);
diff --git a/library/alloc/benches/str.rs b/library/alloc/benches/str.rs
new file mode 100644
index 000000000..391475bc0
--- /dev/null
+++ b/library/alloc/benches/str.rs
@@ -0,0 +1,299 @@
+use test::{black_box, Bencher};
+
+#[bench]
+fn char_iterator(b: &mut Bencher) {
+ let s = "ศไทย中华Việt Nam; Mary had a little lamb, Little lamb";
+
+ b.iter(|| s.chars().count());
+}
+
+#[bench]
+fn char_iterator_for(b: &mut Bencher) {
+ let s = "ศไทย中华Việt Nam; Mary had a little lamb, Little lamb";
+
+ b.iter(|| {
+ for ch in s.chars() {
+ black_box(ch);
+ }
+ });
+}
+
+#[bench]
+fn char_iterator_ascii(b: &mut Bencher) {
+ let s = "Mary had a little lamb, Little lamb
+ Mary had a little lamb, Little lamb
+ Mary had a little lamb, Little lamb
+ Mary had a little lamb, Little lamb
+ Mary had a little lamb, Little lamb
+ Mary had a little lamb, Little lamb";
+
+ b.iter(|| s.chars().count());
+}
+
+#[bench]
+fn char_iterator_rev(b: &mut Bencher) {
+ let s = "ศไทย中华Việt Nam; Mary had a little lamb, Little lamb";
+
+ b.iter(|| s.chars().rev().count());
+}
+
+#[bench]
+fn char_iterator_rev_for(b: &mut Bencher) {
+ let s = "ศไทย中华Việt Nam; Mary had a little lamb, Little lamb";
+
+ b.iter(|| {
+ for ch in s.chars().rev() {
+ black_box(ch);
+ }
+ });
+}
+
+#[bench]
+fn char_indicesator(b: &mut Bencher) {
+ let s = "ศไทย中华Việt Nam; Mary had a little lamb, Little lamb";
+ let len = s.chars().count();
+
+ b.iter(|| assert_eq!(s.char_indices().count(), len));
+}
+
+#[bench]
+fn char_indicesator_rev(b: &mut Bencher) {
+ let s = "ศไทย中华Việt Nam; Mary had a little lamb, Little lamb";
+ let len = s.chars().count();
+
+ b.iter(|| assert_eq!(s.char_indices().rev().count(), len));
+}
+
+#[bench]
+fn split_unicode_ascii(b: &mut Bencher) {
+ let s = "ประเทศไทย中华Việt Namประเทศไทย中华Việt Nam";
+
+ b.iter(|| assert_eq!(s.split('V').count(), 3));
+}
+
+#[bench]
+fn split_ascii(b: &mut Bencher) {
+ let s = "Mary had a little lamb, Little lamb, little-lamb.";
+ let len = s.split(' ').count();
+
+ b.iter(|| assert_eq!(s.split(' ').count(), len));
+}
+
+#[bench]
+fn split_extern_fn(b: &mut Bencher) {
+ let s = "Mary had a little lamb, Little lamb, little-lamb.";
+ let len = s.split(' ').count();
+ fn pred(c: char) -> bool {
+ c == ' '
+ }
+
+ b.iter(|| assert_eq!(s.split(pred).count(), len));
+}
+
+#[bench]
+fn split_closure(b: &mut Bencher) {
+ let s = "Mary had a little lamb, Little lamb, little-lamb.";
+ let len = s.split(' ').count();
+
+ b.iter(|| assert_eq!(s.split(|c: char| c == ' ').count(), len));
+}
+
+#[bench]
+fn split_slice(b: &mut Bencher) {
+ let s = "Mary had a little lamb, Little lamb, little-lamb.";
+ let len = s.split(' ').count();
+
+ let c: &[char] = &[' '];
+ b.iter(|| assert_eq!(s.split(c).count(), len));
+}
+
+#[bench]
+fn bench_join(b: &mut Bencher) {
+ let s = "ศไทย中华Việt Nam; Mary had a little lamb, Little lamb";
+ let sep = "→";
+ let v = vec![s, s, s, s, s, s, s, s, s, s];
+ b.iter(|| {
+ assert_eq!(v.join(sep).len(), s.len() * 10 + sep.len() * 9);
+ })
+}
+
+#[bench]
+fn bench_contains_short_short(b: &mut Bencher) {
+ let haystack = "Lorem ipsum dolor sit amet, consectetur adipiscing elit.";
+ let needle = "sit";
+
+ b.iter(|| {
+ assert!(haystack.contains(needle));
+ })
+}
+
+#[bench]
+fn bench_contains_short_long(b: &mut Bencher) {
+ let haystack = "\
+Lorem ipsum dolor sit amet, consectetur adipiscing elit. Suspendisse quis lorem sit amet dolor \
+ultricies condimentum. Praesent iaculis purus elit, ac malesuada quam malesuada in. Duis sed orci \
+eros. Suspendisse sit amet magna mollis, mollis nunc luctus, imperdiet mi. Integer fringilla non \
+sem ut lacinia. Fusce varius tortor a risus porttitor hendrerit. Morbi mauris dui, ultricies nec \
+tempus vel, gravida nec quam.
+
+In est dui, tincidunt sed tempus interdum, adipiscing laoreet ante. Etiam tempor, tellus quis \
+sagittis interdum, nulla purus mattis sem, quis auctor erat odio ac tellus. In nec nunc sit amet \
+diam volutpat molestie at sed ipsum. Vestibulum laoreet consequat vulputate. Integer accumsan \
+lorem ac dignissim placerat. Suspendisse convallis faucibus lorem. Aliquam erat volutpat. In vel \
+eleifend felis. Sed suscipit nulla lorem, sed mollis est sollicitudin et. Nam fermentum egestas \
+interdum. Curabitur ut nisi justo.
+
+Sed sollicitudin ipsum tellus, ut condimentum leo eleifend nec. Cras ut velit ante. Phasellus nec \
+mollis odio. Mauris molestie erat in arcu mattis, at aliquet dolor vehicula. Quisque malesuada \
+lectus sit amet nisi pretium, a condimentum ipsum porta. Morbi at dapibus diam. Praesent egestas \
+est sed risus elementum, eu rutrum metus ultrices. Etiam fermentum consectetur magna, id rutrum \
+felis accumsan a. Aliquam ut pellentesque libero. Sed mi nulla, lobortis eu tortor id, suscipit \
+ultricies neque. Morbi iaculis sit amet risus at iaculis. Praesent eget ligula quis turpis \
+feugiat suscipit vel non arcu. Interdum et malesuada fames ac ante ipsum primis in faucibus. \
+Aliquam sit amet placerat lorem.
+
+Cras a lacus vel ante posuere elementum. Nunc est leo, bibendum ut facilisis vel, bibendum at \
+mauris. Nullam adipiscing diam vel odio ornare, luctus adipiscing mi luctus. Nulla facilisi. \
+Mauris adipiscing bibendum neque, quis adipiscing lectus tempus et. Sed feugiat erat et nisl \
+lobortis pharetra. Donec vitae erat enim. Nullam sit amet felis et quam lacinia tincidunt. Aliquam \
+suscipit dapibus urna. Sed volutpat urna in magna pulvinar volutpat. Phasellus nec tellus ac diam \
+cursus accumsan.
+
+Nam lectus enim, dapibus non nisi tempor, consectetur convallis massa. Maecenas eleifend dictum \
+feugiat. Etiam quis mauris vel risus luctus mattis a a nunc. Nullam orci quam, imperdiet id \
+vehicula in, porttitor ut nibh. Duis sagittis adipiscing nisl vitae congue. Donec mollis risus eu \
+leo suscipit, varius porttitor nulla porta. Pellentesque ut sem nec nisi euismod vehicula. Nulla \
+malesuada sollicitudin quam eu fermentum.";
+ let needle = "english";
+
+ b.iter(|| {
+ assert!(!haystack.contains(needle));
+ })
+}
+
+#[bench]
+fn bench_contains_bad_naive(b: &mut Bencher) {
+ let haystack = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa";
+ let needle = "aaaaaaaab";
+
+ b.iter(|| {
+ assert!(!haystack.contains(needle));
+ })
+}
+
+#[bench]
+fn bench_contains_equal(b: &mut Bencher) {
+ let haystack = "Lorem ipsum dolor sit amet, consectetur adipiscing elit.";
+ let needle = "Lorem ipsum dolor sit amet, consectetur adipiscing elit.";
+
+ b.iter(|| {
+ assert!(haystack.contains(needle));
+ })
+}
+
+macro_rules! make_test_inner {
+ ($s:ident, $code:expr, $name:ident, $str:expr, $iters:expr) => {
+ #[bench]
+ fn $name(bencher: &mut Bencher) {
+ let mut $s = $str;
+ black_box(&mut $s);
+ bencher.iter(|| {
+ for _ in 0..$iters {
+ black_box($code);
+ }
+ });
+ }
+ };
+}
+
+macro_rules! make_test {
+ ($name:ident, $s:ident, $code:expr) => {
+ make_test!($name, $s, $code, 1);
+ };
+ ($name:ident, $s:ident, $code:expr, $iters:expr) => {
+ mod $name {
+ use test::Bencher;
+ use test::black_box;
+
+ // Short strings: 65 bytes each
+ make_test_inner!($s, $code, short_ascii,
+ "Mary had a little lamb, Little lamb Mary had a littl lamb, lamb!", $iters);
+ make_test_inner!($s, $code, short_mixed,
+ "ศไทย中华Việt Nam; Mary had a little lamb, Little lam!", $iters);
+ make_test_inner!($s, $code, short_pile_of_poo,
+ "💩💩💩💩💩💩💩💩💩💩💩💩💩💩💩💩!", $iters);
+ make_test_inner!($s, $code, long_lorem_ipsum,"\
+Lorem ipsum dolor sit amet, consectetur adipiscing elit. Suspendisse quis lorem sit amet dolor \
+ultricies condimentum. Praesent iaculis purus elit, ac malesuada quam malesuada in. Duis sed orci \
+eros. Suspendisse sit amet magna mollis, mollis nunc luctus, imperdiet mi. Integer fringilla non \
+sem ut lacinia. Fusce varius tortor a risus porttitor hendrerit. Morbi mauris dui, ultricies nec \
+tempus vel, gravida nec quam.
+
+In est dui, tincidunt sed tempus interdum, adipiscing laoreet ante. Etiam tempor, tellus quis \
+sagittis interdum, nulla purus mattis sem, quis auctor erat odio ac tellus. In nec nunc sit amet \
+diam volutpat molestie at sed ipsum. Vestibulum laoreet consequat vulputate. Integer accumsan \
+lorem ac dignissim placerat. Suspendisse convallis faucibus lorem. Aliquam erat volutpat. In vel \
+eleifend felis. Sed suscipit nulla lorem, sed mollis est sollicitudin et. Nam fermentum egestas \
+interdum. Curabitur ut nisi justo.
+
+Sed sollicitudin ipsum tellus, ut condimentum leo eleifend nec. Cras ut velit ante. Phasellus nec \
+mollis odio. Mauris molestie erat in arcu mattis, at aliquet dolor vehicula. Quisque malesuada \
+lectus sit amet nisi pretium, a condimentum ipsum porta. Morbi at dapibus diam. Praesent egestas \
+est sed risus elementum, eu rutrum metus ultrices. Etiam fermentum consectetur magna, id rutrum \
+felis accumsan a. Aliquam ut pellentesque libero. Sed mi nulla, lobortis eu tortor id, suscipit \
+ultricies neque. Morbi iaculis sit amet risus at iaculis. Praesent eget ligula quis turpis \
+feugiat suscipit vel non arcu. Interdum et malesuada fames ac ante ipsum primis in faucibus. \
+Aliquam sit amet placerat lorem.
+
+Cras a lacus vel ante posuere elementum. Nunc est leo, bibendum ut facilisis vel, bibendum at \
+mauris. Nullam adipiscing diam vel odio ornare, luctus adipiscing mi luctus. Nulla facilisi. \
+Mauris adipiscing bibendum neque, quis adipiscing lectus tempus et. Sed feugiat erat et nisl \
+lobortis pharetra. Donec vitae erat enim. Nullam sit amet felis et quam lacinia tincidunt. Aliquam \
+suscipit dapibus urna. Sed volutpat urna in magna pulvinar volutpat. Phasellus nec tellus ac diam \
+cursus accumsan.
+
+Nam lectus enim, dapibus non nisi tempor, consectetur convallis massa. Maecenas eleifend dictum \
+feugiat. Etiam quis mauris vel risus luctus mattis a a nunc. Nullam orci quam, imperdiet id \
+vehicula in, porttitor ut nibh. Duis sagittis adipiscing nisl vitae congue. Donec mollis risus eu \
+leo suscipit, varius porttitor nulla porta. Pellentesque ut sem nec nisi euismod vehicula. Nulla \
+malesuada sollicitudin quam eu fermentum!", $iters);
+ }
+ }
+}
+
+make_test!(chars_count, s, s.chars().count());
+
+make_test!(contains_bang_str, s, s.contains("!"));
+make_test!(contains_bang_char, s, s.contains('!'));
+
+make_test!(match_indices_a_str, s, s.match_indices("a").count());
+
+make_test!(split_a_str, s, s.split("a").count());
+
+make_test!(trim_ascii_char, s, { s.trim_matches(|c: char| c.is_ascii()) });
+make_test!(trim_start_ascii_char, s, { s.trim_start_matches(|c: char| c.is_ascii()) });
+make_test!(trim_end_ascii_char, s, { s.trim_end_matches(|c: char| c.is_ascii()) });
+
+make_test!(find_underscore_char, s, s.find('_'));
+make_test!(rfind_underscore_char, s, s.rfind('_'));
+make_test!(find_underscore_str, s, s.find("_"));
+
+make_test!(find_zzz_char, s, s.find('\u{1F4A4}'));
+make_test!(rfind_zzz_char, s, s.rfind('\u{1F4A4}'));
+make_test!(find_zzz_str, s, s.find("\u{1F4A4}"));
+
+make_test!(starts_with_ascii_char, s, s.starts_with('/'), 1024);
+make_test!(ends_with_ascii_char, s, s.ends_with('/'), 1024);
+make_test!(starts_with_unichar, s, s.starts_with('\u{1F4A4}'), 1024);
+make_test!(ends_with_unichar, s, s.ends_with('\u{1F4A4}'), 1024);
+make_test!(starts_with_str, s, s.starts_with("💩💩💩💩💩💩💩💩💩💩💩💩💩💩💩💩"), 1024);
+make_test!(ends_with_str, s, s.ends_with("💩💩💩💩💩💩💩💩💩💩💩💩💩💩💩💩"), 1024);
+
+make_test!(split_space_char, s, s.split(' ').count());
+make_test!(split_terminator_space_char, s, s.split_terminator(' ').count());
+
+make_test!(splitn_space_char, s, s.splitn(10, ' ').count());
+make_test!(rsplitn_space_char, s, s.rsplitn(10, ' ').count());
+
+make_test!(split_space_str, s, s.split(" ").count());
+make_test!(split_ad_str, s, s.split("ad").count());
diff --git a/library/alloc/benches/string.rs b/library/alloc/benches/string.rs
new file mode 100644
index 000000000..5c95160ba
--- /dev/null
+++ b/library/alloc/benches/string.rs
@@ -0,0 +1,164 @@
+use std::iter::repeat;
+use test::{black_box, Bencher};
+
+#[bench]
+fn bench_with_capacity(b: &mut Bencher) {
+ b.iter(|| String::with_capacity(100));
+}
+
+#[bench]
+fn bench_push_str(b: &mut Bencher) {
+ let s = "ศไทย中华Việt Nam; Mary had a little lamb, Little lamb";
+ b.iter(|| {
+ let mut r = String::new();
+ r.push_str(s);
+ });
+}
+
+const REPETITIONS: u64 = 10_000;
+
+#[bench]
+fn bench_push_str_one_byte(b: &mut Bencher) {
+ b.bytes = REPETITIONS;
+ b.iter(|| {
+ let mut r = String::new();
+ for _ in 0..REPETITIONS {
+ r.push_str("a")
+ }
+ });
+}
+
+#[bench]
+fn bench_push_char_one_byte(b: &mut Bencher) {
+ b.bytes = REPETITIONS;
+ b.iter(|| {
+ let mut r = String::new();
+ for _ in 0..REPETITIONS {
+ r.push('a')
+ }
+ });
+}
+
+#[bench]
+fn bench_push_char_two_bytes(b: &mut Bencher) {
+ b.bytes = REPETITIONS * 2;
+ b.iter(|| {
+ let mut r = String::new();
+ for _ in 0..REPETITIONS {
+ r.push('â')
+ }
+ });
+}
+
+#[bench]
+fn from_utf8_lossy_100_ascii(b: &mut Bencher) {
+ let s = b"Hello there, the quick brown fox jumped over the lazy dog! \
+ Lorem ipsum dolor sit amet, consectetur. ";
+
+ assert_eq!(100, s.len());
+ b.iter(|| {
+ let _ = String::from_utf8_lossy(s);
+ });
+}
+
+#[bench]
+fn from_utf8_lossy_100_multibyte(b: &mut Bencher) {
+ let s = "𐌀𐌖𐌋𐌄𐌑𐌉ปรدولة الكويتทศไทย中华𐍅𐌿𐌻𐍆𐌹𐌻𐌰".as_bytes();
+ assert_eq!(100, s.len());
+ b.iter(|| {
+ let _ = String::from_utf8_lossy(s);
+ });
+}
+
+#[bench]
+fn from_utf8_lossy_invalid(b: &mut Bencher) {
+ let s = b"Hello\xC0\x80 There\xE6\x83 Goodbye";
+ b.iter(|| {
+ let _ = String::from_utf8_lossy(s);
+ });
+}
+
+#[bench]
+fn from_utf8_lossy_100_invalid(b: &mut Bencher) {
+ let s = repeat(0xf5).take(100).collect::<Vec<_>>();
+ b.iter(|| {
+ let _ = String::from_utf8_lossy(&s);
+ });
+}
+
+#[bench]
+fn bench_exact_size_shrink_to_fit(b: &mut Bencher) {
+ let s = "Hello there, the quick brown fox jumped over the lazy dog! \
+ Lorem ipsum dolor sit amet, consectetur. ";
+ // ensure our operation produces an exact-size string before we benchmark it
+ let mut r = String::with_capacity(s.len());
+ r.push_str(s);
+ assert_eq!(r.len(), r.capacity());
+ b.iter(|| {
+ let mut r = String::with_capacity(s.len());
+ r.push_str(s);
+ r.shrink_to_fit();
+ r
+ });
+}
+
+#[bench]
+fn bench_from_str(b: &mut Bencher) {
+ let s = "Hello there, the quick brown fox jumped over the lazy dog! \
+ Lorem ipsum dolor sit amet, consectetur. ";
+ b.iter(|| String::from(s))
+}
+
+#[bench]
+fn bench_from(b: &mut Bencher) {
+ let s = "Hello there, the quick brown fox jumped over the lazy dog! \
+ Lorem ipsum dolor sit amet, consectetur. ";
+ b.iter(|| String::from(s))
+}
+
+#[bench]
+fn bench_to_string(b: &mut Bencher) {
+ let s = "Hello there, the quick brown fox jumped over the lazy dog! \
+ Lorem ipsum dolor sit amet, consectetur. ";
+ b.iter(|| s.to_string())
+}
+
+#[bench]
+fn bench_insert_char_short(b: &mut Bencher) {
+ let s = "Hello, World!";
+ b.iter(|| {
+ let mut x = String::from(s);
+ black_box(&mut x).insert(6, black_box(' '));
+ x
+ })
+}
+
+#[bench]
+fn bench_insert_char_long(b: &mut Bencher) {
+ let s = "Hello, World!";
+ b.iter(|| {
+ let mut x = String::from(s);
+ black_box(&mut x).insert(6, black_box('❤'));
+ x
+ })
+}
+
+#[bench]
+fn bench_insert_str_short(b: &mut Bencher) {
+ let s = "Hello, World!";
+ b.iter(|| {
+ let mut x = String::from(s);
+ black_box(&mut x).insert_str(6, black_box(" "));
+ x
+ })
+}
+
+#[bench]
+fn bench_insert_str_long(b: &mut Bencher) {
+ let s = "Hello, World!";
+ b.iter(|| {
+ let mut x = String::from(s);
+ black_box(&mut x).insert_str(6, black_box(" rustic "));
+ x
+ })
+}
diff --git a/library/alloc/benches/vec.rs b/library/alloc/benches/vec.rs
new file mode 100644
index 000000000..663f6b9dd
--- /dev/null
+++ b/library/alloc/benches/vec.rs
@@ -0,0 +1,784 @@
+use rand::RngCore;
+use std::iter::{repeat, FromIterator};
+use test::{black_box, Bencher};
+
+#[bench]
+fn bench_new(b: &mut Bencher) {
+ b.iter(|| Vec::<u32>::new())
+}
+
+fn do_bench_with_capacity(b: &mut Bencher, src_len: usize) {
+ b.bytes = src_len as u64;
+
+ b.iter(|| Vec::<u32>::with_capacity(src_len))
+}
+
+#[bench]
+fn bench_with_capacity_0000(b: &mut Bencher) {
+ do_bench_with_capacity(b, 0)
+}
+
+#[bench]
+fn bench_with_capacity_0010(b: &mut Bencher) {
+ do_bench_with_capacity(b, 10)
+}
+
+#[bench]
+fn bench_with_capacity_0100(b: &mut Bencher) {
+ do_bench_with_capacity(b, 100)
+}
+
+#[bench]
+fn bench_with_capacity_1000(b: &mut Bencher) {
+ do_bench_with_capacity(b, 1000)
+}
+
+fn do_bench_from_fn(b: &mut Bencher, src_len: usize) {
+ b.bytes = src_len as u64;
+
+ b.iter(|| (0..src_len).collect::<Vec<_>>())
+}
+
+#[bench]
+fn bench_from_fn_0000(b: &mut Bencher) {
+ do_bench_from_fn(b, 0)
+}
+
+#[bench]
+fn bench_from_fn_0010(b: &mut Bencher) {
+ do_bench_from_fn(b, 10)
+}
+
+#[bench]
+fn bench_from_fn_0100(b: &mut Bencher) {
+ do_bench_from_fn(b, 100)
+}
+
+#[bench]
+fn bench_from_fn_1000(b: &mut Bencher) {
+ do_bench_from_fn(b, 1000)
+}
+
+fn do_bench_from_elem(b: &mut Bencher, src_len: usize) {
+ b.bytes = src_len as u64;
+
+ b.iter(|| repeat(5).take(src_len).collect::<Vec<usize>>())
+}
+
+#[bench]
+fn bench_from_elem_0000(b: &mut Bencher) {
+ do_bench_from_elem(b, 0)
+}
+
+#[bench]
+fn bench_from_elem_0010(b: &mut Bencher) {
+ do_bench_from_elem(b, 10)
+}
+
+#[bench]
+fn bench_from_elem_0100(b: &mut Bencher) {
+ do_bench_from_elem(b, 100)
+}
+
+#[bench]
+fn bench_from_elem_1000(b: &mut Bencher) {
+ do_bench_from_elem(b, 1000)
+}
+
+fn do_bench_from_slice(b: &mut Bencher, src_len: usize) {
+ let src: Vec<_> = FromIterator::from_iter(0..src_len);
+
+ b.bytes = src_len as u64;
+
+ b.iter(|| src.as_slice().to_vec());
+}
+
+#[bench]
+fn bench_from_slice_0000(b: &mut Bencher) {
+ do_bench_from_slice(b, 0)
+}
+
+#[bench]
+fn bench_from_slice_0010(b: &mut Bencher) {
+ do_bench_from_slice(b, 10)
+}
+
+#[bench]
+fn bench_from_slice_0100(b: &mut Bencher) {
+ do_bench_from_slice(b, 100)
+}
+
+#[bench]
+fn bench_from_slice_1000(b: &mut Bencher) {
+ do_bench_from_slice(b, 1000)
+}
+
+fn do_bench_from_iter(b: &mut Bencher, src_len: usize) {
+ let src: Vec<_> = FromIterator::from_iter(0..src_len);
+
+ b.bytes = src_len as u64;
+
+ b.iter(|| {
+ let dst: Vec<_> = FromIterator::from_iter(src.iter().cloned());
+ dst
+ });
+}
+
+#[bench]
+fn bench_from_iter_0000(b: &mut Bencher) {
+ do_bench_from_iter(b, 0)
+}
+
+#[bench]
+fn bench_from_iter_0010(b: &mut Bencher) {
+ do_bench_from_iter(b, 10)
+}
+
+#[bench]
+fn bench_from_iter_0100(b: &mut Bencher) {
+ do_bench_from_iter(b, 100)
+}
+
+#[bench]
+fn bench_from_iter_1000(b: &mut Bencher) {
+ do_bench_from_iter(b, 1000)
+}
+
+fn do_bench_extend(b: &mut Bencher, dst_len: usize, src_len: usize) {
+ let dst: Vec<_> = FromIterator::from_iter(0..dst_len);
+ let src: Vec<_> = FromIterator::from_iter(dst_len..dst_len + src_len);
+
+ b.bytes = src_len as u64;
+
+ b.iter(|| {
+ let mut dst = dst.clone();
+ dst.extend(src.clone());
+ dst
+ });
+}
+
+#[bench]
+fn bench_extend_0000_0000(b: &mut Bencher) {
+ do_bench_extend(b, 0, 0)
+}
+
+#[bench]
+fn bench_extend_0000_0010(b: &mut Bencher) {
+ do_bench_extend(b, 0, 10)
+}
+
+#[bench]
+fn bench_extend_0000_0100(b: &mut Bencher) {
+ do_bench_extend(b, 0, 100)
+}
+
+#[bench]
+fn bench_extend_0000_1000(b: &mut Bencher) {
+ do_bench_extend(b, 0, 1000)
+}
+
+#[bench]
+fn bench_extend_0010_0010(b: &mut Bencher) {
+ do_bench_extend(b, 10, 10)
+}
+
+#[bench]
+fn bench_extend_0100_0100(b: &mut Bencher) {
+ do_bench_extend(b, 100, 100)
+}
+
+#[bench]
+fn bench_extend_1000_1000(b: &mut Bencher) {
+ do_bench_extend(b, 1000, 1000)
+}
+
+fn do_bench_extend_from_slice(b: &mut Bencher, dst_len: usize, src_len: usize) {
+ let dst: Vec<_> = FromIterator::from_iter(0..dst_len);
+ let src: Vec<_> = FromIterator::from_iter(dst_len..dst_len + src_len);
+
+ b.bytes = src_len as u64;
+
+ b.iter(|| {
+ let mut dst = dst.clone();
+ dst.extend_from_slice(&src);
+ dst
+ });
+}
+
+#[bench]
+fn bench_extend_recycle(b: &mut Bencher) {
+ let mut data = vec![0; 1000];
+
+ b.iter(|| {
+ let tmp = std::mem::take(&mut data);
+ let mut to_extend = black_box(Vec::new());
+ to_extend.extend(tmp.into_iter());
+ data = black_box(to_extend);
+ });
+
+ black_box(data);
+}
+
+#[bench]
+fn bench_extend_from_slice_0000_0000(b: &mut Bencher) {
+ do_bench_extend_from_slice(b, 0, 0)
+}
+
+#[bench]
+fn bench_extend_from_slice_0000_0010(b: &mut Bencher) {
+ do_bench_extend_from_slice(b, 0, 10)
+}
+
+#[bench]
+fn bench_extend_from_slice_0000_0100(b: &mut Bencher) {
+ do_bench_extend_from_slice(b, 0, 100)
+}
+
+#[bench]
+fn bench_extend_from_slice_0000_1000(b: &mut Bencher) {
+ do_bench_extend_from_slice(b, 0, 1000)
+}
+
+#[bench]
+fn bench_extend_from_slice_0010_0010(b: &mut Bencher) {
+ do_bench_extend_from_slice(b, 10, 10)
+}
+
+#[bench]
+fn bench_extend_from_slice_0100_0100(b: &mut Bencher) {
+ do_bench_extend_from_slice(b, 100, 100)
+}
+
+#[bench]
+fn bench_extend_from_slice_1000_1000(b: &mut Bencher) {
+ do_bench_extend_from_slice(b, 1000, 1000)
+}
+
+fn do_bench_clone(b: &mut Bencher, src_len: usize) {
+ let src: Vec<usize> = FromIterator::from_iter(0..src_len);
+
+ b.bytes = src_len as u64;
+
+ b.iter(|| src.clone());
+}
+
+#[bench]
+fn bench_clone_0000(b: &mut Bencher) {
+ do_bench_clone(b, 0)
+}
+
+#[bench]
+fn bench_clone_0010(b: &mut Bencher) {
+ do_bench_clone(b, 10)
+}
+
+#[bench]
+fn bench_clone_0100(b: &mut Bencher) {
+ do_bench_clone(b, 100)
+}
+
+#[bench]
+fn bench_clone_1000(b: &mut Bencher) {
+ do_bench_clone(b, 1000)
+}
+
+fn do_bench_clone_from(b: &mut Bencher, times: usize, dst_len: usize, src_len: usize) {
+ let dst: Vec<_> = FromIterator::from_iter(0..src_len);
+ let src: Vec<_> = FromIterator::from_iter(dst_len..dst_len + src_len);
+
+ b.bytes = (times * src_len) as u64;
+
+ b.iter(|| {
+ let mut dst = dst.clone();
+
+ for _ in 0..times {
+ dst.clone_from(&src);
+ dst = black_box(dst);
+ }
+ dst
+ });
+}
+
+#[bench]
+fn bench_clone_from_01_0000_0000(b: &mut Bencher) {
+ do_bench_clone_from(b, 1, 0, 0)
+}
+
+#[bench]
+fn bench_clone_from_01_0000_0010(b: &mut Bencher) {
+ do_bench_clone_from(b, 1, 0, 10)
+}
+
+#[bench]
+fn bench_clone_from_01_0000_0100(b: &mut Bencher) {
+ do_bench_clone_from(b, 1, 0, 100)
+}
+
+#[bench]
+fn bench_clone_from_01_0000_1000(b: &mut Bencher) {
+ do_bench_clone_from(b, 1, 0, 1000)
+}
+
+#[bench]
+fn bench_clone_from_01_0010_0010(b: &mut Bencher) {
+ do_bench_clone_from(b, 1, 10, 10)
+}
+
+#[bench]
+fn bench_clone_from_01_0100_0100(b: &mut Bencher) {
+ do_bench_clone_from(b, 1, 100, 100)
+}
+
+#[bench]
+fn bench_clone_from_01_1000_1000(b: &mut Bencher) {
+ do_bench_clone_from(b, 1, 1000, 1000)
+}
+
+#[bench]
+fn bench_clone_from_01_0010_0100(b: &mut Bencher) {
+ do_bench_clone_from(b, 1, 10, 100)
+}
+
+#[bench]
+fn bench_clone_from_01_0100_1000(b: &mut Bencher) {
+ do_bench_clone_from(b, 1, 100, 1000)
+}
+
+#[bench]
+fn bench_clone_from_01_0010_0000(b: &mut Bencher) {
+ do_bench_clone_from(b, 1, 10, 0)
+}
+
+#[bench]
+fn bench_clone_from_01_0100_0010(b: &mut Bencher) {
+ do_bench_clone_from(b, 1, 100, 10)
+}
+
+#[bench]
+fn bench_clone_from_01_1000_0100(b: &mut Bencher) {
+ do_bench_clone_from(b, 1, 1000, 100)
+}
+
+#[bench]
+fn bench_clone_from_10_0000_0000(b: &mut Bencher) {
+ do_bench_clone_from(b, 10, 0, 0)
+}
+
+#[bench]
+fn bench_clone_from_10_0000_0010(b: &mut Bencher) {
+ do_bench_clone_from(b, 10, 0, 10)
+}
+
+#[bench]
+fn bench_clone_from_10_0000_0100(b: &mut Bencher) {
+ do_bench_clone_from(b, 10, 0, 100)
+}
+
+#[bench]
+fn bench_clone_from_10_0000_1000(b: &mut Bencher) {
+ do_bench_clone_from(b, 10, 0, 1000)
+}
+
+#[bench]
+fn bench_clone_from_10_0010_0010(b: &mut Bencher) {
+ do_bench_clone_from(b, 10, 10, 10)
+}
+
+#[bench]
+fn bench_clone_from_10_0100_0100(b: &mut Bencher) {
+ do_bench_clone_from(b, 10, 100, 100)
+}
+
+#[bench]
+fn bench_clone_from_10_1000_1000(b: &mut Bencher) {
+ do_bench_clone_from(b, 10, 1000, 1000)
+}
+
+#[bench]
+fn bench_clone_from_10_0010_0100(b: &mut Bencher) {
+ do_bench_clone_from(b, 10, 10, 100)
+}
+
+#[bench]
+fn bench_clone_from_10_0100_1000(b: &mut Bencher) {
+ do_bench_clone_from(b, 10, 100, 1000)
+}
+
+#[bench]
+fn bench_clone_from_10_0010_0000(b: &mut Bencher) {
+ do_bench_clone_from(b, 10, 10, 0)
+}
+
+#[bench]
+fn bench_clone_from_10_0100_0010(b: &mut Bencher) {
+ do_bench_clone_from(b, 10, 100, 10)
+}
+
+#[bench]
+fn bench_clone_from_10_1000_0100(b: &mut Bencher) {
+ do_bench_clone_from(b, 10, 1000, 100)
+}
+
+macro_rules! bench_in_place {
+ ($($fname:ident, $type:ty, $count:expr, $init:expr);*) => {
+ $(
+ #[bench]
+ fn $fname(b: &mut Bencher) {
+ b.iter(|| {
+ let src: Vec<$type> = black_box(vec![$init; $count]);
+ src.into_iter()
+ .enumerate()
+ .map(|(idx, e)| idx as $type ^ e)
+ .collect::<Vec<$type>>()
+ });
+ }
+ )+
+ };
+}
+
+bench_in_place![
+ bench_in_place_xxu8_0010_i0, u8, 10, 0;
+ bench_in_place_xxu8_0100_i0, u8, 100, 0;
+ bench_in_place_xxu8_1000_i0, u8, 1000, 0;
+ bench_in_place_xxu8_0010_i1, u8, 10, 1;
+ bench_in_place_xxu8_0100_i1, u8, 100, 1;
+ bench_in_place_xxu8_1000_i1, u8, 1000, 1;
+ bench_in_place_xu32_0010_i0, u32, 10, 0;
+ bench_in_place_xu32_0100_i0, u32, 100, 0;
+ bench_in_place_xu32_1000_i0, u32, 1000, 0;
+ bench_in_place_xu32_0010_i1, u32, 10, 1;
+ bench_in_place_xu32_0100_i1, u32, 100, 1;
+ bench_in_place_xu32_1000_i1, u32, 1000, 1;
+ bench_in_place_u128_0010_i0, u128, 10, 0;
+ bench_in_place_u128_0100_i0, u128, 100, 0;
+ bench_in_place_u128_1000_i0, u128, 1000, 0;
+ bench_in_place_u128_0010_i1, u128, 10, 1;
+ bench_in_place_u128_0100_i1, u128, 100, 1;
+ bench_in_place_u128_1000_i1, u128, 1000, 1
+];
+
+#[bench]
+fn bench_in_place_recycle(b: &mut Bencher) {
+ let mut data = vec![0; 1000];
+
+ b.iter(|| {
+ let tmp = std::mem::take(&mut data);
+ data = black_box(
+ tmp.into_iter()
+ .enumerate()
+ .map(|(idx, e)| idx.wrapping_add(e))
+ .fuse()
+ .collect::<Vec<usize>>(),
+ );
+ });
+}
+
+#[bench]
+fn bench_in_place_zip_recycle(b: &mut Bencher) {
+ let mut data = vec![0u8; 1000];
+ let mut rng = crate::bench_rng();
+ let mut subst = vec![0u8; 1000];
+ rng.fill_bytes(&mut subst[..]);
+
+ b.iter(|| {
+ let tmp = std::mem::take(&mut data);
+ let mangled = tmp
+ .into_iter()
+ .zip(subst.iter().copied())
+ .enumerate()
+ .map(|(i, (d, s))| d.wrapping_add(i as u8) ^ s)
+ .collect::<Vec<_>>();
+ data = black_box(mangled);
+ });
+}
+
+#[bench]
+fn bench_in_place_zip_iter_mut(b: &mut Bencher) {
+ let mut data = vec![0u8; 256];
+ let mut rng = crate::bench_rng();
+ let mut subst = vec![0u8; 1000];
+ rng.fill_bytes(&mut subst[..]);
+
+ b.iter(|| {
+ data.iter_mut().enumerate().for_each(|(i, d)| {
+ *d = d.wrapping_add(i as u8) ^ subst[i];
+ });
+ });
+
+ black_box(data);
+}
+
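+// Illustrative helper used by `bench_transmute` below: bit-casts each element via
+// `transmute_copy`. This is only sound here because the benchmark converts between
+// `u32` and `i32`, which have identical size and alignment.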
+pub fn vec_cast<T, U>(input: Vec<T>) -> Vec<U> {
+ input.into_iter().map(|e| unsafe { std::mem::transmute_copy(&e) }).collect()
+}
+
+#[bench]
+fn bench_transmute(b: &mut Bencher) {
+ let mut vec = vec![10u32; 100];
+ b.bytes = 800; // 2 casts x 4 bytes x 100
+ b.iter(|| {
+ let v = std::mem::take(&mut vec);
+ let v = black_box(vec_cast::<u32, i32>(v));
+ let v = black_box(vec_cast::<i32, u32>(v));
+ vec = v;
+ });
+}
+
+#[derive(Clone)]
+struct Droppable(usize);
+
+impl Drop for Droppable {
+ fn drop(&mut self) {
+ black_box(self);
+ }
+}
+
+#[bench]
+fn bench_in_place_collect_droppable(b: &mut Bencher) {
+ let v: Vec<Droppable> = std::iter::repeat_with(|| Droppable(0)).take(1000).collect();
+ b.iter(|| {
+ v.clone()
+ .into_iter()
+ .skip(100)
+ .enumerate()
+ .map(|(i, e)| Droppable(i ^ e.0))
+ .collect::<Vec<_>>()
+ })
+}
+
+const LEN: usize = 16384;
+
+#[bench]
+fn bench_chain_collect(b: &mut Bencher) {
+ let data = black_box([0; LEN]);
+ b.iter(|| data.iter().cloned().chain([1]).collect::<Vec<_>>());
+}
+
+#[bench]
+fn bench_chain_chain_collect(b: &mut Bencher) {
+ let data = black_box([0; LEN]);
+ b.iter(|| data.iter().cloned().chain([1]).chain([2]).collect::<Vec<_>>());
+}
+
+#[bench]
+fn bench_nest_chain_chain_collect(b: &mut Bencher) {
+ let data = black_box([0; LEN]);
+ b.iter(|| {
+ data.iter().cloned().chain([1].iter().chain([2].iter()).cloned()).collect::<Vec<_>>()
+ });
+}
+
+#[bench]
+fn bench_range_map_collect(b: &mut Bencher) {
+ b.iter(|| (0..LEN).map(|_| u32::default()).collect::<Vec<_>>());
+}
+
+#[bench]
+fn bench_chain_extend_ref(b: &mut Bencher) {
+ let data = black_box([0; LEN]);
+ b.iter(|| {
+ let mut v = Vec::<u32>::with_capacity(data.len() + 1);
+ v.extend(data.iter().chain([1].iter()));
+ v
+ });
+}
+
+#[bench]
+fn bench_chain_extend_value(b: &mut Bencher) {
+ let data = black_box([0; LEN]);
+ b.iter(|| {
+ let mut v = Vec::<u32>::with_capacity(data.len() + 1);
+ v.extend(data.iter().cloned().chain(Some(1)));
+ v
+ });
+}
+
+#[bench]
+fn bench_rev_1(b: &mut Bencher) {
+ let data = black_box([0; LEN]);
+ b.iter(|| {
+ let mut v = Vec::<u32>::new();
+ v.extend(data.iter().rev());
+ v
+ });
+}
+
+#[bench]
+fn bench_rev_2(b: &mut Bencher) {
+ let data = black_box([0; LEN]);
+ b.iter(|| {
+ let mut v = Vec::<u32>::with_capacity(data.len());
+ v.extend(data.iter().rev());
+ v
+ });
+}
+
+#[bench]
+fn bench_map_regular(b: &mut Bencher) {
+ let data = black_box([(0, 0); LEN]);
+ b.iter(|| {
+ let mut v = Vec::<u32>::new();
+ v.extend(data.iter().map(|t| t.1));
+ v
+ });
+}
+
+#[bench]
+fn bench_map_fast(b: &mut Bencher) {
+ let data = black_box([(0, 0); LEN]);
+ b.iter(|| {
+ let mut result: Vec<u32> = Vec::with_capacity(data.len());
+ for i in 0..data.len() {
+ unsafe {
+ *result.as_mut_ptr().add(i) = data[i].0;
+ result.set_len(i);
+ }
+ }
+ result
+ });
+}
+
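+// Fills `buf` from a simple xorshift-style PRNG, masking the values so that smaller
+// buffers end up with a narrow value range (and therefore plenty of duplicates once
+// sorted), which is what the dedup benchmarks below want to exercise.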
+fn random_sorted_fill(mut seed: u32, buf: &mut [u32]) {
+ let mask = if buf.len() < 8192 {
+ 0xFF
+ } else if buf.len() < 200_000 {
+ 0xFFFF
+ } else {
+ 0xFFFF_FFFF
+ };
+
+ for item in buf.iter_mut() {
+ seed ^= seed << 13;
+ seed ^= seed >> 17;
+ seed ^= seed << 5;
+
+ *item = seed & mask;
+ }
+
+ buf.sort();
+}
+
+fn bench_vec_dedup_old(b: &mut Bencher, sz: usize) {
+ let mut template = vec![0u32; sz];
+ b.bytes = std::mem::size_of_val(template.as_slice()) as u64;
+ random_sorted_fill(0x43, &mut template);
+
+ let mut vec = template.clone();
+ b.iter(|| {
+ let len = {
+ let (dedup, _) = vec.partition_dedup();
+ dedup.len()
+ };
+ vec.truncate(len);
+
+ black_box(vec.first());
+ vec.clear();
+ vec.extend_from_slice(&template);
+ });
+}
+
+fn bench_vec_dedup_new(b: &mut Bencher, sz: usize) {
+ let mut template = vec![0u32; sz];
+ b.bytes = std::mem::size_of_val(template.as_slice()) as u64;
+ random_sorted_fill(0x43, &mut template);
+
+ let mut vec = template.clone();
+ b.iter(|| {
+ vec.dedup();
+ black_box(vec.first());
+ vec.clear();
+ vec.extend_from_slice(&template);
+ });
+}
+
+#[bench]
+fn bench_dedup_old_100(b: &mut Bencher) {
+ bench_vec_dedup_old(b, 100);
+}
+#[bench]
+fn bench_dedup_new_100(b: &mut Bencher) {
+ bench_vec_dedup_new(b, 100);
+}
+
+#[bench]
+fn bench_dedup_old_1000(b: &mut Bencher) {
+ bench_vec_dedup_old(b, 1000);
+}
+#[bench]
+fn bench_dedup_new_1000(b: &mut Bencher) {
+ bench_vec_dedup_new(b, 1000);
+}
+
+#[bench]
+fn bench_dedup_old_10000(b: &mut Bencher) {
+ bench_vec_dedup_old(b, 10000);
+}
+#[bench]
+fn bench_dedup_new_10000(b: &mut Bencher) {
+ bench_vec_dedup_new(b, 10000);
+}
+
+#[bench]
+fn bench_dedup_old_100000(b: &mut Bencher) {
+ bench_vec_dedup_old(b, 100000);
+}
+#[bench]
+fn bench_dedup_new_100000(b: &mut Bencher) {
+ bench_vec_dedup_new(b, 100000);
+}
+
+#[bench]
+fn bench_flat_map_collect(b: &mut Bencher) {
+ let v = vec![777u32; 500000];
+ b.iter(|| v.iter().flat_map(|color| color.rotate_left(8).to_be_bytes()).collect::<Vec<_>>());
+}
+
+/// Reference benchmark that `retain` has to compete with.
+#[bench]
+fn bench_retain_iter_100000(b: &mut Bencher) {
+ let mut v = Vec::with_capacity(100000);
+
+ b.iter(|| {
+ let mut tmp = std::mem::take(&mut v);
+ tmp.clear();
+ tmp.extend(black_box(1..=100000));
+ v = tmp.into_iter().filter(|x| x & 1 == 0).collect();
+ });
+}
+
+#[bench]
+fn bench_retain_100000(b: &mut Bencher) {
+ let mut v = Vec::with_capacity(100000);
+
+ b.iter(|| {
+ v.clear();
+ v.extend(black_box(1..=100000));
+ v.retain(|x| x & 1 == 0)
+ });
+}
+
+#[bench]
+fn bench_retain_whole_100000(b: &mut Bencher) {
+ let mut v = black_box(vec![826u32; 100000]);
+ b.iter(|| v.retain(|x| *x == 826u32));
+}
+
+#[bench]
+fn bench_next_chunk(b: &mut Bencher) {
+ let v = vec![13u8; 2048];
+
+ b.iter(|| {
+ const CHUNK: usize = 8;
+
+ let mut sum = [0u32; CHUNK];
+ let mut iter = black_box(v.clone()).into_iter();
+
+ while let Ok(chunk) = iter.next_chunk::<CHUNK>() {
+ for i in 0..CHUNK {
+ sum[i] += chunk[i] as u32;
+ }
+ }
+
+ sum
+ })
+}
diff --git a/library/alloc/benches/vec_deque.rs b/library/alloc/benches/vec_deque.rs
new file mode 100644
index 000000000..7c78561eb
--- /dev/null
+++ b/library/alloc/benches/vec_deque.rs
@@ -0,0 +1,125 @@
+use std::collections::VecDeque;
+use test::{black_box, Bencher};
+
+#[bench]
+fn bench_new(b: &mut Bencher) {
+ b.iter(|| {
+ let ring: VecDeque<i32> = VecDeque::new();
+ black_box(ring);
+ })
+}
+
+#[bench]
+fn bench_grow_1025(b: &mut Bencher) {
+ b.iter(|| {
+ let mut deq = VecDeque::new();
+ for i in 0..1025 {
+ deq.push_front(i);
+ }
+ black_box(deq);
+ })
+}
+
+#[bench]
+fn bench_iter_1000(b: &mut Bencher) {
+ let ring: VecDeque<_> = (0..1000).collect();
+
+ b.iter(|| {
+ let mut sum = 0;
+ for &i in &ring {
+ sum += i;
+ }
+ black_box(sum);
+ })
+}
+
+#[bench]
+fn bench_mut_iter_1000(b: &mut Bencher) {
+ let mut ring: VecDeque<_> = (0..1000).collect();
+
+ b.iter(|| {
+ let mut sum = 0;
+ for i in &mut ring {
+ sum += *i;
+ }
+ black_box(sum);
+ })
+}
+
+#[bench]
+fn bench_try_fold(b: &mut Bencher) {
+ let ring: VecDeque<_> = (0..1000).collect();
+
+ b.iter(|| black_box(ring.iter().try_fold(0, |a, b| Some(a + b))))
+}
+
+#[bench]
+fn bench_from_array_1000(b: &mut Bencher) {
+ const N: usize = 1000;
+ let mut array: [usize; N] = [0; N];
+
+ for i in 0..N {
+ array[i] = i;
+ }
+
+ b.iter(|| {
+ let deq: VecDeque<_> = array.into();
+ black_box(deq);
+ })
+}
+
+#[bench]
+fn bench_extend_bytes(b: &mut Bencher) {
+ let mut ring: VecDeque<u8> = VecDeque::with_capacity(1000);
+ let input: &[u8] = &[128; 512];
+
+ b.iter(|| {
+ ring.clear();
+ ring.extend(black_box(input));
+ });
+}
+
+#[bench]
+fn bench_extend_vec(b: &mut Bencher) {
+ let mut ring: VecDeque<u8> = VecDeque::with_capacity(1000);
+ let input = vec![128; 512];
+
+ b.iter(|| {
+ ring.clear();
+
+ let input = input.clone();
+ ring.extend(black_box(input));
+ });
+}
+
+#[bench]
+fn bench_extend_trustedlen(b: &mut Bencher) {
+ let mut ring: VecDeque<u16> = VecDeque::with_capacity(1000);
+
+ b.iter(|| {
+ ring.clear();
+ ring.extend(black_box(0..512));
+ });
+}
+
+#[bench]
+fn bench_extend_chained_trustedlen(b: &mut Bencher) {
+ let mut ring: VecDeque<u16> = VecDeque::with_capacity(1000);
+
+ b.iter(|| {
+ ring.clear();
+ ring.extend(black_box((0..256).chain(768..1024)));
+ });
+}
+
+#[bench]
+fn bench_extend_chained_bytes(b: &mut Bencher) {
+ let mut ring: VecDeque<u16> = VecDeque::with_capacity(1000);
+ let input1: &[u16] = &[128; 256];
+ let input2: &[u16] = &[255; 256];
+
+ b.iter(|| {
+ ring.clear();
+ ring.extend(black_box(input1.iter().chain(input2.iter())));
+ });
+}
diff --git a/library/alloc/benches/vec_deque_append.rs b/library/alloc/benches/vec_deque_append.rs
new file mode 100644
index 000000000..5825bdc35
--- /dev/null
+++ b/library/alloc/benches/vec_deque_append.rs
@@ -0,0 +1,34 @@
+use std::{collections::VecDeque, time::Instant};
+
+const VECDEQUE_LEN: i32 = 100000;
+const WARMUP_N: usize = 100;
+const BENCH_N: usize = 1000;
+
+fn main() {
+ let a: VecDeque<i32> = (0..VECDEQUE_LEN).collect();
+ let b: VecDeque<i32> = (0..VECDEQUE_LEN).collect();
+
+ for _ in 0..WARMUP_N {
+ let mut c = a.clone();
+ let mut d = b.clone();
+ c.append(&mut d);
+ }
+
+ let mut durations = Vec::with_capacity(BENCH_N);
+
+ for _ in 0..BENCH_N {
+ let mut c = a.clone();
+ let mut d = b.clone();
+ let before = Instant::now();
+ c.append(&mut d);
+ let after = Instant::now();
+ durations.push(after.duration_since(before));
+ }
+
+ let l = durations.len();
+ durations.sort();
+
+ assert!(BENCH_N % 2 == 0);
+ let median = (durations[(l / 2) - 1] + durations[l / 2]) / 2;
+ println!("\ncustom-bench vec_deque_append {:?} ns/iter\n", median.as_nanos());
+}
diff --git a/library/alloc/src/alloc.rs b/library/alloc/src/alloc.rs
new file mode 100644
index 000000000..cc8da7bcc
--- /dev/null
+++ b/library/alloc/src/alloc.rs
@@ -0,0 +1,444 @@
+//! Memory allocation APIs
+
+#![stable(feature = "alloc_module", since = "1.28.0")]
+
+#[cfg(not(test))]
+use core::intrinsics;
+use core::intrinsics::{min_align_of_val, size_of_val};
+
+use core::ptr::Unique;
+#[cfg(not(test))]
+use core::ptr::{self, NonNull};
+
+#[stable(feature = "alloc_module", since = "1.28.0")]
+#[doc(inline)]
+pub use core::alloc::*;
+
+use core::marker::Destruct;
+
+#[cfg(test)]
+mod tests;
+
+extern "Rust" {
+ // These are the magic symbols to call the global allocator. rustc generates
+ // them to call `__rg_alloc` etc. if there is a `#[global_allocator]` attribute
+ // (the code expanding that attribute macro generates those functions), or to call
+ // the default implementations in libstd (`__rdl_alloc` etc. in `library/std/src/alloc.rs`)
+ // otherwise.
+ // The rustc fork of LLVM 14 and earlier also special-cases these function names to be able to optimize them
+ // like `malloc`, `realloc`, and `free`, respectively.
+ #[rustc_allocator]
+ #[rustc_allocator_nounwind]
+ fn __rust_alloc(size: usize, align: usize) -> *mut u8;
+ #[cfg_attr(not(bootstrap), rustc_deallocator)]
+ #[rustc_allocator_nounwind]
+ fn __rust_dealloc(ptr: *mut u8, size: usize, align: usize);
+ #[cfg_attr(not(bootstrap), rustc_reallocator)]
+ #[rustc_allocator_nounwind]
+ fn __rust_realloc(ptr: *mut u8, old_size: usize, align: usize, new_size: usize) -> *mut u8;
+ #[cfg_attr(not(bootstrap), rustc_allocator_zeroed)]
+ #[rustc_allocator_nounwind]
+ fn __rust_alloc_zeroed(size: usize, align: usize) -> *mut u8;
+}
+
+/// The global memory allocator.
+///
+/// This type implements the [`Allocator`] trait by forwarding calls
+/// to the allocator registered with the `#[global_allocator]` attribute
+/// if there is one, or the `std` crate’s default.
+///
+/// Note: while this type is unstable, the functionality it provides can be
+/// accessed through the [free functions in `alloc`](self#functions).
+#[unstable(feature = "allocator_api", issue = "32838")]
+#[derive(Copy, Clone, Default, Debug)]
+#[cfg(not(test))]
+pub struct Global;
+
+#[cfg(test)]
+pub use std::alloc::Global;
+
+/// Allocate memory with the global allocator.
+///
+/// This function forwards calls to the [`GlobalAlloc::alloc`] method
+/// of the allocator registered with the `#[global_allocator]` attribute
+/// if there is one, or the `std` crate’s default.
+///
+/// This function is expected to be deprecated in favor of the `alloc` method
+/// of the [`Global`] type when it and the [`Allocator`] trait become stable.
+///
+/// # Safety
+///
+/// See [`GlobalAlloc::alloc`].
+///
+/// # Examples
+///
+/// ```
+/// use std::alloc::{alloc, dealloc, handle_alloc_error, Layout};
+///
+/// unsafe {
+/// let layout = Layout::new::<u16>();
+/// let ptr = alloc(layout);
+/// if ptr.is_null() {
+/// handle_alloc_error(layout);
+/// }
+///
+/// *(ptr as *mut u16) = 42;
+/// assert_eq!(*(ptr as *mut u16), 42);
+///
+/// dealloc(ptr, layout);
+/// }
+/// ```
+#[stable(feature = "global_alloc", since = "1.28.0")]
+#[must_use = "losing the pointer will leak memory"]
+#[inline]
+pub unsafe fn alloc(layout: Layout) -> *mut u8 {
+ unsafe { __rust_alloc(layout.size(), layout.align()) }
+}
+
+/// Deallocate memory with the global allocator.
+///
+/// This function forwards calls to the [`GlobalAlloc::dealloc`] method
+/// of the allocator registered with the `#[global_allocator]` attribute
+/// if there is one, or the `std` crate’s default.
+///
+/// This function is expected to be deprecated in favor of the `dealloc` method
+/// of the [`Global`] type when it and the [`Allocator`] trait become stable.
+///
+/// # Safety
+///
+/// See [`GlobalAlloc::dealloc`].
+#[stable(feature = "global_alloc", since = "1.28.0")]
+#[inline]
+pub unsafe fn dealloc(ptr: *mut u8, layout: Layout) {
+ unsafe { __rust_dealloc(ptr, layout.size(), layout.align()) }
+}
+
+/// Reallocate memory with the global allocator.
+///
+/// This function forwards calls to the [`GlobalAlloc::realloc`] method
+/// of the allocator registered with the `#[global_allocator]` attribute
+/// if there is one, or the `std` crate’s default.
+///
+/// This function is expected to be deprecated in favor of the `realloc` method
+/// of the [`Global`] type when it and the [`Allocator`] trait become stable.
+///
+/// # Safety
+///
+/// See [`GlobalAlloc::realloc`].
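+///
+/// # Examples
+///
+/// A minimal sketch of growing an allocation; as with [`GlobalAlloc::realloc`], the
+/// `layout` passed in must describe the existing block, and the new layout must be
+/// used when deallocating:
+///
+/// ```
+/// use std::alloc::{alloc, dealloc, handle_alloc_error, realloc, Layout};
+///
+/// unsafe {
+/// let layout = Layout::array::<u16>(4).unwrap();
+/// let ptr = alloc(layout);
+/// if ptr.is_null() {
+/// handle_alloc_error(layout);
+/// }
+///
+/// // Grow the block from four to eight `u16`s, keeping the original alignment.
+/// let new_layout = Layout::array::<u16>(8).unwrap();
+/// let new_ptr = realloc(ptr, layout, new_layout.size());
+/// if new_ptr.is_null() {
+/// handle_alloc_error(new_layout);
+/// }
+///
+/// dealloc(new_ptr, new_layout);
+/// }
+/// ```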
+#[stable(feature = "global_alloc", since = "1.28.0")]
+#[must_use = "losing the pointer will leak memory"]
+#[inline]
+pub unsafe fn realloc(ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
+ unsafe { __rust_realloc(ptr, layout.size(), layout.align(), new_size) }
+}
+
+/// Allocate zero-initialized memory with the global allocator.
+///
+/// This function forwards calls to the [`GlobalAlloc::alloc_zeroed`] method
+/// of the allocator registered with the `#[global_allocator]` attribute
+/// if there is one, or the `std` crate’s default.
+///
+/// This function is expected to be deprecated in favor of the `alloc_zeroed` method
+/// of the [`Global`] type when it and the [`Allocator`] trait become stable.
+///
+/// # Safety
+///
+/// See [`GlobalAlloc::alloc_zeroed`].
+///
+/// # Examples
+///
+/// ```
+/// use std::alloc::{alloc_zeroed, dealloc, Layout};
+///
+/// unsafe {
+/// let layout = Layout::new::<u16>();
+/// let ptr = alloc_zeroed(layout);
+///
+/// assert_eq!(*(ptr as *mut u16), 0);
+///
+/// dealloc(ptr, layout);
+/// }
+/// ```
+#[stable(feature = "global_alloc", since = "1.28.0")]
+#[must_use = "losing the pointer will leak memory"]
+#[inline]
+pub unsafe fn alloc_zeroed(layout: Layout) -> *mut u8 {
+ unsafe { __rust_alloc_zeroed(layout.size(), layout.align()) }
+}
+
+#[cfg(not(test))]
+impl Global {
+ #[inline]
+ fn alloc_impl(&self, layout: Layout, zeroed: bool) -> Result<NonNull<[u8]>, AllocError> {
+ match layout.size() {
+ 0 => Ok(NonNull::slice_from_raw_parts(layout.dangling(), 0)),
+ // SAFETY: `layout` is non-zero in size.
+ size => unsafe {
+ let raw_ptr = if zeroed { alloc_zeroed(layout) } else { alloc(layout) };
+ let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
+ Ok(NonNull::slice_from_raw_parts(ptr, size))
+ },
+ }
+ }
+
+ // SAFETY: Same as `Allocator::grow`
+ #[inline]
+ unsafe fn grow_impl(
+ &self,
+ ptr: NonNull<u8>,
+ old_layout: Layout,
+ new_layout: Layout,
+ zeroed: bool,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ debug_assert!(
+ new_layout.size() >= old_layout.size(),
+ "`new_layout.size()` must be greater than or equal to `old_layout.size()`"
+ );
+
+ match old_layout.size() {
+ 0 => self.alloc_impl(new_layout, zeroed),
+
+ // SAFETY: `new_size` is non-zero as `new_size` is greater than or equal to `old_size`
+ // as required by safety conditions, and `old_size` is non-zero in this arm. Other
+ // conditions must be upheld by the caller.
+ old_size if old_layout.align() == new_layout.align() => unsafe {
+ let new_size = new_layout.size();
+
+ // `realloc` probably checks for `new_size >= old_layout.size()` or something similar.
+ intrinsics::assume(new_size >= old_layout.size());
+
+ let raw_ptr = realloc(ptr.as_ptr(), old_layout, new_size);
+ let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
+ if zeroed {
+ raw_ptr.add(old_size).write_bytes(0, new_size - old_size);
+ }
+ Ok(NonNull::slice_from_raw_parts(ptr, new_size))
+ },
+
+ // SAFETY: because `new_layout.size()` must be greater than or equal to `old_size`,
+ // both the old and new memory allocation are valid for reads and writes for `old_size`
+ // bytes. Also, because the old allocation wasn't yet deallocated, it cannot overlap
+ // `new_ptr`. Thus, the call to `copy_nonoverlapping` is safe. The safety contract
+ // for `dealloc` must be upheld by the caller.
+ old_size => unsafe {
+ let new_ptr = self.alloc_impl(new_layout, zeroed)?;
+ ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_mut_ptr(), old_size);
+ self.deallocate(ptr, old_layout);
+ Ok(new_ptr)
+ },
+ }
+ }
+}
+
+#[unstable(feature = "allocator_api", issue = "32838")]
+#[cfg(not(test))]
+unsafe impl Allocator for Global {
+ #[inline]
+ fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
+ self.alloc_impl(layout, false)
+ }
+
+ #[inline]
+ fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
+ self.alloc_impl(layout, true)
+ }
+
+ #[inline]
+ unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
+ if layout.size() != 0 {
+ // SAFETY: `layout` is non-zero in size,
+ // other conditions must be upheld by the caller
+ unsafe { dealloc(ptr.as_ptr(), layout) }
+ }
+ }
+
+ #[inline]
+ unsafe fn grow(
+ &self,
+ ptr: NonNull<u8>,
+ old_layout: Layout,
+ new_layout: Layout,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ // SAFETY: all conditions must be upheld by the caller
+ unsafe { self.grow_impl(ptr, old_layout, new_layout, false) }
+ }
+
+ #[inline]
+ unsafe fn grow_zeroed(
+ &self,
+ ptr: NonNull<u8>,
+ old_layout: Layout,
+ new_layout: Layout,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ // SAFETY: all conditions must be upheld by the caller
+ unsafe { self.grow_impl(ptr, old_layout, new_layout, true) }
+ }
+
+ #[inline]
+ unsafe fn shrink(
+ &self,
+ ptr: NonNull<u8>,
+ old_layout: Layout,
+ new_layout: Layout,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ debug_assert!(
+ new_layout.size() <= old_layout.size(),
+ "`new_layout.size()` must be smaller than or equal to `old_layout.size()`"
+ );
+
+ match new_layout.size() {
+ // SAFETY: conditions must be upheld by the caller
+ 0 => unsafe {
+ self.deallocate(ptr, old_layout);
+ Ok(NonNull::slice_from_raw_parts(new_layout.dangling(), 0))
+ },
+
+ // SAFETY: `new_size` is non-zero. Other conditions must be upheld by the caller
+ new_size if old_layout.align() == new_layout.align() => unsafe {
+ // `realloc` probably checks for `new_size <= old_layout.size()` or something similar.
+ intrinsics::assume(new_size <= old_layout.size());
+
+ let raw_ptr = realloc(ptr.as_ptr(), old_layout, new_size);
+ let ptr = NonNull::new(raw_ptr).ok_or(AllocError)?;
+ Ok(NonNull::slice_from_raw_parts(ptr, new_size))
+ },
+
+ // SAFETY: because `new_size` must be smaller than or equal to `old_layout.size()`,
+ // both the old and new memory allocation are valid for reads and writes for `new_size`
+ // bytes. Also, because the old allocation wasn't yet deallocated, it cannot overlap
+ // `new_ptr`. Thus, the call to `copy_nonoverlapping` is safe. The safety contract
+ // for `dealloc` must be upheld by the caller.
+ new_size => unsafe {
+ let new_ptr = self.allocate(new_layout)?;
+ ptr::copy_nonoverlapping(ptr.as_ptr(), new_ptr.as_mut_ptr(), new_size);
+ self.deallocate(ptr, old_layout);
+ Ok(new_ptr)
+ },
+ }
+ }
+}
+
+/// The allocator for unique pointers.
+#[cfg(all(not(no_global_oom_handling), not(test)))]
+#[lang = "exchange_malloc"]
+#[inline]
+unsafe fn exchange_malloc(size: usize, align: usize) -> *mut u8 {
+ let layout = unsafe { Layout::from_size_align_unchecked(size, align) };
+ match Global.allocate(layout) {
+ Ok(ptr) => ptr.as_mut_ptr(),
+ Err(_) => handle_alloc_error(layout),
+ }
+}
+
+#[cfg_attr(not(test), lang = "box_free")]
+#[inline]
+#[rustc_const_unstable(feature = "const_box", issue = "92521")]
+// This signature has to be the same as `Box`, otherwise an ICE will happen.
+// When an additional parameter to `Box` is added (like `A: Allocator`), this has to be added here as
+// well.
+// For example if `Box` is changed to `struct Box<T: ?Sized, A: Allocator>(Unique<T>, A)`,
+// this function has to be changed to `fn box_free<T: ?Sized, A: Allocator>(Unique<T>, A)` as well.
+pub(crate) const unsafe fn box_free<T: ?Sized, A: ~const Allocator + ~const Destruct>(
+ ptr: Unique<T>,
+ alloc: A,
+) {
+ unsafe {
+ let size = size_of_val(ptr.as_ref());
+ let align = min_align_of_val(ptr.as_ref());
+ let layout = Layout::from_size_align_unchecked(size, align);
+ alloc.deallocate(From::from(ptr.cast()), layout)
+ }
+}
+
+// # Allocation error handler
+
+#[cfg(not(no_global_oom_handling))]
+extern "Rust" {
+ // This is the magic symbol to call the global alloc error handler. rustc generates
+ // it to call `__rg_oom` if there is a `#[alloc_error_handler]`, or to call the
+ // default implementations below (`__rdl_oom`) otherwise.
+ fn __rust_alloc_error_handler(size: usize, align: usize) -> !;
+}
+
+/// Abort on memory allocation error or failure.
+///
+/// Callers of memory allocation APIs wishing to abort computation
+/// in response to an allocation error are encouraged to call this function,
+/// rather than directly invoking `panic!` or similar.
+///
+/// The default behavior of this function is to print a message to standard error
+/// and abort the process.
+/// It can be replaced with [`set_alloc_error_hook`] and [`take_alloc_error_hook`].
+///
+/// [`set_alloc_error_hook`]: ../../std/alloc/fn.set_alloc_error_hook.html
+/// [`take_alloc_error_hook`]: ../../std/alloc/fn.take_alloc_error_hook.html
+#[stable(feature = "global_alloc", since = "1.28.0")]
+#[rustc_const_unstable(feature = "const_alloc_error", issue = "92523")]
+#[cfg(all(not(no_global_oom_handling), not(test)))]
+#[cold]
+pub const fn handle_alloc_error(layout: Layout) -> ! {
+ const fn ct_error(_: Layout) -> ! {
+ panic!("allocation failed");
+ }
+
+ fn rt_error(layout: Layout) -> ! {
+ unsafe {
+ __rust_alloc_error_handler(layout.size(), layout.align());
+ }
+ }
+
+ unsafe { core::intrinsics::const_eval_select((layout,), ct_error, rt_error) }
+}
+
+// For alloc test `std::alloc::handle_alloc_error` can be used directly.
+#[cfg(all(not(no_global_oom_handling), test))]
+pub use std::alloc::handle_alloc_error;
+
+#[cfg(all(not(no_global_oom_handling), not(test)))]
+#[doc(hidden)]
+#[allow(unused_attributes)]
+#[unstable(feature = "alloc_internals", issue = "none")]
+pub mod __alloc_error_handler {
+ use crate::alloc::Layout;
+
+ // called via generated `__rust_alloc_error_handler`
+
+ // if there is no `#[alloc_error_handler]`
+ #[rustc_std_internal_symbol]
+ pub unsafe fn __rdl_oom(size: usize, _align: usize) -> ! {
+ panic!("memory allocation of {size} bytes failed")
+ }
+
+ // if there is an `#[alloc_error_handler]`
+ #[rustc_std_internal_symbol]
+ pub unsafe fn __rg_oom(size: usize, align: usize) -> ! {
+ let layout = unsafe { Layout::from_size_align_unchecked(size, align) };
+ extern "Rust" {
+ #[lang = "oom"]
+ fn oom_impl(layout: Layout) -> !;
+ }
+ unsafe { oom_impl(layout) }
+ }
+}
+
+/// Specialize clones into pre-allocated, uninitialized memory.
+/// Used by `Box::clone` and `Rc`/`Arc::make_mut`.
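+///
+/// A rough sketch of the intended call pattern (assuming a `value: &T`, a matching
+/// `layout`, and an allocator `alloc` in scope): allocate first, then clone straight
+/// into the uninitialized memory.
+///
+/// ```ignore
+/// let target = alloc.allocate(layout)?.as_mut_ptr() as *mut T;
+/// // Clone directly into the fresh allocation instead of constructing a local and moving it.
+/// unsafe { value.write_clone_into_raw(target) };
+/// ```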
+pub(crate) trait WriteCloneIntoRaw: Sized {
+ unsafe fn write_clone_into_raw(&self, target: *mut Self);
+}
+
+impl<T: Clone> WriteCloneIntoRaw for T {
+ #[inline]
+ default unsafe fn write_clone_into_raw(&self, target: *mut Self) {
+ // Having allocated *first* may allow the optimizer to create
+ // the cloned value in-place, skipping the local and move.
+ unsafe { target.write(self.clone()) };
+ }
+}
+
+impl<T: Copy> WriteCloneIntoRaw for T {
+ #[inline]
+ unsafe fn write_clone_into_raw(&self, target: *mut Self) {
+ // We can always copy in-place, without ever involving a local value.
+ unsafe { target.copy_from_nonoverlapping(self, 1) };
+ }
+}
diff --git a/library/alloc/src/alloc/tests.rs b/library/alloc/src/alloc/tests.rs
new file mode 100644
index 000000000..7d560964d
--- /dev/null
+++ b/library/alloc/src/alloc/tests.rs
@@ -0,0 +1,30 @@
+use super::*;
+
+extern crate test;
+use crate::boxed::Box;
+use test::Bencher;
+
+#[test]
+fn allocate_zeroed() {
+ unsafe {
+ let layout = Layout::from_size_align(1024, 1).unwrap();
+ let ptr =
+ Global.allocate_zeroed(layout.clone()).unwrap_or_else(|_| handle_alloc_error(layout));
+
+ let mut i = ptr.as_non_null_ptr().as_ptr();
+ let end = i.add(layout.size());
+ while i < end {
+ assert_eq!(*i, 0);
+ i = i.offset(1);
+ }
+ Global.deallocate(ptr.as_non_null_ptr(), layout);
+ }
+}
+
+#[bench]
+#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
+fn alloc_owned_small(b: &mut Bencher) {
+ b.iter(|| {
+ let _: Box<_> = Box::new(10);
+ })
+}
diff --git a/library/alloc/src/borrow.rs b/library/alloc/src/borrow.rs
new file mode 100644
index 000000000..904a53bb4
--- /dev/null
+++ b/library/alloc/src/borrow.rs
@@ -0,0 +1,495 @@
+//! A module for working with borrowed data.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use core::cmp::Ordering;
+use core::hash::{Hash, Hasher};
+use core::ops::Deref;
+#[cfg(not(no_global_oom_handling))]
+use core::ops::{Add, AddAssign};
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::borrow::{Borrow, BorrowMut};
+
+use crate::fmt;
+#[cfg(not(no_global_oom_handling))]
+use crate::string::String;
+
+use Cow::*;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, B: ?Sized> Borrow<B> for Cow<'a, B>
+where
+ B: ToOwned,
+ <B as ToOwned>::Owned: 'a,
+{
+ fn borrow(&self) -> &B {
+ &**self
+ }
+}
+
+/// A generalization of `Clone` to borrowed data.
+///
+/// Some types make it possible to go from borrowed to owned, usually by
+/// implementing the `Clone` trait. But `Clone` works only for going from `&T`
+/// to `T`. The `ToOwned` trait generalizes `Clone` to construct owned data
+/// from any borrow of a given type.
+#[cfg_attr(not(test), rustc_diagnostic_item = "ToOwned")]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait ToOwned {
+ /// The resulting type after obtaining ownership.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ type Owned: Borrow<Self>;
+
+ /// Creates owned data from borrowed data, usually by cloning.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s: &str = "a";
+ /// let ss: String = s.to_owned();
+ ///
+ /// let v: &[i32] = &[1, 2];
+ /// let vv: Vec<i32> = v.to_owned();
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use = "cloning is often expensive and is not expected to have side effects"]
+ fn to_owned(&self) -> Self::Owned;
+
+ /// Uses borrowed data to replace owned data, usually by cloning.
+ ///
+ /// This is the borrow-generalized version of [`Clone::clone_from`].
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s: String = String::new();
+ /// "hello".clone_into(&mut s);
+ ///
+ /// let mut v: Vec<i32> = Vec::new();
+ /// [1, 2][..].clone_into(&mut v);
+ /// ```
+ #[stable(feature = "toowned_clone_into", since = "1.63.0")]
+ fn clone_into(&self, target: &mut Self::Owned) {
+ *target = self.to_owned();
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> ToOwned for T
+where
+ T: Clone,
+{
+ type Owned = T;
+ fn to_owned(&self) -> T {
+ self.clone()
+ }
+
+ fn clone_into(&self, target: &mut T) {
+ target.clone_from(self);
+ }
+}
+
+/// A clone-on-write smart pointer.
+///
+/// The type `Cow` is a smart pointer providing clone-on-write functionality: it
+/// can enclose and provide immutable access to borrowed data, and clone the
+/// data lazily when mutation or ownership is required. The type is designed to
+/// work with general borrowed data via the `Borrow` trait.
+///
+/// `Cow` implements `Deref`, which means that you can call
+/// non-mutating methods directly on the data it encloses. If mutation
+/// is desired, `to_mut` will obtain a mutable reference to an owned
+/// value, cloning if necessary.
+///
+/// If you need reference-counting pointers, note that
+/// [`Rc::make_mut`][crate::rc::Rc::make_mut] and
+/// [`Arc::make_mut`][crate::sync::Arc::make_mut] can provide clone-on-write
+/// functionality as well.
+///
+/// # Examples
+///
+/// ```
+/// use std::borrow::Cow;
+///
+/// fn abs_all(input: &mut Cow<[i32]>) {
+/// for i in 0..input.len() {
+/// let v = input[i];
+/// if v < 0 {
+/// // Clones into a vector if not already owned.
+/// input.to_mut()[i] = -v;
+/// }
+/// }
+/// }
+///
+/// // No clone occurs because `input` doesn't need to be mutated.
+/// let slice = [0, 1, 2];
+/// let mut input = Cow::from(&slice[..]);
+/// abs_all(&mut input);
+///
+/// // Clone occurs because `input` needs to be mutated.
+/// let slice = [-1, 0, 1];
+/// let mut input = Cow::from(&slice[..]);
+/// abs_all(&mut input);
+///
+/// // No clone occurs because `input` is already owned.
+/// let mut input = Cow::from(vec![-1, 0, 1]);
+/// abs_all(&mut input);
+/// ```
+///
+/// Another example showing how to keep `Cow` in a struct:
+///
+/// ```
+/// use std::borrow::Cow;
+///
+/// struct Items<'a, X: 'a> where [X]: ToOwned<Owned = Vec<X>> {
+/// values: Cow<'a, [X]>,
+/// }
+///
+/// impl<'a, X: Clone + 'a> Items<'a, X> where [X]: ToOwned<Owned = Vec<X>> {
+/// fn new(v: Cow<'a, [X]>) -> Self {
+/// Items { values: v }
+/// }
+/// }
+///
+/// // Creates a container from borrowed values of a slice
+/// let readonly = [1, 2];
+/// let borrowed = Items::new((&readonly[..]).into());
+/// match borrowed {
+/// Items { values: Cow::Borrowed(b) } => println!("borrowed {b:?}"),
+/// _ => panic!("expect borrowed value"),
+/// }
+///
+/// let mut clone_on_write = borrowed;
+/// // Mutates the data from slice into owned vec and pushes a new value on top
+/// clone_on_write.values.to_mut().push(3);
+/// println!("clone_on_write = {:?}", clone_on_write.values);
+///
+/// // The data was mutated. Let's check it out.
+/// match clone_on_write {
+/// Items { values: Cow::Owned(_) } => println!("clone_on_write contains owned data"),
+/// _ => panic!("expect owned data"),
+/// }
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "Cow")]
+pub enum Cow<'a, B: ?Sized + 'a>
+where
+ B: ToOwned,
+{
+ /// Borrowed data.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ Borrowed(#[stable(feature = "rust1", since = "1.0.0")] &'a B),
+
+ /// Owned data.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ Owned(#[stable(feature = "rust1", since = "1.0.0")] <B as ToOwned>::Owned),
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<B: ?Sized + ToOwned> Clone for Cow<'_, B> {
+ fn clone(&self) -> Self {
+ match *self {
+ Borrowed(b) => Borrowed(b),
+ Owned(ref o) => {
+ let b: &B = o.borrow();
+ Owned(b.to_owned())
+ }
+ }
+ }
+
+ fn clone_from(&mut self, source: &Self) {
+ match (self, source) {
+ (&mut Owned(ref mut dest), &Owned(ref o)) => o.borrow().clone_into(dest),
+ (t, s) => *t = s.clone(),
+ }
+ }
+}
+
+impl<B: ?Sized + ToOwned> Cow<'_, B> {
+ /// Returns true if the data is borrowed, i.e. if `to_mut` would require additional work.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(cow_is_borrowed)]
+ /// use std::borrow::Cow;
+ ///
+ /// let cow = Cow::Borrowed("moo");
+ /// assert!(cow.is_borrowed());
+ ///
+ /// let bull: Cow<'_, str> = Cow::Owned("...moo?".to_string());
+ /// assert!(!bull.is_borrowed());
+ /// ```
+ #[unstable(feature = "cow_is_borrowed", issue = "65143")]
+ #[rustc_const_unstable(feature = "const_cow_is_borrowed", issue = "65143")]
+ pub const fn is_borrowed(&self) -> bool {
+ match *self {
+ Borrowed(_) => true,
+ Owned(_) => false,
+ }
+ }
+
+ /// Returns true if the data is owned, i.e. if `to_mut` would be a no-op.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(cow_is_borrowed)]
+ /// use std::borrow::Cow;
+ ///
+ /// let cow: Cow<'_, str> = Cow::Owned("moo".to_string());
+ /// assert!(cow.is_owned());
+ ///
+ /// let bull = Cow::Borrowed("...moo?");
+ /// assert!(!bull.is_owned());
+ /// ```
+ #[unstable(feature = "cow_is_borrowed", issue = "65143")]
+ #[rustc_const_unstable(feature = "const_cow_is_borrowed", issue = "65143")]
+ pub const fn is_owned(&self) -> bool {
+ !self.is_borrowed()
+ }
+
+ /// Acquires a mutable reference to the owned form of the data.
+ ///
+ /// Clones the data if it is not already owned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::borrow::Cow;
+ ///
+ /// let mut cow = Cow::Borrowed("foo");
+ /// cow.to_mut().make_ascii_uppercase();
+ ///
+ /// assert_eq!(
+ /// cow,
+ /// Cow::Owned(String::from("FOO")) as Cow<str>
+ /// );
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn to_mut(&mut self) -> &mut <B as ToOwned>::Owned {
+ match *self {
+ Borrowed(borrowed) => {
+ *self = Owned(borrowed.to_owned());
+ match *self {
+ Borrowed(..) => unreachable!(),
+ Owned(ref mut owned) => owned,
+ }
+ }
+ Owned(ref mut owned) => owned,
+ }
+ }
+
+ /// Extracts the owned data.
+ ///
+ /// Clones the data if it is not already owned.
+ ///
+ /// # Examples
+ ///
+ /// Calling `into_owned` on a `Cow::Borrowed` returns a clone of the borrowed data:
+ ///
+ /// ```
+ /// use std::borrow::Cow;
+ ///
+ /// let s = "Hello world!";
+ /// let cow = Cow::Borrowed(s);
+ ///
+ /// assert_eq!(
+ /// cow.into_owned(),
+ /// String::from(s)
+ /// );
+ /// ```
+ ///
+ /// Calling `into_owned` on a `Cow::Owned` returns the owned data. The data is moved out of the
+ /// `Cow` without being cloned.
+ ///
+ /// ```
+ /// use std::borrow::Cow;
+ ///
+ /// let s = "Hello world!";
+ /// let cow: Cow<str> = Cow::Owned(String::from(s));
+ ///
+ /// assert_eq!(
+ /// cow.into_owned(),
+ /// String::from(s)
+ /// );
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn into_owned(self) -> <B as ToOwned>::Owned {
+ match self {
+ Borrowed(borrowed) => borrowed.to_owned(),
+ Owned(owned) => owned,
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_deref", issue = "88955")]
+impl<B: ?Sized + ToOwned> const Deref for Cow<'_, B>
+where
+ B::Owned: ~const Borrow<B>,
+{
+ type Target = B;
+
+ fn deref(&self) -> &B {
+ match *self {
+ Borrowed(borrowed) => borrowed,
+ Owned(ref owned) => owned.borrow(),
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<B: ?Sized> Eq for Cow<'_, B> where B: Eq + ToOwned {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<B: ?Sized> Ord for Cow<'_, B>
+where
+ B: Ord + ToOwned,
+{
+ #[inline]
+ fn cmp(&self, other: &Self) -> Ordering {
+ Ord::cmp(&**self, &**other)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, 'b, B: ?Sized, C: ?Sized> PartialEq<Cow<'b, C>> for Cow<'a, B>
+where
+ B: PartialEq<C> + ToOwned,
+ C: ToOwned,
+{
+ #[inline]
+ fn eq(&self, other: &Cow<'b, C>) -> bool {
+ PartialEq::eq(&**self, &**other)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, B: ?Sized> PartialOrd for Cow<'a, B>
+where
+ B: PartialOrd + ToOwned,
+{
+ #[inline]
+ fn partial_cmp(&self, other: &Cow<'a, B>) -> Option<Ordering> {
+ PartialOrd::partial_cmp(&**self, &**other)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<B: ?Sized> fmt::Debug for Cow<'_, B>
+where
+ B: fmt::Debug + ToOwned<Owned: fmt::Debug>,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ Borrowed(ref b) => fmt::Debug::fmt(b, f),
+ Owned(ref o) => fmt::Debug::fmt(o, f),
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<B: ?Sized> fmt::Display for Cow<'_, B>
+where
+ B: fmt::Display + ToOwned<Owned: fmt::Display>,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ Borrowed(ref b) => fmt::Display::fmt(b, f),
+ Owned(ref o) => fmt::Display::fmt(o, f),
+ }
+ }
+}
+
+#[stable(feature = "default", since = "1.11.0")]
+impl<B: ?Sized> Default for Cow<'_, B>
+where
+ B: ToOwned<Owned: Default>,
+{
+ /// Creates an owned Cow<'a, B> with the default value for the contained owned value.
+ fn default() -> Self {
+ Owned(<B as ToOwned>::Owned::default())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<B: ?Sized> Hash for Cow<'_, B>
+where
+ B: Hash + ToOwned,
+{
+ #[inline]
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ Hash::hash(&**self, state)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + ToOwned> AsRef<T> for Cow<'_, T> {
+ fn as_ref(&self) -> &T {
+ self
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "cow_add", since = "1.14.0")]
+impl<'a> Add<&'a str> for Cow<'a, str> {
+ type Output = Cow<'a, str>;
+
+ #[inline]
+ fn add(mut self, rhs: &'a str) -> Self::Output {
+ self += rhs;
+ self
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "cow_add", since = "1.14.0")]
+impl<'a> Add<Cow<'a, str>> for Cow<'a, str> {
+ type Output = Cow<'a, str>;
+
+ #[inline]
+ fn add(mut self, rhs: Cow<'a, str>) -> Self::Output {
+ self += rhs;
+ self
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "cow_add", since = "1.14.0")]
+impl<'a> AddAssign<&'a str> for Cow<'a, str> {
+ fn add_assign(&mut self, rhs: &'a str) {
+ if self.is_empty() {
+ *self = Cow::Borrowed(rhs)
+ } else if !rhs.is_empty() {
+ if let Cow::Borrowed(lhs) = *self {
+ let mut s = String::with_capacity(lhs.len() + rhs.len());
+ s.push_str(lhs);
+ *self = Cow::Owned(s);
+ }
+ self.to_mut().push_str(rhs);
+ }
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "cow_add", since = "1.14.0")]
+impl<'a> AddAssign<Cow<'a, str>> for Cow<'a, str> {
+ fn add_assign(&mut self, rhs: Cow<'a, str>) {
+ if self.is_empty() {
+ *self = rhs
+ } else if !rhs.is_empty() {
+ if let Cow::Borrowed(lhs) = *self {
+ let mut s = String::with_capacity(lhs.len() + rhs.len());
+ s.push_str(lhs);
+ *self = Cow::Owned(s);
+ }
+ self.to_mut().push_str(&rhs);
+ }
+ }
+}
diff --git a/library/alloc/src/boxed.rs b/library/alloc/src/boxed.rs
new file mode 100644
index 000000000..c1ceeb0de
--- /dev/null
+++ b/library/alloc/src/boxed.rs
@@ -0,0 +1,2087 @@
+//! A pointer type for heap allocation.
+//!
+//! [`Box<T>`], casually referred to as a 'box', provides the simplest form of
+//! heap allocation in Rust. Boxes provide ownership for this allocation, and
+//! drop their contents when they go out of scope. Boxes also ensure that they
+//! never allocate more than `isize::MAX` bytes.
+//!
+//! # Examples
+//!
+//! Move a value from the stack to the heap by creating a [`Box`]:
+//!
+//! ```
+//! let val: u8 = 5;
+//! let boxed: Box<u8> = Box::new(val);
+//! ```
+//!
+//! Move a value from a [`Box`] back to the stack by [dereferencing]:
+//!
+//! ```
+//! let boxed: Box<u8> = Box::new(5);
+//! let val: u8 = *boxed;
+//! ```
+//!
+//! Creating a recursive data structure:
+//!
+//! ```
+//! #[derive(Debug)]
+//! enum List<T> {
+//! Cons(T, Box<List<T>>),
+//! Nil,
+//! }
+//!
+//! let list: List<i32> = List::Cons(1, Box::new(List::Cons(2, Box::new(List::Nil))));
+//! println!("{list:?}");
+//! ```
+//!
+//! This will print `Cons(1, Cons(2, Nil))`.
+//!
+//! Recursive structures must be boxed, because if the definition of `Cons`
+//! looked like this:
+//!
+//! ```compile_fail,E0072
+//! # enum List<T> {
+//! Cons(T, List<T>),
+//! # }
+//! ```
+//!
+//! It wouldn't work. This is because the size of a `List` depends on how many
+//! elements are in the list, and so we don't know how much memory to allocate
+//! for a `Cons`. By introducing a [`Box<T>`], which has a defined size, we know how
+//! big `Cons` needs to be.
+//!
+//! # Memory layout
+//!
+//! For non-zero-sized values, a [`Box`] will use the [`Global`] allocator for
+//! its allocation. It is valid to convert both ways between a [`Box`] and a
+//! raw pointer allocated with the [`Global`] allocator, given that the
+//! [`Layout`] used with the allocator is correct for the type. More precisely,
+//! a `value: *mut T` that has been allocated with the [`Global`] allocator
+//! with `Layout::for_value(&*value)` may be converted into a box using
+//! [`Box::<T>::from_raw(value)`]. Conversely, the memory backing a `value: *mut
+//! T` obtained from [`Box::<T>::into_raw`] may be deallocated using the
+//! [`Global`] allocator with [`Layout::for_value(&*value)`].
+//!
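+//! For example, the round trip described above can be spelled with the raw-pointer
+//! conversion methods (a short sketch, not an exhaustive treatment of the rules):
+//!
+//! ```
+//! let boxed: Box<u8> = Box::new(5);
+//! let raw: *mut u8 = Box::into_raw(boxed);
+//! // SAFETY: `raw` came from `Box::into_raw` and is not used again afterwards.
+//! let boxed: Box<u8> = unsafe { Box::from_raw(raw) };
+//! assert_eq!(*boxed, 5);
+//! ```
+//!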
+//! For zero-sized values, the `Box` pointer still has to be [valid] for reads
+//! and writes and sufficiently aligned. In particular, casting any aligned
+//! non-zero integer literal to a raw pointer produces a valid pointer, but a
+//! pointer pointing into previously allocated memory that since got freed is
+//! not valid. The recommended way to build a Box of a ZST if `Box::new` cannot
+//! be used is to use [`ptr::NonNull::dangling`].
+//!
+//! So long as `T: Sized`, a `Box<T>` is guaranteed to be represented
+//! as a single pointer and is also ABI-compatible with C pointers
+//! (i.e. the C type `T*`). This means that if you have extern "C"
+//! Rust functions that will be called from C, you can define those
+//! Rust functions using `Box<T>` types, and use `T*` as corresponding
+//! type on the C side. As an example, consider this C header which
+//! declares functions that create and destroy some kind of `Foo`
+//! value:
+//!
+//! ```c
+//! /* C header */
+//!
+//! /* Returns ownership to the caller */
+//! struct Foo* foo_new(void);
+//!
+//! /* Takes ownership from the caller; no-op when invoked with null */
+//! void foo_delete(struct Foo*);
+//! ```
+//!
+//! These two functions might be implemented in Rust as follows. Here, the
+//! `struct Foo*` type from C is translated to `Box<Foo>`, which captures
+//! the ownership constraints. Note also that the nullable argument to
+//! `foo_delete` is represented in Rust as `Option<Box<Foo>>`, since `Box<Foo>`
+//! cannot be null.
+//!
+//! ```
+//! #[repr(C)]
+//! pub struct Foo;
+//!
+//! #[no_mangle]
+//! pub extern "C" fn foo_new() -> Box<Foo> {
+//! Box::new(Foo)
+//! }
+//!
+//! #[no_mangle]
+//! pub extern "C" fn foo_delete(_: Option<Box<Foo>>) {}
+//! ```
+//!
+//! Even though `Box<T>` has the same representation and C ABI as a C pointer,
+//! this does not mean that you can convert an arbitrary `T*` into a `Box<T>`
+//! and expect things to work. `Box<T>` values will always be fully aligned,
+//! non-null pointers. Moreover, the destructor for `Box<T>` will attempt to
+//! free the value with the global allocator. In general, the best practice
+//! is to only use `Box<T>` for pointers that originated from the global
+//! allocator.
+//!
+//! **Important.** At least at present, you should avoid using
+//! `Box<T>` types for functions that are defined in C but invoked
+//! from Rust. In those cases, you should directly mirror the C types
+//! as closely as possible. Using types like `Box<T>` where the C
+//! definition is just using `T*` can lead to undefined behavior, as
+//! described in [rust-lang/unsafe-code-guidelines#198][ucg#198].
+//!
+//! # Considerations for unsafe code
+//!
+//! **Warning: This section is not normative and is subject to change, possibly
+//! being relaxed in the future! It is a simplified summary of the rules
+//! currently implemented in the compiler.**
+//!
+//! The aliasing rules for `Box<T>` are the same as for `&mut T`. `Box<T>`
+//! asserts uniqueness over its content. Using raw pointers derived from a box
+//! after that box has been mutated through, moved or borrowed as `&mut T`
+//! is not allowed. For more guidance on working with box from unsafe code, see
+//! [rust-lang/unsafe-code-guidelines#326][ucg#326].
+//!
+//!
+//! [ucg#198]: https://github.com/rust-lang/unsafe-code-guidelines/issues/198
+//! [ucg#326]: https://github.com/rust-lang/unsafe-code-guidelines/issues/326
+//! [dereferencing]: core::ops::Deref
+//! [`Box::<T>::from_raw(value)`]: Box::from_raw
+//! [`Global`]: crate::alloc::Global
+//! [`Layout`]: crate::alloc::Layout
+//! [`Layout::for_value(&*value)`]: crate::alloc::Layout::for_value
+//! [valid]: ptr#safety
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use core::any::Any;
+use core::async_iter::AsyncIterator;
+use core::borrow;
+use core::cmp::Ordering;
+use core::convert::{From, TryFrom};
+use core::fmt;
+use core::future::Future;
+use core::hash::{Hash, Hasher};
+#[cfg(not(no_global_oom_handling))]
+use core::iter::FromIterator;
+use core::iter::{FusedIterator, Iterator};
+use core::marker::{Destruct, Unpin, Unsize};
+use core::mem;
+use core::ops::{
+ CoerceUnsized, Deref, DerefMut, DispatchFromDyn, Generator, GeneratorState, Receiver,
+};
+use core::pin::Pin;
+use core::ptr::{self, Unique};
+use core::task::{Context, Poll};
+
+#[cfg(not(no_global_oom_handling))]
+use crate::alloc::{handle_alloc_error, WriteCloneIntoRaw};
+use crate::alloc::{AllocError, Allocator, Global, Layout};
+#[cfg(not(no_global_oom_handling))]
+use crate::borrow::Cow;
+use crate::raw_vec::RawVec;
+#[cfg(not(no_global_oom_handling))]
+use crate::str::from_boxed_utf8_unchecked;
+#[cfg(not(no_global_oom_handling))]
+use crate::vec::Vec;
+
+#[unstable(feature = "thin_box", issue = "92791")]
+pub use thin::ThinBox;
+
+mod thin;
+
+/// A pointer type for heap allocation.
+///
+/// See the [module-level documentation](../../std/boxed/index.html) for more.
+#[lang = "owned_box"]
+#[fundamental]
+#[stable(feature = "rust1", since = "1.0.0")]
+// The declaration of the `Box` struct must be kept in sync with the
+// `alloc::alloc::box_free` function or ICEs will happen. See the comment
+// on `box_free` for more details.
+pub struct Box<
+ T: ?Sized,
+ #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
+>(Unique<T>, A);
+
+impl<T> Box<T> {
+ /// Allocates memory on the heap and then places `x` into it.
+ ///
+ /// This doesn't actually allocate if `T` is zero-sized.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let five = Box::new(5);
+ /// ```
+ #[cfg(all(not(no_global_oom_handling)))]
+ #[inline(always)]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ pub fn new(x: T) -> Self {
+ #[rustc_box]
+ Box::new(x)
+ }
+
+ /// Constructs a new box with uninitialized contents.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ ///
+ /// let mut five = Box::<u32>::new_uninit();
+ ///
+ /// let five = unsafe {
+ /// // Deferred initialization:
+ /// five.as_mut_ptr().write(5);
+ ///
+ /// five.assume_init()
+ /// };
+ ///
+ /// assert_eq!(*five, 5)
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "new_uninit", issue = "63291")]
+ #[must_use]
+ #[inline]
+ pub fn new_uninit() -> Box<mem::MaybeUninit<T>> {
+ Self::new_uninit_in(Global)
+ }
+
+ /// Constructs a new `Box` with uninitialized contents, with the memory
+ /// being filled with `0` bytes.
+ ///
+ /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
+ /// of this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ ///
+ /// let zero = Box::<u32>::new_zeroed();
+ /// let zero = unsafe { zero.assume_init() };
+ ///
+ /// assert_eq!(*zero, 0)
+ /// ```
+ ///
+ /// [zeroed]: mem::MaybeUninit::zeroed
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[unstable(feature = "new_uninit", issue = "63291")]
+ #[must_use]
+ pub fn new_zeroed() -> Box<mem::MaybeUninit<T>> {
+ Self::new_zeroed_in(Global)
+ }
+
+ /// Constructs a new `Pin<Box<T>>`. If `T` does not implement [`Unpin`], then
+ /// `x` will be pinned in memory and unable to be moved.
+ ///
+ /// Constructing and pinning of the `Box` can also be done in two steps: `Box::pin(x)`
+ /// does the same as <code>[Box::into_pin]\([Box::new]\(x))</code>. Consider using
+ /// [`into_pin`](Box::into_pin) if you already have a `Box<T>`, or if you want to
+ /// construct a (pinned) `Box` in a different way than with [`Box::new`].
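+    ///
+    /// # Examples
+    ///
+    /// A brief usage sketch:
+    ///
+    /// ```
+    /// let pinned = Box::pin(5);
+    /// assert_eq!(*pinned, 5);
+    /// ```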
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "pin", since = "1.33.0")]
+ #[must_use]
+ #[inline(always)]
+ pub fn pin(x: T) -> Pin<Box<T>> {
+ (#[rustc_box]
+ Box::new(x))
+ .into()
+ }
+
+    /// Allocates memory on the heap and then places `x` into it,
+    /// returning an error if the allocation fails.
+ ///
+ /// This doesn't actually allocate if `T` is zero-sized.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api)]
+ ///
+ /// let five = Box::try_new(5)?;
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[inline]
+ pub fn try_new(x: T) -> Result<Self, AllocError> {
+ Self::try_new_in(x, Global)
+ }
+
+ /// Constructs a new box with uninitialized contents on the heap,
+    /// returning an error if the allocation fails.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api, new_uninit)]
+ ///
+ /// let mut five = Box::<u32>::try_new_uninit()?;
+ ///
+ /// let five = unsafe {
+ /// // Deferred initialization:
+ /// five.as_mut_ptr().write(5);
+ ///
+ /// five.assume_init()
+ /// };
+ ///
+ /// assert_eq!(*five, 5);
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ // #[unstable(feature = "new_uninit", issue = "63291")]
+ #[inline]
+ pub fn try_new_uninit() -> Result<Box<mem::MaybeUninit<T>>, AllocError> {
+ Box::try_new_uninit_in(Global)
+ }
+
+ /// Constructs a new `Box` with uninitialized contents, with the memory
+    /// being filled with `0` bytes on the heap.
+ ///
+ /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
+ /// of this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api, new_uninit)]
+ ///
+ /// let zero = Box::<u32>::try_new_zeroed()?;
+ /// let zero = unsafe { zero.assume_init() };
+ ///
+ /// assert_eq!(*zero, 0);
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ ///
+ /// [zeroed]: mem::MaybeUninit::zeroed
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ // #[unstable(feature = "new_uninit", issue = "63291")]
+ #[inline]
+ pub fn try_new_zeroed() -> Result<Box<mem::MaybeUninit<T>>, AllocError> {
+ Box::try_new_zeroed_in(Global)
+ }
+}
+
+impl<T, A: Allocator> Box<T, A> {
+    /// Allocates memory in the given allocator and then places `x` into it.
+ ///
+ /// This doesn't actually allocate if `T` is zero-sized.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let five = Box::new_in(5, System);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[rustc_const_unstable(feature = "const_box", issue = "92521")]
+ #[must_use]
+ #[inline]
+ pub const fn new_in(x: T, alloc: A) -> Self
+ where
+ A: ~const Allocator + ~const Destruct,
+ {
+ let mut boxed = Self::new_uninit_in(alloc);
+ unsafe {
+ boxed.as_mut_ptr().write(x);
+ boxed.assume_init()
+ }
+ }
+
+    /// Allocates memory in the given allocator and then places `x` into it,
+    /// returning an error if the allocation fails.
+ ///
+ /// This doesn't actually allocate if `T` is zero-sized.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let five = Box::try_new_in(5, System)?;
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[rustc_const_unstable(feature = "const_box", issue = "92521")]
+ #[inline]
+ pub const fn try_new_in(x: T, alloc: A) -> Result<Self, AllocError>
+ where
+ T: ~const Destruct,
+ A: ~const Allocator + ~const Destruct,
+ {
+ let mut boxed = Self::try_new_uninit_in(alloc)?;
+ unsafe {
+ boxed.as_mut_ptr().write(x);
+ Ok(boxed.assume_init())
+ }
+ }
+
+ /// Constructs a new box with uninitialized contents in the provided allocator.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api, new_uninit)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let mut five = Box::<u32, _>::new_uninit_in(System);
+ ///
+ /// let five = unsafe {
+ /// // Deferred initialization:
+ /// five.as_mut_ptr().write(5);
+ ///
+ /// five.assume_init()
+ /// };
+ ///
+ /// assert_eq!(*five, 5)
+ /// ```
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[rustc_const_unstable(feature = "const_box", issue = "92521")]
+ #[cfg(not(no_global_oom_handling))]
+ #[must_use]
+ // #[unstable(feature = "new_uninit", issue = "63291")]
+ pub const fn new_uninit_in(alloc: A) -> Box<mem::MaybeUninit<T>, A>
+ where
+ A: ~const Allocator + ~const Destruct,
+ {
+ let layout = Layout::new::<mem::MaybeUninit<T>>();
+ // NOTE: Prefer match over unwrap_or_else since closure sometimes not inlineable.
+ // That would make code size bigger.
+ match Box::try_new_uninit_in(alloc) {
+ Ok(m) => m,
+ Err(_) => handle_alloc_error(layout),
+ }
+ }
+
+ /// Constructs a new box with uninitialized contents in the provided allocator,
+    /// returning an error if the allocation fails.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api, new_uninit)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let mut five = Box::<u32, _>::try_new_uninit_in(System)?;
+ ///
+ /// let five = unsafe {
+ /// // Deferred initialization:
+ /// five.as_mut_ptr().write(5);
+ ///
+ /// five.assume_init()
+ /// };
+ ///
+ /// assert_eq!(*five, 5);
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ // #[unstable(feature = "new_uninit", issue = "63291")]
+ #[rustc_const_unstable(feature = "const_box", issue = "92521")]
+ pub const fn try_new_uninit_in(alloc: A) -> Result<Box<mem::MaybeUninit<T>, A>, AllocError>
+ where
+ A: ~const Allocator + ~const Destruct,
+ {
+ let layout = Layout::new::<mem::MaybeUninit<T>>();
+ let ptr = alloc.allocate(layout)?.cast();
+ unsafe { Ok(Box::from_raw_in(ptr.as_ptr(), alloc)) }
+ }
+
+ /// Constructs a new `Box` with uninitialized contents, with the memory
+ /// being filled with `0` bytes in the provided allocator.
+ ///
+ /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
+ /// of this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api, new_uninit)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let zero = Box::<u32, _>::new_zeroed_in(System);
+ /// let zero = unsafe { zero.assume_init() };
+ ///
+ /// assert_eq!(*zero, 0)
+ /// ```
+ ///
+ /// [zeroed]: mem::MaybeUninit::zeroed
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[rustc_const_unstable(feature = "const_box", issue = "92521")]
+ #[cfg(not(no_global_oom_handling))]
+ // #[unstable(feature = "new_uninit", issue = "63291")]
+ #[must_use]
+ pub const fn new_zeroed_in(alloc: A) -> Box<mem::MaybeUninit<T>, A>
+ where
+ A: ~const Allocator + ~const Destruct,
+ {
+ let layout = Layout::new::<mem::MaybeUninit<T>>();
+ // NOTE: Prefer match over unwrap_or_else since closure sometimes not inlineable.
+ // That would make code size bigger.
+ match Box::try_new_zeroed_in(alloc) {
+ Ok(m) => m,
+ Err(_) => handle_alloc_error(layout),
+ }
+ }
+
+ /// Constructs a new `Box` with uninitialized contents, with the memory
+ /// being filled with `0` bytes in the provided allocator,
+    /// returning an error if the allocation fails.
+ ///
+ /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
+ /// of this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api, new_uninit)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let zero = Box::<u32, _>::try_new_zeroed_in(System)?;
+ /// let zero = unsafe { zero.assume_init() };
+ ///
+ /// assert_eq!(*zero, 0);
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ ///
+ /// [zeroed]: mem::MaybeUninit::zeroed
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ // #[unstable(feature = "new_uninit", issue = "63291")]
+ #[rustc_const_unstable(feature = "const_box", issue = "92521")]
+ pub const fn try_new_zeroed_in(alloc: A) -> Result<Box<mem::MaybeUninit<T>, A>, AllocError>
+ where
+ A: ~const Allocator + ~const Destruct,
+ {
+ let layout = Layout::new::<mem::MaybeUninit<T>>();
+ let ptr = alloc.allocate_zeroed(layout)?.cast();
+ unsafe { Ok(Box::from_raw_in(ptr.as_ptr(), alloc)) }
+ }
+
+ /// Constructs a new `Pin<Box<T, A>>`. If `T` does not implement [`Unpin`], then
+ /// `x` will be pinned in memory and unable to be moved.
+ ///
+ /// Constructing and pinning of the `Box` can also be done in two steps: `Box::pin_in(x, alloc)`
+ /// does the same as <code>[Box::into_pin]\([Box::new_in]\(x, alloc))</code>. Consider using
+ /// [`into_pin`](Box::into_pin) if you already have a `Box<T, A>`, or if you want to
+ /// construct a (pinned) `Box` in a different way than with [`Box::new_in`].
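+    ///
+    /// # Examples
+    ///
+    /// A minimal usage sketch:
+    ///
+    /// ```
+    /// #![feature(allocator_api)]
+    ///
+    /// use std::alloc::System;
+    ///
+    /// let pinned = Box::pin_in(5, System);
+    /// assert_eq!(*pinned, 5);
+    /// ```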
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[rustc_const_unstable(feature = "const_box", issue = "92521")]
+ #[must_use]
+ #[inline(always)]
+ pub const fn pin_in(x: T, alloc: A) -> Pin<Self>
+ where
+ A: 'static + ~const Allocator + ~const Destruct,
+ {
+ Self::into_pin(Self::new_in(x, alloc))
+ }
+
+ /// Converts a `Box<T>` into a `Box<[T]>`
+ ///
+ /// This conversion does not allocate on the heap and happens in place.
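+    ///
+    /// # Examples
+    ///
+    /// A short illustration of the conversion:
+    ///
+    /// ```
+    /// #![feature(box_into_boxed_slice)]
+    ///
+    /// let b = Box::new(5);
+    /// let slice: Box<[i32]> = Box::into_boxed_slice(b);
+    /// assert_eq!(*slice, [5]);
+    /// ```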
+ #[unstable(feature = "box_into_boxed_slice", issue = "71582")]
+ #[rustc_const_unstable(feature = "const_box", issue = "92521")]
+ pub const fn into_boxed_slice(boxed: Self) -> Box<[T], A> {
+ let (raw, alloc) = Box::into_raw_with_allocator(boxed);
+ unsafe { Box::from_raw_in(raw as *mut [T; 1], alloc) }
+ }
+
+ /// Consumes the `Box`, returning the wrapped value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(box_into_inner)]
+ ///
+ /// let c = Box::new(5);
+ ///
+ /// assert_eq!(Box::into_inner(c), 5);
+ /// ```
+ #[unstable(feature = "box_into_inner", issue = "80437")]
+ #[rustc_const_unstable(feature = "const_box", issue = "92521")]
+ #[inline]
+ pub const fn into_inner(boxed: Self) -> T
+ where
+ Self: ~const Destruct,
+ {
+ *boxed
+ }
+}
+
+impl<T> Box<[T]> {
+ /// Constructs a new boxed slice with uninitialized contents.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ ///
+ /// let mut values = Box::<[u32]>::new_uninit_slice(3);
+ ///
+ /// let values = unsafe {
+ /// // Deferred initialization:
+ /// values[0].as_mut_ptr().write(1);
+ /// values[1].as_mut_ptr().write(2);
+ /// values[2].as_mut_ptr().write(3);
+ ///
+ /// values.assume_init()
+ /// };
+ ///
+ /// assert_eq!(*values, [1, 2, 3])
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "new_uninit", issue = "63291")]
+ #[must_use]
+ pub fn new_uninit_slice(len: usize) -> Box<[mem::MaybeUninit<T>]> {
+ unsafe { RawVec::with_capacity(len).into_box(len) }
+ }
+
+ /// Constructs a new boxed slice with uninitialized contents, with the memory
+ /// being filled with `0` bytes.
+ ///
+ /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
+ /// of this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ ///
+ /// let values = Box::<[u32]>::new_zeroed_slice(3);
+ /// let values = unsafe { values.assume_init() };
+ ///
+ /// assert_eq!(*values, [0, 0, 0])
+ /// ```
+ ///
+ /// [zeroed]: mem::MaybeUninit::zeroed
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "new_uninit", issue = "63291")]
+ #[must_use]
+ pub fn new_zeroed_slice(len: usize) -> Box<[mem::MaybeUninit<T>]> {
+ unsafe { RawVec::with_capacity_zeroed(len).into_box(len) }
+ }
+
+ /// Constructs a new boxed slice with uninitialized contents. Returns an error if
+    /// the allocation fails.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api, new_uninit)]
+ ///
+ /// let mut values = Box::<[u32]>::try_new_uninit_slice(3)?;
+ /// let values = unsafe {
+ /// // Deferred initialization:
+ /// values[0].as_mut_ptr().write(1);
+ /// values[1].as_mut_ptr().write(2);
+ /// values[2].as_mut_ptr().write(3);
+ /// values.assume_init()
+ /// };
+ ///
+ /// assert_eq!(*values, [1, 2, 3]);
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[inline]
+ pub fn try_new_uninit_slice(len: usize) -> Result<Box<[mem::MaybeUninit<T>]>, AllocError> {
+ unsafe {
+ let layout = match Layout::array::<mem::MaybeUninit<T>>(len) {
+ Ok(l) => l,
+ Err(_) => return Err(AllocError),
+ };
+ let ptr = Global.allocate(layout)?;
+ Ok(RawVec::from_raw_parts_in(ptr.as_mut_ptr() as *mut _, len, Global).into_box(len))
+ }
+ }
+
+ /// Constructs a new boxed slice with uninitialized contents, with the memory
+    /// being filled with `0` bytes. Returns an error if the allocation fails.
+ ///
+ /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
+ /// of this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api, new_uninit)]
+ ///
+ /// let values = Box::<[u32]>::try_new_zeroed_slice(3)?;
+ /// let values = unsafe { values.assume_init() };
+ ///
+ /// assert_eq!(*values, [0, 0, 0]);
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ ///
+ /// [zeroed]: mem::MaybeUninit::zeroed
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[inline]
+ pub fn try_new_zeroed_slice(len: usize) -> Result<Box<[mem::MaybeUninit<T>]>, AllocError> {
+ unsafe {
+ let layout = match Layout::array::<mem::MaybeUninit<T>>(len) {
+ Ok(l) => l,
+ Err(_) => return Err(AllocError),
+ };
+ let ptr = Global.allocate_zeroed(layout)?;
+ Ok(RawVec::from_raw_parts_in(ptr.as_mut_ptr() as *mut _, len, Global).into_box(len))
+ }
+ }
+}
+
+impl<T, A: Allocator> Box<[T], A> {
+ /// Constructs a new boxed slice with uninitialized contents in the provided allocator.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api, new_uninit)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let mut values = Box::<[u32], _>::new_uninit_slice_in(3, System);
+ ///
+ /// let values = unsafe {
+ /// // Deferred initialization:
+ /// values[0].as_mut_ptr().write(1);
+ /// values[1].as_mut_ptr().write(2);
+ /// values[2].as_mut_ptr().write(3);
+ ///
+ /// values.assume_init()
+ /// };
+ ///
+ /// assert_eq!(*values, [1, 2, 3])
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ // #[unstable(feature = "new_uninit", issue = "63291")]
+ #[must_use]
+ pub fn new_uninit_slice_in(len: usize, alloc: A) -> Box<[mem::MaybeUninit<T>], A> {
+ unsafe { RawVec::with_capacity_in(len, alloc).into_box(len) }
+ }
+
+ /// Constructs a new boxed slice with uninitialized contents in the provided allocator,
+ /// with the memory being filled with `0` bytes.
+ ///
+ /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
+ /// of this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api, new_uninit)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let values = Box::<[u32], _>::new_zeroed_slice_in(3, System);
+ /// let values = unsafe { values.assume_init() };
+ ///
+ /// assert_eq!(*values, [0, 0, 0])
+ /// ```
+ ///
+ /// [zeroed]: mem::MaybeUninit::zeroed
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ // #[unstable(feature = "new_uninit", issue = "63291")]
+ #[must_use]
+ pub fn new_zeroed_slice_in(len: usize, alloc: A) -> Box<[mem::MaybeUninit<T>], A> {
+ unsafe { RawVec::with_capacity_zeroed_in(len, alloc).into_box(len) }
+ }
+}
+
+impl<T, A: Allocator> Box<mem::MaybeUninit<T>, A> {
+ /// Converts to `Box<T, A>`.
+ ///
+ /// # Safety
+ ///
+ /// As with [`MaybeUninit::assume_init`],
+ /// it is up to the caller to guarantee that the value
+ /// really is in an initialized state.
+ /// Calling this when the content is not yet fully initialized
+ /// causes immediate undefined behavior.
+ ///
+ /// [`MaybeUninit::assume_init`]: mem::MaybeUninit::assume_init
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ ///
+ /// let mut five = Box::<u32>::new_uninit();
+ ///
+ /// let five: Box<u32> = unsafe {
+ /// // Deferred initialization:
+ /// five.as_mut_ptr().write(5);
+ ///
+ /// five.assume_init()
+ /// };
+ ///
+ /// assert_eq!(*five, 5)
+ /// ```
+ #[unstable(feature = "new_uninit", issue = "63291")]
+ #[rustc_const_unstable(feature = "const_box", issue = "92521")]
+ #[inline]
+ pub const unsafe fn assume_init(self) -> Box<T, A> {
+ let (raw, alloc) = Box::into_raw_with_allocator(self);
+ unsafe { Box::from_raw_in(raw as *mut T, alloc) }
+ }
+
+ /// Writes the value and converts to `Box<T, A>`.
+ ///
+ /// This method converts the box similarly to [`Box::assume_init`] but
+ /// writes `value` into it before conversion thus guaranteeing safety.
+    /// In some scenarios, use of this method may improve performance because
+    /// the compiler may be able to optimize copying from the stack.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ ///
+ /// let big_box = Box::<[usize; 1024]>::new_uninit();
+ ///
+ /// let mut array = [0; 1024];
+ /// for (i, place) in array.iter_mut().enumerate() {
+ /// *place = i;
+ /// }
+ ///
+    /// // The optimizer may be able to elide this copy, so the previous code
+    /// // writes to the heap directly.
+ /// let big_box = Box::write(big_box, array);
+ ///
+ /// for (i, x) in big_box.iter().enumerate() {
+ /// assert_eq!(*x, i);
+ /// }
+ /// ```
+ #[unstable(feature = "new_uninit", issue = "63291")]
+ #[rustc_const_unstable(feature = "const_box", issue = "92521")]
+ #[inline]
+ pub const fn write(mut boxed: Self, value: T) -> Box<T, A> {
+ unsafe {
+ (*boxed).write(value);
+ boxed.assume_init()
+ }
+ }
+}
+
+impl<T, A: Allocator> Box<[mem::MaybeUninit<T>], A> {
+ /// Converts to `Box<[T], A>`.
+ ///
+ /// # Safety
+ ///
+ /// As with [`MaybeUninit::assume_init`],
+ /// it is up to the caller to guarantee that the values
+ /// really are in an initialized state.
+ /// Calling this when the content is not yet fully initialized
+ /// causes immediate undefined behavior.
+ ///
+ /// [`MaybeUninit::assume_init`]: mem::MaybeUninit::assume_init
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ ///
+ /// let mut values = Box::<[u32]>::new_uninit_slice(3);
+ ///
+ /// let values = unsafe {
+ /// // Deferred initialization:
+ /// values[0].as_mut_ptr().write(1);
+ /// values[1].as_mut_ptr().write(2);
+ /// values[2].as_mut_ptr().write(3);
+ ///
+ /// values.assume_init()
+ /// };
+ ///
+ /// assert_eq!(*values, [1, 2, 3])
+ /// ```
+ #[unstable(feature = "new_uninit", issue = "63291")]
+ #[inline]
+ pub unsafe fn assume_init(self) -> Box<[T], A> {
+ let (raw, alloc) = Box::into_raw_with_allocator(self);
+ unsafe { Box::from_raw_in(raw as *mut [T], alloc) }
+ }
+}
+
+impl<T: ?Sized> Box<T> {
+ /// Constructs a box from a raw pointer.
+ ///
+ /// After calling this function, the raw pointer is owned by the
+ /// resulting `Box`. Specifically, the `Box` destructor will call
+ /// the destructor of `T` and free the allocated memory. For this
+ /// to be safe, the memory must have been allocated in accordance
+    /// with the [memory layout] used by `Box`.
+ ///
+ /// # Safety
+ ///
+ /// This function is unsafe because improper use may lead to
+ /// memory problems. For example, a double-free may occur if the
+ /// function is called twice on the same raw pointer.
+ ///
+ /// The safety conditions are described in the [memory layout] section.
+ ///
+ /// # Examples
+ ///
+ /// Recreate a `Box` which was previously converted to a raw pointer
+ /// using [`Box::into_raw`]:
+ /// ```
+ /// let x = Box::new(5);
+ /// let ptr = Box::into_raw(x);
+ /// let x = unsafe { Box::from_raw(ptr) };
+ /// ```
+ /// Manually create a `Box` from scratch by using the global allocator:
+ /// ```
+ /// use std::alloc::{alloc, Layout};
+ ///
+ /// unsafe {
+ /// let ptr = alloc(Layout::new::<i32>()) as *mut i32;
+ /// // In general .write is required to avoid attempting to destruct
+ /// // the (uninitialized) previous contents of `ptr`, though for this
+ /// // simple example `*ptr = 5` would have worked as well.
+ /// ptr.write(5);
+ /// let x = Box::from_raw(ptr);
+ /// }
+ /// ```
+ ///
+ /// [memory layout]: self#memory-layout
+    /// [`Layout`]: crate::alloc::Layout
+ #[stable(feature = "box_raw", since = "1.4.0")]
+ #[inline]
+ #[must_use = "call `drop(from_raw(ptr))` if you intend to drop the `Box`"]
+ pub unsafe fn from_raw(raw: *mut T) -> Self {
+ unsafe { Self::from_raw_in(raw, Global) }
+ }
+}
+
+impl<T: ?Sized, A: Allocator> Box<T, A> {
+ /// Constructs a box from a raw pointer in the given allocator.
+ ///
+ /// After calling this function, the raw pointer is owned by the
+ /// resulting `Box`. Specifically, the `Box` destructor will call
+ /// the destructor of `T` and free the allocated memory. For this
+ /// to be safe, the memory must have been allocated in accordance
+    /// with the [memory layout] used by `Box`.
+ ///
+ /// # Safety
+ ///
+ /// This function is unsafe because improper use may lead to
+ /// memory problems. For example, a double-free may occur if the
+ /// function is called twice on the same raw pointer.
+    ///
+ /// # Examples
+ ///
+ /// Recreate a `Box` which was previously converted to a raw pointer
+ /// using [`Box::into_raw_with_allocator`]:
+ /// ```
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let x = Box::new_in(5, System);
+ /// let (ptr, alloc) = Box::into_raw_with_allocator(x);
+ /// let x = unsafe { Box::from_raw_in(ptr, alloc) };
+ /// ```
+ /// Manually create a `Box` from scratch by using the system allocator:
+ /// ```
+ /// #![feature(allocator_api, slice_ptr_get)]
+ ///
+ /// use std::alloc::{Allocator, Layout, System};
+ ///
+ /// unsafe {
+ /// let ptr = System.allocate(Layout::new::<i32>())?.as_mut_ptr() as *mut i32;
+ /// // In general .write is required to avoid attempting to destruct
+ /// // the (uninitialized) previous contents of `ptr`, though for this
+ /// // simple example `*ptr = 5` would have worked as well.
+ /// ptr.write(5);
+ /// let x = Box::from_raw_in(ptr, System);
+ /// }
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ ///
+ /// [memory layout]: self#memory-layout
+    /// [`Layout`]: crate::alloc::Layout
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[rustc_const_unstable(feature = "const_box", issue = "92521")]
+ #[inline]
+ pub const unsafe fn from_raw_in(raw: *mut T, alloc: A) -> Self {
+ Box(unsafe { Unique::new_unchecked(raw) }, alloc)
+ }
+
+ /// Consumes the `Box`, returning a wrapped raw pointer.
+ ///
+ /// The pointer will be properly aligned and non-null.
+ ///
+ /// After calling this function, the caller is responsible for the
+ /// memory previously managed by the `Box`. In particular, the
+ /// caller should properly destroy `T` and release the memory, taking
+ /// into account the [memory layout] used by `Box`. The easiest way to
+ /// do this is to convert the raw pointer back into a `Box` with the
+ /// [`Box::from_raw`] function, allowing the `Box` destructor to perform
+ /// the cleanup.
+ ///
+ /// Note: this is an associated function, which means that you have
+ /// to call it as `Box::into_raw(b)` instead of `b.into_raw()`. This
+ /// is so that there is no conflict with a method on the inner type.
+ ///
+ /// # Examples
+ /// Converting the raw pointer back into a `Box` with [`Box::from_raw`]
+ /// for automatic cleanup:
+ /// ```
+ /// let x = Box::new(String::from("Hello"));
+ /// let ptr = Box::into_raw(x);
+ /// let x = unsafe { Box::from_raw(ptr) };
+ /// ```
+ /// Manual cleanup by explicitly running the destructor and deallocating
+ /// the memory:
+ /// ```
+ /// use std::alloc::{dealloc, Layout};
+ /// use std::ptr;
+ ///
+ /// let x = Box::new(String::from("Hello"));
+ /// let p = Box::into_raw(x);
+ /// unsafe {
+ /// ptr::drop_in_place(p);
+ /// dealloc(p as *mut u8, Layout::new::<String>());
+ /// }
+ /// ```
+ ///
+ /// [memory layout]: self#memory-layout
+ #[stable(feature = "box_raw", since = "1.4.0")]
+ #[inline]
+ pub fn into_raw(b: Self) -> *mut T {
+ Self::into_raw_with_allocator(b).0
+ }
+
+ /// Consumes the `Box`, returning a wrapped raw pointer and the allocator.
+ ///
+ /// The pointer will be properly aligned and non-null.
+ ///
+ /// After calling this function, the caller is responsible for the
+ /// memory previously managed by the `Box`. In particular, the
+ /// caller should properly destroy `T` and release the memory, taking
+ /// into account the [memory layout] used by `Box`. The easiest way to
+ /// do this is to convert the raw pointer back into a `Box` with the
+ /// [`Box::from_raw_in`] function, allowing the `Box` destructor to perform
+ /// the cleanup.
+ ///
+ /// Note: this is an associated function, which means that you have
+ /// to call it as `Box::into_raw_with_allocator(b)` instead of `b.into_raw_with_allocator()`. This
+ /// is so that there is no conflict with a method on the inner type.
+ ///
+ /// # Examples
+ /// Converting the raw pointer back into a `Box` with [`Box::from_raw_in`]
+ /// for automatic cleanup:
+ /// ```
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let x = Box::new_in(String::from("Hello"), System);
+ /// let (ptr, alloc) = Box::into_raw_with_allocator(x);
+ /// let x = unsafe { Box::from_raw_in(ptr, alloc) };
+ /// ```
+ /// Manual cleanup by explicitly running the destructor and deallocating
+ /// the memory:
+ /// ```
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::alloc::{Allocator, Layout, System};
+ /// use std::ptr::{self, NonNull};
+ ///
+ /// let x = Box::new_in(String::from("Hello"), System);
+ /// let (ptr, alloc) = Box::into_raw_with_allocator(x);
+ /// unsafe {
+ /// ptr::drop_in_place(ptr);
+ /// let non_null = NonNull::new_unchecked(ptr);
+ /// alloc.deallocate(non_null.cast(), Layout::new::<String>());
+ /// }
+ /// ```
+ ///
+ /// [memory layout]: self#memory-layout
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[rustc_const_unstable(feature = "const_box", issue = "92521")]
+ #[inline]
+ pub const fn into_raw_with_allocator(b: Self) -> (*mut T, A) {
+ let (leaked, alloc) = Box::into_unique(b);
+ (leaked.as_ptr(), alloc)
+ }
+
+ #[unstable(
+ feature = "ptr_internals",
+ issue = "none",
+ reason = "use `Box::leak(b).into()` or `Unique::from(Box::leak(b))` instead"
+ )]
+ #[rustc_const_unstable(feature = "const_box", issue = "92521")]
+ #[inline]
+ #[doc(hidden)]
+ pub const fn into_unique(b: Self) -> (Unique<T>, A) {
+ // Box is recognized as a "unique pointer" by Stacked Borrows, but internally it is a
+ // raw pointer for the type system. Turning it directly into a raw pointer would not be
+ // recognized as "releasing" the unique pointer to permit aliased raw accesses,
+ // so all raw pointer methods have to go through `Box::leak`. Turning *that* to a raw pointer
+ // behaves correctly.
+ let alloc = unsafe { ptr::read(&b.1) };
+ (Unique::from(Box::leak(b)), alloc)
+ }
+
+ /// Returns a reference to the underlying allocator.
+ ///
+ /// Note: this is an associated function, which means that you have
+ /// to call it as `Box::allocator(&b)` instead of `b.allocator()`. This
+ /// is so that there is no conflict with a method on the inner type.
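+    ///
+    /// # Examples
+    ///
+    /// A brief usage sketch:
+    ///
+    /// ```
+    /// #![feature(allocator_api)]
+    ///
+    /// use std::alloc::System;
+    ///
+    /// let b = Box::new_in(7, System);
+    /// let _alloc: &System = Box::allocator(&b);
+    /// ```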
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[rustc_const_unstable(feature = "const_box", issue = "92521")]
+ #[inline]
+ pub const fn allocator(b: &Self) -> &A {
+ &b.1
+ }
+
+ /// Consumes and leaks the `Box`, returning a mutable reference,
+ /// `&'a mut T`. Note that the type `T` must outlive the chosen lifetime
+ /// `'a`. If the type has only static references, or none at all, then this
+ /// may be chosen to be `'static`.
+ ///
+ /// This function is mainly useful for data that lives for the remainder of
+ /// the program's life. Dropping the returned reference will cause a memory
+ /// leak. If this is not acceptable, the reference should first be wrapped
+ /// with the [`Box::from_raw`] function producing a `Box`. This `Box` can
+ /// then be dropped which will properly destroy `T` and release the
+ /// allocated memory.
+ ///
+ /// Note: this is an associated function, which means that you have
+ /// to call it as `Box::leak(b)` instead of `b.leak()`. This
+ /// is so that there is no conflict with a method on the inner type.
+ ///
+ /// # Examples
+ ///
+ /// Simple usage:
+ ///
+ /// ```
+ /// let x = Box::new(41);
+ /// let static_ref: &'static mut usize = Box::leak(x);
+ /// *static_ref += 1;
+ /// assert_eq!(*static_ref, 42);
+ /// ```
+ ///
+ /// Unsized data:
+ ///
+ /// ```
+ /// let x = vec![1, 2, 3].into_boxed_slice();
+ /// let static_ref = Box::leak(x);
+ /// static_ref[0] = 4;
+ /// assert_eq!(*static_ref, [4, 2, 3]);
+ /// ```
+ #[stable(feature = "box_leak", since = "1.26.0")]
+ #[rustc_const_unstable(feature = "const_box", issue = "92521")]
+ #[inline]
+ pub const fn leak<'a>(b: Self) -> &'a mut T
+ where
+ A: 'a,
+ {
+ unsafe { &mut *mem::ManuallyDrop::new(b).0.as_ptr() }
+ }
+
+ /// Converts a `Box<T>` into a `Pin<Box<T>>`. If `T` does not implement [`Unpin`], then
+ /// `*boxed` will be pinned in memory and unable to be moved.
+ ///
+ /// This conversion does not allocate on the heap and happens in place.
+ ///
+ /// This is also available via [`From`].
+ ///
+ /// Constructing and pinning a `Box` with <code>Box::into_pin([Box::new]\(x))</code>
+ /// can also be written more concisely using <code>[Box::pin]\(x)</code>.
+ /// This `into_pin` method is useful if you already have a `Box<T>`, or you are
+ /// constructing a (pinned) `Box` in a different way than with [`Box::new`].
+ ///
+ /// # Notes
+ ///
+ /// It's not recommended that crates add an impl like `From<Box<T>> for Pin<T>`,
+ /// as it'll introduce an ambiguity when calling `Pin::from`.
+ /// A demonstration of such a poor impl is shown below.
+ ///
+ /// ```compile_fail
+ /// # use std::pin::Pin;
+ /// struct Foo; // A type defined in this crate.
+ /// impl From<Box<()>> for Pin<Foo> {
+ /// fn from(_: Box<()>) -> Pin<Foo> {
+ /// Pin::new(Foo)
+ /// }
+ /// }
+ ///
+ /// let foo = Box::new(());
+ /// let bar = Pin::from(foo);
+ /// ```
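+    ///
+    /// # Examples
+    ///
+    /// A minimal usage sketch:
+    ///
+    /// ```
+    /// use std::pin::Pin;
+    ///
+    /// let b = Box::new(5);
+    /// let pinned: Pin<Box<i32>> = Box::into_pin(b);
+    /// assert_eq!(*pinned, 5);
+    /// ```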
+ #[stable(feature = "box_into_pin", since = "1.63.0")]
+ #[rustc_const_unstable(feature = "const_box", issue = "92521")]
+ pub const fn into_pin(boxed: Self) -> Pin<Self>
+ where
+ A: 'static,
+ {
+ // It's not possible to move or replace the insides of a `Pin<Box<T>>`
+ // when `T: !Unpin`, so it's safe to pin it directly without any
+ // additional requirements.
+ unsafe { Pin::new_unchecked(boxed) }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<#[may_dangle] T: ?Sized, A: Allocator> Drop for Box<T, A> {
+ fn drop(&mut self) {
+ // FIXME: Do nothing, drop is currently performed by compiler.
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Default> Default for Box<T> {
+ /// Creates a `Box<T>`, with the `Default` value for T.
+ fn default() -> Self {
+ #[rustc_box]
+ Box::new(T::default())
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_default_impls", issue = "87864")]
+impl<T> const Default for Box<[T]> {
+ fn default() -> Self {
+ let ptr: Unique<[T]> = Unique::<[T; 0]>::dangling();
+ Box(ptr, Global)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "default_box_extra", since = "1.17.0")]
+#[rustc_const_unstable(feature = "const_default_impls", issue = "87864")]
+impl const Default for Box<str> {
+ fn default() -> Self {
+ // SAFETY: This is the same as `Unique::cast<U>` but with an unsized `U = str`.
+ let ptr: Unique<str> = unsafe {
+ let bytes: Unique<[u8]> = Unique::<[u8; 0]>::dangling();
+ Unique::new_unchecked(bytes.as_ptr() as *mut str)
+ };
+ Box(ptr, Global)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Clone, A: Allocator + Clone> Clone for Box<T, A> {
+ /// Returns a new box with a `clone()` of this box's contents.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = Box::new(5);
+ /// let y = x.clone();
+ ///
+ /// // The value is the same
+ /// assert_eq!(x, y);
+ ///
+ /// // But they are unique objects
+ /// assert_ne!(&*x as *const i32, &*y as *const i32);
+ /// ```
+ #[inline]
+ fn clone(&self) -> Self {
+ // Pre-allocate memory to allow writing the cloned value directly.
+ let mut boxed = Self::new_uninit_in(self.1.clone());
+ unsafe {
+ (**self).write_clone_into_raw(boxed.as_mut_ptr());
+ boxed.assume_init()
+ }
+ }
+
+ /// Copies `source`'s contents into `self` without creating a new allocation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = Box::new(5);
+ /// let mut y = Box::new(10);
+ /// let yp: *const i32 = &*y;
+ ///
+ /// y.clone_from(&x);
+ ///
+ /// // The value is the same
+ /// assert_eq!(x, y);
+ ///
+ /// // And no allocation occurred
+ /// assert_eq!(yp, &*y);
+ /// ```
+ #[inline]
+ fn clone_from(&mut self, source: &Self) {
+ (**self).clone_from(&(**source));
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "box_slice_clone", since = "1.3.0")]
+impl Clone for Box<str> {
+ fn clone(&self) -> Self {
+ // this makes a copy of the data
+ let buf: Box<[u8]> = self.as_bytes().into();
+ unsafe { from_boxed_utf8_unchecked(buf) }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + PartialEq, A: Allocator> PartialEq for Box<T, A> {
+ #[inline]
+ fn eq(&self, other: &Self) -> bool {
+ PartialEq::eq(&**self, &**other)
+ }
+ #[inline]
+ fn ne(&self, other: &Self) -> bool {
+ PartialEq::ne(&**self, &**other)
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + PartialOrd, A: Allocator> PartialOrd for Box<T, A> {
+ #[inline]
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ PartialOrd::partial_cmp(&**self, &**other)
+ }
+ #[inline]
+ fn lt(&self, other: &Self) -> bool {
+ PartialOrd::lt(&**self, &**other)
+ }
+ #[inline]
+ fn le(&self, other: &Self) -> bool {
+ PartialOrd::le(&**self, &**other)
+ }
+ #[inline]
+ fn ge(&self, other: &Self) -> bool {
+ PartialOrd::ge(&**self, &**other)
+ }
+ #[inline]
+ fn gt(&self, other: &Self) -> bool {
+ PartialOrd::gt(&**self, &**other)
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + Ord, A: Allocator> Ord for Box<T, A> {
+ #[inline]
+ fn cmp(&self, other: &Self) -> Ordering {
+ Ord::cmp(&**self, &**other)
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + Eq, A: Allocator> Eq for Box<T, A> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + Hash, A: Allocator> Hash for Box<T, A> {
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ (**self).hash(state);
+ }
+}
+
+#[stable(feature = "indirect_hasher_impl", since = "1.22.0")]
+impl<T: ?Sized + Hasher, A: Allocator> Hasher for Box<T, A> {
+ fn finish(&self) -> u64 {
+ (**self).finish()
+ }
+ fn write(&mut self, bytes: &[u8]) {
+ (**self).write(bytes)
+ }
+ fn write_u8(&mut self, i: u8) {
+ (**self).write_u8(i)
+ }
+ fn write_u16(&mut self, i: u16) {
+ (**self).write_u16(i)
+ }
+ fn write_u32(&mut self, i: u32) {
+ (**self).write_u32(i)
+ }
+ fn write_u64(&mut self, i: u64) {
+ (**self).write_u64(i)
+ }
+ fn write_u128(&mut self, i: u128) {
+ (**self).write_u128(i)
+ }
+ fn write_usize(&mut self, i: usize) {
+ (**self).write_usize(i)
+ }
+ fn write_i8(&mut self, i: i8) {
+ (**self).write_i8(i)
+ }
+ fn write_i16(&mut self, i: i16) {
+ (**self).write_i16(i)
+ }
+ fn write_i32(&mut self, i: i32) {
+ (**self).write_i32(i)
+ }
+ fn write_i64(&mut self, i: i64) {
+ (**self).write_i64(i)
+ }
+ fn write_i128(&mut self, i: i128) {
+ (**self).write_i128(i)
+ }
+ fn write_isize(&mut self, i: isize) {
+ (**self).write_isize(i)
+ }
+ fn write_length_prefix(&mut self, len: usize) {
+ (**self).write_length_prefix(len)
+ }
+ fn write_str(&mut self, s: &str) {
+ (**self).write_str(s)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "from_for_ptrs", since = "1.6.0")]
+impl<T> From<T> for Box<T> {
+ /// Converts a `T` into a `Box<T>`
+ ///
+ /// The conversion allocates on the heap and moves `t`
+ /// from the stack into it.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// let x = 5;
+ /// let boxed = Box::new(5);
+ ///
+ /// assert_eq!(Box::from(x), boxed);
+ /// ```
+ fn from(t: T) -> Self {
+ Box::new(t)
+ }
+}
+
+#[stable(feature = "pin", since = "1.33.0")]
+#[rustc_const_unstable(feature = "const_box", issue = "92521")]
+impl<T: ?Sized, A: Allocator> const From<Box<T, A>> for Pin<Box<T, A>>
+where
+ A: 'static,
+{
+ /// Converts a `Box<T>` into a `Pin<Box<T>>`. If `T` does not implement [`Unpin`], then
+ /// `*boxed` will be pinned in memory and unable to be moved.
+ ///
+ /// This conversion does not allocate on the heap and happens in place.
+ ///
+ /// This is also available via [`Box::into_pin`].
+ ///
+ /// Constructing and pinning a `Box` with <code><Pin<Box\<T>>>::from([Box::new]\(x))</code>
+ /// can also be written more concisely using <code>[Box::pin]\(x)</code>.
+ /// This `From` implementation is useful if you already have a `Box<T>`, or you are
+ /// constructing a (pinned) `Box` in a different way than with [`Box::new`].
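+    ///
+    /// # Examples
+    ///
+    /// A brief usage sketch:
+    ///
+    /// ```
+    /// use std::pin::Pin;
+    ///
+    /// let b = Box::new(5);
+    /// let pinned: Pin<Box<i32>> = Pin::from(b);
+    /// assert_eq!(*pinned, 5);
+    /// ```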
+ fn from(boxed: Box<T, A>) -> Self {
+ Box::into_pin(boxed)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "box_from_slice", since = "1.17.0")]
+impl<T: Copy> From<&[T]> for Box<[T]> {
+ /// Converts a `&[T]` into a `Box<[T]>`
+ ///
+ /// This conversion allocates on the heap
+ /// and performs a copy of `slice`.
+ ///
+ /// # Examples
+ /// ```rust
+ /// // create a &[u8] which will be used to create a Box<[u8]>
+ /// let slice: &[u8] = &[104, 101, 108, 108, 111];
+ /// let boxed_slice: Box<[u8]> = Box::from(slice);
+ ///
+ /// println!("{boxed_slice:?}");
+ /// ```
+ fn from(slice: &[T]) -> Box<[T]> {
+ let len = slice.len();
+ let buf = RawVec::with_capacity(len);
+ unsafe {
+ ptr::copy_nonoverlapping(slice.as_ptr(), buf.ptr(), len);
+ buf.into_box(slice.len()).assume_init()
+ }
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "box_from_cow", since = "1.45.0")]
+impl<T: Copy> From<Cow<'_, [T]>> for Box<[T]> {
+ /// Converts a `Cow<'_, [T]>` into a `Box<[T]>`
+ ///
+ /// When `cow` is the `Cow::Borrowed` variant, this
+ /// conversion allocates on the heap and copies the
+ /// underlying slice. Otherwise, it will try to reuse the owned
+ /// `Vec`'s allocation.
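+    ///
+    /// # Examples
+    ///
+    /// A short sketch covering both variants:
+    ///
+    /// ```rust
+    /// use std::borrow::Cow;
+    ///
+    /// let borrowed: Cow<'_, [i32]> = Cow::Borrowed(&[1, 2, 3]);
+    /// let boxed: Box<[i32]> = Box::from(borrowed);
+    /// assert_eq!(&*boxed, &[1, 2, 3]);
+    ///
+    /// let owned: Cow<'_, [i32]> = Cow::Owned(vec![4, 5, 6]);
+    /// let boxed: Box<[i32]> = Box::from(owned);
+    /// assert_eq!(&*boxed, &[4, 5, 6]);
+    /// ```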
+ #[inline]
+ fn from(cow: Cow<'_, [T]>) -> Box<[T]> {
+ match cow {
+ Cow::Borrowed(slice) => Box::from(slice),
+ Cow::Owned(slice) => Box::from(slice),
+ }
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "box_from_slice", since = "1.17.0")]
+impl From<&str> for Box<str> {
+ /// Converts a `&str` into a `Box<str>`
+ ///
+ /// This conversion allocates on the heap
+ /// and performs a copy of `s`.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// let boxed: Box<str> = Box::from("hello");
+ /// println!("{boxed}");
+ /// ```
+ #[inline]
+ fn from(s: &str) -> Box<str> {
+ unsafe { from_boxed_utf8_unchecked(Box::from(s.as_bytes())) }
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "box_from_cow", since = "1.45.0")]
+impl From<Cow<'_, str>> for Box<str> {
+ /// Converts a `Cow<'_, str>` into a `Box<str>`
+ ///
+ /// When `cow` is the `Cow::Borrowed` variant, this
+ /// conversion allocates on the heap and copies the
+ /// underlying `str`. Otherwise, it will try to reuse the owned
+ /// `String`'s allocation.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// use std::borrow::Cow;
+ ///
+ /// let unboxed = Cow::Borrowed("hello");
+ /// let boxed: Box<str> = Box::from(unboxed);
+ /// println!("{boxed}");
+ /// ```
+ ///
+ /// ```rust
+ /// # use std::borrow::Cow;
+ /// let unboxed = Cow::Owned("hello".to_string());
+ /// let boxed: Box<str> = Box::from(unboxed);
+ /// println!("{boxed}");
+ /// ```
+ #[inline]
+ fn from(cow: Cow<'_, str>) -> Box<str> {
+ match cow {
+ Cow::Borrowed(s) => Box::from(s),
+ Cow::Owned(s) => Box::from(s),
+ }
+ }
+}
+
+#[stable(feature = "boxed_str_conv", since = "1.19.0")]
+impl<A: Allocator> From<Box<str, A>> for Box<[u8], A> {
+ /// Converts a `Box<str>` into a `Box<[u8]>`
+ ///
+ /// This conversion does not allocate on the heap and happens in place.
+ ///
+ /// # Examples
+ /// ```rust
+ /// // create a Box<str> which will be used to create a Box<[u8]>
+ /// let boxed: Box<str> = Box::from("hello");
+ /// let boxed_str: Box<[u8]> = Box::from(boxed);
+ ///
+ /// // create a &[u8] which will be used to create a Box<[u8]>
+ /// let slice: &[u8] = &[104, 101, 108, 108, 111];
+ /// let boxed_slice = Box::from(slice);
+ ///
+ /// assert_eq!(boxed_slice, boxed_str);
+ /// ```
+ #[inline]
+ fn from(s: Box<str, A>) -> Self {
+ let (raw, alloc) = Box::into_raw_with_allocator(s);
+ unsafe { Box::from_raw_in(raw as *mut [u8], alloc) }
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "box_from_array", since = "1.45.0")]
+impl<T, const N: usize> From<[T; N]> for Box<[T]> {
+ /// Converts a `[T; N]` into a `Box<[T]>`
+ ///
+ /// This conversion moves the array to newly heap-allocated memory.
+ ///
+ /// # Examples
+ ///
+ /// ```rust
+ /// let boxed: Box<[u8]> = Box::from([4, 2]);
+ /// println!("{boxed:?}");
+ /// ```
+ fn from(array: [T; N]) -> Box<[T]> {
+ #[rustc_box]
+ Box::new(array)
+ }
+}
+
+#[stable(feature = "boxed_slice_try_from", since = "1.43.0")]
+impl<T, const N: usize> TryFrom<Box<[T]>> for Box<[T; N]> {
+ type Error = Box<[T]>;
+
+ /// Attempts to convert a `Box<[T]>` into a `Box<[T; N]>`.
+ ///
+ /// The conversion occurs in-place and does not require a
+ /// new memory allocation.
+ ///
+ /// # Errors
+ ///
+ /// Returns the old `Box<[T]>` in the `Err` variant if
+ /// `boxed_slice.len()` does not equal `N`.
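+    ///
+    /// # Examples
+    ///
+    /// A brief usage sketch:
+    ///
+    /// ```rust
+    /// use std::convert::TryFrom;
+    ///
+    /// let boxed_slice: Box<[i32]> = vec![1, 2, 3].into_boxed_slice();
+    /// let boxed_array = Box::<[i32; 3]>::try_from(boxed_slice).unwrap();
+    /// assert_eq!(*boxed_array, [1, 2, 3]);
+    ///
+    /// // A boxed slice of the wrong length is returned in the `Err` variant.
+    /// let too_short: Box<[i32]> = vec![1, 2].into_boxed_slice();
+    /// assert!(Box::<[i32; 3]>::try_from(too_short).is_err());
+    /// ```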
+ fn try_from(boxed_slice: Box<[T]>) -> Result<Self, Self::Error> {
+ if boxed_slice.len() == N {
+ Ok(unsafe { Box::from_raw(Box::into_raw(boxed_slice) as *mut [T; N]) })
+ } else {
+ Err(boxed_slice)
+ }
+ }
+}
+
+impl<A: Allocator> Box<dyn Any, A> {
+ /// Attempt to downcast the box to a concrete type.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::any::Any;
+ ///
+ /// fn print_if_string(value: Box<dyn Any>) {
+ /// if let Ok(string) = value.downcast::<String>() {
+ /// println!("String ({}): {}", string.len(), string);
+ /// }
+ /// }
+ ///
+ /// let my_string = "Hello World".to_string();
+ /// print_if_string(Box::new(my_string));
+ /// print_if_string(Box::new(0i8));
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn downcast<T: Any>(self) -> Result<Box<T, A>, Self> {
+ if self.is::<T>() { unsafe { Ok(self.downcast_unchecked::<T>()) } } else { Err(self) }
+ }
+
+ /// Downcasts the box to a concrete type.
+ ///
+ /// For a safe alternative see [`downcast`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(downcast_unchecked)]
+ ///
+ /// use std::any::Any;
+ ///
+ /// let x: Box<dyn Any> = Box::new(1_usize);
+ ///
+ /// unsafe {
+ /// assert_eq!(*x.downcast_unchecked::<usize>(), 1);
+ /// }
+ /// ```
+ ///
+ /// # Safety
+ ///
+ /// The contained value must be of type `T`. Calling this method
+ /// with the incorrect type is *undefined behavior*.
+ ///
+ /// [`downcast`]: Self::downcast
+ #[inline]
+ #[unstable(feature = "downcast_unchecked", issue = "90850")]
+ pub unsafe fn downcast_unchecked<T: Any>(self) -> Box<T, A> {
+ debug_assert!(self.is::<T>());
+ unsafe {
+ let (raw, alloc): (*mut dyn Any, _) = Box::into_raw_with_allocator(self);
+ Box::from_raw_in(raw as *mut T, alloc)
+ }
+ }
+}
+
+impl<A: Allocator> Box<dyn Any + Send, A> {
+ /// Attempt to downcast the box to a concrete type.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::any::Any;
+ ///
+ /// fn print_if_string(value: Box<dyn Any + Send>) {
+ /// if let Ok(string) = value.downcast::<String>() {
+ /// println!("String ({}): {}", string.len(), string);
+ /// }
+ /// }
+ ///
+ /// let my_string = "Hello World".to_string();
+ /// print_if_string(Box::new(my_string));
+ /// print_if_string(Box::new(0i8));
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn downcast<T: Any>(self) -> Result<Box<T, A>, Self> {
+ if self.is::<T>() { unsafe { Ok(self.downcast_unchecked::<T>()) } } else { Err(self) }
+ }
+
+ /// Downcasts the box to a concrete type.
+ ///
+ /// For a safe alternative see [`downcast`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(downcast_unchecked)]
+ ///
+ /// use std::any::Any;
+ ///
+ /// let x: Box<dyn Any + Send> = Box::new(1_usize);
+ ///
+ /// unsafe {
+ /// assert_eq!(*x.downcast_unchecked::<usize>(), 1);
+ /// }
+ /// ```
+ ///
+ /// # Safety
+ ///
+ /// The contained value must be of type `T`. Calling this method
+ /// with the incorrect type is *undefined behavior*.
+ ///
+ /// [`downcast`]: Self::downcast
+ #[inline]
+ #[unstable(feature = "downcast_unchecked", issue = "90850")]
+ pub unsafe fn downcast_unchecked<T: Any>(self) -> Box<T, A> {
+ debug_assert!(self.is::<T>());
+ unsafe {
+ let (raw, alloc): (*mut (dyn Any + Send), _) = Box::into_raw_with_allocator(self);
+ Box::from_raw_in(raw as *mut T, alloc)
+ }
+ }
+}
+
+impl<A: Allocator> Box<dyn Any + Send + Sync, A> {
+ /// Attempt to downcast the box to a concrete type.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::any::Any;
+ ///
+ /// fn print_if_string(value: Box<dyn Any + Send + Sync>) {
+ /// if let Ok(string) = value.downcast::<String>() {
+ /// println!("String ({}): {}", string.len(), string);
+ /// }
+ /// }
+ ///
+ /// let my_string = "Hello World".to_string();
+ /// print_if_string(Box::new(my_string));
+ /// print_if_string(Box::new(0i8));
+ /// ```
+ #[inline]
+ #[stable(feature = "box_send_sync_any_downcast", since = "1.51.0")]
+ pub fn downcast<T: Any>(self) -> Result<Box<T, A>, Self> {
+ if self.is::<T>() { unsafe { Ok(self.downcast_unchecked::<T>()) } } else { Err(self) }
+ }
+
+ /// Downcasts the box to a concrete type.
+ ///
+ /// For a safe alternative see [`downcast`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(downcast_unchecked)]
+ ///
+ /// use std::any::Any;
+ ///
+ /// let x: Box<dyn Any + Send + Sync> = Box::new(1_usize);
+ ///
+ /// unsafe {
+ /// assert_eq!(*x.downcast_unchecked::<usize>(), 1);
+ /// }
+ /// ```
+ ///
+ /// # Safety
+ ///
+ /// The contained value must be of type `T`. Calling this method
+ /// with the incorrect type is *undefined behavior*.
+ ///
+ /// [`downcast`]: Self::downcast
+ #[inline]
+ #[unstable(feature = "downcast_unchecked", issue = "90850")]
+ pub unsafe fn downcast_unchecked<T: Any>(self) -> Box<T, A> {
+ debug_assert!(self.is::<T>());
+ unsafe {
+ let (raw, alloc): (*mut (dyn Any + Send + Sync), _) =
+ Box::into_raw_with_allocator(self);
+ Box::from_raw_in(raw as *mut T, alloc)
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: fmt::Display + ?Sized, A: Allocator> fmt::Display for Box<T, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(&**self, f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: fmt::Debug + ?Sized, A: Allocator> fmt::Debug for Box<T, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&**self, f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized, A: Allocator> fmt::Pointer for Box<T, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        // It's not possible to extract the inner Unique directly from the Box,
+        // so instead we cast it to a *const which aliases the Unique.
+ let ptr: *const T = &**self;
+ fmt::Pointer::fmt(&ptr, f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_box", issue = "92521")]
+impl<T: ?Sized, A: Allocator> const Deref for Box<T, A> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ &**self
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_box", issue = "92521")]
+impl<T: ?Sized, A: Allocator> const DerefMut for Box<T, A> {
+ fn deref_mut(&mut self) -> &mut T {
+ &mut **self
+ }
+}
+
+#[unstable(feature = "receiver_trait", issue = "none")]
+impl<T: ?Sized, A: Allocator> Receiver for Box<T, A> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I: Iterator + ?Sized, A: Allocator> Iterator for Box<I, A> {
+ type Item = I::Item;
+ fn next(&mut self) -> Option<I::Item> {
+ (**self).next()
+ }
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (**self).size_hint()
+ }
+ fn nth(&mut self, n: usize) -> Option<I::Item> {
+ (**self).nth(n)
+ }
+ fn last(self) -> Option<I::Item> {
+ BoxIter::last(self)
+ }
+}
+
+trait BoxIter {
+ type Item;
+ fn last(self) -> Option<Self::Item>;
+}
+
+impl<I: Iterator + ?Sized, A: Allocator> BoxIter for Box<I, A> {
+ type Item = I::Item;
+ default fn last(self) -> Option<I::Item> {
+ #[inline]
+ fn some<T>(_: Option<T>, x: T) -> Option<T> {
+ Some(x)
+ }
+
+ self.fold(None, some)
+ }
+}
+
+/// Specialization for sized `I`s that uses `I`'s implementation of `last()`
+/// instead of the default.
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I: Iterator, A: Allocator> BoxIter for Box<I, A> {
+ fn last(self) -> Option<I::Item> {
+ (*self).last()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I: DoubleEndedIterator + ?Sized, A: Allocator> DoubleEndedIterator for Box<I, A> {
+ fn next_back(&mut self) -> Option<I::Item> {
+ (**self).next_back()
+ }
+ fn nth_back(&mut self, n: usize) -> Option<I::Item> {
+ (**self).nth_back(n)
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<I: ExactSizeIterator + ?Sized, A: Allocator> ExactSizeIterator for Box<I, A> {
+ fn len(&self) -> usize {
+ (**self).len()
+ }
+ fn is_empty(&self) -> bool {
+ (**self).is_empty()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<I: FusedIterator + ?Sized, A: Allocator> FusedIterator for Box<I, A> {}
+
+#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
+impl<Args, F: FnOnce<Args> + ?Sized, A: Allocator> FnOnce<Args> for Box<F, A> {
+ type Output = <F as FnOnce<Args>>::Output;
+
+ extern "rust-call" fn call_once(self, args: Args) -> Self::Output {
+ <F as FnOnce<Args>>::call_once(*self, args)
+ }
+}
+
+#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
+impl<Args, F: FnMut<Args> + ?Sized, A: Allocator> FnMut<Args> for Box<F, A> {
+ extern "rust-call" fn call_mut(&mut self, args: Args) -> Self::Output {
+ <F as FnMut<Args>>::call_mut(self, args)
+ }
+}
+
+#[stable(feature = "boxed_closure_impls", since = "1.35.0")]
+impl<Args, F: Fn<Args> + ?Sized, A: Allocator> Fn<Args> for Box<F, A> {
+ extern "rust-call" fn call(&self, args: Args) -> Self::Output {
+ <F as Fn<Args>>::call(self, args)
+ }
+}
+
+#[unstable(feature = "coerce_unsized", issue = "27732")]
+impl<T: ?Sized + Unsize<U>, U: ?Sized, A: Allocator> CoerceUnsized<Box<U, A>> for Box<T, A> {}
+
+#[unstable(feature = "dispatch_from_dyn", issue = "none")]
+impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Box<U>> for Box<T, Global> {}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "boxed_slice_from_iter", since = "1.32.0")]
+impl<I> FromIterator<I> for Box<[I]> {
+ fn from_iter<T: IntoIterator<Item = I>>(iter: T) -> Self {
+ iter.into_iter().collect::<Vec<_>>().into_boxed_slice()
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "box_slice_clone", since = "1.3.0")]
+impl<T: Clone, A: Allocator + Clone> Clone for Box<[T], A> {
+ fn clone(&self) -> Self {
+ let alloc = Box::allocator(self).clone();
+ self.to_vec_in(alloc).into_boxed_slice()
+ }
+
+ fn clone_from(&mut self, other: &Self) {
+ if self.len() == other.len() {
+ self.clone_from_slice(&other);
+ } else {
+ *self = other.clone();
+ }
+ }
+}
+
+#[stable(feature = "box_borrow", since = "1.1.0")]
+impl<T: ?Sized, A: Allocator> borrow::Borrow<T> for Box<T, A> {
+ fn borrow(&self) -> &T {
+ &**self
+ }
+}
+
+#[stable(feature = "box_borrow", since = "1.1.0")]
+impl<T: ?Sized, A: Allocator> borrow::BorrowMut<T> for Box<T, A> {
+ fn borrow_mut(&mut self) -> &mut T {
+ &mut **self
+ }
+}
+
+#[stable(since = "1.5.0", feature = "smart_ptr_as_ref")]
+impl<T: ?Sized, A: Allocator> AsRef<T> for Box<T, A> {
+ fn as_ref(&self) -> &T {
+ &**self
+ }
+}
+
+#[stable(since = "1.5.0", feature = "smart_ptr_as_ref")]
+impl<T: ?Sized, A: Allocator> AsMut<T> for Box<T, A> {
+ fn as_mut(&mut self) -> &mut T {
+ &mut **self
+ }
+}
+
+/* Nota bene
+ *
+ * We could have chosen not to add this impl, and instead have written a
+ * function of Pin<Box<T>> to Pin<T>. Such a function would not be sound,
+ * because Box<T> implements Unpin even when T does not, as a result of
+ * this impl.
+ *
+ * We chose this API instead of the alternative for a few reasons:
+ * - Logically, it is helpful to understand pinning in regard to the
+ * memory region being pointed to. For this reason none of the
+ * standard library pointer types support projecting through a pin
+ * (Box<T> is the only pointer type in std for which this would be
+ * safe.)
+ * - It is in practice very useful to have Box<T> be unconditionally
+ * Unpin because of trait objects, for which the structural auto
+ * trait functionality does not apply (e.g., Box<dyn Foo> would
+ * otherwise not be Unpin).
+ *
+ * Another type with the same semantics as Box but only a conditional
+ * implementation of `Unpin` (where `T: Unpin`) would be valid/safe, and
+ * could have a method to project a Pin<T> from it.
+ */
+#[stable(feature = "pin", since = "1.33.0")]
+#[rustc_const_unstable(feature = "const_box", issue = "92521")]
+impl<T: ?Sized, A: Allocator> const Unpin for Box<T, A> where A: 'static {}
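+
+// A minimal sketch of what the unconditional impl above provides (illustrative
+// only, not part of the surrounding sources): `Box<T>` can be pinned with the
+// safe `Pin::new` constructor even when `T` itself is `!Unpin`.
+//
+//     use std::marker::PhantomPinned;
+//     use std::pin::Pin;
+//
+//     struct NotUnpin(PhantomPinned);
+//
+//     let mut boxed = Box::new(NotUnpin(PhantomPinned));
+//     // `Box<NotUnpin>: Unpin`, so `Pin::new` is available; the pin applies
+//     // to the box itself, not to the heap memory it points to.
+//     let _pinned: Pin<&mut Box<NotUnpin>> = Pin::new(&mut boxed);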
+
+#[unstable(feature = "generator_trait", issue = "43122")]
+impl<G: ?Sized + Generator<R> + Unpin, R, A: Allocator> Generator<R> for Box<G, A>
+where
+ A: 'static,
+{
+ type Yield = G::Yield;
+ type Return = G::Return;
+
+ fn resume(mut self: Pin<&mut Self>, arg: R) -> GeneratorState<Self::Yield, Self::Return> {
+ G::resume(Pin::new(&mut *self), arg)
+ }
+}
+
+#[unstable(feature = "generator_trait", issue = "43122")]
+impl<G: ?Sized + Generator<R>, R, A: Allocator> Generator<R> for Pin<Box<G, A>>
+where
+ A: 'static,
+{
+ type Yield = G::Yield;
+ type Return = G::Return;
+
+ fn resume(mut self: Pin<&mut Self>, arg: R) -> GeneratorState<Self::Yield, Self::Return> {
+ G::resume((*self).as_mut(), arg)
+ }
+}
+
+#[stable(feature = "futures_api", since = "1.36.0")]
+impl<F: ?Sized + Future + Unpin, A: Allocator> Future for Box<F, A>
+where
+ A: 'static,
+{
+ type Output = F::Output;
+
+ fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
+ F::poll(Pin::new(&mut *self), cx)
+ }
+}
+
+#[unstable(feature = "async_iterator", issue = "79024")]
+impl<S: ?Sized + AsyncIterator + Unpin> AsyncIterator for Box<S> {
+ type Item = S::Item;
+
+ fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
+ Pin::new(&mut **self).poll_next(cx)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (**self).size_hint()
+ }
+}
diff --git a/library/alloc/src/boxed/thin.rs b/library/alloc/src/boxed/thin.rs
new file mode 100644
index 000000000..649ccfcaa
--- /dev/null
+++ b/library/alloc/src/boxed/thin.rs
@@ -0,0 +1,273 @@
+// Based on
+// https://github.com/matthieu-m/rfc2580/blob/b58d1d3cba0d4b5e859d3617ea2d0943aaa31329/examples/thin.rs
+// by matthieu-m
+use crate::alloc::{self, Layout, LayoutError};
+use core::fmt::{self, Debug, Display, Formatter};
+use core::marker::PhantomData;
+#[cfg(not(no_global_oom_handling))]
+use core::marker::Unsize;
+use core::mem;
+use core::ops::{Deref, DerefMut};
+use core::ptr::Pointee;
+use core::ptr::{self, NonNull};
+
+/// ThinBox.
+///
+/// A thin pointer for heap allocation, regardless of T.
+///
+/// # Examples
+///
+/// ```
+/// #![feature(thin_box)]
+/// use std::boxed::ThinBox;
+///
+/// let five = ThinBox::new(5);
+/// let thin_slice = ThinBox::<[i32]>::new_unsize([1, 2, 3, 4]);
+///
+/// use std::mem::{size_of, size_of_val};
+/// let size_of_ptr = size_of::<*const ()>();
+/// assert_eq!(size_of_ptr, size_of_val(&five));
+/// assert_eq!(size_of_ptr, size_of_val(&thin_slice));
+/// ```
+#[unstable(feature = "thin_box", issue = "92791")]
+pub struct ThinBox<T: ?Sized> {
+ // This is essentially `WithHeader<<T as Pointee>::Metadata>`,
+ // but that would be invariant in `T`, and we want covariance.
+ ptr: WithOpaqueHeader,
+ _marker: PhantomData<T>,
+}
+
+/// `ThinBox<T>` is `Send` if `T` is `Send` because the data is owned.
+#[unstable(feature = "thin_box", issue = "92791")]
+unsafe impl<T: ?Sized + Send> Send for ThinBox<T> {}
+
+/// `ThinBox<T>` is `Sync` if `T` is `Sync` because the data is owned.
+#[unstable(feature = "thin_box", issue = "92791")]
+unsafe impl<T: ?Sized + Sync> Sync for ThinBox<T> {}
+
+#[unstable(feature = "thin_box", issue = "92791")]
+impl<T> ThinBox<T> {
+ /// Moves a type to the heap with its `Metadata` stored in the heap allocation instead of on
+ /// the stack.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(thin_box)]
+ /// use std::boxed::ThinBox;
+ ///
+ /// let five = ThinBox::new(5);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ pub fn new(value: T) -> Self {
+ let meta = ptr::metadata(&value);
+ let ptr = WithOpaqueHeader::new(meta, value);
+ ThinBox { ptr, _marker: PhantomData }
+ }
+}
+
+#[unstable(feature = "thin_box", issue = "92791")]
+impl<Dyn: ?Sized> ThinBox<Dyn> {
+ /// Moves a type to the heap with its `Metadata` stored in the heap allocation instead of on
+ /// the stack.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(thin_box)]
+ /// use std::boxed::ThinBox;
+ ///
+ /// let thin_slice = ThinBox::<[i32]>::new_unsize([1, 2, 3, 4]);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ pub fn new_unsize<T>(value: T) -> Self
+ where
+ T: Unsize<Dyn>,
+ {
+ let meta = ptr::metadata(&value as &Dyn);
+ let ptr = WithOpaqueHeader::new(meta, value);
+ ThinBox { ptr, _marker: PhantomData }
+ }
+}
+
+#[unstable(feature = "thin_box", issue = "92791")]
+impl<T: ?Sized + Debug> Debug for ThinBox<T> {
+ fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+ Debug::fmt(self.deref(), f)
+ }
+}
+
+#[unstable(feature = "thin_box", issue = "92791")]
+impl<T: ?Sized + Display> Display for ThinBox<T> {
+ fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
+ Display::fmt(self.deref(), f)
+ }
+}
+
+#[unstable(feature = "thin_box", issue = "92791")]
+impl<T: ?Sized> Deref for ThinBox<T> {
+ type Target = T;
+
+ fn deref(&self) -> &T {
+ let value = self.data();
+ let metadata = self.meta();
+ let pointer = ptr::from_raw_parts(value as *const (), metadata);
+ unsafe { &*pointer }
+ }
+}
+
+#[unstable(feature = "thin_box", issue = "92791")]
+impl<T: ?Sized> DerefMut for ThinBox<T> {
+ fn deref_mut(&mut self) -> &mut T {
+ let value = self.data();
+ let metadata = self.meta();
+ let pointer = ptr::from_raw_parts_mut::<T>(value as *mut (), metadata);
+ unsafe { &mut *pointer }
+ }
+}
+
+#[unstable(feature = "thin_box", issue = "92791")]
+impl<T: ?Sized> Drop for ThinBox<T> {
+ fn drop(&mut self) {
+ unsafe {
+ let value = self.deref_mut();
+ let value = value as *mut T;
+ self.with_header().drop::<T>(value);
+ }
+ }
+}
+
+#[unstable(feature = "thin_box", issue = "92791")]
+impl<T: ?Sized> ThinBox<T> {
+ fn meta(&self) -> <T as Pointee>::Metadata {
+ // Safety:
+ // - NonNull and valid.
+ unsafe { *self.with_header().header() }
+ }
+
+ fn data(&self) -> *mut u8 {
+ self.with_header().value()
+ }
+
+ fn with_header(&self) -> &WithHeader<<T as Pointee>::Metadata> {
+ // SAFETY: both types are transparent to `NonNull<u8>`
+ unsafe { &*((&self.ptr) as *const WithOpaqueHeader as *const WithHeader<_>) }
+ }
+}
+
+/// A pointer to type-erased data, guaranteed to either be:
+/// 1. `NonNull::dangling()`, in the case where both the pointee (`T`) and
+/// metadata (`H`) are ZSTs.
+/// 2. A pointer to a valid `T` that has a header `H` directly before the
+/// pointed-to location.
+#[repr(transparent)]
+struct WithHeader<H>(NonNull<u8>, PhantomData<H>);
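+
+// A rough sketch of the allocation `WithHeader` points into (illustrative,
+// for the non-zero-sized case; the exact layout is whatever
+// `Layout::new::<H>().extend(value_layout)` produces):
+//
+//     [ possible padding | H (header) | T (value) ... ]
+//                        ^             ^
+//                        header()      self.0 / value()
+//
+// The stored pointer always designates the value; the header is found by
+// stepping back `size_of::<H>()` bytes, as `header()` below does.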
+
+/// An opaque representation of `WithHeader<H>` to avoid the
+/// projection invariance of `<T as Pointee>::Metadata`.
+#[repr(transparent)]
+struct WithOpaqueHeader(NonNull<u8>);
+
+impl WithOpaqueHeader {
+ #[cfg(not(no_global_oom_handling))]
+ fn new<H, T>(header: H, value: T) -> Self {
+ let ptr = WithHeader::new(header, value);
+ Self(ptr.0)
+ }
+}
+
+impl<H> WithHeader<H> {
+ #[cfg(not(no_global_oom_handling))]
+ fn new<T>(header: H, value: T) -> WithHeader<H> {
+ let value_layout = Layout::new::<T>();
+ let Ok((layout, value_offset)) = Self::alloc_layout(value_layout) else {
+ // We pass an empty layout here because we do not know which layout caused the
+ // arithmetic overflow in `Layout::extend`, and `handle_alloc_error` takes a
+ // `Layout` as its argument rather than a `Result<Layout, LayoutError>`; also,
+ // this function has been stable since 1.28 ._.
+ //
+ // On the other hand, look at this gorgeous turbofish!
+ alloc::handle_alloc_error(Layout::new::<()>());
+ };
+
+ unsafe {
+ // Note: It's UB to pass a layout with a zero size to `alloc::alloc`, so
+ // we use `layout.dangling()` for this case, which should have a valid
+ // alignment for both `T` and `H`.
+ let ptr = if layout.size() == 0 {
+ // Some paranoia checking, mostly so that the ThinBox tests are
+ // more able to catch issues.
+ debug_assert!(
+ value_offset == 0 && mem::size_of::<T>() == 0 && mem::size_of::<H>() == 0
+ );
+ layout.dangling()
+ } else {
+ let ptr = alloc::alloc(layout);
+ if ptr.is_null() {
+ alloc::handle_alloc_error(layout);
+ }
+ // Safety:
+ // - The layout's size is at least `value_offset`, so `ptr.add(value_offset)`
+ // stays within the allocation.
+ let ptr = ptr.add(value_offset) as *mut _;
+
+ NonNull::new_unchecked(ptr)
+ };
+
+ let result = WithHeader(ptr, PhantomData);
+ ptr::write(result.header(), header);
+ ptr::write(result.value().cast(), value);
+
+ result
+ }
+ }
+
+ // Safety:
+ // - Assumes that either `value` can be dereferenced, or is the
+ // `NonNull::dangling()` we use when both `T` and `H` are ZSTs.
+ unsafe fn drop<T: ?Sized>(&self, value: *mut T) {
+ unsafe {
+ let value_layout = Layout::for_value_raw(value);
+ // SAFETY: Layout must have been computable if we're in drop
+ let (layout, value_offset) = Self::alloc_layout(value_layout).unwrap_unchecked();
+
+ // We only drop the value because the Pointee trait requires that the metadata
+ // is `Copy`, i.e., trivially droppable.
+ ptr::drop_in_place::<T>(value);
+
+ // Note: Don't deallocate if the layout size is zero, because the pointer
+ // didn't come from the allocator.
+ if layout.size() != 0 {
+ alloc::dealloc(self.0.as_ptr().sub(value_offset), layout);
+ } else {
+ debug_assert!(
+ value_offset == 0 && mem::size_of::<H>() == 0 && value_layout.size() == 0
+ );
+ }
+ }
+ }
+
+ fn header(&self) -> *mut H {
+ // Safety:
+ // - At least `size_of::<H>()` bytes are allocated ahead of the pointer.
+ // - We know that H will be aligned because the middle pointer is aligned to the greater
+ // of the alignment of the header and the data and the header size includes the padding
+ // needed to align the header. Subtracting the header size from the aligned data pointer
+ // will always result in an aligned header pointer, it just may not point to the
+ // beginning of the allocation.
+ let hp = unsafe { self.0.as_ptr().sub(Self::header_size()) as *mut H };
+ debug_assert!(hp.is_aligned());
+ hp
+ }
+
+ fn value(&self) -> *mut u8 {
+ self.0.as_ptr()
+ }
+
+ const fn header_size() -> usize {
+ mem::size_of::<H>()
+ }
+
+ fn alloc_layout(value_layout: Layout) -> Result<(Layout, usize), LayoutError> {
+ Layout::new::<H>().extend(value_layout)
+ }
+}
diff --git a/library/alloc/src/collections/binary_heap.rs b/library/alloc/src/collections/binary_heap.rs
new file mode 100644
index 000000000..197e7aaac
--- /dev/null
+++ b/library/alloc/src/collections/binary_heap.rs
@@ -0,0 +1,1720 @@
+//! A priority queue implemented with a binary heap.
+//!
+//! Insertion and popping the largest element have *O*(log(*n*)) time complexity.
+//! Checking the largest element is *O*(1). Converting a vector to a binary heap
+//! can be done in-place, and has *O*(*n*) complexity. A binary heap can also be
+//! converted to a sorted vector in-place, allowing it to be used for an *O*(*n* * log(*n*))
+//! in-place heapsort.
+//!
+//! # Examples
+//!
+//! This is a larger example that implements [Dijkstra's algorithm][dijkstra]
+//! to solve the [shortest path problem][sssp] on a [directed graph][dir_graph].
+//! It shows how to use [`BinaryHeap`] with custom types.
+//!
+//! [dijkstra]: https://en.wikipedia.org/wiki/Dijkstra%27s_algorithm
+//! [sssp]: https://en.wikipedia.org/wiki/Shortest_path_problem
+//! [dir_graph]: https://en.wikipedia.org/wiki/Directed_graph
+//!
+//! ```
+//! use std::cmp::Ordering;
+//! use std::collections::BinaryHeap;
+//!
+//! #[derive(Copy, Clone, Eq, PartialEq)]
+//! struct State {
+//! cost: usize,
+//! position: usize,
+//! }
+//!
+//! // The priority queue depends on `Ord`.
+//! // Explicitly implement the trait so the queue becomes a min-heap
+//! // instead of a max-heap.
+//! impl Ord for State {
+//! fn cmp(&self, other: &Self) -> Ordering {
+//! // Notice that we flip the ordering on costs.
+//! // In case of a tie we compare positions - this step is necessary
+//! // to make implementations of `PartialEq` and `Ord` consistent.
+//! other.cost.cmp(&self.cost)
+//! .then_with(|| self.position.cmp(&other.position))
+//! }
+//! }
+//!
+//! // `PartialOrd` needs to be implemented as well.
+//! impl PartialOrd for State {
+//! fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+//! Some(self.cmp(other))
+//! }
+//! }
+//!
+//! // Each node is represented as a `usize`, for a shorter implementation.
+//! struct Edge {
+//! node: usize,
+//! cost: usize,
+//! }
+//!
+//! // Dijkstra's shortest path algorithm.
+//!
+//! // Start at `start` and use `dist` to track the current shortest distance
+//! // to each node. This implementation isn't memory-efficient as it may leave duplicate
+//! // nodes in the queue. It also uses `usize::MAX` as a sentinel value,
+//! // for a simpler implementation.
+//! fn shortest_path(adj_list: &Vec<Vec<Edge>>, start: usize, goal: usize) -> Option<usize> {
+//! // dist[node] = current shortest distance from `start` to `node`
+//! let mut dist: Vec<_> = (0..adj_list.len()).map(|_| usize::MAX).collect();
+//!
+//! let mut heap = BinaryHeap::new();
+//!
+//! // We're at `start`, with a zero cost
+//! dist[start] = 0;
+//! heap.push(State { cost: 0, position: start });
+//!
+//! // Examine the frontier with lower cost nodes first (min-heap)
+//! while let Some(State { cost, position }) = heap.pop() {
+//! // Alternatively we could have continued to find all shortest paths
+//! if position == goal { return Some(cost); }
+//!
+//! // Important as we may have already found a better way
+//! if cost > dist[position] { continue; }
+//!
+//! // For each node we can reach, see if we can find a way with
+//! // a lower cost going through this node
+//! for edge in &adj_list[position] {
+//! let next = State { cost: cost + edge.cost, position: edge.node };
+//!
+//! // If so, add it to the frontier and continue
+//! if next.cost < dist[next.position] {
+//! heap.push(next);
+//! // Relaxation, we have now found a better way
+//! dist[next.position] = next.cost;
+//! }
+//! }
+//! }
+//!
+//! // Goal not reachable
+//! None
+//! }
+//!
+//! fn main() {
+//! // This is the directed graph we're going to use.
+//! // The node numbers correspond to the different states,
+//! // and the edge weights symbolize the cost of moving
+//! // from one node to another.
+//! // Note that the edges are one-way.
+//! //
+//! // 7
+//! // +-----------------+
+//! // | |
+//! // v 1 2 | 2
+//! // 0 -----> 1 -----> 3 ---> 4
+//! // | ^ ^ ^
+//! // | | 1 | |
+//! // | | | 3 | 1
+//! // +------> 2 -------+ |
+//! // 10 | |
+//! // +---------------+
+//! //
+//! // The graph is represented as an adjacency list where each index,
+//! // corresponding to a node value, has a list of outgoing edges.
+//! // Chosen for its efficiency.
+//! let graph = vec![
+//! // Node 0
+//! vec![Edge { node: 2, cost: 10 },
+//! Edge { node: 1, cost: 1 }],
+//! // Node 1
+//! vec![Edge { node: 3, cost: 2 }],
+//! // Node 2
+//! vec![Edge { node: 1, cost: 1 },
+//! Edge { node: 3, cost: 3 },
+//! Edge { node: 4, cost: 1 }],
+//! // Node 3
+//! vec![Edge { node: 0, cost: 7 },
+//! Edge { node: 4, cost: 2 }],
+//! // Node 4
+//! vec![]];
+//!
+//! assert_eq!(shortest_path(&graph, 0, 1), Some(1));
+//! assert_eq!(shortest_path(&graph, 0, 3), Some(3));
+//! assert_eq!(shortest_path(&graph, 3, 0), Some(7));
+//! assert_eq!(shortest_path(&graph, 0, 4), Some(5));
+//! assert_eq!(shortest_path(&graph, 4, 0), None);
+//! }
+//! ```
+
+#![allow(missing_docs)]
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use core::fmt;
+use core::iter::{FromIterator, FusedIterator, InPlaceIterable, SourceIter, TrustedLen};
+use core::mem::{self, swap, ManuallyDrop};
+use core::ops::{Deref, DerefMut};
+use core::ptr;
+
+use crate::collections::TryReserveError;
+use crate::slice;
+use crate::vec::{self, AsVecIntoIter, Vec};
+
+use super::SpecExtend;
+
+#[cfg(test)]
+mod tests;
+
+/// A priority queue implemented with a binary heap.
+///
+/// This will be a max-heap.
+///
+/// It is a logic error for an item to be modified in such a way that the
+/// item's ordering relative to any other item, as determined by the [`Ord`]
+/// trait, changes while it is in the heap. This is normally only possible
+/// through [`Cell`], [`RefCell`], global state, I/O, or unsafe code. The
+/// behavior resulting from such a logic error is not specified, but will
+/// be encapsulated to the `BinaryHeap` that observed the logic error and not
+/// result in undefined behavior. This could include panics, incorrect results,
+/// aborts, memory leaks, and non-termination.
+///
+/// # Examples
+///
+/// ```
+/// use std::collections::BinaryHeap;
+///
+/// // Type inference lets us omit an explicit type signature (which
+/// // would be `BinaryHeap<i32>` in this example).
+/// let mut heap = BinaryHeap::new();
+///
+/// // We can use peek to look at the next item in the heap. In this case,
+/// // there are no items in there yet so we get None.
+/// assert_eq!(heap.peek(), None);
+///
+/// // Let's add some scores...
+/// heap.push(1);
+/// heap.push(5);
+/// heap.push(2);
+///
+/// // Now peek shows the most important item in the heap.
+/// assert_eq!(heap.peek(), Some(&5));
+///
+/// // We can check the length of a heap.
+/// assert_eq!(heap.len(), 3);
+///
+/// // We can iterate over the items in the heap, although they are returned in
+/// // a random order.
+/// for x in &heap {
+/// println!("{x}");
+/// }
+///
+/// // If we instead pop these scores, they should come back in order.
+/// assert_eq!(heap.pop(), Some(5));
+/// assert_eq!(heap.pop(), Some(2));
+/// assert_eq!(heap.pop(), Some(1));
+/// assert_eq!(heap.pop(), None);
+///
+/// // We can clear the heap of any remaining items.
+/// heap.clear();
+///
+/// // The heap should now be empty.
+/// assert!(heap.is_empty())
+/// ```
+///
+/// A `BinaryHeap` with a known list of items can be initialized from an array:
+///
+/// ```
+/// use std::collections::BinaryHeap;
+///
+/// let heap = BinaryHeap::from([1, 5, 2]);
+/// ```
+///
+/// ## Min-heap
+///
+/// Either [`core::cmp::Reverse`] or a custom [`Ord`] implementation can be used to
+/// make `BinaryHeap` a min-heap. This makes `heap.pop()` return the smallest
+/// value instead of the greatest one.
+///
+/// ```
+/// use std::collections::BinaryHeap;
+/// use std::cmp::Reverse;
+///
+/// let mut heap = BinaryHeap::new();
+///
+/// // Wrap values in `Reverse`
+/// heap.push(Reverse(1));
+/// heap.push(Reverse(5));
+/// heap.push(Reverse(2));
+///
+/// // If we pop these scores now, they should come back in the reverse order.
+/// assert_eq!(heap.pop(), Some(Reverse(1)));
+/// assert_eq!(heap.pop(), Some(Reverse(2)));
+/// assert_eq!(heap.pop(), Some(Reverse(5)));
+/// assert_eq!(heap.pop(), None);
+/// ```
+///
+/// # Time complexity
+///
+/// | [push] | [pop] | [peek]/[peek\_mut] |
+/// |---------|---------------|--------------------|
+/// | *O*(1)~ | *O*(log(*n*)) | *O*(1) |
+///
+/// The value for `push` is an expected cost; the method documentation gives a
+/// more detailed analysis.
+///
+/// [`core::cmp::Reverse`]: core::cmp::Reverse
+/// [`Ord`]: core::cmp::Ord
+/// [`Cell`]: core::cell::Cell
+/// [`RefCell`]: core::cell::RefCell
+/// [push]: BinaryHeap::push
+/// [pop]: BinaryHeap::pop
+/// [peek]: BinaryHeap::peek
+/// [peek\_mut]: BinaryHeap::peek_mut
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "BinaryHeap")]
+pub struct BinaryHeap<T> {
+ data: Vec<T>,
+}
+
+/// Structure wrapping a mutable reference to the greatest item on a
+/// `BinaryHeap`.
+///
+/// This `struct` is created by the [`peek_mut`] method on [`BinaryHeap`]. See
+/// its documentation for more.
+///
+/// [`peek_mut`]: BinaryHeap::peek_mut
+#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
+pub struct PeekMut<'a, T: 'a + Ord> {
+ heap: &'a mut BinaryHeap<T>,
+ sift: bool,
+}
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<T: Ord + fmt::Debug> fmt::Debug for PeekMut<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("PeekMut").field(&self.heap.data[0]).finish()
+ }
+}
+
+#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
+impl<T: Ord> Drop for PeekMut<'_, T> {
+ fn drop(&mut self) {
+ if self.sift {
+ // SAFETY: PeekMut is only instantiated for non-empty heaps.
+ unsafe { self.heap.sift_down(0) };
+ }
+ }
+}
+
+#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
+impl<T: Ord> Deref for PeekMut<'_, T> {
+ type Target = T;
+ fn deref(&self) -> &T {
+ debug_assert!(!self.heap.is_empty());
+ // SAFE: PeekMut is only instantiated for non-empty heaps
+ unsafe { self.heap.data.get_unchecked(0) }
+ }
+}
+
+#[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
+impl<T: Ord> DerefMut for PeekMut<'_, T> {
+ fn deref_mut(&mut self) -> &mut T {
+ debug_assert!(!self.heap.is_empty());
+ self.sift = true;
+ // SAFE: PeekMut is only instantiated for non-empty heaps
+ unsafe { self.heap.data.get_unchecked_mut(0) }
+ }
+}
+
+impl<'a, T: Ord> PeekMut<'a, T> {
+ /// Removes the peeked value from the heap and returns it.
+ #[stable(feature = "binary_heap_peek_mut_pop", since = "1.18.0")]
+ pub fn pop(mut this: PeekMut<'a, T>) -> T {
+ let value = this.heap.pop().unwrap();
+ this.sift = false;
+ value
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Clone> Clone for BinaryHeap<T> {
+ fn clone(&self) -> Self {
+ BinaryHeap { data: self.data.clone() }
+ }
+
+ fn clone_from(&mut self, source: &Self) {
+ self.data.clone_from(&source.data);
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Ord> Default for BinaryHeap<T> {
+ /// Creates an empty `BinaryHeap<T>`.
+ #[inline]
+ fn default() -> BinaryHeap<T> {
+ BinaryHeap::new()
+ }
+}
+
+#[stable(feature = "binaryheap_debug", since = "1.4.0")]
+impl<T: fmt::Debug> fmt::Debug for BinaryHeap<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.iter()).finish()
+ }
+}
+
+impl<T: Ord> BinaryHeap<T> {
+ /// Creates an empty `BinaryHeap` as a max-heap.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap = BinaryHeap::new();
+ /// heap.push(4);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ pub fn new() -> BinaryHeap<T> {
+ BinaryHeap { data: vec![] }
+ }
+
+ /// Creates an empty `BinaryHeap` with at least the specified capacity.
+ ///
+ /// The binary heap will be able to hold at least `capacity` elements without
+ /// reallocating. This method is allowed to allocate for more elements than
+ /// `capacity`. If `capacity` is 0, the binary heap will not allocate.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap = BinaryHeap::with_capacity(10);
+ /// heap.push(4);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ pub fn with_capacity(capacity: usize) -> BinaryHeap<T> {
+ BinaryHeap { data: Vec::with_capacity(capacity) }
+ }
+
+ /// Returns a mutable reference to the greatest item in the binary heap, or
+ /// `None` if it is empty.
+ ///
+ /// Note: If the `PeekMut` value is leaked, the heap may be in an
+ /// inconsistent state.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap = BinaryHeap::new();
+ /// assert!(heap.peek_mut().is_none());
+ ///
+ /// heap.push(1);
+ /// heap.push(5);
+ /// heap.push(2);
+ /// {
+ /// let mut val = heap.peek_mut().unwrap();
+ /// *val = 0;
+ /// }
+ /// assert_eq!(heap.peek(), Some(&2));
+ /// ```
+ ///
+ /// # Time complexity
+ ///
+ /// If the item is modified then the worst case time complexity is *O*(log(*n*)),
+ /// otherwise it's *O*(1).
+ #[stable(feature = "binary_heap_peek_mut", since = "1.12.0")]
+ pub fn peek_mut(&mut self) -> Option<PeekMut<'_, T>> {
+ if self.is_empty() { None } else { Some(PeekMut { heap: self, sift: false }) }
+ }
+
+ /// Removes the greatest item from the binary heap and returns it, or `None` if it
+ /// is empty.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap = BinaryHeap::from([1, 3]);
+ ///
+ /// assert_eq!(heap.pop(), Some(3));
+ /// assert_eq!(heap.pop(), Some(1));
+ /// assert_eq!(heap.pop(), None);
+ /// ```
+ ///
+ /// # Time complexity
+ ///
+ /// The worst case cost of `pop` on a heap containing *n* elements is *O*(log(*n*)).
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn pop(&mut self) -> Option<T> {
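+ // Remove the last element; if the heap is still non-empty, swap it with
+ // the old root (the maximum, which is returned) and sift the new root
+ // down to restore the heap property.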
+ self.data.pop().map(|mut item| {
+ if !self.is_empty() {
+ swap(&mut item, &mut self.data[0]);
+ // SAFETY: !self.is_empty() means that self.len() > 0
+ unsafe { self.sift_down_to_bottom(0) };
+ }
+ item
+ })
+ }
+
+ /// Pushes an item onto the binary heap.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap = BinaryHeap::new();
+ /// heap.push(3);
+ /// heap.push(5);
+ /// heap.push(1);
+ ///
+ /// assert_eq!(heap.len(), 3);
+ /// assert_eq!(heap.peek(), Some(&5));
+ /// ```
+ ///
+ /// # Time complexity
+ ///
+ /// The expected cost of `push`, averaged over every possible ordering of
+ /// the elements being pushed, and over a sufficiently large number of
+ /// pushes, is *O*(1). This is the most meaningful cost metric when pushing
+ /// elements that are *not* already in any sorted pattern.
+ ///
+ /// The time complexity degrades if elements are pushed in predominantly
+ /// ascending order. In the worst case, elements are pushed in ascending
+ /// sorted order and the amortized cost per push is *O*(log(*n*)) against a heap
+ /// containing *n* elements.
+ ///
+ /// The worst case cost of a *single* call to `push` is *O*(*n*). The worst case
+ /// occurs when capacity is exhausted and needs a resize. The resize cost
+ /// has been amortized in the previous figures.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn push(&mut self, item: T) {
+ let old_len = self.len();
+ self.data.push(item);
+ // SAFETY: Since we pushed a new item it means that
+ // old_len = self.len() - 1 < self.len()
+ unsafe { self.sift_up(0, old_len) };
+ }
+
+ /// Consumes the `BinaryHeap` and returns a vector in sorted
+ /// (ascending) order.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ ///
+ /// let mut heap = BinaryHeap::from([1, 2, 4, 5, 7]);
+ /// heap.push(6);
+ /// heap.push(3);
+ ///
+ /// let vec = heap.into_sorted_vec();
+ /// assert_eq!(vec, [1, 2, 3, 4, 5, 6, 7]);
+ /// ```
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
+ pub fn into_sorted_vec(mut self) -> Vec<T> {
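+ // In-place heapsort: repeatedly swap the current maximum (at index 0) to
+ // the end of the shrinking prefix, then sift the new root down within
+ // that prefix. When the loop finishes, the backing vector is ascending.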
+ let mut end = self.len();
+ while end > 1 {
+ end -= 1;
+ // SAFETY: `end` goes from `self.len() - 1` to 1 (both included),
+ // so it's always a valid index to access.
+ // It is safe to access index 0 (i.e. `ptr`), because
+ // 1 <= end < self.len(), which means self.len() >= 2.
+ unsafe {
+ let ptr = self.data.as_mut_ptr();
+ ptr::swap(ptr, ptr.add(end));
+ }
+ // SAFETY: `end` goes from `self.len() - 1` to 1 (both included) so:
+ // 0 < 1 <= end <= self.len() - 1 < self.len()
+ // Which means 0 < end and end < self.len().
+ unsafe { self.sift_down_range(0, end) };
+ }
+ self.into_vec()
+ }
+
+ // The implementations of sift_up and sift_down use unsafe blocks in
+ // order to move an element out of the vector (leaving behind a
+ // hole), shift along the others and move the removed element back into the
+ // vector at the final location of the hole.
+ // The `Hole` type is used to represent this, and make sure
+ // the hole is filled back at the end of its scope, even on panic.
+ // Using a hole reduces the constant factor compared to using swaps,
+ // which involves twice as many moves.
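+ //
+ // For comparison, a sketch of the same sift-up written with safe swaps
+ // (illustrative only; the `Hole`-based version below writes the moved
+ // element back just once instead of swapping at every level):
+ //
+ //     fn sift_up_with_swaps<T: Ord>(data: &mut [T], start: usize, mut pos: usize) {
+ //         while pos > start {
+ //             let parent = (pos - 1) / 2;
+ //             if data[pos] <= data[parent] {
+ //                 break;
+ //             }
+ //             data.swap(pos, parent);
+ //             pos = parent;
+ //         }
+ //     }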
+
+ /// # Safety
+ ///
+ /// The caller must guarantee that `pos < self.len()`.
+ unsafe fn sift_up(&mut self, start: usize, pos: usize) -> usize {
+ // Take out the value at `pos` and create a hole.
+ // SAFETY: The caller guarantees that pos < self.len()
+ let mut hole = unsafe { Hole::new(&mut self.data, pos) };
+
+ while hole.pos() > start {
+ let parent = (hole.pos() - 1) / 2;
+
+ // SAFETY: hole.pos() > start >= 0, which means hole.pos() > 0
+ // and so hole.pos() - 1 can't underflow.
+ // This guarantees that parent < hole.pos() so
+ // it's a valid index and also != hole.pos().
+ if hole.element() <= unsafe { hole.get(parent) } {
+ break;
+ }
+
+ // SAFETY: Same as above
+ unsafe { hole.move_to(parent) };
+ }
+
+ hole.pos()
+ }
+
+ /// Take an element at `pos` and move it down the heap,
+ /// while its children are larger.
+ ///
+ /// # Safety
+ ///
+ /// The caller must guarantee that `pos < end <= self.len()`.
+ unsafe fn sift_down_range(&mut self, pos: usize, end: usize) {
+ // SAFETY: The caller guarantees that pos < end <= self.len().
+ let mut hole = unsafe { Hole::new(&mut self.data, pos) };
+ let mut child = 2 * hole.pos() + 1;
+
+ // Loop invariant: child == 2 * hole.pos() + 1.
+ while child <= end.saturating_sub(2) {
+ // compare with the greater of the two children
+ // SAFETY: child < end - 1 < self.len() and
+ // child + 1 < end <= self.len(), so they're valid indexes.
+ // child == 2 * hole.pos() + 1 != hole.pos() and
+ // child + 1 == 2 * hole.pos() + 2 != hole.pos().
+ // FIXME: 2 * hole.pos() + 1 or 2 * hole.pos() + 2 could overflow
+ // if T is a ZST
+ child += unsafe { hole.get(child) <= hole.get(child + 1) } as usize;
+
+ // if we are already in order, stop.
+ // SAFETY: child is now either the old child or the old child+1
+ // We have already proven that both are < self.len() and != hole.pos()
+ if hole.element() >= unsafe { hole.get(child) } {
+ return;
+ }
+
+ // SAFETY: same as above.
+ unsafe { hole.move_to(child) };
+ child = 2 * hole.pos() + 1;
+ }
+
+ // SAFETY: `&&` short-circuits, so the second condition is only evaluated
+ // when child == end - 1, and end - 1 < self.len() since end <= self.len().
+ if child == end - 1 && hole.element() < unsafe { hole.get(child) } {
+ // SAFETY: child is already proven to be a valid index and
+ // child == 2 * hole.pos() + 1 != hole.pos().
+ unsafe { hole.move_to(child) };
+ }
+ }
+
+ /// # Safety
+ ///
+ /// The caller must guarantee that `pos < self.len()`.
+ unsafe fn sift_down(&mut self, pos: usize) {
+ let len = self.len();
+ // SAFETY: pos < len is guaranteed by the caller and
+ // obviously len = self.len() <= self.len().
+ unsafe { self.sift_down_range(pos, len) };
+ }
+
+ /// Take an element at `pos` and move it all the way down the heap,
+ /// then sift it up to its position.
+ ///
+ /// Note: This is faster when the element is known to be large / should
+ /// be closer to the bottom.
+ ///
+ /// # Safety
+ ///
+ /// The caller must guarantee that `pos < self.len()`.
+ unsafe fn sift_down_to_bottom(&mut self, mut pos: usize) {
+ let end = self.len();
+ let start = pos;
+
+ // SAFETY: The caller guarantees that pos < self.len().
+ let mut hole = unsafe { Hole::new(&mut self.data, pos) };
+ let mut child = 2 * hole.pos() + 1;
+
+ // Loop invariant: child == 2 * hole.pos() + 1.
+ while child <= end.saturating_sub(2) {
+ // SAFETY: child < end - 1 < self.len() and
+ // child + 1 < end <= self.len(), so they're valid indexes.
+ // child == 2 * hole.pos() + 1 != hole.pos() and
+ // child + 1 == 2 * hole.pos() + 2 != hole.pos().
+ // FIXME: 2 * hole.pos() + 1 or 2 * hole.pos() + 2 could overflow
+ // if T is a ZST
+ child += unsafe { hole.get(child) <= hole.get(child + 1) } as usize;
+
+ // SAFETY: Same as above
+ unsafe { hole.move_to(child) };
+ child = 2 * hole.pos() + 1;
+ }
+
+ if child == end - 1 {
+ // SAFETY: child == end - 1 < self.len(), so it's a valid index
+ // and child == 2 * hole.pos() + 1 != hole.pos().
+ unsafe { hole.move_to(child) };
+ }
+ pos = hole.pos();
+ drop(hole);
+
+ // SAFETY: pos is the position in the hole and was already proven
+ // to be a valid index.
+ unsafe { self.sift_up(start, pos) };
+ }
+
+ /// Rebuild assuming data[0..start] is still a proper heap.
+ fn rebuild_tail(&mut self, start: usize) {
+ if start == self.len() {
+ return;
+ }
+
+ let tail_len = self.len() - start;
+
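+ // Helper: floor(log2(x)) for x > 0, computed from the number of leading
+ // zero bits.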
+ #[inline(always)]
+ fn log2_fast(x: usize) -> usize {
+ (usize::BITS - x.leading_zeros() - 1) as usize
+ }
+
+ // `rebuild` takes O(self.len()) operations
+ // and about 2 * self.len() comparisons in the worst case
+ // while repeating `sift_up` takes O(tail_len * log(start)) operations
+ // and about 1 * tail_len * log_2(start) comparisons in the worst case,
+ // assuming start >= tail_len. For larger heaps, the crossover point
+ // no longer follows this reasoning and was determined empirically.
+ let better_to_rebuild = if start < tail_len {
+ true
+ } else if self.len() <= 2048 {
+ 2 * self.len() < tail_len * log2_fast(start)
+ } else {
+ 2 * self.len() < tail_len * 11
+ };
+
+ if better_to_rebuild {
+ self.rebuild();
+ } else {
+ for i in start..self.len() {
+ // SAFETY: The index `i` is always less than self.len().
+ unsafe { self.sift_up(0, i) };
+ }
+ }
+ }
+
+ fn rebuild(&mut self) {
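+ // Bottom-up heap construction (Floyd's algorithm): sift down every
+ // non-leaf node, starting from the last one. This takes O(len) time in
+ // total, compared with O(len * log(len)) for repeated `sift_up` insertions.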
+ let mut n = self.len() / 2;
+ while n > 0 {
+ n -= 1;
+ // SAFETY: n starts from self.len() / 2 and goes down to 0.
+ // The only case when !(n < self.len()) is if
+ // self.len() == 0, but it's ruled out by the loop condition.
+ unsafe { self.sift_down(n) };
+ }
+ }
+
+ /// Moves all the elements of `other` into `self`, leaving `other` empty.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ ///
+ /// let mut a = BinaryHeap::from([-10, 1, 2, 3, 3]);
+ /// let mut b = BinaryHeap::from([-20, 5, 43]);
+ ///
+ /// a.append(&mut b);
+ ///
+ /// assert_eq!(a.into_sorted_vec(), [-20, -10, 1, 2, 3, 3, 5, 43]);
+ /// assert!(b.is_empty());
+ /// ```
+ #[stable(feature = "binary_heap_append", since = "1.11.0")]
+ pub fn append(&mut self, other: &mut Self) {
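+ // Ensure `self` is the larger heap, so that only the shorter heap's
+ // elements end up in the tail that `rebuild_tail` has to fix below.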
+ if self.len() < other.len() {
+ swap(self, other);
+ }
+
+ let start = self.data.len();
+
+ self.data.append(&mut other.data);
+
+ self.rebuild_tail(start);
+ }
+
+ /// Clears the binary heap, returning an iterator over the removed elements
+ /// in heap order. If the iterator is dropped before being fully consumed,
+ /// it drops the remaining elements in heap order.
+ ///
+ /// The returned iterator keeps a mutable borrow on the heap to optimize
+ /// its implementation.
+ ///
+ /// Note:
+ /// * `.drain_sorted()` is *O*(*n* \* log(*n*)); much slower than `.drain()`.
+ /// You should use the latter for most cases.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(binary_heap_drain_sorted)]
+ /// use std::collections::BinaryHeap;
+ ///
+ /// let mut heap = BinaryHeap::from([1, 2, 3, 4, 5]);
+ /// assert_eq!(heap.len(), 5);
+ ///
+ /// drop(heap.drain_sorted()); // removes all elements in heap order
+ /// assert_eq!(heap.len(), 0);
+ /// ```
+ #[inline]
+ #[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
+ pub fn drain_sorted(&mut self) -> DrainSorted<'_, T> {
+ DrainSorted { inner: self }
+ }
+
+ /// Retains only the elements specified by the predicate.
+ ///
+ /// In other words, remove all elements `e` for which `f(&e)` returns
+ /// `false`. The elements are visited in unsorted (and unspecified) order.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(binary_heap_retain)]
+ /// use std::collections::BinaryHeap;
+ ///
+ /// let mut heap = BinaryHeap::from([-10, -5, 1, 2, 4, 13]);
+ ///
+ /// heap.retain(|x| x % 2 == 0); // only keep even numbers
+ ///
+ /// assert_eq!(heap.into_sorted_vec(), [-10, 2, 4])
+ /// ```
+ #[unstable(feature = "binary_heap_retain", issue = "71503")]
+ pub fn retain<F>(&mut self, mut f: F)
+ where
+ F: FnMut(&T) -> bool,
+ {
+ let mut first_removed = self.len();
+ let mut i = 0;
+ self.data.retain(|e| {
+ let keep = f(e);
+ if !keep && i < first_removed {
+ first_removed = i;
+ }
+ i += 1;
+ keep
+ });
+ // data[0..first_removed] is untouched, so we only need to rebuild the tail:
+ self.rebuild_tail(first_removed);
+ }
+}
+
+impl<T> BinaryHeap<T> {
+ /// Returns an iterator visiting all values in the underlying vector, in
+ /// arbitrary order.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let heap = BinaryHeap::from([1, 2, 3, 4]);
+ ///
+ /// // Print 1, 2, 3, 4 in arbitrary order
+ /// for x in heap.iter() {
+ /// println!("{x}");
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn iter(&self) -> Iter<'_, T> {
+ Iter { iter: self.data.iter() }
+ }
+
+ /// Returns an iterator which retrieves elements in heap order.
+ /// This method consumes the original heap.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(binary_heap_into_iter_sorted)]
+ /// use std::collections::BinaryHeap;
+ /// let heap = BinaryHeap::from([1, 2, 3, 4, 5]);
+ ///
+ /// assert_eq!(heap.into_iter_sorted().take(2).collect::<Vec<_>>(), [5, 4]);
+ /// ```
+ #[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
+ pub fn into_iter_sorted(self) -> IntoIterSorted<T> {
+ IntoIterSorted { inner: self }
+ }
+
+ /// Returns the greatest item in the binary heap, or `None` if it is empty.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap = BinaryHeap::new();
+ /// assert_eq!(heap.peek(), None);
+ ///
+ /// heap.push(1);
+ /// heap.push(5);
+ /// heap.push(2);
+ /// assert_eq!(heap.peek(), Some(&5));
+ ///
+ /// ```
+ ///
+ /// # Time complexity
+ ///
+ /// Cost is *O*(1) in the worst case.
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn peek(&self) -> Option<&T> {
+ self.data.get(0)
+ }
+
+ /// Returns the number of elements the binary heap can hold without reallocating.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap = BinaryHeap::with_capacity(100);
+ /// assert!(heap.capacity() >= 100);
+ /// heap.push(4);
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn capacity(&self) -> usize {
+ self.data.capacity()
+ }
+
+ /// Reserves the minimum capacity for at least `additional` elements more than
+ /// the current length. Unlike [`reserve`], this will not
+ /// deliberately over-allocate to speculatively avoid frequent allocations.
+ /// After calling `reserve_exact`, capacity will be greater than or equal to
+ /// `self.len() + additional`. Does nothing if the capacity is already
+ /// sufficient.
+ ///
+ /// [`reserve`]: BinaryHeap::reserve
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity overflows [`usize`].
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap = BinaryHeap::new();
+ /// heap.reserve_exact(100);
+ /// assert!(heap.capacity() >= 100);
+ /// heap.push(4);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn reserve_exact(&mut self, additional: usize) {
+ self.data.reserve_exact(additional);
+ }
+
+ /// Reserves capacity for at least `additional` elements more than the
+ /// current length. The allocator may reserve more space to speculatively
+ /// avoid frequent allocations. After calling `reserve`,
+ /// capacity will be greater than or equal to `self.len() + additional`.
+ /// Does nothing if capacity is already sufficient.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity overflows [`usize`].
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap = BinaryHeap::new();
+ /// heap.reserve(100);
+ /// assert!(heap.capacity() >= 100);
+ /// heap.push(4);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn reserve(&mut self, additional: usize) {
+ self.data.reserve(additional);
+ }
+
+ /// Tries to reserve the minimum capacity for at least `additional` elements
+ /// more than the current length. Unlike [`try_reserve`], this will not
+ /// deliberately over-allocate to speculatively avoid frequent allocations.
+ /// After calling `try_reserve_exact`, capacity will be greater than or
+ /// equal to `self.len() + additional` if it returns `Ok(())`.
+ /// Does nothing if the capacity is already sufficient.
+ ///
+ /// Note that the allocator may give the collection more space than it
+ /// requests. Therefore, capacity can not be relied upon to be precisely
+ /// minimal. Prefer [`try_reserve`] if future insertions are expected.
+ ///
+ /// [`try_reserve`]: BinaryHeap::try_reserve
+ ///
+ /// # Errors
+ ///
+ /// If the capacity overflows, or the allocator reports a failure, then an error
+ /// is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// use std::collections::TryReserveError;
+ ///
+ /// fn find_max_slow(data: &[u32]) -> Result<Option<u32>, TryReserveError> {
+ /// let mut heap = BinaryHeap::new();
+ ///
+ /// // Pre-reserve the memory, exiting if we can't
+ /// heap.try_reserve_exact(data.len())?;
+ ///
+ /// // Now we know this can't OOM in the middle of our complex work
+ /// heap.extend(data.iter());
+ ///
+ /// Ok(heap.pop())
+ /// }
+ /// # find_max_slow(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
+ /// ```
+ #[stable(feature = "try_reserve_2", since = "1.63.0")]
+ pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
+ self.data.try_reserve_exact(additional)
+ }
+
+ /// Tries to reserve capacity for at least `additional` elements more than the
+ /// current length. The allocator may reserve more space to speculatively
+ /// avoid frequent allocations. After calling `try_reserve`, capacity will be
+ /// greater than or equal to `self.len() + additional` if it returns
+ /// `Ok(())`. Does nothing if capacity is already sufficient.
+ ///
+ /// # Errors
+ ///
+ /// If the capacity overflows, or the allocator reports a failure, then an error
+ /// is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// use std::collections::TryReserveError;
+ ///
+ /// fn find_max_slow(data: &[u32]) -> Result<Option<u32>, TryReserveError> {
+ /// let mut heap = BinaryHeap::new();
+ ///
+ /// // Pre-reserve the memory, exiting if we can't
+ /// heap.try_reserve(data.len())?;
+ ///
+ /// // Now we know this can't OOM in the middle of our complex work
+ /// heap.extend(data.iter());
+ ///
+ /// Ok(heap.pop())
+ /// }
+ /// # find_max_slow(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
+ /// ```
+ #[stable(feature = "try_reserve_2", since = "1.63.0")]
+ pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
+ self.data.try_reserve(additional)
+ }
+
+ /// Discards as much additional capacity as possible.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap: BinaryHeap<i32> = BinaryHeap::with_capacity(100);
+ ///
+ /// assert!(heap.capacity() >= 100);
+ /// heap.shrink_to_fit();
+ /// assert!(heap.capacity() == 0);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn shrink_to_fit(&mut self) {
+ self.data.shrink_to_fit();
+ }
+
+ /// Discards capacity with a lower bound.
+ ///
+ /// The capacity will remain at least as large as both the length
+ /// and the supplied value.
+ ///
+ /// If the current capacity is less than the lower limit, this is a no-op.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap: BinaryHeap<i32> = BinaryHeap::with_capacity(100);
+ ///
+ /// assert!(heap.capacity() >= 100);
+ /// heap.shrink_to(10);
+ /// assert!(heap.capacity() >= 10);
+ /// ```
+ #[inline]
+ #[stable(feature = "shrink_to", since = "1.56.0")]
+ pub fn shrink_to(&mut self, min_capacity: usize) {
+ self.data.shrink_to(min_capacity)
+ }
+
+ /// Returns a slice of all values in the underlying vector, in arbitrary
+ /// order.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(binary_heap_as_slice)]
+ /// use std::collections::BinaryHeap;
+ /// use std::io::{self, Write};
+ ///
+ /// let heap = BinaryHeap::from([1, 2, 3, 4, 5, 6, 7]);
+ ///
+ /// io::sink().write(heap.as_slice()).unwrap();
+ /// ```
+ #[must_use]
+ #[unstable(feature = "binary_heap_as_slice", issue = "83659")]
+ pub fn as_slice(&self) -> &[T] {
+ self.data.as_slice()
+ }
+
+ /// Consumes the `BinaryHeap` and returns the underlying vector
+ /// in arbitrary order.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let heap = BinaryHeap::from([1, 2, 3, 4, 5, 6, 7]);
+ /// let vec = heap.into_vec();
+ ///
+ /// // Will print in some order
+ /// for x in vec {
+ /// println!("{x}");
+ /// }
+ /// ```
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
+ pub fn into_vec(self) -> Vec<T> {
+ self.into()
+ }
+
+ /// Returns the length of the binary heap.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let heap = BinaryHeap::from([1, 3]);
+ ///
+ /// assert_eq!(heap.len(), 2);
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn len(&self) -> usize {
+ self.data.len()
+ }
+
+ /// Checks if the binary heap is empty.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap = BinaryHeap::new();
+ ///
+ /// assert!(heap.is_empty());
+ ///
+ /// heap.push(3);
+ /// heap.push(5);
+ /// heap.push(1);
+ ///
+ /// assert!(!heap.is_empty());
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+
+ /// Clears the binary heap, returning an iterator over the removed elements
+ /// in arbitrary order. If the iterator is dropped before being fully
+ /// consumed, it drops the remaining elements in arbitrary order.
+ ///
+ /// The returned iterator keeps a mutable borrow on the heap to optimize
+ /// its implementation.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap = BinaryHeap::from([1, 3]);
+ ///
+ /// assert!(!heap.is_empty());
+ ///
+ /// for x in heap.drain() {
+ /// println!("{x}");
+ /// }
+ ///
+ /// assert!(heap.is_empty());
+ /// ```
+ #[inline]
+ #[stable(feature = "drain", since = "1.6.0")]
+ pub fn drain(&mut self) -> Drain<'_, T> {
+ Drain { iter: self.data.drain(..) }
+ }
+
+ /// Drops all items from the binary heap.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let mut heap = BinaryHeap::from([1, 3]);
+ ///
+ /// assert!(!heap.is_empty());
+ ///
+ /// heap.clear();
+ ///
+ /// assert!(heap.is_empty());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn clear(&mut self) {
+ self.drain();
+ }
+}
+
+/// Hole represents a hole in a slice, i.e., an index without a valid value
+/// (because it was moved from or duplicated).
+/// In drop, `Hole` will restore the slice by filling the hole
+/// position with the value that was originally removed.
+struct Hole<'a, T: 'a> {
+ data: &'a mut [T],
+ elt: ManuallyDrop<T>,
+ pos: usize,
+}
+
+impl<'a, T> Hole<'a, T> {
+ /// Create a new `Hole` at index `pos`.
+ ///
+ /// Unsafe because pos must be within the data slice.
+ #[inline]
+ unsafe fn new(data: &'a mut [T], pos: usize) -> Self {
+ debug_assert!(pos < data.len());
+ // SAFE: pos should be inside the slice
+ let elt = unsafe { ptr::read(data.get_unchecked(pos)) };
+ Hole { data, elt: ManuallyDrop::new(elt), pos }
+ }
+
+ #[inline]
+ fn pos(&self) -> usize {
+ self.pos
+ }
+
+ /// Returns a reference to the element removed.
+ #[inline]
+ fn element(&self) -> &T {
+ &self.elt
+ }
+
+ /// Returns a reference to the element at `index`.
+ ///
+ /// Unsafe because index must be within the data slice and not equal to pos.
+ #[inline]
+ unsafe fn get(&self, index: usize) -> &T {
+ debug_assert!(index != self.pos);
+ debug_assert!(index < self.data.len());
+ unsafe { self.data.get_unchecked(index) }
+ }
+
+ /// Move hole to new location
+ ///
+ /// Unsafe because index must be within the data slice and not equal to pos.
+ #[inline]
+ unsafe fn move_to(&mut self, index: usize) {
+ debug_assert!(index != self.pos);
+ debug_assert!(index < self.data.len());
+ unsafe {
+ let ptr = self.data.as_mut_ptr();
+ let index_ptr: *const _ = ptr.add(index);
+ let hole_ptr = ptr.add(self.pos);
+ ptr::copy_nonoverlapping(index_ptr, hole_ptr, 1);
+ }
+ self.pos = index;
+ }
+}
+
+impl<T> Drop for Hole<'_, T> {
+ #[inline]
+ fn drop(&mut self) {
+ // fill the hole again
+ unsafe {
+ let pos = self.pos;
+ ptr::copy_nonoverlapping(&*self.elt, self.data.get_unchecked_mut(pos), 1);
+ }
+ }
+}
+
+/// An iterator over the elements of a `BinaryHeap`.
+///
+/// This `struct` is created by [`BinaryHeap::iter()`]. See its
+/// documentation for more.
+///
+/// [`iter`]: BinaryHeap::iter
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Iter<'a, T: 'a> {
+ iter: slice::Iter<'a, T>,
+}
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("Iter").field(&self.iter.as_slice()).finish()
+ }
+}
+
+// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Clone for Iter<'_, T> {
+ fn clone(&self) -> Self {
+ Iter { iter: self.iter.clone() }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> Iterator for Iter<'a, T> {
+ type Item = &'a T;
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a T> {
+ self.iter.next()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+
+ #[inline]
+ fn last(self) -> Option<&'a T> {
+ self.iter.last()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> DoubleEndedIterator for Iter<'a, T> {
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a T> {
+ self.iter.next_back()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> ExactSizeIterator for Iter<'_, T> {
+ fn is_empty(&self) -> bool {
+ self.iter.is_empty()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T> FusedIterator for Iter<'_, T> {}
+
+/// An owning iterator over the elements of a `BinaryHeap`.
+///
+/// This `struct` is created by [`BinaryHeap::into_iter()`]
+/// (provided by the [`IntoIterator`] trait). See its documentation for more.
+///
+/// [`into_iter`]: BinaryHeap::into_iter
+/// [`IntoIterator`]: core::iter::IntoIterator
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(Clone)]
+pub struct IntoIter<T> {
+ iter: vec::IntoIter<T>,
+}
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<T: fmt::Debug> fmt::Debug for IntoIter<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("IntoIter").field(&self.iter.as_slice()).finish()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Iterator for IntoIter<T> {
+ type Item = T;
+
+ #[inline]
+ fn next(&mut self) -> Option<T> {
+ self.iter.next()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> DoubleEndedIterator for IntoIter<T> {
+ #[inline]
+ fn next_back(&mut self) -> Option<T> {
+ self.iter.next_back()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> ExactSizeIterator for IntoIter<T> {
+ fn is_empty(&self) -> bool {
+ self.iter.is_empty()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T> FusedIterator for IntoIter<T> {}
+
+// In addition to the SAFETY invariants of the following three unsafe traits,
+// also refer to the `vec::in_place_collect` module documentation for an overview.
+#[unstable(issue = "none", feature = "inplace_iteration")]
+#[doc(hidden)]
+unsafe impl<T> SourceIter for IntoIter<T> {
+ type Source = IntoIter<T>;
+
+ #[inline]
+ unsafe fn as_inner(&mut self) -> &mut Self::Source {
+ self
+ }
+}
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+#[doc(hidden)]
+unsafe impl<I> InPlaceIterable for IntoIter<I> {}
+
+unsafe impl<I> AsVecIntoIter for IntoIter<I> {
+ type Item = I;
+
+ fn as_into_iter(&mut self) -> &mut vec::IntoIter<Self::Item> {
+ &mut self.iter
+ }
+}
+
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
+#[derive(Clone, Debug)]
+pub struct IntoIterSorted<T> {
+ inner: BinaryHeap<T>,
+}
+
+#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
+impl<T: Ord> Iterator for IntoIterSorted<T> {
+ type Item = T;
+
+ #[inline]
+ fn next(&mut self) -> Option<T> {
+ self.inner.pop()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let exact = self.inner.len();
+ (exact, Some(exact))
+ }
+}
+
+#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
+impl<T: Ord> ExactSizeIterator for IntoIterSorted<T> {}
+
+#[unstable(feature = "binary_heap_into_iter_sorted", issue = "59278")]
+impl<T: Ord> FusedIterator for IntoIterSorted<T> {}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<T: Ord> TrustedLen for IntoIterSorted<T> {}
+
+/// A draining iterator over the elements of a `BinaryHeap`.
+///
+/// This `struct` is created by [`BinaryHeap::drain()`]. See its
+/// documentation for more.
+///
+/// [`drain`]: BinaryHeap::drain
+#[stable(feature = "drain", since = "1.6.0")]
+#[derive(Debug)]
+pub struct Drain<'a, T: 'a> {
+ iter: vec::Drain<'a, T>,
+}
+
+#[stable(feature = "drain", since = "1.6.0")]
+impl<T> Iterator for Drain<'_, T> {
+ type Item = T;
+
+ #[inline]
+ fn next(&mut self) -> Option<T> {
+ self.iter.next()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+}
+
+#[stable(feature = "drain", since = "1.6.0")]
+impl<T> DoubleEndedIterator for Drain<'_, T> {
+ #[inline]
+ fn next_back(&mut self) -> Option<T> {
+ self.iter.next_back()
+ }
+}
+
+#[stable(feature = "drain", since = "1.6.0")]
+impl<T> ExactSizeIterator for Drain<'_, T> {
+ fn is_empty(&self) -> bool {
+ self.iter.is_empty()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T> FusedIterator for Drain<'_, T> {}
+
+/// A draining iterator over the elements of a `BinaryHeap`.
+///
+/// This `struct` is created by [`BinaryHeap::drain_sorted()`]. See its
+/// documentation for more.
+///
+/// [`drain_sorted`]: BinaryHeap::drain_sorted
+#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
+#[derive(Debug)]
+pub struct DrainSorted<'a, T: Ord> {
+ inner: &'a mut BinaryHeap<T>,
+}
+
+#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
+impl<'a, T: Ord> Drop for DrainSorted<'a, T> {
+ /// Removes heap elements in heap order.
+ fn drop(&mut self) {
+ struct DropGuard<'r, 'a, T: Ord>(&'r mut DrainSorted<'a, T>);
+
+ impl<'r, 'a, T: Ord> Drop for DropGuard<'r, 'a, T> {
+ fn drop(&mut self) {
+ while self.0.inner.pop().is_some() {}
+ }
+ }
+
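+ // Pop the remaining elements one by one. If dropping an element panics,
+ // `DropGuard` finishes emptying the heap before the panic propagates;
+ // on the normal path `mem::forget` disarms the guard so elements are
+ // not popped twice.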
+ while let Some(item) = self.inner.pop() {
+ let guard = DropGuard(self);
+ drop(item);
+ mem::forget(guard);
+ }
+ }
+}
+
+#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
+impl<T: Ord> Iterator for DrainSorted<'_, T> {
+ type Item = T;
+
+ #[inline]
+ fn next(&mut self) -> Option<T> {
+ self.inner.pop()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let exact = self.inner.len();
+ (exact, Some(exact))
+ }
+}
+
+#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
+impl<T: Ord> ExactSizeIterator for DrainSorted<'_, T> {}
+
+#[unstable(feature = "binary_heap_drain_sorted", issue = "59278")]
+impl<T: Ord> FusedIterator for DrainSorted<'_, T> {}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<T: Ord> TrustedLen for DrainSorted<'_, T> {}
+
+#[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
+impl<T: Ord> From<Vec<T>> for BinaryHeap<T> {
+ /// Converts a `Vec<T>` into a `BinaryHeap<T>`.
+ ///
+ /// This conversion happens in-place, and has *O*(*n*) time complexity.
+ fn from(vec: Vec<T>) -> BinaryHeap<T> {
+ let mut heap = BinaryHeap { data: vec };
+ heap.rebuild();
+ heap
+ }
+}
+
+#[stable(feature = "std_collections_from_array", since = "1.56.0")]
+impl<T: Ord, const N: usize> From<[T; N]> for BinaryHeap<T> {
+ /// ```
+ /// use std::collections::BinaryHeap;
+ ///
+ /// let mut h1 = BinaryHeap::from([1, 4, 2, 3]);
+ /// let mut h2: BinaryHeap<_> = [1, 4, 2, 3].into();
+ /// while let Some((a, b)) = h1.pop().zip(h2.pop()) {
+ /// assert_eq!(a, b);
+ /// }
+ /// ```
+ fn from(arr: [T; N]) -> Self {
+ Self::from_iter(arr)
+ }
+}
+
+#[stable(feature = "binary_heap_extras_15", since = "1.5.0")]
+impl<T> From<BinaryHeap<T>> for Vec<T> {
+ /// Converts a `BinaryHeap<T>` into a `Vec<T>`.
+ ///
+ /// This conversion requires no data movement or allocation, and has
+ /// constant time complexity.
+ fn from(heap: BinaryHeap<T>) -> Vec<T> {
+ heap.data
+ }
+}
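+
+// For illustration only, using nothing but the public `BinaryHeap` API: building
+// the heap heapifies the vector in place in O(n), and converting back returns the
+// backing vector in O(1).
+//
+//     let heap = BinaryHeap::from(vec![3, 1, 4, 1, 5]);
+//     assert_eq!(heap.peek(), Some(&5));      // heapified in place
+//     let storage: Vec<i32> = heap.into();    // hands back the buffer, in heap order
+//     assert_eq!(storage.len(), 5);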
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Ord> FromIterator<T> for BinaryHeap<T> {
+ fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> BinaryHeap<T> {
+ BinaryHeap::from(iter.into_iter().collect::<Vec<_>>())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> IntoIterator for BinaryHeap<T> {
+ type Item = T;
+ type IntoIter = IntoIter<T>;
+
+ /// Creates a consuming iterator, that is, one that moves each value out of
+ /// the binary heap in arbitrary order. The binary heap cannot be used
+ /// after calling this.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BinaryHeap;
+ /// let heap = BinaryHeap::from([1, 2, 3, 4]);
+ ///
+ /// // Print 1, 2, 3, 4 in arbitrary order
+ /// for x in heap.into_iter() {
+ /// // x has type i32, not &i32
+ /// println!("{x}");
+ /// }
+ /// ```
+ fn into_iter(self) -> IntoIter<T> {
+ IntoIter { iter: self.data.into_iter() }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> IntoIterator for &'a BinaryHeap<T> {
+ type Item = &'a T;
+ type IntoIter = Iter<'a, T>;
+
+ fn into_iter(self) -> Iter<'a, T> {
+ self.iter()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Ord> Extend<T> for BinaryHeap<T> {
+ #[inline]
+ fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
+ <Self as SpecExtend<I>>::spec_extend(self, iter);
+ }
+
+ #[inline]
+ fn extend_one(&mut self, item: T) {
+ self.push(item);
+ }
+
+ #[inline]
+ fn extend_reserve(&mut self, additional: usize) {
+ self.reserve(additional);
+ }
+}
+
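+// Extend specialization: when the source is a `Vec<T>` or another `BinaryHeap<T>`,
+// the elements can be appended in bulk and the heap property restored once (via
+// `rebuild_tail` or `append`), instead of sifting up every element individually as
+// the generic `extend_desugared` path does.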
+impl<T: Ord, I: IntoIterator<Item = T>> SpecExtend<I> for BinaryHeap<T> {
+ default fn spec_extend(&mut self, iter: I) {
+ self.extend_desugared(iter.into_iter());
+ }
+}
+
+impl<T: Ord> SpecExtend<Vec<T>> for BinaryHeap<T> {
+ fn spec_extend(&mut self, ref mut other: Vec<T>) {
+ let start = self.data.len();
+ self.data.append(other);
+ self.rebuild_tail(start);
+ }
+}
+
+impl<T: Ord> SpecExtend<BinaryHeap<T>> for BinaryHeap<T> {
+ fn spec_extend(&mut self, ref mut other: BinaryHeap<T>) {
+ self.append(other);
+ }
+}
+
+impl<T: Ord> BinaryHeap<T> {
+ fn extend_desugared<I: IntoIterator<Item = T>>(&mut self, iter: I) {
+ let iterator = iter.into_iter();
+ let (lower, _) = iterator.size_hint();
+
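+        // Only the lower bound of the size hint is trusted for the reservation;
+        // if the iterator yields more items than that, later pushes grow the
+        // buffer as needed.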
+ self.reserve(lower);
+
+ iterator.for_each(move |elem| self.push(elem));
+ }
+}
+
+#[stable(feature = "extend_ref", since = "1.2.0")]
+impl<'a, T: 'a + Ord + Copy> Extend<&'a T> for BinaryHeap<T> {
+ fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
+ self.extend(iter.into_iter().cloned());
+ }
+
+ #[inline]
+ fn extend_one(&mut self, &item: &'a T) {
+ self.push(item);
+ }
+
+ #[inline]
+ fn extend_reserve(&mut self, additional: usize) {
+ self.reserve(additional);
+ }
+}
diff --git a/library/alloc/src/collections/binary_heap/tests.rs b/library/alloc/src/collections/binary_heap/tests.rs
new file mode 100644
index 000000000..5a05215ae
--- /dev/null
+++ b/library/alloc/src/collections/binary_heap/tests.rs
@@ -0,0 +1,489 @@
+use super::*;
+use crate::boxed::Box;
+use std::iter::TrustedLen;
+use std::panic::{catch_unwind, AssertUnwindSafe};
+use std::sync::atomic::{AtomicU32, Ordering};
+
+#[test]
+fn test_iterator() {
+ let data = vec![5, 9, 3];
+ let iterout = [9, 5, 3];
+ let heap = BinaryHeap::from(data);
+ let mut i = 0;
+ for el in &heap {
+ assert_eq!(*el, iterout[i]);
+ i += 1;
+ }
+}
+
+#[test]
+fn test_iter_rev_cloned_collect() {
+ let data = vec![5, 9, 3];
+ let iterout = vec![3, 5, 9];
+ let pq = BinaryHeap::from(data);
+
+ let v: Vec<_> = pq.iter().rev().cloned().collect();
+ assert_eq!(v, iterout);
+}
+
+#[test]
+fn test_into_iter_collect() {
+ let data = vec![5, 9, 3];
+ let iterout = vec![9, 5, 3];
+ let pq = BinaryHeap::from(data);
+
+ let v: Vec<_> = pq.into_iter().collect();
+ assert_eq!(v, iterout);
+}
+
+#[test]
+fn test_into_iter_size_hint() {
+ let data = vec![5, 9];
+ let pq = BinaryHeap::from(data);
+
+ let mut it = pq.into_iter();
+
+ assert_eq!(it.size_hint(), (2, Some(2)));
+ assert_eq!(it.next(), Some(9));
+
+ assert_eq!(it.size_hint(), (1, Some(1)));
+ assert_eq!(it.next(), Some(5));
+
+ assert_eq!(it.size_hint(), (0, Some(0)));
+ assert_eq!(it.next(), None);
+}
+
+#[test]
+fn test_into_iter_rev_collect() {
+ let data = vec![5, 9, 3];
+ let iterout = vec![3, 5, 9];
+ let pq = BinaryHeap::from(data);
+
+ let v: Vec<_> = pq.into_iter().rev().collect();
+ assert_eq!(v, iterout);
+}
+
+#[test]
+fn test_into_iter_sorted_collect() {
+ let heap = BinaryHeap::from(vec![2, 4, 6, 2, 1, 8, 10, 3, 5, 7, 0, 9, 1]);
+ let it = heap.into_iter_sorted();
+ let sorted = it.collect::<Vec<_>>();
+ assert_eq!(sorted, vec![10, 9, 8, 7, 6, 5, 4, 3, 2, 2, 1, 1, 0]);
+}
+
+#[test]
+fn test_drain_sorted_collect() {
+ let mut heap = BinaryHeap::from(vec![2, 4, 6, 2, 1, 8, 10, 3, 5, 7, 0, 9, 1]);
+ let it = heap.drain_sorted();
+ let sorted = it.collect::<Vec<_>>();
+ assert_eq!(sorted, vec![10, 9, 8, 7, 6, 5, 4, 3, 2, 2, 1, 1, 0]);
+}
+
+fn check_exact_size_iterator<I: ExactSizeIterator>(len: usize, it: I) {
+ let mut it = it;
+
+ for i in 0..it.len() {
+ let (lower, upper) = it.size_hint();
+ assert_eq!(Some(lower), upper);
+ assert_eq!(lower, len - i);
+ assert_eq!(it.len(), len - i);
+ it.next();
+ }
+ assert_eq!(it.len(), 0);
+ assert!(it.is_empty());
+}
+
+#[test]
+fn test_exact_size_iterator() {
+ let heap = BinaryHeap::from(vec![2, 4, 6, 2, 1, 8, 10, 3, 5, 7, 0, 9, 1]);
+ check_exact_size_iterator(heap.len(), heap.iter());
+ check_exact_size_iterator(heap.len(), heap.clone().into_iter());
+ check_exact_size_iterator(heap.len(), heap.clone().into_iter_sorted());
+ check_exact_size_iterator(heap.len(), heap.clone().drain());
+ check_exact_size_iterator(heap.len(), heap.clone().drain_sorted());
+}
+
+fn check_trusted_len<I: TrustedLen>(len: usize, it: I) {
+ let mut it = it;
+ for i in 0..len {
+ let (lower, upper) = it.size_hint();
+ if upper.is_some() {
+ assert_eq!(Some(lower), upper);
+ assert_eq!(lower, len - i);
+ }
+ it.next();
+ }
+}
+
+#[test]
+fn test_trusted_len() {
+ let heap = BinaryHeap::from(vec![2, 4, 6, 2, 1, 8, 10, 3, 5, 7, 0, 9, 1]);
+ check_trusted_len(heap.len(), heap.clone().into_iter_sorted());
+ check_trusted_len(heap.len(), heap.clone().drain_sorted());
+}
+
+#[test]
+fn test_peek_and_pop() {
+ let data = vec![2, 4, 6, 2, 1, 8, 10, 3, 5, 7, 0, 9, 1];
+ let mut sorted = data.clone();
+ sorted.sort();
+ let mut heap = BinaryHeap::from(data);
+ while !heap.is_empty() {
+ assert_eq!(heap.peek().unwrap(), sorted.last().unwrap());
+ assert_eq!(heap.pop().unwrap(), sorted.pop().unwrap());
+ }
+}
+
+#[test]
+fn test_peek_mut() {
+ let data = vec![2, 4, 6, 2, 1, 8, 10, 3, 5, 7, 0, 9, 1];
+ let mut heap = BinaryHeap::from(data);
+ assert_eq!(heap.peek(), Some(&10));
+ {
+ let mut top = heap.peek_mut().unwrap();
+ *top -= 2;
+ }
+ assert_eq!(heap.peek(), Some(&9));
+}
+
+#[test]
+fn test_peek_mut_pop() {
+ let data = vec![2, 4, 6, 2, 1, 8, 10, 3, 5, 7, 0, 9, 1];
+ let mut heap = BinaryHeap::from(data);
+ assert_eq!(heap.peek(), Some(&10));
+ {
+ let mut top = heap.peek_mut().unwrap();
+ *top -= 2;
+ assert_eq!(PeekMut::pop(top), 8);
+ }
+ assert_eq!(heap.peek(), Some(&9));
+}
+
+#[test]
+fn test_push() {
+ let mut heap = BinaryHeap::from(vec![2, 4, 9]);
+ assert_eq!(heap.len(), 3);
+ assert!(*heap.peek().unwrap() == 9);
+ heap.push(11);
+ assert_eq!(heap.len(), 4);
+ assert!(*heap.peek().unwrap() == 11);
+ heap.push(5);
+ assert_eq!(heap.len(), 5);
+ assert!(*heap.peek().unwrap() == 11);
+ heap.push(27);
+ assert_eq!(heap.len(), 6);
+ assert!(*heap.peek().unwrap() == 27);
+ heap.push(3);
+ assert_eq!(heap.len(), 7);
+ assert!(*heap.peek().unwrap() == 27);
+ heap.push(103);
+ assert_eq!(heap.len(), 8);
+ assert!(*heap.peek().unwrap() == 103);
+}
+
+#[test]
+fn test_push_unique() {
+ let mut heap = BinaryHeap::<Box<_>>::from(vec![Box::new(2), Box::new(4), Box::new(9)]);
+ assert_eq!(heap.len(), 3);
+ assert!(**heap.peek().unwrap() == 9);
+ heap.push(Box::new(11));
+ assert_eq!(heap.len(), 4);
+ assert!(**heap.peek().unwrap() == 11);
+ heap.push(Box::new(5));
+ assert_eq!(heap.len(), 5);
+ assert!(**heap.peek().unwrap() == 11);
+ heap.push(Box::new(27));
+ assert_eq!(heap.len(), 6);
+ assert!(**heap.peek().unwrap() == 27);
+ heap.push(Box::new(3));
+ assert_eq!(heap.len(), 7);
+ assert!(**heap.peek().unwrap() == 27);
+ heap.push(Box::new(103));
+ assert_eq!(heap.len(), 8);
+ assert!(**heap.peek().unwrap() == 103);
+}
+
+fn check_to_vec(mut data: Vec<i32>) {
+ let heap = BinaryHeap::from(data.clone());
+ let mut v = heap.clone().into_vec();
+ v.sort();
+ data.sort();
+
+ assert_eq!(v, data);
+ assert_eq!(heap.into_sorted_vec(), data);
+}
+
+#[test]
+fn test_to_vec() {
+ check_to_vec(vec![]);
+ check_to_vec(vec![5]);
+ check_to_vec(vec![3, 2]);
+ check_to_vec(vec![2, 3]);
+ check_to_vec(vec![5, 1, 2]);
+ check_to_vec(vec![1, 100, 2, 3]);
+ check_to_vec(vec![1, 3, 5, 7, 9, 2, 4, 6, 8, 0]);
+ check_to_vec(vec![2, 4, 6, 2, 1, 8, 10, 3, 5, 7, 0, 9, 1]);
+ check_to_vec(vec![9, 11, 9, 9, 9, 9, 11, 2, 3, 4, 11, 9, 0, 0, 0, 0]);
+ check_to_vec(vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
+ check_to_vec(vec![10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0]);
+ check_to_vec(vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 0, 0, 1, 2]);
+ check_to_vec(vec![5, 4, 3, 2, 1, 5, 4, 3, 2, 1, 5, 4, 3, 2, 1]);
+}
+
+#[test]
+fn test_in_place_iterator_specialization() {
+ let src: Vec<usize> = vec![1, 2, 3];
+ let src_ptr = src.as_ptr();
+ let heap: BinaryHeap<_> = src.into_iter().map(std::convert::identity).collect();
+ let heap_ptr = heap.iter().next().unwrap() as *const usize;
+ assert_eq!(src_ptr, heap_ptr);
+ let sink: Vec<_> = heap.into_iter().map(std::convert::identity).collect();
+ let sink_ptr = sink.as_ptr();
+ assert_eq!(heap_ptr, sink_ptr);
+}
+
+#[test]
+fn test_empty_pop() {
+ let mut heap = BinaryHeap::<i32>::new();
+ assert!(heap.pop().is_none());
+}
+
+#[test]
+fn test_empty_peek() {
+ let empty = BinaryHeap::<i32>::new();
+ assert!(empty.peek().is_none());
+}
+
+#[test]
+fn test_empty_peek_mut() {
+ let mut empty = BinaryHeap::<i32>::new();
+ assert!(empty.peek_mut().is_none());
+}
+
+#[test]
+fn test_from_iter() {
+ let xs = vec![9, 8, 7, 6, 5, 4, 3, 2, 1];
+
+ let mut q: BinaryHeap<_> = xs.iter().rev().cloned().collect();
+
+ for &x in &xs {
+ assert_eq!(q.pop().unwrap(), x);
+ }
+}
+
+#[test]
+fn test_drain() {
+ let mut q: BinaryHeap<_> = [9, 8, 7, 6, 5, 4, 3, 2, 1].iter().cloned().collect();
+
+ assert_eq!(q.drain().take(5).count(), 5);
+
+ assert!(q.is_empty());
+}
+
+#[test]
+fn test_drain_sorted() {
+ let mut q: BinaryHeap<_> = [9, 8, 7, 6, 5, 4, 3, 2, 1].iter().cloned().collect();
+
+ assert_eq!(q.drain_sorted().take(5).collect::<Vec<_>>(), vec![9, 8, 7, 6, 5]);
+
+ assert!(q.is_empty());
+}
+
+#[test]
+fn test_drain_sorted_leak() {
+ static DROPS: AtomicU32 = AtomicU32::new(0);
+
+ #[derive(Clone, PartialEq, Eq, PartialOrd, Ord)]
+ struct D(u32, bool);
+
+ impl Drop for D {
+ fn drop(&mut self) {
+ DROPS.fetch_add(1, Ordering::SeqCst);
+
+ if self.1 {
+ panic!("panic in `drop`");
+ }
+ }
+ }
+
+ let mut q = BinaryHeap::from(vec![
+ D(0, false),
+ D(1, false),
+ D(2, false),
+ D(3, true),
+ D(4, false),
+ D(5, false),
+ ]);
+
+ catch_unwind(AssertUnwindSafe(|| drop(q.drain_sorted()))).ok();
+
+ assert_eq!(DROPS.load(Ordering::SeqCst), 6);
+}
+
+#[test]
+fn test_extend_ref() {
+ let mut a = BinaryHeap::new();
+ a.push(1);
+ a.push(2);
+
+ a.extend(&[3, 4, 5]);
+
+ assert_eq!(a.len(), 5);
+ assert_eq!(a.into_sorted_vec(), [1, 2, 3, 4, 5]);
+
+ let mut a = BinaryHeap::new();
+ a.push(1);
+ a.push(2);
+ let mut b = BinaryHeap::new();
+ b.push(3);
+ b.push(4);
+ b.push(5);
+
+ a.extend(&b);
+
+ assert_eq!(a.len(), 5);
+ assert_eq!(a.into_sorted_vec(), [1, 2, 3, 4, 5]);
+}
+
+#[test]
+fn test_append() {
+ let mut a = BinaryHeap::from(vec![-10, 1, 2, 3, 3]);
+ let mut b = BinaryHeap::from(vec![-20, 5, 43]);
+
+ a.append(&mut b);
+
+ assert_eq!(a.into_sorted_vec(), [-20, -10, 1, 2, 3, 3, 5, 43]);
+ assert!(b.is_empty());
+}
+
+#[test]
+fn test_append_to_empty() {
+ let mut a = BinaryHeap::new();
+ let mut b = BinaryHeap::from(vec![-20, 5, 43]);
+
+ a.append(&mut b);
+
+ assert_eq!(a.into_sorted_vec(), [-20, 5, 43]);
+ assert!(b.is_empty());
+}
+
+#[test]
+fn test_extend_specialization() {
+ let mut a = BinaryHeap::from(vec![-10, 1, 2, 3, 3]);
+ let b = BinaryHeap::from(vec![-20, 5, 43]);
+
+ a.extend(b);
+
+ assert_eq!(a.into_sorted_vec(), [-20, -10, 1, 2, 3, 3, 5, 43]);
+}
+
+#[allow(dead_code)]
+fn assert_covariance() {
+ fn drain<'new>(d: Drain<'static, &'static str>) -> Drain<'new, &'new str> {
+ d
+ }
+}
+
+#[test]
+fn test_retain() {
+ let mut a = BinaryHeap::from(vec![100, 10, 50, 1, 2, 20, 30]);
+ a.retain(|&x| x != 2);
+
+ // Check that 20 moved into 10's place.
+ assert_eq!(a.clone().into_vec(), [100, 20, 50, 1, 10, 30]);
+
+ a.retain(|_| true);
+
+ assert_eq!(a.clone().into_vec(), [100, 20, 50, 1, 10, 30]);
+
+ a.retain(|&x| x < 50);
+
+ assert_eq!(a.clone().into_vec(), [30, 20, 10, 1]);
+
+ a.retain(|_| false);
+
+ assert!(a.is_empty());
+}
+
+// The old `BinaryHeap` implementation failed this test.
+//
+// Integrity means that all elements are present after a comparison panics,
+// even if the order might not be correct.
+//
+// Destructors must be called exactly once per element.
+// FIXME: re-enable emscripten once it can unwind again
+#[test]
+#[cfg(not(target_os = "emscripten"))]
+fn panic_safe() {
+ use rand::{seq::SliceRandom, thread_rng};
+ use std::cmp;
+ use std::panic::{self, AssertUnwindSafe};
+ use std::sync::atomic::{AtomicUsize, Ordering};
+
+ static DROP_COUNTER: AtomicUsize = AtomicUsize::new(0);
+
+ #[derive(Eq, PartialEq, Ord, Clone, Debug)]
+ struct PanicOrd<T>(T, bool);
+
+ impl<T> Drop for PanicOrd<T> {
+ fn drop(&mut self) {
+ // update global drop count
+ DROP_COUNTER.fetch_add(1, Ordering::SeqCst);
+ }
+ }
+
+ impl<T: PartialOrd> PartialOrd for PanicOrd<T> {
+ fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> {
+ if self.1 || other.1 {
+ panic!("Panicking comparison");
+ }
+ self.0.partial_cmp(&other.0)
+ }
+ }
+ let mut rng = thread_rng();
+ const DATASZ: usize = 32;
+ // Miri is too slow
+ let ntest = if cfg!(miri) { 1 } else { 10 };
+
+ // don't use 0 in the data -- we want to catch the zeroed-out case.
+ let data = (1..=DATASZ).collect::<Vec<_>>();
+
+ // since it's a fuzzy test, run several tries.
+ for _ in 0..ntest {
+ for i in 1..=DATASZ {
+ DROP_COUNTER.store(0, Ordering::SeqCst);
+
+ let mut panic_ords: Vec<_> =
+ data.iter().filter(|&&x| x != i).map(|&x| PanicOrd(x, false)).collect();
+ let panic_item = PanicOrd(i, true);
+
+ // heapify the sane items
+ panic_ords.shuffle(&mut rng);
+ let mut heap = BinaryHeap::from(panic_ords);
+ let inner_data;
+
+ {
+ // push the panicking item to the heap and catch the panic
+ let thread_result = {
+ let mut heap_ref = AssertUnwindSafe(&mut heap);
+ panic::catch_unwind(move || {
+ heap_ref.push(panic_item);
+ })
+ };
+ assert!(thread_result.is_err());
+
+ // Assert no elements were dropped
+ let drops = DROP_COUNTER.load(Ordering::SeqCst);
+ assert!(drops == 0, "Must not drop items. drops={}", drops);
+ inner_data = heap.clone().into_vec();
+ drop(heap);
+ }
+ let drops = DROP_COUNTER.load(Ordering::SeqCst);
+ assert_eq!(drops, DATASZ);
+
+ let mut data_sorted = inner_data.into_iter().map(|p| p.0).collect::<Vec<_>>();
+ data_sorted.sort();
+ assert_eq!(data_sorted, data);
+ }
+ }
+}
diff --git a/library/alloc/src/collections/btree/append.rs b/library/alloc/src/collections/btree/append.rs
new file mode 100644
index 000000000..b6989afb6
--- /dev/null
+++ b/library/alloc/src/collections/btree/append.rs
@@ -0,0 +1,107 @@
+use super::merge_iter::MergeIterInner;
+use super::node::{self, Root};
+use core::alloc::Allocator;
+use core::iter::FusedIterator;
+
+impl<K, V> Root<K, V> {
+ /// Appends all key-value pairs from the union of two ascending iterators,
+ /// incrementing a `length` variable along the way. The latter makes it
+    /// easier for the caller to avoid a leak when a drop handler panics.
+ ///
+ /// If both iterators produce the same key, this method drops the pair from
+ /// the left iterator and appends the pair from the right iterator.
+ ///
+ /// If you want the tree to end up in a strictly ascending order, like for
+ /// a `BTreeMap`, both iterators should produce keys in strictly ascending
+ /// order, each greater than all keys in the tree, including any keys
+ /// already in the tree upon entry.
+ pub fn append_from_sorted_iters<I, A: Allocator + Clone>(
+ &mut self,
+ left: I,
+ right: I,
+ length: &mut usize,
+ alloc: A,
+ ) where
+ K: Ord,
+ I: Iterator<Item = (K, V)> + FusedIterator,
+ {
+ // We prepare to merge `left` and `right` into a sorted sequence in linear time.
+ let iter = MergeIter(MergeIterInner::new(left, right));
+
+ // Meanwhile, we build a tree from the sorted sequence in linear time.
+ self.bulk_push(iter, length, alloc)
+ }
+
+ /// Pushes all key-value pairs to the end of the tree, incrementing a
+ /// `length` variable along the way. The latter makes it easier for the
+    /// caller to avoid a leak when the iterator panics.
+ pub fn bulk_push<I, A: Allocator + Clone>(&mut self, iter: I, length: &mut usize, alloc: A)
+ where
+ I: Iterator<Item = (K, V)>,
+ {
+ let mut cur_node = self.borrow_mut().last_leaf_edge().into_node();
+ // Iterate through all key-value pairs, pushing them into nodes at the right level.
+ for (key, value) in iter {
+ // Try to push key-value pair into the current leaf node.
+ if cur_node.len() < node::CAPACITY {
+ cur_node.push(key, value);
+ } else {
+ // No space left, go up and push there.
+ let mut open_node;
+ let mut test_node = cur_node.forget_type();
+ loop {
+ match test_node.ascend() {
+ Ok(parent) => {
+ let parent = parent.into_node();
+ if parent.len() < node::CAPACITY {
+ // Found a node with space left, push here.
+ open_node = parent;
+ break;
+ } else {
+ // Go up again.
+ test_node = parent.forget_type();
+ }
+ }
+ Err(_) => {
+ // We are at the top, create a new root node and push there.
+ open_node = self.push_internal_level(alloc.clone());
+ break;
+ }
+ }
+ }
+
+ // Push key-value pair and new right subtree.
+ let tree_height = open_node.height() - 1;
+ let mut right_tree = Root::new(alloc.clone());
+ for _ in 0..tree_height {
+ right_tree.push_internal_level(alloc.clone());
+ }
+ open_node.push(key, value, right_tree);
+
+ // Go down to the right-most leaf again.
+ cur_node = open_node.forget_type().last_leaf_edge().into_node();
+ }
+
+ // Increment length every iteration, to make sure the map drops
+            // the appended elements even if advancing the iterator panics.
+ *length += 1;
+ }
+ self.fix_right_border_of_plentiful();
+ }
+}
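+
+// The right-bias on equal keys is observable through the public `BTreeMap::append`
+// API, which is built on `append_from_sorted_iters`. A minimal sketch:
+//
+//     use std::collections::BTreeMap;
+//
+//     let mut left = BTreeMap::from([(1, "left"), (2, "left")]);
+//     let mut right = BTreeMap::from([(2, "right"), (3, "right")]);
+//     left.append(&mut right);
+//     assert_eq!(left[&2], "right"); // the pair from the right map wins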
+
+// An iterator for merging two sorted sequences into one
+struct MergeIter<K, V, I: Iterator<Item = (K, V)>>(MergeIterInner<I>);
+
+impl<K: Ord, V, I> Iterator for MergeIter<K, V, I>
+where
+ I: Iterator<Item = (K, V)> + FusedIterator,
+{
+ type Item = (K, V);
+
+ /// If two keys are equal, returns the key-value pair from the right source.
+ fn next(&mut self) -> Option<(K, V)> {
+ let (a_next, b_next) = self.0.nexts(|a: &(K, V), b: &(K, V)| K::cmp(&a.0, &b.0));
+ b_next.or(a_next)
+ }
+}
diff --git a/library/alloc/src/collections/btree/borrow.rs b/library/alloc/src/collections/btree/borrow.rs
new file mode 100644
index 000000000..016f139a5
--- /dev/null
+++ b/library/alloc/src/collections/btree/borrow.rs
@@ -0,0 +1,47 @@
+use core::marker::PhantomData;
+use core::ptr::NonNull;
+
+/// Models a reborrow of some unique reference, when you know that the reborrow
+/// and all its descendants (i.e., all pointers and references derived from it)
+/// will not be used any more at some point, after which you want to use the
+/// original unique reference again.
+///
+/// The borrow checker usually handles this stacking of borrows for you, but
+/// some control flows that accomplish this stacking are too complicated for
+/// the compiler to follow. A `DormantMutRef` allows you to check borrowing
+/// yourself, while still expressing its stacked nature, and encapsulating
+/// the raw pointer code needed to do this without undefined behavior.
+pub struct DormantMutRef<'a, T> {
+ ptr: NonNull<T>,
+ _marker: PhantomData<&'a mut T>,
+}
+
+unsafe impl<'a, T> Sync for DormantMutRef<'a, T> where &'a mut T: Sync {}
+unsafe impl<'a, T> Send for DormantMutRef<'a, T> where &'a mut T: Send {}
+
+impl<'a, T> DormantMutRef<'a, T> {
+ /// Capture a unique borrow, and immediately reborrow it. For the compiler,
+ /// the lifetime of the new reference is the same as the lifetime of the
+ /// original reference, but you promise to use it for a shorter period.
+ pub fn new(t: &'a mut T) -> (&'a mut T, Self) {
+ let ptr = NonNull::from(t);
+ // SAFETY: we hold the borrow throughout 'a via `_marker`, and we expose
+ // only this reference, so it is unique.
+ let new_ref = unsafe { &mut *ptr.as_ptr() };
+ (new_ref, Self { ptr, _marker: PhantomData })
+ }
+
+ /// Revert to the unique borrow initially captured.
+ ///
+ /// # Safety
+ ///
+ /// The reborrow must have ended, i.e., the reference returned by `new` and
+ /// all pointers and references derived from it, must not be used anymore.
+ pub unsafe fn awaken(self) -> &'a mut T {
+ // SAFETY: our own safety conditions imply this reference is again unique.
+ unsafe { &mut *self.ptr.as_ptr() }
+ }
+}
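+
+// A minimal usage sketch, mirroring the test below: capture a unique borrow, use
+// only the reborrow for a while, then awaken the original once the reborrow (and
+// everything derived from it) is no longer used.
+//
+//     let mut value = 0;
+//     let (reborrow, dormant) = DormantMutRef::new(&mut value);
+//     *reborrow += 1;                              // only the reborrow is used here
+//     let original = unsafe { dormant.awaken() };  // SAFETY: the reborrow is done
+//     *original += 1;
+//     assert_eq!(value, 2);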
+
+#[cfg(test)]
+mod tests;
diff --git a/library/alloc/src/collections/btree/borrow/tests.rs b/library/alloc/src/collections/btree/borrow/tests.rs
new file mode 100644
index 000000000..56a8434fc
--- /dev/null
+++ b/library/alloc/src/collections/btree/borrow/tests.rs
@@ -0,0 +1,19 @@
+use super::DormantMutRef;
+
+#[test]
+fn test_borrow() {
+ let mut data = 1;
+ let mut stack = vec![];
+ let mut rr = &mut data;
+ for factor in [2, 3, 7].iter() {
+ let (r, dormant_r) = DormantMutRef::new(rr);
+ rr = r;
+ assert_eq!(*rr, 1);
+ stack.push((factor, dormant_r));
+ }
+ while let Some((factor, dormant_r)) = stack.pop() {
+ let r = unsafe { dormant_r.awaken() };
+ *r *= factor;
+ }
+ assert_eq!(data, 42);
+}
diff --git a/library/alloc/src/collections/btree/dedup_sorted_iter.rs b/library/alloc/src/collections/btree/dedup_sorted_iter.rs
new file mode 100644
index 000000000..60bf83b83
--- /dev/null
+++ b/library/alloc/src/collections/btree/dedup_sorted_iter.rs
@@ -0,0 +1,47 @@
+use core::iter::Peekable;
+
+/// An iterator that removes duplicate keys from a sorted iterator.
+/// When several consecutive pairs share a key, only the last key-value pair is yielded.
+///
+/// Used by [`BTreeMap::bulk_build_from_sorted_iter`].
+pub struct DedupSortedIter<K, V, I>
+where
+ I: Iterator<Item = (K, V)>,
+{
+ iter: Peekable<I>,
+}
+
+impl<K, V, I> DedupSortedIter<K, V, I>
+where
+ I: Iterator<Item = (K, V)>,
+{
+ pub fn new(iter: I) -> Self {
+ Self { iter: iter.peekable() }
+ }
+}
+
+impl<K, V, I> Iterator for DedupSortedIter<K, V, I>
+where
+ K: Eq,
+ I: Iterator<Item = (K, V)>,
+{
+ type Item = (K, V);
+
+ fn next(&mut self) -> Option<(K, V)> {
+ loop {
+ let next = match self.iter.next() {
+ Some(next) => next,
+ None => return None,
+ };
+
+ let peeked = match self.iter.peek() {
+ Some(peeked) => peeked,
+ None => return Some(next),
+ };
+
+ if next.0 != peeked.0 {
+ return Some(next);
+ }
+ }
+ }
+}
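+
+// A minimal sketch of the dedup behavior: for an already-sorted input such as
+// [(1, "a"), (1, "b"), (2, "c")], the iterator yields [(1, "b"), (2, "c")],
+// keeping only the last pair of each run of equal keys.
+//
+//     let pairs = vec![(1, "a"), (1, "b"), (2, "c")];
+//     let deduped: Vec<_> = DedupSortedIter::new(pairs.into_iter()).collect();
+//     assert_eq!(deduped, [(1, "b"), (2, "c")]);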
diff --git a/library/alloc/src/collections/btree/fix.rs b/library/alloc/src/collections/btree/fix.rs
new file mode 100644
index 000000000..91b612180
--- /dev/null
+++ b/library/alloc/src/collections/btree/fix.rs
@@ -0,0 +1,179 @@
+use super::map::MIN_LEN;
+use super::node::{marker, ForceResult::*, Handle, LeftOrRight::*, NodeRef, Root};
+use core::alloc::Allocator;
+
+impl<'a, K: 'a, V: 'a> NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal> {
+ /// Stocks up a possibly underfull node by merging with or stealing from a
+ /// sibling. If successful but at the cost of shrinking the parent node,
+ /// returns that shrunk parent node. Returns an `Err` if the node is
+ /// an empty root.
+ fn fix_node_through_parent<A: Allocator + Clone>(
+ self,
+ alloc: A,
+ ) -> Result<Option<NodeRef<marker::Mut<'a>, K, V, marker::Internal>>, Self> {
+ let len = self.len();
+ if len >= MIN_LEN {
+ Ok(None)
+ } else {
+ match self.choose_parent_kv() {
+ Ok(Left(mut left_parent_kv)) => {
+ if left_parent_kv.can_merge() {
+ let parent = left_parent_kv.merge_tracking_parent(alloc);
+ Ok(Some(parent))
+ } else {
+ left_parent_kv.bulk_steal_left(MIN_LEN - len);
+ Ok(None)
+ }
+ }
+ Ok(Right(mut right_parent_kv)) => {
+ if right_parent_kv.can_merge() {
+ let parent = right_parent_kv.merge_tracking_parent(alloc);
+ Ok(Some(parent))
+ } else {
+ right_parent_kv.bulk_steal_right(MIN_LEN - len);
+ Ok(None)
+ }
+ }
+ Err(root) => {
+ if len > 0 {
+ Ok(None)
+ } else {
+ Err(root)
+ }
+ }
+ }
+ }
+ }
+}
+
+impl<'a, K: 'a, V: 'a> NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal> {
+ /// Stocks up a possibly underfull node, and if that causes its parent node
+ /// to shrink, stocks up the parent, recursively.
+ /// Returns `true` if it fixed the tree, `false` if it couldn't because the
+ /// root node became empty.
+ ///
+ /// This method does not expect ancestors to already be underfull upon entry
+ /// and panics if it encounters an empty ancestor.
+ pub fn fix_node_and_affected_ancestors<A: Allocator + Clone>(mut self, alloc: A) -> bool {
+ loop {
+ match self.fix_node_through_parent(alloc.clone()) {
+ Ok(Some(parent)) => self = parent.forget_type(),
+ Ok(None) => return true,
+ Err(_) => return false,
+ }
+ }
+ }
+}
+
+impl<K, V> Root<K, V> {
+ /// Removes empty levels on the top, but keeps an empty leaf if the entire tree is empty.
+ pub fn fix_top<A: Allocator + Clone>(&mut self, alloc: A) {
+ while self.height() > 0 && self.len() == 0 {
+ self.pop_internal_level(alloc.clone());
+ }
+ }
+
+    /// Stocks up or merges away any underfull nodes on the right border of the
+    /// tree. The other nodes, those that are neither the root nor a rightmost edge,
+ /// must already have at least MIN_LEN elements.
+ pub fn fix_right_border<A: Allocator + Clone>(&mut self, alloc: A) {
+ self.fix_top(alloc.clone());
+ if self.len() > 0 {
+ self.borrow_mut().last_kv().fix_right_border_of_right_edge(alloc.clone());
+ self.fix_top(alloc);
+ }
+ }
+
+    /// The symmetric counterpart of `fix_right_border`.
+ pub fn fix_left_border<A: Allocator + Clone>(&mut self, alloc: A) {
+ self.fix_top(alloc.clone());
+ if self.len() > 0 {
+ self.borrow_mut().first_kv().fix_left_border_of_left_edge(alloc.clone());
+ self.fix_top(alloc);
+ }
+ }
+
+ /// Stocks up any underfull nodes on the right border of the tree.
+ /// The other nodes, those that are neither the root nor a rightmost edge,
+ /// must be prepared to have up to MIN_LEN elements stolen.
+ pub fn fix_right_border_of_plentiful(&mut self) {
+ let mut cur_node = self.borrow_mut();
+ while let Internal(internal) = cur_node.force() {
+ // Check if right-most child is underfull.
+ let mut last_kv = internal.last_kv().consider_for_balancing();
+ debug_assert!(last_kv.left_child_len() >= MIN_LEN * 2);
+ let right_child_len = last_kv.right_child_len();
+ if right_child_len < MIN_LEN {
+ // We need to steal.
+ last_kv.bulk_steal_left(MIN_LEN - right_child_len);
+ }
+
+ // Go further down.
+ cur_node = last_kv.into_right_child();
+ }
+ }
+}
+
+impl<'a, K: 'a, V: 'a> Handle<NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>, marker::KV> {
+ fn fix_left_border_of_left_edge<A: Allocator + Clone>(mut self, alloc: A) {
+ while let Internal(internal_kv) = self.force() {
+ self = internal_kv.fix_left_child(alloc.clone()).first_kv();
+ debug_assert!(self.reborrow().into_node().len() > MIN_LEN);
+ }
+ }
+
+ fn fix_right_border_of_right_edge<A: Allocator + Clone>(mut self, alloc: A) {
+ while let Internal(internal_kv) = self.force() {
+ self = internal_kv.fix_right_child(alloc.clone()).last_kv();
+ debug_assert!(self.reborrow().into_node().len() > MIN_LEN);
+ }
+ }
+}
+
+impl<'a, K: 'a, V: 'a> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::KV> {
+ /// Stocks up the left child, assuming the right child isn't underfull, and
+ /// provisions an extra element to allow merging its children in turn
+ /// without becoming underfull.
+ /// Returns the left child.
+ fn fix_left_child<A: Allocator + Clone>(
+ self,
+ alloc: A,
+ ) -> NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal> {
+ let mut internal_kv = self.consider_for_balancing();
+ let left_len = internal_kv.left_child_len();
+ debug_assert!(internal_kv.right_child_len() >= MIN_LEN);
+ if internal_kv.can_merge() {
+ internal_kv.merge_tracking_child(alloc)
+ } else {
+ // `MIN_LEN + 1` to avoid readjust if merge happens on the next level.
+ let count = (MIN_LEN + 1).saturating_sub(left_len);
+ if count > 0 {
+ internal_kv.bulk_steal_right(count);
+ }
+ internal_kv.into_left_child()
+ }
+ }
+
+ /// Stocks up the right child, assuming the left child isn't underfull, and
+ /// provisions an extra element to allow merging its children in turn
+ /// without becoming underfull.
+ /// Returns wherever the right child ended up.
+ fn fix_right_child<A: Allocator + Clone>(
+ self,
+ alloc: A,
+ ) -> NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal> {
+ let mut internal_kv = self.consider_for_balancing();
+ let right_len = internal_kv.right_child_len();
+ debug_assert!(internal_kv.left_child_len() >= MIN_LEN);
+ if internal_kv.can_merge() {
+ internal_kv.merge_tracking_child(alloc)
+ } else {
+ // `MIN_LEN + 1` to avoid readjust if merge happens on the next level.
+ let count = (MIN_LEN + 1).saturating_sub(right_len);
+ if count > 0 {
+ internal_kv.bulk_steal_left(count);
+ }
+ internal_kv.into_right_child()
+ }
+ }
+}
diff --git a/library/alloc/src/collections/btree/map.rs b/library/alloc/src/collections/btree/map.rs
new file mode 100644
index 000000000..cacbd54b6
--- /dev/null
+++ b/library/alloc/src/collections/btree/map.rs
@@ -0,0 +1,2423 @@
+use crate::vec::Vec;
+use core::borrow::Borrow;
+use core::cmp::Ordering;
+use core::fmt::{self, Debug};
+use core::hash::{Hash, Hasher};
+use core::iter::{FromIterator, FusedIterator};
+use core::marker::PhantomData;
+use core::mem::{self, ManuallyDrop};
+use core::ops::{Index, RangeBounds};
+use core::ptr;
+
+use crate::alloc::{Allocator, Global};
+
+use super::borrow::DormantMutRef;
+use super::dedup_sorted_iter::DedupSortedIter;
+use super::navigate::{LazyLeafRange, LeafRange};
+use super::node::{self, marker, ForceResult::*, Handle, NodeRef, Root};
+use super::search::SearchResult::*;
+use super::set_val::SetValZST;
+
+mod entry;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use entry::{Entry, OccupiedEntry, OccupiedError, VacantEntry};
+
+use Entry::*;
+
+/// Minimum number of elements in a node that is not a root.
+/// We might temporarily have fewer elements during methods.
+pub(super) const MIN_LEN: usize = node::MIN_LEN_AFTER_SPLIT;
+
+// A tree in a `BTreeMap` is a tree in the `node` module with additional invariants:
+// - Keys must appear in ascending order (according to the key's type).
+// - Every non-leaf node contains at least 1 element (has at least 2 children).
+// - Every non-root node contains at least MIN_LEN elements.
+//
+// An empty map is represented either by the absence of a root node or by a
+// root node that is an empty leaf.
+
+/// An ordered map based on a [B-Tree].
+///
+/// B-Trees represent a fundamental compromise between cache-efficiency and actually minimizing
+/// the amount of work performed in a search. In theory, a binary search tree (BST) is the optimal
+/// choice for a sorted map, as a perfectly balanced BST performs the theoretical minimum amount of
+/// comparisons necessary to find an element (log<sub>2</sub>n). However, in practice the way this
+/// is done is *very* inefficient for modern computer architectures. In particular, every element
+/// is stored in its own individually heap-allocated node. This means that every single insertion
+/// triggers a heap-allocation, and every single comparison should be a cache-miss. Since these
+/// are both notably expensive things to do in practice, we are forced to at very least reconsider
+/// the BST strategy.
+///
+/// A B-Tree instead makes each node contain B-1 to 2B-1 elements in a contiguous array. By doing
+/// this, we reduce the number of allocations by a factor of B, and improve cache efficiency in
+/// searches. However, this does mean that searches will have to do *more* comparisons on average.
+/// The precise number of comparisons depends on the node search strategy used. For optimal cache
+/// efficiency, one could search the nodes linearly. For optimal comparisons, one could search
+/// the node using binary search. As a compromise, one could also perform a linear search
+/// that initially only checks every i<sup>th</sup> element for some choice of i.
+///
+/// Currently, our implementation simply performs naive linear search. This provides excellent
+/// performance on *small* nodes of elements which are cheap to compare. However in the future we
+/// would like to further explore choosing the optimal search strategy based on the choice of B,
+/// and possibly other factors. Using linear search, searching for a random element is expected
+/// to take B * log(n) comparisons, which is generally worse than a BST. In practice,
+/// however, performance is excellent.
+///
+/// It is a logic error for a key to be modified in such a way that the key's ordering relative to
+/// any other key, as determined by the [`Ord`] trait, changes while it is in the map. This is
+/// normally only possible through [`Cell`], [`RefCell`], global state, I/O, or unsafe code.
+/// The behavior resulting from such a logic error is not specified, but will be encapsulated to the
+/// `BTreeMap` that observed the logic error and not result in undefined behavior. This could
+/// include panics, incorrect results, aborts, memory leaks, and non-termination.
+///
+/// Iterators obtained from functions such as [`BTreeMap::iter`], [`BTreeMap::values`], or
+/// [`BTreeMap::keys`] produce their items in order by key, and take worst-case logarithmic and
+/// amortized constant time per item returned.
+///
+/// [B-Tree]: https://en.wikipedia.org/wiki/B-tree
+/// [`Cell`]: core::cell::Cell
+/// [`RefCell`]: core::cell::RefCell
+///
+/// # Examples
+///
+/// ```
+/// use std::collections::BTreeMap;
+///
+/// // type inference lets us omit an explicit type signature (which
+/// // would be `BTreeMap<&str, &str>` in this example).
+/// let mut movie_reviews = BTreeMap::new();
+///
+/// // review some movies.
+/// movie_reviews.insert("Office Space", "Deals with real issues in the workplace.");
+/// movie_reviews.insert("Pulp Fiction", "Masterpiece.");
+/// movie_reviews.insert("The Godfather", "Very enjoyable.");
+/// movie_reviews.insert("The Blues Brothers", "Eye lyked it a lot.");
+///
+/// // check for a specific one.
+/// if !movie_reviews.contains_key("Les Misérables") {
+/// println!("We've got {} reviews, but Les Misérables ain't one.",
+/// movie_reviews.len());
+/// }
+///
+/// // oops, this review has a lot of spelling mistakes, let's delete it.
+/// movie_reviews.remove("The Blues Brothers");
+///
+/// // look up the values associated with some keys.
+/// let to_find = ["Up!", "Office Space"];
+/// for movie in &to_find {
+/// match movie_reviews.get(movie) {
+/// Some(review) => println!("{movie}: {review}"),
+/// None => println!("{movie} is unreviewed.")
+/// }
+/// }
+///
+/// // Look up the value for a key (will panic if the key is not found).
+/// println!("Movie review: {}", movie_reviews["Office Space"]);
+///
+/// // iterate over everything.
+/// for (movie, review) in &movie_reviews {
+/// println!("{movie}: \"{review}\"");
+/// }
+/// ```
+///
+/// A `BTreeMap` with a known list of items can be initialized from an array:
+///
+/// ```
+/// use std::collections::BTreeMap;
+///
+/// let solar_distance = BTreeMap::from([
+/// ("Mercury", 0.4),
+/// ("Venus", 0.7),
+/// ("Earth", 1.0),
+/// ("Mars", 1.5),
+/// ]);
+/// ```
+///
+/// `BTreeMap` implements an [`Entry API`], which allows for complex
+/// methods of getting, setting, updating and removing keys and their values:
+///
+/// [`Entry API`]: BTreeMap::entry
+///
+/// ```
+/// use std::collections::BTreeMap;
+///
+/// // type inference lets us omit an explicit type signature (which
+/// // would be `BTreeMap<&str, u8>` in this example).
+/// let mut player_stats = BTreeMap::new();
+///
+/// fn random_stat_buff() -> u8 {
+/// // could actually return some random value here - let's just return
+/// // some fixed value for now
+/// 42
+/// }
+///
+/// // insert a key only if it doesn't already exist
+/// player_stats.entry("health").or_insert(100);
+///
+/// // insert a key using a function that provides a new value only if it
+/// // doesn't already exist
+/// player_stats.entry("defence").or_insert_with(random_stat_buff);
+///
+/// // update a key, guarding against the key possibly not being set
+/// let stat = player_stats.entry("attack").or_insert(100);
+/// *stat += random_stat_buff();
+///
+/// // modify an entry before an insert with in-place mutation
+/// player_stats.entry("mana").and_modify(|mana| *mana += 200).or_insert(100);
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "BTreeMap")]
+#[rustc_insignificant_dtor]
+pub struct BTreeMap<
+ K,
+ V,
+ #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator + Clone = Global,
+> {
+ root: Option<Root<K, V>>,
+ length: usize,
+ /// `ManuallyDrop` to control drop order (needs to be dropped after all the nodes).
+ pub(super) alloc: ManuallyDrop<A>,
+ // For dropck; the `Box` avoids making the `Unpin` impl more strict than before
+ _marker: PhantomData<crate::boxed::Box<(K, V)>>,
+}
+
+#[stable(feature = "btree_drop", since = "1.7.0")]
+unsafe impl<#[may_dangle] K, #[may_dangle] V, A: Allocator + Clone> Drop for BTreeMap<K, V, A> {
+ fn drop(&mut self) {
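+        // Move the whole map out of `self` and hand it to the consuming iterator,
+        // whose drop logic walks and frees every node (and eventually drops the
+        // allocator it takes ownership of).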
+ drop(unsafe { ptr::read(self) }.into_iter())
+ }
+}
+
+// FIXME: This implementation is "wrong", but changing it would be a breaking change.
+// (The bounds of the automatic `UnwindSafe` implementation have been like this since Rust 1.50.)
+// Maybe we can fix it nonetheless with a crater run, or if the `UnwindSafe`
+// traits are deprecated, or disarmed (no longer causing hard errors) in the future.
+#[stable(feature = "btree_unwindsafe", since = "1.64.0")]
+impl<K, V, A: Allocator + Clone> core::panic::UnwindSafe for BTreeMap<K, V, A>
+where
+ A: core::panic::UnwindSafe,
+ K: core::panic::RefUnwindSafe,
+ V: core::panic::RefUnwindSafe,
+{
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K: Clone, V: Clone, A: Allocator + Clone> Clone for BTreeMap<K, V, A> {
+ fn clone(&self) -> BTreeMap<K, V, A> {
+ fn clone_subtree<'a, K: Clone, V: Clone, A: Allocator + Clone>(
+ node: NodeRef<marker::Immut<'a>, K, V, marker::LeafOrInternal>,
+ alloc: A,
+ ) -> BTreeMap<K, V, A>
+ where
+ K: 'a,
+ V: 'a,
+ {
+ match node.force() {
+ Leaf(leaf) => {
+ let mut out_tree = BTreeMap {
+ root: Some(Root::new(alloc.clone())),
+ length: 0,
+ alloc: ManuallyDrop::new(alloc),
+ _marker: PhantomData,
+ };
+
+ {
+ let root = out_tree.root.as_mut().unwrap(); // unwrap succeeds because we just wrapped
+ let mut out_node = match root.borrow_mut().force() {
+ Leaf(leaf) => leaf,
+ Internal(_) => unreachable!(),
+ };
+
+ let mut in_edge = leaf.first_edge();
+ while let Ok(kv) = in_edge.right_kv() {
+ let (k, v) = kv.into_kv();
+ in_edge = kv.right_edge();
+
+ out_node.push(k.clone(), v.clone());
+ out_tree.length += 1;
+ }
+ }
+
+ out_tree
+ }
+ Internal(internal) => {
+ let mut out_tree =
+ clone_subtree(internal.first_edge().descend(), alloc.clone());
+
+ {
+ let out_root = out_tree.root.as_mut().unwrap();
+ let mut out_node = out_root.push_internal_level(alloc.clone());
+ let mut in_edge = internal.first_edge();
+ while let Ok(kv) = in_edge.right_kv() {
+ let (k, v) = kv.into_kv();
+ in_edge = kv.right_edge();
+
+ let k = (*k).clone();
+ let v = (*v).clone();
+ let subtree = clone_subtree(in_edge.descend(), alloc.clone());
+
+ // We can't destructure subtree directly
+ // because BTreeMap implements Drop
+ let (subroot, sublength) = unsafe {
+ let subtree = ManuallyDrop::new(subtree);
+ let root = ptr::read(&subtree.root);
+ let length = subtree.length;
+ (root, length)
+ };
+
+ out_node.push(
+ k,
+ v,
+ subroot.unwrap_or_else(|| Root::new(alloc.clone())),
+ );
+ out_tree.length += 1 + sublength;
+ }
+ }
+
+ out_tree
+ }
+ }
+ }
+
+ if self.is_empty() {
+ BTreeMap::new_in((*self.alloc).clone())
+ } else {
+ clone_subtree(self.root.as_ref().unwrap().reborrow(), (*self.alloc).clone()) // unwrap succeeds because not empty
+ }
+ }
+}
+
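+// `Recover` is how `BTreeSet<T>` (a thin wrapper around `BTreeMap<T, SetValZST>`)
+// reaches the stored key itself: `get`, `take` and `replace` below operate on the
+// key rather than on the zero-sized value.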
+impl<K, Q: ?Sized, A: Allocator + Clone> super::Recover<Q> for BTreeMap<K, SetValZST, A>
+where
+ K: Borrow<Q> + Ord,
+ Q: Ord,
+{
+ type Key = K;
+
+ fn get(&self, key: &Q) -> Option<&K> {
+ let root_node = self.root.as_ref()?.reborrow();
+ match root_node.search_tree(key) {
+ Found(handle) => Some(handle.into_kv().0),
+ GoDown(_) => None,
+ }
+ }
+
+ fn take(&mut self, key: &Q) -> Option<K> {
+ let (map, dormant_map) = DormantMutRef::new(self);
+ let root_node = map.root.as_mut()?.borrow_mut();
+ match root_node.search_tree(key) {
+ Found(handle) => Some(
+ OccupiedEntry {
+ handle,
+ dormant_map,
+ alloc: (*map.alloc).clone(),
+ _marker: PhantomData,
+ }
+ .remove_kv()
+ .0,
+ ),
+ GoDown(_) => None,
+ }
+ }
+
+ fn replace(&mut self, key: K) -> Option<K> {
+ let (map, dormant_map) = DormantMutRef::new(self);
+ let root_node =
+ map.root.get_or_insert_with(|| Root::new((*map.alloc).clone())).borrow_mut();
+ match root_node.search_tree::<K>(&key) {
+ Found(mut kv) => Some(mem::replace(kv.key_mut(), key)),
+ GoDown(handle) => {
+ VacantEntry {
+ key,
+ handle: Some(handle),
+ dormant_map,
+ alloc: (*map.alloc).clone(),
+ _marker: PhantomData,
+ }
+ .insert(SetValZST::default());
+ None
+ }
+ }
+ }
+}
+
+/// An iterator over the entries of a `BTreeMap`.
+///
+/// This `struct` is created by the [`iter`] method on [`BTreeMap`]. See its
+/// documentation for more.
+///
+/// [`iter`]: BTreeMap::iter
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Iter<'a, K: 'a, V: 'a> {
+ range: LazyLeafRange<marker::Immut<'a>, K, V>,
+ length: usize,
+}
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for Iter<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.clone()).finish()
+ }
+}
+
+/// A mutable iterator over the entries of a `BTreeMap`.
+///
+/// This `struct` is created by the [`iter_mut`] method on [`BTreeMap`]. See its
+/// documentation for more.
+///
+/// [`iter_mut`]: BTreeMap::iter_mut
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct IterMut<'a, K: 'a, V: 'a> {
+ range: LazyLeafRange<marker::ValMut<'a>, K, V>,
+ length: usize,
+
+ // Be invariant in `K` and `V`
+ _marker: PhantomData<&'a mut (K, V)>,
+}
+
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for IterMut<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let range = Iter { range: self.range.reborrow(), length: self.length };
+ f.debug_list().entries(range).finish()
+ }
+}
+
+/// An owning iterator over the entries of a `BTreeMap`.
+///
+/// This `struct` is created by the [`into_iter`] method on [`BTreeMap`]
+/// (provided by the [`IntoIterator`] trait). See its documentation for more.
+///
+/// [`into_iter`]: IntoIterator::into_iter
+/// [`IntoIterator`]: core::iter::IntoIterator
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_insignificant_dtor]
+pub struct IntoIter<
+ K,
+ V,
+ #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator + Clone = Global,
+> {
+ range: LazyLeafRange<marker::Dying, K, V>,
+ length: usize,
+ /// The BTreeMap will outlive this IntoIter so we don't care about drop order for `alloc`.
+ alloc: A,
+}
+
+impl<K, V, A: Allocator + Clone> IntoIter<K, V, A> {
+ /// Returns an iterator of references over the remaining items.
+ #[inline]
+ pub(super) fn iter(&self) -> Iter<'_, K, V> {
+ Iter { range: self.range.reborrow(), length: self.length }
+ }
+}
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<K: Debug, V: Debug, A: Allocator + Clone> Debug for IntoIter<K, V, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.iter()).finish()
+ }
+}
+
+/// An iterator over the keys of a `BTreeMap`.
+///
+/// This `struct` is created by the [`keys`] method on [`BTreeMap`]. See its
+/// documentation for more.
+///
+/// [`keys`]: BTreeMap::keys
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Keys<'a, K, V> {
+ inner: Iter<'a, K, V>,
+}
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<K: fmt::Debug, V> fmt::Debug for Keys<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.clone()).finish()
+ }
+}
+
+/// An iterator over the values of a `BTreeMap`.
+///
+/// This `struct` is created by the [`values`] method on [`BTreeMap`]. See its
+/// documentation for more.
+///
+/// [`values`]: BTreeMap::values
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Values<'a, K, V> {
+ inner: Iter<'a, K, V>,
+}
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<K, V: fmt::Debug> fmt::Debug for Values<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.clone()).finish()
+ }
+}
+
+/// A mutable iterator over the values of a `BTreeMap`.
+///
+/// This `struct` is created by the [`values_mut`] method on [`BTreeMap`]. See its
+/// documentation for more.
+///
+/// [`values_mut`]: BTreeMap::values_mut
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "map_values_mut", since = "1.10.0")]
+pub struct ValuesMut<'a, K, V> {
+ inner: IterMut<'a, K, V>,
+}
+
+#[stable(feature = "map_values_mut", since = "1.10.0")]
+impl<K, V: fmt::Debug> fmt::Debug for ValuesMut<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.inner.iter().map(|(_, val)| val)).finish()
+ }
+}
+
+/// An owning iterator over the keys of a `BTreeMap`.
+///
+/// This `struct` is created by the [`into_keys`] method on [`BTreeMap`].
+/// See its documentation for more.
+///
+/// [`into_keys`]: BTreeMap::into_keys
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
+pub struct IntoKeys<K, V, A: Allocator + Clone = Global> {
+ inner: IntoIter<K, V, A>,
+}
+
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
+impl<K: fmt::Debug, V, A: Allocator + Clone> fmt::Debug for IntoKeys<K, V, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.inner.iter().map(|(key, _)| key)).finish()
+ }
+}
+
+/// An owning iterator over the values of a `BTreeMap`.
+///
+/// This `struct` is created by the [`into_values`] method on [`BTreeMap`].
+/// See its documentation for more.
+///
+/// [`into_values`]: BTreeMap::into_values
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
+pub struct IntoValues<
+ K,
+ V,
+ #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator + Clone = Global,
+> {
+ inner: IntoIter<K, V, A>,
+}
+
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
+impl<K, V: fmt::Debug, A: Allocator + Clone> fmt::Debug for IntoValues<K, V, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.inner.iter().map(|(_, val)| val)).finish()
+ }
+}
+
+/// An iterator over a sub-range of entries in a `BTreeMap`.
+///
+/// This `struct` is created by the [`range`] method on [`BTreeMap`]. See its
+/// documentation for more.
+///
+/// [`range`]: BTreeMap::range
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "btree_range", since = "1.17.0")]
+pub struct Range<'a, K: 'a, V: 'a> {
+ inner: LeafRange<marker::Immut<'a>, K, V>,
+}
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for Range<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self.clone()).finish()
+ }
+}
+
+/// A mutable iterator over a sub-range of entries in a `BTreeMap`.
+///
+/// This `struct` is created by the [`range_mut`] method on [`BTreeMap`]. See its
+/// documentation for more.
+///
+/// [`range_mut`]: BTreeMap::range_mut
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "btree_range", since = "1.17.0")]
+pub struct RangeMut<'a, K: 'a, V: 'a> {
+ inner: LeafRange<marker::ValMut<'a>, K, V>,
+
+ // Be invariant in `K` and `V`
+ _marker: PhantomData<&'a mut (K, V)>,
+}
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<K: fmt::Debug, V: fmt::Debug> fmt::Debug for RangeMut<'_, K, V> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let range = Range { inner: self.inner.reborrow() };
+ f.debug_list().entries(range).finish()
+ }
+}
+
+impl<K, V> BTreeMap<K, V> {
+ /// Makes a new, empty `BTreeMap`.
+ ///
+ /// Does not allocate anything on its own.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map = BTreeMap::new();
+ ///
+ /// // entries can now be inserted into the empty map
+ /// map.insert(1, "a");
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_btree_new", issue = "71835")]
+ #[must_use]
+ pub const fn new() -> BTreeMap<K, V> {
+ BTreeMap { root: None, length: 0, alloc: ManuallyDrop::new(Global), _marker: PhantomData }
+ }
+}
+
+impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
+ /// Clears the map, removing all elements.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut a = BTreeMap::new();
+ /// a.insert(1, "a");
+ /// a.clear();
+ /// assert!(a.is_empty());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn clear(&mut self) {
+ // avoid moving the allocator
+ mem::drop(BTreeMap {
+ root: mem::replace(&mut self.root, None),
+ length: mem::replace(&mut self.length, 0),
+ alloc: self.alloc.clone(),
+ _marker: PhantomData,
+ });
+ }
+
+    /// Makes a new, empty `BTreeMap` backed by the given allocator, with a reasonable choice for B.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// # #![feature(allocator_api)]
+ /// # #![feature(btreemap_alloc)]
+ /// use std::collections::BTreeMap;
+ /// use std::alloc::Global;
+ ///
+ /// let mut map = BTreeMap::new_in(Global);
+ ///
+ /// // entries can now be inserted into the empty map
+ /// map.insert(1, "a");
+ /// ```
+ #[unstable(feature = "btreemap_alloc", issue = "32838")]
+ pub fn new_in(alloc: A) -> BTreeMap<K, V, A> {
+ BTreeMap { root: None, length: 0, alloc: ManuallyDrop::new(alloc), _marker: PhantomData }
+ }
+}
+
+impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
+ /// Returns a reference to the value corresponding to the key.
+ ///
+ /// The key may be any borrowed form of the map's key type, but the ordering
+ /// on the borrowed form *must* match the ordering on the key type.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map = BTreeMap::new();
+ /// map.insert(1, "a");
+ /// assert_eq!(map.get(&1), Some(&"a"));
+ /// assert_eq!(map.get(&2), None);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn get<Q: ?Sized>(&self, key: &Q) -> Option<&V>
+ where
+ K: Borrow<Q> + Ord,
+ Q: Ord,
+ {
+ let root_node = self.root.as_ref()?.reborrow();
+ match root_node.search_tree(key) {
+ Found(handle) => Some(handle.into_kv().1),
+ GoDown(_) => None,
+ }
+ }
+
+ /// Returns the key-value pair corresponding to the supplied key.
+ ///
+ /// The supplied key may be any borrowed form of the map's key type, but the ordering
+ /// on the borrowed form *must* match the ordering on the key type.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map = BTreeMap::new();
+ /// map.insert(1, "a");
+ /// assert_eq!(map.get_key_value(&1), Some((&1, &"a")));
+ /// assert_eq!(map.get_key_value(&2), None);
+ /// ```
+ #[stable(feature = "map_get_key_value", since = "1.40.0")]
+ pub fn get_key_value<Q: ?Sized>(&self, k: &Q) -> Option<(&K, &V)>
+ where
+ K: Borrow<Q> + Ord,
+ Q: Ord,
+ {
+ let root_node = self.root.as_ref()?.reborrow();
+ match root_node.search_tree(k) {
+ Found(handle) => Some(handle.into_kv()),
+ GoDown(_) => None,
+ }
+ }
+
+ /// Returns the first key-value pair in the map.
+ /// The key in this pair is the minimum key in the map.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(map_first_last)]
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map = BTreeMap::new();
+ /// assert_eq!(map.first_key_value(), None);
+ /// map.insert(1, "b");
+ /// map.insert(2, "a");
+ /// assert_eq!(map.first_key_value(), Some((&1, &"b")));
+ /// ```
+ #[unstable(feature = "map_first_last", issue = "62924")]
+ pub fn first_key_value(&self) -> Option<(&K, &V)>
+ where
+ K: Ord,
+ {
+ let root_node = self.root.as_ref()?.reborrow();
+ root_node.first_leaf_edge().right_kv().ok().map(Handle::into_kv)
+ }
+
+ /// Returns the first entry in the map for in-place manipulation.
+ /// The key of this entry is the minimum key in the map.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(map_first_last)]
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map = BTreeMap::new();
+ /// map.insert(1, "a");
+ /// map.insert(2, "b");
+ /// if let Some(mut entry) = map.first_entry() {
+ /// if *entry.key() > 0 {
+ /// entry.insert("first");
+ /// }
+ /// }
+ /// assert_eq!(*map.get(&1).unwrap(), "first");
+ /// assert_eq!(*map.get(&2).unwrap(), "b");
+ /// ```
+ #[unstable(feature = "map_first_last", issue = "62924")]
+ pub fn first_entry(&mut self) -> Option<OccupiedEntry<'_, K, V, A>>
+ where
+ K: Ord,
+ {
+ let (map, dormant_map) = DormantMutRef::new(self);
+ let root_node = map.root.as_mut()?.borrow_mut();
+ let kv = root_node.first_leaf_edge().right_kv().ok()?;
+ Some(OccupiedEntry {
+ handle: kv.forget_node_type(),
+ dormant_map,
+ alloc: (*map.alloc).clone(),
+ _marker: PhantomData,
+ })
+ }
+
+ /// Removes and returns the first element in the map.
+ /// The key of this element is the minimum key that was in the map.
+ ///
+ /// # Examples
+ ///
+ /// Draining elements in ascending order, while keeping a usable map each iteration.
+ ///
+ /// ```
+ /// #![feature(map_first_last)]
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map = BTreeMap::new();
+ /// map.insert(1, "a");
+ /// map.insert(2, "b");
+ /// while let Some((key, _val)) = map.pop_first() {
+ /// assert!(map.iter().all(|(k, _v)| *k > key));
+ /// }
+ /// assert!(map.is_empty());
+ /// ```
+ #[unstable(feature = "map_first_last", issue = "62924")]
+ pub fn pop_first(&mut self) -> Option<(K, V)>
+ where
+ K: Ord,
+ {
+ self.first_entry().map(|entry| entry.remove_entry())
+ }
+
+ /// Returns the last key-value pair in the map.
+ /// The key in this pair is the maximum key in the map.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(map_first_last)]
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map = BTreeMap::new();
+ /// map.insert(1, "b");
+ /// map.insert(2, "a");
+ /// assert_eq!(map.last_key_value(), Some((&2, &"a")));
+ /// ```
+ #[unstable(feature = "map_first_last", issue = "62924")]
+ pub fn last_key_value(&self) -> Option<(&K, &V)>
+ where
+ K: Ord,
+ {
+ let root_node = self.root.as_ref()?.reborrow();
+ root_node.last_leaf_edge().left_kv().ok().map(Handle::into_kv)
+ }
+
+ /// Returns the last entry in the map for in-place manipulation.
+ /// The key of this entry is the maximum key in the map.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(map_first_last)]
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map = BTreeMap::new();
+ /// map.insert(1, "a");
+ /// map.insert(2, "b");
+ /// if let Some(mut entry) = map.last_entry() {
+ /// if *entry.key() > 0 {
+ /// entry.insert("last");
+ /// }
+ /// }
+ /// assert_eq!(*map.get(&1).unwrap(), "a");
+ /// assert_eq!(*map.get(&2).unwrap(), "last");
+ /// ```
+ #[unstable(feature = "map_first_last", issue = "62924")]
+ pub fn last_entry(&mut self) -> Option<OccupiedEntry<'_, K, V, A>>
+ where
+ K: Ord,
+ {
+ let (map, dormant_map) = DormantMutRef::new(self);
+ let root_node = map.root.as_mut()?.borrow_mut();
+ let kv = root_node.last_leaf_edge().left_kv().ok()?;
+ Some(OccupiedEntry {
+ handle: kv.forget_node_type(),
+ dormant_map,
+ alloc: (*map.alloc).clone(),
+ _marker: PhantomData,
+ })
+ }
+
+ /// Removes and returns the last element in the map.
+ /// The key of this element is the maximum key that was in the map.
+ ///
+ /// # Examples
+ ///
+ /// Draining elements in descending order, while keeping a usable map each iteration.
+ ///
+ /// ```
+ /// #![feature(map_first_last)]
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map = BTreeMap::new();
+ /// map.insert(1, "a");
+ /// map.insert(2, "b");
+ /// while let Some((key, _val)) = map.pop_last() {
+ /// assert!(map.iter().all(|(k, _v)| *k < key));
+ /// }
+ /// assert!(map.is_empty());
+ /// ```
+ #[unstable(feature = "map_first_last", issue = "62924")]
+ pub fn pop_last(&mut self) -> Option<(K, V)>
+ where
+ K: Ord,
+ {
+ self.last_entry().map(|entry| entry.remove_entry())
+ }
+
+ /// Returns `true` if the map contains a value for the specified key.
+ ///
+ /// The key may be any borrowed form of the map's key type, but the ordering
+ /// on the borrowed form *must* match the ordering on the key type.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map = BTreeMap::new();
+ /// map.insert(1, "a");
+ /// assert_eq!(map.contains_key(&1), true);
+ /// assert_eq!(map.contains_key(&2), false);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn contains_key<Q: ?Sized>(&self, key: &Q) -> bool
+ where
+ K: Borrow<Q> + Ord,
+ Q: Ord,
+ {
+ self.get(key).is_some()
+ }
+
+ /// Returns a mutable reference to the value corresponding to the key.
+ ///
+ /// The key may be any borrowed form of the map's key type, but the ordering
+ /// on the borrowed form *must* match the ordering on the key type.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map = BTreeMap::new();
+ /// map.insert(1, "a");
+ /// if let Some(x) = map.get_mut(&1) {
+ /// *x = "b";
+ /// }
+ /// assert_eq!(map[&1], "b");
+ /// ```
+    // See `get` for implementation notes; this is basically a copy-paste with `mut`s added.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn get_mut<Q: ?Sized>(&mut self, key: &Q) -> Option<&mut V>
+ where
+ K: Borrow<Q> + Ord,
+ Q: Ord,
+ {
+ let root_node = self.root.as_mut()?.borrow_mut();
+ match root_node.search_tree(key) {
+ Found(handle) => Some(handle.into_val_mut()),
+ GoDown(_) => None,
+ }
+ }
+
+ /// Inserts a key-value pair into the map.
+ ///
+ /// If the map did not have this key present, `None` is returned.
+ ///
+ /// If the map did have this key present, the value is updated, and the old
+ /// value is returned. The key is not updated, though; this matters for
+ /// types that can be `==` without being identical. See the [module-level
+ /// documentation] for more.
+ ///
+ /// [module-level documentation]: index.html#insert-and-complex-keys
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map = BTreeMap::new();
+ /// assert_eq!(map.insert(37, "a"), None);
+ /// assert_eq!(map.is_empty(), false);
+ ///
+ /// map.insert(37, "b");
+ /// assert_eq!(map.insert(37, "c"), Some("b"));
+ /// assert_eq!(map[&37], "c");
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn insert(&mut self, key: K, value: V) -> Option<V>
+ where
+ K: Ord,
+ {
+ match self.entry(key) {
+ Occupied(mut entry) => Some(entry.insert(value)),
+ Vacant(entry) => {
+ entry.insert(value);
+ None
+ }
+ }
+ }
+
+ /// Tries to insert a key-value pair into the map, and returns
+ /// a mutable reference to the value in the entry.
+ ///
+ /// If the map already had this key present, nothing is updated, and
+ /// an error containing the occupied entry and the value is returned.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(map_try_insert)]
+ ///
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map = BTreeMap::new();
+ /// assert_eq!(map.try_insert(37, "a").unwrap(), &"a");
+ ///
+ /// let err = map.try_insert(37, "b").unwrap_err();
+ /// assert_eq!(err.entry.key(), &37);
+ /// assert_eq!(err.entry.get(), &"a");
+ /// assert_eq!(err.value, "b");
+ /// ```
+ #[unstable(feature = "map_try_insert", issue = "82766")]
+ pub fn try_insert(&mut self, key: K, value: V) -> Result<&mut V, OccupiedError<'_, K, V, A>>
+ where
+ K: Ord,
+ {
+ match self.entry(key) {
+ Occupied(entry) => Err(OccupiedError { entry, value }),
+ Vacant(entry) => Ok(entry.insert(value)),
+ }
+ }
+
+ /// Removes a key from the map, returning the value at the key if the key
+ /// was previously in the map.
+ ///
+ /// The key may be any borrowed form of the map's key type, but the ordering
+ /// on the borrowed form *must* match the ordering on the key type.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map = BTreeMap::new();
+ /// map.insert(1, "a");
+ /// assert_eq!(map.remove(&1), Some("a"));
+ /// assert_eq!(map.remove(&1), None);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn remove<Q: ?Sized>(&mut self, key: &Q) -> Option<V>
+ where
+ K: Borrow<Q> + Ord,
+ Q: Ord,
+ {
+ self.remove_entry(key).map(|(_, v)| v)
+ }
+
+ /// Removes a key from the map, returning the stored key and value if the key
+ /// was previously in the map.
+ ///
+ /// The key may be any borrowed form of the map's key type, but the ordering
+ /// on the borrowed form *must* match the ordering on the key type.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map = BTreeMap::new();
+ /// map.insert(1, "a");
+ /// assert_eq!(map.remove_entry(&1), Some((1, "a")));
+ /// assert_eq!(map.remove_entry(&1), None);
+ /// ```
+ #[stable(feature = "btreemap_remove_entry", since = "1.45.0")]
+ pub fn remove_entry<Q: ?Sized>(&mut self, key: &Q) -> Option<(K, V)>
+ where
+ K: Borrow<Q> + Ord,
+ Q: Ord,
+ {
+ let (map, dormant_map) = DormantMutRef::new(self);
+ let root_node = map.root.as_mut()?.borrow_mut();
+ match root_node.search_tree(key) {
+ Found(handle) => Some(
+ OccupiedEntry {
+ handle,
+ dormant_map,
+ alloc: (*map.alloc).clone(),
+ _marker: PhantomData,
+ }
+ .remove_entry(),
+ ),
+ GoDown(_) => None,
+ }
+ }
+
+ /// Retains only the elements specified by the predicate.
+ ///
+ /// In other words, remove all pairs `(k, v)` for which `f(&k, &mut v)` returns `false`.
+ /// The elements are visited in ascending key order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map: BTreeMap<i32, i32> = (0..8).map(|x| (x, x*10)).collect();
+ /// // Keep only the elements with even-numbered keys.
+ /// map.retain(|&k, _| k % 2 == 0);
+ /// assert!(map.into_iter().eq(vec![(0, 0), (2, 20), (4, 40), (6, 60)]));
+ /// ```
+ #[inline]
+ #[stable(feature = "btree_retain", since = "1.53.0")]
+ pub fn retain<F>(&mut self, mut f: F)
+ where
+ K: Ord,
+ F: FnMut(&K, &mut V) -> bool,
+ {
+ self.drain_filter(|k, v| !f(k, v));
+ }
+
+ /// Moves all elements from `other` into `self`, leaving `other` empty.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut a = BTreeMap::new();
+ /// a.insert(1, "a");
+ /// a.insert(2, "b");
+ /// a.insert(3, "c");
+ ///
+ /// let mut b = BTreeMap::new();
+ /// b.insert(3, "d");
+ /// b.insert(4, "e");
+ /// b.insert(5, "f");
+ ///
+ /// a.append(&mut b);
+ ///
+ /// assert_eq!(a.len(), 5);
+ /// assert_eq!(b.len(), 0);
+ ///
+ /// assert_eq!(a[&1], "a");
+ /// assert_eq!(a[&2], "b");
+ /// assert_eq!(a[&3], "d");
+ /// assert_eq!(a[&4], "e");
+ /// assert_eq!(a[&5], "f");
+ /// ```
+ #[stable(feature = "btree_append", since = "1.11.0")]
+ pub fn append(&mut self, other: &mut Self)
+ where
+ K: Ord,
+ A: Clone,
+ {
+ // Do we have to append anything at all?
+ if other.is_empty() {
+ return;
+ }
+
+ // We can just swap `self` and `other` if `self` is empty.
+ if self.is_empty() {
+ mem::swap(self, other);
+ return;
+ }
+
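+        // Otherwise, consume both maps into sorted iterators and bulk-build a new
+        // tree from their merge, reusing `self`'s allocator.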
+ let self_iter = mem::replace(self, Self::new_in((*self.alloc).clone())).into_iter();
+ let other_iter = mem::replace(other, Self::new_in((*self.alloc).clone())).into_iter();
+ let root = self.root.get_or_insert_with(|| Root::new((*self.alloc).clone()));
+ root.append_from_sorted_iters(
+ self_iter,
+ other_iter,
+ &mut self.length,
+ (*self.alloc).clone(),
+ )
+ }
+
+ /// Constructs a double-ended iterator over a sub-range of elements in the map.
+ /// The simplest way is to use the range syntax `min..max`, thus `range(min..max)` will
+ /// yield elements from min (inclusive) to max (exclusive).
+ /// The range may also be entered as `(Bound<T>, Bound<T>)`, so for example
+ /// `range((Excluded(4), Included(10)))` will yield a left-exclusive, right-inclusive
+ /// range from 4 to 10.
+ ///
+ /// # Panics
+ ///
+ /// Panics if range `start > end`.
+ /// Panics if range `start == end` and both bounds are `Excluded`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ /// use std::ops::Bound::Included;
+ ///
+ /// let mut map = BTreeMap::new();
+ /// map.insert(3, "a");
+ /// map.insert(5, "b");
+ /// map.insert(8, "c");
+ /// for (&key, &value) in map.range((Included(&4), Included(&8))) {
+ /// println!("{key}: {value}");
+ /// }
+ /// assert_eq!(Some((&5, &"b")), map.range(4..).next());
+ /// ```
+ #[stable(feature = "btree_range", since = "1.17.0")]
+ pub fn range<T: ?Sized, R>(&self, range: R) -> Range<'_, K, V>
+ where
+ T: Ord,
+ K: Borrow<T> + Ord,
+ R: RangeBounds<T>,
+ {
+ if let Some(root) = &self.root {
+ Range { inner: root.reborrow().range_search(range) }
+ } else {
+ Range { inner: LeafRange::none() }
+ }
+ }
+
+ /// Constructs a mutable double-ended iterator over a sub-range of elements in the map.
+ /// The simplest way is to use the range syntax `min..max`, thus `range(min..max)` will
+ /// yield elements from min (inclusive) to max (exclusive).
+ /// The range may also be entered as `(Bound<T>, Bound<T>)`, so for example
+ /// `range((Excluded(4), Included(10)))` will yield a left-exclusive, right-inclusive
+ /// range from 4 to 10.
+ ///
+ /// # Panics
+ ///
+ /// Panics if range `start > end`.
+ /// Panics if range `start == end` and both bounds are `Excluded`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map: BTreeMap<&str, i32> =
+ /// [("Alice", 0), ("Bob", 0), ("Carol", 0), ("Cheryl", 0)].into();
+ /// for (_, balance) in map.range_mut("B".."Cheryl") {
+ /// *balance += 100;
+ /// }
+ /// for (name, balance) in &map {
+ /// println!("{name} => {balance}");
+ /// }
+ /// ```
+ #[stable(feature = "btree_range", since = "1.17.0")]
+ pub fn range_mut<T: ?Sized, R>(&mut self, range: R) -> RangeMut<'_, K, V>
+ where
+ T: Ord,
+ K: Borrow<T> + Ord,
+ R: RangeBounds<T>,
+ {
+ if let Some(root) = &mut self.root {
+ RangeMut { inner: root.borrow_valmut().range_search(range), _marker: PhantomData }
+ } else {
+ RangeMut { inner: LeafRange::none(), _marker: PhantomData }
+ }
+ }
+
+ /// Gets the given key's corresponding entry in the map for in-place manipulation.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut count: BTreeMap<&str, usize> = BTreeMap::new();
+ ///
+ /// // count the number of occurrences of letters in the vec
+ /// for x in ["a", "b", "a", "c", "a", "b"] {
+ /// count.entry(x).and_modify(|curr| *curr += 1).or_insert(1);
+ /// }
+ ///
+ /// assert_eq!(count["a"], 3);
+ /// assert_eq!(count["b"], 2);
+ /// assert_eq!(count["c"], 1);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn entry(&mut self, key: K) -> Entry<'_, K, V, A>
+ where
+ K: Ord,
+ {
+ let (map, dormant_map) = DormantMutRef::new(self);
+ match map.root {
+ None => Vacant(VacantEntry {
+ key,
+ handle: None,
+ dormant_map,
+ alloc: (*map.alloc).clone(),
+ _marker: PhantomData,
+ }),
+ Some(ref mut root) => match root.borrow_mut().search_tree(&key) {
+ Found(handle) => Occupied(OccupiedEntry {
+ handle,
+ dormant_map,
+ alloc: (*map.alloc).clone(),
+ _marker: PhantomData,
+ }),
+ GoDown(handle) => Vacant(VacantEntry {
+ key,
+ handle: Some(handle),
+ dormant_map,
+ alloc: (*map.alloc).clone(),
+ _marker: PhantomData,
+ }),
+ },
+ }
+ }
+
+ /// Splits the collection into two at the given key. Returns everything after the given key,
+ /// including the key.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut a = BTreeMap::new();
+ /// a.insert(1, "a");
+ /// a.insert(2, "b");
+ /// a.insert(3, "c");
+ /// a.insert(17, "d");
+ /// a.insert(41, "e");
+ ///
+ /// let b = a.split_off(&3);
+ ///
+ /// assert_eq!(a.len(), 2);
+ /// assert_eq!(b.len(), 3);
+ ///
+ /// assert_eq!(a[&1], "a");
+ /// assert_eq!(a[&2], "b");
+ ///
+ /// assert_eq!(b[&3], "c");
+ /// assert_eq!(b[&17], "d");
+ /// assert_eq!(b[&41], "e");
+ /// ```
+ #[stable(feature = "btree_split_off", since = "1.11.0")]
+ pub fn split_off<Q: ?Sized + Ord>(&mut self, key: &Q) -> Self
+ where
+ K: Borrow<Q> + Ord,
+ A: Clone,
+ {
+ if self.is_empty() {
+ return Self::new_in((*self.alloc).clone());
+ }
+
+ let total_num = self.len();
+ let left_root = self.root.as_mut().unwrap(); // unwrap succeeds because not empty
+
+ let right_root = left_root.split_off(key, (*self.alloc).clone());
+
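+        // The node-level split does not track element counts, so recompute both
+        // lengths from the original total.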
+ let (new_left_len, right_len) = Root::calc_split_length(total_num, &left_root, &right_root);
+ self.length = new_left_len;
+
+ BTreeMap {
+ root: Some(right_root),
+ length: right_len,
+ alloc: self.alloc.clone(),
+ _marker: PhantomData,
+ }
+ }
+
+ /// Creates an iterator that visits all elements (key-value pairs) in
+ /// ascending key order and uses a closure to determine if an element should
+ /// be removed. If the closure returns `true`, the element is removed from
+ /// the map and yielded. If the closure returns `false`, or panics, the
+ /// element remains in the map and will not be yielded.
+ ///
+ /// The iterator also lets you mutate the value of each element in the
+ /// closure, regardless of whether you choose to keep or remove it.
+ ///
+ /// If the iterator is only partially consumed or not consumed at all, each
+ /// of the remaining elements is still subjected to the closure, which may
+ /// change its value and, by returning `true`, have the element removed and
+ /// dropped.
+ ///
+ /// It is unspecified how many more elements will be subjected to the
+ /// closure if a panic occurs in the closure, or a panic occurs while
+ /// dropping an element, or if the `DrainFilter` value is leaked.
+ ///
+ /// # Examples
+ ///
+ /// Splitting a map into even and odd keys, reusing the original map:
+ ///
+ /// ```
+ /// #![feature(btree_drain_filter)]
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map: BTreeMap<i32, i32> = (0..8).map(|x| (x, x)).collect();
+ /// let evens: BTreeMap<_, _> = map.drain_filter(|k, _v| k % 2 == 0).collect();
+ /// let odds = map;
+ /// assert_eq!(evens.keys().copied().collect::<Vec<_>>(), [0, 2, 4, 6]);
+ /// assert_eq!(odds.keys().copied().collect::<Vec<_>>(), [1, 3, 5, 7]);
+ /// ```
+ #[unstable(feature = "btree_drain_filter", issue = "70530")]
+ pub fn drain_filter<F>(&mut self, pred: F) -> DrainFilter<'_, K, V, F, A>
+ where
+ K: Ord,
+ F: FnMut(&K, &mut V) -> bool,
+ {
+ let (inner, alloc) = self.drain_filter_inner();
+ DrainFilter { pred, inner, alloc }
+ }
+
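+    /// Sets up the shared part of a `DrainFilter`: a cursor at the first leaf edge,
+    /// a live reference to the map's length, and a clone of the allocator.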
+ pub(super) fn drain_filter_inner(&mut self) -> (DrainFilterInner<'_, K, V>, A)
+ where
+ K: Ord,
+ {
+ if let Some(root) = self.root.as_mut() {
+ let (root, dormant_root) = DormantMutRef::new(root);
+ let front = root.borrow_mut().first_leaf_edge();
+ (
+ DrainFilterInner {
+ length: &mut self.length,
+ dormant_root: Some(dormant_root),
+ cur_leaf_edge: Some(front),
+ },
+ (*self.alloc).clone(),
+ )
+ } else {
+ (
+ DrainFilterInner {
+ length: &mut self.length,
+ dormant_root: None,
+ cur_leaf_edge: None,
+ },
+ (*self.alloc).clone(),
+ )
+ }
+ }
+
+ /// Creates a consuming iterator visiting all the keys, in sorted order.
+ /// The map cannot be used after calling this.
+ /// The iterator element type is `K`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut a = BTreeMap::new();
+ /// a.insert(2, "b");
+ /// a.insert(1, "a");
+ ///
+ /// let keys: Vec<i32> = a.into_keys().collect();
+ /// assert_eq!(keys, [1, 2]);
+ /// ```
+ #[inline]
+ #[stable(feature = "map_into_keys_values", since = "1.54.0")]
+ pub fn into_keys(self) -> IntoKeys<K, V, A> {
+ IntoKeys { inner: self.into_iter() }
+ }
+
+ /// Creates a consuming iterator visiting all the values, in order by key.
+ /// The map cannot be used after calling this.
+ /// The iterator element type is `V`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut a = BTreeMap::new();
+ /// a.insert(1, "hello");
+ /// a.insert(2, "goodbye");
+ ///
+ /// let values: Vec<&str> = a.into_values().collect();
+ /// assert_eq!(values, ["hello", "goodbye"]);
+ /// ```
+ #[inline]
+ #[stable(feature = "map_into_keys_values", since = "1.54.0")]
+ pub fn into_values(self) -> IntoValues<K, V, A> {
+ IntoValues { inner: self.into_iter() }
+ }
+
+ /// Makes a `BTreeMap` from a sorted iterator.
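+    /// Consecutive pairs with equal keys are deduplicated by `DedupSortedIter`
+    /// before being pushed into the tree.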
+ pub(crate) fn bulk_build_from_sorted_iter<I>(iter: I, alloc: A) -> Self
+ where
+ K: Ord,
+ I: IntoIterator<Item = (K, V)>,
+ {
+ let mut root = Root::new(alloc.clone());
+ let mut length = 0;
+ root.bulk_push(DedupSortedIter::new(iter.into_iter()), &mut length, alloc.clone());
+ BTreeMap { root: Some(root), length, alloc: ManuallyDrop::new(alloc), _marker: PhantomData }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, K, V, A: Allocator + Clone> IntoIterator for &'a BTreeMap<K, V, A> {
+ type Item = (&'a K, &'a V);
+ type IntoIter = Iter<'a, K, V>;
+
+ fn into_iter(self) -> Iter<'a, K, V> {
+ self.iter()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, K: 'a, V: 'a> Iterator for Iter<'a, K, V> {
+ type Item = (&'a K, &'a V);
+
+ fn next(&mut self) -> Option<(&'a K, &'a V)> {
+ if self.length == 0 {
+ None
+ } else {
+ self.length -= 1;
+ Some(unsafe { self.range.next_unchecked() })
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (self.length, Some(self.length))
+ }
+
+ fn last(mut self) -> Option<(&'a K, &'a V)> {
+ self.next_back()
+ }
+
+ fn min(mut self) -> Option<(&'a K, &'a V)> {
+ self.next()
+ }
+
+ fn max(mut self) -> Option<(&'a K, &'a V)> {
+ self.next_back()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<K, V> FusedIterator for Iter<'_, K, V> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, K: 'a, V: 'a> DoubleEndedIterator for Iter<'a, K, V> {
+ fn next_back(&mut self) -> Option<(&'a K, &'a V)> {
+ if self.length == 0 {
+ None
+ } else {
+ self.length -= 1;
+ Some(unsafe { self.range.next_back_unchecked() })
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K, V> ExactSizeIterator for Iter<'_, K, V> {
+ fn len(&self) -> usize {
+ self.length
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K, V> Clone for Iter<'_, K, V> {
+ fn clone(&self) -> Self {
+ Iter { range: self.range.clone(), length: self.length }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, K, V, A: Allocator + Clone> IntoIterator for &'a mut BTreeMap<K, V, A> {
+ type Item = (&'a K, &'a mut V);
+ type IntoIter = IterMut<'a, K, V>;
+
+ fn into_iter(self) -> IterMut<'a, K, V> {
+ self.iter_mut()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, K, V> Iterator for IterMut<'a, K, V> {
+ type Item = (&'a K, &'a mut V);
+
+ fn next(&mut self) -> Option<(&'a K, &'a mut V)> {
+ if self.length == 0 {
+ None
+ } else {
+ self.length -= 1;
+ Some(unsafe { self.range.next_unchecked() })
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (self.length, Some(self.length))
+ }
+
+ fn last(mut self) -> Option<(&'a K, &'a mut V)> {
+ self.next_back()
+ }
+
+ fn min(mut self) -> Option<(&'a K, &'a mut V)> {
+ self.next()
+ }
+
+ fn max(mut self) -> Option<(&'a K, &'a mut V)> {
+ self.next_back()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, K, V> DoubleEndedIterator for IterMut<'a, K, V> {
+ fn next_back(&mut self) -> Option<(&'a K, &'a mut V)> {
+ if self.length == 0 {
+ None
+ } else {
+ self.length -= 1;
+ Some(unsafe { self.range.next_back_unchecked() })
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K, V> ExactSizeIterator for IterMut<'_, K, V> {
+ fn len(&self) -> usize {
+ self.length
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<K, V> FusedIterator for IterMut<'_, K, V> {}
+
+impl<'a, K, V> IterMut<'a, K, V> {
+ /// Returns an iterator of references over the remaining items.
+ #[inline]
+ pub(super) fn iter(&self) -> Iter<'_, K, V> {
+ Iter { range: self.range.reborrow(), length: self.length }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K, V, A: Allocator + Clone> IntoIterator for BTreeMap<K, V, A> {
+ type Item = (K, V);
+ type IntoIter = IntoIter<K, V, A>;
+
+ fn into_iter(self) -> IntoIter<K, V, A> {
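+        // Disarm the map's own destructor: the returned `IntoIter` takes over
+        // ownership of the nodes and of the allocator.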
+ let mut me = ManuallyDrop::new(self);
+ if let Some(root) = me.root.take() {
+ let full_range = root.into_dying().full_range();
+
+ IntoIter {
+ range: full_range,
+ length: me.length,
+ alloc: unsafe { ManuallyDrop::take(&mut me.alloc) },
+ }
+ } else {
+ IntoIter {
+ range: LazyLeafRange::none(),
+ length: 0,
+ alloc: unsafe { ManuallyDrop::take(&mut me.alloc) },
+ }
+ }
+ }
+}
+
+#[stable(feature = "btree_drop", since = "1.7.0")]
+impl<K, V, A: Allocator + Clone> Drop for IntoIter<K, V, A> {
+ fn drop(&mut self) {
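+        // Guard that resumes dropping the remaining elements if one of the drops
+        // below unwinds, so the rest of the tree is still cleaned up.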
+ struct DropGuard<'a, K, V, A: Allocator + Clone>(&'a mut IntoIter<K, V, A>);
+
+ impl<'a, K, V, A: Allocator + Clone> Drop for DropGuard<'a, K, V, A> {
+ fn drop(&mut self) {
+ // Continue the same loop we perform below. This only runs when unwinding, so we
+ // don't have to care about panics this time (they'll abort).
+ while let Some(kv) = self.0.dying_next() {
+ // SAFETY: we consume the dying handle immediately.
+ unsafe { kv.drop_key_val() };
+ }
+ }
+ }
+
+ while let Some(kv) = self.dying_next() {
+ let guard = DropGuard(self);
+ // SAFETY: we don't touch the tree before consuming the dying handle.
+ unsafe { kv.drop_key_val() };
+ mem::forget(guard);
+ }
+ }
+}
+
+impl<K, V, A: Allocator + Clone> IntoIter<K, V, A> {
+ /// Core of a `next` method returning a dying KV handle,
+ /// invalidated by further calls to this function and some others.
+ fn dying_next(
+ &mut self,
+ ) -> Option<Handle<NodeRef<marker::Dying, K, V, marker::LeafOrInternal>, marker::KV>> {
+ if self.length == 0 {
+ self.range.deallocating_end(self.alloc.clone());
+ None
+ } else {
+ self.length -= 1;
+ Some(unsafe { self.range.deallocating_next_unchecked(self.alloc.clone()) })
+ }
+ }
+
+ /// Core of a `next_back` method returning a dying KV handle,
+ /// invalidated by further calls to this function and some others.
+ fn dying_next_back(
+ &mut self,
+ ) -> Option<Handle<NodeRef<marker::Dying, K, V, marker::LeafOrInternal>, marker::KV>> {
+ if self.length == 0 {
+ self.range.deallocating_end(self.alloc.clone());
+ None
+ } else {
+ self.length -= 1;
+ Some(unsafe { self.range.deallocating_next_back_unchecked(self.alloc.clone()) })
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K, V, A: Allocator + Clone> Iterator for IntoIter<K, V, A> {
+ type Item = (K, V);
+
+ fn next(&mut self) -> Option<(K, V)> {
+ // SAFETY: we consume the dying handle immediately.
+ self.dying_next().map(unsafe { |kv| kv.into_key_val() })
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (self.length, Some(self.length))
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K, V, A: Allocator + Clone> DoubleEndedIterator for IntoIter<K, V, A> {
+ fn next_back(&mut self) -> Option<(K, V)> {
+ // SAFETY: we consume the dying handle immediately.
+ self.dying_next_back().map(unsafe { |kv| kv.into_key_val() })
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K, V, A: Allocator + Clone> ExactSizeIterator for IntoIter<K, V, A> {
+ fn len(&self) -> usize {
+ self.length
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<K, V, A: Allocator + Clone> FusedIterator for IntoIter<K, V, A> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, K, V> Iterator for Keys<'a, K, V> {
+ type Item = &'a K;
+
+ fn next(&mut self) -> Option<&'a K> {
+ self.inner.next().map(|(k, _)| k)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+
+ fn last(mut self) -> Option<&'a K> {
+ self.next_back()
+ }
+
+ fn min(mut self) -> Option<&'a K> {
+ self.next()
+ }
+
+ fn max(mut self) -> Option<&'a K> {
+ self.next_back()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, K, V> DoubleEndedIterator for Keys<'a, K, V> {
+ fn next_back(&mut self) -> Option<&'a K> {
+ self.inner.next_back().map(|(k, _)| k)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K, V> ExactSizeIterator for Keys<'_, K, V> {
+ fn len(&self) -> usize {
+ self.inner.len()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<K, V> FusedIterator for Keys<'_, K, V> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K, V> Clone for Keys<'_, K, V> {
+ fn clone(&self) -> Self {
+ Keys { inner: self.inner.clone() }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, K, V> Iterator for Values<'a, K, V> {
+ type Item = &'a V;
+
+ fn next(&mut self) -> Option<&'a V> {
+ self.inner.next().map(|(_, v)| v)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+
+ fn last(mut self) -> Option<&'a V> {
+ self.next_back()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, K, V> DoubleEndedIterator for Values<'a, K, V> {
+ fn next_back(&mut self) -> Option<&'a V> {
+ self.inner.next_back().map(|(_, v)| v)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K, V> ExactSizeIterator for Values<'_, K, V> {
+ fn len(&self) -> usize {
+ self.inner.len()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<K, V> FusedIterator for Values<'_, K, V> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K, V> Clone for Values<'_, K, V> {
+ fn clone(&self) -> Self {
+ Values { inner: self.inner.clone() }
+ }
+}
+
+/// An iterator produced by calling `drain_filter` on BTreeMap.
+#[unstable(feature = "btree_drain_filter", issue = "70530")]
+pub struct DrainFilter<
+ 'a,
+ K,
+ V,
+ F,
+ #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator + Clone = Global,
+> where
+ F: 'a + FnMut(&K, &mut V) -> bool,
+{
+ pred: F,
+ inner: DrainFilterInner<'a, K, V>,
+    /// The BTreeMap will outlive this DrainFilter, so we don't care about drop order for `alloc`.
+ alloc: A,
+}
+/// Most of the implementation of DrainFilter is generic over the type
+/// of the predicate, and thus also serves for BTreeSet::DrainFilter.
+pub(super) struct DrainFilterInner<'a, K, V> {
+ /// Reference to the length field in the borrowed map, updated live.
+ length: &'a mut usize,
+ /// Buried reference to the root field in the borrowed map.
+ /// Wrapped in `Option` to allow drop handler to `take` it.
+ dormant_root: Option<DormantMutRef<'a, Root<K, V>>>,
+ /// Contains a leaf edge preceding the next element to be returned, or the last leaf edge.
+ /// Empty if the map has no root, if iteration went beyond the last leaf edge,
+ /// or if a panic occurred in the predicate.
+ cur_leaf_edge: Option<Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge>>,
+}
+
+#[unstable(feature = "btree_drain_filter", issue = "70530")]
+impl<K, V, F, A: Allocator + Clone> Drop for DrainFilter<'_, K, V, F, A>
+where
+ F: FnMut(&K, &mut V) -> bool,
+{
+ fn drop(&mut self) {
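+        // Exhaust the iterator: every remaining element is still run through the
+        // predicate and drained or kept, as documented on `drain_filter`.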
+ self.for_each(drop);
+ }
+}
+
+#[unstable(feature = "btree_drain_filter", issue = "70530")]
+impl<K, V, F> fmt::Debug for DrainFilter<'_, K, V, F>
+where
+ K: fmt::Debug,
+ V: fmt::Debug,
+ F: FnMut(&K, &mut V) -> bool,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("DrainFilter").field(&self.inner.peek()).finish()
+ }
+}
+
+#[unstable(feature = "btree_drain_filter", issue = "70530")]
+impl<K, V, F, A: Allocator + Clone> Iterator for DrainFilter<'_, K, V, F, A>
+where
+ F: FnMut(&K, &mut V) -> bool,
+{
+ type Item = (K, V);
+
+ fn next(&mut self) -> Option<(K, V)> {
+ self.inner.next(&mut self.pred, self.alloc.clone())
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+}
+
+impl<'a, K, V> DrainFilterInner<'a, K, V> {
+ /// Allow Debug implementations to predict the next element.
+ pub(super) fn peek(&self) -> Option<(&K, &V)> {
+ let edge = self.cur_leaf_edge.as_ref()?;
+ edge.reborrow().next_kv().ok().map(Handle::into_kv)
+ }
+
+ /// Implementation of a typical `DrainFilter::next` method, given the predicate.
+ pub(super) fn next<F, A: Allocator + Clone>(&mut self, pred: &mut F, alloc: A) -> Option<(K, V)>
+ where
+ F: FnMut(&K, &mut V) -> bool,
+ {
+ while let Ok(mut kv) = self.cur_leaf_edge.take()?.next_kv() {
+ let (k, v) = kv.kv_mut();
+ if pred(k, v) {
+ *self.length -= 1;
+ let (kv, pos) = kv.remove_kv_tracking(
+ || {
+ // SAFETY: we will touch the root in a way that will not
+ // invalidate the position returned.
+ let root = unsafe { self.dormant_root.take().unwrap().awaken() };
+ root.pop_internal_level(alloc.clone());
+ self.dormant_root = Some(DormantMutRef::new(root).1);
+ },
+ alloc.clone(),
+ );
+ self.cur_leaf_edge = Some(pos);
+ return Some(kv);
+ }
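+            // The predicate kept this element; step past it to the next leaf edge.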
+ self.cur_leaf_edge = Some(kv.next_leaf_edge());
+ }
+ None
+ }
+
+ /// Implementation of a typical `DrainFilter::size_hint` method.
+ pub(super) fn size_hint(&self) -> (usize, Option<usize>) {
+ // In most of the btree iterators, `self.length` is the number of elements
+ // yet to be visited. Here, it includes elements that were visited and that
+        // the predicate decided not to drain. Making this upper bound tighter
+ // during iteration would require an extra field.
+ (0, Some(*self.length))
+ }
+}
+
+#[unstable(feature = "btree_drain_filter", issue = "70530")]
+impl<K, V, F> FusedIterator for DrainFilter<'_, K, V, F> where F: FnMut(&K, &mut V) -> bool {}
+
+#[stable(feature = "btree_range", since = "1.17.0")]
+impl<'a, K, V> Iterator for Range<'a, K, V> {
+ type Item = (&'a K, &'a V);
+
+ fn next(&mut self) -> Option<(&'a K, &'a V)> {
+ self.inner.next_checked()
+ }
+
+ fn last(mut self) -> Option<(&'a K, &'a V)> {
+ self.next_back()
+ }
+
+ fn min(mut self) -> Option<(&'a K, &'a V)> {
+ self.next()
+ }
+
+ fn max(mut self) -> Option<(&'a K, &'a V)> {
+ self.next_back()
+ }
+}
+
+#[stable(feature = "map_values_mut", since = "1.10.0")]
+impl<'a, K, V> Iterator for ValuesMut<'a, K, V> {
+ type Item = &'a mut V;
+
+ fn next(&mut self) -> Option<&'a mut V> {
+ self.inner.next().map(|(_, v)| v)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+
+ fn last(mut self) -> Option<&'a mut V> {
+ self.next_back()
+ }
+}
+
+#[stable(feature = "map_values_mut", since = "1.10.0")]
+impl<'a, K, V> DoubleEndedIterator for ValuesMut<'a, K, V> {
+ fn next_back(&mut self) -> Option<&'a mut V> {
+ self.inner.next_back().map(|(_, v)| v)
+ }
+}
+
+#[stable(feature = "map_values_mut", since = "1.10.0")]
+impl<K, V> ExactSizeIterator for ValuesMut<'_, K, V> {
+ fn len(&self) -> usize {
+ self.inner.len()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<K, V> FusedIterator for ValuesMut<'_, K, V> {}
+
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
+impl<K, V, A: Allocator + Clone> Iterator for IntoKeys<K, V, A> {
+ type Item = K;
+
+ fn next(&mut self) -> Option<K> {
+ self.inner.next().map(|(k, _)| k)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+
+ fn last(mut self) -> Option<K> {
+ self.next_back()
+ }
+
+ fn min(mut self) -> Option<K> {
+ self.next()
+ }
+
+ fn max(mut self) -> Option<K> {
+ self.next_back()
+ }
+}
+
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
+impl<K, V, A: Allocator + Clone> DoubleEndedIterator for IntoKeys<K, V, A> {
+ fn next_back(&mut self) -> Option<K> {
+ self.inner.next_back().map(|(k, _)| k)
+ }
+}
+
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
+impl<K, V, A: Allocator + Clone> ExactSizeIterator for IntoKeys<K, V, A> {
+ fn len(&self) -> usize {
+ self.inner.len()
+ }
+}
+
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
+impl<K, V, A: Allocator + Clone> FusedIterator for IntoKeys<K, V, A> {}
+
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
+impl<K, V, A: Allocator + Clone> Iterator for IntoValues<K, V, A> {
+ type Item = V;
+
+ fn next(&mut self) -> Option<V> {
+ self.inner.next().map(|(_, v)| v)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+
+ fn last(mut self) -> Option<V> {
+ self.next_back()
+ }
+}
+
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
+impl<K, V, A: Allocator + Clone> DoubleEndedIterator for IntoValues<K, V, A> {
+ fn next_back(&mut self) -> Option<V> {
+ self.inner.next_back().map(|(_, v)| v)
+ }
+}
+
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
+impl<K, V, A: Allocator + Clone> ExactSizeIterator for IntoValues<K, V, A> {
+ fn len(&self) -> usize {
+ self.inner.len()
+ }
+}
+
+#[stable(feature = "map_into_keys_values", since = "1.54.0")]
+impl<K, V, A: Allocator + Clone> FusedIterator for IntoValues<K, V, A> {}
+
+#[stable(feature = "btree_range", since = "1.17.0")]
+impl<'a, K, V> DoubleEndedIterator for Range<'a, K, V> {
+ fn next_back(&mut self) -> Option<(&'a K, &'a V)> {
+ self.inner.next_back_checked()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<K, V> FusedIterator for Range<'_, K, V> {}
+
+#[stable(feature = "btree_range", since = "1.17.0")]
+impl<K, V> Clone for Range<'_, K, V> {
+ fn clone(&self) -> Self {
+ Range { inner: self.inner.clone() }
+ }
+}
+
+#[stable(feature = "btree_range", since = "1.17.0")]
+impl<'a, K, V> Iterator for RangeMut<'a, K, V> {
+ type Item = (&'a K, &'a mut V);
+
+ fn next(&mut self) -> Option<(&'a K, &'a mut V)> {
+ self.inner.next_checked()
+ }
+
+ fn last(mut self) -> Option<(&'a K, &'a mut V)> {
+ self.next_back()
+ }
+
+ fn min(mut self) -> Option<(&'a K, &'a mut V)> {
+ self.next()
+ }
+
+ fn max(mut self) -> Option<(&'a K, &'a mut V)> {
+ self.next_back()
+ }
+}
+
+#[stable(feature = "btree_range", since = "1.17.0")]
+impl<'a, K, V> DoubleEndedIterator for RangeMut<'a, K, V> {
+ fn next_back(&mut self) -> Option<(&'a K, &'a mut V)> {
+ self.inner.next_back_checked()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<K, V> FusedIterator for RangeMut<'_, K, V> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K: Ord, V> FromIterator<(K, V)> for BTreeMap<K, V> {
+ fn from_iter<T: IntoIterator<Item = (K, V)>>(iter: T) -> BTreeMap<K, V> {
+ let mut inputs: Vec<_> = iter.into_iter().collect();
+
+ if inputs.is_empty() {
+ return BTreeMap::new();
+ }
+
+ // use stable sort to preserve the insertion order.
+ inputs.sort_by(|a, b| a.0.cmp(&b.0));
+ BTreeMap::bulk_build_from_sorted_iter(inputs, Global)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K: Ord, V, A: Allocator + Clone> Extend<(K, V)> for BTreeMap<K, V, A> {
+ #[inline]
+ fn extend<T: IntoIterator<Item = (K, V)>>(&mut self, iter: T) {
+ iter.into_iter().for_each(move |(k, v)| {
+ self.insert(k, v);
+ });
+ }
+
+ #[inline]
+ fn extend_one(&mut self, (k, v): (K, V)) {
+ self.insert(k, v);
+ }
+}
+
+#[stable(feature = "extend_ref", since = "1.2.0")]
+impl<'a, K: Ord + Copy, V: Copy, A: Allocator + Clone> Extend<(&'a K, &'a V)>
+ for BTreeMap<K, V, A>
+{
+ fn extend<I: IntoIterator<Item = (&'a K, &'a V)>>(&mut self, iter: I) {
+ self.extend(iter.into_iter().map(|(&key, &value)| (key, value)));
+ }
+
+ #[inline]
+ fn extend_one(&mut self, (&k, &v): (&'a K, &'a V)) {
+ self.insert(k, v);
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K: Hash, V: Hash, A: Allocator + Clone> Hash for BTreeMap<K, V, A> {
+ fn hash<H: Hasher>(&self, state: &mut H) {
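+        // Write the length first so the hash encoding of the entries is prefix-free.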
+ state.write_length_prefix(self.len());
+ for elt in self {
+ elt.hash(state);
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K, V> Default for BTreeMap<K, V> {
+ /// Creates an empty `BTreeMap`.
+ fn default() -> BTreeMap<K, V> {
+ BTreeMap::new()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K: PartialEq, V: PartialEq, A: Allocator + Clone> PartialEq for BTreeMap<K, V, A> {
+ fn eq(&self, other: &BTreeMap<K, V, A>) -> bool {
+ self.len() == other.len() && self.iter().zip(other).all(|(a, b)| a == b)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K: Eq, V: Eq, A: Allocator + Clone> Eq for BTreeMap<K, V, A> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K: PartialOrd, V: PartialOrd, A: Allocator + Clone> PartialOrd for BTreeMap<K, V, A> {
+ #[inline]
+ fn partial_cmp(&self, other: &BTreeMap<K, V, A>) -> Option<Ordering> {
+ self.iter().partial_cmp(other.iter())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K: Ord, V: Ord, A: Allocator + Clone> Ord for BTreeMap<K, V, A> {
+ #[inline]
+ fn cmp(&self, other: &BTreeMap<K, V, A>) -> Ordering {
+ self.iter().cmp(other.iter())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K: Debug, V: Debug, A: Allocator + Clone> Debug for BTreeMap<K, V, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_map().entries(self.iter()).finish()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<K, Q: ?Sized, V, A: Allocator + Clone> Index<&Q> for BTreeMap<K, V, A>
+where
+ K: Borrow<Q> + Ord,
+ Q: Ord,
+{
+ type Output = V;
+
+ /// Returns a reference to the value corresponding to the supplied key.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the key is not present in the `BTreeMap`.
+ #[inline]
+ fn index(&self, key: &Q) -> &V {
+ self.get(key).expect("no entry found for key")
+ }
+}
+
+#[stable(feature = "std_collections_from_array", since = "1.56.0")]
+impl<K: Ord, V, const N: usize> From<[(K, V); N]> for BTreeMap<K, V> {
+    /// Converts a `[(K, V); N]` into a `BTreeMap<K, V>`.
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let map1 = BTreeMap::from([(1, 2), (3, 4)]);
+ /// let map2: BTreeMap<_, _> = [(1, 2), (3, 4)].into();
+ /// assert_eq!(map1, map2);
+ /// ```
+ fn from(mut arr: [(K, V); N]) -> Self {
+ if N == 0 {
+ return BTreeMap::new();
+ }
+
+ // use stable sort to preserve the insertion order.
+ arr.sort_by(|a, b| a.0.cmp(&b.0));
+ BTreeMap::bulk_build_from_sorted_iter(arr, Global)
+ }
+}
+
+impl<K, V, A: Allocator + Clone> BTreeMap<K, V, A> {
+ /// Gets an iterator over the entries of the map, sorted by key.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map = BTreeMap::new();
+ /// map.insert(3, "c");
+ /// map.insert(2, "b");
+ /// map.insert(1, "a");
+ ///
+ /// for (key, value) in map.iter() {
+ /// println!("{key}: {value}");
+ /// }
+ ///
+ /// let (first_key, first_value) = map.iter().next().unwrap();
+ /// assert_eq!((*first_key, *first_value), (1, "a"));
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn iter(&self) -> Iter<'_, K, V> {
+ if let Some(root) = &self.root {
+ let full_range = root.reborrow().full_range();
+
+ Iter { range: full_range, length: self.length }
+ } else {
+ Iter { range: LazyLeafRange::none(), length: 0 }
+ }
+ }
+
+ /// Gets a mutable iterator over the entries of the map, sorted by key.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map = BTreeMap::from([
+ /// ("a", 1),
+ /// ("b", 2),
+ /// ("c", 3),
+ /// ]);
+ ///
+ /// // add 10 to the value if the key isn't "a"
+ /// for (key, value) in map.iter_mut() {
+ /// if key != &"a" {
+ /// *value += 10;
+ /// }
+ /// }
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn iter_mut(&mut self) -> IterMut<'_, K, V> {
+ if let Some(root) = &mut self.root {
+ let full_range = root.borrow_valmut().full_range();
+
+ IterMut { range: full_range, length: self.length, _marker: PhantomData }
+ } else {
+ IterMut { range: LazyLeafRange::none(), length: 0, _marker: PhantomData }
+ }
+ }
+
+ /// Gets an iterator over the keys of the map, in sorted order.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut a = BTreeMap::new();
+ /// a.insert(2, "b");
+ /// a.insert(1, "a");
+ ///
+ /// let keys: Vec<_> = a.keys().cloned().collect();
+ /// assert_eq!(keys, [1, 2]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn keys(&self) -> Keys<'_, K, V> {
+ Keys { inner: self.iter() }
+ }
+
+ /// Gets an iterator over the values of the map, in order by key.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut a = BTreeMap::new();
+ /// a.insert(1, "hello");
+ /// a.insert(2, "goodbye");
+ ///
+ /// let values: Vec<&str> = a.values().cloned().collect();
+ /// assert_eq!(values, ["hello", "goodbye"]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn values(&self) -> Values<'_, K, V> {
+ Values { inner: self.iter() }
+ }
+
+ /// Gets a mutable iterator over the values of the map, in order by key.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut a = BTreeMap::new();
+ /// a.insert(1, String::from("hello"));
+ /// a.insert(2, String::from("goodbye"));
+ ///
+ /// for value in a.values_mut() {
+ /// value.push_str("!");
+ /// }
+ ///
+ /// let values: Vec<String> = a.values().cloned().collect();
+ /// assert_eq!(values, [String::from("hello!"),
+ /// String::from("goodbye!")]);
+ /// ```
+ #[stable(feature = "map_values_mut", since = "1.10.0")]
+ pub fn values_mut(&mut self) -> ValuesMut<'_, K, V> {
+ ValuesMut { inner: self.iter_mut() }
+ }
+
+ /// Returns the number of elements in the map.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut a = BTreeMap::new();
+ /// assert_eq!(a.len(), 0);
+ /// a.insert(1, "a");
+ /// assert_eq!(a.len(), 1);
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_btree_new", issue = "71835")]
+ pub const fn len(&self) -> usize {
+ self.length
+ }
+
+ /// Returns `true` if the map contains no elements.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut a = BTreeMap::new();
+ /// assert!(a.is_empty());
+ /// a.insert(1, "a");
+ /// assert!(!a.is_empty());
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_btree_new", issue = "71835")]
+ pub const fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+}
+
+#[cfg(test)]
+mod tests;
diff --git a/library/alloc/src/collections/btree/map/entry.rs b/library/alloc/src/collections/btree/map/entry.rs
new file mode 100644
index 000000000..b6eecf9b0
--- /dev/null
+++ b/library/alloc/src/collections/btree/map/entry.rs
@@ -0,0 +1,555 @@
+use core::fmt::{self, Debug};
+use core::marker::PhantomData;
+use core::mem;
+
+use crate::alloc::{Allocator, Global};
+
+use super::super::borrow::DormantMutRef;
+use super::super::node::{marker, Handle, NodeRef};
+use super::BTreeMap;
+
+use Entry::*;
+
+/// A view into a single entry in a map, which may either be vacant or occupied.
+///
+/// This `enum` is constructed from the [`entry`] method on [`BTreeMap`].
+///
+/// [`entry`]: BTreeMap::entry
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "BTreeEntry")]
+pub enum Entry<
+ 'a,
+ K: 'a,
+ V: 'a,
+ #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator + Clone = Global,
+> {
+ /// A vacant entry.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ Vacant(#[stable(feature = "rust1", since = "1.0.0")] VacantEntry<'a, K, V, A>),
+
+ /// An occupied entry.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ Occupied(#[stable(feature = "rust1", since = "1.0.0")] OccupiedEntry<'a, K, V, A>),
+}
+
+#[stable(feature = "debug_btree_map", since = "1.12.0")]
+impl<K: Debug + Ord, V: Debug, A: Allocator + Clone> Debug for Entry<'_, K, V, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match *self {
+ Vacant(ref v) => f.debug_tuple("Entry").field(v).finish(),
+ Occupied(ref o) => f.debug_tuple("Entry").field(o).finish(),
+ }
+ }
+}
+
+/// A view into a vacant entry in a `BTreeMap`.
+/// It is part of the [`Entry`] enum.
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct VacantEntry<
+ 'a,
+ K,
+ V,
+ #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator + Clone = Global,
+> {
+ pub(super) key: K,
+    /// `None` for an (empty) map without a root
+ pub(super) handle: Option<Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge>>,
+ pub(super) dormant_map: DormantMutRef<'a, BTreeMap<K, V, A>>,
+
+    /// The BTreeMap will outlive this VacantEntry, so we don't care about drop order for `alloc`.
+ pub(super) alloc: A,
+
+ // Be invariant in `K` and `V`
+ pub(super) _marker: PhantomData<&'a mut (K, V)>,
+}
+
+#[stable(feature = "debug_btree_map", since = "1.12.0")]
+impl<K: Debug + Ord, V, A: Allocator + Clone> Debug for VacantEntry<'_, K, V, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("VacantEntry").field(self.key()).finish()
+ }
+}
+
+/// A view into an occupied entry in a `BTreeMap`.
+/// It is part of the [`Entry`] enum.
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct OccupiedEntry<
+ 'a,
+ K,
+ V,
+ #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator + Clone = Global,
+> {
+ pub(super) handle: Handle<NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>, marker::KV>,
+ pub(super) dormant_map: DormantMutRef<'a, BTreeMap<K, V, A>>,
+
+    /// The BTreeMap will outlive this OccupiedEntry, so we don't care about drop order for `alloc`.
+ pub(super) alloc: A,
+
+ // Be invariant in `K` and `V`
+ pub(super) _marker: PhantomData<&'a mut (K, V)>,
+}
+
+#[stable(feature = "debug_btree_map", since = "1.12.0")]
+impl<K: Debug + Ord, V: Debug, A: Allocator + Clone> Debug for OccupiedEntry<'_, K, V, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("OccupiedEntry").field("key", self.key()).field("value", self.get()).finish()
+ }
+}
+
+/// The error returned by [`try_insert`](BTreeMap::try_insert) when the key already exists.
+///
+/// Contains the occupied entry, and the value that was not inserted.
+#[unstable(feature = "map_try_insert", issue = "82766")]
+pub struct OccupiedError<'a, K: 'a, V: 'a, A: Allocator + Clone = Global> {
+ /// The entry in the map that was already occupied.
+ pub entry: OccupiedEntry<'a, K, V, A>,
+ /// The value which was not inserted, because the entry was already occupied.
+ pub value: V,
+}
+
+#[unstable(feature = "map_try_insert", issue = "82766")]
+impl<K: Debug + Ord, V: Debug, A: Allocator + Clone> Debug for OccupiedError<'_, K, V, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_struct("OccupiedError")
+ .field("key", self.entry.key())
+ .field("old_value", self.entry.get())
+ .field("new_value", &self.value)
+ .finish()
+ }
+}
+
+#[unstable(feature = "map_try_insert", issue = "82766")]
+impl<'a, K: Debug + Ord, V: Debug, A: Allocator + Clone> fmt::Display
+ for OccupiedError<'a, K, V, A>
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(
+ f,
+ "failed to insert {:?}, key {:?} already exists with value {:?}",
+ self.value,
+ self.entry.key(),
+ self.entry.get(),
+ )
+ }
+}
+
+impl<'a, K: Ord, V, A: Allocator + Clone> Entry<'a, K, V, A> {
+ /// Ensures a value is in the entry by inserting the default if empty, and returns
+ /// a mutable reference to the value in the entry.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map: BTreeMap<&str, usize> = BTreeMap::new();
+ /// map.entry("poneyland").or_insert(12);
+ ///
+ /// assert_eq!(map["poneyland"], 12);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn or_insert(self, default: V) -> &'a mut V {
+ match self {
+ Occupied(entry) => entry.into_mut(),
+ Vacant(entry) => entry.insert(default),
+ }
+ }
+
+ /// Ensures a value is in the entry by inserting the result of the default function if empty,
+ /// and returns a mutable reference to the value in the entry.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map: BTreeMap<&str, String> = BTreeMap::new();
+ /// let s = "hoho".to_string();
+ ///
+ /// map.entry("poneyland").or_insert_with(|| s);
+ ///
+ /// assert_eq!(map["poneyland"], "hoho".to_string());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn or_insert_with<F: FnOnce() -> V>(self, default: F) -> &'a mut V {
+ match self {
+ Occupied(entry) => entry.into_mut(),
+ Vacant(entry) => entry.insert(default()),
+ }
+ }
+
+ /// Ensures a value is in the entry by inserting, if empty, the result of the default function.
+ /// This method allows for generating key-derived values for insertion by providing the default
+ /// function a reference to the key that was moved during the `.entry(key)` method call.
+ ///
+ /// The reference to the moved key is provided so that cloning or copying the key is
+ /// unnecessary, unlike with `.or_insert_with(|| ... )`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map: BTreeMap<&str, usize> = BTreeMap::new();
+ ///
+ /// map.entry("poneyland").or_insert_with_key(|key| key.chars().count());
+ ///
+ /// assert_eq!(map["poneyland"], 9);
+ /// ```
+ #[inline]
+ #[stable(feature = "or_insert_with_key", since = "1.50.0")]
+ pub fn or_insert_with_key<F: FnOnce(&K) -> V>(self, default: F) -> &'a mut V {
+ match self {
+ Occupied(entry) => entry.into_mut(),
+ Vacant(entry) => {
+ let value = default(entry.key());
+ entry.insert(value)
+ }
+ }
+ }
+
+ /// Returns a reference to this entry's key.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map: BTreeMap<&str, usize> = BTreeMap::new();
+ /// assert_eq!(map.entry("poneyland").key(), &"poneyland");
+ /// ```
+ #[stable(feature = "map_entry_keys", since = "1.10.0")]
+ pub fn key(&self) -> &K {
+ match *self {
+ Occupied(ref entry) => entry.key(),
+ Vacant(ref entry) => entry.key(),
+ }
+ }
+
+ /// Provides in-place mutable access to an occupied entry before any
+ /// potential inserts into the map.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map: BTreeMap<&str, usize> = BTreeMap::new();
+ ///
+ /// map.entry("poneyland")
+ /// .and_modify(|e| { *e += 1 })
+ /// .or_insert(42);
+ /// assert_eq!(map["poneyland"], 42);
+ ///
+ /// map.entry("poneyland")
+ /// .and_modify(|e| { *e += 1 })
+ /// .or_insert(42);
+ /// assert_eq!(map["poneyland"], 43);
+ /// ```
+ #[stable(feature = "entry_and_modify", since = "1.26.0")]
+ pub fn and_modify<F>(self, f: F) -> Self
+ where
+ F: FnOnce(&mut V),
+ {
+ match self {
+ Occupied(mut entry) => {
+ f(entry.get_mut());
+ Occupied(entry)
+ }
+ Vacant(entry) => Vacant(entry),
+ }
+ }
+}
+
+impl<'a, K: Ord, V: Default, A: Allocator + Clone> Entry<'a, K, V, A> {
+ #[stable(feature = "entry_or_default", since = "1.28.0")]
+ /// Ensures a value is in the entry by inserting the default value if empty,
+ /// and returns a mutable reference to the value in the entry.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map: BTreeMap<&str, Option<usize>> = BTreeMap::new();
+ /// map.entry("poneyland").or_default();
+ ///
+ /// assert_eq!(map["poneyland"], None);
+ /// ```
+ pub fn or_default(self) -> &'a mut V {
+ match self {
+ Occupied(entry) => entry.into_mut(),
+ Vacant(entry) => entry.insert(Default::default()),
+ }
+ }
+}
+
+impl<'a, K: Ord, V, A: Allocator + Clone> VacantEntry<'a, K, V, A> {
+ /// Gets a reference to the key that would be used when inserting a value
+ /// through the VacantEntry.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map: BTreeMap<&str, usize> = BTreeMap::new();
+ /// assert_eq!(map.entry("poneyland").key(), &"poneyland");
+ /// ```
+ #[stable(feature = "map_entry_keys", since = "1.10.0")]
+ pub fn key(&self) -> &K {
+ &self.key
+ }
+
+ /// Take ownership of the key.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ /// use std::collections::btree_map::Entry;
+ ///
+ /// let mut map: BTreeMap<&str, usize> = BTreeMap::new();
+ ///
+ /// if let Entry::Vacant(v) = map.entry("poneyland") {
+ /// v.into_key();
+ /// }
+ /// ```
+ #[stable(feature = "map_entry_recover_keys2", since = "1.12.0")]
+ pub fn into_key(self) -> K {
+ self.key
+ }
+
+ /// Sets the value of the entry with the `VacantEntry`'s key,
+ /// and returns a mutable reference to it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ /// use std::collections::btree_map::Entry;
+ ///
+ /// let mut map: BTreeMap<&str, u32> = BTreeMap::new();
+ ///
+ /// if let Entry::Vacant(o) = map.entry("poneyland") {
+ /// o.insert(37);
+ /// }
+ /// assert_eq!(map["poneyland"], 37);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn insert(self, value: V) -> &'a mut V {
+ let out_ptr = match self.handle {
+ None => {
+ // SAFETY: There is no tree yet so no reference to it exists.
+ let map = unsafe { self.dormant_map.awaken() };
+ let mut root = NodeRef::new_leaf(self.alloc.clone());
+ let val_ptr = root.borrow_mut().push(self.key, value) as *mut V;
+ map.root = Some(root.forget_type());
+ map.length = 1;
+ val_ptr
+ }
+ Some(handle) => match handle.insert_recursing(self.key, value, self.alloc.clone()) {
+ (None, val_ptr) => {
+ // SAFETY: We have consumed self.handle.
+ let map = unsafe { self.dormant_map.awaken() };
+ map.length += 1;
+ val_ptr
+ }
+ (Some(ins), val_ptr) => {
+ drop(ins.left);
+ // SAFETY: We have consumed self.handle and dropped the
+ // remaining reference to the tree, ins.left.
+ let map = unsafe { self.dormant_map.awaken() };
+ let root = map.root.as_mut().unwrap(); // same as ins.left
+ root.push_internal_level(self.alloc).push(ins.kv.0, ins.kv.1, ins.right);
+ map.length += 1;
+ val_ptr
+ }
+ },
+ };
+ // Now that we have finished growing the tree using borrowed references,
+ // dereference the pointer to a part of it, that we picked up along the way.
+ unsafe { &mut *out_ptr }
+ }
+}
+
+impl<'a, K: Ord, V, A: Allocator + Clone> OccupiedEntry<'a, K, V, A> {
+ /// Gets a reference to the key in the entry.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ ///
+ /// let mut map: BTreeMap<&str, usize> = BTreeMap::new();
+ /// map.entry("poneyland").or_insert(12);
+ /// assert_eq!(map.entry("poneyland").key(), &"poneyland");
+ /// ```
+ #[must_use]
+ #[stable(feature = "map_entry_keys", since = "1.10.0")]
+ pub fn key(&self) -> &K {
+ self.handle.reborrow().into_kv().0
+ }
+
+ /// Take ownership of the key and value from the map.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ /// use std::collections::btree_map::Entry;
+ ///
+ /// let mut map: BTreeMap<&str, usize> = BTreeMap::new();
+ /// map.entry("poneyland").or_insert(12);
+ ///
+ /// if let Entry::Occupied(o) = map.entry("poneyland") {
+ /// // We delete the entry from the map.
+ /// o.remove_entry();
+ /// }
+ ///
+    /// // If we now try to get the value, it will panic:
+ /// // println!("{}", map["poneyland"]);
+ /// ```
+ #[stable(feature = "map_entry_recover_keys2", since = "1.12.0")]
+ pub fn remove_entry(self) -> (K, V) {
+ self.remove_kv()
+ }
+
+ /// Gets a reference to the value in the entry.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ /// use std::collections::btree_map::Entry;
+ ///
+ /// let mut map: BTreeMap<&str, usize> = BTreeMap::new();
+ /// map.entry("poneyland").or_insert(12);
+ ///
+ /// if let Entry::Occupied(o) = map.entry("poneyland") {
+ /// assert_eq!(o.get(), &12);
+ /// }
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn get(&self) -> &V {
+ self.handle.reborrow().into_kv().1
+ }
+
+ /// Gets a mutable reference to the value in the entry.
+ ///
+ /// If you need a reference to the `OccupiedEntry` that may outlive the
+ /// destruction of the `Entry` value, see [`into_mut`].
+ ///
+ /// [`into_mut`]: OccupiedEntry::into_mut
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ /// use std::collections::btree_map::Entry;
+ ///
+ /// let mut map: BTreeMap<&str, usize> = BTreeMap::new();
+ /// map.entry("poneyland").or_insert(12);
+ ///
+ /// assert_eq!(map["poneyland"], 12);
+ /// if let Entry::Occupied(mut o) = map.entry("poneyland") {
+ /// *o.get_mut() += 10;
+ /// assert_eq!(*o.get(), 22);
+ ///
+ /// // We can use the same Entry multiple times.
+ /// *o.get_mut() += 2;
+ /// }
+ /// assert_eq!(map["poneyland"], 24);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn get_mut(&mut self) -> &mut V {
+ self.handle.kv_mut().1
+ }
+
+ /// Converts the entry into a mutable reference to its value.
+ ///
+ /// If you need multiple references to the `OccupiedEntry`, see [`get_mut`].
+ ///
+ /// [`get_mut`]: OccupiedEntry::get_mut
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ /// use std::collections::btree_map::Entry;
+ ///
+ /// let mut map: BTreeMap<&str, usize> = BTreeMap::new();
+ /// map.entry("poneyland").or_insert(12);
+ ///
+ /// assert_eq!(map["poneyland"], 12);
+ /// if let Entry::Occupied(o) = map.entry("poneyland") {
+ /// *o.into_mut() += 10;
+ /// }
+ /// assert_eq!(map["poneyland"], 22);
+ /// ```
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn into_mut(self) -> &'a mut V {
+ self.handle.into_val_mut()
+ }
+
+ /// Sets the value of the entry with the `OccupiedEntry`'s key,
+ /// and returns the entry's old value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ /// use std::collections::btree_map::Entry;
+ ///
+ /// let mut map: BTreeMap<&str, usize> = BTreeMap::new();
+ /// map.entry("poneyland").or_insert(12);
+ ///
+ /// if let Entry::Occupied(mut o) = map.entry("poneyland") {
+ /// assert_eq!(o.insert(15), 12);
+ /// }
+ /// assert_eq!(map["poneyland"], 15);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn insert(&mut self, value: V) -> V {
+ mem::replace(self.get_mut(), value)
+ }
+
+ /// Takes the value of the entry out of the map, and returns it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeMap;
+ /// use std::collections::btree_map::Entry;
+ ///
+ /// let mut map: BTreeMap<&str, usize> = BTreeMap::new();
+ /// map.entry("poneyland").or_insert(12);
+ ///
+ /// if let Entry::Occupied(o) = map.entry("poneyland") {
+ /// assert_eq!(o.remove(), 12);
+ /// }
+ /// // If we try to get "poneyland"'s value, it'll panic:
+ /// // println!("{}", map["poneyland"]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn remove(self) -> V {
+ self.remove_kv().1
+ }
+
+ // Body of `remove_entry`; probably kept separate because the name reflects the returned pair.
+ pub(super) fn remove_kv(self) -> (K, V) {
+ let mut emptied_internal_root = false;
+ let (old_kv, _) =
+ self.handle.remove_kv_tracking(|| emptied_internal_root = true, self.alloc.clone());
+ // SAFETY: we consumed the intermediate root borrow, `self.handle`.
+ let map = unsafe { self.dormant_map.awaken() };
+ map.length -= 1;
+ if emptied_internal_root {
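+ // The removal emptied the internal root of key-value pairs, so pop that
+ // level and let its single remaining child become the new root.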
+ let root = map.root.as_mut().unwrap();
+ root.pop_internal_level(self.alloc);
+ }
+ old_kv
+ }
+}
diff --git a/library/alloc/src/collections/btree/map/tests.rs b/library/alloc/src/collections/btree/map/tests.rs
new file mode 100644
index 000000000..4c372b1d6
--- /dev/null
+++ b/library/alloc/src/collections/btree/map/tests.rs
@@ -0,0 +1,2338 @@
+use super::super::testing::crash_test::{CrashTestDummy, Panic};
+use super::super::testing::ord_chaos::{Cyclic3, Governed, Governor};
+use super::super::testing::rng::DeterministicRng;
+use super::Entry::{Occupied, Vacant};
+use super::*;
+use crate::boxed::Box;
+use crate::fmt::Debug;
+use crate::rc::Rc;
+use crate::string::{String, ToString};
+use crate::vec::Vec;
+use std::cmp::Ordering;
+use std::convert::TryFrom;
+use std::iter::{self, FromIterator};
+use std::mem;
+use std::ops::Bound::{self, Excluded, Included, Unbounded};
+use std::ops::RangeBounds;
+use std::panic::{catch_unwind, AssertUnwindSafe};
+use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};
+
+// Minimum number of elements to insert, to guarantee a tree with 2 levels,
+// i.e., a tree whose root is an internal node at height 1, with edges to leaf nodes.
+// It's not the minimum size: removing an element from such a tree does not always reduce height.
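+// Assuming node::CAPACITY is 11 (B = 6), as in the current node implementation,
+// this works out to 12: one key in the root plus 6 and 5 keys in its two leaf
+// children, matching the structure described in `test_levels` below.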
+const MIN_INSERTS_HEIGHT_1: usize = node::CAPACITY + 1;
+
+// Minimum number of elements to insert in ascending order, to guarantee a tree with 3 levels,
+// i.e., a tree whose root is an internal node at height 2, with edges to more internal nodes.
+// It's not the minimum size: removing an element from such a tree does not always reduce height.
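+// With node::CAPACITY = 11, the structure described in `test_levels` below
+// accounts for this value: 1 + 6 + 42 + 5 + 30 + 5 = 89.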
+const MIN_INSERTS_HEIGHT_2: usize = 89;
+
+// Gathers all references from a mutable iterator and makes sure Miri notices if
+// using them is dangerous.
+fn test_all_refs<'a, T: 'a>(dummy: &mut T, iter: impl Iterator<Item = &'a mut T>) {
+ // Gather all those references.
+ let mut refs: Vec<&mut T> = iter.collect();
+ // Use them all. Twice, to be sure we got all interleavings.
+ for r in refs.iter_mut() {
+ mem::swap(dummy, r);
+ }
+ for r in refs {
+ mem::swap(dummy, r);
+ }
+}
+
+impl<K, V> BTreeMap<K, V> {
+ // Panics if the map (or the code navigating it) is corrupted.
+ fn check_invariants(&self) {
+ if let Some(root) = &self.root {
+ let root_node = root.reborrow();
+
+ // Check the back pointers top-down, before we attempt to rely on
+ // more serious navigation code.
+ assert!(root_node.ascend().is_err());
+ root_node.assert_back_pointers();
+
+ // Check consistency of `length` with what navigation code encounters.
+ assert_eq!(self.length, root_node.calc_length());
+
+ // Lastly, check the invariant causing the least harm.
+ root_node.assert_min_len(if root_node.height() > 0 { 1 } else { 0 });
+ } else {
+ assert_eq!(self.length, 0);
+ }
+
+ // Check that `assert_strictly_ascending` will encounter all keys.
+ assert_eq!(self.length, self.keys().count());
+ }
+
+ // Panics if the map is corrupted or if the keys are not in strictly
+ // ascending order, in the current opinion of the `Ord` implementation.
+ // If the `Ord` implementation violates transitivity, this method does not
+ // guarantee that all keys are unique, just that adjacent keys are unique.
+ fn check(&self)
+ where
+ K: Debug + Ord,
+ {
+ self.check_invariants();
+ self.assert_strictly_ascending();
+ }
+
+ // Returns the height of the root, if any.
+ fn height(&self) -> Option<usize> {
+ self.root.as_ref().map(node::Root::height)
+ }
+
+ fn dump_keys(&self) -> String
+ where
+ K: Debug,
+ {
+ if let Some(root) = self.root.as_ref() {
+ root.reborrow().dump_keys()
+ } else {
+ String::from("not yet allocated")
+ }
+ }
+
+ // Panics if the keys are not in strictly ascending order.
+ fn assert_strictly_ascending(&self)
+ where
+ K: Debug + Ord,
+ {
+ let mut keys = self.keys();
+ if let Some(mut previous) = keys.next() {
+ for next in keys {
+ assert!(previous < next, "{:?} >= {:?}", previous, next);
+ previous = next;
+ }
+ }
+ }
+
+ // Transforms the tree to minimize wasted space, obtaining fewer nodes that are
+ // are mostly filled up to their capacity. The same compact tree could have
+ // been obtained by inserting keys in a shrewd order.
+ fn compact(&mut self)
+ where
+ K: Ord,
+ {
+ let iter = mem::take(self).into_iter();
+ if !iter.is_empty() {
+ self.root.insert(Root::new(*self.alloc)).bulk_push(iter, &mut self.length, *self.alloc);
+ }
+ }
+}
+
+impl<'a, K: 'a, V: 'a> NodeRef<marker::Immut<'a>, K, V, marker::LeafOrInternal> {
+ fn assert_min_len(self, min_len: usize) {
+ assert!(self.len() >= min_len, "node len {} < {}", self.len(), min_len);
+ if let node::ForceResult::Internal(node) = self.force() {
+ for idx in 0..=node.len() {
+ let edge = unsafe { Handle::new_edge(node, idx) };
+ edge.descend().assert_min_len(MIN_LEN);
+ }
+ }
+ }
+}
+
+// Tests our value of MIN_INSERTS_HEIGHT_2. Failure may mean you just need to
+// adapt that value to match a change in node::CAPACITY or the choices made
+// during insertion; otherwise, other test cases may fail or be less useful.
+#[test]
+fn test_levels() {
+ let mut map = BTreeMap::new();
+ map.check();
+ assert_eq!(map.height(), None);
+ assert_eq!(map.len(), 0);
+
+ map.insert(0, ());
+ while map.height() == Some(0) {
+ let last_key = *map.last_key_value().unwrap().0;
+ map.insert(last_key + 1, ());
+ }
+ map.check();
+ // Structure:
+ // - 1 element in internal root node with 2 children
+ // - 6 elements in left leaf child
+ // - 5 elements in right leaf child
+ assert_eq!(map.height(), Some(1));
+ assert_eq!(map.len(), MIN_INSERTS_HEIGHT_1, "{}", map.dump_keys());
+
+ while map.height() == Some(1) {
+ let last_key = *map.last_key_value().unwrap().0;
+ map.insert(last_key + 1, ());
+ }
+ map.check();
+ // Structure:
+ // - 1 element in internal root node with 2 children
+ // - 6 elements in left internal child with 7 grandchildren
+ // - 42 elements in left child's 7 grandchildren with 6 elements each
+ // - 5 elements in right internal child with 6 grandchildren
+ // - 30 elements in right child's first 5 grandchildren with 6 elements each
+ // - 5 elements in right child's last grandchild
+ assert_eq!(map.height(), Some(2));
+ assert_eq!(map.len(), MIN_INSERTS_HEIGHT_2, "{}", map.dump_keys());
+}
+
+// Ensures the testing infrastructure usually notices order violations.
+#[test]
+#[should_panic]
+fn test_check_ord_chaos() {
+ let gov = Governor::new();
+ let map = BTreeMap::from([(Governed(1, &gov), ()), (Governed(2, &gov), ())]);
+ gov.flip();
+ map.check();
+}
+
+// Ensures the testing infrastructure doesn't always mind order violations.
+#[test]
+fn test_check_invariants_ord_chaos() {
+ let gov = Governor::new();
+ let map = BTreeMap::from([(Governed(1, &gov), ()), (Governed(2, &gov), ())]);
+ gov.flip();
+ map.check_invariants();
+}
+
+#[test]
+fn test_basic_large() {
+ let mut map = BTreeMap::new();
+ // Miri is too slow
+ let size = if cfg!(miri) { MIN_INSERTS_HEIGHT_2 } else { 10000 };
+ let size = size + (size % 2); // round up to even number
+ assert_eq!(map.len(), 0);
+
+ for i in 0..size {
+ assert_eq!(map.insert(i, 10 * i), None);
+ assert_eq!(map.len(), i + 1);
+ }
+
+ assert_eq!(map.first_key_value(), Some((&0, &0)));
+ assert_eq!(map.last_key_value(), Some((&(size - 1), &(10 * (size - 1)))));
+ assert_eq!(map.first_entry().unwrap().key(), &0);
+ assert_eq!(map.last_entry().unwrap().key(), &(size - 1));
+
+ for i in 0..size {
+ assert_eq!(map.get(&i).unwrap(), &(i * 10));
+ }
+
+ for i in size..size * 2 {
+ assert_eq!(map.get(&i), None);
+ }
+
+ for i in 0..size {
+ assert_eq!(map.insert(i, 100 * i), Some(10 * i));
+ assert_eq!(map.len(), size);
+ }
+
+ for i in 0..size {
+ assert_eq!(map.get(&i).unwrap(), &(i * 100));
+ }
+
+ for i in 0..size / 2 {
+ assert_eq!(map.remove(&(i * 2)), Some(i * 200));
+ assert_eq!(map.len(), size - i - 1);
+ }
+
+ for i in 0..size / 2 {
+ assert_eq!(map.get(&(2 * i)), None);
+ assert_eq!(map.get(&(2 * i + 1)).unwrap(), &(i * 200 + 100));
+ }
+
+ for i in 0..size / 2 {
+ assert_eq!(map.remove(&(2 * i)), None);
+ assert_eq!(map.remove(&(2 * i + 1)), Some(i * 200 + 100));
+ assert_eq!(map.len(), size / 2 - i - 1);
+ }
+ map.check();
+}
+
+#[test]
+fn test_basic_small() {
+ let mut map = BTreeMap::new();
+ // Empty, root is absent (None):
+ assert_eq!(map.remove(&1), None);
+ assert_eq!(map.len(), 0);
+ assert_eq!(map.get(&1), None);
+ assert_eq!(map.get_mut(&1), None);
+ assert_eq!(map.first_key_value(), None);
+ assert_eq!(map.last_key_value(), None);
+ assert_eq!(map.keys().count(), 0);
+ assert_eq!(map.values().count(), 0);
+ assert_eq!(map.range(..).next(), None);
+ assert_eq!(map.range(..1).next(), None);
+ assert_eq!(map.range(1..).next(), None);
+ assert_eq!(map.range(1..=1).next(), None);
+ assert_eq!(map.range(1..2).next(), None);
+ assert_eq!(map.height(), None);
+ assert_eq!(map.insert(1, 1), None);
+ assert_eq!(map.height(), Some(0));
+ map.check();
+
+ // 1 key-value pair:
+ assert_eq!(map.len(), 1);
+ assert_eq!(map.get(&1), Some(&1));
+ assert_eq!(map.get_mut(&1), Some(&mut 1));
+ assert_eq!(map.first_key_value(), Some((&1, &1)));
+ assert_eq!(map.last_key_value(), Some((&1, &1)));
+ assert_eq!(map.keys().collect::<Vec<_>>(), vec![&1]);
+ assert_eq!(map.values().collect::<Vec<_>>(), vec![&1]);
+ assert_eq!(map.insert(1, 2), Some(1));
+ assert_eq!(map.len(), 1);
+ assert_eq!(map.get(&1), Some(&2));
+ assert_eq!(map.get_mut(&1), Some(&mut 2));
+ assert_eq!(map.first_key_value(), Some((&1, &2)));
+ assert_eq!(map.last_key_value(), Some((&1, &2)));
+ assert_eq!(map.keys().collect::<Vec<_>>(), vec![&1]);
+ assert_eq!(map.values().collect::<Vec<_>>(), vec![&2]);
+ assert_eq!(map.insert(2, 4), None);
+ assert_eq!(map.height(), Some(0));
+ map.check();
+
+ // 2 key-value pairs:
+ assert_eq!(map.len(), 2);
+ assert_eq!(map.get(&2), Some(&4));
+ assert_eq!(map.get_mut(&2), Some(&mut 4));
+ assert_eq!(map.first_key_value(), Some((&1, &2)));
+ assert_eq!(map.last_key_value(), Some((&2, &4)));
+ assert_eq!(map.keys().collect::<Vec<_>>(), vec![&1, &2]);
+ assert_eq!(map.values().collect::<Vec<_>>(), vec![&2, &4]);
+ assert_eq!(map.remove(&1), Some(2));
+ assert_eq!(map.height(), Some(0));
+ map.check();
+
+ // 1 key-value pair:
+ assert_eq!(map.len(), 1);
+ assert_eq!(map.get(&1), None);
+ assert_eq!(map.get_mut(&1), None);
+ assert_eq!(map.get(&2), Some(&4));
+ assert_eq!(map.get_mut(&2), Some(&mut 4));
+ assert_eq!(map.first_key_value(), Some((&2, &4)));
+ assert_eq!(map.last_key_value(), Some((&2, &4)));
+ assert_eq!(map.keys().collect::<Vec<_>>(), vec![&2]);
+ assert_eq!(map.values().collect::<Vec<_>>(), vec![&4]);
+ assert_eq!(map.remove(&2), Some(4));
+ assert_eq!(map.height(), Some(0));
+ map.check();
+
+ // Empty but root is owned (Some(...)):
+ assert_eq!(map.len(), 0);
+ assert_eq!(map.get(&1), None);
+ assert_eq!(map.get_mut(&1), None);
+ assert_eq!(map.first_key_value(), None);
+ assert_eq!(map.last_key_value(), None);
+ assert_eq!(map.keys().count(), 0);
+ assert_eq!(map.values().count(), 0);
+ assert_eq!(map.range(..).next(), None);
+ assert_eq!(map.range(..1).next(), None);
+ assert_eq!(map.range(1..).next(), None);
+ assert_eq!(map.range(1..=1).next(), None);
+ assert_eq!(map.range(1..2).next(), None);
+ assert_eq!(map.remove(&1), None);
+ assert_eq!(map.height(), Some(0));
+ map.check();
+}
+
+#[test]
+fn test_iter() {
+ // Miri is too slow
+ let size = if cfg!(miri) { 200 } else { 10000 };
+ let mut map = BTreeMap::from_iter((0..size).map(|i| (i, i)));
+
+ fn test<T>(size: usize, mut iter: T)
+ where
+ T: Iterator<Item = (usize, usize)>,
+ {
+ for i in 0..size {
+ assert_eq!(iter.size_hint(), (size - i, Some(size - i)));
+ assert_eq!(iter.next().unwrap(), (i, i));
+ }
+ assert_eq!(iter.size_hint(), (0, Some(0)));
+ assert_eq!(iter.next(), None);
+ }
+ test(size, map.iter().map(|(&k, &v)| (k, v)));
+ test(size, map.iter_mut().map(|(&k, &mut v)| (k, v)));
+ test(size, map.into_iter());
+}
+
+#[test]
+fn test_iter_rev() {
+ // Miri is too slow
+ let size = if cfg!(miri) { 200 } else { 10000 };
+ let mut map = BTreeMap::from_iter((0..size).map(|i| (i, i)));
+
+ fn test<T>(size: usize, mut iter: T)
+ where
+ T: Iterator<Item = (usize, usize)>,
+ {
+ for i in 0..size {
+ assert_eq!(iter.size_hint(), (size - i, Some(size - i)));
+ assert_eq!(iter.next().unwrap(), (size - i - 1, size - i - 1));
+ }
+ assert_eq!(iter.size_hint(), (0, Some(0)));
+ assert_eq!(iter.next(), None);
+ }
+ test(size, map.iter().rev().map(|(&k, &v)| (k, v)));
+ test(size, map.iter_mut().rev().map(|(&k, &mut v)| (k, v)));
+ test(size, map.into_iter().rev());
+}
+
+// Specifically tests iter_mut's ability to mutate the value of pairs in-line.
+fn do_test_iter_mut_mutation<T>(size: usize)
+where
+ T: Copy + Debug + Ord + TryFrom<usize>,
+ <T as TryFrom<usize>>::Error: Debug,
+{
+ let zero = T::try_from(0).unwrap();
+ let mut map = BTreeMap::from_iter((0..size).map(|i| (T::try_from(i).unwrap(), zero)));
+
+ // Forward and backward iteration sees enough pairs (also tested elsewhere)
+ assert_eq!(map.iter_mut().count(), size);
+ assert_eq!(map.iter_mut().rev().count(), size);
+
+ // Iterate forwards, trying to mutate to unique values
+ for (i, (k, v)) in map.iter_mut().enumerate() {
+ assert_eq!(*k, T::try_from(i).unwrap());
+ assert_eq!(*v, zero);
+ *v = T::try_from(i + 1).unwrap();
+ }
+
+ // Iterate backwards, checking that mutations succeeded and trying to mutate again
+ for (i, (k, v)) in map.iter_mut().rev().enumerate() {
+ assert_eq!(*k, T::try_from(size - i - 1).unwrap());
+ assert_eq!(*v, T::try_from(size - i).unwrap());
+ *v = T::try_from(2 * size - i).unwrap();
+ }
+
+ // Check that backward mutations succeeded
+ for (i, (k, v)) in map.iter_mut().enumerate() {
+ assert_eq!(*k, T::try_from(i).unwrap());
+ assert_eq!(*v, T::try_from(size + i + 1).unwrap());
+ }
+ map.check();
+}
+
+#[derive(Clone, Copy, Debug, Eq, PartialEq, PartialOrd, Ord)]
+#[repr(align(32))]
+struct Align32(usize);
+
+impl TryFrom<usize> for Align32 {
+ type Error = ();
+
+ fn try_from(s: usize) -> Result<Align32, ()> {
+ Ok(Align32(s))
+ }
+}
+
+#[test]
+fn test_iter_mut_mutation() {
+ // Check many alignments and trees with roots at various heights.
+ do_test_iter_mut_mutation::<u8>(0);
+ do_test_iter_mut_mutation::<u8>(1);
+ do_test_iter_mut_mutation::<u8>(MIN_INSERTS_HEIGHT_1);
+ do_test_iter_mut_mutation::<u8>(MIN_INSERTS_HEIGHT_2);
+ do_test_iter_mut_mutation::<u16>(1);
+ do_test_iter_mut_mutation::<u16>(MIN_INSERTS_HEIGHT_1);
+ do_test_iter_mut_mutation::<u16>(MIN_INSERTS_HEIGHT_2);
+ do_test_iter_mut_mutation::<u32>(1);
+ do_test_iter_mut_mutation::<u32>(MIN_INSERTS_HEIGHT_1);
+ do_test_iter_mut_mutation::<u32>(MIN_INSERTS_HEIGHT_2);
+ do_test_iter_mut_mutation::<u64>(1);
+ do_test_iter_mut_mutation::<u64>(MIN_INSERTS_HEIGHT_1);
+ do_test_iter_mut_mutation::<u64>(MIN_INSERTS_HEIGHT_2);
+ do_test_iter_mut_mutation::<u128>(1);
+ do_test_iter_mut_mutation::<u128>(MIN_INSERTS_HEIGHT_1);
+ do_test_iter_mut_mutation::<u128>(MIN_INSERTS_HEIGHT_2);
+ do_test_iter_mut_mutation::<Align32>(1);
+ do_test_iter_mut_mutation::<Align32>(MIN_INSERTS_HEIGHT_1);
+ do_test_iter_mut_mutation::<Align32>(MIN_INSERTS_HEIGHT_2);
+}
+
+#[test]
+fn test_values_mut() {
+ let mut a = BTreeMap::from_iter((0..MIN_INSERTS_HEIGHT_2).map(|i| (i, i)));
+ test_all_refs(&mut 13, a.values_mut());
+ a.check();
+}
+
+#[test]
+fn test_values_mut_mutation() {
+ let mut a = BTreeMap::new();
+ a.insert(1, String::from("hello"));
+ a.insert(2, String::from("goodbye"));
+
+ for value in a.values_mut() {
+ value.push_str("!");
+ }
+
+ let values = Vec::from_iter(a.values().cloned());
+ assert_eq!(values, [String::from("hello!"), String::from("goodbye!")]);
+ a.check();
+}
+
+#[test]
+fn test_iter_entering_root_twice() {
+ let mut map = BTreeMap::from([(0, 0), (1, 1)]);
+ let mut it = map.iter_mut();
+ let front = it.next().unwrap();
+ let back = it.next_back().unwrap();
+ assert_eq!(front, (&0, &mut 0));
+ assert_eq!(back, (&1, &mut 1));
+ *front.1 = 24;
+ *back.1 = 42;
+ assert_eq!(front, (&0, &mut 24));
+ assert_eq!(back, (&1, &mut 42));
+ assert_eq!(it.next(), None);
+ assert_eq!(it.next_back(), None);
+ map.check();
+}
+
+#[test]
+fn test_iter_descending_to_same_node_twice() {
+ let mut map = BTreeMap::from_iter((0..MIN_INSERTS_HEIGHT_1).map(|i| (i, i)));
+ let mut it = map.iter_mut();
+ // Descend into first child.
+ let front = it.next().unwrap();
+ // Descend into first child again, after running through second child.
+ while it.next_back().is_some() {}
+ // Check immutable access.
+ assert_eq!(front, (&0, &mut 0));
+ // Perform mutable access.
+ *front.1 = 42;
+ map.check();
+}
+
+#[test]
+fn test_iter_mixed() {
+ // Miri is too slow
+ let size = if cfg!(miri) { 200 } else { 10000 };
+
+ let mut map = BTreeMap::from_iter((0..size).map(|i| (i, i)));
+
+ fn test<T>(size: usize, mut iter: T)
+ where
+ T: Iterator<Item = (usize, usize)> + DoubleEndedIterator,
+ {
+ for i in 0..size / 4 {
+ assert_eq!(iter.size_hint(), (size - i * 2, Some(size - i * 2)));
+ assert_eq!(iter.next().unwrap(), (i, i));
+ assert_eq!(iter.next_back().unwrap(), (size - i - 1, size - i - 1));
+ }
+ for i in size / 4..size * 3 / 4 {
+ assert_eq!(iter.size_hint(), (size * 3 / 4 - i, Some(size * 3 / 4 - i)));
+ assert_eq!(iter.next().unwrap(), (i, i));
+ }
+ assert_eq!(iter.size_hint(), (0, Some(0)));
+ assert_eq!(iter.next(), None);
+ }
+ test(size, map.iter().map(|(&k, &v)| (k, v)));
+ test(size, map.iter_mut().map(|(&k, &mut v)| (k, v)));
+ test(size, map.into_iter());
+}
+
+#[test]
+fn test_iter_min_max() {
+ let mut a = BTreeMap::new();
+ assert_eq!(a.iter().min(), None);
+ assert_eq!(a.iter().max(), None);
+ assert_eq!(a.iter_mut().min(), None);
+ assert_eq!(a.iter_mut().max(), None);
+ assert_eq!(a.range(..).min(), None);
+ assert_eq!(a.range(..).max(), None);
+ assert_eq!(a.range_mut(..).min(), None);
+ assert_eq!(a.range_mut(..).max(), None);
+ assert_eq!(a.keys().min(), None);
+ assert_eq!(a.keys().max(), None);
+ assert_eq!(a.values().min(), None);
+ assert_eq!(a.values().max(), None);
+ assert_eq!(a.values_mut().min(), None);
+ assert_eq!(a.values_mut().max(), None);
+ a.insert(1, 42);
+ a.insert(2, 24);
+ assert_eq!(a.iter().min(), Some((&1, &42)));
+ assert_eq!(a.iter().max(), Some((&2, &24)));
+ assert_eq!(a.iter_mut().min(), Some((&1, &mut 42)));
+ assert_eq!(a.iter_mut().max(), Some((&2, &mut 24)));
+ assert_eq!(a.range(..).min(), Some((&1, &42)));
+ assert_eq!(a.range(..).max(), Some((&2, &24)));
+ assert_eq!(a.range_mut(..).min(), Some((&1, &mut 42)));
+ assert_eq!(a.range_mut(..).max(), Some((&2, &mut 24)));
+ assert_eq!(a.keys().min(), Some(&1));
+ assert_eq!(a.keys().max(), Some(&2));
+ assert_eq!(a.values().min(), Some(&24));
+ assert_eq!(a.values().max(), Some(&42));
+ assert_eq!(a.values_mut().min(), Some(&mut 24));
+ assert_eq!(a.values_mut().max(), Some(&mut 42));
+ a.check();
+}
+
+fn range_keys(map: &BTreeMap<i32, i32>, range: impl RangeBounds<i32>) -> Vec<i32> {
+ Vec::from_iter(map.range(range).map(|(&k, &v)| {
+ assert_eq!(k, v);
+ k
+ }))
+}
+
+#[test]
+fn test_range_small() {
+ let size = 4;
+
+ let all = Vec::from_iter(1..=size);
+ let (first, last) = (vec![all[0]], vec![all[size as usize - 1]]);
+ let map = BTreeMap::from_iter(all.iter().copied().map(|i| (i, i)));
+
+ assert_eq!(range_keys(&map, (Excluded(0), Excluded(size + 1))), all);
+ assert_eq!(range_keys(&map, (Excluded(0), Included(size + 1))), all);
+ assert_eq!(range_keys(&map, (Excluded(0), Included(size))), all);
+ assert_eq!(range_keys(&map, (Excluded(0), Unbounded)), all);
+ assert_eq!(range_keys(&map, (Included(0), Excluded(size + 1))), all);
+ assert_eq!(range_keys(&map, (Included(0), Included(size + 1))), all);
+ assert_eq!(range_keys(&map, (Included(0), Included(size))), all);
+ assert_eq!(range_keys(&map, (Included(0), Unbounded)), all);
+ assert_eq!(range_keys(&map, (Included(1), Excluded(size + 1))), all);
+ assert_eq!(range_keys(&map, (Included(1), Included(size + 1))), all);
+ assert_eq!(range_keys(&map, (Included(1), Included(size))), all);
+ assert_eq!(range_keys(&map, (Included(1), Unbounded)), all);
+ assert_eq!(range_keys(&map, (Unbounded, Excluded(size + 1))), all);
+ assert_eq!(range_keys(&map, (Unbounded, Included(size + 1))), all);
+ assert_eq!(range_keys(&map, (Unbounded, Included(size))), all);
+ assert_eq!(range_keys(&map, ..), all);
+
+ assert_eq!(range_keys(&map, (Excluded(0), Excluded(1))), vec![]);
+ assert_eq!(range_keys(&map, (Excluded(0), Included(0))), vec![]);
+ assert_eq!(range_keys(&map, (Included(0), Included(0))), vec![]);
+ assert_eq!(range_keys(&map, (Included(0), Excluded(1))), vec![]);
+ assert_eq!(range_keys(&map, (Unbounded, Excluded(1))), vec![]);
+ assert_eq!(range_keys(&map, (Unbounded, Included(0))), vec![]);
+ assert_eq!(range_keys(&map, (Excluded(0), Excluded(2))), first);
+ assert_eq!(range_keys(&map, (Excluded(0), Included(1))), first);
+ assert_eq!(range_keys(&map, (Included(0), Excluded(2))), first);
+ assert_eq!(range_keys(&map, (Included(0), Included(1))), first);
+ assert_eq!(range_keys(&map, (Included(1), Excluded(2))), first);
+ assert_eq!(range_keys(&map, (Included(1), Included(1))), first);
+ assert_eq!(range_keys(&map, (Unbounded, Excluded(2))), first);
+ assert_eq!(range_keys(&map, (Unbounded, Included(1))), first);
+ assert_eq!(range_keys(&map, (Excluded(size - 1), Excluded(size + 1))), last);
+ assert_eq!(range_keys(&map, (Excluded(size - 1), Included(size + 1))), last);
+ assert_eq!(range_keys(&map, (Excluded(size - 1), Included(size))), last);
+ assert_eq!(range_keys(&map, (Excluded(size - 1), Unbounded)), last);
+ assert_eq!(range_keys(&map, (Included(size), Excluded(size + 1))), last);
+ assert_eq!(range_keys(&map, (Included(size), Included(size + 1))), last);
+ assert_eq!(range_keys(&map, (Included(size), Included(size))), last);
+ assert_eq!(range_keys(&map, (Included(size), Unbounded)), last);
+ assert_eq!(range_keys(&map, (Excluded(size), Excluded(size + 1))), vec![]);
+ assert_eq!(range_keys(&map, (Excluded(size), Included(size))), vec![]);
+ assert_eq!(range_keys(&map, (Excluded(size), Unbounded)), vec![]);
+ assert_eq!(range_keys(&map, (Included(size + 1), Excluded(size + 1))), vec![]);
+ assert_eq!(range_keys(&map, (Included(size + 1), Included(size + 1))), vec![]);
+ assert_eq!(range_keys(&map, (Included(size + 1), Unbounded)), vec![]);
+
+ assert_eq!(range_keys(&map, ..3), vec![1, 2]);
+ assert_eq!(range_keys(&map, 3..), vec![3, 4]);
+ assert_eq!(range_keys(&map, 2..=3), vec![2, 3]);
+}
+
+#[test]
+fn test_range_height_1() {
+ // Tests tree with a root and 2 leaves. We test around the middle of the
+ // keys because one of those is the single key in the root node.
+ let map = BTreeMap::from_iter((0..MIN_INSERTS_HEIGHT_1 as i32).map(|i| (i, i)));
+ let middle = MIN_INSERTS_HEIGHT_1 as i32 / 2;
+ for root in middle - 2..=middle + 2 {
+ assert_eq!(range_keys(&map, (Excluded(root), Excluded(root + 1))), vec![]);
+ assert_eq!(range_keys(&map, (Excluded(root), Included(root + 1))), vec![root + 1]);
+ assert_eq!(range_keys(&map, (Included(root), Excluded(root + 1))), vec![root]);
+ assert_eq!(range_keys(&map, (Included(root), Included(root + 1))), vec![root, root + 1]);
+
+ assert_eq!(range_keys(&map, (Excluded(root - 1), Excluded(root))), vec![]);
+ assert_eq!(range_keys(&map, (Included(root - 1), Excluded(root))), vec![root - 1]);
+ assert_eq!(range_keys(&map, (Excluded(root - 1), Included(root))), vec![root]);
+ assert_eq!(range_keys(&map, (Included(root - 1), Included(root))), vec![root - 1, root]);
+ }
+}
+
+#[test]
+fn test_range_large() {
+ let size = 200;
+
+ let all = Vec::from_iter(1..=size);
+ let (first, last) = (vec![all[0]], vec![all[size as usize - 1]]);
+ let map = BTreeMap::from_iter(all.iter().copied().map(|i| (i, i)));
+
+ assert_eq!(range_keys(&map, (Excluded(0), Excluded(size + 1))), all);
+ assert_eq!(range_keys(&map, (Excluded(0), Included(size + 1))), all);
+ assert_eq!(range_keys(&map, (Excluded(0), Included(size))), all);
+ assert_eq!(range_keys(&map, (Excluded(0), Unbounded)), all);
+ assert_eq!(range_keys(&map, (Included(0), Excluded(size + 1))), all);
+ assert_eq!(range_keys(&map, (Included(0), Included(size + 1))), all);
+ assert_eq!(range_keys(&map, (Included(0), Included(size))), all);
+ assert_eq!(range_keys(&map, (Included(0), Unbounded)), all);
+ assert_eq!(range_keys(&map, (Included(1), Excluded(size + 1))), all);
+ assert_eq!(range_keys(&map, (Included(1), Included(size + 1))), all);
+ assert_eq!(range_keys(&map, (Included(1), Included(size))), all);
+ assert_eq!(range_keys(&map, (Included(1), Unbounded)), all);
+ assert_eq!(range_keys(&map, (Unbounded, Excluded(size + 1))), all);
+ assert_eq!(range_keys(&map, (Unbounded, Included(size + 1))), all);
+ assert_eq!(range_keys(&map, (Unbounded, Included(size))), all);
+ assert_eq!(range_keys(&map, ..), all);
+
+ assert_eq!(range_keys(&map, (Excluded(0), Excluded(1))), vec![]);
+ assert_eq!(range_keys(&map, (Excluded(0), Included(0))), vec![]);
+ assert_eq!(range_keys(&map, (Included(0), Included(0))), vec![]);
+ assert_eq!(range_keys(&map, (Included(0), Excluded(1))), vec![]);
+ assert_eq!(range_keys(&map, (Unbounded, Excluded(1))), vec![]);
+ assert_eq!(range_keys(&map, (Unbounded, Included(0))), vec![]);
+ assert_eq!(range_keys(&map, (Excluded(0), Excluded(2))), first);
+ assert_eq!(range_keys(&map, (Excluded(0), Included(1))), first);
+ assert_eq!(range_keys(&map, (Included(0), Excluded(2))), first);
+ assert_eq!(range_keys(&map, (Included(0), Included(1))), first);
+ assert_eq!(range_keys(&map, (Included(1), Excluded(2))), first);
+ assert_eq!(range_keys(&map, (Included(1), Included(1))), first);
+ assert_eq!(range_keys(&map, (Unbounded, Excluded(2))), first);
+ assert_eq!(range_keys(&map, (Unbounded, Included(1))), first);
+ assert_eq!(range_keys(&map, (Excluded(size - 1), Excluded(size + 1))), last);
+ assert_eq!(range_keys(&map, (Excluded(size - 1), Included(size + 1))), last);
+ assert_eq!(range_keys(&map, (Excluded(size - 1), Included(size))), last);
+ assert_eq!(range_keys(&map, (Excluded(size - 1), Unbounded)), last);
+ assert_eq!(range_keys(&map, (Included(size), Excluded(size + 1))), last);
+ assert_eq!(range_keys(&map, (Included(size), Included(size + 1))), last);
+ assert_eq!(range_keys(&map, (Included(size), Included(size))), last);
+ assert_eq!(range_keys(&map, (Included(size), Unbounded)), last);
+ assert_eq!(range_keys(&map, (Excluded(size), Excluded(size + 1))), vec![]);
+ assert_eq!(range_keys(&map, (Excluded(size), Included(size))), vec![]);
+ assert_eq!(range_keys(&map, (Excluded(size), Unbounded)), vec![]);
+ assert_eq!(range_keys(&map, (Included(size + 1), Excluded(size + 1))), vec![]);
+ assert_eq!(range_keys(&map, (Included(size + 1), Included(size + 1))), vec![]);
+ assert_eq!(range_keys(&map, (Included(size + 1), Unbounded)), vec![]);
+
+ fn check<'a, L, R>(lhs: L, rhs: R)
+ where
+ L: IntoIterator<Item = (&'a i32, &'a i32)>,
+ R: IntoIterator<Item = (&'a i32, &'a i32)>,
+ {
+ assert_eq!(Vec::from_iter(lhs), Vec::from_iter(rhs));
+ }
+
+ check(map.range(..=100), map.range(..101));
+ check(map.range(5..=8), vec![(&5, &5), (&6, &6), (&7, &7), (&8, &8)]);
+ check(map.range(-1..=2), vec![(&1, &1), (&2, &2)]);
+}
+
+#[test]
+fn test_range_inclusive_max_value() {
+ let max = usize::MAX;
+ let map = BTreeMap::from([(max, 0)]);
+ assert_eq!(Vec::from_iter(map.range(max..=max)), &[(&max, &0)]);
+}
+
+#[test]
+fn test_range_equal_empty_cases() {
+ let map = BTreeMap::from_iter((0..5).map(|i| (i, i)));
+ assert_eq!(map.range((Included(2), Excluded(2))).next(), None);
+ assert_eq!(map.range((Excluded(2), Included(2))).next(), None);
+}
+
+#[test]
+#[should_panic]
+fn test_range_equal_excluded() {
+ let map = BTreeMap::from_iter((0..5).map(|i| (i, i)));
+ let _ = map.range((Excluded(2), Excluded(2)));
+}
+
+#[test]
+#[should_panic]
+fn test_range_backwards_1() {
+ let map = BTreeMap::from_iter((0..5).map(|i| (i, i)));
+ let _ = map.range((Included(3), Included(2)));
+}
+
+#[test]
+#[should_panic]
+fn test_range_backwards_2() {
+ let map = BTreeMap::from_iter((0..5).map(|i| (i, i)));
+ let _ = map.range((Included(3), Excluded(2)));
+}
+
+#[test]
+#[should_panic]
+fn test_range_backwards_3() {
+ let map = BTreeMap::from_iter((0..5).map(|i| (i, i)));
+ let _ = map.range((Excluded(3), Included(2)));
+}
+
+#[test]
+#[should_panic]
+fn test_range_backwards_4() {
+ let map = BTreeMap::from_iter((0..5).map(|i| (i, i)));
+ let _ = map.range((Excluded(3), Excluded(2)));
+}
+
+#[test]
+#[should_panic]
+fn test_range_finding_ill_order_in_map() {
+ let mut map = BTreeMap::new();
+ map.insert(Cyclic3::B, ());
+ // Lacking static_assert, call `range` conditionally, to emphasise that
+ // we cause a different panic than `test_range_backwards_1` does.
+ // A more refined `should_panic` would be welcome.
+ if Cyclic3::C < Cyclic3::A {
+ let _ = map.range(Cyclic3::C..=Cyclic3::A);
+ }
+}
+
+#[test]
+#[should_panic]
+fn test_range_finding_ill_order_in_range_ord() {
+ // Has proper order the first time asked, then flips around.
+ struct EvilTwin(i32);
+
+ impl PartialOrd for EvilTwin {
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+ }
+
+ static COMPARES: AtomicUsize = AtomicUsize::new(0);
+ impl Ord for EvilTwin {
+ fn cmp(&self, other: &Self) -> Ordering {
+ let ord = self.0.cmp(&other.0);
+ if COMPARES.fetch_add(1, SeqCst) > 0 { ord.reverse() } else { ord }
+ }
+ }
+
+ impl PartialEq for EvilTwin {
+ fn eq(&self, other: &Self) -> bool {
+ self.0.eq(&other.0)
+ }
+ }
+
+ impl Eq for EvilTwin {}
+
+ #[derive(PartialEq, Eq, PartialOrd, Ord)]
+ struct CompositeKey(i32, EvilTwin);
+
+ impl Borrow<EvilTwin> for CompositeKey {
+ fn borrow(&self) -> &EvilTwin {
+ &self.1
+ }
+ }
+
+ let map = BTreeMap::from_iter((0..12).map(|i| (CompositeKey(i, EvilTwin(i)), ())));
+ let _ = map.range(EvilTwin(5)..=EvilTwin(7));
+}
+
+#[test]
+fn test_range_1000() {
+ // Miri is too slow
+ let size = if cfg!(miri) { MIN_INSERTS_HEIGHT_2 as u32 } else { 1000 };
+ let map = BTreeMap::from_iter((0..size).map(|i| (i, i)));
+
+ fn test(map: &BTreeMap<u32, u32>, size: u32, min: Bound<&u32>, max: Bound<&u32>) {
+ let mut kvs = map.range((min, max)).map(|(&k, &v)| (k, v));
+ let mut pairs = (0..size).map(|i| (i, i));
+
+ for (kv, pair) in kvs.by_ref().zip(pairs.by_ref()) {
+ assert_eq!(kv, pair);
+ }
+ assert_eq!(kvs.next(), None);
+ assert_eq!(pairs.next(), None);
+ }
+ test(&map, size, Included(&0), Excluded(&size));
+ test(&map, size, Unbounded, Excluded(&size));
+ test(&map, size, Included(&0), Included(&(size - 1)));
+ test(&map, size, Unbounded, Included(&(size - 1)));
+ test(&map, size, Included(&0), Unbounded);
+ test(&map, size, Unbounded, Unbounded);
+}
+
+#[test]
+fn test_range_borrowed_key() {
+ let mut map = BTreeMap::new();
+ map.insert("aardvark".to_string(), 1);
+ map.insert("baboon".to_string(), 2);
+ map.insert("coyote".to_string(), 3);
+ map.insert("dingo".to_string(), 4);
+ // NOTE: would like to use simply "b".."d" here...
+ let mut iter = map.range::<str, _>((Included("b"), Excluded("d")));
+ assert_eq!(iter.next(), Some((&"baboon".to_string(), &2)));
+ assert_eq!(iter.next(), Some((&"coyote".to_string(), &3)));
+ assert_eq!(iter.next(), None);
+}
+
+#[test]
+fn test_range() {
+ let size = 200;
+ // Miri is too slow
+ let step = if cfg!(miri) { 66 } else { 1 };
+ let map = BTreeMap::from_iter((0..size).map(|i| (i, i)));
+
+ for i in (0..size).step_by(step) {
+ for j in (i..size).step_by(step) {
+ let mut kvs = map.range((Included(&i), Included(&j))).map(|(&k, &v)| (k, v));
+ let mut pairs = (i..=j).map(|i| (i, i));
+
+ for (kv, pair) in kvs.by_ref().zip(pairs.by_ref()) {
+ assert_eq!(kv, pair);
+ }
+ assert_eq!(kvs.next(), None);
+ assert_eq!(pairs.next(), None);
+ }
+ }
+}
+
+#[test]
+fn test_range_mut() {
+ let size = 200;
+ // Miri is too slow
+ let step = if cfg!(miri) { 66 } else { 1 };
+ let mut map = BTreeMap::from_iter((0..size).map(|i| (i, i)));
+
+ for i in (0..size).step_by(step) {
+ for j in (i..size).step_by(step) {
+ let mut kvs = map.range_mut((Included(&i), Included(&j))).map(|(&k, &mut v)| (k, v));
+ let mut pairs = (i..=j).map(|i| (i, i));
+
+ for (kv, pair) in kvs.by_ref().zip(pairs.by_ref()) {
+ assert_eq!(kv, pair);
+ }
+ assert_eq!(kvs.next(), None);
+ assert_eq!(pairs.next(), None);
+ }
+ }
+ map.check();
+}
+
+#[should_panic(expected = "range start is greater than range end in BTreeMap")]
+#[test]
+fn test_range_panic_1() {
+ let mut map = BTreeMap::new();
+ map.insert(3, "a");
+ map.insert(5, "b");
+ map.insert(8, "c");
+
+ let _invalid_range = map.range((Included(&8), Included(&3)));
+}
+
+#[should_panic(expected = "range start and end are equal and excluded in BTreeMap")]
+#[test]
+fn test_range_panic_2() {
+ let mut map = BTreeMap::new();
+ map.insert(3, "a");
+ map.insert(5, "b");
+ map.insert(8, "c");
+
+ let _invalid_range = map.range((Excluded(&5), Excluded(&5)));
+}
+
+#[should_panic(expected = "range start and end are equal and excluded in BTreeMap")]
+#[test]
+fn test_range_panic_3() {
+ let mut map: BTreeMap<i32, ()> = BTreeMap::new();
+ map.insert(3, ());
+ map.insert(5, ());
+ map.insert(8, ());
+
+ let _invalid_range = map.range((Excluded(&5), Excluded(&5)));
+}
+
+#[test]
+fn test_retain() {
+ let mut map = BTreeMap::from_iter((0..100).map(|x| (x, x * 10)));
+
+ map.retain(|&k, _| k % 2 == 0);
+ assert_eq!(map.len(), 50);
+ assert_eq!(map[&2], 20);
+ assert_eq!(map[&4], 40);
+ assert_eq!(map[&6], 60);
+}
+
+mod test_drain_filter {
+ use super::*;
+
+ #[test]
+ fn empty() {
+ let mut map: BTreeMap<i32, i32> = BTreeMap::new();
+ map.drain_filter(|_, _| unreachable!("there's nothing to decide on"));
+ assert_eq!(map.height(), None);
+ map.check();
+ }
+
+ // Explicitly consumes the iterator, where most test cases drop it instantly.
+ #[test]
+ fn consumed_keeping_all() {
+ let pairs = (0..3).map(|i| (i, i));
+ let mut map = BTreeMap::from_iter(pairs);
+ assert!(map.drain_filter(|_, _| false).eq(iter::empty()));
+ map.check();
+ }
+
+ // Explicitly consumes the iterator, where most test cases drop it instantly.
+ #[test]
+ fn consumed_removing_all() {
+ let pairs = (0..3).map(|i| (i, i));
+ let mut map = BTreeMap::from_iter(pairs.clone());
+ assert!(map.drain_filter(|_, _| true).eq(pairs));
+ assert!(map.is_empty());
+ map.check();
+ }
+
+ // Explicitly consumes the iterator and modifies values through it.
+ #[test]
+ fn mutating_and_keeping() {
+ let pairs = (0..3).map(|i| (i, i));
+ let mut map = BTreeMap::from_iter(pairs);
+ assert!(
+ map.drain_filter(|_, v| {
+ *v += 6;
+ false
+ })
+ .eq(iter::empty())
+ );
+ assert!(map.keys().copied().eq(0..3));
+ assert!(map.values().copied().eq(6..9));
+ map.check();
+ }
+
+ // Explicitly consumes the iterator and modifies values through it.
+ #[test]
+ fn mutating_and_removing() {
+ let pairs = (0..3).map(|i| (i, i));
+ let mut map = BTreeMap::from_iter(pairs);
+ assert!(
+ map.drain_filter(|_, v| {
+ *v += 6;
+ true
+ })
+ .eq((0..3).map(|i| (i, i + 6)))
+ );
+ assert!(map.is_empty());
+ map.check();
+ }
+
+ #[test]
+ fn underfull_keeping_all() {
+ let pairs = (0..3).map(|i| (i, i));
+ let mut map = BTreeMap::from_iter(pairs);
+ map.drain_filter(|_, _| false);
+ assert!(map.keys().copied().eq(0..3));
+ map.check();
+ }
+
+ #[test]
+ fn underfull_removing_one() {
+ let pairs = (0..3).map(|i| (i, i));
+ for doomed in 0..3 {
+ let mut map = BTreeMap::from_iter(pairs.clone());
+ map.drain_filter(|i, _| *i == doomed);
+ assert_eq!(map.len(), 2);
+ map.check();
+ }
+ }
+
+ #[test]
+ fn underfull_keeping_one() {
+ let pairs = (0..3).map(|i| (i, i));
+ for sacred in 0..3 {
+ let mut map = BTreeMap::from_iter(pairs.clone());
+ map.drain_filter(|i, _| *i != sacred);
+ assert!(map.keys().copied().eq(sacred..=sacred));
+ map.check();
+ }
+ }
+
+ #[test]
+ fn underfull_removing_all() {
+ let pairs = (0..3).map(|i| (i, i));
+ let mut map = BTreeMap::from_iter(pairs);
+ map.drain_filter(|_, _| true);
+ assert!(map.is_empty());
+ map.check();
+ }
+
+ #[test]
+ fn height_0_keeping_all() {
+ let pairs = (0..node::CAPACITY).map(|i| (i, i));
+ let mut map = BTreeMap::from_iter(pairs);
+ map.drain_filter(|_, _| false);
+ assert!(map.keys().copied().eq(0..node::CAPACITY));
+ map.check();
+ }
+
+ #[test]
+ fn height_0_removing_one() {
+ let pairs = (0..node::CAPACITY).map(|i| (i, i));
+ for doomed in 0..node::CAPACITY {
+ let mut map = BTreeMap::from_iter(pairs.clone());
+ map.drain_filter(|i, _| *i == doomed);
+ assert_eq!(map.len(), node::CAPACITY - 1);
+ map.check();
+ }
+ }
+
+ #[test]
+ fn height_0_keeping_one() {
+ let pairs = (0..node::CAPACITY).map(|i| (i, i));
+ for sacred in 0..node::CAPACITY {
+ let mut map = BTreeMap::from_iter(pairs.clone());
+ map.drain_filter(|i, _| *i != sacred);
+ assert!(map.keys().copied().eq(sacred..=sacred));
+ map.check();
+ }
+ }
+
+ #[test]
+ fn height_0_removing_all() {
+ let pairs = (0..node::CAPACITY).map(|i| (i, i));
+ let mut map = BTreeMap::from_iter(pairs);
+ map.drain_filter(|_, _| true);
+ assert!(map.is_empty());
+ map.check();
+ }
+
+ #[test]
+ fn height_0_keeping_half() {
+ let mut map = BTreeMap::from_iter((0..16).map(|i| (i, i)));
+ assert_eq!(map.drain_filter(|i, _| *i % 2 == 0).count(), 8);
+ assert_eq!(map.len(), 8);
+ map.check();
+ }
+
+ #[test]
+ fn height_1_removing_all() {
+ let pairs = (0..MIN_INSERTS_HEIGHT_1).map(|i| (i, i));
+ let mut map = BTreeMap::from_iter(pairs);
+ map.drain_filter(|_, _| true);
+ assert!(map.is_empty());
+ map.check();
+ }
+
+ #[test]
+ fn height_1_removing_one() {
+ let pairs = (0..MIN_INSERTS_HEIGHT_1).map(|i| (i, i));
+ for doomed in 0..MIN_INSERTS_HEIGHT_1 {
+ let mut map = BTreeMap::from_iter(pairs.clone());
+ map.drain_filter(|i, _| *i == doomed);
+ assert_eq!(map.len(), MIN_INSERTS_HEIGHT_1 - 1);
+ map.check();
+ }
+ }
+
+ #[test]
+ fn height_1_keeping_one() {
+ let pairs = (0..MIN_INSERTS_HEIGHT_1).map(|i| (i, i));
+ for sacred in 0..MIN_INSERTS_HEIGHT_1 {
+ let mut map = BTreeMap::from_iter(pairs.clone());
+ map.drain_filter(|i, _| *i != sacred);
+ assert!(map.keys().copied().eq(sacred..=sacred));
+ map.check();
+ }
+ }
+
+ #[test]
+ fn height_2_removing_one() {
+ let pairs = (0..MIN_INSERTS_HEIGHT_2).map(|i| (i, i));
+ for doomed in (0..MIN_INSERTS_HEIGHT_2).step_by(12) {
+ let mut map = BTreeMap::from_iter(pairs.clone());
+ map.drain_filter(|i, _| *i == doomed);
+ assert_eq!(map.len(), MIN_INSERTS_HEIGHT_2 - 1);
+ map.check();
+ }
+ }
+
+ #[test]
+ fn height_2_keeping_one() {
+ let pairs = (0..MIN_INSERTS_HEIGHT_2).map(|i| (i, i));
+ for sacred in (0..MIN_INSERTS_HEIGHT_2).step_by(12) {
+ let mut map = BTreeMap::from_iter(pairs.clone());
+ map.drain_filter(|i, _| *i != sacred);
+ assert!(map.keys().copied().eq(sacred..=sacred));
+ map.check();
+ }
+ }
+
+ #[test]
+ fn height_2_removing_all() {
+ let pairs = (0..MIN_INSERTS_HEIGHT_2).map(|i| (i, i));
+ let mut map = BTreeMap::from_iter(pairs);
+ map.drain_filter(|_, _| true);
+ assert!(map.is_empty());
+ map.check();
+ }
+
+ #[test]
+ fn drop_panic_leak() {
+ let a = CrashTestDummy::new(0);
+ let b = CrashTestDummy::new(1);
+ let c = CrashTestDummy::new(2);
+ let mut map = BTreeMap::new();
+ map.insert(a.spawn(Panic::Never), ());
+ map.insert(b.spawn(Panic::InDrop), ());
+ map.insert(c.spawn(Panic::Never), ());
+
+ catch_unwind(move || drop(map.drain_filter(|dummy, _| dummy.query(true)))).unwrap_err();
+
+ assert_eq!(a.queried(), 1);
+ assert_eq!(b.queried(), 1);
+ assert_eq!(c.queried(), 0);
+ assert_eq!(a.dropped(), 1);
+ assert_eq!(b.dropped(), 1);
+ assert_eq!(c.dropped(), 1);
+ }
+
+ #[test]
+ fn pred_panic_leak() {
+ let a = CrashTestDummy::new(0);
+ let b = CrashTestDummy::new(1);
+ let c = CrashTestDummy::new(2);
+ let mut map = BTreeMap::new();
+ map.insert(a.spawn(Panic::Never), ());
+ map.insert(b.spawn(Panic::InQuery), ());
+ map.insert(c.spawn(Panic::InQuery), ());
+
+ catch_unwind(AssertUnwindSafe(|| drop(map.drain_filter(|dummy, _| dummy.query(true)))))
+ .unwrap_err();
+
+ assert_eq!(a.queried(), 1);
+ assert_eq!(b.queried(), 1);
+ assert_eq!(c.queried(), 0);
+ assert_eq!(a.dropped(), 1);
+ assert_eq!(b.dropped(), 0);
+ assert_eq!(c.dropped(), 0);
+ assert_eq!(map.len(), 2);
+ assert_eq!(map.first_entry().unwrap().key().id(), 1);
+ assert_eq!(map.last_entry().unwrap().key().id(), 2);
+ map.check();
+ }
+
+ // Same as above, but attempt to use the iterator again after the panic in the predicate
+ #[test]
+ fn pred_panic_reuse() {
+ let a = CrashTestDummy::new(0);
+ let b = CrashTestDummy::new(1);
+ let c = CrashTestDummy::new(2);
+ let mut map = BTreeMap::new();
+ map.insert(a.spawn(Panic::Never), ());
+ map.insert(b.spawn(Panic::InQuery), ());
+ map.insert(c.spawn(Panic::InQuery), ());
+
+ {
+ let mut it = map.drain_filter(|dummy, _| dummy.query(true));
+ catch_unwind(AssertUnwindSafe(|| while it.next().is_some() {})).unwrap_err();
+ // Iterator behaviour after a panic is explicitly unspecified,
+ // so this is just the current implementation:
+ let result = catch_unwind(AssertUnwindSafe(|| it.next()));
+ assert!(matches!(result, Ok(None)));
+ }
+
+ assert_eq!(a.queried(), 1);
+ assert_eq!(b.queried(), 1);
+ assert_eq!(c.queried(), 0);
+ assert_eq!(a.dropped(), 1);
+ assert_eq!(b.dropped(), 0);
+ assert_eq!(c.dropped(), 0);
+ assert_eq!(map.len(), 2);
+ assert_eq!(map.first_entry().unwrap().key().id(), 1);
+ assert_eq!(map.last_entry().unwrap().key().id(), 2);
+ map.check();
+ }
+}
+
+#[test]
+fn test_borrow() {
+ // make sure these compile -- using the Borrow trait
+ {
+ let mut map = BTreeMap::new();
+ map.insert("0".to_string(), 1);
+ assert_eq!(map["0"], 1);
+ }
+
+ {
+ let mut map = BTreeMap::new();
+ map.insert(Box::new(0), 1);
+ assert_eq!(map[&0], 1);
+ }
+
+ {
+ let mut map = BTreeMap::new();
+ map.insert(Box::new([0, 1]) as Box<[i32]>, 1);
+ assert_eq!(map[&[0, 1][..]], 1);
+ }
+
+ {
+ let mut map = BTreeMap::new();
+ map.insert(Rc::new(0), 1);
+ assert_eq!(map[&0], 1);
+ }
+
+ #[allow(dead_code)]
+ fn get<T: Ord>(v: &BTreeMap<Box<T>, ()>, t: &T) {
+ let _ = v.get(t);
+ }
+
+ #[allow(dead_code)]
+ fn get_mut<T: Ord>(v: &mut BTreeMap<Box<T>, ()>, t: &T) {
+ let _ = v.get_mut(t);
+ }
+
+ #[allow(dead_code)]
+ fn get_key_value<T: Ord>(v: &BTreeMap<Box<T>, ()>, t: &T) {
+ let _ = v.get_key_value(t);
+ }
+
+ #[allow(dead_code)]
+ fn contains_key<T: Ord>(v: &BTreeMap<Box<T>, ()>, t: &T) {
+ let _ = v.contains_key(t);
+ }
+
+ #[allow(dead_code)]
+ fn range<T: Ord>(v: &BTreeMap<Box<T>, ()>, t: T) {
+ let _ = v.range(t..);
+ }
+
+ #[allow(dead_code)]
+ fn range_mut<T: Ord>(v: &mut BTreeMap<Box<T>, ()>, t: T) {
+ let _ = v.range_mut(t..);
+ }
+
+ #[allow(dead_code)]
+ fn remove<T: Ord>(v: &mut BTreeMap<Box<T>, ()>, t: &T) {
+ v.remove(t);
+ }
+
+ #[allow(dead_code)]
+ fn remove_entry<T: Ord>(v: &mut BTreeMap<Box<T>, ()>, t: &T) {
+ v.remove_entry(t);
+ }
+
+ #[allow(dead_code)]
+ fn split_off<T: Ord>(v: &mut BTreeMap<Box<T>, ()>, t: &T) {
+ v.split_off(t);
+ }
+}
+
+#[test]
+fn test_entry() {
+ let xs = [(1, 10), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)];
+
+ let mut map = BTreeMap::from(xs);
+
+ // Existing key (insert)
+ match map.entry(1) {
+ Vacant(_) => unreachable!(),
+ Occupied(mut view) => {
+ assert_eq!(view.get(), &10);
+ assert_eq!(view.insert(100), 10);
+ }
+ }
+ assert_eq!(map.get(&1).unwrap(), &100);
+ assert_eq!(map.len(), 6);
+
+ // Existing key (update)
+ match map.entry(2) {
+ Vacant(_) => unreachable!(),
+ Occupied(mut view) => {
+ let v = view.get_mut();
+ *v *= 10;
+ }
+ }
+ assert_eq!(map.get(&2).unwrap(), &200);
+ assert_eq!(map.len(), 6);
+ map.check();
+
+ // Existing key (take)
+ match map.entry(3) {
+ Vacant(_) => unreachable!(),
+ Occupied(view) => {
+ assert_eq!(view.remove(), 30);
+ }
+ }
+ assert_eq!(map.get(&3), None);
+ assert_eq!(map.len(), 5);
+ map.check();
+
+ // Inexistent key (insert)
+ match map.entry(10) {
+ Occupied(_) => unreachable!(),
+ Vacant(view) => {
+ assert_eq!(*view.insert(1000), 1000);
+ }
+ }
+ assert_eq!(map.get(&10).unwrap(), &1000);
+ assert_eq!(map.len(), 6);
+ map.check();
+}
+
+#[test]
+fn test_extend_ref() {
+ let mut a = BTreeMap::new();
+ a.insert(1, "one");
+ let mut b = BTreeMap::new();
+ b.insert(2, "two");
+ b.insert(3, "three");
+
+ a.extend(&b);
+
+ assert_eq!(a.len(), 3);
+ assert_eq!(a[&1], "one");
+ assert_eq!(a[&2], "two");
+ assert_eq!(a[&3], "three");
+ a.check();
+}
+
+#[test]
+fn test_zst() {
+ let mut m = BTreeMap::new();
+ assert_eq!(m.len(), 0);
+
+ assert_eq!(m.insert((), ()), None);
+ assert_eq!(m.len(), 1);
+
+ assert_eq!(m.insert((), ()), Some(()));
+ assert_eq!(m.len(), 1);
+ assert_eq!(m.iter().count(), 1);
+
+ m.clear();
+ assert_eq!(m.len(), 0);
+
+ for _ in 0..100 {
+ m.insert((), ());
+ }
+
+ assert_eq!(m.len(), 1);
+ assert_eq!(m.iter().count(), 1);
+ m.check();
+}
+
+// This test's only purpose is to ensure that zero-sized keys with nonsensical orderings
+// do not cause segfaults when used with zero-sized values. All other map behavior is
+// undefined.
+#[test]
+fn test_bad_zst() {
+ #[derive(Clone, Copy, Debug)]
+ struct Bad;
+
+ impl PartialEq for Bad {
+ fn eq(&self, _: &Self) -> bool {
+ false
+ }
+ }
+
+ impl Eq for Bad {}
+
+ impl PartialOrd for Bad {
+ fn partial_cmp(&self, _: &Self) -> Option<Ordering> {
+ Some(Ordering::Less)
+ }
+ }
+
+ impl Ord for Bad {
+ fn cmp(&self, _: &Self) -> Ordering {
+ Ordering::Less
+ }
+ }
+
+ let mut m = BTreeMap::new();
+
+ for _ in 0..100 {
+ m.insert(Bad, Bad);
+ }
+ m.check();
+}
+
+#[test]
+fn test_clear() {
+ let mut map = BTreeMap::new();
+ for &len in &[MIN_INSERTS_HEIGHT_1, MIN_INSERTS_HEIGHT_2, 0, node::CAPACITY] {
+ for i in 0..len {
+ map.insert(i, ());
+ }
+ assert_eq!(map.len(), len);
+ map.clear();
+ map.check();
+ assert_eq!(map.height(), None);
+ }
+}
+
+#[test]
+fn test_clear_drop_panic_leak() {
+ let a = CrashTestDummy::new(0);
+ let b = CrashTestDummy::new(1);
+ let c = CrashTestDummy::new(2);
+
+ let mut map = BTreeMap::new();
+ map.insert(a.spawn(Panic::Never), ());
+ map.insert(b.spawn(Panic::InDrop), ());
+ map.insert(c.spawn(Panic::Never), ());
+
+ catch_unwind(AssertUnwindSafe(|| map.clear())).unwrap_err();
+ assert_eq!(a.dropped(), 1);
+ assert_eq!(b.dropped(), 1);
+ assert_eq!(c.dropped(), 1);
+ assert_eq!(map.len(), 0);
+
+ drop(map);
+ assert_eq!(a.dropped(), 1);
+ assert_eq!(b.dropped(), 1);
+ assert_eq!(c.dropped(), 1);
+}
+
+#[test]
+fn test_clone() {
+ let mut map = BTreeMap::new();
+ let size = MIN_INSERTS_HEIGHT_1;
+ assert_eq!(map.len(), 0);
+
+ for i in 0..size {
+ assert_eq!(map.insert(i, 10 * i), None);
+ assert_eq!(map.len(), i + 1);
+ map.check();
+ assert_eq!(map, map.clone());
+ }
+
+ for i in 0..size {
+ assert_eq!(map.insert(i, 100 * i), Some(10 * i));
+ assert_eq!(map.len(), size);
+ map.check();
+ assert_eq!(map, map.clone());
+ }
+
+ for i in 0..size / 2 {
+ assert_eq!(map.remove(&(i * 2)), Some(i * 200));
+ assert_eq!(map.len(), size - i - 1);
+ map.check();
+ assert_eq!(map, map.clone());
+ }
+
+ for i in 0..size / 2 {
+ assert_eq!(map.remove(&(2 * i)), None);
+ assert_eq!(map.remove(&(2 * i + 1)), Some(i * 200 + 100));
+ assert_eq!(map.len(), size / 2 - i - 1);
+ map.check();
+ assert_eq!(map, map.clone());
+ }
+
+ // Test a tree with 2 semi-full levels and a tree with 3 levels.
+ map = BTreeMap::from_iter((1..MIN_INSERTS_HEIGHT_2).map(|i| (i, i)));
+ assert_eq!(map.len(), MIN_INSERTS_HEIGHT_2 - 1);
+ assert_eq!(map, map.clone());
+ map.insert(0, 0);
+ assert_eq!(map.len(), MIN_INSERTS_HEIGHT_2);
+ assert_eq!(map, map.clone());
+ map.check();
+}
+
+fn test_clone_panic_leak(size: usize) {
+ for i in 0..size {
+ let dummies = Vec::from_iter((0..size).map(|id| CrashTestDummy::new(id)));
+ let map = BTreeMap::from_iter(dummies.iter().map(|dummy| {
+ let panic = if dummy.id == i { Panic::InClone } else { Panic::Never };
+ (dummy.spawn(panic), ())
+ }));
+
+ catch_unwind(|| map.clone()).unwrap_err();
+ for d in &dummies {
+ assert_eq!(d.cloned(), if d.id <= i { 1 } else { 0 }, "id={}/{}", d.id, i);
+ assert_eq!(d.dropped(), if d.id < i { 1 } else { 0 }, "id={}/{}", d.id, i);
+ }
+ assert_eq!(map.len(), size);
+
+ drop(map);
+ for d in &dummies {
+ assert_eq!(d.cloned(), if d.id <= i { 1 } else { 0 }, "id={}/{}", d.id, i);
+ assert_eq!(d.dropped(), if d.id < i { 2 } else { 1 }, "id={}/{}", d.id, i);
+ }
+ }
+}
+
+#[test]
+fn test_clone_panic_leak_height_0() {
+ test_clone_panic_leak(3)
+}
+
+#[test]
+fn test_clone_panic_leak_height_1() {
+ test_clone_panic_leak(MIN_INSERTS_HEIGHT_1)
+}
+
+#[test]
+fn test_clone_from() {
+ let mut map1 = BTreeMap::new();
+ let max_size = MIN_INSERTS_HEIGHT_1;
+
+ // Range to max_size inclusive, because i is the size of map1 being tested.
+ for i in 0..=max_size {
+ let mut map2 = BTreeMap::new();
+ for j in 0..i {
+ let mut map1_copy = map2.clone();
+ map1_copy.clone_from(&map1); // small cloned from large
+ assert_eq!(map1_copy, map1);
+ let mut map2_copy = map1.clone();
+ map2_copy.clone_from(&map2); // large cloned from small
+ assert_eq!(map2_copy, map2);
+ map2.insert(100 * j + 1, 2 * j + 1);
+ }
+ map2.clone_from(&map1); // same length
+ map2.check();
+ assert_eq!(map2, map1);
+ map1.insert(i, 10 * i);
+ map1.check();
+ }
+}
+
+#[allow(dead_code)]
+fn assert_covariance() {
+ fn map_key<'new>(v: BTreeMap<&'static str, ()>) -> BTreeMap<&'new str, ()> {
+ v
+ }
+ fn map_val<'new>(v: BTreeMap<(), &'static str>) -> BTreeMap<(), &'new str> {
+ v
+ }
+
+ fn iter_key<'a, 'new>(v: Iter<'a, &'static str, ()>) -> Iter<'a, &'new str, ()> {
+ v
+ }
+ fn iter_val<'a, 'new>(v: Iter<'a, (), &'static str>) -> Iter<'a, (), &'new str> {
+ v
+ }
+
+ fn into_iter_key<'new>(v: IntoIter<&'static str, ()>) -> IntoIter<&'new str, ()> {
+ v
+ }
+ fn into_iter_val<'new>(v: IntoIter<(), &'static str>) -> IntoIter<(), &'new str> {
+ v
+ }
+
+ fn into_keys_key<'new>(v: IntoKeys<&'static str, ()>) -> IntoKeys<&'new str, ()> {
+ v
+ }
+ fn into_keys_val<'new>(v: IntoKeys<(), &'static str>) -> IntoKeys<(), &'new str> {
+ v
+ }
+
+ fn into_values_key<'new>(v: IntoValues<&'static str, ()>) -> IntoValues<&'new str, ()> {
+ v
+ }
+ fn into_values_val<'new>(v: IntoValues<(), &'static str>) -> IntoValues<(), &'new str> {
+ v
+ }
+
+ fn range_key<'a, 'new>(v: Range<'a, &'static str, ()>) -> Range<'a, &'new str, ()> {
+ v
+ }
+ fn range_val<'a, 'new>(v: Range<'a, (), &'static str>) -> Range<'a, (), &'new str> {
+ v
+ }
+
+ fn keys_key<'a, 'new>(v: Keys<'a, &'static str, ()>) -> Keys<'a, &'new str, ()> {
+ v
+ }
+ fn keys_val<'a, 'new>(v: Keys<'a, (), &'static str>) -> Keys<'a, (), &'new str> {
+ v
+ }
+
+ fn values_key<'a, 'new>(v: Values<'a, &'static str, ()>) -> Values<'a, &'new str, ()> {
+ v
+ }
+ fn values_val<'a, 'new>(v: Values<'a, (), &'static str>) -> Values<'a, (), &'new str> {
+ v
+ }
+}
+
+#[allow(dead_code)]
+fn assert_sync() {
+ fn map<T: Sync>(v: &BTreeMap<T, T>) -> impl Sync + '_ {
+ v
+ }
+
+ fn into_iter<T: Sync>(v: BTreeMap<T, T>) -> impl Sync {
+ v.into_iter()
+ }
+
+ fn into_keys<T: Sync + Ord>(v: BTreeMap<T, T>) -> impl Sync {
+ v.into_keys()
+ }
+
+ fn into_values<T: Sync + Ord>(v: BTreeMap<T, T>) -> impl Sync {
+ v.into_values()
+ }
+
+ fn drain_filter<T: Sync + Ord>(v: &mut BTreeMap<T, T>) -> impl Sync + '_ {
+ v.drain_filter(|_, _| false)
+ }
+
+ fn iter<T: Sync>(v: &BTreeMap<T, T>) -> impl Sync + '_ {
+ v.iter()
+ }
+
+ fn iter_mut<T: Sync>(v: &mut BTreeMap<T, T>) -> impl Sync + '_ {
+ v.iter_mut()
+ }
+
+ fn keys<T: Sync>(v: &BTreeMap<T, T>) -> impl Sync + '_ {
+ v.keys()
+ }
+
+ fn values<T: Sync>(v: &BTreeMap<T, T>) -> impl Sync + '_ {
+ v.values()
+ }
+
+ fn values_mut<T: Sync>(v: &mut BTreeMap<T, T>) -> impl Sync + '_ {
+ v.values_mut()
+ }
+
+ fn range<T: Sync + Ord>(v: &BTreeMap<T, T>) -> impl Sync + '_ {
+ v.range(..)
+ }
+
+ fn range_mut<T: Sync + Ord>(v: &mut BTreeMap<T, T>) -> impl Sync + '_ {
+ v.range_mut(..)
+ }
+
+ fn entry<T: Sync + Ord + Default>(v: &mut BTreeMap<T, T>) -> impl Sync + '_ {
+ v.entry(Default::default())
+ }
+
+ fn occupied_entry<T: Sync + Ord + Default>(v: &mut BTreeMap<T, T>) -> impl Sync + '_ {
+ match v.entry(Default::default()) {
+ Occupied(entry) => entry,
+ _ => unreachable!(),
+ }
+ }
+
+ fn vacant_entry<T: Sync + Ord + Default>(v: &mut BTreeMap<T, T>) -> impl Sync + '_ {
+ match v.entry(Default::default()) {
+ Vacant(entry) => entry,
+ _ => unreachable!(),
+ }
+ }
+}
+
+#[allow(dead_code)]
+fn assert_send() {
+ fn map<T: Send>(v: BTreeMap<T, T>) -> impl Send {
+ v
+ }
+
+ fn into_iter<T: Send>(v: BTreeMap<T, T>) -> impl Send {
+ v.into_iter()
+ }
+
+ fn into_keys<T: Send + Ord>(v: BTreeMap<T, T>) -> impl Send {
+ v.into_keys()
+ }
+
+ fn into_values<T: Send + Ord>(v: BTreeMap<T, T>) -> impl Send {
+ v.into_values()
+ }
+
+ fn drain_filter<T: Send + Ord>(v: &mut BTreeMap<T, T>) -> impl Send + '_ {
+ v.drain_filter(|_, _| false)
+ }
+
+ fn iter<T: Send + Sync>(v: &BTreeMap<T, T>) -> impl Send + '_ {
+ v.iter()
+ }
+
+ fn iter_mut<T: Send>(v: &mut BTreeMap<T, T>) -> impl Send + '_ {
+ v.iter_mut()
+ }
+
+ fn keys<T: Send + Sync>(v: &BTreeMap<T, T>) -> impl Send + '_ {
+ v.keys()
+ }
+
+ fn values<T: Send + Sync>(v: &BTreeMap<T, T>) -> impl Send + '_ {
+ v.values()
+ }
+
+ fn values_mut<T: Send>(v: &mut BTreeMap<T, T>) -> impl Send + '_ {
+ v.values_mut()
+ }
+
+ fn range<T: Send + Sync + Ord>(v: &BTreeMap<T, T>) -> impl Send + '_ {
+ v.range(..)
+ }
+
+ fn range_mut<T: Send + Ord>(v: &mut BTreeMap<T, T>) -> impl Send + '_ {
+ v.range_mut(..)
+ }
+
+ fn entry<T: Send + Ord + Default>(v: &mut BTreeMap<T, T>) -> impl Send + '_ {
+ v.entry(Default::default())
+ }
+
+ fn occupied_entry<T: Send + Ord + Default>(v: &mut BTreeMap<T, T>) -> impl Send + '_ {
+ match v.entry(Default::default()) {
+ Occupied(entry) => entry,
+ _ => unreachable!(),
+ }
+ }
+
+ fn vacant_entry<T: Send + Ord + Default>(v: &mut BTreeMap<T, T>) -> impl Send + '_ {
+ match v.entry(Default::default()) {
+ Vacant(entry) => entry,
+ _ => unreachable!(),
+ }
+ }
+}
+
+#[test]
+fn test_ord_absence() {
+ fn map<K>(mut map: BTreeMap<K, ()>) {
+ let _ = map.is_empty();
+ let _ = map.len();
+ map.clear();
+ let _ = map.iter();
+ let _ = map.iter_mut();
+ let _ = map.keys();
+ let _ = map.values();
+ let _ = map.values_mut();
+ if true {
+ let _ = map.into_values();
+ } else if true {
+ let _ = map.into_iter();
+ } else {
+ let _ = map.into_keys();
+ }
+ }
+
+ fn map_debug<K: Debug>(mut map: BTreeMap<K, ()>) {
+ format!("{map:?}");
+ format!("{:?}", map.iter());
+ format!("{:?}", map.iter_mut());
+ format!("{:?}", map.keys());
+ format!("{:?}", map.values());
+ format!("{:?}", map.values_mut());
+ if true {
+ format!("{:?}", map.into_iter());
+ } else if true {
+ format!("{:?}", map.into_keys());
+ } else {
+ format!("{:?}", map.into_values());
+ }
+ }
+
+ fn map_clone<K: Clone>(mut map: BTreeMap<K, ()>) {
+ map.clone_from(&map.clone());
+ }
+
+ #[derive(Debug, Clone)]
+ struct NonOrd;
+ map(BTreeMap::<NonOrd, _>::new());
+ map_debug(BTreeMap::<NonOrd, _>::new());
+ map_clone(BTreeMap::<NonOrd, _>::default());
+}
+
+#[test]
+fn test_occupied_entry_key() {
+ let mut a = BTreeMap::new();
+ let key = "hello there";
+ let value = "value goes here";
+ assert_eq!(a.height(), None);
+ a.insert(key, value);
+ assert_eq!(a.len(), 1);
+ assert_eq!(a[key], value);
+
+ match a.entry(key) {
+ Vacant(_) => panic!(),
+ Occupied(e) => assert_eq!(key, *e.key()),
+ }
+ assert_eq!(a.len(), 1);
+ assert_eq!(a[key], value);
+ a.check();
+}
+
+#[test]
+fn test_vacant_entry_key() {
+ let mut a = BTreeMap::new();
+ let key = "hello there";
+ let value = "value goes here";
+
+ assert_eq!(a.height(), None);
+ match a.entry(key) {
+ Occupied(_) => unreachable!(),
+ Vacant(e) => {
+ assert_eq!(key, *e.key());
+ e.insert(value);
+ }
+ }
+ assert_eq!(a.len(), 1);
+ assert_eq!(a[key], value);
+ a.check();
+}
+
+#[test]
+fn test_vacant_entry_no_insert() {
+ let mut a = BTreeMap::<&str, ()>::new();
+ let key = "hello there";
+
+ // Non-allocated
+ assert_eq!(a.height(), None);
+ match a.entry(key) {
+ Occupied(_) => unreachable!(),
+ Vacant(e) => assert_eq!(key, *e.key()),
+ }
+ // Ensures the tree has no root.
+ assert_eq!(a.height(), None);
+ a.check();
+
+ // Allocated but still empty
+ a.insert(key, ());
+ a.remove(&key);
+ assert_eq!(a.height(), Some(0));
+ assert!(a.is_empty());
+ match a.entry(key) {
+ Occupied(_) => unreachable!(),
+ Vacant(e) => assert_eq!(key, *e.key()),
+ }
+ // Ensures the allocated root is not changed.
+ assert_eq!(a.height(), Some(0));
+ assert!(a.is_empty());
+ a.check();
+}
+
+#[test]
+fn test_first_last_entry() {
+ let mut a = BTreeMap::new();
+ assert!(a.first_entry().is_none());
+ assert!(a.last_entry().is_none());
+ a.insert(1, 42);
+ assert_eq!(a.first_entry().unwrap().key(), &1);
+ assert_eq!(a.last_entry().unwrap().key(), &1);
+ a.insert(2, 24);
+ assert_eq!(a.first_entry().unwrap().key(), &1);
+ assert_eq!(a.last_entry().unwrap().key(), &2);
+ a.insert(0, 6);
+ assert_eq!(a.first_entry().unwrap().key(), &0);
+ assert_eq!(a.last_entry().unwrap().key(), &2);
+ let (k1, v1) = a.first_entry().unwrap().remove_entry();
+ assert_eq!(k1, 0);
+ assert_eq!(v1, 6);
+ let (k2, v2) = a.last_entry().unwrap().remove_entry();
+ assert_eq!(k2, 2);
+ assert_eq!(v2, 24);
+ assert_eq!(a.first_entry().unwrap().key(), &1);
+ assert_eq!(a.last_entry().unwrap().key(), &1);
+ a.check();
+}
+
+#[test]
+fn test_pop_first_last() {
+ let mut map = BTreeMap::new();
+ assert_eq!(map.pop_first(), None);
+ assert_eq!(map.pop_last(), None);
+
+ map.insert(1, 10);
+ map.insert(2, 20);
+ map.insert(3, 30);
+ map.insert(4, 40);
+
+ assert_eq!(map.len(), 4);
+
+ let (key, val) = map.pop_first().unwrap();
+ assert_eq!(key, 1);
+ assert_eq!(val, 10);
+ assert_eq!(map.len(), 3);
+
+ let (key, val) = map.pop_first().unwrap();
+ assert_eq!(key, 2);
+ assert_eq!(val, 20);
+ assert_eq!(map.len(), 2);
+ let (key, val) = map.pop_last().unwrap();
+ assert_eq!(key, 4);
+ assert_eq!(val, 40);
+ assert_eq!(map.len(), 1);
+
+ map.insert(5, 50);
+ map.insert(6, 60);
+ assert_eq!(map.len(), 3);
+
+ let (key, val) = map.pop_first().unwrap();
+ assert_eq!(key, 3);
+ assert_eq!(val, 30);
+ assert_eq!(map.len(), 2);
+
+ let (key, val) = map.pop_last().unwrap();
+ assert_eq!(key, 6);
+ assert_eq!(val, 60);
+ assert_eq!(map.len(), 1);
+
+ let (key, val) = map.pop_last().unwrap();
+ assert_eq!(key, 5);
+ assert_eq!(val, 50);
+ assert_eq!(map.len(), 0);
+
+ assert_eq!(map.pop_first(), None);
+ assert_eq!(map.pop_last(), None);
+
+ map.insert(7, 70);
+ map.insert(8, 80);
+
+ let (key, val) = map.pop_last().unwrap();
+ assert_eq!(key, 8);
+ assert_eq!(val, 80);
+ assert_eq!(map.len(), 1);
+
+ let (key, val) = map.pop_last().unwrap();
+ assert_eq!(key, 7);
+ assert_eq!(val, 70);
+ assert_eq!(map.len(), 0);
+
+ assert_eq!(map.pop_first(), None);
+ assert_eq!(map.pop_last(), None);
+}
+
+#[test]
+fn test_get_key_value() {
+ let mut map = BTreeMap::new();
+
+ assert!(map.is_empty());
+ assert_eq!(map.get_key_value(&1), None);
+ assert_eq!(map.get_key_value(&2), None);
+
+ map.insert(1, 10);
+ map.insert(2, 20);
+ map.insert(3, 30);
+
+ assert_eq!(map.len(), 3);
+ assert_eq!(map.get_key_value(&1), Some((&1, &10)));
+ assert_eq!(map.get_key_value(&3), Some((&3, &30)));
+ assert_eq!(map.get_key_value(&4), None);
+
+ map.remove(&3);
+
+ assert_eq!(map.len(), 2);
+ assert_eq!(map.get_key_value(&3), None);
+ assert_eq!(map.get_key_value(&2), Some((&2, &20)));
+}
+
+#[test]
+fn test_insert_into_full_height_0() {
+ let size = node::CAPACITY;
+ for pos in 0..=size {
+ let mut map = BTreeMap::from_iter((0..size).map(|i| (i * 2 + 1, ())));
+ assert!(map.insert(pos * 2, ()).is_none());
+ map.check();
+ }
+}
+
+#[test]
+fn test_insert_into_full_height_1() {
+ let size = node::CAPACITY + 1 + node::CAPACITY;
+ for pos in 0..=size {
+ let mut map = BTreeMap::from_iter((0..size).map(|i| (i * 2 + 1, ())));
+ map.compact();
+ let root_node = map.root.as_ref().unwrap().reborrow();
+ assert_eq!(root_node.len(), 1);
+ assert_eq!(root_node.first_leaf_edge().into_node().len(), node::CAPACITY);
+ assert_eq!(root_node.last_leaf_edge().into_node().len(), node::CAPACITY);
+
+ assert!(map.insert(pos * 2, ()).is_none());
+ map.check();
+ }
+}
+
+#[test]
+fn test_try_insert() {
+ let mut map = BTreeMap::new();
+
+ assert!(map.is_empty());
+
+ assert_eq!(map.try_insert(1, 10).unwrap(), &10);
+ assert_eq!(map.try_insert(2, 20).unwrap(), &20);
+
+ let err = map.try_insert(2, 200).unwrap_err();
+ assert_eq!(err.entry.key(), &2);
+ assert_eq!(err.entry.get(), &20);
+ assert_eq!(err.value, 200);
+}
+
+macro_rules! create_append_test {
+ ($name:ident, $len:expr) => {
+ #[test]
+ fn $name() {
+ let mut a = BTreeMap::new();
+ for i in 0..8 {
+ a.insert(i, i);
+ }
+
+ let mut b = BTreeMap::new();
+ for i in 5..$len {
+ b.insert(i, 2 * i);
+ }
+
+ a.append(&mut b);
+
+ assert_eq!(a.len(), $len);
+ assert_eq!(b.len(), 0);
+
+ for i in 0..$len {
+ if i < 5 {
+ assert_eq!(a[&i], i);
+ } else {
+ assert_eq!(a[&i], 2 * i);
+ }
+ }
+
+ a.check();
+ assert_eq!(a.remove(&($len - 1)), Some(2 * ($len - 1)));
+ assert_eq!(a.insert($len - 1, 20), None);
+ a.check();
+ }
+ };
+}
+
+// These are mostly for testing the algorithm that "fixes" the right edge after insertion.
+// Single node.
+create_append_test!(test_append_9, 9);
+// Two leaves that don't need fixing.
+create_append_test!(test_append_17, 17);
+// Two leaves where the second one ends up underfull and needs stealing at the end.
+create_append_test!(test_append_14, 14);
+// Two leaves where the second one ends up empty because the insertion finished at the root.
+create_append_test!(test_append_12, 12);
+// Three levels; insertion finished at the root.
+create_append_test!(test_append_144, 144);
+// Three levels; insertion finished at leaf while there is an empty node on the second level.
+create_append_test!(test_append_145, 145);
+// Tests for several randomly chosen sizes.
+create_append_test!(test_append_170, 170);
+create_append_test!(test_append_181, 181);
+#[cfg(not(miri))] // Miri is too slow
+create_append_test!(test_append_239, 239);
+#[cfg(not(miri))] // Miri is too slow
+create_append_test!(test_append_1700, 1700);
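+// (With B = 6, node::CAPACITY is 11, so the 9-element case above fits in a single leaf,
+// while the larger sizes force the splits described in the comments.)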
+
+#[test]
+fn test_append_drop_leak() {
+ let a = CrashTestDummy::new(0);
+ let b = CrashTestDummy::new(1);
+ let c = CrashTestDummy::new(2);
+ let mut left = BTreeMap::new();
+ let mut right = BTreeMap::new();
+ left.insert(a.spawn(Panic::Never), ());
+ left.insert(b.spawn(Panic::InDrop), ()); // first duplicate key, dropped during append
+ left.insert(c.spawn(Panic::Never), ());
+ right.insert(b.spawn(Panic::Never), ());
+ right.insert(c.spawn(Panic::Never), ());
+
+ catch_unwind(move || left.append(&mut right)).unwrap_err();
+ assert_eq!(a.dropped(), 1);
+ assert_eq!(b.dropped(), 1); // should be 2 were it not for Rust issue #47949
+ assert_eq!(c.dropped(), 2);
+}
+
+#[test]
+fn test_append_ord_chaos() {
+ let mut map1 = BTreeMap::new();
+ map1.insert(Cyclic3::A, ());
+ map1.insert(Cyclic3::B, ());
+ let mut map2 = BTreeMap::new();
+ map2.insert(Cyclic3::A, ());
+ map2.insert(Cyclic3::B, ());
+ map2.insert(Cyclic3::C, ()); // lands first, before A
+ map2.insert(Cyclic3::B, ()); // lands first, before C
+ map1.check();
+ map2.check(); // keys are not unique but still strictly ascending
+ assert_eq!(map1.len(), 2);
+ assert_eq!(map2.len(), 4);
+ map1.append(&mut map2);
+ assert_eq!(map1.len(), 5);
+ assert_eq!(map2.len(), 0);
+ map1.check();
+ map2.check();
+}
+
+fn rand_data(len: usize) -> Vec<(u32, u32)> {
+ let mut rng = DeterministicRng::new();
+ Vec::from_iter((0..len).map(|_| (rng.next(), rng.next())))
+}
+
+#[test]
+fn test_split_off_empty_right() {
+ let mut data = rand_data(173);
+
+ let mut map = BTreeMap::from_iter(data.clone());
+ let right = map.split_off(&(data.iter().max().unwrap().0 + 1));
+ map.check();
+ right.check();
+
+ data.sort();
+ assert!(map.into_iter().eq(data));
+ assert!(right.into_iter().eq(None));
+}
+
+#[test]
+fn test_split_off_empty_left() {
+ let mut data = rand_data(314);
+
+ let mut map = BTreeMap::from_iter(data.clone());
+ let right = map.split_off(&data.iter().min().unwrap().0);
+ map.check();
+ right.check();
+
+ data.sort();
+ assert!(map.into_iter().eq(None));
+ assert!(right.into_iter().eq(data));
+}
+
+// In a tree with 3 levels, if all but a part of the first leaf node is split off,
+// make sure fix_top eliminates both top levels.
+#[test]
+fn test_split_off_tiny_left_height_2() {
+ let pairs = (0..MIN_INSERTS_HEIGHT_2).map(|i| (i, i));
+ let mut left = BTreeMap::from_iter(pairs.clone());
+ let right = left.split_off(&1);
+ left.check();
+ right.check();
+ assert_eq!(left.len(), 1);
+ assert_eq!(right.len(), MIN_INSERTS_HEIGHT_2 - 1);
+ assert_eq!(*left.first_key_value().unwrap().0, 0);
+ assert_eq!(*right.first_key_value().unwrap().0, 1);
+}
+
+// In a tree with 3 levels, if only part of the last leaf node is split off,
+// make sure fix_top eliminates both top levels.
+#[test]
+fn test_split_off_tiny_right_height_2() {
+ let pairs = (0..MIN_INSERTS_HEIGHT_2).map(|i| (i, i));
+ let last = MIN_INSERTS_HEIGHT_2 - 1;
+ let mut left = BTreeMap::from_iter(pairs.clone());
+ assert_eq!(*left.last_key_value().unwrap().0, last);
+ let right = left.split_off(&last);
+ left.check();
+ right.check();
+ assert_eq!(left.len(), MIN_INSERTS_HEIGHT_2 - 1);
+ assert_eq!(right.len(), 1);
+ assert_eq!(*left.last_key_value().unwrap().0, last - 1);
+ assert_eq!(*right.last_key_value().unwrap().0, last);
+}
+
+#[test]
+fn test_split_off_halfway() {
+ let mut rng = DeterministicRng::new();
+ for &len in &[node::CAPACITY, 25, 50, 75, 100] {
+ let mut data = Vec::from_iter((0..len).map(|_| (rng.next(), ())));
+ // Insertion in non-ascending order creates some variation in node length.
+ let mut map = BTreeMap::from_iter(data.iter().copied());
+ data.sort();
+ let small_keys = data.iter().take(len / 2).map(|kv| kv.0);
+ let large_keys = data.iter().skip(len / 2).map(|kv| kv.0);
+ let split_key = large_keys.clone().next().unwrap();
+ let right = map.split_off(&split_key);
+ map.check();
+ right.check();
+ assert!(map.keys().copied().eq(small_keys));
+ assert!(right.keys().copied().eq(large_keys));
+ }
+}
+
+#[test]
+fn test_split_off_large_random_sorted() {
+ // Miri is too slow
+ let mut data = if cfg!(miri) { rand_data(529) } else { rand_data(1529) };
+ // special case with maximum height.
+ data.sort();
+
+ let mut map = BTreeMap::from_iter(data.clone());
+ let key = data[data.len() / 2].0;
+ let right = map.split_off(&key);
+ map.check();
+ right.check();
+
+ assert!(map.into_iter().eq(data.clone().into_iter().filter(|x| x.0 < key)));
+ assert!(right.into_iter().eq(data.into_iter().filter(|x| x.0 >= key)));
+}
+
+#[test]
+fn test_into_iter_drop_leak_height_0() {
+ let a = CrashTestDummy::new(0);
+ let b = CrashTestDummy::new(1);
+ let c = CrashTestDummy::new(2);
+ let d = CrashTestDummy::new(3);
+ let e = CrashTestDummy::new(4);
+ let mut map = BTreeMap::new();
+ map.insert("a", a.spawn(Panic::Never));
+ map.insert("b", b.spawn(Panic::Never));
+ map.insert("c", c.spawn(Panic::Never));
+ map.insert("d", d.spawn(Panic::InDrop));
+ map.insert("e", e.spawn(Panic::Never));
+
+ catch_unwind(move || drop(map.into_iter())).unwrap_err();
+
+ assert_eq!(a.dropped(), 1);
+ assert_eq!(b.dropped(), 1);
+ assert_eq!(c.dropped(), 1);
+ assert_eq!(d.dropped(), 1);
+ assert_eq!(e.dropped(), 1);
+}
+
+#[test]
+fn test_into_iter_drop_leak_height_1() {
+ let size = MIN_INSERTS_HEIGHT_1;
+ for panic_point in vec![0, 1, size - 2, size - 1] {
+ let dummies = Vec::from_iter((0..size).map(|i| CrashTestDummy::new(i)));
+ let map = BTreeMap::from_iter((0..size).map(|i| {
+ let panic = if i == panic_point { Panic::InDrop } else { Panic::Never };
+ (dummies[i].spawn(Panic::Never), dummies[i].spawn(panic))
+ }));
+ catch_unwind(move || drop(map.into_iter())).unwrap_err();
+ for i in 0..size {
+ assert_eq!(dummies[i].dropped(), 2);
+ }
+ }
+}
+
+#[test]
+fn test_into_keys() {
+ let map = BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c')]);
+ let keys = Vec::from_iter(map.into_keys());
+
+ assert_eq!(keys.len(), 3);
+ assert!(keys.contains(&1));
+ assert!(keys.contains(&2));
+ assert!(keys.contains(&3));
+}
+
+#[test]
+fn test_into_values() {
+ let map = BTreeMap::from([(1, 'a'), (2, 'b'), (3, 'c')]);
+ let values = Vec::from_iter(map.into_values());
+
+ assert_eq!(values.len(), 3);
+ assert!(values.contains(&'a'));
+ assert!(values.contains(&'b'));
+ assert!(values.contains(&'c'));
+}
+
+#[test]
+fn test_insert_remove_intertwined() {
+ let loops = if cfg!(miri) { 100 } else { 1_000_000 };
+ let mut map = BTreeMap::new();
+ let mut i = 1;
+ let offset = 165; // somewhat arbitrarily chosen to cover some code paths
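+ // 165 is odd, hence coprime with 256, so `i` cycles through every value in 0..=0xFF.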
+ for _ in 0..loops {
+ i = (i + offset) & 0xFF;
+ map.insert(i, i);
+ map.remove(&(0xFF - i));
+ }
+ map.check();
+}
+
+#[test]
+fn test_insert_remove_intertwined_ord_chaos() {
+ let loops = if cfg!(miri) { 100 } else { 1_000_000 };
+ let gov = Governor::new();
+ let mut map = BTreeMap::new();
+ let mut i = 1;
+ let offset = 165; // more arbitrarily copied from above
+ for _ in 0..loops {
+ i = (i + offset) & 0xFF;
+ map.insert(Governed(i, &gov), ());
+ map.remove(&Governed(0xFF - i, &gov));
+ gov.flip();
+ }
+ map.check_invariants();
+}
+
+#[test]
+fn from_array() {
+ let map = BTreeMap::from([(1, 2), (3, 4)]);
+ let unordered_duplicates = BTreeMap::from([(3, 4), (1, 2), (1, 2)]);
+ assert_eq!(map, unordered_duplicates);
+}
diff --git a/library/alloc/src/collections/btree/mem.rs b/library/alloc/src/collections/btree/mem.rs
new file mode 100644
index 000000000..e1363d1ae
--- /dev/null
+++ b/library/alloc/src/collections/btree/mem.rs
@@ -0,0 +1,35 @@
+use core::intrinsics;
+use core::mem;
+use core::ptr;
+
+/// This replaces the value behind the `v` unique reference by calling the
+/// relevant function.
+///
+/// If a panic occurs in the `change` closure, the entire process will be aborted.
+#[allow(dead_code)] // keep as illustration and for future use
+#[inline]
+pub fn take_mut<T>(v: &mut T, change: impl FnOnce(T) -> T) {
+ replace(v, |value| (change(value), ()))
+}
+
+/// This replaces the value behind the `v` unique reference by calling the
+/// relevant function, and returns a result obtained along the way.
+///
+/// If a panic occurs in the `change` closure, the entire process will be aborted.
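+///
+/// For example, `replace(&mut n, |old| (old + 1, old))` increments `n` in place and
+/// returns its previous value.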
+#[inline]
+pub fn replace<T, R>(v: &mut T, change: impl FnOnce(T) -> (T, R)) -> R {
+ struct PanicGuard;
+ impl Drop for PanicGuard {
+ fn drop(&mut self) {
+ intrinsics::abort()
+ }
+ }
+ let guard = PanicGuard;
+ let value = unsafe { ptr::read(v) };
+ let (new_value, ret) = change(value);
+ unsafe {
+ ptr::write(v, new_value);
+ }
+ mem::forget(guard);
+ ret
+}
diff --git a/library/alloc/src/collections/btree/merge_iter.rs b/library/alloc/src/collections/btree/merge_iter.rs
new file mode 100644
index 000000000..7f23d93b9
--- /dev/null
+++ b/library/alloc/src/collections/btree/merge_iter.rs
@@ -0,0 +1,98 @@
+use core::cmp::Ordering;
+use core::fmt::{self, Debug};
+use core::iter::FusedIterator;
+
+/// Core of an iterator that merges the output of two strictly ascending iterators,
+/// for instance a union or a symmetric difference.
+pub struct MergeIterInner<I: Iterator> {
+ a: I,
+ b: I,
+ peeked: Option<Peeked<I>>,
+}
+
+/// Benchmarks faster than wrapping both iterators in a Peekable,
+/// probably because we can afford to impose a FusedIterator bound.
+#[derive(Clone, Debug)]
+enum Peeked<I: Iterator> {
+ A(I::Item),
+ B(I::Item),
+}
+
+impl<I: Iterator> Clone for MergeIterInner<I>
+where
+ I: Clone,
+ I::Item: Clone,
+{
+ fn clone(&self) -> Self {
+ Self { a: self.a.clone(), b: self.b.clone(), peeked: self.peeked.clone() }
+ }
+}
+
+impl<I: Iterator> Debug for MergeIterInner<I>
+where
+ I: Debug,
+ I::Item: Debug,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("MergeIterInner").field(&self.a).field(&self.b).field(&self.peeked).finish()
+ }
+}
+
+impl<I: Iterator> MergeIterInner<I> {
+ /// Creates a new core for an iterator merging a pair of sources.
+ pub fn new(a: I, b: I) -> Self {
+ MergeIterInner { a, b, peeked: None }
+ }
+
+ /// Returns the next pair of items stemming from the pair of sources
+ /// being merged. If both returned options contain a value, those values
+ /// are equal and occur in both sources. If one of the returned options
+ /// contains a value, that value doesn't occur in the other source (or
+ /// the sources are not strictly ascending). If neither returned option
+ /// contains a value, iteration has finished and subsequent calls will
+ /// return the same empty pair.
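+ ///
+ /// For instance, merging `[1, 3]` and `[2, 3]` under `Ord::cmp` yields
+ /// `(Some(1), None)`, then `(None, Some(2))`, then `(Some(3), Some(3))`,
+ /// and `(None, None)` from then on.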
+ pub fn nexts<Cmp: Fn(&I::Item, &I::Item) -> Ordering>(
+ &mut self,
+ cmp: Cmp,
+ ) -> (Option<I::Item>, Option<I::Item>)
+ where
+ I: FusedIterator,
+ {
+ let mut a_next;
+ let mut b_next;
+ match self.peeked.take() {
+ Some(Peeked::A(next)) => {
+ a_next = Some(next);
+ b_next = self.b.next();
+ }
+ Some(Peeked::B(next)) => {
+ b_next = Some(next);
+ a_next = self.a.next();
+ }
+ None => {
+ a_next = self.a.next();
+ b_next = self.b.next();
+ }
+ }
+ if let (Some(ref a1), Some(ref b1)) = (&a_next, &b_next) {
+ match cmp(a1, b1) {
+ Ordering::Less => self.peeked = b_next.take().map(Peeked::B),
+ Ordering::Greater => self.peeked = a_next.take().map(Peeked::A),
+ Ordering::Equal => (),
+ }
+ }
+ (a_next, b_next)
+ }
+
+ /// Returns a pair of upper bounds for the `size_hint` of the final iterator.
+ pub fn lens(&self) -> (usize, usize)
+ where
+ I: ExactSizeIterator,
+ {
+ match self.peeked {
+ Some(Peeked::A(_)) => (1 + self.a.len(), self.b.len()),
+ Some(Peeked::B(_)) => (self.a.len(), 1 + self.b.len()),
+ _ => (self.a.len(), self.b.len()),
+ }
+ }
+}
diff --git a/library/alloc/src/collections/btree/mod.rs b/library/alloc/src/collections/btree/mod.rs
new file mode 100644
index 000000000..9d43ac5c5
--- /dev/null
+++ b/library/alloc/src/collections/btree/mod.rs
@@ -0,0 +1,26 @@
+mod append;
+mod borrow;
+mod dedup_sorted_iter;
+mod fix;
+pub mod map;
+mod mem;
+mod merge_iter;
+mod navigate;
+mod node;
+mod remove;
+mod search;
+pub mod set;
+mod set_val;
+mod split;
+
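+// `Recover` lets `BTreeSet`, which stores its elements as keys with zero-sized values,
+// get owned keys back out of the underlying `BTreeMap`.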
+#[doc(hidden)]
+trait Recover<Q: ?Sized> {
+ type Key;
+
+ fn get(&self, key: &Q) -> Option<&Self::Key>;
+ fn take(&mut self, key: &Q) -> Option<Self::Key>;
+ fn replace(&mut self, key: Self::Key) -> Option<Self::Key>;
+}
+
+#[cfg(test)]
+mod testing;
diff --git a/library/alloc/src/collections/btree/navigate.rs b/library/alloc/src/collections/btree/navigate.rs
new file mode 100644
index 000000000..1e33c1e64
--- /dev/null
+++ b/library/alloc/src/collections/btree/navigate.rs
@@ -0,0 +1,719 @@
+use core::borrow::Borrow;
+use core::hint;
+use core::ops::RangeBounds;
+use core::ptr;
+
+use super::node::{marker, ForceResult::*, Handle, NodeRef};
+
+use crate::alloc::Allocator;
+// `front` and `back` are always both `None` or both `Some`.
+pub struct LeafRange<BorrowType, K, V> {
+ front: Option<Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge>>,
+ back: Option<Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge>>,
+}
+
+impl<'a, K: 'a, V: 'a> Clone for LeafRange<marker::Immut<'a>, K, V> {
+ fn clone(&self) -> Self {
+ LeafRange { front: self.front.clone(), back: self.back.clone() }
+ }
+}
+
+impl<BorrowType, K, V> LeafRange<BorrowType, K, V> {
+ pub fn none() -> Self {
+ LeafRange { front: None, back: None }
+ }
+
+ fn is_empty(&self) -> bool {
+ self.front == self.back
+ }
+
+ /// Temporarily takes out another, immutable equivalent of the same range.
+ pub fn reborrow(&self) -> LeafRange<marker::Immut<'_>, K, V> {
+ LeafRange {
+ front: self.front.as_ref().map(|f| f.reborrow()),
+ back: self.back.as_ref().map(|b| b.reborrow()),
+ }
+ }
+}
+
+impl<'a, K, V> LeafRange<marker::Immut<'a>, K, V> {
+ #[inline]
+ pub fn next_checked(&mut self) -> Option<(&'a K, &'a V)> {
+ self.perform_next_checked(|kv| kv.into_kv())
+ }
+
+ #[inline]
+ pub fn next_back_checked(&mut self) -> Option<(&'a K, &'a V)> {
+ self.perform_next_back_checked(|kv| kv.into_kv())
+ }
+}
+
+impl<'a, K, V> LeafRange<marker::ValMut<'a>, K, V> {
+ #[inline]
+ pub fn next_checked(&mut self) -> Option<(&'a K, &'a mut V)> {
+ self.perform_next_checked(|kv| unsafe { ptr::read(kv) }.into_kv_valmut())
+ }
+
+ #[inline]
+ pub fn next_back_checked(&mut self) -> Option<(&'a K, &'a mut V)> {
+ self.perform_next_back_checked(|kv| unsafe { ptr::read(kv) }.into_kv_valmut())
+ }
+}
+
+impl<BorrowType: marker::BorrowType, K, V> LeafRange<BorrowType, K, V> {
+ /// If possible, extract some result from the following KV and move to the edge beyond it.
+ fn perform_next_checked<F, R>(&mut self, f: F) -> Option<R>
+ where
+ F: Fn(&Handle<NodeRef<BorrowType, K, V, marker::LeafOrInternal>, marker::KV>) -> R,
+ {
+ if self.is_empty() {
+ None
+ } else {
+ super::mem::replace(self.front.as_mut().unwrap(), |front| {
+ let kv = front.next_kv().ok().unwrap();
+ let result = f(&kv);
+ (kv.next_leaf_edge(), Some(result))
+ })
+ }
+ }
+
+ /// If possible, extract some result from the preceding KV and move to the edge beyond it.
+ fn perform_next_back_checked<F, R>(&mut self, f: F) -> Option<R>
+ where
+ F: Fn(&Handle<NodeRef<BorrowType, K, V, marker::LeafOrInternal>, marker::KV>) -> R,
+ {
+ if self.is_empty() {
+ None
+ } else {
+ super::mem::replace(self.back.as_mut().unwrap(), |back| {
+ let kv = back.next_back_kv().ok().unwrap();
+ let result = f(&kv);
+ (kv.next_back_leaf_edge(), Some(result))
+ })
+ }
+ }
+}
+
+enum LazyLeafHandle<BorrowType, K, V> {
+ Root(NodeRef<BorrowType, K, V, marker::LeafOrInternal>), // not yet descended
+ Edge(Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge>),
+}
+
+impl<'a, K: 'a, V: 'a> Clone for LazyLeafHandle<marker::Immut<'a>, K, V> {
+ fn clone(&self) -> Self {
+ match self {
+ LazyLeafHandle::Root(root) => LazyLeafHandle::Root(*root),
+ LazyLeafHandle::Edge(edge) => LazyLeafHandle::Edge(*edge),
+ }
+ }
+}
+
+impl<BorrowType, K, V> LazyLeafHandle<BorrowType, K, V> {
+ fn reborrow(&self) -> LazyLeafHandle<marker::Immut<'_>, K, V> {
+ match self {
+ LazyLeafHandle::Root(root) => LazyLeafHandle::Root(root.reborrow()),
+ LazyLeafHandle::Edge(edge) => LazyLeafHandle::Edge(edge.reborrow()),
+ }
+ }
+}
+
+// `front` and `back` are always both `None` or both `Some`.
+pub struct LazyLeafRange<BorrowType, K, V> {
+ front: Option<LazyLeafHandle<BorrowType, K, V>>,
+ back: Option<LazyLeafHandle<BorrowType, K, V>>,
+}
+
+impl<'a, K: 'a, V: 'a> Clone for LazyLeafRange<marker::Immut<'a>, K, V> {
+ fn clone(&self) -> Self {
+ LazyLeafRange { front: self.front.clone(), back: self.back.clone() }
+ }
+}
+
+impl<BorrowType, K, V> LazyLeafRange<BorrowType, K, V> {
+ pub fn none() -> Self {
+ LazyLeafRange { front: None, back: None }
+ }
+
+ /// Temporarily takes out another, immutable equivalent of the same range.
+ pub fn reborrow(&self) -> LazyLeafRange<marker::Immut<'_>, K, V> {
+ LazyLeafRange {
+ front: self.front.as_ref().map(|f| f.reborrow()),
+ back: self.back.as_ref().map(|b| b.reborrow()),
+ }
+ }
+}
+
+impl<'a, K, V> LazyLeafRange<marker::Immut<'a>, K, V> {
+ #[inline]
+ pub unsafe fn next_unchecked(&mut self) -> (&'a K, &'a V) {
+ unsafe { self.init_front().unwrap().next_unchecked() }
+ }
+
+ #[inline]
+ pub unsafe fn next_back_unchecked(&mut self) -> (&'a K, &'a V) {
+ unsafe { self.init_back().unwrap().next_back_unchecked() }
+ }
+}
+
+impl<'a, K, V> LazyLeafRange<marker::ValMut<'a>, K, V> {
+ #[inline]
+ pub unsafe fn next_unchecked(&mut self) -> (&'a K, &'a mut V) {
+ unsafe { self.init_front().unwrap().next_unchecked() }
+ }
+
+ #[inline]
+ pub unsafe fn next_back_unchecked(&mut self) -> (&'a K, &'a mut V) {
+ unsafe { self.init_back().unwrap().next_back_unchecked() }
+ }
+}
+
+impl<K, V> LazyLeafRange<marker::Dying, K, V> {
+ fn take_front(
+ &mut self,
+ ) -> Option<Handle<NodeRef<marker::Dying, K, V, marker::Leaf>, marker::Edge>> {
+ match self.front.take()? {
+ LazyLeafHandle::Root(root) => Some(root.first_leaf_edge()),
+ LazyLeafHandle::Edge(edge) => Some(edge),
+ }
+ }
+
+ #[inline]
+ pub unsafe fn deallocating_next_unchecked<A: Allocator + Clone>(
+ &mut self,
+ alloc: A,
+ ) -> Handle<NodeRef<marker::Dying, K, V, marker::LeafOrInternal>, marker::KV> {
+ debug_assert!(self.front.is_some());
+ let front = self.init_front().unwrap();
+ unsafe { front.deallocating_next_unchecked(alloc) }
+ }
+
+ #[inline]
+ pub unsafe fn deallocating_next_back_unchecked<A: Allocator + Clone>(
+ &mut self,
+ alloc: A,
+ ) -> Handle<NodeRef<marker::Dying, K, V, marker::LeafOrInternal>, marker::KV> {
+ debug_assert!(self.back.is_some());
+ let back = self.init_back().unwrap();
+ unsafe { back.deallocating_next_back_unchecked(alloc) }
+ }
+
+ #[inline]
+ pub fn deallocating_end<A: Allocator + Clone>(&mut self, alloc: A) {
+ if let Some(front) = self.take_front() {
+ front.deallocating_end(alloc)
+ }
+ }
+}
+
+impl<BorrowType: marker::BorrowType, K, V> LazyLeafRange<BorrowType, K, V> {
+ fn init_front(
+ &mut self,
+ ) -> Option<&mut Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge>> {
+ if let Some(LazyLeafHandle::Root(root)) = &self.front {
+ self.front = Some(LazyLeafHandle::Edge(unsafe { ptr::read(root) }.first_leaf_edge()));
+ }
+ match &mut self.front {
+ None => None,
+ Some(LazyLeafHandle::Edge(edge)) => Some(edge),
+ // SAFETY: the code above would have replaced it.
+ Some(LazyLeafHandle::Root(_)) => unsafe { hint::unreachable_unchecked() },
+ }
+ }
+
+ fn init_back(
+ &mut self,
+ ) -> Option<&mut Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge>> {
+ if let Some(LazyLeafHandle::Root(root)) = &self.back {
+ self.back = Some(LazyLeafHandle::Edge(unsafe { ptr::read(root) }.last_leaf_edge()));
+ }
+ match &mut self.back {
+ None => None,
+ Some(LazyLeafHandle::Edge(edge)) => Some(edge),
+ // SAFETY: the code above would have replaced it.
+ Some(LazyLeafHandle::Root(_)) => unsafe { hint::unreachable_unchecked() },
+ }
+ }
+}
+
+impl<BorrowType: marker::BorrowType, K, V> NodeRef<BorrowType, K, V, marker::LeafOrInternal> {
+ /// Finds the distinct leaf edges delimiting a specified range in a tree.
+ ///
+ /// If such distinct edges exist, returns them in ascending order, meaning
+ /// that a non-zero number of calls to `next_unchecked` on the `front` of
+ /// the result and/or calls to `next_back_unchecked` on the `back` of the
+ /// result will eventually reach the same edge.
+ ///
+ /// If there are no such edges, i.e., if the tree contains no key within
+ /// the range, returns an empty `front` and `back`.
+ ///
+ /// # Safety
+ /// Unless `BorrowType` is `Immut`, do not use the handles to visit the same
+ /// KV twice.
+ unsafe fn find_leaf_edges_spanning_range<Q: ?Sized, R>(
+ self,
+ range: R,
+ ) -> LeafRange<BorrowType, K, V>
+ where
+ Q: Ord,
+ K: Borrow<Q>,
+ R: RangeBounds<Q>,
+ {
+ match self.search_tree_for_bifurcation(&range) {
+ Err(_) => LeafRange::none(),
+ Ok((
+ node,
+ lower_edge_idx,
+ upper_edge_idx,
+ mut lower_child_bound,
+ mut upper_child_bound,
+ )) => {
+ let mut lower_edge = unsafe { Handle::new_edge(ptr::read(&node), lower_edge_idx) };
+ let mut upper_edge = unsafe { Handle::new_edge(node, upper_edge_idx) };
+ loop {
+ match (lower_edge.force(), upper_edge.force()) {
+ (Leaf(f), Leaf(b)) => return LeafRange { front: Some(f), back: Some(b) },
+ (Internal(f), Internal(b)) => {
+ (lower_edge, lower_child_bound) =
+ f.descend().find_lower_bound_edge(lower_child_bound);
+ (upper_edge, upper_child_bound) =
+ b.descend().find_upper_bound_edge(upper_child_bound);
+ }
+ _ => unreachable!("BTreeMap has different depths"),
+ }
+ }
+ }
+ }
+ }
+}
+
+fn full_range<BorrowType: marker::BorrowType, K, V>(
+ root1: NodeRef<BorrowType, K, V, marker::LeafOrInternal>,
+ root2: NodeRef<BorrowType, K, V, marker::LeafOrInternal>,
+) -> LazyLeafRange<BorrowType, K, V> {
+ LazyLeafRange {
+ front: Some(LazyLeafHandle::Root(root1)),
+ back: Some(LazyLeafHandle::Root(root2)),
+ }
+}
+
+impl<'a, K: 'a, V: 'a> NodeRef<marker::Immut<'a>, K, V, marker::LeafOrInternal> {
+ /// Finds the pair of leaf edges delimiting a specific range in a tree.
+ ///
+ /// The result is meaningful only if the tree is ordered by key, like the tree
+ /// in a `BTreeMap` is.
+ pub fn range_search<Q, R>(self, range: R) -> LeafRange<marker::Immut<'a>, K, V>
+ where
+ Q: ?Sized + Ord,
+ K: Borrow<Q>,
+ R: RangeBounds<Q>,
+ {
+ // SAFETY: our borrow type is immutable.
+ unsafe { self.find_leaf_edges_spanning_range(range) }
+ }
+
+ /// Finds the pair of leaf edges delimiting an entire tree.
+ pub fn full_range(self) -> LazyLeafRange<marker::Immut<'a>, K, V> {
+ full_range(self, self)
+ }
+}
+
+impl<'a, K: 'a, V: 'a> NodeRef<marker::ValMut<'a>, K, V, marker::LeafOrInternal> {
+ /// Splits a unique reference into a pair of leaf edges delimiting a specified range.
+ /// The results are non-unique references allowing (some) mutation, which must be used
+ /// carefully.
+ ///
+ /// The result is meaningful only if the tree is ordered by key, like the tree
+ /// in a `BTreeMap` is.
+ ///
+ /// # Safety
+ /// Do not use the duplicate handles to visit the same KV twice.
+ pub fn range_search<Q, R>(self, range: R) -> LeafRange<marker::ValMut<'a>, K, V>
+ where
+ Q: ?Sized + Ord,
+ K: Borrow<Q>,
+ R: RangeBounds<Q>,
+ {
+ unsafe { self.find_leaf_edges_spanning_range(range) }
+ }
+
+ /// Splits a unique reference into a pair of leaf edges delimiting the full range of the tree.
+ /// The results are non-unique references allowing mutation (of values only), so they must
+ /// be used with care.
+ pub fn full_range(self) -> LazyLeafRange<marker::ValMut<'a>, K, V> {
+ // We duplicate the root NodeRef here -- we will never visit the same KV
+ // twice, and never end up with overlapping value references.
+ let self2 = unsafe { ptr::read(&self) };
+ full_range(self, self2)
+ }
+}
+
+impl<K, V> NodeRef<marker::Dying, K, V, marker::LeafOrInternal> {
+ /// Splits a unique reference into a pair of leaf edges delimiting the full range of the tree.
+ /// The results are non-unique references allowing massively destructive mutation, so they
+ /// must be used with the utmost care.
+ pub fn full_range(self) -> LazyLeafRange<marker::Dying, K, V> {
+ // We duplicate the root NodeRef here -- we will never access it in a way
+ // that overlaps references obtained from the root.
+ let self2 = unsafe { ptr::read(&self) };
+ full_range(self, self2)
+ }
+}
+
+impl<BorrowType: marker::BorrowType, K, V>
+ Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge>
+{
+ /// Given a leaf edge handle, returns [`Result::Ok`] with a handle to the neighboring KV
+ /// on the right side, which is either in the same leaf node or in an ancestor node.
+ /// If the leaf edge is the last one in the tree, returns [`Result::Err`] with the root node.
+ pub fn next_kv(
+ self,
+ ) -> Result<
+ Handle<NodeRef<BorrowType, K, V, marker::LeafOrInternal>, marker::KV>,
+ NodeRef<BorrowType, K, V, marker::LeafOrInternal>,
+ > {
+ let mut edge = self.forget_node_type();
+ loop {
+ edge = match edge.right_kv() {
+ Ok(kv) => return Ok(kv),
+ Err(last_edge) => match last_edge.into_node().ascend() {
+ Ok(parent_edge) => parent_edge.forget_node_type(),
+ Err(root) => return Err(root),
+ },
+ }
+ }
+ }
+
+ /// Given a leaf edge handle, returns [`Result::Ok`] with a handle to the neighboring KV
+ /// on the left side, which is either in the same leaf node or in an ancestor node.
+ /// If the leaf edge is the first one in the tree, returns [`Result::Err`] with the root node.
+ fn next_back_kv(
+ self,
+ ) -> Result<
+ Handle<NodeRef<BorrowType, K, V, marker::LeafOrInternal>, marker::KV>,
+ NodeRef<BorrowType, K, V, marker::LeafOrInternal>,
+ > {
+ let mut edge = self.forget_node_type();
+ loop {
+ edge = match edge.left_kv() {
+ Ok(kv) => return Ok(kv),
+ Err(last_edge) => match last_edge.into_node().ascend() {
+ Ok(parent_edge) => parent_edge.forget_node_type(),
+ Err(root) => return Err(root),
+ },
+ }
+ }
+ }
+}
+
+impl<BorrowType: marker::BorrowType, K, V>
+ Handle<NodeRef<BorrowType, K, V, marker::Internal>, marker::Edge>
+{
+ /// Given an internal edge handle, returns [`Result::Ok`] with a handle to the neighboring KV
+ /// on the right side, which is either in the same internal node or in an ancestor node.
+ /// If the internal edge is the last one in the tree, returns [`Result::Err`] with the root node.
+ fn next_kv(
+ self,
+ ) -> Result<
+ Handle<NodeRef<BorrowType, K, V, marker::Internal>, marker::KV>,
+ NodeRef<BorrowType, K, V, marker::Internal>,
+ > {
+ let mut edge = self;
+ loop {
+ edge = match edge.right_kv() {
+ Ok(internal_kv) => return Ok(internal_kv),
+ Err(last_edge) => match last_edge.into_node().ascend() {
+ Ok(parent_edge) => parent_edge,
+ Err(root) => return Err(root),
+ },
+ }
+ }
+ }
+}
+
+impl<K, V> Handle<NodeRef<marker::Dying, K, V, marker::Leaf>, marker::Edge> {
+ /// Given a leaf edge handle into a dying tree, returns the next leaf edge
+ /// on the right side, and the key-value pair in between, if they exist.
+ ///
+ /// If the given edge is the last one in a leaf, this method deallocates
+ /// the leaf, as well as any ancestor nodes whose last edge was reached.
+ /// This implies that if no more key-value pair follows, the entire tree
+ /// will have been deallocated and there is nothing left to return.
+ ///
+ /// # Safety
+ /// - The given edge must not have been previously returned by counterpart
+ /// `deallocating_next_back`.
+ /// - The returned KV handle is only valid to access the key and value,
+ /// and only valid until the next call to a `deallocating_` method.
+ unsafe fn deallocating_next<A: Allocator + Clone>(
+ self,
+ alloc: A,
+ ) -> Option<(Self, Handle<NodeRef<marker::Dying, K, V, marker::LeafOrInternal>, marker::KV>)>
+ {
+ let mut edge = self.forget_node_type();
+ loop {
+ edge = match edge.right_kv() {
+ Ok(kv) => return Some((unsafe { ptr::read(&kv) }.next_leaf_edge(), kv)),
+ Err(last_edge) => {
+ match unsafe { last_edge.into_node().deallocate_and_ascend(alloc.clone()) } {
+ Some(parent_edge) => parent_edge.forget_node_type(),
+ None => return None,
+ }
+ }
+ }
+ }
+ }
+
+ /// Given a leaf edge handle into a dying tree, returns the next leaf edge
+ /// on the left side, and the key-value pair in between, if they exist.
+ ///
+ /// If the given edge is the first one in a leaf, this method deallocates
+ /// the leaf, as well as any ancestor nodes whose first edge was reached.
+ /// This implies that if no more key-value pair follows, the entire tree
+ /// will have been deallocated and there is nothing left to return.
+ ///
+ /// # Safety
+ /// - The given edge must not have been previously returned by counterpart
+ /// `deallocating_next`.
+ /// - The returned KV handle is only valid to access the key and value,
+ /// and only valid until the next call to a `deallocating_` method.
+ unsafe fn deallocating_next_back<A: Allocator + Clone>(
+ self,
+ alloc: A,
+ ) -> Option<(Self, Handle<NodeRef<marker::Dying, K, V, marker::LeafOrInternal>, marker::KV>)>
+ {
+ let mut edge = self.forget_node_type();
+ loop {
+ edge = match edge.left_kv() {
+ Ok(kv) => return Some((unsafe { ptr::read(&kv) }.next_back_leaf_edge(), kv)),
+ Err(last_edge) => {
+ match unsafe { last_edge.into_node().deallocate_and_ascend(alloc.clone()) } {
+ Some(parent_edge) => parent_edge.forget_node_type(),
+ None => return None,
+ }
+ }
+ }
+ }
+ }
+
+ /// Deallocates a pile of nodes from the leaf up to the root.
+ /// This is the only way to deallocate the remainder of a tree after
+ /// `deallocating_next` and `deallocating_next_back` have been nibbling at
+ /// both sides of the tree, and have hit the same edge. As it is intended
+ /// only to be called when all keys and values have been returned,
+ /// no cleanup is done on any of the keys or values.
+ fn deallocating_end<A: Allocator + Clone>(self, alloc: A) {
+ let mut edge = self.forget_node_type();
+ while let Some(parent_edge) =
+ unsafe { edge.into_node().deallocate_and_ascend(alloc.clone()) }
+ {
+ edge = parent_edge.forget_node_type();
+ }
+ }
+}
+
+impl<'a, K, V> Handle<NodeRef<marker::Immut<'a>, K, V, marker::Leaf>, marker::Edge> {
+ /// Moves the leaf edge handle to the next leaf edge and returns references to the
+ /// key and value in between.
+ ///
+ /// # Safety
+ /// There must be another KV in the direction travelled.
+ unsafe fn next_unchecked(&mut self) -> (&'a K, &'a V) {
+ super::mem::replace(self, |leaf_edge| {
+ let kv = leaf_edge.next_kv().ok().unwrap();
+ (kv.next_leaf_edge(), kv.into_kv())
+ })
+ }
+
+ /// Moves the leaf edge handle to the previous leaf edge and returns references to the
+ /// key and value in between.
+ ///
+ /// # Safety
+ /// There must be another KV in the direction travelled.
+ unsafe fn next_back_unchecked(&mut self) -> (&'a K, &'a V) {
+ super::mem::replace(self, |leaf_edge| {
+ let kv = leaf_edge.next_back_kv().ok().unwrap();
+ (kv.next_back_leaf_edge(), kv.into_kv())
+ })
+ }
+}
+
+impl<'a, K, V> Handle<NodeRef<marker::ValMut<'a>, K, V, marker::Leaf>, marker::Edge> {
+ /// Moves the leaf edge handle to the next leaf edge and returns references to the
+ /// key and value in between.
+ ///
+ /// # Safety
+ /// There must be another KV in the direction travelled.
+ unsafe fn next_unchecked(&mut self) -> (&'a K, &'a mut V) {
+ let kv = super::mem::replace(self, |leaf_edge| {
+ let kv = leaf_edge.next_kv().ok().unwrap();
+ (unsafe { ptr::read(&kv) }.next_leaf_edge(), kv)
+ });
+ // Doing this last is faster, according to benchmarks.
+ kv.into_kv_valmut()
+ }
+
+ /// Moves the leaf edge handle to the previous leaf edge and returns references to the
+ /// key and value in between.
+ ///
+ /// # Safety
+ /// There must be another KV in the direction travelled.
+ unsafe fn next_back_unchecked(&mut self) -> (&'a K, &'a mut V) {
+ let kv = super::mem::replace(self, |leaf_edge| {
+ let kv = leaf_edge.next_back_kv().ok().unwrap();
+ (unsafe { ptr::read(&kv) }.next_back_leaf_edge(), kv)
+ });
+ // Doing this last is faster, according to benchmarks.
+ kv.into_kv_valmut()
+ }
+}
+
+impl<K, V> Handle<NodeRef<marker::Dying, K, V, marker::Leaf>, marker::Edge> {
+ /// Moves the leaf edge handle to the next leaf edge and returns the key and value
+ /// in between, deallocating any node left behind while leaving the corresponding
+ /// edge in its parent node dangling.
+ ///
+ /// # Safety
+ /// - There must be another KV in the direction travelled.
+ /// - That KV was not previously returned by counterpart
+ /// `deallocating_next_back_unchecked` on any copy of the handles
+ /// being used to traverse the tree.
+ ///
+ /// The only safe way to proceed with the updated handle is to compare it, drop it,
+ /// or call this method or counterpart `deallocating_next_back_unchecked` again.
+ unsafe fn deallocating_next_unchecked<A: Allocator + Clone>(
+ &mut self,
+ alloc: A,
+ ) -> Handle<NodeRef<marker::Dying, K, V, marker::LeafOrInternal>, marker::KV> {
+ super::mem::replace(self, |leaf_edge| unsafe {
+ leaf_edge.deallocating_next(alloc).unwrap()
+ })
+ }
+
+ /// Moves the leaf edge handle to the previous leaf edge and returns the key and value
+ /// in between, deallocating any node left behind while leaving the corresponding
+ /// edge in its parent node dangling.
+ ///
+ /// # Safety
+ /// - There must be another KV in the direction travelled.
+ /// - That leaf edge was not previously returned by counterpart
+ /// `deallocating_next_unchecked` on any copy of the handles
+ /// being used to traverse the tree.
+ ///
+ /// The only safe way to proceed with the updated handle is to compare it, drop it,
+ /// or call this method or counterpart `deallocating_next_unchecked` again.
+ unsafe fn deallocating_next_back_unchecked<A: Allocator + Clone>(
+ &mut self,
+ alloc: A,
+ ) -> Handle<NodeRef<marker::Dying, K, V, marker::LeafOrInternal>, marker::KV> {
+ super::mem::replace(self, |leaf_edge| unsafe {
+ leaf_edge.deallocating_next_back(alloc).unwrap()
+ })
+ }
+}
+
+impl<BorrowType: marker::BorrowType, K, V> NodeRef<BorrowType, K, V, marker::LeafOrInternal> {
+ /// Returns the leftmost leaf edge in or underneath a node - in other words, the edge
+ /// you need first when navigating forward (or last when navigating backward).
+ #[inline]
+ pub fn first_leaf_edge(self) -> Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge> {
+ let mut node = self;
+ loop {
+ match node.force() {
+ Leaf(leaf) => return leaf.first_edge(),
+ Internal(internal) => node = internal.first_edge().descend(),
+ }
+ }
+ }
+
+ /// Returns the rightmost leaf edge in or underneath a node - in other words, the edge
+ /// you need last when navigating forward (or first when navigating backward).
+ #[inline]
+ pub fn last_leaf_edge(self) -> Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge> {
+ let mut node = self;
+ loop {
+ match node.force() {
+ Leaf(leaf) => return leaf.last_edge(),
+ Internal(internal) => node = internal.last_edge().descend(),
+ }
+ }
+ }
+}
+
+pub enum Position<BorrowType, K, V> {
+ Leaf(NodeRef<BorrowType, K, V, marker::Leaf>),
+ Internal(NodeRef<BorrowType, K, V, marker::Internal>),
+ InternalKV(Handle<NodeRef<BorrowType, K, V, marker::Internal>, marker::KV>),
+}
+
+impl<'a, K: 'a, V: 'a> NodeRef<marker::Immut<'a>, K, V, marker::LeafOrInternal> {
+ /// Visits leaf nodes and internal KVs in order of ascending keys, and also
+ /// visits internal nodes as a whole in a depth first order, meaning that
+ /// internal nodes precede their individual KVs and their child nodes.
+ pub fn visit_nodes_in_order<F>(self, mut visit: F)
+ where
+ F: FnMut(Position<marker::Immut<'a>, K, V>),
+ {
+ match self.force() {
+ Leaf(leaf) => visit(Position::Leaf(leaf)),
+ Internal(internal) => {
+ visit(Position::Internal(internal));
+ let mut edge = internal.first_edge();
+ loop {
+ edge = match edge.descend().force() {
+ Leaf(leaf) => {
+ visit(Position::Leaf(leaf));
+ match edge.next_kv() {
+ Ok(kv) => {
+ visit(Position::InternalKV(kv));
+ kv.right_edge()
+ }
+ Err(_) => return,
+ }
+ }
+ Internal(internal) => {
+ visit(Position::Internal(internal));
+ internal.first_edge()
+ }
+ }
+ }
+ }
+ }
+ }
+
+ /// Calculates the number of elements in a (sub)tree.
+ pub fn calc_length(self) -> usize {
+ let mut result = 0;
+ self.visit_nodes_in_order(|pos| match pos {
+ Position::Leaf(node) => result += node.len(),
+ Position::Internal(node) => result += node.len(),
+ Position::InternalKV(_) => (),
+ });
+ result
+ }
+}
+
+impl<BorrowType: marker::BorrowType, K, V>
+ Handle<NodeRef<BorrowType, K, V, marker::LeafOrInternal>, marker::KV>
+{
+ /// Returns the leaf edge closest to a KV for forward navigation.
+ pub fn next_leaf_edge(self) -> Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge> {
+ match self.force() {
+ Leaf(leaf_kv) => leaf_kv.right_edge(),
+ Internal(internal_kv) => {
+ let next_internal_edge = internal_kv.right_edge();
+ next_internal_edge.descend().first_leaf_edge()
+ }
+ }
+ }
+
+ /// Returns the leaf edge closest to a KV for backward navigation.
+ fn next_back_leaf_edge(self) -> Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge> {
+ match self.force() {
+ Leaf(leaf_kv) => leaf_kv.left_edge(),
+ Internal(internal_kv) => {
+ let next_internal_edge = internal_kv.left_edge();
+ next_internal_edge.descend().last_leaf_edge()
+ }
+ }
+ }
+}
diff --git a/library/alloc/src/collections/btree/node.rs b/library/alloc/src/collections/btree/node.rs
new file mode 100644
index 000000000..d831161bc
--- /dev/null
+++ b/library/alloc/src/collections/btree/node.rs
@@ -0,0 +1,1753 @@
+// This is an attempt at an implementation following the ideal
+//
+// ```
+// struct BTreeMap<K, V> {
+// height: usize,
+// root: Option<Box<Node<K, V, height>>>
+// }
+//
+// struct Node<K, V, height: usize> {
+// keys: [K; 2 * B - 1],
+// vals: [V; 2 * B - 1],
+// edges: [if height > 0 { Box<Node<K, V, height - 1>> } else { () }; 2 * B],
+// parent: Option<(NonNull<Node<K, V, height + 1>>, u16)>,
+// len: u16,
+// }
+// ```
+//
+// Since Rust doesn't actually have dependent types and polymorphic recursion,
+// we make do with lots of unsafety.
+
+// A major goal of this module is to avoid complexity by treating the tree as a generic (if
+// weirdly shaped) container and avoiding dealing with most of the B-Tree invariants. As such,
+// this module doesn't care whether the entries are sorted, which nodes can be underfull, or
+// even what underfull means. However, we do rely on a few invariants:
+//
+// - Trees must have uniform depth/height. This means that every path down to a leaf from a
+// given node has exactly the same length.
+// - A node of length `n` has `n` keys, `n` values, and `n + 1` edges.
+// This implies that even an empty node has at least one edge.
+// For a leaf node, "having an edge" only means we can identify a position in the node,
+// since leaf edges are empty and need no data representation. In an internal node,
+// an edge both identifies a position and contains a pointer to a child node.
+
+use core::marker::PhantomData;
+use core::mem::{self, MaybeUninit};
+use core::ptr::{self, NonNull};
+use core::slice::SliceIndex;
+
+use crate::alloc::{Allocator, Layout};
+use crate::boxed::Box;
+
+const B: usize = 6;
+pub const CAPACITY: usize = 2 * B - 1;
+pub const MIN_LEN_AFTER_SPLIT: usize = B - 1;
+const KV_IDX_CENTER: usize = B - 1;
+const EDGE_IDX_LEFT_OF_CENTER: usize = B - 1;
+const EDGE_IDX_RIGHT_OF_CENTER: usize = B;
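+// With B = 6, a node holds at most CAPACITY = 11 key-value pairs and, if internal,
+// up to 2 * B = 12 edges; splitting a full node leaves MIN_LEN_AFTER_SPLIT = 5
+// key-value pairs in each half.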
+
+/// The underlying representation of leaf nodes and part of the representation of internal nodes.
+struct LeafNode<K, V> {
+ /// We want to be covariant in `K` and `V`.
+ parent: Option<NonNull<InternalNode<K, V>>>,
+
+ /// This node's index into the parent node's `edges` array.
+ /// `*node.parent.edges[node.parent_idx]` should be the same thing as `node`.
+ /// This is only guaranteed to be initialized when `parent` is non-null.
+ parent_idx: MaybeUninit<u16>,
+
+ /// The number of keys and values this node stores.
+ len: u16,
+
+ /// The arrays storing the actual data of the node. Only the first `len` elements of each
+ /// array are initialized and valid.
+ keys: [MaybeUninit<K>; CAPACITY],
+ vals: [MaybeUninit<V>; CAPACITY],
+}
+
+impl<K, V> LeafNode<K, V> {
+ /// Initializes a new `LeafNode` in-place.
+ unsafe fn init(this: *mut Self) {
+ // As a general policy, we leave fields uninitialized if they can be, as this should
+ // be both slightly faster and easier to track in Valgrind.
+ unsafe {
+ // parent_idx, keys, and vals are all MaybeUninit
+ ptr::addr_of_mut!((*this).parent).write(None);
+ ptr::addr_of_mut!((*this).len).write(0);
+ }
+ }
+
+ /// Creates a new boxed `LeafNode`.
+ fn new<A: Allocator + Clone>(alloc: A) -> Box<Self, A> {
+ unsafe {
+ let mut leaf = Box::new_uninit_in(alloc);
+ LeafNode::init(leaf.as_mut_ptr());
+ leaf.assume_init()
+ }
+ }
+}
+
+/// The underlying representation of internal nodes. As with `LeafNode`s, these should be hidden
+/// behind `BoxedNode`s to prevent dropping uninitialized keys and values. Any pointer to an
+/// `InternalNode` can be directly cast to a pointer to the underlying `LeafNode` portion of the
+/// node, allowing code to act on leaf and internal nodes generically without having to even check
+/// which of the two a pointer is pointing at. This property is enabled by the use of `repr(C)`.
+#[repr(C)]
+// gdb_providers.py uses this type name for introspection.
+struct InternalNode<K, V> {
+ data: LeafNode<K, V>,
+
+ /// The pointers to the children of this node. `len + 1` of these are considered
+ /// initialized and valid, except that near the end, while the tree is held
+ /// through borrow type `Dying`, some of these pointers are dangling.
+ edges: [MaybeUninit<BoxedNode<K, V>>; 2 * B],
+}
+
+impl<K, V> InternalNode<K, V> {
+ /// Creates a new boxed `InternalNode`.
+ ///
+ /// # Safety
+ /// An invariant of internal nodes is that they have at least one
+ /// initialized and valid edge. This function does not set up
+ /// such an edge.
+ unsafe fn new<A: Allocator + Clone>(alloc: A) -> Box<Self, A> {
+ unsafe {
+ let mut node = Box::<Self, _>::new_uninit_in(alloc);
+ // We only need to initialize the data; the edges are MaybeUninit.
+ LeafNode::init(ptr::addr_of_mut!((*node.as_mut_ptr()).data));
+ node.assume_init()
+ }
+ }
+}
+
+/// A managed, non-null pointer to a node. This is either an owned pointer to
+/// `LeafNode<K, V>` or an owned pointer to `InternalNode<K, V>`.
+///
+/// However, `BoxedNode` contains no information as to which of the two types
+/// of nodes it actually contains, and, partially due to this lack of information,
+/// is not a separate type and has no destructor.
+type BoxedNode<K, V> = NonNull<LeafNode<K, V>>;
+
+// N.B. `NodeRef` is always covariant in `K` and `V`, even when the `BorrowType`
+// is `Mut`. This is technically wrong, but cannot result in any unsafety due to
+// internal use of `NodeRef` because we stay completely generic over `K` and `V`.
+// However, whenever a public type wraps `NodeRef`, make sure that it has the
+// correct variance.
+///
+/// A reference to a node.
+///
+/// This type has a number of parameters that control how it acts:
+/// - `BorrowType`: A dummy type that describes the kind of borrow and carries a lifetime.
+/// - When this is `Immut<'a>`, the `NodeRef` acts roughly like `&'a Node`.
+/// - When this is `ValMut<'a>`, the `NodeRef` acts roughly like `&'a Node`
+/// with respect to keys and tree structure, but also allows many
+/// mutable references to values throughout the tree to coexist.
+/// - When this is `Mut<'a>`, the `NodeRef` acts roughly like `&'a mut Node`,
+/// although insert methods allow a mutable pointer to a value to coexist.
+/// - When this is `Owned`, the `NodeRef` acts roughly like `Box<Node>`,
+/// but does not have a destructor, and must be cleaned up manually.
+/// - When this is `Dying`, the `NodeRef` still acts roughly like `Box<Node>`,
+/// but has methods to destroy the tree bit by bit, and ordinary methods,
+/// while not marked as unsafe to call, can invoke UB if called incorrectly.
+/// Since any `NodeRef` allows navigating through the tree, `BorrowType`
+/// effectively applies to the entire tree, not just to the node itself.
+/// - `K` and `V`: These are the types of keys and values stored in the nodes.
+/// - `Type`: This can be `Leaf`, `Internal`, or `LeafOrInternal`. When this is
+/// `Leaf`, the `NodeRef` points to a leaf node, when this is `Internal` the
+/// `NodeRef` points to an internal node, and when this is `LeafOrInternal` the
+/// `NodeRef` could be pointing to either type of node.
+/// `Type` is named `NodeType` when used outside `NodeRef`.
+///
+/// Both `BorrowType` and `NodeType` restrict what methods we implement, to
+/// exploit static type safety. There are limitations in the way we can apply
+/// such restrictions:
+/// - For each type parameter, we can only define a method either generically
+/// or for one particular type. For example, we cannot define a method like
+/// `into_kv` generically for all `BorrowType`, or once for all types that
+/// carry a lifetime, because we want it to return `&'a` references.
+/// Therefore, we define it only for the least powerful type `Immut<'a>`.
+/// - We cannot get implicit coercion from say `Mut<'a>` to `Immut<'a>`.
+/// Therefore, we have to explicitly call `reborrow` on a more powerful
+/// `NodeRef` in order to reach a method like `into_kv`.
+///
+/// All methods on `NodeRef` that return some kind of reference, either:
+/// - Take `self` by value, and return the lifetime carried by `BorrowType`.
+/// Sometimes, to invoke such a method, we need to call `reborrow_mut`.
+/// - Take `self` by reference, and (implicitly) return that reference's
+/// lifetime, instead of the lifetime carried by `BorrowType`. That way,
+/// the borrow checker guarantees that the `NodeRef` remains borrowed as long
+/// as the returned reference is used.
+/// The methods supporting insert bend this rule by returning a raw pointer,
+/// i.e., a reference without any lifetime.
+pub struct NodeRef<BorrowType, K, V, Type> {
+ /// The number of levels that the node and the level of leaves are apart, a
+ /// constant of the node that cannot be entirely described by `Type`, and that
+ /// the node itself does not store. We only need to store the height of the root
+ /// node, and derive every other node's height from it.
+ /// Must be zero if `Type` is `Leaf` and non-zero if `Type` is `Internal`.
+ height: usize,
+ /// The pointer to the leaf or internal node. The definition of `InternalNode`
+ /// ensures that the pointer is valid either way.
+ node: NonNull<LeafNode<K, V>>,
+ _marker: PhantomData<(BorrowType, Type)>,
+}
+
+/// The root node of an owned tree.
+///
+/// Note that this does not have a destructor, and must be cleaned up manually.
+pub type Root<K, V> = NodeRef<marker::Owned, K, V, marker::LeafOrInternal>;
+
+impl<'a, K: 'a, V: 'a, Type> Copy for NodeRef<marker::Immut<'a>, K, V, Type> {}
+impl<'a, K: 'a, V: 'a, Type> Clone for NodeRef<marker::Immut<'a>, K, V, Type> {
+ fn clone(&self) -> Self {
+ *self
+ }
+}
+
+unsafe impl<BorrowType, K: Sync, V: Sync, Type> Sync for NodeRef<BorrowType, K, V, Type> {}
+
+unsafe impl<'a, K: Sync + 'a, V: Sync + 'a, Type> Send for NodeRef<marker::Immut<'a>, K, V, Type> {}
+unsafe impl<'a, K: Send + 'a, V: Send + 'a, Type> Send for NodeRef<marker::Mut<'a>, K, V, Type> {}
+unsafe impl<'a, K: Send + 'a, V: Send + 'a, Type> Send for NodeRef<marker::ValMut<'a>, K, V, Type> {}
+unsafe impl<K: Send, V: Send, Type> Send for NodeRef<marker::Owned, K, V, Type> {}
+unsafe impl<K: Send, V: Send, Type> Send for NodeRef<marker::Dying, K, V, Type> {}
+
+impl<K, V> NodeRef<marker::Owned, K, V, marker::Leaf> {
+ pub fn new_leaf<A: Allocator + Clone>(alloc: A) -> Self {
+ Self::from_new_leaf(LeafNode::new(alloc))
+ }
+
+ fn from_new_leaf<A: Allocator + Clone>(leaf: Box<LeafNode<K, V>, A>) -> Self {
+ NodeRef { height: 0, node: NonNull::from(Box::leak(leaf)), _marker: PhantomData }
+ }
+}
+
+impl<K, V> NodeRef<marker::Owned, K, V, marker::Internal> {
+ fn new_internal<A: Allocator + Clone>(child: Root<K, V>, alloc: A) -> Self {
+ let mut new_node = unsafe { InternalNode::new(alloc) };
+ new_node.edges[0].write(child.node);
+ unsafe { NodeRef::from_new_internal(new_node, child.height + 1) }
+ }
+
+ /// # Safety
+ /// `height` must not be zero.
+ unsafe fn from_new_internal<A: Allocator + Clone>(
+ internal: Box<InternalNode<K, V>, A>,
+ height: usize,
+ ) -> Self {
+ debug_assert!(height > 0);
+ let node = NonNull::from(Box::leak(internal)).cast();
+ let mut this = NodeRef { height, node, _marker: PhantomData };
+ this.borrow_mut().correct_all_childrens_parent_links();
+ this
+ }
+}
+
+impl<BorrowType, K, V> NodeRef<BorrowType, K, V, marker::Internal> {
+ /// Unpack a node reference that was packed as `NodeRef::parent`.
+ fn from_internal(node: NonNull<InternalNode<K, V>>, height: usize) -> Self {
+ debug_assert!(height > 0);
+ NodeRef { height, node: node.cast(), _marker: PhantomData }
+ }
+}
+
+impl<BorrowType, K, V> NodeRef<BorrowType, K, V, marker::Internal> {
+ /// Exposes the data of an internal node.
+ ///
+ /// Returns a raw ptr to avoid invalidating other references to this node.
+ fn as_internal_ptr(this: &Self) -> *mut InternalNode<K, V> {
+ // SAFETY: the static node type is `Internal`.
+ this.node.as_ptr() as *mut InternalNode<K, V>
+ }
+}
+
+impl<'a, K, V> NodeRef<marker::Mut<'a>, K, V, marker::Internal> {
+ /// Borrows exclusive access to the data of an internal node.
+ fn as_internal_mut(&mut self) -> &mut InternalNode<K, V> {
+ let ptr = Self::as_internal_ptr(self);
+ unsafe { &mut *ptr }
+ }
+}
+
+impl<BorrowType, K, V, Type> NodeRef<BorrowType, K, V, Type> {
+ /// Finds the length of the node. This is the number of keys or values.
+ /// The number of edges is `len() + 1`.
+ /// Note that, despite being safe, calling this function can have the side effect
+ /// of invalidating mutable references that unsafe code has created.
+ pub fn len(&self) -> usize {
+ // Crucially, we only access the `len` field here. If BorrowType is marker::ValMut,
+ // there might be outstanding mutable references to values that we must not invalidate.
+ unsafe { usize::from((*Self::as_leaf_ptr(self)).len) }
+ }
+
+ /// Returns the number of levels that the node and leaves are apart. Zero
+ /// height means the node is a leaf itself. If you picture trees with the
+ /// root on top, the number says at which elevation the node appears.
+ /// If you picture trees with leaves on top, the number says how high
+ /// the tree extends above the node.
+ pub fn height(&self) -> usize {
+ self.height
+ }
+
+ /// Temporarily takes out another immutable reference to the same node.
+ pub fn reborrow(&self) -> NodeRef<marker::Immut<'_>, K, V, Type> {
+ NodeRef { height: self.height, node: self.node, _marker: PhantomData }
+ }
+
+ /// Exposes the leaf portion of any leaf or internal node.
+ ///
+ /// Returns a raw ptr to avoid invalidating other references to this node.
+ fn as_leaf_ptr(this: &Self) -> *mut LeafNode<K, V> {
+ // The node must be valid for at least the LeafNode portion.
+ // This is not a reference in the NodeRef type because we don't know if
+ // it should be unique or shared.
+ this.node.as_ptr()
+ }
+}
+
+impl<BorrowType: marker::BorrowType, K, V, Type> NodeRef<BorrowType, K, V, Type> {
+ /// Finds the parent of the current node. Returns `Ok(handle)` if the current
+ /// node actually has a parent, where `handle` points to the edge of the parent
+ /// that points to the current node. Returns `Err(self)` if the current node has
+ /// no parent, giving back the original `NodeRef`.
+ ///
+ /// The method name assumes you picture trees with the root node on top.
+ ///
+ /// `edge.descend().ascend().unwrap()` and `node.ascend().unwrap().descend()` should
+ /// both, upon success, do nothing.
+ pub fn ascend(
+ self,
+ ) -> Result<Handle<NodeRef<BorrowType, K, V, marker::Internal>, marker::Edge>, Self> {
+ assert!(BorrowType::PERMITS_TRAVERSAL);
+ // We need to use raw pointers to nodes because, if BorrowType is marker::ValMut,
+ // there might be outstanding mutable references to values that we must not invalidate.
+ let leaf_ptr: *const _ = Self::as_leaf_ptr(&self);
+ unsafe { (*leaf_ptr).parent }
+ .as_ref()
+ .map(|parent| Handle {
+ node: NodeRef::from_internal(*parent, self.height + 1),
+ idx: unsafe { usize::from((*leaf_ptr).parent_idx.assume_init()) },
+ _marker: PhantomData,
+ })
+ .ok_or(self)
+ }
+
+ pub fn first_edge(self) -> Handle<Self, marker::Edge> {
+ unsafe { Handle::new_edge(self, 0) }
+ }
+
+ pub fn last_edge(self) -> Handle<Self, marker::Edge> {
+ let len = self.len();
+ unsafe { Handle::new_edge(self, len) }
+ }
+
+ /// Note that `self` must be nonempty.
+ pub fn first_kv(self) -> Handle<Self, marker::KV> {
+ let len = self.len();
+ assert!(len > 0);
+ unsafe { Handle::new_kv(self, 0) }
+ }
+
+ /// Note that `self` must be nonempty.
+ pub fn last_kv(self) -> Handle<Self, marker::KV> {
+ let len = self.len();
+ assert!(len > 0);
+ unsafe { Handle::new_kv(self, len - 1) }
+ }
+}
+
+impl<BorrowType, K, V, Type> NodeRef<BorrowType, K, V, Type> {
+ /// Could be a public implementation of PartialEq, but only used in this module.
+ fn eq(&self, other: &Self) -> bool {
+ let Self { node, height, _marker } = self;
+ if node.eq(&other.node) {
+ debug_assert_eq!(*height, other.height);
+ true
+ } else {
+ false
+ }
+ }
+}
+
+impl<'a, K: 'a, V: 'a, Type> NodeRef<marker::Immut<'a>, K, V, Type> {
+ /// Exposes the leaf portion of any leaf or internal node in an immutable tree.
+ fn into_leaf(self) -> &'a LeafNode<K, V> {
+ let ptr = Self::as_leaf_ptr(&self);
+ // SAFETY: there can be no mutable references into this tree borrowed as `Immut`.
+ unsafe { &*ptr }
+ }
+
+ /// Borrows a view into the keys stored in the node.
+ pub fn keys(&self) -> &[K] {
+ let leaf = self.into_leaf();
+ unsafe {
+ MaybeUninit::slice_assume_init_ref(leaf.keys.get_unchecked(..usize::from(leaf.len)))
+ }
+ }
+}
+
+impl<K, V> NodeRef<marker::Dying, K, V, marker::LeafOrInternal> {
+ /// Similar to `ascend`, gets a reference to a node's parent node, but also
+ /// deallocates the current node in the process. This is unsafe because the
+ /// current node will still be accessible despite being deallocated.
+ pub unsafe fn deallocate_and_ascend<A: Allocator + Clone>(
+ self,
+ alloc: A,
+ ) -> Option<Handle<NodeRef<marker::Dying, K, V, marker::Internal>, marker::Edge>> {
+ let height = self.height;
+ let node = self.node;
+ let ret = self.ascend().ok();
+ unsafe {
+ alloc.deallocate(
+ node.cast(),
+ if height > 0 {
+ Layout::new::<InternalNode<K, V>>()
+ } else {
+ Layout::new::<LeafNode<K, V>>()
+ },
+ );
+ }
+ ret
+ }
+}
+
+impl<'a, K, V, Type> NodeRef<marker::Mut<'a>, K, V, Type> {
+ /// Temporarily takes out another mutable reference to the same node. Beware, as
+ /// this method is very dangerous, doubly so since it might not immediately appear
+ /// dangerous.
+ ///
+ /// Because mutable pointers can roam anywhere around the tree, the returned
+ /// pointer can easily be used to make the original pointer dangling, out of
+ /// bounds, or invalid under stacked borrow rules.
+ // FIXME(@gereeter) consider adding yet another type parameter to `NodeRef`
+ // that restricts the use of navigation methods on reborrowed pointers,
+ // preventing this unsafety.
+ unsafe fn reborrow_mut(&mut self) -> NodeRef<marker::Mut<'_>, K, V, Type> {
+ NodeRef { height: self.height, node: self.node, _marker: PhantomData }
+ }
+
+ /// Borrows exclusive access to the leaf portion of a leaf or internal node.
+ fn as_leaf_mut(&mut self) -> &mut LeafNode<K, V> {
+ let ptr = Self::as_leaf_ptr(self);
+ // SAFETY: we have exclusive access to the entire node.
+ unsafe { &mut *ptr }
+ }
+
+ /// Offers exclusive access to the leaf portion of a leaf or internal node.
+ fn into_leaf_mut(mut self) -> &'a mut LeafNode<K, V> {
+ let ptr = Self::as_leaf_ptr(&mut self);
+ // SAFETY: we have exclusive access to the entire node.
+ unsafe { &mut *ptr }
+ }
+}
+
+impl<K, V, Type> NodeRef<marker::Dying, K, V, Type> {
+ /// Borrows exclusive access to the leaf portion of a dying leaf or internal node.
+ fn as_leaf_dying(&mut self) -> &mut LeafNode<K, V> {
+ let ptr = Self::as_leaf_ptr(self);
+ // SAFETY: we have exclusive access to the entire node.
+ unsafe { &mut *ptr }
+ }
+}
+
+impl<'a, K: 'a, V: 'a, Type> NodeRef<marker::Mut<'a>, K, V, Type> {
+ /// Borrows exclusive access to an element of the key storage area.
+ ///
+ /// # Safety
+ /// `index` is in bounds of 0..CAPACITY
+ unsafe fn key_area_mut<I, Output: ?Sized>(&mut self, index: I) -> &mut Output
+ where
+ I: SliceIndex<[MaybeUninit<K>], Output = Output>,
+ {
+ // SAFETY: the caller will not be able to call further methods on self
+ // until the key slice reference is dropped, as we have unique access
+ // for the lifetime of the borrow.
+ unsafe { self.as_leaf_mut().keys.as_mut_slice().get_unchecked_mut(index) }
+ }
+
+ /// Borrows exclusive access to an element or slice of the node's value storage area.
+ ///
+ /// # Safety
+ /// `index` is in bounds of 0..CAPACITY
+ unsafe fn val_area_mut<I, Output: ?Sized>(&mut self, index: I) -> &mut Output
+ where
+ I: SliceIndex<[MaybeUninit<V>], Output = Output>,
+ {
+ // SAFETY: the caller will not be able to call further methods on self
+ // until the value slice reference is dropped, as we have unique access
+ // for the lifetime of the borrow.
+ unsafe { self.as_leaf_mut().vals.as_mut_slice().get_unchecked_mut(index) }
+ }
+}
+
+impl<'a, K: 'a, V: 'a> NodeRef<marker::Mut<'a>, K, V, marker::Internal> {
+ /// Borrows exclusive access to an element or slice of the node's storage area for edge contents.
+ ///
+ /// # Safety
+ /// `index` is in bounds of 0..CAPACITY + 1
+ unsafe fn edge_area_mut<I, Output: ?Sized>(&mut self, index: I) -> &mut Output
+ where
+ I: SliceIndex<[MaybeUninit<BoxedNode<K, V>>], Output = Output>,
+ {
+ // SAFETY: the caller will not be able to call further methods on self
+ // until the edge slice reference is dropped, as we have unique access
+ // for the lifetime of the borrow.
+ unsafe { self.as_internal_mut().edges.as_mut_slice().get_unchecked_mut(index) }
+ }
+}
+
+impl<'a, K, V, Type> NodeRef<marker::ValMut<'a>, K, V, Type> {
+ /// # Safety
+ /// - The node has more than `idx` initialized elements.
+ unsafe fn into_key_val_mut_at(mut self, idx: usize) -> (&'a K, &'a mut V) {
+ // We only create a reference to the one element we are interested in,
+ // to avoid aliasing with outstanding references to other elements,
+ // in particular, those returned to the caller in earlier iterations.
+ let leaf = Self::as_leaf_ptr(&mut self);
+ let keys = unsafe { ptr::addr_of!((*leaf).keys) };
+ let vals = unsafe { ptr::addr_of_mut!((*leaf).vals) };
+ // We must coerce to unsized array pointers because of Rust issue #74679.
+ let keys: *const [_] = keys;
+ let vals: *mut [_] = vals;
+ let key = unsafe { (&*keys.get_unchecked(idx)).assume_init_ref() };
+ let val = unsafe { (&mut *vals.get_unchecked_mut(idx)).assume_init_mut() };
+ (key, val)
+ }
+}
+
+impl<'a, K: 'a, V: 'a, Type> NodeRef<marker::Mut<'a>, K, V, Type> {
+ /// Borrows exclusive access to the length of the node.
+ pub fn len_mut(&mut self) -> &mut u16 {
+ &mut self.as_leaf_mut().len
+ }
+}
+
+impl<'a, K, V> NodeRef<marker::Mut<'a>, K, V, marker::Internal> {
+ /// # Safety
+ /// Every item returned by `range` is a valid edge index for the node.
+ unsafe fn correct_childrens_parent_links<R: Iterator<Item = usize>>(&mut self, range: R) {
+ for i in range {
+ debug_assert!(i <= self.len());
+ unsafe { Handle::new_edge(self.reborrow_mut(), i) }.correct_parent_link();
+ }
+ }
+
+ fn correct_all_childrens_parent_links(&mut self) {
+ let len = self.len();
+ unsafe { self.correct_childrens_parent_links(0..=len) };
+ }
+}
+
+impl<'a, K: 'a, V: 'a> NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal> {
+ /// Sets the node's link to its parent edge,
+ /// without invalidating other references to the node.
+ fn set_parent_link(&mut self, parent: NonNull<InternalNode<K, V>>, parent_idx: usize) {
+ let leaf = Self::as_leaf_ptr(self);
+ unsafe { (*leaf).parent = Some(parent) };
+ unsafe { (*leaf).parent_idx.write(parent_idx as u16) };
+ }
+}
+
+impl<K, V> NodeRef<marker::Owned, K, V, marker::LeafOrInternal> {
+ /// Clears the root's link to its parent edge.
+ fn clear_parent_link(&mut self) {
+ let mut root_node = self.borrow_mut();
+ let leaf = root_node.as_leaf_mut();
+ leaf.parent = None;
+ }
+}
+
+impl<K, V> NodeRef<marker::Owned, K, V, marker::LeafOrInternal> {
+ /// Returns a new owned tree, with its own root node that is initially empty.
+ pub fn new<A: Allocator + Clone>(alloc: A) -> Self {
+ NodeRef::new_leaf(alloc).forget_type()
+ }
+
+ /// Adds a new internal node with a single edge pointing to the previous root node,
+ /// makes that new node the root node, and returns it. This increases the height by 1
+ /// and is the opposite of `pop_internal_level`.
+ pub fn push_internal_level<A: Allocator + Clone>(
+ &mut self,
+ alloc: A,
+ ) -> NodeRef<marker::Mut<'_>, K, V, marker::Internal> {
+ super::mem::take_mut(self, |old_root| NodeRef::new_internal(old_root, alloc).forget_type());
+
+ // `self.borrow_mut()`, except that we just forgot we're internal now:
+ NodeRef { height: self.height, node: self.node, _marker: PhantomData }
+ }
+
+ /// Removes the internal root node, using its first child as the new root node.
+ /// As it is intended only to be called when the root node has only one child,
+ /// no cleanup is done on any of the keys, values and other children.
+ /// This decreases the height by 1 and is the opposite of `push_internal_level`.
+ ///
+ /// Requires exclusive access to the `NodeRef` object but not to the root node;
+ /// it will not invalidate other handles or references to the root node.
+ ///
+ /// Panics if there is no internal level, i.e., if the root node is a leaf.
+ pub fn pop_internal_level<A: Allocator + Clone>(&mut self, alloc: A) {
+ assert!(self.height > 0);
+
+ let top = self.node;
+
+ // SAFETY: we asserted to be internal.
+ let internal_self = unsafe { self.borrow_mut().cast_to_internal_unchecked() };
+ // SAFETY: we borrowed `self` exclusively and its borrow type is exclusive.
+ let internal_node = unsafe { &mut *NodeRef::as_internal_ptr(&internal_self) };
+ // SAFETY: the first edge is always initialized.
+ self.node = unsafe { internal_node.edges[0].assume_init_read() };
+ self.height -= 1;
+ self.clear_parent_link();
+
+ unsafe {
+ alloc.deallocate(top.cast(), Layout::new::<InternalNode<K, V>>());
+ }
+ }
+}
+
+impl<K, V, Type> NodeRef<marker::Owned, K, V, Type> {
+ /// Mutably borrows the owned root node. Unlike `reborrow_mut`, this is safe
+ /// because the return value cannot be used to destroy the root, and there
+ /// cannot be other references to the tree.
+ pub fn borrow_mut(&mut self) -> NodeRef<marker::Mut<'_>, K, V, Type> {
+ NodeRef { height: self.height, node: self.node, _marker: PhantomData }
+ }
+
+ /// Slightly mutably borrows the owned root node.
+ pub fn borrow_valmut(&mut self) -> NodeRef<marker::ValMut<'_>, K, V, Type> {
+ NodeRef { height: self.height, node: self.node, _marker: PhantomData }
+ }
+
+ /// Irreversibly transitions to a reference that permits traversal and offers
+ /// destructive methods and little else.
+ pub fn into_dying(self) -> NodeRef<marker::Dying, K, V, Type> {
+ NodeRef { height: self.height, node: self.node, _marker: PhantomData }
+ }
+}
+
+impl<'a, K: 'a, V: 'a> NodeRef<marker::Mut<'a>, K, V, marker::Leaf> {
+ /// Adds a key-value pair to the end of the node, and returns
+ /// a mutable reference to the inserted value.
+ pub fn push(&mut self, key: K, val: V) -> &mut V {
+ let len = self.len_mut();
+ let idx = usize::from(*len);
+ assert!(idx < CAPACITY);
+ *len += 1;
+ unsafe {
+ self.key_area_mut(idx).write(key);
+ self.val_area_mut(idx).write(val)
+ }
+ }
+}
+
+impl<'a, K: 'a, V: 'a> NodeRef<marker::Mut<'a>, K, V, marker::Internal> {
+ /// Adds a key-value pair, and an edge to go to the right of that pair,
+ /// to the end of the node.
+ pub fn push(&mut self, key: K, val: V, edge: Root<K, V>) {
+ assert!(edge.height == self.height - 1);
+
+ let len = self.len_mut();
+ let idx = usize::from(*len);
+ assert!(idx < CAPACITY);
+ *len += 1;
+ unsafe {
+ self.key_area_mut(idx).write(key);
+ self.val_area_mut(idx).write(val);
+ self.edge_area_mut(idx + 1).write(edge.node);
+ Handle::new_edge(self.reborrow_mut(), idx + 1).correct_parent_link();
+ }
+ }
+}
+
+impl<BorrowType, K, V> NodeRef<BorrowType, K, V, marker::Leaf> {
+ /// Removes any static information asserting that this node is a `Leaf` node.
+ pub fn forget_type(self) -> NodeRef<BorrowType, K, V, marker::LeafOrInternal> {
+ NodeRef { height: self.height, node: self.node, _marker: PhantomData }
+ }
+}
+
+impl<BorrowType, K, V> NodeRef<BorrowType, K, V, marker::Internal> {
+ /// Removes any static information asserting that this node is an `Internal` node.
+ pub fn forget_type(self) -> NodeRef<BorrowType, K, V, marker::LeafOrInternal> {
+ NodeRef { height: self.height, node: self.node, _marker: PhantomData }
+ }
+}
+
+impl<BorrowType, K, V> NodeRef<BorrowType, K, V, marker::LeafOrInternal> {
+ /// Checks whether a node is an `Internal` node or a `Leaf` node.
+ pub fn force(
+ self,
+ ) -> ForceResult<
+ NodeRef<BorrowType, K, V, marker::Leaf>,
+ NodeRef<BorrowType, K, V, marker::Internal>,
+ > {
+ if self.height == 0 {
+ ForceResult::Leaf(NodeRef {
+ height: self.height,
+ node: self.node,
+ _marker: PhantomData,
+ })
+ } else {
+ ForceResult::Internal(NodeRef {
+ height: self.height,
+ node: self.node,
+ _marker: PhantomData,
+ })
+ }
+ }
+}
+
+impl<'a, K, V> NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal> {
+ /// Unsafely asserts to the compiler the static information that this node is a `Leaf`.
+ unsafe fn cast_to_leaf_unchecked(self) -> NodeRef<marker::Mut<'a>, K, V, marker::Leaf> {
+ debug_assert!(self.height == 0);
+ NodeRef { height: self.height, node: self.node, _marker: PhantomData }
+ }
+
+ /// Unsafely asserts to the compiler the static information that this node is an `Internal`.
+ unsafe fn cast_to_internal_unchecked(self) -> NodeRef<marker::Mut<'a>, K, V, marker::Internal> {
+ debug_assert!(self.height > 0);
+ NodeRef { height: self.height, node: self.node, _marker: PhantomData }
+ }
+}
+
+/// A reference to a specific key-value pair or edge within a node. The `Node` parameter
+/// must be a `NodeRef`, while the `Type` can either be `KV` (signifying a handle on a key-value
+/// pair) or `Edge` (signifying a handle on an edge).
+///
+/// Note that even `Leaf` nodes can have `Edge` handles. Instead of representing a pointer to
+/// a child node, these represent the spaces where child pointers would go between the key-value
+/// pairs. For example, in a node with length 2, there would be 3 possible edge locations - one
+/// to the left of the node, one between the two pairs, and one at the right of the node.
+pub struct Handle<Node, Type> {
+ node: Node,
+ idx: usize,
+ _marker: PhantomData<Type>,
+}
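+
+// Layout of edges and key-value pairs in a node of length 2 (illustrative):
+//
+//     edge 0   kv 0   edge 1   kv 1   edge 2
+//
+// so a `Handle<_, marker::Edge>` may carry any `idx` in `0..=len()`, while a
+// `Handle<_, marker::KV>` is restricted to `0..len()`.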
+
+impl<Node: Copy, Type> Copy for Handle<Node, Type> {}
+// We don't need the full generality of `#[derive(Clone)]`, as the only time `Node` will be
+// `Clone`able is when it is an immutable reference and therefore `Copy`.
+impl<Node: Copy, Type> Clone for Handle<Node, Type> {
+ fn clone(&self) -> Self {
+ *self
+ }
+}
+
+impl<Node, Type> Handle<Node, Type> {
+ /// Retrieves the node that contains the edge or key-value pair this handle points to.
+ pub fn into_node(self) -> Node {
+ self.node
+ }
+
+ /// Returns the position of this handle in the node.
+ pub fn idx(&self) -> usize {
+ self.idx
+ }
+}
+
+impl<BorrowType, K, V, NodeType> Handle<NodeRef<BorrowType, K, V, NodeType>, marker::KV> {
+ /// Creates a new handle to a key-value pair in `node`.
+ /// Unsafe because the caller must ensure that `idx < node.len()`.
+ pub unsafe fn new_kv(node: NodeRef<BorrowType, K, V, NodeType>, idx: usize) -> Self {
+ debug_assert!(idx < node.len());
+
+ Handle { node, idx, _marker: PhantomData }
+ }
+
+ pub fn left_edge(self) -> Handle<NodeRef<BorrowType, K, V, NodeType>, marker::Edge> {
+ unsafe { Handle::new_edge(self.node, self.idx) }
+ }
+
+ pub fn right_edge(self) -> Handle<NodeRef<BorrowType, K, V, NodeType>, marker::Edge> {
+ unsafe { Handle::new_edge(self.node, self.idx + 1) }
+ }
+}
+
+impl<BorrowType, K, V, NodeType, HandleType> PartialEq
+ for Handle<NodeRef<BorrowType, K, V, NodeType>, HandleType>
+{
+ fn eq(&self, other: &Self) -> bool {
+ let Self { node, idx, _marker } = self;
+ node.eq(&other.node) && *idx == other.idx
+ }
+}
+
+impl<BorrowType, K, V, NodeType, HandleType>
+ Handle<NodeRef<BorrowType, K, V, NodeType>, HandleType>
+{
+ /// Temporarily takes out another immutable handle on the same location.
+ pub fn reborrow(&self) -> Handle<NodeRef<marker::Immut<'_>, K, V, NodeType>, HandleType> {
+ // We can't use Handle::new_kv or Handle::new_edge because we don't know our type
+ Handle { node: self.node.reborrow(), idx: self.idx, _marker: PhantomData }
+ }
+}
+
+impl<'a, K, V, NodeType, HandleType> Handle<NodeRef<marker::Mut<'a>, K, V, NodeType>, HandleType> {
+ /// Temporarily takes out another mutable handle on the same location. Beware, as
+ /// this method is very dangerous, doubly so since it might not immediately appear
+ /// dangerous.
+ ///
+ /// For details, see `NodeRef::reborrow_mut`.
+ pub unsafe fn reborrow_mut(
+ &mut self,
+ ) -> Handle<NodeRef<marker::Mut<'_>, K, V, NodeType>, HandleType> {
+ // We can't use Handle::new_kv or Handle::new_edge because we don't know our type
+ Handle { node: unsafe { self.node.reborrow_mut() }, idx: self.idx, _marker: PhantomData }
+ }
+}
+
+impl<BorrowType, K, V, NodeType> Handle<NodeRef<BorrowType, K, V, NodeType>, marker::Edge> {
+ /// Creates a new handle to an edge in `node`.
+ /// Unsafe because the caller must ensure that `idx <= node.len()`.
+ pub unsafe fn new_edge(node: NodeRef<BorrowType, K, V, NodeType>, idx: usize) -> Self {
+ debug_assert!(idx <= node.len());
+
+ Handle { node, idx, _marker: PhantomData }
+ }
+
+ pub fn left_kv(self) -> Result<Handle<NodeRef<BorrowType, K, V, NodeType>, marker::KV>, Self> {
+ if self.idx > 0 {
+ Ok(unsafe { Handle::new_kv(self.node, self.idx - 1) })
+ } else {
+ Err(self)
+ }
+ }
+
+ pub fn right_kv(self) -> Result<Handle<NodeRef<BorrowType, K, V, NodeType>, marker::KV>, Self> {
+ if self.idx < self.node.len() {
+ Ok(unsafe { Handle::new_kv(self.node, self.idx) })
+ } else {
+ Err(self)
+ }
+ }
+}
+
+pub enum LeftOrRight<T> {
+ Left(T),
+ Right(T),
+}
+
+/// Given an edge index where we want to insert into a node filled to capacity,
+/// computes a sensible KV index of a split point and where to perform the insertion.
+/// The goal of the split point is for its key and value to end up in a parent node;
+/// the keys, values and edges to the left of the split point become the left child;
+/// the keys, values and edges to the right of the split point become the right child.
+fn splitpoint(edge_idx: usize) -> (usize, LeftOrRight<usize>) {
+ debug_assert!(edge_idx <= CAPACITY);
+ // Rust issue #74834 tries to explain these symmetric rules.
+ match edge_idx {
+ 0..EDGE_IDX_LEFT_OF_CENTER => (KV_IDX_CENTER - 1, LeftOrRight::Left(edge_idx)),
+ EDGE_IDX_LEFT_OF_CENTER => (KV_IDX_CENTER, LeftOrRight::Left(edge_idx)),
+ EDGE_IDX_RIGHT_OF_CENTER => (KV_IDX_CENTER, LeftOrRight::Right(0)),
+ _ => (KV_IDX_CENTER + 1, LeftOrRight::Right(edge_idx - (KV_IDX_CENTER + 1 + 1))),
+ }
+}
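+
+// Worked example, assuming the usual `B = 6` (so `CAPACITY = 11`, `KV_IDX_CENTER = 5`,
+// `EDGE_IDX_LEFT_OF_CENTER = 5`, `EDGE_IDX_RIGHT_OF_CENTER = 6`):
+//
+//     splitpoint(0)  == (4, LeftOrRight::Left(0))   // insertion near the left end
+//     splitpoint(5)  == (5, LeftOrRight::Left(5))   // insertion just left of center
+//     splitpoint(6)  == (5, LeftOrRight::Right(0))  // insertion just right of center
+//     splitpoint(11) == (6, LeftOrRight::Right(4))  // insertion at the right end
+//
+// In every case both halves hold at least `MIN_LEN_AFTER_SPLIT` (= 5) pairs once the
+// pending insertion is performed, which the `test_splitpoint` unit test in
+// `node/tests.rs` checks exhaustively.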
+
+impl<'a, K: 'a, V: 'a> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge> {
+ /// Inserts a new key-value pair between the key-value pairs to the right and left of
+ /// this edge. This method assumes that there is enough space in the node for the new
+ /// pair to fit.
+ ///
+ /// The returned pointer points to the inserted value.
+ fn insert_fit(&mut self, key: K, val: V) -> *mut V {
+ debug_assert!(self.node.len() < CAPACITY);
+ let new_len = self.node.len() + 1;
+
+ unsafe {
+ slice_insert(self.node.key_area_mut(..new_len), self.idx, key);
+ slice_insert(self.node.val_area_mut(..new_len), self.idx, val);
+ *self.node.len_mut() = new_len as u16;
+
+ self.node.val_area_mut(self.idx).assume_init_mut()
+ }
+ }
+}
+
+impl<'a, K: 'a, V: 'a> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge> {
+ /// Inserts a new key-value pair between the key-value pairs to the right and left of
+ /// this edge. This method splits the node if there isn't enough room.
+ ///
+ /// The returned pointer points to the inserted value.
+ fn insert<A: Allocator + Clone>(
+ mut self,
+ key: K,
+ val: V,
+ alloc: A,
+ ) -> (Option<SplitResult<'a, K, V, marker::Leaf>>, *mut V) {
+ if self.node.len() < CAPACITY {
+ let val_ptr = self.insert_fit(key, val);
+ (None, val_ptr)
+ } else {
+ let (middle_kv_idx, insertion) = splitpoint(self.idx);
+ let middle = unsafe { Handle::new_kv(self.node, middle_kv_idx) };
+ let mut result = middle.split(alloc);
+ let mut insertion_edge = match insertion {
+ LeftOrRight::Left(insert_idx) => unsafe {
+ Handle::new_edge(result.left.reborrow_mut(), insert_idx)
+ },
+ LeftOrRight::Right(insert_idx) => unsafe {
+ Handle::new_edge(result.right.borrow_mut(), insert_idx)
+ },
+ };
+ let val_ptr = insertion_edge.insert_fit(key, val);
+ (Some(result), val_ptr)
+ }
+ }
+}
+
+impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::Edge> {
+ /// Fixes the parent pointer and index in the child node that this edge
+ /// links to. This is useful when the ordering of edges has been changed,
+ fn correct_parent_link(self) {
+ // Create backpointer without invalidating other references to the node.
+ let ptr = unsafe { NonNull::new_unchecked(NodeRef::as_internal_ptr(&self.node)) };
+ let idx = self.idx;
+ let mut child = self.descend();
+ child.set_parent_link(ptr, idx);
+ }
+}
+
+impl<'a, K: 'a, V: 'a> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::Edge> {
+ /// Inserts a new key-value pair and an edge that will go to the right of that new pair
+ /// between this edge and the key-value pair to the right of this edge. This method assumes
+ /// that there is enough space in the node for the new pair to fit.
+ fn insert_fit(&mut self, key: K, val: V, edge: Root<K, V>) {
+ debug_assert!(self.node.len() < CAPACITY);
+ debug_assert!(edge.height == self.node.height - 1);
+ let new_len = self.node.len() + 1;
+
+ unsafe {
+ slice_insert(self.node.key_area_mut(..new_len), self.idx, key);
+ slice_insert(self.node.val_area_mut(..new_len), self.idx, val);
+ slice_insert(self.node.edge_area_mut(..new_len + 1), self.idx + 1, edge.node);
+ *self.node.len_mut() = new_len as u16;
+
+ self.node.correct_childrens_parent_links(self.idx + 1..new_len + 1);
+ }
+ }
+
+ /// Inserts a new key-value pair and an edge that will go to the right of that new pair
+ /// between this edge and the key-value pair to the right of this edge. This method splits
+ /// the node if there isn't enough room.
+ fn insert<A: Allocator + Clone>(
+ mut self,
+ key: K,
+ val: V,
+ edge: Root<K, V>,
+ alloc: A,
+ ) -> Option<SplitResult<'a, K, V, marker::Internal>> {
+ assert!(edge.height == self.node.height - 1);
+
+ if self.node.len() < CAPACITY {
+ self.insert_fit(key, val, edge);
+ None
+ } else {
+ let (middle_kv_idx, insertion) = splitpoint(self.idx);
+ let middle = unsafe { Handle::new_kv(self.node, middle_kv_idx) };
+ let mut result = middle.split(alloc);
+ let mut insertion_edge = match insertion {
+ LeftOrRight::Left(insert_idx) => unsafe {
+ Handle::new_edge(result.left.reborrow_mut(), insert_idx)
+ },
+ LeftOrRight::Right(insert_idx) => unsafe {
+ Handle::new_edge(result.right.borrow_mut(), insert_idx)
+ },
+ };
+ insertion_edge.insert_fit(key, val, edge);
+ Some(result)
+ }
+ }
+}
+
+impl<'a, K: 'a, V: 'a> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge> {
+ /// Inserts a new key-value pair between the key-value pairs to the right and left of
+ /// this edge. This method splits the node if there isn't enough room, and tries to
+ /// insert the split off portion into the parent node recursively, until the root is reached.
+ ///
+ /// If the returned result is some `SplitResult`, the `left` field will be the root node.
+ /// The returned pointer points to the inserted value, which in the case of `SplitResult`
+ /// is in the `left` or `right` tree.
+ pub fn insert_recursing<A: Allocator + Clone>(
+ self,
+ key: K,
+ value: V,
+ alloc: A,
+ ) -> (Option<SplitResult<'a, K, V, marker::LeafOrInternal>>, *mut V) {
+ let (mut split, val_ptr) = match self.insert(key, value, alloc.clone()) {
+ (None, val_ptr) => return (None, val_ptr),
+ (Some(split), val_ptr) => (split.forget_node_type(), val_ptr),
+ };
+
+ loop {
+ split = match split.left.ascend() {
+ Ok(parent) => {
+ match parent.insert(split.kv.0, split.kv.1, split.right, alloc.clone()) {
+ None => return (None, val_ptr),
+ Some(split) => split.forget_node_type(),
+ }
+ }
+ Err(root) => return (Some(SplitResult { left: root, ..split }), val_ptr),
+ };
+ }
+ }
+}
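+
+// Sketch of the propagation path (assuming the map-level caller in `map`/`entry`):
+// a full leaf splits into the still-attached `left`, a middle `kv` and a freshly
+// allocated `right` node; the loop then ascends and inserts `kv` plus the `right`
+// edge into the parent. Only if every ancestor up to and including the root is full
+// does the final `SplitResult` reach the caller, which grows the tree by one level
+// (see `push_internal_level`) and pushes the remaining `kv` and `right` into the
+// new root.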
+
+impl<BorrowType: marker::BorrowType, K, V>
+ Handle<NodeRef<BorrowType, K, V, marker::Internal>, marker::Edge>
+{
+ /// Finds the node pointed to by this edge.
+ ///
+ /// The method name assumes you picture trees with the root node on top.
+ ///
+ /// `edge.descend().ascend().unwrap()` and `node.ascend().unwrap().descend()` should
+ /// both, upon success, do nothing.
+ pub fn descend(self) -> NodeRef<BorrowType, K, V, marker::LeafOrInternal> {
+ assert!(BorrowType::PERMITS_TRAVERSAL);
+ // We need to use raw pointers to nodes because, if BorrowType is
+ // marker::ValMut, there might be outstanding mutable references to
+ // values that we must not invalidate. There's no worry accessing the
+ // height field because that value is copied. Beware that, once the
+ // node pointer is dereferenced, we access the edges array with a
+ // reference (Rust issue #73987) and invalidate any other references
+ // to or inside the array, should any be around.
+ let parent_ptr = NodeRef::as_internal_ptr(&self.node);
+ let node = unsafe { (*parent_ptr).edges.get_unchecked(self.idx).assume_init_read() };
+ NodeRef { node, height: self.node.height - 1, _marker: PhantomData }
+ }
+}
+
+impl<'a, K: 'a, V: 'a, NodeType> Handle<NodeRef<marker::Immut<'a>, K, V, NodeType>, marker::KV> {
+ pub fn into_kv(self) -> (&'a K, &'a V) {
+ debug_assert!(self.idx < self.node.len());
+ let leaf = self.node.into_leaf();
+ let k = unsafe { leaf.keys.get_unchecked(self.idx).assume_init_ref() };
+ let v = unsafe { leaf.vals.get_unchecked(self.idx).assume_init_ref() };
+ (k, v)
+ }
+}
+
+impl<'a, K: 'a, V: 'a, NodeType> Handle<NodeRef<marker::Mut<'a>, K, V, NodeType>, marker::KV> {
+ pub fn key_mut(&mut self) -> &mut K {
+ unsafe { self.node.key_area_mut(self.idx).assume_init_mut() }
+ }
+
+ pub fn into_val_mut(self) -> &'a mut V {
+ debug_assert!(self.idx < self.node.len());
+ let leaf = self.node.into_leaf_mut();
+ unsafe { leaf.vals.get_unchecked_mut(self.idx).assume_init_mut() }
+ }
+}
+
+impl<'a, K, V, NodeType> Handle<NodeRef<marker::ValMut<'a>, K, V, NodeType>, marker::KV> {
+ pub fn into_kv_valmut(self) -> (&'a K, &'a mut V) {
+ unsafe { self.node.into_key_val_mut_at(self.idx) }
+ }
+}
+
+impl<'a, K: 'a, V: 'a, NodeType> Handle<NodeRef<marker::Mut<'a>, K, V, NodeType>, marker::KV> {
+ pub fn kv_mut(&mut self) -> (&mut K, &mut V) {
+ debug_assert!(self.idx < self.node.len());
+ // We cannot call separate key and value methods, because calling the second one
+ // invalidates the reference returned by the first.
+ unsafe {
+ let leaf = self.node.as_leaf_mut();
+ let key = leaf.keys.get_unchecked_mut(self.idx).assume_init_mut();
+ let val = leaf.vals.get_unchecked_mut(self.idx).assume_init_mut();
+ (key, val)
+ }
+ }
+
+ /// Replaces the key and value that the KV handle refers to.
+ pub fn replace_kv(&mut self, k: K, v: V) -> (K, V) {
+ let (key, val) = self.kv_mut();
+ (mem::replace(key, k), mem::replace(val, v))
+ }
+}
+
+impl<K, V, NodeType> Handle<NodeRef<marker::Dying, K, V, NodeType>, marker::KV> {
+ /// Extracts the key and value that the KV handle refers to.
+ /// # Safety
+ /// The node that the handle refers to must not yet have been deallocated.
+ pub unsafe fn into_key_val(mut self) -> (K, V) {
+ debug_assert!(self.idx < self.node.len());
+ let leaf = self.node.as_leaf_dying();
+ unsafe {
+ let key = leaf.keys.get_unchecked_mut(self.idx).assume_init_read();
+ let val = leaf.vals.get_unchecked_mut(self.idx).assume_init_read();
+ (key, val)
+ }
+ }
+
+ /// Drops the key and value that the KV handle refers to.
+ /// # Safety
+ /// The node that the handle refers to must not yet have been deallocated.
+ #[inline]
+ pub unsafe fn drop_key_val(mut self) {
+ debug_assert!(self.idx < self.node.len());
+ let leaf = self.node.as_leaf_dying();
+ unsafe {
+ leaf.keys.get_unchecked_mut(self.idx).assume_init_drop();
+ leaf.vals.get_unchecked_mut(self.idx).assume_init_drop();
+ }
+ }
+}
+
+impl<'a, K: 'a, V: 'a, NodeType> Handle<NodeRef<marker::Mut<'a>, K, V, NodeType>, marker::KV> {
+ /// Helps implementations of `split` for a particular `NodeType`,
+ /// by taking care of leaf data.
+ fn split_leaf_data(&mut self, new_node: &mut LeafNode<K, V>) -> (K, V) {
+ debug_assert!(self.idx < self.node.len());
+ let old_len = self.node.len();
+ let new_len = old_len - self.idx - 1;
+ new_node.len = new_len as u16;
+ unsafe {
+ let k = self.node.key_area_mut(self.idx).assume_init_read();
+ let v = self.node.val_area_mut(self.idx).assume_init_read();
+
+ move_to_slice(
+ self.node.key_area_mut(self.idx + 1..old_len),
+ &mut new_node.keys[..new_len],
+ );
+ move_to_slice(
+ self.node.val_area_mut(self.idx + 1..old_len),
+ &mut new_node.vals[..new_len],
+ );
+
+ *self.node.len_mut() = self.idx as u16;
+ (k, v)
+ }
+ }
+}
+
+impl<'a, K: 'a, V: 'a> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::KV> {
+ /// Splits the underlying node into three parts:
+ ///
+ /// - The node is truncated to only contain the key-value pairs to the left of
+ /// this handle.
+ /// - The key and value pointed to by this handle are extracted.
+ /// - All the key-value pairs to the right of this handle are put into a newly
+ /// allocated node.
+ pub fn split<A: Allocator + Clone>(mut self, alloc: A) -> SplitResult<'a, K, V, marker::Leaf> {
+ let mut new_node = LeafNode::new(alloc);
+
+ let kv = self.split_leaf_data(&mut new_node);
+
+ let right = NodeRef::from_new_leaf(new_node);
+ SplitResult { left: self.node, kv, right }
+ }
+
+ /// Removes the key-value pair pointed to by this handle and returns it, along with the edge
+ /// that the key-value pair collapsed into.
+ pub fn remove(
+ mut self,
+ ) -> ((K, V), Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge>) {
+ let old_len = self.node.len();
+ unsafe {
+ let k = slice_remove(self.node.key_area_mut(..old_len), self.idx);
+ let v = slice_remove(self.node.val_area_mut(..old_len), self.idx);
+ *self.node.len_mut() = (old_len - 1) as u16;
+ ((k, v), self.left_edge())
+ }
+ }
+}
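+
+// Illustrative split (hypothetical keys): with a leaf holding `[1, 2, 3, 4, 5]` and a
+// KV handle at `idx == 2`, `split` truncates the node to `[1, 2]`, extracts `(3, v3)`
+// as `kv`, and moves `[4, 5]` into the newly allocated `right` node
+// (`new_len = 5 - 2 - 1 = 2` in `split_leaf_data`).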
+
+impl<'a, K: 'a, V: 'a> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::KV> {
+ /// Splits the underlying node into three parts:
+ ///
+ /// - The node is truncated to only contain the edges and key-value pairs to the
+ /// left of this handle.
+ /// - The key and value pointed to by this handle are extracted.
+ /// - All the edges and key-value pairs to the right of this handle are put into
+ /// a newly allocated node.
+ pub fn split<A: Allocator + Clone>(
+ mut self,
+ alloc: A,
+ ) -> SplitResult<'a, K, V, marker::Internal> {
+ let old_len = self.node.len();
+ unsafe {
+ let mut new_node = InternalNode::new(alloc);
+ let kv = self.split_leaf_data(&mut new_node.data);
+ let new_len = usize::from(new_node.data.len);
+ move_to_slice(
+ self.node.edge_area_mut(self.idx + 1..old_len + 1),
+ &mut new_node.edges[..new_len + 1],
+ );
+
+ let height = self.node.height;
+ let right = NodeRef::from_new_internal(new_node, height);
+
+ SplitResult { left: self.node, kv, right }
+ }
+ }
+}
+
+/// Represents a session for evaluating and performing a balancing operation
+/// around an internal key-value pair.
+pub struct BalancingContext<'a, K, V> {
+ parent: Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::KV>,
+ left_child: NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>,
+ right_child: NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>,
+}
+
+impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::KV> {
+ pub fn consider_for_balancing(self) -> BalancingContext<'a, K, V> {
+ let self1 = unsafe { ptr::read(&self) };
+ let self2 = unsafe { ptr::read(&self) };
+ BalancingContext {
+ parent: self,
+ left_child: self1.left_edge().descend(),
+ right_child: self2.right_edge().descend(),
+ }
+ }
+}
+
+impl<'a, K, V> NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal> {
+ /// Chooses a balancing context involving the node as a child, thus between
+ /// the KV immediately to the left or to the right in the parent node.
+ /// Returns an `Err` if there is no parent.
+ /// Panics if the parent is empty.
+ ///
+ /// Prefers the left side, to be optimal if the given node is somehow
+ /// underfull, meaning here only that it has fewer elements than its left
+ /// sibling and than its right sibling, if they exist. In that case,
+ /// merging with the left sibling is faster, since we only need to move
+ /// the node's N elements, instead of shifting them to the right and moving
+ /// more than N elements in front. Stealing from the left sibling is also
+ /// typically faster, since we only need to shift the node's N elements to
+ /// the right, instead of shifting at least N of the sibling's elements to
+ /// the left.
+ pub fn choose_parent_kv(self) -> Result<LeftOrRight<BalancingContext<'a, K, V>>, Self> {
+ match unsafe { ptr::read(&self) }.ascend() {
+ Ok(parent_edge) => match parent_edge.left_kv() {
+ Ok(left_parent_kv) => Ok(LeftOrRight::Left(BalancingContext {
+ parent: unsafe { ptr::read(&left_parent_kv) },
+ left_child: left_parent_kv.left_edge().descend(),
+ right_child: self,
+ })),
+ Err(parent_edge) => match parent_edge.right_kv() {
+ Ok(right_parent_kv) => Ok(LeftOrRight::Right(BalancingContext {
+ parent: unsafe { ptr::read(&right_parent_kv) },
+ left_child: self,
+ right_child: right_parent_kv.right_edge().descend(),
+ })),
+ Err(_) => unreachable!("empty internal node"),
+ },
+ },
+ Err(root) => Err(root),
+ }
+ }
+}
+
+impl<'a, K, V> BalancingContext<'a, K, V> {
+ pub fn left_child_len(&self) -> usize {
+ self.left_child.len()
+ }
+
+ pub fn right_child_len(&self) -> usize {
+ self.right_child.len()
+ }
+
+ pub fn into_left_child(self) -> NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal> {
+ self.left_child
+ }
+
+ pub fn into_right_child(self) -> NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal> {
+ self.right_child
+ }
+
+ /// Returns whether merging is possible, i.e., whether there is enough room
+ /// in a node to combine the central KV with both adjacent child nodes.
+ pub fn can_merge(&self) -> bool {
+ self.left_child.len() + 1 + self.right_child.len() <= CAPACITY
+ }
+}
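+
+// For example, assuming the usual `B = 6` (so `CAPACITY = 11`): children of lengths
+// 5 and 5 can merge with their parent KV (5 + 1 + 5 = 11 <= 11), while lengths 5 and 6
+// cannot (12 > 11), in which case rebalancing typically steals elements instead
+// (see `bulk_steal_left` / `bulk_steal_right`).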
+
+impl<'a, K: 'a, V: 'a> BalancingContext<'a, K, V> {
+ /// Performs a merge and lets a closure decide what to return.
+ fn do_merge<
+ F: FnOnce(
+ NodeRef<marker::Mut<'a>, K, V, marker::Internal>,
+ NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>,
+ ) -> R,
+ R,
+ A: Allocator,
+ >(
+ self,
+ result: F,
+ alloc: A,
+ ) -> R {
+ let Handle { node: mut parent_node, idx: parent_idx, _marker } = self.parent;
+ let old_parent_len = parent_node.len();
+ let mut left_node = self.left_child;
+ let old_left_len = left_node.len();
+ let mut right_node = self.right_child;
+ let right_len = right_node.len();
+ let new_left_len = old_left_len + 1 + right_len;
+
+ assert!(new_left_len <= CAPACITY);
+
+ unsafe {
+ *left_node.len_mut() = new_left_len as u16;
+
+ let parent_key = slice_remove(parent_node.key_area_mut(..old_parent_len), parent_idx);
+ left_node.key_area_mut(old_left_len).write(parent_key);
+ move_to_slice(
+ right_node.key_area_mut(..right_len),
+ left_node.key_area_mut(old_left_len + 1..new_left_len),
+ );
+
+ let parent_val = slice_remove(parent_node.val_area_mut(..old_parent_len), parent_idx);
+ left_node.val_area_mut(old_left_len).write(parent_val);
+ move_to_slice(
+ right_node.val_area_mut(..right_len),
+ left_node.val_area_mut(old_left_len + 1..new_left_len),
+ );
+
+ slice_remove(&mut parent_node.edge_area_mut(..old_parent_len + 1), parent_idx + 1);
+ parent_node.correct_childrens_parent_links(parent_idx + 1..old_parent_len);
+ *parent_node.len_mut() -= 1;
+
+ if parent_node.height > 1 {
+ // SAFETY: the height of the nodes being merged is one below the height
+ // of the node of this edge, thus above zero, so they are internal.
+ let mut left_node = left_node.reborrow_mut().cast_to_internal_unchecked();
+ let mut right_node = right_node.cast_to_internal_unchecked();
+ move_to_slice(
+ right_node.edge_area_mut(..right_len + 1),
+ left_node.edge_area_mut(old_left_len + 1..new_left_len + 1),
+ );
+
+ left_node.correct_childrens_parent_links(old_left_len + 1..new_left_len + 1);
+
+ alloc.deallocate(right_node.node.cast(), Layout::new::<InternalNode<K, V>>());
+ } else {
+ alloc.deallocate(right_node.node.cast(), Layout::new::<LeafNode<K, V>>());
+ }
+ }
+ result(parent_node, left_node)
+ }
+
+ /// Merges the parent's key-value pair and both adjacent child nodes into
+ /// the left child node and returns the shrunk parent node.
+ ///
+ /// Panics unless we `.can_merge()`.
+ pub fn merge_tracking_parent<A: Allocator + Clone>(
+ self,
+ alloc: A,
+ ) -> NodeRef<marker::Mut<'a>, K, V, marker::Internal> {
+ self.do_merge(|parent, _child| parent, alloc)
+ }
+
+ /// Merges the parent's key-value pair and both adjacent child nodes into
+ /// the left child node and returns that child node.
+ ///
+ /// Panics unless we `.can_merge()`.
+ pub fn merge_tracking_child<A: Allocator + Clone>(
+ self,
+ alloc: A,
+ ) -> NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal> {
+ self.do_merge(|_parent, child| child, alloc)
+ }
+
+ /// Merges the parent's key-value pair and both adjacent child nodes into
+ /// the left child node and returns the edge handle in that child node
+ /// where the tracked child edge ended up.
+ ///
+ /// Panics unless we `.can_merge()`.
+ pub fn merge_tracking_child_edge<A: Allocator + Clone>(
+ self,
+ track_edge_idx: LeftOrRight<usize>,
+ alloc: A,
+ ) -> Handle<NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>, marker::Edge> {
+ let old_left_len = self.left_child.len();
+ let right_len = self.right_child.len();
+ assert!(match track_edge_idx {
+ LeftOrRight::Left(idx) => idx <= old_left_len,
+ LeftOrRight::Right(idx) => idx <= right_len,
+ });
+ let child = self.merge_tracking_child(alloc);
+ let new_idx = match track_edge_idx {
+ LeftOrRight::Left(idx) => idx,
+ LeftOrRight::Right(idx) => old_left_len + 1 + idx,
+ };
+ unsafe { Handle::new_edge(child, new_idx) }
+ }
+
+ /// Removes a key-value pair from the left child and places it in the key-value storage
+ /// of the parent, while pushing the old parent key-value pair into the right child.
+ /// Returns a handle to the edge in the right child corresponding to where the original
+ /// edge specified by `track_right_edge_idx` ended up.
+ pub fn steal_left(
+ mut self,
+ track_right_edge_idx: usize,
+ ) -> Handle<NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>, marker::Edge> {
+ self.bulk_steal_left(1);
+ unsafe { Handle::new_edge(self.right_child, 1 + track_right_edge_idx) }
+ }
+
+ /// Removes a key-value pair from the right child and places it in the key-value storage
+ /// of the parent, while pushing the old parent key-value pair onto the left child.
+ /// Returns a handle to the edge in the left child specified by `track_left_edge_idx`,
+ /// which didn't move.
+ pub fn steal_right(
+ mut self,
+ track_left_edge_idx: usize,
+ ) -> Handle<NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>, marker::Edge> {
+ self.bulk_steal_right(1);
+ unsafe { Handle::new_edge(self.left_child, track_left_edge_idx) }
+ }
+
+ /// This does stealing similar to `steal_left` but steals multiple elements at once.
+ pub fn bulk_steal_left(&mut self, count: usize) {
+ assert!(count > 0);
+ unsafe {
+ let left_node = &mut self.left_child;
+ let old_left_len = left_node.len();
+ let right_node = &mut self.right_child;
+ let old_right_len = right_node.len();
+
+ // Make sure that we may steal safely.
+ assert!(old_right_len + count <= CAPACITY);
+ assert!(old_left_len >= count);
+
+ let new_left_len = old_left_len - count;
+ let new_right_len = old_right_len + count;
+ *left_node.len_mut() = new_left_len as u16;
+ *right_node.len_mut() = new_right_len as u16;
+
+ // Move leaf data.
+ {
+ // Make room for stolen elements in the right child.
+ slice_shr(right_node.key_area_mut(..new_right_len), count);
+ slice_shr(right_node.val_area_mut(..new_right_len), count);
+
+ // Move elements from the left child to the right one.
+ move_to_slice(
+ left_node.key_area_mut(new_left_len + 1..old_left_len),
+ right_node.key_area_mut(..count - 1),
+ );
+ move_to_slice(
+ left_node.val_area_mut(new_left_len + 1..old_left_len),
+ right_node.val_area_mut(..count - 1),
+ );
+
+ // Move the left-most stolen pair to the parent.
+ let k = left_node.key_area_mut(new_left_len).assume_init_read();
+ let v = left_node.val_area_mut(new_left_len).assume_init_read();
+ let (k, v) = self.parent.replace_kv(k, v);
+
+ // Move parent's key-value pair to the right child.
+ right_node.key_area_mut(count - 1).write(k);
+ right_node.val_area_mut(count - 1).write(v);
+ }
+
+ match (left_node.reborrow_mut().force(), right_node.reborrow_mut().force()) {
+ (ForceResult::Internal(mut left), ForceResult::Internal(mut right)) => {
+ // Make room for stolen edges.
+ slice_shr(right.edge_area_mut(..new_right_len + 1), count);
+
+ // Steal edges.
+ move_to_slice(
+ left.edge_area_mut(new_left_len + 1..old_left_len + 1),
+ right.edge_area_mut(..count),
+ );
+
+ right.correct_childrens_parent_links(0..new_right_len + 1);
+ }
+ (ForceResult::Leaf(_), ForceResult::Leaf(_)) => {}
+ _ => unreachable!(),
+ }
+ }
+ }
+
+ /// The symmetric counterpart of `bulk_steal_left`.
+ pub fn bulk_steal_right(&mut self, count: usize) {
+ assert!(count > 0);
+ unsafe {
+ let left_node = &mut self.left_child;
+ let old_left_len = left_node.len();
+ let right_node = &mut self.right_child;
+ let old_right_len = right_node.len();
+
+ // Make sure that we may steal safely.
+ assert!(old_left_len + count <= CAPACITY);
+ assert!(old_right_len >= count);
+
+ let new_left_len = old_left_len + count;
+ let new_right_len = old_right_len - count;
+ *left_node.len_mut() = new_left_len as u16;
+ *right_node.len_mut() = new_right_len as u16;
+
+ // Move leaf data.
+ {
+ // Move the right-most stolen pair to the parent.
+ let k = right_node.key_area_mut(count - 1).assume_init_read();
+ let v = right_node.val_area_mut(count - 1).assume_init_read();
+ let (k, v) = self.parent.replace_kv(k, v);
+
+ // Move parent's key-value pair to the left child.
+ left_node.key_area_mut(old_left_len).write(k);
+ left_node.val_area_mut(old_left_len).write(v);
+
+ // Move elements from the right child to the left one.
+ move_to_slice(
+ right_node.key_area_mut(..count - 1),
+ left_node.key_area_mut(old_left_len + 1..new_left_len),
+ );
+ move_to_slice(
+ right_node.val_area_mut(..count - 1),
+ left_node.val_area_mut(old_left_len + 1..new_left_len),
+ );
+
+ // Fill gap where stolen elements used to be.
+ slice_shl(right_node.key_area_mut(..old_right_len), count);
+ slice_shl(right_node.val_area_mut(..old_right_len), count);
+ }
+
+ match (left_node.reborrow_mut().force(), right_node.reborrow_mut().force()) {
+ (ForceResult::Internal(mut left), ForceResult::Internal(mut right)) => {
+ // Steal edges.
+ move_to_slice(
+ right.edge_area_mut(..count),
+ left.edge_area_mut(old_left_len + 1..new_left_len + 1),
+ );
+
+ // Fill gap where stolen edges used to be.
+ slice_shl(right.edge_area_mut(..old_right_len + 1), count);
+
+ left.correct_childrens_parent_links(old_left_len + 1..new_left_len + 1);
+ right.correct_childrens_parent_links(0..new_right_len + 1);
+ }
+ (ForceResult::Leaf(_), ForceResult::Leaf(_)) => {}
+ _ => unreachable!(),
+ }
+ }
+ }
+}
+
+impl<BorrowType, K, V> Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge> {
+ pub fn forget_node_type(
+ self,
+ ) -> Handle<NodeRef<BorrowType, K, V, marker::LeafOrInternal>, marker::Edge> {
+ unsafe { Handle::new_edge(self.node.forget_type(), self.idx) }
+ }
+}
+
+impl<BorrowType, K, V> Handle<NodeRef<BorrowType, K, V, marker::Internal>, marker::Edge> {
+ pub fn forget_node_type(
+ self,
+ ) -> Handle<NodeRef<BorrowType, K, V, marker::LeafOrInternal>, marker::Edge> {
+ unsafe { Handle::new_edge(self.node.forget_type(), self.idx) }
+ }
+}
+
+impl<BorrowType, K, V> Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::KV> {
+ pub fn forget_node_type(
+ self,
+ ) -> Handle<NodeRef<BorrowType, K, V, marker::LeafOrInternal>, marker::KV> {
+ unsafe { Handle::new_kv(self.node.forget_type(), self.idx) }
+ }
+}
+
+impl<BorrowType, K, V, Type> Handle<NodeRef<BorrowType, K, V, marker::LeafOrInternal>, Type> {
+ /// Checks whether the underlying node is an `Internal` node or a `Leaf` node.
+ pub fn force(
+ self,
+ ) -> ForceResult<
+ Handle<NodeRef<BorrowType, K, V, marker::Leaf>, Type>,
+ Handle<NodeRef<BorrowType, K, V, marker::Internal>, Type>,
+ > {
+ match self.node.force() {
+ ForceResult::Leaf(node) => {
+ ForceResult::Leaf(Handle { node, idx: self.idx, _marker: PhantomData })
+ }
+ ForceResult::Internal(node) => {
+ ForceResult::Internal(Handle { node, idx: self.idx, _marker: PhantomData })
+ }
+ }
+ }
+}
+
+impl<'a, K, V, Type> Handle<NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>, Type> {
+ /// Unsafely asserts to the compiler the static information that the handle's node is a `Leaf`.
+ pub unsafe fn cast_to_leaf_unchecked(
+ self,
+ ) -> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, Type> {
+ let node = unsafe { self.node.cast_to_leaf_unchecked() };
+ Handle { node, idx: self.idx, _marker: PhantomData }
+ }
+}
+
+impl<'a, K, V> Handle<NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>, marker::Edge> {
+ /// Moves the suffix after `self` from one node to another. `right` must be empty.
+ /// The first edge of `right` remains unchanged.
+ pub fn move_suffix(
+ &mut self,
+ right: &mut NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>,
+ ) {
+ unsafe {
+ let new_left_len = self.idx;
+ let mut left_node = self.reborrow_mut().into_node();
+ let old_left_len = left_node.len();
+
+ let new_right_len = old_left_len - new_left_len;
+ let mut right_node = right.reborrow_mut();
+
+ assert!(right_node.len() == 0);
+ assert!(left_node.height == right_node.height);
+
+ if new_right_len > 0 {
+ *left_node.len_mut() = new_left_len as u16;
+ *right_node.len_mut() = new_right_len as u16;
+
+ move_to_slice(
+ left_node.key_area_mut(new_left_len..old_left_len),
+ right_node.key_area_mut(..new_right_len),
+ );
+ move_to_slice(
+ left_node.val_area_mut(new_left_len..old_left_len),
+ right_node.val_area_mut(..new_right_len),
+ );
+ match (left_node.force(), right_node.force()) {
+ (ForceResult::Internal(mut left), ForceResult::Internal(mut right)) => {
+ move_to_slice(
+ left.edge_area_mut(new_left_len + 1..old_left_len + 1),
+ right.edge_area_mut(1..new_right_len + 1),
+ );
+ right.correct_childrens_parent_links(1..new_right_len + 1);
+ }
+ (ForceResult::Leaf(_), ForceResult::Leaf(_)) => {}
+ _ => unreachable!(),
+ }
+ }
+ }
+ }
+}
+
+pub enum ForceResult<Leaf, Internal> {
+ Leaf(Leaf),
+ Internal(Internal),
+}
+
+/// Result of insertion, when a node needed to expand beyond its capacity.
+pub struct SplitResult<'a, K, V, NodeType> {
+ // Altered node in existing tree with elements and edges that belong to the left of `kv`.
+ pub left: NodeRef<marker::Mut<'a>, K, V, NodeType>,
+ // Some key and value that existed before and were split off, to be inserted elsewhere.
+ pub kv: (K, V),
+ // Owned, unattached, new node with elements and edges that belong to the right of `kv`.
+ pub right: NodeRef<marker::Owned, K, V, NodeType>,
+}
+
+impl<'a, K, V> SplitResult<'a, K, V, marker::Leaf> {
+ pub fn forget_node_type(self) -> SplitResult<'a, K, V, marker::LeafOrInternal> {
+ SplitResult { left: self.left.forget_type(), kv: self.kv, right: self.right.forget_type() }
+ }
+}
+
+impl<'a, K, V> SplitResult<'a, K, V, marker::Internal> {
+ pub fn forget_node_type(self) -> SplitResult<'a, K, V, marker::LeafOrInternal> {
+ SplitResult { left: self.left.forget_type(), kv: self.kv, right: self.right.forget_type() }
+ }
+}
+
+pub mod marker {
+ use core::marker::PhantomData;
+
+ pub enum Leaf {}
+ pub enum Internal {}
+ pub enum LeafOrInternal {}
+
+ pub enum Owned {}
+ pub enum Dying {}
+ pub struct Immut<'a>(PhantomData<&'a ()>);
+ pub struct Mut<'a>(PhantomData<&'a mut ()>);
+ pub struct ValMut<'a>(PhantomData<&'a mut ()>);
+
+ pub trait BorrowType {
+ // Whether node references of this borrow type allow traversing
+ // to other nodes in the tree.
+ const PERMITS_TRAVERSAL: bool = true;
+ }
+ impl BorrowType for Owned {
+ // Traversal isn't needed, it happens using the result of `borrow_mut`.
+ // By disabling traversal, and only creating new references to roots,
+ // we know that every reference of the `Owned` type is to a root node.
+ const PERMITS_TRAVERSAL: bool = false;
+ }
+ impl BorrowType for Dying {}
+ impl<'a> BorrowType for Immut<'a> {}
+ impl<'a> BorrowType for Mut<'a> {}
+ impl<'a> BorrowType for ValMut<'a> {}
+
+ pub enum KV {}
+ pub enum Edge {}
+}
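+
+// Consequence of `PERMITS_TRAVERSAL` (illustrative): `ascend` and `descend` begin with
+// `assert!(BorrowType::PERMITS_TRAVERSAL)`, so a `NodeRef<marker::Owned, ..>`, which by
+// construction always designates a root, is never walked through the tree; callers
+// first obtain a `Mut` or `Immut` reference via `borrow_mut` or `reborrow`.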
+
+/// Inserts a value into a slice of initialized elements followed by one uninitialized element.
+///
+/// # Safety
+/// The slice has more than `idx` elements.
+unsafe fn slice_insert<T>(slice: &mut [MaybeUninit<T>], idx: usize, val: T) {
+ unsafe {
+ let len = slice.len();
+ debug_assert!(len > idx);
+ let slice_ptr = slice.as_mut_ptr();
+ if len > idx + 1 {
+ ptr::copy(slice_ptr.add(idx), slice_ptr.add(idx + 1), len - idx - 1);
+ }
+ (*slice_ptr.add(idx)).write(val);
+ }
+}
+
+/// Removes and returns a value from a slice of all initialized elements, leaving behind one
+/// trailing uninitialized element.
+///
+/// # Safety
+/// The slice has more than `idx` elements.
+unsafe fn slice_remove<T>(slice: &mut [MaybeUninit<T>], idx: usize) -> T {
+ unsafe {
+ let len = slice.len();
+ debug_assert!(idx < len);
+ let slice_ptr = slice.as_mut_ptr();
+ let ret = (*slice_ptr.add(idx)).assume_init_read();
+ ptr::copy(slice_ptr.add(idx + 1), slice_ptr.add(idx), len - idx - 1);
+ ret
+ }
+}
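+
+// Behavior sketch on a tiny buffer (hypothetical values): starting from
+// `buf == [init(1), init(2), uninit]`,
+//
+//     slice_insert(&mut buf, 1, 9);       // buf is now [1, 9, 2], all initialized
+//     let x = slice_remove(&mut buf, 0);  // x == 1, buf is now [9, 2, uninit]
+//
+// Both helpers shift with `ptr::copy`, so elements are moved bitwise rather than cloned.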
+
+/// Shifts the elements in a slice `distance` positions to the left.
+///
+/// # Safety
+/// The slice has at least `distance` elements.
+unsafe fn slice_shl<T>(slice: &mut [MaybeUninit<T>], distance: usize) {
+ unsafe {
+ let slice_ptr = slice.as_mut_ptr();
+ ptr::copy(slice_ptr.add(distance), slice_ptr, slice.len() - distance);
+ }
+}
+
+/// Shifts the elements in a slice `distance` positions to the right.
+///
+/// # Safety
+/// The slice has at least `distance` elements.
+unsafe fn slice_shr<T>(slice: &mut [MaybeUninit<T>], distance: usize) {
+ unsafe {
+ let slice_ptr = slice.as_mut_ptr();
+ ptr::copy(slice_ptr, slice_ptr.add(distance), slice.len() - distance);
+ }
+}
+
+/// Moves all values from a slice of initialized elements to a slice
+/// of uninitialized elements, leaving behind `src` as all uninitialized.
+/// Works like `dst.copy_from_slice(src)` but does not require `T` to be `Copy`.
+fn move_to_slice<T>(src: &mut [MaybeUninit<T>], dst: &mut [MaybeUninit<T>]) {
+ assert!(src.len() == dst.len());
+ unsafe {
+ ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr(), src.len());
+ }
+}
+
+#[cfg(test)]
+mod tests;
diff --git a/library/alloc/src/collections/btree/node/tests.rs b/library/alloc/src/collections/btree/node/tests.rs
new file mode 100644
index 000000000..aadb0dc9c
--- /dev/null
+++ b/library/alloc/src/collections/btree/node/tests.rs
@@ -0,0 +1,102 @@
+use super::super::navigate;
+use super::*;
+use crate::alloc::Global;
+use crate::fmt::Debug;
+use crate::string::String;
+
+impl<'a, K: 'a, V: 'a> NodeRef<marker::Immut<'a>, K, V, marker::LeafOrInternal> {
+ // Asserts that the back pointer in each reachable node points to its parent.
+ pub fn assert_back_pointers(self) {
+ if let ForceResult::Internal(node) = self.force() {
+ for idx in 0..=node.len() {
+ let edge = unsafe { Handle::new_edge(node, idx) };
+ let child = edge.descend();
+ assert!(child.ascend().ok() == Some(edge));
+ child.assert_back_pointers();
+ }
+ }
+ }
+
+ // Renders a multi-line display of the keys in order and in tree hierarchy,
+ // picturing the tree growing sideways from its root on the left to its
+ // leaves on the right.
+ pub fn dump_keys(self) -> String
+ where
+ K: Debug,
+ {
+ let mut result = String::new();
+ self.visit_nodes_in_order(|pos| match pos {
+ navigate::Position::Leaf(leaf) => {
+ let depth = self.height();
+ let indent = " ".repeat(depth);
+ result += &format!("\n{}{:?}", indent, leaf.keys());
+ }
+ navigate::Position::Internal(_) => {}
+ navigate::Position::InternalKV(kv) => {
+ let depth = self.height() - kv.into_node().height();
+ let indent = " ".repeat(depth);
+ result += &format!("\n{}{:?}", indent, kv.into_kv().0);
+ }
+ });
+ result
+ }
+}
+
+#[test]
+fn test_splitpoint() {
+ for idx in 0..=CAPACITY {
+ let (middle_kv_idx, insertion) = splitpoint(idx);
+
+ // Simulate performing the split:
+ let mut left_len = middle_kv_idx;
+ let mut right_len = CAPACITY - middle_kv_idx - 1;
+ match insertion {
+ LeftOrRight::Left(edge_idx) => {
+ assert!(edge_idx <= left_len);
+ left_len += 1;
+ }
+ LeftOrRight::Right(edge_idx) => {
+ assert!(edge_idx <= right_len);
+ right_len += 1;
+ }
+ }
+ assert!(left_len >= MIN_LEN_AFTER_SPLIT);
+ assert!(right_len >= MIN_LEN_AFTER_SPLIT);
+ assert!(left_len + right_len == CAPACITY);
+ }
+}
+
+#[test]
+fn test_partial_eq() {
+ let mut root1 = NodeRef::new_leaf(Global);
+ root1.borrow_mut().push(1, ());
+ let mut root1 = NodeRef::new_internal(root1.forget_type(), Global).forget_type();
+ let root2 = Root::new(Global);
+ root1.reborrow().assert_back_pointers();
+ root2.reborrow().assert_back_pointers();
+
+ let leaf_edge_1a = root1.reborrow().first_leaf_edge().forget_node_type();
+ let leaf_edge_1b = root1.reborrow().last_leaf_edge().forget_node_type();
+ let top_edge_1 = root1.reborrow().first_edge();
+ let top_edge_2 = root2.reborrow().first_edge();
+
+ assert!(leaf_edge_1a == leaf_edge_1a);
+ assert!(leaf_edge_1a != leaf_edge_1b);
+ assert!(leaf_edge_1a != top_edge_1);
+ assert!(leaf_edge_1a != top_edge_2);
+ assert!(top_edge_1 == top_edge_1);
+ assert!(top_edge_1 != top_edge_2);
+
+ root1.pop_internal_level(Global);
+ unsafe { root1.into_dying().deallocate_and_ascend(Global) };
+ unsafe { root2.into_dying().deallocate_and_ascend(Global) };
+}
+
+#[test]
+#[cfg(target_arch = "x86_64")]
+fn test_sizes() {
+ assert_eq!(core::mem::size_of::<LeafNode<(), ()>>(), 16);
+ assert_eq!(core::mem::size_of::<LeafNode<i64, i64>>(), 16 + CAPACITY * 2 * 8);
+ assert_eq!(core::mem::size_of::<InternalNode<(), ()>>(), 16 + (CAPACITY + 1) * 8);
+ assert_eq!(core::mem::size_of::<InternalNode<i64, i64>>(), 16 + (CAPACITY * 3 + 1) * 8);
+}
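
The asserted sizes follow from the node layout: a 16-byte header (as the `LeafNode<(), ()>` assertion above establishes for x86_64), `CAPACITY` keys and values, and for internal nodes an extra `CAPACITY + 1` child pointers. A worked-arithmetic sketch of those expressions, assuming `CAPACITY` is 11 (the value for the default B = 6); the concrete totals are illustrative, not quoted from the diff:

fn main() {
    let header = 16;   // parent pointer + parent_idx + len, padded, per the assertion above
    let capacity = 11; // assumed node::CAPACITY for the default B = 6
    let word = 8;      // bytes per i64 and per child pointer on x86_64
    assert_eq!(header + capacity * 2 * word, 192);        // LeafNode<i64, i64>: keys + vals
    assert_eq!(header + (capacity + 1) * word, 112);      // InternalNode<(), ()>: edges only
    assert_eq!(header + (capacity * 3 + 1) * word, 288);  // InternalNode<i64, i64>: keys + vals + edges
}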
diff --git a/library/alloc/src/collections/btree/remove.rs b/library/alloc/src/collections/btree/remove.rs
new file mode 100644
index 000000000..090429925
--- /dev/null
+++ b/library/alloc/src/collections/btree/remove.rs
@@ -0,0 +1,95 @@
+use super::map::MIN_LEN;
+use super::node::{marker, ForceResult::*, Handle, LeftOrRight::*, NodeRef};
+use core::alloc::Allocator;
+
+impl<'a, K: 'a, V: 'a> Handle<NodeRef<marker::Mut<'a>, K, V, marker::LeafOrInternal>, marker::KV> {
+ /// Removes a key-value pair from the tree, and returns that pair, as well as
+ /// the leaf edge corresponding to that former pair. It's possible this empties
+ /// a root node that is internal, which the caller should pop from the map
+ /// holding the tree. The caller should also decrement the map's length.
+ pub fn remove_kv_tracking<F: FnOnce(), A: Allocator + Clone>(
+ self,
+ handle_emptied_internal_root: F,
+ alloc: A,
+ ) -> ((K, V), Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge>) {
+ match self.force() {
+ Leaf(node) => node.remove_leaf_kv(handle_emptied_internal_root, alloc),
+ Internal(node) => node.remove_internal_kv(handle_emptied_internal_root, alloc),
+ }
+ }
+}
+
+impl<'a, K: 'a, V: 'a> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::KV> {
+ fn remove_leaf_kv<F: FnOnce(), A: Allocator + Clone>(
+ self,
+ handle_emptied_internal_root: F,
+ alloc: A,
+ ) -> ((K, V), Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge>) {
+ let (old_kv, mut pos) = self.remove();
+ let len = pos.reborrow().into_node().len();
+ if len < MIN_LEN {
+ let idx = pos.idx();
+ // We have to temporarily forget the child type, because there is no
+ // distinct node type for the immediate parents of a leaf.
+ let new_pos = match pos.into_node().forget_type().choose_parent_kv() {
+ Ok(Left(left_parent_kv)) => {
+ debug_assert!(left_parent_kv.right_child_len() == MIN_LEN - 1);
+ if left_parent_kv.can_merge() {
+ left_parent_kv.merge_tracking_child_edge(Right(idx), alloc.clone())
+ } else {
+ debug_assert!(left_parent_kv.left_child_len() > MIN_LEN);
+ left_parent_kv.steal_left(idx)
+ }
+ }
+ Ok(Right(right_parent_kv)) => {
+ debug_assert!(right_parent_kv.left_child_len() == MIN_LEN - 1);
+ if right_parent_kv.can_merge() {
+ right_parent_kv.merge_tracking_child_edge(Left(idx), alloc.clone())
+ } else {
+ debug_assert!(right_parent_kv.right_child_len() > MIN_LEN);
+ right_parent_kv.steal_right(idx)
+ }
+ }
+ Err(pos) => unsafe { Handle::new_edge(pos, idx) },
+ };
+ // SAFETY: `new_pos` is the leaf we started from or a sibling.
+ pos = unsafe { new_pos.cast_to_leaf_unchecked() };
+
+ // The parent (if any) has shrunk only if we merged, but skipping
+ // the following step in the non-merge case does not pay off in benchmarks.
+ //
+ // SAFETY: Handling the parent recursively won't destroy or rearrange
+ // the leaf where `pos` is; at worst it will destroy or rearrange the
+ // parent through the grandparent, thus changing the link to the
+ // parent inside the leaf.
+ if let Ok(parent) = unsafe { pos.reborrow_mut() }.into_node().ascend() {
+ if !parent.into_node().forget_type().fix_node_and_affected_ancestors(alloc) {
+ handle_emptied_internal_root();
+ }
+ }
+ }
+ (old_kv, pos)
+ }
+}
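
When a removal leaves a leaf below `MIN_LEN`, the code above either merges the leaf with a sibling (when the two children plus the separating parent KV fit in one node) or steals an element from a sibling with spare capacity. A schematic, standalone sketch of that decision on plain lengths; the `choose_fix` function, its `Fix` enum, the constants, and the simplified sibling handling (the real code prefers the left parent KV via `choose_parent_kv`) are illustrative assumptions:

const MIN_LEN: usize = 5;   // assumed node MIN_LEN for the default B = 6
const CAPACITY: usize = 11; // assumed node CAPACITY = 2 * B - 1

#[derive(Debug, PartialEq)]
enum Fix {
    MergeWithLeft,
    MergeWithRight,
    StealFromLeft,
    StealFromRight,
    Nothing,
}

// Mirror of the merge-vs-steal choice: merge when both children plus the
// separating parent KV fit in one node, otherwise steal from a fuller sibling.
fn choose_fix(len: usize, left: Option<usize>, right: Option<usize>) -> Fix {
    if len >= MIN_LEN {
        return Fix::Nothing;
    }
    match (left, right) {
        (Some(l), _) if l + 1 + len <= CAPACITY => Fix::MergeWithLeft,
        (_, Some(r)) if len + 1 + r <= CAPACITY => Fix::MergeWithRight,
        (Some(l), _) if l > MIN_LEN => Fix::StealFromLeft,
        (_, Some(r)) if r > MIN_LEN => Fix::StealFromRight,
        _ => Fix::Nothing,
    }
}

fn main() {
    // A leaf that dropped below MIN_LEN next to a minimal left sibling: the two
    // plus the parent KV fit in one node, so they are merged.
    assert_eq!(choose_fix(MIN_LEN - 1, Some(MIN_LEN), None), Fix::MergeWithLeft);
    // Next to a full left sibling, merging cannot fit, so an element is stolen.
    assert_eq!(choose_fix(MIN_LEN - 1, Some(CAPACITY), None), Fix::StealFromLeft);
}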
+
+impl<'a, K: 'a, V: 'a> Handle<NodeRef<marker::Mut<'a>, K, V, marker::Internal>, marker::KV> {
+ fn remove_internal_kv<F: FnOnce(), A: Allocator + Clone>(
+ self,
+ handle_emptied_internal_root: F,
+ alloc: A,
+ ) -> ((K, V), Handle<NodeRef<marker::Mut<'a>, K, V, marker::Leaf>, marker::Edge>) {
+ // Remove an adjacent KV from its leaf and then put it back in place of
+ // the element we were asked to remove. Prefer the left adjacent KV,
+ // for the reasons listed in `choose_parent_kv`.
+ let left_leaf_kv = self.left_edge().descend().last_leaf_edge().left_kv();
+ let left_leaf_kv = unsafe { left_leaf_kv.ok().unwrap_unchecked() };
+ let (left_kv, left_hole) = left_leaf_kv.remove_leaf_kv(handle_emptied_internal_root, alloc);
+
+ // The internal node may have been stolen from or merged. Go back right
+ // to find where the original KV ended up.
+ let mut internal = unsafe { left_hole.next_kv().ok().unwrap_unchecked() };
+ let old_kv = internal.replace_kv(left_kv.0, left_kv.1);
+ let pos = internal.next_leaf_edge();
+ (old_kv, pos)
+ }
+}
diff --git a/library/alloc/src/collections/btree/search.rs b/library/alloc/src/collections/btree/search.rs
new file mode 100644
index 000000000..ad3522b4e
--- /dev/null
+++ b/library/alloc/src/collections/btree/search.rs
@@ -0,0 +1,285 @@
+use core::borrow::Borrow;
+use core::cmp::Ordering;
+use core::ops::{Bound, RangeBounds};
+
+use super::node::{marker, ForceResult::*, Handle, NodeRef};
+
+use SearchBound::*;
+use SearchResult::*;
+
+pub enum SearchBound<T> {
+ /// An inclusive bound to look for, just like `Bound::Included(T)`.
+ Included(T),
+ /// An exclusive bound to look for, just like `Bound::Excluded(T)`.
+ Excluded(T),
+ /// An unconditional inclusive bound, just like `Bound::Unbounded`.
+ AllIncluded,
+ /// An unconditional exclusive bound.
+ AllExcluded,
+}
+
+impl<T> SearchBound<T> {
+ pub fn from_range(range_bound: Bound<T>) -> Self {
+ match range_bound {
+ Bound::Included(t) => Included(t),
+ Bound::Excluded(t) => Excluded(t),
+ Bound::Unbounded => AllIncluded,
+ }
+ }
+}
+
+pub enum SearchResult<BorrowType, K, V, FoundType, GoDownType> {
+ Found(Handle<NodeRef<BorrowType, K, V, FoundType>, marker::KV>),
+ GoDown(Handle<NodeRef<BorrowType, K, V, GoDownType>, marker::Edge>),
+}
+
+pub enum IndexResult {
+ KV(usize),
+ Edge(usize),
+}
+
+impl<BorrowType: marker::BorrowType, K, V> NodeRef<BorrowType, K, V, marker::LeafOrInternal> {
+ /// Looks up a given key in a (sub)tree headed by the node, recursively.
+ /// Returns a `Found` with the handle of the matching KV, if any. Otherwise,
+ /// returns a `GoDown` with the handle of the leaf edge where the key belongs.
+ ///
+ /// The result is meaningful only if the tree is ordered by key, like the tree
+ /// in a `BTreeMap` is.
+ pub fn search_tree<Q: ?Sized>(
+ mut self,
+ key: &Q,
+ ) -> SearchResult<BorrowType, K, V, marker::LeafOrInternal, marker::Leaf>
+ where
+ Q: Ord,
+ K: Borrow<Q>,
+ {
+ loop {
+ self = match self.search_node(key) {
+ Found(handle) => return Found(handle),
+ GoDown(handle) => match handle.force() {
+ Leaf(leaf) => return GoDown(leaf),
+ Internal(internal) => internal.descend(),
+ },
+ }
+ }
+ }
+
+ /// Descends to the nearest node where the edge matching the lower bound
+ /// of the range is different from the edge matching the upper bound, i.e.,
+ /// the nearest node that has at least one key contained in the range.
+ ///
+ /// If found, returns an `Ok` with that node, the strictly ascending pair of
+ /// edge indices in the node delimiting the range, and the corresponding
+ /// pair of bounds for continuing the search in the child nodes, in case
+ /// the node is internal.
+ ///
+ /// If not found, returns an `Err` with the leaf edge matching the entire
+ /// range.
+ ///
+ /// As a diagnostic service, panics if the range specifies impossible bounds.
+ ///
+ /// The result is meaningful only if the tree is ordered by key.
+ pub fn search_tree_for_bifurcation<'r, Q: ?Sized, R>(
+ mut self,
+ range: &'r R,
+ ) -> Result<
+ (
+ NodeRef<BorrowType, K, V, marker::LeafOrInternal>,
+ usize,
+ usize,
+ SearchBound<&'r Q>,
+ SearchBound<&'r Q>,
+ ),
+ Handle<NodeRef<BorrowType, K, V, marker::Leaf>, marker::Edge>,
+ >
+ where
+ Q: Ord,
+ K: Borrow<Q>,
+ R: RangeBounds<Q>,
+ {
+ // Determine if map or set is being searched
+ let is_set = <V as super::set_val::IsSetVal>::is_set_val();
+
+ // Inlining these variables should be avoided. We assume the bounds reported by `range`
+ // remain the same, but an adversarial implementation could change them between calls (#81138).
+ let (start, end) = (range.start_bound(), range.end_bound());
+ match (start, end) {
+ (Bound::Excluded(s), Bound::Excluded(e)) if s == e => {
+ if is_set {
+ panic!("range start and end are equal and excluded in BTreeSet")
+ } else {
+ panic!("range start and end are equal and excluded in BTreeMap")
+ }
+ }
+ (Bound::Included(s) | Bound::Excluded(s), Bound::Included(e) | Bound::Excluded(e))
+ if s > e =>
+ {
+ if is_set {
+ panic!("range start is greater than range end in BTreeSet")
+ } else {
+ panic!("range start is greater than range end in BTreeMap")
+ }
+ }
+ _ => {}
+ }
+ let mut lower_bound = SearchBound::from_range(start);
+ let mut upper_bound = SearchBound::from_range(end);
+ loop {
+ let (lower_edge_idx, lower_child_bound) = self.find_lower_bound_index(lower_bound);
+ let (upper_edge_idx, upper_child_bound) =
+ unsafe { self.find_upper_bound_index(upper_bound, lower_edge_idx) };
+ if lower_edge_idx < upper_edge_idx {
+ return Ok((
+ self,
+ lower_edge_idx,
+ upper_edge_idx,
+ lower_child_bound,
+ upper_child_bound,
+ ));
+ }
+ debug_assert_eq!(lower_edge_idx, upper_edge_idx);
+ let common_edge = unsafe { Handle::new_edge(self, lower_edge_idx) };
+ match common_edge.force() {
+ Leaf(common_edge) => return Err(common_edge),
+ Internal(common_edge) => {
+ self = common_edge.descend();
+ lower_bound = lower_child_bound;
+ upper_bound = upper_child_bound;
+ }
+ }
+ }
+ }
+
+ /// Finds an edge in the node delimiting the lower bound of a range.
+ /// Also returns the lower bound to be used for continuing the search in
+ /// the matching child node, if `self` is an internal node.
+ ///
+ /// The result is meaningful only if the tree is ordered by key.
+ pub fn find_lower_bound_edge<'r, Q>(
+ self,
+ bound: SearchBound<&'r Q>,
+ ) -> (Handle<Self, marker::Edge>, SearchBound<&'r Q>)
+ where
+ Q: ?Sized + Ord,
+ K: Borrow<Q>,
+ {
+ let (edge_idx, bound) = self.find_lower_bound_index(bound);
+ let edge = unsafe { Handle::new_edge(self, edge_idx) };
+ (edge, bound)
+ }
+
+ /// Clone of `find_lower_bound_edge` for the upper bound.
+ pub fn find_upper_bound_edge<'r, Q>(
+ self,
+ bound: SearchBound<&'r Q>,
+ ) -> (Handle<Self, marker::Edge>, SearchBound<&'r Q>)
+ where
+ Q: ?Sized + Ord,
+ K: Borrow<Q>,
+ {
+ let (edge_idx, bound) = unsafe { self.find_upper_bound_index(bound, 0) };
+ let edge = unsafe { Handle::new_edge(self, edge_idx) };
+ (edge, bound)
+ }
+}
+
+impl<BorrowType, K, V, Type> NodeRef<BorrowType, K, V, Type> {
+ /// Looks up a given key in the node, without recursion.
+ /// Returns a `Found` with the handle of the matching KV, if any. Otherwise,
+ /// returns a `GoDown` with the handle of the edge where the key might be found
+ /// (if the node is internal) or where the key can be inserted.
+ ///
+ /// The result is meaningful only if the tree is ordered by key, like the tree
+ /// in a `BTreeMap` is.
+ pub fn search_node<Q: ?Sized>(self, key: &Q) -> SearchResult<BorrowType, K, V, Type, Type>
+ where
+ Q: Ord,
+ K: Borrow<Q>,
+ {
+ match unsafe { self.find_key_index(key, 0) } {
+ IndexResult::KV(idx) => Found(unsafe { Handle::new_kv(self, idx) }),
+ IndexResult::Edge(idx) => GoDown(unsafe { Handle::new_edge(self, idx) }),
+ }
+ }
+
+ /// Returns either the KV index in the node at which the key (or an equivalent)
+ /// exists, or the edge index where the key belongs, starting from a particular index.
+ ///
+ /// The result is meaningful only if the tree is ordered by key, like the tree
+ /// in a `BTreeMap` is.
+ ///
+ /// # Safety
+ /// `start_index` must be a valid edge index for the node.
+ unsafe fn find_key_index<Q: ?Sized>(&self, key: &Q, start_index: usize) -> IndexResult
+ where
+ Q: Ord,
+ K: Borrow<Q>,
+ {
+ let node = self.reborrow();
+ let keys = node.keys();
+ debug_assert!(start_index <= keys.len());
+ for (offset, k) in unsafe { keys.get_unchecked(start_index..) }.iter().enumerate() {
+ match key.cmp(k.borrow()) {
+ Ordering::Greater => {}
+ Ordering::Equal => return IndexResult::KV(start_index + offset),
+ Ordering::Less => return IndexResult::Edge(start_index + offset),
+ }
+ }
+ IndexResult::Edge(keys.len())
+ }
+
+ /// Finds an edge index in the node delimiting the lower bound of a range.
+ /// Also returns the lower bound to be used for continuing the search in
+ /// the matching child node, if `self` is an internal node.
+ ///
+ /// The result is meaningful only if the tree is ordered by key.
+ fn find_lower_bound_index<'r, Q>(
+ &self,
+ bound: SearchBound<&'r Q>,
+ ) -> (usize, SearchBound<&'r Q>)
+ where
+ Q: ?Sized + Ord,
+ K: Borrow<Q>,
+ {
+ match bound {
+ Included(key) => match unsafe { self.find_key_index(key, 0) } {
+ IndexResult::KV(idx) => (idx, AllExcluded),
+ IndexResult::Edge(idx) => (idx, bound),
+ },
+ Excluded(key) => match unsafe { self.find_key_index(key, 0) } {
+ IndexResult::KV(idx) => (idx + 1, AllIncluded),
+ IndexResult::Edge(idx) => (idx, bound),
+ },
+ AllIncluded => (0, AllIncluded),
+ AllExcluded => (self.len(), AllExcluded),
+ }
+ }
+
+ /// Mirror image of `find_lower_bound_index` for the upper bound,
+ /// with an additional parameter to skip part of the key array.
+ ///
+ /// # Safety
+ /// `start_index` must be a valid edge index for the node.
+ unsafe fn find_upper_bound_index<'r, Q>(
+ &self,
+ bound: SearchBound<&'r Q>,
+ start_index: usize,
+ ) -> (usize, SearchBound<&'r Q>)
+ where
+ Q: ?Sized + Ord,
+ K: Borrow<Q>,
+ {
+ match bound {
+ Included(key) => match unsafe { self.find_key_index(key, start_index) } {
+ IndexResult::KV(idx) => (idx + 1, AllExcluded),
+ IndexResult::Edge(idx) => (idx, bound),
+ },
+ Excluded(key) => match unsafe { self.find_key_index(key, start_index) } {
+ IndexResult::KV(idx) => (idx, AllIncluded),
+ IndexResult::Edge(idx) => (idx, bound),
+ },
+ AllIncluded => (self.len(), AllIncluded),
+ AllExcluded => (start_index, AllExcluded),
+ }
+ }
+}
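
Within a single node, `find_key_index` is a left-to-right linear scan of the sorted keys that either hits an equal key (a KV index) or stops at the first greater key (an edge index). A standalone sketch of that scan over a plain slice; the `Index` enum and the sample keys are illustrative assumptions, not the module's actual types:

use std::cmp::Ordering;

#[derive(Debug, PartialEq)]
enum Index {
    Kv(usize),
    Edge(usize),
}

// Walk the sorted keys from the left, mirroring the comparison loop above.
fn find_key_index(keys: &[i32], key: i32) -> Index {
    for (i, k) in keys.iter().enumerate() {
        match key.cmp(k) {
            Ordering::Greater => {}
            Ordering::Equal => return Index::Kv(i),
            Ordering::Less => return Index::Edge(i),
        }
    }
    Index::Edge(keys.len())
}

fn main() {
    let keys = [10, 20, 30];
    assert_eq!(find_key_index(&keys, 20), Index::Kv(1));   // exact hit: a KV handle
    assert_eq!(find_key_index(&keys, 25), Index::Edge(2)); // falls between 20 and 30
    assert_eq!(find_key_index(&keys, 40), Index::Edge(3)); // belongs past the last key
}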
diff --git a/library/alloc/src/collections/btree/set.rs b/library/alloc/src/collections/btree/set.rs
new file mode 100644
index 000000000..2cfc08074
--- /dev/null
+++ b/library/alloc/src/collections/btree/set.rs
@@ -0,0 +1,1789 @@
+// This is pretty much entirely stolen from TreeSet, since BTreeMap has an identical interface
+// to TreeMap
+
+use crate::vec::Vec;
+use core::borrow::Borrow;
+use core::cmp::Ordering::{self, Equal, Greater, Less};
+use core::cmp::{max, min};
+use core::fmt::{self, Debug};
+use core::hash::{Hash, Hasher};
+use core::iter::{FromIterator, FusedIterator, Peekable};
+use core::mem::ManuallyDrop;
+use core::ops::{BitAnd, BitOr, BitXor, RangeBounds, Sub};
+
+use super::map::{BTreeMap, Keys};
+use super::merge_iter::MergeIterInner;
+use super::set_val::SetValZST;
+use super::Recover;
+
+use crate::alloc::{Allocator, Global};
+
+// FIXME(conventions): implement bounded iterators
+
+/// An ordered set based on a B-Tree.
+///
+/// See [`BTreeMap`]'s documentation for a detailed discussion of this collection's performance
+/// benefits and drawbacks.
+///
+/// It is a logic error for an item to be modified in such a way that the item's ordering relative
+/// to any other item, as determined by the [`Ord`] trait, changes while it is in the set. This is
+/// normally only possible through [`Cell`], [`RefCell`], global state, I/O, or unsafe code.
+/// The behavior resulting from such a logic error is not specified, but will be encapsulated to the
+/// `BTreeSet` that observed the logic error and not result in undefined behavior. This could
+/// include panics, incorrect results, aborts, memory leaks, and non-termination.
+///
+/// Iterators returned by [`BTreeSet::iter`] produce their items in order, and take worst-case
+/// logarithmic and amortized constant time per item returned.
+///
+/// [`Ord`]: core::cmp::Ord
+/// [`Cell`]: core::cell::Cell
+/// [`RefCell`]: core::cell::RefCell
+///
+/// # Examples
+///
+/// ```
+/// use std::collections::BTreeSet;
+///
+/// // Type inference lets us omit an explicit type signature (which
+/// // would be `BTreeSet<&str>` in this example).
+/// let mut books = BTreeSet::new();
+///
+/// // Add some books.
+/// books.insert("A Dance With Dragons");
+/// books.insert("To Kill a Mockingbird");
+/// books.insert("The Odyssey");
+/// books.insert("The Great Gatsby");
+///
+/// // Check for a specific one.
+/// if !books.contains("The Winds of Winter") {
+/// println!("We have {} books, but The Winds of Winter ain't one.",
+/// books.len());
+/// }
+///
+/// // Remove a book.
+/// books.remove("The Odyssey");
+///
+/// // Iterate over everything.
+/// for book in &books {
+/// println!("{book}");
+/// }
+/// ```
+///
+/// A `BTreeSet` with a known list of items can be initialized from an array:
+///
+/// ```
+/// use std::collections::BTreeSet;
+///
+/// let set = BTreeSet::from([1, 2, 3]);
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "BTreeSet")]
+pub struct BTreeSet<
+ T,
+ #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator + Clone = Global,
+> {
+ map: BTreeMap<T, SetValZST, A>,
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Hash, A: Allocator + Clone> Hash for BTreeSet<T, A> {
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ self.map.hash(state)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: PartialEq, A: Allocator + Clone> PartialEq for BTreeSet<T, A> {
+ fn eq(&self, other: &BTreeSet<T, A>) -> bool {
+ self.map.eq(&other.map)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Eq, A: Allocator + Clone> Eq for BTreeSet<T, A> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: PartialOrd, A: Allocator + Clone> PartialOrd for BTreeSet<T, A> {
+ fn partial_cmp(&self, other: &BTreeSet<T, A>) -> Option<Ordering> {
+ self.map.partial_cmp(&other.map)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Ord, A: Allocator + Clone> Ord for BTreeSet<T, A> {
+ fn cmp(&self, other: &BTreeSet<T, A>) -> Ordering {
+ self.map.cmp(&other.map)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Clone, A: Allocator + Clone> Clone for BTreeSet<T, A> {
+ fn clone(&self) -> Self {
+ BTreeSet { map: self.map.clone() }
+ }
+
+ fn clone_from(&mut self, other: &Self) {
+ self.map.clone_from(&other.map);
+ }
+}
+
+/// An iterator over the items of a `BTreeSet`.
+///
+/// This `struct` is created by the [`iter`] method on [`BTreeSet`].
+/// See its documentation for more.
+///
+/// [`iter`]: BTreeSet::iter
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Iter<'a, T: 'a> {
+ iter: Keys<'a, T, SetValZST>,
+}
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("Iter").field(&self.iter.clone()).finish()
+ }
+}
+
+/// An owning iterator over the items of a `BTreeSet`.
+///
+/// This `struct` is created by the [`into_iter`] method on [`BTreeSet`]
+/// (provided by the [`IntoIterator`] trait). See its documentation for more.
+///
+/// [`into_iter`]: BTreeSet#method.into_iter
+/// [`IntoIterator`]: core::iter::IntoIterator
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(Debug)]
+pub struct IntoIter<
+ T,
+ #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator + Clone = Global,
+> {
+ iter: super::map::IntoIter<T, SetValZST, A>,
+}
+
+/// An iterator over a sub-range of items in a `BTreeSet`.
+///
+/// This `struct` is created by the [`range`] method on [`BTreeSet`].
+/// See its documentation for more.
+///
+/// [`range`]: BTreeSet::range
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[derive(Debug)]
+#[stable(feature = "btree_range", since = "1.17.0")]
+pub struct Range<'a, T: 'a> {
+ iter: super::map::Range<'a, T, SetValZST>,
+}
+
+/// A lazy iterator producing elements in the difference of `BTreeSet`s.
+///
+/// This `struct` is created by the [`difference`] method on [`BTreeSet`].
+/// See its documentation for more.
+///
+/// [`difference`]: BTreeSet::difference
+#[must_use = "this returns the difference as an iterator, \
+ without modifying either input set"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Difference<
+ 'a,
+ T: 'a,
+ #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator + Clone = Global,
+> {
+ inner: DifferenceInner<'a, T, A>,
+}
+enum DifferenceInner<'a, T: 'a, A: Allocator + Clone> {
+ Stitch {
+ // iterate all of `self` and some of `other`, spotting matches along the way
+ self_iter: Iter<'a, T>,
+ other_iter: Peekable<Iter<'a, T>>,
+ },
+ Search {
+ // iterate `self`, look up in `other`
+ self_iter: Iter<'a, T>,
+ other_set: &'a BTreeSet<T, A>,
+ },
+ Iterate(Iter<'a, T>), // simply produce all elements in `self`
+}
+
+// Explicit Debug impl necessary because of issue #26925
+impl<T: Debug, A: Allocator + Clone> Debug for DifferenceInner<'_, T, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ DifferenceInner::Stitch { self_iter, other_iter } => f
+ .debug_struct("Stitch")
+ .field("self_iter", self_iter)
+ .field("other_iter", other_iter)
+ .finish(),
+ DifferenceInner::Search { self_iter, other_set } => f
+ .debug_struct("Search")
+ .field("self_iter", self_iter)
+ .field("other_set", other_set)
+ .finish(),
+ DifferenceInner::Iterate(x) => f.debug_tuple("Iterate").field(x).finish(),
+ }
+ }
+}
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<T: fmt::Debug, A: Allocator + Clone> fmt::Debug for Difference<'_, T, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("Difference").field(&self.inner).finish()
+ }
+}
+
+/// A lazy iterator producing elements in the symmetric difference of `BTreeSet`s.
+///
+/// This `struct` is created by the [`symmetric_difference`] method on
+/// [`BTreeSet`]. See its documentation for more.
+///
+/// [`symmetric_difference`]: BTreeSet::symmetric_difference
+#[must_use = "this returns the difference as an iterator, \
+ without modifying either input set"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct SymmetricDifference<'a, T: 'a>(MergeIterInner<Iter<'a, T>>);
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<T: fmt::Debug> fmt::Debug for SymmetricDifference<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("SymmetricDifference").field(&self.0).finish()
+ }
+}
+
+/// A lazy iterator producing elements in the intersection of `BTreeSet`s.
+///
+/// This `struct` is created by the [`intersection`] method on [`BTreeSet`].
+/// See its documentation for more.
+///
+/// [`intersection`]: BTreeSet::intersection
+#[must_use = "this returns the intersection as an iterator, \
+ without modifying either input set"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Intersection<
+ 'a,
+ T: 'a,
+ #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator + Clone = Global,
+> {
+ inner: IntersectionInner<'a, T, A>,
+}
+enum IntersectionInner<'a, T: 'a, A: Allocator + Clone> {
+ Stitch {
+ // iterate similarly sized sets jointly, spotting matches along the way
+ a: Iter<'a, T>,
+ b: Iter<'a, T>,
+ },
+ Search {
+ // iterate a small set, look up in the large set
+ small_iter: Iter<'a, T>,
+ large_set: &'a BTreeSet<T, A>,
+ },
+ Answer(Option<&'a T>), // return a specific element or emptiness
+}
+
+// Explicit Debug impl necessary because of issue #26925
+impl<T: Debug, A: Allocator + Clone> Debug for IntersectionInner<'_, T, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self {
+ IntersectionInner::Stitch { a, b } => {
+ f.debug_struct("Stitch").field("a", a).field("b", b).finish()
+ }
+ IntersectionInner::Search { small_iter, large_set } => f
+ .debug_struct("Search")
+ .field("small_iter", small_iter)
+ .field("large_set", large_set)
+ .finish(),
+ IntersectionInner::Answer(x) => f.debug_tuple("Answer").field(x).finish(),
+ }
+ }
+}
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<T: Debug, A: Allocator + Clone> Debug for Intersection<'_, T, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("Intersection").field(&self.inner).finish()
+ }
+}
+
+/// A lazy iterator producing elements in the union of `BTreeSet`s.
+///
+/// This `struct` is created by the [`union`] method on [`BTreeSet`].
+/// See its documentation for more.
+///
+/// [`union`]: BTreeSet::union
+#[must_use = "this returns the union as an iterator, \
+ without modifying either input set"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Union<'a, T: 'a>(MergeIterInner<Iter<'a, T>>);
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<T: fmt::Debug> fmt::Debug for Union<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("Union").field(&self.0).finish()
+ }
+}
+
+// This constant is used by functions that compare two sets.
+// It estimates the relative size at which searching performs better
+// than iterating, based on the benchmarks in
+// https://github.com/ssomers/rust_bench_btreeset_intersection.
+// It's used to divide rather than multiply sizes, to rule out overflow,
+// and it's a power of two to make that division cheap.
+const ITER_PERFORMANCE_TIPPING_SIZE_DIFF: usize = 16;
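
A tiny sketch of how this constant steers the strategy choice in `difference`, `intersection`, and `is_subset` below: per-element lookup in the larger set is preferred only when the smaller set is at most 1/16 the size of the larger one; otherwise both iterators are walked jointly. The `prefers_search` helper and the sample sizes are assumptions for illustration:

const ITER_PERFORMANCE_TIPPING_SIZE_DIFF: usize = 16; // mirrors the constant above

fn prefers_search(small_len: usize, large_len: usize) -> bool {
    small_len <= large_len / ITER_PERFORMANCE_TIPPING_SIZE_DIFF
}

fn main() {
    assert!(prefers_search(10, 1_000));   // 10 <= 1_000 / 16: per-element lookups win
    assert!(!prefers_search(100, 1_000)); // 100 > 1_000 / 16: stitch both iterators instead
}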
+
+impl<T> BTreeSet<T> {
+ /// Makes a new, empty `BTreeSet`.
+ ///
+ /// Does not allocate anything on its own.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #![allow(unused_mut)]
+ /// use std::collections::BTreeSet;
+ ///
+ /// let mut set: BTreeSet<i32> = BTreeSet::new();
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_btree_new", issue = "71835")]
+ #[must_use]
+ pub const fn new() -> BTreeSet<T> {
+ BTreeSet { map: BTreeMap::new() }
+ }
+}
+
+impl<T, A: Allocator + Clone> BTreeSet<T, A> {
+ /// Makes a new `BTreeSet` with a reasonable choice of B.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #![allow(unused_mut)]
+ /// # #![feature(allocator_api)]
+ /// # #![feature(btreemap_alloc)]
+ /// use std::collections::BTreeSet;
+ /// use std::alloc::Global;
+ ///
+ /// let mut set: BTreeSet<i32> = BTreeSet::new_in(Global);
+ /// ```
+ #[unstable(feature = "btreemap_alloc", issue = "32838")]
+ pub fn new_in(alloc: A) -> BTreeSet<T, A> {
+ BTreeSet { map: BTreeMap::new_in(alloc) }
+ }
+
+ /// Constructs a double-ended iterator over a sub-range of elements in the set.
+ /// The simplest way is to use the range syntax `min..max`, thus `range(min..max)` will
+ /// yield elements from min (inclusive) to max (exclusive).
+ /// The range may also be entered as `(Bound<T>, Bound<T>)`, so for example
+ /// `range((Excluded(4), Included(10)))` will yield a left-exclusive, right-inclusive
+ /// range from 4 to 10.
+ ///
+ /// # Panics
+ ///
+ /// Panics if range `start > end`.
+ /// Panics if range `start == end` and both bounds are `Excluded`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ /// use std::ops::Bound::Included;
+ ///
+ /// let mut set = BTreeSet::new();
+ /// set.insert(3);
+ /// set.insert(5);
+ /// set.insert(8);
+ /// for &elem in set.range((Included(&4), Included(&8))) {
+ /// println!("{elem}");
+ /// }
+ /// assert_eq!(Some(&5), set.range(4..).next());
+ /// ```
+ #[stable(feature = "btree_range", since = "1.17.0")]
+ pub fn range<K: ?Sized, R>(&self, range: R) -> Range<'_, T>
+ where
+ K: Ord,
+ T: Borrow<K> + Ord,
+ R: RangeBounds<K>,
+ {
+ Range { iter: self.map.range(range) }
+ }
+
+ /// Visits the elements representing the difference,
+ /// i.e., the elements that are in `self` but not in `other`,
+ /// in ascending order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let mut a = BTreeSet::new();
+ /// a.insert(1);
+ /// a.insert(2);
+ ///
+ /// let mut b = BTreeSet::new();
+ /// b.insert(2);
+ /// b.insert(3);
+ ///
+ /// let diff: Vec<_> = a.difference(&b).cloned().collect();
+ /// assert_eq!(diff, [1]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn difference<'a>(&'a self, other: &'a BTreeSet<T, A>) -> Difference<'a, T, A>
+ where
+ T: Ord,
+ {
+ let (self_min, self_max) =
+ if let (Some(self_min), Some(self_max)) = (self.first(), self.last()) {
+ (self_min, self_max)
+ } else {
+ return Difference { inner: DifferenceInner::Iterate(self.iter()) };
+ };
+ let (other_min, other_max) =
+ if let (Some(other_min), Some(other_max)) = (other.first(), other.last()) {
+ (other_min, other_max)
+ } else {
+ return Difference { inner: DifferenceInner::Iterate(self.iter()) };
+ };
+ Difference {
+ inner: match (self_min.cmp(other_max), self_max.cmp(other_min)) {
+ (Greater, _) | (_, Less) => DifferenceInner::Iterate(self.iter()),
+ (Equal, _) => {
+ let mut self_iter = self.iter();
+ self_iter.next();
+ DifferenceInner::Iterate(self_iter)
+ }
+ (_, Equal) => {
+ let mut self_iter = self.iter();
+ self_iter.next_back();
+ DifferenceInner::Iterate(self_iter)
+ }
+ _ if self.len() <= other.len() / ITER_PERFORMANCE_TIPPING_SIZE_DIFF => {
+ DifferenceInner::Search { self_iter: self.iter(), other_set: other }
+ }
+ _ => DifferenceInner::Stitch {
+ self_iter: self.iter(),
+ other_iter: other.iter().peekable(),
+ },
+ },
+ }
+ }
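
Before choosing between the search and stitch strategies, the method above compares the two sets' minimum and maximum elements: if their ranges do not overlap at all, no element-wise work is needed. A small sketch of that pre-check on plain values; the `ranges_overlap` helper and the sample numbers are assumptions for illustration:

// Disjointness pre-check mirroring the (self_min, self_max) vs (other_min, other_max)
// comparisons above: if one range ends before the other begins, skip element-wise work.
fn ranges_overlap(self_min: i32, self_max: i32, other_min: i32, other_max: i32) -> bool {
    !(self_min > other_max || self_max < other_min)
}

fn main() {
    assert!(!ranges_overlap(1, 3, 10, 20)); // disjoint: the difference is all of `self`
    assert!(ranges_overlap(1, 15, 10, 20)); // overlapping: fall through to search or stitch
}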
+
+ /// Visits the elements representing the symmetric difference,
+ /// i.e., the elements that are in `self` or in `other` but not in both,
+ /// in ascending order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let mut a = BTreeSet::new();
+ /// a.insert(1);
+ /// a.insert(2);
+ ///
+ /// let mut b = BTreeSet::new();
+ /// b.insert(2);
+ /// b.insert(3);
+ ///
+ /// let sym_diff: Vec<_> = a.symmetric_difference(&b).cloned().collect();
+ /// assert_eq!(sym_diff, [1, 3]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn symmetric_difference<'a>(
+ &'a self,
+ other: &'a BTreeSet<T, A>,
+ ) -> SymmetricDifference<'a, T>
+ where
+ T: Ord,
+ {
+ SymmetricDifference(MergeIterInner::new(self.iter(), other.iter()))
+ }
+
+ /// Visits the elements representing the intersection,
+ /// i.e., the elements that are both in `self` and `other`,
+ /// in ascending order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let mut a = BTreeSet::new();
+ /// a.insert(1);
+ /// a.insert(2);
+ ///
+ /// let mut b = BTreeSet::new();
+ /// b.insert(2);
+ /// b.insert(3);
+ ///
+ /// let intersection: Vec<_> = a.intersection(&b).cloned().collect();
+ /// assert_eq!(intersection, [2]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn intersection<'a>(&'a self, other: &'a BTreeSet<T, A>) -> Intersection<'a, T, A>
+ where
+ T: Ord,
+ {
+ let (self_min, self_max) =
+ if let (Some(self_min), Some(self_max)) = (self.first(), self.last()) {
+ (self_min, self_max)
+ } else {
+ return Intersection { inner: IntersectionInner::Answer(None) };
+ };
+ let (other_min, other_max) =
+ if let (Some(other_min), Some(other_max)) = (other.first(), other.last()) {
+ (other_min, other_max)
+ } else {
+ return Intersection { inner: IntersectionInner::Answer(None) };
+ };
+ Intersection {
+ inner: match (self_min.cmp(other_max), self_max.cmp(other_min)) {
+ (Greater, _) | (_, Less) => IntersectionInner::Answer(None),
+ (Equal, _) => IntersectionInner::Answer(Some(self_min)),
+ (_, Equal) => IntersectionInner::Answer(Some(self_max)),
+ _ if self.len() <= other.len() / ITER_PERFORMANCE_TIPPING_SIZE_DIFF => {
+ IntersectionInner::Search { small_iter: self.iter(), large_set: other }
+ }
+ _ if other.len() <= self.len() / ITER_PERFORMANCE_TIPPING_SIZE_DIFF => {
+ IntersectionInner::Search { small_iter: other.iter(), large_set: self }
+ }
+ _ => IntersectionInner::Stitch { a: self.iter(), b: other.iter() },
+ },
+ }
+ }
+
+ /// Visits the elements representing the union,
+ /// i.e., all the elements in `self` or `other`, without duplicates,
+ /// in ascending order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let mut a = BTreeSet::new();
+ /// a.insert(1);
+ ///
+ /// let mut b = BTreeSet::new();
+ /// b.insert(2);
+ ///
+ /// let union: Vec<_> = a.union(&b).cloned().collect();
+ /// assert_eq!(union, [1, 2]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn union<'a>(&'a self, other: &'a BTreeSet<T, A>) -> Union<'a, T>
+ where
+ T: Ord,
+ {
+ Union(MergeIterInner::new(self.iter(), other.iter()))
+ }
+
+ /// Clears the set, removing all elements.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let mut v = BTreeSet::new();
+ /// v.insert(1);
+ /// v.clear();
+ /// assert!(v.is_empty());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn clear(&mut self)
+ where
+ A: Clone,
+ {
+ self.map.clear()
+ }
+
+ /// Returns `true` if the set contains an element equal to the value.
+ ///
+ /// The value may be any borrowed form of the set's element type,
+ /// but the ordering on the borrowed form *must* match the
+ /// ordering on the element type.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let set = BTreeSet::from([1, 2, 3]);
+ /// assert_eq!(set.contains(&1), true);
+ /// assert_eq!(set.contains(&4), false);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn contains<Q: ?Sized>(&self, value: &Q) -> bool
+ where
+ T: Borrow<Q> + Ord,
+ Q: Ord,
+ {
+ self.map.contains_key(value)
+ }
+
+ /// Returns a reference to the element in the set, if any, that is equal to
+ /// the value.
+ ///
+ /// The value may be any borrowed form of the set's element type,
+ /// but the ordering on the borrowed form *must* match the
+ /// ordering on the element type.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let set = BTreeSet::from([1, 2, 3]);
+ /// assert_eq!(set.get(&2), Some(&2));
+ /// assert_eq!(set.get(&4), None);
+ /// ```
+ #[stable(feature = "set_recovery", since = "1.9.0")]
+ pub fn get<Q: ?Sized>(&self, value: &Q) -> Option<&T>
+ where
+ T: Borrow<Q> + Ord,
+ Q: Ord,
+ {
+ Recover::get(&self.map, value)
+ }
+
+ /// Returns `true` if `self` has no elements in common with `other`.
+ /// This is equivalent to checking for an empty intersection.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let a = BTreeSet::from([1, 2, 3]);
+ /// let mut b = BTreeSet::new();
+ ///
+ /// assert_eq!(a.is_disjoint(&b), true);
+ /// b.insert(4);
+ /// assert_eq!(a.is_disjoint(&b), true);
+ /// b.insert(1);
+ /// assert_eq!(a.is_disjoint(&b), false);
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn is_disjoint(&self, other: &BTreeSet<T, A>) -> bool
+ where
+ T: Ord,
+ {
+ self.intersection(other).next().is_none()
+ }
+
+ /// Returns `true` if the set is a subset of another,
+ /// i.e., `other` contains at least all the elements in `self`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let sup = BTreeSet::from([1, 2, 3]);
+ /// let mut set = BTreeSet::new();
+ ///
+ /// assert_eq!(set.is_subset(&sup), true);
+ /// set.insert(2);
+ /// assert_eq!(set.is_subset(&sup), true);
+ /// set.insert(4);
+ /// assert_eq!(set.is_subset(&sup), false);
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn is_subset(&self, other: &BTreeSet<T, A>) -> bool
+ where
+ T: Ord,
+ {
+ // Same result as self.difference(other).next().is_none()
+ // but the code below is faster (hugely in some cases).
+ if self.len() > other.len() {
+ return false;
+ }
+ let (self_min, self_max) =
+ if let (Some(self_min), Some(self_max)) = (self.first(), self.last()) {
+ (self_min, self_max)
+ } else {
+ return true; // self is empty
+ };
+ let (other_min, other_max) =
+ if let (Some(other_min), Some(other_max)) = (other.first(), other.last()) {
+ (other_min, other_max)
+ } else {
+ return false; // other is empty
+ };
+ let mut self_iter = self.iter();
+ match self_min.cmp(other_min) {
+ Less => return false,
+ Equal => {
+ self_iter.next();
+ }
+ Greater => (),
+ }
+ match self_max.cmp(other_max) {
+ Greater => return false,
+ Equal => {
+ self_iter.next_back();
+ }
+ Less => (),
+ }
+ if self_iter.len() <= other.len() / ITER_PERFORMANCE_TIPPING_SIZE_DIFF {
+ for next in self_iter {
+ if !other.contains(next) {
+ return false;
+ }
+ }
+ } else {
+ let mut other_iter = other.iter();
+ other_iter.next();
+ other_iter.next_back();
+ let mut self_next = self_iter.next();
+ while let Some(self1) = self_next {
+ match other_iter.next().map_or(Less, |other1| self1.cmp(other1)) {
+ Less => return false,
+ Equal => self_next = self_iter.next(),
+ Greater => (),
+ }
+ }
+ }
+ true
+ }
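
The non-search branch above walks both sets in lockstep, advancing through `other` until it meets or passes each element of `self`. A simplified standalone version of that walk over sorted, deduplicated slices; the `is_subset_sorted` name and the sample data are illustrative assumptions:

fn is_subset_sorted(a: &[i32], b: &[i32]) -> bool {
    let mut b_iter = b.iter();
    'outer: for x in a {
        // Advance through `b` until we meet or pass `x`.
        for y in b_iter.by_ref() {
            if y == x {
                continue 'outer; // found: move on to the next element of `a`
            }
            if y > x {
                return false; // passed `x` without finding it
            }
        }
        return false; // `b` is exhausted but `a` is not
    }
    true
}

fn main() {
    assert!(is_subset_sorted(&[2, 4], &[1, 2, 3, 4, 5]));
    assert!(!is_subset_sorted(&[2, 6], &[1, 2, 3, 4, 5]));
}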
+
+ /// Returns `true` if the set is a superset of another,
+ /// i.e., `self` contains at least all the elements in `other`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let sub = BTreeSet::from([1, 2]);
+ /// let mut set = BTreeSet::new();
+ ///
+ /// assert_eq!(set.is_superset(&sub), false);
+ ///
+ /// set.insert(0);
+ /// set.insert(1);
+ /// assert_eq!(set.is_superset(&sub), false);
+ ///
+ /// set.insert(2);
+ /// assert_eq!(set.is_superset(&sub), true);
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn is_superset(&self, other: &BTreeSet<T, A>) -> bool
+ where
+ T: Ord,
+ {
+ other.is_subset(self)
+ }
+
+ /// Returns a reference to the first element in the set, if any.
+ /// This element is always the minimum of all elements in the set.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(map_first_last)]
+ /// use std::collections::BTreeSet;
+ ///
+ /// let mut set = BTreeSet::new();
+ /// assert_eq!(set.first(), None);
+ /// set.insert(1);
+ /// assert_eq!(set.first(), Some(&1));
+ /// set.insert(2);
+ /// assert_eq!(set.first(), Some(&1));
+ /// ```
+ #[must_use]
+ #[unstable(feature = "map_first_last", issue = "62924")]
+ pub fn first(&self) -> Option<&T>
+ where
+ T: Ord,
+ {
+ self.map.first_key_value().map(|(k, _)| k)
+ }
+
+ /// Returns a reference to the last element in the set, if any.
+ /// This element is always the maximum of all elements in the set.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// #![feature(map_first_last)]
+ /// use std::collections::BTreeSet;
+ ///
+ /// let mut set = BTreeSet::new();
+ /// assert_eq!(set.last(), None);
+ /// set.insert(1);
+ /// assert_eq!(set.last(), Some(&1));
+ /// set.insert(2);
+ /// assert_eq!(set.last(), Some(&2));
+ /// ```
+ #[must_use]
+ #[unstable(feature = "map_first_last", issue = "62924")]
+ pub fn last(&self) -> Option<&T>
+ where
+ T: Ord,
+ {
+ self.map.last_key_value().map(|(k, _)| k)
+ }
+
+ /// Removes the first element from the set and returns it, if any.
+ /// The first element is always the minimum element in the set.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(map_first_last)]
+ /// use std::collections::BTreeSet;
+ ///
+ /// let mut set = BTreeSet::new();
+ ///
+ /// set.insert(1);
+ /// while let Some(n) = set.pop_first() {
+ /// assert_eq!(n, 1);
+ /// }
+ /// assert!(set.is_empty());
+ /// ```
+ #[unstable(feature = "map_first_last", issue = "62924")]
+ pub fn pop_first(&mut self) -> Option<T>
+ where
+ T: Ord,
+ {
+ self.map.pop_first().map(|kv| kv.0)
+ }
+
+ /// Removes the last element from the set and returns it, if any.
+ /// The last element is always the maximum element in the set.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(map_first_last)]
+ /// use std::collections::BTreeSet;
+ ///
+ /// let mut set = BTreeSet::new();
+ ///
+ /// set.insert(1);
+ /// while let Some(n) = set.pop_last() {
+ /// assert_eq!(n, 1);
+ /// }
+ /// assert!(set.is_empty());
+ /// ```
+ #[unstable(feature = "map_first_last", issue = "62924")]
+ pub fn pop_last(&mut self) -> Option<T>
+ where
+ T: Ord,
+ {
+ self.map.pop_last().map(|kv| kv.0)
+ }
+
+ /// Adds a value to the set.
+ ///
+ /// Returns whether the value was newly inserted. That is:
+ ///
+ /// - If the set did not previously contain an equal value, `true` is
+ /// returned.
+ /// - If the set already contained an equal value, `false` is returned, and
+ /// the entry is not updated.
+ ///
+ /// See the [module-level documentation] for more.
+ ///
+ /// [module-level documentation]: index.html#insert-and-complex-keys
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let mut set = BTreeSet::new();
+ ///
+ /// assert_eq!(set.insert(2), true);
+ /// assert_eq!(set.insert(2), false);
+ /// assert_eq!(set.len(), 1);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn insert(&mut self, value: T) -> bool
+ where
+ T: Ord,
+ {
+ self.map.insert(value, SetValZST::default()).is_none()
+ }
+
+ /// Adds a value to the set, replacing the existing element, if any, that is
+ /// equal to the value. Returns the replaced element.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let mut set = BTreeSet::new();
+ /// set.insert(Vec::<i32>::new());
+ ///
+ /// assert_eq!(set.get(&[][..]).unwrap().capacity(), 0);
+ /// set.replace(Vec::with_capacity(10));
+ /// assert_eq!(set.get(&[][..]).unwrap().capacity(), 10);
+ /// ```
+ #[stable(feature = "set_recovery", since = "1.9.0")]
+ pub fn replace(&mut self, value: T) -> Option<T>
+ where
+ T: Ord,
+ {
+ Recover::replace(&mut self.map, value)
+ }
+
+ /// If the set contains an element equal to the value, removes it from the
+ /// set and drops it. Returns whether such an element was present.
+ ///
+ /// The value may be any borrowed form of the set's element type,
+ /// but the ordering on the borrowed form *must* match the
+ /// ordering on the element type.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let mut set = BTreeSet::new();
+ ///
+ /// set.insert(2);
+ /// assert_eq!(set.remove(&2), true);
+ /// assert_eq!(set.remove(&2), false);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn remove<Q: ?Sized>(&mut self, value: &Q) -> bool
+ where
+ T: Borrow<Q> + Ord,
+ Q: Ord,
+ {
+ self.map.remove(value).is_some()
+ }
+
+ /// Removes and returns the element in the set, if any, that is equal to
+ /// the value.
+ ///
+ /// The value may be any borrowed form of the set's element type,
+ /// but the ordering on the borrowed form *must* match the
+ /// ordering on the element type.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let mut set = BTreeSet::from([1, 2, 3]);
+ /// assert_eq!(set.take(&2), Some(2));
+ /// assert_eq!(set.take(&2), None);
+ /// ```
+ #[stable(feature = "set_recovery", since = "1.9.0")]
+ pub fn take<Q: ?Sized>(&mut self, value: &Q) -> Option<T>
+ where
+ T: Borrow<Q> + Ord,
+ Q: Ord,
+ {
+ Recover::take(&mut self.map, value)
+ }
+
+ /// Retains only the elements specified by the predicate.
+ ///
+ /// In other words, remove all elements `e` for which `f(&e)` returns `false`.
+ /// The elements are visited in ascending order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let mut set = BTreeSet::from([1, 2, 3, 4, 5, 6]);
+ /// // Keep only the even numbers.
+ /// set.retain(|&k| k % 2 == 0);
+ /// assert!(set.iter().eq([2, 4, 6].iter()));
+ /// ```
+ #[stable(feature = "btree_retain", since = "1.53.0")]
+ pub fn retain<F>(&mut self, mut f: F)
+ where
+ T: Ord,
+ F: FnMut(&T) -> bool,
+ {
+ self.drain_filter(|v| !f(v));
+ }
+
+ /// Moves all elements from `other` into `self`, leaving `other` empty.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let mut a = BTreeSet::new();
+ /// a.insert(1);
+ /// a.insert(2);
+ /// a.insert(3);
+ ///
+ /// let mut b = BTreeSet::new();
+ /// b.insert(3);
+ /// b.insert(4);
+ /// b.insert(5);
+ ///
+ /// a.append(&mut b);
+ ///
+ /// assert_eq!(a.len(), 5);
+ /// assert_eq!(b.len(), 0);
+ ///
+ /// assert!(a.contains(&1));
+ /// assert!(a.contains(&2));
+ /// assert!(a.contains(&3));
+ /// assert!(a.contains(&4));
+ /// assert!(a.contains(&5));
+ /// ```
+ #[stable(feature = "btree_append", since = "1.11.0")]
+ pub fn append(&mut self, other: &mut Self)
+ where
+ T: Ord,
+ A: Clone,
+ {
+ self.map.append(&mut other.map);
+ }
+
+ /// Splits the collection into two at the value. Returns a new collection
+ /// with all elements greater than or equal to the value.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let mut a = BTreeSet::new();
+ /// a.insert(1);
+ /// a.insert(2);
+ /// a.insert(3);
+ /// a.insert(17);
+ /// a.insert(41);
+ ///
+ /// let b = a.split_off(&3);
+ ///
+ /// assert_eq!(a.len(), 2);
+ /// assert_eq!(b.len(), 3);
+ ///
+ /// assert!(a.contains(&1));
+ /// assert!(a.contains(&2));
+ ///
+ /// assert!(b.contains(&3));
+ /// assert!(b.contains(&17));
+ /// assert!(b.contains(&41));
+ /// ```
+ #[stable(feature = "btree_split_off", since = "1.11.0")]
+ pub fn split_off<Q: ?Sized + Ord>(&mut self, value: &Q) -> Self
+ where
+ T: Borrow<Q> + Ord,
+ A: Clone,
+ {
+ BTreeSet { map: self.map.split_off(value) }
+ }
+
+ /// Creates an iterator that visits all elements in ascending order and
+ /// uses a closure to determine if an element should be removed.
+ ///
+ /// If the closure returns `true`, the element is removed from the set and
+ /// yielded. If the closure returns `false`, or panics, the element remains
+ /// in the set and will not be yielded.
+ ///
+ /// If the iterator is only partially consumed or not consumed at all, each
+ /// of the remaining elements is still subjected to the closure and removed
+ /// and dropped if it returns `true`.
+ ///
+ /// It is unspecified how many more elements will be subjected to the
+ /// closure if a panic occurs in the closure, or if a panic occurs while
+ /// dropping an element, or if the `DrainFilter` itself is leaked.
+ ///
+ /// # Examples
+ ///
+ /// Splitting a set into even and odd values, reusing the original set:
+ ///
+ /// ```
+ /// #![feature(btree_drain_filter)]
+ /// use std::collections::BTreeSet;
+ ///
+ /// let mut set: BTreeSet<i32> = (0..8).collect();
+ /// let evens: BTreeSet<_> = set.drain_filter(|v| v % 2 == 0).collect();
+ /// let odds = set;
+ /// assert_eq!(evens.into_iter().collect::<Vec<_>>(), vec![0, 2, 4, 6]);
+ /// assert_eq!(odds.into_iter().collect::<Vec<_>>(), vec![1, 3, 5, 7]);
+ /// ```
+ #[unstable(feature = "btree_drain_filter", issue = "70530")]
+ pub fn drain_filter<'a, F>(&'a mut self, pred: F) -> DrainFilter<'a, T, F, A>
+ where
+ T: Ord,
+ F: 'a + FnMut(&T) -> bool,
+ {
+ let (inner, alloc) = self.map.drain_filter_inner();
+ DrainFilter { pred, inner, alloc }
+ }
+
+ /// Gets an iterator that visits the elements in the `BTreeSet` in ascending
+ /// order.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let set = BTreeSet::from([1, 2, 3]);
+ /// let mut set_iter = set.iter();
+ /// assert_eq!(set_iter.next(), Some(&1));
+ /// assert_eq!(set_iter.next(), Some(&2));
+ /// assert_eq!(set_iter.next(), Some(&3));
+ /// assert_eq!(set_iter.next(), None);
+ /// ```
+ ///
+ /// Values returned by the iterator are returned in ascending order:
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let set = BTreeSet::from([3, 1, 2]);
+ /// let mut set_iter = set.iter();
+ /// assert_eq!(set_iter.next(), Some(&1));
+ /// assert_eq!(set_iter.next(), Some(&2));
+ /// assert_eq!(set_iter.next(), Some(&3));
+ /// assert_eq!(set_iter.next(), None);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn iter(&self) -> Iter<'_, T> {
+ Iter { iter: self.map.keys() }
+ }
+
+ /// Returns the number of elements in the set.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let mut v = BTreeSet::new();
+ /// assert_eq!(v.len(), 0);
+ /// v.insert(1);
+ /// assert_eq!(v.len(), 1);
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_btree_new", issue = "71835")]
+ pub const fn len(&self) -> usize {
+ self.map.len()
+ }
+
+ /// Returns `true` if the set contains no elements.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let mut v = BTreeSet::new();
+ /// assert!(v.is_empty());
+ /// v.insert(1);
+ /// assert!(!v.is_empty());
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[rustc_const_unstable(feature = "const_btree_new", issue = "71835")]
+ pub const fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Ord> FromIterator<T> for BTreeSet<T> {
+ fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> BTreeSet<T> {
+ let mut inputs: Vec<_> = iter.into_iter().collect();
+
+ if inputs.is_empty() {
+ return BTreeSet::new();
+ }
+
+ // use stable sort to preserve the insertion order.
+ inputs.sort();
+ BTreeSet::from_sorted_iter(inputs.into_iter(), Global)
+ }
+}
+
+impl<T: Ord, A: Allocator + Clone> BTreeSet<T, A> {
+ fn from_sorted_iter<I: Iterator<Item = T>>(iter: I, alloc: A) -> BTreeSet<T, A> {
+ let iter = iter.map(|k| (k, SetValZST::default()));
+ let map = BTreeMap::bulk_build_from_sorted_iter(iter, alloc);
+ BTreeSet { map }
+ }
+}
+
+#[stable(feature = "std_collections_from_array", since = "1.56.0")]
+impl<T: Ord, const N: usize> From<[T; N]> for BTreeSet<T> {
+ /// Converts a `[T; N]` into a `BTreeSet<T>`.
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let set1 = BTreeSet::from([1, 2, 3, 4]);
+ /// let set2: BTreeSet<_> = [1, 2, 3, 4].into();
+ /// assert_eq!(set1, set2);
+ /// ```
+ fn from(mut arr: [T; N]) -> Self {
+ if N == 0 {
+ return BTreeSet::new();
+ }
+
+ // use stable sort to preserve the insertion order.
+ arr.sort();
+ let iter = IntoIterator::into_iter(arr).map(|k| (k, SetValZST::default()));
+ let map = BTreeMap::bulk_build_from_sorted_iter(iter, Global);
+ BTreeSet { map }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator + Clone> IntoIterator for BTreeSet<T, A> {
+ type Item = T;
+ type IntoIter = IntoIter<T, A>;
+
+ /// Gets an iterator for moving out the `BTreeSet`'s contents.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let set = BTreeSet::from([1, 2, 3, 4]);
+ ///
+ /// let v: Vec<_> = set.into_iter().collect();
+ /// assert_eq!(v, [1, 2, 3, 4]);
+ /// ```
+ fn into_iter(self) -> IntoIter<T, A> {
+ IntoIter { iter: self.map.into_iter() }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T, A: Allocator + Clone> IntoIterator for &'a BTreeSet<T, A> {
+ type Item = &'a T;
+ type IntoIter = Iter<'a, T>;
+
+ fn into_iter(self) -> Iter<'a, T> {
+ self.iter()
+ }
+}
+
+/// An iterator produced by calling `drain_filter` on a [`BTreeSet`].
+#[unstable(feature = "btree_drain_filter", issue = "70530")]
+pub struct DrainFilter<
+ 'a,
+ T,
+ F,
+ #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator + Clone = Global,
+> where
+ T: 'a,
+ F: 'a + FnMut(&T) -> bool,
+{
+ pred: F,
+ inner: super::map::DrainFilterInner<'a, T, SetValZST>,
+ /// The `BTreeMap` will outlive this `DrainFilter`, so we don't care about drop order for `alloc`.
+ alloc: A,
+}
+
+#[unstable(feature = "btree_drain_filter", issue = "70530")]
+impl<T, F, A: Allocator + Clone> Drop for DrainFilter<'_, T, F, A>
+where
+ F: FnMut(&T) -> bool,
+{
+ fn drop(&mut self) {
+ self.for_each(drop);
+ }
+}
+
+#[unstable(feature = "btree_drain_filter", issue = "70530")]
+impl<T, F, A: Allocator + Clone> fmt::Debug for DrainFilter<'_, T, F, A>
+where
+ T: fmt::Debug,
+ F: FnMut(&T) -> bool,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("DrainFilter").field(&self.inner.peek().map(|(k, _)| k)).finish()
+ }
+}
+
+#[unstable(feature = "btree_drain_filter", issue = "70530")]
+impl<'a, T, F, A: Allocator + Clone> Iterator for DrainFilter<'_, T, F, A>
+where
+ F: 'a + FnMut(&T) -> bool,
+{
+ type Item = T;
+
+ fn next(&mut self) -> Option<T> {
+ let pred = &mut self.pred;
+ let mut mapped_pred = |k: &T, _v: &mut SetValZST| pred(k);
+ self.inner.next(&mut mapped_pred, self.alloc.clone()).map(|(k, _)| k)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.inner.size_hint()
+ }
+}
+
+#[unstable(feature = "btree_drain_filter", issue = "70530")]
+impl<T, F, A: Allocator + Clone> FusedIterator for DrainFilter<'_, T, F, A> where
+ F: FnMut(&T) -> bool
+{
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Ord, A: Allocator + Clone> Extend<T> for BTreeSet<T, A> {
+ #[inline]
+ fn extend<Iter: IntoIterator<Item = T>>(&mut self, iter: Iter) {
+ iter.into_iter().for_each(move |elem| {
+ self.insert(elem);
+ });
+ }
+
+ #[inline]
+ fn extend_one(&mut self, elem: T) {
+ self.insert(elem);
+ }
+}
+
+#[stable(feature = "extend_ref", since = "1.2.0")]
+impl<'a, T: 'a + Ord + Copy, A: Allocator + Clone> Extend<&'a T> for BTreeSet<T, A> {
+ fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
+ self.extend(iter.into_iter().cloned());
+ }
+
+ #[inline]
+ fn extend_one(&mut self, &elem: &'a T) {
+ self.insert(elem);
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Default for BTreeSet<T> {
+ /// Creates an empty `BTreeSet`.
+ fn default() -> BTreeSet<T> {
+ BTreeSet::new()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Ord + Clone, A: Allocator + Clone> Sub<&BTreeSet<T, A>> for &BTreeSet<T, A> {
+ type Output = BTreeSet<T, A>;
+
+ /// Returns the difference of `self` and `rhs` as a new `BTreeSet<T>`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let a = BTreeSet::from([1, 2, 3]);
+ /// let b = BTreeSet::from([3, 4, 5]);
+ ///
+ /// let result = &a - &b;
+ /// assert_eq!(result, BTreeSet::from([1, 2]));
+ /// ```
+ fn sub(self, rhs: &BTreeSet<T, A>) -> BTreeSet<T, A> {
+ BTreeSet::from_sorted_iter(
+ self.difference(rhs).cloned(),
+ ManuallyDrop::into_inner(self.map.alloc.clone()),
+ )
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Ord + Clone, A: Allocator + Clone> BitXor<&BTreeSet<T, A>> for &BTreeSet<T, A> {
+ type Output = BTreeSet<T, A>;
+
+ /// Returns the symmetric difference of `self` and `rhs` as a new `BTreeSet<T>`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let a = BTreeSet::from([1, 2, 3]);
+ /// let b = BTreeSet::from([2, 3, 4]);
+ ///
+ /// let result = &a ^ &b;
+ /// assert_eq!(result, BTreeSet::from([1, 4]));
+ /// ```
+ fn bitxor(self, rhs: &BTreeSet<T, A>) -> BTreeSet<T, A> {
+ BTreeSet::from_sorted_iter(
+ self.symmetric_difference(rhs).cloned(),
+ ManuallyDrop::into_inner(self.map.alloc.clone()),
+ )
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Ord + Clone, A: Allocator + Clone> BitAnd<&BTreeSet<T, A>> for &BTreeSet<T, A> {
+ type Output = BTreeSet<T, A>;
+
+ /// Returns the intersection of `self` and `rhs` as a new `BTreeSet<T>`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let a = BTreeSet::from([1, 2, 3]);
+ /// let b = BTreeSet::from([2, 3, 4]);
+ ///
+ /// let result = &a & &b;
+ /// assert_eq!(result, BTreeSet::from([2, 3]));
+ /// ```
+ fn bitand(self, rhs: &BTreeSet<T, A>) -> BTreeSet<T, A> {
+ BTreeSet::from_sorted_iter(
+ self.intersection(rhs).cloned(),
+ ManuallyDrop::into_inner(self.map.alloc.clone()),
+ )
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Ord + Clone, A: Allocator + Clone> BitOr<&BTreeSet<T, A>> for &BTreeSet<T, A> {
+ type Output = BTreeSet<T, A>;
+
+ /// Returns the union of `self` and `rhs` as a new `BTreeSet<T>`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::BTreeSet;
+ ///
+ /// let a = BTreeSet::from([1, 2, 3]);
+ /// let b = BTreeSet::from([3, 4, 5]);
+ ///
+ /// let result = &a | &b;
+ /// assert_eq!(result, BTreeSet::from([1, 2, 3, 4, 5]));
+ /// ```
+ fn bitor(self, rhs: &BTreeSet<T, A>) -> BTreeSet<T, A> {
+ BTreeSet::from_sorted_iter(
+ self.union(rhs).cloned(),
+ ManuallyDrop::into_inner(self.map.alloc.clone()),
+ )
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Debug, A: Allocator + Clone> Debug for BTreeSet<T, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_set().entries(self.iter()).finish()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Clone for Iter<'_, T> {
+ fn clone(&self) -> Self {
+ Iter { iter: self.iter.clone() }
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> Iterator for Iter<'a, T> {
+ type Item = &'a T;
+
+ fn next(&mut self) -> Option<&'a T> {
+ self.iter.next()
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+
+ fn last(mut self) -> Option<&'a T> {
+ self.next_back()
+ }
+
+ fn min(mut self) -> Option<&'a T> {
+ self.next()
+ }
+
+ fn max(mut self) -> Option<&'a T> {
+ self.next_back()
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> DoubleEndedIterator for Iter<'a, T> {
+ fn next_back(&mut self) -> Option<&'a T> {
+ self.iter.next_back()
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> ExactSizeIterator for Iter<'_, T> {
+ fn len(&self) -> usize {
+ self.iter.len()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T> FusedIterator for Iter<'_, T> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator + Clone> Iterator for IntoIter<T, A> {
+ type Item = T;
+
+ fn next(&mut self) -> Option<T> {
+ self.iter.next().map(|(k, _)| k)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator + Clone> DoubleEndedIterator for IntoIter<T, A> {
+ fn next_back(&mut self) -> Option<T> {
+ self.iter.next_back().map(|(k, _)| k)
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator + Clone> ExactSizeIterator for IntoIter<T, A> {
+ fn len(&self) -> usize {
+ self.iter.len()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T, A: Allocator + Clone> FusedIterator for IntoIter<T, A> {}
+
+#[stable(feature = "btree_range", since = "1.17.0")]
+impl<T> Clone for Range<'_, T> {
+ fn clone(&self) -> Self {
+ Range { iter: self.iter.clone() }
+ }
+}
+
+#[stable(feature = "btree_range", since = "1.17.0")]
+impl<'a, T> Iterator for Range<'a, T> {
+ type Item = &'a T;
+
+ fn next(&mut self) -> Option<&'a T> {
+ self.iter.next().map(|(k, _)| k)
+ }
+
+ fn last(mut self) -> Option<&'a T> {
+ self.next_back()
+ }
+
+ fn min(mut self) -> Option<&'a T> {
+ self.next()
+ }
+
+ fn max(mut self) -> Option<&'a T> {
+ self.next_back()
+ }
+}
+
+#[stable(feature = "btree_range", since = "1.17.0")]
+impl<'a, T> DoubleEndedIterator for Range<'a, T> {
+ fn next_back(&mut self) -> Option<&'a T> {
+ self.iter.next_back().map(|(k, _)| k)
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T> FusedIterator for Range<'_, T> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator + Clone> Clone for Difference<'_, T, A> {
+ fn clone(&self) -> Self {
+ Difference {
+ inner: match &self.inner {
+ DifferenceInner::Stitch { self_iter, other_iter } => DifferenceInner::Stitch {
+ self_iter: self_iter.clone(),
+ other_iter: other_iter.clone(),
+ },
+ DifferenceInner::Search { self_iter, other_set } => {
+ DifferenceInner::Search { self_iter: self_iter.clone(), other_set }
+ }
+ DifferenceInner::Iterate(iter) => DifferenceInner::Iterate(iter.clone()),
+ },
+ }
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T: Ord, A: Allocator + Clone> Iterator for Difference<'a, T, A> {
+ type Item = &'a T;
+
+ fn next(&mut self) -> Option<&'a T> {
+ match &mut self.inner {
+ DifferenceInner::Stitch { self_iter, other_iter } => {
+ let mut self_next = self_iter.next()?;
+ loop {
+ match other_iter.peek().map_or(Less, |other_next| self_next.cmp(other_next)) {
+ Less => return Some(self_next),
+ Equal => {
+ self_next = self_iter.next()?;
+ other_iter.next();
+ }
+ Greater => {
+ other_iter.next();
+ }
+ }
+ }
+ }
+ DifferenceInner::Search { self_iter, other_set } => loop {
+ let self_next = self_iter.next()?;
+ if !other_set.contains(&self_next) {
+ return Some(self_next);
+ }
+ },
+ DifferenceInner::Iterate(iter) => iter.next(),
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let (self_len, other_len) = match &self.inner {
+ DifferenceInner::Stitch { self_iter, other_iter } => {
+ (self_iter.len(), other_iter.len())
+ }
+ DifferenceInner::Search { self_iter, other_set } => (self_iter.len(), other_set.len()),
+ DifferenceInner::Iterate(iter) => (iter.len(), 0),
+ };
+ (self_len.saturating_sub(other_len), Some(self_len))
+ }
+
+ fn min(mut self) -> Option<&'a T> {
+ self.next()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T: Ord, A: Allocator + Clone> FusedIterator for Difference<'_, T, A> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Clone for SymmetricDifference<'_, T> {
+ fn clone(&self) -> Self {
+ SymmetricDifference(self.0.clone())
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T: Ord> Iterator for SymmetricDifference<'a, T> {
+ type Item = &'a T;
+
+ fn next(&mut self) -> Option<&'a T> {
+ loop {
+ let (a_next, b_next) = self.0.nexts(Self::Item::cmp);
+ if a_next.and(b_next).is_none() {
+ return a_next.or(b_next);
+ }
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let (a_len, b_len) = self.0.lens();
+ // No checked_add, because even if a and b refer to the same set,
+ // and T is a zero-sized type, the storage overhead of sets limits
+ // the number of elements to less than half the range of usize.
+ (0, Some(a_len + b_len))
+ }
+
+ fn min(mut self) -> Option<&'a T> {
+ self.next()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T: Ord> FusedIterator for SymmetricDifference<'_, T> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator + Clone> Clone for Intersection<'_, T, A> {
+ fn clone(&self) -> Self {
+ Intersection {
+ inner: match &self.inner {
+ IntersectionInner::Stitch { a, b } => {
+ IntersectionInner::Stitch { a: a.clone(), b: b.clone() }
+ }
+ IntersectionInner::Search { small_iter, large_set } => {
+ IntersectionInner::Search { small_iter: small_iter.clone(), large_set }
+ }
+ IntersectionInner::Answer(answer) => IntersectionInner::Answer(*answer),
+ },
+ }
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T: Ord, A: Allocator + Clone> Iterator for Intersection<'a, T, A> {
+ type Item = &'a T;
+
+ fn next(&mut self) -> Option<&'a T> {
+ match &mut self.inner {
+ IntersectionInner::Stitch { a, b } => {
+ let mut a_next = a.next()?;
+ let mut b_next = b.next()?;
+ loop {
+ match a_next.cmp(b_next) {
+ Less => a_next = a.next()?,
+ Greater => b_next = b.next()?,
+ Equal => return Some(a_next),
+ }
+ }
+ }
+ IntersectionInner::Search { small_iter, large_set } => loop {
+ let small_next = small_iter.next()?;
+ if large_set.contains(&small_next) {
+ return Some(small_next);
+ }
+ },
+ IntersectionInner::Answer(answer) => answer.take(),
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ match &self.inner {
+ IntersectionInner::Stitch { a, b } => (0, Some(min(a.len(), b.len()))),
+ IntersectionInner::Search { small_iter, .. } => (0, Some(small_iter.len())),
+ IntersectionInner::Answer(None) => (0, Some(0)),
+ IntersectionInner::Answer(Some(_)) => (1, Some(1)),
+ }
+ }
+
+ fn min(mut self) -> Option<&'a T> {
+ self.next()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T: Ord, A: Allocator + Clone> FusedIterator for Intersection<'_, T, A> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Clone for Union<'_, T> {
+ fn clone(&self) -> Self {
+ Union(self.0.clone())
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T: Ord> Iterator for Union<'a, T> {
+ type Item = &'a T;
+
+ fn next(&mut self) -> Option<&'a T> {
+ let (a_next, b_next) = self.0.nexts(Self::Item::cmp);
+ a_next.or(b_next)
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let (a_len, b_len) = self.0.lens();
+ // No checked_add - see SymmetricDifference::size_hint.
+ (max(a_len, b_len), Some(a_len + b_len))
+ }
+
+ fn min(mut self) -> Option<&'a T> {
+ self.next()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T: Ord> FusedIterator for Union<'_, T> {}
+
+#[cfg(test)]
+mod tests;
diff --git a/library/alloc/src/collections/btree/set/tests.rs b/library/alloc/src/collections/btree/set/tests.rs
new file mode 100644
index 000000000..502d3e1d1
--- /dev/null
+++ b/library/alloc/src/collections/btree/set/tests.rs
@@ -0,0 +1,856 @@
+use super::super::testing::crash_test::{CrashTestDummy, Panic};
+use super::super::testing::rng::DeterministicRng;
+use super::*;
+use crate::vec::Vec;
+use std::cmp::Ordering;
+use std::hash::{Hash, Hasher};
+use std::iter::FromIterator;
+use std::ops::Bound::{Excluded, Included};
+use std::panic::{catch_unwind, AssertUnwindSafe};
+
+#[test]
+fn test_clone_eq() {
+ let mut m = BTreeSet::new();
+
+ m.insert(1);
+ m.insert(2);
+
+ assert_eq!(m.clone(), m);
+}
+
+#[test]
+fn test_iter_min_max() {
+ let mut a = BTreeSet::new();
+ assert_eq!(a.iter().min(), None);
+ assert_eq!(a.iter().max(), None);
+ assert_eq!(a.range(..).min(), None);
+ assert_eq!(a.range(..).max(), None);
+ assert_eq!(a.difference(&BTreeSet::new()).min(), None);
+ assert_eq!(a.difference(&BTreeSet::new()).max(), None);
+ assert_eq!(a.intersection(&a).min(), None);
+ assert_eq!(a.intersection(&a).max(), None);
+ assert_eq!(a.symmetric_difference(&BTreeSet::new()).min(), None);
+ assert_eq!(a.symmetric_difference(&BTreeSet::new()).max(), None);
+ assert_eq!(a.union(&a).min(), None);
+ assert_eq!(a.union(&a).max(), None);
+ a.insert(1);
+ a.insert(2);
+ assert_eq!(a.iter().min(), Some(&1));
+ assert_eq!(a.iter().max(), Some(&2));
+ assert_eq!(a.range(..).min(), Some(&1));
+ assert_eq!(a.range(..).max(), Some(&2));
+ assert_eq!(a.difference(&BTreeSet::new()).min(), Some(&1));
+ assert_eq!(a.difference(&BTreeSet::new()).max(), Some(&2));
+ assert_eq!(a.intersection(&a).min(), Some(&1));
+ assert_eq!(a.intersection(&a).max(), Some(&2));
+ assert_eq!(a.symmetric_difference(&BTreeSet::new()).min(), Some(&1));
+ assert_eq!(a.symmetric_difference(&BTreeSet::new()).max(), Some(&2));
+ assert_eq!(a.union(&a).min(), Some(&1));
+ assert_eq!(a.union(&a).max(), Some(&2));
+}
+
+fn check<F>(a: &[i32], b: &[i32], expected: &[i32], f: F)
+where
+ F: FnOnce(&BTreeSet<i32>, &BTreeSet<i32>, &mut dyn FnMut(&i32) -> bool) -> bool,
+{
+ let mut set_a = BTreeSet::new();
+ let mut set_b = BTreeSet::new();
+
+ for x in a {
+ assert!(set_a.insert(*x))
+ }
+ for y in b {
+ assert!(set_b.insert(*y))
+ }
+
+ let mut i = 0;
+ f(&set_a, &set_b, &mut |&x| {
+ if i < expected.len() {
+ assert_eq!(x, expected[i]);
+ }
+ i += 1;
+ true
+ });
+ assert_eq!(i, expected.len());
+}
+
+#[test]
+fn test_intersection() {
+ fn check_intersection(a: &[i32], b: &[i32], expected: &[i32]) {
+ check(a, b, expected, |x, y, f| x.intersection(y).all(f))
+ }
+
+ check_intersection(&[], &[], &[]);
+ check_intersection(&[1, 2, 3], &[], &[]);
+ check_intersection(&[], &[1, 2, 3], &[]);
+ check_intersection(&[2], &[1, 2, 3], &[2]);
+ check_intersection(&[1, 2, 3], &[2], &[2]);
+ check_intersection(&[11, 1, 3, 77, 103, 5, -5], &[2, 11, 77, -9, -42, 5, 3], &[3, 5, 11, 77]);
+
+ if cfg!(miri) {
+ // Miri is too slow
+ return;
+ }
+
+ let large = Vec::from_iter(0..100);
+ check_intersection(&[], &large, &[]);
+ check_intersection(&large, &[], &[]);
+ check_intersection(&[-1], &large, &[]);
+ check_intersection(&large, &[-1], &[]);
+ check_intersection(&[0], &large, &[0]);
+ check_intersection(&large, &[0], &[0]);
+ check_intersection(&[99], &large, &[99]);
+ check_intersection(&large, &[99], &[99]);
+ check_intersection(&[100], &large, &[]);
+ check_intersection(&large, &[100], &[]);
+ check_intersection(&[11, 5000, 1, 3, 77, 8924], &large, &[1, 3, 11, 77]);
+}
+
+#[test]
+fn test_intersection_size_hint() {
+ let x = BTreeSet::from([3, 4]);
+ let y = BTreeSet::from([1, 2, 3]);
+ let mut iter = x.intersection(&y);
+ assert_eq!(iter.size_hint(), (1, Some(1)));
+ assert_eq!(iter.next(), Some(&3));
+ assert_eq!(iter.size_hint(), (0, Some(0)));
+ assert_eq!(iter.next(), None);
+
+ iter = y.intersection(&y);
+ assert_eq!(iter.size_hint(), (0, Some(3)));
+ assert_eq!(iter.next(), Some(&1));
+ assert_eq!(iter.size_hint(), (0, Some(2)));
+}
+
+#[test]
+fn test_difference() {
+ fn check_difference(a: &[i32], b: &[i32], expected: &[i32]) {
+ check(a, b, expected, |x, y, f| x.difference(y).all(f))
+ }
+
+ check_difference(&[], &[], &[]);
+ check_difference(&[1, 12], &[], &[1, 12]);
+ check_difference(&[], &[1, 2, 3, 9], &[]);
+ check_difference(&[1, 3, 5, 9, 11], &[3, 9], &[1, 5, 11]);
+ check_difference(&[1, 3, 5, 9, 11], &[3, 6, 9], &[1, 5, 11]);
+ check_difference(&[1, 3, 5, 9, 11], &[0, 1], &[3, 5, 9, 11]);
+ check_difference(&[1, 3, 5, 9, 11], &[11, 12], &[1, 3, 5, 9]);
+ check_difference(
+ &[-5, 11, 22, 33, 40, 42],
+ &[-12, -5, 14, 23, 34, 38, 39, 50],
+ &[11, 22, 33, 40, 42],
+ );
+
+ if cfg!(miri) {
+ // Miri is too slow
+ return;
+ }
+
+ let large = Vec::from_iter(0..100);
+ check_difference(&[], &large, &[]);
+ check_difference(&[-1], &large, &[-1]);
+ check_difference(&[0], &large, &[]);
+ check_difference(&[99], &large, &[]);
+ check_difference(&[100], &large, &[100]);
+ check_difference(&[11, 5000, 1, 3, 77, 8924], &large, &[5000, 8924]);
+ check_difference(&large, &[], &large);
+ check_difference(&large, &[-1], &large);
+ check_difference(&large, &[100], &large);
+}
+
+#[test]
+fn test_difference_size_hint() {
+ let s246 = BTreeSet::from([2, 4, 6]);
+ let s23456 = BTreeSet::from_iter(2..=6);
+ let mut iter = s246.difference(&s23456);
+ assert_eq!(iter.size_hint(), (0, Some(3)));
+ assert_eq!(iter.next(), None);
+
+ let s12345 = BTreeSet::from_iter(1..=5);
+ iter = s246.difference(&s12345);
+ assert_eq!(iter.size_hint(), (0, Some(3)));
+ assert_eq!(iter.next(), Some(&6));
+ assert_eq!(iter.size_hint(), (0, Some(0)));
+ assert_eq!(iter.next(), None);
+
+ let s34567 = BTreeSet::from_iter(3..=7);
+ iter = s246.difference(&s34567);
+ assert_eq!(iter.size_hint(), (0, Some(3)));
+ assert_eq!(iter.next(), Some(&2));
+ assert_eq!(iter.size_hint(), (0, Some(2)));
+ assert_eq!(iter.next(), None);
+
+ let s1 = BTreeSet::from_iter(-9..=1);
+ iter = s246.difference(&s1);
+ assert_eq!(iter.size_hint(), (3, Some(3)));
+
+ let s2 = BTreeSet::from_iter(-9..=2);
+ iter = s246.difference(&s2);
+ assert_eq!(iter.size_hint(), (2, Some(2)));
+ assert_eq!(iter.next(), Some(&4));
+ assert_eq!(iter.size_hint(), (1, Some(1)));
+
+ let s23 = BTreeSet::from([2, 3]);
+ iter = s246.difference(&s23);
+ assert_eq!(iter.size_hint(), (1, Some(3)));
+ assert_eq!(iter.next(), Some(&4));
+ assert_eq!(iter.size_hint(), (1, Some(1)));
+
+ let s4 = BTreeSet::from([4]);
+ iter = s246.difference(&s4);
+ assert_eq!(iter.size_hint(), (2, Some(3)));
+ assert_eq!(iter.next(), Some(&2));
+ assert_eq!(iter.size_hint(), (1, Some(2)));
+ assert_eq!(iter.next(), Some(&6));
+ assert_eq!(iter.size_hint(), (0, Some(0)));
+ assert_eq!(iter.next(), None);
+
+ let s56 = BTreeSet::from([5, 6]);
+ iter = s246.difference(&s56);
+ assert_eq!(iter.size_hint(), (1, Some(3)));
+ assert_eq!(iter.next(), Some(&2));
+ assert_eq!(iter.size_hint(), (0, Some(2)));
+
+ let s6 = BTreeSet::from_iter(6..=19);
+ iter = s246.difference(&s6);
+ assert_eq!(iter.size_hint(), (2, Some(2)));
+ assert_eq!(iter.next(), Some(&2));
+ assert_eq!(iter.size_hint(), (1, Some(1)));
+
+ let s7 = BTreeSet::from_iter(7..=19);
+ iter = s246.difference(&s7);
+ assert_eq!(iter.size_hint(), (3, Some(3)));
+}
+
+#[test]
+fn test_symmetric_difference() {
+ fn check_symmetric_difference(a: &[i32], b: &[i32], expected: &[i32]) {
+ check(a, b, expected, |x, y, f| x.symmetric_difference(y).all(f))
+ }
+
+ check_symmetric_difference(&[], &[], &[]);
+ check_symmetric_difference(&[1, 2, 3], &[2], &[1, 3]);
+ check_symmetric_difference(&[2], &[1, 2, 3], &[1, 3]);
+ check_symmetric_difference(&[1, 3, 5, 9, 11], &[-2, 3, 9, 14, 22], &[-2, 1, 5, 11, 14, 22]);
+}
+
+#[test]
+fn test_symmetric_difference_size_hint() {
+ let x = BTreeSet::from([2, 4]);
+ let y = BTreeSet::from([1, 2, 3]);
+ let mut iter = x.symmetric_difference(&y);
+ assert_eq!(iter.size_hint(), (0, Some(5)));
+ assert_eq!(iter.next(), Some(&1));
+ assert_eq!(iter.size_hint(), (0, Some(4)));
+ assert_eq!(iter.next(), Some(&3));
+ assert_eq!(iter.size_hint(), (0, Some(1)));
+}
+
+#[test]
+fn test_union() {
+ fn check_union(a: &[i32], b: &[i32], expected: &[i32]) {
+ check(a, b, expected, |x, y, f| x.union(y).all(f))
+ }
+
+ check_union(&[], &[], &[]);
+ check_union(&[1, 2, 3], &[2], &[1, 2, 3]);
+ check_union(&[2], &[1, 2, 3], &[1, 2, 3]);
+ check_union(
+ &[1, 3, 5, 9, 11, 16, 19, 24],
+ &[-2, 1, 5, 9, 13, 19],
+ &[-2, 1, 3, 5, 9, 11, 13, 16, 19, 24],
+ );
+}
+
+#[test]
+fn test_union_size_hint() {
+ let x = BTreeSet::from([2, 4]);
+ let y = BTreeSet::from([1, 2, 3]);
+ let mut iter = x.union(&y);
+ assert_eq!(iter.size_hint(), (3, Some(5)));
+ assert_eq!(iter.next(), Some(&1));
+ assert_eq!(iter.size_hint(), (2, Some(4)));
+ assert_eq!(iter.next(), Some(&2));
+ assert_eq!(iter.size_hint(), (1, Some(2)));
+}
+
+#[test]
+// Only tests the simple function definition with respect to intersection
+fn test_is_disjoint() {
+ let one = BTreeSet::from([1]);
+ let two = BTreeSet::from([2]);
+ assert!(one.is_disjoint(&two));
+}
+
+#[test]
+// Also implicitly tests the trivial function definition of is_superset
+fn test_is_subset() {
+ fn is_subset(a: &[i32], b: &[i32]) -> bool {
+ let set_a = BTreeSet::from_iter(a.iter());
+ let set_b = BTreeSet::from_iter(b.iter());
+ set_a.is_subset(&set_b)
+ }
+
+ assert_eq!(is_subset(&[], &[]), true);
+ assert_eq!(is_subset(&[], &[1, 2]), true);
+ assert_eq!(is_subset(&[0], &[1, 2]), false);
+ assert_eq!(is_subset(&[1], &[1, 2]), true);
+ assert_eq!(is_subset(&[2], &[1, 2]), true);
+ assert_eq!(is_subset(&[3], &[1, 2]), false);
+ assert_eq!(is_subset(&[1, 2], &[1]), false);
+ assert_eq!(is_subset(&[1, 2], &[1, 2]), true);
+ assert_eq!(is_subset(&[1, 2], &[2, 3]), false);
+ assert_eq!(
+ is_subset(&[-5, 11, 22, 33, 40, 42], &[-12, -5, 11, 14, 22, 23, 33, 34, 38, 39, 40, 42]),
+ true
+ );
+ assert_eq!(is_subset(&[-5, 11, 22, 33, 40, 42], &[-12, -5, 11, 14, 22, 23, 34, 38]), false);
+
+ if cfg!(miri) {
+ // Miri is too slow
+ return;
+ }
+
+ let large = Vec::from_iter(0..100);
+ assert_eq!(is_subset(&[], &large), true);
+ assert_eq!(is_subset(&large, &[]), false);
+ assert_eq!(is_subset(&[-1], &large), false);
+ assert_eq!(is_subset(&[0], &large), true);
+ assert_eq!(is_subset(&[1, 2], &large), true);
+ assert_eq!(is_subset(&[99, 100], &large), false);
+}
+
+#[test]
+fn test_is_superset() {
+ fn is_superset(a: &[i32], b: &[i32]) -> bool {
+ let set_a = BTreeSet::from_iter(a.iter());
+ let set_b = BTreeSet::from_iter(b.iter());
+ set_a.is_superset(&set_b)
+ }
+
+ assert_eq!(is_superset(&[], &[]), true);
+ assert_eq!(is_superset(&[], &[1, 2]), false);
+ assert_eq!(is_superset(&[0], &[1, 2]), false);
+ assert_eq!(is_superset(&[1], &[1, 2]), false);
+ assert_eq!(is_superset(&[4], &[1, 2]), false);
+ assert_eq!(is_superset(&[1, 4], &[1, 2]), false);
+ assert_eq!(is_superset(&[1, 2], &[1, 2]), true);
+ assert_eq!(is_superset(&[1, 2, 3], &[1, 3]), true);
+ assert_eq!(is_superset(&[1, 2, 3], &[]), true);
+ assert_eq!(is_superset(&[-1, 1, 2, 3], &[-1, 3]), true);
+
+ if cfg!(miri) {
+ // Miri is too slow
+ return;
+ }
+
+ let large = Vec::from_iter(0..100);
+ assert_eq!(is_superset(&[], &large), false);
+ assert_eq!(is_superset(&large, &[]), true);
+ assert_eq!(is_superset(&large, &[1]), true);
+ assert_eq!(is_superset(&large, &[50, 99]), true);
+ assert_eq!(is_superset(&large, &[100]), false);
+ assert_eq!(is_superset(&large, &[0, 99]), true);
+ assert_eq!(is_superset(&[-1], &large), false);
+ assert_eq!(is_superset(&[0], &large), false);
+ assert_eq!(is_superset(&[99, 100], &large), false);
+}
+
+#[test]
+fn test_retain() {
+ let mut set = BTreeSet::from([1, 2, 3, 4, 5, 6]);
+ set.retain(|&k| k % 2 == 0);
+ assert_eq!(set.len(), 3);
+ assert!(set.contains(&2));
+ assert!(set.contains(&4));
+ assert!(set.contains(&6));
+}
+
+#[test]
+fn test_drain_filter() {
+ let mut x = BTreeSet::from([1]);
+ let mut y = BTreeSet::from([1]);
+
+ x.drain_filter(|_| true);
+ y.drain_filter(|_| false);
+ assert_eq!(x.len(), 0);
+ assert_eq!(y.len(), 1);
+}
+
+#[test]
+fn test_drain_filter_drop_panic_leak() {
+ let a = CrashTestDummy::new(0);
+ let b = CrashTestDummy::new(1);
+ let c = CrashTestDummy::new(2);
+ let mut set = BTreeSet::new();
+ set.insert(a.spawn(Panic::Never));
+ set.insert(b.spawn(Panic::InDrop));
+ set.insert(c.spawn(Panic::Never));
+
+ catch_unwind(move || drop(set.drain_filter(|dummy| dummy.query(true)))).ok();
+
+ assert_eq!(a.queried(), 1);
+ assert_eq!(b.queried(), 1);
+ assert_eq!(c.queried(), 0);
+ assert_eq!(a.dropped(), 1);
+ assert_eq!(b.dropped(), 1);
+ assert_eq!(c.dropped(), 1);
+}
+
+#[test]
+fn test_drain_filter_pred_panic_leak() {
+ let a = CrashTestDummy::new(0);
+ let b = CrashTestDummy::new(1);
+ let c = CrashTestDummy::new(2);
+ let mut set = BTreeSet::new();
+ set.insert(a.spawn(Panic::Never));
+ set.insert(b.spawn(Panic::InQuery));
+ set.insert(c.spawn(Panic::InQuery));
+
+ catch_unwind(AssertUnwindSafe(|| drop(set.drain_filter(|dummy| dummy.query(true))))).ok();
+
+ assert_eq!(a.queried(), 1);
+ assert_eq!(b.queried(), 1);
+ assert_eq!(c.queried(), 0);
+ assert_eq!(a.dropped(), 1);
+ assert_eq!(b.dropped(), 0);
+ assert_eq!(c.dropped(), 0);
+ assert_eq!(set.len(), 2);
+ assert_eq!(set.first().unwrap().id(), 1);
+ assert_eq!(set.last().unwrap().id(), 2);
+}
+
+#[test]
+fn test_clear() {
+ let mut x = BTreeSet::new();
+ x.insert(1);
+
+ x.clear();
+ assert!(x.is_empty());
+}
+#[test]
+fn test_remove() {
+ let mut x = BTreeSet::new();
+ assert!(x.is_empty());
+
+ x.insert(1);
+ x.insert(2);
+ x.insert(3);
+ x.insert(4);
+
+ assert_eq!(x.remove(&2), true);
+ assert_eq!(x.remove(&0), false);
+ assert_eq!(x.remove(&5), false);
+ assert_eq!(x.remove(&1), true);
+ assert_eq!(x.remove(&2), false);
+ assert_eq!(x.remove(&3), true);
+ assert_eq!(x.remove(&4), true);
+ assert_eq!(x.remove(&4), false);
+ assert!(x.is_empty());
+}
+
+#[test]
+fn test_zip() {
+ let mut x = BTreeSet::new();
+ x.insert(5);
+ x.insert(12);
+ x.insert(11);
+
+ let mut y = BTreeSet::new();
+ y.insert("foo");
+ y.insert("bar");
+
+ let x = x;
+ let y = y;
+ let mut z = x.iter().zip(&y);
+
+ assert_eq!(z.next().unwrap(), (&5, &("bar")));
+ assert_eq!(z.next().unwrap(), (&11, &("foo")));
+ assert!(z.next().is_none());
+}
+
+#[test]
+fn test_from_iter() {
+ let xs = [1, 2, 3, 4, 5, 6, 7, 8, 9];
+
+ let set = BTreeSet::from_iter(xs.iter());
+
+ for x in &xs {
+ assert!(set.contains(x));
+ }
+}
+
+#[test]
+fn test_show() {
+ let mut set = BTreeSet::new();
+ let empty = BTreeSet::<i32>::new();
+
+ set.insert(1);
+ set.insert(2);
+
+ let set_str = format!("{set:?}");
+
+ assert_eq!(set_str, "{1, 2}");
+ assert_eq!(format!("{empty:?}"), "{}");
+}
+
+#[test]
+fn test_extend_ref() {
+ let mut a = BTreeSet::new();
+ a.insert(1);
+
+ a.extend(&[2, 3, 4]);
+
+ assert_eq!(a.len(), 4);
+ assert!(a.contains(&1));
+ assert!(a.contains(&2));
+ assert!(a.contains(&3));
+ assert!(a.contains(&4));
+
+ let mut b = BTreeSet::new();
+ b.insert(5);
+ b.insert(6);
+
+ a.extend(&b);
+
+ assert_eq!(a.len(), 6);
+ assert!(a.contains(&1));
+ assert!(a.contains(&2));
+ assert!(a.contains(&3));
+ assert!(a.contains(&4));
+ assert!(a.contains(&5));
+ assert!(a.contains(&6));
+}
+
+#[test]
+fn test_recovery() {
+ #[derive(Debug)]
+ struct Foo(&'static str, i32);
+
+ impl PartialEq for Foo {
+ fn eq(&self, other: &Self) -> bool {
+ self.0 == other.0
+ }
+ }
+
+ impl Eq for Foo {}
+
+ impl PartialOrd for Foo {
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ self.0.partial_cmp(&other.0)
+ }
+ }
+
+ impl Ord for Foo {
+ fn cmp(&self, other: &Self) -> Ordering {
+ self.0.cmp(&other.0)
+ }
+ }
+
+ let mut s = BTreeSet::new();
+ assert_eq!(s.replace(Foo("a", 1)), None);
+ assert_eq!(s.len(), 1);
+ assert_eq!(s.replace(Foo("a", 2)), Some(Foo("a", 1)));
+ assert_eq!(s.len(), 1);
+
+ {
+ let mut it = s.iter();
+ assert_eq!(it.next(), Some(&Foo("a", 2)));
+ assert_eq!(it.next(), None);
+ }
+
+ assert_eq!(s.get(&Foo("a", 1)), Some(&Foo("a", 2)));
+ assert_eq!(s.take(&Foo("a", 1)), Some(Foo("a", 2)));
+ assert_eq!(s.len(), 0);
+
+ assert_eq!(s.get(&Foo("a", 1)), None);
+ assert_eq!(s.take(&Foo("a", 1)), None);
+
+ assert_eq!(s.iter().next(), None);
+}
+
+#[allow(dead_code)]
+fn assert_covariance() {
+ fn set<'new>(v: BTreeSet<&'static str>) -> BTreeSet<&'new str> {
+ v
+ }
+ fn iter<'a, 'new>(v: Iter<'a, &'static str>) -> Iter<'a, &'new str> {
+ v
+ }
+ fn into_iter<'new>(v: IntoIter<&'static str>) -> IntoIter<&'new str> {
+ v
+ }
+ fn range<'a, 'new>(v: Range<'a, &'static str>) -> Range<'a, &'new str> {
+ v
+ }
+ // not applied to Difference, Intersection, SymmetricDifference, Union
+}
+
+#[allow(dead_code)]
+fn assert_sync() {
+ fn set<T: Sync>(v: &BTreeSet<T>) -> impl Sync + '_ {
+ v
+ }
+
+ fn iter<T: Sync>(v: &BTreeSet<T>) -> impl Sync + '_ {
+ v.iter()
+ }
+
+ fn into_iter<T: Sync>(v: BTreeSet<T>) -> impl Sync {
+ v.into_iter()
+ }
+
+ fn range<T: Sync + Ord>(v: &BTreeSet<T>) -> impl Sync + '_ {
+ v.range(..)
+ }
+
+ fn drain_filter<T: Sync + Ord>(v: &mut BTreeSet<T>) -> impl Sync + '_ {
+ v.drain_filter(|_| false)
+ }
+
+ fn difference<T: Sync + Ord>(v: &BTreeSet<T>) -> impl Sync + '_ {
+ v.difference(&v)
+ }
+
+ fn intersection<T: Sync + Ord>(v: &BTreeSet<T>) -> impl Sync + '_ {
+ v.intersection(&v)
+ }
+
+ fn symmetric_difference<T: Sync + Ord>(v: &BTreeSet<T>) -> impl Sync + '_ {
+ v.symmetric_difference(&v)
+ }
+
+ fn union<T: Sync + Ord>(v: &BTreeSet<T>) -> impl Sync + '_ {
+ v.union(&v)
+ }
+}
+
+#[allow(dead_code)]
+fn assert_send() {
+ fn set<T: Send>(v: BTreeSet<T>) -> impl Send {
+ v
+ }
+
+ fn iter<T: Send + Sync>(v: &BTreeSet<T>) -> impl Send + '_ {
+ v.iter()
+ }
+
+ fn into_iter<T: Send>(v: BTreeSet<T>) -> impl Send {
+ v.into_iter()
+ }
+
+ fn range<T: Send + Sync + Ord>(v: &BTreeSet<T>) -> impl Send + '_ {
+ v.range(..)
+ }
+
+ fn drain_filter<T: Send + Ord>(v: &mut BTreeSet<T>) -> impl Send + '_ {
+ v.drain_filter(|_| false)
+ }
+
+ fn difference<T: Send + Sync + Ord>(v: &BTreeSet<T>) -> impl Send + '_ {
+ v.difference(&v)
+ }
+
+ fn intersection<T: Send + Sync + Ord>(v: &BTreeSet<T>) -> impl Send + '_ {
+ v.intersection(&v)
+ }
+
+ fn symmetric_difference<T: Send + Sync + Ord>(v: &BTreeSet<T>) -> impl Send + '_ {
+ v.symmetric_difference(&v)
+ }
+
+ fn union<T: Send + Sync + Ord>(v: &BTreeSet<T>) -> impl Send + '_ {
+ v.union(&v)
+ }
+}
+
+#[allow(dead_code)]
+// Check that the member-like functions conditionally provided by #[derive()]
+// are not overridden by genuine member functions with a different signature.
+fn assert_derives() {
+ fn hash<T: Hash, H: Hasher>(v: BTreeSet<T>, state: &mut H) {
+ v.hash(state);
+ // Tested much more thoroughly outside the crate in btree_set_hash.rs
+ }
+ fn eq<T: PartialEq>(v: BTreeSet<T>) {
+ let _ = v.eq(&v);
+ }
+ fn ne<T: PartialEq>(v: BTreeSet<T>) {
+ let _ = v.ne(&v);
+ }
+ fn cmp<T: Ord>(v: BTreeSet<T>) {
+ let _ = v.cmp(&v);
+ }
+ fn min<T: Ord>(v: BTreeSet<T>, w: BTreeSet<T>) {
+ let _ = v.min(w);
+ }
+ fn max<T: Ord>(v: BTreeSet<T>, w: BTreeSet<T>) {
+ let _ = v.max(w);
+ }
+ fn clamp<T: Ord>(v: BTreeSet<T>, w: BTreeSet<T>, x: BTreeSet<T>) {
+ let _ = v.clamp(w, x);
+ }
+ fn partial_cmp<T: PartialOrd>(v: &BTreeSet<T>) {
+ let _ = v.partial_cmp(&v);
+ }
+}
+
+#[test]
+fn test_ord_absence() {
+ fn set<K>(mut set: BTreeSet<K>) {
+ let _ = set.is_empty();
+ let _ = set.len();
+ set.clear();
+ let _ = set.iter();
+ let _ = set.into_iter();
+ }
+
+ fn set_debug<K: Debug>(set: BTreeSet<K>) {
+ format!("{set:?}");
+ format!("{:?}", set.iter());
+ format!("{:?}", set.into_iter());
+ }
+
+ fn set_clone<K: Clone>(mut set: BTreeSet<K>) {
+ set.clone_from(&set.clone());
+ }
+
+ #[derive(Debug, Clone)]
+ struct NonOrd;
+ set(BTreeSet::<NonOrd>::new());
+ set_debug(BTreeSet::<NonOrd>::new());
+ set_clone(BTreeSet::<NonOrd>::default());
+}
+
+#[test]
+fn test_append() {
+ let mut a = BTreeSet::new();
+ a.insert(1);
+ a.insert(2);
+ a.insert(3);
+
+ let mut b = BTreeSet::new();
+ b.insert(3);
+ b.insert(4);
+ b.insert(5);
+
+ a.append(&mut b);
+
+ assert_eq!(a.len(), 5);
+ assert_eq!(b.len(), 0);
+
+ assert_eq!(a.contains(&1), true);
+ assert_eq!(a.contains(&2), true);
+ assert_eq!(a.contains(&3), true);
+ assert_eq!(a.contains(&4), true);
+ assert_eq!(a.contains(&5), true);
+}
+
+#[test]
+fn test_first_last() {
+ let mut a = BTreeSet::new();
+ assert_eq!(a.first(), None);
+ assert_eq!(a.last(), None);
+ a.insert(1);
+ assert_eq!(a.first(), Some(&1));
+ assert_eq!(a.last(), Some(&1));
+ a.insert(2);
+ assert_eq!(a.first(), Some(&1));
+ assert_eq!(a.last(), Some(&2));
+ for i in 3..=12 {
+ a.insert(i);
+ }
+ assert_eq!(a.first(), Some(&1));
+ assert_eq!(a.last(), Some(&12));
+ assert_eq!(a.pop_first(), Some(1));
+ assert_eq!(a.pop_last(), Some(12));
+ assert_eq!(a.pop_first(), Some(2));
+ assert_eq!(a.pop_last(), Some(11));
+ assert_eq!(a.pop_first(), Some(3));
+ assert_eq!(a.pop_last(), Some(10));
+ assert_eq!(a.pop_first(), Some(4));
+ assert_eq!(a.pop_first(), Some(5));
+ assert_eq!(a.pop_first(), Some(6));
+ assert_eq!(a.pop_first(), Some(7));
+ assert_eq!(a.pop_first(), Some(8));
+ assert_eq!(a.clone().pop_last(), Some(9));
+ assert_eq!(a.pop_first(), Some(9));
+ assert_eq!(a.pop_first(), None);
+ assert_eq!(a.pop_last(), None);
+}
+
+// Unlike the function with the same name in map/tests, this returns keys only (no values),
+// which also means it returns different predetermined pseudo-random keys,
+// and the test cases using this function explore slightly different trees.
+fn rand_data(len: usize) -> Vec<u32> {
+ let mut rng = DeterministicRng::new();
+ Vec::from_iter((0..len).map(|_| rng.next()))
+}
+
+#[test]
+fn test_split_off_empty_right() {
+ let mut data = rand_data(173);
+
+ let mut set = BTreeSet::from_iter(data.clone());
+ let right = set.split_off(&(data.iter().max().unwrap() + 1));
+
+ data.sort();
+ assert!(set.into_iter().eq(data));
+ assert!(right.into_iter().eq(None));
+}
+
+#[test]
+fn test_split_off_empty_left() {
+ let mut data = rand_data(314);
+
+ let mut set = BTreeSet::from_iter(data.clone());
+ let right = set.split_off(data.iter().min().unwrap());
+
+ data.sort();
+ assert!(set.into_iter().eq(None));
+ assert!(right.into_iter().eq(data));
+}
+
+#[test]
+fn test_split_off_large_random_sorted() {
+ // Miri is too slow
+ let mut data = if cfg!(miri) { rand_data(529) } else { rand_data(1529) };
+ // special case with maximum height.
+ data.sort();
+
+ let mut set = BTreeSet::from_iter(data.clone());
+ let key = data[data.len() / 2];
+ let right = set.split_off(&key);
+
+ assert!(set.into_iter().eq(data.clone().into_iter().filter(|x| *x < key)));
+ assert!(right.into_iter().eq(data.into_iter().filter(|x| *x >= key)));
+}
+
+#[test]
+fn from_array() {
+ let set = BTreeSet::from([1, 2, 3, 4]);
+ let unordered_duplicates = BTreeSet::from([4, 1, 4, 3, 2]);
+ assert_eq!(set, unordered_duplicates);
+}
+
+#[should_panic(expected = "range start is greater than range end in BTreeSet")]
+#[test]
+fn test_range_panic_1() {
+ let mut set = BTreeSet::new();
+ set.insert(3);
+ set.insert(5);
+ set.insert(8);
+
+ let _invalid_range = set.range((Included(&8), Included(&3)));
+}
+
+#[should_panic(expected = "range start and end are equal and excluded in BTreeSet")]
+#[test]
+fn test_range_panic_2() {
+ let mut set = BTreeSet::new();
+ set.insert(3);
+ set.insert(5);
+ set.insert(8);
+
+ let _invalid_range = set.range((Excluded(&5), Excluded(&5)));
+}
diff --git a/library/alloc/src/collections/btree/set_val.rs b/library/alloc/src/collections/btree/set_val.rs
new file mode 100644
index 000000000..80c459bcf
--- /dev/null
+++ b/library/alloc/src/collections/btree/set_val.rs
@@ -0,0 +1,29 @@
+/// Zero-Sized Type (ZST) for internal `BTreeSet` values.
+/// Used instead of `()` to differentiate between:
+/// * `BTreeMap<T, ()>` (possible user-defined map)
+/// * `BTreeMap<T, SetValZST>` (internal set representation)
+#[derive(Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Clone, Default)]
+pub struct SetValZST;
+
+/// A trait to differentiate between `BTreeMap` and `BTreeSet` values.
+/// Returns `true` only for type `SetValZST` and `false` for all other types (via the blanket implementation).
+/// [`TypeId`] requires a `'static` lifetime; using this trait avoids that restriction.
+///
+/// [`TypeId`]: std::any::TypeId
+pub trait IsSetVal {
+ fn is_set_val() -> bool;
+}
+
+// Blanket implementation
+impl<V> IsSetVal for V {
+ default fn is_set_val() -> bool {
+ false
+ }
+}
+
+// Specialization
+impl IsSetVal for SetValZST {
+ fn is_set_val() -> bool {
+ true
+ }
+}
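+
+// Illustrative sketch (not from the upstream source): how generic map code can branch
+// on the value type through this trait instead of `TypeId`. The function below is
+// hypothetical; the real call sites live elsewhere in the btree module.
+#[allow(dead_code)]
+fn is_backing_a_set<V: IsSetVal>() -> bool {
+    // `true` only when the `BTreeMap` is the internal representation of a `BTreeSet`.
+    V::is_set_val()
+}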
diff --git a/library/alloc/src/collections/btree/split.rs b/library/alloc/src/collections/btree/split.rs
new file mode 100644
index 000000000..638dc98fc
--- /dev/null
+++ b/library/alloc/src/collections/btree/split.rs
@@ -0,0 +1,73 @@
+use super::node::{ForceResult::*, Root};
+use super::search::SearchResult::*;
+use core::alloc::Allocator;
+use core::borrow::Borrow;
+
+impl<K, V> Root<K, V> {
+ /// Calculates the length of both trees that result from splitting up
+ /// a given number of distinct key-value pairs.
+ pub fn calc_split_length(
+ total_num: usize,
+ root_a: &Root<K, V>,
+ root_b: &Root<K, V>,
+ ) -> (usize, usize) {
+ let (length_a, length_b);
+ if root_a.height() < root_b.height() {
+ length_a = root_a.reborrow().calc_length();
+ length_b = total_num - length_a;
+ debug_assert_eq!(length_b, root_b.reborrow().calc_length());
+ } else {
+ length_b = root_b.reborrow().calc_length();
+ length_a = total_num - length_b;
+ debug_assert_eq!(length_a, root_a.reborrow().calc_length());
+ }
+ (length_a, length_b)
+ }
+
+ /// Split off a tree with key-value pairs at and after the given key.
+ /// The result is meaningful only if the tree is ordered by key,
+ /// and if the ordering of `Q` corresponds to that of `K`.
+ /// If `self` respects all `BTreeMap` tree invariants, then both
+ /// `self` and the returned tree will respect those invariants.
+ pub fn split_off<Q: ?Sized + Ord, A: Allocator + Clone>(&mut self, key: &Q, alloc: A) -> Self
+ where
+ K: Borrow<Q>,
+ {
+ let left_root = self;
+ let mut right_root = Root::new_pillar(left_root.height(), alloc.clone());
+ let mut left_node = left_root.borrow_mut();
+ let mut right_node = right_root.borrow_mut();
+
+ loop {
+ let mut split_edge = match left_node.search_node(key) {
+ // key is going to the right tree
+ Found(kv) => kv.left_edge(),
+ GoDown(edge) => edge,
+ };
+
+ split_edge.move_suffix(&mut right_node);
+
+ match (split_edge.force(), right_node.force()) {
+ (Internal(edge), Internal(node)) => {
+ left_node = edge.descend();
+ right_node = node.first_edge().descend();
+ }
+ (Leaf(_), Leaf(_)) => break,
+ _ => unreachable!(),
+ }
+ }
+
+ left_root.fix_right_border(alloc.clone());
+ right_root.fix_left_border(alloc);
+ right_root
+ }
+
+ /// Creates a tree consisting of empty nodes.
+ fn new_pillar<A: Allocator + Clone>(height: usize, alloc: A) -> Self {
+ let mut root = Root::new(alloc.clone());
+ for _ in 0..height {
+ root.push_internal_level(alloc.clone());
+ }
+ root
+ }
+}
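+
+// Illustrative sketch (not from the upstream source) of the observable effect of the
+// node-level algorithm above, expressed through the stable `BTreeSet::split_off`.
+// Written as a standalone snippet using the `std` re-export, like the doc examples
+// elsewhere in this patch, rather than crate-internal paths.
+#[allow(dead_code)]
+fn split_off_sketch() {
+    let mut low = std::collections::BTreeSet::from([1, 2, 3, 8, 9]);
+    let high = low.split_off(&4); // keys >= 4 move to the returned tree
+    assert!(low.into_iter().eq([1, 2, 3]));
+    assert!(high.into_iter().eq([8, 9]));
+}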
diff --git a/library/alloc/src/collections/btree/testing/crash_test.rs b/library/alloc/src/collections/btree/testing/crash_test.rs
new file mode 100644
index 000000000..bcf5f5f72
--- /dev/null
+++ b/library/alloc/src/collections/btree/testing/crash_test.rs
@@ -0,0 +1,119 @@
+// We avoid relying on anything else in the crate, apart from the `Debug` trait.
+use crate::fmt::Debug;
+use std::cmp::Ordering;
+use std::sync::atomic::{AtomicUsize, Ordering::SeqCst};
+
+/// A blueprint for crash test dummy instances that monitor particular events.
+/// Some instances may be configured to panic at some point.
+/// Events are `clone`, `drop` or some anonymous `query`.
+///
+/// Crash test dummies are identified and ordered by an id, so they can be used
+/// as keys in a BTreeMap.
+#[derive(Debug)]
+pub struct CrashTestDummy {
+ pub id: usize,
+ cloned: AtomicUsize,
+ dropped: AtomicUsize,
+ queried: AtomicUsize,
+}
+
+impl CrashTestDummy {
+ /// Creates a crash test dummy design. The `id` determines order and equality of instances.
+ pub fn new(id: usize) -> CrashTestDummy {
+ CrashTestDummy {
+ id,
+ cloned: AtomicUsize::new(0),
+ dropped: AtomicUsize::new(0),
+ queried: AtomicUsize::new(0),
+ }
+ }
+
+ /// Creates an instance of a crash test dummy that records what events it experiences
+ /// and optionally panics.
+ pub fn spawn(&self, panic: Panic) -> Instance<'_> {
+ Instance { origin: self, panic }
+ }
+
+ /// Returns how many times instances of the dummy have been cloned.
+ pub fn cloned(&self) -> usize {
+ self.cloned.load(SeqCst)
+ }
+
+ /// Returns how many times instances of the dummy have been dropped.
+ pub fn dropped(&self) -> usize {
+ self.dropped.load(SeqCst)
+ }
+
+ /// Returns how many times instances of the dummy have had their `query` member invoked.
+ pub fn queried(&self) -> usize {
+ self.queried.load(SeqCst)
+ }
+}
+
+#[derive(Debug)]
+pub struct Instance<'a> {
+ origin: &'a CrashTestDummy,
+ panic: Panic,
+}
+
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
+pub enum Panic {
+ Never,
+ InClone,
+ InDrop,
+ InQuery,
+}
+
+impl Instance<'_> {
+ pub fn id(&self) -> usize {
+ self.origin.id
+ }
+
+ /// An anonymous query whose result is supplied by the caller and returned unchanged.
+ pub fn query<R>(&self, result: R) -> R {
+ self.origin.queried.fetch_add(1, SeqCst);
+ if self.panic == Panic::InQuery {
+ panic!("panic in `query`");
+ }
+ result
+ }
+}
+
+impl Clone for Instance<'_> {
+ fn clone(&self) -> Self {
+ self.origin.cloned.fetch_add(1, SeqCst);
+ if self.panic == Panic::InClone {
+ panic!("panic in `clone`");
+ }
+ Self { origin: self.origin, panic: Panic::Never }
+ }
+}
+
+impl Drop for Instance<'_> {
+ fn drop(&mut self) {
+ self.origin.dropped.fetch_add(1, SeqCst);
+ if self.panic == Panic::InDrop {
+ panic!("panic in `drop`");
+ }
+ }
+}
+
+impl PartialOrd for Instance<'_> {
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ self.id().partial_cmp(&other.id())
+ }
+}
+
+impl Ord for Instance<'_> {
+ fn cmp(&self, other: &Self) -> Ordering {
+ self.id().cmp(&other.id())
+ }
+}
+
+impl PartialEq for Instance<'_> {
+ fn eq(&self, other: &Self) -> bool {
+ self.id().eq(&other.id())
+ }
+}
+
+impl Eq for Instance<'_> {}
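+
+// Illustrative sketch (not from the upstream source) of how the tests in this patch
+// use the dummy (compare `test_drain_filter_drop_panic_leak` earlier): one blueprint
+// counts events across every instance it spawned, and the `Panic` flag chooses where
+// a particular instance blows up.
+#[allow(dead_code)]
+fn crash_test_sketch() {
+    let origin = CrashTestDummy::new(7);
+    let instance = origin.spawn(Panic::Never);
+    assert!(instance.query(true)); // recorded on `origin`
+    drop(instance.clone()); // the clone and its drop are both recorded
+    drop(instance);
+    assert_eq!(origin.queried(), 1);
+    assert_eq!(origin.cloned(), 1);
+    assert_eq!(origin.dropped(), 2);
+}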
diff --git a/library/alloc/src/collections/btree/testing/mod.rs b/library/alloc/src/collections/btree/testing/mod.rs
new file mode 100644
index 000000000..7a094f8a5
--- /dev/null
+++ b/library/alloc/src/collections/btree/testing/mod.rs
@@ -0,0 +1,3 @@
+pub mod crash_test;
+pub mod ord_chaos;
+pub mod rng;
diff --git a/library/alloc/src/collections/btree/testing/ord_chaos.rs b/library/alloc/src/collections/btree/testing/ord_chaos.rs
new file mode 100644
index 000000000..96ce7c157
--- /dev/null
+++ b/library/alloc/src/collections/btree/testing/ord_chaos.rs
@@ -0,0 +1,81 @@
+use std::cell::Cell;
+use std::cmp::Ordering::{self, *};
+use std::ptr;
+
+// Minimal type with an `Ord` implementation violating transitivity.
+#[derive(Debug)]
+pub enum Cyclic3 {
+ A,
+ B,
+ C,
+}
+use Cyclic3::*;
+
+impl PartialOrd for Cyclic3 {
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+}
+
+impl Ord for Cyclic3 {
+ fn cmp(&self, other: &Self) -> Ordering {
+ match (self, other) {
+ (A, A) | (B, B) | (C, C) => Equal,
+ (A, B) | (B, C) | (C, A) => Less,
+ (A, C) | (B, A) | (C, B) => Greater,
+ }
+ }
+}
+
+impl PartialEq for Cyclic3 {
+ fn eq(&self, other: &Self) -> bool {
+ self.cmp(&other) == Equal
+ }
+}
+
+impl Eq for Cyclic3 {}
+
+// Controls the ordering of values wrapped by `Governed`.
+#[derive(Debug)]
+pub struct Governor {
+ flipped: Cell<bool>,
+}
+
+impl Governor {
+ pub fn new() -> Self {
+ Governor { flipped: Cell::new(false) }
+ }
+
+ pub fn flip(&self) {
+ self.flipped.set(!self.flipped.get());
+ }
+}
+
+// Type with an `Ord` implementation that forms a total order at any moment
+// (assuming that `T` respects total order), but can suddenly be made to invert
+// that total order.
+#[derive(Debug)]
+pub struct Governed<'a, T>(pub T, pub &'a Governor);
+
+impl<T: Ord> PartialOrd for Governed<'_, T> {
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ Some(self.cmp(other))
+ }
+}
+
+impl<T: Ord> Ord for Governed<'_, T> {
+ fn cmp(&self, other: &Self) -> Ordering {
+ assert!(ptr::eq(self.1, other.1));
+ let ord = self.0.cmp(&other.0);
+ if self.1.flipped.get() { ord.reverse() } else { ord }
+ }
+}
+
+impl<T: PartialEq> PartialEq for Governed<'_, T> {
+ fn eq(&self, other: &Self) -> bool {
+ assert!(ptr::eq(self.1, other.1));
+ self.0.eq(&other.0)
+ }
+}
+
+impl<T: Eq> Eq for Governed<'_, T> {}
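+
+// Illustrative sketch (not from the upstream source) of what "violating transitivity"
+// means for `Cyclic3` above: each variant compares `Less` than the next one around
+// the cycle, so `A < B` and `B < C`, yet `C < A` instead of `A < C`.
+#[allow(dead_code)]
+fn cyclic3_sketch() {
+    assert!(A < B);
+    assert!(B < C);
+    assert!(C < A); // a transitive order would require A < C here
+}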
diff --git a/library/alloc/src/collections/btree/testing/rng.rs b/library/alloc/src/collections/btree/testing/rng.rs
new file mode 100644
index 000000000..ecf543bee
--- /dev/null
+++ b/library/alloc/src/collections/btree/testing/rng.rs
@@ -0,0 +1,28 @@
+/// XorShiftRng
+pub struct DeterministicRng {
+ count: usize,
+ x: u32,
+ y: u32,
+ z: u32,
+ w: u32,
+}
+
+impl DeterministicRng {
+ pub fn new() -> Self {
+ DeterministicRng { count: 0, x: 0x193a6754, y: 0xa8a7d469, z: 0x97830e05, w: 0x113ba7bb }
+ }
+
+ /// Guarantees that each returned number is unique.
+ pub fn next(&mut self) -> u32 {
+ self.count += 1;
+ assert!(self.count <= 70029);
+ let x = self.x;
+ let t = x ^ (x << 11);
+ self.x = self.y;
+ self.y = self.z;
+ self.z = self.w;
+ let w_ = self.w;
+ self.w = w_ ^ (w_ >> 19) ^ (t ^ (t >> 8));
+ self.w
+ }
+}
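+
+// Illustrative sketch (not from the upstream source): why the tests can treat this as
+// reproducible "random" data. Two generators built with `new()` start from the same
+// seed and therefore walk the same xorshift sequence.
+#[allow(dead_code)]
+fn deterministic_rng_sketch() {
+    let mut a = DeterministicRng::new();
+    let mut b = DeterministicRng::new();
+    for _ in 0..16 {
+        assert_eq!(a.next(), b.next());
+    }
+}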
diff --git a/library/alloc/src/collections/linked_list.rs b/library/alloc/src/collections/linked_list.rs
new file mode 100644
index 000000000..e21c8aa3b
--- /dev/null
+++ b/library/alloc/src/collections/linked_list.rs
@@ -0,0 +1,2012 @@
+//! A doubly-linked list with owned nodes.
+//!
+//! The `LinkedList` allows pushing and popping elements at either end
+//! in constant time.
+//!
+//! NOTE: It is almost always better to use [`Vec`] or [`VecDeque`] because
+//! array-based containers are generally faster,
+//! more memory efficient, and make better use of CPU cache.
+//!
+//! [`Vec`]: crate::vec::Vec
+//! [`VecDeque`]: super::vec_deque::VecDeque
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use core::cmp::Ordering;
+use core::fmt;
+use core::hash::{Hash, Hasher};
+use core::iter::{FromIterator, FusedIterator};
+use core::marker::PhantomData;
+use core::mem;
+use core::ptr::NonNull;
+
+use super::SpecExtend;
+use crate::boxed::Box;
+
+#[cfg(test)]
+mod tests;
+
+/// A doubly-linked list with owned nodes.
+///
+/// The `LinkedList` allows pushing and popping elements at either end
+/// in constant time.
+///
+/// A `LinkedList` with a known list of items can be initialized from an array:
+/// ```
+/// use std::collections::LinkedList;
+///
+/// let list = LinkedList::from([1, 2, 3]);
+/// ```
+///
+/// NOTE: It is almost always better to use [`Vec`] or [`VecDeque`] because
+/// array-based containers are generally faster,
+/// more memory efficient, and make better use of CPU cache.
+///
+/// [`Vec`]: crate::vec::Vec
+/// [`VecDeque`]: super::vec_deque::VecDeque
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "LinkedList")]
+#[rustc_insignificant_dtor]
+pub struct LinkedList<T> {
+ head: Option<NonNull<Node<T>>>,
+ tail: Option<NonNull<Node<T>>>,
+ len: usize,
+ marker: PhantomData<Box<Node<T>>>,
+}
+
+struct Node<T> {
+ next: Option<NonNull<Node<T>>>,
+ prev: Option<NonNull<Node<T>>>,
+ element: T,
+}
+
+/// An iterator over the elements of a `LinkedList`.
+///
+/// This `struct` is created by [`LinkedList::iter()`]. See its
+/// documentation for more.
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Iter<'a, T: 'a> {
+ head: Option<NonNull<Node<T>>>,
+ tail: Option<NonNull<Node<T>>>,
+ len: usize,
+ marker: PhantomData<&'a Node<T>>,
+}
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("Iter")
+ .field(&*mem::ManuallyDrop::new(LinkedList {
+ head: self.head,
+ tail: self.tail,
+ len: self.len,
+ marker: PhantomData,
+ }))
+ .field(&self.len)
+ .finish()
+ }
+}
+
+// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Clone for Iter<'_, T> {
+ fn clone(&self) -> Self {
+ Iter { ..*self }
+ }
+}
+
+/// A mutable iterator over the elements of a `LinkedList`.
+///
+/// This `struct` is created by [`LinkedList::iter_mut()`]. See its
+/// documentation for more.
+#[must_use = "iterators are lazy and do nothing unless consumed"]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct IterMut<'a, T: 'a> {
+ head: Option<NonNull<Node<T>>>,
+ tail: Option<NonNull<Node<T>>>,
+ len: usize,
+ marker: PhantomData<&'a mut Node<T>>,
+}
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<T: fmt::Debug> fmt::Debug for IterMut<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("IterMut")
+ .field(&*mem::ManuallyDrop::new(LinkedList {
+ head: self.head,
+ tail: self.tail,
+ len: self.len,
+ marker: PhantomData,
+ }))
+ .field(&self.len)
+ .finish()
+ }
+}
+
+/// An owning iterator over the elements of a `LinkedList`.
+///
+/// This `struct` is created by the [`into_iter`] method on [`LinkedList`]
+/// (provided by the [`IntoIterator`] trait). See its documentation for more.
+///
+/// [`into_iter`]: LinkedList::into_iter
+/// [`IntoIterator`]: core::iter::IntoIterator
+#[derive(Clone)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct IntoIter<T> {
+ list: LinkedList<T>,
+}
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<T: fmt::Debug> fmt::Debug for IntoIter<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("IntoIter").field(&self.list).finish()
+ }
+}
+
+impl<T> Node<T> {
+ fn new(element: T) -> Self {
+ Node { next: None, prev: None, element }
+ }
+
+ fn into_element(self: Box<Self>) -> T {
+ self.element
+ }
+}
+
+// private methods
+impl<T> LinkedList<T> {
+ /// Adds the given node to the front of the list.
+ #[inline]
+ fn push_front_node(&mut self, mut node: Box<Node<T>>) {
+ // This method takes care not to create mutable references to whole nodes,
+ // to maintain validity of aliasing pointers into `element`.
+ unsafe {
+ node.next = self.head;
+ node.prev = None;
+ let node = Some(Box::leak(node).into());
+
+ match self.head {
+ None => self.tail = node,
+ // Not creating new mutable (unique!) references overlapping `element`.
+ Some(head) => (*head.as_ptr()).prev = node,
+ }
+
+ self.head = node;
+ self.len += 1;
+ }
+ }
+
+ /// Removes and returns the node at the front of the list.
+ #[inline]
+ fn pop_front_node(&mut self) -> Option<Box<Node<T>>> {
+ // This method takes care not to create mutable references to whole nodes,
+ // to maintain validity of aliasing pointers into `element`.
+ self.head.map(|node| unsafe {
+ let node = Box::from_raw(node.as_ptr());
+ self.head = node.next;
+
+ match self.head {
+ None => self.tail = None,
+ // Not creating new mutable (unique!) references overlapping `element`.
+ Some(head) => (*head.as_ptr()).prev = None,
+ }
+
+ self.len -= 1;
+ node
+ })
+ }
+
+ /// Adds the given node to the back of the list.
+ #[inline]
+ fn push_back_node(&mut self, mut node: Box<Node<T>>) {
+ // This method takes care not to create mutable references to whole nodes,
+ // to maintain validity of aliasing pointers into `element`.
+ unsafe {
+ node.next = None;
+ node.prev = self.tail;
+ let node = Some(Box::leak(node).into());
+
+ match self.tail {
+ None => self.head = node,
+ // Not creating new mutable (unique!) references overlapping `element`.
+ Some(tail) => (*tail.as_ptr()).next = node,
+ }
+
+ self.tail = node;
+ self.len += 1;
+ }
+ }
+
+ /// Removes and returns the node at the back of the list.
+ #[inline]
+ fn pop_back_node(&mut self) -> Option<Box<Node<T>>> {
+ // This method takes care not to create mutable references to whole nodes,
+ // to maintain validity of aliasing pointers into `element`.
+ self.tail.map(|node| unsafe {
+ let node = Box::from_raw(node.as_ptr());
+ self.tail = node.prev;
+
+ match self.tail {
+ None => self.head = None,
+ // Not creating new mutable (unique!) references overlapping `element`.
+ Some(tail) => (*tail.as_ptr()).next = None,
+ }
+
+ self.len -= 1;
+ node
+ })
+ }
+
+ /// Unlinks the specified node from the current list.
+ ///
+ /// Warning: this will not check that the provided node belongs to the current list.
+ ///
+ /// This method takes care not to create mutable references to `element`, to
+ /// maintain validity of aliasing pointers.
+ #[inline]
+ unsafe fn unlink_node(&mut self, mut node: NonNull<Node<T>>) {
+ let node = unsafe { node.as_mut() }; // this one is ours now, we can create an &mut.
+
+ // Not creating new mutable (unique!) references overlapping `element`.
+ match node.prev {
+ Some(prev) => unsafe { (*prev.as_ptr()).next = node.next },
+ // this node is the head node
+ None => self.head = node.next,
+ };
+
+ match node.next {
+ Some(next) => unsafe { (*next.as_ptr()).prev = node.prev },
+ // this node is the tail node
+ None => self.tail = node.prev,
+ };
+
+ self.len -= 1;
+ }
+
+ /// Splices a series of nodes between two existing nodes.
+ ///
+ /// Warning: this will not check that the provided node belongs to the two existing lists.
+ #[inline]
+ unsafe fn splice_nodes(
+ &mut self,
+ existing_prev: Option<NonNull<Node<T>>>,
+ existing_next: Option<NonNull<Node<T>>>,
+ mut splice_start: NonNull<Node<T>>,
+ mut splice_end: NonNull<Node<T>>,
+ splice_length: usize,
+ ) {
+ // This method takes care not to create multiple mutable references to whole nodes at the same time,
+ // to maintain validity of aliasing pointers into `element`.
+ if let Some(mut existing_prev) = existing_prev {
+ unsafe {
+ existing_prev.as_mut().next = Some(splice_start);
+ }
+ } else {
+ self.head = Some(splice_start);
+ }
+ if let Some(mut existing_next) = existing_next {
+ unsafe {
+ existing_next.as_mut().prev = Some(splice_end);
+ }
+ } else {
+ self.tail = Some(splice_end);
+ }
+ unsafe {
+ splice_start.as_mut().prev = existing_prev;
+ splice_end.as_mut().next = existing_next;
+ }
+
+ self.len += splice_length;
+ }
+
+ /// Detaches all nodes from a linked list as a series of nodes.
+ #[inline]
+ fn detach_all_nodes(mut self) -> Option<(NonNull<Node<T>>, NonNull<Node<T>>, usize)> {
+ let head = self.head.take();
+ let tail = self.tail.take();
+ let len = mem::replace(&mut self.len, 0);
+ if let Some(head) = head {
+ // SAFETY: In a LinkedList, either both the head and tail are None because
+ // the list is empty, or both head and tail are Some because the list is populated.
+ // Since we have verified the head is Some, we are sure the tail is Some too.
+ let tail = unsafe { tail.unwrap_unchecked() };
+ Some((head, tail, len))
+ } else {
+ None
+ }
+ }
+
+ #[inline]
+ unsafe fn split_off_before_node(
+ &mut self,
+ split_node: Option<NonNull<Node<T>>>,
+ at: usize,
+ ) -> Self {
+ // The split node is the new head node of the second part
+ if let Some(mut split_node) = split_node {
+ let first_part_head;
+ let first_part_tail;
+ unsafe {
+ first_part_tail = split_node.as_mut().prev.take();
+ }
+ if let Some(mut tail) = first_part_tail {
+ unsafe {
+ tail.as_mut().next = None;
+ }
+ first_part_head = self.head;
+ } else {
+ first_part_head = None;
+ }
+
+ let first_part = LinkedList {
+ head: first_part_head,
+ tail: first_part_tail,
+ len: at,
+ marker: PhantomData,
+ };
+
+ // Fix the head ptr of the second part
+ self.head = Some(split_node);
+ self.len = self.len - at;
+
+ first_part
+ } else {
+ mem::replace(self, LinkedList::new())
+ }
+ }
+
+ #[inline]
+ unsafe fn split_off_after_node(
+ &mut self,
+ split_node: Option<NonNull<Node<T>>>,
+ at: usize,
+ ) -> Self {
+ // The split node is the new tail node of the first part and owns
+ // the head of the second part.
+ if let Some(mut split_node) = split_node {
+ let second_part_head;
+ let second_part_tail;
+ unsafe {
+ second_part_head = split_node.as_mut().next.take();
+ }
+ if let Some(mut head) = second_part_head {
+ unsafe {
+ head.as_mut().prev = None;
+ }
+ second_part_tail = self.tail;
+ } else {
+ second_part_tail = None;
+ }
+
+ let second_part = LinkedList {
+ head: second_part_head,
+ tail: second_part_tail,
+ len: self.len - at,
+ marker: PhantomData,
+ };
+
+ // Fix the tail ptr of the first part
+ self.tail = Some(split_node);
+ self.len = at;
+
+ second_part
+ } else {
+ mem::replace(self, LinkedList::new())
+ }
+ }
+}
+
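+// Illustrative sketch (not from the upstream source) of the invariants the node
+// operations above maintain, observed through the public methods defined further down
+// in this file: `head`, `tail` and `len` stay consistent across pushes and pops at
+// either end.
+#[allow(dead_code)]
+fn ends_sketch() {
+    let mut list = LinkedList::new();
+    list.push_front(2);
+    list.push_back(3);
+    list.push_front(1); // list is now 1, 2, 3
+    assert_eq!(list.len(), 3);
+    assert_eq!(list.pop_front(), Some(1));
+    assert_eq!(list.pop_back(), Some(3));
+    assert_eq!(list.pop_front(), Some(2));
+    assert!(list.is_empty());
+}
+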
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Default for LinkedList<T> {
+ /// Creates an empty `LinkedList<T>`.
+ #[inline]
+ fn default() -> Self {
+ Self::new()
+ }
+}
+
+impl<T> LinkedList<T> {
+ /// Creates an empty `LinkedList`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::LinkedList;
+ ///
+ /// let list: LinkedList<u32> = LinkedList::new();
+ /// ```
+ #[inline]
+ #[rustc_const_stable(feature = "const_linked_list_new", since = "1.39.0")]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ pub const fn new() -> Self {
+ LinkedList { head: None, tail: None, len: 0, marker: PhantomData }
+ }
+
+ /// Moves all elements from `other` to the end of the list.
+ ///
+ /// This reuses all the nodes from `other` and moves them into `self`. After
+ /// this operation, `other` becomes empty.
+ ///
+ /// This operation should compute in *O*(1) time and *O*(1) memory.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::LinkedList;
+ ///
+ /// let mut list1 = LinkedList::new();
+ /// list1.push_back('a');
+ ///
+ /// let mut list2 = LinkedList::new();
+ /// list2.push_back('b');
+ /// list2.push_back('c');
+ ///
+ /// list1.append(&mut list2);
+ ///
+ /// let mut iter = list1.iter();
+ /// assert_eq!(iter.next(), Some(&'a'));
+ /// assert_eq!(iter.next(), Some(&'b'));
+ /// assert_eq!(iter.next(), Some(&'c'));
+ /// assert!(iter.next().is_none());
+ ///
+ /// assert!(list2.is_empty());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn append(&mut self, other: &mut Self) {
+ match self.tail {
+ None => mem::swap(self, other),
+ Some(mut tail) => {
+ // `as_mut` is okay here because we have exclusive access to the entirety
+ // of both lists.
+ if let Some(mut other_head) = other.head.take() {
+ unsafe {
+ tail.as_mut().next = Some(other_head);
+ other_head.as_mut().prev = Some(tail);
+ }
+
+ self.tail = other.tail.take();
+ self.len += mem::replace(&mut other.len, 0);
+ }
+ }
+ }
+ }
+
+ /// Provides a forward iterator.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::LinkedList;
+ ///
+ /// let mut list: LinkedList<u32> = LinkedList::new();
+ ///
+ /// list.push_back(0);
+ /// list.push_back(1);
+ /// list.push_back(2);
+ ///
+ /// let mut iter = list.iter();
+ /// assert_eq!(iter.next(), Some(&0));
+ /// assert_eq!(iter.next(), Some(&1));
+ /// assert_eq!(iter.next(), Some(&2));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn iter(&self) -> Iter<'_, T> {
+ Iter { head: self.head, tail: self.tail, len: self.len, marker: PhantomData }
+ }
+
+ /// Provides a forward iterator with mutable references.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::LinkedList;
+ ///
+ /// let mut list: LinkedList<u32> = LinkedList::new();
+ ///
+ /// list.push_back(0);
+ /// list.push_back(1);
+ /// list.push_back(2);
+ ///
+ /// for element in list.iter_mut() {
+ /// *element += 10;
+ /// }
+ ///
+ /// let mut iter = list.iter();
+ /// assert_eq!(iter.next(), Some(&10));
+ /// assert_eq!(iter.next(), Some(&11));
+ /// assert_eq!(iter.next(), Some(&12));
+ /// assert_eq!(iter.next(), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn iter_mut(&mut self) -> IterMut<'_, T> {
+ IterMut { head: self.head, tail: self.tail, len: self.len, marker: PhantomData }
+ }
+
+ /// Provides a cursor at the front element.
+ ///
+ /// The cursor is pointing to the "ghost" non-element if the list is empty.
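+    ///
+    /// # Examples
+    ///
+    /// A small illustrative sketch (added for clarity; it relies on the
+    /// unstable `linked_list_cursors` feature):
+    ///
+    /// ```
+    /// #![feature(linked_list_cursors)]
+    /// use std::collections::LinkedList;
+    ///
+    /// let list = LinkedList::from([1, 2, 3]);
+    /// // The cursor starts on the front element of a non-empty list.
+    /// let cursor = list.cursor_front();
+    /// assert_eq!(cursor.current(), Some(&1));
+    /// assert_eq!(cursor.index(), Some(0));
+    /// ```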
+ #[inline]
+ #[must_use]
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn cursor_front(&self) -> Cursor<'_, T> {
+ Cursor { index: 0, current: self.head, list: self }
+ }
+
+ /// Provides a cursor with editing operations at the front element.
+ ///
+ /// The cursor is pointing to the "ghost" non-element if the list is empty.
+ #[inline]
+ #[must_use]
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn cursor_front_mut(&mut self) -> CursorMut<'_, T> {
+ CursorMut { index: 0, current: self.head, list: self }
+ }
+
+ /// Provides a cursor at the back element.
+ ///
+ /// The cursor is pointing to the "ghost" non-element if the list is empty.
+ #[inline]
+ #[must_use]
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn cursor_back(&self) -> Cursor<'_, T> {
+ Cursor { index: self.len.checked_sub(1).unwrap_or(0), current: self.tail, list: self }
+ }
+
+ /// Provides a cursor with editing operations at the back element.
+ ///
+ /// The cursor is pointing to the "ghost" non-element if the list is empty.
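+    ///
+    /// # Examples
+    ///
+    /// An illustrative sketch of editing the back element in place (assumes
+    /// the unstable `linked_list_cursors` feature):
+    ///
+    /// ```
+    /// #![feature(linked_list_cursors)]
+    /// use std::collections::LinkedList;
+    ///
+    /// let mut list = LinkedList::from([1, 2, 3]);
+    /// let mut cursor = list.cursor_back_mut();
+    /// // The cursor starts on the back element and can mutate it directly.
+    /// if let Some(last) = cursor.current() {
+    ///     *last = 30;
+    /// }
+    /// assert_eq!(list.back(), Some(&30));
+    /// ```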
+ #[inline]
+ #[must_use]
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn cursor_back_mut(&mut self) -> CursorMut<'_, T> {
+ CursorMut { index: self.len.checked_sub(1).unwrap_or(0), current: self.tail, list: self }
+ }
+
+ /// Returns `true` if the `LinkedList` is empty.
+ ///
+ /// This operation should compute in *O*(1) time.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::LinkedList;
+ ///
+ /// let mut dl = LinkedList::new();
+ /// assert!(dl.is_empty());
+ ///
+ /// dl.push_front("foo");
+ /// assert!(!dl.is_empty());
+ /// ```
+ #[inline]
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn is_empty(&self) -> bool {
+ self.head.is_none()
+ }
+
+ /// Returns the length of the `LinkedList`.
+ ///
+ /// This operation should compute in *O*(1) time.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::LinkedList;
+ ///
+ /// let mut dl = LinkedList::new();
+ ///
+ /// dl.push_front(2);
+ /// assert_eq!(dl.len(), 1);
+ ///
+ /// dl.push_front(1);
+ /// assert_eq!(dl.len(), 2);
+ ///
+ /// dl.push_back(3);
+ /// assert_eq!(dl.len(), 3);
+ /// ```
+ #[inline]
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn len(&self) -> usize {
+ self.len
+ }
+
+ /// Removes all elements from the `LinkedList`.
+ ///
+ /// This operation should compute in *O*(*n*) time.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::LinkedList;
+ ///
+ /// let mut dl = LinkedList::new();
+ ///
+ /// dl.push_front(2);
+ /// dl.push_front(1);
+ /// assert_eq!(dl.len(), 2);
+ /// assert_eq!(dl.front(), Some(&1));
+ ///
+ /// dl.clear();
+ /// assert_eq!(dl.len(), 0);
+ /// assert_eq!(dl.front(), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn clear(&mut self) {
+ *self = Self::new();
+ }
+
+ /// Returns `true` if the `LinkedList` contains an element equal to the
+ /// given value.
+ ///
+    /// This operation should compute in *O*(*n*) time.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::LinkedList;
+ ///
+ /// let mut list: LinkedList<u32> = LinkedList::new();
+ ///
+ /// list.push_back(0);
+ /// list.push_back(1);
+ /// list.push_back(2);
+ ///
+ /// assert_eq!(list.contains(&0), true);
+ /// assert_eq!(list.contains(&10), false);
+ /// ```
+ #[stable(feature = "linked_list_contains", since = "1.12.0")]
+ pub fn contains(&self, x: &T) -> bool
+ where
+ T: PartialEq<T>,
+ {
+ self.iter().any(|e| e == x)
+ }
+
+ /// Provides a reference to the front element, or `None` if the list is
+ /// empty.
+ ///
+ /// This operation should compute in *O*(1) time.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::LinkedList;
+ ///
+ /// let mut dl = LinkedList::new();
+ /// assert_eq!(dl.front(), None);
+ ///
+ /// dl.push_front(1);
+ /// assert_eq!(dl.front(), Some(&1));
+ /// ```
+ #[inline]
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn front(&self) -> Option<&T> {
+ unsafe { self.head.as_ref().map(|node| &node.as_ref().element) }
+ }
+
+ /// Provides a mutable reference to the front element, or `None` if the list
+ /// is empty.
+ ///
+ /// This operation should compute in *O*(1) time.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::LinkedList;
+ ///
+ /// let mut dl = LinkedList::new();
+ /// assert_eq!(dl.front(), None);
+ ///
+ /// dl.push_front(1);
+ /// assert_eq!(dl.front(), Some(&1));
+ ///
+ /// match dl.front_mut() {
+ /// None => {},
+ /// Some(x) => *x = 5,
+ /// }
+ /// assert_eq!(dl.front(), Some(&5));
+ /// ```
+ #[inline]
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn front_mut(&mut self) -> Option<&mut T> {
+ unsafe { self.head.as_mut().map(|node| &mut node.as_mut().element) }
+ }
+
+ /// Provides a reference to the back element, or `None` if the list is
+ /// empty.
+ ///
+ /// This operation should compute in *O*(1) time.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::LinkedList;
+ ///
+ /// let mut dl = LinkedList::new();
+ /// assert_eq!(dl.back(), None);
+ ///
+ /// dl.push_back(1);
+ /// assert_eq!(dl.back(), Some(&1));
+ /// ```
+ #[inline]
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn back(&self) -> Option<&T> {
+ unsafe { self.tail.as_ref().map(|node| &node.as_ref().element) }
+ }
+
+ /// Provides a mutable reference to the back element, or `None` if the list
+ /// is empty.
+ ///
+ /// This operation should compute in *O*(1) time.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::LinkedList;
+ ///
+ /// let mut dl = LinkedList::new();
+ /// assert_eq!(dl.back(), None);
+ ///
+ /// dl.push_back(1);
+ /// assert_eq!(dl.back(), Some(&1));
+ ///
+ /// match dl.back_mut() {
+ /// None => {},
+ /// Some(x) => *x = 5,
+ /// }
+ /// assert_eq!(dl.back(), Some(&5));
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn back_mut(&mut self) -> Option<&mut T> {
+ unsafe { self.tail.as_mut().map(|node| &mut node.as_mut().element) }
+ }
+
+    /// Adds an element to the front of the list.
+ ///
+ /// This operation should compute in *O*(1) time.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::LinkedList;
+ ///
+ /// let mut dl = LinkedList::new();
+ ///
+ /// dl.push_front(2);
+ /// assert_eq!(dl.front().unwrap(), &2);
+ ///
+ /// dl.push_front(1);
+ /// assert_eq!(dl.front().unwrap(), &1);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn push_front(&mut self, elt: T) {
+ self.push_front_node(Box::new(Node::new(elt)));
+ }
+
+ /// Removes the first element and returns it, or `None` if the list is
+ /// empty.
+ ///
+ /// This operation should compute in *O*(1) time.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::LinkedList;
+ ///
+ /// let mut d = LinkedList::new();
+ /// assert_eq!(d.pop_front(), None);
+ ///
+ /// d.push_front(1);
+ /// d.push_front(3);
+ /// assert_eq!(d.pop_front(), Some(3));
+ /// assert_eq!(d.pop_front(), Some(1));
+ /// assert_eq!(d.pop_front(), None);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn pop_front(&mut self) -> Option<T> {
+ self.pop_front_node().map(Node::into_element)
+ }
+
+ /// Appends an element to the back of a list.
+ ///
+ /// This operation should compute in *O*(1) time.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::LinkedList;
+ ///
+ /// let mut d = LinkedList::new();
+ /// d.push_back(1);
+ /// d.push_back(3);
+ /// assert_eq!(3, *d.back().unwrap());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn push_back(&mut self, elt: T) {
+ self.push_back_node(Box::new(Node::new(elt)));
+ }
+
+ /// Removes the last element from a list and returns it, or `None` if
+ /// it is empty.
+ ///
+ /// This operation should compute in *O*(1) time.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::LinkedList;
+ ///
+ /// let mut d = LinkedList::new();
+ /// assert_eq!(d.pop_back(), None);
+ /// d.push_back(1);
+ /// d.push_back(3);
+ /// assert_eq!(d.pop_back(), Some(3));
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn pop_back(&mut self) -> Option<T> {
+ self.pop_back_node().map(Node::into_element)
+ }
+
+    /// Splits the list into two at the given index. Returns everything after the given index,
+    /// including the element at that index.
+ ///
+ /// This operation should compute in *O*(*n*) time.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `at > len`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::LinkedList;
+ ///
+ /// let mut d = LinkedList::new();
+ ///
+ /// d.push_front(1);
+ /// d.push_front(2);
+ /// d.push_front(3);
+ ///
+ /// let mut split = d.split_off(2);
+ ///
+ /// assert_eq!(split.pop_front(), Some(1));
+ /// assert_eq!(split.pop_front(), None);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn split_off(&mut self, at: usize) -> LinkedList<T> {
+ let len = self.len();
+ assert!(at <= len, "Cannot split off at a nonexistent index");
+ if at == 0 {
+ return mem::take(self);
+ } else if at == len {
+ return Self::new();
+ }
+
+        // Below, we iterate towards the `at - 1`-th node (the new tail of the
+        // first part), either from the start or the end, depending on which
+        // would be faster.
+ let split_node = if at - 1 <= len - 1 - (at - 1) {
+ let mut iter = self.iter_mut();
+ // instead of skipping using .skip() (which creates a new struct),
+ // we skip manually so we can access the head field without
+ // depending on implementation details of Skip
+ for _ in 0..at - 1 {
+ iter.next();
+ }
+ iter.head
+ } else {
+ // better off starting from the end
+ let mut iter = self.iter_mut();
+ for _ in 0..len - 1 - (at - 1) {
+ iter.next_back();
+ }
+ iter.tail
+ };
+ unsafe { self.split_off_after_node(split_node, at) }
+ }
+
+ /// Removes the element at the given index and returns it.
+ ///
+ /// This operation should compute in *O*(*n*) time.
+ ///
+ /// # Panics
+    /// Panics if `at >= len`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(linked_list_remove)]
+ /// use std::collections::LinkedList;
+ ///
+ /// let mut d = LinkedList::new();
+ ///
+ /// d.push_front(1);
+ /// d.push_front(2);
+ /// d.push_front(3);
+ ///
+ /// assert_eq!(d.remove(1), 2);
+ /// assert_eq!(d.remove(0), 3);
+ /// assert_eq!(d.remove(0), 1);
+ /// ```
+ #[unstable(feature = "linked_list_remove", issue = "69210")]
+ pub fn remove(&mut self, at: usize) -> T {
+ let len = self.len();
+ assert!(at < len, "Cannot remove at an index outside of the list bounds");
+
+ // Below, we iterate towards the node at the given index, either from
+ // the start or the end, depending on which would be faster.
+ let offset_from_end = len - at - 1;
+ if at <= offset_from_end {
+ let mut cursor = self.cursor_front_mut();
+ for _ in 0..at {
+ cursor.move_next();
+ }
+ cursor.remove_current().unwrap()
+ } else {
+ let mut cursor = self.cursor_back_mut();
+ for _ in 0..offset_from_end {
+ cursor.move_prev();
+ }
+ cursor.remove_current().unwrap()
+ }
+ }
+
+ /// Creates an iterator which uses a closure to determine if an element should be removed.
+ ///
+ /// If the closure returns true, then the element is removed and yielded.
+ /// If the closure returns false, the element will remain in the list and will not be yielded
+ /// by the iterator.
+ ///
+ /// Note that `drain_filter` lets you mutate every element in the filter closure, regardless of
+ /// whether you choose to keep or remove it.
+ ///
+ /// # Examples
+ ///
+ /// Splitting a list into evens and odds, reusing the original list:
+ ///
+ /// ```
+ /// #![feature(drain_filter)]
+ /// use std::collections::LinkedList;
+ ///
+ /// let mut numbers: LinkedList<u32> = LinkedList::new();
+ /// numbers.extend(&[1, 2, 3, 4, 5, 6, 8, 9, 11, 13, 14, 15]);
+ ///
+ /// let evens = numbers.drain_filter(|x| *x % 2 == 0).collect::<LinkedList<_>>();
+ /// let odds = numbers;
+ ///
+ /// assert_eq!(evens.into_iter().collect::<Vec<_>>(), vec![2, 4, 6, 8, 14]);
+ /// assert_eq!(odds.into_iter().collect::<Vec<_>>(), vec![1, 3, 5, 9, 11, 13, 15]);
+ /// ```
+ #[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
+ pub fn drain_filter<F>(&mut self, filter: F) -> DrainFilter<'_, T, F>
+ where
+ F: FnMut(&mut T) -> bool,
+ {
+ // avoid borrow issues.
+ let it = self.head;
+ let old_len = self.len;
+
+ DrainFilter { list: self, it, pred: filter, idx: 0, old_len }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<#[may_dangle] T> Drop for LinkedList<T> {
+ fn drop(&mut self) {
+ struct DropGuard<'a, T>(&'a mut LinkedList<T>);
+
+ impl<'a, T> Drop for DropGuard<'a, T> {
+ fn drop(&mut self) {
+ // Continue the same loop we do below. This only runs when a destructor has
+ // panicked. If another one panics this will abort.
+ while self.0.pop_front_node().is_some() {}
+ }
+ }
+
+ while let Some(node) = self.pop_front_node() {
+ let guard = DropGuard(self);
+ drop(node);
+ mem::forget(guard);
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> Iterator for Iter<'a, T> {
+ type Item = &'a T;
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a T> {
+ if self.len == 0 {
+ None
+ } else {
+ self.head.map(|node| unsafe {
+ // Need an unbound lifetime to get 'a
+ let node = &*node.as_ptr();
+ self.len -= 1;
+ self.head = node.next;
+ &node.element
+ })
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (self.len, Some(self.len))
+ }
+
+ #[inline]
+ fn last(mut self) -> Option<&'a T> {
+ self.next_back()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> DoubleEndedIterator for Iter<'a, T> {
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a T> {
+ if self.len == 0 {
+ None
+ } else {
+ self.tail.map(|node| unsafe {
+ // Need an unbound lifetime to get 'a
+ let node = &*node.as_ptr();
+ self.len -= 1;
+ self.tail = node.prev;
+ &node.element
+ })
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> ExactSizeIterator for Iter<'_, T> {}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T> FusedIterator for Iter<'_, T> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> Iterator for IterMut<'a, T> {
+ type Item = &'a mut T;
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a mut T> {
+ if self.len == 0 {
+ None
+ } else {
+ self.head.map(|node| unsafe {
+ // Need an unbound lifetime to get 'a
+ let node = &mut *node.as_ptr();
+ self.len -= 1;
+ self.head = node.next;
+ &mut node.element
+ })
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (self.len, Some(self.len))
+ }
+
+ #[inline]
+ fn last(mut self) -> Option<&'a mut T> {
+ self.next_back()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> DoubleEndedIterator for IterMut<'a, T> {
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a mut T> {
+ if self.len == 0 {
+ None
+ } else {
+ self.tail.map(|node| unsafe {
+ // Need an unbound lifetime to get 'a
+ let node = &mut *node.as_ptr();
+ self.len -= 1;
+ self.tail = node.prev;
+ &mut node.element
+ })
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> ExactSizeIterator for IterMut<'_, T> {}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T> FusedIterator for IterMut<'_, T> {}
+
+/// A cursor over a `LinkedList`.
+///
+/// A `Cursor` is like an iterator, except that it can freely seek back-and-forth.
+///
+/// Cursors always rest between two elements in the list, and index in a logically circular way.
+/// To accommodate this, there is a "ghost" non-element that yields `None` between the head and
+/// tail of the list.
+///
+/// When created, cursors start at the front of the list, or the "ghost" non-element if the list is empty.
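+///
+/// # Examples
+///
+/// A minimal sketch of the circular movement described above (illustrative
+/// only; it relies on the unstable `linked_list_cursors` feature):
+///
+/// ```
+/// #![feature(linked_list_cursors)]
+/// use std::collections::LinkedList;
+///
+/// let list = LinkedList::from(['a', 'b']);
+/// let mut cursor = list.cursor_front();
+/// assert_eq!(cursor.current(), Some(&'a'));
+/// cursor.move_next();
+/// assert_eq!(cursor.current(), Some(&'b'));
+/// cursor.move_next();
+/// // Past the back element the cursor rests on the "ghost" non-element...
+/// assert_eq!(cursor.current(), None);
+/// cursor.move_next();
+/// // ...and moving once more wraps around to the front.
+/// assert_eq!(cursor.current(), Some(&'a'));
+/// ```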
+#[unstable(feature = "linked_list_cursors", issue = "58533")]
+pub struct Cursor<'a, T: 'a> {
+ index: usize,
+ current: Option<NonNull<Node<T>>>,
+ list: &'a LinkedList<T>,
+}
+
+#[unstable(feature = "linked_list_cursors", issue = "58533")]
+impl<T> Clone for Cursor<'_, T> {
+ fn clone(&self) -> Self {
+ let Cursor { index, current, list } = *self;
+ Cursor { index, current, list }
+ }
+}
+
+#[unstable(feature = "linked_list_cursors", issue = "58533")]
+impl<T: fmt::Debug> fmt::Debug for Cursor<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("Cursor").field(&self.list).field(&self.index()).finish()
+ }
+}
+
+/// A cursor over a `LinkedList` with editing operations.
+///
+/// A `Cursor` is like an iterator, except that it can freely seek back-and-forth, and can
+/// safely mutate the list during iteration. This is because the lifetime of its yielded
+/// references is tied to its own lifetime, instead of just the underlying list. This means
+/// cursors cannot yield multiple elements at once.
+///
+/// Cursors always rest between two elements in the list, and index in a logically circular way.
+/// To accommodate this, there is a "ghost" non-element that yields `None` between the head and
+/// tail of the list.
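+///
+/// # Examples
+///
+/// An illustrative walk-and-mutate sketch (not part of the upstream docs;
+/// assumes the unstable `linked_list_cursors` feature):
+///
+/// ```
+/// #![feature(linked_list_cursors)]
+/// use std::collections::LinkedList;
+///
+/// let mut list = LinkedList::from([1, 2, 3]);
+/// let mut cursor = list.cursor_front_mut();
+/// // Walk the list front to back, scaling each element in place.
+/// while let Some(value) = cursor.current() {
+///     *value *= 10;
+///     cursor.move_next();
+/// }
+/// assert_eq!(list.into_iter().collect::<Vec<_>>(), vec![10, 20, 30]);
+/// ```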
+#[unstable(feature = "linked_list_cursors", issue = "58533")]
+pub struct CursorMut<'a, T: 'a> {
+ index: usize,
+ current: Option<NonNull<Node<T>>>,
+ list: &'a mut LinkedList<T>,
+}
+
+#[unstable(feature = "linked_list_cursors", issue = "58533")]
+impl<T: fmt::Debug> fmt::Debug for CursorMut<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("CursorMut").field(&self.list).field(&self.index()).finish()
+ }
+}
+
+impl<'a, T> Cursor<'a, T> {
+ /// Returns the cursor position index within the `LinkedList`.
+ ///
+ /// This returns `None` if the cursor is currently pointing to the
+ /// "ghost" non-element.
+ #[must_use]
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn index(&self) -> Option<usize> {
+ let _ = self.current?;
+ Some(self.index)
+ }
+
+ /// Moves the cursor to the next element of the `LinkedList`.
+ ///
+ /// If the cursor is pointing to the "ghost" non-element then this will move it to
+ /// the first element of the `LinkedList`. If it is pointing to the last
+ /// element of the `LinkedList` then this will move it to the "ghost" non-element.
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn move_next(&mut self) {
+ match self.current.take() {
+ // We had no current element; the cursor was sitting at the start position
+ // Next element should be the head of the list
+ None => {
+ self.current = self.list.head;
+ self.index = 0;
+ }
+ // We had a previous element, so let's go to its next
+ Some(current) => unsafe {
+ self.current = current.as_ref().next;
+ self.index += 1;
+ },
+ }
+ }
+
+ /// Moves the cursor to the previous element of the `LinkedList`.
+ ///
+ /// If the cursor is pointing to the "ghost" non-element then this will move it to
+ /// the last element of the `LinkedList`. If it is pointing to the first
+ /// element of the `LinkedList` then this will move it to the "ghost" non-element.
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn move_prev(&mut self) {
+ match self.current.take() {
+            // No current element: the cursor is on the "ghost" non-element,
+            // so wrap around to the tail of the list.
+ None => {
+ self.current = self.list.tail;
+ self.index = self.list.len().checked_sub(1).unwrap_or(0);
+ }
+            // We have a current element, so step back to the one before it.
+ Some(current) => unsafe {
+ self.current = current.as_ref().prev;
+ self.index = self.index.checked_sub(1).unwrap_or_else(|| self.list.len());
+ },
+ }
+ }
+
+ /// Returns a reference to the element that the cursor is currently
+ /// pointing to.
+ ///
+ /// This returns `None` if the cursor is currently pointing to the
+ /// "ghost" non-element.
+ #[must_use]
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn current(&self) -> Option<&'a T> {
+ unsafe { self.current.map(|current| &(*current.as_ptr()).element) }
+ }
+
+ /// Returns a reference to the next element.
+ ///
+ /// If the cursor is pointing to the "ghost" non-element then this returns
+ /// the first element of the `LinkedList`. If it is pointing to the last
+ /// element of the `LinkedList` then this returns `None`.
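+    ///
+    /// # Examples
+    ///
+    /// A short sketch of peeking around the cursor (illustrative; relies on
+    /// the unstable `linked_list_cursors` feature):
+    ///
+    /// ```
+    /// #![feature(linked_list_cursors)]
+    /// use std::collections::LinkedList;
+    ///
+    /// let list = LinkedList::from([1, 2]);
+    /// let mut cursor = list.cursor_front();
+    /// assert_eq!(cursor.peek_next(), Some(&2));
+    /// cursor.move_next();
+    /// // On the last element there is nothing left to peek at...
+    /// assert_eq!(cursor.peek_next(), None);
+    /// cursor.move_next();
+    /// // ...while from the "ghost" non-element, peeking yields the front element.
+    /// assert_eq!(cursor.peek_next(), Some(&1));
+    /// ```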
+ #[must_use]
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn peek_next(&self) -> Option<&'a T> {
+ unsafe {
+ let next = match self.current {
+ None => self.list.head,
+ Some(current) => current.as_ref().next,
+ };
+ next.map(|next| &(*next.as_ptr()).element)
+ }
+ }
+
+ /// Returns a reference to the previous element.
+ ///
+ /// If the cursor is pointing to the "ghost" non-element then this returns
+ /// the last element of the `LinkedList`. If it is pointing to the first
+ /// element of the `LinkedList` then this returns `None`.
+ #[must_use]
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn peek_prev(&self) -> Option<&'a T> {
+ unsafe {
+ let prev = match self.current {
+ None => self.list.tail,
+ Some(current) => current.as_ref().prev,
+ };
+ prev.map(|prev| &(*prev.as_ptr()).element)
+ }
+ }
+
+ /// Provides a reference to the front element of the cursor's parent list,
+ /// or None if the list is empty.
+ #[must_use]
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn front(&self) -> Option<&'a T> {
+ self.list.front()
+ }
+
+ /// Provides a reference to the back element of the cursor's parent list,
+ /// or None if the list is empty.
+ #[must_use]
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn back(&self) -> Option<&'a T> {
+ self.list.back()
+ }
+}
+
+impl<'a, T> CursorMut<'a, T> {
+ /// Returns the cursor position index within the `LinkedList`.
+ ///
+ /// This returns `None` if the cursor is currently pointing to the
+ /// "ghost" non-element.
+ #[must_use]
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn index(&self) -> Option<usize> {
+ let _ = self.current?;
+ Some(self.index)
+ }
+
+ /// Moves the cursor to the next element of the `LinkedList`.
+ ///
+ /// If the cursor is pointing to the "ghost" non-element then this will move it to
+ /// the first element of the `LinkedList`. If it is pointing to the last
+ /// element of the `LinkedList` then this will move it to the "ghost" non-element.
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn move_next(&mut self) {
+ match self.current.take() {
+ // We had no current element; the cursor was sitting at the start position
+ // Next element should be the head of the list
+ None => {
+ self.current = self.list.head;
+ self.index = 0;
+ }
+ // We had a previous element, so let's go to its next
+ Some(current) => unsafe {
+ self.current = current.as_ref().next;
+ self.index += 1;
+ },
+ }
+ }
+
+ /// Moves the cursor to the previous element of the `LinkedList`.
+ ///
+ /// If the cursor is pointing to the "ghost" non-element then this will move it to
+ /// the last element of the `LinkedList`. If it is pointing to the first
+ /// element of the `LinkedList` then this will move it to the "ghost" non-element.
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn move_prev(&mut self) {
+ match self.current.take() {
+            // No current element: the cursor is on the "ghost" non-element,
+            // so wrap around to the tail of the list.
+ None => {
+ self.current = self.list.tail;
+ self.index = self.list.len().checked_sub(1).unwrap_or(0);
+ }
+            // We have a current element, so step back to the one before it.
+ Some(current) => unsafe {
+ self.current = current.as_ref().prev;
+ self.index = self.index.checked_sub(1).unwrap_or_else(|| self.list.len());
+ },
+ }
+ }
+
+ /// Returns a reference to the element that the cursor is currently
+ /// pointing to.
+ ///
+ /// This returns `None` if the cursor is currently pointing to the
+ /// "ghost" non-element.
+ #[must_use]
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn current(&mut self) -> Option<&mut T> {
+ unsafe { self.current.map(|current| &mut (*current.as_ptr()).element) }
+ }
+
+ /// Returns a reference to the next element.
+ ///
+ /// If the cursor is pointing to the "ghost" non-element then this returns
+ /// the first element of the `LinkedList`. If it is pointing to the last
+ /// element of the `LinkedList` then this returns `None`.
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn peek_next(&mut self) -> Option<&mut T> {
+ unsafe {
+ let next = match self.current {
+ None => self.list.head,
+ Some(current) => current.as_ref().next,
+ };
+ next.map(|next| &mut (*next.as_ptr()).element)
+ }
+ }
+
+ /// Returns a reference to the previous element.
+ ///
+ /// If the cursor is pointing to the "ghost" non-element then this returns
+ /// the last element of the `LinkedList`. If it is pointing to the first
+ /// element of the `LinkedList` then this returns `None`.
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn peek_prev(&mut self) -> Option<&mut T> {
+ unsafe {
+ let prev = match self.current {
+ None => self.list.tail,
+ Some(current) => current.as_ref().prev,
+ };
+ prev.map(|prev| &mut (*prev.as_ptr()).element)
+ }
+ }
+
+ /// Returns a read-only cursor pointing to the current element.
+ ///
+ /// The lifetime of the returned `Cursor` is bound to that of the
+ /// `CursorMut`, which means it cannot outlive the `CursorMut` and that the
+ /// `CursorMut` is frozen for the lifetime of the `Cursor`.
+ #[must_use]
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn as_cursor(&self) -> Cursor<'_, T> {
+ Cursor { list: self.list, current: self.current, index: self.index }
+ }
+}
+
+// Now the list editing operations
+
+impl<'a, T> CursorMut<'a, T> {
+ /// Inserts a new element into the `LinkedList` after the current one.
+ ///
+ /// If the cursor is pointing at the "ghost" non-element then the new element is
+ /// inserted at the front of the `LinkedList`.
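+    ///
+    /// # Examples
+    ///
+    /// An illustrative sketch of the splice position (not part of the upstream
+    /// docs; assumes the unstable `linked_list_cursors` feature):
+    ///
+    /// ```
+    /// #![feature(linked_list_cursors)]
+    /// use std::collections::LinkedList;
+    ///
+    /// let mut list = LinkedList::from([1, 3]);
+    /// let mut cursor = list.cursor_front_mut();
+    /// // The cursor stays on `1`; the new element is spliced in right after it.
+    /// cursor.insert_after(2);
+    /// assert_eq!(cursor.peek_next(), Some(&mut 2));
+    /// assert_eq!(list.into_iter().collect::<Vec<_>>(), vec![1, 2, 3]);
+    /// ```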
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn insert_after(&mut self, item: T) {
+ unsafe {
+ let spliced_node = Box::leak(Box::new(Node::new(item))).into();
+ let node_next = match self.current {
+ None => self.list.head,
+ Some(node) => node.as_ref().next,
+ };
+ self.list.splice_nodes(self.current, node_next, spliced_node, spliced_node, 1);
+ if self.current.is_none() {
+ // The "ghost" non-element's index has changed.
+ self.index = self.list.len;
+ }
+ }
+ }
+
+ /// Inserts a new element into the `LinkedList` before the current one.
+ ///
+ /// If the cursor is pointing at the "ghost" non-element then the new element is
+ /// inserted at the end of the `LinkedList`.
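+    ///
+    /// # Examples
+    ///
+    /// An illustrative sketch (not part of the upstream docs; assumes the
+    /// unstable `linked_list_cursors` feature):
+    ///
+    /// ```
+    /// #![feature(linked_list_cursors)]
+    /// use std::collections::LinkedList;
+    ///
+    /// let mut list = LinkedList::from([1, 3]);
+    /// let mut cursor = list.cursor_back_mut();
+    /// // The cursor stays on `3`; the new element is spliced in right before it.
+    /// cursor.insert_before(2);
+    /// assert_eq!(list.into_iter().collect::<Vec<_>>(), vec![1, 2, 3]);
+    /// ```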
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn insert_before(&mut self, item: T) {
+ unsafe {
+ let spliced_node = Box::leak(Box::new(Node::new(item))).into();
+ let node_prev = match self.current {
+ None => self.list.tail,
+ Some(node) => node.as_ref().prev,
+ };
+ self.list.splice_nodes(node_prev, self.current, spliced_node, spliced_node, 1);
+ self.index += 1;
+ }
+ }
+
+ /// Removes the current element from the `LinkedList`.
+ ///
+ /// The element that was removed is returned, and the cursor is
+ /// moved to point to the next element in the `LinkedList`.
+ ///
+ /// If the cursor is currently pointing to the "ghost" non-element then no element
+ /// is removed and `None` is returned.
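+    ///
+    /// # Examples
+    ///
+    /// An illustrative sketch of removal and cursor movement (not part of the
+    /// upstream docs; assumes the unstable `linked_list_cursors` feature):
+    ///
+    /// ```
+    /// #![feature(linked_list_cursors)]
+    /// use std::collections::LinkedList;
+    ///
+    /// let mut list = LinkedList::from([1, 2, 3]);
+    /// let mut cursor = list.cursor_front_mut();
+    /// cursor.move_next();
+    /// // Removes `2` and leaves the cursor on the following element, `3`.
+    /// assert_eq!(cursor.remove_current(), Some(2));
+    /// assert_eq!(cursor.current(), Some(&mut 3));
+    /// assert_eq!(list.into_iter().collect::<Vec<_>>(), vec![1, 3]);
+    /// ```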
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn remove_current(&mut self) -> Option<T> {
+ let unlinked_node = self.current?;
+ unsafe {
+ self.current = unlinked_node.as_ref().next;
+ self.list.unlink_node(unlinked_node);
+ let unlinked_node = Box::from_raw(unlinked_node.as_ptr());
+ Some(unlinked_node.element)
+ }
+ }
+
+ /// Removes the current element from the `LinkedList` without deallocating the list node.
+ ///
+ /// The node that was removed is returned as a new `LinkedList` containing only this node.
+ /// The cursor is moved to point to the next element in the current `LinkedList`.
+ ///
+ /// If the cursor is currently pointing to the "ghost" non-element then no element
+ /// is removed and `None` is returned.
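+    ///
+    /// # Examples
+    ///
+    /// An illustrative sketch (not part of the upstream docs; assumes the
+    /// unstable `linked_list_cursors` feature):
+    ///
+    /// ```
+    /// #![feature(linked_list_cursors)]
+    /// use std::collections::LinkedList;
+    ///
+    /// let mut list = LinkedList::from([1, 2, 3]);
+    /// let mut cursor = list.cursor_front_mut();
+    /// // The front node is detached as its own single-element list.
+    /// let removed = cursor.remove_current_as_list().unwrap();
+    /// assert_eq!(removed.into_iter().collect::<Vec<_>>(), vec![1]);
+    /// assert_eq!(list.into_iter().collect::<Vec<_>>(), vec![2, 3]);
+    /// ```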
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn remove_current_as_list(&mut self) -> Option<LinkedList<T>> {
+ let mut unlinked_node = self.current?;
+ unsafe {
+ self.current = unlinked_node.as_ref().next;
+ self.list.unlink_node(unlinked_node);
+
+ unlinked_node.as_mut().prev = None;
+ unlinked_node.as_mut().next = None;
+ Some(LinkedList {
+ head: Some(unlinked_node),
+ tail: Some(unlinked_node),
+ len: 1,
+ marker: PhantomData,
+ })
+ }
+ }
+
+ /// Inserts the elements from the given `LinkedList` after the current one.
+ ///
+ /// If the cursor is pointing at the "ghost" non-element then the new elements are
+ /// inserted at the start of the `LinkedList`.
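+    ///
+    /// # Examples
+    ///
+    /// An illustrative sketch (not part of the upstream docs; assumes the
+    /// unstable `linked_list_cursors` feature):
+    ///
+    /// ```
+    /// #![feature(linked_list_cursors)]
+    /// use std::collections::LinkedList;
+    ///
+    /// let mut list = LinkedList::from([1, 4]);
+    /// let mut cursor = list.cursor_front_mut();
+    /// // All the nodes of the spliced list are inserted right after `1`.
+    /// cursor.splice_after(LinkedList::from([2, 3]));
+    /// assert_eq!(list.into_iter().collect::<Vec<_>>(), vec![1, 2, 3, 4]);
+    /// ```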
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn splice_after(&mut self, list: LinkedList<T>) {
+ unsafe {
+ let (splice_head, splice_tail, splice_len) = match list.detach_all_nodes() {
+ Some(parts) => parts,
+ _ => return,
+ };
+ let node_next = match self.current {
+ None => self.list.head,
+ Some(node) => node.as_ref().next,
+ };
+ self.list.splice_nodes(self.current, node_next, splice_head, splice_tail, splice_len);
+ if self.current.is_none() {
+ // The "ghost" non-element's index has changed.
+ self.index = self.list.len;
+ }
+ }
+ }
+
+ /// Inserts the elements from the given `LinkedList` before the current one.
+ ///
+ /// If the cursor is pointing at the "ghost" non-element then the new elements are
+ /// inserted at the end of the `LinkedList`.
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn splice_before(&mut self, list: LinkedList<T>) {
+ unsafe {
+ let (splice_head, splice_tail, splice_len) = match list.detach_all_nodes() {
+ Some(parts) => parts,
+ _ => return,
+ };
+ let node_prev = match self.current {
+ None => self.list.tail,
+ Some(node) => node.as_ref().prev,
+ };
+ self.list.splice_nodes(node_prev, self.current, splice_head, splice_tail, splice_len);
+ self.index += splice_len;
+ }
+ }
+
+ /// Splits the list into two after the current element. This will return a
+    /// new list consisting of everything after the cursor, with the original
+    /// list retaining the current element and everything before it.
+ ///
+ /// If the cursor is pointing at the "ghost" non-element then the entire contents
+ /// of the `LinkedList` are moved.
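+    ///
+    /// # Examples
+    ///
+    /// An illustrative sketch (not part of the upstream docs; assumes the
+    /// unstable `linked_list_cursors` feature):
+    ///
+    /// ```
+    /// #![feature(linked_list_cursors)]
+    /// use std::collections::LinkedList;
+    ///
+    /// let mut list = LinkedList::from([1, 2, 3, 4]);
+    /// let mut cursor = list.cursor_front_mut();
+    /// cursor.move_next();
+    /// // The cursor is on `2`; everything after it moves into the new list.
+    /// let tail = cursor.split_after();
+    /// assert_eq!(tail.into_iter().collect::<Vec<_>>(), vec![3, 4]);
+    /// assert_eq!(list.into_iter().collect::<Vec<_>>(), vec![1, 2]);
+    /// ```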
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn split_after(&mut self) -> LinkedList<T> {
+ let split_off_idx = if self.index == self.list.len { 0 } else { self.index + 1 };
+ if self.index == self.list.len {
+ // The "ghost" non-element's index has changed to 0.
+ self.index = 0;
+ }
+ unsafe { self.list.split_off_after_node(self.current, split_off_idx) }
+ }
+
+ /// Splits the list into two before the current element. This will return a
+    /// new list consisting of everything before the cursor, with the original
+    /// list retaining the current element and everything after it.
+ ///
+ /// If the cursor is pointing at the "ghost" non-element then the entire contents
+ /// of the `LinkedList` are moved.
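+    ///
+    /// # Examples
+    ///
+    /// An illustrative sketch (not part of the upstream docs; assumes the
+    /// unstable `linked_list_cursors` feature):
+    ///
+    /// ```
+    /// #![feature(linked_list_cursors)]
+    /// use std::collections::LinkedList;
+    ///
+    /// let mut list = LinkedList::from([1, 2, 3, 4]);
+    /// let mut cursor = list.cursor_front_mut();
+    /// cursor.move_next();
+    /// cursor.move_next();
+    /// // The cursor is on `3`; everything before it moves into the new list.
+    /// let head = cursor.split_before();
+    /// assert_eq!(head.into_iter().collect::<Vec<_>>(), vec![1, 2]);
+    /// assert_eq!(list.into_iter().collect::<Vec<_>>(), vec![3, 4]);
+    /// ```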
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn split_before(&mut self) -> LinkedList<T> {
+ let split_off_idx = self.index;
+ self.index = 0;
+ unsafe { self.list.split_off_before_node(self.current, split_off_idx) }
+ }
+
+ /// Appends an element to the front of the cursor's parent list. The node
+ /// that the cursor points to is unchanged, even if it is the "ghost" node.
+ ///
+ /// This operation should compute in *O*(1) time.
+    // `push_front` continues to point to the "ghost" when it adds a node, to mimic
+    // the behavior of `insert_before` on an empty list.
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn push_front(&mut self, elt: T) {
+ // Safety: We know that `push_front` does not change the position in
+ // memory of other nodes. This ensures that `self.current` remains
+ // valid.
+ self.list.push_front(elt);
+ self.index += 1;
+ }
+
+ /// Appends an element to the back of the cursor's parent list. The node
+ /// that the cursor points to is unchanged, even if it is the "ghost" node.
+ ///
+ /// This operation should compute in *O*(1) time.
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn push_back(&mut self, elt: T) {
+ // Safety: We know that `push_back` does not change the position in
+ // memory of other nodes. This ensures that `self.current` remains
+ // valid.
+ self.list.push_back(elt);
+ if self.current().is_none() {
+ // The index of "ghost" is the length of the list, so we just need
+ // to increment self.index to reflect the new length of the list.
+ self.index += 1;
+ }
+ }
+
+ /// Removes the first element from the cursor's parent list and returns it,
+ /// or None if the list is empty. The element the cursor points to remains
+ /// unchanged, unless it was pointing to the front element. In that case, it
+ /// points to the new front element.
+ ///
+ /// This operation should compute in *O*(1) time.
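+    ///
+    /// # Examples
+    ///
+    /// An illustrative sketch (not part of the upstream docs; assumes the
+    /// unstable `linked_list_cursors` feature):
+    ///
+    /// ```
+    /// #![feature(linked_list_cursors)]
+    /// use std::collections::LinkedList;
+    ///
+    /// let mut list = LinkedList::from([1, 2, 3]);
+    /// let mut cursor = list.cursor_front_mut();
+    /// assert_eq!(cursor.pop_front(), Some(1));
+    /// // The cursor was on the old front element, so it now rests on the new one.
+    /// assert_eq!(cursor.current(), Some(&mut 2));
+    /// ```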
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn pop_front(&mut self) -> Option<T> {
+        // We can't just check whether `self.current` is `None`: it can be
+        // `None` (the "ghost" non-element) while the list is non-empty, so we
+        // must check the list itself.
+ if self.list.is_empty() {
+ None
+ } else {
+ // We can't point to the node that we pop. Copying the behavior of
+            // `remove_current`, we move on to the next node in the sequence.
+ // If the list is of length 1 then we end pointing to the "ghost"
+ // node at index 0, which is expected.
+ if self.list.head == self.current {
+ self.move_next();
+ } else {
+ self.index -= 1;
+ }
+ self.list.pop_front()
+ }
+ }
+
+ /// Removes the last element from the cursor's parent list and returns it,
+ /// or None if the list is empty. The element the cursor points to remains
+ /// unchanged, unless it was pointing to the back element. In that case, it
+ /// points to the "ghost" element.
+ ///
+ /// This operation should compute in *O*(1) time.
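+    ///
+    /// # Examples
+    ///
+    /// An illustrative sketch (not part of the upstream docs; assumes the
+    /// unstable `linked_list_cursors` feature):
+    ///
+    /// ```
+    /// #![feature(linked_list_cursors)]
+    /// use std::collections::LinkedList;
+    ///
+    /// let mut list = LinkedList::from([1, 2, 3]);
+    /// let mut cursor = list.cursor_back_mut();
+    /// assert_eq!(cursor.pop_back(), Some(3));
+    /// // The cursor was on the old back element, so it now rests on the "ghost".
+    /// assert_eq!(cursor.current(), None);
+    /// assert_eq!(list.back(), Some(&2));
+    /// ```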
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn pop_back(&mut self) -> Option<T> {
+ if self.list.is_empty() {
+ None
+ } else {
+ if self.list.tail == self.current {
+ // The index now reflects the length of the list. It was the
+ // length of the list minus 1, but now the list is 1 smaller. No
+ // change is needed for `index`.
+ self.current = None;
+ } else if self.current.is_none() {
+ self.index = self.list.len - 1;
+ }
+ self.list.pop_back()
+ }
+ }
+
+ /// Provides a reference to the front element of the cursor's parent list,
+ /// or None if the list is empty.
+ #[must_use]
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn front(&self) -> Option<&T> {
+ self.list.front()
+ }
+
+ /// Provides a mutable reference to the front element of the cursor's
+ /// parent list, or None if the list is empty.
+ #[must_use]
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn front_mut(&mut self) -> Option<&mut T> {
+ self.list.front_mut()
+ }
+
+ /// Provides a reference to the back element of the cursor's parent list,
+ /// or None if the list is empty.
+ #[must_use]
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn back(&self) -> Option<&T> {
+ self.list.back()
+ }
+
+    /// Provides a mutable reference to the back element of the cursor's parent
+ /// list, or `None` if the list is empty.
+ ///
+    /// # Examples
+    ///
+    /// Building and mutating a list with a cursor, then getting the back element:
+    ///
+    /// ```
+ /// #![feature(linked_list_cursors)]
+ /// use std::collections::LinkedList;
+ /// let mut dl = LinkedList::new();
+ /// dl.push_front(3);
+ /// dl.push_front(2);
+ /// dl.push_front(1);
+ /// let mut cursor = dl.cursor_front_mut();
+ /// *cursor.current().unwrap() = 99;
+ /// *cursor.back_mut().unwrap() = 0;
+ /// let mut contents = dl.into_iter();
+ /// assert_eq!(contents.next(), Some(99));
+ /// assert_eq!(contents.next(), Some(2));
+ /// assert_eq!(contents.next(), Some(0));
+ /// assert_eq!(contents.next(), None);
+ /// ```
+ #[must_use]
+ #[unstable(feature = "linked_list_cursors", issue = "58533")]
+ pub fn back_mut(&mut self) -> Option<&mut T> {
+ self.list.back_mut()
+ }
+}
+
+/// An iterator produced by calling `drain_filter` on `LinkedList`.
+#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
+pub struct DrainFilter<'a, T: 'a, F: 'a>
+where
+ F: FnMut(&mut T) -> bool,
+{
+ list: &'a mut LinkedList<T>,
+ it: Option<NonNull<Node<T>>>,
+ pred: F,
+ idx: usize,
+ old_len: usize,
+}
+
+#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
+impl<T, F> Iterator for DrainFilter<'_, T, F>
+where
+ F: FnMut(&mut T) -> bool,
+{
+ type Item = T;
+
+ fn next(&mut self) -> Option<T> {
+ while let Some(mut node) = self.it {
+ unsafe {
+ self.it = node.as_ref().next;
+ self.idx += 1;
+
+ if (self.pred)(&mut node.as_mut().element) {
+ // `unlink_node` is okay with aliasing `element` references.
+ self.list.unlink_node(node);
+ return Some(Box::from_raw(node.as_ptr()).element);
+ }
+ }
+ }
+
+ None
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (0, Some(self.old_len - self.idx))
+ }
+}
+
+#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
+impl<T, F> Drop for DrainFilter<'_, T, F>
+where
+ F: FnMut(&mut T) -> bool,
+{
+ fn drop(&mut self) {
+ struct DropGuard<'r, 'a, T, F>(&'r mut DrainFilter<'a, T, F>)
+ where
+ F: FnMut(&mut T) -> bool;
+
+ impl<'r, 'a, T, F> Drop for DropGuard<'r, 'a, T, F>
+ where
+ F: FnMut(&mut T) -> bool,
+ {
+ fn drop(&mut self) {
+ self.0.for_each(drop);
+ }
+ }
+
+ while let Some(item) = self.next() {
+ let guard = DropGuard(self);
+ drop(item);
+ mem::forget(guard);
+ }
+ }
+}
+
+#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
+impl<T: fmt::Debug, F> fmt::Debug for DrainFilter<'_, T, F>
+where
+ F: FnMut(&mut T) -> bool,
+{
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("DrainFilter").field(&self.list).finish()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Iterator for IntoIter<T> {
+ type Item = T;
+
+ #[inline]
+ fn next(&mut self) -> Option<T> {
+ self.list.pop_front()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (self.list.len, Some(self.list.len))
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> DoubleEndedIterator for IntoIter<T> {
+ #[inline]
+ fn next_back(&mut self) -> Option<T> {
+ self.list.pop_back()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> ExactSizeIterator for IntoIter<T> {}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T> FusedIterator for IntoIter<T> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> FromIterator<T> for LinkedList<T> {
+ fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Self {
+ let mut list = Self::new();
+ list.extend(iter);
+ list
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> IntoIterator for LinkedList<T> {
+ type Item = T;
+ type IntoIter = IntoIter<T>;
+
+ /// Consumes the list into an iterator yielding elements by value.
+ #[inline]
+ fn into_iter(self) -> IntoIter<T> {
+ IntoIter { list: self }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> IntoIterator for &'a LinkedList<T> {
+ type Item = &'a T;
+ type IntoIter = Iter<'a, T>;
+
+ fn into_iter(self) -> Iter<'a, T> {
+ self.iter()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> IntoIterator for &'a mut LinkedList<T> {
+ type Item = &'a mut T;
+ type IntoIter = IterMut<'a, T>;
+
+ fn into_iter(self) -> IterMut<'a, T> {
+ self.iter_mut()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Extend<T> for LinkedList<T> {
+ fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
+ <Self as SpecExtend<I>>::spec_extend(self, iter);
+ }
+
+ #[inline]
+ fn extend_one(&mut self, elem: T) {
+ self.push_back(elem);
+ }
+}
+
+impl<I: IntoIterator> SpecExtend<I> for LinkedList<I::Item> {
+ default fn spec_extend(&mut self, iter: I) {
+ iter.into_iter().for_each(move |elt| self.push_back(elt));
+ }
+}
+
+impl<T> SpecExtend<LinkedList<T>> for LinkedList<T> {
+ fn spec_extend(&mut self, ref mut other: LinkedList<T>) {
+ self.append(other);
+ }
+}
+
+#[stable(feature = "extend_ref", since = "1.2.0")]
+impl<'a, T: 'a + Copy> Extend<&'a T> for LinkedList<T> {
+ fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
+ self.extend(iter.into_iter().cloned());
+ }
+
+ #[inline]
+ fn extend_one(&mut self, &elem: &'a T) {
+ self.push_back(elem);
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: PartialEq> PartialEq for LinkedList<T> {
+ fn eq(&self, other: &Self) -> bool {
+ self.len() == other.len() && self.iter().eq(other)
+ }
+
+ fn ne(&self, other: &Self) -> bool {
+ self.len() != other.len() || self.iter().ne(other)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Eq> Eq for LinkedList<T> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: PartialOrd> PartialOrd for LinkedList<T> {
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ self.iter().partial_cmp(other)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Ord> Ord for LinkedList<T> {
+ #[inline]
+ fn cmp(&self, other: &Self) -> Ordering {
+ self.iter().cmp(other)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Clone> Clone for LinkedList<T> {
+ fn clone(&self) -> Self {
+ self.iter().cloned().collect()
+ }
+
+ fn clone_from(&mut self, other: &Self) {
+ let mut iter_other = other.iter();
+ if self.len() > other.len() {
+ self.split_off(other.len());
+ }
+ for (elem, elem_other) in self.iter_mut().zip(&mut iter_other) {
+ elem.clone_from(elem_other);
+ }
+ if !iter_other.is_empty() {
+ self.extend(iter_other.cloned());
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: fmt::Debug> fmt::Debug for LinkedList<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self).finish()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Hash> Hash for LinkedList<T> {
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ state.write_length_prefix(self.len());
+ for elt in self {
+ elt.hash(state);
+ }
+ }
+}
+
+#[stable(feature = "std_collections_from_array", since = "1.56.0")]
+impl<T, const N: usize> From<[T; N]> for LinkedList<T> {
+ /// Converts a `[T; N]` into a `LinkedList<T>`.
+ ///
+ /// ```
+ /// use std::collections::LinkedList;
+ ///
+ /// let list1 = LinkedList::from([1, 2, 3, 4]);
+ /// let list2: LinkedList<_> = [1, 2, 3, 4].into();
+ /// assert_eq!(list1, list2);
+ /// ```
+ fn from(arr: [T; N]) -> Self {
+ Self::from_iter(arr)
+ }
+}
+
+// Ensure that `LinkedList` and its read-only iterators are covariant in their type parameters.
+#[allow(dead_code)]
+fn assert_covariance() {
+ fn a<'a>(x: LinkedList<&'static str>) -> LinkedList<&'a str> {
+ x
+ }
+ fn b<'i, 'a>(x: Iter<'i, &'static str>) -> Iter<'i, &'a str> {
+ x
+ }
+ fn c<'a>(x: IntoIter<&'static str>) -> IntoIter<&'a str> {
+ x
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<T: Send> Send for LinkedList<T> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<T: Sync> Sync for LinkedList<T> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<T: Sync> Send for Iter<'_, T> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<T: Sync> Sync for Iter<'_, T> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<T: Send> Send for IterMut<'_, T> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<T: Sync> Sync for IterMut<'_, T> {}
+
+#[unstable(feature = "linked_list_cursors", issue = "58533")]
+unsafe impl<T: Sync> Send for Cursor<'_, T> {}
+
+#[unstable(feature = "linked_list_cursors", issue = "58533")]
+unsafe impl<T: Sync> Sync for Cursor<'_, T> {}
+
+#[unstable(feature = "linked_list_cursors", issue = "58533")]
+unsafe impl<T: Send> Send for CursorMut<'_, T> {}
+
+#[unstable(feature = "linked_list_cursors", issue = "58533")]
+unsafe impl<T: Sync> Sync for CursorMut<'_, T> {}
diff --git a/library/alloc/src/collections/linked_list/tests.rs b/library/alloc/src/collections/linked_list/tests.rs
new file mode 100644
index 000000000..f8fbfa1bf
--- /dev/null
+++ b/library/alloc/src/collections/linked_list/tests.rs
@@ -0,0 +1,1156 @@
+use super::*;
+use crate::vec::Vec;
+
+use std::panic::{catch_unwind, AssertUnwindSafe};
+use std::thread;
+
+use rand::{thread_rng, RngCore};
+
+#[test]
+fn test_basic() {
+ let mut m = LinkedList::<Box<_>>::new();
+ assert_eq!(m.pop_front(), None);
+ assert_eq!(m.pop_back(), None);
+ assert_eq!(m.pop_front(), None);
+ m.push_front(Box::new(1));
+ assert_eq!(m.pop_front(), Some(Box::new(1)));
+ m.push_back(Box::new(2));
+ m.push_back(Box::new(3));
+ assert_eq!(m.len(), 2);
+ assert_eq!(m.pop_front(), Some(Box::new(2)));
+ assert_eq!(m.pop_front(), Some(Box::new(3)));
+ assert_eq!(m.len(), 0);
+ assert_eq!(m.pop_front(), None);
+ m.push_back(Box::new(1));
+ m.push_back(Box::new(3));
+ m.push_back(Box::new(5));
+ m.push_back(Box::new(7));
+ assert_eq!(m.pop_front(), Some(Box::new(1)));
+
+ let mut n = LinkedList::new();
+ n.push_front(2);
+ n.push_front(3);
+ {
+ assert_eq!(n.front().unwrap(), &3);
+ let x = n.front_mut().unwrap();
+ assert_eq!(*x, 3);
+ *x = 0;
+ }
+ {
+ assert_eq!(n.back().unwrap(), &2);
+ let y = n.back_mut().unwrap();
+ assert_eq!(*y, 2);
+ *y = 1;
+ }
+ assert_eq!(n.pop_front(), Some(0));
+ assert_eq!(n.pop_front(), Some(1));
+}
+
+fn generate_test() -> LinkedList<i32> {
+ list_from(&[0, 1, 2, 3, 4, 5, 6])
+}
+
+fn list_from<T: Clone>(v: &[T]) -> LinkedList<T> {
+ v.iter().cloned().collect()
+}
+
+pub fn check_links<T>(list: &LinkedList<T>) {
+ unsafe {
+ let mut len = 0;
+ let mut last_ptr: Option<&Node<T>> = None;
+ let mut node_ptr: &Node<T>;
+ match list.head {
+ None => {
+ // tail node should also be None.
+ assert!(list.tail.is_none());
+ assert_eq!(0, list.len);
+ return;
+ }
+ Some(node) => node_ptr = &*node.as_ptr(),
+ }
+ loop {
+ match (last_ptr, node_ptr.prev) {
+ (None, None) => {}
+ (None, _) => panic!("prev link for head"),
+ (Some(p), Some(pptr)) => {
+ assert_eq!(p as *const Node<T>, pptr.as_ptr() as *const Node<T>);
+ }
+ _ => panic!("prev link is none, not good"),
+ }
+ match node_ptr.next {
+ Some(next) => {
+ last_ptr = Some(node_ptr);
+ node_ptr = &*next.as_ptr();
+ len += 1;
+ }
+ None => {
+ len += 1;
+ break;
+ }
+ }
+ }
+
+ // verify that the tail node points to the last node.
+ let tail = list.tail.as_ref().expect("some tail node").as_ref();
+ assert_eq!(tail as *const Node<T>, node_ptr as *const Node<T>);
+ // check that len matches interior links.
+ assert_eq!(len, list.len);
+ }
+}
+
+#[test]
+fn test_append() {
+ // Empty to empty
+ {
+ let mut m = LinkedList::<i32>::new();
+ let mut n = LinkedList::new();
+ m.append(&mut n);
+ check_links(&m);
+ assert_eq!(m.len(), 0);
+ assert_eq!(n.len(), 0);
+ }
+ // Non-empty to empty
+ {
+ let mut m = LinkedList::new();
+ let mut n = LinkedList::new();
+ n.push_back(2);
+ m.append(&mut n);
+ check_links(&m);
+ assert_eq!(m.len(), 1);
+ assert_eq!(m.pop_back(), Some(2));
+ assert_eq!(n.len(), 0);
+ check_links(&m);
+ }
+ // Empty to non-empty
+ {
+ let mut m = LinkedList::new();
+ let mut n = LinkedList::new();
+ m.push_back(2);
+ m.append(&mut n);
+ check_links(&m);
+ assert_eq!(m.len(), 1);
+ assert_eq!(m.pop_back(), Some(2));
+ check_links(&m);
+ }
+
+ // Non-empty to non-empty
+ let v = vec![1, 2, 3, 4, 5];
+ let u = vec![9, 8, 1, 2, 3, 4, 5];
+ let mut m = list_from(&v);
+ let mut n = list_from(&u);
+ m.append(&mut n);
+ check_links(&m);
+ let mut sum = v;
+ sum.extend_from_slice(&u);
+ assert_eq!(sum.len(), m.len());
+ for elt in sum {
+ assert_eq!(m.pop_front(), Some(elt))
+ }
+ assert_eq!(n.len(), 0);
+    // Let's make sure it's working properly, since we
+    // made some direct changes to private members.
+ n.push_back(3);
+ assert_eq!(n.len(), 1);
+ assert_eq!(n.pop_front(), Some(3));
+ check_links(&n);
+}
+
+#[test]
+fn test_iterator() {
+ let m = generate_test();
+ for (i, elt) in m.iter().enumerate() {
+ assert_eq!(i as i32, *elt);
+ }
+ let mut n = LinkedList::new();
+ assert_eq!(n.iter().next(), None);
+ n.push_front(4);
+ let mut it = n.iter();
+ assert_eq!(it.size_hint(), (1, Some(1)));
+ assert_eq!(it.next().unwrap(), &4);
+ assert_eq!(it.size_hint(), (0, Some(0)));
+ assert_eq!(it.next(), None);
+}
+
+#[test]
+fn test_iterator_clone() {
+ let mut n = LinkedList::new();
+ n.push_back(2);
+ n.push_back(3);
+ n.push_back(4);
+ let mut it = n.iter();
+ it.next();
+ let mut jt = it.clone();
+ assert_eq!(it.next(), jt.next());
+ assert_eq!(it.next_back(), jt.next_back());
+ assert_eq!(it.next(), jt.next());
+}
+
+#[test]
+fn test_iterator_double_end() {
+ let mut n = LinkedList::new();
+ assert_eq!(n.iter().next(), None);
+ n.push_front(4);
+ n.push_front(5);
+ n.push_front(6);
+ let mut it = n.iter();
+ assert_eq!(it.size_hint(), (3, Some(3)));
+ assert_eq!(it.next().unwrap(), &6);
+ assert_eq!(it.size_hint(), (2, Some(2)));
+ assert_eq!(it.next_back().unwrap(), &4);
+ assert_eq!(it.size_hint(), (1, Some(1)));
+ assert_eq!(it.next_back().unwrap(), &5);
+ assert_eq!(it.next_back(), None);
+ assert_eq!(it.next(), None);
+}
+
+#[test]
+fn test_rev_iter() {
+ let m = generate_test();
+ for (i, elt) in m.iter().rev().enumerate() {
+ assert_eq!((6 - i) as i32, *elt);
+ }
+ let mut n = LinkedList::new();
+ assert_eq!(n.iter().rev().next(), None);
+ n.push_front(4);
+ let mut it = n.iter().rev();
+ assert_eq!(it.size_hint(), (1, Some(1)));
+ assert_eq!(it.next().unwrap(), &4);
+ assert_eq!(it.size_hint(), (0, Some(0)));
+ assert_eq!(it.next(), None);
+}
+
+#[test]
+fn test_mut_iter() {
+ let mut m = generate_test();
+ let mut len = m.len();
+ for (i, elt) in m.iter_mut().enumerate() {
+ assert_eq!(i as i32, *elt);
+ len -= 1;
+ }
+ assert_eq!(len, 0);
+ let mut n = LinkedList::new();
+ assert!(n.iter_mut().next().is_none());
+ n.push_front(4);
+ n.push_back(5);
+ let mut it = n.iter_mut();
+ assert_eq!(it.size_hint(), (2, Some(2)));
+ assert!(it.next().is_some());
+ assert!(it.next().is_some());
+ assert_eq!(it.size_hint(), (0, Some(0)));
+ assert!(it.next().is_none());
+}
+
+#[test]
+fn test_iterator_mut_double_end() {
+ let mut n = LinkedList::new();
+ assert!(n.iter_mut().next_back().is_none());
+ n.push_front(4);
+ n.push_front(5);
+ n.push_front(6);
+ let mut it = n.iter_mut();
+ assert_eq!(it.size_hint(), (3, Some(3)));
+ assert_eq!(*it.next().unwrap(), 6);
+ assert_eq!(it.size_hint(), (2, Some(2)));
+ assert_eq!(*it.next_back().unwrap(), 4);
+ assert_eq!(it.size_hint(), (1, Some(1)));
+ assert_eq!(*it.next_back().unwrap(), 5);
+ assert!(it.next_back().is_none());
+ assert!(it.next().is_none());
+}
+
+#[test]
+fn test_mut_rev_iter() {
+ let mut m = generate_test();
+ for (i, elt) in m.iter_mut().rev().enumerate() {
+ assert_eq!((6 - i) as i32, *elt);
+ }
+ let mut n = LinkedList::new();
+ assert!(n.iter_mut().rev().next().is_none());
+ n.push_front(4);
+ let mut it = n.iter_mut().rev();
+ assert!(it.next().is_some());
+ assert!(it.next().is_none());
+}
+
+#[test]
+fn test_clone_from() {
+ // Short cloned from long
+ {
+ let v = vec![1, 2, 3, 4, 5];
+ let u = vec![8, 7, 6, 2, 3, 4, 5];
+ let mut m = list_from(&v);
+ let n = list_from(&u);
+ m.clone_from(&n);
+ check_links(&m);
+ assert_eq!(m, n);
+ for elt in u {
+ assert_eq!(m.pop_front(), Some(elt))
+ }
+ }
+ // Long cloned from short
+ {
+ let v = vec![1, 2, 3, 4, 5];
+ let u = vec![6, 7, 8];
+ let mut m = list_from(&v);
+ let n = list_from(&u);
+ m.clone_from(&n);
+ check_links(&m);
+ assert_eq!(m, n);
+ for elt in u {
+ assert_eq!(m.pop_front(), Some(elt))
+ }
+ }
+ // Two equal length lists
+ {
+ let v = vec![1, 2, 3, 4, 5];
+ let u = vec![9, 8, 1, 2, 3];
+ let mut m = list_from(&v);
+ let n = list_from(&u);
+ m.clone_from(&n);
+ check_links(&m);
+ assert_eq!(m, n);
+ for elt in u {
+ assert_eq!(m.pop_front(), Some(elt))
+ }
+ }
+}
+
+#[test]
+#[cfg_attr(target_os = "emscripten", ignore)]
+fn test_send() {
+ let n = list_from(&[1, 2, 3]);
+ thread::spawn(move || {
+ check_links(&n);
+ let a: &[_] = &[&1, &2, &3];
+ assert_eq!(a, &*n.iter().collect::<Vec<_>>());
+ })
+ .join()
+ .ok()
+ .unwrap();
+}
+
+#[test]
+fn test_eq() {
+ let mut n = list_from(&[]);
+ let mut m = list_from(&[]);
+ assert!(n == m);
+ n.push_front(1);
+ assert!(n != m);
+ m.push_back(1);
+ assert!(n == m);
+
+ let n = list_from(&[2, 3, 4]);
+ let m = list_from(&[1, 2, 3]);
+ assert!(n != m);
+}
+
+#[test]
+fn test_ord() {
+ let n = list_from(&[]);
+ let m = list_from(&[1, 2, 3]);
+ assert!(n < m);
+ assert!(m > n);
+ assert!(n <= n);
+ assert!(n >= n);
+}
+
+#[test]
+fn test_ord_nan() {
+ let nan = 0.0f64 / 0.0;
+ let n = list_from(&[nan]);
+ let m = list_from(&[nan]);
+ assert!(!(n < m));
+ assert!(!(n > m));
+ assert!(!(n <= m));
+ assert!(!(n >= m));
+
+ let n = list_from(&[nan]);
+ let one = list_from(&[1.0f64]);
+ assert!(!(n < one));
+ assert!(!(n > one));
+ assert!(!(n <= one));
+ assert!(!(n >= one));
+
+ let u = list_from(&[1.0f64, 2.0, nan]);
+ let v = list_from(&[1.0f64, 2.0, 3.0]);
+ assert!(!(u < v));
+ assert!(!(u > v));
+ assert!(!(u <= v));
+ assert!(!(u >= v));
+
+ let s = list_from(&[1.0f64, 2.0, 4.0, 2.0]);
+ let t = list_from(&[1.0f64, 2.0, 3.0, 2.0]);
+ assert!(!(s < t));
+ assert!(s > one);
+ assert!(!(s <= one));
+ assert!(s >= one);
+}
+
+#[test]
+fn test_26021() {
+ // There was a bug in split_off that failed to null out the RHS's head's prev ptr.
+ // This caused the RHS's dtor to walk up into the LHS at drop and delete all of
+ // its nodes.
+ //
+ // https://github.com/rust-lang/rust/issues/26021
+ let mut v1 = LinkedList::new();
+ v1.push_front(1);
+ v1.push_front(1);
+ v1.push_front(1);
+ v1.push_front(1);
+ let _ = v1.split_off(3); // Dropping this now should not cause laundry consumption
+ assert_eq!(v1.len(), 3);
+
+ assert_eq!(v1.iter().len(), 3);
+ assert_eq!(v1.iter().collect::<Vec<_>>().len(), 3);
+}
+
+#[test]
+fn test_split_off() {
+ let mut v1 = LinkedList::new();
+ v1.push_front(1);
+ v1.push_front(1);
+ v1.push_front(1);
+ v1.push_front(1);
+
+ // test all splits
+ for ix in 0..1 + v1.len() {
+ let mut a = v1.clone();
+ let b = a.split_off(ix);
+ check_links(&a);
+ check_links(&b);
+ a.extend(b);
+ assert_eq!(v1, a);
+ }
+}
+
+#[test]
+fn test_split_off_2() {
+ // singleton
+ {
+ let mut m = LinkedList::new();
+ m.push_back(1);
+
+ let p = m.split_off(0);
+ assert_eq!(m.len(), 0);
+ assert_eq!(p.len(), 1);
+ assert_eq!(p.back(), Some(&1));
+ assert_eq!(p.front(), Some(&1));
+ }
+
+ // not singleton, forwards
+ {
+ let u = vec![1, 2, 3, 4, 5];
+ let mut m = list_from(&u);
+ let mut n = m.split_off(2);
+ assert_eq!(m.len(), 2);
+ assert_eq!(n.len(), 3);
+ for elt in 1..3 {
+ assert_eq!(m.pop_front(), Some(elt));
+ }
+ for elt in 3..6 {
+ assert_eq!(n.pop_front(), Some(elt));
+ }
+ }
+ // not singleton, backwards
+ {
+ let u = vec![1, 2, 3, 4, 5];
+ let mut m = list_from(&u);
+ let mut n = m.split_off(4);
+ assert_eq!(m.len(), 4);
+ assert_eq!(n.len(), 1);
+ for elt in 1..5 {
+ assert_eq!(m.pop_front(), Some(elt));
+ }
+ for elt in 5..6 {
+ assert_eq!(n.pop_front(), Some(elt));
+ }
+ }
+
+ // no-op on the last index
+ {
+ let mut m = LinkedList::new();
+ m.push_back(1);
+
+ let p = m.split_off(1);
+ assert_eq!(m.len(), 1);
+ assert_eq!(p.len(), 0);
+ assert_eq!(m.back(), Some(&1));
+ assert_eq!(m.front(), Some(&1));
+ }
+}
+
+fn fuzz_test(sz: i32) {
+ let mut m: LinkedList<_> = LinkedList::new();
+ let mut v = vec![];
+ for i in 0..sz {
+ check_links(&m);
+ let r: u8 = thread_rng().next_u32() as u8;
+ match r % 6 {
+ 0 => {
+ m.pop_back();
+ v.pop();
+ }
+ 1 => {
+ if !v.is_empty() {
+ m.pop_front();
+ v.remove(0);
+ }
+ }
+ 2 | 4 => {
+ m.push_front(-i);
+ v.insert(0, -i);
+ }
+ 3 | 5 | _ => {
+ m.push_back(i);
+ v.push(i);
+ }
+ }
+ }
+
+ check_links(&m);
+
+ let mut i = 0;
+ for (a, &b) in m.into_iter().zip(&v) {
+ i += 1;
+ assert_eq!(a, b);
+ }
+ assert_eq!(i, v.len());
+}
+
+#[test]
+fn test_fuzz() {
+ for _ in 0..25 {
+ fuzz_test(3);
+ fuzz_test(16);
+ #[cfg(not(miri))] // Miri is too slow
+ fuzz_test(189);
+ }
+}
+
+#[test]
+fn test_show() {
+ let list: LinkedList<_> = (0..10).collect();
+ assert_eq!(format!("{list:?}"), "[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]");
+
+ let list: LinkedList<_> = ["just", "one", "test", "more"].into_iter().collect();
+ assert_eq!(format!("{list:?}"), "[\"just\", \"one\", \"test\", \"more\"]");
+}
+
+#[test]
+fn drain_filter_test() {
+ let mut m: LinkedList<u32> = LinkedList::new();
+ m.extend(&[1, 2, 3, 4, 5, 6]);
+ let deleted = m.drain_filter(|v| *v < 4).collect::<Vec<_>>();
+
+ check_links(&m);
+
+ assert_eq!(deleted, &[1, 2, 3]);
+ assert_eq!(m.into_iter().collect::<Vec<_>>(), &[4, 5, 6]);
+}
+
+#[test]
+fn drain_to_empty_test() {
+ let mut m: LinkedList<u32> = LinkedList::new();
+ m.extend(&[1, 2, 3, 4, 5, 6]);
+ let deleted = m.drain_filter(|_| true).collect::<Vec<_>>();
+
+ check_links(&m);
+
+ assert_eq!(deleted, &[1, 2, 3, 4, 5, 6]);
+ assert_eq!(m.into_iter().collect::<Vec<_>>(), &[]);
+}
+
+#[test]
+fn test_cursor_move_peek() {
+ let mut m: LinkedList<u32> = LinkedList::new();
+ m.extend(&[1, 2, 3, 4, 5, 6]);
+ let mut cursor = m.cursor_front();
+ assert_eq!(cursor.current(), Some(&1));
+ assert_eq!(cursor.peek_next(), Some(&2));
+ assert_eq!(cursor.peek_prev(), None);
+ assert_eq!(cursor.index(), Some(0));
+ cursor.move_prev();
+ assert_eq!(cursor.current(), None);
+ assert_eq!(cursor.peek_next(), Some(&1));
+ assert_eq!(cursor.peek_prev(), Some(&6));
+ assert_eq!(cursor.index(), None);
+ cursor.move_next();
+ cursor.move_next();
+ assert_eq!(cursor.current(), Some(&2));
+ assert_eq!(cursor.peek_next(), Some(&3));
+ assert_eq!(cursor.peek_prev(), Some(&1));
+ assert_eq!(cursor.index(), Some(1));
+
+ let mut cursor = m.cursor_back();
+ assert_eq!(cursor.current(), Some(&6));
+ assert_eq!(cursor.peek_next(), None);
+ assert_eq!(cursor.peek_prev(), Some(&5));
+ assert_eq!(cursor.index(), Some(5));
+ cursor.move_next();
+ assert_eq!(cursor.current(), None);
+ assert_eq!(cursor.peek_next(), Some(&1));
+ assert_eq!(cursor.peek_prev(), Some(&6));
+ assert_eq!(cursor.index(), None);
+ cursor.move_prev();
+ cursor.move_prev();
+ assert_eq!(cursor.current(), Some(&5));
+ assert_eq!(cursor.peek_next(), Some(&6));
+ assert_eq!(cursor.peek_prev(), Some(&4));
+ assert_eq!(cursor.index(), Some(4));
+
+ let mut m: LinkedList<u32> = LinkedList::new();
+ m.extend(&[1, 2, 3, 4, 5, 6]);
+ let mut cursor = m.cursor_front_mut();
+ assert_eq!(cursor.current(), Some(&mut 1));
+ assert_eq!(cursor.peek_next(), Some(&mut 2));
+ assert_eq!(cursor.peek_prev(), None);
+ assert_eq!(cursor.index(), Some(0));
+ cursor.move_prev();
+ assert_eq!(cursor.current(), None);
+ assert_eq!(cursor.peek_next(), Some(&mut 1));
+ assert_eq!(cursor.peek_prev(), Some(&mut 6));
+ assert_eq!(cursor.index(), None);
+ cursor.move_next();
+ cursor.move_next();
+ assert_eq!(cursor.current(), Some(&mut 2));
+ assert_eq!(cursor.peek_next(), Some(&mut 3));
+ assert_eq!(cursor.peek_prev(), Some(&mut 1));
+ assert_eq!(cursor.index(), Some(1));
+ let mut cursor2 = cursor.as_cursor();
+ assert_eq!(cursor2.current(), Some(&2));
+ assert_eq!(cursor2.index(), Some(1));
+ cursor2.move_next();
+ assert_eq!(cursor2.current(), Some(&3));
+ assert_eq!(cursor2.index(), Some(2));
+ assert_eq!(cursor.current(), Some(&mut 2));
+ assert_eq!(cursor.index(), Some(1));
+
+ let mut m: LinkedList<u32> = LinkedList::new();
+ m.extend(&[1, 2, 3, 4, 5, 6]);
+ let mut cursor = m.cursor_back_mut();
+ assert_eq!(cursor.current(), Some(&mut 6));
+ assert_eq!(cursor.peek_next(), None);
+ assert_eq!(cursor.peek_prev(), Some(&mut 5));
+ assert_eq!(cursor.index(), Some(5));
+ cursor.move_next();
+ assert_eq!(cursor.current(), None);
+ assert_eq!(cursor.peek_next(), Some(&mut 1));
+ assert_eq!(cursor.peek_prev(), Some(&mut 6));
+ assert_eq!(cursor.index(), None);
+ cursor.move_prev();
+ cursor.move_prev();
+ assert_eq!(cursor.current(), Some(&mut 5));
+ assert_eq!(cursor.peek_next(), Some(&mut 6));
+ assert_eq!(cursor.peek_prev(), Some(&mut 4));
+ assert_eq!(cursor.index(), Some(4));
+ let mut cursor2 = cursor.as_cursor();
+ assert_eq!(cursor2.current(), Some(&5));
+ assert_eq!(cursor2.index(), Some(4));
+ cursor2.move_prev();
+ assert_eq!(cursor2.current(), Some(&4));
+ assert_eq!(cursor2.index(), Some(3));
+ assert_eq!(cursor.current(), Some(&mut 5));
+ assert_eq!(cursor.index(), Some(4));
+}
+
+#[test]
+fn test_cursor_mut_insert() {
+ let mut m: LinkedList<u32> = LinkedList::new();
+ m.extend(&[1, 2, 3, 4, 5, 6]);
+ let mut cursor = m.cursor_front_mut();
+ cursor.insert_before(7);
+ cursor.insert_after(8);
+ check_links(&m);
+ assert_eq!(m.iter().cloned().collect::<Vec<_>>(), &[7, 1, 8, 2, 3, 4, 5, 6]);
+ let mut cursor = m.cursor_front_mut();
+ cursor.move_prev();
+ cursor.insert_before(9);
+ cursor.insert_after(10);
+ check_links(&m);
+ assert_eq!(m.iter().cloned().collect::<Vec<_>>(), &[10, 7, 1, 8, 2, 3, 4, 5, 6, 9]);
+ let mut cursor = m.cursor_front_mut();
+ cursor.move_prev();
+ assert_eq!(cursor.remove_current(), None);
+ cursor.move_next();
+ cursor.move_next();
+ assert_eq!(cursor.remove_current(), Some(7));
+ cursor.move_prev();
+ cursor.move_prev();
+ cursor.move_prev();
+ assert_eq!(cursor.remove_current(), Some(9));
+ cursor.move_next();
+ assert_eq!(cursor.remove_current(), Some(10));
+ check_links(&m);
+ assert_eq!(m.iter().cloned().collect::<Vec<_>>(), &[1, 8, 2, 3, 4, 5, 6]);
+ let mut cursor = m.cursor_front_mut();
+ let mut p: LinkedList<u32> = LinkedList::new();
+ p.extend(&[100, 101, 102, 103]);
+ let mut q: LinkedList<u32> = LinkedList::new();
+ q.extend(&[200, 201, 202, 203]);
+ cursor.splice_after(p);
+ cursor.splice_before(q);
+ check_links(&m);
+ assert_eq!(
+ m.iter().cloned().collect::<Vec<_>>(),
+ &[200, 201, 202, 203, 1, 100, 101, 102, 103, 8, 2, 3, 4, 5, 6]
+ );
+ let mut cursor = m.cursor_front_mut();
+ cursor.move_prev();
+ let tmp = cursor.split_before();
+ assert_eq!(m.into_iter().collect::<Vec<_>>(), &[]);
+ m = tmp;
+ let mut cursor = m.cursor_front_mut();
+ cursor.move_next();
+ cursor.move_next();
+ cursor.move_next();
+ cursor.move_next();
+ cursor.move_next();
+ cursor.move_next();
+ let tmp = cursor.split_after();
+ assert_eq!(tmp.into_iter().collect::<Vec<_>>(), &[102, 103, 8, 2, 3, 4, 5, 6]);
+ check_links(&m);
+ assert_eq!(m.iter().cloned().collect::<Vec<_>>(), &[200, 201, 202, 203, 1, 100, 101]);
+}
+
+#[test]
+fn test_cursor_push_front_back() {
+ let mut ll: LinkedList<u32> = LinkedList::new();
+ ll.extend(&[1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
+ let mut c = ll.cursor_front_mut();
+ assert_eq!(c.current(), Some(&mut 1));
+ assert_eq!(c.index(), Some(0));
+ c.push_front(0);
+ assert_eq!(c.current(), Some(&mut 1));
+ assert_eq!(c.peek_prev(), Some(&mut 0));
+ assert_eq!(c.index(), Some(1));
+ c.push_back(11);
+ drop(c);
+ let p = ll.cursor_back().front().unwrap();
+ assert_eq!(p, &0);
+ assert_eq!(ll, (0..12).collect());
+ check_links(&ll);
+}
+
+#[test]
+fn test_cursor_pop_front_back() {
+ let mut ll: LinkedList<u32> = LinkedList::new();
+ ll.extend(&[1, 2, 3, 4, 5, 6]);
+ let mut c = ll.cursor_back_mut();
+ assert_eq!(c.pop_front(), Some(1));
+ c.move_prev();
+ c.move_prev();
+ c.move_prev();
+ assert_eq!(c.pop_back(), Some(6));
+ let c = c.as_cursor();
+ assert_eq!(c.front(), Some(&2));
+ assert_eq!(c.back(), Some(&5));
+ assert_eq!(c.index(), Some(1));
+ drop(c);
+ assert_eq!(ll, (2..6).collect());
+ check_links(&ll);
+ let mut c = ll.cursor_back_mut();
+ assert_eq!(c.current(), Some(&mut 5));
+ assert_eq!(c.index, 3);
+ assert_eq!(c.pop_back(), Some(5));
+ assert_eq!(c.current(), None);
+ assert_eq!(c.index, 3);
+ assert_eq!(c.pop_back(), Some(4));
+ assert_eq!(c.current(), None);
+ assert_eq!(c.index, 2);
+}
+
+#[test]
+fn test_extend_ref() {
+ let mut a = LinkedList::new();
+ a.push_back(1);
+
+ a.extend(&[2, 3, 4]);
+
+ assert_eq!(a.len(), 4);
+ assert_eq!(a, list_from(&[1, 2, 3, 4]));
+
+ let mut b = LinkedList::new();
+ b.push_back(5);
+ b.push_back(6);
+ a.extend(&b);
+
+ assert_eq!(a.len(), 6);
+ assert_eq!(a, list_from(&[1, 2, 3, 4, 5, 6]));
+}
+
+#[test]
+fn test_extend() {
+ let mut a = LinkedList::new();
+ a.push_back(1);
+ a.extend(vec![2, 3, 4]); // uses iterator
+
+ assert_eq!(a.len(), 4);
+ assert!(a.iter().eq(&[1, 2, 3, 4]));
+
+ let b: LinkedList<_> = [5, 6, 7].into_iter().collect();
+ a.extend(b); // specializes to `append`
+
+ assert_eq!(a.len(), 7);
+ assert!(a.iter().eq(&[1, 2, 3, 4, 5, 6, 7]));
+}
+
+#[test]
+fn test_contains() {
+ let mut l = LinkedList::new();
+ l.extend(&[2, 3, 4]);
+
+ assert!(l.contains(&3));
+ assert!(!l.contains(&1));
+
+ l.clear();
+
+ assert!(!l.contains(&3));
+}
+
+#[test]
+fn drain_filter_empty() {
+ let mut list: LinkedList<i32> = LinkedList::new();
+
+ {
+ let mut iter = list.drain_filter(|_| true);
+ assert_eq!(iter.size_hint(), (0, Some(0)));
+ assert_eq!(iter.next(), None);
+ assert_eq!(iter.size_hint(), (0, Some(0)));
+ assert_eq!(iter.next(), None);
+ assert_eq!(iter.size_hint(), (0, Some(0)));
+ }
+
+ assert_eq!(list.len(), 0);
+ assert_eq!(list.into_iter().collect::<Vec<_>>(), vec![]);
+}
+
+#[test]
+fn drain_filter_zst() {
+ let mut list: LinkedList<_> = [(), (), (), (), ()].into_iter().collect();
+ let initial_len = list.len();
+ let mut count = 0;
+
+ {
+ let mut iter = list.drain_filter(|_| true);
+ assert_eq!(iter.size_hint(), (0, Some(initial_len)));
+ while let Some(_) = iter.next() {
+ count += 1;
+ assert_eq!(iter.size_hint(), (0, Some(initial_len - count)));
+ }
+ assert_eq!(iter.size_hint(), (0, Some(0)));
+ assert_eq!(iter.next(), None);
+ assert_eq!(iter.size_hint(), (0, Some(0)));
+ }
+
+ assert_eq!(count, initial_len);
+ assert_eq!(list.len(), 0);
+ assert_eq!(list.into_iter().collect::<Vec<_>>(), vec![]);
+}
+
+#[test]
+fn drain_filter_false() {
+ let mut list: LinkedList<_> = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10].into_iter().collect();
+
+ let initial_len = list.len();
+ let mut count = 0;
+
+ {
+ let mut iter = list.drain_filter(|_| false);
+ assert_eq!(iter.size_hint(), (0, Some(initial_len)));
+ for _ in iter.by_ref() {
+ count += 1;
+ }
+ assert_eq!(iter.size_hint(), (0, Some(0)));
+ assert_eq!(iter.next(), None);
+ assert_eq!(iter.size_hint(), (0, Some(0)));
+ }
+
+ assert_eq!(count, 0);
+ assert_eq!(list.len(), initial_len);
+ assert_eq!(list.into_iter().collect::<Vec<_>>(), vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
+}
+
+#[test]
+fn drain_filter_true() {
+ let mut list: LinkedList<_> = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10].into_iter().collect();
+
+ let initial_len = list.len();
+ let mut count = 0;
+
+ {
+ let mut iter = list.drain_filter(|_| true);
+ assert_eq!(iter.size_hint(), (0, Some(initial_len)));
+ while let Some(_) = iter.next() {
+ count += 1;
+ assert_eq!(iter.size_hint(), (0, Some(initial_len - count)));
+ }
+ assert_eq!(iter.size_hint(), (0, Some(0)));
+ assert_eq!(iter.next(), None);
+ assert_eq!(iter.size_hint(), (0, Some(0)));
+ }
+
+ assert_eq!(count, initial_len);
+ assert_eq!(list.len(), 0);
+ assert_eq!(list.into_iter().collect::<Vec<_>>(), vec![]);
+}
+
+#[test]
+fn drain_filter_complex() {
+ {
+ // [+xxx++++++xxxxx++++x+x++]
+ let mut list = [
+ 1, 2, 4, 6, 7, 9, 11, 13, 15, 17, 18, 20, 22, 24, 26, 27, 29, 31, 33, 34, 35, 36, 37,
+ 39,
+ ]
+ .into_iter()
+ .collect::<LinkedList<_>>();
+
+ let removed = list.drain_filter(|x| *x % 2 == 0).collect::<Vec<_>>();
+ assert_eq!(removed.len(), 10);
+ assert_eq!(removed, vec![2, 4, 6, 18, 20, 22, 24, 26, 34, 36]);
+
+ assert_eq!(list.len(), 14);
+ assert_eq!(
+ list.into_iter().collect::<Vec<_>>(),
+ vec![1, 7, 9, 11, 13, 15, 17, 27, 29, 31, 33, 35, 37, 39]
+ );
+ }
+
+ {
+ // [xxx++++++xxxxx++++x+x++]
+ let mut list =
+ [2, 4, 6, 7, 9, 11, 13, 15, 17, 18, 20, 22, 24, 26, 27, 29, 31, 33, 34, 35, 36, 37, 39]
+ .into_iter()
+ .collect::<LinkedList<_>>();
+
+ let removed = list.drain_filter(|x| *x % 2 == 0).collect::<Vec<_>>();
+ assert_eq!(removed.len(), 10);
+ assert_eq!(removed, vec![2, 4, 6, 18, 20, 22, 24, 26, 34, 36]);
+
+ assert_eq!(list.len(), 13);
+ assert_eq!(
+ list.into_iter().collect::<Vec<_>>(),
+ vec![7, 9, 11, 13, 15, 17, 27, 29, 31, 33, 35, 37, 39]
+ );
+ }
+
+ {
+ // [xxx++++++xxxxx++++x+x]
+ let mut list =
+ [2, 4, 6, 7, 9, 11, 13, 15, 17, 18, 20, 22, 24, 26, 27, 29, 31, 33, 34, 35, 36]
+ .into_iter()
+ .collect::<LinkedList<_>>();
+
+ let removed = list.drain_filter(|x| *x % 2 == 0).collect::<Vec<_>>();
+ assert_eq!(removed.len(), 10);
+ assert_eq!(removed, vec![2, 4, 6, 18, 20, 22, 24, 26, 34, 36]);
+
+ assert_eq!(list.len(), 11);
+ assert_eq!(
+ list.into_iter().collect::<Vec<_>>(),
+ vec![7, 9, 11, 13, 15, 17, 27, 29, 31, 33, 35]
+ );
+ }
+
+ {
+ // [xxxxxxxxxx+++++++++++]
+ let mut list = [2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19]
+ .into_iter()
+ .collect::<LinkedList<_>>();
+
+ let removed = list.drain_filter(|x| *x % 2 == 0).collect::<Vec<_>>();
+ assert_eq!(removed.len(), 10);
+ assert_eq!(removed, vec![2, 4, 6, 8, 10, 12, 14, 16, 18, 20]);
+
+ assert_eq!(list.len(), 10);
+ assert_eq!(list.into_iter().collect::<Vec<_>>(), vec![1, 3, 5, 7, 9, 11, 13, 15, 17, 19]);
+ }
+
+ {
+ // [+++++++++++xxxxxxxxxx]
+ let mut list = [1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20]
+ .into_iter()
+ .collect::<LinkedList<_>>();
+
+ let removed = list.drain_filter(|x| *x % 2 == 0).collect::<Vec<_>>();
+ assert_eq!(removed.len(), 10);
+ assert_eq!(removed, vec![2, 4, 6, 8, 10, 12, 14, 16, 18, 20]);
+
+ assert_eq!(list.len(), 10);
+ assert_eq!(list.into_iter().collect::<Vec<_>>(), vec![1, 3, 5, 7, 9, 11, 13, 15, 17, 19]);
+ }
+}
+
+#[test]
+fn drain_filter_drop_panic_leak() {
+ static mut DROPS: i32 = 0;
+
+ struct D(bool);
+
+ impl Drop for D {
+ fn drop(&mut self) {
+ unsafe {
+ DROPS += 1;
+ }
+
+ if self.0 {
+ panic!("panic in `drop`");
+ }
+ }
+ }
+
+ let mut q = LinkedList::new();
+ q.push_back(D(false));
+ q.push_back(D(false));
+ q.push_back(D(false));
+ q.push_back(D(false));
+ q.push_back(D(false));
+ q.push_front(D(false));
+ q.push_front(D(true));
+ q.push_front(D(false));
+
+ catch_unwind(AssertUnwindSafe(|| drop(q.drain_filter(|_| true)))).ok();
+
+ assert_eq!(unsafe { DROPS }, 8);
+ assert!(q.is_empty());
+}
+
+#[test]
+fn drain_filter_pred_panic_leak() {
+ static mut DROPS: i32 = 0;
+
+ #[derive(Debug)]
+ struct D(u32);
+
+ impl Drop for D {
+ fn drop(&mut self) {
+ unsafe {
+ DROPS += 1;
+ }
+ }
+ }
+
+ let mut q = LinkedList::new();
+ q.push_back(D(3));
+ q.push_back(D(4));
+ q.push_back(D(5));
+ q.push_back(D(6));
+ q.push_back(D(7));
+ q.push_front(D(2));
+ q.push_front(D(1));
+ q.push_front(D(0));
+
+ catch_unwind(AssertUnwindSafe(|| {
+ drop(q.drain_filter(|item| if item.0 >= 2 { panic!() } else { true }))
+ }))
+ .ok();
+
+ assert_eq!(unsafe { DROPS }, 2); // 0 and 1
+ assert_eq!(q.len(), 6);
+}
+
+#[test]
+fn test_drop() {
+ static mut DROPS: i32 = 0;
+ struct Elem;
+ impl Drop for Elem {
+ fn drop(&mut self) {
+ unsafe {
+ DROPS += 1;
+ }
+ }
+ }
+
+ let mut ring = LinkedList::new();
+ ring.push_back(Elem);
+ ring.push_front(Elem);
+ ring.push_back(Elem);
+ ring.push_front(Elem);
+ drop(ring);
+
+ assert_eq!(unsafe { DROPS }, 4);
+}
+
+#[test]
+fn test_drop_with_pop() {
+ static mut DROPS: i32 = 0;
+ struct Elem;
+ impl Drop for Elem {
+ fn drop(&mut self) {
+ unsafe {
+ DROPS += 1;
+ }
+ }
+ }
+
+ let mut ring = LinkedList::new();
+ ring.push_back(Elem);
+ ring.push_front(Elem);
+ ring.push_back(Elem);
+ ring.push_front(Elem);
+
+ drop(ring.pop_back());
+ drop(ring.pop_front());
+ assert_eq!(unsafe { DROPS }, 2);
+
+ drop(ring);
+ assert_eq!(unsafe { DROPS }, 4);
+}
+
+#[test]
+fn test_drop_clear() {
+ static mut DROPS: i32 = 0;
+ struct Elem;
+ impl Drop for Elem {
+ fn drop(&mut self) {
+ unsafe {
+ DROPS += 1;
+ }
+ }
+ }
+
+ let mut ring = LinkedList::new();
+ ring.push_back(Elem);
+ ring.push_front(Elem);
+ ring.push_back(Elem);
+ ring.push_front(Elem);
+ ring.clear();
+ assert_eq!(unsafe { DROPS }, 4);
+
+ drop(ring);
+ assert_eq!(unsafe { DROPS }, 4);
+}
+
+#[test]
+fn test_drop_panic() {
+ static mut DROPS: i32 = 0;
+
+ struct D(bool);
+
+ impl Drop for D {
+ fn drop(&mut self) {
+ unsafe {
+ DROPS += 1;
+ }
+
+ if self.0 {
+ panic!("panic in `drop`");
+ }
+ }
+ }
+
+ let mut q = LinkedList::new();
+ q.push_back(D(false));
+ q.push_back(D(false));
+ q.push_back(D(false));
+ q.push_back(D(false));
+ q.push_back(D(false));
+ q.push_front(D(false));
+ q.push_front(D(false));
+ q.push_front(D(true));
+
+ catch_unwind(move || drop(q)).ok();
+
+ assert_eq!(unsafe { DROPS }, 8);
+}
diff --git a/library/alloc/src/collections/mod.rs b/library/alloc/src/collections/mod.rs
new file mode 100644
index 000000000..628a5b155
--- /dev/null
+++ b/library/alloc/src/collections/mod.rs
@@ -0,0 +1,154 @@
+//! Collection types.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+#[cfg(not(no_global_oom_handling))]
+pub mod binary_heap;
+#[cfg(not(no_global_oom_handling))]
+mod btree;
+#[cfg(not(no_global_oom_handling))]
+pub mod linked_list;
+#[cfg(not(no_global_oom_handling))]
+pub mod vec_deque;
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub mod btree_map {
+ //! An ordered map based on a B-Tree.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub use super::btree::map::*;
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub mod btree_set {
+ //! An ordered set based on a B-Tree.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub use super::btree::set::*;
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[doc(no_inline)]
+pub use binary_heap::BinaryHeap;
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[doc(no_inline)]
+pub use btree_map::BTreeMap;
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[doc(no_inline)]
+pub use btree_set::BTreeSet;
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[doc(no_inline)]
+pub use linked_list::LinkedList;
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[doc(no_inline)]
+pub use vec_deque::VecDeque;
+
+use crate::alloc::{Layout, LayoutError};
+use core::fmt::Display;
+
+/// The error type for `try_reserve` methods.
+#[derive(Clone, PartialEq, Eq, Debug)]
+#[stable(feature = "try_reserve", since = "1.57.0")]
+pub struct TryReserveError {
+ kind: TryReserveErrorKind,
+}
+
+impl TryReserveError {
+ /// Details about the allocation that caused the error
+ #[inline]
+ #[must_use]
+ #[unstable(
+ feature = "try_reserve_kind",
+ reason = "Uncertain how much info should be exposed",
+ issue = "48043"
+ )]
+ pub fn kind(&self) -> TryReserveErrorKind {
+ self.kind.clone()
+ }
+}
+
+/// Details of the allocation that caused a `TryReserveError`
+#[derive(Clone, PartialEq, Eq, Debug)]
+#[unstable(
+ feature = "try_reserve_kind",
+ reason = "Uncertain how much info should be exposed",
+ issue = "48043"
+)]
+pub enum TryReserveErrorKind {
+ /// Error due to the computed capacity exceeding the collection's maximum
+ /// (usually `isize::MAX` bytes).
+ CapacityOverflow,
+
+ /// The memory allocator returned an error
+ AllocError {
+ /// The layout of allocation request that failed
+ layout: Layout,
+
+ #[doc(hidden)]
+ #[unstable(
+ feature = "container_error_extra",
+ issue = "none",
+ reason = "\
+ Enable exposing the allocator’s custom error value \
+ if an associated type is added in the future: \
+ https://github.com/rust-lang/wg-allocators/issues/23"
+ )]
+ non_exhaustive: (),
+ },
+}
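For context on how this error type reaches users, here is a minimal usage sketch (an editorial illustration, not part of this diff; the `grow` helper and buffer size are made up) of handling a fallible reservation through the stable `try_reserve` API:

```
use std::collections::TryReserveError;

// Grow a buffer without aborting the process on allocation failure.
fn grow(buf: &mut Vec<u8>, additional: usize) -> Result<(), TryReserveError> {
    // `try_reserve` reports CapacityOverflow / AllocError through
    // `TryReserveError` instead of calling the global OOM handler.
    buf.try_reserve(additional)?;
    buf.extend(std::iter::repeat(0u8).take(additional));
    Ok(())
}

fn main() {
    let mut buf = Vec::new();
    match grow(&mut buf, 1024) {
        Ok(()) => println!("reserved, len = {}", buf.len()),
        Err(e) => println!("{e}"), // formatted by the Display impl shown further down
    }
}
```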
+
+#[unstable(
+ feature = "try_reserve_kind",
+ reason = "Uncertain how much info should be exposed",
+ issue = "48043"
+)]
+impl From<TryReserveErrorKind> for TryReserveError {
+ #[inline]
+ fn from(kind: TryReserveErrorKind) -> Self {
+ Self { kind }
+ }
+}
+
+#[unstable(feature = "try_reserve_kind", reason = "new API", issue = "48043")]
+impl From<LayoutError> for TryReserveErrorKind {
+ /// Always evaluates to [`TryReserveErrorKind::CapacityOverflow`].
+ #[inline]
+ fn from(_: LayoutError) -> Self {
+ TryReserveErrorKind::CapacityOverflow
+ }
+}
+
+#[stable(feature = "try_reserve", since = "1.57.0")]
+impl Display for TryReserveError {
+ fn fmt(
+ &self,
+ fmt: &mut core::fmt::Formatter<'_>,
+ ) -> core::result::Result<(), core::fmt::Error> {
+ fmt.write_str("memory allocation failed")?;
+ let reason = match self.kind {
+ TryReserveErrorKind::CapacityOverflow => {
+ " because the computed capacity exceeded the collection's maximum"
+ }
+ TryReserveErrorKind::AllocError { .. } => {
+ " because the memory allocator returned an error"
+ }
+ };
+ fmt.write_str(reason)
+ }
+}
+
+/// An intermediate trait for specialization of `Extend`.
+#[doc(hidden)]
+trait SpecExtend<I: IntoIterator> {
+ /// Extends `self` with the contents of the given iterator.
+ fn spec_extend(&mut self, iter: I);
+}
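`SpecExtend` exists so that `Extend` can pick a faster path for iterators whose shape is known. The sketch below is editorial and nightly-only: it assumes the unstable `min_specialization` feature behaves as it does inside the standard library, and uses a user-defined trait of the same name, with a `default` blanket method plus a specialized impl for one concrete iterator type.

```
// Nightly-only sketch of the specialization pattern behind SpecExtend.
#![feature(min_specialization)]

trait SpecExtend<I: IntoIterator> {
    fn spec_extend(&mut self, iter: I);
}

impl<T, I: IntoIterator<Item = T>> SpecExtend<I> for Vec<T> {
    // Generic fallback: push the items one at a time.
    default fn spec_extend(&mut self, iter: I) {
        for item in iter {
            self.push(item);
        }
    }
}

impl<T> SpecExtend<std::vec::IntoIter<T>> for Vec<T> {
    // Specialized path: the exact length is known up front, so reserve once.
    fn spec_extend(&mut self, iter: std::vec::IntoIter<T>) {
        self.reserve(iter.len());
        for item in iter {
            self.push(item);
        }
    }
}

fn main() {
    let mut v = vec![1, 2];
    v.spec_extend(vec![3, 4].into_iter()); // dispatches to the specialized impl
    v.spec_extend(Some(5)); // dispatches to the generic fallback
    assert_eq!(v, [1, 2, 3, 4, 5]);
}
```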
diff --git a/library/alloc/src/collections/vec_deque/drain.rs b/library/alloc/src/collections/vec_deque/drain.rs
new file mode 100644
index 000000000..05f94da6d
--- /dev/null
+++ b/library/alloc/src/collections/vec_deque/drain.rs
@@ -0,0 +1,142 @@
+use core::iter::FusedIterator;
+use core::ptr::{self, NonNull};
+use core::{fmt, mem};
+
+use crate::alloc::{Allocator, Global};
+
+use super::{count, Iter, VecDeque};
+
+/// A draining iterator over the elements of a `VecDeque`.
+///
+/// This `struct` is created by the [`drain`] method on [`VecDeque`]. See its
+/// documentation for more.
+///
+/// [`drain`]: VecDeque::drain
+#[stable(feature = "drain", since = "1.6.0")]
+pub struct Drain<
+ 'a,
+ T: 'a,
+ #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
+> {
+ after_tail: usize,
+ after_head: usize,
+ iter: Iter<'a, T>,
+ deque: NonNull<VecDeque<T, A>>,
+}
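As orientation for the machinery that follows, a short usage sketch (editorial, not part of this diff) of the `drain` method that produces this iterator:

```
use std::collections::VecDeque;

fn main() {
    let mut deque: VecDeque<i32> = (1..=6).collect();

    // Draining a range yields the removed elements in front-to-back order.
    let drained: Vec<i32> = deque.drain(1..4).collect();
    assert_eq!(drained, [2, 3, 4]);
    assert_eq!(deque, [1, 5, 6]);

    // A `Drain` that is dropped without being consumed still removes the range;
    // the `Drop` impl below is what guarantees this.
    deque.drain(..1);
    assert_eq!(deque, [5, 6]);
}
```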
+
+impl<'a, T, A: Allocator> Drain<'a, T, A> {
+ pub(super) unsafe fn new(
+ after_tail: usize,
+ after_head: usize,
+ iter: Iter<'a, T>,
+ deque: NonNull<VecDeque<T, A>>,
+ ) -> Self {
+ Drain { after_tail, after_head, iter, deque }
+ }
+}
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<T: fmt::Debug, A: Allocator> fmt::Debug for Drain<'_, T, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("Drain")
+ .field(&self.after_tail)
+ .field(&self.after_head)
+ .field(&self.iter)
+ .finish()
+ }
+}
+
+#[stable(feature = "drain", since = "1.6.0")]
+unsafe impl<T: Sync, A: Allocator + Sync> Sync for Drain<'_, T, A> {}
+#[stable(feature = "drain", since = "1.6.0")]
+unsafe impl<T: Send, A: Allocator + Send> Send for Drain<'_, T, A> {}
+
+#[stable(feature = "drain", since = "1.6.0")]
+impl<T, A: Allocator> Drop for Drain<'_, T, A> {
+ fn drop(&mut self) {
+ struct DropGuard<'r, 'a, T, A: Allocator>(&'r mut Drain<'a, T, A>);
+
+ impl<'r, 'a, T, A: Allocator> Drop for DropGuard<'r, 'a, T, A> {
+ fn drop(&mut self) {
+ self.0.for_each(drop);
+
+ let source_deque = unsafe { self.0.deque.as_mut() };
+
+ // T = source_deque_tail; H = source_deque_head; t = drain_tail; h = drain_head
+ //
+ // T t h H
+ // [. . . o o x x o o . . .]
+ //
+ let orig_tail = source_deque.tail;
+ let drain_tail = source_deque.head;
+ let drain_head = self.0.after_tail;
+ let orig_head = self.0.after_head;
+
+ let tail_len = count(orig_tail, drain_tail, source_deque.cap());
+ let head_len = count(drain_head, orig_head, source_deque.cap());
+
+ // Restore the original head value
+ source_deque.head = orig_head;
+
+ match (tail_len, head_len) {
+ (0, 0) => {
+ source_deque.head = 0;
+ source_deque.tail = 0;
+ }
+ (0, _) => {
+ source_deque.tail = drain_head;
+ }
+ (_, 0) => {
+ source_deque.head = drain_tail;
+ }
+ _ => unsafe {
+ if tail_len <= head_len {
+ source_deque.tail = source_deque.wrap_sub(drain_head, tail_len);
+ source_deque.wrap_copy(source_deque.tail, orig_tail, tail_len);
+ } else {
+ source_deque.head = source_deque.wrap_add(drain_tail, head_len);
+ source_deque.wrap_copy(drain_tail, drain_head, head_len);
+ }
+ },
+ }
+ }
+ }
+
+ while let Some(item) = self.next() {
+ let guard = DropGuard(self);
+ drop(item);
+ mem::forget(guard);
+ }
+
+ DropGuard(self);
+ }
+}
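The per-item `DropGuard` plus `mem::forget` above keeps the deque consistent even if dropping one of the drained elements panics: the guard's `Drop` finishes consuming the remaining elements and repairs the deque during unwinding, and is disarmed once an element was dropped successfully. A stand-alone sketch of the underlying guard-and-forget idiom (editorial, simplified, with made-up `Guard`/`do_work` names; here the cleanup runs only on panic):

```
use std::mem;
use std::panic::{catch_unwind, AssertUnwindSafe};

struct Guard<'a> {
    log: &'a mut Vec<&'static str>,
}

impl Drop for Guard<'_> {
    fn drop(&mut self) {
        // Runs during unwinding (or on the success path if the guard is not disarmed).
        self.log.push("cleanup ran");
    }
}

fn do_work(log: &mut Vec<&'static str>, fail: bool) {
    let guard = Guard { log };
    if fail {
        panic!("work failed"); // unwinding drops `guard`, so the cleanup still happens
    }
    guard.log.push("work done");
    mem::forget(guard); // success path: disarm the guard, skipping the cleanup
}

fn main() {
    let mut log = Vec::new();
    let _ = catch_unwind(AssertUnwindSafe(|| do_work(&mut log, true)));
    assert_eq!(log, ["cleanup ran"]);
}
```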
+
+#[stable(feature = "drain", since = "1.6.0")]
+impl<T, A: Allocator> Iterator for Drain<'_, T, A> {
+ type Item = T;
+
+ #[inline]
+ fn next(&mut self) -> Option<T> {
+ self.iter.next().map(|elt| unsafe { ptr::read(elt) })
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+}
+
+#[stable(feature = "drain", since = "1.6.0")]
+impl<T, A: Allocator> DoubleEndedIterator for Drain<'_, T, A> {
+ #[inline]
+ fn next_back(&mut self) -> Option<T> {
+ self.iter.next_back().map(|elt| unsafe { ptr::read(elt) })
+ }
+}
+
+#[stable(feature = "drain", since = "1.6.0")]
+impl<T, A: Allocator> ExactSizeIterator for Drain<'_, T, A> {}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T, A: Allocator> FusedIterator for Drain<'_, T, A> {}
diff --git a/library/alloc/src/collections/vec_deque/into_iter.rs b/library/alloc/src/collections/vec_deque/into_iter.rs
new file mode 100644
index 000000000..55f6138cd
--- /dev/null
+++ b/library/alloc/src/collections/vec_deque/into_iter.rs
@@ -0,0 +1,72 @@
+use core::fmt;
+use core::iter::{FusedIterator, TrustedLen};
+
+use crate::alloc::{Allocator, Global};
+
+use super::VecDeque;
+
+/// An owning iterator over the elements of a `VecDeque`.
+///
+/// This `struct` is created by the [`into_iter`] method on [`VecDeque`]
+/// (provided by the [`IntoIterator`] trait). See its documentation for more.
+///
+/// [`into_iter`]: VecDeque::into_iter
+/// [`IntoIterator`]: core::iter::IntoIterator
+#[derive(Clone)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct IntoIter<
+ T,
+ #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
+> {
+ inner: VecDeque<T, A>,
+}
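A brief usage sketch (editorial, not part of this diff): `into_iter` consumes the deque and yields owned elements from the front, which is exactly what the `Iterator` impl below does via `pop_front`.

```
use std::collections::VecDeque;

fn main() {
    let deque: VecDeque<i32> = VecDeque::from([1, 2, 3]);

    // The deque is moved into the iterator; elements come out front-to-back.
    let doubled: Vec<i32> = deque.into_iter().map(|x| x * 2).collect();
    assert_eq!(doubled, [2, 4, 6]);
}
```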
+
+impl<T, A: Allocator> IntoIter<T, A> {
+ pub(super) fn new(inner: VecDeque<T, A>) -> Self {
+ IntoIter { inner }
+ }
+}
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<T: fmt::Debug, A: Allocator> fmt::Debug for IntoIter<T, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("IntoIter").field(&self.inner).finish()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator> Iterator for IntoIter<T, A> {
+ type Item = T;
+
+ #[inline]
+ fn next(&mut self) -> Option<T> {
+ self.inner.pop_front()
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let len = self.inner.len();
+ (len, Some(len))
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
+ #[inline]
+ fn next_back(&mut self) -> Option<T> {
+ self.inner.pop_back()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator> ExactSizeIterator for IntoIter<T, A> {
+ fn is_empty(&self) -> bool {
+ self.inner.is_empty()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T, A: Allocator> FusedIterator for IntoIter<T, A> {}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<T, A: Allocator> TrustedLen for IntoIter<T, A> {}
diff --git a/library/alloc/src/collections/vec_deque/iter.rs b/library/alloc/src/collections/vec_deque/iter.rs
new file mode 100644
index 000000000..e696d7ed6
--- /dev/null
+++ b/library/alloc/src/collections/vec_deque/iter.rs
@@ -0,0 +1,219 @@
+use core::fmt;
+use core::iter::{FusedIterator, TrustedLen, TrustedRandomAccess, TrustedRandomAccessNoCoerce};
+use core::mem::MaybeUninit;
+use core::ops::Try;
+
+use super::{count, wrap_index, RingSlices};
+
+/// An iterator over the elements of a `VecDeque`.
+///
+/// This `struct` is created by the [`iter`] method on [`super::VecDeque`]. See its
+/// documentation for more.
+///
+/// [`iter`]: super::VecDeque::iter
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Iter<'a, T: 'a> {
+ ring: &'a [MaybeUninit<T>],
+ tail: usize,
+ head: usize,
+}
+
+impl<'a, T> Iter<'a, T> {
+ pub(super) fn new(ring: &'a [MaybeUninit<T>], tail: usize, head: usize) -> Self {
+ Iter { ring, tail, head }
+ }
+}
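To make the `tail`/`head` bookkeeping concrete, here is a self-contained model (editorial; it mirrors, but is not, the crate-private `wrap_index` helper) of how this iterator walks a ring buffer whose live elements wrap around the end of the allocation:

```
fn wrap_index(index: usize, size: usize) -> usize {
    // The deque keeps its buffer capacity at a power of two, so the index can
    // be reduced with a mask instead of a modulo.
    debug_assert!(size.is_power_of_two());
    index & (size - 1)
}

fn main() {
    let ring = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h']; // capacity 8
    // Live elements occupy ring[6], ring[7], ring[0], ring[1].
    let (mut tail, head) = (6usize, 2usize);

    let mut seen = Vec::new();
    while tail != head {
        seen.push(ring[tail]);
        tail = wrap_index(tail.wrapping_add(1), ring.len());
    }
    assert_eq!(seen, ['g', 'h', 'a', 'b']);
}
```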
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<T: fmt::Debug> fmt::Debug for Iter<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
+ // Safety:
+ // - `self.head` and `self.tail` in a ring buffer are always valid indices.
+ // - `RingSlices::ring_slices` guarantees that the slices split according to `self.head` and `self.tail` are initialized.
+ unsafe {
+ f.debug_tuple("Iter")
+ .field(&MaybeUninit::slice_assume_init_ref(front))
+ .field(&MaybeUninit::slice_assume_init_ref(back))
+ .finish()
+ }
+ }
+}
+
+// FIXME(#26925) Remove in favor of `#[derive(Clone)]`
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Clone for Iter<'_, T> {
+ fn clone(&self) -> Self {
+ Iter { ring: self.ring, tail: self.tail, head: self.head }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> Iterator for Iter<'a, T> {
+ type Item = &'a T;
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a T> {
+ if self.tail == self.head {
+ return None;
+ }
+ let tail = self.tail;
+ self.tail = wrap_index(self.tail.wrapping_add(1), self.ring.len());
+ // Safety:
+ // - `self.tail` in a ring buffer is always a valid index.
+ // - `self.head` and `self.tail` equality is checked above.
+ unsafe { Some(self.ring.get_unchecked(tail).assume_init_ref()) }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let len = count(self.tail, self.head, self.ring.len());
+ (len, Some(len))
+ }
+
+ fn fold<Acc, F>(self, mut accum: Acc, mut f: F) -> Acc
+ where
+ F: FnMut(Acc, Self::Item) -> Acc,
+ {
+ let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
+ // Safety:
+ // - `self.head` and `self.tail` in a ring buffer are always valid indices.
+ // - `RingSlices::ring_slices` guarantees that the slices split according to `self.head` and `self.tail` are initialized.
+ unsafe {
+ accum = MaybeUninit::slice_assume_init_ref(front).iter().fold(accum, &mut f);
+ MaybeUninit::slice_assume_init_ref(back).iter().fold(accum, &mut f)
+ }
+ }
+
+ fn try_fold<B, F, R>(&mut self, init: B, mut f: F) -> R
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> R,
+ R: Try<Output = B>,
+ {
+ let (mut iter, final_res);
+ if self.tail <= self.head {
+ // Safety: single slice self.ring[self.tail..self.head] is initialized.
+ iter = unsafe { MaybeUninit::slice_assume_init_ref(&self.ring[self.tail..self.head]) }
+ .iter();
+ final_res = iter.try_fold(init, &mut f);
+ } else {
+ // Safety: two slices: self.ring[self.tail..], self.ring[..self.head] both are initialized.
+ let (front, back) = self.ring.split_at(self.tail);
+
+ let mut back_iter = unsafe { MaybeUninit::slice_assume_init_ref(back).iter() };
+ let res = back_iter.try_fold(init, &mut f);
+ let len = self.ring.len();
+ self.tail = (self.ring.len() - back_iter.len()) & (len - 1);
+ iter = unsafe { MaybeUninit::slice_assume_init_ref(&front[..self.head]).iter() };
+ final_res = iter.try_fold(res?, &mut f);
+ }
+ self.tail = self.head - iter.len();
+ final_res
+ }
+
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ if n >= count(self.tail, self.head, self.ring.len()) {
+ self.tail = self.head;
+ None
+ } else {
+ self.tail = wrap_index(self.tail.wrapping_add(n), self.ring.len());
+ self.next()
+ }
+ }
+
+ #[inline]
+ fn last(mut self) -> Option<&'a T> {
+ self.next_back()
+ }
+
+ #[inline]
+ unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item {
+ // Safety: The TrustedRandomAccess contract requires that callers only pass an index
+ // that is in bounds.
+ unsafe {
+ let idx = wrap_index(self.tail.wrapping_add(idx), self.ring.len());
+ self.ring.get_unchecked(idx).assume_init_ref()
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> DoubleEndedIterator for Iter<'a, T> {
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a T> {
+ if self.tail == self.head {
+ return None;
+ }
+ self.head = wrap_index(self.head.wrapping_sub(1), self.ring.len());
+ // Safety:
+ // - `self.head` in a ring buffer is always a valid index.
+ // - `self.head` and `self.tail` equality is checked above.
+ unsafe { Some(self.ring.get_unchecked(self.head).assume_init_ref()) }
+ }
+
+ fn rfold<Acc, F>(self, mut accum: Acc, mut f: F) -> Acc
+ where
+ F: FnMut(Acc, Self::Item) -> Acc,
+ {
+ let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
+ // Safety:
+ // - `self.head` and `self.tail` in a ring buffer are always valid indices.
+ // - `RingSlices::ring_slices` guarantees that the slices split according to `self.head` and `self.tail` are initialized.
+ unsafe {
+ accum = MaybeUninit::slice_assume_init_ref(back).iter().rfold(accum, &mut f);
+ MaybeUninit::slice_assume_init_ref(front).iter().rfold(accum, &mut f)
+ }
+ }
+
+ fn try_rfold<B, F, R>(&mut self, init: B, mut f: F) -> R
+ where
+ Self: Sized,
+ F: FnMut(B, Self::Item) -> R,
+ R: Try<Output = B>,
+ {
+ let (mut iter, final_res);
+ if self.tail <= self.head {
+ // Safety: single slice self.ring[self.tail..self.head] is initialized.
+ iter = unsafe {
+ MaybeUninit::slice_assume_init_ref(&self.ring[self.tail..self.head]).iter()
+ };
+ final_res = iter.try_rfold(init, &mut f);
+ } else {
+ // Safety: two slices: self.ring[self.tail..], self.ring[..self.head] both are initialized.
+ let (front, back) = self.ring.split_at(self.tail);
+
+ let mut front_iter =
+ unsafe { MaybeUninit::slice_assume_init_ref(&front[..self.head]).iter() };
+ let res = front_iter.try_rfold(init, &mut f);
+ self.head = front_iter.len();
+ iter = unsafe { MaybeUninit::slice_assume_init_ref(back).iter() };
+ final_res = iter.try_rfold(res?, &mut f);
+ }
+ self.head = self.tail + iter.len();
+ final_res
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> ExactSizeIterator for Iter<'_, T> {
+ fn is_empty(&self) -> bool {
+ self.head == self.tail
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T> FusedIterator for Iter<'_, T> {}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<T> TrustedLen for Iter<'_, T> {}
+
+#[doc(hidden)]
+#[unstable(feature = "trusted_random_access", issue = "none")]
+unsafe impl<T> TrustedRandomAccess for Iter<'_, T> {}
+
+#[doc(hidden)]
+#[unstable(feature = "trusted_random_access", issue = "none")]
+unsafe impl<T> TrustedRandomAccessNoCoerce for Iter<'_, T> {
+ const MAY_HAVE_SIDE_EFFECT: bool = false;
+}
diff --git a/library/alloc/src/collections/vec_deque/iter_mut.rs b/library/alloc/src/collections/vec_deque/iter_mut.rs
new file mode 100644
index 000000000..b78c0d5e1
--- /dev/null
+++ b/library/alloc/src/collections/vec_deque/iter_mut.rs
@@ -0,0 +1,162 @@
+use core::fmt;
+use core::iter::{FusedIterator, TrustedLen, TrustedRandomAccess, TrustedRandomAccessNoCoerce};
+use core::marker::PhantomData;
+
+use super::{count, wrap_index, RingSlices};
+
+/// A mutable iterator over the elements of a `VecDeque`.
+///
+/// This `struct` is created by the [`iter_mut`] method on [`super::VecDeque`]. See its
+/// documentation for more.
+///
+/// [`iter_mut`]: super::VecDeque::iter_mut
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct IterMut<'a, T: 'a> {
+ // Internal safety invariant: the entire slice is dereferenceable.
+ ring: *mut [T],
+ tail: usize,
+ head: usize,
+ phantom: PhantomData<&'a mut [T]>,
+}
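A brief usage sketch (editorial, not part of this diff) of the `iter_mut` call that produces this iterator; it hands out `&mut T`, so elements can be edited in place without reordering the deque.

```
use std::collections::VecDeque;

fn main() {
    let mut deque: VecDeque<i32> = VecDeque::from([1, 2, 3]);

    for value in deque.iter_mut() {
        *value *= 10; // mutate each element in place
    }
    assert_eq!(deque, [10, 20, 30]);
}
```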
+
+impl<'a, T> IterMut<'a, T> {
+ pub(super) unsafe fn new(
+ ring: *mut [T],
+ tail: usize,
+ head: usize,
+ phantom: PhantomData<&'a mut [T]>,
+ ) -> Self {
+ IterMut { ring, tail, head, phantom }
+ }
+}
+
+// SAFETY: we do nothing thread-local and there is no interior mutability,
+// so the usual structural `Send`/`Sync` apply.
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<T: Send> Send for IterMut<'_, T> {}
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<T: Sync> Sync for IterMut<'_, T> {}
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<T: fmt::Debug> fmt::Debug for IterMut<'_, T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
+ // SAFETY: these are the elements we have not handed out yet, so aliasing is fine.
+ // The `IterMut` invariant also ensures everything is dereferenceable.
+ let (front, back) = unsafe { (&*front, &*back) };
+ f.debug_tuple("IterMut").field(&front).field(&back).finish()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> Iterator for IterMut<'a, T> {
+ type Item = &'a mut T;
+
+ #[inline]
+ fn next(&mut self) -> Option<&'a mut T> {
+ if self.tail == self.head {
+ return None;
+ }
+ let tail = self.tail;
+ self.tail = wrap_index(self.tail.wrapping_add(1), self.ring.len());
+
+ unsafe {
+ let elem = self.ring.get_unchecked_mut(tail);
+ Some(&mut *elem)
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let len = count(self.tail, self.head, self.ring.len());
+ (len, Some(len))
+ }
+
+ fn fold<Acc, F>(self, mut accum: Acc, mut f: F) -> Acc
+ where
+ F: FnMut(Acc, Self::Item) -> Acc,
+ {
+ let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
+ // SAFETY: these are the elements we have not handed out yet, so aliasing is fine.
+ // The `IterMut` invariant also ensures everything is dereferenceable.
+ let (front, back) = unsafe { (&mut *front, &mut *back) };
+ accum = front.iter_mut().fold(accum, &mut f);
+ back.iter_mut().fold(accum, &mut f)
+ }
+
+ fn nth(&mut self, n: usize) -> Option<Self::Item> {
+ if n >= count(self.tail, self.head, self.ring.len()) {
+ self.tail = self.head;
+ None
+ } else {
+ self.tail = wrap_index(self.tail.wrapping_add(n), self.ring.len());
+ self.next()
+ }
+ }
+
+ #[inline]
+ fn last(mut self) -> Option<&'a mut T> {
+ self.next_back()
+ }
+
+ #[inline]
+ unsafe fn __iterator_get_unchecked(&mut self, idx: usize) -> Self::Item {
+ // Safety: The TrustedRandomAccess contract requires that callers only pass an index
+ // that is in bounds.
+ unsafe {
+ let idx = wrap_index(self.tail.wrapping_add(idx), self.ring.len());
+ &mut *self.ring.get_unchecked_mut(idx)
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> DoubleEndedIterator for IterMut<'a, T> {
+ #[inline]
+ fn next_back(&mut self) -> Option<&'a mut T> {
+ if self.tail == self.head {
+ return None;
+ }
+ self.head = wrap_index(self.head.wrapping_sub(1), self.ring.len());
+
+ unsafe {
+ let elem = self.ring.get_unchecked_mut(self.head);
+ Some(&mut *elem)
+ }
+ }
+
+ fn rfold<Acc, F>(self, mut accum: Acc, mut f: F) -> Acc
+ where
+ F: FnMut(Acc, Self::Item) -> Acc,
+ {
+ let (front, back) = RingSlices::ring_slices(self.ring, self.head, self.tail);
+ // SAFETY: these are the elements we have not handed out yet, so aliasing is fine.
+ // The `IterMut` invariant also ensures everything is dereferenceable.
+ let (front, back) = unsafe { (&mut *front, &mut *back) };
+ accum = back.iter_mut().rfold(accum, &mut f);
+ front.iter_mut().rfold(accum, &mut f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> ExactSizeIterator for IterMut<'_, T> {
+ fn is_empty(&self) -> bool {
+ self.head == self.tail
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T> FusedIterator for IterMut<'_, T> {}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<T> TrustedLen for IterMut<'_, T> {}
+
+#[doc(hidden)]
+#[unstable(feature = "trusted_random_access", issue = "none")]
+unsafe impl<T> TrustedRandomAccess for IterMut<'_, T> {}
+
+#[doc(hidden)]
+#[unstable(feature = "trusted_random_access", issue = "none")]
+unsafe impl<T> TrustedRandomAccessNoCoerce for IterMut<'_, T> {
+ const MAY_HAVE_SIDE_EFFECT: bool = false;
+}
diff --git a/library/alloc/src/collections/vec_deque/macros.rs b/library/alloc/src/collections/vec_deque/macros.rs
new file mode 100644
index 000000000..5c7913073
--- /dev/null
+++ b/library/alloc/src/collections/vec_deque/macros.rs
@@ -0,0 +1,19 @@
+macro_rules! __impl_slice_eq1 {
+ ([$($vars:tt)*] $lhs:ty, $rhs:ty, $($constraints:tt)*) => {
+ #[stable(feature = "vec_deque_partial_eq_slice", since = "1.17.0")]
+ impl<T, U, A: Allocator, $($vars)*> PartialEq<$rhs> for $lhs
+ where
+ T: PartialEq<U>,
+ $($constraints)*
+ {
+ fn eq(&self, other: &$rhs) -> bool {
+ if self.len() != other.len() {
+ return false;
+ }
+ let (sa, sb) = self.as_slices();
+ let (oa, ob) = other[..].split_at(sa.len());
+ sa == oa && sb == ob
+ }
+ }
+ }
+}
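The macro compares a deque against slice-like types by splitting the other operand at the deque's own internal split point (`as_slices`). A small sketch of the resulting behaviour (editorial, not part of this diff):

```
use std::collections::VecDeque;

fn main() {
    let mut deque: VecDeque<i32> = VecDeque::with_capacity(4);
    deque.push_back(2);
    deque.push_back(3);
    deque.push_front(1); // the storage may now wrap around internally

    // Comparisons against arrays, slices and Vec work in logical
    // (front-to-back) order even when the storage wraps.
    assert_eq!(deque, [1, 2, 3]);
    assert_eq!(deque, vec![1, 2, 3]);

    // `as_slices` exposes the two halves that the macro stitches together.
    let (front, back) = deque.as_slices();
    assert_eq!(front.len() + back.len(), deque.len());
}
```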
diff --git a/library/alloc/src/collections/vec_deque/mod.rs b/library/alloc/src/collections/vec_deque/mod.rs
new file mode 100644
index 000000000..4d895d837
--- /dev/null
+++ b/library/alloc/src/collections/vec_deque/mod.rs
@@ -0,0 +1,3137 @@
+//! A double-ended queue (deque) implemented with a growable ring buffer.
+//!
+//! This queue has *O*(1) amortized inserts and removals from both ends of the
+//! container. It also has *O*(1) indexing like a vector. The contained elements
+//! are not required to be copyable, and the queue will be sendable if the
+//! contained type is sendable.
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+use core::cmp::{self, Ordering};
+use core::fmt;
+use core::hash::{Hash, Hasher};
+use core::iter::{repeat_with, FromIterator};
+use core::marker::PhantomData;
+use core::mem::{self, ManuallyDrop, MaybeUninit};
+use core::ops::{Index, IndexMut, Range, RangeBounds};
+use core::ptr::{self, NonNull};
+use core::slice;
+
+use crate::alloc::{Allocator, Global};
+use crate::collections::TryReserveError;
+use crate::collections::TryReserveErrorKind;
+use crate::raw_vec::RawVec;
+use crate::vec::Vec;
+
+#[macro_use]
+mod macros;
+
+#[stable(feature = "drain", since = "1.6.0")]
+pub use self::drain::Drain;
+
+mod drain;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::iter_mut::IterMut;
+
+mod iter_mut;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::into_iter::IntoIter;
+
+mod into_iter;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::iter::Iter;
+
+mod iter;
+
+use self::pair_slices::PairSlices;
+
+mod pair_slices;
+
+use self::ring_slices::RingSlices;
+
+mod ring_slices;
+
+use self::spec_extend::SpecExtend;
+
+mod spec_extend;
+
+#[cfg(test)]
+mod tests;
+
+const INITIAL_CAPACITY: usize = 7; // 2^3 - 1
+const MINIMUM_CAPACITY: usize = 1; // 2 - 1
+
+const MAXIMUM_ZST_CAPACITY: usize = 1 << (usize::BITS - 1); // Largest possible power of two
+
+/// A double-ended queue implemented with a growable ring buffer.
+///
+/// The "default" usage of this type as a queue is to use [`push_back`] to add to
+/// the queue, and [`pop_front`] to remove from the queue. [`extend`] and [`append`]
+/// push onto the back in this manner, and iterating over `VecDeque` goes front
+/// to back.
+///
+/// A `VecDeque` with a known list of items can be initialized from an array:
+///
+/// ```
+/// use std::collections::VecDeque;
+///
+/// let deq = VecDeque::from([-1, 0, 1]);
+/// ```
+///
+/// Since `VecDeque` is a ring buffer, its elements are not necessarily contiguous
+/// in memory. If you want to access the elements as a single slice, such as for
+/// efficient sorting, you can use [`make_contiguous`]. It rotates the `VecDeque`
+/// so that its elements do not wrap, and returns a mutable slice to the
+/// now-contiguous element sequence.
+///
+/// [`push_back`]: VecDeque::push_back
+/// [`pop_front`]: VecDeque::pop_front
+/// [`extend`]: VecDeque::extend
+/// [`append`]: VecDeque::append
+/// [`make_contiguous`]: VecDeque::make_contiguous
+#[cfg_attr(not(test), rustc_diagnostic_item = "VecDeque")]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_insignificant_dtor]
+pub struct VecDeque<
+ T,
+ #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
+> {
+ // tail and head are pointers into the buffer. Tail always points
+ // to the first element that could be read, Head always points
+ // to where data should be written.
+ // If tail == head the buffer is empty. The length of the ringbuffer
+ // is defined as the distance between the two.
+ tail: usize,
+ head: usize,
+ buf: RawVec<T, A>,
+}
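As a complement to the doc-comment above, a short usage sketch (editorial, not part of this diff) of the queue discipline it describes, plus `make_contiguous`:

```
use std::collections::VecDeque;

fn main() {
    let mut queue: VecDeque<&str> = VecDeque::new();

    // FIFO usage: push onto the back, pop from the front.
    queue.push_back("first");
    queue.push_back("second");
    queue.push_back("third");
    assert_eq!(queue.pop_front(), Some("first"));
    queue.push_back("fourth");

    // `make_contiguous` rotates the ring buffer so the elements form one slice,
    // which allows slice operations such as sorting.
    queue.make_contiguous().sort();
    assert_eq!(queue, ["fourth", "second", "third"]);
}
```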
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Clone, A: Allocator + Clone> Clone for VecDeque<T, A> {
+ fn clone(&self) -> Self {
+ let mut deq = Self::with_capacity_in(self.len(), self.allocator().clone());
+ deq.extend(self.iter().cloned());
+ deq
+ }
+
+ fn clone_from(&mut self, other: &Self) {
+ self.truncate(other.len());
+
+ let mut iter = PairSlices::from(self, other);
+ while let Some((dst, src)) = iter.next() {
+ dst.clone_from_slice(&src);
+ }
+
+ if iter.has_remainder() {
+ for remainder in iter.remainder() {
+ self.extend(remainder.iter().cloned());
+ }
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<#[may_dangle] T, A: Allocator> Drop for VecDeque<T, A> {
+ fn drop(&mut self) {
+ /// Runs the destructor for all items in the slice when it gets dropped (normally or
+ /// during unwinding).
+ struct Dropper<'a, T>(&'a mut [T]);
+
+ impl<'a, T> Drop for Dropper<'a, T> {
+ fn drop(&mut self) {
+ unsafe {
+ ptr::drop_in_place(self.0);
+ }
+ }
+ }
+
+ let (front, back) = self.as_mut_slices();
+ unsafe {
+ let _back_dropper = Dropper(back);
+ // use drop for [T]
+ ptr::drop_in_place(front);
+ }
+ // RawVec handles deallocation
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> Default for VecDeque<T> {
+ /// Creates an empty deque.
+ #[inline]
+ fn default() -> VecDeque<T> {
+ VecDeque::new()
+ }
+}
+
+impl<T, A: Allocator> VecDeque<T, A> {
+ /// Marginally more convenient
+ #[inline]
+ fn ptr(&self) -> *mut T {
+ self.buf.ptr()
+ }
+
+ /// Marginally more convenient
+ #[inline]
+ fn cap(&self) -> usize {
+ if mem::size_of::<T>() == 0 {
+ // For zero sized types, we are always at maximum capacity
+ MAXIMUM_ZST_CAPACITY
+ } else {
+ self.buf.capacity()
+ }
+ }
+
+ /// Turns `ptr` into a slice. Since the elements of the backing buffer may be
+ /// uninitialized, the returned slice is of [`MaybeUninit<T>`].
+ ///
+ /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and
+ /// incorrect usage of this method.
+ ///
+ /// [zeroed]: mem::MaybeUninit::zeroed
+ #[inline]
+ unsafe fn buffer_as_slice(&self) -> &[MaybeUninit<T>] {
+ unsafe { slice::from_raw_parts(self.ptr() as *mut MaybeUninit<T>, self.cap()) }
+ }
+
+ /// Turns `ptr` into a mutable slice. Since the elements of the backing buffer
+ /// may be uninitialized, the returned slice is of [`MaybeUninit<T>`].
+ ///
+ /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and
+ /// incorrect usage of this method.
+ ///
+ /// [zeroed]: mem::MaybeUninit::zeroed
+ #[inline]
+ unsafe fn buffer_as_mut_slice(&mut self) -> &mut [MaybeUninit<T>] {
+ unsafe { slice::from_raw_parts_mut(self.ptr() as *mut MaybeUninit<T>, self.cap()) }
+ }
+
+ /// Moves an element out of the buffer
+ #[inline]
+ unsafe fn buffer_read(&mut self, off: usize) -> T {
+ unsafe { ptr::read(self.ptr().add(off)) }
+ }
+
+ /// Writes an element into the buffer, moving it.
+ #[inline]
+ unsafe fn buffer_write(&mut self, off: usize, value: T) {
+ unsafe {
+ ptr::write(self.ptr().add(off), value);
+ }
+ }
+
+ /// Returns `true` if the buffer is at full capacity.
+ #[inline]
+ fn is_full(&self) -> bool {
+ self.cap() - self.len() == 1
+ }
+
+ /// Returns the index in the underlying buffer for a given logical element
+ /// index.
+ #[inline]
+ fn wrap_index(&self, idx: usize) -> usize {
+ wrap_index(idx, self.cap())
+ }
+
+ /// Returns the index in the underlying buffer for a given logical element
+ /// index + addend.
+ #[inline]
+ fn wrap_add(&self, idx: usize, addend: usize) -> usize {
+ wrap_index(idx.wrapping_add(addend), self.cap())
+ }
+
+ /// Returns the index in the underlying buffer for a given logical element
+ /// index - subtrahend.
+ #[inline]
+ fn wrap_sub(&self, idx: usize, subtrahend: usize) -> usize {
+ wrap_index(idx.wrapping_sub(subtrahend), self.cap())
+ }
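Because the capacity is always a power of two, these helpers reduce indices with a mask rather than a modulo, and unsigned overflow in the intermediate `wrapping_add`/`wrapping_sub` is harmless. A worked sketch (editorial, with a hypothetical capacity of 8):

```
fn main() {
    let cap = 8usize; // the deque guarantees a power-of-two capacity
    let mask = cap - 1;

    // wrap_add(6, 3): 6 + 3 = 9, and 9 & 7 == 1, so the index wraps to 1.
    assert_eq!(6usize.wrapping_add(3) & mask, 1);

    // wrap_sub(1, 3): the unsigned subtraction underflows, but masking with
    // `cap - 1` still lands on the correct wrapped index, 6.
    assert_eq!(1usize.wrapping_sub(3) & mask, 6);
}
```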
+
+ /// Copies a contiguous block of memory len long from src to dst
+ #[inline]
+ unsafe fn copy(&self, dst: usize, src: usize, len: usize) {
+ debug_assert!(
+ dst + len <= self.cap(),
+ "cpy dst={} src={} len={} cap={}",
+ dst,
+ src,
+ len,
+ self.cap()
+ );
+ debug_assert!(
+ src + len <= self.cap(),
+ "cpy dst={} src={} len={} cap={}",
+ dst,
+ src,
+ len,
+ self.cap()
+ );
+ unsafe {
+ ptr::copy(self.ptr().add(src), self.ptr().add(dst), len);
+ }
+ }
+
+ /// Copies a contiguous block of memory `len` long from `src` to `dst`; the two regions must not overlap.
+ #[inline]
+ unsafe fn copy_nonoverlapping(&self, dst: usize, src: usize, len: usize) {
+ debug_assert!(
+ dst + len <= self.cap(),
+ "cno dst={} src={} len={} cap={}",
+ dst,
+ src,
+ len,
+ self.cap()
+ );
+ debug_assert!(
+ src + len <= self.cap(),
+ "cno dst={} src={} len={} cap={}",
+ dst,
+ src,
+ len,
+ self.cap()
+ );
+ unsafe {
+ ptr::copy_nonoverlapping(self.ptr().add(src), self.ptr().add(dst), len);
+ }
+ }
+
+ /// Copies a potentially wrapping block of memory `len` long from `src` to `dst`.
+ /// `(abs(dst - src) + len)` must be no larger than `cap()` (i.e. there must be
+ /// at most one continuous overlapping region between `src` and `dst`).
+ unsafe fn wrap_copy(&self, dst: usize, src: usize, len: usize) {
+ #[allow(dead_code)]
+ fn diff(a: usize, b: usize) -> usize {
+ if a <= b { b - a } else { a - b }
+ }
+ debug_assert!(
+ cmp::min(diff(dst, src), self.cap() - diff(dst, src)) + len <= self.cap(),
+ "wrc dst={} src={} len={} cap={}",
+ dst,
+ src,
+ len,
+ self.cap()
+ );
+
+ if src == dst || len == 0 {
+ return;
+ }
+
+ let dst_after_src = self.wrap_sub(dst, src) < len;
+
+ let src_pre_wrap_len = self.cap() - src;
+ let dst_pre_wrap_len = self.cap() - dst;
+ let src_wraps = src_pre_wrap_len < len;
+ let dst_wraps = dst_pre_wrap_len < len;
+
+ match (dst_after_src, src_wraps, dst_wraps) {
+ (_, false, false) => {
+ // src doesn't wrap, dst doesn't wrap
+ //
+ // S . . .
+ // 1 [_ _ A A B B C C _]
+ // 2 [_ _ A A A A B B _]
+ // D . . .
+ //
+ unsafe {
+ self.copy(dst, src, len);
+ }
+ }
+ (false, false, true) => {
+ // dst before src, src doesn't wrap, dst wraps
+ //
+ // S . . .
+ // 1 [A A B B _ _ _ C C]
+ // 2 [A A B B _ _ _ A A]
+ // 3 [B B B B _ _ _ A A]
+ // . . D .
+ //
+ unsafe {
+ self.copy(dst, src, dst_pre_wrap_len);
+ self.copy(0, src + dst_pre_wrap_len, len - dst_pre_wrap_len);
+ }
+ }
+ (true, false, true) => {
+ // src before dst, src doesn't wrap, dst wraps
+ //
+ // S . . .
+ // 1 [C C _ _ _ A A B B]
+ // 2 [B B _ _ _ A A B B]
+ // 3 [B B _ _ _ A A A A]
+ // . . D .
+ //
+ unsafe {
+ self.copy(0, src + dst_pre_wrap_len, len - dst_pre_wrap_len);
+ self.copy(dst, src, dst_pre_wrap_len);
+ }
+ }
+ (false, true, false) => {
+ // dst before src, src wraps, dst doesn't wrap
+ //
+ // . . S .
+ // 1 [C C _ _ _ A A B B]
+ // 2 [C C _ _ _ B B B B]
+ // 3 [C C _ _ _ B B C C]
+ // D . . .
+ //
+ unsafe {
+ self.copy(dst, src, src_pre_wrap_len);
+ self.copy(dst + src_pre_wrap_len, 0, len - src_pre_wrap_len);
+ }
+ }
+ (true, true, false) => {
+ // src before dst, src wraps, dst doesn't wrap
+ //
+ // . . S .
+ // 1 [A A B B _ _ _ C C]
+ // 2 [A A A A _ _ _ C C]
+ // 3 [C C A A _ _ _ C C]
+ // D . . .
+ //
+ unsafe {
+ self.copy(dst + src_pre_wrap_len, 0, len - src_pre_wrap_len);
+ self.copy(dst, src, src_pre_wrap_len);
+ }
+ }
+ (false, true, true) => {
+ // dst before src, src wraps, dst wraps
+ //
+ // . . . S .
+ // 1 [A B C D _ E F G H]
+ // 2 [A B C D _ E G H H]
+ // 3 [A B C D _ E G H A]
+ // 4 [B C C D _ E G H A]
+ // . . D . .
+ //
+ debug_assert!(dst_pre_wrap_len > src_pre_wrap_len);
+ let delta = dst_pre_wrap_len - src_pre_wrap_len;
+ unsafe {
+ self.copy(dst, src, src_pre_wrap_len);
+ self.copy(dst + src_pre_wrap_len, 0, delta);
+ self.copy(0, delta, len - dst_pre_wrap_len);
+ }
+ }
+ (true, true, true) => {
+ // src before dst, src wraps, dst wraps
+ //
+ // . . S . .
+ // 1 [A B C D _ E F G H]
+ // 2 [A A B D _ E F G H]
+ // 3 [H A B D _ E F G H]
+ // 4 [H A B D _ E F F G]
+ // . . . D .
+ //
+ debug_assert!(src_pre_wrap_len > dst_pre_wrap_len);
+ let delta = src_pre_wrap_len - dst_pre_wrap_len;
+ unsafe {
+ self.copy(delta, 0, len - src_pre_wrap_len);
+ self.copy(0, self.cap() - delta, delta);
+ self.copy(dst, src, dst_pre_wrap_len);
+ }
+ }
+ }
+ }
+
+ /// Copies all values from `src` to `dst`, wrapping around if needed.
+ /// Assumes capacity is sufficient.
+ #[inline]
+ unsafe fn copy_slice(&mut self, dst: usize, src: &[T]) {
+ debug_assert!(src.len() <= self.cap());
+ let head_room = self.cap() - dst;
+ if src.len() <= head_room {
+ unsafe {
+ ptr::copy_nonoverlapping(src.as_ptr(), self.ptr().add(dst), src.len());
+ }
+ } else {
+ let (left, right) = src.split_at(head_room);
+ unsafe {
+ ptr::copy_nonoverlapping(left.as_ptr(), self.ptr().add(dst), left.len());
+ ptr::copy_nonoverlapping(right.as_ptr(), self.ptr(), right.len());
+ }
+ }
+ }
+
+ /// Writes all values from `iter` to `dst`.
+ ///
+ /// # Safety
+ ///
+ /// Assumes no wrapping around happens.
+ /// Assumes capacity is sufficient.
+ #[inline]
+ unsafe fn write_iter(
+ &mut self,
+ dst: usize,
+ iter: impl Iterator<Item = T>,
+ written: &mut usize,
+ ) {
+ iter.enumerate().for_each(|(i, element)| unsafe {
+ self.buffer_write(dst + i, element);
+ *written += 1;
+ });
+ }
+
+ /// Frobs the head and tail sections around to handle the fact that we
+ /// just reallocated. Unsafe because it trusts old_capacity.
+ #[inline]
+ unsafe fn handle_capacity_increase(&mut self, old_capacity: usize) {
+ let new_capacity = self.cap();
+
+ // Move the shortest contiguous section of the ring buffer
+ // T H
+ // [o o o o o o o . ]
+ // T H
+ // A [o o o o o o o . . . . . . . . . ]
+ // H T
+ // [o o . o o o o o ]
+ // T H
+ // B [. . . o o o o o o o . . . . . . ]
+ // H T
+ // [o o o o o . o o ]
+ // H T
+ // C [o o o o o . . . . . . . . . o o ]
+
+ if self.tail <= self.head {
+ // A
+ // Nop
+ } else if self.head < old_capacity - self.tail {
+ // B
+ unsafe {
+ self.copy_nonoverlapping(old_capacity, 0, self.head);
+ }
+ self.head += old_capacity;
+ debug_assert!(self.head > self.tail);
+ } else {
+ // C
+ let new_tail = new_capacity - (old_capacity - self.tail);
+ unsafe {
+ self.copy_nonoverlapping(new_tail, self.tail, old_capacity - self.tail);
+ }
+ self.tail = new_tail;
+ debug_assert!(self.head < self.tail);
+ }
+ debug_assert!(self.head < self.cap());
+ debug_assert!(self.tail < self.cap());
+ debug_assert!(self.cap().count_ones() == 1);
+ }
+}
+
+impl<T> VecDeque<T> {
+ /// Creates an empty deque.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let deque: VecDeque<u32> = VecDeque::new();
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ pub fn new() -> VecDeque<T> {
+ VecDeque::new_in(Global)
+ }
+
+ /// Creates an empty deque with space for at least `capacity` elements.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let deque: VecDeque<u32> = VecDeque::with_capacity(10);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ pub fn with_capacity(capacity: usize) -> VecDeque<T> {
+ Self::with_capacity_in(capacity, Global)
+ }
+}
+
+impl<T, A: Allocator> VecDeque<T, A> {
+ /// Creates an empty deque.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let deque: VecDeque<u32> = VecDeque::new();
+ /// ```
+ #[inline]
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ pub fn new_in(alloc: A) -> VecDeque<T, A> {
+ VecDeque::with_capacity_in(INITIAL_CAPACITY, alloc)
+ }
+
+ /// Creates an empty deque with space for at least `capacity` elements.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let deque: VecDeque<u32> = VecDeque::with_capacity(10);
+ /// ```
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ pub fn with_capacity_in(capacity: usize, alloc: A) -> VecDeque<T, A> {
+ assert!(capacity < 1_usize << usize::BITS - 1, "capacity overflow");
+ // +1 since the ringbuffer always leaves one space empty
+ let cap = cmp::max(capacity + 1, MINIMUM_CAPACITY + 1).next_power_of_two();
+
+ VecDeque { tail: 0, head: 0, buf: RawVec::with_capacity_in(cap, alloc) }
+ }
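A worked sketch of the arithmetic in this constructor (editorial; the exact `capacity()` value is specific to this power-of-two ring-buffer implementation): the requested capacity plus the one permanently empty slot is rounded up to the next power of two.

```
use std::collections::VecDeque;

fn main() {
    // Requesting 10 slots: 10 + 1 = 11 rounds up to 16, and one slot always
    // stays empty, so 15 elements fit before the buffer has to grow.
    let deque: VecDeque<u8> = VecDeque::with_capacity(10);
    assert_eq!(deque.capacity(), 15);

    // The same arithmetic, spelled out.
    let requested = 10usize;
    let cap = (requested + 1).next_power_of_two();
    assert_eq!(cap, 16);
    assert_eq!(cap - 1, deque.capacity());
}
```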
+
+ /// Provides a reference to the element at the given index.
+ ///
+ /// Element at index 0 is the front of the queue.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut buf = VecDeque::new();
+ /// buf.push_back(3);
+ /// buf.push_back(4);
+ /// buf.push_back(5);
+ /// assert_eq!(buf.get(1), Some(&4));
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn get(&self, index: usize) -> Option<&T> {
+ if index < self.len() {
+ let idx = self.wrap_add(self.tail, index);
+ unsafe { Some(&*self.ptr().add(idx)) }
+ } else {
+ None
+ }
+ }
+
+ /// Provides a mutable reference to the element at the given index.
+ ///
+ /// Element at index 0 is the front of the queue.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut buf = VecDeque::new();
+ /// buf.push_back(3);
+ /// buf.push_back(4);
+ /// buf.push_back(5);
+ /// if let Some(elem) = buf.get_mut(1) {
+ /// *elem = 7;
+ /// }
+ ///
+ /// assert_eq!(buf[1], 7);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn get_mut(&mut self, index: usize) -> Option<&mut T> {
+ if index < self.len() {
+ let idx = self.wrap_add(self.tail, index);
+ unsafe { Some(&mut *self.ptr().add(idx)) }
+ } else {
+ None
+ }
+ }
+
+ /// Swaps elements at indices `i` and `j`.
+ ///
+ /// `i` and `j` may be equal.
+ ///
+ /// Element at index 0 is the front of the queue.
+ ///
+ /// # Panics
+ ///
+ /// Panics if either index is out of bounds.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut buf = VecDeque::new();
+ /// buf.push_back(3);
+ /// buf.push_back(4);
+ /// buf.push_back(5);
+ /// assert_eq!(buf, [3, 4, 5]);
+ /// buf.swap(0, 2);
+ /// assert_eq!(buf, [5, 4, 3]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn swap(&mut self, i: usize, j: usize) {
+ assert!(i < self.len());
+ assert!(j < self.len());
+ let ri = self.wrap_add(self.tail, i);
+ let rj = self.wrap_add(self.tail, j);
+ unsafe { ptr::swap(self.ptr().add(ri), self.ptr().add(rj)) }
+ }
+
+ /// Returns the number of elements the deque can hold without
+ /// reallocating.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let buf: VecDeque<i32> = VecDeque::with_capacity(10);
+ /// assert!(buf.capacity() >= 10);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn capacity(&self) -> usize {
+ self.cap() - 1
+ }
+
+ /// Reserves the minimum capacity for at least `additional` more elements to be inserted in the
+ /// given deque. Does nothing if the capacity is already sufficient.
+ ///
+ /// Note that the allocator may give the collection more space than it requests. Therefore
+ /// capacity can not be relied upon to be precisely minimal. Prefer [`reserve`] if future
+ /// insertions are expected.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity overflows `usize`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut buf: VecDeque<i32> = [1].into();
+ /// buf.reserve_exact(10);
+ /// assert!(buf.capacity() >= 11);
+ /// ```
+ ///
+ /// [`reserve`]: VecDeque::reserve
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn reserve_exact(&mut self, additional: usize) {
+ self.reserve(additional);
+ }
+
+ /// Reserves capacity for at least `additional` more elements to be inserted in the given
+ /// deque. The collection may reserve more space to speculatively avoid frequent reallocations.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity overflows `usize`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut buf: VecDeque<i32> = [1].into();
+ /// buf.reserve(10);
+ /// assert!(buf.capacity() >= 11);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn reserve(&mut self, additional: usize) {
+ let old_cap = self.cap();
+ let used_cap = self.len() + 1;
+ let new_cap = used_cap
+ .checked_add(additional)
+ .and_then(|needed_cap| needed_cap.checked_next_power_of_two())
+ .expect("capacity overflow");
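+ // e.g. with 5 elements and `additional == 10`, `used_cap` is 6 and
+ // `new_cap` becomes (6 + 10).next_power_of_two() == 16, so an old
+ // capacity of 8 buffer slots is grown to 16.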
+
+ if new_cap > old_cap {
+ self.buf.reserve_exact(used_cap, new_cap - used_cap);
+ unsafe {
+ self.handle_capacity_increase(old_cap);
+ }
+ }
+ }
+
+ /// Tries to reserve the minimum capacity for at least `additional` more elements to
+ /// be inserted in the given deque. After calling `try_reserve_exact`,
+ /// capacity will be greater than or equal to `self.len() + additional` if
+ /// it returns `Ok(())`. Does nothing if the capacity is already sufficient.
+ ///
+ /// Note that the allocator may give the collection more space than it
+ /// requests. Therefore, capacity can not be relied upon to be precisely
+ /// minimal. Prefer [`try_reserve`] if future insertions are expected.
+ ///
+ /// [`try_reserve`]: VecDeque::try_reserve
+ ///
+ /// # Errors
+ ///
+ /// If the capacity overflows `usize`, or the allocator reports a failure, then an error
+ /// is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::TryReserveError;
+ /// use std::collections::VecDeque;
+ ///
+ /// fn process_data(data: &[u32]) -> Result<VecDeque<u32>, TryReserveError> {
+ /// let mut output = VecDeque::new();
+ ///
+ /// // Pre-reserve the memory, exiting if we can't
+ /// output.try_reserve_exact(data.len())?;
+ ///
+ /// // Now we know this can't OOM (out-of-memory) in the middle of our complex work
+ /// output.extend(data.iter().map(|&val| {
+ /// val * 2 + 5 // very complicated
+ /// }));
+ ///
+ /// Ok(output)
+ /// }
+ /// # process_data(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
+ /// ```
+ #[stable(feature = "try_reserve", since = "1.57.0")]
+ pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
+ self.try_reserve(additional)
+ }
+
+ /// Tries to reserve capacity for at least `additional` more elements to be inserted
+ /// in the given deque. The collection may reserve more space to speculatively avoid
+ /// frequent reallocations. After calling `try_reserve`, capacity will be
+ /// greater than or equal to `self.len() + additional` if it returns
+ /// `Ok(())`. Does nothing if capacity is already sufficient.
+ ///
+ /// # Errors
+ ///
+ /// If the capacity overflows `usize`, or the allocator reports a failure, then an error
+ /// is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::TryReserveError;
+ /// use std::collections::VecDeque;
+ ///
+ /// fn process_data(data: &[u32]) -> Result<VecDeque<u32>, TryReserveError> {
+ /// let mut output = VecDeque::new();
+ ///
+ /// // Pre-reserve the memory, exiting if we can't
+ /// output.try_reserve(data.len())?;
+ ///
+ /// // Now we know this can't OOM in the middle of our complex work
+ /// output.extend(data.iter().map(|&val| {
+ /// val * 2 + 5 // very complicated
+ /// }));
+ ///
+ /// Ok(output)
+ /// }
+ /// # process_data(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
+ /// ```
+ #[stable(feature = "try_reserve", since = "1.57.0")]
+ pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
+ let old_cap = self.cap();
+ let used_cap = self.len() + 1;
+ let new_cap = used_cap
+ .checked_add(additional)
+ .and_then(|needed_cap| needed_cap.checked_next_power_of_two())
+ .ok_or(TryReserveErrorKind::CapacityOverflow)?;
+
+ if new_cap > old_cap {
+ self.buf.try_reserve_exact(used_cap, new_cap - used_cap)?;
+ unsafe {
+ self.handle_capacity_increase(old_cap);
+ }
+ }
+ Ok(())
+ }
+
+ /// Shrinks the capacity of the deque as much as possible.
+ ///
+ /// It will drop down as close as possible to the length but the allocator may still inform the
+ /// deque that there is space for a few more elements.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut buf = VecDeque::with_capacity(15);
+ /// buf.extend(0..4);
+ /// assert_eq!(buf.capacity(), 15);
+ /// buf.shrink_to_fit();
+ /// assert!(buf.capacity() >= 4);
+ /// ```
+ #[stable(feature = "deque_extras_15", since = "1.5.0")]
+ pub fn shrink_to_fit(&mut self) {
+ self.shrink_to(0);
+ }
+
+ /// Shrinks the capacity of the deque with a lower bound.
+ ///
+ /// The capacity will remain at least as large as both the length
+ /// and the supplied value.
+ ///
+ /// If the current capacity is less than the lower limit, this is a no-op.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut buf = VecDeque::with_capacity(15);
+ /// buf.extend(0..4);
+ /// assert_eq!(buf.capacity(), 15);
+ /// buf.shrink_to(6);
+ /// assert!(buf.capacity() >= 6);
+ /// buf.shrink_to(0);
+ /// assert!(buf.capacity() >= 4);
+ /// ```
+ #[stable(feature = "shrink_to", since = "1.56.0")]
+ pub fn shrink_to(&mut self, min_capacity: usize) {
+ let min_capacity = cmp::min(min_capacity, self.capacity());
+ // We don't have to worry about an overflow as neither `self.len()` nor `self.capacity()`
+ // can ever be `usize::MAX`. +1 as the ringbuffer always leaves one space empty.
+ let target_cap = cmp::max(cmp::max(min_capacity, self.len()) + 1, MINIMUM_CAPACITY + 1)
+ .next_power_of_two();
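+ // e.g. in the doc example above (4 elements in 16 slots), `shrink_to(6)`
+ // computes max(6, 4) + 1 == 7 (assuming `MINIMUM_CAPACITY + 1` does not
+ // dominate) and rounds it up to 8 slots, so `capacity()` reports 7.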
+
+ if target_cap < self.cap() {
+ // There are three cases of interest:
+ // All elements are out of desired bounds
+ // Elements are contiguous, and head is out of desired bounds
+ // Elements are discontiguous, and tail is out of desired bounds
+ //
+ // At all other times, element positions are unaffected.
+ //
+ // Indicates that elements at the head should be moved.
+ let head_outside = self.head == 0 || self.head >= target_cap;
+ // Move elements from out of desired bounds (positions after target_cap)
+ if self.tail >= target_cap && head_outside {
+ // T H
+ // [. . . . . . . . o o o o o o o . ]
+ // T H
+ // [o o o o o o o . ]
+ unsafe {
+ self.copy_nonoverlapping(0, self.tail, self.len());
+ }
+ self.head = self.len();
+ self.tail = 0;
+ } else if self.tail != 0 && self.tail < target_cap && head_outside {
+ // T H
+ // [. . . o o o o o o o . . . . . . ]
+ // H T
+ // [o o . o o o o o ]
+ let len = self.wrap_sub(self.head, target_cap);
+ unsafe {
+ self.copy_nonoverlapping(0, target_cap, len);
+ }
+ self.head = len;
+ debug_assert!(self.head < self.tail);
+ } else if self.tail >= target_cap {
+ // H T
+ // [o o o o o . . . . . . . . . o o ]
+ // H T
+ // [o o o o o . o o ]
+ debug_assert!(self.wrap_sub(self.head, 1) < target_cap);
+ let len = self.cap() - self.tail;
+ let new_tail = target_cap - len;
+ unsafe {
+ self.copy_nonoverlapping(new_tail, self.tail, len);
+ }
+ self.tail = new_tail;
+ debug_assert!(self.head < self.tail);
+ }
+
+ self.buf.shrink_to_fit(target_cap);
+
+ debug_assert!(self.head < self.cap());
+ debug_assert!(self.tail < self.cap());
+ debug_assert!(self.cap().count_ones() == 1);
+ }
+ }
+
+ /// Shortens the deque, keeping the first `len` elements and dropping
+ /// the rest.
+ ///
+ /// If `len` is greater than the deque's current length, this has no
+ /// effect.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut buf = VecDeque::new();
+ /// buf.push_back(5);
+ /// buf.push_back(10);
+ /// buf.push_back(15);
+ /// assert_eq!(buf, [5, 10, 15]);
+ /// buf.truncate(1);
+ /// assert_eq!(buf, [5]);
+ /// ```
+ #[stable(feature = "deque_extras", since = "1.16.0")]
+ pub fn truncate(&mut self, len: usize) {
+ /// Runs the destructor for all items in the slice when it gets dropped (normally or
+ /// during unwinding).
+ struct Dropper<'a, T>(&'a mut [T]);
+
+ impl<'a, T> Drop for Dropper<'a, T> {
+ fn drop(&mut self) {
+ unsafe {
+ ptr::drop_in_place(self.0);
+ }
+ }
+ }
+
+ // Safe because:
+ //
+ // * Any slice passed to `drop_in_place` is valid: in the second case
+ // `len <= front.len()`, and in the first case the early return on
+ // `len > self.len()` ensures `begin <= back.len()`
+ // * The head of the VecDeque is moved before calling `drop_in_place`,
+ // so no value is dropped twice if `drop_in_place` panics
+ unsafe {
+ if len > self.len() {
+ return;
+ }
+ let num_dropped = self.len() - len;
+ let (front, back) = self.as_mut_slices();
+ if len > front.len() {
+ let begin = len - front.len();
+ let drop_back = back.get_unchecked_mut(begin..) as *mut _;
+ self.head = self.wrap_sub(self.head, num_dropped);
+ ptr::drop_in_place(drop_back);
+ } else {
+ let drop_back = back as *mut _;
+ let drop_front = front.get_unchecked_mut(len..) as *mut _;
+ self.head = self.wrap_sub(self.head, num_dropped);
+
+ // Make sure the second half is dropped even when a destructor
+ // in the first one panics.
+ let _back_dropper = Dropper(&mut *drop_back);
+ ptr::drop_in_place(drop_front);
+ }
+ }
+ }
+
+ /// Returns a reference to the underlying allocator.
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[inline]
+ pub fn allocator(&self) -> &A {
+ self.buf.allocator()
+ }
+
+ /// Returns a front-to-back iterator.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut buf = VecDeque::new();
+ /// buf.push_back(5);
+ /// buf.push_back(3);
+ /// buf.push_back(4);
+ /// let b: &[_] = &[&5, &3, &4];
+ /// let c: Vec<&i32> = buf.iter().collect();
+ /// assert_eq!(&c[..], b);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn iter(&self) -> Iter<'_, T> {
+ Iter::new(unsafe { self.buffer_as_slice() }, self.tail, self.head)
+ }
+
+ /// Returns a front-to-back iterator that returns mutable references.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut buf = VecDeque::new();
+ /// buf.push_back(5);
+ /// buf.push_back(3);
+ /// buf.push_back(4);
+ /// for num in buf.iter_mut() {
+ /// *num = *num - 2;
+ /// }
+ /// let b: &[_] = &[&mut 3, &mut 1, &mut 2];
+ /// assert_eq!(&buf.iter_mut().collect::<Vec<&mut i32>>()[..], b);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn iter_mut(&mut self) -> IterMut<'_, T> {
+ // SAFETY: The internal `IterMut` safety invariant is established because the
+ // `ring` we create is a dereferenceable slice for lifetime '_.
+ let ring = ptr::slice_from_raw_parts_mut(self.ptr(), self.cap());
+
+ unsafe { IterMut::new(ring, self.tail, self.head, PhantomData) }
+ }
+
+ /// Returns a pair of slices which contain, in order, the contents of the
+ /// deque.
+ ///
+ /// If [`make_contiguous`] was previously called, all elements of the
+ /// deque will be in the first slice and the second slice will be empty.
+ ///
+ /// [`make_contiguous`]: VecDeque::make_contiguous
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut deque = VecDeque::new();
+ ///
+ /// deque.push_back(0);
+ /// deque.push_back(1);
+ /// deque.push_back(2);
+ ///
+ /// assert_eq!(deque.as_slices(), (&[0, 1, 2][..], &[][..]));
+ ///
+ /// deque.push_front(10);
+ /// deque.push_front(9);
+ ///
+ /// assert_eq!(deque.as_slices(), (&[9, 10][..], &[0, 1, 2][..]));
+ /// ```
+ #[inline]
+ #[stable(feature = "deque_extras_15", since = "1.5.0")]
+ pub fn as_slices(&self) -> (&[T], &[T]) {
+ // Safety:
+ // - `self.head` and `self.tail` in a ring buffer are always valid indices.
+ // - `RingSlices::ring_slices` guarantees that the slices split according to `self.head` and `self.tail` are initialized.
+ unsafe {
+ let buf = self.buffer_as_slice();
+ let (front, back) = RingSlices::ring_slices(buf, self.head, self.tail);
+ (MaybeUninit::slice_assume_init_ref(front), MaybeUninit::slice_assume_init_ref(back))
+ }
+ }
+
+ /// Returns a pair of slices which contain, in order, the contents of the
+ /// deque.
+ ///
+ /// If [`make_contiguous`] was previously called, all elements of the
+ /// deque will be in the first slice and the second slice will be empty.
+ ///
+ /// [`make_contiguous`]: VecDeque::make_contiguous
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut deque = VecDeque::new();
+ ///
+ /// deque.push_back(0);
+ /// deque.push_back(1);
+ ///
+ /// deque.push_front(10);
+ /// deque.push_front(9);
+ ///
+ /// deque.as_mut_slices().0[0] = 42;
+ /// deque.as_mut_slices().1[0] = 24;
+ /// assert_eq!(deque.as_slices(), (&[42, 10][..], &[24, 1][..]));
+ /// ```
+ #[inline]
+ #[stable(feature = "deque_extras_15", since = "1.5.0")]
+ pub fn as_mut_slices(&mut self) -> (&mut [T], &mut [T]) {
+ // Safety:
+ // - `self.head` and `self.tail` in a ring buffer are always valid indices.
+ // - `RingSlices::ring_slices` guarantees that the slices split according to `self.head` and `self.tail` are initialized.
+ unsafe {
+ let head = self.head;
+ let tail = self.tail;
+ let buf = self.buffer_as_mut_slice();
+ let (front, back) = RingSlices::ring_slices(buf, head, tail);
+ (MaybeUninit::slice_assume_init_mut(front), MaybeUninit::slice_assume_init_mut(back))
+ }
+ }
+
+ /// Returns the number of elements in the deque.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut deque = VecDeque::new();
+ /// assert_eq!(deque.len(), 0);
+ /// deque.push_back(1);
+ /// assert_eq!(deque.len(), 1);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn len(&self) -> usize {
+ count(self.tail, self.head, self.cap())
+ }
+
+ /// Returns `true` if the deque is empty.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut deque = VecDeque::new();
+ /// assert!(deque.is_empty());
+ /// deque.push_front(1);
+ /// assert!(!deque.is_empty());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn is_empty(&self) -> bool {
+ self.tail == self.head
+ }
+
+ fn range_tail_head<R>(&self, range: R) -> (usize, usize)
+ where
+ R: RangeBounds<usize>,
+ {
+ let Range { start, end } = slice::range(range, ..self.len());
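+ // `start` and `end` are logical offsets from the front of the deque;
+ // wrapping them around `self.tail` converts them into physical buffer
+ // indices for the iterator constructors below.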
+ let tail = self.wrap_add(self.tail, start);
+ let head = self.wrap_add(self.tail, end);
+ (tail, head)
+ }
+
+ /// Creates an iterator that covers the specified range in the deque.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the starting point is greater than the end point or if
+ /// the end point is greater than the length of the deque.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let deque: VecDeque<_> = [1, 2, 3].into();
+ /// let range = deque.range(2..).copied().collect::<VecDeque<_>>();
+ /// assert_eq!(range, [3]);
+ ///
+ /// // A full range covers all contents
+ /// let all = deque.range(..);
+ /// assert_eq!(all.len(), 3);
+ /// ```
+ #[inline]
+ #[stable(feature = "deque_range", since = "1.51.0")]
+ pub fn range<R>(&self, range: R) -> Iter<'_, T>
+ where
+ R: RangeBounds<usize>,
+ {
+ let (tail, head) = self.range_tail_head(range);
+ // The shared reference we have in &self is maintained in the '_ of Iter.
+ Iter::new(unsafe { self.buffer_as_slice() }, tail, head)
+ }
+
+ /// Creates an iterator that covers the specified mutable range in the deque.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the starting point is greater than the end point or if
+ /// the end point is greater than the length of the deque.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut deque: VecDeque<_> = [1, 2, 3].into();
+ /// for v in deque.range_mut(2..) {
+ /// *v *= 2;
+ /// }
+ /// assert_eq!(deque, [1, 2, 6]);
+ ///
+ /// // A full range covers all contents
+ /// for v in deque.range_mut(..) {
+ /// *v *= 2;
+ /// }
+ /// assert_eq!(deque, [2, 4, 12]);
+ /// ```
+ #[inline]
+ #[stable(feature = "deque_range", since = "1.51.0")]
+ pub fn range_mut<R>(&mut self, range: R) -> IterMut<'_, T>
+ where
+ R: RangeBounds<usize>,
+ {
+ let (tail, head) = self.range_tail_head(range);
+
+ // SAFETY: The internal `IterMut` safety invariant is established because the
+ // `ring` we create is a dereferenceable slice for lifetime '_.
+ let ring = ptr::slice_from_raw_parts_mut(self.ptr(), self.cap());
+
+ unsafe { IterMut::new(ring, tail, head, PhantomData) }
+ }
+
+ /// Removes the specified range from the deque in bulk, returning all
+ /// removed elements as an iterator. If the iterator is dropped before
+ /// being fully consumed, it drops the remaining removed elements.
+ ///
+ /// The returned iterator keeps a mutable borrow on the queue to optimize
+ /// its implementation.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the starting point is greater than the end point or if
+ /// the end point is greater than the length of the deque.
+ ///
+ /// # Leaking
+ ///
+ /// If the returned iterator goes out of scope without being dropped (due to
+ /// [`mem::forget`], for example), the deque may have lost and leaked
+ /// elements arbitrarily, including elements outside the range.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut deque: VecDeque<_> = [1, 2, 3].into();
+ /// let drained = deque.drain(2..).collect::<VecDeque<_>>();
+ /// assert_eq!(drained, [3]);
+ /// assert_eq!(deque, [1, 2]);
+ ///
+ /// // A full range clears all contents, like `clear()` does
+ /// deque.drain(..);
+ /// assert!(deque.is_empty());
+ /// ```
+ #[inline]
+ #[stable(feature = "drain", since = "1.6.0")]
+ pub fn drain<R>(&mut self, range: R) -> Drain<'_, T, A>
+ where
+ R: RangeBounds<usize>,
+ {
+ // Memory safety
+ //
+ // When the Drain is first created, the source deque is shortened to
+ // make sure no uninitialized or moved-from elements are accessible at
+ // all if the Drain's destructor never gets to run.
+ //
+ // Drain will ptr::read out the values to remove.
+ // When finished, the remaining data will be copied back to cover the hole,
+ // and the head/tail values will be restored correctly.
+ //
+ let (drain_tail, drain_head) = self.range_tail_head(range);
+
+ // The deque's elements are parted into three segments:
+ // * self.tail -> drain_tail
+ // * drain_tail -> drain_head
+ // * drain_head -> self.head
+ //
+ // T = self.tail; H = self.head; t = drain_tail; h = drain_head
+ //
+ // We store drain_tail as self.head, and drain_head and self.head as
+ // after_tail and after_head respectively on the Drain. This also
+ // truncates the effective array such that if the Drain is leaked, we
+ // have forgotten about the potentially moved values after the start of
+ // the drain.
+ //
+ // T t h H
+ // [. . . o o x x o o . . .]
+ //
+ let head = self.head;
+
+ // "forget" about the values after the start of the drain until after
+ // the drain is complete and the Drain destructor is run.
+ self.head = drain_tail;
+
+ let deque = NonNull::from(&mut *self);
+ unsafe {
+ // Crucially, we only create shared references from `self` here and read from
+ // it. We do not write to `self` nor reborrow to a mutable reference.
+ // Hence the raw pointer we created above, for `deque`, remains valid.
+ let ring = self.buffer_as_slice();
+ let iter = Iter::new(ring, drain_tail, drain_head);
+
+ Drain::new(drain_head, head, iter, deque)
+ }
+ }
+
+ /// Clears the deque, removing all values.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut deque = VecDeque::new();
+ /// deque.push_back(1);
+ /// deque.clear();
+ /// assert!(deque.is_empty());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn clear(&mut self) {
+ self.truncate(0);
+ }
+
+ /// Returns `true` if the deque contains an element equal to the
+ /// given value.
+ ///
+ /// This operation is *O*(*n*).
+ ///
+ /// Note that if you have a sorted `VecDeque`, [`binary_search`] may be faster.
+ ///
+ /// [`binary_search`]: VecDeque::binary_search
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut deque: VecDeque<u32> = VecDeque::new();
+ ///
+ /// deque.push_back(0);
+ /// deque.push_back(1);
+ ///
+ /// assert_eq!(deque.contains(&1), true);
+ /// assert_eq!(deque.contains(&10), false);
+ /// ```
+ #[stable(feature = "vec_deque_contains", since = "1.12.0")]
+ pub fn contains(&self, x: &T) -> bool
+ where
+ T: PartialEq<T>,
+ {
+ let (a, b) = self.as_slices();
+ a.contains(x) || b.contains(x)
+ }
+
+ /// Provides a reference to the front element, or `None` if the deque is
+ /// empty.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut d = VecDeque::new();
+ /// assert_eq!(d.front(), None);
+ ///
+ /// d.push_back(1);
+ /// d.push_back(2);
+ /// assert_eq!(d.front(), Some(&1));
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn front(&self) -> Option<&T> {
+ self.get(0)
+ }
+
+ /// Provides a mutable reference to the front element, or `None` if the
+ /// deque is empty.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut d = VecDeque::new();
+ /// assert_eq!(d.front_mut(), None);
+ ///
+ /// d.push_back(1);
+ /// d.push_back(2);
+ /// match d.front_mut() {
+ /// Some(x) => *x = 9,
+ /// None => (),
+ /// }
+ /// assert_eq!(d.front(), Some(&9));
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn front_mut(&mut self) -> Option<&mut T> {
+ self.get_mut(0)
+ }
+
+ /// Provides a reference to the back element, or `None` if the deque is
+ /// empty.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut d = VecDeque::new();
+ /// assert_eq!(d.back(), None);
+ ///
+ /// d.push_back(1);
+ /// d.push_back(2);
+ /// assert_eq!(d.back(), Some(&2));
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn back(&self) -> Option<&T> {
+ self.get(self.len().wrapping_sub(1))
+ }
+
+ /// Provides a mutable reference to the back element, or `None` if the
+ /// deque is empty.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut d = VecDeque::new();
+ /// assert_eq!(d.back(), None);
+ ///
+ /// d.push_back(1);
+ /// d.push_back(2);
+ /// match d.back_mut() {
+ /// Some(x) => *x = 9,
+ /// None => (),
+ /// }
+ /// assert_eq!(d.back(), Some(&9));
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn back_mut(&mut self) -> Option<&mut T> {
+ self.get_mut(self.len().wrapping_sub(1))
+ }
+
+ /// Removes the first element and returns it, or `None` if the deque is
+ /// empty.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut d = VecDeque::new();
+ /// d.push_back(1);
+ /// d.push_back(2);
+ ///
+ /// assert_eq!(d.pop_front(), Some(1));
+ /// assert_eq!(d.pop_front(), Some(2));
+ /// assert_eq!(d.pop_front(), None);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn pop_front(&mut self) -> Option<T> {
+ if self.is_empty() {
+ None
+ } else {
+ let tail = self.tail;
+ self.tail = self.wrap_add(self.tail, 1);
+ unsafe { Some(self.buffer_read(tail)) }
+ }
+ }
+
+ /// Removes the last element from the deque and returns it, or `None` if
+ /// it is empty.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut buf = VecDeque::new();
+ /// assert_eq!(buf.pop_back(), None);
+ /// buf.push_back(1);
+ /// buf.push_back(3);
+ /// assert_eq!(buf.pop_back(), Some(3));
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn pop_back(&mut self) -> Option<T> {
+ if self.is_empty() {
+ None
+ } else {
+ self.head = self.wrap_sub(self.head, 1);
+ let head = self.head;
+ unsafe { Some(self.buffer_read(head)) }
+ }
+ }
+
+ /// Prepends an element to the deque.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut d = VecDeque::new();
+ /// d.push_front(1);
+ /// d.push_front(2);
+ /// assert_eq!(d.front(), Some(&2));
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn push_front(&mut self, value: T) {
+ if self.is_full() {
+ self.grow();
+ }
+
+ self.tail = self.wrap_sub(self.tail, 1);
+ let tail = self.tail;
+ unsafe {
+ self.buffer_write(tail, value);
+ }
+ }
+
+ /// Appends an element to the back of the deque.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut buf = VecDeque::new();
+ /// buf.push_back(1);
+ /// buf.push_back(3);
+ /// assert_eq!(3, *buf.back().unwrap());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn push_back(&mut self, value: T) {
+ if self.is_full() {
+ self.grow();
+ }
+
+ let head = self.head;
+ self.head = self.wrap_add(self.head, 1);
+ unsafe { self.buffer_write(head, value) }
+ }
+
+ #[inline]
+ fn is_contiguous(&self) -> bool {
+ // FIXME: Should we consider `head == 0` to mean
+ // that `self` is contiguous?
+ self.tail <= self.head
+ }
+
+ /// Removes an element from anywhere in the deque and returns it,
+ /// replacing it with the first element.
+ ///
+ /// This does not preserve ordering, but is *O*(1).
+ ///
+ /// Returns `None` if `index` is out of bounds.
+ ///
+ /// Element at index 0 is the front of the queue.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut buf = VecDeque::new();
+ /// assert_eq!(buf.swap_remove_front(0), None);
+ /// buf.push_back(1);
+ /// buf.push_back(2);
+ /// buf.push_back(3);
+ /// assert_eq!(buf, [1, 2, 3]);
+ ///
+ /// assert_eq!(buf.swap_remove_front(2), Some(3));
+ /// assert_eq!(buf, [2, 1]);
+ /// ```
+ #[stable(feature = "deque_extras_15", since = "1.5.0")]
+ pub fn swap_remove_front(&mut self, index: usize) -> Option<T> {
+ let length = self.len();
+ if length > 0 && index < length && index != 0 {
+ self.swap(index, 0);
+ } else if index >= length {
+ return None;
+ }
+ self.pop_front()
+ }
+
+ /// Removes an element from anywhere in the deque and returns it,
+ /// replacing it with the last element.
+ ///
+ /// This does not preserve ordering, but is *O*(1).
+ ///
+ /// Returns `None` if `index` is out of bounds.
+ ///
+ /// Element at index 0 is the front of the queue.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut buf = VecDeque::new();
+ /// assert_eq!(buf.swap_remove_back(0), None);
+ /// buf.push_back(1);
+ /// buf.push_back(2);
+ /// buf.push_back(3);
+ /// assert_eq!(buf, [1, 2, 3]);
+ ///
+ /// assert_eq!(buf.swap_remove_back(0), Some(1));
+ /// assert_eq!(buf, [3, 2]);
+ /// ```
+ #[stable(feature = "deque_extras_15", since = "1.5.0")]
+ pub fn swap_remove_back(&mut self, index: usize) -> Option<T> {
+ let length = self.len();
+ if length > 0 && index < length - 1 {
+ self.swap(index, length - 1);
+ } else if index >= length {
+ return None;
+ }
+ self.pop_back()
+ }
+
+ /// Inserts an element at `index` within the deque, shifting all elements
+ /// with indices greater than or equal to `index` towards the back.
+ ///
+ /// Element at index 0 is the front of the queue.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `index` is greater than the deque's length.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut vec_deque = VecDeque::new();
+ /// vec_deque.push_back('a');
+ /// vec_deque.push_back('b');
+ /// vec_deque.push_back('c');
+ /// assert_eq!(vec_deque, &['a', 'b', 'c']);
+ ///
+ /// vec_deque.insert(1, 'd');
+ /// assert_eq!(vec_deque, &['a', 'd', 'b', 'c']);
+ /// ```
+ #[stable(feature = "deque_extras_15", since = "1.5.0")]
+ pub fn insert(&mut self, index: usize, value: T) {
+ assert!(index <= self.len(), "index out of bounds");
+ if self.is_full() {
+ self.grow();
+ }
+
+ // Move the least number of elements in the ring buffer and insert
+ // the given object
+ //
+ // At most len/2 - 1 elements will be moved. O(min(i, n-i))
+ //
+ // There are three main cases:
+ // Elements are contiguous
+ // - special case when tail is 0
+ // Elements are discontiguous and the insert is in the tail section
+ // Elements are discontiguous and the insert is in the head section
+ //
+ // For each of those there are two more cases:
+ // Insert is closer to tail
+ // Insert is closer to head
+ //
+ // Key: H - self.head
+ // T - self.tail
+ // o - Valid element
+ // I - Insertion element
+ // A - The element that should be after the insertion point
+ // M - Indicates element was moved
+
+ let idx = self.wrap_add(self.tail, index);
+
+ let distance_to_tail = index;
+ let distance_to_head = self.len() - index;
+
+ let contiguous = self.is_contiguous();
+
+ match (contiguous, distance_to_tail <= distance_to_head, idx >= self.tail) {
+ (true, true, _) if index == 0 => {
+ // push_front
+ //
+ // T
+ // I H
+ // [A o o o o o o . . . . . . . . .]
+ //
+ // H T
+ // [A o o o o o o o . . . . . I]
+ //
+
+ self.tail = self.wrap_sub(self.tail, 1);
+ }
+ (true, true, _) => {
+ unsafe {
+ // contiguous, insert closer to tail:
+ //
+ // T I H
+ // [. . . o o A o o o o . . . . . .]
+ //
+ // T H
+ // [. . o o I A o o o o . . . . . .]
+ // M M
+ //
+ // contiguous, insert closer to tail and tail is 0:
+ //
+ //
+ // T I H
+ // [o o A o o o o . . . . . . . . .]
+ //
+ // H T
+ // [o I A o o o o o . . . . . . . o]
+ // M M
+
+ let new_tail = self.wrap_sub(self.tail, 1);
+
+ self.copy(new_tail, self.tail, 1);
+ // Already moved the tail, so we only copy `index - 1` elements.
+ self.copy(self.tail, self.tail + 1, index - 1);
+
+ self.tail = new_tail;
+ }
+ }
+ (true, false, _) => {
+ unsafe {
+ // contiguous, insert closer to head:
+ //
+ // T I H
+ // [. . . o o o o A o o . . . . . .]
+ //
+ // T H
+ // [. . . o o o o I A o o . . . . .]
+ // M M M
+
+ self.copy(idx + 1, idx, self.head - idx);
+ self.head = self.wrap_add(self.head, 1);
+ }
+ }
+ (false, true, true) => {
+ unsafe {
+ // discontiguous, insert closer to tail, tail section:
+ //
+ // H T I
+ // [o o o o o o . . . . . o o A o o]
+ //
+ // H T
+ // [o o o o o o . . . . o o I A o o]
+ // M M
+
+ self.copy(self.tail - 1, self.tail, index);
+ self.tail -= 1;
+ }
+ }
+ (false, false, true) => {
+ unsafe {
+ // discontiguous, insert closer to head, tail section:
+ //
+ // H T I
+ // [o o . . . . . . . o o o o o A o]
+ //
+ // H T
+ // [o o o . . . . . . o o o o o I A]
+ // M M M M
+
+ // copy elements up to new head
+ self.copy(1, 0, self.head);
+
+ // copy last element into empty spot at bottom of buffer
+ self.copy(0, self.cap() - 1, 1);
+
+ // move elements from idx to end forward not including ^ element
+ self.copy(idx + 1, idx, self.cap() - 1 - idx);
+
+ self.head += 1;
+ }
+ }
+ (false, true, false) if idx == 0 => {
+ unsafe {
+ // discontiguous, insert is closer to tail, head section,
+ // and is at index zero in the internal buffer:
+ //
+ // I H T
+ // [A o o o o o o o o o . . . o o o]
+ //
+ // H T
+ // [A o o o o o o o o o . . o o o I]
+ // M M M
+
+ // copy elements up to new tail
+ self.copy(self.tail - 1, self.tail, self.cap() - self.tail);
+
+ // copy last element into empty spot at bottom of buffer
+ self.copy(self.cap() - 1, 0, 1);
+
+ self.tail -= 1;
+ }
+ }
+ (false, true, false) => {
+ unsafe {
+ // discontiguous, insert closer to tail, head section:
+ //
+ // I H T
+ // [o o o A o o o o o o . . . o o o]
+ //
+ // H T
+ // [o o I A o o o o o o . . o o o o]
+ // M M M M M M
+
+ // copy elements up to new tail
+ self.copy(self.tail - 1, self.tail, self.cap() - self.tail);
+
+ // copy last element into empty spot at bottom of buffer
+ self.copy(self.cap() - 1, 0, 1);
+
+ // move elements from idx-1 to end forward not including ^ element
+ self.copy(0, 1, idx - 1);
+
+ self.tail -= 1;
+ }
+ }
+ (false, false, false) => {
+ unsafe {
+ // discontiguous, insert closer to head, head section:
+ //
+ // I H T
+ // [o o o o A o o . . . . . . o o o]
+ //
+ // H T
+ // [o o o o I A o o . . . . . o o o]
+ // M M M
+
+ self.copy(idx + 1, idx, self.head - idx);
+ self.head += 1;
+ }
+ }
+ }
+
+ // tail might've been changed so we need to recalculate
+ let new_idx = self.wrap_add(self.tail, index);
+ unsafe {
+ self.buffer_write(new_idx, value);
+ }
+ }
+
+ /// Removes and returns the element at `index` from the deque.
+ /// Whichever end is closer to the removal point will be moved to make
+ /// room, and all the affected elements will be moved to new positions.
+ /// Returns `None` if `index` is out of bounds.
+ ///
+ /// Element at index 0 is the front of the queue.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut buf = VecDeque::new();
+ /// buf.push_back(1);
+ /// buf.push_back(2);
+ /// buf.push_back(3);
+ /// assert_eq!(buf, [1, 2, 3]);
+ ///
+ /// assert_eq!(buf.remove(1), Some(2));
+ /// assert_eq!(buf, [1, 3]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn remove(&mut self, index: usize) -> Option<T> {
+ if self.is_empty() || self.len() <= index {
+ return None;
+ }
+
+ // There are three main cases:
+ // Elements are contiguous
+ // Elements are discontiguous and the removal is in the tail section
+ // Elements are discontiguous and the removal is in the head section
+ // - special case when elements are technically contiguous,
+ // but self.head = 0
+ //
+ // For each of those there are two more cases:
+ // Insert is closer to tail
+ // Insert is closer to head
+ //
+ // Key: H - self.head
+ // T - self.tail
+ // o - Valid element
+ // x - Element marked for removal
+ // R - Indicates element that is being removed
+ // M - Indicates element was moved
+
+ let idx = self.wrap_add(self.tail, index);
+
+ let elem = unsafe { Some(self.buffer_read(idx)) };
+
+ let distance_to_tail = index;
+ let distance_to_head = self.len() - index;
+
+ let contiguous = self.is_contiguous();
+
+ match (contiguous, distance_to_tail <= distance_to_head, idx >= self.tail) {
+ (true, true, _) => {
+ unsafe {
+ // contiguous, remove closer to tail:
+ //
+ // T R H
+ // [. . . o o x o o o o . . . . . .]
+ //
+ // T H
+ // [. . . . o o o o o o . . . . . .]
+ // M M
+
+ self.copy(self.tail + 1, self.tail, index);
+ self.tail += 1;
+ }
+ }
+ (true, false, _) => {
+ unsafe {
+ // contiguous, remove closer to head:
+ //
+ // T R H
+ // [. . . o o o o x o o . . . . . .]
+ //
+ // T H
+ // [. . . o o o o o o . . . . . . .]
+ // M M
+
+ self.copy(idx, idx + 1, self.head - idx - 1);
+ self.head -= 1;
+ }
+ }
+ (false, true, true) => {
+ unsafe {
+ // discontiguous, remove closer to tail, tail section:
+ //
+ // H T R
+ // [o o o o o o . . . . . o o x o o]
+ //
+ // H T
+ // [o o o o o o . . . . . . o o o o]
+ // M M
+
+ self.copy(self.tail + 1, self.tail, index);
+ self.tail = self.wrap_add(self.tail, 1);
+ }
+ }
+ (false, false, false) => {
+ unsafe {
+ // discontiguous, remove closer to head, head section:
+ //
+ // R H T
+ // [o o o o x o o . . . . . . o o o]
+ //
+ // H T
+ // [o o o o o o . . . . . . . o o o]
+ // M M
+
+ self.copy(idx, idx + 1, self.head - idx - 1);
+ self.head -= 1;
+ }
+ }
+ (false, false, true) => {
+ unsafe {
+ // discontiguous, remove closer to head, tail section:
+ //
+ // H T R
+ // [o o o . . . . . . o o o o o x o]
+ //
+ // H T
+ // [o o . . . . . . . o o o o o o o]
+ // M M M M
+ //
+ // or quasi-discontiguous, remove next to head, tail section:
+ //
+ // H T R
+ // [. . . . . . . . . o o o o o x o]
+ //
+ // T H
+ // [. . . . . . . . . o o o o o o .]
+ // M
+
+ // draw in elements in the tail section
+ self.copy(idx, idx + 1, self.cap() - idx - 1);
+
+ // Prevents underflow.
+ if self.head != 0 {
+ // copy first element into empty spot
+ self.copy(self.cap() - 1, 0, 1);
+
+ // move elements in the head section backwards
+ self.copy(0, 1, self.head - 1);
+ }
+
+ self.head = self.wrap_sub(self.head, 1);
+ }
+ }
+ (false, true, false) => {
+ unsafe {
+ // discontiguous, remove closer to tail, head section:
+ //
+ // R H T
+ // [o o x o o o o o o o . . . o o o]
+ //
+ // H T
+ // [o o o o o o o o o o . . . . o o]
+ // M M M M M
+
+ // draw in elements up to idx
+ self.copy(1, 0, idx);
+
+ // copy last element into empty spot
+ self.copy(0, self.cap() - 1, 1);
+
+ // move elements from tail to end forward, excluding the last one
+ self.copy(self.tail + 1, self.tail, self.cap() - self.tail - 1);
+
+ self.tail = self.wrap_add(self.tail, 1);
+ }
+ }
+ }
+
+ elem
+ }
+
+ /// Splits the deque into two at the given index.
+ ///
+ /// Returns a newly allocated `VecDeque`. `self` contains elements `[0, at)`,
+ /// and the returned deque contains elements `[at, len)`.
+ ///
+ /// Note that the capacity of `self` does not change.
+ ///
+ /// Element at index 0 is the front of the queue.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `at > len`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut buf: VecDeque<_> = [1, 2, 3].into();
+ /// let buf2 = buf.split_off(1);
+ /// assert_eq!(buf, [1]);
+ /// assert_eq!(buf2, [2, 3]);
+ /// ```
+ #[inline]
+ #[must_use = "use `.truncate()` if you don't need the other half"]
+ #[stable(feature = "split_off", since = "1.4.0")]
+ pub fn split_off(&mut self, at: usize) -> Self
+ where
+ A: Clone,
+ {
+ let len = self.len();
+ assert!(at <= len, "`at` out of bounds");
+
+ let other_len = len - at;
+ let mut other = VecDeque::with_capacity_in(other_len, self.allocator().clone());
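+ // e.g. for the doc example `[1, 2, 3].split_off(1)`: `at` lies in the
+ // first (and only) slice, so the two elements after index 1 are copied
+ // into `other`, then `self.head` moves back by 2 and `other.head`
+ // becomes 2.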
+
+ unsafe {
+ let (first_half, second_half) = self.as_slices();
+
+ let first_len = first_half.len();
+ let second_len = second_half.len();
+ if at < first_len {
+ // `at` lies in the first half.
+ let amount_in_first = first_len - at;
+
+ ptr::copy_nonoverlapping(first_half.as_ptr().add(at), other.ptr(), amount_in_first);
+
+ // just take all of the second half.
+ ptr::copy_nonoverlapping(
+ second_half.as_ptr(),
+ other.ptr().add(amount_in_first),
+ second_len,
+ );
+ } else {
+ // `at` lies in the second half, need to factor in the elements we skipped
+ // in the first half.
+ let offset = at - first_len;
+ let amount_in_second = second_len - offset;
+ ptr::copy_nonoverlapping(
+ second_half.as_ptr().add(offset),
+ other.ptr(),
+ amount_in_second,
+ );
+ }
+ }
+
+ // Cleanup where the ends of the buffers are
+ self.head = self.wrap_sub(self.head, other_len);
+ other.head = other.wrap_index(other_len);
+
+ other
+ }
+
+ /// Moves all the elements of `other` into `self`, leaving `other` empty.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new number of elements in self overflows a `usize`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut buf: VecDeque<_> = [1, 2].into();
+ /// let mut buf2: VecDeque<_> = [3, 4].into();
+ /// buf.append(&mut buf2);
+ /// assert_eq!(buf, [1, 2, 3, 4]);
+ /// assert_eq!(buf2, []);
+ /// ```
+ #[inline]
+ #[stable(feature = "append", since = "1.4.0")]
+ pub fn append(&mut self, other: &mut Self) {
+ self.reserve(other.len());
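+ // e.g. appending `[3, 4]` to `[1, 2]`: both of `other`'s slices are
+ // copied just past `self.head`, `self.head` then advances by 2, and
+ // `other` is emptied by setting its tail equal to its head.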
+ unsafe {
+ let (left, right) = other.as_slices();
+ self.copy_slice(self.head, left);
+ self.copy_slice(self.wrap_add(self.head, left.len()), right);
+ }
+ // SAFETY: Update pointers after copying to avoid leaving doppelganger
+ // in case of panics.
+ self.head = self.wrap_add(self.head, other.len());
+ // Silently drop values in `other`.
+ other.tail = other.head;
+ }
+
+ /// Retains only the elements specified by the predicate.
+ ///
+ /// In other words, remove all elements `e` for which `f(&e)` returns false.
+ /// This method operates in place, visiting each element exactly once in the
+ /// original order, and preserves the order of the retained elements.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut buf = VecDeque::new();
+ /// buf.extend(1..5);
+ /// buf.retain(|&x| x % 2 == 0);
+ /// assert_eq!(buf, [2, 4]);
+ /// ```
+ ///
+ /// Because the elements are visited exactly once in the original order,
+ /// external state may be used to decide which elements to keep.
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut buf = VecDeque::new();
+ /// buf.extend(1..6);
+ ///
+ /// let keep = [false, true, true, false, true];
+ /// let mut iter = keep.iter();
+ /// buf.retain(|_| *iter.next().unwrap());
+ /// assert_eq!(buf, [2, 3, 5]);
+ /// ```
+ #[stable(feature = "vec_deque_retain", since = "1.4.0")]
+ pub fn retain<F>(&mut self, mut f: F)
+ where
+ F: FnMut(&T) -> bool,
+ {
+ self.retain_mut(|elem| f(elem));
+ }
+
+ /// Retains only the elements specified by the predicate.
+ ///
+ /// In other words, remove all elements `e` for which `f(&mut e)` returns false.
+ /// This method operates in place, visiting each element exactly once in the
+ /// original order, and preserves the order of the retained elements.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut buf = VecDeque::new();
+ /// buf.extend(1..5);
+ /// buf.retain_mut(|x| if *x % 2 == 0 {
+ /// *x += 1;
+ /// true
+ /// } else {
+ /// false
+ /// });
+ /// assert_eq!(buf, [3, 5]);
+ /// ```
+ #[stable(feature = "vec_retain_mut", since = "1.61.0")]
+ pub fn retain_mut<F>(&mut self, mut f: F)
+ where
+ F: FnMut(&mut T) -> bool,
+ {
+ let len = self.len();
+ let mut idx = 0;
+ let mut cur = 0;
+
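+ // As an illustration, with `[1, 2, 3, 4]` and a predicate that keeps even
+ // values: stage 1 stops at the rejected 1 (idx 0, cur 1), stage 2 swaps 2
+ // and then 4 forward leaving `[2, 4, 3, 1]`, and stage 3 truncates the
+ // deque to `[2, 4]`.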
+ // Stage 1: All values are retained.
+ while cur < len {
+ if !f(&mut self[cur]) {
+ cur += 1;
+ break;
+ }
+ cur += 1;
+ idx += 1;
+ }
+ // Stage 2: Swap retained value into current idx.
+ while cur < len {
+ if !f(&mut self[cur]) {
+ cur += 1;
+ continue;
+ }
+
+ self.swap(idx, cur);
+ cur += 1;
+ idx += 1;
+ }
+ // Stage 3: Truncate all values after idx.
+ if cur != idx {
+ self.truncate(idx);
+ }
+ }
+
+ // Double the buffer size. This method is inline(never), so we expect it to only
+ // be called in cold paths.
+ // This may panic or abort
+ #[inline(never)]
+ fn grow(&mut self) {
+ // Extend or possibly remove this assertion when valid use-cases for growing the
+ // buffer without it being full emerge
+ debug_assert!(self.is_full());
+ let old_cap = self.cap();
+ self.buf.reserve_exact(old_cap, old_cap);
+ assert!(self.cap() == old_cap * 2);
+ unsafe {
+ self.handle_capacity_increase(old_cap);
+ }
+ debug_assert!(!self.is_full());
+ }
+
+ /// Modifies the deque in-place so that `len()` is equal to `new_len`,
+ /// either by removing excess elements from the back or by appending
+ /// elements generated by calling `generator` to the back.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut buf = VecDeque::new();
+ /// buf.push_back(5);
+ /// buf.push_back(10);
+ /// buf.push_back(15);
+ /// assert_eq!(buf, [5, 10, 15]);
+ ///
+ /// buf.resize_with(5, Default::default);
+ /// assert_eq!(buf, [5, 10, 15, 0, 0]);
+ ///
+ /// buf.resize_with(2, || unreachable!());
+ /// assert_eq!(buf, [5, 10]);
+ ///
+ /// let mut state = 100;
+ /// buf.resize_with(5, || { state += 1; state });
+ /// assert_eq!(buf, [5, 10, 101, 102, 103]);
+ /// ```
+ #[stable(feature = "vec_resize_with", since = "1.33.0")]
+ pub fn resize_with(&mut self, new_len: usize, generator: impl FnMut() -> T) {
+ let len = self.len();
+
+ if new_len > len {
+ self.extend(repeat_with(generator).take(new_len - len))
+ } else {
+ self.truncate(new_len);
+ }
+ }
+
+ /// Rearranges the internal storage of this deque so it is one contiguous
+ /// slice, which is then returned.
+ ///
+ /// This method does not allocate and does not change the order of the
+ /// inserted elements. As it returns a mutable slice, this can be used to
+ /// sort a deque.
+ ///
+ /// Once the internal storage is contiguous, the [`as_slices`] and
+ /// [`as_mut_slices`] methods will return the entire contents of the
+ /// deque in a single slice.
+ ///
+ /// [`as_slices`]: VecDeque::as_slices
+ /// [`as_mut_slices`]: VecDeque::as_mut_slices
+ ///
+ /// # Examples
+ ///
+ /// Sorting the content of a deque.
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut buf = VecDeque::with_capacity(15);
+ ///
+ /// buf.push_back(2);
+ /// buf.push_back(1);
+ /// buf.push_front(3);
+ ///
+ /// // sorting the deque
+ /// buf.make_contiguous().sort();
+ /// assert_eq!(buf.as_slices(), (&[1, 2, 3] as &[_], &[] as &[_]));
+ ///
+ /// // sorting it in reverse order
+ /// buf.make_contiguous().sort_by(|a, b| b.cmp(a));
+ /// assert_eq!(buf.as_slices(), (&[3, 2, 1] as &[_], &[] as &[_]));
+ /// ```
+ ///
+ /// Getting immutable access to the contiguous slice.
+ ///
+ /// ```rust
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut buf = VecDeque::new();
+ ///
+ /// buf.push_back(2);
+ /// buf.push_back(1);
+ /// buf.push_front(3);
+ ///
+ /// buf.make_contiguous();
+ /// if let (slice, &[]) = buf.as_slices() {
+ /// // we can now be sure that `slice` contains all elements of the deque,
+ /// // while still having immutable access to `buf`.
+ /// assert_eq!(buf.len(), slice.len());
+ /// assert_eq!(slice, &[3, 2, 1] as &[_]);
+ /// }
+ /// ```
+ #[stable(feature = "deque_make_contiguous", since = "1.48.0")]
+ pub fn make_contiguous(&mut self) -> &mut [T] {
+ if self.is_contiguous() {
+ let tail = self.tail;
+ let head = self.head;
+ // Safety:
+ // - `self.head` and `self.tail` in a ring buffer are always valid indices.
+ // - `RingSlices::ring_slices` guarantees that the slices split according to `self.head` and `self.tail` are initialized.
+ return unsafe {
+ MaybeUninit::slice_assume_init_mut(
+ RingSlices::ring_slices(self.buffer_as_mut_slice(), head, tail).0,
+ )
+ };
+ }
+
+ let buf = self.buf.ptr();
+ let cap = self.cap();
+ let len = self.len();
+
+ let free = self.tail - self.head;
+ let tail_len = cap - self.tail;
+
+ if free >= tail_len {
+ // there is enough free space to copy the tail in one go,
+ // this means that we first shift the head backwards, and then
+ // copy the tail to the correct position.
+ //
+ // from: DEFGH....ABC
+ // to: ABCDEFGH....
+ unsafe {
+ ptr::copy(buf, buf.add(tail_len), self.head);
+ // ...DEFGH.ABC
+ ptr::copy_nonoverlapping(buf.add(self.tail), buf, tail_len);
+ // ABCDEFGH....
+
+ self.tail = 0;
+ self.head = len;
+ }
+ } else if free > self.head {
+ // FIXME: We currently do not consider ....ABCDEFGH
+ // to be contiguous because `head` would be `0` in this
+ // case. While we probably want to change this it
+ // isn't trivial as a few places expect `is_contiguous`
+ // to mean that we can just slice using `buf[tail..head]`.
+
+ // there is enough free space to copy the head in one go,
+ // this means that we first shift the tail forwards, and then
+ // copy the head to the correct position.
+ //
+ // from: FGH....ABCDE
+ // to: ...ABCDEFGH.
+ unsafe {
+ ptr::copy(buf.add(self.tail), buf.add(self.head), tail_len);
+ // FGHABCDE....
+ ptr::copy_nonoverlapping(buf, buf.add(self.head + tail_len), self.head);
+ // ...ABCDEFGH.
+
+ self.tail = self.head;
+ self.head = self.wrap_add(self.tail, len);
+ }
+ } else {
+ // free is smaller than both head and tail,
+ // this means we have to slowly "swap" the tail and the head.
+ //
+ // from: EFGHI...ABCD or HIJK.ABCDEFG
+ // to: ABCDEFGHI... or ABCDEFGHIJK.
+ let mut left_edge: usize = 0;
+ let mut right_edge: usize = self.tail;
+ unsafe {
+ // The general problem looks like this
+ // GHIJKLM...ABCDEF - before any swaps
+ // ABCDEFM...GHIJKL - after 1 pass of swaps
+ // ABCDEFGHIJM...KL - swap until the left edge reaches the temp store
+ // - then restart the algorithm with a new (smaller) store
+ // Sometimes the temp store is reached when the right edge is at the end
+ // of the buffer - this means we've hit the right order with fewer swaps!
+ // E.g
+ // EF..ABCD
+ // ABCDEF.. - after four only swaps we've finished
+ while left_edge < len && right_edge != cap {
+ let mut right_offset = 0;
+ for i in left_edge..right_edge {
+ right_offset = (i - left_edge) % (cap - right_edge);
+ let src: isize = (right_edge + right_offset) as isize;
+ ptr::swap(buf.add(i), buf.offset(src));
+ }
+ let n_ops = right_edge - left_edge;
+ left_edge += n_ops;
+ right_edge += right_offset + 1;
+ }
+
+ self.tail = 0;
+ self.head = len;
+ }
+ }
+
+ let tail = self.tail;
+ let head = self.head;
+ // Safety:
+ // - `self.head` and `self.tail` in a ring buffer are always valid indices.
+ // - `RingSlices::ring_slices` guarantees that the slices split according to `self.head` and `self.tail` are initialized.
+ unsafe {
+ MaybeUninit::slice_assume_init_mut(
+ RingSlices::ring_slices(self.buffer_as_mut_slice(), head, tail).0,
+ )
+ }
+ }
+
+ /// Rotates the double-ended queue `mid` places to the left.
+ ///
+ /// Equivalently,
+ /// - Rotates item `mid` into the first position.
+ /// - Pops the first `mid` items and pushes them to the end.
+ /// - Rotates `len() - mid` places to the right.
+ ///
+ /// # Panics
+ ///
+ /// If `mid` is greater than `len()`. Note that `mid == len()`
+ /// does _not_ panic and is a no-op rotation.
+ ///
+ /// # Complexity
+ ///
+ /// Takes *O*(min(`mid`, `len() - mid`)) time and no extra space.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut buf: VecDeque<_> = (0..10).collect();
+ ///
+ /// buf.rotate_left(3);
+ /// assert_eq!(buf, [3, 4, 5, 6, 7, 8, 9, 0, 1, 2]);
+ ///
+ /// for i in 1..10 {
+ /// assert_eq!(i * 3 % 10, buf[0]);
+ /// buf.rotate_left(3);
+ /// }
+ /// assert_eq!(buf, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
+ /// ```
+ #[stable(feature = "vecdeque_rotate", since = "1.36.0")]
+ pub fn rotate_left(&mut self, mid: usize) {
+ assert!(mid <= self.len());
+ let k = self.len() - mid;
+ if mid <= k {
+ unsafe { self.rotate_left_inner(mid) }
+ } else {
+ unsafe { self.rotate_right_inner(k) }
+ }
+ }
+
+ /// Rotates the double-ended queue `k` places to the right.
+ ///
+ /// Equivalently,
+ /// - Rotates the first item into position `k`.
+ /// - Pops the last `k` items and pushes them to the front.
+ /// - Rotates `len() - k` places to the left.
+ ///
+ /// # Panics
+ ///
+ /// If `k` is greater than `len()`. Note that `k == len()`
+ /// does _not_ panic and is a no-op rotation.
+ ///
+ /// # Complexity
+ ///
+ /// Takes *O*(min(`k`, `len() - k`)) time and no extra space.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut buf: VecDeque<_> = (0..10).collect();
+ ///
+ /// buf.rotate_right(3);
+ /// assert_eq!(buf, [7, 8, 9, 0, 1, 2, 3, 4, 5, 6]);
+ ///
+ /// for i in 1..10 {
+ /// assert_eq!(0, buf[i * 3 % 10]);
+ /// buf.rotate_right(3);
+ /// }
+ /// assert_eq!(buf, [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
+ /// ```
+ #[stable(feature = "vecdeque_rotate", since = "1.36.0")]
+ pub fn rotate_right(&mut self, k: usize) {
+ assert!(k <= self.len());
+ let mid = self.len() - k;
+ if k <= mid {
+ unsafe { self.rotate_right_inner(k) }
+ } else {
+ unsafe { self.rotate_left_inner(mid) }
+ }
+ }
+
+ // SAFETY: the following two methods require that the rotation amount
+ // be less than half the length of the deque.
+ //
+ // `wrap_copy` requires that `min(x, cap() - x) + copy_len <= cap()`,
+ // but that `min` is never more than half the capacity, regardless of x,
+ // so it's sound to call here because we're calling with something
+ // less than half the length, which is never above half the capacity.
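+ //
+ // For example, with a contiguous deque holding 0..10 (tail == 0,
+ // head == 10, cap 16), `rotate_left(3)` calls `rotate_left_inner(3)`,
+ // which copies the three front elements into slots 10..13 and then sets
+ // tail = 3 and head = 13, so the logical contents become
+ // [3, 4, 5, 6, 7, 8, 9, 0, 1, 2].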
+
+ unsafe fn rotate_left_inner(&mut self, mid: usize) {
+ debug_assert!(mid * 2 <= self.len());
+ unsafe {
+ self.wrap_copy(self.head, self.tail, mid);
+ }
+ self.head = self.wrap_add(self.head, mid);
+ self.tail = self.wrap_add(self.tail, mid);
+ }
+
+ unsafe fn rotate_right_inner(&mut self, k: usize) {
+ debug_assert!(k * 2 <= self.len());
+ self.head = self.wrap_sub(self.head, k);
+ self.tail = self.wrap_sub(self.tail, k);
+ unsafe {
+ self.wrap_copy(self.tail, self.head, k);
+ }
+ }
+
+ /// Binary searches this `VecDeque` for a given element.
+ /// This behaves similarly to [`contains`] if this `VecDeque` is sorted.
+ ///
+ /// If the value is found then [`Result::Ok`] is returned, containing the
+ /// index of the matching element. If there are multiple matches, then any
+ /// one of the matches could be returned. If the value is not found then
+ /// [`Result::Err`] is returned, containing the index where a matching
+ /// element could be inserted while maintaining sorted order.
+ ///
+ /// See also [`binary_search_by`], [`binary_search_by_key`], and [`partition_point`].
+ ///
+ /// [`contains`]: VecDeque::contains
+ /// [`binary_search_by`]: VecDeque::binary_search_by
+ /// [`binary_search_by_key`]: VecDeque::binary_search_by_key
+ /// [`partition_point`]: VecDeque::partition_point
+ ///
+ /// # Examples
+ ///
+ /// Looks up a series of four elements. The first is found, with a
+ /// uniquely determined position; the second and third are not
+ /// found; the fourth could match any position in `[1, 4]`.
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let deque: VecDeque<_> = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55].into();
+ ///
+ /// assert_eq!(deque.binary_search(&13), Ok(9));
+ /// assert_eq!(deque.binary_search(&4), Err(7));
+ /// assert_eq!(deque.binary_search(&100), Err(13));
+ /// let r = deque.binary_search(&1);
+ /// assert!(matches!(r, Ok(1..=4)));
+ /// ```
+ ///
+ /// If you want to insert an item to a sorted deque, while maintaining
+ /// sort order, consider using [`partition_point`]:
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut deque: VecDeque<_> = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55].into();
+ /// let num = 42;
+ /// let idx = deque.partition_point(|&x| x < num);
+ /// // The above is equivalent to `let idx = deque.binary_search(&num).unwrap_or_else(|x| x);`
+ /// deque.insert(idx, num);
+ /// assert_eq!(deque, &[0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 42, 55]);
+ /// ```
+ #[stable(feature = "vecdeque_binary_search", since = "1.54.0")]
+ #[inline]
+ pub fn binary_search(&self, x: &T) -> Result<usize, usize>
+ where
+ T: Ord,
+ {
+ self.binary_search_by(|e| e.cmp(x))
+ }
+
+ /// Binary searches this `VecDeque` with a comparator function.
+ /// This behaves similarly to [`contains`] if this `VecDeque` is sorted.
+ ///
+ /// The comparator function should implement an order consistent
+ /// with the sort order of the deque, returning an order code that
+ /// indicates whether its argument is `Less`, `Equal` or `Greater`
+ /// than the desired target.
+ ///
+ /// If the value is found then [`Result::Ok`] is returned, containing the
+ /// index of the matching element. If there are multiple matches, then any
+ /// one of the matches could be returned. If the value is not found then
+ /// [`Result::Err`] is returned, containing the index where a matching
+ /// element could be inserted while maintaining sorted order.
+ ///
+ /// See also [`binary_search`], [`binary_search_by_key`], and [`partition_point`].
+ ///
+ /// [`contains`]: VecDeque::contains
+ /// [`binary_search`]: VecDeque::binary_search
+ /// [`binary_search_by_key`]: VecDeque::binary_search_by_key
+ /// [`partition_point`]: VecDeque::partition_point
+ ///
+ /// # Examples
+ ///
+ /// Looks up a series of four elements. The first is found, with a
+ /// uniquely determined position; the second and third are not
+ /// found; the fourth could match any position in `[1, 4]`.
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let deque: VecDeque<_> = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55].into();
+ ///
+ /// assert_eq!(deque.binary_search_by(|x| x.cmp(&13)), Ok(9));
+ /// assert_eq!(deque.binary_search_by(|x| x.cmp(&4)), Err(7));
+ /// assert_eq!(deque.binary_search_by(|x| x.cmp(&100)), Err(13));
+ /// let r = deque.binary_search_by(|x| x.cmp(&1));
+ /// assert!(matches!(r, Ok(1..=4)));
+ /// ```
+ #[stable(feature = "vecdeque_binary_search", since = "1.54.0")]
+ pub fn binary_search_by<'a, F>(&'a self, mut f: F) -> Result<usize, usize>
+ where
+ F: FnMut(&'a T) -> Ordering,
+ {
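+        // The deque's contents are exposed as two contiguous slices. Comparing the
+        // target against the first element of `back` tells us which slice it falls
+        // into, so the search reduces to a single slice binary search (with the
+        // index offset by `front.len()` when it lands in `back`).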
+ let (front, back) = self.as_slices();
+ let cmp_back = back.first().map(|elem| f(elem));
+
+ if let Some(Ordering::Equal) = cmp_back {
+ Ok(front.len())
+ } else if let Some(Ordering::Less) = cmp_back {
+ back.binary_search_by(f).map(|idx| idx + front.len()).map_err(|idx| idx + front.len())
+ } else {
+ front.binary_search_by(f)
+ }
+ }
+
+ /// Binary searches this `VecDeque` with a key extraction function.
+ /// This behaves similarly to [`contains`] if this `VecDeque` is sorted.
+ ///
+ /// Assumes that the deque is sorted by the key, for instance with
+ /// [`make_contiguous().sort_by_key()`] using the same key extraction function.
+ ///
+ /// If the value is found then [`Result::Ok`] is returned, containing the
+ /// index of the matching element. If there are multiple matches, then any
+ /// one of the matches could be returned. If the value is not found then
+ /// [`Result::Err`] is returned, containing the index where a matching
+ /// element could be inserted while maintaining sorted order.
+ ///
+ /// See also [`binary_search`], [`binary_search_by`], and [`partition_point`].
+ ///
+ /// [`contains`]: VecDeque::contains
+ /// [`make_contiguous().sort_by_key()`]: VecDeque::make_contiguous
+ /// [`binary_search`]: VecDeque::binary_search
+ /// [`binary_search_by`]: VecDeque::binary_search_by
+ /// [`partition_point`]: VecDeque::partition_point
+ ///
+ /// # Examples
+ ///
+ /// Looks up a series of four elements in a slice of pairs sorted by
+ /// their second elements. The first is found, with a uniquely
+ /// determined position; the second and third are not found; the
+ /// fourth could match any position in `[1, 4]`.
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let deque: VecDeque<_> = [(0, 0), (2, 1), (4, 1), (5, 1),
+ /// (3, 1), (1, 2), (2, 3), (4, 5), (5, 8), (3, 13),
+ /// (1, 21), (2, 34), (4, 55)].into();
+ ///
+ /// assert_eq!(deque.binary_search_by_key(&13, |&(a, b)| b), Ok(9));
+ /// assert_eq!(deque.binary_search_by_key(&4, |&(a, b)| b), Err(7));
+ /// assert_eq!(deque.binary_search_by_key(&100, |&(a, b)| b), Err(13));
+ /// let r = deque.binary_search_by_key(&1, |&(a, b)| b);
+ /// assert!(matches!(r, Ok(1..=4)));
+ /// ```
+ #[stable(feature = "vecdeque_binary_search", since = "1.54.0")]
+ #[inline]
+ pub fn binary_search_by_key<'a, B, F>(&'a self, b: &B, mut f: F) -> Result<usize, usize>
+ where
+ F: FnMut(&'a T) -> B,
+ B: Ord,
+ {
+ self.binary_search_by(|k| f(k).cmp(b))
+ }
+
+ /// Returns the index of the partition point according to the given predicate
+ /// (the index of the first element of the second partition).
+ ///
+ /// The deque is assumed to be partitioned according to the given predicate.
+ /// This means that all elements for which the predicate returns true are at the start of the deque
+ /// and all elements for which the predicate returns false are at the end.
+    /// For example, `[7, 15, 3, 5, 4, 12, 6]` is partitioned under the predicate `x % 2 != 0`
+ /// (all odd numbers are at the start, all even at the end).
+ ///
+ /// If the deque is not partitioned, the returned result is unspecified and meaningless,
+ /// as this method performs a kind of binary search.
+ ///
+ /// See also [`binary_search`], [`binary_search_by`], and [`binary_search_by_key`].
+ ///
+ /// [`binary_search`]: VecDeque::binary_search
+ /// [`binary_search_by`]: VecDeque::binary_search_by
+ /// [`binary_search_by_key`]: VecDeque::binary_search_by_key
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let deque: VecDeque<_> = [1, 2, 3, 3, 5, 6, 7].into();
+ /// let i = deque.partition_point(|&x| x < 5);
+ ///
+ /// assert_eq!(i, 4);
+ /// assert!(deque.iter().take(i).all(|&x| x < 5));
+ /// assert!(deque.iter().skip(i).all(|&x| !(x < 5)));
+ /// ```
+ ///
+    /// If you want to insert an item into a sorted deque while maintaining
+    /// sort order:
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut deque: VecDeque<_> = [0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55].into();
+ /// let num = 42;
+ /// let idx = deque.partition_point(|&x| x < num);
+ /// deque.insert(idx, num);
+ /// assert_eq!(deque, &[0, 1, 1, 1, 1, 2, 3, 5, 8, 13, 21, 34, 42, 55]);
+ /// ```
+ #[stable(feature = "vecdeque_binary_search", since = "1.54.0")]
+ pub fn partition_point<P>(&self, mut pred: P) -> usize
+ where
+ P: FnMut(&T) -> bool,
+ {
+ let (front, back) = self.as_slices();
+
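+        // If the predicate still holds at the first element of `back`, the
+        // partition point lies within `back`, offset by `front.len()`;
+        // otherwise it lies within `front`.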
+ if let Some(true) = back.first().map(|v| pred(v)) {
+ back.partition_point(pred) + front.len()
+ } else {
+ front.partition_point(pred)
+ }
+ }
+}
+
+impl<T: Clone, A: Allocator> VecDeque<T, A> {
+    /// Modifies the deque in-place so that `len()` is equal to `new_len`,
+ /// either by removing excess elements from the back or by appending clones of `value`
+ /// to the back.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let mut buf = VecDeque::new();
+ /// buf.push_back(5);
+ /// buf.push_back(10);
+ /// buf.push_back(15);
+ /// assert_eq!(buf, [5, 10, 15]);
+ ///
+ /// buf.resize(2, 0);
+ /// assert_eq!(buf, [5, 10]);
+ ///
+ /// buf.resize(5, 20);
+ /// assert_eq!(buf, [5, 10, 20, 20, 20]);
+ /// ```
+ #[stable(feature = "deque_extras", since = "1.16.0")]
+ pub fn resize(&mut self, new_len: usize, value: T) {
+ self.resize_with(new_len, || value.clone());
+ }
+}
+
+/// Returns the index in the underlying buffer for a given logical element index.
+#[inline]
+fn wrap_index(index: usize, size: usize) -> usize {
+ // size is always a power of 2
+ debug_assert!(size.is_power_of_two());
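+    // Masking with `size - 1` is therefore equivalent to `index % size`, but cheaper.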
+ index & (size - 1)
+}
+
+/// Calculate the number of elements left to be read in the buffer
+#[inline]
+fn count(tail: usize, head: usize, size: usize) -> usize {
+ // size is always a power of 2
+ (head.wrapping_sub(tail)) & (size - 1)
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: PartialEq, A: Allocator> PartialEq for VecDeque<T, A> {
+ fn eq(&self, other: &Self) -> bool {
+ if self.len() != other.len() {
+ return false;
+ }
+ let (sa, sb) = self.as_slices();
+ let (oa, ob) = other.as_slices();
+ if sa.len() == oa.len() {
+ sa == oa && sb == ob
+ } else if sa.len() < oa.len() {
+            // Always divisible into three sections, for example:
+ // self: [a b c|d e f]
+ // other: [0 1 2 3|4 5]
+ // front = 3, mid = 1,
+ // [a b c] == [0 1 2] && [d] == [3] && [e f] == [4 5]
+ let front = sa.len();
+ let mid = oa.len() - front;
+
+ let (oa_front, oa_mid) = oa.split_at(front);
+ let (sb_mid, sb_back) = sb.split_at(mid);
+ debug_assert_eq!(sa.len(), oa_front.len());
+ debug_assert_eq!(sb_mid.len(), oa_mid.len());
+ debug_assert_eq!(sb_back.len(), ob.len());
+ sa == oa_front && sb_mid == oa_mid && sb_back == ob
+ } else {
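+            // Mirror image of the case above, with `self` and `other` swapped:
+            // self:  [a b c d|e f]
+            // other: [0 1 2|3 4 5]
+            // front = 3, mid = 1,
+            // [a b c] == [0 1 2] && [d] == [3] && [e f] == [4 5]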
+ let front = oa.len();
+ let mid = sa.len() - front;
+
+ let (sa_front, sa_mid) = sa.split_at(front);
+ let (ob_mid, ob_back) = ob.split_at(mid);
+ debug_assert_eq!(sa_front.len(), oa.len());
+ debug_assert_eq!(sa_mid.len(), ob_mid.len());
+ debug_assert_eq!(sb.len(), ob_back.len());
+ sa_front == oa && sa_mid == ob_mid && sb == ob_back
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Eq, A: Allocator> Eq for VecDeque<T, A> {}
+
+__impl_slice_eq1! { [] VecDeque<T, A>, Vec<U, A>, }
+__impl_slice_eq1! { [] VecDeque<T, A>, &[U], }
+__impl_slice_eq1! { [] VecDeque<T, A>, &mut [U], }
+__impl_slice_eq1! { [const N: usize] VecDeque<T, A>, [U; N], }
+__impl_slice_eq1! { [const N: usize] VecDeque<T, A>, &[U; N], }
+__impl_slice_eq1! { [const N: usize] VecDeque<T, A>, &mut [U; N], }
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: PartialOrd, A: Allocator> PartialOrd for VecDeque<T, A> {
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ self.iter().partial_cmp(other.iter())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Ord, A: Allocator> Ord for VecDeque<T, A> {
+ #[inline]
+ fn cmp(&self, other: &Self) -> Ordering {
+ self.iter().cmp(other.iter())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Hash, A: Allocator> Hash for VecDeque<T, A> {
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ state.write_length_prefix(self.len());
+        // It's not possible to use Hash::hash_slice on the slices
+        // returned by the as_slices method, as their lengths can vary
+        // between otherwise identical deques.
+ //
+ // Hasher only guarantees equivalence for the exact same
+ // set of calls to its methods.
+ self.iter().for_each(|elem| elem.hash(state));
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator> Index<usize> for VecDeque<T, A> {
+ type Output = T;
+
+ #[inline]
+ fn index(&self, index: usize) -> &T {
+ self.get(index).expect("Out of bounds access")
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator> IndexMut<usize> for VecDeque<T, A> {
+ #[inline]
+ fn index_mut(&mut self, index: usize) -> &mut T {
+ self.get_mut(index).expect("Out of bounds access")
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> FromIterator<T> for VecDeque<T> {
+ fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> VecDeque<T> {
+ let iterator = iter.into_iter();
+ let (lower, _) = iterator.size_hint();
+ let mut deq = VecDeque::with_capacity(lower);
+ deq.extend(iterator);
+ deq
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator> IntoIterator for VecDeque<T, A> {
+ type Item = T;
+ type IntoIter = IntoIter<T, A>;
+
+ /// Consumes the deque into a front-to-back iterator yielding elements by
+ /// value.
+ fn into_iter(self) -> IntoIter<T, A> {
+ IntoIter::new(self)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T, A: Allocator> IntoIterator for &'a VecDeque<T, A> {
+ type Item = &'a T;
+ type IntoIter = Iter<'a, T>;
+
+ fn into_iter(self) -> Iter<'a, T> {
+ self.iter()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T, A: Allocator> IntoIterator for &'a mut VecDeque<T, A> {
+ type Item = &'a mut T;
+ type IntoIter = IterMut<'a, T>;
+
+ fn into_iter(self) -> IterMut<'a, T> {
+ self.iter_mut()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator> Extend<T> for VecDeque<T, A> {
+ fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
+ <Self as SpecExtend<T, I::IntoIter>>::spec_extend(self, iter.into_iter());
+ }
+
+ #[inline]
+ fn extend_one(&mut self, elem: T) {
+ self.push_back(elem);
+ }
+
+ #[inline]
+ fn extend_reserve(&mut self, additional: usize) {
+ self.reserve(additional);
+ }
+}
+
+#[stable(feature = "extend_ref", since = "1.2.0")]
+impl<'a, T: 'a + Copy, A: Allocator> Extend<&'a T> for VecDeque<T, A> {
+ fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
+ self.spec_extend(iter.into_iter());
+ }
+
+ #[inline]
+ fn extend_one(&mut self, &elem: &T) {
+ self.push_back(elem);
+ }
+
+ #[inline]
+ fn extend_reserve(&mut self, additional: usize) {
+ self.reserve(additional);
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: fmt::Debug, A: Allocator> fmt::Debug for VecDeque<T, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_list().entries(self).finish()
+ }
+}
+
+#[stable(feature = "vecdeque_vec_conversions", since = "1.10.0")]
+impl<T, A: Allocator> From<Vec<T, A>> for VecDeque<T, A> {
+ /// Turn a [`Vec<T>`] into a [`VecDeque<T>`].
+ ///
+ /// [`Vec<T>`]: crate::vec::Vec
+ /// [`VecDeque<T>`]: crate::collections::VecDeque
+ ///
+    /// This avoids reallocating where possible, but the conditions for that are
+    /// strict and subject to change, so this behavior shouldn't be relied upon unless
+    /// the `Vec<T>` came from `From<VecDeque<T>>` and hasn't been reallocated.
+ fn from(mut other: Vec<T, A>) -> Self {
+ let len = other.len();
+ if mem::size_of::<T>() == 0 {
+            // ZSTs have no actual allocation, so capacity is not a concern,
+            // but `VecDeque` can't handle as much length as `Vec`.
+ assert!(len < MAXIMUM_ZST_CAPACITY, "capacity overflow");
+ } else {
+ // We need to resize if the capacity is not a power of two, too small or
+ // doesn't have at least one free space. We do this while it's still in
+ // the `Vec` so the items will drop on panic.
+ let min_cap = cmp::max(MINIMUM_CAPACITY, len) + 1;
+ let cap = cmp::max(min_cap, other.capacity()).next_power_of_two();
+ if other.capacity() != cap {
+ other.reserve_exact(cap - len);
+ }
+ }
+
+ unsafe {
+ let (other_buf, len, capacity, alloc) = other.into_raw_parts_with_alloc();
+ let buf = RawVec::from_raw_parts_in(other_buf, capacity, alloc);
+ VecDeque { tail: 0, head: len, buf }
+ }
+ }
+}
+
+#[stable(feature = "vecdeque_vec_conversions", since = "1.10.0")]
+impl<T, A: Allocator> From<VecDeque<T, A>> for Vec<T, A> {
+ /// Turn a [`VecDeque<T>`] into a [`Vec<T>`].
+ ///
+ /// [`Vec<T>`]: crate::vec::Vec
+ /// [`VecDeque<T>`]: crate::collections::VecDeque
+ ///
+ /// This never needs to re-allocate, but does need to do *O*(*n*) data movement if
+ /// the circular buffer doesn't happen to be at the beginning of the allocation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// // This one is *O*(1).
+ /// let deque: VecDeque<_> = (1..5).collect();
+ /// let ptr = deque.as_slices().0.as_ptr();
+ /// let vec = Vec::from(deque);
+ /// assert_eq!(vec, [1, 2, 3, 4]);
+ /// assert_eq!(vec.as_ptr(), ptr);
+ ///
+ /// // This one needs data rearranging.
+ /// let mut deque: VecDeque<_> = (1..5).collect();
+ /// deque.push_front(9);
+ /// deque.push_front(8);
+ /// let ptr = deque.as_slices().1.as_ptr();
+ /// let vec = Vec::from(deque);
+ /// assert_eq!(vec, [8, 9, 1, 2, 3, 4]);
+ /// assert_eq!(vec.as_ptr(), ptr);
+ /// ```
+ fn from(mut other: VecDeque<T, A>) -> Self {
+ other.make_contiguous();
+
+ unsafe {
+ let other = ManuallyDrop::new(other);
+ let buf = other.buf.ptr();
+ let len = other.len();
+ let cap = other.cap();
+ let alloc = ptr::read(other.allocator());
+
+ if other.tail != 0 {
+ ptr::copy(buf.add(other.tail), buf, len);
+ }
+ Vec::from_raw_parts_in(buf, len, cap, alloc)
+ }
+ }
+}
+
+#[stable(feature = "std_collections_from_array", since = "1.56.0")]
+impl<T, const N: usize> From<[T; N]> for VecDeque<T> {
+ /// Converts a `[T; N]` into a `VecDeque<T>`.
+ ///
+ /// ```
+ /// use std::collections::VecDeque;
+ ///
+ /// let deq1 = VecDeque::from([1, 2, 3, 4]);
+ /// let deq2: VecDeque<_> = [1, 2, 3, 4].into();
+ /// assert_eq!(deq1, deq2);
+ /// ```
+ fn from(arr: [T; N]) -> Self {
+ let mut deq = VecDeque::with_capacity(N);
+ let arr = ManuallyDrop::new(arr);
+ if mem::size_of::<T>() != 0 {
+ // SAFETY: VecDeque::with_capacity ensures that there is enough capacity.
+ unsafe {
+ ptr::copy_nonoverlapping(arr.as_ptr(), deq.ptr(), N);
+ }
+ }
+ deq.tail = 0;
+ deq.head = N;
+ deq
+ }
+}
diff --git a/library/alloc/src/collections/vec_deque/pair_slices.rs b/library/alloc/src/collections/vec_deque/pair_slices.rs
new file mode 100644
index 000000000..6735424a3
--- /dev/null
+++ b/library/alloc/src/collections/vec_deque/pair_slices.rs
@@ -0,0 +1,67 @@
+use core::cmp::{self};
+use core::mem::replace;
+
+use crate::alloc::Allocator;
+
+use super::VecDeque;
+
+/// `PairSlices` pairs up equal-length slice parts of two deques.
+///
+/// For example, given deques "A" and "B" with the following division into slices:
+///
+/// A: [0 1 2] [3 4 5]
+/// B: [a b] [c d e]
+///
+/// It produces the following sequence of matching slices:
+///
+/// ([0 1], [a b])
+/// (\[2\], \[c\])
+/// ([3 4], [d e])
+///
+/// and the uneven remainder of either A or B is skipped.
+pub struct PairSlices<'a, 'b, T> {
+ a0: &'a mut [T],
+ a1: &'a mut [T],
+ b0: &'b [T],
+ b1: &'b [T],
+}
+
+impl<'a, 'b, T> PairSlices<'a, 'b, T> {
+ pub fn from<A: Allocator>(to: &'a mut VecDeque<T, A>, from: &'b VecDeque<T, A>) -> Self {
+ let (a0, a1) = to.as_mut_slices();
+ let (b0, b1) = from.as_slices();
+ PairSlices { a0, a1, b0, b1 }
+ }
+
+ pub fn has_remainder(&self) -> bool {
+ !self.b0.is_empty()
+ }
+
+ pub fn remainder(self) -> impl Iterator<Item = &'b [T]> {
+ IntoIterator::into_iter([self.b0, self.b1])
+ }
+}
+
+impl<'a, 'b, T> Iterator for PairSlices<'a, 'b, T> {
+ type Item = (&'a mut [T], &'b [T]);
+ fn next(&mut self) -> Option<Self::Item> {
+ // Get next part length
+ let part = cmp::min(self.a0.len(), self.b0.len());
+ if part == 0 {
+ return None;
+ }
+ let (p0, p1) = replace(&mut self.a0, &mut []).split_at_mut(part);
+ let (q0, q1) = self.b0.split_at(part);
+
+        // Advance `a0`/`b0` past the parts just yielded; if either is now empty, move `a1`/`b1` into its place.
+ self.a0 = p1;
+ self.b0 = q1;
+ if self.a0.is_empty() {
+ self.a0 = replace(&mut self.a1, &mut []);
+ }
+ if self.b0.is_empty() {
+ self.b0 = replace(&mut self.b1, &[]);
+ }
+ Some((p0, q0))
+ }
+}
diff --git a/library/alloc/src/collections/vec_deque/ring_slices.rs b/library/alloc/src/collections/vec_deque/ring_slices.rs
new file mode 100644
index 000000000..dd0fa7d60
--- /dev/null
+++ b/library/alloc/src/collections/vec_deque/ring_slices.rs
@@ -0,0 +1,56 @@
+use core::ptr::{self};
+
+/// Returns the two slices that cover the `VecDeque`'s valid range
+pub trait RingSlices: Sized {
+ fn slice(self, from: usize, to: usize) -> Self;
+ fn split_at(self, i: usize) -> (Self, Self);
+
+ fn ring_slices(buf: Self, head: usize, tail: usize) -> (Self, Self) {
+ let contiguous = tail <= head;
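+        // If the occupied region does not wrap around the end of the buffer, the
+        // whole contents are the single slice `buf[tail..head]` and the second
+        // slice is empty; otherwise the first slice is `buf[tail..]` and the
+        // second is `buf[..head]`.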
+ if contiguous {
+ let (empty, buf) = buf.split_at(0);
+ (buf.slice(tail, head), empty)
+ } else {
+ let (mid, right) = buf.split_at(tail);
+ let (left, _) = mid.split_at(head);
+ (right, left)
+ }
+ }
+}
+
+impl<T> RingSlices for &[T] {
+ fn slice(self, from: usize, to: usize) -> Self {
+ &self[from..to]
+ }
+ fn split_at(self, i: usize) -> (Self, Self) {
+ (*self).split_at(i)
+ }
+}
+
+impl<T> RingSlices for &mut [T] {
+ fn slice(self, from: usize, to: usize) -> Self {
+ &mut self[from..to]
+ }
+ fn split_at(self, i: usize) -> (Self, Self) {
+ (*self).split_at_mut(i)
+ }
+}
+
+impl<T> RingSlices for *mut [T] {
+ fn slice(self, from: usize, to: usize) -> Self {
+ assert!(from <= to && to < self.len());
+ // Not using `get_unchecked_mut` to keep this a safe operation.
+ let len = to - from;
+ ptr::slice_from_raw_parts_mut(self.as_mut_ptr().wrapping_add(from), len)
+ }
+
+ fn split_at(self, mid: usize) -> (Self, Self) {
+ let len = self.len();
+ let ptr = self.as_mut_ptr();
+ assert!(mid <= len);
+ (
+ ptr::slice_from_raw_parts_mut(ptr, mid),
+ ptr::slice_from_raw_parts_mut(ptr.wrapping_add(mid), len - mid),
+ )
+ }
+}
diff --git a/library/alloc/src/collections/vec_deque/spec_extend.rs b/library/alloc/src/collections/vec_deque/spec_extend.rs
new file mode 100644
index 000000000..97ff8b765
--- /dev/null
+++ b/library/alloc/src/collections/vec_deque/spec_extend.rs
@@ -0,0 +1,132 @@
+use crate::alloc::Allocator;
+use crate::vec;
+use core::iter::{ByRefSized, TrustedLen};
+use core::slice;
+
+use super::VecDeque;
+
+// Specialization trait used for VecDeque::extend
+pub(super) trait SpecExtend<T, I> {
+ fn spec_extend(&mut self, iter: I);
+}
+
+impl<T, I, A: Allocator> SpecExtend<T, I> for VecDeque<T, A>
+where
+ I: Iterator<Item = T>,
+{
+ default fn spec_extend(&mut self, mut iter: I) {
+ // This function should be the moral equivalent of:
+ //
+ // for item in iter {
+ // self.push_back(item);
+ // }
+ while let Some(element) = iter.next() {
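+            // Grow only when the buffer is actually full, using the iterator's
+            // lower size bound as a hint for how many more items are coming.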
+ if self.len() == self.capacity() {
+ let (lower, _) = iter.size_hint();
+ self.reserve(lower.saturating_add(1));
+ }
+
+ let head = self.head;
+ self.head = self.wrap_add(self.head, 1);
+ unsafe {
+ self.buffer_write(head, element);
+ }
+ }
+ }
+}
+
+impl<T, I, A: Allocator> SpecExtend<T, I> for VecDeque<T, A>
+where
+ I: TrustedLen<Item = T>,
+{
+ default fn spec_extend(&mut self, mut iter: I) {
+ // This is the case for a TrustedLen iterator.
+ let (low, high) = iter.size_hint();
+ if let Some(additional) = high {
+ debug_assert_eq!(
+ low,
+ additional,
+ "TrustedLen iterator's size hint is not exact: {:?}",
+ (low, high)
+ );
+ self.reserve(additional);
+
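+            // Drop guard: even if writing panics partway through, `head` is still
+            // advanced by the number of elements actually written, so the deque
+            // retains ownership of them and drops them correctly on unwind.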
+ struct WrapAddOnDrop<'a, T, A: Allocator> {
+ vec_deque: &'a mut VecDeque<T, A>,
+ written: usize,
+ }
+
+ impl<'a, T, A: Allocator> Drop for WrapAddOnDrop<'a, T, A> {
+ fn drop(&mut self) {
+ self.vec_deque.head =
+ self.vec_deque.wrap_add(self.vec_deque.head, self.written);
+ }
+ }
+
+ let mut wrapper = WrapAddOnDrop { vec_deque: self, written: 0 };
+
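+            // Write as many elements as fit between `head` and the end of the
+            // buffer, then wrap around and write the remainder starting at index 0.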
+ let head_room = wrapper.vec_deque.cap() - wrapper.vec_deque.head;
+ unsafe {
+ wrapper.vec_deque.write_iter(
+ wrapper.vec_deque.head,
+ ByRefSized(&mut iter).take(head_room),
+ &mut wrapper.written,
+ );
+
+ if additional > head_room {
+ wrapper.vec_deque.write_iter(0, iter, &mut wrapper.written);
+ }
+ }
+
+ debug_assert_eq!(
+ additional, wrapper.written,
+ "The number of items written to VecDeque doesn't match the TrustedLen size hint"
+ );
+ } else {
+ // Per TrustedLen contract a `None` upper bound means that the iterator length
+ // truly exceeds usize::MAX, which would eventually lead to a capacity overflow anyway.
+ // Since the other branch already panics eagerly (via `reserve()`) we do the same here.
+ // This avoids additional codegen for a fallback code path which would eventually
+ // panic anyway.
+ panic!("capacity overflow");
+ }
+ }
+}
+
+impl<T, A: Allocator> SpecExtend<T, vec::IntoIter<T>> for VecDeque<T, A> {
+ fn spec_extend(&mut self, mut iterator: vec::IntoIter<T>) {
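+        // The iterator's remaining elements are contiguous in the `Vec` buffer, so
+        // they can be bulk-copied into the ring buffer; forgetting them in the
+        // source afterwards prevents them from being dropped twice.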
+ let slice = iterator.as_slice();
+ self.reserve(slice.len());
+
+ unsafe {
+ self.copy_slice(self.head, slice);
+ self.head = self.wrap_add(self.head, slice.len());
+ }
+ iterator.forget_remaining_elements();
+ }
+}
+
+impl<'a, T: 'a, I, A: Allocator> SpecExtend<&'a T, I> for VecDeque<T, A>
+where
+ I: Iterator<Item = &'a T>,
+ T: Copy,
+{
+ default fn spec_extend(&mut self, iterator: I) {
+ self.spec_extend(iterator.copied())
+ }
+}
+
+impl<'a, T: 'a, A: Allocator> SpecExtend<&'a T, slice::Iter<'a, T>> for VecDeque<T, A>
+where
+ T: Copy,
+{
+ fn spec_extend(&mut self, iterator: slice::Iter<'a, T>) {
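+        // `slice::Iter` exposes its remaining `Copy` items as one contiguous
+        // slice, so a single bulk copy suffices.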
+ let slice = iterator.as_slice();
+ self.reserve(slice.len());
+
+ unsafe {
+ self.copy_slice(self.head, slice);
+ self.head = self.wrap_add(self.head, slice.len());
+ }
+ }
+}
diff --git a/library/alloc/src/collections/vec_deque/tests.rs b/library/alloc/src/collections/vec_deque/tests.rs
new file mode 100644
index 000000000..1f2daef21
--- /dev/null
+++ b/library/alloc/src/collections/vec_deque/tests.rs
@@ -0,0 +1,1110 @@
+use core::iter::TrustedLen;
+
+use super::*;
+
+#[bench]
+#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
+fn bench_push_back_100(b: &mut test::Bencher) {
+ let mut deq = VecDeque::with_capacity(101);
+ b.iter(|| {
+ for i in 0..100 {
+ deq.push_back(i);
+ }
+ deq.head = 0;
+ deq.tail = 0;
+ })
+}
+
+#[bench]
+#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
+fn bench_push_front_100(b: &mut test::Bencher) {
+ let mut deq = VecDeque::with_capacity(101);
+ b.iter(|| {
+ for i in 0..100 {
+ deq.push_front(i);
+ }
+ deq.head = 0;
+ deq.tail = 0;
+ })
+}
+
+#[bench]
+#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
+fn bench_pop_back_100(b: &mut test::Bencher) {
+ let mut deq = VecDeque::<i32>::with_capacity(101);
+
+ b.iter(|| {
+ deq.head = 100;
+ deq.tail = 0;
+ while !deq.is_empty() {
+ test::black_box(deq.pop_back());
+ }
+ })
+}
+
+#[bench]
+#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
+fn bench_retain_whole_10000(b: &mut test::Bencher) {
+ let v = (1..100000).collect::<VecDeque<u32>>();
+
+ b.iter(|| {
+ let mut v = v.clone();
+ v.retain(|x| *x > 0)
+ })
+}
+
+#[bench]
+#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
+fn bench_retain_odd_10000(b: &mut test::Bencher) {
+ let v = (1..100000).collect::<VecDeque<u32>>();
+
+ b.iter(|| {
+ let mut v = v.clone();
+ v.retain(|x| x & 1 == 0)
+ })
+}
+
+#[bench]
+#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
+fn bench_retain_half_10000(b: &mut test::Bencher) {
+ let v = (1..100000).collect::<VecDeque<u32>>();
+
+ b.iter(|| {
+ let mut v = v.clone();
+ v.retain(|x| *x > 50000)
+ })
+}
+
+#[bench]
+#[cfg_attr(miri, ignore)] // isolated Miri does not support benchmarks
+fn bench_pop_front_100(b: &mut test::Bencher) {
+ let mut deq = VecDeque::<i32>::with_capacity(101);
+
+ b.iter(|| {
+ deq.head = 100;
+ deq.tail = 0;
+ while !deq.is_empty() {
+ test::black_box(deq.pop_front());
+ }
+ })
+}
+
+#[test]
+fn test_swap_front_back_remove() {
+ fn test(back: bool) {
+ // This test checks that every single combination of tail position and length is tested.
+ // Capacity 15 should be large enough to cover every case.
+ let mut tester = VecDeque::with_capacity(15);
+ let usable_cap = tester.capacity();
+ let final_len = usable_cap / 2;
+
+ for len in 0..final_len {
+ let expected: VecDeque<_> =
+ if back { (0..len).collect() } else { (0..len).rev().collect() };
+ for tail_pos in 0..usable_cap {
+ tester.tail = tail_pos;
+ tester.head = tail_pos;
+ if back {
+ for i in 0..len * 2 {
+ tester.push_front(i);
+ }
+ for i in 0..len {
+ assert_eq!(tester.swap_remove_back(i), Some(len * 2 - 1 - i));
+ }
+ } else {
+ for i in 0..len * 2 {
+ tester.push_back(i);
+ }
+ for i in 0..len {
+ let idx = tester.len() - 1 - i;
+ assert_eq!(tester.swap_remove_front(idx), Some(len * 2 - 1 - i));
+ }
+ }
+ assert!(tester.tail < tester.cap());
+ assert!(tester.head < tester.cap());
+ assert_eq!(tester, expected);
+ }
+ }
+ }
+ test(true);
+ test(false);
+}
+
+#[test]
+fn test_insert() {
+ // This test checks that every single combination of tail position, length, and
+ // insertion position is tested. Capacity 15 should be large enough to cover every case.
+
+ let mut tester = VecDeque::with_capacity(15);
+ // can't guarantee we got 15, so have to get what we got.
+ // 15 would be great, but we will definitely get 2^k - 1, for k >= 4, or else
+ // this test isn't covering what it wants to
+ let cap = tester.capacity();
+
+ // len is the length *after* insertion
+ let minlen = if cfg!(miri) { cap - 1 } else { 1 }; // Miri is too slow
+ for len in minlen..cap {
+ // 0, 1, 2, .., len - 1
+ let expected = (0..).take(len).collect::<VecDeque<_>>();
+ for tail_pos in 0..cap {
+ for to_insert in 0..len {
+ tester.tail = tail_pos;
+ tester.head = tail_pos;
+ for i in 0..len {
+ if i != to_insert {
+ tester.push_back(i);
+ }
+ }
+ tester.insert(to_insert, to_insert);
+ assert!(tester.tail < tester.cap());
+ assert!(tester.head < tester.cap());
+ assert_eq!(tester, expected);
+ }
+ }
+ }
+}
+
+#[test]
+fn test_get() {
+ let mut tester = VecDeque::new();
+ tester.push_back(1);
+ tester.push_back(2);
+ tester.push_back(3);
+
+ assert_eq!(tester.len(), 3);
+
+ assert_eq!(tester.get(1), Some(&2));
+ assert_eq!(tester.get(2), Some(&3));
+ assert_eq!(tester.get(0), Some(&1));
+ assert_eq!(tester.get(3), None);
+
+ tester.remove(0);
+
+ assert_eq!(tester.len(), 2);
+ assert_eq!(tester.get(0), Some(&2));
+ assert_eq!(tester.get(1), Some(&3));
+ assert_eq!(tester.get(2), None);
+}
+
+#[test]
+fn test_get_mut() {
+ let mut tester = VecDeque::new();
+ tester.push_back(1);
+ tester.push_back(2);
+ tester.push_back(3);
+
+ assert_eq!(tester.len(), 3);
+
+ if let Some(elem) = tester.get_mut(0) {
+ assert_eq!(*elem, 1);
+ *elem = 10;
+ }
+
+ if let Some(elem) = tester.get_mut(2) {
+ assert_eq!(*elem, 3);
+ *elem = 30;
+ }
+
+ assert_eq!(tester.get(0), Some(&10));
+ assert_eq!(tester.get(2), Some(&30));
+ assert_eq!(tester.get_mut(3), None);
+
+ tester.remove(2);
+
+ assert_eq!(tester.len(), 2);
+ assert_eq!(tester.get(0), Some(&10));
+ assert_eq!(tester.get(1), Some(&2));
+ assert_eq!(tester.get(2), None);
+}
+
+#[test]
+fn test_swap() {
+ let mut tester = VecDeque::new();
+ tester.push_back(1);
+ tester.push_back(2);
+ tester.push_back(3);
+
+ assert_eq!(tester, [1, 2, 3]);
+
+ tester.swap(0, 0);
+ assert_eq!(tester, [1, 2, 3]);
+ tester.swap(0, 1);
+ assert_eq!(tester, [2, 1, 3]);
+ tester.swap(2, 1);
+ assert_eq!(tester, [2, 3, 1]);
+ tester.swap(1, 2);
+ assert_eq!(tester, [2, 1, 3]);
+ tester.swap(0, 2);
+ assert_eq!(tester, [3, 1, 2]);
+ tester.swap(2, 2);
+ assert_eq!(tester, [3, 1, 2]);
+}
+
+#[test]
+#[should_panic = "assertion failed: j < self.len()"]
+fn test_swap_panic() {
+ let mut tester = VecDeque::new();
+ tester.push_back(1);
+ tester.push_back(2);
+ tester.push_back(3);
+ tester.swap(2, 3);
+}
+
+#[test]
+fn test_reserve_exact() {
+ let mut tester: VecDeque<i32> = VecDeque::with_capacity(1);
+ assert!(tester.capacity() == 1);
+ tester.reserve_exact(50);
+ assert!(tester.capacity() >= 51);
+ tester.reserve_exact(40);
+ assert!(tester.capacity() >= 51);
+ tester.reserve_exact(200);
+ assert!(tester.capacity() >= 200);
+}
+
+#[test]
+#[should_panic = "capacity overflow"]
+fn test_reserve_exact_panic() {
+ let mut tester: VecDeque<i32> = VecDeque::new();
+ tester.reserve_exact(usize::MAX);
+}
+
+#[test]
+fn test_try_reserve_exact() {
+ let mut tester: VecDeque<i32> = VecDeque::with_capacity(1);
+ assert!(tester.capacity() == 1);
+ assert_eq!(tester.try_reserve_exact(100), Ok(()));
+ assert!(tester.capacity() >= 100);
+ assert_eq!(tester.try_reserve_exact(50), Ok(()));
+ assert!(tester.capacity() >= 100);
+ assert_eq!(tester.try_reserve_exact(200), Ok(()));
+ assert!(tester.capacity() >= 200);
+ assert_eq!(tester.try_reserve_exact(0), Ok(()));
+ assert!(tester.capacity() >= 200);
+ assert!(tester.try_reserve_exact(usize::MAX).is_err());
+}
+
+#[test]
+fn test_try_reserve() {
+ let mut tester: VecDeque<i32> = VecDeque::with_capacity(1);
+ assert!(tester.capacity() == 1);
+ assert_eq!(tester.try_reserve(100), Ok(()));
+ assert!(tester.capacity() >= 100);
+ assert_eq!(tester.try_reserve(50), Ok(()));
+ assert!(tester.capacity() >= 100);
+ assert_eq!(tester.try_reserve(200), Ok(()));
+ assert!(tester.capacity() >= 200);
+ assert_eq!(tester.try_reserve(0), Ok(()));
+ assert!(tester.capacity() >= 200);
+ assert!(tester.try_reserve(usize::MAX).is_err());
+}
+
+#[test]
+fn test_contains() {
+ let mut tester = VecDeque::new();
+ tester.push_back(1);
+ tester.push_back(2);
+ tester.push_back(3);
+
+ assert!(tester.contains(&1));
+ assert!(tester.contains(&3));
+ assert!(!tester.contains(&0));
+ assert!(!tester.contains(&4));
+ tester.remove(0);
+ assert!(!tester.contains(&1));
+ assert!(tester.contains(&2));
+ assert!(tester.contains(&3));
+}
+
+#[test]
+fn test_rotate_left_right() {
+ let mut tester: VecDeque<_> = (1..=10).collect();
+
+ assert_eq!(tester.len(), 10);
+
+ tester.rotate_left(0);
+ assert_eq!(tester, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
+
+ tester.rotate_right(0);
+ assert_eq!(tester, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
+
+ tester.rotate_left(3);
+ assert_eq!(tester, [4, 5, 6, 7, 8, 9, 10, 1, 2, 3]);
+
+ tester.rotate_right(5);
+ assert_eq!(tester, [9, 10, 1, 2, 3, 4, 5, 6, 7, 8]);
+
+ tester.rotate_left(tester.len());
+ assert_eq!(tester, [9, 10, 1, 2, 3, 4, 5, 6, 7, 8]);
+
+ tester.rotate_right(tester.len());
+ assert_eq!(tester, [9, 10, 1, 2, 3, 4, 5, 6, 7, 8]);
+
+ tester.rotate_left(1);
+ assert_eq!(tester, [10, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
+}
+
+#[test]
+#[should_panic = "assertion failed: mid <= self.len()"]
+fn test_rotate_left_panic() {
+ let mut tester: VecDeque<_> = (1..=10).collect();
+ tester.rotate_left(tester.len() + 1);
+}
+
+#[test]
+#[should_panic = "assertion failed: k <= self.len()"]
+fn test_rotate_right_panic() {
+ let mut tester: VecDeque<_> = (1..=10).collect();
+ tester.rotate_right(tester.len() + 1);
+}
+
+#[test]
+fn test_binary_search() {
+    // If the given VecDeque is not sorted, the returned result is unspecified and meaningless,
+ // as this method performs a binary search.
+
+ let tester: VecDeque<_> = [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55].into();
+
+ assert_eq!(tester.binary_search(&0), Ok(0));
+ assert_eq!(tester.binary_search(&5), Ok(5));
+ assert_eq!(tester.binary_search(&55), Ok(10));
+ assert_eq!(tester.binary_search(&4), Err(5));
+ assert_eq!(tester.binary_search(&-1), Err(0));
+ assert!(matches!(tester.binary_search(&1), Ok(1..=2)));
+
+ let tester: VecDeque<_> = [1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 3].into();
+ assert_eq!(tester.binary_search(&1), Ok(0));
+ assert!(matches!(tester.binary_search(&2), Ok(1..=4)));
+ assert!(matches!(tester.binary_search(&3), Ok(5..=13)));
+ assert_eq!(tester.binary_search(&-2), Err(0));
+ assert_eq!(tester.binary_search(&0), Err(0));
+ assert_eq!(tester.binary_search(&4), Err(14));
+ assert_eq!(tester.binary_search(&5), Err(14));
+}
+
+#[test]
+fn test_binary_search_by() {
+    // If the given VecDeque is not sorted, the returned result is unspecified and meaningless,
+ // as this method performs a binary search.
+
+ let tester: VecDeque<_> = [0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55].into();
+
+ assert_eq!(tester.binary_search_by(|x| x.cmp(&0)), Ok(0));
+ assert_eq!(tester.binary_search_by(|x| x.cmp(&5)), Ok(5));
+ assert_eq!(tester.binary_search_by(|x| x.cmp(&55)), Ok(10));
+ assert_eq!(tester.binary_search_by(|x| x.cmp(&4)), Err(5));
+ assert_eq!(tester.binary_search_by(|x| x.cmp(&-1)), Err(0));
+ assert!(matches!(tester.binary_search_by(|x| x.cmp(&1)), Ok(1..=2)));
+}
+
+#[test]
+fn test_binary_search_key() {
+    // If the given VecDeque is not sorted, the returned result is unspecified and meaningless,
+ // as this method performs a binary search.
+
+ let tester: VecDeque<_> = [
+ (-1, 0),
+ (2, 10),
+ (6, 5),
+ (7, 1),
+ (8, 10),
+ (10, 2),
+ (20, 3),
+ (24, 5),
+ (25, 18),
+ (28, 13),
+ (31, 21),
+ (32, 4),
+ (54, 25),
+ ]
+ .into();
+
+ assert_eq!(tester.binary_search_by_key(&-1, |&(a, _b)| a), Ok(0));
+ assert_eq!(tester.binary_search_by_key(&8, |&(a, _b)| a), Ok(4));
+ assert_eq!(tester.binary_search_by_key(&25, |&(a, _b)| a), Ok(8));
+ assert_eq!(tester.binary_search_by_key(&54, |&(a, _b)| a), Ok(12));
+ assert_eq!(tester.binary_search_by_key(&-2, |&(a, _b)| a), Err(0));
+ assert_eq!(tester.binary_search_by_key(&1, |&(a, _b)| a), Err(1));
+ assert_eq!(tester.binary_search_by_key(&4, |&(a, _b)| a), Err(2));
+ assert_eq!(tester.binary_search_by_key(&13, |&(a, _b)| a), Err(6));
+ assert_eq!(tester.binary_search_by_key(&55, |&(a, _b)| a), Err(13));
+ assert_eq!(tester.binary_search_by_key(&100, |&(a, _b)| a), Err(13));
+
+ let tester: VecDeque<_> = [
+ (0, 0),
+ (2, 1),
+ (6, 1),
+ (5, 1),
+ (3, 1),
+ (1, 2),
+ (2, 3),
+ (4, 5),
+ (5, 8),
+ (8, 13),
+ (1, 21),
+ (2, 34),
+ (4, 55),
+ ]
+ .into();
+
+ assert_eq!(tester.binary_search_by_key(&0, |&(_a, b)| b), Ok(0));
+ assert!(matches!(tester.binary_search_by_key(&1, |&(_a, b)| b), Ok(1..=4)));
+ assert_eq!(tester.binary_search_by_key(&8, |&(_a, b)| b), Ok(8));
+ assert_eq!(tester.binary_search_by_key(&13, |&(_a, b)| b), Ok(9));
+ assert_eq!(tester.binary_search_by_key(&55, |&(_a, b)| b), Ok(12));
+ assert_eq!(tester.binary_search_by_key(&-1, |&(_a, b)| b), Err(0));
+ assert_eq!(tester.binary_search_by_key(&4, |&(_a, b)| b), Err(7));
+ assert_eq!(tester.binary_search_by_key(&56, |&(_a, b)| b), Err(13));
+ assert_eq!(tester.binary_search_by_key(&100, |&(_a, b)| b), Err(13));
+}
+
+#[test]
+fn make_contiguous_big_tail() {
+ let mut tester = VecDeque::with_capacity(15);
+
+ for i in 0..3 {
+ tester.push_back(i);
+ }
+
+ for i in 3..10 {
+ tester.push_front(i);
+ }
+
+ // 012......9876543
+ assert_eq!(tester.capacity(), 15);
+ assert_eq!((&[9, 8, 7, 6, 5, 4, 3] as &[_], &[0, 1, 2] as &[_]), tester.as_slices());
+
+ let expected_start = tester.head;
+ tester.make_contiguous();
+ assert_eq!(tester.tail, expected_start);
+ assert_eq!((&[9, 8, 7, 6, 5, 4, 3, 0, 1, 2] as &[_], &[] as &[_]), tester.as_slices());
+}
+
+#[test]
+fn make_contiguous_big_head() {
+ let mut tester = VecDeque::with_capacity(15);
+
+ for i in 0..8 {
+ tester.push_back(i);
+ }
+
+ for i in 8..10 {
+ tester.push_front(i);
+ }
+
+ // 01234567......98
+ let expected_start = 0;
+ tester.make_contiguous();
+ assert_eq!(tester.tail, expected_start);
+ assert_eq!((&[9, 8, 0, 1, 2, 3, 4, 5, 6, 7] as &[_], &[] as &[_]), tester.as_slices());
+}
+
+#[test]
+fn make_contiguous_small_free() {
+ let mut tester = VecDeque::with_capacity(15);
+
+ for i in 'A' as u8..'I' as u8 {
+ tester.push_back(i as char);
+ }
+
+ for i in 'I' as u8..'N' as u8 {
+ tester.push_front(i as char);
+ }
+
+ // ABCDEFGH...MLKJI
+ let expected_start = 0;
+ tester.make_contiguous();
+ assert_eq!(tester.tail, expected_start);
+ assert_eq!(
+ (&['M', 'L', 'K', 'J', 'I', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'] as &[_], &[] as &[_]),
+ tester.as_slices()
+ );
+
+ tester.clear();
+ for i in 'I' as u8..'N' as u8 {
+ tester.push_back(i as char);
+ }
+
+ for i in 'A' as u8..'I' as u8 {
+ tester.push_front(i as char);
+ }
+
+ // IJKLM...HGFEDCBA
+ let expected_start = 0;
+ tester.make_contiguous();
+ assert_eq!(tester.tail, expected_start);
+ assert_eq!(
+ (&['H', 'G', 'F', 'E', 'D', 'C', 'B', 'A', 'I', 'J', 'K', 'L', 'M'] as &[_], &[] as &[_]),
+ tester.as_slices()
+ );
+}
+
+#[test]
+fn make_contiguous_head_to_end() {
+ let mut dq = VecDeque::with_capacity(3);
+ dq.push_front('B');
+ dq.push_front('A');
+ dq.push_back('C');
+ dq.make_contiguous();
+ let expected_tail = 0;
+ let expected_head = 3;
+ assert_eq!(expected_tail, dq.tail);
+ assert_eq!(expected_head, dq.head);
+ assert_eq!((&['A', 'B', 'C'] as &[_], &[] as &[_]), dq.as_slices());
+}
+
+#[test]
+fn make_contiguous_head_to_end_2() {
+ // Another test case for #79808, taken from #80293.
+
+ let mut dq = VecDeque::from_iter(0..6);
+ dq.pop_front();
+ dq.pop_front();
+ dq.push_back(6);
+ dq.push_back(7);
+ dq.push_back(8);
+ dq.make_contiguous();
+ let collected: Vec<_> = dq.iter().copied().collect();
+ assert_eq!(dq.as_slices(), (&collected[..], &[] as &[_]));
+}
+
+#[test]
+fn test_remove() {
+ // This test checks that every single combination of tail position, length, and
+ // removal position is tested. Capacity 15 should be large enough to cover every case.
+
+ let mut tester = VecDeque::with_capacity(15);
+ // can't guarantee we got 15, so have to get what we got.
+ // 15 would be great, but we will definitely get 2^k - 1, for k >= 4, or else
+ // this test isn't covering what it wants to
+ let cap = tester.capacity();
+
+ // len is the length *after* removal
+ let minlen = if cfg!(miri) { cap - 2 } else { 0 }; // Miri is too slow
+ for len in minlen..cap - 1 {
+ // 0, 1, 2, .., len - 1
+ let expected = (0..).take(len).collect::<VecDeque<_>>();
+ for tail_pos in 0..cap {
+ for to_remove in 0..=len {
+ tester.tail = tail_pos;
+ tester.head = tail_pos;
+ for i in 0..len {
+ if i == to_remove {
+ tester.push_back(1234);
+ }
+ tester.push_back(i);
+ }
+ if to_remove == len {
+ tester.push_back(1234);
+ }
+ tester.remove(to_remove);
+ assert!(tester.tail < tester.cap());
+ assert!(tester.head < tester.cap());
+ assert_eq!(tester, expected);
+ }
+ }
+ }
+}
+
+#[test]
+fn test_range() {
+ let mut tester: VecDeque<usize> = VecDeque::with_capacity(7);
+
+ let cap = tester.capacity();
+ let minlen = if cfg!(miri) { cap - 1 } else { 0 }; // Miri is too slow
+ for len in minlen..=cap {
+ for tail in 0..=cap {
+ for start in 0..=len {
+ for end in start..=len {
+ tester.tail = tail;
+ tester.head = tail;
+ for i in 0..len {
+ tester.push_back(i);
+ }
+
+ // Check that we iterate over the correct values
+ let range: VecDeque<_> = tester.range(start..end).copied().collect();
+ let expected: VecDeque<_> = (start..end).collect();
+ assert_eq!(range, expected);
+ }
+ }
+ }
+ }
+}
+
+#[test]
+fn test_range_mut() {
+ let mut tester: VecDeque<usize> = VecDeque::with_capacity(7);
+
+ let cap = tester.capacity();
+ for len in 0..=cap {
+ for tail in 0..=cap {
+ for start in 0..=len {
+ for end in start..=len {
+ tester.tail = tail;
+ tester.head = tail;
+ for i in 0..len {
+ tester.push_back(i);
+ }
+
+ let head_was = tester.head;
+ let tail_was = tester.tail;
+
+ // Check that we iterate over the correct values
+ let range: VecDeque<_> = tester.range_mut(start..end).map(|v| *v).collect();
+ let expected: VecDeque<_> = (start..end).collect();
+ assert_eq!(range, expected);
+
+ // We shouldn't have changed the capacity or made the
+ // head or tail out of bounds
+ assert_eq!(tester.capacity(), cap);
+ assert_eq!(tester.tail, tail_was);
+ assert_eq!(tester.head, head_was);
+ }
+ }
+ }
+ }
+}
+
+#[test]
+fn test_drain() {
+ let mut tester: VecDeque<usize> = VecDeque::with_capacity(7);
+
+ let cap = tester.capacity();
+ for len in 0..=cap {
+ for tail in 0..=cap {
+ for drain_start in 0..=len {
+ for drain_end in drain_start..=len {
+ tester.tail = tail;
+ tester.head = tail;
+ for i in 0..len {
+ tester.push_back(i);
+ }
+
+ // Check that we drain the correct values
+ let drained: VecDeque<_> = tester.drain(drain_start..drain_end).collect();
+ let drained_expected: VecDeque<_> = (drain_start..drain_end).collect();
+ assert_eq!(drained, drained_expected);
+
+ // We shouldn't have changed the capacity or made the
+ // head or tail out of bounds
+ assert_eq!(tester.capacity(), cap);
+ assert!(tester.tail < tester.cap());
+ assert!(tester.head < tester.cap());
+
+ // We should see the correct values in the VecDeque
+ let expected: VecDeque<_> = (0..drain_start).chain(drain_end..len).collect();
+ assert_eq!(expected, tester);
+ }
+ }
+ }
+ }
+}
+
+#[test]
+fn test_shrink_to_fit() {
+ // This test checks that every single combination of head and tail position,
+ // is tested. Capacity 15 should be large enough to cover every case.
+
+ let mut tester = VecDeque::with_capacity(15);
+ // can't guarantee we got 15, so have to get what we got.
+ // 15 would be great, but we will definitely get 2^k - 1, for k >= 4, or else
+ // this test isn't covering what it wants to
+ let cap = tester.capacity();
+ tester.reserve(63);
+ let max_cap = tester.capacity();
+
+ for len in 0..=cap {
+ // 0, 1, 2, .., len - 1
+ let expected = (0..).take(len).collect::<VecDeque<_>>();
+ for tail_pos in 0..=max_cap {
+ tester.tail = tail_pos;
+ tester.head = tail_pos;
+ tester.reserve(63);
+ for i in 0..len {
+ tester.push_back(i);
+ }
+ tester.shrink_to_fit();
+ assert!(tester.capacity() <= cap);
+ assert!(tester.tail < tester.cap());
+ assert!(tester.head < tester.cap());
+ assert_eq!(tester, expected);
+ }
+ }
+}
+
+#[test]
+fn test_split_off() {
+ // This test checks that every single combination of tail position, length, and
+ // split position is tested. Capacity 15 should be large enough to cover every case.
+
+ let mut tester = VecDeque::with_capacity(15);
+ // can't guarantee we got 15, so have to get what we got.
+ // 15 would be great, but we will definitely get 2^k - 1, for k >= 4, or else
+ // this test isn't covering what it wants to
+ let cap = tester.capacity();
+
+ // len is the length *before* splitting
+ let minlen = if cfg!(miri) { cap - 1 } else { 0 }; // Miri is too slow
+ for len in minlen..cap {
+ // index to split at
+ for at in 0..=len {
+ // 0, 1, 2, .., at - 1 (may be empty)
+ let expected_self = (0..).take(at).collect::<VecDeque<_>>();
+ // at, at + 1, .., len - 1 (may be empty)
+ let expected_other = (at..).take(len - at).collect::<VecDeque<_>>();
+
+ for tail_pos in 0..cap {
+ tester.tail = tail_pos;
+ tester.head = tail_pos;
+ for i in 0..len {
+ tester.push_back(i);
+ }
+ let result = tester.split_off(at);
+ assert!(tester.tail < tester.cap());
+ assert!(tester.head < tester.cap());
+ assert!(result.tail < result.cap());
+ assert!(result.head < result.cap());
+ assert_eq!(tester, expected_self);
+ assert_eq!(result, expected_other);
+ }
+ }
+ }
+}
+
+#[test]
+fn test_from_vec() {
+ use crate::vec::Vec;
+ for cap in 0..35 {
+ for len in 0..=cap {
+ let mut vec = Vec::with_capacity(cap);
+ vec.extend(0..len);
+
+ let vd = VecDeque::from(vec.clone());
+ assert!(vd.cap().is_power_of_two());
+ assert_eq!(vd.len(), vec.len());
+ assert!(vd.into_iter().eq(vec));
+ }
+ }
+
+ let vec = Vec::from([(); MAXIMUM_ZST_CAPACITY - 1]);
+ let vd = VecDeque::from(vec.clone());
+ assert!(vd.cap().is_power_of_two());
+ assert_eq!(vd.len(), vec.len());
+}
+
+#[test]
+fn test_extend_basic() {
+ test_extend_impl(false);
+}
+
+#[test]
+fn test_extend_trusted_len() {
+ test_extend_impl(true);
+}
+
+fn test_extend_impl(trusted_len: bool) {
+ struct VecDequeTester {
+ test: VecDeque<usize>,
+ expected: VecDeque<usize>,
+ trusted_len: bool,
+ }
+
+ impl VecDequeTester {
+ fn new(trusted_len: bool) -> Self {
+ Self { test: VecDeque::new(), expected: VecDeque::new(), trusted_len }
+ }
+
+ fn test_extend<I>(&mut self, iter: I)
+ where
+ I: Iterator<Item = usize> + TrustedLen + Clone,
+ {
+ struct BasicIterator<I>(I);
+ impl<I> Iterator for BasicIterator<I>
+ where
+ I: Iterator<Item = usize>,
+ {
+ type Item = usize;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ self.0.next()
+ }
+ }
+
+ if self.trusted_len {
+ self.test.extend(iter.clone());
+ } else {
+ self.test.extend(BasicIterator(iter.clone()));
+ }
+
+ for item in iter {
+ self.expected.push_back(item)
+ }
+
+ assert_eq!(self.test, self.expected);
+ let (a1, b1) = self.test.as_slices();
+ let (a2, b2) = self.expected.as_slices();
+ assert_eq!(a1, a2);
+ assert_eq!(b1, b2);
+ }
+
+ fn drain<R: RangeBounds<usize> + Clone>(&mut self, range: R) {
+ self.test.drain(range.clone());
+ self.expected.drain(range);
+
+ assert_eq!(self.test, self.expected);
+ }
+
+ fn clear(&mut self) {
+ self.test.clear();
+ self.expected.clear();
+ }
+
+ fn remaining_capacity(&self) -> usize {
+ self.test.capacity() - self.test.len()
+ }
+ }
+
+ let mut tester = VecDequeTester::new(trusted_len);
+
+ // Initial capacity
+ tester.test_extend(0..tester.remaining_capacity() - 1);
+
+ // Grow
+ tester.test_extend(1024..2048);
+
+ // Wrap around
+ tester.drain(..128);
+
+ tester.test_extend(0..tester.remaining_capacity() - 1);
+
+ // Continue
+ tester.drain(256..);
+ tester.test_extend(4096..8196);
+
+ tester.clear();
+
+ // Start again
+ tester.test_extend(0..32);
+}
+
+#[test]
+#[should_panic = "capacity overflow"]
+fn test_from_vec_zst_overflow() {
+ use crate::vec::Vec;
+ let vec = Vec::from([(); MAXIMUM_ZST_CAPACITY]);
+ let vd = VecDeque::from(vec.clone()); // no room for +1
+ assert!(vd.cap().is_power_of_two());
+ assert_eq!(vd.len(), vec.len());
+}
+
+#[test]
+fn test_from_array() {
+ fn test<const N: usize>() {
+ let mut array: [usize; N] = [0; N];
+
+ for i in 0..N {
+ array[i] = i;
+ }
+
+ let deq: VecDeque<_> = array.into();
+
+ for i in 0..N {
+ assert_eq!(deq[i], i);
+ }
+
+ assert!(deq.cap().is_power_of_two());
+ assert_eq!(deq.len(), N);
+ }
+ test::<0>();
+ test::<1>();
+ test::<2>();
+ test::<32>();
+ test::<35>();
+
+ let array = [(); MAXIMUM_ZST_CAPACITY - 1];
+ let deq = VecDeque::from(array);
+ assert!(deq.cap().is_power_of_two());
+ assert_eq!(deq.len(), MAXIMUM_ZST_CAPACITY - 1);
+}
+
+#[test]
+fn test_vec_from_vecdeque() {
+ use crate::vec::Vec;
+
+ fn create_vec_and_test_convert(capacity: usize, offset: usize, len: usize) {
+ let mut vd = VecDeque::with_capacity(capacity);
+ for _ in 0..offset {
+ vd.push_back(0);
+ vd.pop_front();
+ }
+ vd.extend(0..len);
+
+ let vec: Vec<_> = Vec::from(vd.clone());
+ assert_eq!(vec.len(), vd.len());
+ assert!(vec.into_iter().eq(vd));
+ }
+
+ // Miri is too slow
+ let max_pwr = if cfg!(miri) { 5 } else { 7 };
+
+ for cap_pwr in 0..max_pwr {
+ // Make capacity as a (2^x)-1, so that the ring size is 2^x
+ let cap = (2i32.pow(cap_pwr) - 1) as usize;
+
+ // In these cases there is enough free space to solve it with copies
+ for len in 0..((cap + 1) / 2) {
+ // Test contiguous cases
+ for offset in 0..(cap - len) {
+ create_vec_and_test_convert(cap, offset, len)
+ }
+
+ // Test cases where block at end of buffer is bigger than block at start
+ for offset in (cap - len)..(cap - (len / 2)) {
+ create_vec_and_test_convert(cap, offset, len)
+ }
+
+ // Test cases where block at start of buffer is bigger than block at end
+ for offset in (cap - (len / 2))..cap {
+ create_vec_and_test_convert(cap, offset, len)
+ }
+ }
+
+ // Now there's not (necessarily) space to straighten the ring with simple copies,
+ // the ring will use swapping when:
+        // (cap + 1 - offset) > (cap + 1 - len) && (len - (cap + 1 - offset)) > (cap + 1 - len)
+ // right block size > free space && left block size > free space
+ for len in ((cap + 1) / 2)..cap {
+ // Test contiguous cases
+ for offset in 0..(cap - len) {
+ create_vec_and_test_convert(cap, offset, len)
+ }
+
+ // Test cases where block at end of buffer is bigger than block at start
+ for offset in (cap - len)..(cap - (len / 2)) {
+ create_vec_and_test_convert(cap, offset, len)
+ }
+
+ // Test cases where block at start of buffer is bigger than block at end
+ for offset in (cap - (len / 2))..cap {
+ create_vec_and_test_convert(cap, offset, len)
+ }
+ }
+ }
+}
+
+#[test]
+fn test_clone_from() {
+ let m = vec![1; 8];
+ let n = vec![2; 12];
+ let limit = if cfg!(miri) { 4 } else { 8 }; // Miri is too slow
+ for pfv in 0..limit {
+ for pfu in 0..limit {
+ for longer in 0..2 {
+ let (vr, ur) = if longer == 0 { (&m, &n) } else { (&n, &m) };
+ let mut v = VecDeque::from(vr.clone());
+ for _ in 0..pfv {
+ v.push_front(1);
+ }
+ let mut u = VecDeque::from(ur.clone());
+ for _ in 0..pfu {
+ u.push_front(2);
+ }
+ v.clone_from(&u);
+ assert_eq!(&v, &u);
+ }
+ }
+ }
+}
+
+#[test]
+fn test_vec_deque_truncate_drop() {
+ static mut DROPS: u32 = 0;
+ #[derive(Clone)]
+ struct Elem(i32);
+ impl Drop for Elem {
+ fn drop(&mut self) {
+ unsafe {
+ DROPS += 1;
+ }
+ }
+ }
+
+ let v = vec![Elem(1), Elem(2), Elem(3), Elem(4), Elem(5)];
+ for push_front in 0..=v.len() {
+ let v = v.clone();
+ let mut tester = VecDeque::with_capacity(5);
+ for (index, elem) in v.into_iter().enumerate() {
+ if index < push_front {
+ tester.push_front(elem);
+ } else {
+ tester.push_back(elem);
+ }
+ }
+ assert_eq!(unsafe { DROPS }, 0);
+ tester.truncate(3);
+ assert_eq!(unsafe { DROPS }, 2);
+ tester.truncate(0);
+ assert_eq!(unsafe { DROPS }, 5);
+ unsafe {
+ DROPS = 0;
+ }
+ }
+}
+
+#[test]
+fn issue_53529() {
+ use crate::boxed::Box;
+
+ let mut dst = VecDeque::new();
+ dst.push_front(Box::new(1));
+ dst.push_front(Box::new(2));
+ assert_eq!(*dst.pop_back().unwrap(), 1);
+
+ let mut src = VecDeque::new();
+ src.push_front(Box::new(2));
+ dst.append(&mut src);
+ for a in dst {
+ assert_eq!(*a, 2);
+ }
+}
+
+#[test]
+fn issue_80303() {
+ use core::iter;
+ use core::num::Wrapping;
+
+ // This is a valid, albeit rather bad hash function implementation.
+ struct SimpleHasher(Wrapping<u64>);
+
+ impl Hasher for SimpleHasher {
+ fn finish(&self) -> u64 {
+ self.0.0
+ }
+
+ fn write(&mut self, bytes: &[u8]) {
+ // This particular implementation hashes value 24 in addition to bytes.
+ // Such an implementation is valid as Hasher only guarantees equivalence
+ // for the exact same set of calls to its methods.
+ for &v in iter::once(&24).chain(bytes) {
+ self.0 = Wrapping(31) * self.0 + Wrapping(u64::from(v));
+ }
+ }
+ }
+
+ fn hash_code(value: impl Hash) -> u64 {
+ let mut hasher = SimpleHasher(Wrapping(1));
+ value.hash(&mut hasher);
+ hasher.finish()
+ }
+
+ // This creates two deques for which values returned by as_slices
+ // method differ.
+ let vda: VecDeque<u8> = (0..10).collect();
+ let mut vdb = VecDeque::with_capacity(10);
+ vdb.extend(5..10);
+ (0..5).rev().for_each(|elem| vdb.push_front(elem));
+ assert_ne!(vda.as_slices(), vdb.as_slices());
+ assert_eq!(vda, vdb);
+ assert_eq!(hash_code(vda), hash_code(vdb));
+}
diff --git a/library/alloc/src/ffi/c_str.rs b/library/alloc/src/ffi/c_str.rs
new file mode 100644
index 000000000..ae61b1f1e
--- /dev/null
+++ b/library/alloc/src/ffi/c_str.rs
@@ -0,0 +1,1123 @@
+#[cfg(test)]
+mod tests;
+
+use crate::borrow::{Cow, ToOwned};
+use crate::boxed::Box;
+use crate::rc::Rc;
+use crate::slice::hack::into_vec;
+use crate::string::String;
+use crate::vec::Vec;
+use core::borrow::Borrow;
+use core::ffi::{c_char, CStr};
+use core::fmt;
+use core::mem;
+use core::num::NonZeroU8;
+use core::ops;
+use core::ptr;
+use core::slice;
+use core::slice::memchr;
+use core::str::{self, Utf8Error};
+
+#[cfg(target_has_atomic = "ptr")]
+use crate::sync::Arc;
+
+/// A type representing an owned, C-compatible, nul-terminated string with no nul bytes in the
+/// middle.
+///
+/// This type serves the purpose of being able to safely generate a
+/// C-compatible string from a Rust byte slice or vector. An instance of this
+/// type is a static guarantee that the underlying bytes contain no interior 0
+/// bytes ("nul characters") and that the final byte is 0 ("nul terminator").
+///
+/// `CString` is to <code>&[CStr]</code> as [`String`] is to <code>&[str]</code>: the former
+/// in each pair are owned strings; the latter are borrowed
+/// references.
+///
+/// # Creating a `CString`
+///
+/// A `CString` is created from either a byte slice or a byte vector,
+/// or anything that implements <code>[Into]<[Vec]<[u8]>></code> (for
+/// example, you can build a `CString` straight out of a [`String`] or
+/// a <code>&[str]</code>, since both implement that trait).
+///
+/// The [`CString::new`] method will actually check that the provided <code>&[[u8]]</code>
+/// does not have 0 bytes in the middle, and return an error if it
+/// finds one.
+///
+/// # Extracting a raw pointer to the whole C string
+///
+/// `CString` implements an [`as_ptr`][`CStr::as_ptr`] method through the [`Deref`]
+/// trait. This method will give you a `*const c_char` which you can
+/// feed directly to extern functions that expect a nul-terminated
+/// string, like C's `strdup()`. Notice that [`as_ptr`][`CStr::as_ptr`] returns a
+/// read-only pointer; if the C code writes to it, that causes
+/// undefined behavior.
+///
+/// # Extracting a slice of the whole C string
+///
+/// Alternatively, you can obtain a <code>&[[u8]]</code> slice from a
+/// `CString` with the [`CString::as_bytes`] method. Slices produced in this
+/// way do *not* contain the trailing nul terminator. This is useful
+/// when you will be calling an extern function that takes a `*const
+/// u8` argument which is not necessarily nul-terminated, plus another
+/// argument with the length of the string — like C's `strndup()`.
+/// You can of course get the slice's length with its
+/// [`len`][slice::len] method.
+///
+/// If you need a <code>&[[u8]]</code> slice *with* the nul terminator, you
+/// can use [`CString::as_bytes_with_nul`] instead.
+///
+/// Once you have the kind of slice you need (with or without a nul
+/// terminator), you can call the slice's own
+/// [`as_ptr`][slice::as_ptr] method to get a read-only raw pointer to pass to
+/// extern functions. See the documentation for that function for a
+/// discussion on ensuring the lifetime of the raw pointer.
+///
+/// [str]: prim@str "str"
+/// [`Deref`]: ops::Deref
+///
+/// # Examples
+///
+/// ```ignore (extern-declaration)
+/// # fn main() {
+/// use std::ffi::CString;
+/// use std::os::raw::c_char;
+///
+/// extern "C" {
+/// fn my_printer(s: *const c_char);
+/// }
+///
+/// // We are certain that our string doesn't have 0 bytes in the middle,
+/// // so we can .expect()
+/// let c_to_print = CString::new("Hello, world!").expect("CString::new failed");
+/// unsafe {
+/// my_printer(c_to_print.as_ptr());
+/// }
+/// # }
+/// ```
+///
+/// # Safety
+///
+/// `CString` is intended for working with traditional C-style strings
+/// (a sequence of non-nul bytes terminated by a single nul byte); the
+/// primary use case for these kinds of strings is interoperating with C-like
+/// code. Often you will need to transfer ownership to/from that external
+/// code. It is strongly recommended that you thoroughly read through the
+/// documentation of `CString` before use, as improper ownership management
+/// of `CString` instances can lead to invalid memory accesses, memory leaks,
+/// and other memory errors.
+#[derive(PartialEq, PartialOrd, Eq, Ord, Hash, Clone)]
+#[cfg_attr(not(test), rustc_diagnostic_item = "cstring_type")]
+#[stable(feature = "alloc_c_string", since = "1.64.0")]
+pub struct CString {
+ // Invariant 1: the slice ends with a zero byte and has a length of at least one.
+ // Invariant 2: the slice contains only one zero byte.
+ // Improper use of an unsafe function can break Invariant 2, but not Invariant 1.
+ inner: Box<[u8]>,
+}
+
+/// An error indicating that an interior nul byte was found.
+///
+/// While Rust strings may contain nul bytes in the middle, C strings
+/// can't, as that byte would effectively truncate the string.
+///
+/// This error is created by the [`new`][`CString::new`] method on
+/// [`CString`]. See its documentation for more.
+///
+/// # Examples
+///
+/// ```
+/// use std::ffi::{CString, NulError};
+///
+/// let _: NulError = CString::new(b"f\0oo".to_vec()).unwrap_err();
+/// ```
+#[derive(Clone, PartialEq, Eq, Debug)]
+#[stable(feature = "alloc_c_string", since = "1.64.0")]
+pub struct NulError(usize, Vec<u8>);
+
+#[derive(Clone, PartialEq, Eq, Debug)]
+enum FromBytesWithNulErrorKind {
+ InteriorNul(usize),
+ NotNulTerminated,
+}
+
+/// An error indicating that a nul byte was not in the expected position.
+///
+/// The vector used to create a [`CString`] must have one and only one nul byte,
+/// positioned at the end.
+///
+/// This error is created by the [`CString::from_vec_with_nul`] method.
+/// See its documentation for more.
+///
+/// # Examples
+///
+/// ```
+/// use std::ffi::{CString, FromVecWithNulError};
+///
+/// let _: FromVecWithNulError = CString::from_vec_with_nul(b"f\0oo".to_vec()).unwrap_err();
+/// ```
+#[derive(Clone, PartialEq, Eq, Debug)]
+#[stable(feature = "alloc_c_string", since = "1.64.0")]
+pub struct FromVecWithNulError {
+ error_kind: FromBytesWithNulErrorKind,
+ bytes: Vec<u8>,
+}
+
+#[stable(feature = "cstring_from_vec_with_nul", since = "1.58.0")]
+impl FromVecWithNulError {
+ /// Returns a slice of the [`u8`] bytes that were attempted to be converted to a [`CString`].
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::ffi::CString;
+ ///
+ /// // Some invalid bytes in a vector
+ /// let bytes = b"f\0oo".to_vec();
+ ///
+ /// let value = CString::from_vec_with_nul(bytes.clone());
+ ///
+ /// assert_eq!(&bytes[..], value.unwrap_err().as_bytes());
+ /// ```
+ #[must_use]
+ #[stable(feature = "cstring_from_vec_with_nul", since = "1.58.0")]
+ pub fn as_bytes(&self) -> &[u8] {
+ &self.bytes[..]
+ }
+
+ /// Returns the bytes that were attempted to be converted to a [`CString`].
+ ///
+ /// This method is carefully constructed to avoid allocation. It will
+ /// consume the error, moving out the bytes, so that a copy of the bytes
+ /// does not need to be made.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::ffi::CString;
+ ///
+ /// // Some invalid bytes in a vector
+ /// let bytes = b"f\0oo".to_vec();
+ ///
+ /// let value = CString::from_vec_with_nul(bytes.clone());
+ ///
+ /// assert_eq!(bytes, value.unwrap_err().into_bytes());
+ /// ```
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[stable(feature = "cstring_from_vec_with_nul", since = "1.58.0")]
+ pub fn into_bytes(self) -> Vec<u8> {
+ self.bytes
+ }
+}
+
+/// An error indicating invalid UTF-8 when converting a [`CString`] into a [`String`].
+///
+/// `CString` is just a wrapper over a buffer of bytes with a nul terminator;
+/// [`CString::into_string`] performs UTF-8 validation on those bytes and may
+/// return this error.
+///
+/// This `struct` is created by [`CString::into_string()`]. See
+/// its documentation for more.
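+///
+/// # Examples
+///
+/// A short sketch of how this error arises (the byte `0xff` is never valid UTF-8):
+///
+/// ```
+/// use std::ffi::CString;
+///
+/// // Interior nul bytes are checked by `CString::new`; UTF-8 is only checked
+/// // later, when converting into a `String`.
+/// let c_string = CString::new(vec![b'f', 0xff, b'o', b'o']).expect("CString::new failed");
+/// let err = c_string.into_string().unwrap_err();
+/// assert_eq!(err.utf8_error().valid_up_to(), 1);
+/// ```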
+#[derive(Clone, PartialEq, Eq, Debug)]
+#[stable(feature = "alloc_c_string", since = "1.64.0")]
+pub struct IntoStringError {
+ inner: CString,
+ error: Utf8Error,
+}
+
+impl CString {
+ /// Creates a new C-compatible string from a container of bytes.
+ ///
+ /// This function will consume the provided data and use the
+ /// underlying bytes to construct a new string, ensuring that
+ /// there is a trailing 0 byte. This trailing 0 byte will be
+ /// appended by this function; the provided data should *not*
+ /// contain any 0 bytes in it.
+ ///
+ /// # Examples
+ ///
+ /// ```ignore (extern-declaration)
+ /// use std::ffi::CString;
+ /// use std::os::raw::c_char;
+ ///
+ /// extern "C" { fn puts(s: *const c_char); }
+ ///
+ /// let to_print = CString::new("Hello!").expect("CString::new failed");
+ /// unsafe {
+ /// puts(to_print.as_ptr());
+ /// }
+ /// ```
+ ///
+ /// # Errors
+ ///
+ /// This function will return an error if the supplied bytes contain an
+ /// internal 0 byte. The [`NulError`] returned will contain the bytes as well as
+ /// the position of the nul byte.
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn new<T: Into<Vec<u8>>>(t: T) -> Result<CString, NulError> {
+ trait SpecNewImpl {
+ fn spec_new_impl(self) -> Result<CString, NulError>;
+ }
+
+ impl<T: Into<Vec<u8>>> SpecNewImpl for T {
+ default fn spec_new_impl(self) -> Result<CString, NulError> {
+ let bytes: Vec<u8> = self.into();
+ match memchr::memchr(0, &bytes) {
+ Some(i) => Err(NulError(i, bytes)),
+ None => Ok(unsafe { CString::_from_vec_unchecked(bytes) }),
+ }
+ }
+ }
+
+ // Specialization for avoiding reallocation
+ #[inline(always)] // Without that it is not inlined into specializations
+ fn spec_new_impl_bytes(bytes: &[u8]) -> Result<CString, NulError> {
+ // We cannot have a slice so large that this addition would overflow,
+ // but using `checked_add` allows LLVM to assume that the capacity never overflows
+ // and to generate roughly half as much code.
+ // `saturating_add` doesn't help for some reason.
+ let capacity = bytes.len().checked_add(1).unwrap();
+
+ // Allocate before validation to avoid duplication of allocation code.
+ // We still need to allocate and copy memory even if we get an error.
+ let mut buffer = Vec::with_capacity(capacity);
+ buffer.extend(bytes);
+
+ // Check the original slice instead of the new buffer.
+ // This allows better optimizations if LTO is enabled.
+ match memchr::memchr(0, bytes) {
+ Some(i) => Err(NulError(i, buffer)),
+ None => Ok(unsafe { CString::_from_vec_unchecked(buffer) }),
+ }
+ }
+
+ impl SpecNewImpl for &'_ [u8] {
+ fn spec_new_impl(self) -> Result<CString, NulError> {
+ spec_new_impl_bytes(self)
+ }
+ }
+
+ impl SpecNewImpl for &'_ str {
+ fn spec_new_impl(self) -> Result<CString, NulError> {
+ spec_new_impl_bytes(self.as_bytes())
+ }
+ }
+
+ impl SpecNewImpl for &'_ mut [u8] {
+ fn spec_new_impl(self) -> Result<CString, NulError> {
+ spec_new_impl_bytes(self)
+ }
+ }
+
+ t.spec_new_impl()
+ }
+
+ /// Creates a C-compatible string by consuming a byte vector,
+ /// without checking for interior 0 bytes.
+ ///
+ /// A trailing 0 byte will be appended by this function.
+ ///
+ /// This method is equivalent to [`CString::new`] except that no runtime
+ /// assertion is made that `v` contains no 0 bytes, and it requires an
+ /// actual byte vector, not anything that can be converted to one with `Into`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::CString;
+ ///
+ /// let raw = b"foo".to_vec();
+ /// unsafe {
+ /// let c_string = CString::from_vec_unchecked(raw);
+ /// }
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub unsafe fn from_vec_unchecked(v: Vec<u8>) -> Self {
+ debug_assert!(memchr::memchr(0, &v).is_none());
+ unsafe { Self::_from_vec_unchecked(v) }
+ }
+
+ unsafe fn _from_vec_unchecked(mut v: Vec<u8>) -> Self {
+ v.reserve_exact(1);
+ v.push(0);
+ Self { inner: v.into_boxed_slice() }
+ }
+
+ /// Retakes ownership of a `CString` that was transferred to C via
+ /// [`CString::into_raw`].
+ ///
+ /// Additionally, the length of the string will be recalculated from the pointer.
+ ///
+ /// # Safety
+ ///
+ /// This should only ever be called with a pointer that was earlier
+ /// obtained by calling [`CString::into_raw`]. Other usage (e.g., trying to take
+ /// ownership of a string that was allocated by foreign code) is likely to lead
+ /// to undefined behavior or allocator corruption.
+ ///
+ /// It should be noted that the length isn't just "recomputed," but that
+ /// the recomputed length must match the original length from the
+ /// [`CString::into_raw`] call. This means the [`CString::into_raw`]/`from_raw`
+ /// methods should not be used when passing the string to C functions that can
+ /// modify the string's length.
+ ///
+ /// > **Note:** If you need to borrow a string that was allocated by
+ /// > foreign code, use [`CStr`]. If you need to take ownership of
+ /// > a string that was allocated by foreign code, you will need to
+ /// > make your own provisions for freeing it appropriately, likely
+ /// > with the foreign code's API to do that.
+ ///
+ /// # Examples
+ ///
+ /// Create a `CString`, pass ownership to an `extern` function (via raw pointer), then retake
+ /// ownership with `from_raw`:
+ ///
+ /// ```ignore (extern-declaration)
+ /// use std::ffi::CString;
+ /// use std::os::raw::c_char;
+ ///
+ /// extern "C" {
+ /// fn some_extern_function(s: *mut c_char);
+ /// }
+ ///
+ /// let c_string = CString::new("Hello!").expect("CString::new failed");
+ /// let raw = c_string.into_raw();
+ /// unsafe {
+ /// some_extern_function(raw);
+ /// let c_string = CString::from_raw(raw);
+ /// }
+ /// ```
+ #[must_use = "call `drop(from_raw(ptr))` if you intend to drop the `CString`"]
+ #[stable(feature = "cstr_memory", since = "1.4.0")]
+ pub unsafe fn from_raw(ptr: *mut c_char) -> CString {
+ // SAFETY: This is called with a pointer that was obtained from a call
+ // to `CString::into_raw` and the length has not been modified. As such,
+ // we know there is a NUL byte (and only one) at the end and that the
+ // information about the size of the allocation is correct on Rust's
+ // side.
+ unsafe {
+ extern "C" {
+ /// Provided by libc or compiler_builtins.
+ fn strlen(s: *const c_char) -> usize;
+ }
+ let len = strlen(ptr) + 1; // Including the NUL byte
+ let slice = slice::from_raw_parts_mut(ptr, len as usize);
+ CString { inner: Box::from_raw(slice as *mut [c_char] as *mut [u8]) }
+ }
+ }
+
+ /// Consumes the `CString` and transfers ownership of the string to a C caller.
+ ///
+ /// The pointer which this function returns must be returned to Rust and reconstituted using
+ /// [`CString::from_raw`] to be properly deallocated. Specifically, one
+ /// should *not* use the standard C `free()` function to deallocate
+ /// this string.
+ ///
+ /// Failure to call [`CString::from_raw`] will lead to a memory leak.
+ ///
+ /// The C side must **not** modify the length of the string (by writing a
+ /// nul byte somewhere inside the string or removing the final one) before
+ /// it makes it back into Rust using [`CString::from_raw`]. See the safety section
+ /// in [`CString::from_raw`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::CString;
+ ///
+ /// let c_string = CString::new("foo").expect("CString::new failed");
+ ///
+ /// let ptr = c_string.into_raw();
+ ///
+ /// unsafe {
+ /// assert_eq!(b'f', *ptr as u8);
+ /// assert_eq!(b'o', *ptr.offset(1) as u8);
+ /// assert_eq!(b'o', *ptr.offset(2) as u8);
+ /// assert_eq!(b'\0', *ptr.offset(3) as u8);
+ ///
+ /// // retake pointer to free memory
+ /// let _ = CString::from_raw(ptr);
+ /// }
+ /// ```
+ #[inline]
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[stable(feature = "cstr_memory", since = "1.4.0")]
+ pub fn into_raw(self) -> *mut c_char {
+ Box::into_raw(self.into_inner()) as *mut c_char
+ }
+
+ /// Converts the `CString` into a [`String`] if it contains valid UTF-8 data.
+ ///
+ /// On failure, ownership of the original `CString` is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::CString;
+ ///
+ /// let valid_utf8 = vec![b'f', b'o', b'o'];
+ /// let cstring = CString::new(valid_utf8).expect("CString::new failed");
+ /// assert_eq!(cstring.into_string().expect("into_string() call failed"), "foo");
+ ///
+ /// let invalid_utf8 = vec![b'f', 0xff, b'o', b'o'];
+ /// let cstring = CString::new(invalid_utf8).expect("CString::new failed");
+ /// let err = cstring.into_string().err().expect("into_string().err() failed");
+ /// assert_eq!(err.utf8_error().valid_up_to(), 1);
+ /// ```
+ #[stable(feature = "cstring_into", since = "1.7.0")]
+ pub fn into_string(self) -> Result<String, IntoStringError> {
+ String::from_utf8(self.into_bytes()).map_err(|e| IntoStringError {
+ error: e.utf8_error(),
+ inner: unsafe { Self::_from_vec_unchecked(e.into_bytes()) },
+ })
+ }
+
+ /// Consumes the `CString` and returns the underlying byte buffer.
+ ///
+ /// The returned buffer does **not** contain the trailing nul
+ /// terminator, and it is guaranteed to not have any interior nul
+ /// bytes.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::CString;
+ ///
+ /// let c_string = CString::new("foo").expect("CString::new failed");
+ /// let bytes = c_string.into_bytes();
+ /// assert_eq!(bytes, vec![b'f', b'o', b'o']);
+ /// ```
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[stable(feature = "cstring_into", since = "1.7.0")]
+ pub fn into_bytes(self) -> Vec<u8> {
+ let mut vec = into_vec(self.into_inner());
+ let _nul = vec.pop();
+ debug_assert_eq!(_nul, Some(0u8));
+ vec
+ }
+
+ /// Equivalent to [`CString::into_bytes()`] except that the
+ /// returned vector includes the trailing nul terminator.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::CString;
+ ///
+ /// let c_string = CString::new("foo").expect("CString::new failed");
+ /// let bytes = c_string.into_bytes_with_nul();
+ /// assert_eq!(bytes, vec![b'f', b'o', b'o', b'\0']);
+ /// ```
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[stable(feature = "cstring_into", since = "1.7.0")]
+ pub fn into_bytes_with_nul(self) -> Vec<u8> {
+ into_vec(self.into_inner())
+ }
+
+ /// Returns the contents of this `CString` as a slice of bytes.
+ ///
+ /// The returned slice does **not** contain the trailing nul
+ /// terminator, and it is guaranteed to not have any interior nul
+ /// bytes. If you need the nul terminator, use
+ /// [`CString::as_bytes_with_nul`] instead.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::CString;
+ ///
+ /// let c_string = CString::new("foo").expect("CString::new failed");
+ /// let bytes = c_string.as_bytes();
+ /// assert_eq!(bytes, &[b'f', b'o', b'o']);
+ /// ```
+ #[inline]
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn as_bytes(&self) -> &[u8] {
+ // SAFETY: CString has a length at least 1
+ unsafe { self.inner.get_unchecked(..self.inner.len() - 1) }
+ }
+
+ /// Equivalent to [`CString::as_bytes()`] except that the
+ /// returned slice includes the trailing nul terminator.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::CString;
+ ///
+ /// let c_string = CString::new("foo").expect("CString::new failed");
+ /// let bytes = c_string.as_bytes_with_nul();
+ /// assert_eq!(bytes, &[b'f', b'o', b'o', b'\0']);
+ /// ```
+ #[inline]
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn as_bytes_with_nul(&self) -> &[u8] {
+ &self.inner
+ }
+
+ /// Extracts a [`CStr`] slice containing the entire string.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::{CString, CStr};
+ ///
+ /// let c_string = CString::new(b"foo".to_vec()).expect("CString::new failed");
+ /// let cstr = c_string.as_c_str();
+ /// assert_eq!(cstr,
+ /// CStr::from_bytes_with_nul(b"foo\0").expect("CStr::from_bytes_with_nul failed"));
+ /// ```
+ #[inline]
+ #[must_use]
+ #[stable(feature = "as_c_str", since = "1.20.0")]
+ pub fn as_c_str(&self) -> &CStr {
+ &*self
+ }
+
+ /// Converts this `CString` into a boxed [`CStr`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::{CString, CStr};
+ ///
+ /// let c_string = CString::new(b"foo".to_vec()).expect("CString::new failed");
+ /// let boxed = c_string.into_boxed_c_str();
+ /// assert_eq!(&*boxed,
+ /// CStr::from_bytes_with_nul(b"foo\0").expect("CStr::from_bytes_with_nul failed"));
+ /// ```
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[stable(feature = "into_boxed_c_str", since = "1.20.0")]
+ pub fn into_boxed_c_str(self) -> Box<CStr> {
+ unsafe { Box::from_raw(Box::into_raw(self.into_inner()) as *mut CStr) }
+ }
+
+ /// Bypass "move out of struct which implements [`Drop`] trait" restriction.
+ #[inline]
+ fn into_inner(self) -> Box<[u8]> {
+ // Rationale: `mem::forget(self)` invalidates the previous call to `ptr::read(&self.inner)`
+ // so we use `ManuallyDrop` to ensure `self` is not dropped.
+ // Then we can return the box directly without invalidating it.
+ // See https://github.com/rust-lang/rust/issues/62553.
+ let this = mem::ManuallyDrop::new(self);
+ unsafe { ptr::read(&this.inner) }
+ }
+
+ /// Converts a <code>[Vec]<[u8]></code> to a [`CString`] without checking the
+ /// invariants on the given [`Vec`].
+ ///
+ /// # Safety
+ ///
+ /// The given [`Vec`] **must** have one nul byte as its last element.
+ /// This means it cannot be empty, nor can it contain any other nul byte elsewhere.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// use std::ffi::CString;
+ /// assert_eq!(
+ /// unsafe { CString::from_vec_with_nul_unchecked(b"abc\0".to_vec()) },
+ /// unsafe { CString::from_vec_unchecked(b"abc".to_vec()) }
+ /// );
+ /// ```
+ #[must_use]
+ #[stable(feature = "cstring_from_vec_with_nul", since = "1.58.0")]
+ pub unsafe fn from_vec_with_nul_unchecked(v: Vec<u8>) -> Self {
+ debug_assert!(memchr::memchr(0, &v).unwrap() + 1 == v.len());
+ unsafe { Self::_from_vec_with_nul_unchecked(v) }
+ }
+
+ unsafe fn _from_vec_with_nul_unchecked(v: Vec<u8>) -> Self {
+ Self { inner: v.into_boxed_slice() }
+ }
+
+ /// Attempts to convert a <code>[Vec]<[u8]></code> to a [`CString`].
+ ///
+ /// Runtime checks are present to ensure there is only one nul byte in the
+ /// [`Vec`], its last element.
+ ///
+ /// # Errors
+ ///
+ /// If a nul byte is present anywhere other than in the last position, or if
+ /// no nul byte is present at all, an error will be returned.
+ ///
+ /// # Examples
+ ///
+ /// A successful conversion will produce the same result as [`CString::new`]
+ /// when called without the ending nul byte.
+ ///
+ /// ```
+ /// use std::ffi::CString;
+ /// assert_eq!(
+ /// CString::from_vec_with_nul(b"abc\0".to_vec())
+ /// .expect("CString::from_vec_with_nul failed"),
+ /// CString::new(b"abc".to_vec()).expect("CString::new failed")
+ /// );
+ /// ```
+ ///
+ /// An incorrectly formatted [`Vec`] will produce an error.
+ ///
+ /// ```
+ /// use std::ffi::{CString, FromVecWithNulError};
+ /// // Interior nul byte
+ /// let _: FromVecWithNulError = CString::from_vec_with_nul(b"a\0bc".to_vec()).unwrap_err();
+ /// // No nul byte
+ /// let _: FromVecWithNulError = CString::from_vec_with_nul(b"abc".to_vec()).unwrap_err();
+ /// ```
+ #[stable(feature = "cstring_from_vec_with_nul", since = "1.58.0")]
+ pub fn from_vec_with_nul(v: Vec<u8>) -> Result<Self, FromVecWithNulError> {
+ let nul_pos = memchr::memchr(0, &v);
+ match nul_pos {
+ Some(nul_pos) if nul_pos + 1 == v.len() => {
+ // SAFETY: We know there is only one nul byte, at the end
+ // of the vec.
+ Ok(unsafe { Self::_from_vec_with_nul_unchecked(v) })
+ }
+ Some(nul_pos) => Err(FromVecWithNulError {
+ error_kind: FromBytesWithNulErrorKind::InteriorNul(nul_pos),
+ bytes: v,
+ }),
+ None => Err(FromVecWithNulError {
+ error_kind: FromBytesWithNulErrorKind::NotNulTerminated,
+ bytes: v,
+ }),
+ }
+ }
+}
+
+// Turns this `CString` into an empty string to prevent
+// memory-unsafe code from working by accident. Inline
+// to prevent LLVM from optimizing it away in debug builds.
+#[stable(feature = "cstring_drop", since = "1.13.0")]
+impl Drop for CString {
+ #[inline]
+ fn drop(&mut self) {
+ unsafe {
+ *self.inner.get_unchecked_mut(0) = 0;
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl ops::Deref for CString {
+ type Target = CStr;
+
+ #[inline]
+ fn deref(&self) -> &CStr {
+ unsafe { CStr::from_bytes_with_nul_unchecked(self.as_bytes_with_nul()) }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Debug for CString {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&**self, f)
+ }
+}
+
+#[stable(feature = "cstring_into", since = "1.7.0")]
+impl From<CString> for Vec<u8> {
+ /// Converts a [`CString`] into a <code>[Vec]<[u8]></code>.
+ ///
+ /// The conversion consumes the [`CString`], and removes the terminating NUL byte.
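+ ///
+ /// A quick sketch of the conversion:
+ ///
+ /// ```
+ /// use std::ffi::CString;
+ ///
+ /// let c_string = CString::new("hello").expect("CString::new failed");
+ /// let bytes: Vec<u8> = Vec::from(c_string);
+ /// // The terminating nul byte is not part of the returned vector.
+ /// assert_eq!(bytes, b"hello".to_vec());
+ /// ```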
+ #[inline]
+ fn from(s: CString) -> Vec<u8> {
+ s.into_bytes()
+ }
+}
+
+#[stable(feature = "cstr_default", since = "1.10.0")]
+impl Default for CString {
+ /// Creates an empty `CString`.
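+ ///
+ /// The default value is the empty C string, i.e. just the nul terminator:
+ ///
+ /// ```
+ /// use std::ffi::CString;
+ ///
+ /// let empty = CString::default();
+ /// assert_eq!(empty.as_bytes_with_nul(), b"\0");
+ /// ```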
+ fn default() -> CString {
+ let a: &CStr = Default::default();
+ a.to_owned()
+ }
+}
+
+#[stable(feature = "cstr_borrow", since = "1.3.0")]
+impl Borrow<CStr> for CString {
+ #[inline]
+ fn borrow(&self) -> &CStr {
+ self
+ }
+}
+
+#[stable(feature = "cstring_from_cow_cstr", since = "1.28.0")]
+impl<'a> From<Cow<'a, CStr>> for CString {
+ /// Converts a `Cow<'a, CStr>` into a `CString`, by copying the contents if they are
+ /// borrowed.
+ #[inline]
+ fn from(s: Cow<'a, CStr>) -> Self {
+ s.into_owned()
+ }
+}
+
+#[cfg(not(test))]
+#[stable(feature = "box_from_c_str", since = "1.17.0")]
+impl From<&CStr> for Box<CStr> {
+ /// Converts a `&CStr` into a `Box<CStr>`,
+ /// by copying the contents into a newly allocated [`Box`].
+ fn from(s: &CStr) -> Box<CStr> {
+ let boxed: Box<[u8]> = Box::from(s.to_bytes_with_nul());
+ unsafe { Box::from_raw(Box::into_raw(boxed) as *mut CStr) }
+ }
+}
+
+#[stable(feature = "box_from_cow", since = "1.45.0")]
+impl From<Cow<'_, CStr>> for Box<CStr> {
+ /// Converts a `Cow<'a, CStr>` into a `Box<CStr>`,
+ /// by copying the contents if they are borrowed.
+ #[inline]
+ fn from(cow: Cow<'_, CStr>) -> Box<CStr> {
+ match cow {
+ Cow::Borrowed(s) => Box::from(s),
+ Cow::Owned(s) => Box::from(s),
+ }
+ }
+}
+
+#[stable(feature = "c_string_from_box", since = "1.18.0")]
+impl From<Box<CStr>> for CString {
+ /// Converts a <code>[Box]<[CStr]></code> into a [`CString`] without copying or allocating.
+ #[inline]
+ fn from(s: Box<CStr>) -> CString {
+ let raw = Box::into_raw(s) as *mut [u8];
+ CString { inner: unsafe { Box::from_raw(raw) } }
+ }
+}
+
+#[stable(feature = "cstring_from_vec_of_nonzerou8", since = "1.43.0")]
+impl From<Vec<NonZeroU8>> for CString {
+ /// Converts a <code>[Vec]<[NonZeroU8]></code> into a [`CString`] without
+ /// copying or checking for interior nul bytes.
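+ ///
+ /// A quick sketch, building the vector from bytes that are known to be non-zero:
+ ///
+ /// ```
+ /// use std::ffi::CString;
+ /// use std::num::NonZeroU8;
+ ///
+ /// let v: Vec<NonZeroU8> = b"abc".iter().map(|&b| NonZeroU8::new(b).unwrap()).collect();
+ /// let c_string = CString::from(v);
+ /// assert_eq!(c_string.as_bytes(), b"abc");
+ /// ```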
+ #[inline]
+ fn from(v: Vec<NonZeroU8>) -> CString {
+ unsafe {
+ // Transmute `Vec<NonZeroU8>` to `Vec<u8>`.
+ let v: Vec<u8> = {
+ // SAFETY:
+ // - transmuting between `NonZeroU8` and `u8` is sound;
+ // - `alloc::Layout<NonZeroU8> == alloc::Layout<u8>`.
+ let (ptr, len, cap): (*mut NonZeroU8, _, _) = Vec::into_raw_parts(v);
+ Vec::from_raw_parts(ptr.cast::<u8>(), len, cap)
+ };
+ // SAFETY: `v` cannot contain null bytes, given the type-level
+ // invariant of `NonZeroU8`.
+ Self::_from_vec_unchecked(v)
+ }
+ }
+}
+
+#[cfg(not(test))]
+#[stable(feature = "more_box_slice_clone", since = "1.29.0")]
+impl Clone for Box<CStr> {
+ #[inline]
+ fn clone(&self) -> Self {
+ (**self).into()
+ }
+}
+
+#[stable(feature = "box_from_c_string", since = "1.20.0")]
+impl From<CString> for Box<CStr> {
+ /// Converts a [`CString`] into a <code>[Box]<[CStr]></code> without copying or allocating.
+ #[inline]
+ fn from(s: CString) -> Box<CStr> {
+ s.into_boxed_c_str()
+ }
+}
+
+#[stable(feature = "cow_from_cstr", since = "1.28.0")]
+impl<'a> From<CString> for Cow<'a, CStr> {
+ /// Converts a [`CString`] into an owned [`Cow`] without copying or allocating.
+ #[inline]
+ fn from(s: CString) -> Cow<'a, CStr> {
+ Cow::Owned(s)
+ }
+}
+
+#[stable(feature = "cow_from_cstr", since = "1.28.0")]
+impl<'a> From<&'a CStr> for Cow<'a, CStr> {
+ /// Converts a [`CStr`] into a borrowed [`Cow`] without copying or allocating.
+ #[inline]
+ fn from(s: &'a CStr) -> Cow<'a, CStr> {
+ Cow::Borrowed(s)
+ }
+}
+
+#[stable(feature = "cow_from_cstr", since = "1.28.0")]
+impl<'a> From<&'a CString> for Cow<'a, CStr> {
+ /// Converts a `&`[`CString`] into a borrowed [`Cow`] without copying or allocating.
+ #[inline]
+ fn from(s: &'a CString) -> Cow<'a, CStr> {
+ Cow::Borrowed(s.as_c_str())
+ }
+}
+
+#[cfg(target_has_atomic = "ptr")]
+#[stable(feature = "shared_from_slice2", since = "1.24.0")]
+impl From<CString> for Arc<CStr> {
+ /// Converts a [`CString`] into an <code>[Arc]<[CStr]></code> by moving the [`CString`]
+ /// data into a new [`Arc`] buffer.
+ #[inline]
+ fn from(s: CString) -> Arc<CStr> {
+ let arc: Arc<[u8]> = Arc::from(s.into_inner());
+ unsafe { Arc::from_raw(Arc::into_raw(arc) as *const CStr) }
+ }
+}
+
+#[cfg(target_has_atomic = "ptr")]
+#[stable(feature = "shared_from_slice2", since = "1.24.0")]
+impl From<&CStr> for Arc<CStr> {
+ /// Converts a `&CStr` into an `Arc<CStr>`,
+ /// by copying the contents into a newly allocated [`Arc`].
+ #[inline]
+ fn from(s: &CStr) -> Arc<CStr> {
+ let arc: Arc<[u8]> = Arc::from(s.to_bytes_with_nul());
+ unsafe { Arc::from_raw(Arc::into_raw(arc) as *const CStr) }
+ }
+}
+
+#[stable(feature = "shared_from_slice2", since = "1.24.0")]
+impl From<CString> for Rc<CStr> {
+ /// Converts a [`CString`] into an <code>[Rc]<[CStr]></code> by moving the [`CString`]
+ /// data into a new [`Rc`] buffer.
+ #[inline]
+ fn from(s: CString) -> Rc<CStr> {
+ let rc: Rc<[u8]> = Rc::from(s.into_inner());
+ unsafe { Rc::from_raw(Rc::into_raw(rc) as *const CStr) }
+ }
+}
+
+#[stable(feature = "shared_from_slice2", since = "1.24.0")]
+impl From<&CStr> for Rc<CStr> {
+ /// Converts a `&CStr` into an `Rc<CStr>`,
+ /// by copying the contents into a newly allocated [`Rc`].
+ #[inline]
+ fn from(s: &CStr) -> Rc<CStr> {
+ let rc: Rc<[u8]> = Rc::from(s.to_bytes_with_nul());
+ unsafe { Rc::from_raw(Rc::into_raw(rc) as *const CStr) }
+ }
+}
+
+#[cfg(not(test))]
+#[stable(feature = "default_box_extra", since = "1.17.0")]
+impl Default for Box<CStr> {
+ fn default() -> Box<CStr> {
+ let boxed: Box<[u8]> = Box::from([0]);
+ unsafe { Box::from_raw(Box::into_raw(boxed) as *mut CStr) }
+ }
+}
+
+impl NulError {
+ /// Returns the position of the nul byte in the slice that caused
+ /// [`CString::new`] to fail.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::CString;
+ ///
+ /// let nul_error = CString::new("foo\0bar").unwrap_err();
+ /// assert_eq!(nul_error.nul_position(), 3);
+ ///
+ /// let nul_error = CString::new("foo bar\0").unwrap_err();
+ /// assert_eq!(nul_error.nul_position(), 7);
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn nul_position(&self) -> usize {
+ self.0
+ }
+
+ /// Consumes this error, returning the underlying vector of bytes which
+ /// generated the error in the first place.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::CString;
+ ///
+ /// let nul_error = CString::new("foo\0bar").unwrap_err();
+ /// assert_eq!(nul_error.into_vec(), b"foo\0bar");
+ /// ```
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn into_vec(self) -> Vec<u8> {
+ self.1
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Display for NulError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "nul byte found in provided data at position: {}", self.0)
+ }
+}
+
+#[stable(feature = "cstring_from_vec_with_nul", since = "1.58.0")]
+impl fmt::Display for FromVecWithNulError {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ match self.error_kind {
+ FromBytesWithNulErrorKind::InteriorNul(pos) => {
+ write!(f, "data provided contains an interior nul byte at pos {pos}")
+ }
+ FromBytesWithNulErrorKind::NotNulTerminated => {
+ write!(f, "data provided is not nul terminated")
+ }
+ }
+ }
+}
+
+impl IntoStringError {
+ /// Consumes this error, returning the original [`CString`] which generated the
+ /// error.
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[stable(feature = "cstring_into", since = "1.7.0")]
+ pub fn into_cstring(self) -> CString {
+ self.inner
+ }
+
+ /// Accesses the underlying UTF-8 error that was the cause of this error.
+ #[must_use]
+ #[stable(feature = "cstring_into", since = "1.7.0")]
+ pub fn utf8_error(&self) -> Utf8Error {
+ self.error
+ }
+
+ #[doc(hidden)]
+ #[unstable(feature = "cstr_internals", issue = "none")]
+ pub fn __source(&self) -> &Utf8Error {
+ &self.error
+ }
+}
+
+impl IntoStringError {
+ fn description(&self) -> &str {
+ "C string contained non-utf8 bytes"
+ }
+}
+
+#[stable(feature = "cstring_into", since = "1.7.0")]
+impl fmt::Display for IntoStringError {
+ #[allow(deprecated, deprecated_in_future)]
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ self.description().fmt(f)
+ }
+}
+
+#[stable(feature = "cstr_borrow", since = "1.3.0")]
+impl ToOwned for CStr {
+ type Owned = CString;
+
+ fn to_owned(&self) -> CString {
+ CString { inner: self.to_bytes_with_nul().into() }
+ }
+
+ fn clone_into(&self, target: &mut CString) {
+ let mut b = into_vec(mem::take(&mut target.inner));
+ self.to_bytes_with_nul().clone_into(&mut b);
+ target.inner = b.into_boxed_slice();
+ }
+}
+
+#[stable(feature = "cstring_asref", since = "1.7.0")]
+impl From<&CStr> for CString {
+ fn from(s: &CStr) -> CString {
+ s.to_owned()
+ }
+}
+
+#[stable(feature = "cstring_asref", since = "1.7.0")]
+impl ops::Index<ops::RangeFull> for CString {
+ type Output = CStr;
+
+ #[inline]
+ fn index(&self, _index: ops::RangeFull) -> &CStr {
+ self
+ }
+}
+
+#[stable(feature = "cstring_asref", since = "1.7.0")]
+impl AsRef<CStr> for CString {
+ #[inline]
+ fn as_ref(&self) -> &CStr {
+ self
+ }
+}
+
+#[cfg(not(test))]
+impl CStr {
+ /// Converts a `CStr` into a <code>[Cow]<[str]></code>.
+ ///
+ /// If the contents of the `CStr` are valid UTF-8 data, this
+ /// function will return a <code>[Cow]::[Borrowed]\(&[str])</code>
+ /// with the corresponding <code>&[str]</code> slice. Otherwise, it will
+ /// replace any invalid UTF-8 sequences with
+ /// [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD] and return a
+ /// <code>[Cow]::[Owned]\(&[str])</code> with the result.
+ ///
+ /// [str]: prim@str "str"
+ /// [Borrowed]: Cow::Borrowed
+ /// [Owned]: Cow::Owned
+ /// [U+FFFD]: core::char::REPLACEMENT_CHARACTER "std::char::REPLACEMENT_CHARACTER"
+ ///
+ /// # Examples
+ ///
+ /// Calling `to_string_lossy` on a `CStr` containing valid UTF-8:
+ ///
+ /// ```
+ /// use std::borrow::Cow;
+ /// use std::ffi::CStr;
+ ///
+ /// let cstr = CStr::from_bytes_with_nul(b"Hello World\0")
+ /// .expect("CStr::from_bytes_with_nul failed");
+ /// assert_eq!(cstr.to_string_lossy(), Cow::Borrowed("Hello World"));
+ /// ```
+ ///
+ /// Calling `to_string_lossy` on a `CStr` containing invalid UTF-8:
+ ///
+ /// ```
+ /// use std::borrow::Cow;
+ /// use std::ffi::CStr;
+ ///
+ /// let cstr = CStr::from_bytes_with_nul(b"Hello \xF0\x90\x80World\0")
+ /// .expect("CStr::from_bytes_with_nul failed");
+ /// assert_eq!(
+ /// cstr.to_string_lossy(),
+ /// Cow::Owned(String::from("Hello �World")) as Cow<'_, str>
+ /// );
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "this returns the result of the operation, \
+ without modifying the original"]
+ #[stable(feature = "cstr_to_str", since = "1.4.0")]
+ pub fn to_string_lossy(&self) -> Cow<'_, str> {
+ String::from_utf8_lossy(self.to_bytes())
+ }
+
+ /// Converts a <code>[Box]<[CStr]></code> into a [`CString`] without copying or allocating.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ffi::CString;
+ ///
+ /// let c_string = CString::new(b"foo".to_vec()).expect("CString::new failed");
+ /// let boxed = c_string.into_boxed_c_str();
+ /// assert_eq!(boxed.into_c_string(), CString::new("foo").expect("CString::new failed"));
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[stable(feature = "into_boxed_c_str", since = "1.20.0")]
+ pub fn into_c_string(self: Box<Self>) -> CString {
+ CString::from(self)
+ }
+}
diff --git a/library/alloc/src/ffi/c_str/tests.rs b/library/alloc/src/ffi/c_str/tests.rs
new file mode 100644
index 000000000..0b7476d5c
--- /dev/null
+++ b/library/alloc/src/ffi/c_str/tests.rs
@@ -0,0 +1,228 @@
+use super::*;
+use crate::rc::Rc;
+use crate::sync::Arc;
+use core::assert_matches::assert_matches;
+use core::ffi::FromBytesUntilNulError;
+use core::hash::{Hash, Hasher};
+
+#[allow(deprecated)]
+use core::hash::SipHasher13 as DefaultHasher;
+
+#[test]
+fn c_to_rust() {
+ let data = b"123\0";
+ let ptr = data.as_ptr() as *const c_char;
+ unsafe {
+ assert_eq!(CStr::from_ptr(ptr).to_bytes(), b"123");
+ assert_eq!(CStr::from_ptr(ptr).to_bytes_with_nul(), b"123\0");
+ }
+}
+
+#[test]
+fn simple() {
+ let s = CString::new("1234").unwrap();
+ assert_eq!(s.as_bytes(), b"1234");
+ assert_eq!(s.as_bytes_with_nul(), b"1234\0");
+}
+
+#[test]
+fn build_with_zero1() {
+ assert!(CString::new(&b"\0"[..]).is_err());
+}
+#[test]
+fn build_with_zero2() {
+ assert!(CString::new(vec![0]).is_err());
+}
+
+#[test]
+fn formatted() {
+ let s = CString::new(&b"abc\x01\x02\n\xE2\x80\xA6\xFF"[..]).unwrap();
+ assert_eq!(format!("{s:?}"), r#""abc\x01\x02\n\xe2\x80\xa6\xff""#);
+}
+
+#[test]
+fn borrowed() {
+ unsafe {
+ let s = CStr::from_ptr(b"12\0".as_ptr() as *const _);
+ assert_eq!(s.to_bytes(), b"12");
+ assert_eq!(s.to_bytes_with_nul(), b"12\0");
+ }
+}
+
+#[test]
+fn to_owned() {
+ let data = b"123\0";
+ let ptr = data.as_ptr() as *const c_char;
+
+ let owned = unsafe { CStr::from_ptr(ptr).to_owned() };
+ assert_eq!(owned.as_bytes_with_nul(), data);
+}
+
+#[test]
+fn equal_hash() {
+ let data = b"123\xE2\xFA\xA6\0";
+ let ptr = data.as_ptr() as *const c_char;
+ let cstr: &'static CStr = unsafe { CStr::from_ptr(ptr) };
+
+ #[allow(deprecated)]
+ let mut s = DefaultHasher::new();
+ cstr.hash(&mut s);
+ let cstr_hash = s.finish();
+ #[allow(deprecated)]
+ let mut s = DefaultHasher::new();
+ CString::new(&data[..data.len() - 1]).unwrap().hash(&mut s);
+ let cstring_hash = s.finish();
+
+ assert_eq!(cstr_hash, cstring_hash);
+}
+
+#[test]
+fn from_bytes_with_nul() {
+ let data = b"123\0";
+ let cstr = CStr::from_bytes_with_nul(data);
+ assert_eq!(cstr.map(CStr::to_bytes), Ok(&b"123"[..]));
+ let cstr = CStr::from_bytes_with_nul(data);
+ assert_eq!(cstr.map(CStr::to_bytes_with_nul), Ok(&b"123\0"[..]));
+
+ unsafe {
+ let cstr = CStr::from_bytes_with_nul(data);
+ let cstr_unchecked = CStr::from_bytes_with_nul_unchecked(data);
+ assert_eq!(cstr, Ok(cstr_unchecked));
+ }
+}
+
+#[test]
+fn from_bytes_with_nul_unterminated() {
+ let data = b"123";
+ let cstr = CStr::from_bytes_with_nul(data);
+ assert!(cstr.is_err());
+}
+
+#[test]
+fn from_bytes_with_nul_interior() {
+ let data = b"1\023\0";
+ let cstr = CStr::from_bytes_with_nul(data);
+ assert!(cstr.is_err());
+}
+
+#[test]
+fn cstr_from_bytes_until_nul() {
+ // Test an empty slice. This should fail because it
+ // does not contain a nul byte.
+ let b = b"";
+ assert_matches!(CStr::from_bytes_until_nul(&b[..]), Err(FromBytesUntilNulError { .. }));
+
+ // Test a non-empty slice, that does not contain a nul byte.
+ let b = b"hello";
+ assert_matches!(CStr::from_bytes_until_nul(&b[..]), Err(FromBytesUntilNulError { .. }));
+
+ // Test an empty nul-terminated string
+ let b = b"\0";
+ let r = CStr::from_bytes_until_nul(&b[..]).unwrap();
+ assert_eq!(r.to_bytes(), b"");
+
+ // Test a slice with the nul byte in the middle
+ let b = b"hello\0world!";
+ let r = CStr::from_bytes_until_nul(&b[..]).unwrap();
+ assert_eq!(r.to_bytes(), b"hello");
+
+ // Test a slice with the nul byte at the end
+ let b = b"hello\0";
+ let r = CStr::from_bytes_until_nul(&b[..]).unwrap();
+ assert_eq!(r.to_bytes(), b"hello");
+
+ // Test a slice with two nul bytes at the end
+ let b = b"hello\0\0";
+ let r = CStr::from_bytes_until_nul(&b[..]).unwrap();
+ assert_eq!(r.to_bytes(), b"hello");
+
+ // Test a slice containing lots of nul bytes
+ let b = b"\0\0\0\0";
+ let r = CStr::from_bytes_until_nul(&b[..]).unwrap();
+ assert_eq!(r.to_bytes(), b"");
+}
+
+#[test]
+fn into_boxed() {
+ let orig: &[u8] = b"Hello, world!\0";
+ let cstr = CStr::from_bytes_with_nul(orig).unwrap();
+ let boxed: Box<CStr> = Box::from(cstr);
+ let cstring = cstr.to_owned().into_boxed_c_str().into_c_string();
+ assert_eq!(cstr, &*boxed);
+ assert_eq!(&*boxed, &*cstring);
+ assert_eq!(&*cstring, cstr);
+}
+
+#[test]
+fn boxed_default() {
+ let boxed = <Box<CStr>>::default();
+ assert_eq!(boxed.to_bytes_with_nul(), &[0]);
+}
+
+#[test]
+fn test_c_str_clone_into() {
+ let mut c_string = CString::new("lorem").unwrap();
+ let c_ptr = c_string.as_ptr();
+ let c_str = CStr::from_bytes_with_nul(b"ipsum\0").unwrap();
+ c_str.clone_into(&mut c_string);
+ assert_eq!(c_str, c_string.as_c_str());
+ // The exact same size shouldn't have needed to move its allocation
+ assert_eq!(c_ptr, c_string.as_ptr());
+}
+
+#[test]
+fn into_rc() {
+ let orig: &[u8] = b"Hello, world!\0";
+ let cstr = CStr::from_bytes_with_nul(orig).unwrap();
+ let rc: Rc<CStr> = Rc::from(cstr);
+ let arc: Arc<CStr> = Arc::from(cstr);
+
+ assert_eq!(&*rc, cstr);
+ assert_eq!(&*arc, cstr);
+
+ let rc2: Rc<CStr> = Rc::from(cstr.to_owned());
+ let arc2: Arc<CStr> = Arc::from(cstr.to_owned());
+
+ assert_eq!(&*rc2, cstr);
+ assert_eq!(&*arc2, cstr);
+}
+
+#[test]
+fn cstr_const_constructor() {
+ const CSTR: &CStr = unsafe { CStr::from_bytes_with_nul_unchecked(b"Hello, world!\0") };
+
+ assert_eq!(CSTR.to_str().unwrap(), "Hello, world!");
+}
+
+#[test]
+fn cstr_index_from() {
+ let original = b"Hello, world!\0";
+ let cstr = CStr::from_bytes_with_nul(original).unwrap();
+ let result = CStr::from_bytes_with_nul(&original[7..]).unwrap();
+
+ assert_eq!(&cstr[7..], result);
+}
+
+#[test]
+#[should_panic]
+fn cstr_index_from_empty() {
+ let original = b"Hello, world!\0";
+ let cstr = CStr::from_bytes_with_nul(original).unwrap();
+ let _ = &cstr[original.len()..];
+}
+
+#[test]
+fn c_string_from_empty_string() {
+ let original = "";
+ let cstring = CString::new(original).unwrap();
+ assert_eq!(original.as_bytes(), cstring.as_bytes());
+ assert_eq!([b'\0'], cstring.as_bytes_with_nul());
+}
+
+#[test]
+fn c_str_from_empty_string() {
+ let original = b"\0";
+ let cstr = CStr::from_bytes_with_nul(original).unwrap();
+ assert_eq!([] as [u8; 0], cstr.to_bytes());
+ assert_eq!([b'\0'], cstr.to_bytes_with_nul());
+}
diff --git a/library/alloc/src/ffi/mod.rs b/library/alloc/src/ffi/mod.rs
new file mode 100644
index 000000000..e8530fbc1
--- /dev/null
+++ b/library/alloc/src/ffi/mod.rs
@@ -0,0 +1,88 @@
+//! Utilities related to FFI bindings.
+//!
+//! This module provides utilities to handle data across non-Rust
+//! interfaces, like other programming languages and the underlying
+//! operating system. It is mainly of use for FFI (Foreign Function
+//! Interface) bindings and code that needs to exchange C-like strings
+//! with other languages.
+//!
+//! # Overview
+//!
+//! Rust represents owned strings with the [`String`] type, and
+//! borrowed slices of strings with the [`str`] primitive. Both are
+//! always in UTF-8 encoding, and may contain nul bytes in the middle,
+//! i.e., if you look at the bytes that make up the string, there may
+//! be a `\0` among them. Both `String` and `str` store their length
+//! explicitly; there are no nul terminators at the end of strings
+//! like in C.
+//!
+//! C strings are different from Rust strings:
+//!
+//! * **Encodings** - Rust strings are UTF-8, but C strings may use
+//! other encodings. If you are using a string from C, you should
+//! check its encoding explicitly, rather than just assuming that it
+//! is UTF-8 like you can do in Rust.
+//!
+//! * **Character size** - C strings may use `char` or `wchar_t`-sized
+//! characters; please **note** that C's `char` is different from Rust's.
+//! The C standard leaves the actual sizes of those types open to
+//! interpretation, but defines different APIs for strings made up of
+//! each character type. Rust strings are always UTF-8, so different
+//! Unicode characters will be encoded in a variable number of bytes
+//! each. The Rust type [`char`] represents a '[Unicode scalar
+//! value]', which is similar to, but not the same as, a '[Unicode
+//! code point]'.
+//!
+//! * **Nul terminators and implicit string lengths** - Often, C
+//! strings are nul-terminated, i.e., they have a `\0` character at the
+//! end. The length of a string buffer is not stored, but has to be
+//! calculated; to compute the length of a string, C code must
+//! manually call a function like `strlen()` for `char`-based strings,
+//! or `wcslen()` for `wchar_t`-based ones. Those functions return
+//! the number of characters in the string excluding the nul
+//! terminator, so the buffer length is really `len+1` characters.
+//! Rust strings don't have a nul terminator; their length is always
+//! stored and does not need to be calculated. While in Rust
+//! accessing a string's length is an *O*(1) operation (because the
+//! length is stored), in C it is an *O*(*n*) operation because the
+//! length needs to be computed by scanning the string for the nul
+//! terminator.
+//!
+//! * **Internal nul characters** - When C strings have a nul
+//! terminator character, this usually means that they cannot have nul
+//! characters in the middle — a nul character would essentially
+//! truncate the string. Rust strings *can* have nul characters in
+//! the middle, because nul does not have to mark the end of the
+//! string in Rust.
+//!
+//! # Representations of non-Rust strings
+//!
+//! [`CString`] and [`CStr`] are useful when you need to transfer
+//! UTF-8 strings to and from languages with a C ABI, like Python.
+//!
+//! * **From Rust to C:** [`CString`] represents an owned, C-friendly
+//! string: it is nul-terminated, and has no internal nul characters.
+//! Rust code can create a [`CString`] out of a normal string (provided
+//! that the string doesn't have nul characters in the middle), and
+//! then use a variety of methods to obtain a raw <code>\*mut [u8]</code> that can
+//! then be passed as an argument to functions which use the C
+//! conventions for strings.
+//!
+//! * **From C to Rust:** [`CStr`] represents a borrowed C string; it
+//! is what you would use to wrap a raw <code>\*const [u8]</code> that you got from
+//! a C function. A [`CStr`] is guaranteed to be a nul-terminated array
+//! of bytes. Once you have a [`CStr`], you can convert it to a Rust
+//! <code>&[str]</code> if it's valid UTF-8, or lossily convert it by adding
+//! replacement characters.
+//!
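+//! As a brief sketch of both directions, using a hypothetical `extern` function
+//! purely for illustration:
+//!
+//! ```ignore (extern-declaration)
+//! use std::ffi::{CStr, CString};
+//! use std::os::raw::c_char;
+//!
+//! extern "C" {
+//!     // Hypothetical C function: takes a nul-terminated string and returns
+//!     // another nul-terminated string.
+//!     fn greet(name: *const c_char) -> *const c_char;
+//! }
+//!
+//! let name = CString::new("Rust").expect("CString::new failed");
+//! unsafe {
+//!     // From Rust to C: hand out a `*const c_char` backed by the `CString`.
+//!     let reply = greet(name.as_ptr());
+//!     // From C to Rust: borrow the returned C string as a `CStr`, then
+//!     // convert it (lossily, if needed) into a Rust string.
+//!     println!("{}", CStr::from_ptr(reply).to_string_lossy());
+//! }
+//! ```
+//!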
+//! [`String`]: crate::string::String
+//! [`CStr`]: core::ffi::CStr
+
+#![stable(feature = "alloc_ffi", since = "1.64.0")]
+
+#[stable(feature = "alloc_c_string", since = "1.64.0")]
+pub use self::c_str::FromVecWithNulError;
+#[stable(feature = "alloc_c_string", since = "1.64.0")]
+pub use self::c_str::{CString, IntoStringError, NulError};
+
+mod c_str;
diff --git a/library/alloc/src/fmt.rs b/library/alloc/src/fmt.rs
new file mode 100644
index 000000000..ed398b566
--- /dev/null
+++ b/library/alloc/src/fmt.rs
@@ -0,0 +1,617 @@
+//! Utilities for formatting and printing `String`s.
+//!
+//! This module contains the runtime support for the [`format!`] syntax extension.
+//! This macro is implemented in the compiler to emit calls to this module in
+//! order to format arguments at runtime into strings.
+//!
+//! # Usage
+//!
+//! The [`format!`] macro is intended to be familiar to those coming from C's
+//! `printf`/`fprintf` functions or Python's `str.format` function.
+//!
+//! Some examples of the [`format!`] extension are:
+//!
+//! ```
+//! format!("Hello"); // => "Hello"
+//! format!("Hello, {}!", "world"); // => "Hello, world!"
+//! format!("The number is {}", 1); // => "The number is 1"
+//! format!("{:?}", (3, 4)); // => "(3, 4)"
+//! format!("{value}", value=4); // => "4"
+//! let people = "Rustaceans";
+//! format!("Hello {people}!"); // => "Hello Rustaceans!"
+//! format!("{} {}", 1, 2); // => "1 2"
+//! format!("{:04}", 42); // => "0042" with leading zeros
+//! format!("{:#?}", (100, 200)); // => "(
+//! // 100,
+//! // 200,
+//! // )"
+//! ```
+//!
+//! From these, you can see that the first argument is a format string. It is
+//! required by the compiler for this to be a string literal; it cannot be a
+//! variable passed in (in order to perform validity checking). The compiler
+//! will then parse the format string and determine if the list of arguments
+//! provided is suitable to pass to this format string.
+//!
+//! To convert a single value to a string, use the [`to_string`] method. This
+//! will use the [`Display`] formatting trait.
+//!
+//! ## Positional parameters
+//!
+//! Each formatting argument is allowed to specify which value argument it's
+//! referencing, and if omitted it is assumed to be "the next argument". For
+//! example, the format string `{} {} {}` would take three parameters, and they
+//! would be formatted in the same order as they're given. The format string
+//! `{2} {1} {0}`, however, would format arguments in reverse order.
+//!
+//! Things can get a little tricky once you start intermingling the two types of
+//! positional specifiers. The "next argument" specifier can be thought of as an
+//! iterator over the arguments. Each time a "next argument" specifier is seen,
+//! the iterator advances. This leads to behavior like this:
+//!
+//! ```
+//! format!("{1} {} {0} {}", 1, 2); // => "2 1 1 2"
+//! ```
+//!
+//! The internal iterator over the arguments has not been advanced by the time
+//! the first `{}` is seen, so it prints the first argument. Then upon reaching
+//! the second `{}`, the iterator has advanced forward to the second argument.
+//! Essentially, parameters that explicitly name their argument do not affect
+//! parameters that do not name an argument in terms of positional specifiers.
+//!
+//! A format string is required to use all of its arguments, otherwise it is a
+//! compile-time error. You may refer to the same argument more than once in the
+//! format string.
+//!
+//! ## Named parameters
+//!
+//! Rust itself does not have a Python-like equivalent of named parameters to a
+//! function, but the [`format!`] macro is a syntax extension that allows it to
+//! leverage named parameters. Named parameters are listed at the end of the
+//! argument list and have the syntax:
+//!
+//! ```text
+//! identifier '=' expression
+//! ```
+//!
+//! For example, the following [`format!`] expressions all use named arguments:
+//!
+//! ```
+//! format!("{argument}", argument = "test"); // => "test"
+//! format!("{name} {}", 1, name = 2); // => "2 1"
+//! format!("{a} {c} {b}", a="a", b='b', c=3); // => "a 3 b"
+//! ```
+//!
+//! If a named parameter does not appear in the argument list, `format!` will
+//! reference a variable with that name in the current scope.
+//!
+//! ```
+//! let argument = 2 + 2;
+//! format!("{argument}"); // => "4"
+//!
+//! fn make_string(a: u32, b: &str) -> String {
+//! format!("{b} {a}")
+//! }
+//! make_string(927, "label"); // => "label 927"
+//! ```
+//!
+//! It is not valid to put positional parameters (those without names) after
+//! arguments that have names. Like with positional parameters, it is not
+//! valid to provide named parameters that are unused by the format string.
+//!
+//! # Formatting Parameters
+//!
+//! Each argument being formatted can be transformed by a number of formatting
+//! parameters (corresponding to `format_spec` in [the syntax](#syntax)). These
+//! parameters affect the string representation of what's being formatted.
+//!
+//! ## Width
+//!
+//! ```
+//! // All of these print "Hello x    !"
+//! println!("Hello {:5}!", "x");
+//! println!("Hello {:1$}!", "x", 5);
+//! println!("Hello {1:0$}!", 5, "x");
+//! println!("Hello {:width$}!", "x", width = 5);
+//! let width = 5;
+//! println!("Hello {:width$}!", "x");
+//! ```
+//!
+//! This is a parameter for the "minimum width" that the format should take up.
+//! If the value's string does not fill up this many characters, then the
+//! padding specified by fill/alignment will be used to take up the required
+//! space (see below).
+//!
+//! The value for the width can also be provided as a [`usize`] in the list of
+//! parameters by adding a postfix `$`, indicating that the second argument is
+//! a [`usize`] specifying the width.
+//!
+//! Referring to an argument with the dollar syntax does not affect the "next
+//! argument" counter, so it's usually a good idea to refer to arguments by
+//! position, or use named arguments.
+//!
+//! ## Fill/Alignment
+//!
+//! ```
+//! assert_eq!(format!("Hello {:<5}!", "x"), "Hello x !");
+//! assert_eq!(format!("Hello {:-<5}!", "x"), "Hello x----!");
+//! assert_eq!(format!("Hello {:^5}!", "x"), "Hello x !");
+//! assert_eq!(format!("Hello {:>5}!", "x"), "Hello x!");
+//! ```
+//!
+//! The optional fill character and alignment are normally provided in conjunction with the
+//! [`width`](#width) parameter. They must be defined before `width`, right after the `:`.
+//! This indicates that if the value being formatted is smaller than
+//! `width` some extra characters will be printed around it.
+//! Filling comes in the following variants for different alignments:
+//!
+//! * `[fill]<` - the argument is left-aligned in `width` columns
+//! * `[fill]^` - the argument is center-aligned in `width` columns
+//! * `[fill]>` - the argument is right-aligned in `width` columns
+//!
+//! The default [fill/alignment](#fillalignment) for non-numerics is a space and
+//! left-aligned. The
+//! default for numeric formatters is also a space character but with right-alignment. If
+//! the `0` flag (see below) is specified for numerics, then the implicit fill character is
+//! `0`.
+//!
+//! Note that alignment might not be implemented by some types. In particular, it
+//! is not generally implemented for the `Debug` trait. A good way to ensure
+//! padding is applied is to format your input, then pad this resulting string
+//! to obtain your output:
+//!
+//! ```
+//! println!("Hello {:^15}!", format!("{:?}", Some("hi"))); // => "Hello Some("hi") !"
+//! ```
+//!
+//! ## Sign/`#`/`0`
+//!
+//! ```
+//! assert_eq!(format!("Hello {:+}!", 5), "Hello +5!");
+//! assert_eq!(format!("{:#x}!", 27), "0x1b!");
+//! assert_eq!(format!("Hello {:05}!", 5), "Hello 00005!");
+//! assert_eq!(format!("Hello {:05}!", -5), "Hello -0005!");
+//! assert_eq!(format!("{:#010x}!", 27), "0x0000001b!");
+//! ```
+//!
+//! These are all flags altering the behavior of the formatter.
+//!
+//! * `+` - This is intended for numeric types and indicates that the sign
+//! should always be printed. Positive signs are never printed by
+//! default, and the negative sign is only printed by default for signed values.
+//! This flag indicates that the correct sign (`+` or `-`) should always be printed.
+//! * `-` - Currently not used
+//! * `#` - This flag indicates that the "alternate" form of printing should
+//! be used. The alternate forms are:
+//! * `#?` - pretty-print the [`Debug`] formatting (adds linebreaks and indentation)
+//! * `#x` - precedes the argument with a `0x`
+//! * `#X` - precedes the argument with a `0x`
+//! * `#b` - precedes the argument with a `0b`
+//! * `#o` - precedes the argument with a `0o`
+//! * `0` - This is used to indicate for integer formats that the padding to `width` should
+//! both be done with a `0` character as well as be sign-aware. A format
+//! like `{:08}` would yield `00000001` for the integer `1`, while the
+//! same format would yield `-0000001` for the integer `-1`. Notice that
+//! the negative version has one fewer zero than the positive version.
+//! Note that padding zeros are always placed after the sign (if any)
+//! and before the digits. When used together with the `#` flag, a similar
+//! rule applies: padding zeros are inserted after the prefix but before
+//! the digits. The prefix is included in the total width.
+//!
+//! ## Precision
+//!
+//! For non-numeric types, this can be considered a "maximum width". If the resulting string is
+//! longer than this width, then it is truncated down to this many characters and that truncated
+//! value is emitted with proper `fill`, `alignment` and `width` if those parameters are set.
+//!
+//! For integral types, this is ignored.
+//!
+//! For floating-point types, this indicates how many digits after the decimal point should be
+//! printed.
+//!
+//! There are three possible ways to specify the desired `precision`:
+//!
+//! 1. An integer `.N`:
+//!
+//! the integer `N` itself is the precision.
+//!
+//! 2. An integer or name followed by dollar sign `.N$`:
+//!
+//! use format *argument* `N` (which must be a `usize`) as the precision.
+//!
+//! 3. An asterisk `.*`:
+//!
+//! `.*` means that this `{...}` is associated with *two* format inputs rather than one:
+//! - If a format string in the fashion of `{:<spec>.*}` is used, then the first input holds
+//! the `usize` precision, and the second holds the value to print.
+//! - If a format string in the fashion of `{<arg>:<spec>.*}` is used, then the `<arg>` part
+//! refers to the value to print, and the `precision` is taken like it was specified with an
+//! omitted positional parameter (`{}` instead of `{<arg>:}`).
+//!
+//! For example, the following calls all print the same thing `Hello x is 0.01000`:
+//!
+//! ```
+//! // Hello {arg 0 ("x")} is {arg 1 (0.01) with precision specified inline (5)}
+//! println!("Hello {0} is {1:.5}", "x", 0.01);
+//!
+//! // Hello {arg 1 ("x")} is {arg 2 (0.01) with precision specified in arg 0 (5)}
+//! println!("Hello {1} is {2:.0$}", 5, "x", 0.01);
+//!
+//! // Hello {arg 0 ("x")} is {arg 2 (0.01) with precision specified in arg 1 (5)}
+//! println!("Hello {0} is {2:.1$}", "x", 5, 0.01);
+//!
+//! // Hello {next arg -> arg 0 ("x")} is {second of next two args -> arg 2 (0.01) with precision
+//! // specified in first of next two args -> arg 1 (5)}
+//! println!("Hello {} is {:.*}", "x", 5, 0.01);
+//!
+//! // Hello {arg 1 ("x")} is {arg 2 (0.01) with precision
+//! // specified in next arg -> arg 0 (5)}
+//! println!("Hello {1} is {2:.*}", 5, "x", 0.01);
+//!
+//! // Hello {next arg -> arg 0 ("x")} is {arg 2 (0.01) with precision
+//! // specified in next arg -> arg 1 (5)}
+//! println!("Hello {} is {2:.*}", "x", 5, 0.01);
+//!
+//! // Hello {next arg -> arg 0 ("x")} is {arg "number" (0.01) with precision specified
+//! // in arg "prec" (5)}
+//! println!("Hello {} is {number:.prec$}", "x", prec = 5, number = 0.01);
+//! ```
+//!
+//! While these:
+//!
+//! ```
+//! println!("{}, `{name:.*}` has 3 fractional digits", "Hello", 3, name=1234.56);
+//! println!("{}, `{name:.*}` has 3 characters", "Hello", 3, name="1234.56");
+//! println!("{}, `{name:>8.*}` has 3 right-aligned characters", "Hello", 3, name="1234.56");
+//! ```
+//!
+//! print three significantly different things:
+//!
+//! ```text
+//! Hello, `1234.560` has 3 fractional digits
+//! Hello, `123` has 3 characters
+//! Hello, `     123` has 3 right-aligned characters
+//! ```
+//!
+//! ## Localization
+//!
+//! In some programming languages, the behavior of string formatting functions
+//! depends on the operating system's locale setting. The format functions
+//! provided by Rust's standard library do not have any concept of locale and
+//! will produce the same results on all systems regardless of user
+//! configuration.
+//!
+//! For example, the following code will always print `1.5` even if the system
+//! locale uses a decimal separator other than a dot.
+//!
+//! ```
+//! println!("The value is {}", 1.5);
+//! ```
+//!
+//! # Escaping
+//!
+//! The literal characters `{` and `}` may be included in a string by preceding
+//! them with the same character. For example, the `{` character is escaped with
+//! `{{` and the `}` character is escaped with `}}`.
+//!
+//! ```
+//! assert_eq!(format!("Hello {{}}"), "Hello {}");
+//! assert_eq!(format!("{{ Hello"), "{ Hello");
+//! ```
+//!
+//! # Syntax
+//!
+//! To summarize, here you can find the full grammar of format strings.
+//! The syntax for the formatting language used is drawn from other languages,
+//! so it should not be too alien. Arguments are formatted with Python-like
+//! syntax, meaning that arguments are surrounded by `{}` instead of the C-like
+//! `%`. The actual grammar for the formatting syntax is:
+//!
+//! ```text
+//! format_string := text [ maybe_format text ] *
+//! maybe_format := '{' '{' | '}' '}' | format
+//! format := '{' [ argument ] [ ':' format_spec ] [ ws ] * '}'
+//! argument := integer | identifier
+//!
+//! format_spec := [[fill]align][sign]['#']['0'][width]['.' precision]type
+//! fill := character
+//! align := '<' | '^' | '>'
+//! sign := '+' | '-'
+//! width := count
+//! precision := count | '*'
+//! type := '' | '?' | 'x?' | 'X?' | identifier
+//! count := parameter | integer
+//! parameter := argument '$'
+//! ```
+//! In the above grammar,
+//! - `text` must not contain any `'{'` or `'}'` characters,
+//! - `ws` is any character for which [`char::is_whitespace`] returns `true`, has no semantic
+//! meaning and is completely optional,
+//! - `integer` is a decimal integer that may contain leading zeroes and
+//! - `identifier` is an `IDENTIFIER_OR_KEYWORD` (not an `IDENTIFIER`) as defined by the [Rust language reference](https://doc.rust-lang.org/reference/identifiers.html).
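+//!
+//! As a small illustration of how the pieces of `format_spec` combine (fill,
+//! alignment, sign, the `0` flag, width and precision), the following
+//! assertions hold:
+//!
+//! ```
+//! // fill = '*', align = '>', sign = '+', width = 8
+//! assert_eq!(format!("{:*>+8}", 42), "*****+42");
+//! // the '0' flag with width = 8 and precision = 3
+//! assert_eq!(format!("{:08.3}", 3.14159), "0003.142");
+//! ```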
+//!
+//! # Formatting traits
+//!
+//! When requesting that an argument be formatted with a particular type, you
+//! are actually requesting that an argument ascribes to a particular trait.
+//! This allows multiple actual types to be formatted via `{:x}` (like [`i8`] as
+//! well as [`isize`]). The current mapping of types to traits is:
+//!
+//! * *nothing* ⇒ [`Display`]
+//! * `?` ⇒ [`Debug`]
+//! * `x?` ⇒ [`Debug`] with lower-case hexadecimal integers
+//! * `X?` ⇒ [`Debug`] with upper-case hexadecimal integers
+//! * `o` ⇒ [`Octal`]
+//! * `x` ⇒ [`LowerHex`]
+//! * `X` ⇒ [`UpperHex`]
+//! * `p` ⇒ [`Pointer`]
+//! * `b` ⇒ [`Binary`]
+//! * `e` ⇒ [`LowerExp`]
+//! * `E` ⇒ [`UpperExp`]
+//!
+//! What this means is that any type of argument which implements the
+//! [`fmt::Binary`][`Binary`] trait can then be formatted with `{:b}`. Implementations
+//! are provided for these traits for a number of primitive types by the
+//! standard library as well. If no format is specified (as in `{}` or `{:6}`),
+//! then the format trait used is the [`Display`] trait.
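+//!
+//! For instance, the integer and float formatting traits listed above can be
+//! exercised directly:
+//!
+//! ```
+//! assert_eq!(format!("{:b}", 10), "1010");      // Binary
+//! assert_eq!(format!("{:o}", 10), "12");        // Octal
+//! assert_eq!(format!("{:x}", 255), "ff");       // LowerHex
+//! assert_eq!(format!("{:X}", 255), "FF");       // UpperHex
+//! assert_eq!(format!("{:e}", 1500.0), "1.5e3"); // LowerExp
+//! ```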
+//!
+//! When implementing a format trait for your own type, you will have to
+//! implement a method of the signature:
+//!
+//! ```
+//! # #![allow(dead_code)]
+//! # use std::fmt;
+//! # struct Foo; // our custom type
+//! # impl fmt::Display for Foo {
+//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+//! # write!(f, "testing, testing")
+//! # } }
+//! ```
+//!
+//! Your type will be passed as `self` by-reference, and then the function
+//! should emit output into the Formatter `f` which implements `fmt::Write`. It is up to each
+//! format trait implementation to correctly adhere to the requested formatting parameters.
+//! The values of these parameters can be accessed with methods of the
+//! [`Formatter`] struct. In order to help with this, the [`Formatter`] struct also
+//! provides some helper methods.
+//!
+//! Additionally, the return value of this function is [`fmt::Result`] which is a
+//! type alias of <code>[Result]<(), [std::fmt::Error]></code>. Formatting implementations
+//! should ensure that they propagate errors from the [`Formatter`] (e.g., when
+//! calling [`write!`]). However, they should never return errors spuriously. That
+//! is, a formatting implementation must return an error if, and only if, the
+//! passed-in [`Formatter`] returns an error. This is because, contrary to what
+//! the function signature might suggest, string formatting is an infallible
+//! operation. This function only returns a result because writing to the
+//! underlying stream might fail and it must provide a way to propagate the fact
+//! that an error has occurred back up the stack.
+//!
+//! An example of implementing the formatting traits would look
+//! like:
+//!
+//! ```
+//! use std::fmt;
+//!
+//! #[derive(Debug)]
+//! struct Vector2D {
+//! x: isize,
+//! y: isize,
+//! }
+//!
+//! impl fmt::Display for Vector2D {
+//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+//! // The `f` value implements the `Write` trait, which is what the
+//! // write! macro is expecting. Note that this formatting ignores the
+//! // various flags provided to format strings.
+//! write!(f, "({}, {})", self.x, self.y)
+//! }
+//! }
+//!
+//! // Different traits allow different forms of output of a type. The meaning
+//! // of this format is to print the magnitude of a vector.
+//! impl fmt::Binary for Vector2D {
+//! fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
+//! let magnitude = (self.x * self.x + self.y * self.y) as f64;
+//! let magnitude = magnitude.sqrt();
+//!
+//! // Respect the formatting flags by using the helper method
+//! // `pad_integral` on the Formatter object. See the method
+//! // documentation for details, and the function `pad` can be used
+//! // to pad strings.
+//! let decimals = f.precision().unwrap_or(3);
+//! let string = format!("{:.*}", decimals, magnitude);
+//! f.pad_integral(true, "", &string)
+//! }
+//! }
+//!
+//! fn main() {
+//! let myvector = Vector2D { x: 3, y: 4 };
+//!
+//! println!("{myvector}"); // => "(3, 4)"
+//! println!("{myvector:?}"); // => "Vector2D {x: 3, y:4}"
+//! println!("{myvector:10.3b}"); // => " 5.000"
+//! }
+//! ```
+//!
+//! ### `fmt::Display` vs `fmt::Debug`
+//!
+//! These two formatting traits have distinct purposes:
+//!
+//! - [`fmt::Display`][`Display`] implementations assert that the type can be faithfully
+//! represented as a UTF-8 string at all times. It is **not** expected that
+//! all types implement the [`Display`] trait.
+//! - [`fmt::Debug`][`Debug`] implementations should be implemented for **all** public types.
+//! Output will typically represent the internal state as faithfully as possible.
+//! The purpose of the [`Debug`] trait is to facilitate debugging Rust code. In
+//! most cases, using `#[derive(Debug)]` is sufficient and recommended.
+//!
+//! Some examples of the output from both traits:
+//!
+//! ```
+//! assert_eq!(format!("{} {:?}", 3, 4), "3 4");
+//! assert_eq!(format!("{} {:?}", 'a', 'b'), "a 'b'");
+//! assert_eq!(format!("{} {:?}", "foo\n", "bar\n"), "foo\n \"bar\\n\"");
+//! ```
+//!
+//! # Related macros
+//!
+//! There are a number of related macros in the [`format!`] family. The ones that
+//! are currently implemented are:
+//!
+//! ```ignore (only-for-syntax-highlight)
+//! format! // described above
+//! write! // first argument is either a &mut io::Write or a &mut fmt::Write, the destination
+//! writeln! // same as write but appends a newline
+//! print! // the format string is printed to the standard output
+//! println! // same as print but appends a newline
+//! eprint! // the format string is printed to the standard error
+//! eprintln! // same as eprint but appends a newline
+//! format_args! // described below.
+//! ```
+//!
+//! ### `write!`
+//!
+//! [`write!`] and [`writeln!`] are two macros which are used to emit the format string
+//! to a specified stream. This is used to prevent intermediate allocations of
+//! format strings and instead directly write the output. Under the hood, these
+//! macros are actually invoking the [`write_fmt`] method defined on the
+//! [`std::io::Write`] and [`std::fmt::Write`] traits. Example usage is:
+//!
+//! ```
+//! # #![allow(unused_must_use)]
+//! use std::io::Write;
+//! let mut w = Vec::new();
+//! write!(&mut w, "Hello {}!", "world");
+//! ```
+//!
+//! ### `print!`
+//!
+//! This and [`println!`] emit their output to stdout. Similarly to the [`write!`]
+//! macro, the goal of these macros is to avoid intermediate allocations when
+//! printing output. Example usage is:
+//!
+//! ```
+//! print!("Hello {}!", "world");
+//! println!("I have a newline {}", "character at the end");
+//! ```
+//!
+//! ### `eprint!`
+//!
+//! The [`eprint!`] and [`eprintln!`] macros are identical to
+//! [`print!`] and [`println!`], respectively, except they emit their
+//! output to stderr.
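+//!
+//! Example usage is:
+//!
+//! ```
+//! eprint!("Error: could not read ");
+//! eprintln!("{}", "the configuration file");
+//! ```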
+//!
+//! ### `format_args!`
+//!
+//! [`format_args!`] is a curious macro used to safely pass around
+//! an opaque object describing the format string. This object
+//! does not require any heap allocations to create, and it only
+//! references information on the stack. Under the hood, all of
+//! the related macros are implemented in terms of this. First
+//! off, some example usage is:
+//!
+//! ```
+//! # #![allow(unused_must_use)]
+//! use std::fmt;
+//! use std::io::{self, Write};
+//!
+//! let mut some_writer = io::stdout();
+//! write!(&mut some_writer, "{}", format_args!("print with a {}", "macro"));
+//!
+//! fn my_fmt_fn(args: fmt::Arguments) {
+//! write!(&mut io::stdout(), "{}", args);
+//! }
+//! my_fmt_fn(format_args!(", or a {} too", "function"));
+//! ```
+//!
+//! The result of the [`format_args!`] macro is a value of type [`fmt::Arguments`].
+//! This structure can then be passed to the [`write`] and [`format`] functions
+//! inside this module in order to process the format string.
+//! The goal of this macro is to even further prevent intermediate allocations
+//! when dealing with formatting strings.
+//!
+//! For example, a logging library could use the standard formatting syntax, but
+//! it would internally pass around this structure until it has been determined
+//! where the output should go.
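+//!
+//! A minimal sketch of that pattern might look like the following, where
+//! `log_to` is a hypothetical helper rather than an existing API:
+//!
+//! ```
+//! use std::fmt;
+//! use std::io::{self, Write};
+//!
+//! // The `Arguments` value carries no heap allocation; only the final
+//! // destination decides how the text is written out.
+//! fn log_to(dest: &mut dyn io::Write, args: fmt::Arguments<'_>) -> io::Result<()> {
+//!     dest.write_fmt(args)
+//! }
+//!
+//! let mut sink: Vec<u8> = Vec::new();
+//! log_to(&mut sink, format_args!("{} + {} = {}", 1, 2, 1 + 2)).unwrap();
+//! assert_eq!(sink, b"1 + 2 = 3".to_vec());
+//! ```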
+//!
+//! [`fmt::Result`]: Result "fmt::Result"
+//! [Result]: core::result::Result "std::result::Result"
+//! [std::fmt::Error]: Error "fmt::Error"
+//! [`write`]: write() "fmt::write"
+//! [`to_string`]: crate::string::ToString::to_string "ToString::to_string"
+//! [`write_fmt`]: ../../std/io/trait.Write.html#method.write_fmt
+//! [`std::io::Write`]: ../../std/io/trait.Write.html
+//! [`std::fmt::Write`]: ../../std/fmt/trait.Write.html
+//! [`print!`]: ../../std/macro.print.html "print!"
+//! [`println!`]: ../../std/macro.println.html "println!"
+//! [`eprint!`]: ../../std/macro.eprint.html "eprint!"
+//! [`eprintln!`]: ../../std/macro.eprintln.html "eprintln!"
+//! [`format_args!`]: ../../std/macro.format_args.html "format_args!"
+//! [`fmt::Arguments`]: Arguments "fmt::Arguments"
+//! [`format`]: format() "fmt::format"
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+#[unstable(feature = "fmt_internals", issue = "none")]
+pub use core::fmt::rt;
+#[stable(feature = "fmt_flags_align", since = "1.28.0")]
+pub use core::fmt::Alignment;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::fmt::Error;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::fmt::{write, ArgumentV1, Arguments};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::fmt::{Binary, Octal};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::fmt::{Debug, Display};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::fmt::{DebugList, DebugMap, DebugSet, DebugStruct, DebugTuple};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::fmt::{Formatter, Result, Write};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::fmt::{LowerExp, UpperExp};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::fmt::{LowerHex, Pointer, UpperHex};
+
+#[cfg(not(no_global_oom_handling))]
+use crate::string;
+
+/// The `format` function takes an [`Arguments`] struct and returns the resulting
+/// formatted string.
+///
+/// The [`Arguments`] instance can be created with the [`format_args!`] macro.
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// use std::fmt;
+///
+/// let s = fmt::format(format_args!("Hello, {}!", "world"));
+/// assert_eq!(s, "Hello, world!");
+/// ```
+///
+/// Please note that using [`format!`] might be preferable.
+/// Example:
+///
+/// ```
+/// let s = format!("Hello, {}!", "world");
+/// assert_eq!(s, "Hello, world!");
+/// ```
+///
+/// [`format_args!`]: core::format_args
+/// [`format!`]: crate::format
+#[cfg(not(no_global_oom_handling))]
+#[must_use]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[inline]
+pub fn format(args: Arguments<'_>) -> string::String {
+ fn format_inner(args: Arguments<'_>) -> string::String {
+ let capacity = args.estimated_capacity();
+ let mut output = string::String::with_capacity(capacity);
+ output.write_fmt(args).expect("a formatting trait implementation returned an error");
+ output
+ }
+
+ args.as_str().map_or_else(|| format_inner(args), crate::borrow::ToOwned::to_owned)
+}
diff --git a/library/alloc/src/lib.rs b/library/alloc/src/lib.rs
new file mode 100644
index 000000000..8b6f40548
--- /dev/null
+++ b/library/alloc/src/lib.rs
@@ -0,0 +1,240 @@
+//! # The Rust core allocation and collections library
+//!
+//! This library provides smart pointers and collections for managing
+//! heap-allocated values.
+//!
+//! This library, like libcore, normally doesn’t need to be used directly
+//! since its contents are re-exported in the [`std` crate](../std/index.html).
+//! Crates that use the `#![no_std]` attribute however will typically
+//! not depend on `std`, so they’d use this crate instead.
+//!
+//! ## Boxed values
+//!
+//! The [`Box`] type is a smart pointer type. There can only be one owner of a
+//! [`Box`], and the owner can decide to mutate the contents, which live on the
+//! heap.
+//!
+//! This type can be sent among threads efficiently as the size of a `Box` value
+//! is the same as that of a pointer. Tree-like data structures are often built
+//! with boxes because each node often has only one owner, the parent.
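+//!
+//! A brief sketch of single ownership of a heap allocation:
+//!
+//! ```
+//! let val = Box::new(5);      // the integer is stored on the heap
+//! let forwarded = val;        // ownership moves; only the pointer is copied
+//! assert_eq!(*forwarded, 5);  // `Box` dereferences to its contents
+//! ```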
+//!
+//! ## Reference counted pointers
+//!
+//! The [`Rc`] type is a non-threadsafe reference-counted pointer type intended
+//! for sharing memory within a thread. An [`Rc`] pointer wraps a type, `T`, and
+//! only allows access to `&T`, a shared reference.
+//!
+//! This type is useful when inherited mutability (such as using [`Box`]) is too
+//! constraining for an application, and is often paired with the [`Cell`] or
+//! [`RefCell`] types in order to allow mutation.
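+//!
+//! A minimal sketch of sharing a mutable value within one thread, pairing
+//! [`Rc`] with [`RefCell`]:
+//!
+//! ```
+//! use std::cell::RefCell;
+//! use std::rc::Rc;
+//!
+//! let shared = Rc::new(RefCell::new(0));
+//! let alias = Rc::clone(&shared);   // bumps the reference count, no deep copy
+//! *alias.borrow_mut() += 1;         // mutate through the shared handle
+//! assert_eq!(*shared.borrow(), 1);  // the change is visible through every handle
+//! ```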
+//!
+//! ## Atomically reference counted pointers
+//!
+//! The [`Arc`] type is the threadsafe equivalent of the [`Rc`] type. It
+//! provides all the same functionality of [`Rc`], except it requires that the
+//! contained type `T` is shareable. Additionally, [`Arc<T>`][`Arc`] is itself
+//! sendable while [`Rc<T>`][`Rc`] is not.
+//!
+//! This type allows for shared access to the contained data, and is often
+//! paired with synchronization primitives such as mutexes to allow mutation of
+//! shared resources.
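+//!
+//! A minimal sketch of the `Arc` plus mutex pattern described above:
+//!
+//! ```
+//! use std::sync::{Arc, Mutex};
+//! use std::thread;
+//!
+//! let data = Arc::new(Mutex::new(0));
+//! let handles: Vec<_> = (0..4)
+//!     .map(|_| {
+//!         let data = Arc::clone(&data);
+//!         thread::spawn(move || *data.lock().unwrap() += 1)
+//!     })
+//!     .collect();
+//! for handle in handles {
+//!     handle.join().unwrap();
+//! }
+//! assert_eq!(*data.lock().unwrap(), 4);
+//! ```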
+//!
+//! ## Collections
+//!
+//! Implementations of the most common general purpose data structures are
+//! defined in this library. They are re-exported through the
+//! [standard collections library](../std/collections/index.html).
+//!
+//! ## Heap interfaces
+//!
+//! The [`alloc`](alloc/index.html) module defines the low-level interface to the
+//! default global allocator. It is not compatible with the libc allocator API.
+//!
+//! [`Arc`]: sync
+//! [`Box`]: boxed
+//! [`Cell`]: core::cell
+//! [`Rc`]: rc
+//! [`RefCell`]: core::cell
+
+// To run liballoc tests without x.py without ending up with two copies of liballoc, Miri needs to be
+// able to "empty" this crate. See <https://github.com/rust-lang/miri-test-libstd/issues/4>.
+// rustc itself never sets the feature, so this line has no effect there.
+#![cfg(any(not(feature = "miri-test-libstd"), test, doctest))]
+#![allow(unused_attributes)]
+#![stable(feature = "alloc", since = "1.36.0")]
+#![doc(
+ html_playground_url = "https://play.rust-lang.org/",
+ issue_tracker_base_url = "https://github.com/rust-lang/rust/issues/",
+ test(no_crate_inject, attr(allow(unused_variables), deny(warnings)))
+)]
+#![doc(cfg_hide(
+ not(test),
+ not(any(test, bootstrap)),
+ any(not(feature = "miri-test-libstd"), test, doctest),
+ no_global_oom_handling,
+ not(no_global_oom_handling),
+ target_has_atomic = "ptr"
+))]
+#![no_std]
+#![needs_allocator]
+//
+// Lints:
+#![deny(unsafe_op_in_unsafe_fn)]
+#![warn(deprecated_in_future)]
+#![warn(missing_debug_implementations)]
+#![warn(missing_docs)]
+#![allow(explicit_outlives_requirements)]
+//
+// Library features:
+#![feature(alloc_layout_extra)]
+#![feature(allocator_api)]
+#![feature(array_chunks)]
+#![feature(array_into_iter_constructors)]
+#![feature(array_methods)]
+#![feature(array_windows)]
+#![feature(assert_matches)]
+#![feature(async_iterator)]
+#![feature(coerce_unsized)]
+#![cfg_attr(not(no_global_oom_handling), feature(const_alloc_error))]
+#![feature(const_box)]
+#![cfg_attr(not(no_global_oom_handling), feature(const_btree_new))]
+#![feature(const_cow_is_borrowed)]
+#![feature(const_convert)]
+#![feature(const_size_of_val)]
+#![feature(const_align_of_val)]
+#![feature(const_ptr_read)]
+#![feature(const_maybe_uninit_write)]
+#![feature(const_maybe_uninit_as_mut_ptr)]
+#![feature(const_refs_to_cell)]
+#![feature(core_intrinsics)]
+#![feature(const_eval_select)]
+#![feature(const_pin)]
+#![feature(cstr_from_bytes_until_nul)]
+#![feature(dispatch_from_dyn)]
+#![feature(exact_size_is_empty)]
+#![feature(extend_one)]
+#![feature(fmt_internals)]
+#![feature(fn_traits)]
+#![feature(hasher_prefixfree_extras)]
+#![feature(inplace_iteration)]
+#![feature(iter_advance_by)]
+#![feature(iter_next_chunk)]
+#![feature(layout_for_ptr)]
+#![feature(maybe_uninit_array_assume_init)]
+#![feature(maybe_uninit_slice)]
+#![feature(maybe_uninit_uninit_array)]
+#![cfg_attr(test, feature(new_uninit))]
+#![feature(nonnull_slice_from_raw_parts)]
+#![feature(pattern)]
+#![feature(pointer_byte_offsets)]
+#![feature(ptr_internals)]
+#![feature(ptr_metadata)]
+#![feature(ptr_sub_ptr)]
+#![feature(receiver_trait)]
+#![feature(set_ptr_value)]
+#![feature(slice_from_ptr_range)]
+#![feature(slice_group_by)]
+#![feature(slice_ptr_get)]
+#![feature(slice_ptr_len)]
+#![feature(slice_range)]
+#![feature(str_internals)]
+#![feature(strict_provenance)]
+#![feature(trusted_len)]
+#![feature(trusted_random_access)]
+#![feature(try_trait_v2)]
+#![feature(unchecked_math)]
+#![feature(unicode_internals)]
+#![feature(unsize)]
+#![feature(std_internals)]
+//
+// Language features:
+#![feature(allocator_internals)]
+#![feature(allow_internal_unstable)]
+#![feature(associated_type_bounds)]
+#![feature(cfg_sanitize)]
+#![feature(const_deref)]
+#![feature(const_mut_refs)]
+#![feature(const_ptr_write)]
+#![feature(const_precise_live_drops)]
+#![feature(const_trait_impl)]
+#![feature(const_try)]
+#![feature(dropck_eyepatch)]
+#![feature(exclusive_range_pattern)]
+#![feature(fundamental)]
+#![cfg_attr(not(test), feature(generator_trait))]
+#![feature(hashmap_internals)]
+#![feature(lang_items)]
+#![feature(let_else)]
+#![feature(min_specialization)]
+#![feature(negative_impls)]
+#![feature(never_type)]
+#![feature(rustc_allow_const_fn_unstable)]
+#![feature(rustc_attrs)]
+#![feature(pointer_is_aligned)]
+#![feature(slice_internals)]
+#![feature(staged_api)]
+#![feature(stmt_expr_attributes)]
+#![cfg_attr(test, feature(test))]
+#![feature(unboxed_closures)]
+#![feature(unsized_fn_params)]
+#![feature(c_unwind)]
+//
+// Rustdoc features:
+#![feature(doc_cfg)]
+#![feature(doc_cfg_hide)]
+// Technically, this is a bug in rustdoc: rustdoc sees that the documentation on `#[lang = slice_alloc]`
+// blocks is for `&[T]`, which also has documentation using this feature in `core`, and gets mad
+// that the feature-gate isn't enabled. Ideally, it wouldn't check for the feature gate for docs
+// from other crates, but since this can only appear for lang items, it doesn't seem worth fixing.
+#![feature(intra_doc_pointers)]
+
+// Allow testing this library
+#[cfg(test)]
+#[macro_use]
+extern crate std;
+#[cfg(test)]
+extern crate test;
+
+// Module with internal macros used by other modules (needs to be included before other modules).
+#[macro_use]
+mod macros;
+
+mod raw_vec;
+
+// Heaps provided for low-level allocation strategies
+
+pub mod alloc;
+
+// Primitive types using the heaps above
+
+// Need to conditionally define the mod from `boxed.rs` to avoid
+// duplicating the lang-items when building in test cfg; but also need
+// to allow code to have `use boxed::Box;` declarations.
+#[cfg(not(test))]
+pub mod boxed;
+#[cfg(test)]
+mod boxed {
+ pub use std::boxed::Box;
+}
+pub mod borrow;
+pub mod collections;
+#[cfg(not(no_global_oom_handling))]
+pub mod ffi;
+pub mod fmt;
+pub mod rc;
+pub mod slice;
+pub mod str;
+pub mod string;
+#[cfg(target_has_atomic = "ptr")]
+pub mod sync;
+#[cfg(all(not(no_global_oom_handling), target_has_atomic = "ptr"))]
+pub mod task;
+#[cfg(test)]
+mod tests;
+pub mod vec;
+
+#[doc(hidden)]
+#[unstable(feature = "liballoc_internals", issue = "none", reason = "implementation detail")]
+pub mod __export {
+ pub use core::format_args;
+}
diff --git a/library/alloc/src/macros.rs b/library/alloc/src/macros.rs
new file mode 100644
index 000000000..88eb6aa7a
--- /dev/null
+++ b/library/alloc/src/macros.rs
@@ -0,0 +1,129 @@
+/// Creates a [`Vec`] containing the arguments.
+///
+/// `vec!` allows `Vec`s to be defined with the same syntax as array expressions.
+/// There are two forms of this macro:
+///
+/// - Create a [`Vec`] containing a given list of elements:
+///
+/// ```
+/// let v = vec![1, 2, 3];
+/// assert_eq!(v[0], 1);
+/// assert_eq!(v[1], 2);
+/// assert_eq!(v[2], 3);
+/// ```
+///
+/// - Create a [`Vec`] from a given element and size:
+///
+/// ```
+/// let v = vec![1; 3];
+/// assert_eq!(v, [1, 1, 1]);
+/// ```
+///
+/// Note that, unlike array expressions, this syntax supports all elements
+/// which implement [`Clone`] and the number of elements doesn't have to be
+/// a constant.
+///
+/// This will use `clone` to duplicate an expression, so one should be careful
+/// using this with types having a nonstandard `Clone` implementation. For
+/// example, `vec![Rc::new(1); 5]` will create a vector of five references
+/// to the same boxed integer value, not five references pointing to independently
+/// boxed integers.
+///
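+/// For example, a small demonstration of that sharing:
+///
+/// ```
+/// use std::rc::Rc;
+///
+/// // All five elements are clones of the same `Rc`, so they share one
+/// // allocation and the strong count reflects that.
+/// let v = vec![Rc::new(1); 5];
+/// assert_eq!(Rc::strong_count(&v[0]), 5);
+/// assert!(Rc::ptr_eq(&v[0], &v[4]));
+/// ```
+///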
+/// Also, note that `vec![expr; 0]` is allowed, and produces an empty vector.
+/// This will still evaluate `expr`, however, and immediately drop the resulting value, so
+/// be mindful of side effects.
+///
+/// [`Vec`]: crate::vec::Vec
+#[cfg(all(not(no_global_oom_handling), not(test)))]
+#[macro_export]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_diagnostic_item = "vec_macro"]
+#[allow_internal_unstable(rustc_attrs, liballoc_internals)]
+macro_rules! vec {
+ () => (
+ $crate::__rust_force_expr!($crate::vec::Vec::new())
+ );
+ ($elem:expr; $n:expr) => (
+ $crate::__rust_force_expr!($crate::vec::from_elem($elem, $n))
+ );
+ ($($x:expr),+ $(,)?) => (
+ $crate::__rust_force_expr!(<[_]>::into_vec(
+ #[rustc_box]
+ $crate::boxed::Box::new([$($x),+])
+ ))
+ );
+}
+
+// HACK(japaric): with cfg(test) the inherent `[T]::into_vec` method, which is
+// required for this macro definition, is not available. Instead use the
+// `slice::into_vec` function which is only available with cfg(test)
+// NB see the slice::hack module in slice.rs for more information
+#[cfg(all(not(no_global_oom_handling), test))]
+#[allow(unused_macro_rules)]
+macro_rules! vec {
+ () => (
+ $crate::vec::Vec::new()
+ );
+ ($elem:expr; $n:expr) => (
+ $crate::vec::from_elem($elem, $n)
+ );
+ ($($x:expr),*) => (
+ $crate::slice::into_vec($crate::boxed::Box::new([$($x),*]))
+ );
+ ($($x:expr,)*) => (vec![$($x),*])
+}
+
+/// Creates a `String` using interpolation of runtime expressions.
+///
+/// The first argument `format!` receives is a format string. This must be a string
+/// literal. The power of the formatting string is in the `{}`s it contains.
+///
+/// Additional parameters passed to `format!` replace the `{}`s within the
+/// formatting string in the order given unless named or positional parameters
+/// are used; see [`std::fmt`] for more information.
+///
+/// A common use for `format!` is concatenation and interpolation of strings.
+/// The same convention is used with [`print!`] and [`write!`] macros,
+/// depending on the intended destination of the string.
+///
+/// To convert a single value to a string, use the [`to_string`] method. This
+/// will use the [`Display`] formatting trait.
+///
+/// [`std::fmt`]: ../std/fmt/index.html
+/// [`print!`]: ../std/macro.print.html
+/// [`write!`]: core::write
+/// [`to_string`]: crate::string::ToString
+/// [`Display`]: core::fmt::Display
+///
+/// # Panics
+///
+/// `format!` panics if a formatting trait implementation returns an error.
+/// This indicates an incorrect implementation
+/// since `fmt::Write for String` never returns an error itself.
+///
+/// # Examples
+///
+/// ```
+/// format!("test");
+/// format!("hello {}", "world!");
+/// format!("x = {}, y = {y}", 10, y = 30);
+/// ```
+#[macro_export]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "format_macro")]
+macro_rules! format {
+ ($($arg:tt)*) => {{
+ let res = $crate::fmt::format($crate::__export::format_args!($($arg)*));
+ res
+ }}
+}
+
+/// Force AST node to an expression to improve diagnostics in pattern position.
+#[doc(hidden)]
+#[macro_export]
+#[unstable(feature = "liballoc_internals", issue = "none", reason = "implementation detail")]
+macro_rules! __rust_force_expr {
+ ($e:expr) => {
+ $e
+ };
+}
diff --git a/library/alloc/src/raw_vec.rs b/library/alloc/src/raw_vec.rs
new file mode 100644
index 000000000..b0f4529ab
--- /dev/null
+++ b/library/alloc/src/raw_vec.rs
@@ -0,0 +1,519 @@
+#![unstable(feature = "raw_vec_internals", reason = "unstable const warnings", issue = "none")]
+
+use core::alloc::LayoutError;
+use core::cmp;
+use core::intrinsics;
+use core::mem::{self, ManuallyDrop, MaybeUninit};
+use core::ops::Drop;
+use core::ptr::{self, NonNull, Unique};
+use core::slice;
+
+#[cfg(not(no_global_oom_handling))]
+use crate::alloc::handle_alloc_error;
+use crate::alloc::{Allocator, Global, Layout};
+use crate::boxed::Box;
+use crate::collections::TryReserveError;
+use crate::collections::TryReserveErrorKind::*;
+
+#[cfg(test)]
+mod tests;
+
+#[cfg(not(no_global_oom_handling))]
+enum AllocInit {
+ /// The contents of the new memory are uninitialized.
+ Uninitialized,
+ /// The new memory is guaranteed to be zeroed.
+ Zeroed,
+}
+
+/// A low-level utility for more ergonomically allocating, reallocating, and deallocating
+/// a buffer of memory on the heap without having to worry about all the corner cases
+/// involved. This type is excellent for building your own data structures like Vec and VecDeque.
+/// In particular:
+///
+/// * Produces `Unique::dangling()` on zero-sized types.
+/// * Produces `Unique::dangling()` on zero-length allocations.
+/// * Avoids freeing `Unique::dangling()`.
+/// * Catches all overflows in capacity computations (promotes them to "capacity overflow" panics).
+/// * Guards against 32-bit systems allocating more than isize::MAX bytes.
+/// * Guards against overflowing your length.
+/// * Calls `handle_alloc_error` for fallible allocations.
+/// * Contains a `ptr::Unique` and thus endows the user with all related benefits.
+/// * Uses the excess returned from the allocator to make use of the largest available capacity.
+///
+/// This type does not in any way inspect the memory that it manages. When dropped it *will*
+/// free its memory, but it *won't* try to drop its contents. It is up to the user of `RawVec`
+/// to handle the actual things *stored* inside of a `RawVec`.
+///
+/// Note that the excess of a zero-sized type is always infinite, so `capacity()` always returns
+/// `usize::MAX`. This means that you need to be careful when round-tripping this type with a
+/// `Box<[T]>`, since `capacity()` won't yield the length.
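+///
+/// A rough sketch of the intended usage pattern (this type is crate-internal, so
+/// the snippet is illustrative only):
+///
+/// ```ignore (crate-internal type, illustration only)
+/// let mut buf: RawVec<u32> = RawVec::with_capacity(4);
+/// // Make room for 4 existing elements plus 8 more; the allocator may round up.
+/// buf.reserve(4, 8);
+/// assert!(buf.capacity() >= 12);
+/// ```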
+#[allow(missing_debug_implementations)]
+pub(crate) struct RawVec<T, A: Allocator = Global> {
+ ptr: Unique<T>,
+ cap: usize,
+ alloc: A,
+}
+
+impl<T> RawVec<T, Global> {
+ /// HACK(Centril): This exists because stable `const fn` can only call stable `const fn`, so
+ /// they cannot call `Self::new()`.
+ ///
+ /// If you change `RawVec<T>::new` or dependencies, please take care to not introduce anything
+ /// that would truly const-call something unstable.
+ pub const NEW: Self = Self::new();
+
+ /// Creates the biggest possible `RawVec` (on the system heap)
+ /// without allocating. If `T` has positive size, then this makes a
+ /// `RawVec` with capacity `0`. If `T` is zero-sized, then it makes a
+ /// `RawVec` with capacity `usize::MAX`. Useful for implementing
+ /// delayed allocation.
+ #[must_use]
+ pub const fn new() -> Self {
+ Self::new_in(Global)
+ }
+
+ /// Creates a `RawVec` (on the system heap) with exactly the
+ /// capacity and alignment requirements for a `[T; capacity]`. This is
+ /// equivalent to calling `RawVec::new` when `capacity` is `0` or `T` is
+ /// zero-sized. Note that if `T` is zero-sized this means you will
+ /// *not* get a `RawVec` with the requested capacity.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the requested capacity exceeds `isize::MAX` bytes.
+ ///
+ /// # Aborts
+ ///
+ /// Aborts on OOM.
+ #[cfg(not(any(no_global_oom_handling, test)))]
+ #[must_use]
+ #[inline]
+ pub fn with_capacity(capacity: usize) -> Self {
+ Self::with_capacity_in(capacity, Global)
+ }
+
+ /// Like `with_capacity`, but guarantees the buffer is zeroed.
+ #[cfg(not(any(no_global_oom_handling, test)))]
+ #[must_use]
+ #[inline]
+ pub fn with_capacity_zeroed(capacity: usize) -> Self {
+ Self::with_capacity_zeroed_in(capacity, Global)
+ }
+}
+
+impl<T, A: Allocator> RawVec<T, A> {
+ // Tiny Vecs are dumb. Skip to:
+ // - 8 if the element size is 1, because any heap allocator is likely
+ // to round up a request of less than 8 bytes to at least 8 bytes.
+ // - 4 if elements are moderate-sized (<= 1 KiB).
+ // - 1 otherwise, to avoid wasting too much space for very short Vecs.
+ pub(crate) const MIN_NON_ZERO_CAP: usize = if mem::size_of::<T>() == 1 {
+ 8
+ } else if mem::size_of::<T>() <= 1024 {
+ 4
+ } else {
+ 1
+ };
+
+ /// Like `new`, but parameterized over the choice of allocator for
+ /// the returned `RawVec`.
+ pub const fn new_in(alloc: A) -> Self {
+ // `cap: 0` means "unallocated". zero-sized types are ignored.
+ Self { ptr: Unique::dangling(), cap: 0, alloc }
+ }
+
+ /// Like `with_capacity`, but parameterized over the choice of
+ /// allocator for the returned `RawVec`.
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
+ Self::allocate_in(capacity, AllocInit::Uninitialized, alloc)
+ }
+
+ /// Like `with_capacity_zeroed`, but parameterized over the choice
+ /// of allocator for the returned `RawVec`.
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ pub fn with_capacity_zeroed_in(capacity: usize, alloc: A) -> Self {
+ Self::allocate_in(capacity, AllocInit::Zeroed, alloc)
+ }
+
+ /// Converts the entire buffer into `Box<[MaybeUninit<T>]>` with the specified `len`.
+ ///
+ /// Note that this will correctly reconstitute any `cap` changes
+ /// that may have been performed. (See description of type for details.)
+ ///
+ /// # Safety
+ ///
+ /// * `len` must be greater than or equal to the most recently requested capacity, and
+ /// * `len` must be less than or equal to `self.capacity()`.
+ ///
+ /// Note that the requested capacity and `self.capacity()` could differ, as
+ /// an allocator could overallocate and return a larger memory block than requested.
+ pub unsafe fn into_box(self, len: usize) -> Box<[MaybeUninit<T>], A> {
+ // Sanity-check one half of the safety requirement (we cannot check the other half).
+ debug_assert!(
+ len <= self.capacity(),
+ "`len` must be smaller than or equal to `self.capacity()`"
+ );
+
+ let me = ManuallyDrop::new(self);
+ unsafe {
+ let slice = slice::from_raw_parts_mut(me.ptr() as *mut MaybeUninit<T>, len);
+ Box::from_raw_in(slice, ptr::read(&me.alloc))
+ }
+ }
+
+ #[cfg(not(no_global_oom_handling))]
+ fn allocate_in(capacity: usize, init: AllocInit, alloc: A) -> Self {
+ // Don't allocate here because `Drop` will not deallocate when `capacity` is 0.
+ if mem::size_of::<T>() == 0 || capacity == 0 {
+ Self::new_in(alloc)
+ } else {
+ // We avoid `unwrap_or_else` here because it bloats the amount of
+ // LLVM IR generated.
+ let layout = match Layout::array::<T>(capacity) {
+ Ok(layout) => layout,
+ Err(_) => capacity_overflow(),
+ };
+ match alloc_guard(layout.size()) {
+ Ok(_) => {}
+ Err(_) => capacity_overflow(),
+ }
+ let result = match init {
+ AllocInit::Uninitialized => alloc.allocate(layout),
+ AllocInit::Zeroed => alloc.allocate_zeroed(layout),
+ };
+ let ptr = match result {
+ Ok(ptr) => ptr,
+ Err(_) => handle_alloc_error(layout),
+ };
+
+ // Allocators currently return a `NonNull<[u8]>` whose length
+ // matches the size requested. If that ever changes, the capacity
+ // here should change to `ptr.len() / mem::size_of::<T>()`.
+ Self {
+ ptr: unsafe { Unique::new_unchecked(ptr.cast().as_ptr()) },
+ cap: capacity,
+ alloc,
+ }
+ }
+ }
+
+ /// Reconstitutes a `RawVec` from a pointer, capacity, and allocator.
+ ///
+ /// # Safety
+ ///
+ /// The `ptr` must be allocated (via the given allocator `alloc`), and with the given
+ /// `capacity`.
+ /// The `capacity` cannot exceed `isize::MAX` for sized types (this is only a concern on 32-bit
+ /// systems). ZST vectors may have a capacity of up to `usize::MAX`.
+ /// If the `ptr` and `capacity` come from a `RawVec` created via `alloc`, then this is
+ /// guaranteed.
+ #[inline]
+ pub unsafe fn from_raw_parts_in(ptr: *mut T, capacity: usize, alloc: A) -> Self {
+ Self { ptr: unsafe { Unique::new_unchecked(ptr) }, cap: capacity, alloc }
+ }
+
+ /// Gets a raw pointer to the start of the allocation. Note that this is
+ /// `Unique::dangling()` if `capacity == 0` or `T` is zero-sized. In the former case, you must
+ /// be careful.
+ #[inline]
+ pub fn ptr(&self) -> *mut T {
+ self.ptr.as_ptr()
+ }
+
+ /// Gets the capacity of the allocation.
+ ///
+ /// This will always be `usize::MAX` if `T` is zero-sized.
+ #[inline(always)]
+ pub fn capacity(&self) -> usize {
+ if mem::size_of::<T>() == 0 { usize::MAX } else { self.cap }
+ }
+
+ /// Returns a shared reference to the allocator backing this `RawVec`.
+ pub fn allocator(&self) -> &A {
+ &self.alloc
+ }
+
+ fn current_memory(&self) -> Option<(NonNull<u8>, Layout)> {
+ if mem::size_of::<T>() == 0 || self.cap == 0 {
+ None
+ } else {
+ // We have an allocated chunk of memory, so we can bypass runtime
+ // checks to get our current layout.
+ unsafe {
+ let layout = Layout::array::<T>(self.cap).unwrap_unchecked();
+ Some((self.ptr.cast().into(), layout))
+ }
+ }
+ }
+
+ /// Ensures that the buffer contains at least enough space to hold `len +
+ /// additional` elements. If it doesn't already have enough capacity, will
+ /// reallocate enough space plus comfortable slack space to get amortized
+ /// *O*(1) behavior. Will limit this behavior if it would needlessly cause
+ /// itself to panic.
+ ///
+ /// If `len` exceeds `self.capacity()`, this may fail to actually allocate
+ /// the requested space. This is not really unsafe, but the unsafe
+ /// code *you* write that relies on the behavior of this function may break.
+ ///
+ /// This is ideal for implementing a bulk-push operation like `extend`.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity exceeds `isize::MAX` bytes.
+ ///
+ /// # Aborts
+ ///
+ /// Aborts on OOM.
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ pub fn reserve(&mut self, len: usize, additional: usize) {
+ // Callers expect this function to be very cheap when there is already sufficient capacity.
+ // Therefore, we move all the resizing and error-handling logic from grow_amortized and
+ // handle_reserve behind a call, while making sure that this function is likely to be
+ // inlined as just a comparison and a call if the comparison fails.
+ #[cold]
+ fn do_reserve_and_handle<T, A: Allocator>(
+ slf: &mut RawVec<T, A>,
+ len: usize,
+ additional: usize,
+ ) {
+ handle_reserve(slf.grow_amortized(len, additional));
+ }
+
+ if self.needs_to_grow(len, additional) {
+ do_reserve_and_handle(self, len, additional);
+ }
+ }
+
+ /// A specialized version of `reserve()` used only by the hot and
+ /// oft-instantiated `Vec::push()`, which does its own capacity check.
+ #[cfg(not(no_global_oom_handling))]
+ #[inline(never)]
+ pub fn reserve_for_push(&mut self, len: usize) {
+ handle_reserve(self.grow_amortized(len, 1));
+ }
+
+ /// The same as `reserve`, but returns on errors instead of panicking or aborting.
+ pub fn try_reserve(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
+ if self.needs_to_grow(len, additional) {
+ self.grow_amortized(len, additional)
+ } else {
+ Ok(())
+ }
+ }
+
+ /// Ensures that the buffer contains at least enough space to hold `len +
+ /// additional` elements. If it doesn't already, will reallocate the
+ /// minimum possible amount of memory necessary. Generally this will be
+ /// exactly the amount of memory necessary, but in principle the allocator
+ /// is free to give back more than we asked for.
+ ///
+ /// If `len` exceeds `self.capacity()`, this may fail to actually allocate
+ /// the requested space. This is not really unsafe, but the unsafe code
+ /// *you* write that relies on the behavior of this function may break.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity exceeds `isize::MAX` bytes.
+ ///
+ /// # Aborts
+ ///
+ /// Aborts on OOM.
+ #[cfg(not(no_global_oom_handling))]
+ pub fn reserve_exact(&mut self, len: usize, additional: usize) {
+ handle_reserve(self.try_reserve_exact(len, additional));
+ }
+
+ /// The same as `reserve_exact`, but returns on errors instead of panicking or aborting.
+ pub fn try_reserve_exact(
+ &mut self,
+ len: usize,
+ additional: usize,
+ ) -> Result<(), TryReserveError> {
+ if self.needs_to_grow(len, additional) { self.grow_exact(len, additional) } else { Ok(()) }
+ }
+
+ /// Shrinks the buffer down to the specified capacity. If the given amount
+ /// is 0, actually completely deallocates.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the given amount is *larger* than the current capacity.
+ ///
+ /// # Aborts
+ ///
+ /// Aborts on OOM.
+ #[cfg(not(no_global_oom_handling))]
+ pub fn shrink_to_fit(&mut self, cap: usize) {
+ handle_reserve(self.shrink(cap));
+ }
+}
+
+impl<T, A: Allocator> RawVec<T, A> {
+ /// Returns whether the buffer needs to grow to fulfill the needed extra capacity.
+ /// Mainly used to make inlining reserve-calls possible without inlining `grow`.
+ fn needs_to_grow(&self, len: usize, additional: usize) -> bool {
+ additional > self.capacity().wrapping_sub(len)
+ }
+
+ fn set_ptr_and_cap(&mut self, ptr: NonNull<[u8]>, cap: usize) {
+ // Allocators currently return a `NonNull<[u8]>` whose length matches
+ // the size requested. If that ever changes, the capacity here should
+ // change to `ptr.len() / mem::size_of::<T>()`.
+ self.ptr = unsafe { Unique::new_unchecked(ptr.cast().as_ptr()) };
+ self.cap = cap;
+ }
+
+ // This method is usually instantiated many times. So we want it to be as
+ // small as possible, to improve compile times. But we also want as much of
+ // its contents to be statically computable as possible, to make the
+ // generated code run faster. Therefore, this method is carefully written
+ // so that all of the code that depends on `T` is within it, while as much
+ // of the code that doesn't depend on `T` as possible is in functions that
+ // are non-generic over `T`.
+ fn grow_amortized(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
+ // This is ensured by the calling contexts.
+ debug_assert!(additional > 0);
+
+ if mem::size_of::<T>() == 0 {
+ // Since we return a capacity of `usize::MAX` when `elem_size` is
+ // 0, getting to here necessarily means the `RawVec` is overfull.
+ return Err(CapacityOverflow.into());
+ }
+
+ // Nothing we can really do about these checks, sadly.
+ let required_cap = len.checked_add(additional).ok_or(CapacityOverflow)?;
+
+ // This guarantees exponential growth. The doubling cannot overflow
+ // because `cap <= isize::MAX` and the type of `cap` is `usize`.
+ let cap = cmp::max(self.cap * 2, required_cap);
+ let cap = cmp::max(Self::MIN_NON_ZERO_CAP, cap);
+
+ let new_layout = Layout::array::<T>(cap);
+
+ // `finish_grow` is non-generic over `T`.
+ let ptr = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?;
+ self.set_ptr_and_cap(ptr, cap);
+ Ok(())
+ }
+
+ // The constraints on this method are much the same as those on
+ // `grow_amortized`, but this method is usually instantiated less often so
+ // it's less critical.
+ fn grow_exact(&mut self, len: usize, additional: usize) -> Result<(), TryReserveError> {
+ if mem::size_of::<T>() == 0 {
+ // Since we return a capacity of `usize::MAX` when the type size is
+ // 0, getting to here necessarily means the `RawVec` is overfull.
+ return Err(CapacityOverflow.into());
+ }
+
+ let cap = len.checked_add(additional).ok_or(CapacityOverflow)?;
+ let new_layout = Layout::array::<T>(cap);
+
+ // `finish_grow` is non-generic over `T`.
+ let ptr = finish_grow(new_layout, self.current_memory(), &mut self.alloc)?;
+ self.set_ptr_and_cap(ptr, cap);
+ Ok(())
+ }
+
+ #[cfg(not(no_global_oom_handling))]
+ fn shrink(&mut self, cap: usize) -> Result<(), TryReserveError> {
+ assert!(cap <= self.capacity(), "Tried to shrink to a larger capacity");
+
+ let (ptr, layout) = if let Some(mem) = self.current_memory() { mem } else { return Ok(()) };
+
+ let ptr = unsafe {
+ // `Layout::array` cannot overflow here because it would have
+ // overflowed earlier when capacity was larger.
+ let new_layout = Layout::array::<T>(cap).unwrap_unchecked();
+ self.alloc
+ .shrink(ptr, layout, new_layout)
+ .map_err(|_| AllocError { layout: new_layout, non_exhaustive: () })?
+ };
+ self.set_ptr_and_cap(ptr, cap);
+ Ok(())
+ }
+}
+
+// This function is outside `RawVec` to minimize compile times. See the comment
+// above `RawVec::grow_amortized` for details. (The `A` parameter isn't
+// significant, because the number of different `A` types seen in practice is
+// much smaller than the number of `T` types.)
+#[inline(never)]
+fn finish_grow<A>(
+ new_layout: Result<Layout, LayoutError>,
+ current_memory: Option<(NonNull<u8>, Layout)>,
+ alloc: &mut A,
+) -> Result<NonNull<[u8]>, TryReserveError>
+where
+ A: Allocator,
+{
+ // Check for the error here to minimize the size of `RawVec::grow_*`.
+ let new_layout = new_layout.map_err(|_| CapacityOverflow)?;
+
+ alloc_guard(new_layout.size())?;
+
+ let memory = if let Some((ptr, old_layout)) = current_memory {
+ debug_assert_eq!(old_layout.align(), new_layout.align());
+ unsafe {
+ // The allocator checks for alignment equality
+ intrinsics::assume(old_layout.align() == new_layout.align());
+ alloc.grow(ptr, old_layout, new_layout)
+ }
+ } else {
+ alloc.allocate(new_layout)
+ };
+
+ memory.map_err(|_| AllocError { layout: new_layout, non_exhaustive: () }.into())
+}
+
+unsafe impl<#[may_dangle] T, A: Allocator> Drop for RawVec<T, A> {
+ /// Frees the memory owned by the `RawVec` *without* trying to drop its contents.
+ fn drop(&mut self) {
+ if let Some((ptr, layout)) = self.current_memory() {
+ unsafe { self.alloc.deallocate(ptr, layout) }
+ }
+ }
+}
+
+// Central function for reserve error handling.
+#[cfg(not(no_global_oom_handling))]
+#[inline]
+fn handle_reserve(result: Result<(), TryReserveError>) {
+ match result.map_err(|e| e.kind()) {
+ Err(CapacityOverflow) => capacity_overflow(),
+ Err(AllocError { layout, .. }) => handle_alloc_error(layout),
+ Ok(()) => { /* yay */ }
+ }
+}
+
+// We need to guarantee the following:
+// * We don't ever allocate `> isize::MAX` byte-size objects.
+// * We don't overflow `usize::MAX` and actually allocate too little.
+//
+// On 64-bit we just need to check for overflow since trying to allocate
+// `> isize::MAX` bytes will surely fail. On 32-bit and 16-bit we need to add
+// an extra guard for this in case we're running on a platform which can use
+// all 4GB in user-space, e.g., PAE or x32.
+
+#[inline]
+fn alloc_guard(alloc_size: usize) -> Result<(), TryReserveError> {
+ if usize::BITS < 64 && alloc_size > isize::MAX as usize {
+ Err(CapacityOverflow.into())
+ } else {
+ Ok(())
+ }
+}
+
+// One central function responsible for reporting capacity overflows. This'll
+// ensure that the code generation related to these panics is minimal as there's
+// only one location which panics rather than a bunch throughout the module.
+#[cfg(not(no_global_oom_handling))]
+fn capacity_overflow() -> ! {
+ panic!("capacity overflow");
+}
diff --git a/library/alloc/src/raw_vec/tests.rs b/library/alloc/src/raw_vec/tests.rs
new file mode 100644
index 000000000..ff322f0da
--- /dev/null
+++ b/library/alloc/src/raw_vec/tests.rs
@@ -0,0 +1,163 @@
+use super::*;
+use std::cell::Cell;
+
+#[test]
+fn allocator_param() {
+ use crate::alloc::AllocError;
+
+ // Writing a test of integration between third-party
+ // allocators and `RawVec` is a little tricky because the `RawVec`
+ // API does not expose fallible allocation methods, so we
+ // cannot check what happens when allocator is exhausted
+ // (beyond detecting a panic).
+ //
+ // Instead, this just checks that the `RawVec` methods do at
+ // least go through the Allocator API when it reserves
+ // storage.
+
+ // A dumb allocator that consumes a fixed amount of fuel
+ // before allocation attempts start failing.
+ struct BoundedAlloc {
+ fuel: Cell<usize>,
+ }
+ unsafe impl Allocator for BoundedAlloc {
+ fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
+ let size = layout.size();
+ if size > self.fuel.get() {
+ return Err(AllocError);
+ }
+ match Global.allocate(layout) {
+ ok @ Ok(_) => {
+ self.fuel.set(self.fuel.get() - size);
+ ok
+ }
+ err @ Err(_) => err,
+ }
+ }
+ unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
+ unsafe { Global.deallocate(ptr, layout) }
+ }
+ }
+
+ let a = BoundedAlloc { fuel: Cell::new(500) };
+ let mut v: RawVec<u8, _> = RawVec::with_capacity_in(50, a);
+ assert_eq!(v.alloc.fuel.get(), 450);
+ v.reserve(50, 150); // (causes a realloc, thus using 50 + 150 = 200 units of fuel)
+ assert_eq!(v.alloc.fuel.get(), 250);
+}
+
+#[test]
+fn reserve_does_not_overallocate() {
+ {
+ let mut v: RawVec<u32> = RawVec::new();
+ // First, `reserve` allocates like `reserve_exact`.
+ v.reserve(0, 9);
+ assert_eq!(9, v.capacity());
+ }
+
+ {
+ let mut v: RawVec<u32> = RawVec::new();
+ v.reserve(0, 7);
+ assert_eq!(7, v.capacity());
+ // 97 is more than double of 7, so `reserve` should work
+ // like `reserve_exact`.
+ v.reserve(7, 90);
+ assert_eq!(97, v.capacity());
+ }
+
+ {
+ let mut v: RawVec<u32> = RawVec::new();
+ v.reserve(0, 12);
+ assert_eq!(12, v.capacity());
+ v.reserve(12, 3);
+ // 3 is less than half of 12, so `reserve` must grow
+ // exponentially. At the time of writing this test grow
+ // factor is 2, so new capacity is 24, however, grow factor
+ // of 1.5 is OK too. Hence `>= 18` in assert.
+ assert!(v.capacity() >= 12 + 12 / 2);
+ }
+}
+
+struct ZST;
+
+// A `RawVec` holding zero-sized elements should always look like this.
+fn zst_sanity<T>(v: &RawVec<T>) {
+ assert_eq!(v.capacity(), usize::MAX);
+ assert_eq!(v.ptr(), core::ptr::Unique::<T>::dangling().as_ptr());
+ assert_eq!(v.current_memory(), None);
+}
+
+#[test]
+fn zst() {
+ let cap_err = Err(crate::collections::TryReserveErrorKind::CapacityOverflow.into());
+
+ assert_eq!(std::mem::size_of::<ZST>(), 0);
+
+ // All these different ways of creating the RawVec produce the same thing.
+
+ let v: RawVec<ZST> = RawVec::new();
+ zst_sanity(&v);
+
+ let v: RawVec<ZST> = RawVec::with_capacity_in(100, Global);
+ zst_sanity(&v);
+
+ let v: RawVec<ZST> = RawVec::with_capacity_in(100, Global);
+ zst_sanity(&v);
+
+ let v: RawVec<ZST> = RawVec::allocate_in(0, AllocInit::Uninitialized, Global);
+ zst_sanity(&v);
+
+ let v: RawVec<ZST> = RawVec::allocate_in(100, AllocInit::Uninitialized, Global);
+ zst_sanity(&v);
+
+ let mut v: RawVec<ZST> = RawVec::allocate_in(usize::MAX, AllocInit::Uninitialized, Global);
+ zst_sanity(&v);
+
+ // Check all these operations work as expected with zero-sized elements.
+
+ assert!(!v.needs_to_grow(100, usize::MAX - 100));
+ assert!(v.needs_to_grow(101, usize::MAX - 100));
+ zst_sanity(&v);
+
+ v.reserve(100, usize::MAX - 100);
+ //v.reserve(101, usize::MAX - 100); // panics, in `zst_reserve_panic` below
+ zst_sanity(&v);
+
+ v.reserve_exact(100, usize::MAX - 100);
+ //v.reserve_exact(101, usize::MAX - 100); // panics, in `zst_reserve_exact_panic` below
+ zst_sanity(&v);
+
+ assert_eq!(v.try_reserve(100, usize::MAX - 100), Ok(()));
+ assert_eq!(v.try_reserve(101, usize::MAX - 100), cap_err);
+ zst_sanity(&v);
+
+ assert_eq!(v.try_reserve_exact(100, usize::MAX - 100), Ok(()));
+ assert_eq!(v.try_reserve_exact(101, usize::MAX - 100), cap_err);
+ zst_sanity(&v);
+
+ assert_eq!(v.grow_amortized(100, usize::MAX - 100), cap_err);
+ assert_eq!(v.grow_amortized(101, usize::MAX - 100), cap_err);
+ zst_sanity(&v);
+
+ assert_eq!(v.grow_exact(100, usize::MAX - 100), cap_err);
+ assert_eq!(v.grow_exact(101, usize::MAX - 100), cap_err);
+ zst_sanity(&v);
+}
+
+#[test]
+#[should_panic(expected = "capacity overflow")]
+fn zst_reserve_panic() {
+ let mut v: RawVec<ZST> = RawVec::new();
+ zst_sanity(&v);
+
+ v.reserve(101, usize::MAX - 100);
+}
+
+#[test]
+#[should_panic(expected = "capacity overflow")]
+fn zst_reserve_exact_panic() {
+ let mut v: RawVec<ZST> = RawVec::new();
+ zst_sanity(&v);
+
+ v.reserve_exact(101, usize::MAX - 100);
+}
diff --git a/library/alloc/src/rc.rs b/library/alloc/src/rc.rs
new file mode 100644
index 000000000..b89b03683
--- /dev/null
+++ b/library/alloc/src/rc.rs
@@ -0,0 +1,2700 @@
+//! Single-threaded reference-counting pointers. 'Rc' stands for 'Reference
+//! Counted'.
+//!
+//! The type [`Rc<T>`][`Rc`] provides shared ownership of a value of type `T`,
+//! allocated in the heap. Invoking [`clone`][clone] on [`Rc`] produces a new
+//! pointer to the same allocation in the heap. When the last [`Rc`] pointer to a
+//! given allocation is destroyed, the value stored in that allocation (often
+//! referred to as "inner value") is also dropped.
+//!
+//! Shared references in Rust disallow mutation by default, and [`Rc`]
+//! is no exception: you cannot generally obtain a mutable reference to
+//! something inside an [`Rc`]. If you need mutability, put a [`Cell`]
+//! or [`RefCell`] inside the [`Rc`]; see [an example of mutability
+//! inside an `Rc`][mutability].
+//!
+//! [`Rc`] uses non-atomic reference counting. This means that overhead is very
+//! low, but an [`Rc`] cannot be sent between threads, and consequently [`Rc`]
+//! does not implement [`Send`][send]. As a result, the Rust compiler
+//! will check *at compile time* that you are not sending [`Rc`]s between
+//! threads. If you need multi-threaded, atomic reference counting, use
+//! [`sync::Arc`][arc].
+//!
+//! The [`downgrade`][downgrade] method can be used to create a non-owning
+//! [`Weak`] pointer. A [`Weak`] pointer can be [`upgrade`][upgrade]d
+//! to an [`Rc`], but this will return [`None`] if the value stored in the allocation has
+//! already been dropped. In other words, `Weak` pointers do not keep the value
+//! inside the allocation alive; however, they *do* keep the allocation
+//! (the backing store for the inner value) alive.
+//!
+//! A cycle between [`Rc`] pointers will never be deallocated. For this reason,
+//! [`Weak`] is used to break cycles. For example, a tree could have strong
+//! [`Rc`] pointers from parent nodes to children, and [`Weak`] pointers from
+//! children back to their parents.
+//!
+//! `Rc<T>` automatically dereferences to `T` (via the [`Deref`] trait),
+//! so you can call `T`'s methods on a value of type [`Rc<T>`][`Rc`]. To avoid name
+//! clashes with `T`'s methods, the methods of [`Rc<T>`][`Rc`] itself are associated
+//! functions, called using [fully qualified syntax]:
+//!
+//! ```
+//! use std::rc::Rc;
+//!
+//! let my_rc = Rc::new(());
+//! let my_weak = Rc::downgrade(&my_rc);
+//! ```
+//!
+//! `Rc<T>`'s implementations of traits like `Clone` may also be called using
+//! fully qualified syntax. Some people prefer to use fully qualified syntax,
+//! while others prefer using method-call syntax.
+//!
+//! ```
+//! use std::rc::Rc;
+//!
+//! let rc = Rc::new(());
+//! // Method-call syntax
+//! let rc2 = rc.clone();
+//! // Fully qualified syntax
+//! let rc3 = Rc::clone(&rc);
+//! ```
+//!
+//! [`Weak<T>`][`Weak`] does not auto-dereference to `T`, because the inner value may have
+//! already been dropped.
+//!
+//! # Cloning references
+//!
+//! Creating a new reference to the same allocation as an existing reference counted pointer
+//! is done using the `Clone` trait implemented for [`Rc<T>`][`Rc`] and [`Weak<T>`][`Weak`].
+//!
+//! ```
+//! use std::rc::Rc;
+//!
+//! let foo = Rc::new(vec![1.0, 2.0, 3.0]);
+//! // The two syntaxes below are equivalent.
+//! let a = foo.clone();
+//! let b = Rc::clone(&foo);
+//! // a and b both point to the same memory location as foo.
+//! ```
+//!
+//! The `Rc::clone(&from)` syntax is the most idiomatic because it conveys more explicitly
+//! the meaning of the code. In the example above, this syntax makes it easier to see that
+//! this code is creating a new reference rather than copying the whole content of `foo`.
+//!
+//! # Examples
+//!
+//! Consider a scenario where a set of `Gadget`s are owned by a given `Owner`.
+//! We want to have our `Gadget`s point to their `Owner`. We can't do this with
+//! unique ownership, because more than one gadget may belong to the same
+//! `Owner`. [`Rc`] allows us to share an `Owner` between multiple `Gadget`s,
+//! and have the `Owner` remain allocated as long as any `Gadget` points at it.
+//!
+//! ```
+//! use std::rc::Rc;
+//!
+//! struct Owner {
+//! name: String,
+//! // ...other fields
+//! }
+//!
+//! struct Gadget {
+//! id: i32,
+//! owner: Rc<Owner>,
+//! // ...other fields
+//! }
+//!
+//! fn main() {
+//! // Create a reference-counted `Owner`.
+//! let gadget_owner: Rc<Owner> = Rc::new(
+//! Owner {
+//! name: "Gadget Man".to_string(),
+//! }
+//! );
+//!
+//! // Create `Gadget`s belonging to `gadget_owner`. Cloning the `Rc<Owner>`
+//! // gives us a new pointer to the same `Owner` allocation, incrementing
+//! // the reference count in the process.
+//! let gadget1 = Gadget {
+//! id: 1,
+//! owner: Rc::clone(&gadget_owner),
+//! };
+//! let gadget2 = Gadget {
+//! id: 2,
+//! owner: Rc::clone(&gadget_owner),
+//! };
+//!
+//! // Dispose of our local variable `gadget_owner`.
+//! drop(gadget_owner);
+//!
+//! // Despite dropping `gadget_owner`, we're still able to print out the name
+//! // of the `Owner` of the `Gadget`s. This is because we've only dropped a
+//! // single `Rc<Owner>`, not the `Owner` it points to. As long as there are
+//! // other `Rc<Owner>` pointing at the same `Owner` allocation, it will remain
+//! // live. The field projection `gadget1.owner.name` works because
+//! // `Rc<Owner>` automatically dereferences to `Owner`.
+//! println!("Gadget {} owned by {}", gadget1.id, gadget1.owner.name);
+//! println!("Gadget {} owned by {}", gadget2.id, gadget2.owner.name);
+//!
+//! // At the end of the function, `gadget1` and `gadget2` are destroyed, and
+//! // with them the last counted references to our `Owner`. Gadget Man now
+//! // gets destroyed as well.
+//! }
+//! ```
+//!
+//! If our requirements change, and we also need to be able to traverse from
+//! `Owner` to `Gadget`, we will run into problems. An [`Rc`] pointer from `Owner`
+//! to `Gadget` introduces a cycle. This means that their
+//! reference counts can never reach 0, and the allocation will never be destroyed:
+//! a memory leak. In order to get around this, we can use [`Weak`]
+//! pointers.
+//!
+//! Rust actually makes it somewhat difficult to produce this loop in the first
+//! place. In order to end up with two values that point at each other, one of
+//! them needs to be mutable. This is difficult because [`Rc`] enforces
+//! memory safety by only giving out shared references to the value it wraps,
+//! and these don't allow direct mutation. We need to wrap the part of the
+//! value we wish to mutate in a [`RefCell`], which provides *interior
+//! mutability*: a method to achieve mutability through a shared reference.
+//! [`RefCell`] enforces Rust's borrowing rules at runtime.
+//!
+//! ```
+//! use std::rc::Rc;
+//! use std::rc::Weak;
+//! use std::cell::RefCell;
+//!
+//! struct Owner {
+//! name: String,
+//! gadgets: RefCell<Vec<Weak<Gadget>>>,
+//! // ...other fields
+//! }
+//!
+//! struct Gadget {
+//! id: i32,
+//! owner: Rc<Owner>,
+//! // ...other fields
+//! }
+//!
+//! fn main() {
+//! // Create a reference-counted `Owner`. Note that we've put the `Owner`'s
+//! // vector of `Gadget`s inside a `RefCell` so that we can mutate it through
+//! // a shared reference.
+//! let gadget_owner: Rc<Owner> = Rc::new(
+//! Owner {
+//! name: "Gadget Man".to_string(),
+//! gadgets: RefCell::new(vec![]),
+//! }
+//! );
+//!
+//! // Create `Gadget`s belonging to `gadget_owner`, as before.
+//! let gadget1 = Rc::new(
+//! Gadget {
+//! id: 1,
+//! owner: Rc::clone(&gadget_owner),
+//! }
+//! );
+//! let gadget2 = Rc::new(
+//! Gadget {
+//! id: 2,
+//! owner: Rc::clone(&gadget_owner),
+//! }
+//! );
+//!
+//! // Add the `Gadget`s to their `Owner`.
+//! {
+//! let mut gadgets = gadget_owner.gadgets.borrow_mut();
+//! gadgets.push(Rc::downgrade(&gadget1));
+//! gadgets.push(Rc::downgrade(&gadget2));
+//!
+//! // `RefCell` dynamic borrow ends here.
+//! }
+//!
+//! // Iterate over our `Gadget`s, printing their details out.
+//! for gadget_weak in gadget_owner.gadgets.borrow().iter() {
+//!
+//! // `gadget_weak` is a `Weak<Gadget>`. Since `Weak` pointers can't
+//! // guarantee the allocation still exists, we need to call
+//! // `upgrade`, which returns an `Option<Rc<Gadget>>`.
+//! //
+//! // In this case we know the allocation still exists, so we simply
+//! // `unwrap` the `Option`. In a more complicated program, you might
+//! // need graceful error handling for a `None` result.
+//!
+//! let gadget = gadget_weak.upgrade().unwrap();
+//! println!("Gadget {} owned by {}", gadget.id, gadget.owner.name);
+//! }
+//!
+//! // At the end of the function, `gadget_owner`, `gadget1`, and `gadget2`
+//! // are destroyed. There are now no strong (`Rc`) pointers to the
+//! // gadgets, so they are destroyed. This zeroes the reference count on
+//! // Gadget Man, so he gets destroyed as well.
+//! }
+//! ```
+//!
+//! [clone]: Clone::clone
+//! [`Cell`]: core::cell::Cell
+//! [`RefCell`]: core::cell::RefCell
+//! [send]: core::marker::Send
+//! [arc]: crate::sync::Arc
+//! [`Deref`]: core::ops::Deref
+//! [downgrade]: Rc::downgrade
+//! [upgrade]: Weak::upgrade
+//! [mutability]: core::cell#introducing-mutability-inside-of-something-immutable
+//! [fully qualified syntax]: https://doc.rust-lang.org/book/ch19-03-advanced-traits.html#fully-qualified-syntax-for-disambiguation-calling-methods-with-the-same-name
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+#[cfg(not(test))]
+use crate::boxed::Box;
+#[cfg(test)]
+use std::boxed::Box;
+
+use core::any::Any;
+use core::borrow;
+use core::cell::Cell;
+use core::cmp::Ordering;
+use core::convert::{From, TryFrom};
+use core::fmt;
+use core::hash::{Hash, Hasher};
+use core::intrinsics::abort;
+#[cfg(not(no_global_oom_handling))]
+use core::iter;
+use core::marker::{self, PhantomData, Unpin, Unsize};
+#[cfg(not(no_global_oom_handling))]
+use core::mem::size_of_val;
+use core::mem::{self, align_of_val_raw, forget};
+use core::ops::{CoerceUnsized, Deref, DispatchFromDyn, Receiver};
+use core::panic::{RefUnwindSafe, UnwindSafe};
+#[cfg(not(no_global_oom_handling))]
+use core::pin::Pin;
+use core::ptr::{self, NonNull};
+#[cfg(not(no_global_oom_handling))]
+use core::slice::from_raw_parts_mut;
+
+#[cfg(not(no_global_oom_handling))]
+use crate::alloc::handle_alloc_error;
+#[cfg(not(no_global_oom_handling))]
+use crate::alloc::{box_free, WriteCloneIntoRaw};
+use crate::alloc::{AllocError, Allocator, Global, Layout};
+use crate::borrow::{Cow, ToOwned};
+#[cfg(not(no_global_oom_handling))]
+use crate::string::String;
+#[cfg(not(no_global_oom_handling))]
+use crate::vec::Vec;
+
+#[cfg(test)]
+mod tests;
+
+// This is repr(C) to future-proof against possible field-reordering, which
+// would interfere with otherwise safe [into|from]_raw() of transmutable
+// inner types.
+#[repr(C)]
+struct RcBox<T: ?Sized> {
+ strong: Cell<usize>,
+ weak: Cell<usize>,
+ value: T,
+}
+
+/// A single-threaded reference-counting pointer. 'Rc' stands for 'Reference
+/// Counted'.
+///
+/// See the [module-level documentation](./index.html) for more details.
+///
+/// The inherent methods of `Rc` are all associated functions, which means
+/// that you have to call them as e.g., [`Rc::get_mut(&mut value)`][get_mut] instead of
+/// `value.get_mut()`. This avoids conflicts with methods of the inner type `T`.
+///
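+/// # Examples
+///
+/// A minimal sketch of the calling convention: associated-function syntax reaches the
+/// `Rc`-specific operation, while ordinary method calls auto-dereference to the inner type.
+///
+/// ```
+/// use std::rc::Rc;
+///
+/// let mut value = Rc::new(String::from("hi"));
+///
+/// // `Rc`'s own functionality is reached through associated functions...
+/// Rc::get_mut(&mut value).unwrap().push_str(" there");
+///
+/// // ...while plain method calls go straight to the inner `String`.
+/// assert_eq!(value.len(), "hi there".len());
+/// ```
+///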
+/// [get_mut]: Rc::get_mut
+#[cfg_attr(not(test), rustc_diagnostic_item = "Rc")]
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_insignificant_dtor]
+pub struct Rc<T: ?Sized> {
+ ptr: NonNull<RcBox<T>>,
+ phantom: PhantomData<RcBox<T>>,
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> !marker::Send for Rc<T> {}
+
+// Note that this negative impl isn't strictly necessary for correctness,
+// as `Rc` transitively contains a `Cell`, which is itself `!Sync`.
+// However, given how important `Rc`'s `!Sync`-ness is,
+// having an explicit negative impl is nice for documentation purposes
+// and results in nicer error messages.
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> !marker::Sync for Rc<T> {}
+
+#[stable(feature = "catch_unwind", since = "1.9.0")]
+impl<T: RefUnwindSafe + ?Sized> UnwindSafe for Rc<T> {}
+#[stable(feature = "rc_ref_unwind_safe", since = "1.58.0")]
+impl<T: RefUnwindSafe + ?Sized> RefUnwindSafe for Rc<T> {}
+
+#[unstable(feature = "coerce_unsized", issue = "27732")]
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Rc<U>> for Rc<T> {}
+
+#[unstable(feature = "dispatch_from_dyn", issue = "none")]
+impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Rc<U>> for Rc<T> {}
+
+impl<T: ?Sized> Rc<T> {
+ #[inline(always)]
+ fn inner(&self) -> &RcBox<T> {
+ // This unsafety is ok because while this Rc is alive we're guaranteed
+ // that the inner pointer is valid.
+ unsafe { self.ptr.as_ref() }
+ }
+
+ unsafe fn from_inner(ptr: NonNull<RcBox<T>>) -> Self {
+ Self { ptr, phantom: PhantomData }
+ }
+
+ unsafe fn from_ptr(ptr: *mut RcBox<T>) -> Self {
+ unsafe { Self::from_inner(NonNull::new_unchecked(ptr)) }
+ }
+}
+
+impl<T> Rc<T> {
+ /// Constructs a new `Rc<T>`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// let five = Rc::new(5);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn new(value: T) -> Rc<T> {
+ // There is an implicit weak pointer owned by all the strong
+ // pointers, which ensures that the weak destructor never frees
+ // the allocation while the strong destructor is running, even
+ // if the weak pointer is stored inside the strong one.
+ unsafe {
+ Self::from_inner(
+ Box::leak(Box::new(RcBox { strong: Cell::new(1), weak: Cell::new(1), value }))
+ .into(),
+ )
+ }
+ }
+
+ /// Constructs a new `Rc<T>` while giving you a `Weak<T>` to the allocation,
+ /// to allow you to construct a `T` which holds a weak pointer to itself.
+ ///
+ /// Generally, a structure circularly referencing itself, either directly or
+    /// indirectly, should not hold a strong reference to itself, in order to prevent a memory leak.
+ /// Using this function, you get access to the weak pointer during the
+ /// initialization of `T`, before the `Rc<T>` is created, such that you can
+ /// clone and store it inside the `T`.
+ ///
+ /// `new_cyclic` first allocates the managed allocation for the `Rc<T>`,
+ /// then calls your closure, giving it a `Weak<T>` to this allocation,
+ /// and only afterwards completes the construction of the `Rc<T>` by placing
+ /// the `T` returned from your closure into the allocation.
+ ///
+ /// Since the new `Rc<T>` is not fully-constructed until `Rc<T>::new_cyclic`
+ /// returns, calling [`upgrade`] on the weak reference inside your closure will
+ /// fail and result in a `None` value.
+ ///
+ /// # Panics
+ ///
+ /// If `data_fn` panics, the panic is propagated to the caller, and the
+ /// temporary [`Weak<T>`] is dropped normally.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #![allow(dead_code)]
+ /// use std::rc::{Rc, Weak};
+ ///
+ /// struct Gadget {
+ /// me: Weak<Gadget>,
+ /// }
+ ///
+ /// impl Gadget {
+ /// /// Construct a reference counted Gadget.
+ /// fn new() -> Rc<Self> {
+ /// // `me` is a `Weak<Gadget>` pointing at the new allocation of the
+ /// // `Rc` we're constructing.
+ /// Rc::new_cyclic(|me| {
+ /// // Create the actual struct here.
+ /// Gadget { me: me.clone() }
+ /// })
+ /// }
+ ///
+ /// /// Return a reference counted pointer to Self.
+ /// fn me(&self) -> Rc<Self> {
+ /// self.me.upgrade().unwrap()
+ /// }
+ /// }
+ /// ```
+ /// [`upgrade`]: Weak::upgrade
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "arc_new_cyclic", since = "1.60.0")]
+ pub fn new_cyclic<F>(data_fn: F) -> Rc<T>
+ where
+ F: FnOnce(&Weak<T>) -> T,
+ {
+ // Construct the inner in the "uninitialized" state with a single
+ // weak reference.
+ let uninit_ptr: NonNull<_> = Box::leak(Box::new(RcBox {
+ strong: Cell::new(0),
+ weak: Cell::new(1),
+ value: mem::MaybeUninit::<T>::uninit(),
+ }))
+ .into();
+
+ let init_ptr: NonNull<RcBox<T>> = uninit_ptr.cast();
+
+ let weak = Weak { ptr: init_ptr };
+
+ // It's important we don't give up ownership of the weak pointer, or
+ // else the memory might be freed by the time `data_fn` returns. If
+ // we really wanted to pass ownership, we could create an additional
+ // weak pointer for ourselves, but this would result in additional
+ // updates to the weak reference count which might not be necessary
+ // otherwise.
+ let data = data_fn(&weak);
+
+ let strong = unsafe {
+ let inner = init_ptr.as_ptr();
+ ptr::write(ptr::addr_of_mut!((*inner).value), data);
+
+ let prev_value = (*inner).strong.get();
+ debug_assert_eq!(prev_value, 0, "No prior strong references should exist");
+ (*inner).strong.set(1);
+
+ Rc::from_inner(init_ptr)
+ };
+
+ // Strong references should collectively own a shared weak reference,
+ // so don't run the destructor for our old weak reference.
+ mem::forget(weak);
+ strong
+ }
+
+ /// Constructs a new `Rc` with uninitialized contents.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ /// #![feature(get_mut_unchecked)]
+ ///
+ /// use std::rc::Rc;
+ ///
+ /// let mut five = Rc::<u32>::new_uninit();
+ ///
+ /// // Deferred initialization:
+ /// Rc::get_mut(&mut five).unwrap().write(5);
+ ///
+ /// let five = unsafe { five.assume_init() };
+ ///
+ /// assert_eq!(*five, 5)
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "new_uninit", issue = "63291")]
+ #[must_use]
+ pub fn new_uninit() -> Rc<mem::MaybeUninit<T>> {
+ unsafe {
+ Rc::from_ptr(Rc::allocate_for_layout(
+ Layout::new::<T>(),
+ |layout| Global.allocate(layout),
+ |mem| mem as *mut RcBox<mem::MaybeUninit<T>>,
+ ))
+ }
+ }
+
+ /// Constructs a new `Rc` with uninitialized contents, with the memory
+ /// being filled with `0` bytes.
+ ///
+ /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and
+ /// incorrect usage of this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ ///
+ /// use std::rc::Rc;
+ ///
+ /// let zero = Rc::<u32>::new_zeroed();
+ /// let zero = unsafe { zero.assume_init() };
+ ///
+ /// assert_eq!(*zero, 0)
+ /// ```
+ ///
+ /// [zeroed]: mem::MaybeUninit::zeroed
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "new_uninit", issue = "63291")]
+ #[must_use]
+ pub fn new_zeroed() -> Rc<mem::MaybeUninit<T>> {
+ unsafe {
+ Rc::from_ptr(Rc::allocate_for_layout(
+ Layout::new::<T>(),
+ |layout| Global.allocate_zeroed(layout),
+ |mem| mem as *mut RcBox<mem::MaybeUninit<T>>,
+ ))
+ }
+ }
+
+    /// Constructs a new `Rc<T>`, returning an error if the allocation fails.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api)]
+ /// use std::rc::Rc;
+ ///
+ /// let five = Rc::try_new(5);
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ pub fn try_new(value: T) -> Result<Rc<T>, AllocError> {
+ // There is an implicit weak pointer owned by all the strong
+ // pointers, which ensures that the weak destructor never frees
+ // the allocation while the strong destructor is running, even
+ // if the weak pointer is stored inside the strong one.
+ unsafe {
+ Ok(Self::from_inner(
+ Box::leak(Box::try_new(RcBox { strong: Cell::new(1), weak: Cell::new(1), value })?)
+ .into(),
+ ))
+ }
+ }
+
+    /// Constructs a new `Rc` with uninitialized contents, returning an error if the allocation fails.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api, new_uninit)]
+ /// #![feature(get_mut_unchecked)]
+ ///
+ /// use std::rc::Rc;
+ ///
+ /// let mut five = Rc::<u32>::try_new_uninit()?;
+ ///
+ /// // Deferred initialization:
+ /// Rc::get_mut(&mut five).unwrap().write(5);
+ ///
+ /// let five = unsafe { five.assume_init() };
+ ///
+ /// assert_eq!(*five, 5);
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ // #[unstable(feature = "new_uninit", issue = "63291")]
+ pub fn try_new_uninit() -> Result<Rc<mem::MaybeUninit<T>>, AllocError> {
+ unsafe {
+ Ok(Rc::from_ptr(Rc::try_allocate_for_layout(
+ Layout::new::<T>(),
+ |layout| Global.allocate(layout),
+ |mem| mem as *mut RcBox<mem::MaybeUninit<T>>,
+ )?))
+ }
+ }
+
+ /// Constructs a new `Rc` with uninitialized contents, with the memory
+    /// being filled with `0` bytes, returning an error if the allocation fails.
+ ///
+ /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and
+ /// incorrect usage of this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api, new_uninit)]
+ ///
+ /// use std::rc::Rc;
+ ///
+ /// let zero = Rc::<u32>::try_new_zeroed()?;
+ /// let zero = unsafe { zero.assume_init() };
+ ///
+ /// assert_eq!(*zero, 0);
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ ///
+ /// [zeroed]: mem::MaybeUninit::zeroed
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ //#[unstable(feature = "new_uninit", issue = "63291")]
+ pub fn try_new_zeroed() -> Result<Rc<mem::MaybeUninit<T>>, AllocError> {
+ unsafe {
+ Ok(Rc::from_ptr(Rc::try_allocate_for_layout(
+ Layout::new::<T>(),
+ |layout| Global.allocate_zeroed(layout),
+ |mem| mem as *mut RcBox<mem::MaybeUninit<T>>,
+ )?))
+ }
+ }
+
+    /// Constructs a new `Pin<Rc<T>>`. If `T` does not implement `Unpin`, then
+ /// `value` will be pinned in memory and unable to be moved.
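+    ///
+    /// # Examples
+    ///
+    /// A minimal sketch: the pinned value is still readable through `Deref`.
+    ///
+    /// ```
+    /// use std::pin::Pin;
+    /// use std::rc::Rc;
+    ///
+    /// let five: Pin<Rc<i32>> = Rc::pin(5);
+    /// assert_eq!(*five, 5);
+    /// ```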
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "pin", since = "1.33.0")]
+ #[must_use]
+ pub fn pin(value: T) -> Pin<Rc<T>> {
+ unsafe { Pin::new_unchecked(Rc::new(value)) }
+ }
+
+ /// Returns the inner value, if the `Rc` has exactly one strong reference.
+ ///
+ /// Otherwise, an [`Err`] is returned with the same `Rc` that was
+ /// passed in.
+ ///
+ /// This will succeed even if there are outstanding weak references.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// let x = Rc::new(3);
+ /// assert_eq!(Rc::try_unwrap(x), Ok(3));
+ ///
+ /// let x = Rc::new(4);
+ /// let _y = Rc::clone(&x);
+ /// assert_eq!(*Rc::try_unwrap(x).unwrap_err(), 4);
+ /// ```
+ #[inline]
+ #[stable(feature = "rc_unique", since = "1.4.0")]
+ pub fn try_unwrap(this: Self) -> Result<T, Self> {
+ if Rc::strong_count(&this) == 1 {
+ unsafe {
+ let val = ptr::read(&*this); // copy the contained object
+
+ // Indicate to Weaks that they can't be promoted by decrementing
+ // the strong count, and then remove the implicit "strong weak"
+ // pointer while also handling drop logic by just crafting a
+ // fake Weak.
+ this.inner().dec_strong();
+ let _weak = Weak { ptr: this.ptr };
+ forget(this);
+ Ok(val)
+ }
+ } else {
+ Err(this)
+ }
+ }
+}
+
+impl<T> Rc<[T]> {
+ /// Constructs a new reference-counted slice with uninitialized contents.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ /// #![feature(get_mut_unchecked)]
+ ///
+ /// use std::rc::Rc;
+ ///
+ /// let mut values = Rc::<[u32]>::new_uninit_slice(3);
+ ///
+ /// // Deferred initialization:
+ /// let data = Rc::get_mut(&mut values).unwrap();
+ /// data[0].write(1);
+ /// data[1].write(2);
+ /// data[2].write(3);
+ ///
+ /// let values = unsafe { values.assume_init() };
+ ///
+ /// assert_eq!(*values, [1, 2, 3])
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "new_uninit", issue = "63291")]
+ #[must_use]
+ pub fn new_uninit_slice(len: usize) -> Rc<[mem::MaybeUninit<T>]> {
+ unsafe { Rc::from_ptr(Rc::allocate_for_slice(len)) }
+ }
+
+ /// Constructs a new reference-counted slice with uninitialized contents, with the memory being
+ /// filled with `0` bytes.
+ ///
+ /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and
+ /// incorrect usage of this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ ///
+ /// use std::rc::Rc;
+ ///
+ /// let values = Rc::<[u32]>::new_zeroed_slice(3);
+ /// let values = unsafe { values.assume_init() };
+ ///
+ /// assert_eq!(*values, [0, 0, 0])
+ /// ```
+ ///
+ /// [zeroed]: mem::MaybeUninit::zeroed
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "new_uninit", issue = "63291")]
+ #[must_use]
+ pub fn new_zeroed_slice(len: usize) -> Rc<[mem::MaybeUninit<T>]> {
+ unsafe {
+ Rc::from_ptr(Rc::allocate_for_layout(
+ Layout::array::<T>(len).unwrap(),
+ |layout| Global.allocate_zeroed(layout),
+ |mem| {
+ ptr::slice_from_raw_parts_mut(mem as *mut T, len)
+ as *mut RcBox<[mem::MaybeUninit<T>]>
+ },
+ ))
+ }
+ }
+}
+
+impl<T> Rc<mem::MaybeUninit<T>> {
+ /// Converts to `Rc<T>`.
+ ///
+ /// # Safety
+ ///
+ /// As with [`MaybeUninit::assume_init`],
+ /// it is up to the caller to guarantee that the inner value
+ /// really is in an initialized state.
+ /// Calling this when the content is not yet fully initialized
+ /// causes immediate undefined behavior.
+ ///
+ /// [`MaybeUninit::assume_init`]: mem::MaybeUninit::assume_init
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ /// #![feature(get_mut_unchecked)]
+ ///
+ /// use std::rc::Rc;
+ ///
+ /// let mut five = Rc::<u32>::new_uninit();
+ ///
+ /// // Deferred initialization:
+ /// Rc::get_mut(&mut five).unwrap().write(5);
+ ///
+ /// let five = unsafe { five.assume_init() };
+ ///
+ /// assert_eq!(*five, 5)
+ /// ```
+ #[unstable(feature = "new_uninit", issue = "63291")]
+ #[inline]
+ pub unsafe fn assume_init(self) -> Rc<T> {
+ unsafe { Rc::from_inner(mem::ManuallyDrop::new(self).ptr.cast()) }
+ }
+}
+
+impl<T> Rc<[mem::MaybeUninit<T>]> {
+ /// Converts to `Rc<[T]>`.
+ ///
+ /// # Safety
+ ///
+ /// As with [`MaybeUninit::assume_init`],
+ /// it is up to the caller to guarantee that the inner value
+ /// really is in an initialized state.
+ /// Calling this when the content is not yet fully initialized
+ /// causes immediate undefined behavior.
+ ///
+ /// [`MaybeUninit::assume_init`]: mem::MaybeUninit::assume_init
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ /// #![feature(get_mut_unchecked)]
+ ///
+ /// use std::rc::Rc;
+ ///
+ /// let mut values = Rc::<[u32]>::new_uninit_slice(3);
+ ///
+ /// // Deferred initialization:
+ /// let data = Rc::get_mut(&mut values).unwrap();
+ /// data[0].write(1);
+ /// data[1].write(2);
+ /// data[2].write(3);
+ ///
+ /// let values = unsafe { values.assume_init() };
+ ///
+ /// assert_eq!(*values, [1, 2, 3])
+ /// ```
+ #[unstable(feature = "new_uninit", issue = "63291")]
+ #[inline]
+ pub unsafe fn assume_init(self) -> Rc<[T]> {
+ unsafe { Rc::from_ptr(mem::ManuallyDrop::new(self).ptr.as_ptr() as _) }
+ }
+}
+
+impl<T: ?Sized> Rc<T> {
+ /// Consumes the `Rc`, returning the wrapped pointer.
+ ///
+    /// To avoid a memory leak, the pointer must be converted back to an `Rc` using
+ /// [`Rc::from_raw`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// let x = Rc::new("hello".to_owned());
+ /// let x_ptr = Rc::into_raw(x);
+ /// assert_eq!(unsafe { &*x_ptr }, "hello");
+ /// ```
+ #[stable(feature = "rc_raw", since = "1.17.0")]
+ pub fn into_raw(this: Self) -> *const T {
+ let ptr = Self::as_ptr(&this);
+ mem::forget(this);
+ ptr
+ }
+
+ /// Provides a raw pointer to the data.
+ ///
+ /// The counts are not affected in any way and the `Rc` is not consumed. The pointer is valid
+    /// for as long as there are strong counts in the `Rc`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// let x = Rc::new("hello".to_owned());
+ /// let y = Rc::clone(&x);
+ /// let x_ptr = Rc::as_ptr(&x);
+ /// assert_eq!(x_ptr, Rc::as_ptr(&y));
+ /// assert_eq!(unsafe { &*x_ptr }, "hello");
+ /// ```
+ #[stable(feature = "weak_into_raw", since = "1.45.0")]
+ pub fn as_ptr(this: &Self) -> *const T {
+ let ptr: *mut RcBox<T> = NonNull::as_ptr(this.ptr);
+
+ // SAFETY: This cannot go through Deref::deref or Rc::inner because
+ // this is required to retain raw/mut provenance such that e.g. `get_mut` can
+ // write through the pointer after the Rc is recovered through `from_raw`.
+ unsafe { ptr::addr_of_mut!((*ptr).value) }
+ }
+
+ /// Constructs an `Rc<T>` from a raw pointer.
+ ///
+ /// The raw pointer must have been previously returned by a call to
+ /// [`Rc<U>::into_raw`][into_raw] where `U` must have the same size
+ /// and alignment as `T`. This is trivially true if `U` is `T`.
+ /// Note that if `U` is not `T` but has the same size and alignment, this is
+ /// basically like transmuting references of different types. See
+ /// [`mem::transmute`] for more information on what
+ /// restrictions apply in this case.
+ ///
+ /// The user of `from_raw` has to make sure a specific value of `T` is only
+ /// dropped once.
+ ///
+ /// This function is unsafe because improper use may lead to memory unsafety,
+ /// even if the returned `Rc<T>` is never accessed.
+ ///
+ /// [into_raw]: Rc::into_raw
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// let x = Rc::new("hello".to_owned());
+ /// let x_ptr = Rc::into_raw(x);
+ ///
+ /// unsafe {
+ /// // Convert back to an `Rc` to prevent leak.
+ /// let x = Rc::from_raw(x_ptr);
+ /// assert_eq!(&*x, "hello");
+ ///
+ /// // Further calls to `Rc::from_raw(x_ptr)` would be memory-unsafe.
+ /// }
+ ///
+ /// // The memory was freed when `x` went out of scope above, so `x_ptr` is now dangling!
+ /// ```
+ #[stable(feature = "rc_raw", since = "1.17.0")]
+ pub unsafe fn from_raw(ptr: *const T) -> Self {
+ let offset = unsafe { data_offset(ptr) };
+
+ // Reverse the offset to find the original RcBox.
+ let rc_ptr = unsafe { ptr.byte_sub(offset) as *mut RcBox<T> };
+
+ unsafe { Self::from_ptr(rc_ptr) }
+ }
+
+ /// Creates a new [`Weak`] pointer to this allocation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// let five = Rc::new(5);
+ ///
+ /// let weak_five = Rc::downgrade(&five);
+ /// ```
+ #[must_use = "this returns a new `Weak` pointer, \
+ without modifying the original `Rc`"]
+ #[stable(feature = "rc_weak", since = "1.4.0")]
+ pub fn downgrade(this: &Self) -> Weak<T> {
+ this.inner().inc_weak();
+ // Make sure we do not create a dangling Weak
+ debug_assert!(!is_dangling(this.ptr.as_ptr()));
+ Weak { ptr: this.ptr }
+ }
+
+ /// Gets the number of [`Weak`] pointers to this allocation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// let five = Rc::new(5);
+ /// let _weak_five = Rc::downgrade(&five);
+ ///
+ /// assert_eq!(1, Rc::weak_count(&five));
+ /// ```
+ #[inline]
+ #[stable(feature = "rc_counts", since = "1.15.0")]
+ pub fn weak_count(this: &Self) -> usize {
+ this.inner().weak() - 1
+ }
+
+ /// Gets the number of strong (`Rc`) pointers to this allocation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// let five = Rc::new(5);
+ /// let _also_five = Rc::clone(&five);
+ ///
+ /// assert_eq!(2, Rc::strong_count(&five));
+ /// ```
+ #[inline]
+ #[stable(feature = "rc_counts", since = "1.15.0")]
+ pub fn strong_count(this: &Self) -> usize {
+ this.inner().strong()
+ }
+
+ /// Increments the strong reference count on the `Rc<T>` associated with the
+ /// provided pointer by one.
+ ///
+ /// # Safety
+ ///
+ /// The pointer must have been obtained through `Rc::into_raw`, and the
+ /// associated `Rc` instance must be valid (i.e. the strong count must be at
+ /// least 1) for the duration of this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// let five = Rc::new(5);
+ ///
+ /// unsafe {
+ /// let ptr = Rc::into_raw(five);
+ /// Rc::increment_strong_count(ptr);
+ ///
+ /// let five = Rc::from_raw(ptr);
+ /// assert_eq!(2, Rc::strong_count(&five));
+ /// }
+ /// ```
+ #[inline]
+ #[stable(feature = "rc_mutate_strong_count", since = "1.53.0")]
+ pub unsafe fn increment_strong_count(ptr: *const T) {
+ // Retain Rc, but don't touch refcount by wrapping in ManuallyDrop
+ let rc = unsafe { mem::ManuallyDrop::new(Rc::<T>::from_raw(ptr)) };
+ // Now increase refcount, but don't drop new refcount either
+ let _rc_clone: mem::ManuallyDrop<_> = rc.clone();
+ }
+
+ /// Decrements the strong reference count on the `Rc<T>` associated with the
+ /// provided pointer by one.
+ ///
+ /// # Safety
+ ///
+ /// The pointer must have been obtained through `Rc::into_raw`, and the
+ /// associated `Rc` instance must be valid (i.e. the strong count must be at
+ /// least 1) when invoking this method. This method can be used to release
+ /// the final `Rc` and backing storage, but **should not** be called after
+ /// the final `Rc` has been released.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// let five = Rc::new(5);
+ ///
+ /// unsafe {
+ /// let ptr = Rc::into_raw(five);
+ /// Rc::increment_strong_count(ptr);
+ ///
+ /// let five = Rc::from_raw(ptr);
+ /// assert_eq!(2, Rc::strong_count(&five));
+ /// Rc::decrement_strong_count(ptr);
+ /// assert_eq!(1, Rc::strong_count(&five));
+ /// }
+ /// ```
+ #[inline]
+ #[stable(feature = "rc_mutate_strong_count", since = "1.53.0")]
+ pub unsafe fn decrement_strong_count(ptr: *const T) {
+ unsafe { mem::drop(Rc::from_raw(ptr)) };
+ }
+
+ /// Returns `true` if there are no other `Rc` or [`Weak`] pointers to
+ /// this allocation.
+ #[inline]
+ fn is_unique(this: &Self) -> bool {
+ Rc::weak_count(this) == 0 && Rc::strong_count(this) == 1
+ }
+
+ /// Returns a mutable reference into the given `Rc`, if there are
+ /// no other `Rc` or [`Weak`] pointers to the same allocation.
+ ///
+ /// Returns [`None`] otherwise, because it is not safe to
+ /// mutate a shared value.
+ ///
+ /// See also [`make_mut`][make_mut], which will [`clone`][clone]
+ /// the inner value when there are other `Rc` pointers.
+ ///
+ /// [make_mut]: Rc::make_mut
+ /// [clone]: Clone::clone
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// let mut x = Rc::new(3);
+ /// *Rc::get_mut(&mut x).unwrap() = 4;
+ /// assert_eq!(*x, 4);
+ ///
+ /// let _y = Rc::clone(&x);
+ /// assert!(Rc::get_mut(&mut x).is_none());
+ /// ```
+ #[inline]
+ #[stable(feature = "rc_unique", since = "1.4.0")]
+ pub fn get_mut(this: &mut Self) -> Option<&mut T> {
+ if Rc::is_unique(this) { unsafe { Some(Rc::get_mut_unchecked(this)) } } else { None }
+ }
+
+ /// Returns a mutable reference into the given `Rc`,
+ /// without any check.
+ ///
+ /// See also [`get_mut`], which is safe and does appropriate checks.
+ ///
+ /// [`get_mut`]: Rc::get_mut
+ ///
+ /// # Safety
+ ///
+ /// Any other `Rc` or [`Weak`] pointers to the same allocation must not be dereferenced
+ /// for the duration of the returned borrow.
+ /// This is trivially the case if no such pointers exist,
+ /// for example immediately after `Rc::new`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(get_mut_unchecked)]
+ ///
+ /// use std::rc::Rc;
+ ///
+ /// let mut x = Rc::new(String::new());
+ /// unsafe {
+ /// Rc::get_mut_unchecked(&mut x).push_str("foo")
+ /// }
+ /// assert_eq!(*x, "foo");
+ /// ```
+ #[inline]
+ #[unstable(feature = "get_mut_unchecked", issue = "63292")]
+ pub unsafe fn get_mut_unchecked(this: &mut Self) -> &mut T {
+ // We are careful to *not* create a reference covering the "count" fields, as
+ // this would conflict with accesses to the reference counts (e.g. by `Weak`).
+ unsafe { &mut (*this.ptr.as_ptr()).value }
+ }
+
+    /// Returns `true` if the two `Rc`s point to the same allocation
+    /// (in a vein similar to [`ptr::eq`]).
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// use std::rc::Rc;
+    ///
+    /// let five = Rc::new(5);
+    /// let same_five = Rc::clone(&five);
+    /// let other_five = Rc::new(5);
+    ///
+    /// assert!(Rc::ptr_eq(&five, &same_five));
+    /// assert!(!Rc::ptr_eq(&five, &other_five));
+    /// ```
+    #[inline]
+    #[stable(feature = "ptr_eq", since = "1.17.0")]
+ pub fn ptr_eq(this: &Self, other: &Self) -> bool {
+ this.ptr.as_ptr() == other.ptr.as_ptr()
+ }
+}
+
+impl<T: Clone> Rc<T> {
+ /// Makes a mutable reference into the given `Rc`.
+ ///
+ /// If there are other `Rc` pointers to the same allocation, then `make_mut` will
+ /// [`clone`] the inner value to a new allocation to ensure unique ownership. This is also
+ /// referred to as clone-on-write.
+ ///
+ /// However, if there are no other `Rc` pointers to this allocation, but some [`Weak`]
+ /// pointers, then the [`Weak`] pointers will be disassociated and the inner value will not
+ /// be cloned.
+ ///
+ /// See also [`get_mut`], which will fail rather than cloning the inner value
+    /// or disassociating [`Weak`] pointers.
+ ///
+ /// [`clone`]: Clone::clone
+ /// [`get_mut`]: Rc::get_mut
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// let mut data = Rc::new(5);
+ ///
+ /// *Rc::make_mut(&mut data) += 1; // Won't clone anything
+ /// let mut other_data = Rc::clone(&data); // Won't clone inner data
+ /// *Rc::make_mut(&mut data) += 1; // Clones inner data
+ /// *Rc::make_mut(&mut data) += 1; // Won't clone anything
+ /// *Rc::make_mut(&mut other_data) *= 2; // Won't clone anything
+ ///
+ /// // Now `data` and `other_data` point to different allocations.
+ /// assert_eq!(*data, 8);
+ /// assert_eq!(*other_data, 12);
+ /// ```
+ ///
+ /// [`Weak`] pointers will be disassociated:
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// let mut data = Rc::new(75);
+ /// let weak = Rc::downgrade(&data);
+ ///
+ /// assert!(75 == *data);
+ /// assert!(75 == *weak.upgrade().unwrap());
+ ///
+ /// *Rc::make_mut(&mut data) += 1;
+ ///
+ /// assert!(76 == *data);
+ /// assert!(weak.upgrade().is_none());
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[stable(feature = "rc_unique", since = "1.4.0")]
+ pub fn make_mut(this: &mut Self) -> &mut T {
+ if Rc::strong_count(this) != 1 {
+ // Gotta clone the data, there are other Rcs.
+ // Pre-allocate memory to allow writing the cloned value directly.
+ let mut rc = Self::new_uninit();
+ unsafe {
+ let data = Rc::get_mut_unchecked(&mut rc);
+ (**this).write_clone_into_raw(data.as_mut_ptr());
+ *this = rc.assume_init();
+ }
+ } else if Rc::weak_count(this) != 0 {
+ // Can just steal the data, all that's left is Weaks
+ let mut rc = Self::new_uninit();
+ unsafe {
+ let data = Rc::get_mut_unchecked(&mut rc);
+ data.as_mut_ptr().copy_from_nonoverlapping(&**this, 1);
+
+ this.inner().dec_strong();
+ // Remove implicit strong-weak ref (no need to craft a fake
+ // Weak here -- we know other Weaks can clean up for us)
+ this.inner().dec_weak();
+ ptr::write(this, rc.assume_init());
+ }
+ }
+ // This unsafety is ok because we're guaranteed that the pointer
+ // returned is the *only* pointer that will ever be returned to T. Our
+ // reference count is guaranteed to be 1 at this point, and we required
+ // the `Rc<T>` itself to be `mut`, so we're returning the only possible
+ // reference to the allocation.
+ unsafe { &mut this.ptr.as_mut().value }
+ }
+
+ /// If we have the only reference to `T` then unwrap it. Otherwise, clone `T` and return the
+ /// clone.
+ ///
+ /// Assuming `rc_t` is of type `Rc<T>`, this function is functionally equivalent to
+ /// `(*rc_t).clone()`, but will avoid cloning the inner value where possible.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(arc_unwrap_or_clone)]
+ /// # use std::{ptr, rc::Rc};
+ /// let inner = String::from("test");
+ /// let ptr = inner.as_ptr();
+ ///
+ /// let rc = Rc::new(inner);
+ /// let inner = Rc::unwrap_or_clone(rc);
+ /// // The inner value was not cloned
+ /// assert!(ptr::eq(ptr, inner.as_ptr()));
+ ///
+ /// let rc = Rc::new(inner);
+ /// let rc2 = rc.clone();
+ /// let inner = Rc::unwrap_or_clone(rc);
+ /// // Because there were 2 references, we had to clone the inner value.
+ /// assert!(!ptr::eq(ptr, inner.as_ptr()));
+ /// // `rc2` is the last reference, so when we unwrap it we get back
+ /// // the original `String`.
+ /// let inner = Rc::unwrap_or_clone(rc2);
+ /// assert!(ptr::eq(ptr, inner.as_ptr()));
+ /// ```
+ #[inline]
+ #[unstable(feature = "arc_unwrap_or_clone", issue = "93610")]
+ pub fn unwrap_or_clone(this: Self) -> T {
+ Rc::try_unwrap(this).unwrap_or_else(|rc| (*rc).clone())
+ }
+}
+
+impl Rc<dyn Any> {
+ /// Attempt to downcast the `Rc<dyn Any>` to a concrete type.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::any::Any;
+ /// use std::rc::Rc;
+ ///
+ /// fn print_if_string(value: Rc<dyn Any>) {
+ /// if let Ok(string) = value.downcast::<String>() {
+ /// println!("String ({}): {}", string.len(), string);
+ /// }
+ /// }
+ ///
+ /// let my_string = "Hello World".to_string();
+ /// print_if_string(Rc::new(my_string));
+ /// print_if_string(Rc::new(0i8));
+ /// ```
+ #[inline]
+ #[stable(feature = "rc_downcast", since = "1.29.0")]
+ pub fn downcast<T: Any>(self) -> Result<Rc<T>, Rc<dyn Any>> {
+ if (*self).is::<T>() {
+ unsafe {
+ let ptr = self.ptr.cast::<RcBox<T>>();
+ forget(self);
+ Ok(Rc::from_inner(ptr))
+ }
+ } else {
+ Err(self)
+ }
+ }
+
+ /// Downcasts the `Rc<dyn Any>` to a concrete type.
+ ///
+ /// For a safe alternative see [`downcast`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(downcast_unchecked)]
+ ///
+ /// use std::any::Any;
+ /// use std::rc::Rc;
+ ///
+ /// let x: Rc<dyn Any> = Rc::new(1_usize);
+ ///
+ /// unsafe {
+ /// assert_eq!(*x.downcast_unchecked::<usize>(), 1);
+ /// }
+ /// ```
+ ///
+ /// # Safety
+ ///
+ /// The contained value must be of type `T`. Calling this method
+ /// with the incorrect type is *undefined behavior*.
+    ///
+ /// [`downcast`]: Self::downcast
+ #[inline]
+ #[unstable(feature = "downcast_unchecked", issue = "90850")]
+ pub unsafe fn downcast_unchecked<T: Any>(self) -> Rc<T> {
+ unsafe {
+ let ptr = self.ptr.cast::<RcBox<T>>();
+ mem::forget(self);
+ Rc::from_inner(ptr)
+ }
+ }
+}
+
+impl<T: ?Sized> Rc<T> {
+ /// Allocates an `RcBox<T>` with sufficient space for
+ /// a possibly-unsized inner value where the value has the layout provided.
+ ///
+ /// The function `mem_to_rcbox` is called with the data pointer
+    /// and must return a (potentially fat) pointer to the `RcBox<T>`.
+ #[cfg(not(no_global_oom_handling))]
+ unsafe fn allocate_for_layout(
+ value_layout: Layout,
+ allocate: impl FnOnce(Layout) -> Result<NonNull<[u8]>, AllocError>,
+ mem_to_rcbox: impl FnOnce(*mut u8) -> *mut RcBox<T>,
+ ) -> *mut RcBox<T> {
+ // Calculate layout using the given value layout.
+ // Previously, layout was calculated on the expression
+ // `&*(ptr as *const RcBox<T>)`, but this created a misaligned
+ // reference (see #54908).
+ let layout = Layout::new::<RcBox<()>>().extend(value_layout).unwrap().0.pad_to_align();
+ unsafe {
+ Rc::try_allocate_for_layout(value_layout, allocate, mem_to_rcbox)
+ .unwrap_or_else(|_| handle_alloc_error(layout))
+ }
+ }
+
+ /// Allocates an `RcBox<T>` with sufficient space for
+ /// a possibly-unsized inner value where the value has the layout provided,
+ /// returning an error if allocation fails.
+ ///
+ /// The function `mem_to_rcbox` is called with the data pointer
+    /// and must return a (potentially fat) pointer to the `RcBox<T>`.
+ #[inline]
+ unsafe fn try_allocate_for_layout(
+ value_layout: Layout,
+ allocate: impl FnOnce(Layout) -> Result<NonNull<[u8]>, AllocError>,
+ mem_to_rcbox: impl FnOnce(*mut u8) -> *mut RcBox<T>,
+ ) -> Result<*mut RcBox<T>, AllocError> {
+ // Calculate layout using the given value layout.
+ // Previously, layout was calculated on the expression
+ // `&*(ptr as *const RcBox<T>)`, but this created a misaligned
+ // reference (see #54908).
+ let layout = Layout::new::<RcBox<()>>().extend(value_layout).unwrap().0.pad_to_align();
+
+ // Allocate for the layout.
+ let ptr = allocate(layout)?;
+
+ // Initialize the RcBox
+ let inner = mem_to_rcbox(ptr.as_non_null_ptr().as_ptr());
+ unsafe {
+ debug_assert_eq!(Layout::for_value(&*inner), layout);
+
+ ptr::write(&mut (*inner).strong, Cell::new(1));
+ ptr::write(&mut (*inner).weak, Cell::new(1));
+ }
+
+ Ok(inner)
+ }
+
+ /// Allocates an `RcBox<T>` with sufficient space for an unsized inner value
+ #[cfg(not(no_global_oom_handling))]
+ unsafe fn allocate_for_ptr(ptr: *const T) -> *mut RcBox<T> {
+ // Allocate for the `RcBox<T>` using the given value.
+ unsafe {
+ Self::allocate_for_layout(
+ Layout::for_value(&*ptr),
+ |layout| Global.allocate(layout),
+ |mem| mem.with_metadata_of(ptr as *mut RcBox<T>),
+ )
+ }
+ }
+
+ #[cfg(not(no_global_oom_handling))]
+ fn from_box(v: Box<T>) -> Rc<T> {
+ unsafe {
+ let (box_unique, alloc) = Box::into_unique(v);
+ let bptr = box_unique.as_ptr();
+
+ let value_size = size_of_val(&*bptr);
+ let ptr = Self::allocate_for_ptr(bptr);
+
+ // Copy value as bytes
+ ptr::copy_nonoverlapping(
+ bptr as *const T as *const u8,
+ &mut (*ptr).value as *mut _ as *mut u8,
+ value_size,
+ );
+
+ // Free the allocation without dropping its contents
+ box_free(box_unique, alloc);
+
+ Self::from_ptr(ptr)
+ }
+ }
+}
+
+impl<T> Rc<[T]> {
+ /// Allocates an `RcBox<[T]>` with the given length.
+ #[cfg(not(no_global_oom_handling))]
+ unsafe fn allocate_for_slice(len: usize) -> *mut RcBox<[T]> {
+ unsafe {
+ Self::allocate_for_layout(
+ Layout::array::<T>(len).unwrap(),
+ |layout| Global.allocate(layout),
+ |mem| ptr::slice_from_raw_parts_mut(mem as *mut T, len) as *mut RcBox<[T]>,
+ )
+ }
+ }
+
+ /// Copy elements from slice into newly allocated Rc<\[T\]>
+ ///
+ /// Unsafe because the caller must either take ownership or bind `T: Copy`
+ #[cfg(not(no_global_oom_handling))]
+ unsafe fn copy_from_slice(v: &[T]) -> Rc<[T]> {
+ unsafe {
+ let ptr = Self::allocate_for_slice(v.len());
+ ptr::copy_nonoverlapping(v.as_ptr(), &mut (*ptr).value as *mut [T] as *mut T, v.len());
+ Self::from_ptr(ptr)
+ }
+ }
+
+ /// Constructs an `Rc<[T]>` from an iterator known to be of a certain size.
+ ///
+ /// Behavior is undefined should the size be wrong.
+ #[cfg(not(no_global_oom_handling))]
+ unsafe fn from_iter_exact(iter: impl iter::Iterator<Item = T>, len: usize) -> Rc<[T]> {
+ // Panic guard while cloning T elements.
+ // In the event of a panic, elements that have been written
+ // into the new RcBox will be dropped, then the memory freed.
+ struct Guard<T> {
+ mem: NonNull<u8>,
+ elems: *mut T,
+ layout: Layout,
+ n_elems: usize,
+ }
+
+ impl<T> Drop for Guard<T> {
+ fn drop(&mut self) {
+ unsafe {
+ let slice = from_raw_parts_mut(self.elems, self.n_elems);
+ ptr::drop_in_place(slice);
+
+ Global.deallocate(self.mem, self.layout);
+ }
+ }
+ }
+
+ unsafe {
+ let ptr = Self::allocate_for_slice(len);
+
+ let mem = ptr as *mut _ as *mut u8;
+ let layout = Layout::for_value(&*ptr);
+
+ // Pointer to first element
+ let elems = &mut (*ptr).value as *mut [T] as *mut T;
+
+ let mut guard = Guard { mem: NonNull::new_unchecked(mem), elems, layout, n_elems: 0 };
+
+ for (i, item) in iter.enumerate() {
+ ptr::write(elems.add(i), item);
+ guard.n_elems += 1;
+ }
+
+ // All clear. Forget the guard so it doesn't free the new RcBox.
+ forget(guard);
+
+ Self::from_ptr(ptr)
+ }
+ }
+}
+
+/// Specialization trait used for `From<&[T]>`.
+trait RcFromSlice<T> {
+ fn from_slice(slice: &[T]) -> Self;
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T: Clone> RcFromSlice<T> for Rc<[T]> {
+ #[inline]
+ default fn from_slice(v: &[T]) -> Self {
+ unsafe { Self::from_iter_exact(v.iter().cloned(), v.len()) }
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T: Copy> RcFromSlice<T> for Rc<[T]> {
+ #[inline]
+ fn from_slice(v: &[T]) -> Self {
+ unsafe { Rc::copy_from_slice(v) }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Deref for Rc<T> {
+ type Target = T;
+
+ #[inline(always)]
+ fn deref(&self) -> &T {
+ &self.inner().value
+ }
+}
+
+#[unstable(feature = "receiver_trait", issue = "none")]
+impl<T: ?Sized> Receiver for Rc<T> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<#[may_dangle] T: ?Sized> Drop for Rc<T> {
+ /// Drops the `Rc`.
+ ///
+ /// This will decrement the strong reference count. If the strong reference
+ /// count reaches zero then the only other references (if any) are
+ /// [`Weak`], so we `drop` the inner value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// struct Foo;
+ ///
+ /// impl Drop for Foo {
+ /// fn drop(&mut self) {
+ /// println!("dropped!");
+ /// }
+ /// }
+ ///
+ /// let foo = Rc::new(Foo);
+ /// let foo2 = Rc::clone(&foo);
+ ///
+ /// drop(foo); // Doesn't print anything
+ /// drop(foo2); // Prints "dropped!"
+ /// ```
+ fn drop(&mut self) {
+ unsafe {
+ self.inner().dec_strong();
+ if self.inner().strong() == 0 {
+ // destroy the contained object
+ ptr::drop_in_place(Self::get_mut_unchecked(self));
+
+ // remove the implicit "strong weak" pointer now that we've
+ // destroyed the contents.
+ self.inner().dec_weak();
+
+ if self.inner().weak() == 0 {
+ Global.deallocate(self.ptr.cast(), Layout::for_value(self.ptr.as_ref()));
+ }
+ }
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Clone for Rc<T> {
+ /// Makes a clone of the `Rc` pointer.
+ ///
+ /// This creates another pointer to the same allocation, increasing the
+ /// strong reference count.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// let five = Rc::new(5);
+ ///
+ /// let _ = Rc::clone(&five);
+ /// ```
+ #[inline]
+ fn clone(&self) -> Rc<T> {
+ unsafe {
+ self.inner().inc_strong();
+ Self::from_inner(self.ptr)
+ }
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Default> Default for Rc<T> {
+ /// Creates a new `Rc<T>`, with the `Default` value for `T`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// let x: Rc<i32> = Default::default();
+ /// assert_eq!(*x, 0);
+ /// ```
+ #[inline]
+ fn default() -> Rc<T> {
+ Rc::new(Default::default())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+trait RcEqIdent<T: ?Sized + PartialEq> {
+ fn eq(&self, other: &Rc<T>) -> bool;
+ fn ne(&self, other: &Rc<T>) -> bool;
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + PartialEq> RcEqIdent<T> for Rc<T> {
+ #[inline]
+ default fn eq(&self, other: &Rc<T>) -> bool {
+ **self == **other
+ }
+
+ #[inline]
+ default fn ne(&self, other: &Rc<T>) -> bool {
+ **self != **other
+ }
+}
+
+// Hack to allow specializing on `Eq` even though `Eq` has a method.
+#[rustc_unsafe_specialization_marker]
+pub(crate) trait MarkerEq: PartialEq<Self> {}
+
+impl<T: Eq> MarkerEq for T {}
+
+/// We're doing this specialization here, and not as a more general optimization on `&T`, because it
+/// would otherwise add a cost to all equality checks on refs. We assume that `Rc`s are used to
+/// store large values that are slow to clone but also expensive to check for equality, causing this
+/// cost to pay off more easily. It's also more likely to have two `Rc` clones that point to
+/// the same value than two `&T`s.
+///
+/// We can only do this when `T: Eq`, as a `PartialEq` might be deliberately irreflexive.
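+///
+/// A minimal sketch of the user-visible consequence: `f32` is `PartialEq` but not `Eq`,
+/// so the pointer-equality fast path never applies and `NaN != NaN` is preserved even for
+/// two handles to the same allocation.
+///
+/// ```
+/// use std::rc::Rc;
+///
+/// let nan = Rc::new(f32::NAN);
+/// assert!(nan != Rc::clone(&nan));
+/// ```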
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + MarkerEq> RcEqIdent<T> for Rc<T> {
+ #[inline]
+ fn eq(&self, other: &Rc<T>) -> bool {
+ Rc::ptr_eq(self, other) || **self == **other
+ }
+
+ #[inline]
+ fn ne(&self, other: &Rc<T>) -> bool {
+ !Rc::ptr_eq(self, other) && **self != **other
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + PartialEq> PartialEq for Rc<T> {
+ /// Equality for two `Rc`s.
+ ///
+ /// Two `Rc`s are equal if their inner values are equal, even if they are
+    /// stored in different allocations.
+ ///
+ /// If `T` also implements `Eq` (implying reflexivity of equality),
+ /// two `Rc`s that point to the same allocation are
+ /// always equal.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// let five = Rc::new(5);
+ ///
+ /// assert!(five == Rc::new(5));
+ /// ```
+ #[inline]
+ fn eq(&self, other: &Rc<T>) -> bool {
+ RcEqIdent::eq(self, other)
+ }
+
+ /// Inequality for two `Rc`s.
+ ///
+ /// Two `Rc`s are unequal if their inner values are unequal.
+ ///
+ /// If `T` also implements `Eq` (implying reflexivity of equality),
+ /// two `Rc`s that point to the same allocation are
+ /// never unequal.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// let five = Rc::new(5);
+ ///
+ /// assert!(five != Rc::new(6));
+ /// ```
+ #[inline]
+ fn ne(&self, other: &Rc<T>) -> bool {
+ RcEqIdent::ne(self, other)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + Eq> Eq for Rc<T> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + PartialOrd> PartialOrd for Rc<T> {
+ /// Partial comparison for two `Rc`s.
+ ///
+ /// The two are compared by calling `partial_cmp()` on their inner values.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ /// use std::cmp::Ordering;
+ ///
+ /// let five = Rc::new(5);
+ ///
+ /// assert_eq!(Some(Ordering::Less), five.partial_cmp(&Rc::new(6)));
+ /// ```
+ #[inline(always)]
+ fn partial_cmp(&self, other: &Rc<T>) -> Option<Ordering> {
+ (**self).partial_cmp(&**other)
+ }
+
+ /// Less-than comparison for two `Rc`s.
+ ///
+ /// The two are compared by calling `<` on their inner values.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// let five = Rc::new(5);
+ ///
+ /// assert!(five < Rc::new(6));
+ /// ```
+ #[inline(always)]
+ fn lt(&self, other: &Rc<T>) -> bool {
+ **self < **other
+ }
+
+ /// 'Less than or equal to' comparison for two `Rc`s.
+ ///
+ /// The two are compared by calling `<=` on their inner values.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// let five = Rc::new(5);
+ ///
+ /// assert!(five <= Rc::new(5));
+ /// ```
+ #[inline(always)]
+ fn le(&self, other: &Rc<T>) -> bool {
+ **self <= **other
+ }
+
+ /// Greater-than comparison for two `Rc`s.
+ ///
+ /// The two are compared by calling `>` on their inner values.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// let five = Rc::new(5);
+ ///
+ /// assert!(five > Rc::new(4));
+ /// ```
+ #[inline(always)]
+ fn gt(&self, other: &Rc<T>) -> bool {
+ **self > **other
+ }
+
+ /// 'Greater than or equal to' comparison for two `Rc`s.
+ ///
+ /// The two are compared by calling `>=` on their inner values.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// let five = Rc::new(5);
+ ///
+ /// assert!(five >= Rc::new(5));
+ /// ```
+ #[inline(always)]
+ fn ge(&self, other: &Rc<T>) -> bool {
+ **self >= **other
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + Ord> Ord for Rc<T> {
+ /// Comparison for two `Rc`s.
+ ///
+ /// The two are compared by calling `cmp()` on their inner values.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ /// use std::cmp::Ordering;
+ ///
+ /// let five = Rc::new(5);
+ ///
+ /// assert_eq!(Ordering::Less, five.cmp(&Rc::new(6)));
+ /// ```
+ #[inline]
+ fn cmp(&self, other: &Rc<T>) -> Ordering {
+ (**self).cmp(&**other)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + Hash> Hash for Rc<T> {
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ (**self).hash(state);
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + fmt::Display> fmt::Display for Rc<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(&**self, f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + fmt::Debug> fmt::Debug for Rc<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&**self, f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> fmt::Pointer for Rc<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Pointer::fmt(&(&**self as *const T), f)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "from_for_ptrs", since = "1.6.0")]
+impl<T> From<T> for Rc<T> {
+ /// Converts a generic type `T` into an `Rc<T>`
+ ///
+ /// The conversion allocates on the heap and moves `t`
+ /// from the stack into it.
+ ///
+ /// # Example
+ /// ```rust
+ /// # use std::rc::Rc;
+ /// let x = 5;
+ /// let rc = Rc::new(5);
+ ///
+ /// assert_eq!(Rc::from(x), rc);
+ /// ```
+ fn from(t: T) -> Self {
+ Rc::new(t)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "shared_from_slice", since = "1.21.0")]
+impl<T: Clone> From<&[T]> for Rc<[T]> {
+ /// Allocate a reference-counted slice and fill it by cloning `v`'s items.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # use std::rc::Rc;
+ /// let original: &[i32] = &[1, 2, 3];
+ /// let shared: Rc<[i32]> = Rc::from(original);
+ /// assert_eq!(&[1, 2, 3], &shared[..]);
+ /// ```
+ #[inline]
+ fn from(v: &[T]) -> Rc<[T]> {
+ <Self as RcFromSlice<T>>::from_slice(v)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "shared_from_slice", since = "1.21.0")]
+impl From<&str> for Rc<str> {
+ /// Allocate a reference-counted string slice and copy `v` into it.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # use std::rc::Rc;
+ /// let shared: Rc<str> = Rc::from("statue");
+ /// assert_eq!("statue", &shared[..]);
+ /// ```
+ #[inline]
+ fn from(v: &str) -> Rc<str> {
+ let rc = Rc::<[u8]>::from(v.as_bytes());
+ unsafe { Rc::from_raw(Rc::into_raw(rc) as *const str) }
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "shared_from_slice", since = "1.21.0")]
+impl From<String> for Rc<str> {
+ /// Allocate a reference-counted string slice and copy `v` into it.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # use std::rc::Rc;
+ /// let original: String = "statue".to_owned();
+ /// let shared: Rc<str> = Rc::from(original);
+ /// assert_eq!("statue", &shared[..]);
+ /// ```
+ #[inline]
+ fn from(v: String) -> Rc<str> {
+ Rc::from(&v[..])
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "shared_from_slice", since = "1.21.0")]
+impl<T: ?Sized> From<Box<T>> for Rc<T> {
+ /// Move a boxed object to a new, reference counted, allocation.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # use std::rc::Rc;
+ /// let original: Box<i32> = Box::new(1);
+ /// let shared: Rc<i32> = Rc::from(original);
+ /// assert_eq!(1, *shared);
+ /// ```
+ #[inline]
+ fn from(v: Box<T>) -> Rc<T> {
+ Rc::from_box(v)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "shared_from_slice", since = "1.21.0")]
+impl<T> From<Vec<T>> for Rc<[T]> {
+ /// Allocate a reference-counted slice and move `v`'s items into it.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # use std::rc::Rc;
+    /// let unique: Vec<i32> = vec![1, 2, 3];
+    /// let shared: Rc<[i32]> = Rc::from(unique);
+    /// assert_eq!(&[1, 2, 3], &shared[..]);
+ /// ```
+ #[inline]
+ fn from(mut v: Vec<T>) -> Rc<[T]> {
+ unsafe {
+ let rc = Rc::copy_from_slice(&v);
+
+ // Allow the Vec to free its memory, but not destroy its contents
+ v.set_len(0);
+
+ rc
+ }
+ }
+}
+
+#[stable(feature = "shared_from_cow", since = "1.45.0")]
+impl<'a, B> From<Cow<'a, B>> for Rc<B>
+where
+ B: ToOwned + ?Sized,
+ Rc<B>: From<&'a B> + From<B::Owned>,
+{
+ /// Create a reference-counted pointer from
+ /// a clone-on-write pointer by copying its content.
+ ///
+ /// # Example
+ ///
+ /// ```rust
+ /// # use std::rc::Rc;
+ /// # use std::borrow::Cow;
+ /// let cow: Cow<str> = Cow::Borrowed("eggplant");
+ /// let shared: Rc<str> = Rc::from(cow);
+ /// assert_eq!("eggplant", &shared[..]);
+ /// ```
+ #[inline]
+ fn from(cow: Cow<'a, B>) -> Rc<B> {
+ match cow {
+ Cow::Borrowed(s) => Rc::from(s),
+ Cow::Owned(s) => Rc::from(s),
+ }
+ }
+}
+
+#[stable(feature = "shared_from_str", since = "1.62.0")]
+impl From<Rc<str>> for Rc<[u8]> {
+ /// Converts a reference-counted string slice into a byte slice.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # use std::rc::Rc;
+ /// let string: Rc<str> = Rc::from("eggplant");
+ /// let bytes: Rc<[u8]> = Rc::from(string);
+ /// assert_eq!("eggplant".as_bytes(), bytes.as_ref());
+ /// ```
+ #[inline]
+ fn from(rc: Rc<str>) -> Self {
+ // SAFETY: `str` has the same layout as `[u8]`.
+ unsafe { Rc::from_raw(Rc::into_raw(rc) as *const [u8]) }
+ }
+}
+
+#[stable(feature = "boxed_slice_try_from", since = "1.43.0")]
+impl<T, const N: usize> TryFrom<Rc<[T]>> for Rc<[T; N]> {
+ type Error = Rc<[T]>;
+
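+    /// Attempts to convert a reference-counted slice into a reference-counted array,
+    /// returning the original `Rc<[T]>` unchanged if the lengths do not match.
+    ///
+    /// A minimal sketch:
+    ///
+    /// ```
+    /// use std::rc::Rc;
+    ///
+    /// let slice: Rc<[i32]> = Rc::from(vec![1, 2, 3]);
+    /// let array = <Rc<[i32; 3]>>::try_from(slice).unwrap();
+    /// assert_eq!(*array, [1, 2, 3]);
+    /// ```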
+ fn try_from(boxed_slice: Rc<[T]>) -> Result<Self, Self::Error> {
+ if boxed_slice.len() == N {
+ Ok(unsafe { Rc::from_raw(Rc::into_raw(boxed_slice) as *mut [T; N]) })
+ } else {
+ Err(boxed_slice)
+ }
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "shared_from_iter", since = "1.37.0")]
+impl<T> iter::FromIterator<T> for Rc<[T]> {
+ /// Takes each element in the `Iterator` and collects it into an `Rc<[T]>`.
+ ///
+ /// # Performance characteristics
+ ///
+ /// ## The general case
+ ///
+ /// In the general case, collecting into `Rc<[T]>` is done by first
+ /// collecting into a `Vec<T>`. That is, when writing the following:
+ ///
+ /// ```rust
+ /// # use std::rc::Rc;
+ /// let evens: Rc<[u8]> = (0..10).filter(|&x| x % 2 == 0).collect();
+ /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
+ /// ```
+ ///
+ /// this behaves as if we wrote:
+ ///
+ /// ```rust
+ /// # use std::rc::Rc;
+ /// let evens: Rc<[u8]> = (0..10).filter(|&x| x % 2 == 0)
+ /// .collect::<Vec<_>>() // The first set of allocations happens here.
+ /// .into(); // A second allocation for `Rc<[T]>` happens here.
+ /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
+ /// ```
+ ///
+ /// This will allocate as many times as needed for constructing the `Vec<T>`
+ /// and then it will allocate once for turning the `Vec<T>` into the `Rc<[T]>`.
+ ///
+ /// ## Iterators of known length
+ ///
+ /// When your `Iterator` implements `TrustedLen` and is of an exact size,
+ /// a single allocation will be made for the `Rc<[T]>`. For example:
+ ///
+ /// ```rust
+ /// # use std::rc::Rc;
+ /// let evens: Rc<[u8]> = (0..10).collect(); // Just a single allocation happens here.
+ /// # assert_eq!(&*evens, &*(0..10).collect::<Vec<_>>());
+ /// ```
+ fn from_iter<I: iter::IntoIterator<Item = T>>(iter: I) -> Self {
+ ToRcSlice::to_rc_slice(iter.into_iter())
+ }
+}
+
+/// Specialization trait used for collecting into `Rc<[T]>`.
+#[cfg(not(no_global_oom_handling))]
+trait ToRcSlice<T>: Iterator<Item = T> + Sized {
+ fn to_rc_slice(self) -> Rc<[T]>;
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T, I: Iterator<Item = T>> ToRcSlice<T> for I {
+ default fn to_rc_slice(self) -> Rc<[T]> {
+ self.collect::<Vec<T>>().into()
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T, I: iter::TrustedLen<Item = T>> ToRcSlice<T> for I {
+ fn to_rc_slice(self) -> Rc<[T]> {
+ // This is the case for a `TrustedLen` iterator.
+ let (low, high) = self.size_hint();
+ if let Some(high) = high {
+ debug_assert_eq!(
+ low,
+ high,
+ "TrustedLen iterator's size hint is not exact: {:?}",
+ (low, high)
+ );
+
+ unsafe {
+                // SAFETY: We need the iterator to have an exact length, which the
+                // `TrustedLen` contract and the assertion above guarantee.
+ Rc::from_iter_exact(self, low)
+ }
+ } else {
+            // TrustedLen contract guarantees that `upper_bound == None` implies an iterator
+ // length exceeding `usize::MAX`.
+ // The default implementation would collect into a vec which would panic.
+ // Thus we panic here immediately without invoking `Vec` code.
+ panic!("capacity overflow");
+ }
+ }
+}
+
+/// `Weak` is a version of [`Rc`] that holds a non-owning reference to the
+/// managed allocation. The allocation is accessed by calling [`upgrade`] on the `Weak`
+/// pointer, which returns an <code>[Option]<[Rc]\<T>></code>.
+///
+/// Since a `Weak` reference does not count towards ownership, it will not
+/// prevent the value stored in the allocation from being dropped, and `Weak` itself makes no
+/// guarantees about the value still being present. Thus it may return [`None`]
+/// when [`upgrade`]d. Note however that a `Weak` reference *does* prevent the allocation
+/// itself (the backing store) from being deallocated.
+///
+/// A `Weak` pointer is useful for keeping a temporary reference to the allocation
+/// managed by [`Rc`] without preventing its inner value from being dropped. It is also used to
+/// prevent circular references between [`Rc`] pointers, since mutual owning references
+/// would never allow either [`Rc`] to be dropped. For example, a tree could
+/// have strong [`Rc`] pointers from parent nodes to children, and `Weak`
+/// pointers from children back to their parents.
+///
+/// The typical way to obtain a `Weak` pointer is to call [`Rc::downgrade`].
+///
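+/// # Examples
+///
+/// A minimal sketch of the non-owning behavior: once the last `Rc` is gone, [`upgrade`]
+/// returns [`None`].
+///
+/// ```
+/// use std::rc::Rc;
+///
+/// let strong = Rc::new(5);
+/// let weak = Rc::downgrade(&strong);
+///
+/// assert_eq!(weak.upgrade().map(|rc| *rc), Some(5));
+///
+/// // Dropping the last strong pointer drops the value...
+/// drop(strong);
+/// // ...so the `Weak` can no longer be upgraded.
+/// assert!(weak.upgrade().is_none());
+/// ```
+///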
+/// [`upgrade`]: Weak::upgrade
+#[stable(feature = "rc_weak", since = "1.4.0")]
+pub struct Weak<T: ?Sized> {
+ // This is a `NonNull` to allow optimizing the size of this type in enums,
+ // but it is not necessarily a valid pointer.
+ // `Weak::new` sets this to `usize::MAX` so that it doesn’t need
+ // to allocate space on the heap. That's not a value a real pointer
+ // will ever have because RcBox has alignment at least 2.
+ // This is only possible when `T: Sized`; a `Weak` to an unsized `T` is never dangling.
+ ptr: NonNull<RcBox<T>>,
+}
+
+#[stable(feature = "rc_weak", since = "1.4.0")]
+impl<T: ?Sized> !marker::Send for Weak<T> {}
+#[stable(feature = "rc_weak", since = "1.4.0")]
+impl<T: ?Sized> !marker::Sync for Weak<T> {}
+
+#[unstable(feature = "coerce_unsized", issue = "27732")]
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Weak<U>> for Weak<T> {}
+
+#[unstable(feature = "dispatch_from_dyn", issue = "none")]
+impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Weak<U>> for Weak<T> {}
+
+impl<T> Weak<T> {
+ /// Constructs a new `Weak<T>`, without allocating any memory.
+ /// Calling [`upgrade`] on the return value always gives [`None`].
+ ///
+ /// [`upgrade`]: Weak::upgrade
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Weak;
+ ///
+ /// let empty: Weak<i64> = Weak::new();
+ /// assert!(empty.upgrade().is_none());
+ /// ```
+ #[stable(feature = "downgraded_weak", since = "1.10.0")]
+ #[rustc_const_unstable(feature = "const_weak_new", issue = "95091", reason = "recently added")]
+ #[must_use]
+ pub const fn new() -> Weak<T> {
+ Weak { ptr: unsafe { NonNull::new_unchecked(ptr::invalid_mut::<RcBox<T>>(usize::MAX)) } }
+ }
+}
+
+pub(crate) fn is_dangling<T: ?Sized>(ptr: *mut T) -> bool {
+ (ptr as *mut ()).addr() == usize::MAX
+}
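+
+// For illustration: `Weak::new` stores the sentinel address `usize::MAX` in `ptr`,
+// so `is_dangling` returns `true` for such a `Weak` and `Weak::inner` below returns
+// `None`; a `Weak` obtained from `Rc::downgrade` always points at a real `RcBox`
+// and is therefore never dangling.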
+
+/// Helper type to allow accessing the reference counts without
+/// making any assertions about the data field.
+struct WeakInner<'a> {
+ weak: &'a Cell<usize>,
+ strong: &'a Cell<usize>,
+}
+
+impl<T: ?Sized> Weak<T> {
+ /// Returns a raw pointer to the object `T` pointed to by this `Weak<T>`.
+ ///
+ /// The pointer is valid only if there are some strong references. The pointer may be dangling,
+ /// unaligned or even [`null`] otherwise.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ /// use std::ptr;
+ ///
+ /// let strong = Rc::new("hello".to_owned());
+ /// let weak = Rc::downgrade(&strong);
+ /// // Both point to the same object
+ /// assert!(ptr::eq(&*strong, weak.as_ptr()));
+ /// // The strong here keeps it alive, so we can still access the object.
+ /// assert_eq!("hello", unsafe { &*weak.as_ptr() });
+ ///
+ /// drop(strong);
+ /// // But not any more. We can do weak.as_ptr(), but accessing the pointer would lead to
+ /// // undefined behaviour.
+ /// // assert_eq!("hello", unsafe { &*weak.as_ptr() });
+ /// ```
+ ///
+ /// [`null`]: ptr::null
+ #[must_use]
+ #[stable(feature = "rc_as_ptr", since = "1.45.0")]
+ pub fn as_ptr(&self) -> *const T {
+ let ptr: *mut RcBox<T> = NonNull::as_ptr(self.ptr);
+
+ if is_dangling(ptr) {
+ // If the pointer is dangling, we return the sentinel directly. This cannot be
+ // a valid payload address, as the payload is at least as aligned as RcBox (usize).
+ ptr as *const T
+ } else {
+ // SAFETY: if is_dangling returns false, then the pointer is dereferenceable.
+ // The payload may be dropped at this point, and we have to maintain provenance,
+ // so use raw pointer manipulation.
+ unsafe { ptr::addr_of_mut!((*ptr).value) }
+ }
+ }
+
+ /// Consumes the `Weak<T>` and turns it into a raw pointer.
+ ///
+ /// This converts the weak pointer into a raw pointer, while still preserving the ownership of
+ /// one weak reference (the weak count is not modified by this operation). It can be turned
+ /// back into the `Weak<T>` with [`from_raw`].
+ ///
+ /// The same restrictions of accessing the target of the pointer as with
+ /// [`as_ptr`] apply.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::{Rc, Weak};
+ ///
+ /// let strong = Rc::new("hello".to_owned());
+ /// let weak = Rc::downgrade(&strong);
+ /// let raw = weak.into_raw();
+ ///
+ /// assert_eq!(1, Rc::weak_count(&strong));
+ /// assert_eq!("hello", unsafe { &*raw });
+ ///
+ /// drop(unsafe { Weak::from_raw(raw) });
+ /// assert_eq!(0, Rc::weak_count(&strong));
+ /// ```
+ ///
+ /// [`from_raw`]: Weak::from_raw
+ /// [`as_ptr`]: Weak::as_ptr
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[stable(feature = "weak_into_raw", since = "1.45.0")]
+ pub fn into_raw(self) -> *const T {
+ let result = self.as_ptr();
+ mem::forget(self);
+ result
+ }
+
+ /// Converts a raw pointer previously created by [`into_raw`] back into `Weak<T>`.
+ ///
+ /// This can be used to safely get a strong reference (by calling [`upgrade`]
+ /// later) or to deallocate the weak count by dropping the `Weak<T>`.
+ ///
+ /// It takes ownership of one weak reference (with the exception of pointers created by [`new`],
+ /// as these don't own anything; the method still works on them).
+ ///
+ /// # Safety
+ ///
+ /// The pointer must have originated from [`into_raw`] and must still own its
+ /// potential weak reference.
+ ///
+ /// It is allowed for the strong count to be 0 at the time of calling this. Nevertheless, this
+ /// takes ownership of one weak reference currently represented as a raw pointer (the weak
+ /// count is not modified by this operation) and therefore it must be paired with a previous
+ /// call to [`into_raw`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::{Rc, Weak};
+ ///
+ /// let strong = Rc::new("hello".to_owned());
+ ///
+ /// let raw_1 = Rc::downgrade(&strong).into_raw();
+ /// let raw_2 = Rc::downgrade(&strong).into_raw();
+ ///
+ /// assert_eq!(2, Rc::weak_count(&strong));
+ ///
+ /// assert_eq!("hello", &*unsafe { Weak::from_raw(raw_1) }.upgrade().unwrap());
+ /// assert_eq!(1, Rc::weak_count(&strong));
+ ///
+ /// drop(strong);
+ ///
+ /// // Decrement the last weak count.
+ /// assert!(unsafe { Weak::from_raw(raw_2) }.upgrade().is_none());
+ /// ```
+ ///
+ /// [`into_raw`]: Weak::into_raw
+ /// [`upgrade`]: Weak::upgrade
+ /// [`new`]: Weak::new
+ #[stable(feature = "weak_into_raw", since = "1.45.0")]
+ pub unsafe fn from_raw(ptr: *const T) -> Self {
+ // See Weak::as_ptr for context on how the input pointer is derived.
+
+ let ptr = if is_dangling(ptr as *mut T) {
+ // This is a dangling Weak.
+ ptr as *mut RcBox<T>
+ } else {
+ // Otherwise, we're guaranteed the pointer came from a nondangling Weak.
+ // SAFETY: data_offset is safe to call, as ptr references a real (potentially dropped) T.
+ let offset = unsafe { data_offset(ptr) };
+ // Thus, we reverse the offset to get the whole RcBox.
+ // SAFETY: the pointer originated from a Weak, so this offset is safe.
+ unsafe { ptr.byte_sub(offset) as *mut RcBox<T> }
+ };
+
+ // SAFETY: we now have recovered the original Weak pointer, so can create the Weak.
+ Weak { ptr: unsafe { NonNull::new_unchecked(ptr) } }
+ }
+
+ /// Attempts to upgrade the `Weak` pointer to an [`Rc`], delaying
+ /// dropping of the inner value if successful.
+ ///
+ /// Returns [`None`] if the inner value has since been dropped.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// let five = Rc::new(5);
+ ///
+ /// let weak_five = Rc::downgrade(&five);
+ ///
+ /// let strong_five: Option<Rc<_>> = weak_five.upgrade();
+ /// assert!(strong_five.is_some());
+ ///
+ /// // Destroy all strong pointers.
+ /// drop(strong_five);
+ /// drop(five);
+ ///
+ /// assert!(weak_five.upgrade().is_none());
+ /// ```
+ #[must_use = "this returns a new `Rc`, \
+ without modifying the original weak pointer"]
+ #[stable(feature = "rc_weak", since = "1.4.0")]
+ pub fn upgrade(&self) -> Option<Rc<T>> {
+ let inner = self.inner()?;
+
+ if inner.strong() == 0 {
+ None
+ } else {
+ unsafe {
+ inner.inc_strong();
+ Some(Rc::from_inner(self.ptr))
+ }
+ }
+ }
+
+ /// Gets the number of strong (`Rc`) pointers pointing to this allocation.
+ ///
+ /// If `self` was created using [`Weak::new`], this will return 0.
+ #[must_use]
+ #[stable(feature = "weak_counts", since = "1.41.0")]
+ pub fn strong_count(&self) -> usize {
+ if let Some(inner) = self.inner() { inner.strong() } else { 0 }
+ }
+
+ /// Gets the number of `Weak` pointers pointing to this allocation.
+ ///
+ /// If no strong pointers remain, this will return zero.
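+ ///
+ /// # Examples
+ ///
+ /// A small sketch of how the count behaves (assuming no other clones exist):
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// let five = Rc::new(5);
+ /// let weak_five = Rc::downgrade(&five);
+ ///
+ /// // One `Weak` exists alongside the strong pointer.
+ /// assert_eq!(weak_five.weak_count(), 1);
+ ///
+ /// drop(five);
+ /// // Once no strong pointers remain, the count is reported as zero.
+ /// assert_eq!(weak_five.weak_count(), 0);
+ /// ```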
+ #[must_use]
+ #[stable(feature = "weak_counts", since = "1.41.0")]
+ pub fn weak_count(&self) -> usize {
+ self.inner()
+ .map(|inner| {
+ if inner.strong() > 0 {
+ inner.weak() - 1 // subtract the implicit weak ptr
+ } else {
+ 0
+ }
+ })
+ .unwrap_or(0)
+ }
+
+ /// Returns `None` when the pointer is dangling and there is no allocated `RcBox`
+ /// (i.e., when this `Weak` was created by `Weak::new`).
+ #[inline]
+ fn inner(&self) -> Option<WeakInner<'_>> {
+ if is_dangling(self.ptr.as_ptr()) {
+ None
+ } else {
+ // We are careful to *not* create a reference covering the "data" field, as
+ // the field may be mutated concurrently (for example, if the last `Rc`
+ // is dropped, the data field will be dropped in-place).
+ Some(unsafe {
+ let ptr = self.ptr.as_ptr();
+ WeakInner { strong: &(*ptr).strong, weak: &(*ptr).weak }
+ })
+ }
+ }
+
+ /// Returns `true` if the two `Weak`s point to the same allocation (similar to
+ /// [`ptr::eq`]), or if both don't point to any allocation
+ /// (because they were created with `Weak::new()`).
+ ///
+ /// # Notes
+ ///
+ /// Since this compares pointers, two `Weak`s created by `Weak::new()` will compare
+ /// equal to each other, even though they don't point to any allocation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Rc;
+ ///
+ /// let first_rc = Rc::new(5);
+ /// let first = Rc::downgrade(&first_rc);
+ /// let second = Rc::downgrade(&first_rc);
+ ///
+ /// assert!(first.ptr_eq(&second));
+ ///
+ /// let third_rc = Rc::new(5);
+ /// let third = Rc::downgrade(&third_rc);
+ ///
+ /// assert!(!first.ptr_eq(&third));
+ /// ```
+ ///
+ /// Comparing `Weak::new`.
+ ///
+ /// ```
+ /// use std::rc::{Rc, Weak};
+ ///
+ /// let first = Weak::new();
+ /// let second = Weak::new();
+ /// assert!(first.ptr_eq(&second));
+ ///
+ /// let third_rc = Rc::new(());
+ /// let third = Rc::downgrade(&third_rc);
+ /// assert!(!first.ptr_eq(&third));
+ /// ```
+ #[inline]
+ #[must_use]
+ #[stable(feature = "weak_ptr_eq", since = "1.39.0")]
+ pub fn ptr_eq(&self, other: &Self) -> bool {
+ self.ptr.as_ptr() == other.ptr.as_ptr()
+ }
+}
+
+#[stable(feature = "rc_weak", since = "1.4.0")]
+unsafe impl<#[may_dangle] T: ?Sized> Drop for Weak<T> {
+ /// Drops the `Weak` pointer.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::{Rc, Weak};
+ ///
+ /// struct Foo;
+ ///
+ /// impl Drop for Foo {
+ /// fn drop(&mut self) {
+ /// println!("dropped!");
+ /// }
+ /// }
+ ///
+ /// let foo = Rc::new(Foo);
+ /// let weak_foo = Rc::downgrade(&foo);
+ /// let other_weak_foo = Weak::clone(&weak_foo);
+ ///
+ /// drop(weak_foo); // Doesn't print anything
+ /// drop(foo); // Prints "dropped!"
+ ///
+ /// assert!(other_weak_foo.upgrade().is_none());
+ /// ```
+ fn drop(&mut self) {
+ let inner = if let Some(inner) = self.inner() { inner } else { return };
+
+ inner.dec_weak();
+ // the weak count starts at 1, and will only go to zero if all
+ // the strong pointers have disappeared.
+ if inner.weak() == 0 {
+ unsafe {
+ Global.deallocate(self.ptr.cast(), Layout::for_value_raw(self.ptr.as_ptr()));
+ }
+ }
+ }
+}
+
+#[stable(feature = "rc_weak", since = "1.4.0")]
+impl<T: ?Sized> Clone for Weak<T> {
+ /// Makes a clone of the `Weak` pointer that points to the same allocation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::{Rc, Weak};
+ ///
+ /// let weak_five = Rc::downgrade(&Rc::new(5));
+ ///
+ /// let _ = Weak::clone(&weak_five);
+ /// ```
+ #[inline]
+ fn clone(&self) -> Weak<T> {
+ if let Some(inner) = self.inner() {
+ inner.inc_weak()
+ }
+ Weak { ptr: self.ptr }
+ }
+}
+
+#[stable(feature = "rc_weak", since = "1.4.0")]
+impl<T: ?Sized + fmt::Debug> fmt::Debug for Weak<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "(Weak)")
+ }
+}
+
+#[stable(feature = "downgraded_weak", since = "1.10.0")]
+impl<T> Default for Weak<T> {
+ /// Constructs a new `Weak<T>`, without allocating any memory.
+ /// Calling [`upgrade`] on the return value always gives [`None`].
+ ///
+ /// [`upgrade`]: Weak::upgrade
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::rc::Weak;
+ ///
+ /// let empty: Weak<i64> = Default::default();
+ /// assert!(empty.upgrade().is_none());
+ /// ```
+ fn default() -> Weak<T> {
+ Weak::new()
+ }
+}
+
+// NOTE: We abort on ref-count overflow below to deal with mem::forget safely. In
+// particular, if you mem::forget Rcs (or Weaks), the ref-count can overflow, and
+// then you can free the allocation while outstanding Rcs (or Weaks) exist.
+// We abort because this is such a degenerate scenario that we don't care about
+// what happens -- no real program should ever experience this.
+//
+// This should have negligible overhead since you don't actually need to
+// clone these much in Rust thanks to ownership and move-semantics.
+
+#[doc(hidden)]
+trait RcInnerPtr {
+ fn weak_ref(&self) -> &Cell<usize>;
+ fn strong_ref(&self) -> &Cell<usize>;
+
+ #[inline]
+ fn strong(&self) -> usize {
+ self.strong_ref().get()
+ }
+
+ #[inline]
+ fn inc_strong(&self) {
+ let strong = self.strong();
+
+ // We insert an `assume` here to hint LLVM at an otherwise
+ // missed optimization.
+ // SAFETY: The reference count will never be zero when this is
+ // called.
+ unsafe {
+ core::intrinsics::assume(strong != 0);
+ }
+
+ let strong = strong.wrapping_add(1);
+ self.strong_ref().set(strong);
+
+ // We want to abort on overflow instead of dropping the value.
+ // Checking for overflow after the store instead of before
+ // allows for slightly better code generation.
+ if core::intrinsics::unlikely(strong == 0) {
+ abort();
+ }
+ }
+
+ #[inline]
+ fn dec_strong(&self) {
+ self.strong_ref().set(self.strong() - 1);
+ }
+
+ #[inline]
+ fn weak(&self) -> usize {
+ self.weak_ref().get()
+ }
+
+ #[inline]
+ fn inc_weak(&self) {
+ let weak = self.weak();
+
+ // We insert an `assume` here to hint LLVM at an otherwise
+ // missed optimization.
+ // SAFETY: The reference count will never be zero when this is
+ // called.
+ unsafe {
+ core::intrinsics::assume(weak != 0);
+ }
+
+ let weak = weak.wrapping_add(1);
+ self.weak_ref().set(weak);
+
+ // We want to abort on overflow instead of dropping the value.
+ // Checking for overflow after the store instead of before
+ // allows for slightly better code generation.
+ if core::intrinsics::unlikely(weak == 0) {
+ abort();
+ }
+ }
+
+ #[inline]
+ fn dec_weak(&self) {
+ self.weak_ref().set(self.weak() - 1);
+ }
+}
+
+impl<T: ?Sized> RcInnerPtr for RcBox<T> {
+ #[inline(always)]
+ fn weak_ref(&self) -> &Cell<usize> {
+ &self.weak
+ }
+
+ #[inline(always)]
+ fn strong_ref(&self) -> &Cell<usize> {
+ &self.strong
+ }
+}
+
+impl<'a> RcInnerPtr for WeakInner<'a> {
+ #[inline(always)]
+ fn weak_ref(&self) -> &Cell<usize> {
+ self.weak
+ }
+
+ #[inline(always)]
+ fn strong_ref(&self) -> &Cell<usize> {
+ self.strong
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> borrow::Borrow<T> for Rc<T> {
+ fn borrow(&self) -> &T {
+ &**self
+ }
+}
+
+#[stable(since = "1.5.0", feature = "smart_ptr_as_ref")]
+impl<T: ?Sized> AsRef<T> for Rc<T> {
+ fn as_ref(&self) -> &T {
+ &**self
+ }
+}
+
+#[stable(feature = "pin", since = "1.33.0")]
+impl<T: ?Sized> Unpin for Rc<T> {}
+
+/// Get the offset within an `RcBox` for the payload behind a pointer.
+///
+/// # Safety
+///
+/// The pointer must point to (and have valid metadata for) a previously
+/// valid instance of T, but the T is allowed to be dropped.
+unsafe fn data_offset<T: ?Sized>(ptr: *const T) -> usize {
+ // Align the unsized value to the end of the RcBox.
+ // Because RcBox is repr(C), it will always be the last field in memory.
+ // SAFETY: since the only unsized types possible are slices, trait objects,
+ // and extern types, the input safety requirement is currently enough to
+ // satisfy the requirements of align_of_val_raw; this is an implementation
+ // detail of the language that must not be relied upon outside of std.
+ unsafe { data_offset_align(align_of_val_raw(ptr)) }
+}
+
+#[inline]
+fn data_offset_align(align: usize) -> usize {
+ let layout = Layout::new::<RcBox<()>>();
+ layout.size() + layout.padding_needed_for(align)
+}
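+
+// Worked example (illustrative, assuming a 64-bit target): `RcBox<()>` consists of
+// the two `Cell<usize>` counters, so `Layout::new::<RcBox<()>>()` has size 16 and
+// align 8. A payload aligned to 8 then sits at offset 16 + 0 = 16, while a payload
+// aligned to 32 sits at offset 16 + 16 = 32, so the value always starts right after
+// a suitably padded header.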
diff --git a/library/alloc/src/rc/tests.rs b/library/alloc/src/rc/tests.rs
new file mode 100644
index 000000000..32433cfbd
--- /dev/null
+++ b/library/alloc/src/rc/tests.rs
@@ -0,0 +1,561 @@
+use super::*;
+
+use std::boxed::Box;
+use std::cell::RefCell;
+use std::clone::Clone;
+use std::convert::{From, TryInto};
+use std::mem::drop;
+use std::option::Option::{self, None, Some};
+use std::result::Result::{Err, Ok};
+
+#[test]
+fn test_clone() {
+ let x = Rc::new(RefCell::new(5));
+ let y = x.clone();
+ *x.borrow_mut() = 20;
+ assert_eq!(*y.borrow(), 20);
+}
+
+#[test]
+fn test_simple() {
+ let x = Rc::new(5);
+ assert_eq!(*x, 5);
+}
+
+#[test]
+fn test_simple_clone() {
+ let x = Rc::new(5);
+ let y = x.clone();
+ assert_eq!(*x, 5);
+ assert_eq!(*y, 5);
+}
+
+#[test]
+fn test_destructor() {
+ let x: Rc<Box<_>> = Rc::new(Box::new(5));
+ assert_eq!(**x, 5);
+}
+
+#[test]
+fn test_live() {
+ let x = Rc::new(5);
+ let y = Rc::downgrade(&x);
+ assert!(y.upgrade().is_some());
+}
+
+#[test]
+fn test_dead() {
+ let x = Rc::new(5);
+ let y = Rc::downgrade(&x);
+ drop(x);
+ assert!(y.upgrade().is_none());
+}
+
+#[test]
+fn weak_self_cyclic() {
+ struct Cycle {
+ x: RefCell<Option<Weak<Cycle>>>,
+ }
+
+ let a = Rc::new(Cycle { x: RefCell::new(None) });
+ let b = Rc::downgrade(&a.clone());
+ *a.x.borrow_mut() = Some(b);
+
+ // hopefully we don't double-free (or leak)...
+}
+
+#[test]
+fn is_unique() {
+ let x = Rc::new(3);
+ assert!(Rc::is_unique(&x));
+ let y = x.clone();
+ assert!(!Rc::is_unique(&x));
+ drop(y);
+ assert!(Rc::is_unique(&x));
+ let w = Rc::downgrade(&x);
+ assert!(!Rc::is_unique(&x));
+ drop(w);
+ assert!(Rc::is_unique(&x));
+}
+
+#[test]
+fn test_strong_count() {
+ let a = Rc::new(0);
+ assert!(Rc::strong_count(&a) == 1);
+ let w = Rc::downgrade(&a);
+ assert!(Rc::strong_count(&a) == 1);
+ let b = w.upgrade().expect("upgrade of live rc failed");
+ assert!(Rc::strong_count(&b) == 2);
+ assert!(Rc::strong_count(&a) == 2);
+ drop(w);
+ drop(a);
+ assert!(Rc::strong_count(&b) == 1);
+ let c = b.clone();
+ assert!(Rc::strong_count(&b) == 2);
+ assert!(Rc::strong_count(&c) == 2);
+}
+
+#[test]
+fn test_weak_count() {
+ let a = Rc::new(0);
+ assert!(Rc::strong_count(&a) == 1);
+ assert!(Rc::weak_count(&a) == 0);
+ let w = Rc::downgrade(&a);
+ assert!(Rc::strong_count(&a) == 1);
+ assert!(Rc::weak_count(&a) == 1);
+ drop(w);
+ assert!(Rc::strong_count(&a) == 1);
+ assert!(Rc::weak_count(&a) == 0);
+ let c = a.clone();
+ assert!(Rc::strong_count(&a) == 2);
+ assert!(Rc::weak_count(&a) == 0);
+ drop(c);
+}
+
+#[test]
+fn weak_counts() {
+ assert_eq!(Weak::weak_count(&Weak::<u64>::new()), 0);
+ assert_eq!(Weak::strong_count(&Weak::<u64>::new()), 0);
+
+ let a = Rc::new(0);
+ let w = Rc::downgrade(&a);
+ assert_eq!(Weak::strong_count(&w), 1);
+ assert_eq!(Weak::weak_count(&w), 1);
+ let w2 = w.clone();
+ assert_eq!(Weak::strong_count(&w), 1);
+ assert_eq!(Weak::weak_count(&w), 2);
+ assert_eq!(Weak::strong_count(&w2), 1);
+ assert_eq!(Weak::weak_count(&w2), 2);
+ drop(w);
+ assert_eq!(Weak::strong_count(&w2), 1);
+ assert_eq!(Weak::weak_count(&w2), 1);
+ let a2 = a.clone();
+ assert_eq!(Weak::strong_count(&w2), 2);
+ assert_eq!(Weak::weak_count(&w2), 1);
+ drop(a2);
+ drop(a);
+ assert_eq!(Weak::strong_count(&w2), 0);
+ assert_eq!(Weak::weak_count(&w2), 0);
+ drop(w2);
+}
+
+#[test]
+fn try_unwrap() {
+ let x = Rc::new(3);
+ assert_eq!(Rc::try_unwrap(x), Ok(3));
+ let x = Rc::new(4);
+ let _y = x.clone();
+ assert_eq!(Rc::try_unwrap(x), Err(Rc::new(4)));
+ let x = Rc::new(5);
+ let _w = Rc::downgrade(&x);
+ assert_eq!(Rc::try_unwrap(x), Ok(5));
+}
+
+#[test]
+fn into_from_raw() {
+ let x = Rc::new(Box::new("hello"));
+ let y = x.clone();
+
+ let x_ptr = Rc::into_raw(x);
+ drop(y);
+ unsafe {
+ assert_eq!(**x_ptr, "hello");
+
+ let x = Rc::from_raw(x_ptr);
+ assert_eq!(**x, "hello");
+
+ assert_eq!(Rc::try_unwrap(x).map(|x| *x), Ok("hello"));
+ }
+}
+
+#[test]
+fn test_into_from_raw_unsized() {
+ use std::fmt::Display;
+ use std::string::ToString;
+
+ let rc: Rc<str> = Rc::from("foo");
+
+ let ptr = Rc::into_raw(rc.clone());
+ let rc2 = unsafe { Rc::from_raw(ptr) };
+
+ assert_eq!(unsafe { &*ptr }, "foo");
+ assert_eq!(rc, rc2);
+
+ let rc: Rc<dyn Display> = Rc::new(123);
+
+ let ptr = Rc::into_raw(rc.clone());
+ let rc2 = unsafe { Rc::from_raw(ptr) };
+
+ assert_eq!(unsafe { &*ptr }.to_string(), "123");
+ assert_eq!(rc2.to_string(), "123");
+}
+
+#[test]
+fn into_from_weak_raw() {
+ let x = Rc::new(Box::new("hello"));
+ let y = Rc::downgrade(&x);
+
+ let y_ptr = Weak::into_raw(y);
+ unsafe {
+ assert_eq!(**y_ptr, "hello");
+
+ let y = Weak::from_raw(y_ptr);
+ let y_up = Weak::upgrade(&y).unwrap();
+ assert_eq!(**y_up, "hello");
+ drop(y_up);
+
+ assert_eq!(Rc::try_unwrap(x).map(|x| *x), Ok("hello"));
+ }
+}
+
+#[test]
+fn test_into_from_weak_raw_unsized() {
+ use std::fmt::Display;
+ use std::string::ToString;
+
+ let arc: Rc<str> = Rc::from("foo");
+ let weak: Weak<str> = Rc::downgrade(&arc);
+
+ let ptr = Weak::into_raw(weak.clone());
+ let weak2 = unsafe { Weak::from_raw(ptr) };
+
+ assert_eq!(unsafe { &*ptr }, "foo");
+ assert!(weak.ptr_eq(&weak2));
+
+ let arc: Rc<dyn Display> = Rc::new(123);
+ let weak: Weak<dyn Display> = Rc::downgrade(&arc);
+
+ let ptr = Weak::into_raw(weak.clone());
+ let weak2 = unsafe { Weak::from_raw(ptr) };
+
+ assert_eq!(unsafe { &*ptr }.to_string(), "123");
+ assert!(weak.ptr_eq(&weak2));
+}
+
+#[test]
+fn get_mut() {
+ let mut x = Rc::new(3);
+ *Rc::get_mut(&mut x).unwrap() = 4;
+ assert_eq!(*x, 4);
+ let y = x.clone();
+ assert!(Rc::get_mut(&mut x).is_none());
+ drop(y);
+ assert!(Rc::get_mut(&mut x).is_some());
+ let _w = Rc::downgrade(&x);
+ assert!(Rc::get_mut(&mut x).is_none());
+}
+
+#[test]
+fn test_cowrc_clone_make_unique() {
+ let mut cow0 = Rc::new(75);
+ let mut cow1 = cow0.clone();
+ let mut cow2 = cow1.clone();
+
+ assert!(75 == *Rc::make_mut(&mut cow0));
+ assert!(75 == *Rc::make_mut(&mut cow1));
+ assert!(75 == *Rc::make_mut(&mut cow2));
+
+ *Rc::make_mut(&mut cow0) += 1;
+ *Rc::make_mut(&mut cow1) += 2;
+ *Rc::make_mut(&mut cow2) += 3;
+
+ assert!(76 == *cow0);
+ assert!(77 == *cow1);
+ assert!(78 == *cow2);
+
+ // none should point to the same backing memory
+ assert!(*cow0 != *cow1);
+ assert!(*cow0 != *cow2);
+ assert!(*cow1 != *cow2);
+}
+
+#[test]
+fn test_cowrc_clone_unique2() {
+ let mut cow0 = Rc::new(75);
+ let cow1 = cow0.clone();
+ let cow2 = cow1.clone();
+
+ assert!(75 == *cow0);
+ assert!(75 == *cow1);
+ assert!(75 == *cow2);
+
+ *Rc::make_mut(&mut cow0) += 1;
+
+ assert!(76 == *cow0);
+ assert!(75 == *cow1);
+ assert!(75 == *cow2);
+
+ // cow1 and cow2 should share the same contents
+ // cow0 should have a unique reference
+ assert!(*cow0 != *cow1);
+ assert!(*cow0 != *cow2);
+ assert!(*cow1 == *cow2);
+}
+
+#[test]
+fn test_cowrc_clone_weak() {
+ let mut cow0 = Rc::new(75);
+ let cow1_weak = Rc::downgrade(&cow0);
+
+ assert!(75 == *cow0);
+ assert!(75 == *cow1_weak.upgrade().unwrap());
+
+ *Rc::make_mut(&mut cow0) += 1;
+
+ assert!(76 == *cow0);
+ assert!(cow1_weak.upgrade().is_none());
+}
+
+#[test]
+fn test_show() {
+ let foo = Rc::new(75);
+ assert_eq!(format!("{foo:?}"), "75");
+}
+
+#[test]
+fn test_unsized() {
+ let foo: Rc<[i32]> = Rc::new([1, 2, 3]);
+ assert_eq!(foo, foo.clone());
+}
+
+#[test]
+fn test_maybe_thin_unsized() {
+ // If/when custom thin DSTs exist, this test should be updated to use one
+ use std::ffi::{CStr, CString};
+
+ let x: Rc<CStr> = Rc::from(CString::new("swordfish").unwrap().into_boxed_c_str());
+ assert_eq!(format!("{x:?}"), "\"swordfish\"");
+ let y: Weak<CStr> = Rc::downgrade(&x);
+ drop(x);
+
+ // At this point, the weak points to a dropped DST
+ assert!(y.upgrade().is_none());
+ // But we still need to be able to get the alloc layout to drop.
+ // CStr has no drop glue, but custom DSTs might, and need to work.
+ drop(y);
+}
+
+#[test]
+fn test_from_owned() {
+ let foo = 123;
+ let foo_rc = Rc::from(foo);
+ assert!(123 == *foo_rc);
+}
+
+#[test]
+fn test_new_weak() {
+ let foo: Weak<usize> = Weak::new();
+ assert!(foo.upgrade().is_none());
+}
+
+#[test]
+fn test_ptr_eq() {
+ let five = Rc::new(5);
+ let same_five = five.clone();
+ let other_five = Rc::new(5);
+
+ assert!(Rc::ptr_eq(&five, &same_five));
+ assert!(!Rc::ptr_eq(&five, &other_five));
+}
+
+#[test]
+fn test_from_str() {
+ let r: Rc<str> = Rc::from("foo");
+
+ assert_eq!(&r[..], "foo");
+}
+
+#[test]
+fn test_copy_from_slice() {
+ let s: &[u32] = &[1, 2, 3];
+ let r: Rc<[u32]> = Rc::from(s);
+
+ assert_eq!(&r[..], [1, 2, 3]);
+}
+
+#[test]
+fn test_clone_from_slice() {
+ #[derive(Clone, Debug, Eq, PartialEq)]
+ struct X(u32);
+
+ let s: &[X] = &[X(1), X(2), X(3)];
+ let r: Rc<[X]> = Rc::from(s);
+
+ assert_eq!(&r[..], s);
+}
+
+#[test]
+#[should_panic]
+fn test_clone_from_slice_panic() {
+ use std::string::{String, ToString};
+
+ struct Fail(u32, String);
+
+ impl Clone for Fail {
+ fn clone(&self) -> Fail {
+ if self.0 == 2 {
+ panic!();
+ }
+ Fail(self.0, self.1.clone())
+ }
+ }
+
+ let s: &[Fail] =
+ &[Fail(0, "foo".to_string()), Fail(1, "bar".to_string()), Fail(2, "baz".to_string())];
+
+ // Should panic, but not cause memory corruption
+ let _r: Rc<[Fail]> = Rc::from(s);
+}
+
+#[test]
+fn test_from_box() {
+ let b: Box<u32> = Box::new(123);
+ let r: Rc<u32> = Rc::from(b);
+
+ assert_eq!(*r, 123);
+}
+
+#[test]
+fn test_from_box_str() {
+ use std::string::String;
+
+ let s = String::from("foo").into_boxed_str();
+ let r: Rc<str> = Rc::from(s);
+
+ assert_eq!(&r[..], "foo");
+}
+
+#[test]
+fn test_from_box_slice() {
+ let s = vec![1, 2, 3].into_boxed_slice();
+ let r: Rc<[u32]> = Rc::from(s);
+
+ assert_eq!(&r[..], [1, 2, 3]);
+}
+
+#[test]
+fn test_from_box_trait() {
+ use std::fmt::Display;
+ use std::string::ToString;
+
+ let b: Box<dyn Display> = Box::new(123);
+ let r: Rc<dyn Display> = Rc::from(b);
+
+ assert_eq!(r.to_string(), "123");
+}
+
+#[test]
+fn test_from_box_trait_zero_sized() {
+ use std::fmt::Debug;
+
+ let b: Box<dyn Debug> = Box::new(());
+ let r: Rc<dyn Debug> = Rc::from(b);
+
+ assert_eq!(format!("{r:?}"), "()");
+}
+
+#[test]
+fn test_from_vec() {
+ let v = vec![1, 2, 3];
+ let r: Rc<[u32]> = Rc::from(v);
+
+ assert_eq!(&r[..], [1, 2, 3]);
+}
+
+#[test]
+fn test_downcast() {
+ use std::any::Any;
+
+ let r1: Rc<dyn Any> = Rc::new(i32::MAX);
+ let r2: Rc<dyn Any> = Rc::new("abc");
+
+ assert!(r1.clone().downcast::<u32>().is_err());
+
+ let r1i32 = r1.downcast::<i32>();
+ assert!(r1i32.is_ok());
+ assert_eq!(r1i32.unwrap(), Rc::new(i32::MAX));
+
+ assert!(r2.clone().downcast::<i32>().is_err());
+
+ let r2str = r2.downcast::<&'static str>();
+ assert!(r2str.is_ok());
+ assert_eq!(r2str.unwrap(), Rc::new("abc"));
+}
+
+#[test]
+fn test_array_from_slice() {
+ let v = vec![1, 2, 3];
+ let r: Rc<[u32]> = Rc::from(v);
+
+ let a: Result<Rc<[u32; 3]>, _> = r.clone().try_into();
+ assert!(a.is_ok());
+
+ let a: Result<Rc<[u32; 2]>, _> = r.clone().try_into();
+ assert!(a.is_err());
+}
+
+#[test]
+fn test_rc_cyclic_with_zero_refs() {
+ struct ZeroRefs {
+ inner: Weak<ZeroRefs>,
+ }
+
+ let zero_refs = Rc::new_cyclic(|inner| {
+ assert_eq!(inner.strong_count(), 0);
+ assert!(inner.upgrade().is_none());
+ ZeroRefs { inner: Weak::new() }
+ });
+
+ assert_eq!(Rc::strong_count(&zero_refs), 1);
+ assert_eq!(Rc::weak_count(&zero_refs), 0);
+ assert_eq!(zero_refs.inner.strong_count(), 0);
+ assert_eq!(zero_refs.inner.weak_count(), 0);
+}
+
+#[test]
+fn test_rc_cyclic_with_one_ref() {
+ struct OneRef {
+ inner: Weak<OneRef>,
+ }
+
+ let one_ref = Rc::new_cyclic(|inner| {
+ assert_eq!(inner.strong_count(), 0);
+ assert!(inner.upgrade().is_none());
+ OneRef { inner: inner.clone() }
+ });
+
+ assert_eq!(Rc::strong_count(&one_ref), 1);
+ assert_eq!(Rc::weak_count(&one_ref), 1);
+
+ let one_ref2 = Weak::upgrade(&one_ref.inner).unwrap();
+ assert!(Rc::ptr_eq(&one_ref, &one_ref2));
+
+ assert_eq!(one_ref.inner.strong_count(), 2);
+ assert_eq!(one_ref.inner.weak_count(), 1);
+}
+
+#[test]
+fn test_rc_cyclic_with_two_ref() {
+ struct TwoRefs {
+ inner: Weak<TwoRefs>,
+ inner1: Weak<TwoRefs>,
+ }
+
+ let two_refs = Rc::new_cyclic(|inner| {
+ assert_eq!(inner.strong_count(), 0);
+ assert!(inner.upgrade().is_none());
+ TwoRefs { inner: inner.clone(), inner1: inner.clone() }
+ });
+
+ assert_eq!(Rc::strong_count(&two_refs), 1);
+ assert_eq!(Rc::weak_count(&two_refs), 2);
+
+ let two_ref3 = Weak::upgrade(&two_refs.inner).unwrap();
+ assert!(Rc::ptr_eq(&two_refs, &two_ref3));
+
+ let two_ref2 = Weak::upgrade(&two_refs.inner1).unwrap();
+ assert!(Rc::ptr_eq(&two_refs, &two_ref2));
+
+ assert_eq!(Rc::strong_count(&two_refs), 3);
+ assert_eq!(Rc::weak_count(&two_refs), 2);
+}
diff --git a/library/alloc/src/slice.rs b/library/alloc/src/slice.rs
new file mode 100644
index 000000000..63d4d9452
--- /dev/null
+++ b/library/alloc/src/slice.rs
@@ -0,0 +1,1204 @@
+//! A dynamically-sized view into a contiguous sequence, `[T]`.
+//!
+//! *[See also the slice primitive type](slice).*
+//!
+//! Slices are a view into a block of memory represented as a pointer and a
+//! length.
+//!
+//! ```
+//! // slicing a Vec
+//! let vec = vec![1, 2, 3];
+//! let int_slice = &vec[..];
+//! // coercing an array to a slice
+//! let str_slice: &[&str] = &["one", "two", "three"];
+//! ```
+//!
+//! Slices are either mutable or shared. The shared slice type is `&[T]`,
+//! while the mutable slice type is `&mut [T]`, where `T` represents the element
+//! type. For example, you can mutate the block of memory that a mutable slice
+//! points to:
+//!
+//! ```
+//! let x = &mut [1, 2, 3];
+//! x[1] = 7;
+//! assert_eq!(x, &[1, 7, 3]);
+//! ```
+//!
+//! Here are some of the things this module contains:
+//!
+//! ## Structs
+//!
+//! There are several structs that are useful for slices, such as [`Iter`], which
+//! represents iteration over a slice.
+//!
+//! ## Trait Implementations
+//!
+//! There are several implementations of common traits for slices. Some examples
+//! include:
+//!
+//! * [`Clone`]
+//! * [`Eq`], [`Ord`] - for slices whose element type are [`Eq`] or [`Ord`].
+//! * [`Hash`] - for slices whose element type is [`Hash`].
+//!
+//! ## Iteration
+//!
+//! The slices implement `IntoIterator`. The iterator yields references to the
+//! slice elements.
+//!
+//! ```
+//! let numbers = &[0, 1, 2];
+//! for n in numbers {
+//! println!("{n} is a number!");
+//! }
+//! ```
+//!
+//! The mutable slice yields mutable references to the elements:
+//!
+//! ```
+//! let mut scores = [7, 8, 9];
+//! for score in &mut scores[..] {
+//! *score += 1;
+//! }
+//! ```
+//!
+//! This iterator yields mutable references to the slice's elements, so while
+//! the element type of the slice is `i32`, the element type of the iterator is
+//! `&mut i32`.
+//!
+//! * [`.iter`] and [`.iter_mut`] are the explicit methods to return the default
+//! iterators.
+//! * Further methods that return iterators are [`.split`], [`.splitn`],
+//! [`.chunks`], [`.windows`] and more.
+//!
+//! [`Hash`]: core::hash::Hash
+//! [`.iter`]: slice::iter
+//! [`.iter_mut`]: slice::iter_mut
+//! [`.split`]: slice::split
+//! [`.splitn`]: slice::splitn
+//! [`.chunks`]: slice::chunks
+//! [`.windows`]: slice::windows
+#![stable(feature = "rust1", since = "1.0.0")]
+// Many of the imports in this module are only used in the test configuration.
+// It's cleaner to just turn off the unused_imports warning than to fix them.
+#![cfg_attr(test, allow(unused_imports, dead_code))]
+
+use core::borrow::{Borrow, BorrowMut};
+#[cfg(not(no_global_oom_handling))]
+use core::cmp::Ordering::{self, Less};
+#[cfg(not(no_global_oom_handling))]
+use core::mem;
+#[cfg(not(no_global_oom_handling))]
+use core::mem::size_of;
+#[cfg(not(no_global_oom_handling))]
+use core::ptr;
+
+use crate::alloc::Allocator;
+#[cfg(not(no_global_oom_handling))]
+use crate::alloc::Global;
+#[cfg(not(no_global_oom_handling))]
+use crate::borrow::ToOwned;
+use crate::boxed::Box;
+use crate::vec::Vec;
+
+#[unstable(feature = "slice_range", issue = "76393")]
+pub use core::slice::range;
+#[unstable(feature = "array_chunks", issue = "74985")]
+pub use core::slice::ArrayChunks;
+#[unstable(feature = "array_chunks", issue = "74985")]
+pub use core::slice::ArrayChunksMut;
+#[unstable(feature = "array_windows", issue = "75027")]
+pub use core::slice::ArrayWindows;
+#[stable(feature = "inherent_ascii_escape", since = "1.60.0")]
+pub use core::slice::EscapeAscii;
+#[stable(feature = "slice_get_slice", since = "1.28.0")]
+pub use core::slice::SliceIndex;
+#[stable(feature = "from_ref", since = "1.28.0")]
+pub use core::slice::{from_mut, from_ref};
+#[unstable(feature = "slice_from_ptr_range", issue = "89792")]
+pub use core::slice::{from_mut_ptr_range, from_ptr_range};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::slice::{from_raw_parts, from_raw_parts_mut};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::slice::{Chunks, Windows};
+#[stable(feature = "chunks_exact", since = "1.31.0")]
+pub use core::slice::{ChunksExact, ChunksExactMut};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::slice::{ChunksMut, Split, SplitMut};
+#[unstable(feature = "slice_group_by", issue = "80552")]
+pub use core::slice::{GroupBy, GroupByMut};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::slice::{Iter, IterMut};
+#[stable(feature = "rchunks", since = "1.31.0")]
+pub use core::slice::{RChunks, RChunksExact, RChunksExactMut, RChunksMut};
+#[stable(feature = "slice_rsplit", since = "1.27.0")]
+pub use core::slice::{RSplit, RSplitMut};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::slice::{RSplitN, RSplitNMut, SplitN, SplitNMut};
+#[stable(feature = "split_inclusive", since = "1.51.0")]
+pub use core::slice::{SplitInclusive, SplitInclusiveMut};
+
+////////////////////////////////////////////////////////////////////////////////
+// Basic slice extension methods
+////////////////////////////////////////////////////////////////////////////////
+
+// HACK(japaric) needed for the implementation of `vec!` macro during testing
+// N.B., see the `hack` module in this file for more details.
+#[cfg(test)]
+pub use hack::into_vec;
+
+// HACK(japaric) needed for the implementation of `Vec::clone` during testing
+// N.B., see the `hack` module in this file for more details.
+#[cfg(test)]
+pub use hack::to_vec;
+
+// HACK(japaric): With cfg(test) `impl [T]` is not available, these three
+// functions are actually methods that are in `impl [T]` but not in
+// `core::slice::SliceExt` - we need to supply these functions for the
+// `test_permutations` test
+pub(crate) mod hack {
+ use core::alloc::Allocator;
+
+ use crate::boxed::Box;
+ use crate::vec::Vec;
+
+ // We shouldn't add an inline attribute to this since it is mostly used in the
+ // `vec!` macro, and inlining causes a perf regression. See #71204 for
+ // discussion and perf results.
+ pub fn into_vec<T, A: Allocator>(b: Box<[T], A>) -> Vec<T, A> {
+ unsafe {
+ let len = b.len();
+ let (b, alloc) = Box::into_raw_with_allocator(b);
+ Vec::from_raw_parts_in(b as *mut T, len, len, alloc)
+ }
+ }
+
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ pub fn to_vec<T: ConvertVec, A: Allocator>(s: &[T], alloc: A) -> Vec<T, A> {
+ T::to_vec(s, alloc)
+ }
+
+ #[cfg(not(no_global_oom_handling))]
+ pub trait ConvertVec {
+ fn to_vec<A: Allocator>(s: &[Self], alloc: A) -> Vec<Self, A>
+ where
+ Self: Sized;
+ }
+
+ #[cfg(not(no_global_oom_handling))]
+ impl<T: Clone> ConvertVec for T {
+ #[inline]
+ default fn to_vec<A: Allocator>(s: &[Self], alloc: A) -> Vec<Self, A> {
+ struct DropGuard<'a, T, A: Allocator> {
+ vec: &'a mut Vec<T, A>,
+ num_init: usize,
+ }
+ impl<'a, T, A: Allocator> Drop for DropGuard<'a, T, A> {
+ #[inline]
+ fn drop(&mut self) {
+ // SAFETY:
+ // items were marked initialized in the loop below
+ unsafe {
+ self.vec.set_len(self.num_init);
+ }
+ }
+ }
+ let mut vec = Vec::with_capacity_in(s.len(), alloc);
+ let mut guard = DropGuard { vec: &mut vec, num_init: 0 };
+ let slots = guard.vec.spare_capacity_mut();
+ // .take(slots.len()) is necessary for LLVM to remove bounds checks
+ // and has better codegen than zip.
+ for (i, b) in s.iter().enumerate().take(slots.len()) {
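+ // Record progress before writing so that, if `clone` panics while
+ // cloning element `i`, the guard only considers the `i` elements
+ // already written to be initialized.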
+ guard.num_init = i;
+ slots[i].write(b.clone());
+ }
+ core::mem::forget(guard);
+ // SAFETY:
+ // the vec was allocated and initialized above to at least this length.
+ unsafe {
+ vec.set_len(s.len());
+ }
+ vec
+ }
+ }
+
+ #[cfg(not(no_global_oom_handling))]
+ impl<T: Copy> ConvertVec for T {
+ #[inline]
+ fn to_vec<A: Allocator>(s: &[Self], alloc: A) -> Vec<Self, A> {
+ let mut v = Vec::with_capacity_in(s.len(), alloc);
+ // SAFETY:
+ // allocated above with the capacity of `s`, and initialized to `s.len()` by the
+ // `copy_to_nonoverlapping` call below.
+ unsafe {
+ s.as_ptr().copy_to_nonoverlapping(v.as_mut_ptr(), s.len());
+ v.set_len(s.len());
+ }
+ v
+ }
+ }
+}
+
+#[cfg(not(test))]
+impl<T> [T] {
+ /// Sorts the slice.
+ ///
+ /// This sort is stable (i.e., does not reorder equal elements) and *O*(*n* \* log(*n*)) worst-case.
+ ///
+ /// When applicable, unstable sorting is preferred because it is generally faster than stable
+ /// sorting and it doesn't allocate auxiliary memory.
+ /// See [`sort_unstable`](slice::sort_unstable).
+ ///
+ /// # Current implementation
+ ///
+ /// The current algorithm is an adaptive, iterative merge sort inspired by
+ /// [timsort](https://en.wikipedia.org/wiki/Timsort).
+ /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
+ /// two or more sorted sequences concatenated one after another.
+ ///
+ /// Also, it allocates temporary storage half the size of `self`, but for short slices a
+ /// non-allocating insertion sort is used instead.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = [-5, 4, 1, -3, 2];
+ ///
+ /// v.sort();
+ /// assert!(v == [-5, -3, 1, 2, 4]);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[rustc_allow_incoherent_impl]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn sort(&mut self)
+ where
+ T: Ord,
+ {
+ merge_sort(self, |a, b| a.lt(b));
+ }
+
+ /// Sorts the slice with a comparator function.
+ ///
+ /// This sort is stable (i.e., does not reorder equal elements) and *O*(*n* \* log(*n*)) worst-case.
+ ///
+ /// The comparator function must define a total ordering for the elements in the slice. If
+ /// the ordering is not total, the order of the elements is unspecified. An order is a
+ /// total order if it is (for all `a`, `b` and `c`):
+ ///
+ /// * total and antisymmetric: exactly one of `a < b`, `a == b` or `a > b` is true, and
+ /// * transitive, `a < b` and `b < c` implies `a < c`. The same must hold for both `==` and `>`.
+ ///
+ /// For example, while [`f64`] doesn't implement [`Ord`] because `NaN != NaN`, we can use
+ /// `partial_cmp` as our sort function when we know the slice doesn't contain a `NaN`.
+ ///
+ /// ```
+ /// let mut floats = [5f64, 4.0, 1.0, 3.0, 2.0];
+ /// floats.sort_by(|a, b| a.partial_cmp(b).unwrap());
+ /// assert_eq!(floats, [1.0, 2.0, 3.0, 4.0, 5.0]);
+ /// ```
+ ///
+ /// When applicable, unstable sorting is preferred because it is generally faster than stable
+ /// sorting and it doesn't allocate auxiliary memory.
+ /// See [`sort_unstable_by`](slice::sort_unstable_by).
+ ///
+ /// # Current implementation
+ ///
+ /// The current algorithm is an adaptive, iterative merge sort inspired by
+ /// [timsort](https://en.wikipedia.org/wiki/Timsort).
+ /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
+ /// two or more sorted sequences concatenated one after another.
+ ///
+ /// Also, it allocates temporary storage half the size of `self`, but for short slices a
+ /// non-allocating insertion sort is used instead.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = [5, 4, 1, 3, 2];
+ /// v.sort_by(|a, b| a.cmp(b));
+ /// assert!(v == [1, 2, 3, 4, 5]);
+ ///
+ /// // reverse sorting
+ /// v.sort_by(|a, b| b.cmp(a));
+ /// assert!(v == [5, 4, 3, 2, 1]);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[rustc_allow_incoherent_impl]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn sort_by<F>(&mut self, mut compare: F)
+ where
+ F: FnMut(&T, &T) -> Ordering,
+ {
+ merge_sort(self, |a, b| compare(a, b) == Less);
+ }
+
+ /// Sorts the slice with a key extraction function.
+ ///
+ /// This sort is stable (i.e., does not reorder equal elements) and *O*(*m* \* *n* \* log(*n*))
+ /// worst-case, where the key function is *O*(*m*).
+ ///
+ /// For expensive key functions (e.g. functions that are not simple property accesses or
+ /// basic operations), [`sort_by_cached_key`](slice::sort_by_cached_key) is likely to be
+ /// significantly faster, as it does not recompute element keys.
+ ///
+ /// When applicable, unstable sorting is preferred because it is generally faster than stable
+ /// sorting and it doesn't allocate auxiliary memory.
+ /// See [`sort_unstable_by_key`](slice::sort_unstable_by_key).
+ ///
+ /// # Current implementation
+ ///
+ /// The current algorithm is an adaptive, iterative merge sort inspired by
+ /// [timsort](https://en.wikipedia.org/wiki/Timsort).
+ /// It is designed to be very fast in cases where the slice is nearly sorted, or consists of
+ /// two or more sorted sequences concatenated one after another.
+ ///
+ /// Also, it allocates temporary storage half the size of `self`, but for short slices a
+ /// non-allocating insertion sort is used instead.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = [-5i32, 4, 1, -3, 2];
+ ///
+ /// v.sort_by_key(|k| k.abs());
+ /// assert!(v == [1, 2, -3, 4, -5]);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[rustc_allow_incoherent_impl]
+ #[stable(feature = "slice_sort_by_key", since = "1.7.0")]
+ #[inline]
+ pub fn sort_by_key<K, F>(&mut self, mut f: F)
+ where
+ F: FnMut(&T) -> K,
+ K: Ord,
+ {
+ merge_sort(self, |a, b| f(a).lt(&f(b)));
+ }
+
+ /// Sorts the slice with a key extraction function.
+ ///
+ /// During sorting, the key function is called at most once per element, by using
+ /// temporary storage to remember the results of key evaluation.
+ /// The order of calls to the key function is unspecified and may change in future versions
+ /// of the standard library.
+ ///
+ /// This sort is stable (i.e., does not reorder equal elements) and *O*(*m* \* *n* + *n* \* log(*n*))
+ /// worst-case, where the key function is *O*(*m*).
+ ///
+ /// For simple key functions (e.g., functions that are property accesses or
+ /// basic operations), [`sort_by_key`](slice::sort_by_key) is likely to be
+ /// faster.
+ ///
+ /// # Current implementation
+ ///
+ /// The current algorithm is based on [pattern-defeating quicksort][pdqsort] by Orson Peters,
+ /// which combines the fast average case of randomized quicksort with the fast worst case of
+ /// heapsort, while achieving linear time on slices with certain patterns. It uses some
+ /// randomization to avoid degenerate cases, but with a fixed seed to always provide
+ /// deterministic behavior.
+ ///
+ /// In the worst case, the algorithm allocates temporary storage in a `Vec<(K, usize)>` the
+ /// length of the slice.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = [-5i32, 4, 32, -3, 2];
+ ///
+ /// v.sort_by_cached_key(|k| k.to_string());
+ /// assert!(v == [-3, -5, 2, 32, 4]);
+ /// ```
+ ///
+ /// [pdqsort]: https://github.com/orlp/pdqsort
+ #[cfg(not(no_global_oom_handling))]
+ #[rustc_allow_incoherent_impl]
+ #[stable(feature = "slice_sort_by_cached_key", since = "1.34.0")]
+ #[inline]
+ pub fn sort_by_cached_key<K, F>(&mut self, f: F)
+ where
+ F: FnMut(&T) -> K,
+ K: Ord,
+ {
+ // Helper macro for indexing our vector by the smallest possible type, to reduce allocation.
+ macro_rules! sort_by_key {
+ ($t:ty, $slice:ident, $f:ident) => {{
+ let mut indices: Vec<_> =
+ $slice.iter().map($f).enumerate().map(|(i, k)| (k, i as $t)).collect();
+ // The elements of `indices` are unique, as they are indexed, so any sort will be
+ // stable with respect to the original slice. We use `sort_unstable` here because
+ // it requires less memory allocation.
+ indices.sort_unstable();
+ for i in 0..$slice.len() {
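+ // `indices[i].1` is the original position of the element that belongs
+ // at `i`; if earlier iterations have already swapped that element away,
+ // the loop below follows the chain of recorded swaps to find where it
+ // currently lives before swapping it into place.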
+ let mut index = indices[i].1;
+ while (index as usize) < i {
+ index = indices[index as usize].1;
+ }
+ indices[i].1 = index;
+ $slice.swap(i, index as usize);
+ }
+ }};
+ }
+
+ let sz_u8 = mem::size_of::<(K, u8)>();
+ let sz_u16 = mem::size_of::<(K, u16)>();
+ let sz_u32 = mem::size_of::<(K, u32)>();
+ let sz_usize = mem::size_of::<(K, usize)>();
+
+ let len = self.len();
+ if len < 2 {
+ return;
+ }
+ if sz_u8 < sz_u16 && len <= (u8::MAX as usize) {
+ return sort_by_key!(u8, self, f);
+ }
+ if sz_u16 < sz_u32 && len <= (u16::MAX as usize) {
+ return sort_by_key!(u16, self, f);
+ }
+ if sz_u32 < sz_usize && len <= (u32::MAX as usize) {
+ return sort_by_key!(u32, self, f);
+ }
+ sort_by_key!(usize, self, f)
+ }
+
+ /// Copies `self` into a new `Vec`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let s = [10, 40, 30];
+ /// let x = s.to_vec();
+ /// // Here, `s` and `x` can be modified independently.
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[rustc_allow_incoherent_impl]
+ #[rustc_conversion_suggestion]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn to_vec(&self) -> Vec<T>
+ where
+ T: Clone,
+ {
+ self.to_vec_in(Global)
+ }
+
+ /// Copies `self` into a new `Vec` with an allocator.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let s = [10, 40, 30];
+ /// let x = s.to_vec_in(System);
+ /// // Here, `s` and `x` can be modified independently.
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[rustc_allow_incoherent_impl]
+ #[inline]
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ pub fn to_vec_in<A: Allocator>(&self, alloc: A) -> Vec<T, A>
+ where
+ T: Clone,
+ {
+ // N.B., see the `hack` module in this file for more details.
+ hack::to_vec(self, alloc)
+ }
+
+ /// Converts `self` into a vector without clones or allocation.
+ ///
+ /// The resulting vector can be converted back into a box via
+ /// `Vec<T>`'s `into_boxed_slice` method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let s: Box<[i32]> = Box::new([10, 40, 30]);
+ /// let x = s.into_vec();
+ /// // `s` cannot be used anymore because it has been converted into `x`.
+ ///
+ /// assert_eq!(x, vec![10, 40, 30]);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn into_vec<A: Allocator>(self: Box<Self, A>) -> Vec<T, A> {
+ // N.B., see the `hack` module in this file for more details.
+ hack::into_vec(self)
+ }
+
+ /// Creates a vector by repeating a slice `n` times.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if the capacity would overflow.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert_eq!([1, 2].repeat(3), vec![1, 2, 1, 2, 1, 2]);
+ /// ```
+ ///
+ /// A panic upon overflow:
+ ///
+ /// ```should_panic
+ /// // this will panic at runtime
+ /// b"0123456789abcdef".repeat(usize::MAX);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "repeat_generic_slice", since = "1.40.0")]
+ pub fn repeat(&self, n: usize) -> Vec<T>
+ where
+ T: Copy,
+ {
+ if n == 0 {
+ return Vec::new();
+ }
+
+ // If `n` is larger than zero, it can be split as
+ // `n = 2^expn + rem (2^expn > rem, expn >= 0, rem >= 0)`.
+ // `2^expn` is the number represented by the leftmost '1' bit of `n`,
+ // and `rem` is the remaining part of `n`.
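+ //
+ // For example (illustrative): with `self.len() == 3` and `n == 5`, `5 = 4 + 1`,
+ // so `buf` is doubled twice (3 -> 6 -> 12 elements) and the remaining `rem = 1`
+ // repetition (3 more elements) is copied from the front of `buf`, for 15 in total.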
+
+ // Using `Vec` to access `set_len()`.
+ let capacity = self.len().checked_mul(n).expect("capacity overflow");
+ let mut buf = Vec::with_capacity(capacity);
+
+ // `2^expn` repetition is done by doubling `buf` `expn`-times.
+ buf.extend(self);
+ {
+ let mut m = n >> 1;
+ // If `m > 0`, there are remaining bits up to the leftmost '1'.
+ while m > 0 {
+ // `buf.extend(buf)`:
+ unsafe {
+ ptr::copy_nonoverlapping(
+ buf.as_ptr(),
+ (buf.as_mut_ptr() as *mut T).add(buf.len()),
+ buf.len(),
+ );
+ // `buf` has capacity of `self.len() * n`.
+ let buf_len = buf.len();
+ buf.set_len(buf_len * 2);
+ }
+
+ m >>= 1;
+ }
+ }
+
+ // `rem` (`= n - 2^expn`) repetition is done by copying
+ // first `rem` repetitions from `buf` itself.
+ let rem_len = capacity - buf.len(); // `self.len() * rem`
+ if rem_len > 0 {
+ // `buf.extend(buf[0 .. rem_len])`:
+ unsafe {
+ // This is non-overlapping since `2^expn > rem`.
+ ptr::copy_nonoverlapping(
+ buf.as_ptr(),
+ (buf.as_mut_ptr() as *mut T).add(buf.len()),
+ rem_len,
+ );
+ // `buf.len() + rem_len` equals `buf.capacity()` (`= self.len() * n`).
+ buf.set_len(capacity);
+ }
+ }
+ buf
+ }
+
+ /// Flattens a slice of `T` into a single value `Self::Output`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!(["hello", "world"].concat(), "helloworld");
+ /// assert_eq!([[1, 2], [3, 4]].concat(), [1, 2, 3, 4]);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn concat<Item: ?Sized>(&self) -> <Self as Concat<Item>>::Output
+ where
+ Self: Concat<Item>,
+ {
+ Concat::concat(self)
+ }
+
+ /// Flattens a slice of `T` into a single value `Self::Output`, placing a
+ /// given separator between each.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!(["hello", "world"].join(" "), "hello world");
+ /// assert_eq!([[1, 2], [3, 4]].join(&0), [1, 2, 0, 3, 4]);
+ /// assert_eq!([[1, 2], [3, 4]].join(&[0, 0][..]), [1, 2, 0, 0, 3, 4]);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[stable(feature = "rename_connect_to_join", since = "1.3.0")]
+ pub fn join<Separator>(&self, sep: Separator) -> <Self as Join<Separator>>::Output
+ where
+ Self: Join<Separator>,
+ {
+ Join::join(self, sep)
+ }
+
+ /// Flattens a slice of `T` into a single value `Self::Output`, placing a
+ /// given separator between each.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #![allow(deprecated)]
+ /// assert_eq!(["hello", "world"].connect(" "), "hello world");
+ /// assert_eq!([[1, 2], [3, 4]].connect(&0), [1, 2, 0, 3, 4]);
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[deprecated(since = "1.3.0", note = "renamed to join")]
+ pub fn connect<Separator>(&self, sep: Separator) -> <Self as Join<Separator>>::Output
+ where
+ Self: Join<Separator>,
+ {
+ Join::join(self, sep)
+ }
+}
+
+#[cfg(not(test))]
+impl [u8] {
+ /// Returns a vector containing a copy of this slice where each byte
+ /// is mapped to its ASCII upper case equivalent.
+ ///
+ /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
+ /// but non-ASCII letters are unchanged.
+ ///
+ /// To uppercase the value in-place, use [`make_ascii_uppercase`].
+ ///
+ /// [`make_ascii_uppercase`]: slice::make_ascii_uppercase
+ #[cfg(not(no_global_oom_handling))]
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "this returns the uppercase bytes as a new Vec, \
+ without modifying the original"]
+ #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
+ #[inline]
+ pub fn to_ascii_uppercase(&self) -> Vec<u8> {
+ let mut me = self.to_vec();
+ me.make_ascii_uppercase();
+ me
+ }
+
+ /// Returns a vector containing a copy of this slice where each byte
+ /// is mapped to its ASCII lower case equivalent.
+ ///
+ /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
+ /// but non-ASCII letters are unchanged.
+ ///
+ /// To lowercase the value in-place, use [`make_ascii_lowercase`].
+ ///
+ /// [`make_ascii_lowercase`]: slice::make_ascii_lowercase
+ #[cfg(not(no_global_oom_handling))]
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "this returns the lowercase bytes as a new Vec, \
+ without modifying the original"]
+ #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
+ #[inline]
+ pub fn to_ascii_lowercase(&self) -> Vec<u8> {
+ let mut me = self.to_vec();
+ me.make_ascii_lowercase();
+ me
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Extension traits for slices over specific kinds of data
+////////////////////////////////////////////////////////////////////////////////
+
+/// Helper trait for [`[T]::concat`](slice::concat).
+///
+/// Note: the `Item` type parameter is not used in this trait,
+/// but it allows impls to be more generic.
+/// Without it, we get this error:
+///
+/// ```error
+/// error[E0207]: the type parameter `T` is not constrained by the impl trait, self type, or predica
+/// --> src/liballoc/slice.rs:608:6
+/// |
+/// 608 | impl<T: Clone, V: Borrow<[T]>> Concat for [V] {
+/// | ^ unconstrained type parameter
+/// ```
+///
+/// This is because there could exist `V` types with multiple `Borrow<[_]>` impls,
+/// such that multiple `T` types would apply:
+///
+/// ```
+/// # #[allow(dead_code)]
+/// pub struct Foo(Vec<u32>, Vec<String>);
+///
+/// impl std::borrow::Borrow<[u32]> for Foo {
+/// fn borrow(&self) -> &[u32] { &self.0 }
+/// }
+///
+/// impl std::borrow::Borrow<[String]> for Foo {
+/// fn borrow(&self) -> &[String] { &self.1 }
+/// }
+/// ```
+#[unstable(feature = "slice_concat_trait", issue = "27747")]
+pub trait Concat<Item: ?Sized> {
+ #[unstable(feature = "slice_concat_trait", issue = "27747")]
+ /// The resulting type after concatenation
+ type Output;
+
+ /// Implementation of [`[T]::concat`](slice::concat)
+ #[unstable(feature = "slice_concat_trait", issue = "27747")]
+ fn concat(slice: &Self) -> Self::Output;
+}
+
+/// Helper trait for [`[T]::join`](slice::join)
+#[unstable(feature = "slice_concat_trait", issue = "27747")]
+pub trait Join<Separator> {
+ #[unstable(feature = "slice_concat_trait", issue = "27747")]
+ /// The resulting type after concatenation
+ type Output;
+
+ /// Implementation of [`[T]::join`](slice::join)
+ #[unstable(feature = "slice_concat_trait", issue = "27747")]
+ fn join(slice: &Self, sep: Separator) -> Self::Output;
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[unstable(feature = "slice_concat_ext", issue = "27747")]
+impl<T: Clone, V: Borrow<[T]>> Concat<T> for [V] {
+ type Output = Vec<T>;
+
+ fn concat(slice: &Self) -> Vec<T> {
+ let size = slice.iter().map(|slice| slice.borrow().len()).sum();
+ let mut result = Vec::with_capacity(size);
+ for v in slice {
+ result.extend_from_slice(v.borrow())
+ }
+ result
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[unstable(feature = "slice_concat_ext", issue = "27747")]
+impl<T: Clone, V: Borrow<[T]>> Join<&T> for [V] {
+ type Output = Vec<T>;
+
+ fn join(slice: &Self, sep: &T) -> Vec<T> {
+ let mut iter = slice.iter();
+ let first = match iter.next() {
+ Some(first) => first,
+ None => return vec![],
+ };
+ let size = slice.iter().map(|v| v.borrow().len()).sum::<usize>() + slice.len() - 1;
+ let mut result = Vec::with_capacity(size);
+ result.extend_from_slice(first.borrow());
+
+ for v in iter {
+ result.push(sep.clone());
+ result.extend_from_slice(v.borrow())
+ }
+ result
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[unstable(feature = "slice_concat_ext", issue = "27747")]
+impl<T: Clone, V: Borrow<[T]>> Join<&[T]> for [V] {
+ type Output = Vec<T>;
+
+ fn join(slice: &Self, sep: &[T]) -> Vec<T> {
+ let mut iter = slice.iter();
+ let first = match iter.next() {
+ Some(first) => first,
+ None => return vec![],
+ };
+ let size =
+ slice.iter().map(|v| v.borrow().len()).sum::<usize>() + sep.len() * (slice.len() - 1);
+ let mut result = Vec::with_capacity(size);
+ result.extend_from_slice(first.borrow());
+
+ for v in iter {
+ result.extend_from_slice(sep);
+ result.extend_from_slice(v.borrow())
+ }
+ result
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Standard trait implementations for slices
+////////////////////////////////////////////////////////////////////////////////
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator> Borrow<[T]> for Vec<T, A> {
+ fn borrow(&self) -> &[T] {
+ &self[..]
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator> BorrowMut<[T]> for Vec<T, A> {
+ fn borrow_mut(&mut self) -> &mut [T] {
+ &mut self[..]
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Clone> ToOwned for [T] {
+ type Owned = Vec<T>;
+ #[cfg(not(test))]
+ fn to_owned(&self) -> Vec<T> {
+ self.to_vec()
+ }
+
+ #[cfg(test)]
+ fn to_owned(&self) -> Vec<T> {
+ hack::to_vec(self, Global)
+ }
+
+ fn clone_into(&self, target: &mut Vec<T>) {
+ // drop anything in target that will not be overwritten
+ target.truncate(self.len());
+
+ // target.len <= self.len due to the truncate above, so the
+ // slices here are always in-bounds.
+ let (init, tail) = self.split_at(target.len());
+
+ // reuse the contained values' allocations/resources.
+ target.clone_from_slice(init);
+ target.extend_from_slice(tail);
+ }
+}
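+
+// Editorial sketch (illustrative, not part of the upstream source): `clone_into`
+// lets callers reuse an existing allocation instead of cloning into a fresh one:
+//
+//     let src: &[u8] = &[1, 2, 3];
+//     let mut buf = vec![0_u8; 16];   // a Vec that already owns an allocation
+//     src.clone_into(&mut buf);       // truncates and overwrites in place
+//     assert_eq!(buf, [1, 2, 3]);
+//     assert!(buf.capacity() >= 16);  // the old allocation is kept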
+
+////////////////////////////////////////////////////////////////////////////////
+// Sorting
+////////////////////////////////////////////////////////////////////////////////
+
+/// Inserts `v[0]` into the pre-sorted sequence `v[1..]` so that the whole `v[..]` becomes sorted.
+///
+/// This is the integral subroutine of insertion sort.
+#[cfg(not(no_global_oom_handling))]
+fn insert_head<T, F>(v: &mut [T], is_less: &mut F)
+where
+ F: FnMut(&T, &T) -> bool,
+{
+ if v.len() >= 2 && is_less(&v[1], &v[0]) {
+ unsafe {
+ // There are three ways to implement insertion here:
+ //
+ // 1. Swap adjacent elements until the first one gets to its final destination.
+ // However, this way we copy data around more than is necessary. If elements are big
+ // structures (costly to copy), this method will be slow.
+ //
+ // 2. Iterate until the right place for the first element is found. Then shift the
+ // elements succeeding it to make room for it and finally place it into the
+ // remaining hole. This is a good method.
+ //
+ // 3. Copy the first element into a temporary variable. Iterate until the right place
+ // for it is found. As we go along, copy every traversed element into the slot
+ // preceding it. Finally, copy data from the temporary variable into the remaining
+ // hole. This method is very good. Benchmarks demonstrated slightly better
+ // performance than with the 2nd method.
+ //
+ // All methods were benchmarked, and the 3rd showed best results. So we chose that one.
+ let tmp = mem::ManuallyDrop::new(ptr::read(&v[0]));
+
+ // Intermediate state of the insertion process is always tracked by `hole`, which
+ // serves two purposes:
+ // 1. Protects integrity of `v` from panics in `is_less`.
+ // 2. Fills the remaining hole in `v` in the end.
+ //
+ // Panic safety:
+ //
+ // If `is_less` panics at any point during the process, `hole` will get dropped and
+ // fill the hole in `v` with `tmp`, thus ensuring that `v` still holds every object it
+ // initially held exactly once.
+ let mut hole = InsertionHole { src: &*tmp, dest: &mut v[1] };
+ ptr::copy_nonoverlapping(&v[1], &mut v[0], 1);
+
+ for i in 2..v.len() {
+ if !is_less(&v[i], &*tmp) {
+ break;
+ }
+ ptr::copy_nonoverlapping(&v[i], &mut v[i - 1], 1);
+ hole.dest = &mut v[i];
+ }
+ // `hole` gets dropped and thus copies `tmp` into the remaining hole in `v`.
+ }
+ }
+
+ // When dropped, copies from `src` into `dest`.
+ struct InsertionHole<T> {
+ src: *const T,
+ dest: *mut T,
+ }
+
+ impl<T> Drop for InsertionHole<T> {
+ fn drop(&mut self) {
+ unsafe {
+ ptr::copy_nonoverlapping(self.src, self.dest, 1);
+ }
+ }
+ }
+}
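+
+// Editorial illustration (not from upstream): with `v = [3, 1, 2]` and a plain `<`
+// comparison, `insert_head` reads 3 into `tmp`, shifts 1 and 2 one slot to the left,
+// and the `InsertionHole` drop then writes 3 into the freed slot, giving [1, 2, 3].
+// Had `is_less` panicked part-way through, the same drop would still have filled the
+// hole, so `v` never loses or duplicates an element.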
+
+/// Merges non-decreasing runs `v[..mid]` and `v[mid..]` using `buf` as temporary storage, and
+/// stores the result into `v[..]`.
+///
+/// # Safety
+///
+/// The two slices must be non-empty and `mid` must be in bounds. Buffer `buf` must be long enough
+/// to hold a copy of the shorter slice. Also, `T` must not be a zero-sized type.
+#[cfg(not(no_global_oom_handling))]
+unsafe fn merge<T, F>(v: &mut [T], mid: usize, buf: *mut T, is_less: &mut F)
+where
+ F: FnMut(&T, &T) -> bool,
+{
+ let len = v.len();
+ let v = v.as_mut_ptr();
+ let (v_mid, v_end) = unsafe { (v.add(mid), v.add(len)) };
+
+ // The merge process first copies the shorter run into `buf`. Then it traces the newly copied
+ // run and the longer run forwards (or backwards), comparing their next unconsumed elements and
+ // copying the lesser (or greater) one into `v`.
+ //
+ // As soon as the shorter run is fully consumed, the process is done. If the longer run gets
+ // consumed first, then we must copy whatever is left of the shorter run into the remaining
+ // hole in `v`.
+ //
+ // Intermediate state of the process is always tracked by `hole`, which serves two purposes:
+ // 1. Protects integrity of `v` from panics in `is_less`.
+ // 2. Fills the remaining hole in `v` if the longer run gets consumed first.
+ //
+ // Panic safety:
+ //
+ // If `is_less` panics at any point during the process, `hole` will get dropped and fill the
+ // hole in `v` with the unconsumed range in `buf`, thus ensuring that `v` still holds every
+ // object it initially held exactly once.
+ let mut hole;
+
+ if mid <= len - mid {
+ // The left run is shorter.
+ unsafe {
+ ptr::copy_nonoverlapping(v, buf, mid);
+ hole = MergeHole { start: buf, end: buf.add(mid), dest: v };
+ }
+
+ // Initially, these pointers point to the beginnings of their arrays.
+ let left = &mut hole.start;
+ let mut right = v_mid;
+ let out = &mut hole.dest;
+
+ while *left < hole.end && right < v_end {
+ // Consume the lesser side.
+ // If equal, prefer the left run to maintain stability.
+ unsafe {
+ let to_copy = if is_less(&*right, &**left) {
+ get_and_increment(&mut right)
+ } else {
+ get_and_increment(left)
+ };
+ ptr::copy_nonoverlapping(to_copy, get_and_increment(out), 1);
+ }
+ }
+ } else {
+ // The right run is shorter.
+ unsafe {
+ ptr::copy_nonoverlapping(v_mid, buf, len - mid);
+ hole = MergeHole { start: buf, end: buf.add(len - mid), dest: v_mid };
+ }
+
+ // Initially, these pointers point past the ends of their arrays.
+ let left = &mut hole.dest;
+ let right = &mut hole.end;
+ let mut out = v_end;
+
+ while v < *left && buf < *right {
+ // Consume the greater side.
+ // If equal, prefer the right run to maintain stability.
+ unsafe {
+ let to_copy = if is_less(&*right.offset(-1), &*left.offset(-1)) {
+ decrement_and_get(left)
+ } else {
+ decrement_and_get(right)
+ };
+ ptr::copy_nonoverlapping(to_copy, decrement_and_get(&mut out), 1);
+ }
+ }
+ }
+ // Finally, `hole` gets dropped. If the shorter run was not fully consumed, whatever remains of
+ // it will now be copied into the hole in `v`.
+
+ unsafe fn get_and_increment<T>(ptr: &mut *mut T) -> *mut T {
+ let old = *ptr;
+ *ptr = unsafe { ptr.offset(1) };
+ old
+ }
+
+ unsafe fn decrement_and_get<T>(ptr: &mut *mut T) -> *mut T {
+ *ptr = unsafe { ptr.offset(-1) };
+ *ptr
+ }
+
+ // When dropped, copies the range `start..end` into `dest..`.
+ struct MergeHole<T> {
+ start: *mut T,
+ end: *mut T,
+ dest: *mut T,
+ }
+
+ impl<T> Drop for MergeHole<T> {
+ fn drop(&mut self) {
+ // `T` is not a zero-sized type, and these are pointers into a slice's elements.
+ unsafe {
+ let len = self.end.sub_ptr(self.start);
+ ptr::copy_nonoverlapping(self.start, self.dest, len);
+ }
+ }
+ }
+}
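+
+// Editorial illustration (not from upstream): merging `v = [1, 3, 2, 4]` with
+// `mid = 2` takes the first branch (`mid <= len - mid`), copies the left run [1, 3]
+// into `buf`, and then repeatedly writes the smaller front element back into `v`,
+// yielding [1, 2, 3, 4]. Had `is_less` panicked part-way through, dropping
+// `MergeHole` would copy the unconsumed part of `buf` into the hole, so `v` would
+// still hold each original element exactly once.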
+
+/// This merge sort borrows some (but not all) ideas from TimSort, which is described in detail
+/// [here](https://github.com/python/cpython/blob/main/Objects/listsort.txt).
+///
+/// The algorithm identifies strictly descending and non-descending subsequences, which are called
+/// natural runs. There is a stack of pending runs yet to be merged. Each newly found run is pushed
+/// onto the stack, and then some pairs of adjacent runs are merged until these two invariants are
+/// satisfied:
+///
+/// 1. for every `i` in `1..runs.len()`: `runs[i - 1].len > runs[i].len`
+/// 2. for every `i` in `2..runs.len()`: `runs[i - 2].len > runs[i - 1].len + runs[i].len`
+///
+/// The invariants ensure that the total running time is *O*(*n* \* log(*n*)) worst-case.
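+///
+/// As an editorial illustration (not from upstream): a run-length stack of
+/// `[64, 30, 20, 9]` (oldest run first) satisfies both invariants, while pushing a
+/// new run of length 15 would violate invariant 1 (`9 > 15` fails), so adjacent runs
+/// are merged until both invariants hold again before scanning continues.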
+#[cfg(not(no_global_oom_handling))]
+fn merge_sort<T, F>(v: &mut [T], mut is_less: F)
+where
+ F: FnMut(&T, &T) -> bool,
+{
+ // Slices of up to this length get sorted using insertion sort.
+ const MAX_INSERTION: usize = 20;
+ // Very short runs are extended using insertion sort to span at least this many elements.
+ const MIN_RUN: usize = 10;
+
+ // Sorting has no meaningful behavior on zero-sized types.
+ if size_of::<T>() == 0 {
+ return;
+ }
+
+ let len = v.len();
+
+ // Short arrays get sorted in-place via insertion sort to avoid allocations.
+ if len <= MAX_INSERTION {
+ if len >= 2 {
+ for i in (0..len - 1).rev() {
+ insert_head(&mut v[i..], &mut is_less);
+ }
+ }
+ return;
+ }
+
+    // Allocate a buffer to use as scratch memory. We keep the length 0 so that it can hold
+    // shallow copies of the contents of `v` without the dtors running on those copies if
+    // `is_less` panics. When merging two sorted runs, this buffer holds a copy of the shorter
+    // run, which will always have length at most `len / 2`.
+ let mut buf = Vec::with_capacity(len / 2);
+
+ // In order to identify natural runs in `v`, we traverse it backwards. That might seem like a
+ // strange decision, but consider the fact that merges more often go in the opposite direction
+ // (forwards). According to benchmarks, merging forwards is slightly faster than merging
+ // backwards. To conclude, identifying runs by traversing backwards improves performance.
+ let mut runs = vec![];
+ let mut end = len;
+ while end > 0 {
+ // Find the next natural run, and reverse it if it's strictly descending.
+ let mut start = end - 1;
+ if start > 0 {
+ start -= 1;
+ unsafe {
+ if is_less(v.get_unchecked(start + 1), v.get_unchecked(start)) {
+ while start > 0 && is_less(v.get_unchecked(start), v.get_unchecked(start - 1)) {
+ start -= 1;
+ }
+ v[start..end].reverse();
+ } else {
+ while start > 0 && !is_less(v.get_unchecked(start), v.get_unchecked(start - 1))
+ {
+ start -= 1;
+ }
+ }
+ }
+ }
+
+ // Insert some more elements into the run if it's too short. Insertion sort is faster than
+ // merge sort on short sequences, so this significantly improves performance.
+ while start > 0 && end - start < MIN_RUN {
+ start -= 1;
+ insert_head(&mut v[start..end], &mut is_less);
+ }
+
+ // Push this run onto the stack.
+ runs.push(Run { start, len: end - start });
+ end = start;
+
+ // Merge some pairs of adjacent runs to satisfy the invariants.
+ while let Some(r) = collapse(&runs) {
+ let left = runs[r + 1];
+ let right = runs[r];
+ unsafe {
+ merge(
+ &mut v[left.start..right.start + right.len],
+ left.len,
+ buf.as_mut_ptr(),
+ &mut is_less,
+ );
+ }
+ runs[r] = Run { start: left.start, len: left.len + right.len };
+ runs.remove(r + 1);
+ }
+ }
+
+ // Finally, exactly one run must remain in the stack.
+ debug_assert!(runs.len() == 1 && runs[0].start == 0 && runs[0].len == len);
+
+ // Examines the stack of runs and identifies the next pair of runs to merge. More specifically,
+ // if `Some(r)` is returned, that means `runs[r]` and `runs[r + 1]` must be merged next. If the
+ // algorithm should continue building a new run instead, `None` is returned.
+ //
+ // TimSort is infamous for its buggy implementations, as described here:
+ // http://envisage-project.eu/timsort-specification-and-verification/
+ //
+ // The gist of the story is: we must enforce the invariants on the top four runs on the stack.
+    // Enforcing them on just the top three is not sufficient to ensure that the invariants will still
+ // hold for *all* runs in the stack.
+ //
+ // This function correctly checks invariants for the top four runs. Additionally, if the top
+ // run starts at index 0, it will always demand a merge operation until the stack is fully
+ // collapsed, in order to complete the sort.
+ #[inline]
+ fn collapse(runs: &[Run]) -> Option<usize> {
+ let n = runs.len();
+ if n >= 2
+ && (runs[n - 1].start == 0
+ || runs[n - 2].len <= runs[n - 1].len
+ || (n >= 3 && runs[n - 3].len <= runs[n - 2].len + runs[n - 1].len)
+ || (n >= 4 && runs[n - 4].len <= runs[n - 3].len + runs[n - 2].len))
+ {
+ if n >= 3 && runs[n - 3].len < runs[n - 1].len { Some(n - 3) } else { Some(n - 2) }
+ } else {
+ None
+ }
+ }
+
+ #[derive(Clone, Copy)]
+ struct Run {
+ start: usize,
+ len: usize,
+ }
+}
diff --git a/library/alloc/src/str.rs b/library/alloc/src/str.rs
new file mode 100644
index 000000000..d5ed2c4ad
--- /dev/null
+++ b/library/alloc/src/str.rs
@@ -0,0 +1,686 @@
+//! Unicode string slices.
+//!
+//! *[See also the `str` primitive type](str).*
+//!
+//! The `&str` type is one of the two main string types, the other being `String`.
+//! Unlike its `String` counterpart, its contents are borrowed.
+//!
+//! # Basic Usage
+//!
+//! A basic string declaration of `&str` type:
+//!
+//! ```
+//! let hello_world = "Hello, World!";
+//! ```
+//!
+//! Here we have declared a string literal, also known as a string slice.
+//! String literals have a static lifetime, which means the string `hello_world`
+//! is guaranteed to be valid for the duration of the entire program.
+//! We can explicitly specify `hello_world`'s lifetime as well:
+//!
+//! ```
+//! let hello_world: &'static str = "Hello, world!";
+//! ```
+
+#![stable(feature = "rust1", since = "1.0.0")]
+// Many of the imports in this module are only used in the test configuration.
+// It's cleaner to just turn off the unused_imports warning than to fix them.
+#![allow(unused_imports)]
+
+use core::borrow::{Borrow, BorrowMut};
+use core::iter::FusedIterator;
+use core::mem;
+use core::ptr;
+use core::str::pattern::{DoubleEndedSearcher, Pattern, ReverseSearcher, Searcher};
+use core::unicode::conversions;
+
+use crate::borrow::ToOwned;
+use crate::boxed::Box;
+use crate::slice::{Concat, Join, SliceIndex};
+use crate::string::String;
+use crate::vec::Vec;
+
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::str::pattern;
+#[stable(feature = "encode_utf16", since = "1.8.0")]
+pub use core::str::EncodeUtf16;
+#[stable(feature = "split_ascii_whitespace", since = "1.34.0")]
+pub use core::str::SplitAsciiWhitespace;
+#[stable(feature = "split_inclusive", since = "1.51.0")]
+pub use core::str::SplitInclusive;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::str::SplitWhitespace;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::str::{from_utf8, from_utf8_mut, Bytes, CharIndices, Chars};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::str::{from_utf8_unchecked, from_utf8_unchecked_mut, ParseBoolError};
+#[stable(feature = "str_escape", since = "1.34.0")]
+pub use core::str::{EscapeDebug, EscapeDefault, EscapeUnicode};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::str::{FromStr, Utf8Error};
+#[allow(deprecated)]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::str::{Lines, LinesAny};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::str::{MatchIndices, RMatchIndices};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::str::{Matches, RMatches};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::str::{RSplit, Split};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::str::{RSplitN, SplitN};
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use core::str::{RSplitTerminator, SplitTerminator};
+
+/// Note: `str` in `Concat<str>` is not meaningful here.
+/// This type parameter of the trait only exists to enable another impl.
+#[cfg(not(no_global_oom_handling))]
+#[unstable(feature = "slice_concat_ext", issue = "27747")]
+impl<S: Borrow<str>> Concat<str> for [S] {
+ type Output = String;
+
+ fn concat(slice: &Self) -> String {
+ Join::join(slice, "")
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[unstable(feature = "slice_concat_ext", issue = "27747")]
+impl<S: Borrow<str>> Join<&str> for [S] {
+ type Output = String;
+
+ fn join(slice: &Self, sep: &str) -> String {
+ unsafe { String::from_utf8_unchecked(join_generic_copy(slice, sep.as_bytes())) }
+ }
+}
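+
+// Editorial sketch (illustrative, not part of the upstream source): these impls back
+// `concat` and `join` on slices of string-like elements, e.g.
+//
+//     assert_eq!(["hello", "world"].concat(), "helloworld");
+//     assert_eq!(["a", "b", "c"].join("-"), "a-b-c");
+//     assert_eq!(vec![String::from("x"), String::from("y")].join(", "), "x, y");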
+
+#[cfg(not(no_global_oom_handling))]
+macro_rules! specialize_for_lengths {
+ ($separator:expr, $target:expr, $iter:expr; $($num:expr),*) => {{
+ let mut target = $target;
+ let iter = $iter;
+ let sep_bytes = $separator;
+ match $separator.len() {
+ $(
+ // loops with hardcoded sizes run much faster
+ // specialize the cases with small separator lengths
+ $num => {
+ for s in iter {
+ copy_slice_and_advance!(target, sep_bytes);
+ let content_bytes = s.borrow().as_ref();
+ copy_slice_and_advance!(target, content_bytes);
+ }
+ },
+ )*
+ _ => {
+ // arbitrary non-zero size fallback
+ for s in iter {
+ copy_slice_and_advance!(target, sep_bytes);
+ let content_bytes = s.borrow().as_ref();
+ copy_slice_and_advance!(target, content_bytes);
+ }
+ }
+ }
+ target
+ }}
+}
+
+#[cfg(not(no_global_oom_handling))]
+macro_rules! copy_slice_and_advance {
+ ($target:expr, $bytes:expr) => {
+ let len = $bytes.len();
+ let (head, tail) = { $target }.split_at_mut(len);
+ head.copy_from_slice($bytes);
+ $target = tail;
+ };
+}
+
+// Optimized join implementation that works for both Vec<T> (T: Copy) and String's inner vec
+// Currently (2018-05-13) there is a bug with type inference and specialization (see issue #36262)
+// For this reason SliceConcat<T> is not specialized for T: Copy and SliceConcat<str> is the
+// only user of this function. It is left in place for the time when that is fixed.
+//
+// the bounds for String-join are S: Borrow<str> and for Vec-join Borrow<[T]>
+// [T] and str both impl AsRef<[T]> for some T
+// => s.borrow().as_ref() and we always have slices
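+//
+// Editorial illustration (not from upstream): for `["ab", "cd", "ef"].join("--")`,
+// the separators contribute `sep_len * iter.len() = 2 * 2` bytes (the first element
+// is not preceded by a separator) and the contents contribute 2 + 2 + 2 bytes, so
+// `reserved_len` is 10, exactly `"ab--cd--ef".len()`.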
+#[cfg(not(no_global_oom_handling))]
+fn join_generic_copy<B, T, S>(slice: &[S], sep: &[T]) -> Vec<T>
+where
+ T: Copy,
+ B: AsRef<[T]> + ?Sized,
+ S: Borrow<B>,
+{
+ let sep_len = sep.len();
+ let mut iter = slice.iter();
+
+ // the first slice is the only one without a separator preceding it
+ let first = match iter.next() {
+ Some(first) => first,
+ None => return vec![],
+ };
+
+ // compute the exact total length of the joined Vec
+ // if the `len` calculation overflows, we'll panic
+ // we would have run out of memory anyway and the rest of the function requires
+ // the entire Vec pre-allocated for safety
+ let reserved_len = sep_len
+ .checked_mul(iter.len())
+ .and_then(|n| {
+ slice.iter().map(|s| s.borrow().as_ref().len()).try_fold(n, usize::checked_add)
+ })
+ .expect("attempt to join into collection with len > usize::MAX");
+
+ // prepare an uninitialized buffer
+ let mut result = Vec::with_capacity(reserved_len);
+ debug_assert!(result.capacity() >= reserved_len);
+
+ result.extend_from_slice(first.borrow().as_ref());
+
+ unsafe {
+ let pos = result.len();
+ let target = result.spare_capacity_mut().get_unchecked_mut(..reserved_len - pos);
+
+ // Convert the separator and slices to slices of MaybeUninit
+ // to simplify implementation in specialize_for_lengths
+ let sep_uninit = core::slice::from_raw_parts(sep.as_ptr().cast(), sep.len());
+ let iter_uninit = iter.map(|it| {
+ let it = it.borrow().as_ref();
+ core::slice::from_raw_parts(it.as_ptr().cast(), it.len())
+ });
+
+ // copy separator and slices over without bounds checks
+ // generate loops with hardcoded offsets for small separators
+ // massive improvements possible (~ x2)
+ let remain = specialize_for_lengths!(sep_uninit, target, iter_uninit; 0, 1, 2, 3, 4);
+
+ // A weird borrow implementation may return different
+ // slices for the length calculation and the actual copy.
+ // Make sure we don't expose uninitialized bytes to the caller.
+ let result_len = reserved_len - remain.len();
+ result.set_len(result_len);
+ }
+ result
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Borrow<str> for String {
+ #[inline]
+ fn borrow(&self) -> &str {
+ &self[..]
+ }
+}
+
+#[stable(feature = "string_borrow_mut", since = "1.36.0")]
+impl BorrowMut<str> for String {
+ #[inline]
+ fn borrow_mut(&mut self) -> &mut str {
+ &mut self[..]
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl ToOwned for str {
+ type Owned = String;
+ #[inline]
+ fn to_owned(&self) -> String {
+ unsafe { String::from_utf8_unchecked(self.as_bytes().to_owned()) }
+ }
+
+ fn clone_into(&self, target: &mut String) {
+ let mut b = mem::take(target).into_bytes();
+ self.as_bytes().clone_into(&mut b);
+ *target = unsafe { String::from_utf8_unchecked(b) }
+ }
+}
+
+/// Methods for string slices.
+#[cfg(not(test))]
+impl str {
+ /// Converts a `Box<str>` into a `Box<[u8]>` without copying or allocating.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s = "this is a string";
+ /// let boxed_str = s.to_owned().into_boxed_str();
+ /// let boxed_bytes = boxed_str.into_boxed_bytes();
+ /// assert_eq!(*boxed_bytes, *s.as_bytes());
+ /// ```
+ #[rustc_allow_incoherent_impl]
+ #[stable(feature = "str_box_extras", since = "1.20.0")]
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[inline]
+ pub fn into_boxed_bytes(self: Box<str>) -> Box<[u8]> {
+ self.into()
+ }
+
+ /// Replaces all matches of a pattern with another string.
+ ///
+ /// `replace` creates a new [`String`], and copies the data from this string slice into it.
+ /// While doing so, it attempts to find matches of a pattern. If it finds any, it
+ /// replaces them with the replacement string slice.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s = "this is old";
+ ///
+ /// assert_eq!("this is new", s.replace("old", "new"));
+ /// assert_eq!("than an old", s.replace("is", "an"));
+ /// ```
+ ///
+ /// When the pattern doesn't match:
+ ///
+ /// ```
+ /// let s = "this is old";
+ /// assert_eq!(s, s.replace("cookie monster", "little lamb"));
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "this returns the replaced string as a new allocation, \
+ without modifying the original"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn replace<'a, P: Pattern<'a>>(&'a self, from: P, to: &str) -> String {
+ let mut result = String::new();
+ let mut last_end = 0;
+ for (start, part) in self.match_indices(from) {
+ result.push_str(unsafe { self.get_unchecked(last_end..start) });
+ result.push_str(to);
+ last_end = start + part.len();
+ }
+ result.push_str(unsafe { self.get_unchecked(last_end..self.len()) });
+ result
+ }
+
+    /// Replaces the first N matches of a pattern with another string.
+ ///
+ /// `replacen` creates a new [`String`], and copies the data from this string slice into it.
+ /// While doing so, it attempts to find matches of a pattern. If it finds any, it
+ /// replaces them with the replacement string slice at most `count` times.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s = "foo foo 123 foo";
+ /// assert_eq!("new new 123 foo", s.replacen("foo", "new", 2));
+ /// assert_eq!("faa fao 123 foo", s.replacen('o', "a", 3));
+ /// assert_eq!("foo foo new23 foo", s.replacen(char::is_numeric, "new", 1));
+ /// ```
+ ///
+ /// When the pattern doesn't match:
+ ///
+ /// ```
+ /// let s = "this is old";
+ /// assert_eq!(s, s.replacen("cookie monster", "little lamb", 10));
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "this returns the replaced string as a new allocation, \
+ without modifying the original"]
+ #[stable(feature = "str_replacen", since = "1.16.0")]
+ pub fn replacen<'a, P: Pattern<'a>>(&'a self, pat: P, to: &str, count: usize) -> String {
+        // Start with some capacity, in the hope of reducing the number of reallocations.
+ let mut result = String::with_capacity(32);
+ let mut last_end = 0;
+ for (start, part) in self.match_indices(pat).take(count) {
+ result.push_str(unsafe { self.get_unchecked(last_end..start) });
+ result.push_str(to);
+ last_end = start + part.len();
+ }
+ result.push_str(unsafe { self.get_unchecked(last_end..self.len()) });
+ result
+ }
+
+ /// Returns the lowercase equivalent of this string slice, as a new [`String`].
+ ///
+ /// 'Lowercase' is defined according to the terms of the Unicode Derived Core Property
+ /// `Lowercase`.
+ ///
+ /// Since some characters can expand into multiple characters when changing
+ /// the case, this function returns a [`String`] instead of modifying the
+ /// parameter in-place.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s = "HELLO";
+ ///
+ /// assert_eq!("hello", s.to_lowercase());
+ /// ```
+ ///
+ /// A tricky example, with sigma:
+ ///
+ /// ```
+ /// let sigma = "Σ";
+ ///
+ /// assert_eq!("σ", sigma.to_lowercase());
+ ///
+ /// // but at the end of a word, it's ς, not σ:
+ /// let odysseus = "ὈΔΥΣΣΕΎΣ";
+ ///
+ /// assert_eq!("ὀδυσσεύς", odysseus.to_lowercase());
+ /// ```
+ ///
+ /// Languages without case are not changed:
+ ///
+ /// ```
+ /// let new_year = "农历新年";
+ ///
+ /// assert_eq!(new_year, new_year.to_lowercase());
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "this returns the lowercase string as a new String, \
+ without modifying the original"]
+ #[stable(feature = "unicode_case_mapping", since = "1.2.0")]
+ pub fn to_lowercase(&self) -> String {
+ let out = convert_while_ascii(self.as_bytes(), u8::to_ascii_lowercase);
+
+ // Safety: we know this is a valid char boundary since
+ // out.len() is only progressed if ascii bytes are found
+ let rest = unsafe { self.get_unchecked(out.len()..) };
+
+ // Safety: We have written only valid ASCII to our vec
+ let mut s = unsafe { String::from_utf8_unchecked(out) };
+
+ for (i, c) in rest[..].char_indices() {
+ if c == 'Σ' {
+ // Σ maps to σ, except at the end of a word where it maps to ς.
+ // This is the only conditional (contextual) but language-independent mapping
+ // in `SpecialCasing.txt`,
+ // so hard-code it rather than have a generic "condition" mechanism.
+ // See https://github.com/rust-lang/rust/issues/26035
+ map_uppercase_sigma(rest, i, &mut s)
+ } else {
+ match conversions::to_lower(c) {
+ [a, '\0', _] => s.push(a),
+ [a, b, '\0'] => {
+ s.push(a);
+ s.push(b);
+ }
+ [a, b, c] => {
+ s.push(a);
+ s.push(b);
+ s.push(c);
+ }
+ }
+ }
+ }
+ return s;
+
+ fn map_uppercase_sigma(from: &str, i: usize, to: &mut String) {
+ // See https://www.unicode.org/versions/Unicode7.0.0/ch03.pdf#G33992
+ // for the definition of `Final_Sigma`.
+ debug_assert!('Σ'.len_utf8() == 2);
+ let is_word_final = case_ignoreable_then_cased(from[..i].chars().rev())
+ && !case_ignoreable_then_cased(from[i + 2..].chars());
+ to.push_str(if is_word_final { "ς" } else { "σ" });
+ }
+
+ fn case_ignoreable_then_cased<I: Iterator<Item = char>>(iter: I) -> bool {
+ use core::unicode::{Case_Ignorable, Cased};
+ match iter.skip_while(|&c| Case_Ignorable(c)).next() {
+ Some(c) => Cased(c),
+ None => false,
+ }
+ }
+ }
+
+ /// Returns the uppercase equivalent of this string slice, as a new [`String`].
+ ///
+ /// 'Uppercase' is defined according to the terms of the Unicode Derived Core Property
+ /// `Uppercase`.
+ ///
+ /// Since some characters can expand into multiple characters when changing
+ /// the case, this function returns a [`String`] instead of modifying the
+ /// parameter in-place.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s = "hello";
+ ///
+ /// assert_eq!("HELLO", s.to_uppercase());
+ /// ```
+ ///
+ /// Scripts without case are not changed:
+ ///
+ /// ```
+ /// let new_year = "农历新年";
+ ///
+ /// assert_eq!(new_year, new_year.to_uppercase());
+ /// ```
+ ///
+ /// One character can become multiple:
+ /// ```
+ /// let s = "tschüß";
+ ///
+ /// assert_eq!("TSCHÜSS", s.to_uppercase());
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "this returns the uppercase string as a new String, \
+ without modifying the original"]
+ #[stable(feature = "unicode_case_mapping", since = "1.2.0")]
+ pub fn to_uppercase(&self) -> String {
+ let out = convert_while_ascii(self.as_bytes(), u8::to_ascii_uppercase);
+
+ // Safety: we know this is a valid char boundary since
+ // out.len() is only progressed if ascii bytes are found
+ let rest = unsafe { self.get_unchecked(out.len()..) };
+
+ // Safety: We have written only valid ASCII to our vec
+ let mut s = unsafe { String::from_utf8_unchecked(out) };
+
+ for c in rest.chars() {
+ match conversions::to_upper(c) {
+ [a, '\0', _] => s.push(a),
+ [a, b, '\0'] => {
+ s.push(a);
+ s.push(b);
+ }
+ [a, b, c] => {
+ s.push(a);
+ s.push(b);
+ s.push(c);
+ }
+ }
+ }
+ s
+ }
+
+ /// Converts a [`Box<str>`] into a [`String`] without copying or allocating.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let string = String::from("birthday gift");
+ /// let boxed_str = string.clone().into_boxed_str();
+ ///
+ /// assert_eq!(boxed_str.into_string(), string);
+ /// ```
+ #[stable(feature = "box_str", since = "1.4.0")]
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[inline]
+ pub fn into_string(self: Box<str>) -> String {
+ let slice = Box::<[u8]>::from(self);
+ unsafe { String::from_utf8_unchecked(slice.into_vec()) }
+ }
+
+ /// Creates a new [`String`] by repeating a string `n` times.
+ ///
+ /// # Panics
+ ///
+ /// This function will panic if the capacity would overflow.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// assert_eq!("abc".repeat(4), String::from("abcabcabcabc"));
+ /// ```
+ ///
+ /// A panic upon overflow:
+ ///
+ /// ```should_panic
+ /// // this will panic at runtime
+ /// let huge = "0123456789abcdef".repeat(usize::MAX);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[rustc_allow_incoherent_impl]
+ #[must_use]
+ #[stable(feature = "repeat_str", since = "1.16.0")]
+ pub fn repeat(&self, n: usize) -> String {
+ unsafe { String::from_utf8_unchecked(self.as_bytes().repeat(n)) }
+ }
+
+ /// Returns a copy of this string where each character is mapped to its
+ /// ASCII upper case equivalent.
+ ///
+ /// ASCII letters 'a' to 'z' are mapped to 'A' to 'Z',
+ /// but non-ASCII letters are unchanged.
+ ///
+ /// To uppercase the value in-place, use [`make_ascii_uppercase`].
+ ///
+    /// To uppercase non-ASCII characters in addition to ASCII characters, use
+    /// [`to_uppercase`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let s = "Grüße, Jürgen ❤";
+ ///
+ /// assert_eq!("GRüßE, JüRGEN ❤", s.to_ascii_uppercase());
+ /// ```
+ ///
+ /// [`make_ascii_uppercase`]: str::make_ascii_uppercase
+ /// [`to_uppercase`]: #method.to_uppercase
+ #[cfg(not(no_global_oom_handling))]
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "to uppercase the value in-place, use `make_ascii_uppercase()`"]
+ #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
+ #[inline]
+ pub fn to_ascii_uppercase(&self) -> String {
+ let mut bytes = self.as_bytes().to_vec();
+ bytes.make_ascii_uppercase();
+ // make_ascii_uppercase() preserves the UTF-8 invariant.
+ unsafe { String::from_utf8_unchecked(bytes) }
+ }
+
+ /// Returns a copy of this string where each character is mapped to its
+ /// ASCII lower case equivalent.
+ ///
+ /// ASCII letters 'A' to 'Z' are mapped to 'a' to 'z',
+ /// but non-ASCII letters are unchanged.
+ ///
+ /// To lowercase the value in-place, use [`make_ascii_lowercase`].
+ ///
+    /// To lowercase non-ASCII characters in addition to ASCII characters, use
+    /// [`to_lowercase`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let s = "Grüße, Jürgen ❤";
+ ///
+ /// assert_eq!("grüße, jürgen ❤", s.to_ascii_lowercase());
+ /// ```
+ ///
+ /// [`make_ascii_lowercase`]: str::make_ascii_lowercase
+ /// [`to_lowercase`]: #method.to_lowercase
+ #[cfg(not(no_global_oom_handling))]
+ #[rustc_allow_incoherent_impl]
+ #[must_use = "to lowercase the value in-place, use `make_ascii_lowercase()`"]
+ #[stable(feature = "ascii_methods_on_intrinsics", since = "1.23.0")]
+ #[inline]
+ pub fn to_ascii_lowercase(&self) -> String {
+ let mut bytes = self.as_bytes().to_vec();
+ bytes.make_ascii_lowercase();
+ // make_ascii_lowercase() preserves the UTF-8 invariant.
+ unsafe { String::from_utf8_unchecked(bytes) }
+ }
+}
+
+/// Converts a boxed slice of bytes to a boxed string slice without checking
+/// that the string contains valid UTF-8.
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// let smile_utf8 = Box::new([226, 152, 186]);
+/// let smile = unsafe { std::str::from_boxed_utf8_unchecked(smile_utf8) };
+///
+/// assert_eq!("☺", &*smile);
+/// ```
+#[stable(feature = "str_box_extras", since = "1.20.0")]
+#[must_use]
+#[inline]
+pub unsafe fn from_boxed_utf8_unchecked(v: Box<[u8]>) -> Box<str> {
+ unsafe { Box::from_raw(Box::into_raw(v) as *mut str) }
+}
+
+/// Converts the bytes while the bytes are still ASCII.
+/// For better average performance, this happens in chunks of `2 * size_of::<usize>()`.
+/// Returns a vec with the converted bytes.
+#[inline]
+#[cfg(not(test))]
+#[cfg(not(no_global_oom_handling))]
+fn convert_while_ascii(b: &[u8], convert: fn(&u8) -> u8) -> Vec<u8> {
+ let mut out = Vec::with_capacity(b.len());
+
+ const USIZE_SIZE: usize = mem::size_of::<usize>();
+ const MAGIC_UNROLL: usize = 2;
+ const N: usize = USIZE_SIZE * MAGIC_UNROLL;
+ const NONASCII_MASK: usize = usize::from_ne_bytes([0x80; USIZE_SIZE]);
+
+ let mut i = 0;
+ unsafe {
+ while i + N <= b.len() {
+            // Safety: `i + N <= b.len()` by the loop condition, and `out` was allocated
+            // with capacity `b.len()`, so both of these ranges are in bounds.
+ let in_chunk = b.get_unchecked(i..i + N);
+ let out_chunk = out.spare_capacity_mut().get_unchecked_mut(i..i + N);
+
+ let mut bits = 0;
+ for j in 0..MAGIC_UNROLL {
+ // read the bytes 1 usize at a time (unaligned since we haven't checked the alignment)
+ // safety: in_chunk is valid bytes in the range
+ bits |= in_chunk.as_ptr().cast::<usize>().add(j).read_unaligned();
+ }
+ // if our chunks aren't ascii, then return only the prior bytes as init
+ if bits & NONASCII_MASK != 0 {
+ break;
+ }
+
+ // perform the case conversions on N bytes (gets heavily autovec'd)
+ for j in 0..N {
+ // safety: in_chunk and out_chunk is valid bytes in the range
+ let out = out_chunk.get_unchecked_mut(j);
+ out.write(convert(in_chunk.get_unchecked(j)));
+ }
+
+ // mark these bytes as initialised
+ i += N;
+ }
+ out.set_len(i);
+ }
+
+ out
+}
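+
+// Editorial illustration (not from upstream): on a 64-bit target `N` is 16, so for a
+// 25-byte ASCII input such as b"HELLO WORLD, HELLO FERRIS" the unrolled loop converts
+// the first 16 bytes and then stops (16 + 16 > 25); the caller (e.g. `to_lowercase`)
+// handles the remaining bytes through the per-`char` Unicode path starting at
+// `out.len()`.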
diff --git a/library/alloc/src/string.rs b/library/alloc/src/string.rs
new file mode 100644
index 000000000..a5118e533
--- /dev/null
+++ b/library/alloc/src/string.rs
@@ -0,0 +1,2951 @@
+//! A UTF-8–encoded, growable string.
+//!
+//! This module contains the [`String`] type, the [`ToString`] trait for
+//! converting to strings, and several error types that may result from
+//! working with [`String`]s.
+//!
+//! # Examples
+//!
+//! There are multiple ways to create a new [`String`] from a string literal:
+//!
+//! ```
+//! let s = "Hello".to_string();
+//!
+//! let s = String::from("world");
+//! let s: String = "also this".into();
+//! ```
+//!
+//! You can create a new [`String`] from an existing one by concatenating with
+//! `+`:
+//!
+//! ```
+//! let s = "Hello".to_string();
+//!
+//! let message = s + " world!";
+//! ```
+//!
+//! If you have a vector of valid UTF-8 bytes, you can make a [`String`] out of
+//! it. You can do the reverse too.
+//!
+//! ```
+//! let sparkle_heart = vec![240, 159, 146, 150];
+//!
+//! // We know these bytes are valid, so we'll use `unwrap()`.
+//! let sparkle_heart = String::from_utf8(sparkle_heart).unwrap();
+//!
+//! assert_eq!("💖", sparkle_heart);
+//!
+//! let bytes = sparkle_heart.into_bytes();
+//!
+//! assert_eq!(bytes, [240, 159, 146, 150]);
+//! ```
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+#[cfg(not(no_global_oom_handling))]
+use core::char::{decode_utf16, REPLACEMENT_CHARACTER};
+use core::fmt;
+use core::hash;
+use core::iter::FusedIterator;
+#[cfg(not(no_global_oom_handling))]
+use core::iter::{from_fn, FromIterator};
+#[cfg(not(no_global_oom_handling))]
+use core::ops::Add;
+#[cfg(not(no_global_oom_handling))]
+use core::ops::AddAssign;
+#[cfg(not(no_global_oom_handling))]
+use core::ops::Bound::{Excluded, Included, Unbounded};
+use core::ops::{self, Index, IndexMut, Range, RangeBounds};
+use core::ptr;
+use core::slice;
+#[cfg(not(no_global_oom_handling))]
+use core::str::lossy;
+use core::str::pattern::Pattern;
+
+#[cfg(not(no_global_oom_handling))]
+use crate::borrow::{Cow, ToOwned};
+use crate::boxed::Box;
+use crate::collections::TryReserveError;
+use crate::str::{self, Chars, Utf8Error};
+#[cfg(not(no_global_oom_handling))]
+use crate::str::{from_boxed_utf8_unchecked, FromStr};
+use crate::vec::Vec;
+
+/// A UTF-8–encoded, growable string.
+///
+/// The `String` type is the most common string type that has ownership over the
+/// contents of the string. It has a close relationship with its borrowed
+/// counterpart, the primitive [`str`].
+///
+/// # Examples
+///
+/// You can create a `String` from [a literal string][`&str`] with [`String::from`]:
+///
+/// [`String::from`]: From::from
+///
+/// ```
+/// let hello = String::from("Hello, world!");
+/// ```
+///
+/// You can append a [`char`] to a `String` with the [`push`] method, and
+/// append a [`&str`] with the [`push_str`] method:
+///
+/// ```
+/// let mut hello = String::from("Hello, ");
+///
+/// hello.push('w');
+/// hello.push_str("orld!");
+/// ```
+///
+/// [`push`]: String::push
+/// [`push_str`]: String::push_str
+///
+/// If you have a vector of UTF-8 bytes, you can create a `String` from it with
+/// the [`from_utf8`] method:
+///
+/// ```
+/// // some bytes, in a vector
+/// let sparkle_heart = vec![240, 159, 146, 150];
+///
+/// // We know these bytes are valid, so we'll use `unwrap()`.
+/// let sparkle_heart = String::from_utf8(sparkle_heart).unwrap();
+///
+/// assert_eq!("💖", sparkle_heart);
+/// ```
+///
+/// [`from_utf8`]: String::from_utf8
+///
+/// # UTF-8
+///
+/// `String`s are always valid UTF-8. If you need a non-UTF-8 string, consider
+/// [`OsString`]. It is similar, but without the UTF-8 constraint. Because UTF-8
+/// is a variable width encoding, `String`s are typically smaller than an array of
+/// the same `chars`:
+///
+/// ```
+/// use std::mem;
+///
+/// // `s` is ASCII which represents each `char` as one byte
+/// let s = "hello";
+/// assert_eq!(s.len(), 5);
+///
+/// // A `char` array with the same contents would be longer because
+/// // every `char` is four bytes
+/// let s = ['h', 'e', 'l', 'l', 'o'];
+/// let size: usize = s.into_iter().map(|c| mem::size_of_val(&c)).sum();
+/// assert_eq!(size, 20);
+///
+/// // However, for non-ASCII strings, the difference will be smaller
+/// // and sometimes they are the same
+/// let s = "💖💖💖💖💖";
+/// assert_eq!(s.len(), 20);
+///
+/// let s = ['💖', '💖', '💖', '💖', '💖'];
+/// let size: usize = s.into_iter().map(|c| mem::size_of_val(&c)).sum();
+/// assert_eq!(size, 20);
+/// ```
+///
+/// This raises interesting questions as to how `s[i]` should work.
+/// What should `i` be here? Several options include byte indices and
+/// `char` indices but, because of UTF-8 encoding, only byte indices
+/// would provide constant time indexing. Getting the `i`th `char`, for
+/// example, is available using [`chars`]:
+///
+/// ```
+/// let s = "hello";
+/// let third_character = s.chars().nth(2);
+/// assert_eq!(third_character, Some('l'));
+///
+/// let s = "💖💖💖💖💖";
+/// let third_character = s.chars().nth(2);
+/// assert_eq!(third_character, Some('💖'));
+/// ```
+///
+/// Next, what should `s[i]` return? Because indexing returns a reference
+/// to underlying data it could be `&u8`, `&[u8]`, or something else similar.
+/// Since we're only providing one index, `&u8` makes the most sense but that
+/// might not be what the user expects and can be explicitly achieved with
+/// [`as_bytes()`]:
+///
+/// ```
+/// // The first byte is 104 - the byte value of `'h'`
+/// let s = "hello";
+/// assert_eq!(s.as_bytes()[0], 104);
+/// // or
+/// assert_eq!(s.as_bytes()[0], b'h');
+///
+/// // The first byte is 240 which isn't obviously useful
+/// let s = "💖💖💖💖💖";
+/// assert_eq!(s.as_bytes()[0], 240);
+/// ```
+///
+/// Due to these ambiguities/restrictions, indexing with a `usize` is simply
+/// forbidden:
+///
+/// ```compile_fail,E0277
+/// let s = "hello";
+///
+/// // The following will not compile!
+/// println!("The first letter of s is {}", s[0]);
+/// ```
+///
+/// It is more clear, however, how `&s[i..j]` should work (that is,
+/// indexing with a range). It should accept byte indices (to be constant-time)
+/// and return a `&str` which is UTF-8 encoded. This is also called "string slicing".
+/// Note this will panic if the byte indices provided are not character
+/// boundaries - see [`is_char_boundary`] for more details. See the implementations
+/// for [`SliceIndex<str>`] for more details on string slicing. For a non-panicking
+/// version of string slicing, see [`get`].
+///
+/// [`OsString`]: ../../std/ffi/struct.OsString.html "ffi::OsString"
+/// [`SliceIndex<str>`]: core::slice::SliceIndex
+/// [`as_bytes()`]: str::as_bytes
+/// [`get`]: str::get
+/// [`is_char_boundary`]: str::is_char_boundary
+///
+/// The [`bytes`] and [`chars`] methods return iterators over the bytes and
+/// codepoints of the string, respectively. To iterate over codepoints along
+/// with byte indices, use [`char_indices`].
+///
+/// [`bytes`]: str::bytes
+/// [`chars`]: str::chars
+/// [`char_indices`]: str::char_indices
+///
+/// # Deref
+///
+/// `String` implements <code>[Deref]<Target = [str]></code>, and so inherits all of [`str`]'s
+/// methods. In addition, this means that you can pass a `String` to a
+/// function which takes a [`&str`] by using an ampersand (`&`):
+///
+/// ```
+/// fn takes_str(s: &str) { }
+///
+/// let s = String::from("Hello");
+///
+/// takes_str(&s);
+/// ```
+///
+/// This will create a [`&str`] from the `String` and pass it in. This
+/// conversion is very inexpensive, and so generally, functions will accept
+/// [`&str`]s as arguments unless they need a `String` for some specific
+/// reason.
+///
+/// In certain cases Rust doesn't have enough information to make this
+/// conversion, known as [`Deref`] coercion. In the following example a string
+/// slice [`&'a str`][`&str`] implements the trait `TraitExample`, and the function
+/// `example_func` takes anything that implements the trait. In this case Rust
+/// would need to make two implicit conversions, which Rust doesn't have the
+/// means to do. For that reason, the following example will not compile.
+///
+/// ```compile_fail,E0277
+/// trait TraitExample {}
+///
+/// impl<'a> TraitExample for &'a str {}
+///
+/// fn example_func<A: TraitExample>(example_arg: A) {}
+///
+/// let example_string = String::from("example_string");
+/// example_func(&example_string);
+/// ```
+///
+/// There are two options that would work instead. The first would be to
+/// change the line `example_func(&example_string);` to
+/// `example_func(example_string.as_str());`, using the method [`as_str()`]
+/// to explicitly extract the string slice containing the string. The second
+/// way changes `example_func(&example_string);` to
+/// `example_func(&*example_string);`. In this case we are dereferencing a
+/// `String` to a [`str`], then referencing the [`str`] back to
+/// [`&str`]. The second way is more idiomatic, however both work to do the
+/// conversion explicitly rather than relying on the implicit conversion.
+///
+/// # Representation
+///
+/// A `String` is made up of three components: a pointer to some bytes, a
+/// length, and a capacity. The pointer points to an internal buffer `String`
+/// uses to store its data. The length is the number of bytes currently stored
+/// in the buffer, and the capacity is the size of the buffer in bytes. As such,
+/// the length will always be less than or equal to the capacity.
+///
+/// This buffer is always stored on the heap.
+///
+/// You can look at these with the [`as_ptr`], [`len`], and [`capacity`]
+/// methods:
+///
+/// ```
+/// use std::mem;
+///
+/// let story = String::from("Once upon a time...");
+///
+// FIXME Update this when vec_into_raw_parts is stabilized
+/// // Prevent automatically dropping the String's data
+/// let mut story = mem::ManuallyDrop::new(story);
+///
+/// let ptr = story.as_mut_ptr();
+/// let len = story.len();
+/// let capacity = story.capacity();
+///
+/// // story has nineteen bytes
+/// assert_eq!(19, len);
+///
+/// // We can re-build a String out of ptr, len, and capacity. This is all
+/// // unsafe because we are responsible for making sure the components are
+/// // valid:
+/// let s = unsafe { String::from_raw_parts(ptr, len, capacity) } ;
+///
+/// assert_eq!(String::from("Once upon a time..."), s);
+/// ```
+///
+/// [`as_ptr`]: str::as_ptr
+/// [`len`]: String::len
+/// [`capacity`]: String::capacity
+///
+/// If a `String` has enough capacity, adding elements to it will not
+/// re-allocate. For example, consider this program:
+///
+/// ```
+/// let mut s = String::new();
+///
+/// println!("{}", s.capacity());
+///
+/// for _ in 0..5 {
+/// s.push_str("hello");
+/// println!("{}", s.capacity());
+/// }
+/// ```
+///
+/// This will output the following:
+///
+/// ```text
+/// 0
+/// 8
+/// 16
+/// 16
+/// 32
+/// 32
+/// ```
+///
+/// At first, we have no memory allocated at all, but as we append to the
+/// string, it increases its capacity appropriately. If we instead use the
+/// [`with_capacity`] method to allocate the correct capacity initially:
+///
+/// ```
+/// let mut s = String::with_capacity(25);
+///
+/// println!("{}", s.capacity());
+///
+/// for _ in 0..5 {
+/// s.push_str("hello");
+/// println!("{}", s.capacity());
+/// }
+/// ```
+///
+/// [`with_capacity`]: String::with_capacity
+///
+/// We end up with a different output:
+///
+/// ```text
+/// 25
+/// 25
+/// 25
+/// 25
+/// 25
+/// 25
+/// ```
+///
+/// Here, there's no need to allocate more memory inside the loop.
+///
+/// [str]: prim@str "str"
+/// [`str`]: prim@str "str"
+/// [`&str`]: prim@str "&str"
+/// [Deref]: core::ops::Deref "ops::Deref"
+/// [`Deref`]: core::ops::Deref "ops::Deref"
+/// [`as_str()`]: String::as_str
+#[derive(PartialOrd, Eq, Ord)]
+#[cfg_attr(not(test), rustc_diagnostic_item = "String")]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct String {
+ vec: Vec<u8>,
+}
+
+/// A possible error value when converting a `String` from a UTF-8 byte vector.
+///
+/// This type is the error type for the [`from_utf8`] method on [`String`]. It
+/// is designed in such a way to carefully avoid reallocations: the
+/// [`into_bytes`] method will give back the byte vector that was used in the
+/// conversion attempt.
+///
+/// [`from_utf8`]: String::from_utf8
+/// [`into_bytes`]: FromUtf8Error::into_bytes
+///
+/// The [`Utf8Error`] type provided by [`std::str`] represents an error that may
+/// occur when converting a slice of [`u8`]s to a [`&str`]. In this sense, it's
+/// an analogue to `FromUtf8Error`, and you can get one from a `FromUtf8Error`
+/// through the [`utf8_error`] method.
+///
+/// [`Utf8Error`]: str::Utf8Error "std::str::Utf8Error"
+/// [`std::str`]: core::str "std::str"
+/// [`&str`]: prim@str "&str"
+/// [`utf8_error`]: FromUtf8Error::utf8_error
+///
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// // some invalid bytes, in a vector
+/// let bytes = vec![0, 159];
+///
+/// let value = String::from_utf8(bytes);
+///
+/// assert!(value.is_err());
+/// assert_eq!(vec![0, 159], value.unwrap_err().into_bytes());
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(no_global_oom_handling), derive(Clone))]
+#[derive(Debug, PartialEq, Eq)]
+pub struct FromUtf8Error {
+ bytes: Vec<u8>,
+ error: Utf8Error,
+}
+
+/// A possible error value when converting a `String` from a UTF-16 byte slice.
+///
+/// This type is the error type for the [`from_utf16`] method on [`String`].
+///
+/// [`from_utf16`]: String::from_utf16
+/// # Examples
+///
+/// Basic usage:
+///
+/// ```
+/// // 𝄞mu<invalid>ic
+/// let v = &[0xD834, 0xDD1E, 0x006d, 0x0075,
+/// 0xD800, 0x0069, 0x0063];
+///
+/// assert!(String::from_utf16(v).is_err());
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[derive(Debug)]
+pub struct FromUtf16Error(());
+
+impl String {
+ /// Creates a new empty `String`.
+ ///
+ /// Given that the `String` is empty, this will not allocate any initial
+ /// buffer. While that means that this initial operation is very
+ /// inexpensive, it may cause excessive allocation later when you add
+ /// data. If you have an idea of how much data the `String` will hold,
+ /// consider the [`with_capacity`] method to prevent excessive
+ /// re-allocation.
+ ///
+ /// [`with_capacity`]: String::with_capacity
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s = String::new();
+ /// ```
+ #[inline]
+ #[rustc_const_stable(feature = "const_string_new", since = "1.39.0")]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ pub const fn new() -> String {
+ String { vec: Vec::new() }
+ }
+
+ /// Creates a new empty `String` with at least the specified capacity.
+ ///
+ /// `String`s have an internal buffer to hold their data. The capacity is
+ /// the length of that buffer, and can be queried with the [`capacity`]
+ /// method. This method creates an empty `String`, but one with an initial
+ /// buffer that can hold at least `capacity` bytes. This is useful when you
+ /// may be appending a bunch of data to the `String`, reducing the number of
+ /// reallocations it needs to do.
+ ///
+ /// [`capacity`]: String::capacity
+ ///
+ /// If the given capacity is `0`, no allocation will occur, and this method
+ /// is identical to the [`new`] method.
+ ///
+ /// [`new`]: String::new
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = String::with_capacity(10);
+ ///
+ /// // The String contains no chars, even though it has capacity for more
+ /// assert_eq!(s.len(), 0);
+ ///
+ /// // These are all done without reallocating...
+ /// let cap = s.capacity();
+ /// for _ in 0..10 {
+ /// s.push('a');
+ /// }
+ ///
+ /// assert_eq!(s.capacity(), cap);
+ ///
+ /// // ...but this may make the string reallocate
+ /// s.push('a');
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ pub fn with_capacity(capacity: usize) -> String {
+ String { vec: Vec::with_capacity(capacity) }
+ }
+
+ // HACK(japaric): with cfg(test) the inherent `[T]::to_vec` method, which is
+ // required for this method definition, is not available. Since we don't
+ // require this method for testing purposes, I'll just stub it
+ // NB see the slice::hack module in slice.rs for more information
+ #[inline]
+ #[cfg(test)]
+ pub fn from_str(_: &str) -> String {
+ panic!("not available with cfg(test)");
+ }
+
+ /// Converts a vector of bytes to a `String`.
+ ///
+ /// A string ([`String`]) is made of bytes ([`u8`]), and a vector of bytes
+ /// ([`Vec<u8>`]) is made of bytes, so this function converts between the
+ /// two. Not all byte slices are valid `String`s, however: `String`
+ /// requires that it is valid UTF-8. `from_utf8()` checks to ensure that
+ /// the bytes are valid UTF-8, and then does the conversion.
+ ///
+ /// If you are sure that the byte slice is valid UTF-8, and you don't want
+ /// to incur the overhead of the validity check, there is an unsafe version
+ /// of this function, [`from_utf8_unchecked`], which has the same behavior
+ /// but skips the check.
+ ///
+ /// This method will take care to not copy the vector, for efficiency's
+ /// sake.
+ ///
+ /// If you need a [`&str`] instead of a `String`, consider
+ /// [`str::from_utf8`].
+ ///
+ /// The inverse of this method is [`into_bytes`].
+ ///
+ /// # Errors
+ ///
+ /// Returns [`Err`] if the slice is not UTF-8 with a description as to why the
+ /// provided bytes are not UTF-8. The vector you moved in is also included.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// // some bytes, in a vector
+ /// let sparkle_heart = vec![240, 159, 146, 150];
+ ///
+ /// // We know these bytes are valid, so we'll use `unwrap()`.
+ /// let sparkle_heart = String::from_utf8(sparkle_heart).unwrap();
+ ///
+ /// assert_eq!("💖", sparkle_heart);
+ /// ```
+ ///
+ /// Incorrect bytes:
+ ///
+ /// ```
+ /// // some invalid bytes, in a vector
+ /// let sparkle_heart = vec![0, 159, 146, 150];
+ ///
+ /// assert!(String::from_utf8(sparkle_heart).is_err());
+ /// ```
+ ///
+ /// See the docs for [`FromUtf8Error`] for more details on what you can do
+ /// with this error.
+ ///
+ /// [`from_utf8_unchecked`]: String::from_utf8_unchecked
+ /// [`Vec<u8>`]: crate::vec::Vec "Vec"
+ /// [`&str`]: prim@str "&str"
+ /// [`into_bytes`]: String::into_bytes
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn from_utf8(vec: Vec<u8>) -> Result<String, FromUtf8Error> {
+ match str::from_utf8(&vec) {
+ Ok(..) => Ok(String { vec }),
+ Err(e) => Err(FromUtf8Error { bytes: vec, error: e }),
+ }
+ }
+
+ /// Converts a slice of bytes to a string, including invalid characters.
+ ///
+ /// Strings are made of bytes ([`u8`]), and a slice of bytes
+ /// ([`&[u8]`][byteslice]) is made of bytes, so this function converts
+ /// between the two. Not all byte slices are valid strings, however: strings
+ /// are required to be valid UTF-8. During this conversion,
+ /// `from_utf8_lossy()` will replace any invalid UTF-8 sequences with
+ /// [`U+FFFD REPLACEMENT CHARACTER`][U+FFFD], which looks like this: �
+ ///
+ /// [byteslice]: prim@slice
+ /// [U+FFFD]: core::char::REPLACEMENT_CHARACTER
+ ///
+ /// If you are sure that the byte slice is valid UTF-8, and you don't want
+ /// to incur the overhead of the conversion, there is an unsafe version
+ /// of this function, [`from_utf8_unchecked`], which has the same behavior
+ /// but skips the checks.
+ ///
+ /// [`from_utf8_unchecked`]: String::from_utf8_unchecked
+ ///
+ /// This function returns a [`Cow<'a, str>`]. If our byte slice is invalid
+ /// UTF-8, then we need to insert the replacement characters, which will
+ /// change the size of the string, and hence, require a `String`. But if
+ /// it's already valid UTF-8, we don't need a new allocation. This return
+ /// type allows us to handle both cases.
+ ///
+ /// [`Cow<'a, str>`]: crate::borrow::Cow "borrow::Cow"
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// // some bytes, in a vector
+ /// let sparkle_heart = vec![240, 159, 146, 150];
+ ///
+ /// let sparkle_heart = String::from_utf8_lossy(&sparkle_heart);
+ ///
+ /// assert_eq!("💖", sparkle_heart);
+ /// ```
+ ///
+ /// Incorrect bytes:
+ ///
+ /// ```
+ /// // some invalid bytes
+ /// let input = b"Hello \xF0\x90\x80World";
+ /// let output = String::from_utf8_lossy(input);
+ ///
+ /// assert_eq!("Hello �World", output);
+ /// ```
+ #[must_use]
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn from_utf8_lossy(v: &[u8]) -> Cow<'_, str> {
+ let mut iter = lossy::Utf8Lossy::from_bytes(v).chunks();
+
+ let first_valid = if let Some(chunk) = iter.next() {
+ let lossy::Utf8LossyChunk { valid, broken } = chunk;
+ if broken.is_empty() {
+ debug_assert_eq!(valid.len(), v.len());
+ return Cow::Borrowed(valid);
+ }
+ valid
+ } else {
+ return Cow::Borrowed("");
+ };
+
+ const REPLACEMENT: &str = "\u{FFFD}";
+
+ let mut res = String::with_capacity(v.len());
+ res.push_str(first_valid);
+ res.push_str(REPLACEMENT);
+
+ for lossy::Utf8LossyChunk { valid, broken } in iter {
+ res.push_str(valid);
+ if !broken.is_empty() {
+ res.push_str(REPLACEMENT);
+ }
+ }
+
+ Cow::Owned(res)
+ }
+
+    /// Decode a UTF-16–encoded slice `v` into a `String`, returning [`Err`]
+ /// if `v` contains any invalid data.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// // 𝄞music
+ /// let v = &[0xD834, 0xDD1E, 0x006d, 0x0075,
+ /// 0x0073, 0x0069, 0x0063];
+ /// assert_eq!(String::from("𝄞music"),
+ /// String::from_utf16(v).unwrap());
+ ///
+ /// // 𝄞mu<invalid>ic
+ /// let v = &[0xD834, 0xDD1E, 0x006d, 0x0075,
+ /// 0xD800, 0x0069, 0x0063];
+ /// assert!(String::from_utf16(v).is_err());
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn from_utf16(v: &[u16]) -> Result<String, FromUtf16Error> {
+ // This isn't done via collect::<Result<_, _>>() for performance reasons.
+ // FIXME: the function can be simplified again when #48994 is closed.
+ let mut ret = String::with_capacity(v.len());
+ for c in decode_utf16(v.iter().cloned()) {
+ if let Ok(c) = c {
+ ret.push(c);
+ } else {
+ return Err(FromUtf16Error(()));
+ }
+ }
+ Ok(ret)
+ }
+
+ /// Decode a UTF-16–encoded slice `v` into a `String`, replacing
+ /// invalid data with [the replacement character (`U+FFFD`)][U+FFFD].
+ ///
+ /// Unlike [`from_utf8_lossy`] which returns a [`Cow<'a, str>`],
+ /// `from_utf16_lossy` returns a `String` since the UTF-16 to UTF-8
+ /// conversion requires a memory allocation.
+ ///
+ /// [`from_utf8_lossy`]: String::from_utf8_lossy
+ /// [`Cow<'a, str>`]: crate::borrow::Cow "borrow::Cow"
+ /// [U+FFFD]: core::char::REPLACEMENT_CHARACTER
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// // 𝄞mus<invalid>ic<invalid>
+ /// let v = &[0xD834, 0xDD1E, 0x006d, 0x0075,
+ /// 0x0073, 0xDD1E, 0x0069, 0x0063,
+ /// 0xD834];
+ ///
+ /// assert_eq!(String::from("𝄞mus\u{FFFD}ic\u{FFFD}"),
+ /// String::from_utf16_lossy(v));
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[must_use]
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn from_utf16_lossy(v: &[u16]) -> String {
+ decode_utf16(v.iter().cloned()).map(|r| r.unwrap_or(REPLACEMENT_CHARACTER)).collect()
+ }
+
+ /// Decomposes a `String` into its raw components.
+ ///
+ /// Returns the raw pointer to the underlying data, the length of
+ /// the string (in bytes), and the allocated capacity of the data
+ /// (in bytes). These are the same arguments in the same order as
+ /// the arguments to [`from_raw_parts`].
+ ///
+ /// After calling this function, the caller is responsible for the
+ /// memory previously managed by the `String`. The only way to do
+ /// this is to convert the raw pointer, length, and capacity back
+ /// into a `String` with the [`from_raw_parts`] function, allowing
+ /// the destructor to perform the cleanup.
+ ///
+ /// [`from_raw_parts`]: String::from_raw_parts
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(vec_into_raw_parts)]
+ /// let s = String::from("hello");
+ ///
+ /// let (ptr, len, cap) = s.into_raw_parts();
+ ///
+ /// let rebuilt = unsafe { String::from_raw_parts(ptr, len, cap) };
+ /// assert_eq!(rebuilt, "hello");
+ /// ```
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[unstable(feature = "vec_into_raw_parts", reason = "new API", issue = "65816")]
+ pub fn into_raw_parts(self) -> (*mut u8, usize, usize) {
+ self.vec.into_raw_parts()
+ }
+
+ /// Creates a new `String` from a length, capacity, and pointer.
+ ///
+ /// # Safety
+ ///
+ /// This is highly unsafe, due to the number of invariants that aren't
+ /// checked:
+ ///
+ /// * The memory at `buf` needs to have been previously allocated by the
+ /// same allocator the standard library uses, with a required alignment of exactly 1.
+ /// * `length` needs to be less than or equal to `capacity`.
+ /// * `capacity` needs to be the correct value.
+ /// * The first `length` bytes at `buf` need to be valid UTF-8.
+ ///
+ /// Violating these may cause problems like corrupting the allocator's
+ /// internal data structures. For example, it is normally **not** safe to
+ /// build a `String` from a pointer to a C `char` array containing UTF-8
+ /// _unless_ you are certain that array was originally allocated by the
+ /// Rust standard library's allocator.
+ ///
+ /// The ownership of `buf` is effectively transferred to the
+ /// `String` which may then deallocate, reallocate or change the
+ /// contents of memory pointed to by the pointer at will. Ensure
+ /// that nothing else uses the pointer after calling this
+ /// function.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// use std::mem;
+ ///
+ /// unsafe {
+ /// let s = String::from("hello");
+ ///
+ // FIXME Update this when vec_into_raw_parts is stabilized
+ /// // Prevent automatically dropping the String's data
+ /// let mut s = mem::ManuallyDrop::new(s);
+ ///
+ /// let ptr = s.as_mut_ptr();
+ /// let len = s.len();
+ /// let capacity = s.capacity();
+ ///
+ /// let s = String::from_raw_parts(ptr, len, capacity);
+ ///
+ /// assert_eq!(String::from("hello"), s);
+ /// }
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub unsafe fn from_raw_parts(buf: *mut u8, length: usize, capacity: usize) -> String {
+ unsafe { String { vec: Vec::from_raw_parts(buf, length, capacity) } }
+ }
+
+ /// Converts a vector of bytes to a `String` without checking that the
+ /// string contains valid UTF-8.
+ ///
+ /// See the safe version, [`from_utf8`], for more details.
+ ///
+ /// [`from_utf8`]: String::from_utf8
+ ///
+ /// # Safety
+ ///
+ /// This function is unsafe because it does not check that the bytes passed
+ /// to it are valid UTF-8. If this constraint is violated, it may cause
+ /// memory unsafety issues with future users of the `String`, as the rest of
+ /// the standard library assumes that `String`s are valid UTF-8.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// // some bytes, in a vector
+ /// let sparkle_heart = vec![240, 159, 146, 150];
+ ///
+ /// let sparkle_heart = unsafe {
+ /// String::from_utf8_unchecked(sparkle_heart)
+ /// };
+ ///
+ /// assert_eq!("💖", sparkle_heart);
+ /// ```
+ #[inline]
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub unsafe fn from_utf8_unchecked(bytes: Vec<u8>) -> String {
+ String { vec: bytes }
+ }
+
+ /// Converts a `String` into a byte vector.
+ ///
+ /// This consumes the `String`, so we do not need to copy its contents.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s = String::from("hello");
+ /// let bytes = s.into_bytes();
+ ///
+ /// assert_eq!(&[104, 101, 108, 108, 111][..], &bytes[..]);
+ /// ```
+ #[inline]
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn into_bytes(self) -> Vec<u8> {
+ self.vec
+ }
+
+ /// Extracts a string slice containing the entire `String`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s = String::from("foo");
+ ///
+ /// assert_eq!("foo", s.as_str());
+ /// ```
+ #[inline]
+ #[must_use]
+ #[stable(feature = "string_as_str", since = "1.7.0")]
+ pub fn as_str(&self) -> &str {
+ self
+ }
+
+ /// Converts a `String` into a mutable string slice.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = String::from("foobar");
+ /// let s_mut_str = s.as_mut_str();
+ ///
+ /// s_mut_str.make_ascii_uppercase();
+ ///
+ /// assert_eq!("FOOBAR", s_mut_str);
+ /// ```
+ #[inline]
+ #[must_use]
+ #[stable(feature = "string_as_str", since = "1.7.0")]
+ pub fn as_mut_str(&mut self) -> &mut str {
+ self
+ }
+
+ /// Appends a given string slice onto the end of this `String`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = String::from("foo");
+ ///
+ /// s.push_str("bar");
+ ///
+ /// assert_eq!("foobar", s);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn push_str(&mut self, string: &str) {
+ self.vec.extend_from_slice(string.as_bytes())
+ }
+
+ /// Copies elements from `src` range to the end of the string.
+ ///
+ /// ## Panics
+ ///
+ /// Panics if the starting point or end point do not lie on a [`char`]
+ /// boundary, or if they're out of bounds.
+ ///
+ /// ## Examples
+ ///
+ /// ```
+ /// #![feature(string_extend_from_within)]
+ /// let mut string = String::from("abcde");
+ ///
+ /// string.extend_from_within(2..);
+ /// assert_eq!(string, "abcdecde");
+ ///
+ /// string.extend_from_within(..2);
+ /// assert_eq!(string, "abcdecdeab");
+ ///
+ /// string.extend_from_within(4..8);
+ /// assert_eq!(string, "abcdecdeabecde");
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "string_extend_from_within", issue = "none")]
+ pub fn extend_from_within<R>(&mut self, src: R)
+ where
+ R: RangeBounds<usize>,
+ {
+ let src @ Range { start, end } = slice::range(src, ..self.len());
+
+ assert!(self.is_char_boundary(start));
+ assert!(self.is_char_boundary(end));
+
+ self.vec.extend_from_within(src);
+ }
+
+ /// Returns this `String`'s capacity, in bytes.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s = String::with_capacity(10);
+ ///
+ /// assert!(s.capacity() >= 10);
+ /// ```
+ #[inline]
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn capacity(&self) -> usize {
+ self.vec.capacity()
+ }
+
+ /// Reserves capacity for at least `additional` bytes more than the
+ /// current length. The allocator may reserve more space to speculatively
+ /// avoid frequent allocations. After calling `reserve`,
+ /// capacity will be greater than or equal to `self.len() + additional`.
+ /// Does nothing if capacity is already sufficient.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity overflows [`usize`].
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = String::new();
+ ///
+ /// s.reserve(10);
+ ///
+ /// assert!(s.capacity() >= 10);
+ /// ```
+ ///
+ /// This might not actually increase the capacity:
+ ///
+ /// ```
+ /// let mut s = String::with_capacity(10);
+ /// s.push('a');
+ /// s.push('b');
+ ///
+ /// // s now has a length of 2 and a capacity of at least 10
+ /// let capacity = s.capacity();
+ /// assert_eq!(2, s.len());
+ /// assert!(capacity >= 10);
+ ///
+ /// // Since we already have at least an extra 8 capacity, calling this...
+ /// s.reserve(8);
+ ///
+ /// // ... doesn't actually increase.
+ /// assert_eq!(capacity, s.capacity());
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn reserve(&mut self, additional: usize) {
+ self.vec.reserve(additional)
+ }
+
+ /// Reserves the minimum capacity for at least `additional` bytes more than
+ /// the current length. Unlike [`reserve`], this will not
+ /// deliberately over-allocate to speculatively avoid frequent allocations.
+ /// After calling `reserve_exact`, capacity will be greater than or equal to
+ /// `self.len() + additional`. Does nothing if the capacity is already
+ /// sufficient.
+ ///
+ /// [`reserve`]: String::reserve
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity overflows [`usize`].
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = String::new();
+ ///
+ /// s.reserve_exact(10);
+ ///
+ /// assert!(s.capacity() >= 10);
+ /// ```
+ ///
+ /// This might not actually increase the capacity:
+ ///
+ /// ```
+ /// let mut s = String::with_capacity(10);
+ /// s.push('a');
+ /// s.push('b');
+ ///
+ /// // s now has a length of 2 and a capacity of at least 10
+ /// let capacity = s.capacity();
+ /// assert_eq!(2, s.len());
+ /// assert!(capacity >= 10);
+ ///
+ /// // Since we already have at least an extra 8 capacity, calling this...
+ /// s.reserve_exact(8);
+ ///
+ /// // ... doesn't actually increase.
+ /// assert_eq!(capacity, s.capacity());
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn reserve_exact(&mut self, additional: usize) {
+ self.vec.reserve_exact(additional)
+ }
+
+ /// Tries to reserve capacity for at least `additional` bytes more than the
+ /// current length. The allocator may reserve more space to speculatively
+ /// avoid frequent allocations. After calling `try_reserve`, capacity will be
+ /// greater than or equal to `self.len() + additional` if it returns
+ /// `Ok(())`. Does nothing if capacity is already sufficient.
+ ///
+ /// # Errors
+ ///
+ /// If the capacity overflows, or the allocator reports a failure, then an error
+ /// is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::TryReserveError;
+ ///
+ /// fn process_data(data: &str) -> Result<String, TryReserveError> {
+ /// let mut output = String::new();
+ ///
+ /// // Pre-reserve the memory, exiting if we can't
+ /// output.try_reserve(data.len())?;
+ ///
+ /// // Now we know this can't OOM in the middle of our complex work
+ /// output.push_str(data);
+ ///
+ /// Ok(output)
+ /// }
+ /// # process_data("rust").expect("why is the test harness OOMing on 4 bytes?");
+ /// ```
+ #[stable(feature = "try_reserve", since = "1.57.0")]
+ pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
+ self.vec.try_reserve(additional)
+ }
+
+ /// Tries to reserve the minimum capacity for at least `additional` bytes
+ /// more than the current length. Unlike [`try_reserve`], this will not
+ /// deliberately over-allocate to speculatively avoid frequent allocations.
+ /// After calling `try_reserve_exact`, capacity will be greater than or
+ /// equal to `self.len() + additional` if it returns `Ok(())`.
+ /// Does nothing if the capacity is already sufficient.
+ ///
+ /// Note that the allocator may give the collection more space than it
+ /// requests. Therefore, capacity cannot be relied upon to be precisely
+ /// minimal. Prefer [`try_reserve`] if future insertions are expected.
+ ///
+ /// [`try_reserve`]: String::try_reserve
+ ///
+ /// # Errors
+ ///
+ /// If the capacity overflows, or the allocator reports a failure, then an error
+ /// is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::TryReserveError;
+ ///
+ /// fn process_data(data: &str) -> Result<String, TryReserveError> {
+ /// let mut output = String::new();
+ ///
+ /// // Pre-reserve the memory, exiting if we can't
+ /// output.try_reserve_exact(data.len())?;
+ ///
+ /// // Now we know this can't OOM in the middle of our complex work
+ /// output.push_str(data);
+ ///
+ /// Ok(output)
+ /// }
+ /// # process_data("rust").expect("why is the test harness OOMing on 4 bytes?");
+ /// ```
+ #[stable(feature = "try_reserve", since = "1.57.0")]
+ pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
+ self.vec.try_reserve_exact(additional)
+ }
+
+ /// Shrinks the capacity of this `String` to match its length.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = String::from("foo");
+ ///
+ /// s.reserve(100);
+ /// assert!(s.capacity() >= 100);
+ ///
+ /// s.shrink_to_fit();
+ /// assert_eq!(3, s.capacity());
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn shrink_to_fit(&mut self) {
+ self.vec.shrink_to_fit()
+ }
+
+ /// Shrinks the capacity of this `String` with a lower bound.
+ ///
+ /// The capacity will remain at least as large as both the length
+ /// and the supplied value.
+ ///
+ /// If the current capacity is less than the lower limit, this is a no-op.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut s = String::from("foo");
+ ///
+ /// s.reserve(100);
+ /// assert!(s.capacity() >= 100);
+ ///
+ /// s.shrink_to(10);
+ /// assert!(s.capacity() >= 10);
+ /// s.shrink_to(0);
+ /// assert!(s.capacity() >= 3);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[stable(feature = "shrink_to", since = "1.56.0")]
+ pub fn shrink_to(&mut self, min_capacity: usize) {
+ self.vec.shrink_to(min_capacity)
+ }
+
+ /// Appends the given [`char`] to the end of this `String`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = String::from("abc");
+ ///
+ /// s.push('1');
+ /// s.push('2');
+ /// s.push('3');
+ ///
+ /// assert_eq!("abc123", s);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn push(&mut self, ch: char) {
+ match ch.len_utf8() {
+ 1 => self.vec.push(ch as u8),
+ _ => self.vec.extend_from_slice(ch.encode_utf8(&mut [0; 4]).as_bytes()),
+ }
+ }
+
+ /// Returns a byte slice of this `String`'s contents.
+ ///
+ /// The inverse of this method is [`from_utf8`].
+ ///
+ /// [`from_utf8`]: String::from_utf8
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s = String::from("hello");
+ ///
+ /// assert_eq!(&[104, 101, 108, 108, 111], s.as_bytes());
+ /// ```
+ #[inline]
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn as_bytes(&self) -> &[u8] {
+ &self.vec
+ }
+
+ /// Shortens this `String` to the specified length.
+ ///
+ /// If `new_len` is greater than the string's current length, this has no
+ /// effect.
+ ///
+ /// Note that this method has no effect on the allocated capacity
+ /// of the string.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `new_len` does not lie on a [`char`] boundary.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = String::from("hello");
+ ///
+ /// s.truncate(2);
+ ///
+ /// assert_eq!("he", s);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn truncate(&mut self, new_len: usize) {
+ if new_len <= self.len() {
+ assert!(self.is_char_boundary(new_len));
+ self.vec.truncate(new_len)
+ }
+ }
+
+ /// Removes the last character from the string buffer and returns it.
+ ///
+ /// Returns [`None`] if this `String` is empty.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = String::from("foo");
+ ///
+ /// assert_eq!(s.pop(), Some('o'));
+ /// assert_eq!(s.pop(), Some('o'));
+ /// assert_eq!(s.pop(), Some('f'));
+ ///
+ /// assert_eq!(s.pop(), None);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn pop(&mut self) -> Option<char> {
+ let ch = self.chars().rev().next()?;
+ let newlen = self.len() - ch.len_utf8();
+ unsafe {
+ self.vec.set_len(newlen);
+ }
+ Some(ch)
+ }
+
+ /// Removes a [`char`] from this `String` at a byte position and returns it.
+ ///
+ /// This is an *O*(*n*) operation, as it requires copying every element in the
+ /// buffer.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `idx` is larger than or equal to the `String`'s length,
+ /// or if it does not lie on a [`char`] boundary.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = String::from("foo");
+ ///
+ /// assert_eq!(s.remove(0), 'f');
+ /// assert_eq!(s.remove(1), 'o');
+ /// assert_eq!(s.remove(0), 'o');
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn remove(&mut self, idx: usize) -> char {
+ let ch = match self[idx..].chars().next() {
+ Some(ch) => ch,
+ None => panic!("cannot remove a char from the end of a string"),
+ };
+
+ let next = idx + ch.len_utf8();
+ let len = self.len();
+ unsafe {
+ ptr::copy(self.vec.as_ptr().add(next), self.vec.as_mut_ptr().add(idx), len - next);
+ self.vec.set_len(len - (next - idx));
+ }
+ ch
+ }
+
+ /// Removes all matches of pattern `pat` in the `String`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(string_remove_matches)]
+ /// let mut s = String::from("Trees are not green, the sky is not blue.");
+ /// s.remove_matches("not ");
+ /// assert_eq!("Trees are green, the sky is blue.", s);
+ /// ```
+ ///
+ /// Matches will be detected and removed iteratively, so in cases where
+ /// patterns overlap, only the first pattern will be removed:
+ ///
+ /// ```
+ /// #![feature(string_remove_matches)]
+ /// let mut s = String::from("banana");
+ /// s.remove_matches("ana");
+ /// assert_eq!("bna", s);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "string_remove_matches", reason = "new API", issue = "72826")]
+ pub fn remove_matches<'a, P>(&'a mut self, pat: P)
+ where
+ P: for<'x> Pattern<'x>,
+ {
+ use core::str::pattern::Searcher;
+
+ let rejections = {
+ let mut searcher = pat.into_searcher(self);
+ // Per Searcher::next:
+ //
+ // A Match result needs to contain the whole matched pattern,
+ // however Reject results may be split up into arbitrarily many
+ // adjacent fragments. Both ranges may have zero length.
+ //
+ // In practice the implementation of Searcher::next_match tends to
+ // be more efficient, so we use it here and do some work to invert
+ // matches into rejections since that's what we want to copy below.
+ let mut front = 0;
+ let rejections: Vec<_> = from_fn(|| {
+ let (start, end) = searcher.next_match()?;
+ let prev_front = front;
+ front = end;
+ Some((prev_front, start))
+ })
+ .collect();
+ rejections.into_iter().chain(core::iter::once((front, self.len())))
+ };
+
+ let mut len = 0;
+ let ptr = self.vec.as_mut_ptr();
+
+ for (start, end) in rejections {
+ let count = end - start;
+ if start != len {
+ // SAFETY: per Searcher::next:
+ //
+ // The stream of Match and Reject values up to a Done will
+ // contain index ranges that are adjacent, non-overlapping,
+ // covering the whole haystack, and lying on utf8
+ // boundaries.
+ unsafe {
+ ptr::copy(ptr.add(start), ptr.add(len), count);
+ }
+ }
+ len += count;
+ }
+
+ unsafe {
+ self.vec.set_len(len);
+ }
+ }
+
+ /// Retains only the characters specified by the predicate.
+ ///
+ /// In other words, remove all characters `c` such that `f(c)` returns `false`.
+ /// This method operates in place, visiting each character exactly once in the
+ /// original order, and preserves the order of the retained characters.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut s = String::from("f_o_ob_ar");
+ ///
+ /// s.retain(|c| c != '_');
+ ///
+ /// assert_eq!(s, "foobar");
+ /// ```
+ ///
+ /// Because the elements are visited exactly once in the original order,
+ /// external state may be used to decide which elements to keep.
+ ///
+ /// ```
+ /// let mut s = String::from("abcde");
+ /// let keep = [false, true, true, false, true];
+ /// let mut iter = keep.iter();
+ /// s.retain(|_| *iter.next().unwrap());
+ /// assert_eq!(s, "bce");
+ /// ```
+ #[inline]
+ #[stable(feature = "string_retain", since = "1.26.0")]
+ pub fn retain<F>(&mut self, mut f: F)
+ where
+ F: FnMut(char) -> bool,
+ {
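+ // Guard for panic safety: its destructor sets the final length, so if `f`
+ // panics the string is truncated to the already-compacted prefix and stays
+ // valid UTF-8.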
+ struct SetLenOnDrop<'a> {
+ s: &'a mut String,
+ idx: usize,
+ del_bytes: usize,
+ }
+
+ impl<'a> Drop for SetLenOnDrop<'a> {
+ fn drop(&mut self) {
+ let new_len = self.idx - self.del_bytes;
+ debug_assert!(new_len <= self.s.len());
+ unsafe { self.s.vec.set_len(new_len) };
+ }
+ }
+
+ let len = self.len();
+ let mut guard = SetLenOnDrop { s: self, idx: 0, del_bytes: 0 };
+
+ while guard.idx < len {
+ let ch =
+ // SAFETY: `guard.idx` is positive-or-zero and less than `len`, so the `get_unchecked`
+ // is in bounds. `self` is a valid UTF-8 string and the returned slice starts at
+ // a unicode code point, so the `Chars` iterator always returns at least one character.
+ unsafe { guard.s.get_unchecked(guard.idx..len).chars().next().unwrap_unchecked() };
+ let ch_len = ch.len_utf8();
+
+ if !f(ch) {
+ guard.del_bytes += ch_len;
+ } else if guard.del_bytes > 0 {
+ // SAFETY: `guard.idx` is in bounds and `guard.del_bytes` represents the number of
+ // bytes that have been erased from the string, so the resulting `guard.idx -
+ // guard.del_bytes` always lies on a valid unicode code point boundary.
+ //
+ // `guard.del_bytes` >= `ch.len_utf8()`, so taking a slice with `ch.len_utf8()` len
+ // is safe.
+ ch.encode_utf8(unsafe {
+ crate::slice::from_raw_parts_mut(
+ guard.s.as_mut_ptr().add(guard.idx - guard.del_bytes),
+ ch.len_utf8(),
+ )
+ });
+ }
+
+ // Point idx to the next char
+ guard.idx += ch_len;
+ }
+
+ drop(guard);
+ }
+
+ /// Inserts a character into this `String` at a byte position.
+ ///
+ /// This is an *O*(*n*) operation as it requires copying every element in the
+ /// buffer.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `idx` is larger than the `String`'s length, or if it does not
+ /// lie on a [`char`] boundary.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = String::with_capacity(3);
+ ///
+ /// s.insert(0, 'f');
+ /// s.insert(1, 'o');
+ /// s.insert(2, 'o');
+ ///
+ /// assert_eq!("foo", s);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn insert(&mut self, idx: usize, ch: char) {
+ assert!(self.is_char_boundary(idx));
+ let mut bits = [0; 4];
+ let bits = ch.encode_utf8(&mut bits).as_bytes();
+
+ unsafe {
+ self.insert_bytes(idx, bits);
+ }
+ }
+
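+ // Inserts `bytes` at byte offset `idx`, shifting the tail of the buffer to
+ // the right. Callers must pass an `idx` on a char boundary and bytes that
+ // keep the contents valid UTF-8; `insert` and `insert_str` check the
+ // boundary before calling this.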
+ #[cfg(not(no_global_oom_handling))]
+ unsafe fn insert_bytes(&mut self, idx: usize, bytes: &[u8]) {
+ let len = self.len();
+ let amt = bytes.len();
+ self.vec.reserve(amt);
+
+ unsafe {
+ ptr::copy(self.vec.as_ptr().add(idx), self.vec.as_mut_ptr().add(idx + amt), len - idx);
+ ptr::copy_nonoverlapping(bytes.as_ptr(), self.vec.as_mut_ptr().add(idx), amt);
+ self.vec.set_len(len + amt);
+ }
+ }
+
+ /// Inserts a string slice into this `String` at a byte position.
+ ///
+ /// This is an *O*(*n*) operation as it requires copying every element in the
+ /// buffer.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `idx` is larger than the `String`'s length, or if it does not
+ /// lie on a [`char`] boundary.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = String::from("bar");
+ ///
+ /// s.insert_str(0, "foo");
+ ///
+ /// assert_eq!("foobar", s);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[stable(feature = "insert_str", since = "1.16.0")]
+ pub fn insert_str(&mut self, idx: usize, string: &str) {
+ assert!(self.is_char_boundary(idx));
+
+ unsafe {
+ self.insert_bytes(idx, string.as_bytes());
+ }
+ }
+
+ /// Returns a mutable reference to the contents of this `String`.
+ ///
+ /// # Safety
+ ///
+ /// This function is unsafe because the returned `&mut Vec` allows writing
+ /// bytes which are not valid UTF-8. If this constraint is violated, using
+ /// the original `String` after dropping the `&mut Vec` may violate memory
+ /// safety, as the rest of the standard library assumes that `String`s are
+ /// valid UTF-8.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = String::from("hello");
+ ///
+ /// unsafe {
+ /// let vec = s.as_mut_vec();
+ /// assert_eq!(&[104, 101, 108, 108, 111][..], &vec[..]);
+ ///
+ /// vec.reverse();
+ /// }
+ /// assert_eq!(s, "olleh");
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub unsafe fn as_mut_vec(&mut self) -> &mut Vec<u8> {
+ &mut self.vec
+ }
+
+ /// Returns the length of this `String`, in bytes, not [`char`]s or
+ /// graphemes. In other words, it might not be what a human considers the
+ /// length of the string.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let a = String::from("foo");
+ /// assert_eq!(a.len(), 3);
+ ///
+ /// let fancy_f = String::from("ƒoo");
+ /// assert_eq!(fancy_f.len(), 4);
+ /// assert_eq!(fancy_f.chars().count(), 3);
+ /// ```
+ #[inline]
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn len(&self) -> usize {
+ self.vec.len()
+ }
+
+ /// Returns `true` if this `String` has a length of zero, and `false` otherwise.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut v = String::new();
+ /// assert!(v.is_empty());
+ ///
+ /// v.push('a');
+ /// assert!(!v.is_empty());
+ /// ```
+ #[inline]
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+
+ /// Splits the string into two at the given byte index.
+ ///
+ /// Returns a newly allocated `String`. `self` contains bytes `[0, at)`, and
+ /// the returned `String` contains bytes `[at, len)`. `at` must be on the
+ /// boundary of a UTF-8 code point.
+ ///
+ /// Note that the capacity of `self` does not change.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `at` is not on a `UTF-8` code point boundary, or if it is beyond the last
+ /// code point of the string.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # fn main() {
+ /// let mut hello = String::from("Hello, World!");
+ /// let world = hello.split_off(7);
+ /// assert_eq!(hello, "Hello, ");
+ /// assert_eq!(world, "World!");
+ /// # }
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[stable(feature = "string_split_off", since = "1.16.0")]
+ #[must_use = "use `.truncate()` if you don't need the other half"]
+ pub fn split_off(&mut self, at: usize) -> String {
+ assert!(self.is_char_boundary(at));
+ let other = self.vec.split_off(at);
+ unsafe { String::from_utf8_unchecked(other) }
+ }
+
+ /// Truncates this `String`, removing all contents.
+ ///
+ /// While this means the `String` will have a length of zero, it does not
+ /// touch its capacity.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = String::from("foo");
+ ///
+ /// s.clear();
+ ///
+ /// assert!(s.is_empty());
+ /// assert_eq!(0, s.len());
+ /// assert_eq!(3, s.capacity());
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn clear(&mut self) {
+ self.vec.clear()
+ }
+
+ /// Removes the specified range from the string in bulk, returning all
+ /// removed characters as an iterator.
+ ///
+ /// The returned iterator keeps a mutable borrow on the string to optimize
+ /// its implementation.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the starting point or end point do not lie on a [`char`]
+ /// boundary, or if they're out of bounds.
+ ///
+ /// # Leaking
+ ///
+ /// If the returned iterator goes out of scope without being dropped (due to
+ /// [`core::mem::forget`], for example), the string may still contain a copy
+ /// of any drained characters, or may have lost characters arbitrarily,
+ /// including characters outside the range.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = String::from("α is alpha, β is beta");
+ /// let beta_offset = s.find('β').unwrap_or(s.len());
+ ///
+ /// // Remove the range up until the β from the string
+ /// let t: String = s.drain(..beta_offset).collect();
+ /// assert_eq!(t, "α is alpha, ");
+ /// assert_eq!(s, "β is beta");
+ ///
+ /// // A full range clears the string, like `clear()` does
+ /// s.drain(..);
+ /// assert_eq!(s, "");
+ /// ```
+ #[stable(feature = "drain", since = "1.6.0")]
+ pub fn drain<R>(&mut self, range: R) -> Drain<'_>
+ where
+ R: RangeBounds<usize>,
+ {
+ // Memory safety
+ //
+ // The String version of Drain does not have the memory safety issues
+ // of the vector version. The data is just plain bytes.
+ // Because the range removal happens in Drop, if the Drain iterator is leaked,
+ // the removal will not happen.
+ let Range { start, end } = slice::range(range, ..self.len());
+ assert!(self.is_char_boundary(start));
+ assert!(self.is_char_boundary(end));
+
+ // Take out two simultaneous borrows. The &mut String won't be accessed
+ // until iteration is over, in Drop.
+ let self_ptr = self as *mut _;
+ // SAFETY: `slice::range` and `is_char_boundary` do the appropriate bounds checks.
+ let chars_iter = unsafe { self.get_unchecked(start..end) }.chars();
+
+ Drain { start, end, iter: chars_iter, string: self_ptr }
+ }
+
+ /// Removes the specified range in the string,
+ /// and replaces it with the given string.
+ /// The given string doesn't need to be the same length as the range.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the starting point or end point do not lie on a [`char`]
+ /// boundary, or if they're out of bounds.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let mut s = String::from("α is alpha, β is beta");
+ /// let beta_offset = s.find('β').unwrap_or(s.len());
+ ///
+ /// // Replace the range up until the β from the string
+ /// s.replace_range(..beta_offset, "Α is capital alpha; ");
+ /// assert_eq!(s, "Α is capital alpha; β is beta");
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "splice", since = "1.27.0")]
+ pub fn replace_range<R>(&mut self, range: R, replace_with: &str)
+ where
+ R: RangeBounds<usize>,
+ {
+ // Memory safety
+ //
+ // Replace_range does not have the memory safety issues of a vector Splice.
+ // The data is just plain bytes.
+
+ // WARNING: Inlining this variable would be unsound (#81138)
+ let start = range.start_bound();
+ match start {
+ Included(&n) => assert!(self.is_char_boundary(n)),
+ Excluded(&n) => assert!(self.is_char_boundary(n + 1)),
+ Unbounded => {}
+ };
+ // WARNING: Inlining this variable would be unsound (#81138)
+ let end = range.end_bound();
+ match end {
+ Included(&n) => assert!(self.is_char_boundary(n + 1)),
+ Excluded(&n) => assert!(self.is_char_boundary(n)),
+ Unbounded => {}
+ };
+
+ // Using `range` again would be unsound (#81138)
+ // We assume the bounds reported by `range` remain the same, but
+ // an adversarial implementation could change between calls
+ unsafe { self.as_mut_vec() }.splice((start, end), replace_with.bytes());
+ }
+
+ /// Converts this `String` into a <code>[Box]<[str]></code>.
+ ///
+ /// This will drop any excess capacity.
+ ///
+ /// [str]: prim@str "str"
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s = String::from("hello");
+ ///
+ /// let b = s.into_boxed_str();
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "box_str", since = "1.4.0")]
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[inline]
+ pub fn into_boxed_str(self) -> Box<str> {
+ let slice = self.vec.into_boxed_slice();
+ unsafe { from_boxed_utf8_unchecked(slice) }
+ }
+}
+
+impl FromUtf8Error {
+ /// Returns a slice of the [`u8`]s that were attempted to be converted to a `String`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// // some invalid bytes, in a vector
+ /// let bytes = vec![0, 159];
+ ///
+ /// let value = String::from_utf8(bytes);
+ ///
+ /// assert_eq!(&[0, 159], value.unwrap_err().as_bytes());
+ /// ```
+ #[must_use]
+ #[stable(feature = "from_utf8_error_as_bytes", since = "1.26.0")]
+ pub fn as_bytes(&self) -> &[u8] {
+ &self.bytes[..]
+ }
+
+ /// Returns the bytes that were attempted to convert to a `String`.
+ ///
+ /// This method is carefully constructed to avoid allocation. It will
+ /// consume the error, moving out the bytes, so that a copy of the bytes
+ /// does not need to be made.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// // some invalid bytes, in a vector
+ /// let bytes = vec![0, 159];
+ ///
+ /// let value = String::from_utf8(bytes);
+ ///
+ /// assert_eq!(vec![0, 159], value.unwrap_err().into_bytes());
+ /// ```
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn into_bytes(self) -> Vec<u8> {
+ self.bytes
+ }
+
+ /// Fetches a `Utf8Error` to get more details about the conversion failure.
+ ///
+ /// The [`Utf8Error`] type provided by [`std::str`] represents an error that may
+ /// occur when converting a slice of [`u8`]s to a [`&str`]. In this sense, it's
+ /// an analogue to `FromUtf8Error`. See its documentation for more details
+ /// on using it.
+ ///
+ /// [`std::str`]: core::str "std::str"
+ /// [`&str`]: prim@str "&str"
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// // some invalid bytes, in a vector
+ /// let bytes = vec![0, 159];
+ ///
+ /// let error = String::from_utf8(bytes).unwrap_err().utf8_error();
+ ///
+ /// // the first byte is invalid here
+ /// assert_eq!(1, error.valid_up_to());
+ /// ```
+ #[must_use]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn utf8_error(&self) -> Utf8Error {
+ self.error
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Display for FromUtf8Error {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(&self.error, f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Display for FromUtf16Error {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt("invalid utf-16: lone surrogate found", f)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Clone for String {
+ fn clone(&self) -> Self {
+ String { vec: self.vec.clone() }
+ }
+
+ fn clone_from(&mut self, source: &Self) {
+ self.vec.clone_from(&source.vec);
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl FromIterator<char> for String {
+ fn from_iter<I: IntoIterator<Item = char>>(iter: I) -> String {
+ let mut buf = String::new();
+ buf.extend(iter);
+ buf
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "string_from_iter_by_ref", since = "1.17.0")]
+impl<'a> FromIterator<&'a char> for String {
+ fn from_iter<I: IntoIterator<Item = &'a char>>(iter: I) -> String {
+ let mut buf = String::new();
+ buf.extend(iter);
+ buf
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a> FromIterator<&'a str> for String {
+ fn from_iter<I: IntoIterator<Item = &'a str>>(iter: I) -> String {
+ let mut buf = String::new();
+ buf.extend(iter);
+ buf
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "extend_string", since = "1.4.0")]
+impl FromIterator<String> for String {
+ fn from_iter<I: IntoIterator<Item = String>>(iter: I) -> String {
+ let mut iterator = iter.into_iter();
+
+ // Because we're iterating over `String`s, we can avoid at least
+ // one allocation by getting the first string from the iterator
+ // and appending to it all the subsequent strings.
+ match iterator.next() {
+ None => String::new(),
+ Some(mut buf) => {
+ buf.extend(iterator);
+ buf
+ }
+ }
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "box_str2", since = "1.45.0")]
+impl FromIterator<Box<str>> for String {
+ fn from_iter<I: IntoIterator<Item = Box<str>>>(iter: I) -> String {
+ let mut buf = String::new();
+ buf.extend(iter);
+ buf
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "herd_cows", since = "1.19.0")]
+impl<'a> FromIterator<Cow<'a, str>> for String {
+ fn from_iter<I: IntoIterator<Item = Cow<'a, str>>>(iter: I) -> String {
+ let mut iterator = iter.into_iter();
+
+ // Because we're iterating over CoWs, we can (potentially) avoid at least
+ // one allocation by getting the first item and appending to it all the
+ // subsequent items.
+ match iterator.next() {
+ None => String::new(),
+ Some(cow) => {
+ let mut buf = cow.into_owned();
+ buf.extend(iterator);
+ buf
+ }
+ }
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Extend<char> for String {
+ fn extend<I: IntoIterator<Item = char>>(&mut self, iter: I) {
+ let iterator = iter.into_iter();
+ let (lower_bound, _) = iterator.size_hint();
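+ // Reserve one byte per char reported by the lower bound; chars needing more
+ // than one byte may still trigger further growth while pushing.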
+ self.reserve(lower_bound);
+ iterator.for_each(move |c| self.push(c));
+ }
+
+ #[inline]
+ fn extend_one(&mut self, c: char) {
+ self.push(c);
+ }
+
+ #[inline]
+ fn extend_reserve(&mut self, additional: usize) {
+ self.reserve(additional);
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "extend_ref", since = "1.2.0")]
+impl<'a> Extend<&'a char> for String {
+ fn extend<I: IntoIterator<Item = &'a char>>(&mut self, iter: I) {
+ self.extend(iter.into_iter().cloned());
+ }
+
+ #[inline]
+ fn extend_one(&mut self, &c: &'a char) {
+ self.push(c);
+ }
+
+ #[inline]
+ fn extend_reserve(&mut self, additional: usize) {
+ self.reserve(additional);
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a> Extend<&'a str> for String {
+ fn extend<I: IntoIterator<Item = &'a str>>(&mut self, iter: I) {
+ iter.into_iter().for_each(move |s| self.push_str(s));
+ }
+
+ #[inline]
+ fn extend_one(&mut self, s: &'a str) {
+ self.push_str(s);
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "box_str2", since = "1.45.0")]
+impl Extend<Box<str>> for String {
+ fn extend<I: IntoIterator<Item = Box<str>>>(&mut self, iter: I) {
+ iter.into_iter().for_each(move |s| self.push_str(&s));
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "extend_string", since = "1.4.0")]
+impl Extend<String> for String {
+ fn extend<I: IntoIterator<Item = String>>(&mut self, iter: I) {
+ iter.into_iter().for_each(move |s| self.push_str(&s));
+ }
+
+ #[inline]
+ fn extend_one(&mut self, s: String) {
+ self.push_str(&s);
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "herd_cows", since = "1.19.0")]
+impl<'a> Extend<Cow<'a, str>> for String {
+ fn extend<I: IntoIterator<Item = Cow<'a, str>>>(&mut self, iter: I) {
+ iter.into_iter().for_each(move |s| self.push_str(&s));
+ }
+
+ #[inline]
+ fn extend_one(&mut self, s: Cow<'a, str>) {
+ self.push_str(&s);
+ }
+}
+
+/// A convenience impl that delegates to the impl for `&str`.
+///
+/// # Examples
+///
+/// ```
+/// assert_eq!(String::from("Hello world").find("world"), Some(6));
+/// ```
+#[unstable(
+ feature = "pattern",
+ reason = "API not fully fleshed out and ready to be stabilized",
+ issue = "27721"
+)]
+impl<'a, 'b> Pattern<'a> for &'b String {
+ type Searcher = <&'b str as Pattern<'a>>::Searcher;
+
+ fn into_searcher(self, haystack: &'a str) -> <&'b str as Pattern<'a>>::Searcher {
+ self[..].into_searcher(haystack)
+ }
+
+ #[inline]
+ fn is_contained_in(self, haystack: &'a str) -> bool {
+ self[..].is_contained_in(haystack)
+ }
+
+ #[inline]
+ fn is_prefix_of(self, haystack: &'a str) -> bool {
+ self[..].is_prefix_of(haystack)
+ }
+
+ #[inline]
+ fn strip_prefix_of(self, haystack: &'a str) -> Option<&'a str> {
+ self[..].strip_prefix_of(haystack)
+ }
+
+ #[inline]
+ fn is_suffix_of(self, haystack: &'a str) -> bool {
+ self[..].is_suffix_of(haystack)
+ }
+
+ #[inline]
+ fn strip_suffix_of(self, haystack: &'a str) -> Option<&'a str> {
+ self[..].strip_suffix_of(haystack)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl PartialEq for String {
+ #[inline]
+ fn eq(&self, other: &String) -> bool {
+ PartialEq::eq(&self[..], &other[..])
+ }
+ #[inline]
+ fn ne(&self, other: &String) -> bool {
+ PartialEq::ne(&self[..], &other[..])
+ }
+}
+
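+// Generates a symmetric pair of `PartialEq` impls between two string-like
+// types, so comparisons such as `String == &str` work in both directions.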
+macro_rules! impl_eq {
+ ($lhs:ty, $rhs: ty) => {
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[allow(unused_lifetimes)]
+ impl<'a, 'b> PartialEq<$rhs> for $lhs {
+ #[inline]
+ fn eq(&self, other: &$rhs) -> bool {
+ PartialEq::eq(&self[..], &other[..])
+ }
+ #[inline]
+ fn ne(&self, other: &$rhs) -> bool {
+ PartialEq::ne(&self[..], &other[..])
+ }
+ }
+
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[allow(unused_lifetimes)]
+ impl<'a, 'b> PartialEq<$lhs> for $rhs {
+ #[inline]
+ fn eq(&self, other: &$lhs) -> bool {
+ PartialEq::eq(&self[..], &other[..])
+ }
+ #[inline]
+ fn ne(&self, other: &$lhs) -> bool {
+ PartialEq::ne(&self[..], &other[..])
+ }
+ }
+ };
+}
+
+impl_eq! { String, str }
+impl_eq! { String, &'a str }
+#[cfg(not(no_global_oom_handling))]
+impl_eq! { Cow<'a, str>, str }
+#[cfg(not(no_global_oom_handling))]
+impl_eq! { Cow<'a, str>, &'b str }
+#[cfg(not(no_global_oom_handling))]
+impl_eq! { Cow<'a, str>, String }
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_default_impls", issue = "87864")]
+impl const Default for String {
+ /// Creates an empty `String`.
+ #[inline]
+ fn default() -> String {
+ String::new()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Display for String {
+ #[inline]
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(&**self, f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Debug for String {
+ #[inline]
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&**self, f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl hash::Hash for String {
+ #[inline]
+ fn hash<H: hash::Hasher>(&self, hasher: &mut H) {
+ (**self).hash(hasher)
+ }
+}
+
+/// Implements the `+` operator for concatenating two strings.
+///
+/// This consumes the `String` on the left-hand side and re-uses its buffer (growing it if
+/// necessary). This is done to avoid allocating a new `String` and copying the entire contents on
+/// every operation, which would lead to *O*(*n*^2) running time when building an *n*-byte string by
+/// repeated concatenation.
+///
+/// The string on the right-hand side is only borrowed; its contents are copied into the returned
+/// `String`.
+///
+/// # Examples
+///
+/// Concatenating two `String`s takes the first by value and borrows the second:
+///
+/// ```
+/// let a = String::from("hello");
+/// let b = String::from(" world");
+/// let c = a + &b;
+/// // `a` is moved and can no longer be used here.
+/// ```
+///
+/// If you want to keep using the first `String`, you can clone it and append to the clone instead:
+///
+/// ```
+/// let a = String::from("hello");
+/// let b = String::from(" world");
+/// let c = a.clone() + &b;
+/// // `a` is still valid here.
+/// ```
+///
+/// Concatenating `&str` slices can be done by converting the first to a `String`:
+///
+/// ```
+/// let a = "hello";
+/// let b = " world";
+/// let c = a.to_string() + b;
+/// ```
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl Add<&str> for String {
+ type Output = String;
+
+ #[inline]
+ fn add(mut self, other: &str) -> String {
+ self.push_str(other);
+ self
+ }
+}
+
+/// Implements the `+=` operator for appending to a `String`.
+///
+/// This has the same behavior as the [`push_str`][String::push_str] method.
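+///
+/// # Examples
+///
+/// A minimal usage sketch:
+///
+/// ```
+/// let mut message = String::from("Hello, ");
+/// message += "world!";
+/// assert_eq!(message, "Hello, world!");
+/// ```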
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "stringaddassign", since = "1.12.0")]
+impl AddAssign<&str> for String {
+ #[inline]
+ fn add_assign(&mut self, other: &str) {
+ self.push_str(other);
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl ops::Index<ops::Range<usize>> for String {
+ type Output = str;
+
+ #[inline]
+ fn index(&self, index: ops::Range<usize>) -> &str {
+ &self[..][index]
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl ops::Index<ops::RangeTo<usize>> for String {
+ type Output = str;
+
+ #[inline]
+ fn index(&self, index: ops::RangeTo<usize>) -> &str {
+ &self[..][index]
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl ops::Index<ops::RangeFrom<usize>> for String {
+ type Output = str;
+
+ #[inline]
+ fn index(&self, index: ops::RangeFrom<usize>) -> &str {
+ &self[..][index]
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl ops::Index<ops::RangeFull> for String {
+ type Output = str;
+
+ #[inline]
+ fn index(&self, _index: ops::RangeFull) -> &str {
+ unsafe { str::from_utf8_unchecked(&self.vec) }
+ }
+}
+#[stable(feature = "inclusive_range", since = "1.26.0")]
+impl ops::Index<ops::RangeInclusive<usize>> for String {
+ type Output = str;
+
+ #[inline]
+ fn index(&self, index: ops::RangeInclusive<usize>) -> &str {
+ Index::index(&**self, index)
+ }
+}
+#[stable(feature = "inclusive_range", since = "1.26.0")]
+impl ops::Index<ops::RangeToInclusive<usize>> for String {
+ type Output = str;
+
+ #[inline]
+ fn index(&self, index: ops::RangeToInclusive<usize>) -> &str {
+ Index::index(&**self, index)
+ }
+}
+
+#[stable(feature = "derefmut_for_string", since = "1.3.0")]
+impl ops::IndexMut<ops::Range<usize>> for String {
+ #[inline]
+ fn index_mut(&mut self, index: ops::Range<usize>) -> &mut str {
+ &mut self[..][index]
+ }
+}
+#[stable(feature = "derefmut_for_string", since = "1.3.0")]
+impl ops::IndexMut<ops::RangeTo<usize>> for String {
+ #[inline]
+ fn index_mut(&mut self, index: ops::RangeTo<usize>) -> &mut str {
+ &mut self[..][index]
+ }
+}
+#[stable(feature = "derefmut_for_string", since = "1.3.0")]
+impl ops::IndexMut<ops::RangeFrom<usize>> for String {
+ #[inline]
+ fn index_mut(&mut self, index: ops::RangeFrom<usize>) -> &mut str {
+ &mut self[..][index]
+ }
+}
+#[stable(feature = "derefmut_for_string", since = "1.3.0")]
+impl ops::IndexMut<ops::RangeFull> for String {
+ #[inline]
+ fn index_mut(&mut self, _index: ops::RangeFull) -> &mut str {
+ unsafe { str::from_utf8_unchecked_mut(&mut *self.vec) }
+ }
+}
+#[stable(feature = "inclusive_range", since = "1.26.0")]
+impl ops::IndexMut<ops::RangeInclusive<usize>> for String {
+ #[inline]
+ fn index_mut(&mut self, index: ops::RangeInclusive<usize>) -> &mut str {
+ IndexMut::index_mut(&mut **self, index)
+ }
+}
+#[stable(feature = "inclusive_range", since = "1.26.0")]
+impl ops::IndexMut<ops::RangeToInclusive<usize>> for String {
+ #[inline]
+ fn index_mut(&mut self, index: ops::RangeToInclusive<usize>) -> &mut str {
+ IndexMut::index_mut(&mut **self, index)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl ops::Deref for String {
+ type Target = str;
+
+ #[inline]
+ fn deref(&self) -> &str {
+ unsafe { str::from_utf8_unchecked(&self.vec) }
+ }
+}
+
+#[stable(feature = "derefmut_for_string", since = "1.3.0")]
+impl ops::DerefMut for String {
+ #[inline]
+ fn deref_mut(&mut self) -> &mut str {
+ unsafe { str::from_utf8_unchecked_mut(&mut *self.vec) }
+ }
+}
+
+/// A type alias for [`Infallible`].
+///
+/// This alias exists for backwards compatibility, and may be eventually deprecated.
+///
+/// [`Infallible`]: core::convert::Infallible "convert::Infallible"
+#[stable(feature = "str_parse_error", since = "1.5.0")]
+pub type ParseError = core::convert::Infallible;
+
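+/// `String` implements `FromStr` infallibly, so it can also be produced with
+/// `str::parse`. A minimal usage sketch:
+///
+/// ```
+/// use std::str::FromStr;
+///
+/// let s = String::from_str("hello").unwrap();
+/// let t: String = "world".parse().unwrap();
+/// assert_eq!(s, "hello");
+/// assert_eq!(t, "world");
+/// ```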
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl FromStr for String {
+ type Err = core::convert::Infallible;
+ #[inline]
+ fn from_str(s: &str) -> Result<String, Self::Err> {
+ Ok(String::from(s))
+ }
+}
+
+/// A trait for converting a value to a `String`.
+///
+/// This trait is automatically implemented for any type which implements the
+/// [`Display`] trait. As such, `ToString` shouldn't be implemented directly:
+/// [`Display`] should be implemented instead, and you get the `ToString`
+/// implementation for free.
+///
+/// [`Display`]: fmt::Display
+#[cfg_attr(not(test), rustc_diagnostic_item = "ToString")]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub trait ToString {
+ /// Converts the given value to a `String`.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let i = 5;
+ /// let five = String::from("5");
+ ///
+ /// assert_eq!(five, i.to_string());
+ /// ```
+ #[rustc_conversion_suggestion]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ fn to_string(&self) -> String;
+}
+
+/// # Panics
+///
+/// In this implementation, the `to_string` method panics
+/// if the `Display` implementation returns an error.
+/// This indicates an incorrect `Display` implementation
+/// since `fmt::Write for String` never returns an error itself.
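+///
+/// A hypothetical sketch (not part of the original docs) of such an incorrect
+/// `Display` implementation, which makes `to_string` panic:
+///
+/// ```should_panic
+/// use std::fmt;
+///
+/// struct Broken;
+///
+/// impl fmt::Display for Broken {
+/// fn fmt(&self, _f: &mut fmt::Formatter<'_>) -> fmt::Result {
+/// // Incorrect: reports a formatting error even though nothing failed.
+/// Err(fmt::Error)
+/// }
+/// }
+///
+/// Broken.to_string(); // panics
+/// ```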
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: fmt::Display + ?Sized> ToString for T {
+ // A common guideline is to not inline generic functions. However,
+ // removing `#[inline]` from this method causes non-negligible regressions.
+ // See <https://github.com/rust-lang/rust/pull/74852>, the last attempt
+ // to try to remove it.
+ #[inline]
+ default fn to_string(&self) -> String {
+ let mut buf = String::new();
+ let mut formatter = core::fmt::Formatter::new(&mut buf);
+ // Bypass format_args!() to avoid write_str with zero-length strs
+ fmt::Display::fmt(self, &mut formatter)
+ .expect("a Display implementation returned an error unexpectedly");
+ buf
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "char_to_string_specialization", since = "1.46.0")]
+impl ToString for char {
+ #[inline]
+ fn to_string(&self) -> String {
+ String::from(self.encode_utf8(&mut [0; 4]))
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "u8_to_string_specialization", since = "1.54.0")]
+impl ToString for u8 {
+ #[inline]
+ fn to_string(&self) -> String {
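+ // A `u8` needs at most three decimal digits, so format it manually instead
+ // of going through the generic `Display`-based path.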
+ let mut buf = String::with_capacity(3);
+ let mut n = *self;
+ if n >= 10 {
+ if n >= 100 {
+ buf.push((b'0' + n / 100) as char);
+ n %= 100;
+ }
+ buf.push((b'0' + n / 10) as char);
+ n %= 10;
+ }
+ buf.push((b'0' + n) as char);
+ buf
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "i8_to_string_specialization", since = "1.54.0")]
+impl ToString for i8 {
+ #[inline]
+ fn to_string(&self) -> String {
+ let mut buf = String::with_capacity(4);
+ if self.is_negative() {
+ buf.push('-');
+ }
+ let mut n = self.unsigned_abs();
+ if n >= 10 {
+ if n >= 100 {
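+ // The magnitude of an `i8` is at most 128, so the hundreds digit can only be 1.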
+ buf.push('1');
+ n -= 100;
+ }
+ buf.push((b'0' + n / 10) as char);
+ n %= 10;
+ }
+ buf.push((b'0' + n) as char);
+ buf
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "str_to_string_specialization", since = "1.9.0")]
+impl ToString for str {
+ #[inline]
+ fn to_string(&self) -> String {
+ String::from(self)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "cow_str_to_string_specialization", since = "1.17.0")]
+impl ToString for Cow<'_, str> {
+ #[inline]
+ fn to_string(&self) -> String {
+ self[..].to_owned()
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "string_to_string_specialization", since = "1.17.0")]
+impl ToString for String {
+ #[inline]
+ fn to_string(&self) -> String {
+ self.to_owned()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl AsRef<str> for String {
+ #[inline]
+ fn as_ref(&self) -> &str {
+ self
+ }
+}
+
+#[stable(feature = "string_as_mut", since = "1.43.0")]
+impl AsMut<str> for String {
+ #[inline]
+ fn as_mut(&mut self) -> &mut str {
+ self
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl AsRef<[u8]> for String {
+ #[inline]
+ fn as_ref(&self) -> &[u8] {
+ self.as_bytes()
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl From<&str> for String {
+ /// Converts a `&str` into a [`String`].
+ ///
+ /// The result is allocated on the heap.
+ #[inline]
+ fn from(s: &str) -> String {
+ s.to_owned()
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "from_mut_str_for_string", since = "1.44.0")]
+impl From<&mut str> for String {
+ /// Converts a `&mut str` into a [`String`].
+ ///
+ /// The result is allocated on the heap.
+ #[inline]
+ fn from(s: &mut str) -> String {
+ s.to_owned()
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "from_ref_string", since = "1.35.0")]
+impl From<&String> for String {
+ /// Converts a `&String` into a [`String`].
+ ///
+ /// This clones `s` and returns the clone.
+ #[inline]
+ fn from(s: &String) -> String {
+ s.clone()
+ }
+}
+
+// note: test pulls in libstd, which causes errors here
+#[cfg(not(test))]
+#[stable(feature = "string_from_box", since = "1.18.0")]
+impl From<Box<str>> for String {
+ /// Converts the given boxed `str` slice to a [`String`].
+ /// It is notable that the `str` slice is owned.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s1: String = String::from("hello world");
+ /// let s2: Box<str> = s1.into_boxed_str();
+ /// let s3: String = String::from(s2);
+ ///
+ /// assert_eq!("hello world", s3)
+ /// ```
+ fn from(s: Box<str>) -> String {
+ s.into_string()
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "box_from_str", since = "1.20.0")]
+impl From<String> for Box<str> {
+ /// Converts the given [`String`] to a boxed `str` slice that is owned.
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s1: String = String::from("hello world");
+ /// let s2: Box<str> = Box::from(s1);
+ /// let s3: String = String::from(s2);
+ ///
+ /// assert_eq!("hello world", s3)
+ /// ```
+ fn from(s: String) -> Box<str> {
+ s.into_boxed_str()
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "string_from_cow_str", since = "1.14.0")]
+impl<'a> From<Cow<'a, str>> for String {
+ /// Converts a clone-on-write string to an owned
+ /// instance of [`String`].
+ ///
+ /// This extracts the owned string,
+ /// cloning the string if it is not already owned.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # use std::borrow::Cow;
+ /// // If the string is not owned...
+ /// let cow: Cow<str> = Cow::Borrowed("eggplant");
+ /// // It will allocate on the heap and copy the string.
+ /// let owned: String = String::from(cow);
+ /// assert_eq!(&owned[..], "eggplant");
+ /// ```
+ fn from(s: Cow<'a, str>) -> String {
+ s.into_owned()
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a> From<&'a str> for Cow<'a, str> {
+ /// Converts a string slice into a [`Borrowed`] variant.
+ /// No heap allocation is performed, and the string
+ /// is not copied.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # use std::borrow::Cow;
+ /// assert_eq!(Cow::from("eggplant"), Cow::Borrowed("eggplant"));
+ /// ```
+ ///
+ /// [`Borrowed`]: crate::borrow::Cow::Borrowed "borrow::Cow::Borrowed"
+ #[inline]
+ fn from(s: &'a str) -> Cow<'a, str> {
+ Cow::Borrowed(s)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a> From<String> for Cow<'a, str> {
+ /// Converts a [`String`] into an [`Owned`] variant.
+ /// No heap allocation is performed, and the string
+ /// is not copied.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # use std::borrow::Cow;
+ /// let s = "eggplant".to_string();
+ /// let s2 = "eggplant".to_string();
+ /// assert_eq!(Cow::from(s), Cow::<'static, str>::Owned(s2));
+ /// ```
+ ///
+ /// [`Owned`]: crate::borrow::Cow::Owned "borrow::Cow::Owned"
+ #[inline]
+ fn from(s: String) -> Cow<'a, str> {
+ Cow::Owned(s)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "cow_from_string_ref", since = "1.28.0")]
+impl<'a> From<&'a String> for Cow<'a, str> {
+ /// Converts a [`String`] reference into a [`Borrowed`] variant.
+ /// No heap allocation is performed, and the string
+ /// is not copied.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # use std::borrow::Cow;
+ /// let s = "eggplant".to_string();
+ /// assert_eq!(Cow::from(&s), Cow::Borrowed("eggplant"));
+ /// ```
+ ///
+ /// [`Borrowed`]: crate::borrow::Cow::Borrowed "borrow::Cow::Borrowed"
+ #[inline]
+ fn from(s: &'a String) -> Cow<'a, str> {
+ Cow::Borrowed(s.as_str())
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "cow_str_from_iter", since = "1.12.0")]
+impl<'a> FromIterator<char> for Cow<'a, str> {
+ fn from_iter<I: IntoIterator<Item = char>>(it: I) -> Cow<'a, str> {
+ Cow::Owned(FromIterator::from_iter(it))
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "cow_str_from_iter", since = "1.12.0")]
+impl<'a, 'b> FromIterator<&'b str> for Cow<'a, str> {
+ fn from_iter<I: IntoIterator<Item = &'b str>>(it: I) -> Cow<'a, str> {
+ Cow::Owned(FromIterator::from_iter(it))
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "cow_str_from_iter", since = "1.12.0")]
+impl<'a> FromIterator<String> for Cow<'a, str> {
+ fn from_iter<I: IntoIterator<Item = String>>(it: I) -> Cow<'a, str> {
+ Cow::Owned(FromIterator::from_iter(it))
+ }
+}
+
+#[stable(feature = "from_string_for_vec_u8", since = "1.14.0")]
+impl From<String> for Vec<u8> {
+ /// Converts the given [`String`] to a vector [`Vec`] that holds values of type [`u8`].
+ ///
+ /// # Examples
+ ///
+ /// Basic usage:
+ ///
+ /// ```
+ /// let s1 = String::from("hello world");
+ /// let v1 = Vec::from(s1);
+ ///
+ /// for b in v1 {
+ /// println!("{b}");
+ /// }
+ /// ```
+ fn from(string: String) -> Vec<u8> {
+ string.into_bytes()
+ }
+}
+
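+/// Implementing [`fmt::Write`] lets a `String` serve as the sink for the
+/// `write!` and `writeln!` macros. A minimal sketch:
+///
+/// ```
+/// use std::fmt::Write;
+///
+/// let mut s = String::new();
+/// write!(s, "{} + {} = {}", 1, 2, 1 + 2).unwrap();
+/// assert_eq!(s, "1 + 2 = 3");
+/// ```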
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl fmt::Write for String {
+ #[inline]
+ fn write_str(&mut self, s: &str) -> fmt::Result {
+ self.push_str(s);
+ Ok(())
+ }
+
+ #[inline]
+ fn write_char(&mut self, c: char) -> fmt::Result {
+ self.push(c);
+ Ok(())
+ }
+}
+
+/// A draining iterator for `String`.
+///
+/// This struct is created by the [`drain`] method on [`String`]. See its
+/// documentation for more.
+///
+/// [`drain`]: String::drain
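+///
+/// # Example
+///
+/// A quick sketch of draining part of a `String`:
+///
+/// ```
+/// let mut s = String::from("hello world");
+/// let drained: String = s.drain(..5).collect();
+///
+/// assert_eq!(drained, "hello");
+/// assert_eq!(s, " world");
+/// ```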
+#[stable(feature = "drain", since = "1.6.0")]
+pub struct Drain<'a> {
+ /// Will be used as &'a mut String in the destructor
+ string: *mut String,
+ /// Start of part to remove
+ start: usize,
+ /// End of part to remove
+ end: usize,
+ /// Current remaining range to remove
+ iter: Chars<'a>,
+}
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl fmt::Debug for Drain<'_> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("Drain").field(&self.as_str()).finish()
+ }
+}
+
+#[stable(feature = "drain", since = "1.6.0")]
+unsafe impl Sync for Drain<'_> {}
+#[stable(feature = "drain", since = "1.6.0")]
+unsafe impl Send for Drain<'_> {}
+
+#[stable(feature = "drain", since = "1.6.0")]
+impl Drop for Drain<'_> {
+ fn drop(&mut self) {
+ unsafe {
+ // Use Vec::drain. "Reaffirm" the bounds checks to avoid
+ // panic code being inserted again.
+ let self_vec = (*self.string).as_mut_vec();
+ if self.start <= self.end && self.end <= self_vec.len() {
+ self_vec.drain(self.start..self.end);
+ }
+ }
+ }
+}
+
+impl<'a> Drain<'a> {
+ /// Returns the remaining (sub)string of this iterator as a slice.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut s = String::from("abc");
+ /// let mut drain = s.drain(..);
+ /// assert_eq!(drain.as_str(), "abc");
+ /// let _ = drain.next().unwrap();
+ /// assert_eq!(drain.as_str(), "bc");
+ /// ```
+ #[must_use]
+ #[stable(feature = "string_drain_as_str", since = "1.55.0")]
+ pub fn as_str(&self) -> &str {
+ self.iter.as_str()
+ }
+}
+
+#[stable(feature = "string_drain_as_str", since = "1.55.0")]
+impl<'a> AsRef<str> for Drain<'a> {
+ fn as_ref(&self) -> &str {
+ self.as_str()
+ }
+}
+
+#[stable(feature = "string_drain_as_str", since = "1.55.0")]
+impl<'a> AsRef<[u8]> for Drain<'a> {
+ fn as_ref(&self) -> &[u8] {
+ self.as_str().as_bytes()
+ }
+}
+
+#[stable(feature = "drain", since = "1.6.0")]
+impl Iterator for Drain<'_> {
+ type Item = char;
+
+ #[inline]
+ fn next(&mut self) -> Option<char> {
+ self.iter.next()
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+
+ #[inline]
+ fn last(mut self) -> Option<char> {
+ self.next_back()
+ }
+}
+
+#[stable(feature = "drain", since = "1.6.0")]
+impl DoubleEndedIterator for Drain<'_> {
+ #[inline]
+ fn next_back(&mut self) -> Option<char> {
+ self.iter.next_back()
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl FusedIterator for Drain<'_> {}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "from_char_for_string", since = "1.46.0")]
+impl From<char> for String {
+ /// Allocates an owned [`String`] from a single character.
+ ///
+ /// # Example
+ /// ```rust
+ /// let c: char = 'a';
+ /// let s: String = String::from(c);
+ /// assert_eq!("a", &s[..]);
+ /// ```
+ #[inline]
+ fn from(c: char) -> Self {
+ c.to_string()
+ }
+}
diff --git a/library/alloc/src/sync.rs b/library/alloc/src/sync.rs
new file mode 100644
index 000000000..4c03cc3ed
--- /dev/null
+++ b/library/alloc/src/sync.rs
@@ -0,0 +1,2765 @@
+#![stable(feature = "rust1", since = "1.0.0")]
+
+//! Thread-safe reference-counting pointers.
+//!
+//! See the [`Arc<T>`][Arc] documentation for more details.
+
+use core::any::Any;
+use core::borrow;
+use core::cmp::Ordering;
+use core::convert::{From, TryFrom};
+use core::fmt;
+use core::hash::{Hash, Hasher};
+use core::hint;
+use core::intrinsics::abort;
+#[cfg(not(no_global_oom_handling))]
+use core::iter;
+use core::marker::{PhantomData, Unpin, Unsize};
+#[cfg(not(no_global_oom_handling))]
+use core::mem::size_of_val;
+use core::mem::{self, align_of_val_raw};
+use core::ops::{CoerceUnsized, Deref, DispatchFromDyn, Receiver};
+use core::panic::{RefUnwindSafe, UnwindSafe};
+use core::pin::Pin;
+use core::ptr::{self, NonNull};
+#[cfg(not(no_global_oom_handling))]
+use core::slice::from_raw_parts_mut;
+use core::sync::atomic;
+use core::sync::atomic::Ordering::{Acquire, Relaxed, Release};
+
+#[cfg(not(no_global_oom_handling))]
+use crate::alloc::handle_alloc_error;
+#[cfg(not(no_global_oom_handling))]
+use crate::alloc::{box_free, WriteCloneIntoRaw};
+use crate::alloc::{AllocError, Allocator, Global, Layout};
+use crate::borrow::{Cow, ToOwned};
+use crate::boxed::Box;
+use crate::rc::is_dangling;
+#[cfg(not(no_global_oom_handling))]
+use crate::string::String;
+#[cfg(not(no_global_oom_handling))]
+use crate::vec::Vec;
+
+#[cfg(test)]
+mod tests;
+
+/// A soft limit on the amount of references that may be made to an `Arc`.
+///
+/// Going above this limit will abort your program, although not
+/// necessarily at _exactly_ `MAX_REFCOUNT + 1` references.
+const MAX_REFCOUNT: usize = (isize::MAX) as usize;
+
+#[cfg(not(sanitize = "thread"))]
+macro_rules! acquire {
+ ($x:expr) => {
+ atomic::fence(Acquire)
+ };
+}
+
+// ThreadSanitizer does not support memory fences. To avoid false positive
+// reports in Arc / Weak implementation use atomic loads for synchronization
+// instead.
+#[cfg(sanitize = "thread")]
+macro_rules! acquire {
+ ($x:expr) => {
+ $x.load(Acquire)
+ };
+}
+
+/// A thread-safe reference-counting pointer. 'Arc' stands for 'Atomically
+/// Reference Counted'.
+///
+/// The type `Arc<T>` provides shared ownership of a value of type `T`,
+/// allocated in the heap. Invoking [`clone`][clone] on `Arc` produces
+/// a new `Arc` instance, which points to the same allocation on the heap as the
+/// source `Arc`, while increasing a reference count. When the last `Arc`
+/// pointer to a given allocation is destroyed, the value stored in that allocation (often
+/// referred to as "inner value") is also dropped.
+///
+/// Shared references in Rust disallow mutation by default, and `Arc` is no
+/// exception: you cannot generally obtain a mutable reference to something
+/// inside an `Arc`. If you need to mutate through an `Arc`, use
+/// [`Mutex`][mutex], [`RwLock`][rwlock], or one of the [`Atomic`][atomic]
+/// types.
+///
+/// ## Thread Safety
+///
+/// Unlike [`Rc<T>`], `Arc<T>` uses atomic operations for its reference
+/// counting. This means that it is thread-safe. The disadvantage is that
+/// atomic operations are more expensive than ordinary memory accesses. If you
+/// are not sharing reference-counted allocations between threads, consider using
+/// [`Rc<T>`] for lower overhead. [`Rc<T>`] is a safe default, because the
+/// compiler will catch any attempt to send an [`Rc<T>`] between threads.
+/// However, a library might choose `Arc<T>` in order to give library consumers
+/// more flexibility.
+///
+/// `Arc<T>` will implement [`Send`] and [`Sync`] as long as the `T` implements
+/// [`Send`] and [`Sync`]. Why can't you put a non-thread-safe type `T` in an
+/// `Arc<T>` to make it thread-safe? This may be a bit counter-intuitive at
+/// first: after all, isn't the point of `Arc<T>` thread safety? The key is
+/// this: `Arc<T>` makes it thread safe to have multiple ownership of the same
+/// data, but it doesn't add thread safety to its data. Consider
+/// <code>Arc<[RefCell\<T>]></code>. [`RefCell<T>`] isn't [`Sync`], and if `Arc<T>` was always
+/// [`Send`], <code>Arc<[RefCell\<T>]></code> would be as well. But then we'd have a problem:
+/// [`RefCell<T>`] is not thread safe; it keeps track of the borrowing count using
+/// non-atomic operations.
+///
+/// In the end, this means that you may need to pair `Arc<T>` with some sort of
+/// [`std::sync`] type, usually [`Mutex<T>`][mutex].
+///
+/// ## Breaking cycles with `Weak`
+///
+/// The [`downgrade`][downgrade] method can be used to create a non-owning
+/// [`Weak`] pointer. A [`Weak`] pointer can be [`upgrade`][upgrade]d
+/// to an `Arc`, but this will return [`None`] if the value stored in the allocation has
+/// already been dropped. In other words, `Weak` pointers do not keep the value
+/// inside the allocation alive; however, they *do* keep the allocation
+/// (the backing store for the value) alive.
+///
+/// A cycle between `Arc` pointers will never be deallocated. For this reason,
+/// [`Weak`] is used to break cycles. For example, a tree could have
+/// strong `Arc` pointers from parent nodes to children, and [`Weak`]
+/// pointers from children back to their parents.
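+///
+/// A minimal sketch of such a back-pointer (the `Node` type here is purely
+/// illustrative):
+///
+/// ```
+/// use std::sync::{Arc, Weak};
+///
+/// struct Node {
+///     parent: Weak<Node>,
+/// }
+///
+/// let parent = Arc::new(Node { parent: Weak::new() });
+/// let child = Arc::new(Node { parent: Arc::downgrade(&parent) });
+///
+/// // The child's `Weak` back-pointer does not keep the parent alive...
+/// drop(parent);
+/// // ...so upgrading it now yields `None` instead of leaking a cycle.
+/// assert!(child.parent.upgrade().is_none());
+/// ```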
+///
+/// # Cloning references
+///
+/// Creating a new reference from an existing reference-counted pointer is done using the
+/// `Clone` trait implemented for [`Arc<T>`][Arc] and [`Weak<T>`][Weak].
+///
+/// ```
+/// use std::sync::Arc;
+/// let foo = Arc::new(vec![1.0, 2.0, 3.0]);
+/// // The two syntaxes below are equivalent.
+/// let a = foo.clone();
+/// let b = Arc::clone(&foo);
+/// // a, b, and foo are all Arcs that point to the same memory location
+/// ```
+///
+/// ## `Deref` behavior
+///
+/// `Arc<T>` automatically dereferences to `T` (via the [`Deref`][deref] trait),
+/// so you can call `T`'s methods on a value of type `Arc<T>`. To avoid name
+/// clashes with `T`'s methods, the methods of `Arc<T>` itself are associated
+/// functions, called using [fully qualified syntax]:
+///
+/// ```
+/// use std::sync::Arc;
+///
+/// let my_arc = Arc::new(());
+/// let my_weak = Arc::downgrade(&my_arc);
+/// ```
+///
+/// `Arc<T>`'s implementations of traits like `Clone` may also be called using
+/// fully qualified syntax. Some people prefer to use fully qualified syntax,
+/// while others prefer using method-call syntax.
+///
+/// ```
+/// use std::sync::Arc;
+///
+/// let arc = Arc::new(());
+/// // Method-call syntax
+/// let arc2 = arc.clone();
+/// // Fully qualified syntax
+/// let arc3 = Arc::clone(&arc);
+/// ```
+///
+/// [`Weak<T>`][Weak] does not auto-dereference to `T`, because the inner value may have
+/// already been dropped.
+///
+/// [`Rc<T>`]: crate::rc::Rc
+/// [clone]: Clone::clone
+/// [mutex]: ../../std/sync/struct.Mutex.html
+/// [rwlock]: ../../std/sync/struct.RwLock.html
+/// [atomic]: core::sync::atomic
+/// [`Send`]: core::marker::Send
+/// [`Sync`]: core::marker::Sync
+/// [deref]: core::ops::Deref
+/// [downgrade]: Arc::downgrade
+/// [upgrade]: Weak::upgrade
+/// [RefCell\<T>]: core::cell::RefCell
+/// [`RefCell<T>`]: core::cell::RefCell
+/// [`std::sync`]: ../../std/sync/index.html
+/// [`Arc::clone(&from)`]: Arc::clone
+/// [fully qualified syntax]: https://doc.rust-lang.org/book/ch19-03-advanced-traits.html#fully-qualified-syntax-for-disambiguation-calling-methods-with-the-same-name
+///
+/// # Examples
+///
+/// Sharing some immutable data between threads:
+///
+// Note that we **do not** run these tests here. The windows builders get super
+// unhappy if a thread outlives the main thread and then exits at the same time
+// (something deadlocks) so we just avoid this entirely by not running these
+// tests.
+/// ```no_run
+/// use std::sync::Arc;
+/// use std::thread;
+///
+/// let five = Arc::new(5);
+///
+/// for _ in 0..10 {
+/// let five = Arc::clone(&five);
+///
+/// thread::spawn(move || {
+/// println!("{five:?}");
+/// });
+/// }
+/// ```
+///
+/// Sharing a mutable [`AtomicUsize`]:
+///
+/// [`AtomicUsize`]: core::sync::atomic::AtomicUsize "sync::atomic::AtomicUsize"
+///
+/// ```no_run
+/// use std::sync::Arc;
+/// use std::sync::atomic::{AtomicUsize, Ordering};
+/// use std::thread;
+///
+/// let val = Arc::new(AtomicUsize::new(5));
+///
+/// for _ in 0..10 {
+/// let val = Arc::clone(&val);
+///
+/// thread::spawn(move || {
+/// let v = val.fetch_add(1, Ordering::SeqCst);
+/// println!("{v:?}");
+/// });
+/// }
+/// ```
+///
+/// See the [`rc` documentation][rc_examples] for more examples of reference
+/// counting in general.
+///
+/// [rc_examples]: crate::rc#examples
+#[cfg_attr(not(test), rustc_diagnostic_item = "Arc")]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub struct Arc<T: ?Sized> {
+ ptr: NonNull<ArcInner<T>>,
+ phantom: PhantomData<ArcInner<T>>,
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<T: ?Sized + Sync + Send> Send for Arc<T> {}
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<T: ?Sized + Sync + Send> Sync for Arc<T> {}
+
+#[stable(feature = "catch_unwind", since = "1.9.0")]
+impl<T: RefUnwindSafe + ?Sized> UnwindSafe for Arc<T> {}
+
+#[unstable(feature = "coerce_unsized", issue = "27732")]
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Arc<U>> for Arc<T> {}
+
+#[unstable(feature = "dispatch_from_dyn", issue = "none")]
+impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Arc<U>> for Arc<T> {}
+
+impl<T: ?Sized> Arc<T> {
+ unsafe fn from_inner(ptr: NonNull<ArcInner<T>>) -> Self {
+ Self { ptr, phantom: PhantomData }
+ }
+
+ unsafe fn from_ptr(ptr: *mut ArcInner<T>) -> Self {
+ unsafe { Self::from_inner(NonNull::new_unchecked(ptr)) }
+ }
+}
+
+/// `Weak` is a version of [`Arc`] that holds a non-owning reference to the
+/// managed allocation. The allocation is accessed by calling [`upgrade`] on the `Weak`
+/// pointer, which returns an <code>[Option]<[Arc]\<T>></code>.
+///
+/// Since a `Weak` reference does not count towards ownership, it will not
+/// prevent the value stored in the allocation from being dropped, and `Weak` itself makes no
+/// guarantees about the value still being present. Thus it may return [`None`]
+/// when [`upgrade`]d. Note however that a `Weak` reference *does* prevent the allocation
+/// itself (the backing store) from being deallocated.
+///
+/// A `Weak` pointer is useful for keeping a temporary reference to the allocation
+/// managed by [`Arc`] without preventing its inner value from being dropped. It is also used to
+/// prevent circular references between [`Arc`] pointers, since mutual owning references
+/// would never allow either [`Arc`] to be dropped. For example, a tree could
+/// have strong [`Arc`] pointers from parent nodes to children, and `Weak`
+/// pointers from children back to their parents.
+///
+/// The typical way to obtain a `Weak` pointer is to call [`Arc::downgrade`].
+///
+/// [`upgrade`]: Weak::upgrade
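+///
+/// # Examples
+///
+/// A short sketch of the downgrade/upgrade round trip:
+///
+/// ```
+/// use std::sync::Arc;
+///
+/// let strong = Arc::new(5);
+/// let weak = Arc::downgrade(&strong);
+///
+/// // Upgrading succeeds while a strong pointer is still alive...
+/// assert_eq!(weak.upgrade().map(|arc| *arc), Some(5));
+///
+/// // ...and fails once the last strong pointer has been dropped.
+/// drop(strong);
+/// assert!(weak.upgrade().is_none());
+/// ```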
+#[stable(feature = "arc_weak", since = "1.4.0")]
+pub struct Weak<T: ?Sized> {
+ // This is a `NonNull` to allow optimizing the size of this type in enums,
+ // but it is not necessarily a valid pointer.
+ // `Weak::new` sets this to `usize::MAX` so that it doesn’t need
+ // to allocate space on the heap. That's not a value a real pointer
+ // will ever have because ArcInner has alignment at least 2.
+ // This is only possible when `T: Sized`; unsized `T` never dangle.
+ ptr: NonNull<ArcInner<T>>,
+}
+
+#[stable(feature = "arc_weak", since = "1.4.0")]
+unsafe impl<T: ?Sized + Sync + Send> Send for Weak<T> {}
+#[stable(feature = "arc_weak", since = "1.4.0")]
+unsafe impl<T: ?Sized + Sync + Send> Sync for Weak<T> {}
+
+#[unstable(feature = "coerce_unsized", issue = "27732")]
+impl<T: ?Sized + Unsize<U>, U: ?Sized> CoerceUnsized<Weak<U>> for Weak<T> {}
+#[unstable(feature = "dispatch_from_dyn", issue = "none")]
+impl<T: ?Sized + Unsize<U>, U: ?Sized> DispatchFromDyn<Weak<U>> for Weak<T> {}
+
+#[stable(feature = "arc_weak", since = "1.4.0")]
+impl<T: ?Sized + fmt::Debug> fmt::Debug for Weak<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ write!(f, "(Weak)")
+ }
+}
+
+// This is repr(C) to future-proof against possible field-reordering, which
+// would interfere with otherwise safe [into|from]_raw() of transmutable
+// inner types.
+#[repr(C)]
+struct ArcInner<T: ?Sized> {
+ strong: atomic::AtomicUsize,
+
+ // the value usize::MAX acts as a sentinel for temporarily "locking" the
+ // ability to upgrade weak pointers or downgrade strong ones; this is used
+ // to avoid races in `make_mut` and `get_mut`.
+ weak: atomic::AtomicUsize,
+
+ data: T,
+}
+
+unsafe impl<T: ?Sized + Sync + Send> Send for ArcInner<T> {}
+unsafe impl<T: ?Sized + Sync + Send> Sync for ArcInner<T> {}
+
+impl<T> Arc<T> {
+ /// Constructs a new `Arc<T>`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let five = Arc::new(5);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn new(data: T) -> Arc<T> {
+ // Start the weak pointer count as 1 which is the weak pointer that's
+ // held by all the strong pointers (kinda), see std/rc.rs for more info
+ let x: Box<_> = Box::new(ArcInner {
+ strong: atomic::AtomicUsize::new(1),
+ weak: atomic::AtomicUsize::new(1),
+ data,
+ });
+ unsafe { Self::from_inner(Box::leak(x).into()) }
+ }
+
+ /// Constructs a new `Arc<T>` while giving you a `Weak<T>` to the allocation,
+ /// to allow you to construct a `T` which holds a weak pointer to itself.
+ ///
+ /// Generally, a structure circularly referencing itself, either directly or
+ /// indirectly, should not hold a strong reference to itself; doing so would create a memory leak.
+ /// Using this function, you get access to the weak pointer during the
+ /// initialization of `T`, before the `Arc<T>` is created, such that you can
+ /// clone and store it inside the `T`.
+ ///
+ /// `new_cyclic` first allocates the managed allocation for the `Arc<T>`,
+ /// then calls your closure, giving it a `Weak<T>` to this allocation,
+ /// and only afterwards completes the construction of the `Arc<T>` by placing
+ /// the `T` returned from your closure into the allocation.
+ ///
+ /// Since the new `Arc<T>` is not fully-constructed until `Arc<T>::new_cyclic`
+ /// returns, calling [`upgrade`] on the weak reference inside your closure will
+ /// fail and result in a `None` value.
+ ///
+ /// # Panics
+ ///
+ /// If `data_fn` panics, the panic is propagated to the caller, and the
+ /// temporary [`Weak<T>`] is dropped normally.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # #![allow(dead_code)]
+ /// use std::sync::{Arc, Weak};
+ ///
+ /// struct Gadget {
+ /// me: Weak<Gadget>,
+ /// }
+ ///
+ /// impl Gadget {
+ /// /// Construct a reference counted Gadget.
+ /// fn new() -> Arc<Self> {
+ /// // `me` is a `Weak<Gadget>` pointing at the new allocation of the
+ /// // `Arc` we're constructing.
+ /// Arc::new_cyclic(|me| {
+ /// // Create the actual struct here.
+ /// Gadget { me: me.clone() }
+ /// })
+ /// }
+ ///
+ /// /// Return a reference counted pointer to Self.
+ /// fn me(&self) -> Arc<Self> {
+ /// self.me.upgrade().unwrap()
+ /// }
+ /// }
+ /// ```
+ /// [`upgrade`]: Weak::upgrade
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[stable(feature = "arc_new_cyclic", since = "1.60.0")]
+ pub fn new_cyclic<F>(data_fn: F) -> Arc<T>
+ where
+ F: FnOnce(&Weak<T>) -> T,
+ {
+ // Construct the inner in the "uninitialized" state with a single
+ // weak reference.
+ let uninit_ptr: NonNull<_> = Box::leak(Box::new(ArcInner {
+ strong: atomic::AtomicUsize::new(0),
+ weak: atomic::AtomicUsize::new(1),
+ data: mem::MaybeUninit::<T>::uninit(),
+ }))
+ .into();
+ let init_ptr: NonNull<ArcInner<T>> = uninit_ptr.cast();
+
+ let weak = Weak { ptr: init_ptr };
+
+ // It's important we don't give up ownership of the weak pointer, or
+ // else the memory might be freed by the time `data_fn` returns. If
+ // we really wanted to pass ownership, we could create an additional
+ // weak pointer for ourselves, but this would result in additional
+ // updates to the weak reference count which might not be necessary
+ // otherwise.
+ let data = data_fn(&weak);
+
+ // Now we can properly initialize the inner value and turn our weak
+ // reference into a strong reference.
+ let strong = unsafe {
+ let inner = init_ptr.as_ptr();
+ ptr::write(ptr::addr_of_mut!((*inner).data), data);
+
+ // The above write to the data field must be visible to any threads which
+ // observe a non-zero strong count. Therefore we need at least "Release" ordering
+ // in order to synchronize with the `compare_exchange_weak` in `Weak::upgrade`.
+ //
+ // "Acquire" ordering is not required. When considering the possible behaviours
+ // of `data_fn` we only need to look at what it could do with a reference to a
+ // non-upgradeable `Weak`:
+ // - It can *clone* the `Weak`, increasing the weak reference count.
+ // - It can drop those clones, decreasing the weak reference count (but never to zero).
+ //
+ // These side effects do not impact us in any way, and no other side effects are
+ // possible with safe code alone.
+ let prev_value = (*inner).strong.fetch_add(1, Release);
+ debug_assert_eq!(prev_value, 0, "No prior strong references should exist");
+
+ Arc::from_inner(init_ptr)
+ };
+
+ // Strong references should collectively own a shared weak reference,
+ // so don't run the destructor for our old weak reference.
+ mem::forget(weak);
+ strong
+ }
+
+ /// Constructs a new `Arc` with uninitialized contents.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ /// #![feature(get_mut_unchecked)]
+ ///
+ /// use std::sync::Arc;
+ ///
+ /// let mut five = Arc::<u32>::new_uninit();
+ ///
+ /// // Deferred initialization:
+ /// Arc::get_mut(&mut five).unwrap().write(5);
+ ///
+ /// let five = unsafe { five.assume_init() };
+ ///
+ /// assert_eq!(*five, 5)
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "new_uninit", issue = "63291")]
+ #[must_use]
+ pub fn new_uninit() -> Arc<mem::MaybeUninit<T>> {
+ unsafe {
+ Arc::from_ptr(Arc::allocate_for_layout(
+ Layout::new::<T>(),
+ |layout| Global.allocate(layout),
+ |mem| mem as *mut ArcInner<mem::MaybeUninit<T>>,
+ ))
+ }
+ }
+
+ /// Constructs a new `Arc` with uninitialized contents, with the memory
+ /// being filled with `0` bytes.
+ ///
+ /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
+ /// of this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ ///
+ /// use std::sync::Arc;
+ ///
+ /// let zero = Arc::<u32>::new_zeroed();
+ /// let zero = unsafe { zero.assume_init() };
+ ///
+ /// assert_eq!(*zero, 0)
+ /// ```
+ ///
+ /// [zeroed]: mem::MaybeUninit::zeroed
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "new_uninit", issue = "63291")]
+ #[must_use]
+ pub fn new_zeroed() -> Arc<mem::MaybeUninit<T>> {
+ unsafe {
+ Arc::from_ptr(Arc::allocate_for_layout(
+ Layout::new::<T>(),
+ |layout| Global.allocate_zeroed(layout),
+ |mem| mem as *mut ArcInner<mem::MaybeUninit<T>>,
+ ))
+ }
+ }
+
+ /// Constructs a new `Pin<Arc<T>>`. If `T` does not implement `Unpin`, then
+ /// `data` will be pinned in memory and unable to be moved.
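+ ///
+ /// # Example
+ ///
+ /// A minimal sketch (`i32` is `Unpin`, so this only demonstrates the call):
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let pinned = Arc::pin(5);
+ /// assert_eq!(*pinned, 5);
+ /// ```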
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "pin", since = "1.33.0")]
+ #[must_use]
+ pub fn pin(data: T) -> Pin<Arc<T>> {
+ unsafe { Pin::new_unchecked(Arc::new(data)) }
+ }
+
+ /// Constructs a new `Pin<Arc<T>>`, returning an error if allocation fails.
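+ ///
+ /// # Example
+ ///
+ /// A minimal sketch, mirroring [`Arc::try_new`]:
+ ///
+ /// ```
+ /// #![feature(allocator_api)]
+ /// use std::sync::Arc;
+ ///
+ /// let five = Arc::try_pin(5)?;
+ /// assert_eq!(*five, 5);
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```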
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[inline]
+ pub fn try_pin(data: T) -> Result<Pin<Arc<T>>, AllocError> {
+ unsafe { Ok(Pin::new_unchecked(Arc::try_new(data)?)) }
+ }
+
+ /// Constructs a new `Arc<T>`, returning an error if allocation fails.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api)]
+ /// use std::sync::Arc;
+ ///
+ /// let five = Arc::try_new(5)?;
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[inline]
+ pub fn try_new(data: T) -> Result<Arc<T>, AllocError> {
+ // Start the weak pointer count as 1 which is the weak pointer that's
+ // held by all the strong pointers (kinda), see std/rc.rs for more info
+ let x: Box<_> = Box::try_new(ArcInner {
+ strong: atomic::AtomicUsize::new(1),
+ weak: atomic::AtomicUsize::new(1),
+ data,
+ })?;
+ unsafe { Ok(Self::from_inner(Box::leak(x).into())) }
+ }
+
+ /// Constructs a new `Arc` with uninitialized contents, returning an error
+ /// if allocation fails.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit, allocator_api)]
+ /// #![feature(get_mut_unchecked)]
+ ///
+ /// use std::sync::Arc;
+ ///
+ /// let mut five = Arc::<u32>::try_new_uninit()?;
+ ///
+ /// // Deferred initialization:
+ /// Arc::get_mut(&mut five).unwrap().write(5);
+ ///
+ /// let five = unsafe { five.assume_init() };
+ ///
+ /// assert_eq!(*five, 5);
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ // #[unstable(feature = "new_uninit", issue = "63291")]
+ pub fn try_new_uninit() -> Result<Arc<mem::MaybeUninit<T>>, AllocError> {
+ unsafe {
+ Ok(Arc::from_ptr(Arc::try_allocate_for_layout(
+ Layout::new::<T>(),
+ |layout| Global.allocate(layout),
+ |mem| mem as *mut ArcInner<mem::MaybeUninit<T>>,
+ )?))
+ }
+ }
+
+ /// Constructs a new `Arc` with uninitialized contents, with the memory
+ /// being filled with `0` bytes, returning an error if allocation fails.
+ ///
+ /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and incorrect usage
+ /// of this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit, allocator_api)]
+ ///
+ /// use std::sync::Arc;
+ ///
+ /// let zero = Arc::<u32>::try_new_zeroed()?;
+ /// let zero = unsafe { zero.assume_init() };
+ ///
+ /// assert_eq!(*zero, 0);
+ /// # Ok::<(), std::alloc::AllocError>(())
+ /// ```
+ ///
+ /// [zeroed]: mem::MaybeUninit::zeroed
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ // #[unstable(feature = "new_uninit", issue = "63291")]
+ pub fn try_new_zeroed() -> Result<Arc<mem::MaybeUninit<T>>, AllocError> {
+ unsafe {
+ Ok(Arc::from_ptr(Arc::try_allocate_for_layout(
+ Layout::new::<T>(),
+ |layout| Global.allocate_zeroed(layout),
+ |mem| mem as *mut ArcInner<mem::MaybeUninit<T>>,
+ )?))
+ }
+ }
+
+ /// Returns the inner value, if the `Arc` has exactly one strong reference.
+ ///
+ /// Otherwise, an [`Err`] is returned with the same `Arc` that was
+ /// passed in.
+ ///
+ /// This will succeed even if there are outstanding weak references.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let x = Arc::new(3);
+ /// assert_eq!(Arc::try_unwrap(x), Ok(3));
+ ///
+ /// let x = Arc::new(4);
+ /// let _y = Arc::clone(&x);
+ /// assert_eq!(*Arc::try_unwrap(x).unwrap_err(), 4);
+ /// ```
+ #[inline]
+ #[stable(feature = "arc_unique", since = "1.4.0")]
+ pub fn try_unwrap(this: Self) -> Result<T, Self> {
+ if this.inner().strong.compare_exchange(1, 0, Relaxed, Relaxed).is_err() {
+ return Err(this);
+ }
+
+ acquire!(this.inner().strong);
+
+ unsafe {
+ let elem = ptr::read(&this.ptr.as_ref().data);
+
+ // Make a weak pointer to clean up the implicit strong-weak reference
+ let _weak = Weak { ptr: this.ptr };
+ mem::forget(this);
+
+ Ok(elem)
+ }
+ }
+}
+
+impl<T> Arc<[T]> {
+ /// Constructs a new atomically reference-counted slice with uninitialized contents.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ /// #![feature(get_mut_unchecked)]
+ ///
+ /// use std::sync::Arc;
+ ///
+ /// let mut values = Arc::<[u32]>::new_uninit_slice(3);
+ ///
+ /// // Deferred initialization:
+ /// let data = Arc::get_mut(&mut values).unwrap();
+ /// data[0].write(1);
+ /// data[1].write(2);
+ /// data[2].write(3);
+ ///
+ /// let values = unsafe { values.assume_init() };
+ ///
+ /// assert_eq!(*values, [1, 2, 3])
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "new_uninit", issue = "63291")]
+ #[must_use]
+ pub fn new_uninit_slice(len: usize) -> Arc<[mem::MaybeUninit<T>]> {
+ unsafe { Arc::from_ptr(Arc::allocate_for_slice(len)) }
+ }
+
+ /// Constructs a new atomically reference-counted slice with uninitialized contents, with the memory being
+ /// filled with `0` bytes.
+ ///
+ /// See [`MaybeUninit::zeroed`][zeroed] for examples of correct and
+ /// incorrect usage of this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ ///
+ /// use std::sync::Arc;
+ ///
+ /// let values = Arc::<[u32]>::new_zeroed_slice(3);
+ /// let values = unsafe { values.assume_init() };
+ ///
+ /// assert_eq!(*values, [0, 0, 0])
+ /// ```
+ ///
+ /// [zeroed]: mem::MaybeUninit::zeroed
+ #[cfg(not(no_global_oom_handling))]
+ #[unstable(feature = "new_uninit", issue = "63291")]
+ #[must_use]
+ pub fn new_zeroed_slice(len: usize) -> Arc<[mem::MaybeUninit<T>]> {
+ unsafe {
+ Arc::from_ptr(Arc::allocate_for_layout(
+ Layout::array::<T>(len).unwrap(),
+ |layout| Global.allocate_zeroed(layout),
+ |mem| {
+ ptr::slice_from_raw_parts_mut(mem as *mut T, len)
+ as *mut ArcInner<[mem::MaybeUninit<T>]>
+ },
+ ))
+ }
+ }
+}
+
+impl<T> Arc<mem::MaybeUninit<T>> {
+ /// Converts to `Arc<T>`.
+ ///
+ /// # Safety
+ ///
+ /// As with [`MaybeUninit::assume_init`],
+ /// it is up to the caller to guarantee that the inner value
+ /// really is in an initialized state.
+ /// Calling this when the content is not yet fully initialized
+ /// causes immediate undefined behavior.
+ ///
+ /// [`MaybeUninit::assume_init`]: mem::MaybeUninit::assume_init
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ /// #![feature(get_mut_unchecked)]
+ ///
+ /// use std::sync::Arc;
+ ///
+ /// let mut five = Arc::<u32>::new_uninit();
+ ///
+ /// // Deferred initialization:
+ /// Arc::get_mut(&mut five).unwrap().write(5);
+ ///
+ /// let five = unsafe { five.assume_init() };
+ ///
+ /// assert_eq!(*five, 5)
+ /// ```
+ #[unstable(feature = "new_uninit", issue = "63291")]
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[inline]
+ pub unsafe fn assume_init(self) -> Arc<T> {
+ unsafe { Arc::from_inner(mem::ManuallyDrop::new(self).ptr.cast()) }
+ }
+}
+
+impl<T> Arc<[mem::MaybeUninit<T>]> {
+ /// Converts to `Arc<[T]>`.
+ ///
+ /// # Safety
+ ///
+ /// As with [`MaybeUninit::assume_init`],
+ /// it is up to the caller to guarantee that the inner value
+ /// really is in an initialized state.
+ /// Calling this when the content is not yet fully initialized
+ /// causes immediate undefined behavior.
+ ///
+ /// [`MaybeUninit::assume_init`]: mem::MaybeUninit::assume_init
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(new_uninit)]
+ /// #![feature(get_mut_unchecked)]
+ ///
+ /// use std::sync::Arc;
+ ///
+ /// let mut values = Arc::<[u32]>::new_uninit_slice(3);
+ ///
+ /// // Deferred initialization:
+ /// let data = Arc::get_mut(&mut values).unwrap();
+ /// data[0].write(1);
+ /// data[1].write(2);
+ /// data[2].write(3);
+ ///
+ /// let values = unsafe { values.assume_init() };
+ ///
+ /// assert_eq!(*values, [1, 2, 3])
+ /// ```
+ #[unstable(feature = "new_uninit", issue = "63291")]
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[inline]
+ pub unsafe fn assume_init(self) -> Arc<[T]> {
+ unsafe { Arc::from_ptr(mem::ManuallyDrop::new(self).ptr.as_ptr() as _) }
+ }
+}
+
+impl<T: ?Sized> Arc<T> {
+ /// Consumes the `Arc`, returning the wrapped pointer.
+ ///
+ /// To avoid a memory leak the pointer must be converted back to an `Arc` using
+ /// [`Arc::from_raw`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let x = Arc::new("hello".to_owned());
+ /// let x_ptr = Arc::into_raw(x);
+ /// assert_eq!(unsafe { &*x_ptr }, "hello");
+ /// ```
+ #[must_use = "losing the pointer will leak memory"]
+ #[stable(feature = "rc_raw", since = "1.17.0")]
+ pub fn into_raw(this: Self) -> *const T {
+ let ptr = Self::as_ptr(&this);
+ mem::forget(this);
+ ptr
+ }
+
+ /// Provides a raw pointer to the data.
+ ///
+ /// The counts are not affected in any way and the `Arc` is not consumed. The pointer is valid for
+ /// as long as there are strong references to the allocation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let x = Arc::new("hello".to_owned());
+ /// let y = Arc::clone(&x);
+ /// let x_ptr = Arc::as_ptr(&x);
+ /// assert_eq!(x_ptr, Arc::as_ptr(&y));
+ /// assert_eq!(unsafe { &*x_ptr }, "hello");
+ /// ```
+ #[must_use]
+ #[stable(feature = "rc_as_ptr", since = "1.45.0")]
+ pub fn as_ptr(this: &Self) -> *const T {
+ let ptr: *mut ArcInner<T> = NonNull::as_ptr(this.ptr);
+
+ // SAFETY: This cannot go through Deref::deref or RcBoxPtr::inner because
+ // this is required to retain raw/mut provenance such that e.g. `get_mut` can
+ // write through the pointer after the Rc is recovered through `from_raw`.
+ unsafe { ptr::addr_of_mut!((*ptr).data) }
+ }
+
+ /// Constructs an `Arc<T>` from a raw pointer.
+ ///
+ /// The raw pointer must have been previously returned by a call to
+ /// [`Arc<U>::into_raw`][into_raw] where `U` must have the same size and
+ /// alignment as `T`. This is trivially true if `U` is `T`.
+ /// Note that if `U` is not `T` but has the same size and alignment, this is
+ /// basically like transmuting references of different types. See
+ /// [`mem::transmute`][transmute] for more information on what
+ /// restrictions apply in this case.
+ ///
+ /// The user of `from_raw` has to make sure a specific value of `T` is only
+ /// dropped once.
+ ///
+ /// This function is unsafe because improper use may lead to memory unsafety,
+ /// even if the returned `Arc<T>` is never accessed.
+ ///
+ /// [into_raw]: Arc::into_raw
+ /// [transmute]: core::mem::transmute
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let x = Arc::new("hello".to_owned());
+ /// let x_ptr = Arc::into_raw(x);
+ ///
+ /// unsafe {
+ /// // Convert back to an `Arc` to prevent leak.
+ /// let x = Arc::from_raw(x_ptr);
+ /// assert_eq!(&*x, "hello");
+ ///
+ /// // Further calls to `Arc::from_raw(x_ptr)` would be memory-unsafe.
+ /// }
+ ///
+ /// // The memory was freed when `x` went out of scope above, so `x_ptr` is now dangling!
+ /// ```
+ #[stable(feature = "rc_raw", since = "1.17.0")]
+ pub unsafe fn from_raw(ptr: *const T) -> Self {
+ unsafe {
+ let offset = data_offset(ptr);
+
+ // Reverse the offset to find the original ArcInner.
+ let arc_ptr = ptr.byte_sub(offset) as *mut ArcInner<T>;
+
+ Self::from_ptr(arc_ptr)
+ }
+ }
+
+ /// Creates a new [`Weak`] pointer to this allocation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let five = Arc::new(5);
+ ///
+ /// let weak_five = Arc::downgrade(&five);
+ /// ```
+ #[must_use = "this returns a new `Weak` pointer, \
+ without modifying the original `Arc`"]
+ #[stable(feature = "arc_weak", since = "1.4.0")]
+ pub fn downgrade(this: &Self) -> Weak<T> {
+ // This Relaxed is OK because we're checking the value in the CAS
+ // below.
+ let mut cur = this.inner().weak.load(Relaxed);
+
+ loop {
+ // check if the weak counter is currently "locked"; if so, spin.
+ if cur == usize::MAX {
+ hint::spin_loop();
+ cur = this.inner().weak.load(Relaxed);
+ continue;
+ }
+
+ // NOTE: this code currently ignores the possibility of overflow
+ // into usize::MAX; in general both Rc and Arc need to be adjusted
+ // to deal with overflow.
+
+ // Unlike with Clone(), we need this to be an Acquire read to
+ // synchronize with the write coming from `is_unique`, so that the
+ // events prior to that write happen before this read.
+ match this.inner().weak.compare_exchange_weak(cur, cur + 1, Acquire, Relaxed) {
+ Ok(_) => {
+ // Make sure we do not create a dangling Weak
+ debug_assert!(!is_dangling(this.ptr.as_ptr()));
+ return Weak { ptr: this.ptr };
+ }
+ Err(old) => cur = old,
+ }
+ }
+ }
+
+ /// Gets the number of [`Weak`] pointers to this allocation.
+ ///
+ /// # Safety
+ ///
+ /// This method by itself is safe, but using it correctly requires extra care.
+ /// Another thread can change the weak count at any time,
+ /// including potentially between calling this method and acting on the result.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let five = Arc::new(5);
+ /// let _weak_five = Arc::downgrade(&five);
+ ///
+ /// // This assertion is deterministic because we haven't shared
+ /// // the `Arc` or `Weak` between threads.
+ /// assert_eq!(1, Arc::weak_count(&five));
+ /// ```
+ #[inline]
+ #[must_use]
+ #[stable(feature = "arc_counts", since = "1.15.0")]
+ pub fn weak_count(this: &Self) -> usize {
+ let cnt = this.inner().weak.load(Acquire);
+ // If the weak count is currently locked, the value of the
+ // count was 0 just before taking the lock.
+ if cnt == usize::MAX { 0 } else { cnt - 1 }
+ }
+
+ /// Gets the number of strong (`Arc`) pointers to this allocation.
+ ///
+ /// # Safety
+ ///
+ /// This method by itself is safe, but using it correctly requires extra care.
+ /// Another thread can change the strong count at any time,
+ /// including potentially between calling this method and acting on the result.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let five = Arc::new(5);
+ /// let _also_five = Arc::clone(&five);
+ ///
+ /// // This assertion is deterministic because we haven't shared
+ /// // the `Arc` between threads.
+ /// assert_eq!(2, Arc::strong_count(&five));
+ /// ```
+ #[inline]
+ #[must_use]
+ #[stable(feature = "arc_counts", since = "1.15.0")]
+ pub fn strong_count(this: &Self) -> usize {
+ this.inner().strong.load(Acquire)
+ }
+
+ /// Increments the strong reference count on the `Arc<T>` associated with the
+ /// provided pointer by one.
+ ///
+ /// # Safety
+ ///
+ /// The pointer must have been obtained through `Arc::into_raw`, and the
+ /// associated `Arc` instance must be valid (i.e. the strong count must be at
+ /// least 1) for the duration of this method.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let five = Arc::new(5);
+ ///
+ /// unsafe {
+ /// let ptr = Arc::into_raw(five);
+ /// Arc::increment_strong_count(ptr);
+ ///
+ /// // This assertion is deterministic because we haven't shared
+ /// // the `Arc` between threads.
+ /// let five = Arc::from_raw(ptr);
+ /// assert_eq!(2, Arc::strong_count(&five));
+ /// }
+ /// ```
+ #[inline]
+ #[stable(feature = "arc_mutate_strong_count", since = "1.51.0")]
+ pub unsafe fn increment_strong_count(ptr: *const T) {
+ // Retain Arc, but don't touch refcount by wrapping in ManuallyDrop
+ let arc = unsafe { mem::ManuallyDrop::new(Arc::<T>::from_raw(ptr)) };
+ // Now increase refcount, but don't drop new refcount either
+ let _arc_clone: mem::ManuallyDrop<_> = arc.clone();
+ }
+
+ /// Decrements the strong reference count on the `Arc<T>` associated with the
+ /// provided pointer by one.
+ ///
+ /// # Safety
+ ///
+ /// The pointer must have been obtained through `Arc::into_raw`, and the
+ /// associated `Arc` instance must be valid (i.e. the strong count must be at
+ /// least 1) when invoking this method. This method can be used to release the final
+ /// `Arc` and backing storage, but **should not** be called after the final `Arc` has been
+ /// released.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let five = Arc::new(5);
+ ///
+ /// unsafe {
+ /// let ptr = Arc::into_raw(five);
+ /// Arc::increment_strong_count(ptr);
+ ///
+ /// // Those assertions are deterministic because we haven't shared
+ /// // the `Arc` between threads.
+ /// let five = Arc::from_raw(ptr);
+ /// assert_eq!(2, Arc::strong_count(&five));
+ /// Arc::decrement_strong_count(ptr);
+ /// assert_eq!(1, Arc::strong_count(&five));
+ /// }
+ /// ```
+ #[inline]
+ #[stable(feature = "arc_mutate_strong_count", since = "1.51.0")]
+ pub unsafe fn decrement_strong_count(ptr: *const T) {
+ unsafe { mem::drop(Arc::from_raw(ptr)) };
+ }
+
+ #[inline]
+ fn inner(&self) -> &ArcInner<T> {
+ // This unsafety is ok because while this arc is alive we're guaranteed
+ // that the inner pointer is valid. Furthermore, we know that the
+ // `ArcInner` structure itself is `Sync` because the inner data is
+ // `Sync` as well, so we're ok loaning out an immutable pointer to these
+ // contents.
+ unsafe { self.ptr.as_ref() }
+ }
+
+ // Non-inlined part of `drop`.
+ #[inline(never)]
+ unsafe fn drop_slow(&mut self) {
+ // Destroy the data at this time, even though we must not free the box
+ // allocation itself (there might still be weak pointers lying around).
+ unsafe { ptr::drop_in_place(Self::get_mut_unchecked(self)) };
+
+ // Drop the weak ref collectively held by all strong references
+ drop(Weak { ptr: self.ptr });
+ }
+
+ /// Returns `true` if the two `Arc`s point to the same allocation
+ /// (in a vein similar to [`ptr::eq`]).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let five = Arc::new(5);
+ /// let same_five = Arc::clone(&five);
+ /// let other_five = Arc::new(5);
+ ///
+ /// assert!(Arc::ptr_eq(&five, &same_five));
+ /// assert!(!Arc::ptr_eq(&five, &other_five));
+ /// ```
+ ///
+ /// [`ptr::eq`]: core::ptr::eq "ptr::eq"
+ #[inline]
+ #[must_use]
+ #[stable(feature = "ptr_eq", since = "1.17.0")]
+ pub fn ptr_eq(this: &Self, other: &Self) -> bool {
+ this.ptr.as_ptr() == other.ptr.as_ptr()
+ }
+}
+
+impl<T: ?Sized> Arc<T> {
+ /// Allocates an `ArcInner<T>` with sufficient space for
+ /// a possibly-unsized inner value where the value has the layout provided.
+ ///
+ /// The function `mem_to_arcinner` is called with the data pointer
+ /// and must return a (potentially fat) pointer for the `ArcInner<T>`.
+ #[cfg(not(no_global_oom_handling))]
+ unsafe fn allocate_for_layout(
+ value_layout: Layout,
+ allocate: impl FnOnce(Layout) -> Result<NonNull<[u8]>, AllocError>,
+ mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
+ ) -> *mut ArcInner<T> {
+ // Calculate layout using the given value layout.
+ // Previously, layout was calculated on the expression
+ // `&*(ptr as *const ArcInner<T>)`, but this created a misaligned
+ // reference (see #54908).
+ let layout = Layout::new::<ArcInner<()>>().extend(value_layout).unwrap().0.pad_to_align();
+ unsafe {
+ Arc::try_allocate_for_layout(value_layout, allocate, mem_to_arcinner)
+ .unwrap_or_else(|_| handle_alloc_error(layout))
+ }
+ }
+
+ /// Allocates an `ArcInner<T>` with sufficient space for
+ /// a possibly-unsized inner value where the value has the layout provided,
+ /// returning an error if allocation fails.
+ ///
+ /// The function `mem_to_arcinner` is called with the data pointer
+ /// and must return a (potentially fat) pointer for the `ArcInner<T>`.
+ unsafe fn try_allocate_for_layout(
+ value_layout: Layout,
+ allocate: impl FnOnce(Layout) -> Result<NonNull<[u8]>, AllocError>,
+ mem_to_arcinner: impl FnOnce(*mut u8) -> *mut ArcInner<T>,
+ ) -> Result<*mut ArcInner<T>, AllocError> {
+ // Calculate layout using the given value layout.
+ // Previously, layout was calculated on the expression
+ // `&*(ptr as *const ArcInner<T>)`, but this created a misaligned
+ // reference (see #54908).
+ let layout = Layout::new::<ArcInner<()>>().extend(value_layout).unwrap().0.pad_to_align();
+
+ let ptr = allocate(layout)?;
+
+ // Initialize the ArcInner
+ let inner = mem_to_arcinner(ptr.as_non_null_ptr().as_ptr());
+ debug_assert_eq!(unsafe { Layout::for_value(&*inner) }, layout);
+
+ unsafe {
+ ptr::write(&mut (*inner).strong, atomic::AtomicUsize::new(1));
+ ptr::write(&mut (*inner).weak, atomic::AtomicUsize::new(1));
+ }
+
+ Ok(inner)
+ }
+
+ /// Allocates an `ArcInner<T>` with sufficient space for an unsized inner value.
+ #[cfg(not(no_global_oom_handling))]
+ unsafe fn allocate_for_ptr(ptr: *const T) -> *mut ArcInner<T> {
+ // Allocate for the `ArcInner<T>` using the given value.
+ unsafe {
+ Self::allocate_for_layout(
+ Layout::for_value(&*ptr),
+ |layout| Global.allocate(layout),
+ |mem| mem.with_metadata_of(ptr as *mut ArcInner<T>),
+ )
+ }
+ }
+
+ #[cfg(not(no_global_oom_handling))]
+ fn from_box(v: Box<T>) -> Arc<T> {
+ unsafe {
+ let (box_unique, alloc) = Box::into_unique(v);
+ let bptr = box_unique.as_ptr();
+
+ let value_size = size_of_val(&*bptr);
+ let ptr = Self::allocate_for_ptr(bptr);
+
+ // Copy value as bytes
+ ptr::copy_nonoverlapping(
+ bptr as *const T as *const u8,
+ &mut (*ptr).data as *mut _ as *mut u8,
+ value_size,
+ );
+
+ // Free the allocation without dropping its contents
+ box_free(box_unique, alloc);
+
+ Self::from_ptr(ptr)
+ }
+ }
+}
+
+impl<T> Arc<[T]> {
+ /// Allocates an `ArcInner<[T]>` with the given length.
+ #[cfg(not(no_global_oom_handling))]
+ unsafe fn allocate_for_slice(len: usize) -> *mut ArcInner<[T]> {
+ unsafe {
+ Self::allocate_for_layout(
+ Layout::array::<T>(len).unwrap(),
+ |layout| Global.allocate(layout),
+ |mem| ptr::slice_from_raw_parts_mut(mem as *mut T, len) as *mut ArcInner<[T]>,
+ )
+ }
+ }
+
+ /// Copy elements from slice into newly allocated Arc<\[T\]>
+ ///
+ /// Unsafe because the caller must either take ownership or bind `T: Copy`.
+ #[cfg(not(no_global_oom_handling))]
+ unsafe fn copy_from_slice(v: &[T]) -> Arc<[T]> {
+ unsafe {
+ let ptr = Self::allocate_for_slice(v.len());
+
+ ptr::copy_nonoverlapping(v.as_ptr(), &mut (*ptr).data as *mut [T] as *mut T, v.len());
+
+ Self::from_ptr(ptr)
+ }
+ }
+
+ /// Constructs an `Arc<[T]>` from an iterator known to be of a certain size.
+ ///
+ /// Behavior is undefined should the size be wrong.
+ #[cfg(not(no_global_oom_handling))]
+ unsafe fn from_iter_exact(iter: impl iter::Iterator<Item = T>, len: usize) -> Arc<[T]> {
+ // Panic guard while cloning T elements.
+ // In the event of a panic, elements that have been written
+ // into the new ArcInner will be dropped, then the memory freed.
+ struct Guard<T> {
+ mem: NonNull<u8>,
+ elems: *mut T,
+ layout: Layout,
+ n_elems: usize,
+ }
+
+ impl<T> Drop for Guard<T> {
+ fn drop(&mut self) {
+ unsafe {
+ let slice = from_raw_parts_mut(self.elems, self.n_elems);
+ ptr::drop_in_place(slice);
+
+ Global.deallocate(self.mem, self.layout);
+ }
+ }
+ }
+
+ unsafe {
+ let ptr = Self::allocate_for_slice(len);
+
+ let mem = ptr as *mut _ as *mut u8;
+ let layout = Layout::for_value(&*ptr);
+
+ // Pointer to first element
+ let elems = &mut (*ptr).data as *mut [T] as *mut T;
+
+ let mut guard = Guard { mem: NonNull::new_unchecked(mem), elems, layout, n_elems: 0 };
+
+ for (i, item) in iter.enumerate() {
+ ptr::write(elems.add(i), item);
+ guard.n_elems += 1;
+ }
+
+ // All clear. Forget the guard so it doesn't free the new ArcInner.
+ mem::forget(guard);
+
+ Self::from_ptr(ptr)
+ }
+ }
+}
+
+/// Specialization trait used for `From<&[T]>`.
+#[cfg(not(no_global_oom_handling))]
+trait ArcFromSlice<T> {
+ fn from_slice(slice: &[T]) -> Self;
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T: Clone> ArcFromSlice<T> for Arc<[T]> {
+ #[inline]
+ default fn from_slice(v: &[T]) -> Self {
+ unsafe { Self::from_iter_exact(v.iter().cloned(), v.len()) }
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T: Copy> ArcFromSlice<T> for Arc<[T]> {
+ #[inline]
+ fn from_slice(v: &[T]) -> Self {
+ unsafe { Arc::copy_from_slice(v) }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Clone for Arc<T> {
+ /// Makes a clone of the `Arc` pointer.
+ ///
+ /// This creates another pointer to the same allocation, increasing the
+ /// strong reference count.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let five = Arc::new(5);
+ ///
+ /// let _ = Arc::clone(&five);
+ /// ```
+ #[inline]
+ fn clone(&self) -> Arc<T> {
+ // Using a relaxed ordering is alright here, as knowledge of the
+ // original reference prevents other threads from erroneously deleting
+ // the object.
+ //
+ // As explained in the [Boost documentation][1], increasing the
+ // reference counter can always be done with memory_order_relaxed: New
+ // references to an object can only be formed from an existing
+ // reference, and passing an existing reference from one thread to
+ // another must already provide any required synchronization.
+ //
+ // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
+ let old_size = self.inner().strong.fetch_add(1, Relaxed);
+
+ // However we need to guard against massive refcounts in case someone is `mem::forget`ing
+ // Arcs. If we don't do this the count can overflow and users will use-after free. This
+ // branch will never be taken in any realistic program. We abort because such a program is
+ // incredibly degenerate, and we don't care to support it.
+ //
+ // This check is not 100% water-proof: we error when the refcount grows beyond `isize::MAX`.
+ // But we do that check *after* having done the increment, so there is a chance here that
+ // the worst already happened and we actually do overflow the `usize` counter. However, that
+ // requires the counter to grow from `isize::MAX` to `usize::MAX` between the increment
+ // above and the `abort` below, which seems exceedingly unlikely.
+ if old_size > MAX_REFCOUNT {
+ abort();
+ }
+
+ unsafe { Self::from_inner(self.ptr) }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> Deref for Arc<T> {
+ type Target = T;
+
+ #[inline]
+ fn deref(&self) -> &T {
+ &self.inner().data
+ }
+}
+
+#[unstable(feature = "receiver_trait", issue = "none")]
+impl<T: ?Sized> Receiver for Arc<T> {}
+
+impl<T: Clone> Arc<T> {
+ /// Makes a mutable reference into the given `Arc`.
+ ///
+ /// If there are other `Arc` pointers to the same allocation, then `make_mut` will
+ /// [`clone`] the inner value to a new allocation to ensure unique ownership. This is also
+ /// referred to as clone-on-write.
+ ///
+ /// However, if there are no other `Arc` pointers to this allocation, but some [`Weak`]
+ /// pointers, then the [`Weak`] pointers will be dissociated and the inner value will not
+ /// be cloned.
+ ///
+ /// See also [`get_mut`], which will fail rather than cloning the inner value
+ /// or dissociating [`Weak`] pointers.
+ ///
+ /// [`clone`]: Clone::clone
+ /// [`get_mut`]: Arc::get_mut
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let mut data = Arc::new(5);
+ ///
+ /// *Arc::make_mut(&mut data) += 1; // Won't clone anything
+ /// let mut other_data = Arc::clone(&data); // Won't clone inner data
+ /// *Arc::make_mut(&mut data) += 1; // Clones inner data
+ /// *Arc::make_mut(&mut data) += 1; // Won't clone anything
+ /// *Arc::make_mut(&mut other_data) *= 2; // Won't clone anything
+ ///
+ /// // Now `data` and `other_data` point to different allocations.
+ /// assert_eq!(*data, 8);
+ /// assert_eq!(*other_data, 12);
+ /// ```
+ ///
+ /// [`Weak`] pointers will be dissociated:
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let mut data = Arc::new(75);
+ /// let weak = Arc::downgrade(&data);
+ ///
+ /// assert!(75 == *data);
+ /// assert!(75 == *weak.upgrade().unwrap());
+ ///
+ /// *Arc::make_mut(&mut data) += 1;
+ ///
+ /// assert!(76 == *data);
+ /// assert!(weak.upgrade().is_none());
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[stable(feature = "arc_unique", since = "1.4.0")]
+ pub fn make_mut(this: &mut Self) -> &mut T {
+ // Note that we hold both a strong reference and a weak reference.
+ // Thus, releasing our strong reference only will not, by itself, cause
+ // the memory to be deallocated.
+ //
+ // Use Acquire to ensure that we see any writes to `weak` that happen
+ // before release writes (i.e., decrements) to `strong`. Since we hold a
+ // weak count, there's no chance the ArcInner itself could be
+ // deallocated.
+ if this.inner().strong.compare_exchange(1, 0, Acquire, Relaxed).is_err() {
+ // Another strong pointer exists, so we must clone.
+ // Pre-allocate memory to allow writing the cloned value directly.
+ let mut arc = Self::new_uninit();
+ unsafe {
+ let data = Arc::get_mut_unchecked(&mut arc);
+ (**this).write_clone_into_raw(data.as_mut_ptr());
+ *this = arc.assume_init();
+ }
+ } else if this.inner().weak.load(Relaxed) != 1 {
+ // Relaxed suffices in the above because this is fundamentally an
+ // optimization: we are always racing with weak pointers being
+ // dropped. Worst case, we end up allocating a new Arc unnecessarily.
+
+ // We removed the last strong ref, but there are additional weak
+ // refs remaining. We'll move the contents to a new Arc, and
+ // invalidate the other weak refs.
+
+ // Note that it is not possible for the read of `weak` to yield
+ // usize::MAX (i.e., locked), since the weak count can only be
+ // locked by a thread with a strong reference.
+
+ // Materialize our own implicit weak pointer, so that it can clean
+ // up the ArcInner as needed.
+ let _weak = Weak { ptr: this.ptr };
+
+ // Can just steal the data, all that's left is Weaks
+ let mut arc = Self::new_uninit();
+ unsafe {
+ let data = Arc::get_mut_unchecked(&mut arc);
+ data.as_mut_ptr().copy_from_nonoverlapping(&**this, 1);
+ ptr::write(this, arc.assume_init());
+ }
+ } else {
+ // We were the sole reference of either kind; bump back up the
+ // strong ref count.
+ this.inner().strong.store(1, Release);
+ }
+
+ // As with `get_mut()`, the unsafety is ok because our reference was
+ // either unique to begin with, or became one upon cloning the contents.
+ unsafe { Self::get_mut_unchecked(this) }
+ }
+
+ /// If we have the only reference to `T` then unwrap it. Otherwise, clone `T` and return the
+ /// clone.
+ ///
+ /// Assuming `arc_t` is of type `Arc<T>`, this function is functionally equivalent to
+ /// `(*arc_t).clone()`, but will avoid cloning the inner value where possible.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(arc_unwrap_or_clone)]
+ /// # use std::{ptr, sync::Arc};
+ /// let inner = String::from("test");
+ /// let ptr = inner.as_ptr();
+ ///
+ /// let arc = Arc::new(inner);
+ /// let inner = Arc::unwrap_or_clone(arc);
+ /// // The inner value was not cloned
+ /// assert!(ptr::eq(ptr, inner.as_ptr()));
+ ///
+ /// let arc = Arc::new(inner);
+ /// let arc2 = arc.clone();
+ /// let inner = Arc::unwrap_or_clone(arc);
+ /// // Because there were 2 references, we had to clone the inner value.
+ /// assert!(!ptr::eq(ptr, inner.as_ptr()));
+ /// // `arc2` is the last reference, so when we unwrap it we get back
+ /// // the original `String`.
+ /// let inner = Arc::unwrap_or_clone(arc2);
+ /// assert!(ptr::eq(ptr, inner.as_ptr()));
+ /// ```
+ #[inline]
+ #[unstable(feature = "arc_unwrap_or_clone", issue = "93610")]
+ pub fn unwrap_or_clone(this: Self) -> T {
+ Arc::try_unwrap(this).unwrap_or_else(|arc| (*arc).clone())
+ }
+}
+
+impl<T: ?Sized> Arc<T> {
+ /// Returns a mutable reference into the given `Arc`, if there are
+ /// no other `Arc` or [`Weak`] pointers to the same allocation.
+ ///
+ /// Returns [`None`] otherwise, because it is not safe to
+ /// mutate a shared value.
+ ///
+ /// See also [`make_mut`][make_mut], which will [`clone`][clone]
+ /// the inner value when there are other `Arc` pointers.
+ ///
+ /// [make_mut]: Arc::make_mut
+ /// [clone]: Clone::clone
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let mut x = Arc::new(3);
+ /// *Arc::get_mut(&mut x).unwrap() = 4;
+ /// assert_eq!(*x, 4);
+ ///
+ /// let _y = Arc::clone(&x);
+ /// assert!(Arc::get_mut(&mut x).is_none());
+ /// ```
+ #[inline]
+ #[stable(feature = "arc_unique", since = "1.4.0")]
+ pub fn get_mut(this: &mut Self) -> Option<&mut T> {
+ if this.is_unique() {
+ // This unsafety is ok because we're guaranteed that the pointer
+ // returned is the *only* pointer that will ever be returned to T. Our
+ // reference count is guaranteed to be 1 at this point, and we required
+ // the Arc itself to be `mut`, so we're returning the only possible
+ // reference to the inner data.
+ unsafe { Some(Arc::get_mut_unchecked(this)) }
+ } else {
+ None
+ }
+ }
+
+ /// Returns a mutable reference into the given `Arc`,
+ /// without any check.
+ ///
+ /// See also [`get_mut`], which is safe and does appropriate checks.
+ ///
+ /// [`get_mut`]: Arc::get_mut
+ ///
+ /// # Safety
+ ///
+ /// Any other `Arc` or [`Weak`] pointers to the same allocation must not be dereferenced
+ /// for the duration of the returned borrow.
+ /// This is trivially the case if no such pointers exist,
+ /// for example immediately after `Arc::new`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(get_mut_unchecked)]
+ ///
+ /// use std::sync::Arc;
+ ///
+ /// let mut x = Arc::new(String::new());
+ /// unsafe {
+ /// Arc::get_mut_unchecked(&mut x).push_str("foo")
+ /// }
+ /// assert_eq!(*x, "foo");
+ /// ```
+ #[inline]
+ #[unstable(feature = "get_mut_unchecked", issue = "63292")]
+ pub unsafe fn get_mut_unchecked(this: &mut Self) -> &mut T {
+ // We are careful to *not* create a reference covering the "count" fields, as
+ // this would alias with concurrent access to the reference counts (e.g. by `Weak`).
+ unsafe { &mut (*this.ptr.as_ptr()).data }
+ }
+
+ /// Determine whether this is the unique reference (including weak refs) to
+ /// the underlying data.
+ ///
+ /// Note that this requires locking the weak ref count.
+ fn is_unique(&mut self) -> bool {
+ // lock the weak pointer count if we appear to be the sole weak pointer
+ // holder.
+ //
+ // The acquire label here ensures a happens-before relationship with any
+ // writes to `strong` (in particular in `Weak::upgrade`) prior to decrements
+ // of the `weak` count (via `Weak::drop`, which uses release). If the upgraded
+ // weak ref was never dropped, the CAS here will fail so we do not care to synchronize.
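+ //
+ // NB: `usize::MAX` serves as a sentinel "locked" value for the weak count while the
+ // check below runs; code that creates new `Weak`s from an `Arc` (notably
+ // `Arc::downgrade`) treats that value as locked rather than as a real count.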
+ if self.inner().weak.compare_exchange(1, usize::MAX, Acquire, Relaxed).is_ok() {
+ // This needs to be an `Acquire` to synchronize with the decrement of the `strong`
+ // counter in `drop` -- the only access that happens when any but the last reference
+ // is being dropped.
+ let unique = self.inner().strong.load(Acquire) == 1;
+
+ // The release write here synchronizes with a read in `downgrade`,
+ // effectively preventing the above read of `strong` from happening
+ // after the write.
+ self.inner().weak.store(1, Release); // release the lock
+ unique
+ } else {
+ false
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<#[may_dangle] T: ?Sized> Drop for Arc<T> {
+ /// Drops the `Arc`.
+ ///
+ /// This will decrement the strong reference count. If the strong reference
+ /// count reaches zero then the only other references (if any) are
+ /// [`Weak`], so we `drop` the inner value.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// struct Foo;
+ ///
+ /// impl Drop for Foo {
+ /// fn drop(&mut self) {
+ /// println!("dropped!");
+ /// }
+ /// }
+ ///
+ /// let foo = Arc::new(Foo);
+ /// let foo2 = Arc::clone(&foo);
+ ///
+ /// drop(foo); // Doesn't print anything
+ /// drop(foo2); // Prints "dropped!"
+ /// ```
+ #[inline]
+ fn drop(&mut self) {
+ // Because `fetch_sub` is already atomic, we do not need to synchronize
+ // with other threads unless we are going to delete the object. The same
+ // logic applies to the `fetch_sub` on the `weak` count in `Weak::drop` below.
+ if self.inner().strong.fetch_sub(1, Release) != 1 {
+ return;
+ }
+
+ // This fence is needed to prevent reordering of use of the data and
+ // deletion of the data. Because it is marked `Release`, the decreasing
+ // of the reference count synchronizes with this `Acquire` fence. This
+ // means that use of the data happens before decreasing the reference
+ // count, which happens before this fence, which happens before the
+ // deletion of the data.
+ //
+ // As explained in the [Boost documentation][1],
+ //
+ // > It is important to enforce any possible access to the object in one
+ // > thread (through an existing reference) to *happen before* deleting
+ // > the object in a different thread. This is achieved by a "release"
+ // > operation after dropping a reference (any access to the object
+ // > through this reference must obviously have happened before), and an
+ // > "acquire" operation before deleting the object.
+ //
+ // In particular, while the contents of an Arc are usually immutable, it's
+ // possible to have interior writes to something like a Mutex<T>. Since a
+ // Mutex is not acquired when it is deleted, we can't rely on its
+ // synchronization logic to make writes in thread A visible to a destructor
+ // running in thread B.
+ //
+ // Also note that the Acquire fence here could probably be replaced with an
+ // Acquire load, which could improve performance in highly-contended
+ // situations. See [2].
+ //
+ // [1]: https://www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html
+ // [2]: https://github.com/rust-lang/rust/pull/41714
+ acquire!(self.inner().strong);
+
+ unsafe {
+ self.drop_slow();
+ }
+ }
+}
+
+impl Arc<dyn Any + Send + Sync> {
+ /// Attempt to downcast the `Arc<dyn Any + Send + Sync>` to a concrete type.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::any::Any;
+ /// use std::sync::Arc;
+ ///
+ /// fn print_if_string(value: Arc<dyn Any + Send + Sync>) {
+ /// if let Ok(string) = value.downcast::<String>() {
+ /// println!("String ({}): {}", string.len(), string);
+ /// }
+ /// }
+ ///
+ /// let my_string = "Hello World".to_string();
+ /// print_if_string(Arc::new(my_string));
+ /// print_if_string(Arc::new(0i8));
+ /// ```
+ #[inline]
+ #[stable(feature = "rc_downcast", since = "1.29.0")]
+ pub fn downcast<T>(self) -> Result<Arc<T>, Self>
+ where
+ T: Any + Send + Sync,
+ {
+ if (*self).is::<T>() {
+ unsafe {
+ let ptr = self.ptr.cast::<ArcInner<T>>();
+ mem::forget(self);
+ Ok(Arc::from_inner(ptr))
+ }
+ } else {
+ Err(self)
+ }
+ }
+
+ /// Downcasts the `Arc<dyn Any + Send + Sync>` to a concrete type.
+ ///
+ /// For a safe alternative see [`downcast`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(downcast_unchecked)]
+ ///
+ /// use std::any::Any;
+ /// use std::sync::Arc;
+ ///
+ /// let x: Arc<dyn Any + Send + Sync> = Arc::new(1_usize);
+ ///
+ /// unsafe {
+ /// assert_eq!(*x.downcast_unchecked::<usize>(), 1);
+ /// }
+ /// ```
+ ///
+ /// # Safety
+ ///
+ /// The contained value must be of type `T`. Calling this method
+ /// with the incorrect type is *undefined behavior*.
+ ///
+ /// [`downcast`]: Self::downcast
+ #[inline]
+ #[unstable(feature = "downcast_unchecked", issue = "90850")]
+ pub unsafe fn downcast_unchecked<T>(self) -> Arc<T>
+ where
+ T: Any + Send + Sync,
+ {
+ unsafe {
+ let ptr = self.ptr.cast::<ArcInner<T>>();
+ mem::forget(self);
+ Arc::from_inner(ptr)
+ }
+ }
+}
+
+impl<T> Weak<T> {
+ /// Constructs a new `Weak<T>`, without allocating any memory.
+ /// Calling [`upgrade`] on the return value always gives [`None`].
+ ///
+ /// [`upgrade`]: Weak::upgrade
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Weak;
+ ///
+ /// let empty: Weak<i64> = Weak::new();
+ /// assert!(empty.upgrade().is_none());
+ /// ```
+ #[stable(feature = "downgraded_weak", since = "1.10.0")]
+ #[rustc_const_unstable(feature = "const_weak_new", issue = "95091", reason = "recently added")]
+ #[must_use]
+ pub const fn new() -> Weak<T> {
+ Weak { ptr: unsafe { NonNull::new_unchecked(ptr::invalid_mut::<ArcInner<T>>(usize::MAX)) } }
+ }
+}
+
+/// Helper type to allow accessing the reference counts without
+/// making any assertions about the data field.
+struct WeakInner<'a> {
+ weak: &'a atomic::AtomicUsize,
+ strong: &'a atomic::AtomicUsize,
+}
+
+impl<T: ?Sized> Weak<T> {
+ /// Returns a raw pointer to the object `T` pointed to by this `Weak<T>`.
+ ///
+ /// The pointer is valid only if there are some strong references. The pointer may be dangling,
+ /// unaligned or even [`null`] otherwise.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ /// use std::ptr;
+ ///
+ /// let strong = Arc::new("hello".to_owned());
+ /// let weak = Arc::downgrade(&strong);
+ /// // Both point to the same object
+ /// assert!(ptr::eq(&*strong, weak.as_ptr()));
+ /// // The strong here keeps it alive, so we can still access the object.
+ /// assert_eq!("hello", unsafe { &*weak.as_ptr() });
+ ///
+ /// drop(strong);
+ /// // But not any more. We can do weak.as_ptr(), but accessing the pointer would lead to
+ /// // undefined behaviour.
+ /// // assert_eq!("hello", unsafe { &*weak.as_ptr() });
+ /// ```
+ ///
+ /// [`null`]: core::ptr::null "ptr::null"
+ #[must_use]
+ #[stable(feature = "weak_into_raw", since = "1.45.0")]
+ pub fn as_ptr(&self) -> *const T {
+ let ptr: *mut ArcInner<T> = NonNull::as_ptr(self.ptr);
+
+ if is_dangling(ptr) {
+ // If the pointer is dangling, we return the sentinel directly. This cannot be
+ // a valid payload address, as the payload is at least as aligned as ArcInner (usize).
+ ptr as *const T
+ } else {
+ // SAFETY: if is_dangling returns false, then the pointer is dereferenceable.
+ // The payload may be dropped at this point, and we have to maintain provenance,
+ // so use raw pointer manipulation.
+ unsafe { ptr::addr_of_mut!((*ptr).data) }
+ }
+ }
+
+ /// Consumes the `Weak<T>` and turns it into a raw pointer.
+ ///
+ /// This converts the weak pointer into a raw pointer, while still preserving the ownership of
+ /// one weak reference (the weak count is not modified by this operation). It can be turned
+ /// back into the `Weak<T>` with [`from_raw`].
+ ///
+ /// The same restrictions on accessing the target of the pointer as with
+ /// [`as_ptr`] apply.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::{Arc, Weak};
+ ///
+ /// let strong = Arc::new("hello".to_owned());
+ /// let weak = Arc::downgrade(&strong);
+ /// let raw = weak.into_raw();
+ ///
+ /// assert_eq!(1, Arc::weak_count(&strong));
+ /// assert_eq!("hello", unsafe { &*raw });
+ ///
+ /// drop(unsafe { Weak::from_raw(raw) });
+ /// assert_eq!(0, Arc::weak_count(&strong));
+ /// ```
+ ///
+ /// [`from_raw`]: Weak::from_raw
+ /// [`as_ptr`]: Weak::as_ptr
+ #[must_use = "`self` will be dropped if the result is not used"]
+ #[stable(feature = "weak_into_raw", since = "1.45.0")]
+ pub fn into_raw(self) -> *const T {
+ let result = self.as_ptr();
+ mem::forget(self);
+ result
+ }
+
+ /// Converts a raw pointer previously created by [`into_raw`] back into `Weak<T>`.
+ ///
+ /// This can be used to safely get a strong reference (by calling [`upgrade`]
+ /// later) or to deallocate the weak count by dropping the `Weak<T>`.
+ ///
+ /// It takes ownership of one weak reference (with the exception of pointers created by [`new`],
+ /// as these don't own anything; the method still works on them).
+ ///
+ /// # Safety
+ ///
+ /// The pointer must have originated from [`into_raw`] and must still own its potential
+ /// weak reference.
+ ///
+ /// It is allowed for the strong count to be 0 at the time of calling this. Nevertheless, this
+ /// takes ownership of one weak reference currently represented as a raw pointer (the weak
+ /// count is not modified by this operation) and therefore it must be paired with a previous
+ /// call to [`into_raw`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::{Arc, Weak};
+ ///
+ /// let strong = Arc::new("hello".to_owned());
+ ///
+ /// let raw_1 = Arc::downgrade(&strong).into_raw();
+ /// let raw_2 = Arc::downgrade(&strong).into_raw();
+ ///
+ /// assert_eq!(2, Arc::weak_count(&strong));
+ ///
+ /// assert_eq!("hello", &*unsafe { Weak::from_raw(raw_1) }.upgrade().unwrap());
+ /// assert_eq!(1, Arc::weak_count(&strong));
+ ///
+ /// drop(strong);
+ ///
+ /// // Decrement the last weak count.
+ /// assert!(unsafe { Weak::from_raw(raw_2) }.upgrade().is_none());
+ /// ```
+ ///
+ /// [`new`]: Weak::new
+ /// [`into_raw`]: Weak::into_raw
+ /// [`upgrade`]: Weak::upgrade
+ #[stable(feature = "weak_into_raw", since = "1.45.0")]
+ pub unsafe fn from_raw(ptr: *const T) -> Self {
+ // See Weak::as_ptr for context on how the input pointer is derived.
+
+ let ptr = if is_dangling(ptr as *mut T) {
+ // This is a dangling Weak.
+ ptr as *mut ArcInner<T>
+ } else {
+ // Otherwise, we're guaranteed the pointer came from a nondangling Weak.
+ // SAFETY: data_offset is safe to call, as ptr references a real (potentially dropped) T.
+ let offset = unsafe { data_offset(ptr) };
+ // Thus, we reverse the offset to get the whole ArcInner.
+ // SAFETY: the pointer originated from a Weak, so this offset is safe.
+ unsafe { ptr.byte_sub(offset) as *mut ArcInner<T> }
+ };
+
+ // SAFETY: we now have recovered the original Weak pointer, so can create the Weak.
+ Weak { ptr: unsafe { NonNull::new_unchecked(ptr) } }
+ }
+}
+
+impl<T: ?Sized> Weak<T> {
+ /// Attempts to upgrade the `Weak` pointer to an [`Arc`], delaying
+ /// dropping of the inner value if successful.
+ ///
+ /// Returns [`None`] if the inner value has since been dropped.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let five = Arc::new(5);
+ ///
+ /// let weak_five = Arc::downgrade(&five);
+ ///
+ /// let strong_five: Option<Arc<_>> = weak_five.upgrade();
+ /// assert!(strong_five.is_some());
+ ///
+ /// // Destroy all strong pointers.
+ /// drop(strong_five);
+ /// drop(five);
+ ///
+ /// assert!(weak_five.upgrade().is_none());
+ /// ```
+ #[must_use = "this returns a new `Arc`, \
+ without modifying the original weak pointer"]
+ #[stable(feature = "arc_weak", since = "1.4.0")]
+ pub fn upgrade(&self) -> Option<Arc<T>> {
+ // We use a CAS loop to increment the strong count instead of a
+ // fetch_add as this function should never take the reference count
+ // from zero to one.
+ let inner = self.inner()?;
+
+ // Relaxed load because any write of 0 that we can observe
+ // leaves the field in a permanently zero state (so a
+ // "stale" read of 0 is fine), and any other value is
+ // confirmed via the CAS below.
+ let mut n = inner.strong.load(Relaxed);
+
+ loop {
+ if n == 0 {
+ return None;
+ }
+
+ // See comments in `Arc::clone` for why we do this (for `mem::forget`).
+ if n > MAX_REFCOUNT {
+ abort();
+ }
+
+ // Relaxed is fine for the failure case because we don't have any expectations about the new state.
+ // Acquire is necessary for the success case to synchronise with `Arc::new_cyclic`, when the inner
+ // value can be initialized after `Weak` references have already been created. In that case, we
+ // expect to observe the fully initialized value.
+ match inner.strong.compare_exchange_weak(n, n + 1, Acquire, Relaxed) {
+ Ok(_) => return Some(unsafe { Arc::from_inner(self.ptr) }), // null checked above
+ Err(old) => n = old,
+ }
+ }
+ }
+
+ /// Gets the number of strong (`Arc`) pointers pointing to this allocation.
+ ///
+ /// If `self` was created using [`Weak::new`], this will return 0.
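+ ///
+ /// # Examples
+ ///
+ /// A small illustration (values chosen arbitrarily) of how the count follows the
+ /// number of live `Arc`s:
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let five = Arc::new(5);
+ /// let weak_five = Arc::downgrade(&five);
+ /// assert_eq!(1, weak_five.strong_count());
+ ///
+ /// drop(five);
+ /// assert_eq!(0, weak_five.strong_count());
+ /// ```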
+ #[must_use]
+ #[stable(feature = "weak_counts", since = "1.41.0")]
+ pub fn strong_count(&self) -> usize {
+ if let Some(inner) = self.inner() { inner.strong.load(Acquire) } else { 0 }
+ }
+
+ /// Gets an approximation of the number of `Weak` pointers pointing to this
+ /// allocation.
+ ///
+ /// If `self` was created using [`Weak::new`], or if there are no remaining
+ /// strong pointers, this will return 0.
+ ///
+ /// # Accuracy
+ ///
+ /// Due to implementation details, the returned value can be off by 1 in
+ /// either direction when other threads are manipulating any `Arc`s or
+ /// `Weak`s pointing to the same allocation.
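+ ///
+ /// # Examples
+ ///
+ /// An illustrative, single-threaded example (so the accuracy caveat above does not
+ /// come into play):
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let five = Arc::new(5);
+ /// let weak_five = Arc::downgrade(&five);
+ /// assert_eq!(1, weak_five.weak_count());
+ ///
+ /// let also_weak = weak_five.clone();
+ /// assert_eq!(2, weak_five.weak_count());
+ ///
+ /// drop(five);
+ /// // With no strong pointers left, the reported weak count is 0.
+ /// assert_eq!(0, weak_five.weak_count());
+ /// ```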
+ #[must_use]
+ #[stable(feature = "weak_counts", since = "1.41.0")]
+ pub fn weak_count(&self) -> usize {
+ self.inner()
+ .map(|inner| {
+ let weak = inner.weak.load(Acquire);
+ let strong = inner.strong.load(Acquire);
+ if strong == 0 {
+ 0
+ } else {
+ // Since we observed that there was at least one strong pointer
+ // after reading the weak count, we know that the implicit weak
+ // reference (present whenever any strong references are alive)
+ // was still around when we observed the weak count, and can
+ // therefore safely subtract it.
+ weak - 1
+ }
+ })
+ .unwrap_or(0)
+ }
+
+ /// Returns `None` when the pointer is dangling and there is no allocated `ArcInner`
+ /// (i.e., when this `Weak` was created by `Weak::new`).
+ #[inline]
+ fn inner(&self) -> Option<WeakInner<'_>> {
+ if is_dangling(self.ptr.as_ptr()) {
+ None
+ } else {
+ // We are careful to *not* create a reference covering the "data" field, as
+ // the field may be mutated concurrently (for example, if the last `Arc`
+ // is dropped, the data field will be dropped in-place).
+ Some(unsafe {
+ let ptr = self.ptr.as_ptr();
+ WeakInner { strong: &(*ptr).strong, weak: &(*ptr).weak }
+ })
+ }
+ }
+
+ /// Returns `true` if the two `Weak`s point to the same allocation (similar to
+ /// [`ptr::eq`]), or if both don't point to any allocation
+ /// (because they were created with `Weak::new()`).
+ ///
+ /// # Notes
+ ///
+ /// Since this compares pointers, two `Weak`s created by `Weak::new()` will compare
+ /// equal to each other, even though they don't point to any allocation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let first_rc = Arc::new(5);
+ /// let first = Arc::downgrade(&first_rc);
+ /// let second = Arc::downgrade(&first_rc);
+ ///
+ /// assert!(first.ptr_eq(&second));
+ ///
+ /// let third_rc = Arc::new(5);
+ /// let third = Arc::downgrade(&third_rc);
+ ///
+ /// assert!(!first.ptr_eq(&third));
+ /// ```
+ ///
+ /// Comparing `Weak::new`.
+ ///
+ /// ```
+ /// use std::sync::{Arc, Weak};
+ ///
+ /// let first = Weak::new();
+ /// let second = Weak::new();
+ /// assert!(first.ptr_eq(&second));
+ ///
+ /// let third_rc = Arc::new(());
+ /// let third = Arc::downgrade(&third_rc);
+ /// assert!(!first.ptr_eq(&third));
+ /// ```
+ ///
+ /// [`ptr::eq`]: core::ptr::eq "ptr::eq"
+ #[inline]
+ #[must_use]
+ #[stable(feature = "weak_ptr_eq", since = "1.39.0")]
+ pub fn ptr_eq(&self, other: &Self) -> bool {
+ self.ptr.as_ptr() == other.ptr.as_ptr()
+ }
+}
+
+#[stable(feature = "arc_weak", since = "1.4.0")]
+impl<T: ?Sized> Clone for Weak<T> {
+ /// Makes a clone of the `Weak` pointer that points to the same allocation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::{Arc, Weak};
+ ///
+ /// let weak_five = Arc::downgrade(&Arc::new(5));
+ ///
+ /// let _ = Weak::clone(&weak_five);
+ /// ```
+ #[inline]
+ fn clone(&self) -> Weak<T> {
+ let inner = if let Some(inner) = self.inner() {
+ inner
+ } else {
+ return Weak { ptr: self.ptr };
+ };
+ // See comments in Arc::clone() for why this is relaxed. This can use a
+ // fetch_add (ignoring the lock) because the weak count is only locked
+ // when there are *no other* weak pointers in existence. (So we can't be
+ // running this code in that case.)
+ let old_size = inner.weak.fetch_add(1, Relaxed);
+
+ // See comments in Arc::clone() for why we do this (for mem::forget).
+ if old_size > MAX_REFCOUNT {
+ abort();
+ }
+
+ Weak { ptr: self.ptr }
+ }
+}
+
+#[stable(feature = "downgraded_weak", since = "1.10.0")]
+impl<T> Default for Weak<T> {
+ /// Constructs a new `Weak<T>`, without allocating memory.
+ /// Calling [`upgrade`] on the return value always
+ /// gives [`None`].
+ ///
+ /// [`upgrade`]: Weak::upgrade
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Weak;
+ ///
+ /// let empty: Weak<i64> = Default::default();
+ /// assert!(empty.upgrade().is_none());
+ /// ```
+ fn default() -> Weak<T> {
+ Weak::new()
+ }
+}
+
+#[stable(feature = "arc_weak", since = "1.4.0")]
+unsafe impl<#[may_dangle] T: ?Sized> Drop for Weak<T> {
+ /// Drops the `Weak` pointer.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::{Arc, Weak};
+ ///
+ /// struct Foo;
+ ///
+ /// impl Drop for Foo {
+ /// fn drop(&mut self) {
+ /// println!("dropped!");
+ /// }
+ /// }
+ ///
+ /// let foo = Arc::new(Foo);
+ /// let weak_foo = Arc::downgrade(&foo);
+ /// let other_weak_foo = Weak::clone(&weak_foo);
+ ///
+ /// drop(weak_foo); // Doesn't print anything
+ /// drop(foo); // Prints "dropped!"
+ ///
+ /// assert!(other_weak_foo.upgrade().is_none());
+ /// ```
+ fn drop(&mut self) {
+ // If we find out that we were the last weak pointer, then it's time to
+ // deallocate the data entirely. See the discussion in Arc::drop() about
+ // the memory orderings.
+ //
+ // It's not necessary to check for the locked state here, because the
+ // weak count can only be locked if there was precisely one weak ref,
+ // meaning that drop could only subsequently run ON that remaining weak
+ // ref, which can only happen after the lock is released.
+ let inner = if let Some(inner) = self.inner() { inner } else { return };
+
+ if inner.weak.fetch_sub(1, Release) == 1 {
+ acquire!(inner.weak);
+ unsafe { Global.deallocate(self.ptr.cast(), Layout::for_value_raw(self.ptr.as_ptr())) }
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+trait ArcEqIdent<T: ?Sized + PartialEq> {
+ fn eq(&self, other: &Arc<T>) -> bool;
+ fn ne(&self, other: &Arc<T>) -> bool;
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + PartialEq> ArcEqIdent<T> for Arc<T> {
+ #[inline]
+ default fn eq(&self, other: &Arc<T>) -> bool {
+ **self == **other
+ }
+ #[inline]
+ default fn ne(&self, other: &Arc<T>) -> bool {
+ **self != **other
+ }
+}
+
+/// We're doing this specialization here, and not as a more general optimization on `&T`, because it
+/// would otherwise add a cost to all equality checks on refs. We assume that `Arc`s are used to
+/// store large values that are slow to clone but also expensive to check for equality, causing this
+/// cost to pay off more easily. It's also more likely to have two `Arc` clones that point to
+/// the same value than two `&T`s that do.
+///
+/// We can only do this when `T: Eq` as a `PartialEq` might be deliberately irreflexive.
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + crate::rc::MarkerEq> ArcEqIdent<T> for Arc<T> {
+ #[inline]
+ fn eq(&self, other: &Arc<T>) -> bool {
+ Arc::ptr_eq(self, other) || **self == **other
+ }
+
+ #[inline]
+ fn ne(&self, other: &Arc<T>) -> bool {
+ !Arc::ptr_eq(self, other) && **self != **other
+ }
+}
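+
+// An illustrative consequence of the specialization above: for `a: Arc<T>` with `T: Eq` and
+// `b = Arc::clone(&a)`, `a == b` short-circuits on the cheap pointer comparison and never
+// invokes `T`'s potentially expensive `PartialEq` implementation.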
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + PartialEq> PartialEq for Arc<T> {
+ /// Equality for two `Arc`s.
+ ///
+ /// Two `Arc`s are equal if their inner values are equal, even if they are
+ /// stored in different allocations.
+ ///
+ /// If `T` also implements `Eq` (implying reflexivity of equality),
+ /// two `Arc`s that point to the same allocation are always equal.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let five = Arc::new(5);
+ ///
+ /// assert!(five == Arc::new(5));
+ /// ```
+ #[inline]
+ fn eq(&self, other: &Arc<T>) -> bool {
+ ArcEqIdent::eq(self, other)
+ }
+
+ /// Inequality for two `Arc`s.
+ ///
+ /// Two `Arc`s are unequal if their inner values are unequal.
+ ///
+ /// If `T` also implements `Eq` (implying reflexivity of equality),
+ /// two `Arc`s that point to the same value are never unequal.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let five = Arc::new(5);
+ ///
+ /// assert!(five != Arc::new(6));
+ /// ```
+ #[inline]
+ fn ne(&self, other: &Arc<T>) -> bool {
+ ArcEqIdent::ne(self, other)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + PartialOrd> PartialOrd for Arc<T> {
+ /// Partial comparison for two `Arc`s.
+ ///
+ /// The two are compared by calling `partial_cmp()` on their inner values.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ /// use std::cmp::Ordering;
+ ///
+ /// let five = Arc::new(5);
+ ///
+ /// assert_eq!(Some(Ordering::Less), five.partial_cmp(&Arc::new(6)));
+ /// ```
+ fn partial_cmp(&self, other: &Arc<T>) -> Option<Ordering> {
+ (**self).partial_cmp(&**other)
+ }
+
+ /// Less-than comparison for two `Arc`s.
+ ///
+ /// The two are compared by calling `<` on their inner values.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let five = Arc::new(5);
+ ///
+ /// assert!(five < Arc::new(6));
+ /// ```
+ fn lt(&self, other: &Arc<T>) -> bool {
+ *(*self) < *(*other)
+ }
+
+ /// 'Less than or equal to' comparison for two `Arc`s.
+ ///
+ /// The two are compared by calling `<=` on their inner values.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let five = Arc::new(5);
+ ///
+ /// assert!(five <= Arc::new(5));
+ /// ```
+ fn le(&self, other: &Arc<T>) -> bool {
+ *(*self) <= *(*other)
+ }
+
+ /// Greater-than comparison for two `Arc`s.
+ ///
+ /// The two are compared by calling `>` on their inner values.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let five = Arc::new(5);
+ ///
+ /// assert!(five > Arc::new(4));
+ /// ```
+ fn gt(&self, other: &Arc<T>) -> bool {
+ *(*self) > *(*other)
+ }
+
+ /// 'Greater than or equal to' comparison for two `Arc`s.
+ ///
+ /// The two are compared by calling `>=` on their inner values.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let five = Arc::new(5);
+ ///
+ /// assert!(five >= Arc::new(5));
+ /// ```
+ fn ge(&self, other: &Arc<T>) -> bool {
+ *(*self) >= *(*other)
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + Ord> Ord for Arc<T> {
+ /// Comparison for two `Arc`s.
+ ///
+ /// The two are compared by calling `cmp()` on their inner values.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ /// use std::cmp::Ordering;
+ ///
+ /// let five = Arc::new(5);
+ ///
+ /// assert_eq!(Ordering::Less, five.cmp(&Arc::new(6)));
+ /// ```
+ fn cmp(&self, other: &Arc<T>) -> Ordering {
+ (**self).cmp(&**other)
+ }
+}
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + Eq> Eq for Arc<T> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + fmt::Display> fmt::Display for Arc<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Display::fmt(&**self, f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + fmt::Debug> fmt::Debug for Arc<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&**self, f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> fmt::Pointer for Arc<T> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Pointer::fmt(&(&**self as *const T), f)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Default> Default for Arc<T> {
+ /// Creates a new `Arc<T>`, with the `Default` value for `T`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::sync::Arc;
+ ///
+ /// let x: Arc<i32> = Default::default();
+ /// assert_eq!(*x, 0);
+ /// ```
+ fn default() -> Arc<T> {
+ Arc::new(Default::default())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized + Hash> Hash for Arc<T> {
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ (**self).hash(state)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "from_for_ptrs", since = "1.6.0")]
+impl<T> From<T> for Arc<T> {
+ /// Converts a `T` into an `Arc<T>`
+ ///
+ /// The conversion moves the value into a
+ /// newly allocated `Arc`. It is equivalent to
+ /// calling `Arc::new(t)`.
+ ///
+ /// # Example
+ ///
+ /// ```rust
+ /// # use std::sync::Arc;
+ /// let x = 5;
+ /// let arc = Arc::new(5);
+ ///
+ /// assert_eq!(Arc::from(x), arc);
+ /// ```
+ fn from(t: T) -> Self {
+ Arc::new(t)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "shared_from_slice", since = "1.21.0")]
+impl<T: Clone> From<&[T]> for Arc<[T]> {
+ /// Allocate a reference-counted slice and fill it by cloning `v`'s items.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # use std::sync::Arc;
+ /// let original: &[i32] = &[1, 2, 3];
+ /// let shared: Arc<[i32]> = Arc::from(original);
+ /// assert_eq!(&[1, 2, 3], &shared[..]);
+ /// ```
+ #[inline]
+ fn from(v: &[T]) -> Arc<[T]> {
+ <Self as ArcFromSlice<T>>::from_slice(v)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "shared_from_slice", since = "1.21.0")]
+impl From<&str> for Arc<str> {
+ /// Allocate a reference-counted `str` and copy `v` into it.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # use std::sync::Arc;
+ /// let shared: Arc<str> = Arc::from("eggplant");
+ /// assert_eq!("eggplant", &shared[..]);
+ /// ```
+ #[inline]
+ fn from(v: &str) -> Arc<str> {
+ let arc = Arc::<[u8]>::from(v.as_bytes());
+ unsafe { Arc::from_raw(Arc::into_raw(arc) as *const str) }
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "shared_from_slice", since = "1.21.0")]
+impl From<String> for Arc<str> {
+ /// Allocate a reference-counted `str` and copy `v` into it.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # use std::sync::Arc;
+ /// let unique: String = "eggplant".to_owned();
+ /// let shared: Arc<str> = Arc::from(unique);
+ /// assert_eq!("eggplant", &shared[..]);
+ /// ```
+ #[inline]
+ fn from(v: String) -> Arc<str> {
+ Arc::from(&v[..])
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "shared_from_slice", since = "1.21.0")]
+impl<T: ?Sized> From<Box<T>> for Arc<T> {
+ /// Move a boxed object to a new, reference-counted allocation.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # use std::sync::Arc;
+ /// let unique: Box<str> = Box::from("eggplant");
+ /// let shared: Arc<str> = Arc::from(unique);
+ /// assert_eq!("eggplant", &shared[..]);
+ /// ```
+ #[inline]
+ fn from(v: Box<T>) -> Arc<T> {
+ Arc::from_box(v)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "shared_from_slice", since = "1.21.0")]
+impl<T> From<Vec<T>> for Arc<[T]> {
+ /// Allocate a reference-counted slice and move `v`'s items into it.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # use std::sync::Arc;
+ /// let unique: Vec<i32> = vec![1, 2, 3];
+ /// let shared: Arc<[i32]> = Arc::from(unique);
+ /// assert_eq!(&[1, 2, 3], &shared[..]);
+ /// ```
+ #[inline]
+ fn from(mut v: Vec<T>) -> Arc<[T]> {
+ unsafe {
+ let arc = Arc::copy_from_slice(&v);
+
+ // Allow the Vec to free its memory, but not destroy its contents
+ v.set_len(0);
+
+ arc
+ }
+ }
+}
+
+#[stable(feature = "shared_from_cow", since = "1.45.0")]
+impl<'a, B> From<Cow<'a, B>> for Arc<B>
+where
+ B: ToOwned + ?Sized,
+ Arc<B>: From<&'a B> + From<B::Owned>,
+{
+ /// Create an atomically reference-counted pointer from
+ /// a clone-on-write pointer by copying its content.
+ ///
+ /// # Example
+ ///
+ /// ```rust
+ /// # use std::sync::Arc;
+ /// # use std::borrow::Cow;
+ /// let cow: Cow<str> = Cow::Borrowed("eggplant");
+ /// let shared: Arc<str> = Arc::from(cow);
+ /// assert_eq!("eggplant", &shared[..]);
+ /// ```
+ #[inline]
+ fn from(cow: Cow<'a, B>) -> Arc<B> {
+ match cow {
+ Cow::Borrowed(s) => Arc::from(s),
+ Cow::Owned(s) => Arc::from(s),
+ }
+ }
+}
+
+#[stable(feature = "shared_from_str", since = "1.62.0")]
+impl From<Arc<str>> for Arc<[u8]> {
+ /// Converts an atomically reference-counted string slice into a byte slice.
+ ///
+ /// # Example
+ ///
+ /// ```
+ /// # use std::sync::Arc;
+ /// let string: Arc<str> = Arc::from("eggplant");
+ /// let bytes: Arc<[u8]> = Arc::from(string);
+ /// assert_eq!("eggplant".as_bytes(), bytes.as_ref());
+ /// ```
+ #[inline]
+ fn from(rc: Arc<str>) -> Self {
+ // SAFETY: `str` has the same layout as `[u8]`.
+ unsafe { Arc::from_raw(Arc::into_raw(rc) as *const [u8]) }
+ }
+}
+
+#[stable(feature = "boxed_slice_try_from", since = "1.43.0")]
+impl<T, const N: usize> TryFrom<Arc<[T]>> for Arc<[T; N]> {
+ type Error = Arc<[T]>;
+
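+ /// Attempts to convert a reference-counted slice into a reference-counted array of
+ /// length `N`, returning the original `Arc<[T]>` if the length doesn't match.
+ ///
+ /// A short illustration (values chosen arbitrarily):
+ ///
+ /// ```
+ /// use std::convert::TryInto;
+ /// use std::sync::Arc;
+ ///
+ /// let slice: Arc<[u32]> = Arc::from(vec![1, 2, 3]);
+ ///
+ /// let array: Arc<[u32; 3]> = slice.clone().try_into().unwrap();
+ /// assert_eq!(*array, [1, 2, 3]);
+ ///
+ /// let wrong: Result<Arc<[u32; 2]>, _> = slice.try_into();
+ /// assert!(wrong.is_err());
+ /// ```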
+ fn try_from(boxed_slice: Arc<[T]>) -> Result<Self, Self::Error> {
+ if boxed_slice.len() == N {
+ Ok(unsafe { Arc::from_raw(Arc::into_raw(boxed_slice) as *mut [T; N]) })
+ } else {
+ Err(boxed_slice)
+ }
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "shared_from_iter", since = "1.37.0")]
+impl<T> iter::FromIterator<T> for Arc<[T]> {
+ /// Takes each element in the `Iterator` and collects it into an `Arc<[T]>`.
+ ///
+ /// # Performance characteristics
+ ///
+ /// ## The general case
+ ///
+ /// In the general case, collecting into `Arc<[T]>` is done by first
+ /// collecting into a `Vec<T>`. That is, when writing the following:
+ ///
+ /// ```rust
+ /// # use std::sync::Arc;
+ /// let evens: Arc<[u8]> = (0..10).filter(|&x| x % 2 == 0).collect();
+ /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
+ /// ```
+ ///
+ /// this behaves as if we wrote:
+ ///
+ /// ```rust
+ /// # use std::sync::Arc;
+ /// let evens: Arc<[u8]> = (0..10).filter(|&x| x % 2 == 0)
+ /// .collect::<Vec<_>>() // The first set of allocations happens here.
+ /// .into(); // A second allocation for `Arc<[T]>` happens here.
+ /// # assert_eq!(&*evens, &[0, 2, 4, 6, 8]);
+ /// ```
+ ///
+ /// This will allocate as many times as needed for constructing the `Vec<T>`
+ /// and then it will allocate once for turning the `Vec<T>` into the `Arc<[T]>`.
+ ///
+ /// ## Iterators of known length
+ ///
+ /// When your `Iterator` implements `TrustedLen` and is of an exact size,
+ /// a single allocation will be made for the `Arc<[T]>`. For example:
+ ///
+ /// ```rust
+ /// # use std::sync::Arc;
+ /// let evens: Arc<[u8]> = (0..10).collect(); // Just a single allocation happens here.
+ /// # assert_eq!(&*evens, &*(0..10).collect::<Vec<_>>());
+ /// ```
+ fn from_iter<I: iter::IntoIterator<Item = T>>(iter: I) -> Self {
+ ToArcSlice::to_arc_slice(iter.into_iter())
+ }
+}
+
+/// Specialization trait used for collecting into `Arc<[T]>`.
+trait ToArcSlice<T>: Iterator<Item = T> + Sized {
+ fn to_arc_slice(self) -> Arc<[T]>;
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T, I: Iterator<Item = T>> ToArcSlice<T> for I {
+ default fn to_arc_slice(self) -> Arc<[T]> {
+ self.collect::<Vec<T>>().into()
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T, I: iter::TrustedLen<Item = T>> ToArcSlice<T> for I {
+ fn to_arc_slice(self) -> Arc<[T]> {
+ // This is the case for a `TrustedLen` iterator.
+ let (low, high) = self.size_hint();
+ if let Some(high) = high {
+ debug_assert_eq!(
+ low,
+ high,
+ "TrustedLen iterator's size hint is not exact: {:?}",
+ (low, high)
+ );
+
+ unsafe {
+ // SAFETY: We need to ensure that the iterator has an exact length, which the
+ // `low == high` check on this `TrustedLen` iterator guarantees.
+ Arc::from_iter_exact(self, low)
+ }
+ } else {
+ // The TrustedLen contract guarantees that `upper_bound == None` implies an iterator
+ // length exceeding `usize::MAX`.
+ // The default implementation would collect into a vec which would panic.
+ // Thus we panic here immediately without invoking `Vec` code.
+ panic!("capacity overflow");
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: ?Sized> borrow::Borrow<T> for Arc<T> {
+ fn borrow(&self) -> &T {
+ &**self
+ }
+}
+
+#[stable(since = "1.5.0", feature = "smart_ptr_as_ref")]
+impl<T: ?Sized> AsRef<T> for Arc<T> {
+ fn as_ref(&self) -> &T {
+ &**self
+ }
+}
+
+#[stable(feature = "pin", since = "1.33.0")]
+impl<T: ?Sized> Unpin for Arc<T> {}
+
+/// Get the offset within an `ArcInner` for the payload behind a pointer.
+///
+/// # Safety
+///
+/// The pointer must point to (and have valid metadata for) a previously
+/// valid instance of T, but the T is allowed to be dropped.
+unsafe fn data_offset<T: ?Sized>(ptr: *const T) -> usize {
+ // Align the unsized value to the end of the ArcInner.
+ // Because ArcInner is repr(C), the data field will always be the last field in memory.
+ // SAFETY: since the only unsized types possible are slices, trait objects,
+ // and extern types, the input safety requirement is currently enough to
+ // satisfy the requirements of align_of_val_raw; this is an implementation
+ // detail of the language that must not be relied upon outside of std.
+ unsafe { data_offset_align(align_of_val_raw(ptr)) }
+}
+
+#[inline]
+fn data_offset_align(align: usize) -> usize {
+ let layout = Layout::new::<ArcInner<()>>();
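+ // For example, on a typical 64-bit target `ArcInner<()>` occupies 16 bytes (the two
+ // `AtomicUsize` counters), so a payload with alignment <= 8 starts at offset 16, while a
+ // payload with alignment 32 starts at offset 16 + 16 = 32 (illustrative; target-dependent).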
+ layout.size() + layout.padding_needed_for(align)
+}
diff --git a/library/alloc/src/sync/tests.rs b/library/alloc/src/sync/tests.rs
new file mode 100644
index 000000000..202d0e7f0
--- /dev/null
+++ b/library/alloc/src/sync/tests.rs
@@ -0,0 +1,620 @@
+use super::*;
+
+use std::boxed::Box;
+use std::clone::Clone;
+use std::convert::{From, TryInto};
+use std::mem::drop;
+use std::ops::Drop;
+use std::option::Option::{self, None, Some};
+use std::sync::atomic::{
+ self,
+ Ordering::{Acquire, SeqCst},
+};
+use std::sync::mpsc::channel;
+use std::sync::Mutex;
+use std::thread;
+
+use crate::vec::Vec;
+
+struct Canary(*mut atomic::AtomicUsize);
+
+impl Drop for Canary {
+ fn drop(&mut self) {
+ unsafe {
+ match *self {
+ Canary(c) => {
+ (*c).fetch_add(1, SeqCst);
+ }
+ }
+ }
+ }
+}
+
+#[test]
+#[cfg_attr(target_os = "emscripten", ignore)]
+fn manually_share_arc() {
+ let v = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+ let arc_v = Arc::new(v);
+
+ let (tx, rx) = channel();
+
+ let _t = thread::spawn(move || {
+ let arc_v: Arc<Vec<i32>> = rx.recv().unwrap();
+ assert_eq!((*arc_v)[3], 4);
+ });
+
+ tx.send(arc_v.clone()).unwrap();
+
+ assert_eq!((*arc_v)[2], 3);
+ assert_eq!((*arc_v)[4], 5);
+}
+
+#[test]
+fn test_arc_get_mut() {
+ let mut x = Arc::new(3);
+ *Arc::get_mut(&mut x).unwrap() = 4;
+ assert_eq!(*x, 4);
+ let y = x.clone();
+ assert!(Arc::get_mut(&mut x).is_none());
+ drop(y);
+ assert!(Arc::get_mut(&mut x).is_some());
+ let _w = Arc::downgrade(&x);
+ assert!(Arc::get_mut(&mut x).is_none());
+}
+
+#[test]
+fn weak_counts() {
+ assert_eq!(Weak::weak_count(&Weak::<u64>::new()), 0);
+ assert_eq!(Weak::strong_count(&Weak::<u64>::new()), 0);
+
+ let a = Arc::new(0);
+ let w = Arc::downgrade(&a);
+ assert_eq!(Weak::strong_count(&w), 1);
+ assert_eq!(Weak::weak_count(&w), 1);
+ let w2 = w.clone();
+ assert_eq!(Weak::strong_count(&w), 1);
+ assert_eq!(Weak::weak_count(&w), 2);
+ assert_eq!(Weak::strong_count(&w2), 1);
+ assert_eq!(Weak::weak_count(&w2), 2);
+ drop(w);
+ assert_eq!(Weak::strong_count(&w2), 1);
+ assert_eq!(Weak::weak_count(&w2), 1);
+ let a2 = a.clone();
+ assert_eq!(Weak::strong_count(&w2), 2);
+ assert_eq!(Weak::weak_count(&w2), 1);
+ drop(a2);
+ drop(a);
+ assert_eq!(Weak::strong_count(&w2), 0);
+ assert_eq!(Weak::weak_count(&w2), 0);
+ drop(w2);
+}
+
+#[test]
+fn try_unwrap() {
+ let x = Arc::new(3);
+ assert_eq!(Arc::try_unwrap(x), Ok(3));
+ let x = Arc::new(4);
+ let _y = x.clone();
+ assert_eq!(Arc::try_unwrap(x), Err(Arc::new(4)));
+ let x = Arc::new(5);
+ let _w = Arc::downgrade(&x);
+ assert_eq!(Arc::try_unwrap(x), Ok(5));
+}
+
+#[test]
+fn into_from_raw() {
+ let x = Arc::new(Box::new("hello"));
+ let y = x.clone();
+
+ let x_ptr = Arc::into_raw(x);
+ drop(y);
+ unsafe {
+ assert_eq!(**x_ptr, "hello");
+
+ let x = Arc::from_raw(x_ptr);
+ assert_eq!(**x, "hello");
+
+ assert_eq!(Arc::try_unwrap(x).map(|x| *x), Ok("hello"));
+ }
+}
+
+#[test]
+fn test_into_from_raw_unsized() {
+ use std::fmt::Display;
+ use std::string::ToString;
+
+ let arc: Arc<str> = Arc::from("foo");
+
+ let ptr = Arc::into_raw(arc.clone());
+ let arc2 = unsafe { Arc::from_raw(ptr) };
+
+ assert_eq!(unsafe { &*ptr }, "foo");
+ assert_eq!(arc, arc2);
+
+ let arc: Arc<dyn Display> = Arc::new(123);
+
+ let ptr = Arc::into_raw(arc.clone());
+ let arc2 = unsafe { Arc::from_raw(ptr) };
+
+ assert_eq!(unsafe { &*ptr }.to_string(), "123");
+ assert_eq!(arc2.to_string(), "123");
+}
+
+#[test]
+fn into_from_weak_raw() {
+ let x = Arc::new(Box::new("hello"));
+ let y = Arc::downgrade(&x);
+
+ let y_ptr = Weak::into_raw(y);
+ unsafe {
+ assert_eq!(**y_ptr, "hello");
+
+ let y = Weak::from_raw(y_ptr);
+ let y_up = Weak::upgrade(&y).unwrap();
+ assert_eq!(**y_up, "hello");
+ drop(y_up);
+
+ assert_eq!(Arc::try_unwrap(x).map(|x| *x), Ok("hello"));
+ }
+}
+
+#[test]
+fn test_into_from_weak_raw_unsized() {
+ use std::fmt::Display;
+ use std::string::ToString;
+
+ let arc: Arc<str> = Arc::from("foo");
+ let weak: Weak<str> = Arc::downgrade(&arc);
+
+ let ptr = Weak::into_raw(weak.clone());
+ let weak2 = unsafe { Weak::from_raw(ptr) };
+
+ assert_eq!(unsafe { &*ptr }, "foo");
+ assert!(weak.ptr_eq(&weak2));
+
+ let arc: Arc<dyn Display> = Arc::new(123);
+ let weak: Weak<dyn Display> = Arc::downgrade(&arc);
+
+ let ptr = Weak::into_raw(weak.clone());
+ let weak2 = unsafe { Weak::from_raw(ptr) };
+
+ assert_eq!(unsafe { &*ptr }.to_string(), "123");
+ assert!(weak.ptr_eq(&weak2));
+}
+
+#[test]
+fn test_cowarc_clone_make_mut() {
+ let mut cow0 = Arc::new(75);
+ let mut cow1 = cow0.clone();
+ let mut cow2 = cow1.clone();
+
+ assert!(75 == *Arc::make_mut(&mut cow0));
+ assert!(75 == *Arc::make_mut(&mut cow1));
+ assert!(75 == *Arc::make_mut(&mut cow2));
+
+ *Arc::make_mut(&mut cow0) += 1;
+ *Arc::make_mut(&mut cow1) += 2;
+ *Arc::make_mut(&mut cow2) += 3;
+
+ assert!(76 == *cow0);
+ assert!(77 == *cow1);
+ assert!(78 == *cow2);
+
+ // none should point to the same backing memory
+ assert!(*cow0 != *cow1);
+ assert!(*cow0 != *cow2);
+ assert!(*cow1 != *cow2);
+}
+
+#[test]
+fn test_cowarc_clone_unique2() {
+ let mut cow0 = Arc::new(75);
+ let cow1 = cow0.clone();
+ let cow2 = cow1.clone();
+
+ assert!(75 == *cow0);
+ assert!(75 == *cow1);
+ assert!(75 == *cow2);
+
+ *Arc::make_mut(&mut cow0) += 1;
+ assert!(76 == *cow0);
+ assert!(75 == *cow1);
+ assert!(75 == *cow2);
+
+ // cow1 and cow2 should share the same contents
+ // cow0 should have a unique reference
+ assert!(*cow0 != *cow1);
+ assert!(*cow0 != *cow2);
+ assert!(*cow1 == *cow2);
+}
+
+#[test]
+fn test_cowarc_clone_weak() {
+ let mut cow0 = Arc::new(75);
+ let cow1_weak = Arc::downgrade(&cow0);
+
+ assert!(75 == *cow0);
+ assert!(75 == *cow1_weak.upgrade().unwrap());
+
+ *Arc::make_mut(&mut cow0) += 1;
+
+ assert!(76 == *cow0);
+ assert!(cow1_weak.upgrade().is_none());
+}
+
+#[test]
+fn test_live() {
+ let x = Arc::new(5);
+ let y = Arc::downgrade(&x);
+ assert!(y.upgrade().is_some());
+}
+
+#[test]
+fn test_dead() {
+ let x = Arc::new(5);
+ let y = Arc::downgrade(&x);
+ drop(x);
+ assert!(y.upgrade().is_none());
+}
+
+#[test]
+fn weak_self_cyclic() {
+ struct Cycle {
+ x: Mutex<Option<Weak<Cycle>>>,
+ }
+
+ let a = Arc::new(Cycle { x: Mutex::new(None) });
+ let b = Arc::downgrade(&a.clone());
+ *a.x.lock().unwrap() = Some(b);
+
+ // hopefully we don't double-free (or leak)...
+}
+
+#[test]
+fn drop_arc() {
+ let mut canary = atomic::AtomicUsize::new(0);
+ let x = Arc::new(Canary(&mut canary as *mut atomic::AtomicUsize));
+ drop(x);
+ assert!(canary.load(Acquire) == 1);
+}
+
+#[test]
+fn drop_arc_weak() {
+ let mut canary = atomic::AtomicUsize::new(0);
+ let arc = Arc::new(Canary(&mut canary as *mut atomic::AtomicUsize));
+ let arc_weak = Arc::downgrade(&arc);
+ assert!(canary.load(Acquire) == 0);
+ drop(arc);
+ assert!(canary.load(Acquire) == 1);
+ drop(arc_weak);
+}
+
+#[test]
+fn test_strong_count() {
+ let a = Arc::new(0);
+ assert!(Arc::strong_count(&a) == 1);
+ let w = Arc::downgrade(&a);
+ assert!(Arc::strong_count(&a) == 1);
+ let b = w.upgrade().expect("");
+ assert!(Arc::strong_count(&b) == 2);
+ assert!(Arc::strong_count(&a) == 2);
+ drop(w);
+ drop(a);
+ assert!(Arc::strong_count(&b) == 1);
+ let c = b.clone();
+ assert!(Arc::strong_count(&b) == 2);
+ assert!(Arc::strong_count(&c) == 2);
+}
+
+#[test]
+fn test_weak_count() {
+ let a = Arc::new(0);
+ assert!(Arc::strong_count(&a) == 1);
+ assert!(Arc::weak_count(&a) == 0);
+ let w = Arc::downgrade(&a);
+ assert!(Arc::strong_count(&a) == 1);
+ assert!(Arc::weak_count(&a) == 1);
+ let x = w.clone();
+ assert!(Arc::weak_count(&a) == 2);
+ drop(w);
+ drop(x);
+ assert!(Arc::strong_count(&a) == 1);
+ assert!(Arc::weak_count(&a) == 0);
+ let c = a.clone();
+ assert!(Arc::strong_count(&a) == 2);
+ assert!(Arc::weak_count(&a) == 0);
+ let d = Arc::downgrade(&c);
+ assert!(Arc::weak_count(&c) == 1);
+ assert!(Arc::strong_count(&c) == 2);
+
+ drop(a);
+ drop(c);
+ drop(d);
+}
+
+#[test]
+fn show_arc() {
+ let a = Arc::new(5);
+ assert_eq!(format!("{a:?}"), "5");
+}
+
+// Make sure deriving works with Arc<T>
+#[derive(Eq, Ord, PartialEq, PartialOrd, Clone, Debug, Default)]
+struct Foo {
+ inner: Arc<i32>,
+}
+
+#[test]
+fn test_unsized() {
+ let x: Arc<[i32]> = Arc::new([1, 2, 3]);
+ assert_eq!(format!("{x:?}"), "[1, 2, 3]");
+ let y = Arc::downgrade(&x.clone());
+ drop(x);
+ assert!(y.upgrade().is_none());
+}
+
+#[test]
+fn test_maybe_thin_unsized() {
+ // If/when custom thin DSTs exist, this test should be updated to use one
+ use std::ffi::{CStr, CString};
+
+ let x: Arc<CStr> = Arc::from(CString::new("swordfish").unwrap().into_boxed_c_str());
+ assert_eq!(format!("{x:?}"), "\"swordfish\"");
+ let y: Weak<CStr> = Arc::downgrade(&x);
+ drop(x);
+
+ // At this point, the weak points to a dropped DST
+ assert!(y.upgrade().is_none());
+ // But we still need to be able to get the alloc layout to drop.
+ // CStr has no drop glue, but custom DSTs might, and need to work.
+ drop(y);
+}
+
+#[test]
+fn test_from_owned() {
+ let foo = 123;
+ let foo_arc = Arc::from(foo);
+ assert!(123 == *foo_arc);
+}
+
+#[test]
+fn test_new_weak() {
+ let foo: Weak<usize> = Weak::new();
+ assert!(foo.upgrade().is_none());
+}
+
+#[test]
+fn test_ptr_eq() {
+ let five = Arc::new(5);
+ let same_five = five.clone();
+ let other_five = Arc::new(5);
+
+ assert!(Arc::ptr_eq(&five, &same_five));
+ assert!(!Arc::ptr_eq(&five, &other_five));
+}
+
+#[test]
+#[cfg_attr(target_os = "emscripten", ignore)]
+fn test_weak_count_locked() {
+ let mut a = Arc::new(atomic::AtomicBool::new(false));
+ let a2 = a.clone();
+ let t = thread::spawn(move || {
+ // Miri is too slow
+ let count = if cfg!(miri) { 1000 } else { 1000000 };
+ for _i in 0..count {
+ Arc::get_mut(&mut a);
+ }
+ a.store(true, SeqCst);
+ });
+
+ while !a2.load(SeqCst) {
+ let n = Arc::weak_count(&a2);
+ assert!(n < 2, "bad weak count: {}", n);
+ #[cfg(miri)] // Miri's scheduler does not guarantee liveness, and thus needs this hint.
+ std::hint::spin_loop();
+ }
+ t.join().unwrap();
+}
+
+#[test]
+fn test_from_str() {
+ let r: Arc<str> = Arc::from("foo");
+
+ assert_eq!(&r[..], "foo");
+}
+
+#[test]
+fn test_copy_from_slice() {
+ let s: &[u32] = &[1, 2, 3];
+ let r: Arc<[u32]> = Arc::from(s);
+
+ assert_eq!(&r[..], [1, 2, 3]);
+}
+
+#[test]
+fn test_clone_from_slice() {
+ #[derive(Clone, Debug, Eq, PartialEq)]
+ struct X(u32);
+
+ let s: &[X] = &[X(1), X(2), X(3)];
+ let r: Arc<[X]> = Arc::from(s);
+
+ assert_eq!(&r[..], s);
+}
+
+#[test]
+#[should_panic]
+fn test_clone_from_slice_panic() {
+ use std::string::{String, ToString};
+
+ struct Fail(u32, String);
+
+ impl Clone for Fail {
+ fn clone(&self) -> Fail {
+ if self.0 == 2 {
+ panic!();
+ }
+ Fail(self.0, self.1.clone())
+ }
+ }
+
+ let s: &[Fail] =
+ &[Fail(0, "foo".to_string()), Fail(1, "bar".to_string()), Fail(2, "baz".to_string())];
+
+ // Should panic, but not cause memory corruption
+ let _r: Arc<[Fail]> = Arc::from(s);
+}
+
+#[test]
+fn test_from_box() {
+ let b: Box<u32> = Box::new(123);
+ let r: Arc<u32> = Arc::from(b);
+
+ assert_eq!(*r, 123);
+}
+
+#[test]
+fn test_from_box_str() {
+ use std::string::String;
+
+ let s = String::from("foo").into_boxed_str();
+ let r: Arc<str> = Arc::from(s);
+
+ assert_eq!(&r[..], "foo");
+}
+
+#[test]
+fn test_from_box_slice() {
+ let s = vec![1, 2, 3].into_boxed_slice();
+ let r: Arc<[u32]> = Arc::from(s);
+
+ assert_eq!(&r[..], [1, 2, 3]);
+}
+
+#[test]
+fn test_from_box_trait() {
+ use std::fmt::Display;
+ use std::string::ToString;
+
+ let b: Box<dyn Display> = Box::new(123);
+ let r: Arc<dyn Display> = Arc::from(b);
+
+ assert_eq!(r.to_string(), "123");
+}
+
+#[test]
+fn test_from_box_trait_zero_sized() {
+ use std::fmt::Debug;
+
+ let b: Box<dyn Debug> = Box::new(());
+ let r: Arc<dyn Debug> = Arc::from(b);
+
+ assert_eq!(format!("{r:?}"), "()");
+}
+
+#[test]
+fn test_from_vec() {
+ let v = vec![1, 2, 3];
+ let r: Arc<[u32]> = Arc::from(v);
+
+ assert_eq!(&r[..], [1, 2, 3]);
+}
+
+#[test]
+fn test_downcast() {
+ use std::any::Any;
+
+ let r1: Arc<dyn Any + Send + Sync> = Arc::new(i32::MAX);
+ let r2: Arc<dyn Any + Send + Sync> = Arc::new("abc");
+
+ assert!(r1.clone().downcast::<u32>().is_err());
+
+ let r1i32 = r1.downcast::<i32>();
+ assert!(r1i32.is_ok());
+ assert_eq!(r1i32.unwrap(), Arc::new(i32::MAX));
+
+ assert!(r2.clone().downcast::<i32>().is_err());
+
+ let r2str = r2.downcast::<&'static str>();
+ assert!(r2str.is_ok());
+ assert_eq!(r2str.unwrap(), Arc::new("abc"));
+}
+
+#[test]
+fn test_array_from_slice() {
+ let v = vec![1, 2, 3];
+ let r: Arc<[u32]> = Arc::from(v);
+
+ let a: Result<Arc<[u32; 3]>, _> = r.clone().try_into();
+ assert!(a.is_ok());
+
+ let a: Result<Arc<[u32; 2]>, _> = r.clone().try_into();
+ assert!(a.is_err());
+}
+
+#[test]
+fn test_arc_cyclic_with_zero_refs() {
+ struct ZeroRefs {
+ inner: Weak<ZeroRefs>,
+ }
+ let zero_refs = Arc::new_cyclic(|inner| {
+ assert_eq!(inner.strong_count(), 0);
+ assert!(inner.upgrade().is_none());
+ ZeroRefs { inner: Weak::new() }
+ });
+
+ assert_eq!(Arc::strong_count(&zero_refs), 1);
+ assert_eq!(Arc::weak_count(&zero_refs), 0);
+ assert_eq!(zero_refs.inner.strong_count(), 0);
+ assert_eq!(zero_refs.inner.weak_count(), 0);
+}
+
+#[test]
+fn test_arc_new_cyclic_one_ref() {
+ struct OneRef {
+ inner: Weak<OneRef>,
+ }
+ let one_ref = Arc::new_cyclic(|inner| {
+ assert_eq!(inner.strong_count(), 0);
+ assert!(inner.upgrade().is_none());
+ OneRef { inner: inner.clone() }
+ });
+
+ assert_eq!(Arc::strong_count(&one_ref), 1);
+ assert_eq!(Arc::weak_count(&one_ref), 1);
+
+ let one_ref2 = Weak::upgrade(&one_ref.inner).unwrap();
+ assert!(Arc::ptr_eq(&one_ref, &one_ref2));
+
+ assert_eq!(Arc::strong_count(&one_ref), 2);
+ assert_eq!(Arc::weak_count(&one_ref), 1);
+}
+
+#[test]
+fn test_arc_cyclic_two_refs() {
+ struct TwoRefs {
+ inner1: Weak<TwoRefs>,
+ inner2: Weak<TwoRefs>,
+ }
+ let two_refs = Arc::new_cyclic(|inner| {
+ assert_eq!(inner.strong_count(), 0);
+ assert!(inner.upgrade().is_none());
+
+ let inner1 = inner.clone();
+ let inner2 = inner1.clone();
+
+ TwoRefs { inner1, inner2 }
+ });
+
+ assert_eq!(Arc::strong_count(&two_refs), 1);
+ assert_eq!(Arc::weak_count(&two_refs), 2);
+
+ let two_refs1 = Weak::upgrade(&two_refs.inner1).unwrap();
+ assert!(Arc::ptr_eq(&two_refs, &two_refs1));
+
+ let two_refs2 = Weak::upgrade(&two_refs.inner2).unwrap();
+ assert!(Arc::ptr_eq(&two_refs, &two_refs2));
+
+ assert_eq!(Arc::strong_count(&two_refs), 3);
+ assert_eq!(Arc::weak_count(&two_refs), 2);
+}
diff --git a/library/alloc/src/task.rs b/library/alloc/src/task.rs
new file mode 100644
index 000000000..528ee4ff1
--- /dev/null
+++ b/library/alloc/src/task.rs
@@ -0,0 +1,147 @@
+#![stable(feature = "wake_trait", since = "1.51.0")]
+//! Types and Traits for working with asynchronous tasks.
+use core::mem::ManuallyDrop;
+use core::task::{RawWaker, RawWakerVTable, Waker};
+
+use crate::sync::Arc;
+
+/// The implementation of waking a task on an executor.
+///
+/// This trait can be used to create a [`Waker`]. An executor can define an
+/// implementation of this trait, and use that to construct a Waker to pass
+/// to the tasks that are executed on that executor.
+///
+/// This trait is a memory-safe and ergonomic alternative to constructing a
+/// [`RawWaker`]. It supports the common executor design in which the data used
+/// to wake up a task is stored in an [`Arc`]. Some executors (especially
+/// those for embedded systems) cannot use this API, which is why [`RawWaker`]
+/// exists as an alternative for those systems.
+///
+/// [arc]: ../../std/sync/struct.Arc.html
+///
+/// # Examples
+///
+/// A basic `block_on` function that takes a future and runs it to completion on
+/// the current thread.
+///
+/// **Note:** This example trades correctness for simplicity. In order to prevent
+/// deadlocks, production-grade implementations will also need to handle
+/// intermediate calls to `thread::unpark` as well as nested invocations.
+///
+/// ```rust
+/// use std::future::Future;
+/// use std::sync::Arc;
+/// use std::task::{Context, Poll, Wake};
+/// use std::thread::{self, Thread};
+///
+/// /// A waker that wakes up the current thread when called.
+/// struct ThreadWaker(Thread);
+///
+/// impl Wake for ThreadWaker {
+/// fn wake(self: Arc<Self>) {
+/// self.0.unpark();
+/// }
+/// }
+///
+/// /// Run a future to completion on the current thread.
+/// fn block_on<T>(fut: impl Future<Output = T>) -> T {
+/// // Pin the future so it can be polled.
+/// let mut fut = Box::pin(fut);
+///
+/// // Create a new context to be passed to the future.
+/// let t = thread::current();
+/// let waker = Arc::new(ThreadWaker(t)).into();
+/// let mut cx = Context::from_waker(&waker);
+///
+/// // Run the future to completion.
+/// loop {
+/// match fut.as_mut().poll(&mut cx) {
+/// Poll::Ready(res) => return res,
+/// Poll::Pending => thread::park(),
+/// }
+/// }
+/// }
+///
+/// block_on(async {
+/// println!("Hi from inside a future!");
+/// });
+/// ```
+#[stable(feature = "wake_trait", since = "1.51.0")]
+pub trait Wake {
+ /// Wake this task.
+ #[stable(feature = "wake_trait", since = "1.51.0")]
+ fn wake(self: Arc<Self>);
+
+ /// Wake this task without consuming the waker.
+ ///
+ /// If an executor supports a cheaper way to wake without consuming the
+ /// waker, it should override this method. By default, it clones the
+ /// [`Arc`] and calls [`wake`] on the clone.
+ ///
+ /// [`wake`]: Wake::wake
+ #[stable(feature = "wake_trait", since = "1.51.0")]
+ fn wake_by_ref(self: &Arc<Self>) {
+ self.clone().wake();
+ }
+}
+
+#[stable(feature = "wake_trait", since = "1.51.0")]
+impl<W: Wake + Send + Sync + 'static> From<Arc<W>> for Waker {
+ /// Use a `Wake`-able type as a `Waker`.
+ ///
+ /// No heap allocations or atomic operations are used for this conversion.
+ fn from(waker: Arc<W>) -> Waker {
+ // SAFETY: This is safe because raw_waker safely constructs
+ // a RawWaker from Arc<W>.
+ unsafe { Waker::from_raw(raw_waker(waker)) }
+ }
+}
+
+#[stable(feature = "wake_trait", since = "1.51.0")]
+impl<W: Wake + Send + Sync + 'static> From<Arc<W>> for RawWaker {
+ /// Use a `Wake`-able type as a `RawWaker`.
+ ///
+ /// No heap allocations or atomic operations are used for this conversion.
+ fn from(waker: Arc<W>) -> RawWaker {
+ raw_waker(waker)
+ }
+}
+
+// NB: This private function for constructing a RawWaker is used, rather than
+// inlining this into the `From<Arc<W>> for RawWaker` impl, to ensure that
+// the safety of `From<Arc<W>> for Waker` does not depend on the correct
+// trait dispatch - instead both impls call this function directly and
+// explicitly.
+#[inline(always)]
+fn raw_waker<W: Wake + Send + Sync + 'static>(waker: Arc<W>) -> RawWaker {
+ // Increment the reference count of the arc to clone it.
+ unsafe fn clone_waker<W: Wake + Send + Sync + 'static>(waker: *const ()) -> RawWaker {
+ unsafe { Arc::increment_strong_count(waker as *const W) };
+ RawWaker::new(
+ waker as *const (),
+ &RawWakerVTable::new(clone_waker::<W>, wake::<W>, wake_by_ref::<W>, drop_waker::<W>),
+ )
+ }
+
+ // Wake by value, moving the Arc into the Wake::wake function
+ unsafe fn wake<W: Wake + Send + Sync + 'static>(waker: *const ()) {
+ let waker = unsafe { Arc::from_raw(waker as *const W) };
+ <W as Wake>::wake(waker);
+ }
+
+ // Wake by reference, wrapping the waker in ManuallyDrop to avoid dropping it
+ unsafe fn wake_by_ref<W: Wake + Send + Sync + 'static>(waker: *const ()) {
+ let waker = unsafe { ManuallyDrop::new(Arc::from_raw(waker as *const W)) };
+ <W as Wake>::wake_by_ref(&waker);
+ }
+
+ // Decrement the reference count of the Arc on drop
+ unsafe fn drop_waker<W: Wake + Send + Sync + 'static>(waker: *const ()) {
+ unsafe { Arc::decrement_strong_count(waker as *const W) };
+ }
+
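+ // Hand ownership of the `Arc` to the `RawWaker` as a type-erased pointer; the vtable
+ // functions above recover it (by cloning, consuming, borrowing, or dropping) as needed.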
+ RawWaker::new(
+ Arc::into_raw(waker) as *const (),
+ &RawWakerVTable::new(clone_waker::<W>, wake::<W>, wake_by_ref::<W>, drop_waker::<W>),
+ )
+}
diff --git a/library/alloc/src/tests.rs b/library/alloc/src/tests.rs
new file mode 100644
index 000000000..299ed156a
--- /dev/null
+++ b/library/alloc/src/tests.rs
@@ -0,0 +1,151 @@
+//! Tests for the `boxed` module.
+
+use core::any::Any;
+use core::clone::Clone;
+use core::convert::TryInto;
+use core::ops::Deref;
+use core::result::Result::{Err, Ok};
+
+use std::boxed::Box;
+
+#[test]
+fn test_owned_clone() {
+ let a = Box::new(5);
+ let b: Box<i32> = a.clone();
+ assert!(a == b);
+}
+
+#[derive(PartialEq, Eq)]
+struct Test;
+
+#[test]
+fn any_move() {
+ let a = Box::new(8) as Box<dyn Any>;
+ let b = Box::new(Test) as Box<dyn Any>;
+
+ match a.downcast::<i32>() {
+ Ok(a) => {
+ assert!(a == Box::new(8));
+ }
+ Err(..) => panic!(),
+ }
+ match b.downcast::<Test>() {
+ Ok(a) => {
+ assert!(a == Box::new(Test));
+ }
+ Err(..) => panic!(),
+ }
+
+ let a = Box::new(8) as Box<dyn Any>;
+ let b = Box::new(Test) as Box<dyn Any>;
+
+ assert!(a.downcast::<Box<Test>>().is_err());
+ assert!(b.downcast::<Box<i32>>().is_err());
+}
+
+#[test]
+fn test_show() {
+ let a = Box::new(8) as Box<dyn Any>;
+ let b = Box::new(Test) as Box<dyn Any>;
+ let a_str = format!("{a:?}");
+ let b_str = format!("{b:?}");
+ assert_eq!(a_str, "Any { .. }");
+ assert_eq!(b_str, "Any { .. }");
+
+ static EIGHT: usize = 8;
+ static TEST: Test = Test;
+ let a = &EIGHT as &dyn Any;
+ let b = &TEST as &dyn Any;
+ let s = format!("{a:?}");
+ assert_eq!(s, "Any { .. }");
+ let s = format!("{b:?}");
+ assert_eq!(s, "Any { .. }");
+}
+
+#[test]
+fn deref() {
+ fn homura<T: Deref<Target = i32>>(_: T) {}
+ homura(Box::new(765));
+}
+
+#[test]
+fn raw_sized() {
+ let x = Box::new(17);
+ let p = Box::into_raw(x);
+ unsafe {
+ assert_eq!(17, *p);
+ *p = 19;
+ let y = Box::from_raw(p);
+ assert_eq!(19, *y);
+ }
+}
+
+#[test]
+fn raw_trait() {
+ trait Foo {
+ fn get(&self) -> u32;
+ fn set(&mut self, value: u32);
+ }
+
+ struct Bar(u32);
+
+ impl Foo for Bar {
+ fn get(&self) -> u32 {
+ self.0
+ }
+
+ fn set(&mut self, value: u32) {
+ self.0 = value;
+ }
+ }
+
+ let x: Box<dyn Foo> = Box::new(Bar(17));
+ let p = Box::into_raw(x);
+ unsafe {
+ assert_eq!(17, (*p).get());
+ (*p).set(19);
+ let y: Box<dyn Foo> = Box::from_raw(p);
+ assert_eq!(19, y.get());
+ }
+}
+
+#[test]
+fn f64_slice() {
+ let slice: &[f64] = &[-1.0, 0.0, 1.0, f64::INFINITY];
+ let boxed: Box<[f64]> = Box::from(slice);
+ assert_eq!(&*boxed, slice)
+}
+
+#[test]
+fn i64_slice() {
+ let slice: &[i64] = &[i64::MIN, -2, -1, 0, 1, 2, i64::MAX];
+ let boxed: Box<[i64]> = Box::from(slice);
+ assert_eq!(&*boxed, slice)
+}
+
+#[test]
+fn str_slice() {
+ let s = "Hello, world!";
+ let boxed: Box<str> = Box::from(s);
+ assert_eq!(&*boxed, s)
+}
+
+#[test]
+fn boxed_slice_from_iter() {
+ let iter = 0..100;
+ let boxed: Box<[u32]> = iter.collect();
+ assert_eq!(boxed.len(), 100);
+ assert_eq!(boxed[7], 7);
+}
+
+#[test]
+fn test_array_from_slice() {
+ let v = vec![1, 2, 3];
+ let r: Box<[u32]> = v.into_boxed_slice();
+
+ let a: Result<Box<[u32; 3]>, _> = r.clone().try_into();
+ assert!(a.is_ok());
+
+ let a: Result<Box<[u32; 2]>, _> = r.clone().try_into();
+ assert!(a.is_err());
+}
diff --git a/library/alloc/src/vec/cow.rs b/library/alloc/src/vec/cow.rs
new file mode 100644
index 000000000..64943a273
--- /dev/null
+++ b/library/alloc/src/vec/cow.rs
@@ -0,0 +1,53 @@
+use crate::borrow::Cow;
+use core::iter::FromIterator;
+
+use super::Vec;
+
+#[stable(feature = "cow_from_vec", since = "1.8.0")]
+impl<'a, T: Clone> From<&'a [T]> for Cow<'a, [T]> {
+ /// Creates a [`Borrowed`] variant of [`Cow`]
+ /// from a slice.
+ ///
+ /// This conversion does not allocate or clone the data.
+ ///
+ /// [`Borrowed`]: crate::borrow::Cow::Borrowed
+ fn from(s: &'a [T]) -> Cow<'a, [T]> {
+ Cow::Borrowed(s)
+ }
+}
+
+#[stable(feature = "cow_from_vec", since = "1.8.0")]
+impl<'a, T: Clone> From<Vec<T>> for Cow<'a, [T]> {
+ /// Creates an [`Owned`] variant of [`Cow`]
+ /// from an owned instance of [`Vec`].
+ ///
+ /// This conversion does not allocate or clone the data.
+ ///
+ /// [`Owned`]: crate::borrow::Cow::Owned
+ fn from(v: Vec<T>) -> Cow<'a, [T]> {
+ Cow::Owned(v)
+ }
+}
+
+#[stable(feature = "cow_from_vec_ref", since = "1.28.0")]
+impl<'a, T: Clone> From<&'a Vec<T>> for Cow<'a, [T]> {
+ /// Creates a [`Borrowed`] variant of [`Cow`]
+ /// from a reference to [`Vec`].
+ ///
+ /// This conversion does not allocate or clone the data.
+ ///
+ /// [`Borrowed`]: crate::borrow::Cow::Borrowed
+ fn from(v: &'a Vec<T>) -> Cow<'a, [T]> {
+ Cow::Borrowed(v.as_slice())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T> FromIterator<T> for Cow<'a, [T]>
+where
+ T: Clone,
+{
+ fn from_iter<I: IntoIterator<Item = T>>(it: I) -> Cow<'a, [T]> {
+ Cow::Owned(FromIterator::from_iter(it))
+ }
+}
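A short sketch of how these conversions behave (illustrative only; `matches!` is used just to inspect the resulting variant):

```rust
use std::borrow::Cow;

fn main() {
    let v = vec![1, 2, 3];

    // Borrowing conversion: no allocation, no clone.
    let borrowed: Cow<'_, [i32]> = Cow::from(&v);
    assert!(matches!(borrowed, Cow::Borrowed(_)));

    // Owning conversion: the Vec is moved into the Cow.
    let owned: Cow<'_, [i32]> = Cow::from(vec![4, 5, 6]);
    assert!(matches!(owned, Cow::Owned(_)));

    // Collecting from an iterator always produces the Owned variant.
    let collected: Cow<'_, [i32]> = (0..3).collect();
    assert_eq!(&*collected, &[0, 1, 2]);
}
```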
diff --git a/library/alloc/src/vec/drain.rs b/library/alloc/src/vec/drain.rs
new file mode 100644
index 000000000..5cdee0bd4
--- /dev/null
+++ b/library/alloc/src/vec/drain.rs
@@ -0,0 +1,184 @@
+use crate::alloc::{Allocator, Global};
+use core::fmt;
+use core::iter::{FusedIterator, TrustedLen};
+use core::mem;
+use core::ptr::{self, NonNull};
+use core::slice::{self};
+
+use super::Vec;
+
+/// A draining iterator for `Vec<T>`.
+///
+/// This `struct` is created by [`Vec::drain`].
+/// See its documentation for more.
+///
+/// # Example
+///
+/// ```
+/// let mut v = vec![0, 1, 2];
+/// let iter: std::vec::Drain<_> = v.drain(..);
+/// ```
+#[stable(feature = "drain", since = "1.6.0")]
+pub struct Drain<
+ 'a,
+ T: 'a,
+ #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator + 'a = Global,
+> {
+ /// Index of tail to preserve
+ pub(super) tail_start: usize,
+ /// Length of tail
+ pub(super) tail_len: usize,
+ /// Current remaining range to remove
+ pub(super) iter: slice::Iter<'a, T>,
+ pub(super) vec: NonNull<Vec<T, A>>,
+}
+
+#[stable(feature = "collection_debug", since = "1.17.0")]
+impl<T: fmt::Debug, A: Allocator> fmt::Debug for Drain<'_, T, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("Drain").field(&self.iter.as_slice()).finish()
+ }
+}
+
+impl<'a, T, A: Allocator> Drain<'a, T, A> {
+ /// Returns the remaining items of this iterator as a slice.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec!['a', 'b', 'c'];
+ /// let mut drain = vec.drain(..);
+ /// assert_eq!(drain.as_slice(), &['a', 'b', 'c']);
+ /// let _ = drain.next().unwrap();
+ /// assert_eq!(drain.as_slice(), &['b', 'c']);
+ /// ```
+ #[must_use]
+ #[stable(feature = "vec_drain_as_slice", since = "1.46.0")]
+ pub fn as_slice(&self) -> &[T] {
+ self.iter.as_slice()
+ }
+
+ /// Returns a reference to the underlying allocator.
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[must_use]
+ #[inline]
+ pub fn allocator(&self) -> &A {
+ unsafe { self.vec.as_ref().allocator() }
+ }
+}
+
+#[stable(feature = "vec_drain_as_slice", since = "1.46.0")]
+impl<'a, T, A: Allocator> AsRef<[T]> for Drain<'a, T, A> {
+ fn as_ref(&self) -> &[T] {
+ self.as_slice()
+ }
+}
+
+#[stable(feature = "drain", since = "1.6.0")]
+unsafe impl<T: Sync, A: Sync + Allocator> Sync for Drain<'_, T, A> {}
+#[stable(feature = "drain", since = "1.6.0")]
+unsafe impl<T: Send, A: Send + Allocator> Send for Drain<'_, T, A> {}
+
+#[stable(feature = "drain", since = "1.6.0")]
+impl<T, A: Allocator> Iterator for Drain<'_, T, A> {
+ type Item = T;
+
+ #[inline]
+ fn next(&mut self) -> Option<T> {
+ self.iter.next().map(|elt| unsafe { ptr::read(elt as *const _) })
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.iter.size_hint()
+ }
+}
+
+#[stable(feature = "drain", since = "1.6.0")]
+impl<T, A: Allocator> DoubleEndedIterator for Drain<'_, T, A> {
+ #[inline]
+ fn next_back(&mut self) -> Option<T> {
+ self.iter.next_back().map(|elt| unsafe { ptr::read(elt as *const _) })
+ }
+}
+
+#[stable(feature = "drain", since = "1.6.0")]
+impl<T, A: Allocator> Drop for Drain<'_, T, A> {
+ fn drop(&mut self) {
+ /// Moves back the un-`Drain`ed elements to restore the original `Vec`.
+ struct DropGuard<'r, 'a, T, A: Allocator>(&'r mut Drain<'a, T, A>);
+
+ impl<'r, 'a, T, A: Allocator> Drop for DropGuard<'r, 'a, T, A> {
+ fn drop(&mut self) {
+ if self.0.tail_len > 0 {
+ unsafe {
+ let source_vec = self.0.vec.as_mut();
+ // memmove back untouched tail, update to new length
+ let start = source_vec.len();
+ let tail = self.0.tail_start;
+ if tail != start {
+ let src = source_vec.as_ptr().add(tail);
+ let dst = source_vec.as_mut_ptr().add(start);
+ ptr::copy(src, dst, self.0.tail_len);
+ }
+ source_vec.set_len(start + self.0.tail_len);
+ }
+ }
+ }
+ }
+
+ let iter = mem::replace(&mut self.iter, (&mut []).iter());
+ let drop_len = iter.len();
+
+ let mut vec = self.vec;
+
+ if mem::size_of::<T>() == 0 {
+ // ZSTs have no identity, so we don't need to move them around; we only need to drop the correct amount.
+ // This can be achieved by manipulating the Vec length instead of moving values out of `iter`.
+ unsafe {
+ let vec = vec.as_mut();
+ let old_len = vec.len();
+ vec.set_len(old_len + drop_len + self.tail_len);
+ vec.truncate(old_len + self.tail_len);
+ }
+
+ return;
+ }
+
+ // ensure elements are moved back into their appropriate places, even when drop_in_place panics
+ let _guard = DropGuard(self);
+
+ if drop_len == 0 {
+ return;
+ }
+
+ // as_slice() must only be called when iter.len() is > 0 because
+ // vec::Splice modifies vec::Drain fields and may grow the vec, which would invalidate
+ // the iterator's internal pointers. Creating a reference to deallocated memory
+ // is invalid even when it is zero-length.
+ let drop_ptr = iter.as_slice().as_ptr();
+
+ unsafe {
+ // drop_ptr comes from a slice::Iter which only gives us a &[T] but for drop_in_place
+ // a pointer with mutable provenance is necessary. Therefore we must reconstruct
+ // it from the original vec but also avoid creating a &mut to the front since that could
+ // invalidate raw pointers to it which some unsafe code might rely on.
+ let vec_ptr = vec.as_mut().as_mut_ptr();
+ let drop_offset = drop_ptr.sub_ptr(vec_ptr);
+ let to_drop = ptr::slice_from_raw_parts_mut(vec_ptr.add(drop_offset), drop_len);
+ ptr::drop_in_place(to_drop);
+ }
+ }
+}
+
+#[stable(feature = "drain", since = "1.6.0")]
+impl<T, A: Allocator> ExactSizeIterator for Drain<'_, T, A> {
+ fn is_empty(&self) -> bool {
+ self.iter.is_empty()
+ }
+}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<T, A: Allocator> TrustedLen for Drain<'_, T, A> {}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T, A: Allocator> FusedIterator for Drain<'_, T, A> {}
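A brief sketch of the tail-preserving behavior implemented by the `Drop` guard above, using only the stable `Vec::drain` API:

```rust
fn main() {
    let mut v = vec![1, 2, 3, 4, 5];
    {
        // Drain the middle range but only consume part of the iterator.
        let mut d = v.drain(1..4);
        assert_eq!(d.next(), Some(2));
        // When `d` is dropped here, the unread elements 3 and 4 are dropped
        // as well, and the tail (5) is moved back next to the head (1).
    }
    assert_eq!(v, [1, 5]);
}
```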
diff --git a/library/alloc/src/vec/drain_filter.rs b/library/alloc/src/vec/drain_filter.rs
new file mode 100644
index 000000000..3c37c92ae
--- /dev/null
+++ b/library/alloc/src/vec/drain_filter.rs
@@ -0,0 +1,143 @@
+use crate::alloc::{Allocator, Global};
+use core::ptr::{self};
+use core::slice::{self};
+
+use super::Vec;
+
+/// An iterator which uses a closure to determine if an element should be removed.
+///
+/// This struct is created by [`Vec::drain_filter`].
+/// See its documentation for more.
+///
+/// # Example
+///
+/// ```
+/// #![feature(drain_filter)]
+///
+/// let mut v = vec![0, 1, 2];
+/// let iter: std::vec::DrainFilter<_, _> = v.drain_filter(|x| *x % 2 == 0);
+/// ```
+#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
+#[derive(Debug)]
+pub struct DrainFilter<
+ 'a,
+ T,
+ F,
+ #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
+> where
+ F: FnMut(&mut T) -> bool,
+{
+ pub(super) vec: &'a mut Vec<T, A>,
+ /// The index of the item that will be inspected by the next call to `next`.
+ pub(super) idx: usize,
+ /// The number of items that have been drained (removed) thus far.
+ pub(super) del: usize,
+ /// The original length of `vec` prior to draining.
+ pub(super) old_len: usize,
+ /// The filter test predicate.
+ pub(super) pred: F,
+ /// A flag that indicates a panic has occurred in the filter test predicate.
+ /// This is used as a hint in the drop implementation to prevent consumption
+ /// of the remainder of the `DrainFilter`. Any unprocessed items will be
+ /// backshifted in the `vec`, but no further items will be dropped or
+ /// tested by the filter predicate.
+ pub(super) panic_flag: bool,
+}
+
+impl<T, F, A: Allocator> DrainFilter<'_, T, F, A>
+where
+ F: FnMut(&mut T) -> bool,
+{
+ /// Returns a reference to the underlying allocator.
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[inline]
+ pub fn allocator(&self) -> &A {
+ self.vec.allocator()
+ }
+}
+
+#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
+impl<T, F, A: Allocator> Iterator for DrainFilter<'_, T, F, A>
+where
+ F: FnMut(&mut T) -> bool,
+{
+ type Item = T;
+
+ fn next(&mut self) -> Option<T> {
+ unsafe {
+ while self.idx < self.old_len {
+ let i = self.idx;
+ let v = slice::from_raw_parts_mut(self.vec.as_mut_ptr(), self.old_len);
+ self.panic_flag = true;
+ let drained = (self.pred)(&mut v[i]);
+ self.panic_flag = false;
+ // Update the index *after* the predicate is called. If the index
+ // is updated prior and the predicate panics, the element at this
+ // index would be leaked.
+ self.idx += 1;
+ if drained {
+ self.del += 1;
+ return Some(ptr::read(&v[i]));
+ } else if self.del > 0 {
+ let del = self.del;
+ let src: *const T = &v[i];
+ let dst: *mut T = &mut v[i - del];
+ ptr::copy_nonoverlapping(src, dst, 1);
+ }
+ }
+ None
+ }
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (0, Some(self.old_len - self.idx))
+ }
+}
+
+#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
+impl<T, F, A: Allocator> Drop for DrainFilter<'_, T, F, A>
+where
+ F: FnMut(&mut T) -> bool,
+{
+ fn drop(&mut self) {
+ struct BackshiftOnDrop<'a, 'b, T, F, A: Allocator>
+ where
+ F: FnMut(&mut T) -> bool,
+ {
+ drain: &'b mut DrainFilter<'a, T, F, A>,
+ }
+
+ impl<'a, 'b, T, F, A: Allocator> Drop for BackshiftOnDrop<'a, 'b, T, F, A>
+ where
+ F: FnMut(&mut T) -> bool,
+ {
+ fn drop(&mut self) {
+ unsafe {
+ if self.drain.idx < self.drain.old_len && self.drain.del > 0 {
+ // This is a pretty messed up state, and there isn't really an
+ // obviously right thing to do. We don't want to keep trying
+ // to execute `pred`, so we just backshift all the unprocessed
+ // elements and tell the vec that they still exist. The backshift
+ // is required to prevent a double-drop of the last successfully
+ // drained item prior to a panic in the predicate.
+ let ptr = self.drain.vec.as_mut_ptr();
+ let src = ptr.add(self.drain.idx);
+ let dst = src.sub(self.drain.del);
+ let tail_len = self.drain.old_len - self.drain.idx;
+ src.copy_to(dst, tail_len);
+ }
+ self.drain.vec.set_len(self.drain.old_len - self.drain.del);
+ }
+ }
+ }
+
+ let backshift = BackshiftOnDrop { drain: self };
+
+ // Attempt to consume any remaining elements if the filter predicate
+ // has not yet panicked. We'll backshift any remaining elements
+ // whether we've already panicked or if the consumption here panics.
+ if !backshift.drain.panic_flag {
+ backshift.drain.for_each(drop);
+ }
+ }
+}
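A small usage sketch of the backshifting behavior described above; `drain_filter` is unstable, so this assumes a nightly toolchain with the feature enabled:

```rust
// Unstable API: requires a nightly toolchain (tracking issue #43244).
#![feature(drain_filter)]

fn main() {
    let mut v = vec![1, 2, 3, 4, 5, 6];

    // Removed elements are yielded; kept elements are backshifted in place.
    let evens: Vec<i32> = v.drain_filter(|x| *x % 2 == 0).collect();

    assert_eq!(evens, [2, 4, 6]);
    assert_eq!(v, [1, 3, 5]);
}
```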
diff --git a/library/alloc/src/vec/in_place_collect.rs b/library/alloc/src/vec/in_place_collect.rs
new file mode 100644
index 000000000..55dcb84ad
--- /dev/null
+++ b/library/alloc/src/vec/in_place_collect.rs
@@ -0,0 +1,302 @@
+//! Inplace iterate-and-collect specialization for `Vec`
+//!
+//! Note: This documents Vec internals; some of the following sections explain implementation
+//! details and are best read together with the source of this module.
+//!
+//! The specialization in this module applies to iterators in the shape of
+//! `source.adapter().adapter().adapter().collect::<Vec<U>>()`
+//! where `source` is an owning iterator obtained from [`Vec<T>`], [`Box<[T]>`][box] (by conversion to `Vec`)
+//! or [`BinaryHeap<T>`], the adapters each consume one or more items per step
+//! (represented by [`InPlaceIterable`]), provide transitive access to `source` (via [`SourceIter`])
+//! and thus the underlying allocation. And finally the layouts of `T` and `U` must
+//! have the same size and alignment; this is currently ensured via const eval instead of trait bounds
+//! in the specialized [`SpecFromIter`] implementation.
+//!
+//! [`BinaryHeap<T>`]: crate::collections::BinaryHeap
+//! [box]: crate::boxed::Box
+//!
+//! By extension some other collections which use `collect::<Vec<_>>()` internally in their
+//! `FromIterator` implementation benefit from this too.
+//!
+//! Access to the underlying source goes through a further layer of indirection via the private
+//! trait [`AsVecIntoIter`] to hide the implementation detail that other collections may use
+//! `vec::IntoIter` internally.
+//!
+//! In-place iteration depends on the interaction of several unsafe traits, implementation
+//! details of multiple parts in the iterator pipeline and often requires holistic reasoning
+//! across multiple structs since iterators are executed cooperatively rather than having
+//! a central evaluator/visitor struct executing all iterator components.
+//!
+//! # Reading from and writing to the same allocation
+//!
+//! By its nature collecting in place means that the reader and writer side of the iterator
+//! use the same allocation. Since `try_fold()` (used in [`SpecInPlaceCollect`]) takes a
+//! reference to the iterator for the duration of the iteration that means we can't interleave
+//! the step of reading a value and getting a reference to write to. Instead raw pointers must be
+//! used on the reader and writer side.
+//!
+//! That writes never clobber a yet-to-be-read item is ensured by the [`InPlaceIterable`] requirements.
+//!
+//! # Layout constraints
+//!
+//! [`Allocator`] requires that `allocate()` and `deallocate()` have matching alignment and size.
+//! Additionally this specialization doesn't make sense for ZSTs as there is no reallocation to
+//! avoid and it would make pointer arithmetic more difficult.
+//!
+//! [`Allocator`]: core::alloc::Allocator
+//!
+//! # Drop- and panic-safety
+//!
+//! Iteration can panic, requiring dropping the already written parts but also the remainder of
+//! the source. Iteration can also leave some source items unconsumed which must be dropped.
+//! All those drops in turn can panic which then must either leak the allocation or abort to avoid
+//! double-drops.
+//!
+//! This is handled by the [`InPlaceDrop`] guard for sink items (`U`) and by
+//! [`vec::IntoIter::forget_allocation_drop_remaining()`] for remaining source items (`T`).
+//!
+//! [`vec::IntoIter::forget_allocation_drop_remaining()`]: super::IntoIter::forget_allocation_drop_remaining()
+//!
+//! # O(1) collect
+//!
+//! The main iteration itself is further specialized when the iterator implements
+//! [`TrustedRandomAccessNoCoerce`] to let the optimizer see that it is a counted loop with a single
+//! [induction variable]. This can turn some iterators into a noop, i.e. it reduces them from O(n) to
+//! O(1). This particular optimization is quite fickle and doesn't always work, see [#79308]
+//!
+//! [#79308]: https://github.com/rust-lang/rust/issues/79308
+//! [induction variable]: https://en.wikipedia.org/wiki/Induction_variable
+//!
+//! Since unchecked accesses through that trait do not advance the read pointer of `IntoIter`
+//! this would interact unsoundly with the requirements about dropping the tail described above.
+//! But since the normal `Drop` implementation of `IntoIter` would suffer from the same problem it
+//! is only correct for `TrustedRandomAccessNoCoerce` to be implemented when the items don't
+//! have a destructor. Thus that implicit requirement also makes the specialization safe to use for
+//! in-place collection.
+//! Note that this safety concern is about the correctness of `impl Drop for IntoIter`,
+//! not the guarantees of `InPlaceIterable`.
+//!
+//! # Adapter implementations
+//!
+//! The invariants for adapters are documented in [`SourceIter`] and [`InPlaceIterable`], but
+//! getting them right can be rather subtle for multiple, sometimes non-local reasons.
+//! For example `InPlaceIterable` would be valid to implement for [`Peekable`], except
+//! that it is stateful and cloneable, and `IntoIter`'s clone implementation shortens the underlying
+//! allocation, which means that if the iterator has been peeked and then gets cloned, there is no longer
+//! enough room, thus breaking an invariant ([#85322]).
+//!
+//! [#85322]: https://github.com/rust-lang/rust/issues/85322
+//! [`Peekable`]: core::iter::Peekable
+//!
+//!
+//! # Examples
+//!
+//! Some cases that are optimized by this specialization, more can be found in the `Vec`
+//! benchmarks:
+//!
+//! ```rust
+//! # #[allow(dead_code)]
+//! /// Converts a usize vec into an isize one.
+//! pub fn cast(vec: Vec<usize>) -> Vec<isize> {
+//! // Does not allocate, free or panic. On optlevel>=2 it does not loop.
+//! // Of course this particular case could and should be written with `into_raw_parts` and
+//! // `from_raw_parts` instead.
+//! vec.into_iter().map(|u| u as isize).collect()
+//! }
+//! ```
+//!
+//! ```rust
+//! # #[allow(dead_code)]
+//! /// Drops remaining items in `src` and if the layouts of `T` and `U` match it
+//! /// returns an empty Vec backed by the original allocation. Otherwise it returns a new
+//! /// empty vec.
+//! pub fn recycle_allocation<T, U>(src: Vec<T>) -> Vec<U> {
+//! src.into_iter().filter_map(|_| None).collect()
+//! }
+//! ```
+//!
+//! ```rust
+//! let vec = vec![13usize; 1024];
+//! let _ = vec.into_iter()
+//! .enumerate()
+//! .filter_map(|(idx, val)| if idx % 2 == 0 { Some(val+idx) } else {None})
+//! .collect::<Vec<_>>();
+//!
+//! // is equivalent to the following, but doesn't require bounds checks
+//!
+//! let mut vec = vec![13usize; 1024];
+//! let mut write_idx = 0;
+//! for idx in 0..vec.len() {
+//! if idx % 2 == 0 {
+//! vec[write_idx] = vec[idx] + idx;
+//! write_idx += 1;
+//! }
+//! }
+//! vec.truncate(write_idx);
+//! ```
+use core::iter::{InPlaceIterable, SourceIter, TrustedRandomAccessNoCoerce};
+use core::mem::{self, ManuallyDrop};
+use core::ptr::{self};
+
+use super::{InPlaceDrop, SpecFromIter, SpecFromIterNested, Vec};
+
+/// Specialization marker for collecting an iterator pipeline into a Vec while reusing the
+/// source allocation, i.e. executing the pipeline in place.
+#[rustc_unsafe_specialization_marker]
+pub(super) trait InPlaceIterableMarker {}
+
+impl<T> InPlaceIterableMarker for T where T: InPlaceIterable {}
+
+impl<T, I> SpecFromIter<T, I> for Vec<T>
+where
+ I: Iterator<Item = T> + SourceIter<Source: AsVecIntoIter> + InPlaceIterableMarker,
+{
+ default fn from_iter(mut iterator: I) -> Self {
+ // See "Layout constraints" section in the module documentation. We rely on const
+ // optimization here since these conditions currently cannot be expressed as trait bounds
+ if mem::size_of::<T>() == 0
+ || mem::size_of::<T>()
+ != mem::size_of::<<<I as SourceIter>::Source as AsVecIntoIter>::Item>()
+ || mem::align_of::<T>()
+ != mem::align_of::<<<I as SourceIter>::Source as AsVecIntoIter>::Item>()
+ {
+ // fallback to more generic implementations
+ return SpecFromIterNested::from_iter(iterator);
+ }
+
+ let (src_buf, src_ptr, dst_buf, dst_end, cap) = unsafe {
+ let inner = iterator.as_inner().as_into_iter();
+ (
+ inner.buf.as_ptr(),
+ inner.ptr,
+ inner.buf.as_ptr() as *mut T,
+ inner.end as *const T,
+ inner.cap,
+ )
+ };
+
+ let len = SpecInPlaceCollect::collect_in_place(&mut iterator, dst_buf, dst_end);
+
+ let src = unsafe { iterator.as_inner().as_into_iter() };
+ // check if SourceIter contract was upheld
+ // caveat: if they weren't we might not even make it to this point
+ debug_assert_eq!(src_buf, src.buf.as_ptr());
+ // check InPlaceIterable contract. This is only possible if the iterator advanced the
+ // source pointer at all. If it uses unchecked access via TrustedRandomAccess
+ // then the source pointer will stay in its initial position and we can't use it as reference
+ if src.ptr != src_ptr {
+ debug_assert!(
+ unsafe { dst_buf.add(len) as *const _ } <= src.ptr,
+ "InPlaceIterable contract violation, write pointer advanced beyond read pointer"
+ );
+ }
+
+ // Drop any remaining values at the tail of the source but prevent drop of the allocation
+ // itself once IntoIter goes out of scope.
+ // If the drop panics then we also leak any elements collected into dst_buf.
+ //
+ // Note: This access to the source wouldn't be allowed by the TrustedRandomAccessNoCoerce
+ // contract (used by SpecInPlaceCollect below). But see the "O(1) collect" section in the
+ // module documentation for why this is ok anyway.
+ src.forget_allocation_drop_remaining();
+
+ let vec = unsafe { Vec::from_raw_parts(dst_buf, len, cap) };
+
+ vec
+ }
+}
+
+fn write_in_place_with_drop<T>(
+ src_end: *const T,
+) -> impl FnMut(InPlaceDrop<T>, T) -> Result<InPlaceDrop<T>, !> {
+ move |mut sink, item| {
+ unsafe {
+ // the InPlaceIterable contract cannot be verified precisely here since
+ // try_fold has an exclusive reference to the source pointer
+ // all we can do is check if it's still in range
+ debug_assert!(sink.dst as *const _ <= src_end, "InPlaceIterable contract violation");
+ ptr::write(sink.dst, item);
+ // Since this executes user code which can panic we have to bump the pointer
+ // after each step.
+ sink.dst = sink.dst.add(1);
+ }
+ Ok(sink)
+ }
+}
+
+/// Helper trait to hold specialized implementations of the in-place iterate-collect loop
+trait SpecInPlaceCollect<T, I>: Iterator<Item = T> {
+ /// Collects an iterator (`self`) into the destination buffer (`dst`) and returns the number of items
+ /// collected. `end` is the last writable element of the allocation and used for bounds checks.
+ ///
+ /// This method is specialized and one of its implementations makes use of
+ /// `Iterator::__iterator_get_unchecked` calls with a `TrustedRandomAccessNoCoerce` bound
+ /// on `I` which means the caller of this method must take the safety conditions
+ /// of that trait into consideration.
+ fn collect_in_place(&mut self, dst: *mut T, end: *const T) -> usize;
+}
+
+impl<T, I> SpecInPlaceCollect<T, I> for I
+where
+ I: Iterator<Item = T>,
+{
+ #[inline]
+ default fn collect_in_place(&mut self, dst_buf: *mut T, end: *const T) -> usize {
+ // use try-fold since
+ // - it vectorizes better for some iterator adapters
+ // - unlike most internal iteration methods, it only takes a &mut self
+ // - it lets us thread the write pointer through its innards and get it back in the end
+ let sink = InPlaceDrop { inner: dst_buf, dst: dst_buf };
+ let sink =
+ self.try_fold::<_, _, Result<_, !>>(sink, write_in_place_with_drop(end)).unwrap();
+ // iteration succeeded, don't drop head
+ unsafe { ManuallyDrop::new(sink).dst.sub_ptr(dst_buf) }
+ }
+}
+
+impl<T, I> SpecInPlaceCollect<T, I> for I
+where
+ I: Iterator<Item = T> + TrustedRandomAccessNoCoerce,
+{
+ #[inline]
+ fn collect_in_place(&mut self, dst_buf: *mut T, end: *const T) -> usize {
+ let len = self.size();
+ let mut drop_guard = InPlaceDrop { inner: dst_buf, dst: dst_buf };
+ for i in 0..len {
+ // Safety: The InPlaceIterable contract guarantees that for every element we read
+ // one slot in the underlying storage will have been freed up and we can immediately
+ // write back the result.
+ unsafe {
+ let dst = dst_buf.offset(i as isize);
+ debug_assert!(dst as *const _ <= end, "InPlaceIterable contract violation");
+ ptr::write(dst, self.__iterator_get_unchecked(i));
+ // Since this executes user code which can panic we have to bump the pointer
+ // after each step.
+ drop_guard.dst = dst.add(1);
+ }
+ }
+ mem::forget(drop_guard);
+ len
+ }
+}
+
+/// Internal helper trait for in-place iteration specialization.
+///
+/// Currently this is only implemented by [`vec::IntoIter`] - returning a reference to itself - and
+/// [`binary_heap::IntoIter`] which returns a reference to its inner representation.
+///
+/// Since this is an internal trait it hides the implementation detail `binary_heap::IntoIter`
+/// uses `vec::IntoIter` internally.
+///
+/// [`vec::IntoIter`]: super::IntoIter
+/// [`binary_heap::IntoIter`]: crate::collections::binary_heap::IntoIter
+///
+/// # Safety
+///
+/// In-place iteration relies on implementation details of `vec::IntoIter`, most importantly that
+/// it does not create references to the whole allocation during iteration, only raw pointers
+#[rustc_specialization_trait]
+pub(crate) unsafe trait AsVecIntoIter {
+ type Item;
+ fn as_into_iter(&mut self) -> &mut super::IntoIter<Self::Item>;
+}
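An illustrative sketch of when the layout check above applies: the first pipeline satisfies the size/alignment requirement and is eligible for in-place collection, while the second falls back to the generic path; both produce the same result, and the specialization itself is not observable from safe code:

```rust
fn main() {
    // usize -> isize: same size and alignment, so the pipeline is eligible
    // for the in-place specialization described in the module docs above.
    let a: Vec<isize> = vec![1usize, 2, 3].into_iter().map(|u| u as isize).collect();
    assert_eq!(a, [1, 2, 3]);

    // u8 -> u64: the layouts differ, so this falls back to the generic
    // collect path and allocates a new buffer. The result is the same.
    let b: Vec<u64> = vec![1u8, 2, 3].into_iter().map(u64::from).collect();
    assert_eq!(b, [1, 2, 3]);
}
```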
diff --git a/library/alloc/src/vec/in_place_drop.rs b/library/alloc/src/vec/in_place_drop.rs
new file mode 100644
index 000000000..1b1ef9130
--- /dev/null
+++ b/library/alloc/src/vec/in_place_drop.rs
@@ -0,0 +1,24 @@
+use core::ptr::{self};
+use core::slice::{self};
+
+// A helper struct for in-place iteration that drops the destination slice of iteration,
+// i.e. the head. The source slice (the tail) is dropped by IntoIter.
+pub(super) struct InPlaceDrop<T> {
+ pub(super) inner: *mut T,
+ pub(super) dst: *mut T,
+}
+
+impl<T> InPlaceDrop<T> {
+ fn len(&self) -> usize {
+ unsafe { self.dst.sub_ptr(self.inner) }
+ }
+}
+
+impl<T> Drop for InPlaceDrop<T> {
+ #[inline]
+ fn drop(&mut self) {
+ unsafe {
+ ptr::drop_in_place(slice::from_raw_parts_mut(self.inner, self.len()));
+ }
+ }
+}
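A minimal sketch of the same drop-guard pattern in user code, assuming a hypothetical `PrefixDropGuard` type: only the initialized prefix of a buffer is dropped, mirroring what `InPlaceDrop` does for the written head:

```rust
use std::ptr;

// Hypothetical guard, mirroring `InPlaceDrop`: on drop it runs the
// destructors of the initialized prefix of a buffer and nothing else.
struct PrefixDropGuard<T> {
    start: *mut T,
    written: usize,
}

impl<T> Drop for PrefixDropGuard<T> {
    fn drop(&mut self) {
        // Only the first `written` elements are initialized; drop exactly those.
        unsafe { ptr::drop_in_place(ptr::slice_from_raw_parts_mut(self.start, self.written)) }
    }
}

fn main() {
    let mut buf: Vec<String> = Vec::with_capacity(3);
    let mut guard = PrefixDropGuard { start: buf.as_mut_ptr(), written: 0 };
    for s in ["a", "b"] {
        unsafe { ptr::write(guard.start.add(guard.written), s.to_string()) };
        guard.written += 1;
    }
    drop(guard); // frees the two initialized `String`s
    drop(buf); // len is still 0, so only the allocation itself is released
}
```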
diff --git a/library/alloc/src/vec/into_iter.rs b/library/alloc/src/vec/into_iter.rs
new file mode 100644
index 000000000..1b483e3fc
--- /dev/null
+++ b/library/alloc/src/vec/into_iter.rs
@@ -0,0 +1,401 @@
+#[cfg(not(no_global_oom_handling))]
+use super::AsVecIntoIter;
+use crate::alloc::{Allocator, Global};
+use crate::raw_vec::RawVec;
+use core::array;
+use core::fmt;
+use core::intrinsics::arith_offset;
+use core::iter::{
+ FusedIterator, InPlaceIterable, SourceIter, TrustedLen, TrustedRandomAccessNoCoerce,
+};
+use core::marker::PhantomData;
+use core::mem::{self, ManuallyDrop, MaybeUninit};
+#[cfg(not(no_global_oom_handling))]
+use core::ops::Deref;
+use core::ptr::{self, NonNull};
+use core::slice::{self};
+
+/// An iterator that moves out of a vector.
+///
+/// This `struct` is created by the `into_iter` method on [`Vec`](super::Vec)
+/// (provided by the [`IntoIterator`] trait).
+///
+/// # Example
+///
+/// ```
+/// let v = vec![0, 1, 2];
+/// let iter: std::vec::IntoIter<_> = v.into_iter();
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_insignificant_dtor]
+pub struct IntoIter<
+ T,
+ #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global,
+> {
+ pub(super) buf: NonNull<T>,
+ pub(super) phantom: PhantomData<T>,
+ pub(super) cap: usize,
+ // the drop impl reconstructs a RawVec from buf, cap and alloc
+ // to avoid dropping the allocator twice we need to wrap it into ManuallyDrop
+ pub(super) alloc: ManuallyDrop<A>,
+ pub(super) ptr: *const T,
+ pub(super) end: *const T,
+}
+
+#[stable(feature = "vec_intoiter_debug", since = "1.13.0")]
+impl<T: fmt::Debug, A: Allocator> fmt::Debug for IntoIter<T, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.debug_tuple("IntoIter").field(&self.as_slice()).finish()
+ }
+}
+
+impl<T, A: Allocator> IntoIter<T, A> {
+ /// Returns the remaining items of this iterator as a slice.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let vec = vec!['a', 'b', 'c'];
+ /// let mut into_iter = vec.into_iter();
+ /// assert_eq!(into_iter.as_slice(), &['a', 'b', 'c']);
+ /// let _ = into_iter.next().unwrap();
+ /// assert_eq!(into_iter.as_slice(), &['b', 'c']);
+ /// ```
+ #[stable(feature = "vec_into_iter_as_slice", since = "1.15.0")]
+ pub fn as_slice(&self) -> &[T] {
+ unsafe { slice::from_raw_parts(self.ptr, self.len()) }
+ }
+
+ /// Returns the remaining items of this iterator as a mutable slice.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let vec = vec!['a', 'b', 'c'];
+ /// let mut into_iter = vec.into_iter();
+ /// assert_eq!(into_iter.as_slice(), &['a', 'b', 'c']);
+ /// into_iter.as_mut_slice()[2] = 'z';
+ /// assert_eq!(into_iter.next().unwrap(), 'a');
+ /// assert_eq!(into_iter.next().unwrap(), 'b');
+ /// assert_eq!(into_iter.next().unwrap(), 'z');
+ /// ```
+ #[stable(feature = "vec_into_iter_as_slice", since = "1.15.0")]
+ pub fn as_mut_slice(&mut self) -> &mut [T] {
+ unsafe { &mut *self.as_raw_mut_slice() }
+ }
+
+ /// Returns a reference to the underlying allocator.
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[inline]
+ pub fn allocator(&self) -> &A {
+ &self.alloc
+ }
+
+ fn as_raw_mut_slice(&mut self) -> *mut [T] {
+ ptr::slice_from_raw_parts_mut(self.ptr as *mut T, self.len())
+ }
+
+ /// Drops remaining elements and relinquishes the backing allocation.
+ ///
+ /// This is roughly equivalent to the following, but more efficient
+ ///
+ /// ```
+ /// # let mut into_iter = Vec::<u8>::with_capacity(10).into_iter();
+ /// (&mut into_iter).for_each(core::mem::drop);
+ /// unsafe { core::ptr::write(&mut into_iter, Vec::new().into_iter()); }
+ /// ```
+ ///
+ /// This method is used by in-place iteration, refer to the vec::in_place_collect
+ /// documentation for an overview.
+ #[cfg(not(no_global_oom_handling))]
+ pub(super) fn forget_allocation_drop_remaining(&mut self) {
+ let remaining = self.as_raw_mut_slice();
+
+ // overwrite the individual fields instead of creating a new
+ // struct and then overwriting &mut self.
+ // This creates less assembly.
+ self.cap = 0;
+ self.buf = unsafe { NonNull::new_unchecked(RawVec::NEW.ptr()) };
+ self.ptr = self.buf.as_ptr();
+ self.end = self.buf.as_ptr();
+
+ unsafe {
+ ptr::drop_in_place(remaining);
+ }
+ }
+
+ /// Forgets the remaining elements without dropping them, while still allowing the backing allocation to be freed.
+ pub(crate) fn forget_remaining_elements(&mut self) {
+ self.ptr = self.end;
+ }
+}
+
+#[stable(feature = "vec_intoiter_as_ref", since = "1.46.0")]
+impl<T, A: Allocator> AsRef<[T]> for IntoIter<T, A> {
+ fn as_ref(&self) -> &[T] {
+ self.as_slice()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<T: Send, A: Allocator + Send> Send for IntoIter<T, A> {}
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<T: Sync, A: Allocator + Sync> Sync for IntoIter<T, A> {}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator> Iterator for IntoIter<T, A> {
+ type Item = T;
+
+ #[inline]
+ fn next(&mut self) -> Option<T> {
+ if self.ptr as *const _ == self.end {
+ None
+ } else if mem::size_of::<T>() == 0 {
+ // purposefully don't use 'ptr.offset' because for
+ // vectors with 0-size elements this would return the
+ // same pointer.
+ self.ptr = unsafe { arith_offset(self.ptr as *const i8, 1) as *mut T };
+
+ // Make up a value of this ZST.
+ Some(unsafe { mem::zeroed() })
+ } else {
+ let old = self.ptr;
+ self.ptr = unsafe { self.ptr.offset(1) };
+
+ Some(unsafe { ptr::read(old) })
+ }
+ }
+
+ #[inline]
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ let exact = if mem::size_of::<T>() == 0 {
+ self.end.addr().wrapping_sub(self.ptr.addr())
+ } else {
+ unsafe { self.end.sub_ptr(self.ptr) }
+ };
+ (exact, Some(exact))
+ }
+
+ #[inline]
+ fn advance_by(&mut self, n: usize) -> Result<(), usize> {
+ let step_size = self.len().min(n);
+ let to_drop = ptr::slice_from_raw_parts_mut(self.ptr as *mut T, step_size);
+ if mem::size_of::<T>() == 0 {
+ // SAFETY: due to unchecked casts of unsigned amounts to signed offsets the wraparound
+ // effectively results in unsigned pointers representing positions 0..usize::MAX,
+ // which is valid for ZSTs.
+ self.ptr = unsafe { arith_offset(self.ptr as *const i8, step_size as isize) as *mut T }
+ } else {
+ // SAFETY: the min() above ensures that step_size is in bounds
+ self.ptr = unsafe { self.ptr.add(step_size) };
+ }
+ // SAFETY: the min() above ensures that step_size is in bounds
+ unsafe {
+ ptr::drop_in_place(to_drop);
+ }
+ if step_size < n {
+ return Err(step_size);
+ }
+ Ok(())
+ }
+
+ #[inline]
+ fn count(self) -> usize {
+ self.len()
+ }
+
+ #[inline]
+ fn next_chunk<const N: usize>(&mut self) -> Result<[T; N], core::array::IntoIter<T, N>> {
+ let mut raw_ary = MaybeUninit::uninit_array();
+
+ let len = self.len();
+
+ if mem::size_of::<T>() == 0 {
+ if len < N {
+ self.forget_remaining_elements();
+ // Safety: ZSTs can be conjured ex nihilo, only the amount has to be correct
+ return Err(unsafe { array::IntoIter::new_unchecked(raw_ary, 0..len) });
+ }
+
+ self.ptr = unsafe { arith_offset(self.ptr as *const i8, N as isize) as *mut T };
+ // Safety: ditto
+ return Ok(unsafe { MaybeUninit::array_assume_init(raw_ary) });
+ }
+
+ if len < N {
+ // Safety: `len` indicates that this many elements are available and we just checked that
+ // it fits into the array.
+ unsafe {
+ ptr::copy_nonoverlapping(self.ptr, raw_ary.as_mut_ptr() as *mut T, len);
+ self.forget_remaining_elements();
+ return Err(array::IntoIter::new_unchecked(raw_ary, 0..len));
+ }
+ }
+
+ // Safety: `len` is at least as large as the array size. Copy a fixed amount here to fully initialize
+ // the array.
+ return unsafe {
+ ptr::copy_nonoverlapping(self.ptr, raw_ary.as_mut_ptr() as *mut T, N);
+ self.ptr = self.ptr.add(N);
+ Ok(MaybeUninit::array_assume_init(raw_ary))
+ };
+ }
+
+ unsafe fn __iterator_get_unchecked(&mut self, i: usize) -> Self::Item
+ where
+ Self: TrustedRandomAccessNoCoerce,
+ {
+ // SAFETY: the caller must guarantee that `i` is in bounds of the
+ // `Vec<T>`, so `i` cannot overflow an `isize`, and the `self.ptr.add(i)`
+ // is guaranteed to point to an element of the `Vec<T>` and
+ // thus guaranteed to be valid to dereference.
+ //
+ // Also note the implementation of `Self: TrustedRandomAccess` requires
+ // that `T: Copy` so reading elements from the buffer doesn't invalidate
+ // them for `Drop`.
+ unsafe {
+ if mem::size_of::<T>() == 0 { mem::zeroed() } else { ptr::read(self.ptr.add(i)) }
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator> DoubleEndedIterator for IntoIter<T, A> {
+ #[inline]
+ fn next_back(&mut self) -> Option<T> {
+ if self.end == self.ptr {
+ None
+ } else if mem::size_of::<T>() == 0 {
+ // See above for why 'ptr.offset' isn't used
+ self.end = unsafe { arith_offset(self.end as *const i8, -1) as *mut T };
+
+ // Make up a value of this ZST.
+ Some(unsafe { mem::zeroed() })
+ } else {
+ self.end = unsafe { self.end.offset(-1) };
+
+ Some(unsafe { ptr::read(self.end) })
+ }
+ }
+
+ #[inline]
+ fn advance_back_by(&mut self, n: usize) -> Result<(), usize> {
+ let step_size = self.len().min(n);
+ if mem::size_of::<T>() == 0 {
+ // SAFETY: same as for advance_by()
+ self.end = unsafe {
+ arith_offset(self.end as *const i8, step_size.wrapping_neg() as isize) as *mut T
+ }
+ } else {
+ // SAFETY: same as for advance_by()
+ self.end = unsafe { self.end.offset(step_size.wrapping_neg() as isize) };
+ }
+ let to_drop = ptr::slice_from_raw_parts_mut(self.end as *mut T, step_size);
+ // SAFETY: same as for advance_by()
+ unsafe {
+ ptr::drop_in_place(to_drop);
+ }
+ if step_size < n {
+ return Err(step_size);
+ }
+ Ok(())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator> ExactSizeIterator for IntoIter<T, A> {
+ fn is_empty(&self) -> bool {
+ self.ptr == self.end
+ }
+}
+
+#[stable(feature = "fused", since = "1.26.0")]
+impl<T, A: Allocator> FusedIterator for IntoIter<T, A> {}
+
+#[unstable(feature = "trusted_len", issue = "37572")]
+unsafe impl<T, A: Allocator> TrustedLen for IntoIter<T, A> {}
+
+#[doc(hidden)]
+#[unstable(issue = "none", feature = "std_internals")]
+#[rustc_unsafe_specialization_marker]
+pub trait NonDrop {}
+
+// T: Copy as approximation for !Drop since get_unchecked does not advance self.ptr
+// and thus we can't implement drop-handling
+#[unstable(issue = "none", feature = "std_internals")]
+impl<T: Copy> NonDrop for T {}
+
+#[doc(hidden)]
+#[unstable(issue = "none", feature = "std_internals")]
+// TrustedRandomAccess (without NoCoerce) must not be implemented because
+// subtypes/supertypes of `T` might not be `NonDrop`
+unsafe impl<T, A: Allocator> TrustedRandomAccessNoCoerce for IntoIter<T, A>
+where
+ T: NonDrop,
+{
+ const MAY_HAVE_SIDE_EFFECT: bool = false;
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "vec_into_iter_clone", since = "1.8.0")]
+impl<T: Clone, A: Allocator + Clone> Clone for IntoIter<T, A> {
+ #[cfg(not(test))]
+ fn clone(&self) -> Self {
+ self.as_slice().to_vec_in(self.alloc.deref().clone()).into_iter()
+ }
+ #[cfg(test)]
+ fn clone(&self) -> Self {
+ crate::slice::to_vec(self.as_slice(), self.alloc.deref().clone()).into_iter()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<#[may_dangle] T, A: Allocator> Drop for IntoIter<T, A> {
+ fn drop(&mut self) {
+ struct DropGuard<'a, T, A: Allocator>(&'a mut IntoIter<T, A>);
+
+ impl<T, A: Allocator> Drop for DropGuard<'_, T, A> {
+ fn drop(&mut self) {
+ unsafe {
+ // `IntoIter::alloc` is not used anymore after this and will be dropped by RawVec
+ let alloc = ManuallyDrop::take(&mut self.0.alloc);
+ // RawVec handles deallocation
+ let _ = RawVec::from_raw_parts_in(self.0.buf.as_ptr(), self.0.cap, alloc);
+ }
+ }
+ }
+
+ let guard = DropGuard(self);
+ // destroy the remaining elements
+ unsafe {
+ ptr::drop_in_place(guard.0.as_raw_mut_slice());
+ }
+ // now `guard` will be dropped and do the rest
+ }
+}
+
+// In addition to the SAFETY invariants of the following three unsafe traits
+// also refer to the vec::in_place_collect module documentation to get an overview
+#[unstable(issue = "none", feature = "inplace_iteration")]
+#[doc(hidden)]
+unsafe impl<T, A: Allocator> InPlaceIterable for IntoIter<T, A> {}
+
+#[unstable(issue = "none", feature = "inplace_iteration")]
+#[doc(hidden)]
+unsafe impl<T, A: Allocator> SourceIter for IntoIter<T, A> {
+ type Source = Self;
+
+ #[inline]
+ unsafe fn as_inner(&mut self) -> &mut Self::Source {
+ self
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+unsafe impl<T> AsVecIntoIter for IntoIter<T> {
+ type Item = T;
+
+ fn as_into_iter(&mut self) -> &mut IntoIter<Self::Item> {
+ self
+ }
+}
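A small sketch of the `Drop` behavior above: elements not yet yielded by the iterator are dropped when the `IntoIter` itself is dropped. `Rc` strong counts are used here only to observe the drops:

```rust
use std::rc::Rc;

fn main() {
    let tracker = Rc::new(());
    let v = vec![tracker.clone(), tracker.clone(), tracker.clone()];
    assert_eq!(Rc::strong_count(&tracker), 4);

    let mut it = v.into_iter();
    let _first = it.next(); // one element is moved out of the iterator
    drop(it); // the Drop impl drops the two elements that were never yielded

    // Only `tracker` and `_first` keep the count alive now.
    assert_eq!(Rc::strong_count(&tracker), 2);
}
```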
diff --git a/library/alloc/src/vec/is_zero.rs b/library/alloc/src/vec/is_zero.rs
new file mode 100644
index 000000000..92a32779b
--- /dev/null
+++ b/library/alloc/src/vec/is_zero.rs
@@ -0,0 +1,146 @@
+use crate::boxed::Box;
+
+#[rustc_specialization_trait]
+pub(super) unsafe trait IsZero {
+ /// Whether this value's representation is all zeros
+ fn is_zero(&self) -> bool;
+}
+
+macro_rules! impl_is_zero {
+ ($t:ty, $is_zero:expr) => {
+ unsafe impl IsZero for $t {
+ #[inline]
+ fn is_zero(&self) -> bool {
+ $is_zero(*self)
+ }
+ }
+ };
+}
+
+impl_is_zero!(i8, |x| x == 0); // It is needed to impl for arrays and tuples of i8.
+impl_is_zero!(i16, |x| x == 0);
+impl_is_zero!(i32, |x| x == 0);
+impl_is_zero!(i64, |x| x == 0);
+impl_is_zero!(i128, |x| x == 0);
+impl_is_zero!(isize, |x| x == 0);
+
+impl_is_zero!(u8, |x| x == 0); // It is needed to impl for arrays and tuples of u8.
+impl_is_zero!(u16, |x| x == 0);
+impl_is_zero!(u32, |x| x == 0);
+impl_is_zero!(u64, |x| x == 0);
+impl_is_zero!(u128, |x| x == 0);
+impl_is_zero!(usize, |x| x == 0);
+
+impl_is_zero!(bool, |x| x == false);
+impl_is_zero!(char, |x| x == '\0');
+
+impl_is_zero!(f32, |x: f32| x.to_bits() == 0);
+impl_is_zero!(f64, |x: f64| x.to_bits() == 0);
+
+unsafe impl<T> IsZero for *const T {
+ #[inline]
+ fn is_zero(&self) -> bool {
+ (*self).is_null()
+ }
+}
+
+unsafe impl<T> IsZero for *mut T {
+ #[inline]
+ fn is_zero(&self) -> bool {
+ (*self).is_null()
+ }
+}
+
+unsafe impl<T: IsZero, const N: usize> IsZero for [T; N] {
+ #[inline]
+ fn is_zero(&self) -> bool {
+ // Because this is generated as a runtime check, it's not obvious that
+ // it's worth doing if the array is really long. The threshold here
+ // is largely arbitrary, but was picked because as of 2022-07-01 LLVM
+ // fails to const-fold the check in `vec![[1; 32]; n]`
+ // See https://github.com/rust-lang/rust/pull/97581#issuecomment-1166628022
+ // Feel free to tweak if you have better evidence.
+
+ N <= 16 && self.iter().all(IsZero::is_zero)
+ }
+}
+
+// This is a recursive macro.
+macro_rules! impl_for_tuples {
+ // Stopper
+ () => {
+ // There is no use in implementing this for the empty tuple because it is a ZST.
+ };
+ ($first_arg:ident $(,$rest:ident)*) => {
+ unsafe impl <$first_arg: IsZero, $($rest: IsZero,)*> IsZero for ($first_arg, $($rest,)*){
+ #[inline]
+ fn is_zero(&self) -> bool{
+ // Destructure tuple to N references
+ // Rust allows shadowing generic params with local variable names.
+ #[allow(non_snake_case)]
+ let ($first_arg, $($rest,)*) = self;
+
+ $first_arg.is_zero()
+ $( && $rest.is_zero() )*
+ }
+ }
+
+ impl_for_tuples!($($rest),*);
+ }
+}
+
+impl_for_tuples!(A, B, C, D, E, F, G, H);
+
+// `Option<&T>` and `Option<Box<T>>` are guaranteed to represent `None` as null.
+// For fat pointers, the bytes that would be the pointer metadata in the `Some`
+// variant are padding in the `None` variant, so ignoring them and
+// zero-initializing instead is ok.
+// `Option<&mut T>` never implements `Clone`, so there's no need for an impl of
+// `SpecFromElem`.
+
+unsafe impl<T: ?Sized> IsZero for Option<&T> {
+ #[inline]
+ fn is_zero(&self) -> bool {
+ self.is_none()
+ }
+}
+
+unsafe impl<T: ?Sized> IsZero for Option<Box<T>> {
+ #[inline]
+ fn is_zero(&self) -> bool {
+ self.is_none()
+ }
+}
+
+// `Option<num::NonZeroU32>` and similar have a representation guarantee that
+// they're the same size as the corresponding `u32` type, as well as a guarantee
+// that transmuting between `NonZeroU32` and `Option<num::NonZeroU32>` works.
+// While the documentation officially makes it UB to transmute from `None`,
+// we're the standard library so we can make extra inferences, and we know that
+// the only niche available to represent `None` is the one that's all zeros.
+
+macro_rules! impl_is_zero_option_of_nonzero {
+ ($($t:ident,)+) => {$(
+ unsafe impl IsZero for Option<core::num::$t> {
+ #[inline]
+ fn is_zero(&self) -> bool {
+ self.is_none()
+ }
+ }
+ )+};
+}
+
+impl_is_zero_option_of_nonzero!(
+ NonZeroU8,
+ NonZeroU16,
+ NonZeroU32,
+ NonZeroU64,
+ NonZeroU128,
+ NonZeroI8,
+ NonZeroI16,
+ NonZeroI32,
+ NonZeroI64,
+ NonZeroI128,
+ NonZeroUsize,
+ NonZeroIsize,
+);
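An illustrative sketch of the shapes these impls target; the zeroed-allocation fast path is an internal specialization and cannot be observed from safe code, so the assertions only check the resulting values:

```rust
fn main() {
    // Candidate shapes for the zeroed-allocation path: primitive zeros,
    // tuples of zeros, and `None` for types with a null niche.
    let a = vec![0u64; 4];
    let b = vec![(0u8, 0u32); 4];
    let c = vec![None::<Box<u8>>; 4];

    assert_eq!(a, [0, 0, 0, 0]);
    assert!(b.iter().all(|&(x, y)| x == 0 && y == 0));
    assert!(c.iter().all(|x| x.is_none()));
}
```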
diff --git a/library/alloc/src/vec/mod.rs b/library/alloc/src/vec/mod.rs
new file mode 100644
index 000000000..fa9f2131c
--- /dev/null
+++ b/library/alloc/src/vec/mod.rs
@@ -0,0 +1,3157 @@
+//! A contiguous growable array type with heap-allocated contents, written
+//! `Vec<T>`.
+//!
+//! Vectors have *O*(1) indexing, amortized *O*(1) push (to the end) and
+//! *O*(1) pop (from the end).
+//!
+//! Vectors ensure they never allocate more than `isize::MAX` bytes.
+//!
+//! # Examples
+//!
+//! You can explicitly create a [`Vec`] with [`Vec::new`]:
+//!
+//! ```
+//! let v: Vec<i32> = Vec::new();
+//! ```
+//!
+//! ...or by using the [`vec!`] macro:
+//!
+//! ```
+//! let v: Vec<i32> = vec![];
+//!
+//! let v = vec![1, 2, 3, 4, 5];
+//!
+//! let v = vec![0; 10]; // ten zeroes
+//! ```
+//!
+//! You can [`push`] values onto the end of a vector (which will grow the vector
+//! as needed):
+//!
+//! ```
+//! let mut v = vec![1, 2];
+//!
+//! v.push(3);
+//! ```
+//!
+//! Popping values works in much the same way:
+//!
+//! ```
+//! let mut v = vec![1, 2];
+//!
+//! let two = v.pop();
+//! ```
+//!
+//! Vectors also support indexing (through the [`Index`] and [`IndexMut`] traits):
+//!
+//! ```
+//! let mut v = vec![1, 2, 3];
+//! let three = v[2];
+//! v[1] = v[1] + 5;
+//! ```
+//!
+//! [`push`]: Vec::push
+
+#![stable(feature = "rust1", since = "1.0.0")]
+
+#[cfg(not(no_global_oom_handling))]
+use core::cmp;
+use core::cmp::Ordering;
+use core::convert::TryFrom;
+use core::fmt;
+use core::hash::{Hash, Hasher};
+use core::intrinsics::{arith_offset, assume};
+use core::iter;
+#[cfg(not(no_global_oom_handling))]
+use core::iter::FromIterator;
+use core::marker::PhantomData;
+use core::mem::{self, ManuallyDrop, MaybeUninit};
+use core::ops::{self, Index, IndexMut, Range, RangeBounds};
+use core::ptr::{self, NonNull};
+use core::slice::{self, SliceIndex};
+
+use crate::alloc::{Allocator, Global};
+use crate::borrow::{Cow, ToOwned};
+use crate::boxed::Box;
+use crate::collections::TryReserveError;
+use crate::raw_vec::RawVec;
+
+#[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
+pub use self::drain_filter::DrainFilter;
+
+mod drain_filter;
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "vec_splice", since = "1.21.0")]
+pub use self::splice::Splice;
+
+#[cfg(not(no_global_oom_handling))]
+mod splice;
+
+#[stable(feature = "drain", since = "1.6.0")]
+pub use self::drain::Drain;
+
+mod drain;
+
+#[cfg(not(no_global_oom_handling))]
+mod cow;
+
+#[cfg(not(no_global_oom_handling))]
+pub(crate) use self::in_place_collect::AsVecIntoIter;
+#[stable(feature = "rust1", since = "1.0.0")]
+pub use self::into_iter::IntoIter;
+
+mod into_iter;
+
+#[cfg(not(no_global_oom_handling))]
+use self::is_zero::IsZero;
+
+mod is_zero;
+
+#[cfg(not(no_global_oom_handling))]
+mod in_place_collect;
+
+mod partial_eq;
+
+#[cfg(not(no_global_oom_handling))]
+use self::spec_from_elem::SpecFromElem;
+
+#[cfg(not(no_global_oom_handling))]
+mod spec_from_elem;
+
+#[cfg(not(no_global_oom_handling))]
+use self::set_len_on_drop::SetLenOnDrop;
+
+#[cfg(not(no_global_oom_handling))]
+mod set_len_on_drop;
+
+#[cfg(not(no_global_oom_handling))]
+use self::in_place_drop::InPlaceDrop;
+
+#[cfg(not(no_global_oom_handling))]
+mod in_place_drop;
+
+#[cfg(not(no_global_oom_handling))]
+use self::spec_from_iter_nested::SpecFromIterNested;
+
+#[cfg(not(no_global_oom_handling))]
+mod spec_from_iter_nested;
+
+#[cfg(not(no_global_oom_handling))]
+use self::spec_from_iter::SpecFromIter;
+
+#[cfg(not(no_global_oom_handling))]
+mod spec_from_iter;
+
+#[cfg(not(no_global_oom_handling))]
+use self::spec_extend::SpecExtend;
+
+#[cfg(not(no_global_oom_handling))]
+mod spec_extend;
+
+/// A contiguous growable array type, written as `Vec<T>`, short for 'vector'.
+///
+/// # Examples
+///
+/// ```
+/// let mut vec = Vec::new();
+/// vec.push(1);
+/// vec.push(2);
+///
+/// assert_eq!(vec.len(), 2);
+/// assert_eq!(vec[0], 1);
+///
+/// assert_eq!(vec.pop(), Some(2));
+/// assert_eq!(vec.len(), 1);
+///
+/// vec[0] = 7;
+/// assert_eq!(vec[0], 7);
+///
+/// vec.extend([1, 2, 3].iter().copied());
+///
+/// for x in &vec {
+/// println!("{x}");
+/// }
+/// assert_eq!(vec, [7, 1, 2, 3]);
+/// ```
+///
+/// The [`vec!`] macro is provided for convenient initialization:
+///
+/// ```
+/// let mut vec1 = vec![1, 2, 3];
+/// vec1.push(4);
+/// let vec2 = Vec::from([1, 2, 3, 4]);
+/// assert_eq!(vec1, vec2);
+/// ```
+///
+/// It can also initialize each element of a `Vec<T>` with a given value.
+/// This may be more efficient than performing allocation and initialization
+/// in separate steps, especially when initializing a vector of zeros:
+///
+/// ```
+/// let vec = vec![0; 5];
+/// assert_eq!(vec, [0, 0, 0, 0, 0]);
+///
+/// // The following is equivalent, but potentially slower:
+/// let mut vec = Vec::with_capacity(5);
+/// vec.resize(5, 0);
+/// assert_eq!(vec, [0, 0, 0, 0, 0]);
+/// ```
+///
+/// For more information, see
+/// [Capacity and Reallocation](#capacity-and-reallocation).
+///
+/// Use a `Vec<T>` as an efficient stack:
+///
+/// ```
+/// let mut stack = Vec::new();
+///
+/// stack.push(1);
+/// stack.push(2);
+/// stack.push(3);
+///
+/// while let Some(top) = stack.pop() {
+/// // Prints 3, 2, 1
+/// println!("{top}");
+/// }
+/// ```
+///
+/// # Indexing
+///
+/// The `Vec` type allows access to values by index, because it implements the
+/// [`Index`] trait. An example will be more explicit:
+///
+/// ```
+/// let v = vec![0, 2, 4, 6];
+/// println!("{}", v[1]); // it will display '2'
+/// ```
+///
+/// However, be careful: if you try to access an index which isn't in the `Vec`,
+/// your software will panic! You cannot do this:
+///
+/// ```should_panic
+/// let v = vec![0, 2, 4, 6];
+/// println!("{}", v[6]); // it will panic!
+/// ```
+///
+/// Use [`get`] and [`get_mut`] if you want to check whether the index is in
+/// the `Vec`.
+///
+/// # Slicing
+///
+/// A `Vec` can be mutable. On the other hand, slices are read-only objects.
+/// To get a [slice][prim@slice], use [`&`]. Example:
+///
+/// ```
+/// fn read_slice(slice: &[usize]) {
+/// // ...
+/// }
+///
+/// let v = vec![0, 1];
+/// read_slice(&v);
+///
+/// // ... and that's all!
+/// // you can also do it like this:
+/// let u: &[usize] = &v;
+/// // or like this:
+/// let u: &[_] = &v;
+/// ```
+///
+/// In Rust, it's more common to pass slices as arguments rather than vectors
+/// when you just want to provide read access. The same goes for [`String`] and
+/// [`&str`].
+///
+/// # Capacity and reallocation
+///
+/// The capacity of a vector is the amount of space allocated for any future
+/// elements that will be added onto the vector. This is not to be confused with
+/// the *length* of a vector, which specifies the number of actual elements
+/// within the vector. If a vector's length exceeds its capacity, its capacity
+/// will automatically be increased, but its elements will have to be
+/// reallocated.
+///
+/// For example, a vector with capacity 10 and length 0 would be an empty vector
+/// with space for 10 more elements. Pushing 10 or fewer elements onto the
+/// vector will not change its capacity or cause reallocation to occur. However,
+/// if the vector's length is increased to 11, it will have to reallocate, which
+/// can be slow. For this reason, it is recommended to use [`Vec::with_capacity`]
+/// whenever possible to specify how big the vector is expected to get.
+///
+/// # Guarantees
+///
+/// Due to its incredibly fundamental nature, `Vec` makes a lot of guarantees
+/// about its design. This ensures that it's as low-overhead as possible in
+/// the general case, and can be correctly manipulated in primitive ways
+/// by unsafe code. Note that these guarantees refer to an unqualified `Vec<T>`.
+/// If additional type parameters are added (e.g., to support custom allocators),
+/// overriding their defaults may change the behavior.
+///
+/// Most fundamentally, `Vec` is and always will be a (pointer, capacity, length)
+/// triplet. No more, no less. The order of these fields is completely
+/// unspecified, and you should use the appropriate methods to modify these.
+/// The pointer will never be null, so this type is null-pointer-optimized.
+///
+/// However, the pointer might not actually point to allocated memory. In particular,
+/// if you construct a `Vec` with capacity 0 via [`Vec::new`], [`vec![]`][`vec!`],
+/// [`Vec::with_capacity(0)`][`Vec::with_capacity`], or by calling [`shrink_to_fit`]
+/// on an empty Vec, it will not allocate memory. Similarly, if you store zero-sized
+/// types inside a `Vec`, it will not allocate space for them. *Note that in this case
+/// the `Vec` might not report a [`capacity`] of 0*. `Vec` will allocate if and only
+/// if <code>[mem::size_of::\<T>]\() * [capacity]\() > 0</code>. In general, `Vec`'s allocation
+/// details are very subtle --- if you intend to allocate memory using a `Vec`
+/// and use it for something else (either to pass to unsafe code, or to build your
+/// own memory-backed collection), be sure to deallocate this memory by using
+/// `from_raw_parts` to recover the `Vec` and then dropping it.
+///
+/// If a `Vec` *has* allocated memory, then the memory it points to is on the heap
+/// (as defined by the allocator Rust is configured to use by default), and its
+/// pointer points to [`len`] initialized, contiguous elements in order (what
+/// you would see if you coerced it to a slice), followed by <code>[capacity] - [len]</code>
+/// logically uninitialized, contiguous elements.
+///
+/// A vector containing the elements `'a'` and `'b'` with capacity 4 can be
+/// visualized as below. The top part is the `Vec` struct, it contains a
+/// pointer to the head of the allocation in the heap, length and capacity.
+/// The bottom part is the allocation on the heap, a contiguous memory block.
+///
+/// ```text
+/// ptr len capacity
+/// +--------+--------+--------+
+/// | 0x0123 | 2 | 4 |
+/// +--------+--------+--------+
+/// |
+/// v
+/// Heap +--------+--------+--------+--------+
+/// | 'a' | 'b' | uninit | uninit |
+/// +--------+--------+--------+--------+
+/// ```
+///
+/// - **uninit** represents memory that is not initialized, see [`MaybeUninit`].
+/// - Note: the ABI is not stable and `Vec` makes no guarantees about its memory
+/// layout (including the order of fields).
+///
+/// `Vec` will never perform a "small optimization" where elements are actually
+/// stored on the stack for two reasons:
+///
+/// * It would make it more difficult for unsafe code to correctly manipulate
+/// a `Vec`. The contents of a `Vec` wouldn't have a stable address if it were
+/// only moved, and it would be more difficult to determine if a `Vec` had
+/// actually allocated memory.
+///
+/// * It would penalize the general case, incurring an additional branch
+/// on every access.
+///
+/// `Vec` will never automatically shrink itself, even if completely empty. This
+/// ensures no unnecessary allocations or deallocations occur. Emptying a `Vec`
+/// and then filling it back up to the same [`len`] should incur no calls to
+/// the allocator. If you wish to free up unused memory, use
+/// [`shrink_to_fit`] or [`shrink_to`].
+///
+/// [`push`] and [`insert`] will never (re)allocate if the reported capacity is
+/// sufficient. [`push`] and [`insert`] *will* (re)allocate if
+/// <code>[len] == [capacity]</code>. That is, the reported capacity is completely
+/// accurate, and can be relied on. It can even be used to manually free the memory
+/// allocated by a `Vec` if desired. Bulk insertion methods *may* reallocate, even
+/// when not necessary.
+///
+/// `Vec` does not guarantee any particular growth strategy when reallocating
+/// when full, nor when [`reserve`] is called. The current strategy is basic
+/// and it may prove desirable to use a non-constant growth factor. Whatever
+/// strategy is used will of course guarantee *O*(1) amortized [`push`].
+///
+/// `vec![x; n]`, `vec![a, b, c, d]`, and
+/// [`Vec::with_capacity(n)`][`Vec::with_capacity`], will all produce a `Vec`
+/// with exactly the requested capacity. If <code>[len] == [capacity]</code>,
+/// (as is the case for the [`vec!`] macro), then a `Vec<T>` can be converted to
+/// and from a [`Box<[T]>`][owned slice] without reallocating or moving the elements.
+///
+/// `Vec` will not specifically overwrite any data that is removed from it,
+/// but also won't specifically preserve it. Its uninitialized memory is
+/// scratch space that it may use however it wants. It will generally just do
+/// whatever is most efficient or otherwise easy to implement. Do not rely on
+/// removed data to be erased for security purposes. Even if you drop a `Vec`, its
+/// buffer may simply be reused by another allocation. Even if you zero a `Vec`'s memory
+/// first, that might not actually happen because the optimizer does not consider
+/// this a side-effect that must be preserved. There is one case which we will
+/// not break, however: using `unsafe` code to write to the excess capacity,
+/// and then increasing the length to match, is always valid.
+///
+/// Currently, `Vec` does not guarantee the order in which elements are dropped.
+/// The order has changed in the past and may change again.
+///
+/// [`get`]: ../../std/vec/struct.Vec.html#method.get
+/// [`get_mut`]: ../../std/vec/struct.Vec.html#method.get_mut
+/// [`String`]: crate::string::String
+/// [`&str`]: type@str
+/// [`shrink_to_fit`]: Vec::shrink_to_fit
+/// [`shrink_to`]: Vec::shrink_to
+/// [capacity]: Vec::capacity
+/// [`capacity`]: Vec::capacity
+/// [mem::size_of::\<T>]: core::mem::size_of
+/// [len]: Vec::len
+/// [`len`]: Vec::len
+/// [`push`]: Vec::push
+/// [`insert`]: Vec::insert
+/// [`reserve`]: Vec::reserve
+/// [`MaybeUninit`]: core::mem::MaybeUninit
+/// [owned slice]: Box
+#[stable(feature = "rust1", since = "1.0.0")]
+#[cfg_attr(not(test), rustc_diagnostic_item = "Vec")]
+#[rustc_insignificant_dtor]
+pub struct Vec<T, #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator = Global> {
+ buf: RawVec<T, A>,
+ len: usize,
+}
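
A minimal sketch (editorial, not part of the patch) exercising two of the guarantees documented on `Vec` above: the vector never shrinks automatically, and emptying it and refilling it to the same `len` should not touch the allocator, so the buffer address stays stable.

```
let mut v = vec![1u32, 2, 3, 4]; // `vec!` gives exactly the requested capacity
let cap = v.capacity();
let ptr = v.as_ptr();

v.clear();
assert_eq!(v.capacity(), cap); // no automatic shrinking, even when empty

v.extend(0u32..4); // refill up to the old `len`: no allocator calls expected
assert_eq!(v.capacity(), cap);
assert_eq!(v.as_ptr(), ptr); // the same allocation is reused
```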
+
+////////////////////////////////////////////////////////////////////////////////
+// Inherent methods
+////////////////////////////////////////////////////////////////////////////////
+
+impl<T> Vec<T> {
+ /// Constructs a new, empty `Vec<T>`.
+ ///
+ /// The vector will not allocate until elements are pushed onto it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # #![allow(unused_mut)]
+ /// let mut vec: Vec<i32> = Vec::new();
+ /// ```
+ #[inline]
+ #[rustc_const_stable(feature = "const_vec_new", since = "1.39.0")]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ pub const fn new() -> Self {
+ Vec { buf: RawVec::NEW, len: 0 }
+ }
+
+ /// Constructs a new, empty `Vec<T>` with at least the specified capacity.
+ ///
+ /// The vector will be able to hold at least `capacity` elements without
+ /// reallocating. This method is allowed to allocate for more elements than
+ /// `capacity`. If `capacity` is 0, the vector will not allocate.
+ ///
+ /// It is important to note that although the returned vector has the
+ /// minimum *capacity* specified, the vector will have a zero *length*. For
+ /// an explanation of the difference between length and capacity, see
+ /// *[Capacity and reallocation]*.
+ ///
+    /// If it is important to know the exact allocated capacity of a `Vec`,
+ /// always use the [`capacity`] method after construction.
+ ///
+ /// For `Vec<T>` where `T` is a zero-sized type, there will be no allocation
+ /// and the capacity will always be `usize::MAX`.
+ ///
+ /// [Capacity and reallocation]: #capacity-and-reallocation
+ /// [`capacity`]: Vec::capacity
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity exceeds `isize::MAX` bytes.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = Vec::with_capacity(10);
+ ///
+ /// // The vector contains no items, even though it has capacity for more
+ /// assert_eq!(vec.len(), 0);
+ /// assert!(vec.capacity() >= 10);
+ ///
+ /// // These are all done without reallocating...
+ /// for i in 0..10 {
+ /// vec.push(i);
+ /// }
+ /// assert_eq!(vec.len(), 10);
+ /// assert!(vec.capacity() >= 10);
+ ///
+ /// // ...but this may make the vector reallocate
+ /// vec.push(11);
+ /// assert_eq!(vec.len(), 11);
+ /// assert!(vec.capacity() >= 11);
+ ///
+ /// // A vector of a zero-sized type will always over-allocate, since no
+ /// // allocation is necessary
+ /// let vec_units = Vec::<()>::with_capacity(10);
+ /// assert_eq!(vec_units.capacity(), usize::MAX);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[must_use]
+ pub fn with_capacity(capacity: usize) -> Self {
+ Self::with_capacity_in(capacity, Global)
+ }
+
+ /// Creates a `Vec<T>` directly from the raw components of another vector.
+ ///
+ /// # Safety
+ ///
+ /// This is highly unsafe, due to the number of invariants that aren't
+ /// checked:
+ ///
+ /// * `ptr` needs to have been previously allocated via [`String`]/`Vec<T>`
+ /// (at least, it's highly likely to be incorrect if it wasn't).
+ /// * `T` needs to have the same alignment as what `ptr` was allocated with.
+ /// (`T` having a less strict alignment is not sufficient, the alignment really
+ /// needs to be equal to satisfy the [`dealloc`] requirement that memory must be
+ /// allocated and deallocated with the same layout.)
+    /// * The size of `T` times the `capacity` (i.e. the allocated size in bytes) needs
+ /// to be the same size as the pointer was allocated with. (Because similar to
+ /// alignment, [`dealloc`] must be called with the same layout `size`.)
+ /// * `length` needs to be less than or equal to `capacity`.
+ ///
+ /// Violating these may cause problems like corrupting the allocator's
+ /// internal data structures. For example it is normally **not** safe
+ /// to build a `Vec<u8>` from a pointer to a C `char` array with length
+ /// `size_t`, doing so is only safe if the array was initially allocated by
+ /// a `Vec` or `String`.
+ /// It's also not safe to build one from a `Vec<u16>` and its length, because
+ /// the allocator cares about the alignment, and these two types have different
+ /// alignments. The buffer was allocated with alignment 2 (for `u16`), but after
+ /// turning it into a `Vec<u8>` it'll be deallocated with alignment 1. To avoid
+ /// these issues, it is often preferable to do casting/transmuting using
+ /// [`slice::from_raw_parts`] instead.
+ ///
+ /// The ownership of `ptr` is effectively transferred to the
+ /// `Vec<T>` which may then deallocate, reallocate or change the
+ /// contents of memory pointed to by the pointer at will. Ensure
+ /// that nothing else uses the pointer after calling this
+ /// function.
+ ///
+ /// [`String`]: crate::string::String
+ /// [`dealloc`]: crate::alloc::GlobalAlloc::dealloc
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::ptr;
+ /// use std::mem;
+ ///
+ /// let v = vec![1, 2, 3];
+ ///
+ // FIXME Update this when vec_into_raw_parts is stabilized
+ /// // Prevent running `v`'s destructor so we are in complete control
+ /// // of the allocation.
+ /// let mut v = mem::ManuallyDrop::new(v);
+ ///
+ /// // Pull out the various important pieces of information about `v`
+ /// let p = v.as_mut_ptr();
+ /// let len = v.len();
+ /// let cap = v.capacity();
+ ///
+ /// unsafe {
+ /// // Overwrite memory with 4, 5, 6
+ /// for i in 0..len as isize {
+ /// ptr::write(p.offset(i), 4 + i);
+ /// }
+ ///
+ /// // Put everything back together into a Vec
+ /// let rebuilt = Vec::from_raw_parts(p, len, cap);
+ /// assert_eq!(rebuilt, [4, 5, 6]);
+ /// }
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub unsafe fn from_raw_parts(ptr: *mut T, length: usize, capacity: usize) -> Self {
+ unsafe { Self::from_raw_parts_in(ptr, length, capacity, Global) }
+ }
+}
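
As a counterpart to the `from_raw_parts` safety discussion above, here is a hedged sketch of the safer alternative it mentions: viewing an existing buffer through `slice::from_raw_parts` instead of constructing a new `Vec` that would take ownership with a mismatched layout. The `u16_bytes` helper is hypothetical, for illustration only.

```
use std::{mem, slice};

// Hypothetical helper: borrow a `&[u16]`'s storage as bytes without handing a
// `Vec<u8>` ownership of an allocation that was made with `u16`'s layout.
fn u16_bytes(v: &[u16]) -> &[u8] {
    // SAFETY: `v` points at `v.len() * size_of::<u16>()` initialized bytes, and
    // the returned slice borrows `v`, so it cannot outlive the allocation.
    unsafe { slice::from_raw_parts(v.as_ptr().cast::<u8>(), v.len() * mem::size_of::<u16>()) }
}

let v: Vec<u16> = vec![0x0102, 0x0304];
assert_eq!(u16_bytes(&v).len(), 4);
```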
+
+impl<T, A: Allocator> Vec<T, A> {
+ /// Constructs a new, empty `Vec<T, A>`.
+ ///
+ /// The vector will not allocate until elements are pushed onto it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// # #[allow(unused_mut)]
+ /// let mut vec: Vec<i32, _> = Vec::new_in(System);
+ /// ```
+ #[inline]
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ pub const fn new_in(alloc: A) -> Self {
+ Vec { buf: RawVec::new_in(alloc), len: 0 }
+ }
+
+ /// Constructs a new, empty `Vec<T, A>` with at least the specified capacity
+ /// with the provided allocator.
+ ///
+ /// The vector will be able to hold at least `capacity` elements without
+ /// reallocating. This method is allowed to allocate for more elements than
+ /// `capacity`. If `capacity` is 0, the vector will not allocate.
+ ///
+ /// It is important to note that although the returned vector has the
+ /// minimum *capacity* specified, the vector will have a zero *length*. For
+ /// an explanation of the difference between length and capacity, see
+ /// *[Capacity and reallocation]*.
+ ///
+    /// If it is important to know the exact allocated capacity of a `Vec`,
+ /// always use the [`capacity`] method after construction.
+ ///
+ /// For `Vec<T, A>` where `T` is a zero-sized type, there will be no allocation
+ /// and the capacity will always be `usize::MAX`.
+ ///
+ /// [Capacity and reallocation]: #capacity-and-reallocation
+ /// [`capacity`]: Vec::capacity
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity exceeds `isize::MAX` bytes.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let mut vec = Vec::with_capacity_in(10, System);
+ ///
+ /// // The vector contains no items, even though it has capacity for more
+ /// assert_eq!(vec.len(), 0);
+ /// assert_eq!(vec.capacity(), 10);
+ ///
+ /// // These are all done without reallocating...
+ /// for i in 0..10 {
+ /// vec.push(i);
+ /// }
+ /// assert_eq!(vec.len(), 10);
+ /// assert_eq!(vec.capacity(), 10);
+ ///
+ /// // ...but this may make the vector reallocate
+ /// vec.push(11);
+ /// assert_eq!(vec.len(), 11);
+ /// assert!(vec.capacity() >= 11);
+ ///
+ /// // A vector of a zero-sized type will always over-allocate, since no
+ /// // allocation is necessary
+ /// let vec_units = Vec::<(), System>::with_capacity_in(10, System);
+ /// assert_eq!(vec_units.capacity(), usize::MAX);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ pub fn with_capacity_in(capacity: usize, alloc: A) -> Self {
+ Vec { buf: RawVec::with_capacity_in(capacity, alloc), len: 0 }
+ }
+
+ /// Creates a `Vec<T, A>` directly from the raw components of another vector.
+ ///
+ /// # Safety
+ ///
+ /// This is highly unsafe, due to the number of invariants that aren't
+ /// checked:
+ ///
+ /// * `ptr` needs to have been previously allocated via [`String`]/`Vec<T>`
+ /// (at least, it's highly likely to be incorrect if it wasn't).
+ /// * `T` needs to have the same size and alignment as what `ptr` was allocated with.
+ /// (`T` having a less strict alignment is not sufficient, the alignment really
+ /// needs to be equal to satisfy the [`dealloc`] requirement that memory must be
+ /// allocated and deallocated with the same layout.)
+ /// * `length` needs to be less than or equal to `capacity`.
+ /// * `capacity` needs to be the capacity that the pointer was allocated with.
+ ///
+ /// Violating these may cause problems like corrupting the allocator's
+ /// internal data structures. For example it is **not** safe
+ /// to build a `Vec<u8>` from a pointer to a C `char` array with length `size_t`.
+ /// It's also not safe to build one from a `Vec<u16>` and its length, because
+ /// the allocator cares about the alignment, and these two types have different
+ /// alignments. The buffer was allocated with alignment 2 (for `u16`), but after
+ /// turning it into a `Vec<u8>` it'll be deallocated with alignment 1.
+ ///
+ /// The ownership of `ptr` is effectively transferred to the
+ /// `Vec<T>` which may then deallocate, reallocate or change the
+ /// contents of memory pointed to by the pointer at will. Ensure
+ /// that nothing else uses the pointer after calling this
+ /// function.
+ ///
+ /// [`String`]: crate::string::String
+ /// [`dealloc`]: crate::alloc::GlobalAlloc::dealloc
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// use std::ptr;
+ /// use std::mem;
+ ///
+ /// let mut v = Vec::with_capacity_in(3, System);
+ /// v.push(1);
+ /// v.push(2);
+ /// v.push(3);
+ ///
+ // FIXME Update this when vec_into_raw_parts is stabilized
+ /// // Prevent running `v`'s destructor so we are in complete control
+ /// // of the allocation.
+ /// let mut v = mem::ManuallyDrop::new(v);
+ ///
+ /// // Pull out the various important pieces of information about `v`
+ /// let p = v.as_mut_ptr();
+ /// let len = v.len();
+ /// let cap = v.capacity();
+ /// let alloc = v.allocator();
+ ///
+ /// unsafe {
+ /// // Overwrite memory with 4, 5, 6
+ /// for i in 0..len as isize {
+ /// ptr::write(p.offset(i), 4 + i);
+ /// }
+ ///
+ /// // Put everything back together into a Vec
+ /// let rebuilt = Vec::from_raw_parts_in(p, len, cap, alloc.clone());
+ /// assert_eq!(rebuilt, [4, 5, 6]);
+ /// }
+ /// ```
+ #[inline]
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ pub unsafe fn from_raw_parts_in(ptr: *mut T, length: usize, capacity: usize, alloc: A) -> Self {
+ unsafe { Vec { buf: RawVec::from_raw_parts_in(ptr, capacity, alloc), len: length } }
+ }
+
+ /// Decomposes a `Vec<T>` into its raw components.
+ ///
+ /// Returns the raw pointer to the underlying data, the length of
+ /// the vector (in elements), and the allocated capacity of the
+ /// data (in elements). These are the same arguments in the same
+ /// order as the arguments to [`from_raw_parts`].
+ ///
+ /// After calling this function, the caller is responsible for the
+ /// memory previously managed by the `Vec`. The only way to do
+ /// this is to convert the raw pointer, length, and capacity back
+ /// into a `Vec` with the [`from_raw_parts`] function, allowing
+ /// the destructor to perform the cleanup.
+ ///
+ /// [`from_raw_parts`]: Vec::from_raw_parts
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(vec_into_raw_parts)]
+ /// let v: Vec<i32> = vec![-1, 0, 1];
+ ///
+ /// let (ptr, len, cap) = v.into_raw_parts();
+ ///
+ /// let rebuilt = unsafe {
+ /// // We can now make changes to the components, such as
+ /// // transmuting the raw pointer to a compatible type.
+ /// let ptr = ptr as *mut u32;
+ ///
+ /// Vec::from_raw_parts(ptr, len, cap)
+ /// };
+ /// assert_eq!(rebuilt, [4294967295, 0, 1]);
+ /// ```
+ #[unstable(feature = "vec_into_raw_parts", reason = "new API", issue = "65816")]
+ pub fn into_raw_parts(self) -> (*mut T, usize, usize) {
+ let mut me = ManuallyDrop::new(self);
+ (me.as_mut_ptr(), me.len(), me.capacity())
+ }
+
+ /// Decomposes a `Vec<T>` into its raw components.
+ ///
+ /// Returns the raw pointer to the underlying data, the length of the vector (in elements),
+ /// the allocated capacity of the data (in elements), and the allocator. These are the same
+ /// arguments in the same order as the arguments to [`from_raw_parts_in`].
+ ///
+ /// After calling this function, the caller is responsible for the
+ /// memory previously managed by the `Vec`. The only way to do
+ /// this is to convert the raw pointer, length, and capacity back
+ /// into a `Vec` with the [`from_raw_parts_in`] function, allowing
+ /// the destructor to perform the cleanup.
+ ///
+ /// [`from_raw_parts_in`]: Vec::from_raw_parts_in
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(allocator_api, vec_into_raw_parts)]
+ ///
+ /// use std::alloc::System;
+ ///
+ /// let mut v: Vec<i32, System> = Vec::new_in(System);
+ /// v.push(-1);
+ /// v.push(0);
+ /// v.push(1);
+ ///
+ /// let (ptr, len, cap, alloc) = v.into_raw_parts_with_alloc();
+ ///
+ /// let rebuilt = unsafe {
+ /// // We can now make changes to the components, such as
+ /// // transmuting the raw pointer to a compatible type.
+ /// let ptr = ptr as *mut u32;
+ ///
+ /// Vec::from_raw_parts_in(ptr, len, cap, alloc)
+ /// };
+ /// assert_eq!(rebuilt, [4294967295, 0, 1]);
+ /// ```
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ // #[unstable(feature = "vec_into_raw_parts", reason = "new API", issue = "65816")]
+ pub fn into_raw_parts_with_alloc(self) -> (*mut T, usize, usize, A) {
+ let mut me = ManuallyDrop::new(self);
+ let len = me.len();
+ let capacity = me.capacity();
+ let ptr = me.as_mut_ptr();
+ let alloc = unsafe { ptr::read(me.allocator()) };
+ (ptr, len, capacity, alloc)
+ }
+
+ /// Returns the number of elements the vector can hold without
+ /// reallocating.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let vec: Vec<i32> = Vec::with_capacity(10);
+ /// assert_eq!(vec.capacity(), 10);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn capacity(&self) -> usize {
+ self.buf.capacity()
+ }
+
+ /// Reserves capacity for at least `additional` more elements to be inserted
+ /// in the given `Vec<T>`. The collection may reserve more space to
+ /// speculatively avoid frequent reallocations. After calling `reserve`,
+ /// capacity will be greater than or equal to `self.len() + additional`.
+ /// Does nothing if capacity is already sufficient.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity exceeds `isize::MAX` bytes.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![1];
+ /// vec.reserve(10);
+ /// assert!(vec.capacity() >= 11);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn reserve(&mut self, additional: usize) {
+ self.buf.reserve(self.len, additional);
+ }
+
+ /// Reserves the minimum capacity for at least `additional` more elements to
+ /// be inserted in the given `Vec<T>`. Unlike [`reserve`], this will not
+ /// deliberately over-allocate to speculatively avoid frequent allocations.
+ /// After calling `reserve_exact`, capacity will be greater than or equal to
+ /// `self.len() + additional`. Does nothing if the capacity is already
+ /// sufficient.
+ ///
+ /// Note that the allocator may give the collection more space than it
+ /// requests. Therefore, capacity can not be relied upon to be precisely
+ /// minimal. Prefer [`reserve`] if future insertions are expected.
+ ///
+ /// [`reserve`]: Vec::reserve
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity exceeds `isize::MAX` bytes.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![1];
+ /// vec.reserve_exact(10);
+ /// assert!(vec.capacity() >= 11);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn reserve_exact(&mut self, additional: usize) {
+ self.buf.reserve_exact(self.len, additional);
+ }
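
A small sketch contrasting `reserve` and `reserve_exact` as described above; since the allocator may round either request up, only lower bounds are asserted.

```
let mut amortized: Vec<u8> = Vec::new();
amortized.reserve(100); // may over-allocate to amortize future growth

let mut exact: Vec<u8> = Vec::new();
exact.reserve_exact(100); // requests no deliberate over-allocation

assert!(amortized.capacity() >= 100);
assert!(exact.capacity() >= 100);
```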
+
+ /// Tries to reserve capacity for at least `additional` more elements to be inserted
+ /// in the given `Vec<T>`. The collection may reserve more space to speculatively avoid
+ /// frequent reallocations. After calling `try_reserve`, capacity will be
+ /// greater than or equal to `self.len() + additional` if it returns
+ /// `Ok(())`. Does nothing if capacity is already sufficient.
+ ///
+ /// # Errors
+ ///
+ /// If the capacity overflows, or the allocator reports a failure, then an error
+ /// is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::TryReserveError;
+ ///
+ /// fn process_data(data: &[u32]) -> Result<Vec<u32>, TryReserveError> {
+ /// let mut output = Vec::new();
+ ///
+ /// // Pre-reserve the memory, exiting if we can't
+ /// output.try_reserve(data.len())?;
+ ///
+ /// // Now we know this can't OOM in the middle of our complex work
+ /// output.extend(data.iter().map(|&val| {
+ /// val * 2 + 5 // very complicated
+ /// }));
+ ///
+ /// Ok(output)
+ /// }
+ /// # process_data(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
+ /// ```
+ #[stable(feature = "try_reserve", since = "1.57.0")]
+ pub fn try_reserve(&mut self, additional: usize) -> Result<(), TryReserveError> {
+ self.buf.try_reserve(self.len, additional)
+ }
+
+ /// Tries to reserve the minimum capacity for at least `additional`
+ /// elements to be inserted in the given `Vec<T>`. Unlike [`try_reserve`],
+ /// this will not deliberately over-allocate to speculatively avoid frequent
+ /// allocations. After calling `try_reserve_exact`, capacity will be greater
+ /// than or equal to `self.len() + additional` if it returns `Ok(())`.
+ /// Does nothing if the capacity is already sufficient.
+ ///
+ /// Note that the allocator may give the collection more space than it
+ /// requests. Therefore, capacity can not be relied upon to be precisely
+ /// minimal. Prefer [`try_reserve`] if future insertions are expected.
+ ///
+ /// [`try_reserve`]: Vec::try_reserve
+ ///
+ /// # Errors
+ ///
+ /// If the capacity overflows, or the allocator reports a failure, then an error
+ /// is returned.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::collections::TryReserveError;
+ ///
+ /// fn process_data(data: &[u32]) -> Result<Vec<u32>, TryReserveError> {
+ /// let mut output = Vec::new();
+ ///
+ /// // Pre-reserve the memory, exiting if we can't
+ /// output.try_reserve_exact(data.len())?;
+ ///
+ /// // Now we know this can't OOM in the middle of our complex work
+ /// output.extend(data.iter().map(|&val| {
+ /// val * 2 + 5 // very complicated
+ /// }));
+ ///
+ /// Ok(output)
+ /// }
+ /// # process_data(&[1, 2, 3]).expect("why is the test harness OOMing on 12 bytes?");
+ /// ```
+ #[stable(feature = "try_reserve", since = "1.57.0")]
+ pub fn try_reserve_exact(&mut self, additional: usize) -> Result<(), TryReserveError> {
+ self.buf.try_reserve_exact(self.len, additional)
+ }
+
+ /// Shrinks the capacity of the vector as much as possible.
+ ///
+ /// It will drop down as close as possible to the length but the allocator
+ /// may still inform the vector that there is space for a few more elements.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = Vec::with_capacity(10);
+ /// vec.extend([1, 2, 3]);
+ /// assert_eq!(vec.capacity(), 10);
+ /// vec.shrink_to_fit();
+ /// assert!(vec.capacity() >= 3);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn shrink_to_fit(&mut self) {
+ // The capacity is never less than the length, and there's nothing to do when
+ // they are equal, so we can avoid the panic case in `RawVec::shrink_to_fit`
+ // by only calling it with a greater capacity.
+ if self.capacity() > self.len {
+ self.buf.shrink_to_fit(self.len);
+ }
+ }
+
+ /// Shrinks the capacity of the vector with a lower bound.
+ ///
+ /// The capacity will remain at least as large as both the length
+ /// and the supplied value.
+ ///
+ /// If the current capacity is less than the lower limit, this is a no-op.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = Vec::with_capacity(10);
+ /// vec.extend([1, 2, 3]);
+ /// assert_eq!(vec.capacity(), 10);
+ /// vec.shrink_to(4);
+ /// assert!(vec.capacity() >= 4);
+ /// vec.shrink_to(0);
+ /// assert!(vec.capacity() >= 3);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "shrink_to", since = "1.56.0")]
+ pub fn shrink_to(&mut self, min_capacity: usize) {
+ if self.capacity() > min_capacity {
+ self.buf.shrink_to_fit(cmp::max(self.len, min_capacity));
+ }
+ }
+
+ /// Converts the vector into [`Box<[T]>`][owned slice].
+ ///
+ /// Note that this will drop any excess capacity.
+ ///
+ /// [owned slice]: Box
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let v = vec![1, 2, 3];
+ ///
+ /// let slice = v.into_boxed_slice();
+ /// ```
+ ///
+ /// Any excess capacity is removed:
+ ///
+ /// ```
+ /// let mut vec = Vec::with_capacity(10);
+ /// vec.extend([1, 2, 3]);
+ ///
+ /// assert_eq!(vec.capacity(), 10);
+ /// let slice = vec.into_boxed_slice();
+ /// assert_eq!(slice.into_vec().capacity(), 3);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn into_boxed_slice(mut self) -> Box<[T], A> {
+ unsafe {
+ self.shrink_to_fit();
+ let me = ManuallyDrop::new(self);
+ let buf = ptr::read(&me.buf);
+ let len = me.len();
+ buf.into_box(len).assume_init()
+ }
+ }
+
+ /// Shortens the vector, keeping the first `len` elements and dropping
+ /// the rest.
+ ///
+ /// If `len` is greater than the vector's current length, this has no
+ /// effect.
+ ///
+ /// The [`drain`] method can emulate `truncate`, but causes the excess
+ /// elements to be returned instead of dropped.
+ ///
+ /// Note that this method has no effect on the allocated capacity
+ /// of the vector.
+ ///
+ /// # Examples
+ ///
+ /// Truncating a five element vector to two elements:
+ ///
+ /// ```
+ /// let mut vec = vec![1, 2, 3, 4, 5];
+ /// vec.truncate(2);
+ /// assert_eq!(vec, [1, 2]);
+ /// ```
+ ///
+ /// No truncation occurs when `len` is greater than the vector's current
+ /// length:
+ ///
+ /// ```
+ /// let mut vec = vec![1, 2, 3];
+ /// vec.truncate(8);
+ /// assert_eq!(vec, [1, 2, 3]);
+ /// ```
+ ///
+ /// Truncating when `len == 0` is equivalent to calling the [`clear`]
+ /// method.
+ ///
+ /// ```
+ /// let mut vec = vec![1, 2, 3];
+ /// vec.truncate(0);
+ /// assert_eq!(vec, []);
+ /// ```
+ ///
+ /// [`clear`]: Vec::clear
+ /// [`drain`]: Vec::drain
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn truncate(&mut self, len: usize) {
+ // This is safe because:
+ //
+ // * the slice passed to `drop_in_place` is valid; the `len > self.len`
+ // case avoids creating an invalid slice, and
+ // * the `len` of the vector is shrunk before calling `drop_in_place`,
+ // such that no value will be dropped twice in case `drop_in_place`
+ // were to panic once (if it panics twice, the program aborts).
+ unsafe {
+ // Note: It's intentional that this is `>` and not `>=`.
+ // Changing it to `>=` has negative performance
+ // implications in some cases. See #78884 for more.
+ if len > self.len {
+ return;
+ }
+ let remaining_len = self.len - len;
+ let s = ptr::slice_from_raw_parts_mut(self.as_mut_ptr().add(len), remaining_len);
+ self.len = len;
+ ptr::drop_in_place(s);
+ }
+ }
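
A brief sketch of the note above that `drain` can emulate `truncate`, while handing back the removed elements instead of dropping them.

```
let mut a = vec![1, 2, 3, 4, 5];
let mut b = a.clone();

a.truncate(2); // excess elements are dropped
let tail: Vec<_> = b.drain(2..).collect(); // excess elements are returned

assert_eq!(a, [1, 2]);
assert_eq!(b, [1, 2]);
assert_eq!(tail, [3, 4, 5]);
```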
+
+ /// Extracts a slice containing the entire vector.
+ ///
+ /// Equivalent to `&s[..]`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::io::{self, Write};
+ /// let buffer = vec![1, 2, 3, 5, 8];
+ /// io::sink().write(buffer.as_slice()).unwrap();
+ /// ```
+ #[inline]
+ #[stable(feature = "vec_as_slice", since = "1.7.0")]
+ pub fn as_slice(&self) -> &[T] {
+ self
+ }
+
+ /// Extracts a mutable slice of the entire vector.
+ ///
+ /// Equivalent to `&mut s[..]`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// use std::io::{self, Read};
+ /// let mut buffer = vec![0; 3];
+ /// io::repeat(0b101).read_exact(buffer.as_mut_slice()).unwrap();
+ /// ```
+ #[inline]
+ #[stable(feature = "vec_as_slice", since = "1.7.0")]
+ pub fn as_mut_slice(&mut self) -> &mut [T] {
+ self
+ }
+
+ /// Returns a raw pointer to the vector's buffer, or a dangling raw pointer
+ /// valid for zero sized reads if the vector didn't allocate.
+ ///
+ /// The caller must ensure that the vector outlives the pointer this
+ /// function returns, or else it will end up pointing to garbage.
+ /// Modifying the vector may cause its buffer to be reallocated,
+ /// which would also make any pointers to it invalid.
+ ///
+ /// The caller must also ensure that the memory the pointer (non-transitively) points to
+ /// is never written to (except inside an `UnsafeCell`) using this pointer or any pointer
+ /// derived from it. If you need to mutate the contents of the slice, use [`as_mut_ptr`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let x = vec![1, 2, 4];
+ /// let x_ptr = x.as_ptr();
+ ///
+ /// unsafe {
+ /// for i in 0..x.len() {
+ /// assert_eq!(*x_ptr.add(i), 1 << i);
+ /// }
+ /// }
+ /// ```
+ ///
+ /// [`as_mut_ptr`]: Vec::as_mut_ptr
+ #[stable(feature = "vec_as_ptr", since = "1.37.0")]
+ #[inline]
+ pub fn as_ptr(&self) -> *const T {
+ // We shadow the slice method of the same name to avoid going through
+ // `deref`, which creates an intermediate reference.
+ let ptr = self.buf.ptr();
+ unsafe {
+ assume(!ptr.is_null());
+ }
+ ptr
+ }
+
+ /// Returns an unsafe mutable pointer to the vector's buffer, or a dangling
+ /// raw pointer valid for zero sized reads if the vector didn't allocate.
+ ///
+ /// The caller must ensure that the vector outlives the pointer this
+ /// function returns, or else it will end up pointing to garbage.
+ /// Modifying the vector may cause its buffer to be reallocated,
+ /// which would also make any pointers to it invalid.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// // Allocate vector big enough for 4 elements.
+ /// let size = 4;
+ /// let mut x: Vec<i32> = Vec::with_capacity(size);
+ /// let x_ptr = x.as_mut_ptr();
+ ///
+ /// // Initialize elements via raw pointer writes, then set length.
+ /// unsafe {
+ /// for i in 0..size {
+ /// *x_ptr.add(i) = i as i32;
+ /// }
+ /// x.set_len(size);
+ /// }
+ /// assert_eq!(&*x, &[0, 1, 2, 3]);
+ /// ```
+ #[stable(feature = "vec_as_ptr", since = "1.37.0")]
+ #[inline]
+ pub fn as_mut_ptr(&mut self) -> *mut T {
+ // We shadow the slice method of the same name to avoid going through
+ // `deref_mut`, which creates an intermediate reference.
+ let ptr = self.buf.ptr();
+ unsafe {
+ assume(!ptr.is_null());
+ }
+ ptr
+ }
+
+ /// Returns a reference to the underlying allocator.
+ #[unstable(feature = "allocator_api", issue = "32838")]
+ #[inline]
+ pub fn allocator(&self) -> &A {
+ self.buf.allocator()
+ }
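
A minimal sketch of `allocator` (nightly `allocator_api`): the returned reference identifies the allocator the vector was constructed with, whether or not anything has been allocated yet.

```
#![feature(allocator_api)]

use std::alloc::{Global, System};

let default_vec: Vec<u8> = Vec::new();                  // backed by `Global`
let system_vec: Vec<u8, System> = Vec::new_in(System);

let _global: &Global = default_vec.allocator();
let _system: &System = system_vec.allocator();
```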
+
+ /// Forces the length of the vector to `new_len`.
+ ///
+ /// This is a low-level operation that maintains none of the normal
+ /// invariants of the type. Normally changing the length of a vector
+ /// is done using one of the safe operations instead, such as
+ /// [`truncate`], [`resize`], [`extend`], or [`clear`].
+ ///
+ /// [`truncate`]: Vec::truncate
+ /// [`resize`]: Vec::resize
+ /// [`extend`]: Extend::extend
+ /// [`clear`]: Vec::clear
+ ///
+ /// # Safety
+ ///
+ /// - `new_len` must be less than or equal to [`capacity()`].
+ /// - The elements at `old_len..new_len` must be initialized.
+ ///
+ /// [`capacity()`]: Vec::capacity
+ ///
+ /// # Examples
+ ///
+ /// This method can be useful for situations in which the vector
+ /// is serving as a buffer for other code, particularly over FFI:
+ ///
+ /// ```no_run
+ /// # #![allow(dead_code)]
+ /// # // This is just a minimal skeleton for the doc example;
+ /// # // don't use this as a starting point for a real library.
+ /// # pub struct StreamWrapper { strm: *mut std::ffi::c_void }
+ /// # const Z_OK: i32 = 0;
+ /// # extern "C" {
+ /// # fn deflateGetDictionary(
+ /// # strm: *mut std::ffi::c_void,
+ /// # dictionary: *mut u8,
+ /// # dictLength: *mut usize,
+ /// # ) -> i32;
+ /// # }
+ /// # impl StreamWrapper {
+ /// pub fn get_dictionary(&self) -> Option<Vec<u8>> {
+ /// // Per the FFI method's docs, "32768 bytes is always enough".
+ /// let mut dict = Vec::with_capacity(32_768);
+ /// let mut dict_length = 0;
+ /// // SAFETY: When `deflateGetDictionary` returns `Z_OK`, it holds that:
+ /// // 1. `dict_length` elements were initialized.
+ /// // 2. `dict_length` <= the capacity (32_768)
+ /// // which makes `set_len` safe to call.
+ /// unsafe {
+ /// // Make the FFI call...
+ /// let r = deflateGetDictionary(self.strm, dict.as_mut_ptr(), &mut dict_length);
+ /// if r == Z_OK {
+ /// // ...and update the length to what was initialized.
+ /// dict.set_len(dict_length);
+ /// Some(dict)
+ /// } else {
+ /// None
+ /// }
+ /// }
+ /// }
+ /// # }
+ /// ```
+ ///
+ /// While the following example is sound, there is a memory leak since
+ /// the inner vectors were not freed prior to the `set_len` call:
+ ///
+ /// ```
+ /// let mut vec = vec![vec![1, 0, 0],
+ /// vec![0, 1, 0],
+ /// vec![0, 0, 1]];
+ /// // SAFETY:
+ /// // 1. `old_len..0` is empty so no elements need to be initialized.
+ /// // 2. `0 <= capacity` always holds whatever `capacity` is.
+ /// unsafe {
+ /// vec.set_len(0);
+ /// }
+ /// ```
+ ///
+ /// Normally, here, one would use [`clear`] instead to correctly drop
+ /// the contents and thus not leak memory.
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub unsafe fn set_len(&mut self, new_len: usize) {
+ debug_assert!(new_len <= self.capacity());
+
+ self.len = new_len;
+ }
+
+ /// Removes an element from the vector and returns it.
+ ///
+ /// The removed element is replaced by the last element of the vector.
+ ///
+ /// This does not preserve ordering, but is *O*(1).
+ /// If you need to preserve the element order, use [`remove`] instead.
+ ///
+ /// [`remove`]: Vec::remove
+ ///
+ /// # Panics
+ ///
+ /// Panics if `index` is out of bounds.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = vec!["foo", "bar", "baz", "qux"];
+ ///
+ /// assert_eq!(v.swap_remove(1), "bar");
+ /// assert_eq!(v, ["foo", "qux", "baz"]);
+ ///
+ /// assert_eq!(v.swap_remove(0), "foo");
+ /// assert_eq!(v, ["baz", "qux"]);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn swap_remove(&mut self, index: usize) -> T {
+ #[cold]
+ #[inline(never)]
+ fn assert_failed(index: usize, len: usize) -> ! {
+ panic!("swap_remove index (is {index}) should be < len (is {len})");
+ }
+
+ let len = self.len();
+ if index >= len {
+ assert_failed(index, len);
+ }
+ unsafe {
+ // We replace self[index] with the last element. Note that if the
+ // bounds check above succeeds there must be a last element (which
+ // can be self[index] itself).
+ let value = ptr::read(self.as_ptr().add(index));
+ let base_ptr = self.as_mut_ptr();
+ ptr::copy(base_ptr.add(len - 1), base_ptr.add(index), 1);
+ self.set_len(len - 1);
+ value
+ }
+ }
+
+ /// Inserts an element at position `index` within the vector, shifting all
+ /// elements after it to the right.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `index > len`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![1, 2, 3];
+ /// vec.insert(1, 4);
+ /// assert_eq!(vec, [1, 4, 2, 3]);
+ /// vec.insert(4, 5);
+ /// assert_eq!(vec, [1, 4, 2, 3, 5]);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn insert(&mut self, index: usize, element: T) {
+ #[cold]
+ #[inline(never)]
+ fn assert_failed(index: usize, len: usize) -> ! {
+ panic!("insertion index (is {index}) should be <= len (is {len})");
+ }
+
+ let len = self.len();
+
+ // space for the new element
+ if len == self.buf.capacity() {
+ self.reserve(1);
+ }
+
+ unsafe {
+ // infallible
+ // The spot to put the new value
+ {
+ let p = self.as_mut_ptr().add(index);
+ if index < len {
+ // Shift everything over to make space. (Duplicating the
+ // `index`th element into two consecutive places.)
+ ptr::copy(p, p.offset(1), len - index);
+ } else if index == len {
+ // No elements need shifting.
+ } else {
+ assert_failed(index, len);
+ }
+ // Write it in, overwriting the first copy of the `index`th
+ // element.
+ ptr::write(p, element);
+ }
+ self.set_len(len + 1);
+ }
+ }
+
+ /// Removes and returns the element at position `index` within the vector,
+ /// shifting all elements after it to the left.
+ ///
+ /// Note: Because this shifts over the remaining elements, it has a
+ /// worst-case performance of *O*(*n*). If you don't need the order of elements
+ /// to be preserved, use [`swap_remove`] instead. If you'd like to remove
+ /// elements from the beginning of the `Vec`, consider using
+ /// [`VecDeque::pop_front`] instead.
+ ///
+ /// [`swap_remove`]: Vec::swap_remove
+ /// [`VecDeque::pop_front`]: crate::collections::VecDeque::pop_front
+ ///
+ /// # Panics
+ ///
+ /// Panics if `index` is out of bounds.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = vec![1, 2, 3];
+ /// assert_eq!(v.remove(1), 2);
+ /// assert_eq!(v, [1, 3]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[track_caller]
+ pub fn remove(&mut self, index: usize) -> T {
+ #[cold]
+ #[inline(never)]
+ #[track_caller]
+ fn assert_failed(index: usize, len: usize) -> ! {
+ panic!("removal index (is {index}) should be < len (is {len})");
+ }
+
+ let len = self.len();
+ if index >= len {
+ assert_failed(index, len);
+ }
+ unsafe {
+ // infallible
+ let ret;
+ {
+ // the place we are taking from.
+ let ptr = self.as_mut_ptr().add(index);
+ // copy it out, unsafely having a copy of the value on
+ // the stack and in the vector at the same time.
+ ret = ptr::read(ptr);
+
+ // Shift everything down to fill in that spot.
+ ptr::copy(ptr.offset(1), ptr, len - index - 1);
+ }
+ self.set_len(len - 1);
+ ret
+ }
+ }
+
+ /// Retains only the elements specified by the predicate.
+ ///
+ /// In other words, remove all elements `e` for which `f(&e)` returns `false`.
+ /// This method operates in place, visiting each element exactly once in the
+ /// original order, and preserves the order of the retained elements.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![1, 2, 3, 4];
+ /// vec.retain(|&x| x % 2 == 0);
+ /// assert_eq!(vec, [2, 4]);
+ /// ```
+ ///
+ /// Because the elements are visited exactly once in the original order,
+ /// external state may be used to decide which elements to keep.
+ ///
+ /// ```
+ /// let mut vec = vec![1, 2, 3, 4, 5];
+ /// let keep = [false, true, true, false, true];
+ /// let mut iter = keep.iter();
+ /// vec.retain(|_| *iter.next().unwrap());
+ /// assert_eq!(vec, [2, 3, 5]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn retain<F>(&mut self, mut f: F)
+ where
+ F: FnMut(&T) -> bool,
+ {
+ self.retain_mut(|elem| f(elem));
+ }
+
+ /// Retains only the elements specified by the predicate, passing a mutable reference to it.
+ ///
+ /// In other words, remove all elements `e` such that `f(&mut e)` returns `false`.
+ /// This method operates in place, visiting each element exactly once in the
+ /// original order, and preserves the order of the retained elements.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![1, 2, 3, 4];
+ /// vec.retain_mut(|x| if *x <= 3 {
+ /// *x += 1;
+ /// true
+ /// } else {
+ /// false
+ /// });
+ /// assert_eq!(vec, [2, 3, 4]);
+ /// ```
+ #[stable(feature = "vec_retain_mut", since = "1.61.0")]
+ pub fn retain_mut<F>(&mut self, mut f: F)
+ where
+ F: FnMut(&mut T) -> bool,
+ {
+ let original_len = self.len();
+ // Avoid double drop if the drop guard is not executed,
+ // since we may make some holes during the process.
+ unsafe { self.set_len(0) };
+
+ // Vec: [Kept, Kept, Hole, Hole, Hole, Hole, Unchecked, Unchecked]
+ // |<- processed len ->| ^- next to check
+ // |<- deleted cnt ->|
+ // |<- original_len ->|
+ // Kept: Elements which predicate returns true on.
+ // Hole: Moved or dropped element slot.
+ // Unchecked: Unchecked valid elements.
+ //
+        // This drop guard will be invoked when the predicate or an element's `drop` panics.
+        // It shifts unchecked elements to cover the holes and calls `set_len` with the correct length.
+        // In cases when the predicate and `drop` never panic, it will be optimized out.
+ struct BackshiftOnDrop<'a, T, A: Allocator> {
+ v: &'a mut Vec<T, A>,
+ processed_len: usize,
+ deleted_cnt: usize,
+ original_len: usize,
+ }
+
+ impl<T, A: Allocator> Drop for BackshiftOnDrop<'_, T, A> {
+ fn drop(&mut self) {
+ if self.deleted_cnt > 0 {
+ // SAFETY: Trailing unchecked items must be valid since we never touch them.
+ unsafe {
+ ptr::copy(
+ self.v.as_ptr().add(self.processed_len),
+ self.v.as_mut_ptr().add(self.processed_len - self.deleted_cnt),
+ self.original_len - self.processed_len,
+ );
+ }
+ }
+ // SAFETY: After filling holes, all items are in contiguous memory.
+ unsafe {
+ self.v.set_len(self.original_len - self.deleted_cnt);
+ }
+ }
+ }
+
+ let mut g = BackshiftOnDrop { v: self, processed_len: 0, deleted_cnt: 0, original_len };
+
+ fn process_loop<F, T, A: Allocator, const DELETED: bool>(
+ original_len: usize,
+ f: &mut F,
+ g: &mut BackshiftOnDrop<'_, T, A>,
+ ) where
+ F: FnMut(&mut T) -> bool,
+ {
+ while g.processed_len != original_len {
+ // SAFETY: Unchecked element must be valid.
+ let cur = unsafe { &mut *g.v.as_mut_ptr().add(g.processed_len) };
+ if !f(cur) {
+ // Advance early to avoid double drop if `drop_in_place` panicked.
+ g.processed_len += 1;
+ g.deleted_cnt += 1;
+ // SAFETY: We never touch this element again after dropped.
+ unsafe { ptr::drop_in_place(cur) };
+ // We already advanced the counter.
+ if DELETED {
+ continue;
+ } else {
+ break;
+ }
+ }
+ if DELETED {
+ // SAFETY: `deleted_cnt` > 0, so the hole slot must not overlap with current element.
+ // We use copy for move, and never touch this element again.
+ unsafe {
+ let hole_slot = g.v.as_mut_ptr().add(g.processed_len - g.deleted_cnt);
+ ptr::copy_nonoverlapping(cur, hole_slot, 1);
+ }
+ }
+ g.processed_len += 1;
+ }
+ }
+
+ // Stage 1: Nothing was deleted.
+ process_loop::<F, T, A, false>(original_len, &mut f, &mut g);
+
+ // Stage 2: Some elements were deleted.
+ process_loop::<F, T, A, true>(original_len, &mut f, &mut g);
+
+        // All items are processed. This can be optimized to `set_len` by LLVM.
+ drop(g);
+ }
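
A short sketch of the "visits each element exactly once" property documented for `retain`/`retain_mut` above: the predicate runs once per original element, whether the element is kept or removed.

```
let mut v = vec![1, 2, 3, 4, 5, 6];
let mut calls = 0;

v.retain_mut(|x| {
    calls += 1;
    *x % 2 == 0 // keep the even numbers
});

assert_eq!(v, [2, 4, 6]);
assert_eq!(calls, 6); // every original element was visited exactly once
```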
+
+ /// Removes all but the first of consecutive elements in the vector that resolve to the same
+ /// key.
+ ///
+ /// If the vector is sorted, this removes all duplicates.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![10, 20, 21, 30, 20];
+ ///
+ /// vec.dedup_by_key(|i| *i / 10);
+ ///
+ /// assert_eq!(vec, [10, 20, 30, 20]);
+ /// ```
+ #[stable(feature = "dedup_by", since = "1.16.0")]
+ #[inline]
+ pub fn dedup_by_key<F, K>(&mut self, mut key: F)
+ where
+ F: FnMut(&mut T) -> K,
+ K: PartialEq,
+ {
+ self.dedup_by(|a, b| key(a) == key(b))
+ }
+
+ /// Removes all but the first of consecutive elements in the vector satisfying a given equality
+ /// relation.
+ ///
+ /// The `same_bucket` function is passed references to two elements from the vector and
+ /// must determine if the elements compare equal. The elements are passed in opposite order
+ /// from their order in the slice, so if `same_bucket(a, b)` returns `true`, `a` is removed.
+ ///
+ /// If the vector is sorted, this removes all duplicates.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec!["foo", "bar", "Bar", "baz", "bar"];
+ ///
+ /// vec.dedup_by(|a, b| a.eq_ignore_ascii_case(b));
+ ///
+ /// assert_eq!(vec, ["foo", "bar", "baz", "bar"]);
+ /// ```
+ #[stable(feature = "dedup_by", since = "1.16.0")]
+ pub fn dedup_by<F>(&mut self, mut same_bucket: F)
+ where
+ F: FnMut(&mut T, &mut T) -> bool,
+ {
+ let len = self.len();
+ if len <= 1 {
+ return;
+ }
+
+ /* INVARIANT: vec.len() > read >= write > write-1 >= 0 */
+ struct FillGapOnDrop<'a, T, A: core::alloc::Allocator> {
+ /* Offset of the element we want to check if it is duplicate */
+ read: usize,
+
+ /* Offset of the place where we want to place the non-duplicate
+ * when we find it. */
+ write: usize,
+
+ /* The Vec that would need correction if `same_bucket` panicked */
+ vec: &'a mut Vec<T, A>,
+ }
+
+ impl<'a, T, A: core::alloc::Allocator> Drop for FillGapOnDrop<'a, T, A> {
+ fn drop(&mut self) {
+ /* This code gets executed when `same_bucket` panics */
+
+ /* SAFETY: invariant guarantees that `read - write`
+ * and `len - read` never overflow and that the copy is always
+ * in-bounds. */
+ unsafe {
+ let ptr = self.vec.as_mut_ptr();
+ let len = self.vec.len();
+
+ /* How many items were left when `same_bucket` panicked.
+ * Basically vec[read..].len() */
+ let items_left = len.wrapping_sub(self.read);
+
+ /* Pointer to first item in vec[write..write+items_left] slice */
+ let dropped_ptr = ptr.add(self.write);
+ /* Pointer to first item in vec[read..] slice */
+ let valid_ptr = ptr.add(self.read);
+
+ /* Copy `vec[read..]` to `vec[write..write+items_left]`.
+ * The slices can overlap, so `copy_nonoverlapping` cannot be used */
+ ptr::copy(valid_ptr, dropped_ptr, items_left);
+
+ /* How many items have been already dropped
+ * Basically vec[read..write].len() */
+ let dropped = self.read.wrapping_sub(self.write);
+
+ self.vec.set_len(len - dropped);
+ }
+ }
+ }
+
+ let mut gap = FillGapOnDrop { read: 1, write: 1, vec: self };
+ let ptr = gap.vec.as_mut_ptr();
+
+        /* Drop items while going through the Vec; this should be more efficient
+         * than doing slice partition_dedup + truncate */
+
+ /* SAFETY: Because of the invariant, read_ptr, prev_ptr and write_ptr
+ * are always in-bounds and read_ptr never aliases prev_ptr */
+ unsafe {
+ while gap.read < len {
+ let read_ptr = ptr.add(gap.read);
+ let prev_ptr = ptr.add(gap.write.wrapping_sub(1));
+
+ if same_bucket(&mut *read_ptr, &mut *prev_ptr) {
+ // Increase `gap.read` now since the drop may panic.
+ gap.read += 1;
+ /* We have found duplicate, drop it in-place */
+ ptr::drop_in_place(read_ptr);
+ } else {
+ let write_ptr = ptr.add(gap.write);
+
+ /* Because `read_ptr` can be equal to `write_ptr`, we either
+ * have to use `copy` or conditional `copy_nonoverlapping`.
+ * Looks like the first option is faster. */
+ ptr::copy(read_ptr, write_ptr, 1);
+
+ /* We have filled that place, so go further */
+ gap.write += 1;
+ gap.read += 1;
+ }
+ }
+
+ /* Technically we could let `gap` clean up with its Drop, but
+ * when `same_bucket` is guaranteed to not panic, this bloats a little
+ * the codegen, so we just do it manually */
+ gap.vec.set_len(gap.write);
+ mem::forget(gap);
+ }
+ }
+
+ /// Appends an element to the back of a collection.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity exceeds `isize::MAX` bytes.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![1, 2];
+ /// vec.push(3);
+ /// assert_eq!(vec, [1, 2, 3]);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn push(&mut self, value: T) {
+ // This will panic or abort if we would allocate > isize::MAX bytes
+ // or if the length increment would overflow for zero-sized types.
+ if self.len == self.buf.capacity() {
+ self.buf.reserve_for_push(self.len);
+ }
+ unsafe {
+ let end = self.as_mut_ptr().add(self.len);
+ ptr::write(end, value);
+ self.len += 1;
+ }
+ }
+
+ /// Removes the last element from a vector and returns it, or [`None`] if it
+ /// is empty.
+ ///
+ /// If you'd like to pop the first element, consider using
+ /// [`VecDeque::pop_front`] instead.
+ ///
+ /// [`VecDeque::pop_front`]: crate::collections::VecDeque::pop_front
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![1, 2, 3];
+ /// assert_eq!(vec.pop(), Some(3));
+ /// assert_eq!(vec, [1, 2]);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn pop(&mut self) -> Option<T> {
+ if self.len == 0 {
+ None
+ } else {
+ unsafe {
+ self.len -= 1;
+ Some(ptr::read(self.as_ptr().add(self.len())))
+ }
+ }
+ }
+
+ /// Moves all the elements of `other` into `self`, leaving `other` empty.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the new capacity exceeds `isize::MAX` bytes.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![1, 2, 3];
+ /// let mut vec2 = vec![4, 5, 6];
+ /// vec.append(&mut vec2);
+ /// assert_eq!(vec, [1, 2, 3, 4, 5, 6]);
+ /// assert_eq!(vec2, []);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[stable(feature = "append", since = "1.4.0")]
+ pub fn append(&mut self, other: &mut Self) {
+ unsafe {
+ self.append_elements(other.as_slice() as _);
+ other.set_len(0);
+ }
+ }
+
+    /// Appends elements to `self` from another buffer.
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ unsafe fn append_elements(&mut self, other: *const [T]) {
+ let count = unsafe { (*other).len() };
+ self.reserve(count);
+ let len = self.len();
+ unsafe { ptr::copy_nonoverlapping(other as *const T, self.as_mut_ptr().add(len), count) };
+ self.len += count;
+ }
+
+ /// Removes the specified range from the vector in bulk, returning all
+ /// removed elements as an iterator. If the iterator is dropped before
+ /// being fully consumed, it drops the remaining removed elements.
+ ///
+ /// The returned iterator keeps a mutable borrow on the vector to optimize
+ /// its implementation.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the starting point is greater than the end point or if
+ /// the end point is greater than the length of the vector.
+ ///
+ /// # Leaking
+ ///
+ /// If the returned iterator goes out of scope without being dropped (due to
+ /// [`mem::forget`], for example), the vector may have lost and leaked
+ /// elements arbitrarily, including elements outside the range.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = vec![1, 2, 3];
+ /// let u: Vec<_> = v.drain(1..).collect();
+ /// assert_eq!(v, &[1]);
+ /// assert_eq!(u, &[2, 3]);
+ ///
+ /// // A full range clears the vector, like `clear()` does
+ /// v.drain(..);
+ /// assert_eq!(v, &[]);
+ /// ```
+ #[stable(feature = "drain", since = "1.6.0")]
+ pub fn drain<R>(&mut self, range: R) -> Drain<'_, T, A>
+ where
+ R: RangeBounds<usize>,
+ {
+ // Memory safety
+ //
+ // When the Drain is first created, it shortens the length of
+ // the source vector to make sure no uninitialized or moved-from elements
+ // are accessible at all if the Drain's destructor never gets to run.
+ //
+ // Drain will ptr::read out the values to remove.
+ // When finished, remaining tail of the vec is copied back to cover
+ // the hole, and the vector length is restored to the new length.
+ //
+ let len = self.len();
+ let Range { start, end } = slice::range(range, ..len);
+
+ unsafe {
+ // set self.vec length's to start, to be safe in case Drain is leaked
+ self.set_len(start);
+ // Use the borrow in the IterMut to indicate borrowing behavior of the
+ // whole Drain iterator (like &mut T).
+ let range_slice = slice::from_raw_parts_mut(self.as_mut_ptr().add(start), end - start);
+ Drain {
+ tail_start: end,
+ tail_len: len - end,
+ iter: range_slice.iter(),
+ vec: NonNull::from(self),
+ }
+ }
+ }
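
A hedged sketch of the "Leaking" caveat above, tied to the implementation shown here: `drain` shortens `len` to the start of the range before iterating, so if the `Drain` is leaked its destructor never restores the tail and those elements are lost. The exact leftover contents are an implementation detail; the documentation only promises that leaked elements may be lost arbitrarily.

```
use std::mem;

let mut v = vec![1, 2, 3, 4, 5];
mem::forget(v.drain(1..3)); // leak the iterator instead of dropping it

// With this implementation only the prefix before the drained range remains;
// both the drained elements and the tail are leaked, not dropped.
assert_eq!(v, [1]);
```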
+
+ /// Clears the vector, removing all values.
+ ///
+ /// Note that this method has no effect on the allocated capacity
+ /// of the vector.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = vec![1, 2, 3];
+ ///
+ /// v.clear();
+ ///
+ /// assert!(v.is_empty());
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn clear(&mut self) {
+ let elems: *mut [T] = self.as_mut_slice();
+
+ // SAFETY:
+ // - `elems` comes directly from `as_mut_slice` and is therefore valid.
+ // - Setting `self.len` before calling `drop_in_place` means that,
+ // if an element's `Drop` impl panics, the vector's `Drop` impl will
+ // do nothing (leaking the rest of the elements) instead of dropping
+ // some twice.
+ unsafe {
+ self.len = 0;
+ ptr::drop_in_place(elems);
+ }
+ }
+
+ /// Returns the number of elements in the vector, also referred to
+ /// as its 'length'.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let a = vec![1, 2, 3];
+ /// assert_eq!(a.len(), 3);
+ /// ```
+ #[inline]
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn len(&self) -> usize {
+ self.len
+ }
+
+ /// Returns `true` if the vector contains no elements.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = Vec::new();
+ /// assert!(v.is_empty());
+ ///
+ /// v.push(1);
+ /// assert!(!v.is_empty());
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ pub fn is_empty(&self) -> bool {
+ self.len() == 0
+ }
+
+ /// Splits the collection into two at the given index.
+ ///
+ /// Returns a newly allocated vector containing the elements in the range
+ /// `[at, len)`. After the call, the original vector will be left containing
+ /// the elements `[0, at)` with its previous capacity unchanged.
+ ///
+ /// # Panics
+ ///
+ /// Panics if `at > len`.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![1, 2, 3];
+ /// let vec2 = vec.split_off(1);
+ /// assert_eq!(vec, [1]);
+ /// assert_eq!(vec2, [2, 3]);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[must_use = "use `.truncate()` if you don't need the other half"]
+ #[stable(feature = "split_off", since = "1.4.0")]
+ pub fn split_off(&mut self, at: usize) -> Self
+ where
+ A: Clone,
+ {
+ #[cold]
+ #[inline(never)]
+ fn assert_failed(at: usize, len: usize) -> ! {
+ panic!("`at` split index (is {at}) should be <= len (is {len})");
+ }
+
+ if at > self.len() {
+ assert_failed(at, self.len());
+ }
+
+ if at == 0 {
+ // the new vector can take over the original buffer and avoid the copy
+ return mem::replace(
+ self,
+ Vec::with_capacity_in(self.capacity(), self.allocator().clone()),
+ );
+ }
+
+ let other_len = self.len - at;
+ let mut other = Vec::with_capacity_in(other_len, self.allocator().clone());
+
+ // Unsafely `set_len` and copy items to `other`.
+ unsafe {
+ self.set_len(at);
+ other.set_len(other_len);
+
+ ptr::copy_nonoverlapping(self.as_ptr().add(at), other.as_mut_ptr(), other.len());
+ }
+ other
+ }
+
+ /// Resizes the `Vec` in-place so that `len` is equal to `new_len`.
+ ///
+ /// If `new_len` is greater than `len`, the `Vec` is extended by the
+ /// difference, with each additional slot filled with the result of
+ /// calling the closure `f`. The return values from `f` will end up
+ /// in the `Vec` in the order they have been generated.
+ ///
+ /// If `new_len` is less than `len`, the `Vec` is simply truncated.
+ ///
+ /// This method uses a closure to create new values on every push. If
+ /// you'd rather [`Clone`] a given value, use [`Vec::resize`]. If you
+ /// want to use the [`Default`] trait to generate values, you can
+ /// pass [`Default::default`] as the second argument.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![1, 2, 3];
+ /// vec.resize_with(5, Default::default);
+ /// assert_eq!(vec, [1, 2, 3, 0, 0]);
+ ///
+ /// let mut vec = vec![];
+ /// let mut p = 1;
+ /// vec.resize_with(4, || { p *= 2; p });
+ /// assert_eq!(vec, [2, 4, 8, 16]);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "vec_resize_with", since = "1.33.0")]
+ pub fn resize_with<F>(&mut self, new_len: usize, f: F)
+ where
+ F: FnMut() -> T,
+ {
+ let len = self.len();
+ if new_len > len {
+ self.extend_with(new_len - len, ExtendFunc(f));
+ } else {
+ self.truncate(new_len);
+ }
+ }
+
+ /// Consumes and leaks the `Vec`, returning a mutable reference to the contents,
+ /// `&'a mut [T]`. Note that the type `T` must outlive the chosen lifetime
+ /// `'a`. If the type has only static references, or none at all, then this
+ /// may be chosen to be `'static`.
+ ///
+ /// As of Rust 1.57, this method does not reallocate or shrink the `Vec`,
+ /// so the leaked allocation may include unused capacity that is not part
+ /// of the returned slice.
+ ///
+ /// This function is mainly useful for data that lives for the remainder of
+ /// the program's life. Dropping the returned reference will cause a memory
+ /// leak.
+ ///
+ /// # Examples
+ ///
+ /// Simple usage:
+ ///
+ /// ```
+ /// let x = vec![1, 2, 3];
+ /// let static_ref: &'static mut [usize] = x.leak();
+ /// static_ref[0] += 1;
+ /// assert_eq!(static_ref, &[2, 2, 3]);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "vec_leak", since = "1.47.0")]
+ #[inline]
+ pub fn leak<'a>(self) -> &'a mut [T]
+ where
+ A: 'a,
+ {
+ let mut me = ManuallyDrop::new(self);
+ unsafe { slice::from_raw_parts_mut(me.as_mut_ptr(), me.len) }
+ }
+
+ /// Returns the remaining spare capacity of the vector as a slice of
+ /// `MaybeUninit<T>`.
+ ///
+ /// The returned slice can be used to fill the vector with data (e.g. by
+ /// reading from a file) before marking the data as initialized using the
+ /// [`set_len`] method.
+ ///
+ /// [`set_len`]: Vec::set_len
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// // Allocate vector big enough for 10 elements.
+ /// let mut v = Vec::with_capacity(10);
+ ///
+ /// // Fill in the first 3 elements.
+ /// let uninit = v.spare_capacity_mut();
+ /// uninit[0].write(0);
+ /// uninit[1].write(1);
+ /// uninit[2].write(2);
+ ///
+ /// // Mark the first 3 elements of the vector as being initialized.
+ /// unsafe {
+ /// v.set_len(3);
+ /// }
+ ///
+ /// assert_eq!(&v, &[0, 1, 2]);
+ /// ```
+ #[stable(feature = "vec_spare_capacity", since = "1.60.0")]
+ #[inline]
+ pub fn spare_capacity_mut(&mut self) -> &mut [MaybeUninit<T>] {
+ // Note:
+ // This method is not implemented in terms of `split_at_spare_mut`,
+ // to prevent invalidation of pointers to the buffer.
+ unsafe {
+ slice::from_raw_parts_mut(
+ self.as_mut_ptr().add(self.len) as *mut MaybeUninit<T>,
+ self.buf.capacity() - self.len,
+ )
+ }
+ }
+
+ /// Returns vector content as a slice of `T`, along with the remaining spare
+ /// capacity of the vector as a slice of `MaybeUninit<T>`.
+ ///
+ /// The returned spare capacity slice can be used to fill the vector with data
+ /// (e.g. by reading from a file) before marking the data as initialized using
+ /// the [`set_len`] method.
+ ///
+ /// [`set_len`]: Vec::set_len
+ ///
+ /// Note that this is a low-level API, which should be used with care for
+ /// optimization purposes. If you need to append data to a `Vec`
+ /// you can use [`push`], [`extend`], [`extend_from_slice`],
+ /// [`extend_from_within`], [`insert`], [`append`], [`resize`] or
+ /// [`resize_with`], depending on your exact needs.
+ ///
+ /// [`push`]: Vec::push
+ /// [`extend`]: Vec::extend
+ /// [`extend_from_slice`]: Vec::extend_from_slice
+ /// [`extend_from_within`]: Vec::extend_from_within
+ /// [`insert`]: Vec::insert
+ /// [`append`]: Vec::append
+ /// [`resize`]: Vec::resize
+ /// [`resize_with`]: Vec::resize_with
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(vec_split_at_spare)]
+ ///
+ /// let mut v = vec![1, 1, 2];
+ ///
+ /// // Reserve additional space big enough for 10 elements.
+ /// v.reserve(10);
+ ///
+ /// let (init, uninit) = v.split_at_spare_mut();
+ /// let sum = init.iter().copied().sum::<u32>();
+ ///
+ /// // Fill in the next 4 elements.
+ /// uninit[0].write(sum);
+ /// uninit[1].write(sum * 2);
+ /// uninit[2].write(sum * 3);
+ /// uninit[3].write(sum * 4);
+ ///
+ /// // Mark the 4 elements of the vector as being initialized.
+ /// unsafe {
+ /// let len = v.len();
+ /// v.set_len(len + 4);
+ /// }
+ ///
+ /// assert_eq!(&v, &[1, 1, 2, 4, 8, 12, 16]);
+ /// ```
+ #[unstable(feature = "vec_split_at_spare", issue = "81944")]
+ #[inline]
+ pub fn split_at_spare_mut(&mut self) -> (&mut [T], &mut [MaybeUninit<T>]) {
+ // SAFETY:
+ // - len is ignored and so never changed
+ let (init, spare, _) = unsafe { self.split_at_spare_mut_with_len() };
+ (init, spare)
+ }
+
+    /// Safety: changing the returned `.2` (the `&mut usize`) is considered the same as calling `.set_len(_)`.
+ ///
+ /// This method provides unique access to all vec parts at once in `extend_from_within`.
+ unsafe fn split_at_spare_mut_with_len(
+ &mut self,
+ ) -> (&mut [T], &mut [MaybeUninit<T>], &mut usize) {
+ let ptr = self.as_mut_ptr();
+ // SAFETY:
+ // - `ptr` is guaranteed to be valid for `self.len` elements
+ // - but the allocation extends out to `self.buf.capacity()` elements, possibly
+ // uninitialized
+ let spare_ptr = unsafe { ptr.add(self.len) };
+ let spare_ptr = spare_ptr.cast::<MaybeUninit<T>>();
+ let spare_len = self.buf.capacity() - self.len;
+
+ // SAFETY:
+ // - `ptr` is guaranteed to be valid for `self.len` elements
+        // - `spare_ptr` points just past the initialized elements, so it doesn't overlap with `initialized`
+ unsafe {
+ let initialized = slice::from_raw_parts_mut(ptr, self.len);
+ let spare = slice::from_raw_parts_mut(spare_ptr, spare_len);
+
+ (initialized, spare, &mut self.len)
+ }
+ }
+}
+
+impl<T: Clone, A: Allocator> Vec<T, A> {
+ /// Resizes the `Vec` in-place so that `len` is equal to `new_len`.
+ ///
+ /// If `new_len` is greater than `len`, the `Vec` is extended by the
+ /// difference, with each additional slot filled with `value`.
+ /// If `new_len` is less than `len`, the `Vec` is simply truncated.
+ ///
+ /// This method requires `T` to implement [`Clone`],
+ /// in order to be able to clone the passed value.
+ /// If you need more flexibility (or want to rely on [`Default`] instead of
+ /// [`Clone`]), use [`Vec::resize_with`].
+ /// If you only need to resize to a smaller size, use [`Vec::truncate`].
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec!["hello"];
+ /// vec.resize(3, "world");
+ /// assert_eq!(vec, ["hello", "world", "world"]);
+ ///
+ /// let mut vec = vec![1, 2, 3, 4];
+ /// vec.resize(2, 0);
+ /// assert_eq!(vec, [1, 2]);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "vec_resize", since = "1.5.0")]
+ pub fn resize(&mut self, new_len: usize, value: T) {
+ let len = self.len();
+
+ if new_len > len {
+ self.extend_with(new_len - len, ExtendElement(value))
+ } else {
+ self.truncate(new_len);
+ }
+ }
+
+ /// Clones and appends all elements in a slice to the `Vec`.
+ ///
+ /// Iterates over the slice `other`, clones each element, and then appends
+ /// it to this `Vec`. The `other` slice is traversed in-order.
+ ///
+    /// Note that this function is the same as [`extend`] except that it is
+ /// specialized to work with slices instead. If and when Rust gets
+ /// specialization this function will likely be deprecated (but still
+ /// available).
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![1];
+ /// vec.extend_from_slice(&[2, 3, 4]);
+ /// assert_eq!(vec, [1, 2, 3, 4]);
+ /// ```
+ ///
+ /// [`extend`]: Vec::extend
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "vec_extend_from_slice", since = "1.6.0")]
+ pub fn extend_from_slice(&mut self, other: &[T]) {
+ self.spec_extend(other.iter())
+ }
+
+ /// Copies elements from `src` range to the end of the vector.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the starting point is greater than the end point or if
+ /// the end point is greater than the length of the vector.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![0, 1, 2, 3, 4];
+ ///
+ /// vec.extend_from_within(2..);
+ /// assert_eq!(vec, [0, 1, 2, 3, 4, 2, 3, 4]);
+ ///
+ /// vec.extend_from_within(..2);
+ /// assert_eq!(vec, [0, 1, 2, 3, 4, 2, 3, 4, 0, 1]);
+ ///
+ /// vec.extend_from_within(4..8);
+ /// assert_eq!(vec, [0, 1, 2, 3, 4, 2, 3, 4, 0, 1, 4, 2, 3, 4]);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[stable(feature = "vec_extend_from_within", since = "1.53.0")]
+ pub fn extend_from_within<R>(&mut self, src: R)
+ where
+ R: RangeBounds<usize>,
+ {
+ let range = slice::range(src, ..self.len());
+ self.reserve(range.len());
+
+ // SAFETY:
+ // - `slice::range` guarantees that the given range is valid for indexing self
+ unsafe {
+ self.spec_extend_from_within(range);
+ }
+ }
+}
+
+impl<T, A: Allocator, const N: usize> Vec<[T; N], A> {
+ /// Takes a `Vec<[T; N]>` and flattens it into a `Vec<T>`.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the length of the resulting vector would overflow a `usize`.
+ ///
+ /// This is only possible when flattening a vector of arrays of zero-sized
+ /// types, and thus tends to be irrelevant in practice. If
+ /// `size_of::<T>() > 0`, this will never panic.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// #![feature(slice_flatten)]
+ ///
+ /// let mut vec = vec![[1, 2, 3], [4, 5, 6], [7, 8, 9]];
+ /// assert_eq!(vec.pop(), Some([7, 8, 9]));
+ ///
+ /// let mut flattened = vec.into_flattened();
+ /// assert_eq!(flattened.pop(), Some(6));
+ /// ```
+ #[unstable(feature = "slice_flatten", issue = "95629")]
+ pub fn into_flattened(self) -> Vec<T, A> {
+ let (ptr, len, cap, alloc) = self.into_raw_parts_with_alloc();
+ let (new_len, new_cap) = if mem::size_of::<T>() == 0 {
+ (len.checked_mul(N).expect("vec len overflow"), usize::MAX)
+ } else {
+ // SAFETY:
+ // - `cap * N` cannot overflow because the allocation is already in
+ // the address space.
+ // - Each `[T; N]` has `N` valid elements, so there are `len * N`
+ // valid elements in the allocation.
+ unsafe { (len.unchecked_mul(N), cap.unchecked_mul(N)) }
+ };
+ // SAFETY:
+ // - `ptr` was allocated by `self`
+ // - `ptr` is well-aligned because `[T; N]` has the same alignment as `T`.
+ // - `new_cap` refers to the same sized allocation as `cap` because
+ // `new_cap * size_of::<T>()` == `cap * size_of::<[T; N]>()`
+ // - `len` <= `cap`, so `len * N` <= `cap * N`.
+ unsafe { Vec::<T, A>::from_raw_parts_in(ptr.cast(), new_len, new_cap, alloc) }
+ }
+}
+
+// This code generalizes `extend_with_{element,default}`.
+trait ExtendWith<T> {
+ fn next(&mut self) -> T;
+ fn last(self) -> T;
+}
+
+struct ExtendElement<T>(T);
+impl<T: Clone> ExtendWith<T> for ExtendElement<T> {
+ fn next(&mut self) -> T {
+ self.0.clone()
+ }
+ fn last(self) -> T {
+ self.0
+ }
+}
+
+struct ExtendFunc<F>(F);
+impl<T, F: FnMut() -> T> ExtendWith<T> for ExtendFunc<F> {
+ fn next(&mut self) -> T {
+ (self.0)()
+ }
+ fn last(mut self) -> T {
+ (self.0)()
+ }
+}
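+
+// An illustrative aside (not part of the API): `Vec::resize` above drives
+// `extend_with` (below) with `ExtendElement(value)`, so each new slot receives
+// a clone of `value` except the last, which takes `value` itself via `last()`.
+// A caller producing values from a closure would wrap it in `ExtendFunc`
+// instead, e.g. `ExtendFunc(|| compute_next())` (hypothetical closure), with
+// `next()` and `last()` both invoking it.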
+
+impl<T, A: Allocator> Vec<T, A> {
+ #[cfg(not(no_global_oom_handling))]
+ /// Extend the vector by `n` values, using the given generator.
+ fn extend_with<E: ExtendWith<T>>(&mut self, n: usize, mut value: E) {
+ self.reserve(n);
+
+ unsafe {
+ let mut ptr = self.as_mut_ptr().add(self.len());
+            // Use SetLenOnDrop to work around a bug where the compiler
+            // might not realize that the stores through `ptr` and the length
+            // update through `self.set_len()` don't alias.
+ let mut local_len = SetLenOnDrop::new(&mut self.len);
+
+ // Write all elements except the last one
+ for _ in 1..n {
+ ptr::write(ptr, value.next());
+ ptr = ptr.offset(1);
+ // Increment the length in every step in case next() panics
+ local_len.increment_len(1);
+ }
+
+ if n > 0 {
+ // We can write the last element directly without cloning needlessly
+ ptr::write(ptr, value.last());
+ local_len.increment_len(1);
+ }
+
+ // len set by scope guard
+ }
+ }
+}
+
+impl<T: PartialEq, A: Allocator> Vec<T, A> {
+ /// Removes consecutive repeated elements in the vector according to the
+ /// [`PartialEq`] trait implementation.
+ ///
+ /// If the vector is sorted, this removes all duplicates.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut vec = vec![1, 2, 2, 3, 2];
+ ///
+ /// vec.dedup();
+ ///
+ /// assert_eq!(vec, [1, 2, 3, 2]);
+ /// ```
+ #[stable(feature = "rust1", since = "1.0.0")]
+ #[inline]
+ pub fn dedup(&mut self) {
+ self.dedup_by(|a, b| a == b)
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Internal methods and functions
+////////////////////////////////////////////////////////////////////////////////
+
+#[doc(hidden)]
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+pub fn from_elem<T: Clone>(elem: T, n: usize) -> Vec<T> {
+ <T as SpecFromElem>::from_elem(elem, n, Global)
+}
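+
+// Usage note (a sketch): the `vec![elem; n]` form of the `vec!` macro expands,
+// roughly, to a call such as `from_elem(elem, n)`; for example `vec![0u8; 4]`
+// becomes `from_elem(0u8, 4)` and can then take the zeroed fast path provided
+// by the `SpecFromElem` impls.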
+
+#[doc(hidden)]
+#[cfg(not(no_global_oom_handling))]
+#[unstable(feature = "allocator_api", issue = "32838")]
+pub fn from_elem_in<T: Clone, A: Allocator>(elem: T, n: usize, alloc: A) -> Vec<T, A> {
+ <T as SpecFromElem>::from_elem(elem, n, alloc)
+}
+
+trait ExtendFromWithinSpec {
+ /// # Safety
+ ///
+ /// - `src` needs to be valid index
+ /// - `self.capacity() - self.len()` must be `>= src.len()`
+ unsafe fn spec_extend_from_within(&mut self, src: Range<usize>);
+}
+
+impl<T: Clone, A: Allocator> ExtendFromWithinSpec for Vec<T, A> {
+ default unsafe fn spec_extend_from_within(&mut self, src: Range<usize>) {
+ // SAFETY:
+ // - len is increased only after initializing elements
+ let (this, spare, len) = unsafe { self.split_at_spare_mut_with_len() };
+
+ // SAFETY:
+        // - caller guarantees that src is a valid index
+ let to_clone = unsafe { this.get_unchecked(src) };
+
+ iter::zip(to_clone, spare)
+ .map(|(src, dst)| dst.write(src.clone()))
+ // Note:
+ // - Element was just initialized with `MaybeUninit::write`, so it's ok to increase len
+ // - len is increased after each element to prevent leaks (see issue #82533)
+ .for_each(|_| *len += 1);
+ }
+}
+
+impl<T: Copy, A: Allocator> ExtendFromWithinSpec for Vec<T, A> {
+ unsafe fn spec_extend_from_within(&mut self, src: Range<usize>) {
+ let count = src.len();
+ {
+ let (init, spare) = self.split_at_spare_mut();
+
+ // SAFETY:
+            // - caller guarantees that `src` is a valid index
+ let source = unsafe { init.get_unchecked(src) };
+
+ // SAFETY:
+ // - Both pointers are created from unique slice references (`&mut [_]`)
+ // so they are valid and do not overlap.
+            // - Elements are `Copy`, so it's OK to copy them without doing
+            //   anything with the original values
+ // - `count` is equal to the len of `source`, so source is valid for
+ // `count` reads
+ // - `.reserve(count)` guarantees that `spare.len() >= count` so spare
+ // is valid for `count` writes
+ unsafe { ptr::copy_nonoverlapping(source.as_ptr(), spare.as_mut_ptr() as _, count) };
+ }
+
+ // SAFETY:
+ // - The elements were just initialized by `copy_nonoverlapping`
+ self.len += count;
+ }
+}
+
+////////////////////////////////////////////////////////////////////////////////
+// Common trait implementations for Vec
+////////////////////////////////////////////////////////////////////////////////
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator> ops::Deref for Vec<T, A> {
+ type Target = [T];
+
+ #[inline]
+ fn deref(&self) -> &[T] {
+ unsafe { slice::from_raw_parts(self.as_ptr(), self.len) }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator> ops::DerefMut for Vec<T, A> {
+ #[inline]
+ fn deref_mut(&mut self) -> &mut [T] {
+ unsafe { slice::from_raw_parts_mut(self.as_mut_ptr(), self.len) }
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+trait SpecCloneFrom {
+ fn clone_from(this: &mut Self, other: &Self);
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T: Clone, A: Allocator> SpecCloneFrom for Vec<T, A> {
+ default fn clone_from(this: &mut Self, other: &Self) {
+ // drop anything that will not be overwritten
+ this.truncate(other.len());
+
+        // this.len <= other.len due to the truncate above, so the
+ // slices here are always in-bounds.
+ let (init, tail) = other.split_at(this.len());
+
+ // reuse the contained values' allocations/resources.
+ this.clone_from_slice(init);
+ this.extend_from_slice(tail);
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+impl<T: Copy, A: Allocator> SpecCloneFrom for Vec<T, A> {
+ fn clone_from(this: &mut Self, other: &Self) {
+ this.clear();
+ this.extend_from_slice(other);
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Clone, A: Allocator + Clone> Clone for Vec<T, A> {
+ #[cfg(not(test))]
+ fn clone(&self) -> Self {
+ let alloc = self.allocator().clone();
+ <[T]>::to_vec_in(&**self, alloc)
+ }
+
+ // HACK(japaric): with cfg(test) the inherent `[T]::to_vec` method, which is
+ // required for this method definition, is not available. Instead use the
+ // `slice::to_vec` function which is only available with cfg(test)
+ // NB see the slice::hack module in slice.rs for more information
+ #[cfg(test)]
+ fn clone(&self) -> Self {
+ let alloc = self.allocator().clone();
+ crate::slice::to_vec(&**self, alloc)
+ }
+
+ fn clone_from(&mut self, other: &Self) {
+ SpecCloneFrom::clone_from(self, other)
+ }
+}
+
+/// The hash of a vector is the same as that of the corresponding slice,
+/// as required by the `core::borrow::Borrow` implementation.
+///
+/// ```
+/// #![feature(build_hasher_simple_hash_one)]
+/// use std::hash::BuildHasher;
+///
+/// let b = std::collections::hash_map::RandomState::new();
+/// let v: Vec<u8> = vec![0xa8, 0x3c, 0x09];
+/// let s: &[u8] = &[0xa8, 0x3c, 0x09];
+/// assert_eq!(b.hash_one(v), b.hash_one(s));
+/// ```
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Hash, A: Allocator> Hash for Vec<T, A> {
+ #[inline]
+ fn hash<H: Hasher>(&self, state: &mut H) {
+ Hash::hash(&**self, state)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_on_unimplemented(
+ message = "vector indices are of type `usize` or ranges of `usize`",
+ label = "vector indices are of type `usize` or ranges of `usize`"
+)]
+impl<T, I: SliceIndex<[T]>, A: Allocator> Index<I> for Vec<T, A> {
+ type Output = I::Output;
+
+ #[inline]
+ fn index(&self, index: I) -> &Self::Output {
+ Index::index(&**self, index)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_on_unimplemented(
+ message = "vector indices are of type `usize` or ranges of `usize`",
+ label = "vector indices are of type `usize` or ranges of `usize`"
+)]
+impl<T, I: SliceIndex<[T]>, A: Allocator> IndexMut<I> for Vec<T, A> {
+ #[inline]
+ fn index_mut(&mut self, index: I) -> &mut Self::Output {
+ IndexMut::index_mut(&mut **self, index)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T> FromIterator<T> for Vec<T> {
+ #[inline]
+ fn from_iter<I: IntoIterator<Item = T>>(iter: I) -> Vec<T> {
+ <Self as SpecFromIter<T, I::IntoIter>>::from_iter(iter.into_iter())
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator> IntoIterator for Vec<T, A> {
+ type Item = T;
+ type IntoIter = IntoIter<T, A>;
+
+ /// Creates a consuming iterator, that is, one that moves each value out of
+ /// the vector (from start to end). The vector cannot be used after calling
+ /// this.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let v = vec!["a".to_string(), "b".to_string()];
+ /// let mut v_iter = v.into_iter();
+ ///
+ /// let first_element: Option<String> = v_iter.next();
+ ///
+ /// assert_eq!(first_element, Some("a".to_string()));
+ /// assert_eq!(v_iter.next(), Some("b".to_string()));
+ /// assert_eq!(v_iter.next(), None);
+ /// ```
+ #[inline]
+ fn into_iter(self) -> IntoIter<T, A> {
+ unsafe {
+ let mut me = ManuallyDrop::new(self);
+ let alloc = ManuallyDrop::new(ptr::read(me.allocator()));
+ let begin = me.as_mut_ptr();
+ let end = if mem::size_of::<T>() == 0 {
+ arith_offset(begin as *const i8, me.len() as isize) as *const T
+ } else {
+ begin.add(me.len()) as *const T
+ };
+ let cap = me.buf.capacity();
+ IntoIter {
+ buf: NonNull::new_unchecked(begin),
+ phantom: PhantomData,
+ cap,
+ alloc,
+ ptr: begin,
+ end,
+ }
+ }
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T, A: Allocator> IntoIterator for &'a Vec<T, A> {
+ type Item = &'a T;
+ type IntoIter = slice::Iter<'a, T>;
+
+ fn into_iter(self) -> slice::Iter<'a, T> {
+ self.iter()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<'a, T, A: Allocator> IntoIterator for &'a mut Vec<T, A> {
+ type Item = &'a mut T;
+ type IntoIter = slice::IterMut<'a, T>;
+
+ fn into_iter(self) -> slice::IterMut<'a, T> {
+ self.iter_mut()
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator> Extend<T> for Vec<T, A> {
+ #[inline]
+ fn extend<I: IntoIterator<Item = T>>(&mut self, iter: I) {
+ <Self as SpecExtend<T, I::IntoIter>>::spec_extend(self, iter.into_iter())
+ }
+
+ #[inline]
+ fn extend_one(&mut self, item: T) {
+ self.push(item);
+ }
+
+ #[inline]
+ fn extend_reserve(&mut self, additional: usize) {
+ self.reserve(additional);
+ }
+}
+
+impl<T, A: Allocator> Vec<T, A> {
+ // leaf method to which various SpecFrom/SpecExtend implementations delegate when
+ // they have no further optimizations to apply
+ #[cfg(not(no_global_oom_handling))]
+ fn extend_desugared<I: Iterator<Item = T>>(&mut self, mut iterator: I) {
+ // This is the case for a general iterator.
+ //
+ // This function should be the moral equivalent of:
+ //
+ // for item in iterator {
+ // self.push(item);
+ // }
+ while let Some(element) = iterator.next() {
+ let len = self.len();
+ if len == self.capacity() {
+ let (lower, _) = iterator.size_hint();
+ self.reserve(lower.saturating_add(1));
+ }
+ unsafe {
+ ptr::write(self.as_mut_ptr().add(len), element);
+ // Since next() executes user code which can panic we have to bump the length
+ // after each step.
+ // NB can't overflow since we would have had to alloc the address space
+ self.set_len(len + 1);
+ }
+ }
+ }
+
+ /// Creates a splicing iterator that replaces the specified range in the vector
+ /// with the given `replace_with` iterator and yields the removed items.
+ /// `replace_with` does not need to be the same length as `range`.
+ ///
+ /// `range` is removed even if the iterator is not consumed until the end.
+ ///
+ /// It is unspecified how many elements are removed from the vector
+ /// if the `Splice` value is leaked.
+ ///
+ /// The input iterator `replace_with` is only consumed when the `Splice` value is dropped.
+ ///
+ /// This is optimal if:
+ ///
+ /// * The tail (elements in the vector after `range`) is empty,
+    /// * or `replace_with` yields no more elements than `range`’s length
+ /// * or the lower bound of its `size_hint()` is exact.
+ ///
+ /// Otherwise, a temporary vector is allocated and the tail is moved twice.
+ ///
+ /// # Panics
+ ///
+ /// Panics if the starting point is greater than the end point or if
+ /// the end point is greater than the length of the vector.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let mut v = vec![1, 2, 3, 4];
+ /// let new = [7, 8, 9];
+ /// let u: Vec<_> = v.splice(1..3, new).collect();
+ /// assert_eq!(v, &[1, 7, 8, 9, 4]);
+ /// assert_eq!(u, &[2, 3]);
+ /// ```
+ #[cfg(not(no_global_oom_handling))]
+ #[inline]
+ #[stable(feature = "vec_splice", since = "1.21.0")]
+ pub fn splice<R, I>(&mut self, range: R, replace_with: I) -> Splice<'_, I::IntoIter, A>
+ where
+ R: RangeBounds<usize>,
+ I: IntoIterator<Item = T>,
+ {
+ Splice { drain: self.drain(range), replace_with: replace_with.into_iter() }
+ }
+
+ /// Creates an iterator which uses a closure to determine if an element should be removed.
+ ///
+ /// If the closure returns true, then the element is removed and yielded.
+ /// If the closure returns false, the element will remain in the vector and will not be yielded
+ /// by the iterator.
+ ///
+ /// Using this method is equivalent to the following code:
+ ///
+ /// ```
+ /// # let some_predicate = |x: &mut i32| { *x == 2 || *x == 3 || *x == 6 };
+ /// # let mut vec = vec![1, 2, 3, 4, 5, 6];
+ /// let mut i = 0;
+ /// while i < vec.len() {
+ /// if some_predicate(&mut vec[i]) {
+ /// let val = vec.remove(i);
+ /// // your code here
+ /// } else {
+ /// i += 1;
+ /// }
+ /// }
+ ///
+ /// # assert_eq!(vec, vec![1, 4, 5]);
+ /// ```
+ ///
+ /// But `drain_filter` is easier to use. `drain_filter` is also more efficient,
+ /// because it can backshift the elements of the array in bulk.
+ ///
+ /// Note that `drain_filter` also lets you mutate every element in the filter closure,
+ /// regardless of whether you choose to keep or remove it.
+ ///
+ /// # Examples
+ ///
+    /// Splitting a vector into evens and odds, reusing the original allocation:
+ ///
+ /// ```
+ /// #![feature(drain_filter)]
+ /// let mut numbers = vec![1, 2, 3, 4, 5, 6, 8, 9, 11, 13, 14, 15];
+ ///
+ /// let evens = numbers.drain_filter(|x| *x % 2 == 0).collect::<Vec<_>>();
+ /// let odds = numbers;
+ ///
+ /// assert_eq!(evens, vec![2, 4, 6, 8, 14]);
+ /// assert_eq!(odds, vec![1, 3, 5, 9, 11, 13, 15]);
+ /// ```
+ #[unstable(feature = "drain_filter", reason = "recently added", issue = "43244")]
+ pub fn drain_filter<F>(&mut self, filter: F) -> DrainFilter<'_, T, F, A>
+ where
+ F: FnMut(&mut T) -> bool,
+ {
+ let old_len = self.len();
+
+ // Guard against us getting leaked (leak amplification)
+ unsafe {
+ self.set_len(0);
+ }
+
+ DrainFilter { vec: self, idx: 0, del: 0, old_len, pred: filter, panic_flag: false }
+ }
+}
+
+/// Extend implementation that copies elements out of references before pushing them onto the Vec.
+///
+/// This implementation is specialized for slice iterators, where it uses [`copy_from_slice`] to
+/// append the entire slice at once.
+///
+/// [`copy_from_slice`]: slice::copy_from_slice
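+///
+/// # Examples
+///
+/// A small usage sketch: extending from a borrowed slice of `Copy` items
+/// copies the items, leaving the source slice usable afterwards.
+///
+/// ```
+/// let src = [2, 3, 4];
+/// let mut v = vec![1];
+/// v.extend(&src);
+/// assert_eq!(v, [1, 2, 3, 4]);
+/// assert_eq!(src, [2, 3, 4]);
+/// ```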
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "extend_ref", since = "1.2.0")]
+impl<'a, T: Copy + 'a, A: Allocator + 'a> Extend<&'a T> for Vec<T, A> {
+ fn extend<I: IntoIterator<Item = &'a T>>(&mut self, iter: I) {
+ self.spec_extend(iter.into_iter())
+ }
+
+ #[inline]
+ fn extend_one(&mut self, &item: &'a T) {
+ self.push(item);
+ }
+
+ #[inline]
+ fn extend_reserve(&mut self, additional: usize) {
+ self.reserve(additional);
+ }
+}
+
+/// Implements comparison of vectors, [lexicographically](core::cmp::Ord#lexicographical-comparison).
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: PartialOrd, A: Allocator> PartialOrd for Vec<T, A> {
+ #[inline]
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ PartialOrd::partial_cmp(&**self, &**other)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Eq, A: Allocator> Eq for Vec<T, A> {}
+
+/// Implements ordering of vectors, [lexicographically](core::cmp::Ord#lexicographical-comparison).
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Ord, A: Allocator> Ord for Vec<T, A> {
+ #[inline]
+ fn cmp(&self, other: &Self) -> Ordering {
+ Ord::cmp(&**self, &**other)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+unsafe impl<#[may_dangle] T, A: Allocator> Drop for Vec<T, A> {
+ fn drop(&mut self) {
+ unsafe {
+ // use drop for [T]
+ // use a raw slice to refer to the elements of the vector as weakest necessary type;
+ // could avoid questions of validity in certain cases
+ ptr::drop_in_place(ptr::slice_from_raw_parts_mut(self.as_mut_ptr(), self.len))
+ }
+ // RawVec handles deallocation
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+#[rustc_const_unstable(feature = "const_default_impls", issue = "87864")]
+impl<T> const Default for Vec<T> {
+ /// Creates an empty `Vec<T>`.
+ fn default() -> Vec<T> {
+ Vec::new()
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: fmt::Debug, A: Allocator> fmt::Debug for Vec<T, A> {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ fmt::Debug::fmt(&**self, f)
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator> AsRef<Vec<T, A>> for Vec<T, A> {
+ fn as_ref(&self) -> &Vec<T, A> {
+ self
+ }
+}
+
+#[stable(feature = "vec_as_mut", since = "1.5.0")]
+impl<T, A: Allocator> AsMut<Vec<T, A>> for Vec<T, A> {
+ fn as_mut(&mut self) -> &mut Vec<T, A> {
+ self
+ }
+}
+
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T, A: Allocator> AsRef<[T]> for Vec<T, A> {
+ fn as_ref(&self) -> &[T] {
+ self
+ }
+}
+
+#[stable(feature = "vec_as_mut", since = "1.5.0")]
+impl<T, A: Allocator> AsMut<[T]> for Vec<T, A> {
+ fn as_mut(&mut self) -> &mut [T] {
+ self
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl<T: Clone> From<&[T]> for Vec<T> {
+ /// Allocate a `Vec<T>` and fill it by cloning `s`'s items.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!(Vec::from(&[1, 2, 3][..]), vec![1, 2, 3]);
+ /// ```
+ #[cfg(not(test))]
+ fn from(s: &[T]) -> Vec<T> {
+ s.to_vec()
+ }
+ #[cfg(test)]
+ fn from(s: &[T]) -> Vec<T> {
+ crate::slice::to_vec(s, Global)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "vec_from_mut", since = "1.19.0")]
+impl<T: Clone> From<&mut [T]> for Vec<T> {
+ /// Allocate a `Vec<T>` and fill it by cloning `s`'s items.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!(Vec::from(&mut [1, 2, 3][..]), vec![1, 2, 3]);
+ /// ```
+ #[cfg(not(test))]
+ fn from(s: &mut [T]) -> Vec<T> {
+ s.to_vec()
+ }
+ #[cfg(test)]
+ fn from(s: &mut [T]) -> Vec<T> {
+ crate::slice::to_vec(s, Global)
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "vec_from_array", since = "1.44.0")]
+impl<T, const N: usize> From<[T; N]> for Vec<T> {
+ /// Allocate a `Vec<T>` and move `s`'s items into it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!(Vec::from([1, 2, 3]), vec![1, 2, 3]);
+ /// ```
+ #[cfg(not(test))]
+ fn from(s: [T; N]) -> Vec<T> {
+ <[T]>::into_vec(
+ #[rustc_box]
+ Box::new(s),
+ )
+ }
+
+ #[cfg(test)]
+ fn from(s: [T; N]) -> Vec<T> {
+ crate::slice::into_vec(Box::new(s))
+ }
+}
+
+#[stable(feature = "vec_from_cow_slice", since = "1.14.0")]
+impl<'a, T> From<Cow<'a, [T]>> for Vec<T>
+where
+ [T]: ToOwned<Owned = Vec<T>>,
+{
+ /// Convert a clone-on-write slice into a vector.
+ ///
+ /// If `s` already owns a `Vec<T>`, it will be returned directly.
+ /// If `s` is borrowing a slice, a new `Vec<T>` will be allocated and
+ /// filled by cloning `s`'s items into it.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// # use std::borrow::Cow;
+ /// let o: Cow<[i32]> = Cow::Owned(vec![1, 2, 3]);
+ /// let b: Cow<[i32]> = Cow::Borrowed(&[1, 2, 3]);
+ /// assert_eq!(Vec::from(o), Vec::from(b));
+ /// ```
+ fn from(s: Cow<'a, [T]>) -> Vec<T> {
+ s.into_owned()
+ }
+}
+
+// note: test pulls in libstd, which causes errors here
+#[cfg(not(test))]
+#[stable(feature = "vec_from_box", since = "1.18.0")]
+impl<T, A: Allocator> From<Box<[T], A>> for Vec<T, A> {
+ /// Convert a boxed slice into a vector by transferring ownership of
+ /// the existing heap allocation.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// let b: Box<[i32]> = vec![1, 2, 3].into_boxed_slice();
+ /// assert_eq!(Vec::from(b), vec![1, 2, 3]);
+ /// ```
+ fn from(s: Box<[T], A>) -> Self {
+ s.into_vec()
+ }
+}
+
+// note: test pulls in libstd, which causes errors here
+#[cfg(not(no_global_oom_handling))]
+#[cfg(not(test))]
+#[stable(feature = "box_from_vec", since = "1.20.0")]
+impl<T, A: Allocator> From<Vec<T, A>> for Box<[T], A> {
+ /// Convert a vector into a boxed slice.
+ ///
+ /// If `v` has excess capacity, its items will be moved into a
+ /// newly-allocated buffer with exactly the right capacity.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!(Box::from(vec![1, 2, 3]), vec![1, 2, 3].into_boxed_slice());
+ /// ```
+ fn from(v: Vec<T, A>) -> Self {
+ v.into_boxed_slice()
+ }
+}
+
+#[cfg(not(no_global_oom_handling))]
+#[stable(feature = "rust1", since = "1.0.0")]
+impl From<&str> for Vec<u8> {
+ /// Allocate a `Vec<u8>` and fill it with a UTF-8 string.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!(Vec::from("123"), vec![b'1', b'2', b'3']);
+ /// ```
+ fn from(s: &str) -> Vec<u8> {
+ From::from(s.as_bytes())
+ }
+}
+
+#[stable(feature = "array_try_from_vec", since = "1.48.0")]
+impl<T, A: Allocator, const N: usize> TryFrom<Vec<T, A>> for [T; N] {
+ type Error = Vec<T, A>;
+
+ /// Gets the entire contents of the `Vec<T>` as an array,
+ /// if its size exactly matches that of the requested array.
+ ///
+ /// # Examples
+ ///
+ /// ```
+ /// assert_eq!(vec![1, 2, 3].try_into(), Ok([1, 2, 3]));
+ /// assert_eq!(<Vec<i32>>::new().try_into(), Ok([]));
+ /// ```
+ ///
+ /// If the length doesn't match, the input comes back in `Err`:
+ /// ```
+ /// let r: Result<[i32; 4], _> = (0..10).collect::<Vec<_>>().try_into();
+ /// assert_eq!(r, Err(vec![0, 1, 2, 3, 4, 5, 6, 7, 8, 9]));
+ /// ```
+ ///
+ /// If you're fine with just getting a prefix of the `Vec<T>`,
+ /// you can call [`.truncate(N)`](Vec::truncate) first.
+ /// ```
+ /// let mut v = String::from("hello world").into_bytes();
+ /// v.sort();
+ /// v.truncate(2);
+ /// let [a, b]: [_; 2] = v.try_into().unwrap();
+ /// assert_eq!(a, b' ');
+ /// assert_eq!(b, b'd');
+ /// ```
+ fn try_from(mut vec: Vec<T, A>) -> Result<[T; N], Vec<T, A>> {
+ if vec.len() != N {
+ return Err(vec);
+ }
+
+ // SAFETY: `.set_len(0)` is always sound.
+ unsafe { vec.set_len(0) };
+
+ // SAFETY: A `Vec`'s pointer is always aligned properly, and
+ // the alignment the array needs is the same as the items.
+ // We checked earlier that we have sufficient items.
+ // The items will not double-drop as the `set_len`
+ // tells the `Vec` not to also drop them.
+ let array = unsafe { ptr::read(vec.as_ptr() as *const [T; N]) };
+ Ok(array)
+ }
+}
diff --git a/library/alloc/src/vec/partial_eq.rs b/library/alloc/src/vec/partial_eq.rs
new file mode 100644
index 000000000..b0cf72577
--- /dev/null
+++ b/library/alloc/src/vec/partial_eq.rs
@@ -0,0 +1,47 @@
+use crate::alloc::Allocator;
+#[cfg(not(no_global_oom_handling))]
+use crate::borrow::Cow;
+
+use super::Vec;
+
+macro_rules! __impl_slice_eq1 {
+ ([$($vars:tt)*] $lhs:ty, $rhs:ty $(where $ty:ty: $bound:ident)?, #[$stability:meta]) => {
+ #[$stability]
+ impl<T, U, $($vars)*> PartialEq<$rhs> for $lhs
+ where
+ T: PartialEq<U>,
+ $($ty: $bound)?
+ {
+ #[inline]
+ fn eq(&self, other: &$rhs) -> bool { self[..] == other[..] }
+ #[inline]
+ fn ne(&self, other: &$rhs) -> bool { self[..] != other[..] }
+ }
+ }
+}
+
+__impl_slice_eq1! { [A1: Allocator, A2: Allocator] Vec<T, A1>, Vec<U, A2>, #[stable(feature = "rust1", since = "1.0.0")] }
+__impl_slice_eq1! { [A: Allocator] Vec<T, A>, &[U], #[stable(feature = "rust1", since = "1.0.0")] }
+__impl_slice_eq1! { [A: Allocator] Vec<T, A>, &mut [U], #[stable(feature = "rust1", since = "1.0.0")] }
+__impl_slice_eq1! { [A: Allocator] &[T], Vec<U, A>, #[stable(feature = "partialeq_vec_for_ref_slice", since = "1.46.0")] }
+__impl_slice_eq1! { [A: Allocator] &mut [T], Vec<U, A>, #[stable(feature = "partialeq_vec_for_ref_slice", since = "1.46.0")] }
+__impl_slice_eq1! { [A: Allocator] Vec<T, A>, [U], #[stable(feature = "partialeq_vec_for_slice", since = "1.48.0")] }
+__impl_slice_eq1! { [A: Allocator] [T], Vec<U, A>, #[stable(feature = "partialeq_vec_for_slice", since = "1.48.0")] }
+#[cfg(not(no_global_oom_handling))]
+__impl_slice_eq1! { [A: Allocator] Cow<'_, [T]>, Vec<U, A> where T: Clone, #[stable(feature = "rust1", since = "1.0.0")] }
+#[cfg(not(no_global_oom_handling))]
+__impl_slice_eq1! { [] Cow<'_, [T]>, &[U] where T: Clone, #[stable(feature = "rust1", since = "1.0.0")] }
+#[cfg(not(no_global_oom_handling))]
+__impl_slice_eq1! { [] Cow<'_, [T]>, &mut [U] where T: Clone, #[stable(feature = "rust1", since = "1.0.0")] }
+__impl_slice_eq1! { [A: Allocator, const N: usize] Vec<T, A>, [U; N], #[stable(feature = "rust1", since = "1.0.0")] }
+__impl_slice_eq1! { [A: Allocator, const N: usize] Vec<T, A>, &[U; N], #[stable(feature = "rust1", since = "1.0.0")] }
+
+// NOTE: some less important impls are omitted to reduce code bloat
+// FIXME(Centril): Reconsider this?
+//__impl_slice_eq1! { [const N: usize] Vec<A>, &mut [B; N], }
+//__impl_slice_eq1! { [const N: usize] [A; N], Vec<B>, }
+//__impl_slice_eq1! { [const N: usize] &[A; N], Vec<B>, }
+//__impl_slice_eq1! { [const N: usize] &mut [A; N], Vec<B>, }
+//__impl_slice_eq1! { [const N: usize] Cow<'a, [A]>, [B; N], }
+//__impl_slice_eq1! { [const N: usize] Cow<'a, [A]>, &[B; N], }
+//__impl_slice_eq1! { [const N: usize] Cow<'a, [A]>, &mut [B; N], }
diff --git a/library/alloc/src/vec/set_len_on_drop.rs b/library/alloc/src/vec/set_len_on_drop.rs
new file mode 100644
index 000000000..8b66bc812
--- /dev/null
+++ b/library/alloc/src/vec/set_len_on_drop.rs
@@ -0,0 +1,28 @@
+// Set the length of the vec when the `SetLenOnDrop` value goes out of scope.
+//
+// The idea is: The length field in SetLenOnDrop is a local variable
+// that the optimizer will see does not alias with any stores through the Vec's data
+// pointer. This is a workaround for alias analysis issue #32155
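+//
+// A usage sketch (mirroring `Vec::extend_with`): construct the guard from
+// `&mut self.len`, write each new element through a raw pointer, and call
+// `increment_len(1)` only after that write succeeds. If producing a later
+// element panics, the guard's `Drop` still commits the count written so far,
+// so initialized elements are dropped normally and no uninitialized element
+// is ever counted as part of the vector.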
+pub(super) struct SetLenOnDrop<'a> {
+ len: &'a mut usize,
+ local_len: usize,
+}
+
+impl<'a> SetLenOnDrop<'a> {
+ #[inline]
+ pub(super) fn new(len: &'a mut usize) -> Self {
+ SetLenOnDrop { local_len: *len, len }
+ }
+
+ #[inline]
+ pub(super) fn increment_len(&mut self, increment: usize) {
+ self.local_len += increment;
+ }
+}
+
+impl Drop for SetLenOnDrop<'_> {
+ #[inline]
+ fn drop(&mut self) {
+ *self.len = self.local_len;
+ }
+}
diff --git a/library/alloc/src/vec/spec_extend.rs b/library/alloc/src/vec/spec_extend.rs
new file mode 100644
index 000000000..506ee0ecf
--- /dev/null
+++ b/library/alloc/src/vec/spec_extend.rs
@@ -0,0 +1,87 @@
+use crate::alloc::Allocator;
+use core::iter::TrustedLen;
+use core::ptr::{self};
+use core::slice::{self};
+
+use super::{IntoIter, SetLenOnDrop, Vec};
+
+// Specialization trait used for Vec::extend
+pub(super) trait SpecExtend<T, I> {
+ fn spec_extend(&mut self, iter: I);
+}
+
+impl<T, I, A: Allocator> SpecExtend<T, I> for Vec<T, A>
+where
+ I: Iterator<Item = T>,
+{
+ default fn spec_extend(&mut self, iter: I) {
+ self.extend_desugared(iter)
+ }
+}
+
+impl<T, I, A: Allocator> SpecExtend<T, I> for Vec<T, A>
+where
+ I: TrustedLen<Item = T>,
+{
+ default fn spec_extend(&mut self, iterator: I) {
+ // This is the case for a TrustedLen iterator.
+ let (low, high) = iterator.size_hint();
+ if let Some(additional) = high {
+ debug_assert_eq!(
+ low,
+ additional,
+ "TrustedLen iterator's size hint is not exact: {:?}",
+ (low, high)
+ );
+ self.reserve(additional);
+ unsafe {
+ let mut ptr = self.as_mut_ptr().add(self.len());
+ let mut local_len = SetLenOnDrop::new(&mut self.len);
+ iterator.for_each(move |element| {
+ ptr::write(ptr, element);
+ ptr = ptr.offset(1);
+                    // Since the loop executes user code which can panic we have to bump the length
+                    // after each step.
+ // NB can't overflow since we would have had to alloc the address space
+ local_len.increment_len(1);
+ });
+ }
+ } else {
+ // Per TrustedLen contract a `None` upper bound means that the iterator length
+ // truly exceeds usize::MAX, which would eventually lead to a capacity overflow anyway.
+ // Since the other branch already panics eagerly (via `reserve()`) we do the same here.
+ // This avoids additional codegen for a fallback code path which would eventually
+ // panic anyway.
+ panic!("capacity overflow");
+ }
+ }
+}
+
+impl<T, A: Allocator> SpecExtend<T, IntoIter<T>> for Vec<T, A> {
+ fn spec_extend(&mut self, mut iterator: IntoIter<T>) {
+ unsafe {
+ self.append_elements(iterator.as_slice() as _);
+ }
+ iterator.forget_remaining_elements();
+ }
+}
+
+impl<'a, T: 'a, I, A: Allocator + 'a> SpecExtend<&'a T, I> for Vec<T, A>
+where
+ I: Iterator<Item = &'a T>,
+ T: Clone,
+{
+ default fn spec_extend(&mut self, iterator: I) {
+ self.spec_extend(iterator.cloned())
+ }
+}
+
+impl<'a, T: 'a, A: Allocator + 'a> SpecExtend<&'a T, slice::Iter<'a, T>> for Vec<T, A>
+where
+ T: Copy,
+{
+ fn spec_extend(&mut self, iterator: slice::Iter<'a, T>) {
+ let slice = iterator.as_slice();
+ unsafe { self.append_elements(slice) };
+ }
+}
diff --git a/library/alloc/src/vec/spec_from_elem.rs b/library/alloc/src/vec/spec_from_elem.rs
new file mode 100644
index 000000000..ff364c033
--- /dev/null
+++ b/library/alloc/src/vec/spec_from_elem.rs
@@ -0,0 +1,61 @@
+use core::ptr;
+
+use crate::alloc::Allocator;
+use crate::raw_vec::RawVec;
+
+use super::{ExtendElement, IsZero, Vec};
+
+// Specialization trait used for Vec::from_elem
+pub(super) trait SpecFromElem: Sized {
+ fn from_elem<A: Allocator>(elem: Self, n: usize, alloc: A) -> Vec<Self, A>;
+}
+
+impl<T: Clone> SpecFromElem for T {
+ default fn from_elem<A: Allocator>(elem: Self, n: usize, alloc: A) -> Vec<Self, A> {
+ let mut v = Vec::with_capacity_in(n, alloc);
+ v.extend_with(n, ExtendElement(elem));
+ v
+ }
+}
+
+impl<T: Clone + IsZero> SpecFromElem for T {
+ #[inline]
+ default fn from_elem<A: Allocator>(elem: T, n: usize, alloc: A) -> Vec<T, A> {
+ if elem.is_zero() {
+ return Vec { buf: RawVec::with_capacity_zeroed_in(n, alloc), len: n };
+ }
+ let mut v = Vec::with_capacity_in(n, alloc);
+ v.extend_with(n, ExtendElement(elem));
+ v
+ }
+}
+
+impl SpecFromElem for i8 {
+ #[inline]
+ fn from_elem<A: Allocator>(elem: i8, n: usize, alloc: A) -> Vec<i8, A> {
+ if elem == 0 {
+ return Vec { buf: RawVec::with_capacity_zeroed_in(n, alloc), len: n };
+ }
+ unsafe {
+ let mut v = Vec::with_capacity_in(n, alloc);
+ ptr::write_bytes(v.as_mut_ptr(), elem as u8, n);
+ v.set_len(n);
+ v
+ }
+ }
+}
+
+impl SpecFromElem for u8 {
+ #[inline]
+ fn from_elem<A: Allocator>(elem: u8, n: usize, alloc: A) -> Vec<u8, A> {
+ if elem == 0 {
+ return Vec { buf: RawVec::with_capacity_zeroed_in(n, alloc), len: n };
+ }
+ unsafe {
+ let mut v = Vec::with_capacity_in(n, alloc);
+ ptr::write_bytes(v.as_mut_ptr(), elem, n);
+ v.set_len(n);
+ v
+ }
+ }
+}
diff --git a/library/alloc/src/vec/spec_from_iter.rs b/library/alloc/src/vec/spec_from_iter.rs
new file mode 100644
index 000000000..efa686847
--- /dev/null
+++ b/library/alloc/src/vec/spec_from_iter.rs
@@ -0,0 +1,64 @@
+use core::mem::ManuallyDrop;
+use core::ptr::{self};
+
+use super::{IntoIter, SpecExtend, SpecFromIterNested, Vec};
+
+/// Specialization trait used for Vec::from_iter
+///
+/// ## The delegation graph:
+///
+/// ```text
+/// +-------------+
+/// |FromIterator |
+/// +-+-----------+
+/// |
+/// v
+/// +-+-------------------------------+ +---------------------+
+/// |SpecFromIter +---->+SpecFromIterNested |
+/// |where I: | | |where I: |
+/// | Iterator (default)----------+ | | Iterator (default) |
+/// | vec::IntoIter | | | TrustedLen |
+/// | SourceIterMarker---fallback-+ | +---------------------+
+/// +---------------------------------+
+/// ```
+pub(super) trait SpecFromIter<T, I> {
+ fn from_iter(iter: I) -> Self;
+}
+
+impl<T, I> SpecFromIter<T, I> for Vec<T>
+where
+ I: Iterator<Item = T>,
+{
+ default fn from_iter(iterator: I) -> Self {
+ SpecFromIterNested::from_iter(iterator)
+ }
+}
+
+impl<T> SpecFromIter<T, IntoIter<T>> for Vec<T> {
+ fn from_iter(iterator: IntoIter<T>) -> Self {
+ // A common case is passing a vector into a function which immediately
+ // re-collects into a vector. We can short circuit this if the IntoIter
+ // has not been advanced at all.
+        // When it has been advanced, we can also reuse the memory and move the data to the front.
+ // But we only do so when the resulting Vec wouldn't have more unused capacity
+ // than creating it through the generic FromIterator implementation would. That limitation
+ // is not strictly necessary as Vec's allocation behavior is intentionally unspecified.
+ // But it is a conservative choice.
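+        // For instance (an illustrative case), `let v2: Vec<_> = v.into_iter().collect();`
+        // hits this impl; if the iterator has not been advanced, the original
+        // buffer is reclaimed wholesale, with no copying and no new allocation.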
+ let has_advanced = iterator.buf.as_ptr() as *const _ != iterator.ptr;
+ if !has_advanced || iterator.len() >= iterator.cap / 2 {
+ unsafe {
+ let it = ManuallyDrop::new(iterator);
+ if has_advanced {
+ ptr::copy(it.ptr, it.buf.as_ptr(), it.len());
+ }
+ return Vec::from_raw_parts(it.buf.as_ptr(), it.len(), it.cap);
+ }
+ }
+
+ let mut vec = Vec::new();
+ // must delegate to spec_extend() since extend() itself delegates
+ // to spec_from for empty Vecs
+ vec.spec_extend(iterator);
+ vec
+ }
+}
diff --git a/library/alloc/src/vec/spec_from_iter_nested.rs b/library/alloc/src/vec/spec_from_iter_nested.rs
new file mode 100644
index 000000000..f915ebb86
--- /dev/null
+++ b/library/alloc/src/vec/spec_from_iter_nested.rs
@@ -0,0 +1,65 @@
+use core::cmp;
+use core::iter::TrustedLen;
+use core::ptr;
+
+use crate::raw_vec::RawVec;
+
+use super::{SpecExtend, Vec};
+
+/// Another specialization trait for Vec::from_iter,
+/// necessary to manually prioritize overlapping specializations;
+/// see [`SpecFromIter`](super::SpecFromIter) for details.
+pub(super) trait SpecFromIterNested<T, I> {
+ fn from_iter(iter: I) -> Self;
+}
+
+impl<T, I> SpecFromIterNested<T, I> for Vec<T>
+where
+ I: Iterator<Item = T>,
+{
+ default fn from_iter(mut iterator: I) -> Self {
+ // Unroll the first iteration, as the vector is going to be
+ // expanded on this iteration in every case when the iterable is not
+ // empty, but the loop in extend_desugared() is not going to see the
+ // vector being full in the few subsequent loop iterations.
+ // So we get better branch prediction.
+ let mut vector = match iterator.next() {
+ None => return Vec::new(),
+ Some(element) => {
+ let (lower, _) = iterator.size_hint();
+ let initial_capacity =
+ cmp::max(RawVec::<T>::MIN_NON_ZERO_CAP, lower.saturating_add(1));
+ let mut vector = Vec::with_capacity(initial_capacity);
+ unsafe {
+ // SAFETY: We requested capacity at least 1
+ ptr::write(vector.as_mut_ptr(), element);
+ vector.set_len(1);
+ }
+ vector
+ }
+ };
+ // must delegate to spec_extend() since extend() itself delegates
+ // to spec_from for empty Vecs
+ <Vec<T> as SpecExtend<T, I>>::spec_extend(&mut vector, iterator);
+ vector
+ }
+}
+
+impl<T, I> SpecFromIterNested<T, I> for Vec<T>
+where
+ I: TrustedLen<Item = T>,
+{
+ fn from_iter(iterator: I) -> Self {
+ let mut vector = match iterator.size_hint() {
+ (_, Some(upper)) => Vec::with_capacity(upper),
+ // TrustedLen contract guarantees that `size_hint() == (_, None)` means that there
+ // are more than `usize::MAX` elements.
+ // Since the previous branch would eagerly panic if the capacity is too large
+ // (via `with_capacity`) we do the same here.
+ _ => panic!("capacity overflow"),
+ };
+ // reuse extend specialization for TrustedLen
+ vector.spec_extend(iterator);
+ vector
+ }
+}
diff --git a/library/alloc/src/vec/splice.rs b/library/alloc/src/vec/splice.rs
new file mode 100644
index 000000000..bad765c7f
--- /dev/null
+++ b/library/alloc/src/vec/splice.rs
@@ -0,0 +1,133 @@
+use crate::alloc::{Allocator, Global};
+use core::ptr::{self};
+use core::slice::{self};
+
+use super::{Drain, Vec};
+
+/// A splicing iterator for `Vec`.
+///
+/// This struct is created by [`Vec::splice()`].
+/// See its documentation for more.
+///
+/// # Example
+///
+/// ```
+/// let mut v = vec![0, 1, 2];
+/// let new = [7, 8];
+/// let iter: std::vec::Splice<_> = v.splice(1.., new);
+/// ```
+#[derive(Debug)]
+#[stable(feature = "vec_splice", since = "1.21.0")]
+pub struct Splice<
+ 'a,
+ I: Iterator + 'a,
+ #[unstable(feature = "allocator_api", issue = "32838")] A: Allocator + 'a = Global,
+> {
+ pub(super) drain: Drain<'a, I::Item, A>,
+ pub(super) replace_with: I,
+}
+
+#[stable(feature = "vec_splice", since = "1.21.0")]
+impl<I: Iterator, A: Allocator> Iterator for Splice<'_, I, A> {
+ type Item = I::Item;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ self.drain.next()
+ }
+
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ self.drain.size_hint()
+ }
+}
+
+#[stable(feature = "vec_splice", since = "1.21.0")]
+impl<I: Iterator, A: Allocator> DoubleEndedIterator for Splice<'_, I, A> {
+ fn next_back(&mut self) -> Option<Self::Item> {
+ self.drain.next_back()
+ }
+}
+
+#[stable(feature = "vec_splice", since = "1.21.0")]
+impl<I: Iterator, A: Allocator> ExactSizeIterator for Splice<'_, I, A> {}
+
+#[stable(feature = "vec_splice", since = "1.21.0")]
+impl<I: Iterator, A: Allocator> Drop for Splice<'_, I, A> {
+ fn drop(&mut self) {
+ self.drain.by_ref().for_each(drop);
+
+ unsafe {
+ if self.drain.tail_len == 0 {
+ self.drain.vec.as_mut().extend(self.replace_with.by_ref());
+ return;
+ }
+
+ // First fill the range left by drain().
+ if !self.drain.fill(&mut self.replace_with) {
+ return;
+ }
+
+ // There may be more elements. Use the lower bound as an estimate.
+ // FIXME: Is the upper bound a better guess? Or something else?
+ let (lower_bound, _upper_bound) = self.replace_with.size_hint();
+ if lower_bound > 0 {
+ self.drain.move_tail(lower_bound);
+ if !self.drain.fill(&mut self.replace_with) {
+ return;
+ }
+ }
+
+ // Collect any remaining elements.
+ // This is a zero-length vector which does not allocate if `lower_bound` was exact.
+ let mut collected = self.replace_with.by_ref().collect::<Vec<I::Item>>().into_iter();
+ // Now we have an exact count.
+ if collected.len() > 0 {
+ self.drain.move_tail(collected.len());
+ let filled = self.drain.fill(&mut collected);
+ debug_assert!(filled);
+ debug_assert_eq!(collected.len(), 0);
+ }
+ }
+ // Let `Drain::drop` move the tail back if necessary and restore `vec.len`.
+ }
+}
+
+/// Private helper methods for `Splice::drop`
+impl<T, A: Allocator> Drain<'_, T, A> {
+ /// The range from `self.vec.len` to `self.tail_start` contains elements
+ /// that have been moved out.
+ /// Fill that range as much as possible with new elements from the `replace_with` iterator.
+ /// Returns `true` if we filled the entire range. (`replace_with.next()` didn’t return `None`.)
+ unsafe fn fill<I: Iterator<Item = T>>(&mut self, replace_with: &mut I) -> bool {
+ let vec = unsafe { self.vec.as_mut() };
+ let range_start = vec.len;
+ let range_end = self.tail_start;
+ let range_slice = unsafe {
+ slice::from_raw_parts_mut(vec.as_mut_ptr().add(range_start), range_end - range_start)
+ };
+
+ for place in range_slice {
+ if let Some(new_item) = replace_with.next() {
+ unsafe { ptr::write(place, new_item) };
+ vec.len += 1;
+ } else {
+ return false;
+ }
+ }
+ true
+ }
+
+ /// Makes room for inserting more elements before the tail.
+ unsafe fn move_tail(&mut self, additional: usize) {
+ let vec = unsafe { self.vec.as_mut() };
+ let len = self.tail_start + self.tail_len;
+ vec.buf.reserve(len, additional);
+
+ let new_tail_start = self.tail_start + additional;
+ unsafe {
+ let src = vec.as_ptr().add(self.tail_start);
+ let dst = vec.as_mut_ptr().add(new_tail_start);
+ ptr::copy(src, dst, self.tail_len);
+ }
+ self.tail_start = new_tail_start;
+ }
+}
diff --git a/library/alloc/tests/arc.rs b/library/alloc/tests/arc.rs
new file mode 100644
index 000000000..ce40b5c9b
--- /dev/null
+++ b/library/alloc/tests/arc.rs
@@ -0,0 +1,212 @@
+use std::any::Any;
+use std::cell::RefCell;
+use std::cmp::PartialEq;
+use std::iter::TrustedLen;
+use std::mem;
+use std::sync::{Arc, Weak};
+
+#[test]
+fn uninhabited() {
+ enum Void {}
+ let mut a = Weak::<Void>::new();
+ a = a.clone();
+ assert!(a.upgrade().is_none());
+
+ let mut a: Weak<dyn Any> = a; // Unsizing
+ a = a.clone();
+ assert!(a.upgrade().is_none());
+}
+
+#[test]
+fn slice() {
+ let a: Arc<[u32; 3]> = Arc::new([3, 2, 1]);
+ let a: Arc<[u32]> = a; // Unsizing
+ let b: Arc<[u32]> = Arc::from(&[3, 2, 1][..]); // Conversion
+ assert_eq!(a, b);
+
+ // Exercise is_dangling() with a DST
+ let mut a = Arc::downgrade(&a);
+ a = a.clone();
+ assert!(a.upgrade().is_some());
+}
+
+#[test]
+fn trait_object() {
+ let a: Arc<u32> = Arc::new(4);
+ let a: Arc<dyn Any> = a; // Unsizing
+
+ // Exercise is_dangling() with a DST
+ let mut a = Arc::downgrade(&a);
+ a = a.clone();
+ assert!(a.upgrade().is_some());
+
+ let mut b = Weak::<u32>::new();
+ b = b.clone();
+ assert!(b.upgrade().is_none());
+ let mut b: Weak<dyn Any> = b; // Unsizing
+ b = b.clone();
+ assert!(b.upgrade().is_none());
+}
+
+#[test]
+fn float_nan_ne() {
+ let x = Arc::new(f32::NAN);
+ assert!(x != x);
+ assert!(!(x == x));
+}
+
+#[test]
+fn partial_eq() {
+ struct TestPEq(RefCell<usize>);
+ impl PartialEq for TestPEq {
+ fn eq(&self, other: &TestPEq) -> bool {
+ *self.0.borrow_mut() += 1;
+ *other.0.borrow_mut() += 1;
+ true
+ }
+ }
+ let x = Arc::new(TestPEq(RefCell::new(0)));
+ assert!(x == x);
+ assert!(!(x != x));
+ assert_eq!(*x.0.borrow(), 4);
+}
+
+#[test]
+fn eq() {
+ #[derive(Eq)]
+ struct TestEq(RefCell<usize>);
+ impl PartialEq for TestEq {
+ fn eq(&self, other: &TestEq) -> bool {
+ *self.0.borrow_mut() += 1;
+ *other.0.borrow_mut() += 1;
+ true
+ }
+ }
+ let x = Arc::new(TestEq(RefCell::new(0)));
+ assert!(x == x);
+ assert!(!(x != x));
+ assert_eq!(*x.0.borrow(), 0);
+}
+
+// The test code below is identical to that in `rc.rs`.
+// For better maintainability we therefore define this type alias.
+type Rc<T> = Arc<T>;
+
+const SHARED_ITER_MAX: u16 = 100;
+
+fn assert_trusted_len<I: TrustedLen>(_: &I) {}
+
+#[test]
+fn shared_from_iter_normal() {
+ // Exercise the base implementation for non-`TrustedLen` iterators.
+ {
+ // `Filter` is never `TrustedLen` since we don't
+ // know statically how many elements will be kept:
+ let iter = (0..SHARED_ITER_MAX).filter(|x| x % 2 == 0).map(Box::new);
+
+ // Collecting into a `Vec<T>` or `Rc<[T]>` should make no difference:
+ let vec = iter.clone().collect::<Vec<_>>();
+ let rc = iter.collect::<Rc<[_]>>();
+ assert_eq!(&*vec, &*rc);
+
+ // Clone a bit and let these get dropped.
+ {
+ let _rc_2 = rc.clone();
+ let _rc_3 = rc.clone();
+ let _rc_4 = Rc::downgrade(&_rc_3);
+ }
+    } // Drop what hasn't been dropped here.
+}
+
+#[test]
+fn shared_from_iter_trustedlen_normal() {
+ // Exercise the `TrustedLen` implementation under normal circumstances
+ // where `size_hint()` matches `(_, Some(exact_len))`.
+ {
+ let iter = (0..SHARED_ITER_MAX).map(Box::new);
+ assert_trusted_len(&iter);
+
+ // Collecting into a `Vec<T>` or `Rc<[T]>` should make no difference:
+ let vec = iter.clone().collect::<Vec<_>>();
+ let rc = iter.collect::<Rc<[_]>>();
+ assert_eq!(&*vec, &*rc);
+ assert_eq!(mem::size_of::<Box<u16>>() * SHARED_ITER_MAX as usize, mem::size_of_val(&*rc));
+
+ // Clone a bit and let these get dropped.
+ {
+ let _rc_2 = rc.clone();
+ let _rc_3 = rc.clone();
+ let _rc_4 = Rc::downgrade(&_rc_3);
+ }
+    } // Drop what hasn't been dropped here.
+
+ // Try a ZST to make sure it is handled well.
+ {
+ let iter = (0..SHARED_ITER_MAX).map(drop);
+ let vec = iter.clone().collect::<Vec<_>>();
+ let rc = iter.collect::<Rc<[_]>>();
+ assert_eq!(&*vec, &*rc);
+ assert_eq!(0, mem::size_of_val(&*rc));
+ {
+ let _rc_2 = rc.clone();
+ let _rc_3 = rc.clone();
+ let _rc_4 = Rc::downgrade(&_rc_3);
+ }
+ }
+}
+
+#[test]
+#[should_panic = "I've almost got 99 problems."]
+fn shared_from_iter_trustedlen_panic() {
+ // Exercise the `TrustedLen` implementation when `size_hint()` matches
+ // `(_, Some(exact_len))` but where `.next()` drops before the last iteration.
+ let iter = (0..SHARED_ITER_MAX).map(|val| match val {
+ 98 => panic!("I've almost got 99 problems."),
+ _ => Box::new(val),
+ });
+ assert_trusted_len(&iter);
+ let _ = iter.collect::<Rc<[_]>>();
+
+ panic!("I am unreachable.");
+}
+
+#[test]
+fn shared_from_iter_trustedlen_no_fuse() {
+ // Exercise the `TrustedLen` implementation when `size_hint()` matches
+ // `(_, Some(exact_len))` but where the iterator does not behave in a fused manner.
+ struct Iter(std::vec::IntoIter<Option<Box<u8>>>);
+
+ unsafe impl TrustedLen for Iter {}
+
+ impl Iterator for Iter {
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (2, Some(2))
+ }
+
+ type Item = Box<u8>;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ self.0.next().flatten()
+ }
+ }
+
+ let vec = vec![Some(Box::new(42)), Some(Box::new(24)), None, Some(Box::new(12))];
+ let iter = Iter(vec.into_iter());
+ assert_trusted_len(&iter);
+ assert_eq!(&[Box::new(42), Box::new(24)], &*iter.collect::<Rc<[_]>>());
+}
+
+#[test]
+fn weak_may_dangle() {
+ fn hmm<'a>(val: &'a mut Weak<&'a str>) -> Weak<&'a str> {
+ val.clone()
+ }
+
+ // Without #[may_dangle] we get:
+ let mut val = Weak::new();
+ hmm(&mut val);
+ // ~~~~~~~~ borrowed value does not live long enough
+ //
+ // `val` dropped here while still borrowed
+ // borrow might be used here, when `val` is dropped and runs the `Drop` code for type `std::sync::Weak`
+}
diff --git a/library/alloc/tests/borrow.rs b/library/alloc/tests/borrow.rs
new file mode 100644
index 000000000..57976aa6c
--- /dev/null
+++ b/library/alloc/tests/borrow.rs
@@ -0,0 +1,60 @@
+use std::borrow::{Cow, ToOwned};
+use std::ffi::{CStr, OsStr};
+use std::path::Path;
+use std::rc::Rc;
+use std::sync::Arc;
+
+macro_rules! test_from_cow {
+ ($value:ident => $($ty:ty),+) => {$(
+ let borrowed = <$ty>::from(Cow::Borrowed($value));
+ let owned = <$ty>::from(Cow::Owned($value.to_owned()));
+ assert_eq!($value, &*borrowed);
+ assert_eq!($value, &*owned);
+ )+};
+ ($value:ident : & $ty:ty) => {
+ test_from_cow!($value => Box<$ty>, Rc<$ty>, Arc<$ty>);
+ }
+}
+
+#[test]
+fn test_from_cow_slice() {
+ let slice: &[i32] = &[1, 2, 3];
+ test_from_cow!(slice: &[i32]);
+}
+
+#[test]
+fn test_from_cow_str() {
+ let string = "hello";
+ test_from_cow!(string: &str);
+}
+
+#[test]
+fn test_from_cow_c_str() {
+ let string = CStr::from_bytes_with_nul(b"hello\0").unwrap();
+ test_from_cow!(string: &CStr);
+}
+
+#[test]
+fn test_from_cow_os_str() {
+ let string = OsStr::new("hello");
+ test_from_cow!(string: &OsStr);
+}
+
+#[test]
+fn test_from_cow_path() {
+ let path = Path::new("hello");
+ test_from_cow!(path: &Path);
+}
+
+#[test]
+fn cow_const() {
+ // test that the methods of `Cow` are usable in a const context
+
+ const COW: Cow<'_, str> = Cow::Borrowed("moo");
+
+ const IS_BORROWED: bool = COW.is_borrowed();
+ assert!(IS_BORROWED);
+
+ const IS_OWNED: bool = COW.is_owned();
+ assert!(!IS_OWNED);
+}
diff --git a/library/alloc/tests/boxed.rs b/library/alloc/tests/boxed.rs
new file mode 100644
index 000000000..9e5123be9
--- /dev/null
+++ b/library/alloc/tests/boxed.rs
@@ -0,0 +1,167 @@
+use core::alloc::{AllocError, Allocator, Layout};
+use core::cell::Cell;
+use core::mem::MaybeUninit;
+use core::ptr::NonNull;
+
+#[test]
+fn uninitialized_zero_size_box() {
+ assert_eq!(
+ &*Box::<()>::new_uninit() as *const _,
+ NonNull::<MaybeUninit<()>>::dangling().as_ptr(),
+ );
+ assert_eq!(
+ Box::<[()]>::new_uninit_slice(4).as_ptr(),
+ NonNull::<MaybeUninit<()>>::dangling().as_ptr(),
+ );
+ assert_eq!(
+ Box::<[String]>::new_uninit_slice(0).as_ptr(),
+ NonNull::<MaybeUninit<String>>::dangling().as_ptr(),
+ );
+}
+
+#[derive(Clone, PartialEq, Eq, Debug)]
+struct Dummy {
+ _data: u8,
+}
+
+#[test]
+fn box_clone_and_clone_from_equivalence() {
+ for size in (0..8).map(|i| 2usize.pow(i)) {
+ let control = vec![Dummy { _data: 42 }; size].into_boxed_slice();
+ let clone = control.clone();
+ let mut copy = vec![Dummy { _data: 84 }; size].into_boxed_slice();
+ copy.clone_from(&control);
+ assert_eq!(control, clone);
+ assert_eq!(control, copy);
+ }
+}
+
+/// This test might give a false positive in case the box reallocates,
+/// but the allocator keeps the original pointer.
+///
+/// On the other hand, it won't give a false negative: If it fails, then the
+/// memory was definitely not reused.
+#[test]
+fn box_clone_from_ptr_stability() {
+ for size in (0..8).map(|i| 2usize.pow(i)) {
+ let control = vec![Dummy { _data: 42 }; size].into_boxed_slice();
+ let mut copy = vec![Dummy { _data: 84 }; size].into_boxed_slice();
+ let copy_raw = copy.as_ptr() as usize;
+ copy.clone_from(&control);
+ assert_eq!(copy.as_ptr() as usize, copy_raw);
+ }
+}
+
+#[test]
+fn box_deref_lval() {
+ let x = Box::new(Cell::new(5));
+ x.set(1000);
+ assert_eq!(x.get(), 1000);
+}
+
+pub struct ConstAllocator;
+
+unsafe impl const Allocator for ConstAllocator {
+ fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
+ match layout.size() {
+ 0 => Ok(NonNull::slice_from_raw_parts(layout.dangling(), 0)),
+ _ => unsafe {
+ let ptr = core::intrinsics::const_allocate(layout.size(), layout.align());
+ Ok(NonNull::new_unchecked(ptr as *mut [u8; 0] as *mut [u8]))
+ },
+ }
+ }
+
+ unsafe fn deallocate(&self, _ptr: NonNull<u8>, layout: Layout) {
+ match layout.size() {
+ 0 => { /* do nothing */ }
+ _ => { /* do nothing too */ }
+ }
+ }
+
+ fn allocate_zeroed(&self, layout: Layout) -> Result<NonNull<[u8]>, AllocError> {
+ let ptr = self.allocate(layout)?;
+ if layout.size() > 0 {
+ unsafe {
+ ptr.as_mut_ptr().write_bytes(0, layout.size());
+ }
+ }
+ Ok(ptr)
+ }
+
+ unsafe fn grow(
+ &self,
+ ptr: NonNull<u8>,
+ old_layout: Layout,
+ new_layout: Layout,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ debug_assert!(
+ new_layout.size() >= old_layout.size(),
+ "`new_layout.size()` must be greater than or equal to `old_layout.size()`"
+ );
+
+ let new_ptr = self.allocate(new_layout)?;
+ if new_layout.size() > 0 {
+ new_ptr.as_mut_ptr().copy_from_nonoverlapping(ptr.as_ptr(), old_layout.size());
+ self.deallocate(ptr, old_layout);
+ }
+ Ok(new_ptr)
+ }
+
+ unsafe fn grow_zeroed(
+ &self,
+ ptr: NonNull<u8>,
+ old_layout: Layout,
+ new_layout: Layout,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ let new_ptr = self.grow(ptr, old_layout, new_layout)?;
+ if new_layout.size() > 0 {
+ let old_size = old_layout.size();
+ let new_size = new_layout.size();
+ let raw_ptr = new_ptr.as_mut_ptr();
+ raw_ptr.add(old_size).write_bytes(0, new_size - old_size);
+ }
+ Ok(new_ptr)
+ }
+
+ unsafe fn shrink(
+ &self,
+ ptr: NonNull<u8>,
+ old_layout: Layout,
+ new_layout: Layout,
+ ) -> Result<NonNull<[u8]>, AllocError> {
+ debug_assert!(
+ new_layout.size() <= old_layout.size(),
+ "`new_layout.size()` must be smaller than or equal to `old_layout.size()`"
+ );
+
+ let new_ptr = self.allocate(new_layout)?;
+ if new_layout.size() > 0 {
+ new_ptr.as_mut_ptr().copy_from_nonoverlapping(ptr.as_ptr(), new_layout.size());
+ self.deallocate(ptr, old_layout);
+ }
+ Ok(new_ptr)
+ }
+
+ fn by_ref(&self) -> &Self
+ where
+ Self: Sized,
+ {
+ self
+ }
+}
+
+#[test]
+fn const_box() {
+ const VALUE: u32 = {
+ let mut boxed = Box::new_in(1u32, ConstAllocator);
+ assert!(*boxed == 1);
+
+ *boxed = 42;
+ assert!(*boxed == 42);
+
+ *Box::leak(boxed)
+ };
+
+ assert!(VALUE == 42);
+}
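+
+// Illustrative sketch, not part of the original test file: memory handed out by
+// `const_allocate` exists only inside compile-time evaluation, so there is
+// nothing for a runtime `deallocate` to free, which is presumably why
+// `ConstAllocator::deallocate` above is a no-op. Below is the same
+// `Box::new_in` + `Box::leak` pattern as `const_box`, with a different payload;
+// it assumes the feature gates already enabled in tests/lib.rs (`const_box`,
+// `const_heap`, `const_trait_impl`, ...).
+#[allow(dead_code)]
+const LEAKED_PAIR: (u8, u8) = {
+ let boxed = Box::new_in((1u8, 2u8), ConstAllocator);
+ *Box::leak(boxed)
+};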
diff --git a/library/alloc/tests/btree_set_hash.rs b/library/alloc/tests/btree_set_hash.rs
new file mode 100644
index 000000000..ab275ac43
--- /dev/null
+++ b/library/alloc/tests/btree_set_hash.rs
@@ -0,0 +1,29 @@
+use crate::hash;
+use std::collections::BTreeSet;
+
+#[test]
+fn test_hash() {
+ let mut x = BTreeSet::new();
+ let mut y = BTreeSet::new();
+
+ x.insert(1);
+ x.insert(2);
+ x.insert(3);
+
+ y.insert(3);
+ y.insert(2);
+ y.insert(1);
+
+ assert_eq!(hash(&x), hash(&y));
+}
+
+#[test]
+fn test_prefix_free() {
+ let x = BTreeSet::from([1, 2, 3]);
+ let y = BTreeSet::<i32>::new();
+
+ // If hashing were driven by iterating the elements alone, `(x, y)` and `(y, x)`
+ // would feed the hasher the same sequence of elements and hash identically.
+ // Because the length is hashed as well, they produce distinct sequences of
+ // hashed data.
+ assert_ne!(hash(&(&x, &y)), hash(&(&y, &x)));
+}
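+
+// Illustrative sketch, not part of the original test file: the prefix-free
+// property comes from feeding the hasher a length before the elements, so the
+// streams for `(x, y)` and `(y, x)` above look roughly like `3,1,2,3,0` versus
+// `0,3,1,2,3`. `hash_len_prefixed` is a hypothetical helper written only to
+// spell that encoding out.
+#[allow(dead_code)]
+fn hash_len_prefixed(seqs: &[&[i32]]) -> u64 {
+ use std::hash::{Hash, Hasher};
+ let mut h = std::collections::hash_map::DefaultHasher::new();
+ for seq in seqs {
+ seq.len().hash(&mut h); // the length first ...
+ for item in *seq {
+ item.hash(&mut h); // ... then the elements
+ }
+ }
+ h.finish()
+}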
diff --git a/library/alloc/tests/c_str.rs b/library/alloc/tests/c_str.rs
new file mode 100644
index 000000000..4a5817939
--- /dev/null
+++ b/library/alloc/tests/c_str.rs
@@ -0,0 +1,19 @@
+use std::borrow::Cow::{Borrowed, Owned};
+use std::ffi::CStr;
+use std::os::raw::c_char;
+
+#[test]
+fn to_str() {
+ let data = b"123\xE2\x80\xA6\0";
+ let ptr = data.as_ptr() as *const c_char;
+ unsafe {
+ assert_eq!(CStr::from_ptr(ptr).to_str(), Ok("123…"));
+ assert_eq!(CStr::from_ptr(ptr).to_string_lossy(), Borrowed("123…"));
+ }
+ let data = b"123\xE2\0";
+ let ptr = data.as_ptr() as *const c_char;
+ unsafe {
+ assert!(CStr::from_ptr(ptr).to_str().is_err());
+ assert_eq!(CStr::from_ptr(ptr).to_string_lossy(), Owned::<str>(format!("123\u{FFFD}")));
+ }
+}
diff --git a/library/alloc/tests/const_fns.rs b/library/alloc/tests/const_fns.rs
new file mode 100644
index 000000000..49b837bec
--- /dev/null
+++ b/library/alloc/tests/const_fns.rs
@@ -0,0 +1,35 @@
+// Test const functions in the library
+
+pub const MY_VEC: Vec<usize> = Vec::new();
+pub const MY_VEC2: Vec<usize> = Default::default();
+
+pub const MY_STRING: String = String::new();
+pub const MY_STRING2: String = Default::default();
+
+pub const MY_BOXED_SLICE: Box<[usize]> = Default::default();
+pub const MY_BOXED_STR: Box<str> = Default::default();
+
+use std::collections::{BTreeMap, BTreeSet};
+
+pub const MY_BTREEMAP: BTreeMap<usize, usize> = BTreeMap::new();
+pub const MAP: &'static BTreeMap<usize, usize> = &MY_BTREEMAP;
+pub const MAP_LEN: usize = MAP.len();
+pub const MAP_IS_EMPTY: bool = MAP.is_empty();
+
+pub const MY_BTREESET: BTreeSet<usize> = BTreeSet::new();
+pub const SET: &'static BTreeSet<usize> = &MY_BTREESET;
+pub const SET_LEN: usize = SET.len();
+pub const SET_IS_EMPTY: bool = SET.is_empty();
+
+#[test]
+fn test_const() {
+ assert_eq!(MY_VEC, MY_VEC2);
+ assert_eq!(MY_STRING, MY_STRING2);
+
+ assert_eq!(MY_VEC, *MY_BOXED_SLICE);
+ assert_eq!(MY_STRING, *MY_BOXED_STR);
+
+ assert_eq!(MAP_LEN, 0);
+ assert_eq!(SET_LEN, 0);
+ assert!(MAP_IS_EMPTY && SET_IS_EMPTY);
+}
diff --git a/library/alloc/tests/cow_str.rs b/library/alloc/tests/cow_str.rs
new file mode 100644
index 000000000..62a5c245a
--- /dev/null
+++ b/library/alloc/tests/cow_str.rs
@@ -0,0 +1,144 @@
+use std::borrow::Cow;
+
+// check that Cow<'a, str> implements addition
+#[test]
+fn check_cow_add_cow() {
+ let borrowed1 = Cow::Borrowed("Hello, ");
+ let borrowed2 = Cow::Borrowed("World!");
+ let borrow_empty = Cow::Borrowed("");
+
+ let owned1: Cow<'_, str> = Cow::Owned(String::from("Hi, "));
+ let owned2: Cow<'_, str> = Cow::Owned(String::from("Rustaceans!"));
+ let owned_empty: Cow<'_, str> = Cow::Owned(String::new());
+
+ assert_eq!("Hello, World!", borrowed1.clone() + borrowed2.clone());
+ assert_eq!("Hello, Rustaceans!", borrowed1.clone() + owned2.clone());
+
+ assert_eq!("Hi, World!", owned1.clone() + borrowed2.clone());
+ assert_eq!("Hi, Rustaceans!", owned1.clone() + owned2.clone());
+
+ if let Cow::Owned(_) = borrowed1.clone() + borrow_empty.clone() {
+ panic!("Adding empty strings to a borrow should note allocate");
+ }
+ if let Cow::Owned(_) = borrow_empty.clone() + borrowed1.clone() {
+ panic!("Adding empty strings to a borrow should note allocate");
+ }
+ if let Cow::Owned(_) = borrowed1.clone() + owned_empty.clone() {
+ panic!("Adding empty strings to a borrow should note allocate");
+ }
+ if let Cow::Owned(_) = owned_empty.clone() + borrowed1.clone() {
+ panic!("Adding empty strings to a borrow should note allocate");
+ }
+}
+
+#[test]
+fn check_cow_add_str() {
+ let borrowed = Cow::Borrowed("Hello, ");
+ let borrow_empty = Cow::Borrowed("");
+
+ let owned: Cow<'_, str> = Cow::Owned(String::from("Hi, "));
+ let owned_empty: Cow<'_, str> = Cow::Owned(String::new());
+
+ assert_eq!("Hello, World!", borrowed.clone() + "World!");
+
+ assert_eq!("Hi, World!", owned.clone() + "World!");
+
+ if let Cow::Owned(_) = borrowed.clone() + "" {
+ panic!("Adding empty strings to a borrow should note allocate");
+ }
+ if let Cow::Owned(_) = borrow_empty.clone() + "Hello, " {
+ panic!("Adding empty strings to a borrow should note allocate");
+ }
+ if let Cow::Owned(_) = owned_empty.clone() + "Hello, " {
+ panic!("Adding empty strings to a borrow should note allocate");
+ }
+}
+
+#[test]
+fn check_cow_add_assign_cow() {
+ let mut borrowed1 = Cow::Borrowed("Hello, ");
+ let borrowed2 = Cow::Borrowed("World!");
+ let borrow_empty = Cow::Borrowed("");
+
+ let mut owned1: Cow<'_, str> = Cow::Owned(String::from("Hi, "));
+ let owned2: Cow<'_, str> = Cow::Owned(String::from("Rustaceans!"));
+ let owned_empty: Cow<'_, str> = Cow::Owned(String::new());
+
+ let mut s = borrowed1.clone();
+ s += borrow_empty.clone();
+ assert_eq!("Hello, ", s);
+ if let Cow::Owned(_) = s {
+ panic!("Adding empty strings to a borrow should note allocate");
+ }
+ let mut s = borrow_empty.clone();
+ s += borrowed1.clone();
+ assert_eq!("Hello, ", s);
+ if let Cow::Owned(_) = s {
+ panic!("Adding empty strings to a borrow should note allocate");
+ }
+ let mut s = borrowed1.clone();
+ s += owned_empty.clone();
+ assert_eq!("Hello, ", s);
+ if let Cow::Owned(_) = s {
+ panic!("Adding empty strings to a borrow should note allocate");
+ }
+ let mut s = owned_empty.clone();
+ s += borrowed1.clone();
+ assert_eq!("Hello, ", s);
+ if let Cow::Owned(_) = s {
+ panic!("Adding empty strings to a borrow should note allocate");
+ }
+
+ owned1 += borrowed2;
+ borrowed1 += owned2;
+
+ assert_eq!("Hi, World!", owned1);
+ assert_eq!("Hello, Rustaceans!", borrowed1);
+}
+
+#[test]
+fn check_cow_add_assign_str() {
+ let mut borrowed = Cow::Borrowed("Hello, ");
+ let borrow_empty = Cow::Borrowed("");
+
+ let mut owned: Cow<'_, str> = Cow::Owned(String::from("Hi, "));
+ let owned_empty: Cow<'_, str> = Cow::Owned(String::new());
+
+ let mut s = borrowed.clone();
+ s += "";
+ assert_eq!("Hello, ", s);
+ if let Cow::Owned(_) = s {
+ panic!("Adding empty strings to a borrow should note allocate");
+ }
+ let mut s = borrow_empty.clone();
+ s += "World!";
+ assert_eq!("World!", s);
+ if let Cow::Owned(_) = s {
+ panic!("Adding empty strings to a borrow should note allocate");
+ }
+ let mut s = owned_empty.clone();
+ s += "World!";
+ assert_eq!("World!", s);
+ if let Cow::Owned(_) = s {
+ panic!("Adding empty strings to a borrow should note allocate");
+ }
+
+ owned += "World!";
+ borrowed += "World!";
+
+ assert_eq!("Hi, World!", owned);
+ assert_eq!("Hello, World!", borrowed);
+}
+
+#[test]
+fn check_cow_clone_from() {
+ let mut c1: Cow<'_, str> = Cow::Owned(String::with_capacity(25));
+ let s: String = "hi".to_string();
+ assert!(s.capacity() < 25);
+ let c2: Cow<'_, str> = Cow::Owned(s);
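+ // `clone_from` between two `Cow::Owned` values clones into the existing
+ // `String`, so `c1` keeps its original capacity (>= 25) rather than shrinking
+ // to fit "hi".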
+ c1.clone_from(&c2);
+ assert!(c1.into_owned().capacity() >= 25);
+ let mut c3: Cow<'_, str> = Cow::Borrowed("bye");
+ c3.clone_from(&c2);
+ assert_eq!(c2, c3);
+}
diff --git a/library/alloc/tests/fmt.rs b/library/alloc/tests/fmt.rs
new file mode 100644
index 000000000..5ee6db43f
--- /dev/null
+++ b/library/alloc/tests/fmt.rs
@@ -0,0 +1,323 @@
+#![deny(warnings)]
+
+use std::cell::RefCell;
+use std::fmt::{self, Write};
+
+#[test]
+fn test_format() {
+ let s = fmt::format(format_args!("Hello, {}!", "world"));
+ assert_eq!(s, "Hello, world!");
+}
+
+struct A;
+struct B;
+struct C;
+struct D;
+
+impl fmt::LowerHex for A {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str("aloha")
+ }
+}
+impl fmt::UpperHex for B {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str("adios")
+ }
+}
+impl fmt::Display for C {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.pad_integral(true, "☃", "123")
+ }
+}
+impl fmt::Binary for D {
+ fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+ f.write_str("aa")?;
+ f.write_char('☃')?;
+ f.write_str("bb")
+ }
+}
+
+macro_rules! t {
+ ($a:expr, $b:expr) => {
+ assert_eq!($a, $b)
+ };
+}
+
+#[test]
+fn test_format_macro_interface() {
+ // Various edge cases without formats
+ t!(format!(""), "");
+ t!(format!("hello"), "hello");
+ t!(format!("hello {{"), "hello {");
+
+ // default formatters should work
+ t!(format!("{}", 1.0f32), "1");
+ t!(format!("{}", 1.0f64), "1");
+ t!(format!("{}", "a"), "a");
+ t!(format!("{}", "a".to_string()), "a");
+ t!(format!("{}", false), "false");
+ t!(format!("{}", 'a'), "a");
+
+ // At least exercise all the formats
+ t!(format!("{}", true), "true");
+ t!(format!("{}", '☃'), "☃");
+ t!(format!("{}", 10), "10");
+ t!(format!("{}", 10_usize), "10");
+ t!(format!("{:?}", '☃'), "'☃'");
+ t!(format!("{:?}", 10), "10");
+ t!(format!("{:?}", 10_usize), "10");
+ t!(format!("{:?}", "true"), "\"true\"");
+ t!(format!("{:?}", "foo\nbar"), "\"foo\\nbar\"");
+ t!(format!("{:?}", "foo\n\"bar\"\r\n\'baz\'\t\\qux\\"), r#""foo\n\"bar\"\r\n'baz'\t\\qux\\""#);
+ t!(format!("{:?}", "foo\0bar\x01baz\u{7f}q\u{75}x"), r#""foo\0bar\u{1}baz\u{7f}qux""#);
+ t!(format!("{:o}", 10_usize), "12");
+ t!(format!("{:x}", 10_usize), "a");
+ t!(format!("{:X}", 10_usize), "A");
+ t!(format!("{}", "foo"), "foo");
+ t!(format!("{}", "foo".to_string()), "foo");
+ if cfg!(target_pointer_width = "32") {
+ t!(format!("{:#p}", 0x1234 as *const isize), "0x00001234");
+ t!(format!("{:#p}", 0x1234 as *mut isize), "0x00001234");
+ } else {
+ t!(format!("{:#p}", 0x1234 as *const isize), "0x0000000000001234");
+ t!(format!("{:#p}", 0x1234 as *mut isize), "0x0000000000001234");
+ }
+ t!(format!("{:p}", 0x1234 as *const isize), "0x1234");
+ t!(format!("{:p}", 0x1234 as *mut isize), "0x1234");
+ t!(format!("{A:x}"), "aloha");
+ t!(format!("{B:X}"), "adios");
+ t!(format!("foo {} ☃☃☃☃☃☃", "bar"), "foo bar ☃☃☃☃☃☃");
+ t!(format!("{1} {0}", 0, 1), "1 0");
+ t!(format!("{foo} {bar}", foo = 0, bar = 1), "0 1");
+ t!(format!("{foo} {1} {bar} {0}", 0, 1, foo = 2, bar = 3), "2 1 3 0");
+ t!(format!("{} {0}", "a"), "a a");
+ t!(format!("{_foo}", _foo = 6usize), "6");
+ t!(format!("{foo_bar}", foo_bar = 1), "1");
+ t!(format!("{}", 5 + 5), "10");
+ t!(format!("{C:#4}"), "☃123");
+ t!(format!("{D:b}"), "aa☃bb");
+
+ let a: &dyn fmt::Debug = &1;
+ t!(format!("{a:?}"), "1");
+
+ // Formatting strings and their arguments
+ t!(format!("{}", "a"), "a");
+ t!(format!("{:4}", "a"), "a ");
+ t!(format!("{:4}", "☃"), "☃ ");
+ t!(format!("{:>4}", "a"), " a");
+ t!(format!("{:<4}", "a"), "a ");
+ t!(format!("{:^5}", "a"), " a ");
+ t!(format!("{:^5}", "aa"), " aa ");
+ t!(format!("{:^4}", "a"), " a ");
+ t!(format!("{:^4}", "aa"), " aa ");
+ t!(format!("{:.4}", "a"), "a");
+ t!(format!("{:4.4}", "a"), "a ");
+ t!(format!("{:4.4}", "aaaaaaaaaaaaaaaaaa"), "aaaa");
+ t!(format!("{:<4.4}", "aaaaaaaaaaaaaaaaaa"), "aaaa");
+ t!(format!("{:>4.4}", "aaaaaaaaaaaaaaaaaa"), "aaaa");
+ t!(format!("{:^4.4}", "aaaaaaaaaaaaaaaaaa"), "aaaa");
+ t!(format!("{:>10.4}", "aaaaaaaaaaaaaaaaaa"), " aaaa");
+ t!(format!("{:2.4}", "aaaaa"), "aaaa");
+ t!(format!("{:2.4}", "aaaa"), "aaaa");
+ t!(format!("{:2.4}", "aaa"), "aaa");
+ t!(format!("{:2.4}", "aa"), "aa");
+ t!(format!("{:2.4}", "a"), "a ");
+ t!(format!("{:0>2}", "a"), "0a");
+ t!(format!("{:.*}", 4, "aaaaaaaaaaaaaaaaaa"), "aaaa");
+ t!(format!("{:.1$}", "aaaaaaaaaaaaaaaaaa", 4), "aaaa");
+ t!(format!("{:.a$}", "aaaaaaaaaaaaaaaaaa", a = 4), "aaaa");
+ t!(format!("{:._a$}", "aaaaaaaaaaaaaaaaaa", _a = 4), "aaaa");
+ t!(format!("{:1$}", "a", 4), "a ");
+ t!(format!("{1:0$}", 4, "a"), "a ");
+ t!(format!("{:a$}", "a", a = 4), "a ");
+ t!(format!("{:-#}", "a"), "a");
+ t!(format!("{:+#}", "a"), "a");
+ t!(format!("{:/^10.8}", "1234567890"), "/12345678/");
+
+ // Some float stuff
+ t!(format!("{:}", 1.0f32), "1");
+ t!(format!("{:}", 1.0f64), "1");
+ t!(format!("{:.3}", 1.0f64), "1.000");
+ t!(format!("{:10.3}", 1.0f64), " 1.000");
+ t!(format!("{:+10.3}", 1.0f64), " +1.000");
+ t!(format!("{:+10.3}", -1.0f64), " -1.000");
+
+ t!(format!("{:e}", 1.2345e6f32), "1.2345e6");
+ t!(format!("{:e}", 1.2345e6f64), "1.2345e6");
+ t!(format!("{:E}", 1.2345e6f64), "1.2345E6");
+ t!(format!("{:.3e}", 1.2345e6f64), "1.234e6");
+ t!(format!("{:10.3e}", 1.2345e6f64), " 1.234e6");
+ t!(format!("{:+10.3e}", 1.2345e6f64), " +1.234e6");
+ t!(format!("{:+10.3e}", -1.2345e6f64), " -1.234e6");
+
+ // Float edge cases
+ t!(format!("{}", -0.0), "-0");
+ t!(format!("{:?}", 0.0), "0.0");
+
+ // sign aware zero padding
+ t!(format!("{:<3}", 1), "1 ");
+ t!(format!("{:>3}", 1), " 1");
+ t!(format!("{:^3}", 1), " 1 ");
+ t!(format!("{:03}", 1), "001");
+ t!(format!("{:<03}", 1), "001");
+ t!(format!("{:>03}", 1), "001");
+ t!(format!("{:^03}", 1), "001");
+ t!(format!("{:+03}", 1), "+01");
+ t!(format!("{:<+03}", 1), "+01");
+ t!(format!("{:>+03}", 1), "+01");
+ t!(format!("{:^+03}", 1), "+01");
+ t!(format!("{:#05x}", 1), "0x001");
+ t!(format!("{:<#05x}", 1), "0x001");
+ t!(format!("{:>#05x}", 1), "0x001");
+ t!(format!("{:^#05x}", 1), "0x001");
+ t!(format!("{:05}", 1.2), "001.2");
+ t!(format!("{:<05}", 1.2), "001.2");
+ t!(format!("{:>05}", 1.2), "001.2");
+ t!(format!("{:^05}", 1.2), "001.2");
+ t!(format!("{:05}", -1.2), "-01.2");
+ t!(format!("{:<05}", -1.2), "-01.2");
+ t!(format!("{:>05}", -1.2), "-01.2");
+ t!(format!("{:^05}", -1.2), "-01.2");
+ t!(format!("{:+05}", 1.2), "+01.2");
+ t!(format!("{:<+05}", 1.2), "+01.2");
+ t!(format!("{:>+05}", 1.2), "+01.2");
+ t!(format!("{:^+05}", 1.2), "+01.2");
+
+ // Ergonomic format_args!
+ t!(format!("{0:x} {0:X}", 15), "f F");
+ t!(format!("{0:x} {0:X} {}", 15), "f F 15");
+ t!(format!("{:x}{0:X}{a:x}{:X}{1:x}{a:X}", 13, 14, a = 15), "dDfEeF");
+ t!(format!("{a:x} {a:X}", a = 15), "f F");
+
+ // And its edge cases
+ t!(
+ format!(
+ "{a:.0$} {b:.0$} {0:.0$}\n{a:.c$} {b:.c$} {c:.c$}",
+ 4,
+ a = "abcdefg",
+ b = "hijklmn",
+ c = 3
+ ),
+ "abcd hijk 4\nabc hij 3"
+ );
+ t!(format!("{a:.*} {0} {:.*}", 4, 3, "efgh", a = "abcdef"), "abcd 4 efg");
+ t!(format!("{:.a$} {a} {a:#x}", "aaaaaa", a = 2), "aa 2 0x2");
+
+ // Test that pointers don't get truncated.
+ {
+ let val = usize::MAX;
+ let exp = format!("{val:#x}");
+ t!(format!("{:p}", std::ptr::invalid::<isize>(val)), exp);
+ }
+
+ // Escaping
+ t!(format!("{{"), "{");
+ t!(format!("}}"), "}");
+
+ // make sure that format! doesn't move out of local variables
+ let a = Box::new(3);
+ format!("{a}");
+ format!("{a}");
+
+ // make sure that format! doesn't cause spurious unused-unsafe warnings when
+ // it's inside of an outer unsafe block
+ unsafe {
+ let a: isize = ::std::mem::transmute(3_usize);
+ format!("{a}");
+ }
+
+ // test that trailing commas are acceptable
+ format!("{}", "test",);
+ format!("{foo}", foo = "test",);
+}
+
+// Basic test to make sure that we can invoke the `write!` macro with an
+// fmt::Write instance.
+#[test]
+fn test_write() {
+ let mut buf = String::new();
+ let _ = write!(&mut buf, "{}", 3);
+ {
+ let w = &mut buf;
+ let _ = write!(w, "{foo}", foo = 4);
+ let _ = write!(w, "{}", "hello");
+ let _ = writeln!(w, "{}", "line");
+ let _ = writeln!(w, "{foo}", foo = "bar");
+ let _ = w.write_char('☃');
+ let _ = w.write_str("str");
+ }
+
+ t!(buf, "34helloline\nbar\n☃str");
+}
+
+// Just make sure that the macros are defined, there's not really a lot that we
+// can do with them just yet (to test the output)
+#[test]
+fn test_print() {
+ print!("hi");
+ print!("{:?}", vec![0u8]);
+ println!("hello");
+ println!("this is a {}", "test");
+ println!("{foo}", foo = "bar");
+}
+
+// Just make sure that the macros are defined, there's not really a lot that we
+// can do with them just yet (to test the output)
+#[test]
+fn test_format_args() {
+ let mut buf = String::new();
+ {
+ let w = &mut buf;
+ let _ = write!(w, "{}", format_args!("{}", 1));
+ let _ = write!(w, "{}", format_args!("test"));
+ let _ = write!(w, "{}", format_args!("{test}", test = 3));
+ }
+ let s = buf;
+ t!(s, "1test3");
+
+ let s = fmt::format(format_args!("hello {}", "world"));
+ t!(s, "hello world");
+ let s = format!("{}: {}", "args were", format_args!("hello {}", "world"));
+ t!(s, "args were: hello world");
+}
+
+#[test]
+fn test_order() {
+ // Make sure format!() arguments are always evaluated in a left-to-right
+ // ordering
+ fn foo() -> isize {
+ static mut FOO: isize = 0;
+ unsafe {
+ FOO += 1;
+ FOO
+ }
+ }
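+ // The six `foo()` calls run left to right in the argument list (1 through 6);
+ // the format string then prints them as pos0, pos1, a, b, pos2, c.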
+ assert_eq!(
+ format!("{} {} {a} {b} {} {c}", foo(), foo(), foo(), a = foo(), b = foo(), c = foo()),
+ "1 2 4 5 3 6".to_string()
+ );
+}
+
+#[test]
+fn test_once() {
+ // Make sure each argument is evaluated only once even though it may be
+ // formatted multiple times
+ fn foo() -> isize {
+ static mut FOO: isize = 0;
+ unsafe {
+ FOO += 1;
+ FOO
+ }
+ }
+ assert_eq!(format!("{0} {0} {0} {a} {a} {a}", foo(), a = foo()), "1 1 1 2 2 2".to_string());
+}
+
+#[test]
+fn test_refcell() {
+ let refcell = RefCell::new(5);
+ assert_eq!(format!("{refcell:?}"), "RefCell { value: 5 }");
+ let borrow = refcell.borrow_mut();
+ assert_eq!(format!("{refcell:?}"), "RefCell { value: <borrowed> }");
+ drop(borrow);
+ assert_eq!(format!("{refcell:?}"), "RefCell { value: 5 }");
+}
diff --git a/library/alloc/tests/heap.rs b/library/alloc/tests/heap.rs
new file mode 100644
index 000000000..246b341ee
--- /dev/null
+++ b/library/alloc/tests/heap.rs
@@ -0,0 +1,44 @@
+use std::alloc::{Allocator, Global, Layout, System};
+
+/// Issues #45955 and #62251.
+#[test]
+fn alloc_system_overaligned_request() {
+ check_overalign_requests(System)
+}
+
+#[test]
+fn std_heap_overaligned_request() {
+ check_overalign_requests(Global)
+}
+
+fn check_overalign_requests<T: Allocator>(allocator: T) {
+ for &align in &[4, 8, 16, 32] {
+ // less than and bigger than `MIN_ALIGN`
+ for &size in &[align / 2, align - 1] {
+ // size less than alignment
+ let iterations = 128;
+ unsafe {
+ let pointers: Vec<_> = (0..iterations)
+ .map(|_| {
+ allocator.allocate(Layout::from_size_align(size, align).unwrap()).unwrap()
+ })
+ .collect();
+ for &ptr in &pointers {
+ assert_eq!(
+ (ptr.as_non_null_ptr().as_ptr() as usize) % align,
+ 0,
+ "Got a pointer less aligned than requested"
+ )
+ }
+
+ // Clean up
+ for &ptr in &pointers {
+ allocator.deallocate(
+ ptr.as_non_null_ptr(),
+ Layout::from_size_align(size, align).unwrap(),
+ )
+ }
+ }
+ }
+ }
+}
diff --git a/library/alloc/tests/lib.rs b/library/alloc/tests/lib.rs
new file mode 100644
index 000000000..d83cd29dd
--- /dev/null
+++ b/library/alloc/tests/lib.rs
@@ -0,0 +1,88 @@
+#![feature(allocator_api)]
+#![feature(alloc_layout_extra)]
+#![feature(assert_matches)]
+#![feature(box_syntax)]
+#![feature(cow_is_borrowed)]
+#![feature(const_box)]
+#![feature(const_convert)]
+#![feature(const_cow_is_borrowed)]
+#![feature(const_heap)]
+#![feature(const_mut_refs)]
+#![feature(const_nonnull_slice_from_raw_parts)]
+#![feature(const_ptr_write)]
+#![feature(const_try)]
+#![feature(core_intrinsics)]
+#![feature(drain_filter)]
+#![feature(exact_size_is_empty)]
+#![feature(new_uninit)]
+#![feature(pattern)]
+#![feature(trusted_len)]
+#![feature(try_reserve_kind)]
+#![feature(unboxed_closures)]
+#![feature(associated_type_bounds)]
+#![feature(binary_heap_into_iter_sorted)]
+#![feature(binary_heap_drain_sorted)]
+#![feature(slice_ptr_get)]
+#![feature(binary_heap_retain)]
+#![feature(binary_heap_as_slice)]
+#![feature(inplace_iteration)]
+#![feature(iter_advance_by)]
+#![feature(iter_next_chunk)]
+#![feature(round_char_boundary)]
+#![feature(slice_group_by)]
+#![feature(slice_partition_dedup)]
+#![feature(string_remove_matches)]
+#![feature(const_btree_new)]
+#![feature(const_default_impls)]
+#![feature(const_trait_impl)]
+#![feature(const_str_from_utf8)]
+#![feature(nonnull_slice_from_raw_parts)]
+#![feature(panic_update_hook)]
+#![feature(slice_flatten)]
+#![feature(thin_box)]
+#![feature(bench_black_box)]
+#![feature(strict_provenance)]
+#![feature(once_cell)]
+
+use std::collections::hash_map::DefaultHasher;
+use std::hash::{Hash, Hasher};
+
+mod arc;
+mod borrow;
+mod boxed;
+mod btree_set_hash;
+mod c_str;
+mod const_fns;
+mod cow_str;
+mod fmt;
+mod heap;
+mod linked_list;
+mod rc;
+mod slice;
+mod str;
+mod string;
+mod thin_box;
+mod vec;
+mod vec_deque;
+
+fn hash<T: Hash>(t: &T) -> u64 {
+ let mut s = DefaultHasher::new();
+ t.hash(&mut s);
+ s.finish()
+}
+
+// FIXME: Instantiated functions with i128 in the signature are not supported in Emscripten.
+// See https://github.com/kripken/emscripten-fastcomp/issues/169
+#[cfg(not(target_os = "emscripten"))]
+#[test]
+fn test_boxed_hasher() {
+ let ordinary_hash = hash(&5u32);
+
+ let mut hasher_1 = Box::new(DefaultHasher::new());
+ 5u32.hash(&mut hasher_1);
+ assert_eq!(ordinary_hash, hasher_1.finish());
+
+ let mut hasher_2 = Box::new(DefaultHasher::new()) as Box<dyn Hasher>;
+ 5u32.hash(&mut hasher_2);
+ assert_eq!(ordinary_hash, hasher_2.finish());
+}
diff --git a/library/alloc/tests/linked_list.rs b/library/alloc/tests/linked_list.rs
new file mode 100644
index 000000000..65b09cb00
--- /dev/null
+++ b/library/alloc/tests/linked_list.rs
@@ -0,0 +1,21 @@
+use std::collections::LinkedList;
+
+#[test]
+fn test_hash() {
+ use crate::hash;
+
+ let mut x = LinkedList::new();
+ let mut y = LinkedList::new();
+
+ assert!(hash(&x) == hash(&y));
+
+ x.push_back(1);
+ x.push_back(2);
+ x.push_back(3);
+
+ y.push_front(3);
+ y.push_front(2);
+ y.push_front(1);
+
+ assert!(hash(&x) == hash(&y));
+}
diff --git a/library/alloc/tests/rc.rs b/library/alloc/tests/rc.rs
new file mode 100644
index 000000000..efb39a609
--- /dev/null
+++ b/library/alloc/tests/rc.rs
@@ -0,0 +1,208 @@
+use std::any::Any;
+use std::cell::RefCell;
+use std::cmp::PartialEq;
+use std::iter::TrustedLen;
+use std::mem;
+use std::rc::{Rc, Weak};
+
+#[test]
+fn uninhabited() {
+ enum Void {}
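+ // `Weak::new` never allocates, so it can be created even for an uninhabited
+ // type; upgrading such a `Weak` always yields `None`.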
+ let mut a = Weak::<Void>::new();
+ a = a.clone();
+ assert!(a.upgrade().is_none());
+
+ let mut a: Weak<dyn Any> = a; // Unsizing
+ a = a.clone();
+ assert!(a.upgrade().is_none());
+}
+
+#[test]
+fn slice() {
+ let a: Rc<[u32; 3]> = Rc::new([3, 2, 1]);
+ let a: Rc<[u32]> = a; // Unsizing
+ let b: Rc<[u32]> = Rc::from(&[3, 2, 1][..]); // Conversion
+ assert_eq!(a, b);
+
+ // Exercise is_dangling() with a DST
+ let mut a = Rc::downgrade(&a);
+ a = a.clone();
+ assert!(a.upgrade().is_some());
+}
+
+#[test]
+fn trait_object() {
+ let a: Rc<u32> = Rc::new(4);
+ let a: Rc<dyn Any> = a; // Unsizing
+
+ // Exercise is_dangling() with a DST
+ let mut a = Rc::downgrade(&a);
+ a = a.clone();
+ assert!(a.upgrade().is_some());
+
+ let mut b = Weak::<u32>::new();
+ b = b.clone();
+ assert!(b.upgrade().is_none());
+ let mut b: Weak<dyn Any> = b; // Unsizing
+ b = b.clone();
+ assert!(b.upgrade().is_none());
+}
+
+#[test]
+fn float_nan_ne() {
+ let x = Rc::new(f32::NAN);
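+ // `f32` is not `Eq`, so `Rc`'s comparison falls through to the values
+ // themselves, and NaN != NaN.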
+ assert!(x != x);
+ assert!(!(x == x));
+}
+
+#[test]
+fn partial_eq() {
+ struct TestPEq(RefCell<usize>);
+ impl PartialEq for TestPEq {
+ fn eq(&self, other: &TestPEq) -> bool {
+ *self.0.borrow_mut() += 1;
+ *other.0.borrow_mut() += 1;
+ true
+ }
+ }
+ let x = Rc::new(TestPEq(RefCell::new(0)));
+ assert!(x == x);
+ assert!(!(x != x));
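+ // Each of the two comparisons above ends up calling `TestPEq::eq` once, and
+ // every call bumps the counter on both operands, hence 4.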
+ assert_eq!(*x.0.borrow(), 4);
+}
+
+#[test]
+fn eq() {
+ #[derive(Eq)]
+ struct TestEq(RefCell<usize>);
+ impl PartialEq for TestEq {
+ fn eq(&self, other: &TestEq) -> bool {
+ *self.0.borrow_mut() += 1;
+ *other.0.borrow_mut() += 1;
+ true
+ }
+ }
+ let x = Rc::new(TestEq(RefCell::new(0)));
+ assert!(x == x);
+ assert!(!(x != x));
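+ // With an `Eq` payload, `Rc` can short-circuit on pointer identity, so the
+ // inner `eq` never runs and the counters stay at 0.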
+ assert_eq!(*x.0.borrow(), 0);
+}
+
+const SHARED_ITER_MAX: u16 = 100;
+
+fn assert_trusted_len<I: TrustedLen>(_: &I) {}
+
+#[test]
+fn shared_from_iter_normal() {
+ // Exercise the base implementation for non-`TrustedLen` iterators.
+ {
+ // `Filter` is never `TrustedLen` since we don't
+ // know statically how many elements will be kept:
+ let iter = (0..SHARED_ITER_MAX).filter(|x| x % 2 == 0).map(Box::new);
+
+ // Collecting into a `Vec<T>` or `Rc<[T]>` should make no difference:
+ let vec = iter.clone().collect::<Vec<_>>();
+ let rc = iter.collect::<Rc<[_]>>();
+ assert_eq!(&*vec, &*rc);
+
+ // Clone a bit and let these get dropped.
+ {
+ let _rc_2 = rc.clone();
+ let _rc_3 = rc.clone();
+ let _rc_4 = Rc::downgrade(&_rc_3);
+ }
+ } // Drop whatever hasn't been dropped already.
+}
+
+#[test]
+fn shared_from_iter_trustedlen_normal() {
+ // Exercise the `TrustedLen` implementation under normal circumstances
+ // where `size_hint()` matches `(_, Some(exact_len))`.
+ {
+ let iter = (0..SHARED_ITER_MAX).map(Box::new);
+ assert_trusted_len(&iter);
+
+ // Collecting into a `Vec<T>` or `Rc<[T]>` should make no difference:
+ let vec = iter.clone().collect::<Vec<_>>();
+ let rc = iter.collect::<Rc<[_]>>();
+ assert_eq!(&*vec, &*rc);
+ assert_eq!(mem::size_of::<Box<u16>>() * SHARED_ITER_MAX as usize, mem::size_of_val(&*rc));
+
+ // Clone a bit and let these get dropped.
+ {
+ let _rc_2 = rc.clone();
+ let _rc_3 = rc.clone();
+ let _rc_4 = Rc::downgrade(&_rc_3);
+ }
+ } // Drop whatever hasn't been dropped already.
+
+ // Try a ZST to make sure it is handled well.
+ {
+ let iter = (0..SHARED_ITER_MAX).map(drop);
+ let vec = iter.clone().collect::<Vec<_>>();
+ let rc = iter.collect::<Rc<[_]>>();
+ assert_eq!(&*vec, &*rc);
+ assert_eq!(0, mem::size_of_val(&*rc));
+ {
+ let _rc_2 = rc.clone();
+ let _rc_3 = rc.clone();
+ let _rc_4 = Rc::downgrade(&_rc_3);
+ }
+ }
+}
+
+#[test]
+#[should_panic = "I've almost got 99 problems."]
+fn shared_from_iter_trustedlen_panic() {
+ // Exercise the `TrustedLen` implementation when `size_hint()` matches
+ // `(_, Some(exact_len))` but where `.next()` drops before the last iteration.
+ let iter = (0..SHARED_ITER_MAX).map(|val| match val {
+ 98 => panic!("I've almost got 99 problems."),
+ _ => Box::new(val),
+ });
+ assert_trusted_len(&iter);
+ let _ = iter.collect::<Rc<[_]>>();
+
+ panic!("I am unreachable.");
+}
+
+#[test]
+fn shared_from_iter_trustedlen_no_fuse() {
+ // Exercise the `TrustedLen` implementation when `size_hint()` matches
+ // `(_, Some(exact_len))` but where the iterator does not behave in a fused manner.
+ struct Iter(std::vec::IntoIter<Option<Box<u8>>>);
+
+ unsafe impl TrustedLen for Iter {}
+
+ impl Iterator for Iter {
+ fn size_hint(&self) -> (usize, Option<usize>) {
+ (2, Some(2))
+ }
+
+ type Item = Box<u8>;
+
+ fn next(&mut self) -> Option<Self::Item> {
+ self.0.next().flatten()
+ }
+ }
+
+ let vec = vec![Some(Box::new(42)), Some(Box::new(24)), None, Some(Box::new(12))];
+ let iter = Iter(vec.into_iter());
+ assert_trusted_len(&iter);
+ assert_eq!(&[Box::new(42), Box::new(24)], &*iter.collect::<Rc<[_]>>());
+}
+
+#[test]
+fn weak_may_dangle() {
+ fn hmm<'a>(val: &'a mut Weak<&'a str>) -> Weak<&'a str> {
+ val.clone()
+ }
+
+ // Without #[may_dangle] we get:
+ let mut val = Weak::new();
+ hmm(&mut val);
+ // ~~~~~~~~ borrowed value does not live long enough
+ //
+ // `val` dropped here while still borrowed
+ // borrow might be used here, when `val` is dropped and runs the `Drop` code for type `std::rc::Weak`
+}
diff --git a/library/alloc/tests/slice.rs b/library/alloc/tests/slice.rs
new file mode 100644
index 000000000..21f894343
--- /dev/null
+++ b/library/alloc/tests/slice.rs
@@ -0,0 +1,2018 @@
+use std::cell::Cell;
+use std::cmp::Ordering::{self, Equal, Greater, Less};
+use std::convert::identity;
+use std::fmt;
+use std::mem;
+use std::panic;
+use std::rc::Rc;
+use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};
+
+use rand::distributions::Standard;
+use rand::seq::SliceRandom;
+use rand::{thread_rng, Rng, RngCore};
+
+fn square(n: usize) -> usize {
+ n * n
+}
+
+fn is_odd(n: &usize) -> bool {
+ *n % 2 == 1
+}
+
+#[test]
+fn test_from_fn() {
+ // Test on-stack from_fn.
+ let mut v: Vec<_> = (0..3).map(square).collect();
+ {
+ let v = v;
+ assert_eq!(v.len(), 3);
+ assert_eq!(v[0], 0);
+ assert_eq!(v[1], 1);
+ assert_eq!(v[2], 4);
+ }
+
+ // Test on-heap from_fn.
+ v = (0..5).map(square).collect();
+ {
+ let v = v;
+ assert_eq!(v.len(), 5);
+ assert_eq!(v[0], 0);
+ assert_eq!(v[1], 1);
+ assert_eq!(v[2], 4);
+ assert_eq!(v[3], 9);
+ assert_eq!(v[4], 16);
+ }
+}
+
+#[test]
+fn test_from_elem() {
+ // Test on-stack from_elem.
+ let mut v = vec![10, 10];
+ {
+ let v = v;
+ assert_eq!(v.len(), 2);
+ assert_eq!(v[0], 10);
+ assert_eq!(v[1], 10);
+ }
+
+ // Test on-heap from_elem.
+ v = vec![20; 6];
+ {
+ let v = &v[..];
+ assert_eq!(v[0], 20);
+ assert_eq!(v[1], 20);
+ assert_eq!(v[2], 20);
+ assert_eq!(v[3], 20);
+ assert_eq!(v[4], 20);
+ assert_eq!(v[5], 20);
+ }
+}
+
+#[test]
+fn test_is_empty() {
+ let xs: [i32; 0] = [];
+ assert!(xs.is_empty());
+ assert!(![0].is_empty());
+}
+
+#[test]
+fn test_len_divzero() {
+ type Z = [i8; 0];
+ let v0: &[Z] = &[];
+ let v1: &[Z] = &[[]];
+ let v2: &[Z] = &[[], []];
+ assert_eq!(mem::size_of::<Z>(), 0);
+ assert_eq!(v0.len(), 0);
+ assert_eq!(v1.len(), 1);
+ assert_eq!(v2.len(), 2);
+}
+
+#[test]
+fn test_get() {
+ let mut a = vec![11];
+ assert_eq!(a.get(1), None);
+ a = vec![11, 12];
+ assert_eq!(a.get(1).unwrap(), &12);
+ a = vec![11, 12, 13];
+ assert_eq!(a.get(1).unwrap(), &12);
+}
+
+#[test]
+fn test_first() {
+ let mut a = vec![];
+ assert_eq!(a.first(), None);
+ a = vec![11];
+ assert_eq!(a.first().unwrap(), &11);
+ a = vec![11, 12];
+ assert_eq!(a.first().unwrap(), &11);
+}
+
+#[test]
+fn test_first_mut() {
+ let mut a = vec![];
+ assert_eq!(a.first_mut(), None);
+ a = vec![11];
+ assert_eq!(*a.first_mut().unwrap(), 11);
+ a = vec![11, 12];
+ assert_eq!(*a.first_mut().unwrap(), 11);
+}
+
+#[test]
+fn test_split_first() {
+ let mut a = vec![11];
+ let b: &[i32] = &[];
+ assert!(b.split_first().is_none());
+ assert_eq!(a.split_first(), Some((&11, b)));
+ a = vec![11, 12];
+ let b: &[i32] = &[12];
+ assert_eq!(a.split_first(), Some((&11, b)));
+}
+
+#[test]
+fn test_split_first_mut() {
+ let mut a = vec![11];
+ let b: &mut [i32] = &mut [];
+ assert!(b.split_first_mut().is_none());
+ assert!(a.split_first_mut() == Some((&mut 11, b)));
+ a = vec![11, 12];
+ let b: &mut [_] = &mut [12];
+ assert!(a.split_first_mut() == Some((&mut 11, b)));
+}
+
+#[test]
+fn test_split_last() {
+ let mut a = vec![11];
+ let b: &[i32] = &[];
+ assert!(b.split_last().is_none());
+ assert_eq!(a.split_last(), Some((&11, b)));
+ a = vec![11, 12];
+ let b: &[_] = &[11];
+ assert_eq!(a.split_last(), Some((&12, b)));
+}
+
+#[test]
+fn test_split_last_mut() {
+ let mut a = vec![11];
+ let b: &mut [i32] = &mut [];
+ assert!(b.split_last_mut().is_none());
+ assert!(a.split_last_mut() == Some((&mut 11, b)));
+
+ a = vec![11, 12];
+ let b: &mut [_] = &mut [11];
+ assert!(a.split_last_mut() == Some((&mut 12, b)));
+}
+
+#[test]
+fn test_last() {
+ let mut a = vec![];
+ assert_eq!(a.last(), None);
+ a = vec![11];
+ assert_eq!(a.last().unwrap(), &11);
+ a = vec![11, 12];
+ assert_eq!(a.last().unwrap(), &12);
+}
+
+#[test]
+fn test_last_mut() {
+ let mut a = vec![];
+ assert_eq!(a.last_mut(), None);
+ a = vec![11];
+ assert_eq!(*a.last_mut().unwrap(), 11);
+ a = vec![11, 12];
+ assert_eq!(*a.last_mut().unwrap(), 12);
+}
+
+#[test]
+fn test_slice() {
+ // Test fixed length vector.
+ let vec_fixed = [1, 2, 3, 4];
+ let v_a = vec_fixed[1..vec_fixed.len()].to_vec();
+ assert_eq!(v_a.len(), 3);
+
+ assert_eq!(v_a[0], 2);
+ assert_eq!(v_a[1], 3);
+ assert_eq!(v_a[2], 4);
+
+ // Test on stack.
+ let vec_stack: &[_] = &[1, 2, 3];
+ let v_b = vec_stack[1..3].to_vec();
+ assert_eq!(v_b.len(), 2);
+
+ assert_eq!(v_b[0], 2);
+ assert_eq!(v_b[1], 3);
+
+ // Test `Box<[T]>`
+ let vec_unique = vec![1, 2, 3, 4, 5, 6];
+ let v_d = vec_unique[1..6].to_vec();
+ assert_eq!(v_d.len(), 5);
+
+ assert_eq!(v_d[0], 2);
+ assert_eq!(v_d[1], 3);
+ assert_eq!(v_d[2], 4);
+ assert_eq!(v_d[3], 5);
+ assert_eq!(v_d[4], 6);
+}
+
+#[test]
+fn test_slice_from() {
+ let vec: &[_] = &[1, 2, 3, 4];
+ assert_eq!(&vec[..], vec);
+ let b: &[_] = &[3, 4];
+ assert_eq!(&vec[2..], b);
+ let b: &[_] = &[];
+ assert_eq!(&vec[4..], b);
+}
+
+#[test]
+fn test_slice_to() {
+ let vec: &[_] = &[1, 2, 3, 4];
+ assert_eq!(&vec[..4], vec);
+ let b: &[_] = &[1, 2];
+ assert_eq!(&vec[..2], b);
+ let b: &[_] = &[];
+ assert_eq!(&vec[..0], b);
+}
+
+#[test]
+fn test_pop() {
+ let mut v = vec![5];
+ let e = v.pop();
+ assert_eq!(v.len(), 0);
+ assert_eq!(e, Some(5));
+ let f = v.pop();
+ assert_eq!(f, None);
+ let g = v.pop();
+ assert_eq!(g, None);
+}
+
+#[test]
+fn test_swap_remove() {
+ let mut v = vec![1, 2, 3, 4, 5];
+ let mut e = v.swap_remove(0);
+ assert_eq!(e, 1);
+ assert_eq!(v, [5, 2, 3, 4]);
+ e = v.swap_remove(3);
+ assert_eq!(e, 4);
+ assert_eq!(v, [5, 2, 3]);
+}
+
+#[test]
+#[should_panic]
+fn test_swap_remove_fail() {
+ let mut v = vec![1];
+ let _ = v.swap_remove(0);
+ let _ = v.swap_remove(0);
+}
+
+#[test]
+fn test_swap_remove_noncopyable() {
+ // Tests that we don't accidentally run destructors twice.
+ let mut v: Vec<Box<_>> = Vec::new();
+ v.push(Box::new(0));
+ v.push(Box::new(0));
+ v.push(Box::new(0));
+ let mut _e = v.swap_remove(0);
+ assert_eq!(v.len(), 2);
+ _e = v.swap_remove(1);
+ assert_eq!(v.len(), 1);
+ _e = v.swap_remove(0);
+ assert_eq!(v.len(), 0);
+}
+
+#[test]
+fn test_push() {
+ // Test on-stack push().
+ let mut v = vec![];
+ v.push(1);
+ assert_eq!(v.len(), 1);
+ assert_eq!(v[0], 1);
+
+ // Test on-heap push().
+ v.push(2);
+ assert_eq!(v.len(), 2);
+ assert_eq!(v[0], 1);
+ assert_eq!(v[1], 2);
+}
+
+#[test]
+fn test_truncate() {
+ let mut v: Vec<Box<_>> = vec![Box::new(6), Box::new(5), Box::new(4)];
+ v.truncate(1);
+ let v = v;
+ assert_eq!(v.len(), 1);
+ assert_eq!(*(v[0]), 6);
+ // If the unsafe block didn't drop things properly, we blow up here.
+}
+
+#[test]
+fn test_clear() {
+ let mut v: Vec<Box<_>> = vec![Box::new(6), Box::new(5), Box::new(4)];
+ v.clear();
+ assert_eq!(v.len(), 0);
+ // If the unsafe block didn't drop things properly, we blow up here.
+}
+
+#[test]
+fn test_retain() {
+ let mut v = vec![1, 2, 3, 4, 5];
+ v.retain(is_odd);
+ assert_eq!(v, [1, 3, 5]);
+}
+
+#[test]
+fn test_binary_search() {
+ assert_eq!([1, 2, 3, 4, 5].binary_search(&5).ok(), Some(4));
+ assert_eq!([1, 2, 3, 4, 5].binary_search(&4).ok(), Some(3));
+ assert_eq!([1, 2, 3, 4, 5].binary_search(&3).ok(), Some(2));
+ assert_eq!([1, 2, 3, 4, 5].binary_search(&2).ok(), Some(1));
+ assert_eq!([1, 2, 3, 4, 5].binary_search(&1).ok(), Some(0));
+
+ assert_eq!([2, 4, 6, 8, 10].binary_search(&1).ok(), None);
+ assert_eq!([2, 4, 6, 8, 10].binary_search(&5).ok(), None);
+ assert_eq!([2, 4, 6, 8, 10].binary_search(&4).ok(), Some(1));
+ assert_eq!([2, 4, 6, 8, 10].binary_search(&10).ok(), Some(4));
+
+ assert_eq!([2, 4, 6, 8].binary_search(&1).ok(), None);
+ assert_eq!([2, 4, 6, 8].binary_search(&5).ok(), None);
+ assert_eq!([2, 4, 6, 8].binary_search(&4).ok(), Some(1));
+ assert_eq!([2, 4, 6, 8].binary_search(&8).ok(), Some(3));
+
+ assert_eq!([2, 4, 6].binary_search(&1).ok(), None);
+ assert_eq!([2, 4, 6].binary_search(&5).ok(), None);
+ assert_eq!([2, 4, 6].binary_search(&4).ok(), Some(1));
+ assert_eq!([2, 4, 6].binary_search(&6).ok(), Some(2));
+
+ assert_eq!([2, 4].binary_search(&1).ok(), None);
+ assert_eq!([2, 4].binary_search(&5).ok(), None);
+ assert_eq!([2, 4].binary_search(&2).ok(), Some(0));
+ assert_eq!([2, 4].binary_search(&4).ok(), Some(1));
+
+ assert_eq!([2].binary_search(&1).ok(), None);
+ assert_eq!([2].binary_search(&5).ok(), None);
+ assert_eq!([2].binary_search(&2).ok(), Some(0));
+
+ assert_eq!([].binary_search(&1).ok(), None);
+ assert_eq!([].binary_search(&5).ok(), None);
+
+ assert!([1, 1, 1, 1, 1].binary_search(&1).ok() != None);
+ assert!([1, 1, 1, 1, 2].binary_search(&1).ok() != None);
+ assert!([1, 1, 1, 2, 2].binary_search(&1).ok() != None);
+ assert!([1, 1, 2, 2, 2].binary_search(&1).ok() != None);
+ assert_eq!([1, 2, 2, 2, 2].binary_search(&1).ok(), Some(0));
+
+ assert_eq!([1, 2, 3, 4, 5].binary_search(&6).ok(), None);
+ assert_eq!([1, 2, 3, 4, 5].binary_search(&0).ok(), None);
+}
+
+#[test]
+fn test_reverse() {
+ let mut v = vec![10, 20];
+ assert_eq!(v[0], 10);
+ assert_eq!(v[1], 20);
+ v.reverse();
+ assert_eq!(v[0], 20);
+ assert_eq!(v[1], 10);
+
+ let mut v3 = Vec::<i32>::new();
+ v3.reverse();
+ assert!(v3.is_empty());
+
+ // check the 1-byte-types path
+ let mut v = (-50..51i8).collect::<Vec<_>>();
+ v.reverse();
+ assert_eq!(v, (-50..51i8).rev().collect::<Vec<_>>());
+
+ // check the 2-byte-types path
+ let mut v = (-50..51i16).collect::<Vec<_>>();
+ v.reverse();
+ assert_eq!(v, (-50..51i16).rev().collect::<Vec<_>>());
+}
+
+#[test]
+#[cfg_attr(miri, ignore)] // Miri is too slow
+fn test_sort() {
+ let mut rng = thread_rng();
+
+ for len in (2..25).chain(500..510) {
+ for &modulus in &[5, 10, 100, 1000] {
+ for _ in 0..10 {
+ let orig: Vec<_> =
+ rng.sample_iter::<i32, _>(&Standard).map(|x| x % modulus).take(len).collect();
+
+ // Sort in default order.
+ let mut v = orig.clone();
+ v.sort();
+ assert!(v.windows(2).all(|w| w[0] <= w[1]));
+
+ // Sort in ascending order.
+ let mut v = orig.clone();
+ v.sort_by(|a, b| a.cmp(b));
+ assert!(v.windows(2).all(|w| w[0] <= w[1]));
+
+ // Sort in descending order.
+ let mut v = orig.clone();
+ v.sort_by(|a, b| b.cmp(a));
+ assert!(v.windows(2).all(|w| w[0] >= w[1]));
+
+ // Sort in lexicographic order.
+ let mut v1 = orig.clone();
+ let mut v2 = orig.clone();
+ v1.sort_by_key(|x| x.to_string());
+ v2.sort_by_cached_key(|x| x.to_string());
+ assert!(v1.windows(2).all(|w| w[0].to_string() <= w[1].to_string()));
+ assert!(v1 == v2);
+
+ // Sort with many pre-sorted runs.
+ let mut v = orig.clone();
+ v.sort();
+ v.reverse();
+ for _ in 0..5 {
+ let a = rng.gen::<usize>() % len;
+ let b = rng.gen::<usize>() % len;
+ if a < b {
+ v[a..b].reverse();
+ } else {
+ v.swap(a, b);
+ }
+ }
+ v.sort();
+ assert!(v.windows(2).all(|w| w[0] <= w[1]));
+ }
+ }
+ }
+
+ // Sort using a completely random comparison function.
+ // This will reorder the elements *somehow*, but won't panic.
+ let mut v = [0; 500];
+ for i in 0..v.len() {
+ v[i] = i as i32;
+ }
+ v.sort_by(|_, _| *[Less, Equal, Greater].choose(&mut rng).unwrap());
+ v.sort();
+ for i in 0..v.len() {
+ assert_eq!(v[i], i as i32);
+ }
+
+ // Should not panic.
+ [0i32; 0].sort();
+ [(); 10].sort();
+ [(); 100].sort();
+
+ let mut v = [0xDEADBEEFu64];
+ v.sort();
+ assert!(v == [0xDEADBEEF]);
+}
+
+#[test]
+fn test_sort_stability() {
+ // Miri is too slow
+ let large_range = if cfg!(miri) { 0..0 } else { 500..510 };
+ let rounds = if cfg!(miri) { 1 } else { 10 };
+
+ for len in (2..25).chain(large_range) {
+ for _ in 0..rounds {
+ let mut counts = [0; 10];
+
+ // create a vector like [(6, 1), (5, 1), (6, 2), ...],
+ // where the first item of each tuple is random, but
+ // the second item represents which occurrence of that
+ // number this element is, i.e., the second elements
+ // will occur in sorted order.
+ let orig: Vec<_> = (0..len)
+ .map(|_| {
+ let n = thread_rng().gen::<usize>() % 10;
+ counts[n] += 1;
+ (n, counts[n])
+ })
+ .collect();
+
+ let mut v = orig.clone();
+ // Only sort on the first element, so an unstable sort
+ // may mix up the counts.
+ v.sort_by(|&(a, _), &(b, _)| a.cmp(&b));
+
+ // This comparison includes the count (the second item
+ // of the tuple), so elements with equal first items
+ // will need to be ordered with increasing
+ // counts... i.e., exactly asserting that this sort is
+ // stable.
+ assert!(v.windows(2).all(|w| w[0] <= w[1]));
+
+ let mut v = orig.clone();
+ v.sort_by_cached_key(|&(x, _)| x);
+ assert!(v.windows(2).all(|w| w[0] <= w[1]));
+ }
+ }
+}
+
+#[test]
+fn test_rotate_left() {
+ let expected: Vec<_> = (0..13).collect();
+ let mut v = Vec::new();
+
+ // no-ops
+ v.clone_from(&expected);
+ v.rotate_left(0);
+ assert_eq!(v, expected);
+ v.rotate_left(expected.len());
+ assert_eq!(v, expected);
+ let mut zst_array = [(), (), ()];
+ zst_array.rotate_left(2);
+
+ // happy path
+ v = (5..13).chain(0..5).collect();
+ v.rotate_left(8);
+ assert_eq!(v, expected);
+
+ let expected: Vec<_> = (0..1000).collect();
+
+ // small rotations in large slice, uses ptr::copy
+ v = (2..1000).chain(0..2).collect();
+ v.rotate_left(998);
+ assert_eq!(v, expected);
+ v = (998..1000).chain(0..998).collect();
+ v.rotate_left(2);
+ assert_eq!(v, expected);
+
+ // non-small prime rotation, has a few rounds of swapping
+ v = (389..1000).chain(0..389).collect();
+ v.rotate_left(1000 - 389);
+ assert_eq!(v, expected);
+}
+
+#[test]
+fn test_rotate_right() {
+ let expected: Vec<_> = (0..13).collect();
+ let mut v = Vec::new();
+
+ // no-ops
+ v.clone_from(&expected);
+ v.rotate_right(0);
+ assert_eq!(v, expected);
+ v.rotate_right(expected.len());
+ assert_eq!(v, expected);
+ let mut zst_array = [(), (), ()];
+ zst_array.rotate_right(2);
+
+ // happy path
+ v = (5..13).chain(0..5).collect();
+ v.rotate_right(5);
+ assert_eq!(v, expected);
+
+ let expected: Vec<_> = (0..1000).collect();
+
+ // small rotations in large slice, uses ptr::copy
+ v = (2..1000).chain(0..2).collect();
+ v.rotate_right(2);
+ assert_eq!(v, expected);
+ v = (998..1000).chain(0..998).collect();
+ v.rotate_right(998);
+ assert_eq!(v, expected);
+
+ // non-small prime rotation, has a few rounds of swapping
+ v = (389..1000).chain(0..389).collect();
+ v.rotate_right(389);
+ assert_eq!(v, expected);
+}
+
+#[test]
+fn test_concat() {
+ let v: [Vec<i32>; 0] = [];
+ let c = v.concat();
+ assert_eq!(c, []);
+ let d = [vec![1], vec![2, 3]].concat();
+ assert_eq!(d, [1, 2, 3]);
+
+ let v: &[&[_]] = &[&[1], &[2, 3]];
+ assert_eq!(v.join(&0), [1, 0, 2, 3]);
+ let v: &[&[_]] = &[&[1], &[2], &[3]];
+ assert_eq!(v.join(&0), [1, 0, 2, 0, 3]);
+}
+
+#[test]
+fn test_join() {
+ let v: [Vec<i32>; 0] = [];
+ assert_eq!(v.join(&0), []);
+ assert_eq!([vec![1], vec![2, 3]].join(&0), [1, 0, 2, 3]);
+ assert_eq!([vec![1], vec![2], vec![3]].join(&0), [1, 0, 2, 0, 3]);
+
+ let v: [&[_]; 2] = [&[1], &[2, 3]];
+ assert_eq!(v.join(&0), [1, 0, 2, 3]);
+ let v: [&[_]; 3] = [&[1], &[2], &[3]];
+ assert_eq!(v.join(&0), [1, 0, 2, 0, 3]);
+}
+
+#[test]
+fn test_join_nocopy() {
+ let v: [String; 0] = [];
+ assert_eq!(v.join(","), "");
+ assert_eq!(["a".to_string(), "ab".into()].join(","), "a,ab");
+ assert_eq!(["a".to_string(), "ab".into(), "abc".into()].join(","), "a,ab,abc");
+ assert_eq!(["a".to_string(), "ab".into(), "".into()].join(","), "a,ab,");
+}
+
+#[test]
+fn test_insert() {
+ let mut a = vec![1, 2, 4];
+ a.insert(2, 3);
+ assert_eq!(a, [1, 2, 3, 4]);
+
+ let mut a = vec![1, 2, 3];
+ a.insert(0, 0);
+ assert_eq!(a, [0, 1, 2, 3]);
+
+ let mut a = vec![1, 2, 3];
+ a.insert(3, 4);
+ assert_eq!(a, [1, 2, 3, 4]);
+
+ let mut a = vec![];
+ a.insert(0, 1);
+ assert_eq!(a, [1]);
+}
+
+#[test]
+#[should_panic]
+fn test_insert_oob() {
+ let mut a = vec![1, 2, 3];
+ a.insert(4, 5);
+}
+
+#[test]
+fn test_remove() {
+ let mut a = vec![1, 2, 3, 4];
+
+ assert_eq!(a.remove(2), 3);
+ assert_eq!(a, [1, 2, 4]);
+
+ assert_eq!(a.remove(2), 4);
+ assert_eq!(a, [1, 2]);
+
+ assert_eq!(a.remove(0), 1);
+ assert_eq!(a, [2]);
+
+ assert_eq!(a.remove(0), 2);
+ assert_eq!(a, []);
+}
+
+#[test]
+#[should_panic]
+fn test_remove_fail() {
+ let mut a = vec![1];
+ let _ = a.remove(0);
+ let _ = a.remove(0);
+}
+
+#[test]
+fn test_capacity() {
+ let mut v = vec![0];
+ v.reserve_exact(10);
+ assert!(v.capacity() >= 11);
+}
+
+#[test]
+fn test_slice_2() {
+ let v = vec![1, 2, 3, 4, 5];
+ let v = &v[1..3];
+ assert_eq!(v.len(), 2);
+ assert_eq!(v[0], 2);
+ assert_eq!(v[1], 3);
+}
+
+macro_rules! assert_order {
+ (Greater, $a:expr, $b:expr) => {
+ assert_eq!($a.cmp($b), Greater);
+ assert!($a > $b);
+ };
+ (Less, $a:expr, $b:expr) => {
+ assert_eq!($a.cmp($b), Less);
+ assert!($a < $b);
+ };
+ (Equal, $a:expr, $b:expr) => {
+ assert_eq!($a.cmp($b), Equal);
+ assert_eq!($a, $b);
+ };
+}
+
+#[test]
+fn test_total_ord_u8() {
+ let c = &[1u8, 2, 3];
+ assert_order!(Greater, &[1u8, 2, 3, 4][..], &c[..]);
+ let c = &[1u8, 2, 3, 4];
+ assert_order!(Less, &[1u8, 2, 3][..], &c[..]);
+ let c = &[1u8, 2, 3, 6];
+ assert_order!(Equal, &[1u8, 2, 3, 6][..], &c[..]);
+ let c = &[1u8, 2, 3, 4, 5, 6];
+ assert_order!(Less, &[1u8, 2, 3, 4, 5, 5, 5, 5][..], &c[..]);
+ let c = &[1u8, 2, 3, 4];
+ assert_order!(Greater, &[2u8, 2][..], &c[..]);
+}
+
+#[test]
+fn test_total_ord_i32() {
+ let c = &[1, 2, 3];
+ assert_order!(Greater, &[1, 2, 3, 4][..], &c[..]);
+ let c = &[1, 2, 3, 4];
+ assert_order!(Less, &[1, 2, 3][..], &c[..]);
+ let c = &[1, 2, 3, 6];
+ assert_order!(Equal, &[1, 2, 3, 6][..], &c[..]);
+ let c = &[1, 2, 3, 4, 5, 6];
+ assert_order!(Less, &[1, 2, 3, 4, 5, 5, 5, 5][..], &c[..]);
+ let c = &[1, 2, 3, 4];
+ assert_order!(Greater, &[2, 2][..], &c[..]);
+}
+
+#[test]
+fn test_iterator() {
+ let xs = [1, 2, 5, 10, 11];
+ let mut it = xs.iter();
+ assert_eq!(it.size_hint(), (5, Some(5)));
+ assert_eq!(it.next().unwrap(), &1);
+ assert_eq!(it.size_hint(), (4, Some(4)));
+ assert_eq!(it.next().unwrap(), &2);
+ assert_eq!(it.size_hint(), (3, Some(3)));
+ assert_eq!(it.next().unwrap(), &5);
+ assert_eq!(it.size_hint(), (2, Some(2)));
+ assert_eq!(it.next().unwrap(), &10);
+ assert_eq!(it.size_hint(), (1, Some(1)));
+ assert_eq!(it.next().unwrap(), &11);
+ assert_eq!(it.size_hint(), (0, Some(0)));
+ assert!(it.next().is_none());
+}
+
+#[test]
+fn test_iter_size_hints() {
+ let mut xs = [1, 2, 5, 10, 11];
+ assert_eq!(xs.iter().size_hint(), (5, Some(5)));
+ assert_eq!(xs.iter_mut().size_hint(), (5, Some(5)));
+}
+
+#[test]
+fn test_iter_as_slice() {
+ let xs = [1, 2, 5, 10, 11];
+ let mut iter = xs.iter();
+ assert_eq!(iter.as_slice(), &[1, 2, 5, 10, 11]);
+ iter.next();
+ assert_eq!(iter.as_slice(), &[2, 5, 10, 11]);
+}
+
+#[test]
+fn test_iter_as_ref() {
+ let xs = [1, 2, 5, 10, 11];
+ let mut iter = xs.iter();
+ assert_eq!(iter.as_ref(), &[1, 2, 5, 10, 11]);
+ iter.next();
+ assert_eq!(iter.as_ref(), &[2, 5, 10, 11]);
+}
+
+#[test]
+fn test_iter_clone() {
+ let xs = [1, 2, 5];
+ let mut it = xs.iter();
+ it.next();
+ let mut jt = it.clone();
+ assert_eq!(it.next(), jt.next());
+ assert_eq!(it.next(), jt.next());
+ assert_eq!(it.next(), jt.next());
+}
+
+#[test]
+fn test_iter_is_empty() {
+ let xs = [1, 2, 5, 10, 11];
+ for i in 0..xs.len() {
+ for j in i..xs.len() {
+ assert_eq!(xs[i..j].iter().is_empty(), xs[i..j].is_empty());
+ }
+ }
+}
+
+#[test]
+fn test_mut_iterator() {
+ let mut xs = [1, 2, 3, 4, 5];
+ for x in &mut xs {
+ *x += 1;
+ }
+ assert!(xs == [2, 3, 4, 5, 6])
+}
+
+#[test]
+fn test_rev_iterator() {
+ let xs = [1, 2, 5, 10, 11];
+ let ys = [11, 10, 5, 2, 1];
+ let mut i = 0;
+ for &x in xs.iter().rev() {
+ assert_eq!(x, ys[i]);
+ i += 1;
+ }
+ assert_eq!(i, 5);
+}
+
+#[test]
+fn test_mut_rev_iterator() {
+ let mut xs = [1, 2, 3, 4, 5];
+ for (i, x) in xs.iter_mut().rev().enumerate() {
+ *x += i;
+ }
+ assert!(xs == [5, 5, 5, 5, 5])
+}
+
+#[test]
+fn test_move_iterator() {
+ let xs = vec![1, 2, 3, 4, 5];
+ assert_eq!(xs.into_iter().fold(0, |a: usize, b: usize| 10 * a + b), 12345);
+}
+
+#[test]
+fn test_move_rev_iterator() {
+ let xs = vec![1, 2, 3, 4, 5];
+ assert_eq!(xs.into_iter().rev().fold(0, |a: usize, b: usize| 10 * a + b), 54321);
+}
+
+#[test]
+fn test_splitator() {
+ let xs = &[1, 2, 3, 4, 5];
+
+ let splits: &[&[_]] = &[&[1], &[3], &[5]];
+ assert_eq!(xs.split(|x| *x % 2 == 0).collect::<Vec<_>>(), splits);
+ let splits: &[&[_]] = &[&[], &[2, 3, 4, 5]];
+ assert_eq!(xs.split(|x| *x == 1).collect::<Vec<_>>(), splits);
+ let splits: &[&[_]] = &[&[1, 2, 3, 4], &[]];
+ assert_eq!(xs.split(|x| *x == 5).collect::<Vec<_>>(), splits);
+ let splits: &[&[_]] = &[&[1, 2, 3, 4, 5]];
+ assert_eq!(xs.split(|x| *x == 10).collect::<Vec<_>>(), splits);
+ let splits: &[&[_]] = &[&[], &[], &[], &[], &[], &[]];
+ assert_eq!(xs.split(|_| true).collect::<Vec<&[i32]>>(), splits);
+
+ let xs: &[i32] = &[];
+ let splits: &[&[i32]] = &[&[]];
+ assert_eq!(xs.split(|x| *x == 5).collect::<Vec<&[i32]>>(), splits);
+}
+
+#[test]
+fn test_splitator_inclusive() {
+ let xs = &[1, 2, 3, 4, 5];
+
+ let splits: &[&[_]] = &[&[1, 2], &[3, 4], &[5]];
+ assert_eq!(xs.split_inclusive(|x| *x % 2 == 0).collect::<Vec<_>>(), splits);
+ let splits: &[&[_]] = &[&[1], &[2, 3, 4, 5]];
+ assert_eq!(xs.split_inclusive(|x| *x == 1).collect::<Vec<_>>(), splits);
+ let splits: &[&[_]] = &[&[1, 2, 3, 4, 5]];
+ assert_eq!(xs.split_inclusive(|x| *x == 5).collect::<Vec<_>>(), splits);
+ let splits: &[&[_]] = &[&[1, 2, 3, 4, 5]];
+ assert_eq!(xs.split_inclusive(|x| *x == 10).collect::<Vec<_>>(), splits);
+ let splits: &[&[_]] = &[&[1], &[2], &[3], &[4], &[5]];
+ assert_eq!(xs.split_inclusive(|_| true).collect::<Vec<&[i32]>>(), splits);
+
+ let xs: &[i32] = &[];
+ let splits: &[&[i32]] = &[];
+ assert_eq!(xs.split_inclusive(|x| *x == 5).collect::<Vec<&[i32]>>(), splits);
+}
+
+#[test]
+fn test_splitator_inclusive_reverse() {
+ let xs = &[1, 2, 3, 4, 5];
+
+ let splits: &[&[_]] = &[&[5], &[3, 4], &[1, 2]];
+ assert_eq!(xs.split_inclusive(|x| *x % 2 == 0).rev().collect::<Vec<_>>(), splits);
+ let splits: &[&[_]] = &[&[2, 3, 4, 5], &[1]];
+ assert_eq!(xs.split_inclusive(|x| *x == 1).rev().collect::<Vec<_>>(), splits);
+ let splits: &[&[_]] = &[&[1, 2, 3, 4, 5]];
+ assert_eq!(xs.split_inclusive(|x| *x == 5).rev().collect::<Vec<_>>(), splits);
+ let splits: &[&[_]] = &[&[1, 2, 3, 4, 5]];
+ assert_eq!(xs.split_inclusive(|x| *x == 10).rev().collect::<Vec<_>>(), splits);
+ let splits: &[&[_]] = &[&[5], &[4], &[3], &[2], &[1]];
+ assert_eq!(xs.split_inclusive(|_| true).rev().collect::<Vec<_>>(), splits);
+
+ let xs: &[i32] = &[];
+ let splits: &[&[i32]] = &[];
+ assert_eq!(xs.split_inclusive(|x| *x == 5).rev().collect::<Vec<_>>(), splits);
+}
+
+#[test]
+fn test_splitator_mut_inclusive() {
+ let xs = &mut [1, 2, 3, 4, 5];
+
+ let splits: &[&[_]] = &[&[1, 2], &[3, 4], &[5]];
+ assert_eq!(xs.split_inclusive_mut(|x| *x % 2 == 0).collect::<Vec<_>>(), splits);
+ let splits: &[&[_]] = &[&[1], &[2, 3, 4, 5]];
+ assert_eq!(xs.split_inclusive_mut(|x| *x == 1).collect::<Vec<_>>(), splits);
+ let splits: &[&[_]] = &[&[1, 2, 3, 4, 5]];
+ assert_eq!(xs.split_inclusive_mut(|x| *x == 5).collect::<Vec<_>>(), splits);
+ let splits: &[&[_]] = &[&[1, 2, 3, 4, 5]];
+ assert_eq!(xs.split_inclusive_mut(|x| *x == 10).collect::<Vec<_>>(), splits);
+ let splits: &[&[_]] = &[&[1], &[2], &[3], &[4], &[5]];
+ assert_eq!(xs.split_inclusive_mut(|_| true).collect::<Vec<_>>(), splits);
+
+ let xs: &mut [i32] = &mut [];
+ let splits: &[&[i32]] = &[];
+ assert_eq!(xs.split_inclusive_mut(|x| *x == 5).collect::<Vec<_>>(), splits);
+}
+
+#[test]
+fn test_splitator_mut_inclusive_reverse() {
+ let xs = &mut [1, 2, 3, 4, 5];
+
+ let splits: &[&[_]] = &[&[5], &[3, 4], &[1, 2]];
+ assert_eq!(xs.split_inclusive_mut(|x| *x % 2 == 0).rev().collect::<Vec<_>>(), splits);
+ let splits: &[&[_]] = &[&[2, 3, 4, 5], &[1]];
+ assert_eq!(xs.split_inclusive_mut(|x| *x == 1).rev().collect::<Vec<_>>(), splits);
+ let splits: &[&[_]] = &[&[1, 2, 3, 4, 5]];
+ assert_eq!(xs.split_inclusive_mut(|x| *x == 5).rev().collect::<Vec<_>>(), splits);
+ let splits: &[&[_]] = &[&[1, 2, 3, 4, 5]];
+ assert_eq!(xs.split_inclusive_mut(|x| *x == 10).rev().collect::<Vec<_>>(), splits);
+ let splits: &[&[_]] = &[&[5], &[4], &[3], &[2], &[1]];
+ assert_eq!(xs.split_inclusive_mut(|_| true).rev().collect::<Vec<_>>(), splits);
+
+ let xs: &mut [i32] = &mut [];
+ let splits: &[&[i32]] = &[];
+ assert_eq!(xs.split_inclusive_mut(|x| *x == 5).rev().collect::<Vec<_>>(), splits);
+}
+
+#[test]
+fn test_splitnator() {
+ let xs = &[1, 2, 3, 4, 5];
+
+ let splits: &[&[_]] = &[&[1, 2, 3, 4, 5]];
+ assert_eq!(xs.splitn(1, |x| *x % 2 == 0).collect::<Vec<_>>(), splits);
+ let splits: &[&[_]] = &[&[1], &[3, 4, 5]];
+ assert_eq!(xs.splitn(2, |x| *x % 2 == 0).collect::<Vec<_>>(), splits);
+ let splits: &[&[_]] = &[&[], &[], &[], &[4, 5]];
+ assert_eq!(xs.splitn(4, |_| true).collect::<Vec<_>>(), splits);
+
+ let xs: &[i32] = &[];
+ let splits: &[&[i32]] = &[&[]];
+ assert_eq!(xs.splitn(2, |x| *x == 5).collect::<Vec<_>>(), splits);
+}
+
+#[test]
+fn test_splitnator_mut() {
+ let xs = &mut [1, 2, 3, 4, 5];
+
+ let splits: &[&mut [_]] = &[&mut [1, 2, 3, 4, 5]];
+ assert_eq!(xs.splitn_mut(1, |x| *x % 2 == 0).collect::<Vec<_>>(), splits);
+ let splits: &[&mut [_]] = &[&mut [1], &mut [3, 4, 5]];
+ assert_eq!(xs.splitn_mut(2, |x| *x % 2 == 0).collect::<Vec<_>>(), splits);
+ let splits: &[&mut [_]] = &[&mut [], &mut [], &mut [], &mut [4, 5]];
+ assert_eq!(xs.splitn_mut(4, |_| true).collect::<Vec<_>>(), splits);
+
+ let xs: &mut [i32] = &mut [];
+ let splits: &[&mut [i32]] = &[&mut []];
+ assert_eq!(xs.splitn_mut(2, |x| *x == 5).collect::<Vec<_>>(), splits);
+}
+
+#[test]
+fn test_rsplitator() {
+ let xs = &[1, 2, 3, 4, 5];
+
+ let splits: &[&[_]] = &[&[5], &[3], &[1]];
+ assert_eq!(xs.split(|x| *x % 2 == 0).rev().collect::<Vec<_>>(), splits);
+ let splits: &[&[_]] = &[&[2, 3, 4, 5], &[]];
+ assert_eq!(xs.split(|x| *x == 1).rev().collect::<Vec<_>>(), splits);
+ let splits: &[&[_]] = &[&[], &[1, 2, 3, 4]];
+ assert_eq!(xs.split(|x| *x == 5).rev().collect::<Vec<_>>(), splits);
+ let splits: &[&[_]] = &[&[1, 2, 3, 4, 5]];
+ assert_eq!(xs.split(|x| *x == 10).rev().collect::<Vec<_>>(), splits);
+
+ let xs: &[i32] = &[];
+ let splits: &[&[i32]] = &[&[]];
+ assert_eq!(xs.split(|x| *x == 5).rev().collect::<Vec<&[i32]>>(), splits);
+}
+
+#[test]
+fn test_rsplitnator() {
+ let xs = &[1, 2, 3, 4, 5];
+
+ let splits: &[&[_]] = &[&[1, 2, 3, 4, 5]];
+ assert_eq!(xs.rsplitn(1, |x| *x % 2 == 0).collect::<Vec<_>>(), splits);
+ let splits: &[&[_]] = &[&[5], &[1, 2, 3]];
+ assert_eq!(xs.rsplitn(2, |x| *x % 2 == 0).collect::<Vec<_>>(), splits);
+ let splits: &[&[_]] = &[&[], &[], &[], &[1, 2]];
+ assert_eq!(xs.rsplitn(4, |_| true).collect::<Vec<_>>(), splits);
+
+ let xs: &[i32] = &[];
+ let splits: &[&[i32]] = &[&[]];
+ assert_eq!(xs.rsplitn(2, |x| *x == 5).collect::<Vec<&[i32]>>(), splits);
+ assert!(xs.rsplitn(0, |x| *x % 2 == 0).next().is_none());
+}
+
+#[test]
+fn test_split_iterators_size_hint() {
+ #[derive(Copy, Clone)]
+ enum Bounds {
+ Lower,
+ Upper,
+ }
+ fn assert_tight_size_hints(mut it: impl Iterator, which: Bounds, ctx: impl fmt::Display) {
+ match which {
+ Bounds::Lower => {
+ let mut lower_bounds = vec![it.size_hint().0];
+ while let Some(_) = it.next() {
+ lower_bounds.push(it.size_hint().0);
+ }
+ let target: Vec<_> = (0..lower_bounds.len()).rev().collect();
+ assert_eq!(lower_bounds, target, "lower bounds incorrect or not tight: {}", ctx);
+ }
+ Bounds::Upper => {
+ let mut upper_bounds = vec![it.size_hint().1];
+ while let Some(_) = it.next() {
+ upper_bounds.push(it.size_hint().1);
+ }
+ let target: Vec<_> = (0..upper_bounds.len()).map(Some).rev().collect();
+ assert_eq!(upper_bounds, target, "upper bounds incorrect or not tight: {}", ctx);
+ }
+ }
+ }
+
+ for len in 0..=2 {
+ let mut v: Vec<u8> = (0..len).collect();
+
+ // p: predicate, b: bound selection
+ for (p, b) in [
+ // with a predicate always returning false, the split*-iterators
+ // become maximally short, so the size_hint lower bounds are tight
+ ((|_| false) as fn(&_) -> _, Bounds::Lower),
+ // with a predicate always returning true, the split*-iterators
+ // become maximally long, so the size_hint upper bounds are tight
+ ((|_| true) as fn(&_) -> _, Bounds::Upper),
+ ] {
+ use assert_tight_size_hints as a;
+ use format_args as f;
+
+ a(v.split(p), b, "split");
+ a(v.split_mut(p), b, "split_mut");
+ a(v.split_inclusive(p), b, "split_inclusive");
+ a(v.split_inclusive_mut(p), b, "split_inclusive_mut");
+ a(v.rsplit(p), b, "rsplit");
+ a(v.rsplit_mut(p), b, "rsplit_mut");
+
+ for n in 0..=3 {
+ a(v.splitn(n, p), b, f!("splitn, n = {n}"));
+ a(v.splitn_mut(n, p), b, f!("splitn_mut, n = {n}"));
+ a(v.rsplitn(n, p), b, f!("rsplitn, n = {n}"));
+ a(v.rsplitn_mut(n, p), b, f!("rsplitn_mut, n = {n}"));
+ }
+ }
+ }
+}
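+
+// A minimal, standalone sketch (not part of the upstream checks above): whatever
+// the predicate does, `size_hint` must bracket the number of pieces that `split`
+// actually yields, which is the property the tightness assertions above rely on.
+#[test]
+fn test_split_size_hint_brackets_count() {
+    let v = [1u8, 2, 3];
+    let it = v.split(|_| true);
+    let (lower, upper) = it.size_hint();
+    // An all-true predicate yields `len + 1` empty pieces.
+    let pieces = it.count();
+    assert!(lower <= pieces);
+    assert!(pieces <= upper.unwrap());
+}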
+
+#[test]
+fn test_windowsator() {
+ let v = &[1, 2, 3, 4];
+
+ let wins: &[&[_]] = &[&[1, 2], &[2, 3], &[3, 4]];
+ assert_eq!(v.windows(2).collect::<Vec<_>>(), wins);
+
+ let wins: &[&[_]] = &[&[1, 2, 3], &[2, 3, 4]];
+ assert_eq!(v.windows(3).collect::<Vec<_>>(), wins);
+ assert!(v.windows(6).next().is_none());
+
+ let wins: &[&[_]] = &[&[3, 4], &[2, 3], &[1, 2]];
+ assert_eq!(v.windows(2).rev().collect::<Vec<&[_]>>(), wins);
+}
+
+#[test]
+#[should_panic]
+fn test_windowsator_0() {
+ let v = &[1, 2, 3, 4];
+ let _it = v.windows(0);
+}
+
+#[test]
+fn test_chunksator() {
+ let v = &[1, 2, 3, 4, 5];
+
+ assert_eq!(v.chunks(2).len(), 3);
+
+ let chunks: &[&[_]] = &[&[1, 2], &[3, 4], &[5]];
+ assert_eq!(v.chunks(2).collect::<Vec<_>>(), chunks);
+ let chunks: &[&[_]] = &[&[1, 2, 3], &[4, 5]];
+ assert_eq!(v.chunks(3).collect::<Vec<_>>(), chunks);
+ let chunks: &[&[_]] = &[&[1, 2, 3, 4, 5]];
+ assert_eq!(v.chunks(6).collect::<Vec<_>>(), chunks);
+
+ let chunks: &[&[_]] = &[&[5], &[3, 4], &[1, 2]];
+ assert_eq!(v.chunks(2).rev().collect::<Vec<_>>(), chunks);
+}
+
+#[test]
+#[should_panic]
+fn test_chunksator_0() {
+ let v = &[1, 2, 3, 4];
+ let _it = v.chunks(0);
+}
+
+#[test]
+fn test_chunks_exactator() {
+ let v = &[1, 2, 3, 4, 5];
+
+ assert_eq!(v.chunks_exact(2).len(), 2);
+
+ let chunks: &[&[_]] = &[&[1, 2], &[3, 4]];
+ assert_eq!(v.chunks_exact(2).collect::<Vec<_>>(), chunks);
+ let chunks: &[&[_]] = &[&[1, 2, 3]];
+ assert_eq!(v.chunks_exact(3).collect::<Vec<_>>(), chunks);
+ let chunks: &[&[_]] = &[];
+ assert_eq!(v.chunks_exact(6).collect::<Vec<_>>(), chunks);
+
+ let chunks: &[&[_]] = &[&[3, 4], &[1, 2]];
+ assert_eq!(v.chunks_exact(2).rev().collect::<Vec<_>>(), chunks);
+}
+
+#[test]
+#[should_panic]
+fn test_chunks_exactator_0() {
+ let v = &[1, 2, 3, 4];
+ let _it = v.chunks_exact(0);
+}
+
+#[test]
+fn test_rchunksator() {
+ let v = &[1, 2, 3, 4, 5];
+
+ assert_eq!(v.rchunks(2).len(), 3);
+
+ let chunks: &[&[_]] = &[&[4, 5], &[2, 3], &[1]];
+ assert_eq!(v.rchunks(2).collect::<Vec<_>>(), chunks);
+ let chunks: &[&[_]] = &[&[3, 4, 5], &[1, 2]];
+ assert_eq!(v.rchunks(3).collect::<Vec<_>>(), chunks);
+ let chunks: &[&[_]] = &[&[1, 2, 3, 4, 5]];
+ assert_eq!(v.rchunks(6).collect::<Vec<_>>(), chunks);
+
+ let chunks: &[&[_]] = &[&[1], &[2, 3], &[4, 5]];
+ assert_eq!(v.rchunks(2).rev().collect::<Vec<_>>(), chunks);
+}
+
+#[test]
+#[should_panic]
+fn test_rchunksator_0() {
+ let v = &[1, 2, 3, 4];
+ let _it = v.rchunks(0);
+}
+
+#[test]
+fn test_rchunks_exactator() {
+ let v = &[1, 2, 3, 4, 5];
+
+ assert_eq!(v.rchunks_exact(2).len(), 2);
+
+ let chunks: &[&[_]] = &[&[4, 5], &[2, 3]];
+ assert_eq!(v.rchunks_exact(2).collect::<Vec<_>>(), chunks);
+ let chunks: &[&[_]] = &[&[3, 4, 5]];
+ assert_eq!(v.rchunks_exact(3).collect::<Vec<_>>(), chunks);
+ let chunks: &[&[_]] = &[];
+ assert_eq!(v.rchunks_exact(6).collect::<Vec<_>>(), chunks);
+
+ let chunks: &[&[_]] = &[&[2, 3], &[4, 5]];
+ assert_eq!(v.rchunks_exact(2).rev().collect::<Vec<_>>(), chunks);
+}
+
+#[test]
+#[should_panic]
+fn test_rchunks_exactator_0() {
+ let v = &[1, 2, 3, 4];
+ let _it = v.rchunks_exact(0);
+}
+
+#[test]
+fn test_reverse_part() {
+ let mut values = [1, 2, 3, 4, 5];
+ values[1..4].reverse();
+ assert!(values == [1, 4, 3, 2, 5]);
+}
+
+#[test]
+fn test_show() {
+ macro_rules! test_show_vec {
+ ($x:expr, $x_str:expr) => {{
+ let (x, x_str) = ($x, $x_str);
+ assert_eq!(format!("{x:?}"), x_str);
+ assert_eq!(format!("{x:?}"), x_str);
+ }};
+ }
+ let empty = Vec::<i32>::new();
+ test_show_vec!(empty, "[]");
+ test_show_vec!(vec![1], "[1]");
+ test_show_vec!(vec![1, 2, 3], "[1, 2, 3]");
+ test_show_vec!(vec![vec![], vec![1], vec![1, 1]], "[[], [1], [1, 1]]");
+
+ let empty_mut: &mut [i32] = &mut [];
+ test_show_vec!(empty_mut, "[]");
+ let v = &mut [1];
+ test_show_vec!(v, "[1]");
+ let v = &mut [1, 2, 3];
+ test_show_vec!(v, "[1, 2, 3]");
+ let v: &mut [&mut [_]] = &mut [&mut [], &mut [1], &mut [1, 1]];
+ test_show_vec!(v, "[[], [1], [1, 1]]");
+}
+
+#[test]
+fn test_vec_default() {
+ macro_rules! t {
+ ($ty:ty) => {{
+ let v: $ty = Default::default();
+ assert!(v.is_empty());
+ }};
+ }
+
+ t!(&[i32]);
+ t!(Vec<i32>);
+}
+
+#[test]
+#[should_panic]
+fn test_overflow_does_not_cause_segfault() {
+ let mut v = vec![];
+ v.reserve_exact(!0);
+ v.push(1);
+ v.push(2);
+}
+
+#[test]
+#[should_panic]
+fn test_overflow_does_not_cause_segfault_managed() {
+ let mut v = vec![Rc::new(1)];
+ v.reserve_exact(!0);
+ v.push(Rc::new(2));
+}
+
+#[test]
+fn test_mut_split_at() {
+ let mut values = [1, 2, 3, 4, 5];
+ {
+ let (left, right) = values.split_at_mut(2);
+ {
+ let left: &[_] = left;
+ assert!(left[..left.len()] == [1, 2]);
+ }
+ for p in left {
+ *p += 1;
+ }
+
+ {
+ let right: &[_] = right;
+ assert!(right[..right.len()] == [3, 4, 5]);
+ }
+ for p in right {
+ *p += 2;
+ }
+ }
+
+ assert!(values == [2, 3, 5, 6, 7]);
+}
+
+#[derive(Clone, PartialEq)]
+struct Foo;
+
+#[test]
+fn test_iter_zero_sized() {
+ let mut v = vec![Foo, Foo, Foo];
+ assert_eq!(v.len(), 3);
+ let mut cnt = 0;
+
+ for f in &v {
+ assert!(*f == Foo);
+ cnt += 1;
+ }
+ assert_eq!(cnt, 3);
+
+ for f in &v[1..3] {
+ assert!(*f == Foo);
+ cnt += 1;
+ }
+ assert_eq!(cnt, 5);
+
+ for f in &mut v {
+ assert!(*f == Foo);
+ cnt += 1;
+ }
+ assert_eq!(cnt, 8);
+
+ for f in v {
+ assert!(f == Foo);
+ cnt += 1;
+ }
+ assert_eq!(cnt, 11);
+
+ let xs: [Foo; 3] = [Foo, Foo, Foo];
+ cnt = 0;
+ for f in &xs {
+ assert!(*f == Foo);
+ cnt += 1;
+ }
+ assert!(cnt == 3);
+}
+
+#[test]
+fn test_shrink_to_fit() {
+ let mut xs = vec![0, 1, 2, 3];
+ for i in 4..100 {
+ xs.push(i)
+ }
+ assert_eq!(xs.capacity(), 128);
+ xs.shrink_to_fit();
+ assert_eq!(xs.capacity(), 100);
+ assert_eq!(xs, (0..100).collect::<Vec<_>>());
+}
+
+#[test]
+fn test_starts_with() {
+ assert!(b"foobar".starts_with(b"foo"));
+ assert!(!b"foobar".starts_with(b"oob"));
+ assert!(!b"foobar".starts_with(b"bar"));
+ assert!(!b"foo".starts_with(b"foobar"));
+ assert!(!b"bar".starts_with(b"foobar"));
+ assert!(b"foobar".starts_with(b"foobar"));
+ let empty: &[u8] = &[];
+ assert!(empty.starts_with(empty));
+ assert!(!empty.starts_with(b"foo"));
+ assert!(b"foobar".starts_with(empty));
+}
+
+#[test]
+fn test_ends_with() {
+ assert!(b"foobar".ends_with(b"bar"));
+ assert!(!b"foobar".ends_with(b"oba"));
+ assert!(!b"foobar".ends_with(b"foo"));
+ assert!(!b"foo".ends_with(b"foobar"));
+ assert!(!b"bar".ends_with(b"foobar"));
+ assert!(b"foobar".ends_with(b"foobar"));
+ let empty: &[u8] = &[];
+ assert!(empty.ends_with(empty));
+ assert!(!empty.ends_with(b"foo"));
+ assert!(b"foobar".ends_with(empty));
+}
+
+#[test]
+fn test_mut_splitator() {
+ let mut xs = [0, 1, 0, 2, 3, 0, 0, 4, 5, 0];
+ assert_eq!(xs.split_mut(|x| *x == 0).count(), 6);
+ for slice in xs.split_mut(|x| *x == 0) {
+ slice.reverse();
+ }
+ assert!(xs == [0, 1, 0, 3, 2, 0, 0, 5, 4, 0]);
+
+ let mut xs = [0, 1, 0, 2, 3, 0, 0, 4, 5, 0, 6, 7];
+ for slice in xs.split_mut(|x| *x == 0).take(5) {
+ slice.reverse();
+ }
+ assert!(xs == [0, 1, 0, 3, 2, 0, 0, 5, 4, 0, 6, 7]);
+}
+
+#[test]
+fn test_mut_splitator_rev() {
+ let mut xs = [1, 2, 0, 3, 4, 0, 0, 5, 6, 0];
+ for slice in xs.split_mut(|x| *x == 0).rev().take(4) {
+ slice.reverse();
+ }
+ assert!(xs == [1, 2, 0, 4, 3, 0, 0, 6, 5, 0]);
+}
+
+#[test]
+fn test_get_mut() {
+ let mut v = [0, 1, 2];
+ assert_eq!(v.get_mut(3), None);
+ v.get_mut(1).map(|e| *e = 7);
+ assert_eq!(v[1], 7);
+ let mut x = 2;
+ assert_eq!(v.get_mut(2), Some(&mut x));
+}
+
+#[test]
+fn test_mut_chunks() {
+ let mut v = [0, 1, 2, 3, 4, 5, 6];
+ assert_eq!(v.chunks_mut(3).len(), 3);
+ for (i, chunk) in v.chunks_mut(3).enumerate() {
+ for x in chunk {
+ *x = i as u8;
+ }
+ }
+ let result = [0, 0, 0, 1, 1, 1, 2];
+ assert_eq!(v, result);
+}
+
+#[test]
+fn test_mut_chunks_rev() {
+ let mut v = [0, 1, 2, 3, 4, 5, 6];
+ for (i, chunk) in v.chunks_mut(3).rev().enumerate() {
+ for x in chunk {
+ *x = i as u8;
+ }
+ }
+ let result = [2, 2, 2, 1, 1, 1, 0];
+ assert_eq!(v, result);
+}
+
+#[test]
+#[should_panic]
+fn test_mut_chunks_0() {
+ let mut v = [1, 2, 3, 4];
+ let _it = v.chunks_mut(0);
+}
+
+#[test]
+fn test_mut_chunks_exact() {
+ let mut v = [0, 1, 2, 3, 4, 5, 6];
+ assert_eq!(v.chunks_exact_mut(3).len(), 2);
+ for (i, chunk) in v.chunks_exact_mut(3).enumerate() {
+ for x in chunk {
+ *x = i as u8;
+ }
+ }
+ let result = [0, 0, 0, 1, 1, 1, 6];
+ assert_eq!(v, result);
+}
+
+#[test]
+fn test_mut_chunks_exact_rev() {
+ let mut v = [0, 1, 2, 3, 4, 5, 6];
+ for (i, chunk) in v.chunks_exact_mut(3).rev().enumerate() {
+ for x in chunk {
+ *x = i as u8;
+ }
+ }
+ let result = [1, 1, 1, 0, 0, 0, 6];
+ assert_eq!(v, result);
+}
+
+#[test]
+#[should_panic]
+fn test_mut_chunks_exact_0() {
+ let mut v = [1, 2, 3, 4];
+ let _it = v.chunks_exact_mut(0);
+}
+
+#[test]
+fn test_mut_rchunks() {
+ let mut v = [0, 1, 2, 3, 4, 5, 6];
+ assert_eq!(v.rchunks_mut(3).len(), 3);
+ for (i, chunk) in v.rchunks_mut(3).enumerate() {
+ for x in chunk {
+ *x = i as u8;
+ }
+ }
+ let result = [2, 1, 1, 1, 0, 0, 0];
+ assert_eq!(v, result);
+}
+
+#[test]
+fn test_mut_rchunks_rev() {
+ let mut v = [0, 1, 2, 3, 4, 5, 6];
+ for (i, chunk) in v.rchunks_mut(3).rev().enumerate() {
+ for x in chunk {
+ *x = i as u8;
+ }
+ }
+ let result = [0, 1, 1, 1, 2, 2, 2];
+ assert_eq!(v, result);
+}
+
+#[test]
+#[should_panic]
+fn test_mut_rchunks_0() {
+ let mut v = [1, 2, 3, 4];
+ let _it = v.rchunks_mut(0);
+}
+
+#[test]
+fn test_mut_rchunks_exact() {
+ let mut v = [0, 1, 2, 3, 4, 5, 6];
+ assert_eq!(v.rchunks_exact_mut(3).len(), 2);
+ for (i, chunk) in v.rchunks_exact_mut(3).enumerate() {
+ for x in chunk {
+ *x = i as u8;
+ }
+ }
+ let result = [0, 1, 1, 1, 0, 0, 0];
+ assert_eq!(v, result);
+}
+
+#[test]
+fn test_mut_rchunks_exact_rev() {
+ let mut v = [0, 1, 2, 3, 4, 5, 6];
+ for (i, chunk) in v.rchunks_exact_mut(3).rev().enumerate() {
+ for x in chunk {
+ *x = i as u8;
+ }
+ }
+ let result = [0, 0, 0, 0, 1, 1, 1];
+ assert_eq!(v, result);
+}
+
+#[test]
+#[should_panic]
+fn test_mut_rchunks_exact_0() {
+ let mut v = [1, 2, 3, 4];
+ let _it = v.rchunks_exact_mut(0);
+}
+
+#[test]
+fn test_mut_last() {
+ let mut x = [1, 2, 3, 4, 5];
+ let h = x.last_mut();
+ assert_eq!(*h.unwrap(), 5);
+
+ let y: &mut [i32] = &mut [];
+ assert!(y.last_mut().is_none());
+}
+
+#[test]
+fn test_to_vec() {
+ let xs: Box<_> = Box::new([1, 2, 3]);
+ let ys = xs.to_vec();
+ assert_eq!(ys, [1, 2, 3]);
+}
+
+#[test]
+fn test_in_place_iterator_specialization() {
+ let src: Box<[usize]> = Box::new([1, 2, 3]);
+ let src_ptr = src.as_ptr();
+ let sink: Box<_> = src.into_vec().into_iter().map(std::convert::identity).collect();
+ let sink_ptr = sink.as_ptr();
+ assert_eq!(src_ptr, sink_ptr);
+}
+
+#[test]
+fn test_box_slice_clone() {
+ let data = vec![vec![0, 1], vec![0], vec![1]];
+ let data2 = data.clone().into_boxed_slice().clone().to_vec();
+
+ assert_eq!(data, data2);
+}
+
+#[test]
+#[allow(unused_must_use)] // here, we care about the side effects of `.clone()`
+#[cfg_attr(target_os = "emscripten", ignore)]
+fn test_box_slice_clone_panics() {
+ use std::sync::atomic::{AtomicUsize, Ordering};
+ use std::sync::Arc;
+
+ struct Canary {
+ count: Arc<AtomicUsize>,
+ panics: bool,
+ }
+
+ impl Drop for Canary {
+ fn drop(&mut self) {
+ self.count.fetch_add(1, Ordering::SeqCst);
+ }
+ }
+
+ impl Clone for Canary {
+ fn clone(&self) -> Self {
+ if self.panics {
+ panic!()
+ }
+
+ Canary { count: self.count.clone(), panics: self.panics }
+ }
+ }
+
+ let drop_count = Arc::new(AtomicUsize::new(0));
+ let canary = Canary { count: drop_count.clone(), panics: false };
+ let panic = Canary { count: drop_count.clone(), panics: true };
+
+ std::panic::catch_unwind(move || {
+ // When xs is dropped, +5.
+ let xs =
+ vec![canary.clone(), canary.clone(), canary.clone(), panic, canary].into_boxed_slice();
+
+ // When panic is cloned, +3.
+ xs.clone();
+ })
+ .unwrap_err();
+
+ // Total = 8
+ assert_eq!(drop_count.load(Ordering::SeqCst), 8);
+}
+
+#[test]
+fn test_copy_from_slice() {
+ let src = [0, 1, 2, 3, 4, 5];
+ let mut dst = [0; 6];
+ dst.copy_from_slice(&src);
+ assert_eq!(src, dst)
+}
+
+#[test]
+#[should_panic(expected = "source slice length (4) does not match destination slice length (5)")]
+fn test_copy_from_slice_dst_longer() {
+ let src = [0, 1, 2, 3];
+ let mut dst = [0; 5];
+ dst.copy_from_slice(&src);
+}
+
+#[test]
+#[should_panic(expected = "source slice length (4) does not match destination slice length (3)")]
+fn test_copy_from_slice_dst_shorter() {
+ let src = [0, 1, 2, 3];
+ let mut dst = [0; 3];
+ dst.copy_from_slice(&src);
+}
+
+const MAX_LEN: usize = 80;
+
+static DROP_COUNTS: [AtomicUsize; MAX_LEN] = [
+ // FIXME(RFC 1109): AtomicUsize is not Copy.
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+ AtomicUsize::new(0),
+];
+
+static VERSIONS: AtomicUsize = AtomicUsize::new(0);
+
+#[derive(Clone, Eq)]
+struct DropCounter {
+ x: u32,
+ id: usize,
+ version: Cell<usize>,
+}
+
+impl PartialEq for DropCounter {
+ fn eq(&self, other: &Self) -> bool {
+ self.partial_cmp(other) == Some(Ordering::Equal)
+ }
+}
+
+impl PartialOrd for DropCounter {
+ fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
+ self.version.set(self.version.get() + 1);
+ other.version.set(other.version.get() + 1);
+ VERSIONS.fetch_add(2, Relaxed);
+ self.x.partial_cmp(&other.x)
+ }
+}
+
+impl Ord for DropCounter {
+ fn cmp(&self, other: &Self) -> Ordering {
+ self.partial_cmp(other).unwrap()
+ }
+}
+
+impl Drop for DropCounter {
+ fn drop(&mut self) {
+ DROP_COUNTS[self.id].fetch_add(1, Relaxed);
+ VERSIONS.fetch_sub(self.version.get(), Relaxed);
+ }
+}
+
+macro_rules! test {
+ ($input:ident, $func:ident) => {
+ let len = $input.len();
+
+ // Work out the total number of comparisons required to sort
+ // this array...
+ let mut count = 0usize;
+ $input.to_owned().$func(|a, b| {
+ count += 1;
+ a.cmp(b)
+ });
+
+ // ... and then panic on each and every single one.
+ for panic_countdown in 0..count {
+ // Refresh the counters.
+ VERSIONS.store(0, Relaxed);
+ for i in 0..len {
+ DROP_COUNTS[i].store(0, Relaxed);
+ }
+
+ let v = $input.to_owned();
+ let _ = std::panic::catch_unwind(move || {
+ let mut v = v;
+ let mut panic_countdown = panic_countdown;
+ v.$func(|a, b| {
+ if panic_countdown == 0 {
+ SILENCE_PANIC.with(|s| s.set(true));
+ panic!();
+ }
+ panic_countdown -= 1;
+ a.cmp(b)
+ })
+ });
+
+ // Check that the number of things dropped is exactly
+ // what we expect (i.e., the contents of `v`).
+ for (i, c) in DROP_COUNTS.iter().enumerate().take(len) {
+ let count = c.load(Relaxed);
+ assert!(count == 1, "found drop count == {} for i == {}, len == {}", count, i, len);
+ }
+
+ // Check that the most recent versions of values were dropped.
+ assert_eq!(VERSIONS.load(Relaxed), 0);
+ }
+ };
+}
+
+thread_local!(static SILENCE_PANIC: Cell<bool> = Cell::new(false));
+
+#[test]
+#[cfg_attr(target_os = "emscripten", ignore)] // no threads
+fn panic_safe() {
+ panic::update_hook(move |prev, info| {
+ if !SILENCE_PANIC.with(|s| s.get()) {
+ prev(info);
+ }
+ });
+
+ let mut rng = thread_rng();
+
+ // Miri is too slow (but still need to `chain` to make the types match)
+ let lens = if cfg!(miri) { (1..10).chain(0..0) } else { (1..20).chain(70..MAX_LEN) };
+ let moduli: &[u32] = if cfg!(miri) { &[5] } else { &[5, 20, 50] };
+
+ for len in lens {
+ for &modulus in moduli {
+ for &has_runs in &[false, true] {
+ let mut input = (0..len)
+ .map(|id| DropCounter {
+ x: rng.next_u32() % modulus,
+ id: id,
+ version: Cell::new(0),
+ })
+ .collect::<Vec<_>>();
+
+ if has_runs {
+ for c in &mut input {
+ c.x = c.id as u32;
+ }
+
+ for _ in 0..5 {
+ let a = rng.gen::<usize>() % len;
+ let b = rng.gen::<usize>() % len;
+ if a < b {
+ input[a..b].reverse();
+ } else {
+ input.swap(a, b);
+ }
+ }
+ }
+
+ test!(input, sort_by);
+ test!(input, sort_unstable_by);
+ }
+ }
+ }
+
+ // Set default panic hook again.
+ drop(panic::take_hook());
+}
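+
+// A small, self-contained sketch (separate from the harness above, reusing the
+// `AtomicUsize`/`Relaxed` imports already present in this file) of the property
+// being exercised: a panic raised inside a comparator unwinds out of the sort,
+// and every element of the vector is still dropped exactly once.
+#[test]
+fn panic_in_comparator_still_drops_every_element() {
+    static DROPS: AtomicUsize = AtomicUsize::new(0);
+
+    struct D;
+    impl Drop for D {
+        fn drop(&mut self) {
+            DROPS.fetch_add(1, Relaxed);
+        }
+    }
+
+    let v = vec![D, D, D];
+    let _ = std::panic::catch_unwind(move || {
+        let mut v = v;
+        v.sort_by(|_, _| panic!());
+    });
+    assert_eq!(DROPS.load(Relaxed), 3);
+}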
+
+#[test]
+fn repeat_generic_slice() {
+ assert_eq!([1, 2].repeat(2), vec![1, 2, 1, 2]);
+ assert_eq!([1, 2, 3, 4].repeat(0), vec![]);
+ assert_eq!([1, 2, 3, 4].repeat(1), vec![1, 2, 3, 4]);
+ assert_eq!([1, 2, 3, 4].repeat(3), vec![1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4]);
+}
+
+#[test]
+#[allow(unreachable_patterns)]
+fn subslice_patterns() {
+ // This test comprehensively checks the passing static and dynamic semantics
+ // of subslice patterns `..`, `x @ ..`, `ref x @ ..`, and `ref mut @ ..`
+ // in slice patterns `[$($pat), $(,)?]` .
+
+ #[derive(PartialEq, Debug, Clone)]
+ struct N(u8);
+
+ macro_rules! n {
+ ($($e:expr),* $(,)?) => {
+ [$(N($e)),*]
+ }
+ }
+
+ macro_rules! c {
+ ($inp:expr, $typ:ty, $out:expr $(,)?) => {
+ assert_eq!($out, identity::<$typ>($inp))
+ };
+ }
+
+ macro_rules! m {
+ ($e:expr, $p:pat => $b:expr) => {
+ match $e {
+ $p => $b,
+ _ => panic!(),
+ }
+ };
+ }
+
+ // == Slices ==
+
+ // Matching slices using `ref` patterns:
+ let mut v = vec![N(0), N(1), N(2), N(3), N(4)];
+ let mut vc = (0..=4).collect::<Vec<u8>>();
+
+ let [..] = v[..]; // Always matches.
+ m!(v[..], [N(0), ref sub @ .., N(4)] => c!(sub, &[N], n![1, 2, 3]));
+ m!(v[..], [N(0), ref sub @ ..] => c!(sub, &[N], n![1, 2, 3, 4]));
+ m!(v[..], [ref sub @ .., N(4)] => c!(sub, &[N], n![0, 1, 2, 3]));
+ m!(v[..], [ref sub @ .., _, _, _, _, _] => c!(sub, &[N], &n![] as &[N]));
+ m!(v[..], [_, _, _, _, _, ref sub @ ..] => c!(sub, &[N], &n![] as &[N]));
+ m!(vc[..], [x, .., y] => c!((x, y), (u8, u8), (0, 4)));
+
+ // Matching slices using `ref mut` patterns:
+ let [..] = v[..]; // Always matches.
+ m!(v[..], [N(0), ref mut sub @ .., N(4)] => c!(sub, &mut [N], n![1, 2, 3]));
+ m!(v[..], [N(0), ref mut sub @ ..] => c!(sub, &mut [N], n![1, 2, 3, 4]));
+ m!(v[..], [ref mut sub @ .., N(4)] => c!(sub, &mut [N], n![0, 1, 2, 3]));
+ m!(v[..], [ref mut sub @ .., _, _, _, _, _] => c!(sub, &mut [N], &mut n![] as &mut [N]));
+ m!(v[..], [_, _, _, _, _, ref mut sub @ ..] => c!(sub, &mut [N], &mut n![] as &mut [N]));
+ m!(vc[..], [x, .., y] => c!((x, y), (u8, u8), (0, 4)));
+
+ // Matching slices using default binding modes (&):
+ let [..] = &v[..]; // Always matches.
+ m!(&v[..], [N(0), sub @ .., N(4)] => c!(sub, &[N], n![1, 2, 3]));
+ m!(&v[..], [N(0), sub @ ..] => c!(sub, &[N], n![1, 2, 3, 4]));
+ m!(&v[..], [sub @ .., N(4)] => c!(sub, &[N], n![0, 1, 2, 3]));
+ m!(&v[..], [sub @ .., _, _, _, _, _] => c!(sub, &[N], &n![] as &[N]));
+ m!(&v[..], [_, _, _, _, _, sub @ ..] => c!(sub, &[N], &n![] as &[N]));
+ m!(&vc[..], [x, .., y] => c!((x, y), (&u8, &u8), (&0, &4)));
+
+ // Matching slices using default binding modes (&mut):
+ let [..] = &mut v[..]; // Always matches.
+ m!(&mut v[..], [N(0), sub @ .., N(4)] => c!(sub, &mut [N], n![1, 2, 3]));
+ m!(&mut v[..], [N(0), sub @ ..] => c!(sub, &mut [N], n![1, 2, 3, 4]));
+ m!(&mut v[..], [sub @ .., N(4)] => c!(sub, &mut [N], n![0, 1, 2, 3]));
+ m!(&mut v[..], [sub @ .., _, _, _, _, _] => c!(sub, &mut [N], &mut n![] as &mut [N]));
+ m!(&mut v[..], [_, _, _, _, _, sub @ ..] => c!(sub, &mut [N], &mut n![] as &mut [N]));
+ m!(&mut vc[..], [x, .., y] => c!((x, y), (&mut u8, &mut u8), (&mut 0, &mut 4)));
+
+ // == Arrays ==
+ let mut v = n![0, 1, 2, 3, 4];
+ let vc = [0, 1, 2, 3, 4];
+
+ // Matching arrays by value:
+ m!(v.clone(), [N(0), sub @ .., N(4)] => c!(sub, [N; 3], n![1, 2, 3]));
+ m!(v.clone(), [N(0), sub @ ..] => c!(sub, [N; 4], n![1, 2, 3, 4]));
+ m!(v.clone(), [sub @ .., N(4)] => c!(sub, [N; 4], n![0, 1, 2, 3]));
+ m!(v.clone(), [sub @ .., _, _, _, _, _] => c!(sub, [N; 0], n![] as [N; 0]));
+ m!(v.clone(), [_, _, _, _, _, sub @ ..] => c!(sub, [N; 0], n![] as [N; 0]));
+ m!(v.clone(), [x, .., y] => c!((x, y), (N, N), (N(0), N(4))));
+ m!(v.clone(), [..] => ());
+
+ // Matching arrays by ref patterns:
+ m!(v, [N(0), ref sub @ .., N(4)] => c!(sub, &[N; 3], &n![1, 2, 3]));
+ m!(v, [N(0), ref sub @ ..] => c!(sub, &[N; 4], &n![1, 2, 3, 4]));
+ m!(v, [ref sub @ .., N(4)] => c!(sub, &[N; 4], &n![0, 1, 2, 3]));
+ m!(v, [ref sub @ .., _, _, _, _, _] => c!(sub, &[N; 0], &n![] as &[N; 0]));
+ m!(v, [_, _, _, _, _, ref sub @ ..] => c!(sub, &[N; 0], &n![] as &[N; 0]));
+ m!(vc, [x, .., y] => c!((x, y), (u8, u8), (0, 4)));
+
+ // Matching arrays by ref mut patterns:
+ m!(v, [N(0), ref mut sub @ .., N(4)] => c!(sub, &mut [N; 3], &mut n![1, 2, 3]));
+ m!(v, [N(0), ref mut sub @ ..] => c!(sub, &mut [N; 4], &mut n![1, 2, 3, 4]));
+ m!(v, [ref mut sub @ .., N(4)] => c!(sub, &mut [N; 4], &mut n![0, 1, 2, 3]));
+ m!(v, [ref mut sub @ .., _, _, _, _, _] => c!(sub, &mut [N; 0], &mut n![] as &mut [N; 0]));
+ m!(v, [_, _, _, _, _, ref mut sub @ ..] => c!(sub, &mut [N; 0], &mut n![] as &mut [N; 0]));
+
+ // Matching arrays by default binding modes (&):
+ m!(&v, [N(0), sub @ .., N(4)] => c!(sub, &[N; 3], &n![1, 2, 3]));
+ m!(&v, [N(0), sub @ ..] => c!(sub, &[N; 4], &n![1, 2, 3, 4]));
+ m!(&v, [sub @ .., N(4)] => c!(sub, &[N; 4], &n![0, 1, 2, 3]));
+ m!(&v, [sub @ .., _, _, _, _, _] => c!(sub, &[N; 0], &n![] as &[N; 0]));
+ m!(&v, [_, _, _, _, _, sub @ ..] => c!(sub, &[N; 0], &n![] as &[N; 0]));
+ m!(&v, [..] => ());
+ m!(&v, [x, .., y] => c!((x, y), (&N, &N), (&N(0), &N(4))));
+
+ // Matching arrays by default binding modes (&mut):
+ m!(&mut v, [N(0), sub @ .., N(4)] => c!(sub, &mut [N; 3], &mut n![1, 2, 3]));
+ m!(&mut v, [N(0), sub @ ..] => c!(sub, &mut [N; 4], &mut n![1, 2, 3, 4]));
+ m!(&mut v, [sub @ .., N(4)] => c!(sub, &mut [N; 4], &mut n![0, 1, 2, 3]));
+ m!(&mut v, [sub @ .., _, _, _, _, _] => c!(sub, &mut [N; 0], &mut n![] as &[N; 0]));
+ m!(&mut v, [_, _, _, _, _, sub @ ..] => c!(sub, &mut [N; 0], &mut n![] as &[N; 0]));
+ m!(&mut v, [..] => ());
+ m!(&mut v, [x, .., y] => c!((x, y), (&mut N, &mut N), (&mut N(0), &mut N(4))));
+}
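+
+// A compact, standalone recap (not from the upstream file) of the four subslice
+// binding forms exercised above, shown on a single array.
+#[test]
+fn subslice_binding_forms_recap() {
+    let mut a = [10, 20, 30, 40];
+
+    let [_, ..] = a; // `..` alone just skips the remaining elements.
+
+    let [first, rest @ ..] = a; // by value: `rest` is `[i32; 3]`.
+    assert_eq!((first, rest), (10, [20, 30, 40]));
+
+    let [_, ref rest @ ..] = a; // `ref`: `rest` is `&[i32; 3]`.
+    assert_eq!(rest, &[20, 30, 40]);
+
+    let [_, ref mut rest @ ..] = a; // `ref mut`: `rest` is `&mut [i32; 3]`.
+    rest[0] = 99;
+    assert_eq!(a, [10, 99, 30, 40]);
+}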
+
+#[test]
+fn test_group_by() {
+ let slice = &[1, 1, 1, 3, 3, 2, 2, 2, 1, 0];
+
+ let mut iter = slice.group_by(|a, b| a == b);
+ assert_eq!(iter.next(), Some(&[1, 1, 1][..]));
+ assert_eq!(iter.next(), Some(&[3, 3][..]));
+ assert_eq!(iter.next(), Some(&[2, 2, 2][..]));
+ assert_eq!(iter.next(), Some(&[1][..]));
+ assert_eq!(iter.next(), Some(&[0][..]));
+ assert_eq!(iter.next(), None);
+
+ let mut iter = slice.group_by(|a, b| a == b);
+ assert_eq!(iter.next_back(), Some(&[0][..]));
+ assert_eq!(iter.next_back(), Some(&[1][..]));
+ assert_eq!(iter.next_back(), Some(&[2, 2, 2][..]));
+ assert_eq!(iter.next_back(), Some(&[3, 3][..]));
+ assert_eq!(iter.next_back(), Some(&[1, 1, 1][..]));
+ assert_eq!(iter.next_back(), None);
+
+ let mut iter = slice.group_by(|a, b| a == b);
+ assert_eq!(iter.next(), Some(&[1, 1, 1][..]));
+ assert_eq!(iter.next_back(), Some(&[0][..]));
+ assert_eq!(iter.next(), Some(&[3, 3][..]));
+ assert_eq!(iter.next_back(), Some(&[1][..]));
+ assert_eq!(iter.next(), Some(&[2, 2, 2][..]));
+ assert_eq!(iter.next_back(), None);
+}
+
+#[test]
+fn test_group_by_mut() {
+ let slice = &mut [1, 1, 1, 3, 3, 2, 2, 2, 1, 0];
+
+ let mut iter = slice.group_by_mut(|a, b| a == b);
+ assert_eq!(iter.next(), Some(&mut [1, 1, 1][..]));
+ assert_eq!(iter.next(), Some(&mut [3, 3][..]));
+ assert_eq!(iter.next(), Some(&mut [2, 2, 2][..]));
+ assert_eq!(iter.next(), Some(&mut [1][..]));
+ assert_eq!(iter.next(), Some(&mut [0][..]));
+ assert_eq!(iter.next(), None);
+
+ let mut iter = slice.group_by_mut(|a, b| a == b);
+ assert_eq!(iter.next_back(), Some(&mut [0][..]));
+ assert_eq!(iter.next_back(), Some(&mut [1][..]));
+ assert_eq!(iter.next_back(), Some(&mut [2, 2, 2][..]));
+ assert_eq!(iter.next_back(), Some(&mut [3, 3][..]));
+ assert_eq!(iter.next_back(), Some(&mut [1, 1, 1][..]));
+ assert_eq!(iter.next_back(), None);
+
+ let mut iter = slice.group_by_mut(|a, b| a == b);
+ assert_eq!(iter.next(), Some(&mut [1, 1, 1][..]));
+ assert_eq!(iter.next_back(), Some(&mut [0][..]));
+ assert_eq!(iter.next(), Some(&mut [3, 3][..]));
+ assert_eq!(iter.next_back(), Some(&mut [1][..]));
+ assert_eq!(iter.next(), Some(&mut [2, 2, 2][..]));
+ assert_eq!(iter.next_back(), None);
+}
diff --git a/library/alloc/tests/str.rs b/library/alloc/tests/str.rs
new file mode 100644
index 000000000..7379569dd
--- /dev/null
+++ b/library/alloc/tests/str.rs
@@ -0,0 +1,2383 @@
+use std::assert_matches::assert_matches;
+use std::borrow::Cow;
+use std::cmp::Ordering::{Equal, Greater, Less};
+use std::str::{from_utf8, from_utf8_unchecked};
+
+#[test]
+fn test_le() {
+ assert!("" <= "");
+ assert!("" <= "foo");
+ assert!("foo" <= "foo");
+ assert_ne!("foo", "bar");
+}
+
+#[test]
+fn test_find() {
+ assert_eq!("hello".find('l'), Some(2));
+ assert_eq!("hello".find(|c: char| c == 'o'), Some(4));
+ assert!("hello".find('x').is_none());
+ assert!("hello".find(|c: char| c == 'x').is_none());
+ assert_eq!("ประเทศไทย中华Việt Nam".find('华'), Some(30));
+ assert_eq!("ประเทศไทย中华Việt Nam".find(|c: char| c == '华'), Some(30));
+}
+
+#[test]
+fn test_rfind() {
+ assert_eq!("hello".rfind('l'), Some(3));
+ assert_eq!("hello".rfind(|c: char| c == 'o'), Some(4));
+ assert!("hello".rfind('x').is_none());
+ assert!("hello".rfind(|c: char| c == 'x').is_none());
+ assert_eq!("ประเทศไทย中华Việt Nam".rfind('华'), Some(30));
+ assert_eq!("ประเทศไทย中华Việt Nam".rfind(|c: char| c == '华'), Some(30));
+}
+
+#[test]
+fn test_collect() {
+ let empty = "";
+ let s: String = empty.chars().collect();
+ assert_eq!(empty, s);
+ let data = "ประเทศไทย中";
+ let s: String = data.chars().collect();
+ assert_eq!(data, s);
+}
+
+#[test]
+fn test_into_bytes() {
+ let data = String::from("asdf");
+ let buf = data.into_bytes();
+ assert_eq!(buf, b"asdf");
+}
+
+#[test]
+fn test_find_str() {
+ // byte positions
+ assert_eq!("".find(""), Some(0));
+ assert!("banana".find("apple pie").is_none());
+
+ let data = "abcabc";
+ assert_eq!(data[0..6].find("ab"), Some(0));
+ assert_eq!(data[2..6].find("ab"), Some(3 - 2));
+ assert!(data[2..4].find("ab").is_none());
+
+ let string = "ประเทศไทย中华Việt Nam";
+ let mut data = String::from(string);
+ data.push_str(string);
+ assert!(data.find("ไท华").is_none());
+ assert_eq!(data[0..43].find(""), Some(0));
+ assert_eq!(data[6..43].find(""), Some(6 - 6));
+
+ assert_eq!(data[0..43].find("ประ"), Some(0));
+ assert_eq!(data[0..43].find("ทศไ"), Some(12));
+ assert_eq!(data[0..43].find("ย中"), Some(24));
+ assert_eq!(data[0..43].find("iệt"), Some(34));
+ assert_eq!(data[0..43].find("Nam"), Some(40));
+
+ assert_eq!(data[43..86].find("ประ"), Some(43 - 43));
+ assert_eq!(data[43..86].find("ทศไ"), Some(55 - 43));
+ assert_eq!(data[43..86].find("ย中"), Some(67 - 43));
+ assert_eq!(data[43..86].find("iệt"), Some(77 - 43));
+ assert_eq!(data[43..86].find("Nam"), Some(83 - 43));
+
+ // find every substring -- assert that it finds it, or an earlier occurrence.
+ let string = "Việt Namacbaabcaabaaba";
+ for (i, ci) in string.char_indices() {
+ let ip = i + ci.len_utf8();
+ for j in string[ip..].char_indices().map(|(i, _)| i).chain(Some(string.len() - ip)) {
+ let pat = &string[i..ip + j];
+ assert!(match string.find(pat) {
+ None => false,
+ Some(x) => x <= i,
+ });
+ assert!(match string.rfind(pat) {
+ None => false,
+ Some(x) => x >= i,
+ });
+ }
+ }
+}
+
+fn s(x: &str) -> String {
+ x.to_string()
+}
+
+macro_rules! test_concat {
+ ($expected: expr, $string: expr) => {{
+ let s: String = $string.concat();
+ assert_eq!($expected, s);
+ }};
+}
+
+#[test]
+fn test_concat_for_different_types() {
+ test_concat!("ab", vec![s("a"), s("b")]);
+ test_concat!("ab", vec!["a", "b"]);
+}
+
+#[test]
+fn test_concat_for_different_lengths() {
+ let empty: &[&str] = &[];
+ test_concat!("", empty);
+ test_concat!("a", ["a"]);
+ test_concat!("ab", ["a", "b"]);
+ test_concat!("abc", ["", "a", "bc"]);
+}
+
+macro_rules! test_join {
+ ($expected: expr, $string: expr, $delim: expr) => {{
+ let s = $string.join($delim);
+ assert_eq!($expected, s);
+ }};
+}
+
+#[test]
+fn test_join_for_different_types() {
+ test_join!("a-b", ["a", "b"], "-");
+ let hyphen = "-".to_string();
+ test_join!("a-b", [s("a"), s("b")], &*hyphen);
+ test_join!("a-b", vec!["a", "b"], &*hyphen);
+ test_join!("a-b", &*vec!["a", "b"], "-");
+ test_join!("a-b", vec![s("a"), s("b")], "-");
+}
+
+#[test]
+fn test_join_for_different_lengths() {
+ let empty: &[&str] = &[];
+ test_join!("", empty, "-");
+ test_join!("a", ["a"], "-");
+ test_join!("a-b", ["a", "b"], "-");
+ test_join!("-a-bc", ["", "a", "bc"], "-");
+}
+
+// join has fast paths for small separators up to 4 bytes;
+// the five-character "～" separator below is 15 bytes, so this tests the slow path.
+#[test]
+fn test_join_for_different_lengths_with_long_separator() {
+    assert_eq!("～～～～～".len(), 15);
+
+    let empty: &[&str] = &[];
+    test_join!("", empty, "～～～～～");
+    test_join!("a", ["a"], "～～～～～");
+    test_join!("a～～～～～b", ["a", "b"], "～～～～～");
+    test_join!("～～～～～a～～～～～bc", ["", "a", "bc"], "～～～～～");
+}
+
+#[test]
+fn test_join_issue_80335() {
+ use core::{borrow::Borrow, cell::Cell};
+
+ struct WeirdBorrow {
+ state: Cell<bool>,
+ }
+
+ impl Default for WeirdBorrow {
+ fn default() -> Self {
+ WeirdBorrow { state: Cell::new(false) }
+ }
+ }
+
+ impl Borrow<str> for WeirdBorrow {
+ fn borrow(&self) -> &str {
+ let state = self.state.get();
+ if state {
+ "0"
+ } else {
+ self.state.set(true);
+ "123456"
+ }
+ }
+ }
+
+ let arr: [WeirdBorrow; 3] = Default::default();
+ test_join!("0-0-0", arr, "-");
+}
+
+#[test]
+#[cfg_attr(miri, ignore)] // Miri is too slow
+fn test_unsafe_slice() {
+ assert_eq!("ab", unsafe { "abc".get_unchecked(0..2) });
+ assert_eq!("bc", unsafe { "abc".get_unchecked(1..3) });
+ assert_eq!("", unsafe { "abc".get_unchecked(1..1) });
+ fn a_million_letter_a() -> String {
+ let mut i = 0;
+ let mut rs = String::new();
+ while i < 100000 {
+ rs.push_str("aaaaaaaaaa");
+ i += 1;
+ }
+ rs
+ }
+ fn half_a_million_letter_a() -> String {
+ let mut i = 0;
+ let mut rs = String::new();
+ while i < 100000 {
+ rs.push_str("aaaaa");
+ i += 1;
+ }
+ rs
+ }
+ let letters = a_million_letter_a();
+ assert_eq!(half_a_million_letter_a(), unsafe { letters.get_unchecked(0..500000) });
+}
+
+#[test]
+fn test_starts_with() {
+ assert!("".starts_with(""));
+ assert!("abc".starts_with(""));
+ assert!("abc".starts_with("a"));
+ assert!(!"a".starts_with("abc"));
+ assert!(!"".starts_with("abc"));
+ assert!(!"ödd".starts_with("-"));
+ assert!("ödd".starts_with("öd"));
+}
+
+#[test]
+fn test_ends_with() {
+ assert!("".ends_with(""));
+ assert!("abc".ends_with(""));
+ assert!("abc".ends_with("c"));
+ assert!(!"a".ends_with("abc"));
+ assert!(!"".ends_with("abc"));
+ assert!(!"ddö".ends_with("-"));
+ assert!("ddö".ends_with("dö"));
+}
+
+#[test]
+fn test_is_empty() {
+ assert!("".is_empty());
+ assert!(!"a".is_empty());
+}
+
+#[test]
+fn test_replacen() {
+ assert_eq!("".replacen('a', "b", 5), "");
+ assert_eq!("acaaa".replacen("a", "b", 3), "bcbba");
+ assert_eq!("aaaa".replacen("a", "b", 0), "aaaa");
+
+ let test = "test";
+ assert_eq!(" test test ".replacen(test, "toast", 3), " toast toast ");
+ assert_eq!(" test test ".replacen(test, "toast", 0), " test test ");
+ assert_eq!(" test test ".replacen(test, "", 5), " ");
+
+ assert_eq!("qwer123zxc789".replacen(char::is_numeric, "", 3), "qwerzxc789");
+}
+
+#[test]
+fn test_replace() {
+ let a = "a";
+ assert_eq!("".replace(a, "b"), "");
+ assert_eq!("a".replace(a, "b"), "b");
+ assert_eq!("ab".replace(a, "b"), "bb");
+ let test = "test";
+ assert_eq!(" test test ".replace(test, "toast"), " toast toast ");
+ assert_eq!(" test test ".replace(test, ""), " ");
+}
+
+#[test]
+fn test_replace_2a() {
+ let data = "ประเทศไทย中华";
+ let repl = "دولة الكويت";
+
+ let a = "ประเ";
+ let a2 = "دولة الكويتทศไทย中华";
+ assert_eq!(data.replace(a, repl), a2);
+}
+
+#[test]
+fn test_replace_2b() {
+ let data = "ประเทศไทย中华";
+ let repl = "دولة الكويت";
+
+ let b = "ะเ";
+ let b2 = "ปรدولة الكويتทศไทย中华";
+ assert_eq!(data.replace(b, repl), b2);
+}
+
+#[test]
+fn test_replace_2c() {
+ let data = "ประเทศไทย中华";
+ let repl = "دولة الكويت";
+
+ let c = "中华";
+ let c2 = "ประเทศไทยدولة الكويت";
+ assert_eq!(data.replace(c, repl), c2);
+}
+
+#[test]
+fn test_replace_2d() {
+ let data = "ประเทศไทย中华";
+ let repl = "دولة الكويت";
+
+ let d = "ไท华";
+ assert_eq!(data.replace(d, repl), data);
+}
+
+#[test]
+fn test_replace_pattern() {
+ let data = "abcdαβγδabcdαβγδ";
+ assert_eq!(data.replace("dαβ", "😺😺😺"), "abc😺😺😺γδabc😺😺😺γδ");
+ assert_eq!(data.replace('γ', "😺😺😺"), "abcdαβ😺😺😺δabcdαβ😺😺😺δ");
+ assert_eq!(data.replace(&['a', 'γ'] as &[_], "😺😺😺"), "😺😺😺bcdαβ😺😺😺δ😺😺😺bcdαβ😺😺😺δ");
+ assert_eq!(data.replace(|c| c == 'γ', "😺😺😺"), "abcdαβ😺😺😺δabcdαβ😺😺😺δ");
+}
+
+// The current implementation of SliceIndex fails to handle methods
+// orthogonally from range types; therefore, it is worth testing
+// all of the indexing operations on each input.
+mod slice_index {
+ // Test a slicing operation **that should succeed,**
+ // testing it on all of the indexing methods.
+ //
+ // This is not suitable for testing failure on invalid inputs.
+ macro_rules! assert_range_eq {
+ ($s:expr, $range:expr, $expected:expr) => {
+ let mut s: String = $s.to_owned();
+ let mut expected: String = $expected.to_owned();
+ {
+ let s: &str = &s;
+ let expected: &str = &expected;
+
+ assert_eq!(&s[$range], expected, "(in assertion for: index)");
+ assert_eq!(s.get($range), Some(expected), "(in assertion for: get)");
+ unsafe {
+ assert_eq!(
+ s.get_unchecked($range),
+ expected,
+ "(in assertion for: get_unchecked)",
+ );
+ }
+ }
+ {
+ let s: &mut str = &mut s;
+ let expected: &mut str = &mut expected;
+
+ assert_eq!(&mut s[$range], expected, "(in assertion for: index_mut)",);
+ assert_eq!(
+ s.get_mut($range),
+ Some(&mut expected[..]),
+ "(in assertion for: get_mut)",
+ );
+ unsafe {
+ assert_eq!(
+ s.get_unchecked_mut($range),
+ expected,
+ "(in assertion for: get_unchecked_mut)",
+ );
+ }
+ }
+ };
+ }
+
+ // Make sure the macro can actually detect bugs,
+ // because if it can't, then what are we even doing here?
+ //
+ // (Be aware this only demonstrates the ability to detect bugs
+ // in the FIRST method that panics, as the macro is not designed
+ // to be used in `should_panic`)
+ #[test]
+ #[should_panic(expected = "out of bounds")]
+ fn assert_range_eq_can_fail_by_panic() {
+ assert_range_eq!("abc", 0..5, "abc");
+ }
+
+ // (Be aware this only demonstrates the ability to detect bugs
+ // in the FIRST method it calls, as the macro is not designed
+ // to be used in `should_panic`)
+ #[test]
+ #[should_panic(expected = "==")]
+ fn assert_range_eq_can_fail_by_inequality() {
+ assert_range_eq!("abc", 0..2, "abc");
+ }
+
+ // Generates test cases for bad index operations.
+ //
+ // This generates `should_panic` test cases for Index/IndexMut
+ // and `None` test cases for get/get_mut.
+ macro_rules! panic_cases {
+ ($(
+ in mod $case_name:ident {
+ data: $data:expr;
+
+ // optional:
+ //
+ // a similar input for which DATA[input] succeeds, and the corresponding
+ // output str. This helps validate "critical points" where an input range
+ // straddles the boundary between valid and invalid.
+ // (such as the input `len..len`, which is just barely valid)
+ $(
+ good: data[$good:expr] == $output:expr;
+ )*
+
+ bad: data[$bad:expr];
+ message: $expect_msg:expr; // must be a literal
+ }
+ )*) => {$(
+ mod $case_name {
+ #[test]
+ fn pass() {
+ let mut v: String = $data.into();
+
+ $( assert_range_eq!(v, $good, $output); )*
+
+ {
+ let v: &str = &v;
+ assert_eq!(v.get($bad), None, "(in None assertion for get)");
+ }
+
+ {
+ let v: &mut str = &mut v;
+ assert_eq!(v.get_mut($bad), None, "(in None assertion for get_mut)");
+ }
+ }
+
+ #[test]
+ #[should_panic(expected = $expect_msg)]
+ fn index_fail() {
+ let v: String = $data.into();
+ let v: &str = &v;
+ let _v = &v[$bad];
+ }
+
+ #[test]
+ #[should_panic(expected = $expect_msg)]
+ fn index_mut_fail() {
+ let mut v: String = $data.into();
+ let v: &mut str = &mut v;
+ let _v = &mut v[$bad];
+ }
+ }
+ )*};
+ }
+
+ #[test]
+ fn simple_ascii() {
+ assert_range_eq!("abc", .., "abc");
+
+ assert_range_eq!("abc", 0..2, "ab");
+ assert_range_eq!("abc", 0..=1, "ab");
+ assert_range_eq!("abc", ..2, "ab");
+ assert_range_eq!("abc", ..=1, "ab");
+
+ assert_range_eq!("abc", 1..3, "bc");
+ assert_range_eq!("abc", 1..=2, "bc");
+ assert_range_eq!("abc", 1..1, "");
+ assert_range_eq!("abc", 1..=0, "");
+ }
+
+ #[test]
+ fn simple_unicode() {
+ // 日本
+ assert_range_eq!("\u{65e5}\u{672c}", .., "\u{65e5}\u{672c}");
+
+ assert_range_eq!("\u{65e5}\u{672c}", 0..3, "\u{65e5}");
+ assert_range_eq!("\u{65e5}\u{672c}", 0..=2, "\u{65e5}");
+ assert_range_eq!("\u{65e5}\u{672c}", ..3, "\u{65e5}");
+ assert_range_eq!("\u{65e5}\u{672c}", ..=2, "\u{65e5}");
+
+ assert_range_eq!("\u{65e5}\u{672c}", 3..6, "\u{672c}");
+ assert_range_eq!("\u{65e5}\u{672c}", 3..=5, "\u{672c}");
+ assert_range_eq!("\u{65e5}\u{672c}", 3.., "\u{672c}");
+
+ let data = "ประเทศไทย中华";
+ assert_range_eq!(data, 0..3, "ป");
+ assert_range_eq!(data, 3..6, "ร");
+ assert_range_eq!(data, 3..3, "");
+ assert_range_eq!(data, 30..33, "华");
+
+ /*0: 中
+ 3: 华
+ 6: V
+ 7: i
+ 8: ệ
+ 11: t
+ 12:
+ 13: N
+ 14: a
+ 15: m */
+ let ss = "中华Việt Nam";
+ assert_range_eq!(ss, 3..6, "华");
+ assert_range_eq!(ss, 6..16, "Việt Nam");
+ assert_range_eq!(ss, 6..=15, "Việt Nam");
+ assert_range_eq!(ss, 6.., "Việt Nam");
+
+ assert_range_eq!(ss, 0..3, "中");
+ assert_range_eq!(ss, 3..7, "华V");
+ assert_range_eq!(ss, 3..=6, "华V");
+ assert_range_eq!(ss, 3..3, "");
+ assert_range_eq!(ss, 3..=2, "");
+ }
+
+ #[test]
+ #[cfg_attr(target_os = "emscripten", ignore)] // hits an OOM
+ #[cfg_attr(miri, ignore)] // Miri is too slow
+ fn simple_big() {
+ fn a_million_letter_x() -> String {
+ let mut i = 0;
+ let mut rs = String::new();
+ while i < 100000 {
+ rs.push_str("华华华华华华华华华华");
+ i += 1;
+ }
+ rs
+ }
+ fn half_a_million_letter_x() -> String {
+ let mut i = 0;
+ let mut rs = String::new();
+ while i < 100000 {
+ rs.push_str("华华华华华");
+ i += 1;
+ }
+ rs
+ }
+ let letters = a_million_letter_x();
+ assert_range_eq!(letters, 0..3 * 500000, half_a_million_letter_x());
+ }
+
+ #[test]
+ #[should_panic]
+ fn test_slice_fail() {
+ let _ = &"中华Việt Nam"[0..2];
+ }
+
+ panic_cases! {
+ in mod rangefrom_len {
+ data: "abcdef";
+ good: data[6..] == "";
+ bad: data[7..];
+ message: "out of bounds";
+ }
+
+ in mod rangeto_len {
+ data: "abcdef";
+ good: data[..6] == "abcdef";
+ bad: data[..7];
+ message: "out of bounds";
+ }
+
+ in mod rangetoinclusive_len {
+ data: "abcdef";
+ good: data[..=5] == "abcdef";
+ bad: data[..=6];
+ message: "out of bounds";
+ }
+
+ in mod rangeinclusive_len {
+ data: "abcdef";
+ good: data[0..=5] == "abcdef";
+ bad: data[0..=6];
+ message: "out of bounds";
+ }
+
+ in mod range_len_len {
+ data: "abcdef";
+ good: data[6..6] == "";
+ bad: data[7..7];
+ message: "out of bounds";
+ }
+
+ in mod rangeinclusive_len_len {
+ data: "abcdef";
+ good: data[6..=5] == "";
+ bad: data[7..=6];
+ message: "out of bounds";
+ }
+ }
+
+ panic_cases! {
+ in mod rangeinclusive_exhausted {
+ data: "abcdef";
+
+ good: data[0..=5] == "abcdef";
+ good: data[{
+ let mut iter = 0..=5;
+ iter.by_ref().count(); // exhaust it
+ iter
+ }] == "";
+
+ // 0..=6 is out of bounds before exhaustion, so it
+ // stands to reason that it still would be after.
+ bad: data[{
+ let mut iter = 0..=6;
+ iter.by_ref().count(); // exhaust it
+ iter
+ }];
+ message: "out of bounds";
+ }
+ }
+
+ panic_cases! {
+ in mod range_neg_width {
+ data: "abcdef";
+ good: data[4..4] == "";
+ bad: data[4..3];
+ message: "begin <= end (4 <= 3)";
+ }
+
+ in mod rangeinclusive_neg_width {
+ data: "abcdef";
+ good: data[4..=3] == "";
+ bad: data[4..=2];
+ message: "begin <= end (4 <= 3)";
+ }
+ }
+
+ mod overflow {
+ panic_cases! {
+ in mod rangeinclusive {
+ data: "hello";
+ // note: using 0 specifically ensures that the result of overflowing is 0..0,
+ // so that `get` doesn't simply return None for the wrong reason.
+ bad: data[0..=usize::MAX];
+ message: "maximum usize";
+ }
+
+ in mod rangetoinclusive {
+ data: "hello";
+ bad: data[..=usize::MAX];
+ message: "maximum usize";
+ }
+ }
+ }
+
+ mod boundary {
+ const DATA: &str = "abcαβγ";
+
+ const BAD_START: usize = 4;
+ const GOOD_START: usize = 3;
+ const BAD_END: usize = 6;
+ const GOOD_END: usize = 7;
+ const BAD_END_INCL: usize = BAD_END - 1;
+ const GOOD_END_INCL: usize = GOOD_END - 1;
+
+ // it is especially important to test all of the different range types here
+ // because some of the logic may be duplicated as part of micro-optimizations
+ // to dodge unicode boundary checks on half-ranges.
+ panic_cases! {
+ in mod range_1 {
+ data: super::DATA;
+ bad: data[super::BAD_START..super::GOOD_END];
+ message:
+ "byte index 4 is not a char boundary; it is inside 'α' (bytes 3..5) of";
+ }
+
+ in mod range_2 {
+ data: super::DATA;
+ bad: data[super::GOOD_START..super::BAD_END];
+ message:
+ "byte index 6 is not a char boundary; it is inside 'β' (bytes 5..7) of";
+ }
+
+ in mod rangefrom {
+ data: super::DATA;
+ bad: data[super::BAD_START..];
+ message:
+ "byte index 4 is not a char boundary; it is inside 'α' (bytes 3..5) of";
+ }
+
+ in mod rangeto {
+ data: super::DATA;
+ bad: data[..super::BAD_END];
+ message:
+ "byte index 6 is not a char boundary; it is inside 'β' (bytes 5..7) of";
+ }
+
+ in mod rangeinclusive_1 {
+ data: super::DATA;
+ bad: data[super::BAD_START..=super::GOOD_END_INCL];
+ message:
+ "byte index 4 is not a char boundary; it is inside 'α' (bytes 3..5) of";
+ }
+
+ in mod rangeinclusive_2 {
+ data: super::DATA;
+ bad: data[super::GOOD_START..=super::BAD_END_INCL];
+ message:
+ "byte index 6 is not a char boundary; it is inside 'β' (bytes 5..7) of";
+ }
+
+ in mod rangetoinclusive {
+ data: super::DATA;
+ bad: data[..=super::BAD_END_INCL];
+ message:
+ "byte index 6 is not a char boundary; it is inside 'β' (bytes 5..7) of";
+ }
+ }
+ }
+
+ const LOREM_PARAGRAPH: &str = "\
+ Lorem ipsum dolor sit amet, consectetur adipiscing elit. Suspendisse quis lorem \
+ sit amet dolor ultricies condimentum. Praesent iaculis purus elit, ac malesuada \
+ quam malesuada in. Duis sed orci eros. Suspendisse sit amet magna mollis, mollis \
+ nunc luctus, imperdiet mi. Integer fringilla non sem ut lacinia. Fusce varius \
+ tortor a risus porttitor hendrerit. Morbi mauris dui, ultricies nec tempus vel, \
+ gravida nec quam.";
+
+ // check the panic includes the prefix of the sliced string
+ #[test]
+ #[should_panic(expected = "byte index 1024 is out of bounds of `Lorem ipsum dolor sit amet")]
+ fn test_slice_fail_truncated_1() {
+ let _ = &LOREM_PARAGRAPH[..1024];
+ }
+ // check the truncation in the panic message
+ #[test]
+ #[should_panic(expected = "luctus, im`[...]")]
+ fn test_slice_fail_truncated_2() {
+ let _ = &LOREM_PARAGRAPH[..1024];
+ }
+}
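+
+// A minimal, standalone sketch (not part of the module above) of the family of
+// operations that `assert_range_eq!` keeps in agreement for every valid range:
+// `Index`, `get`, `get_unchecked`, and their `_mut` counterparts.
+#[test]
+fn test_str_indexing_operations_agree() {
+    let mut s = String::from("abc");
+
+    assert_eq!(&s[1..3], "bc");
+    assert_eq!(s.get(1..3), Some("bc"));
+    assert_eq!(unsafe { s.get_unchecked(1..3) }, "bc");
+
+    assert_eq!(&mut s[1..3], "bc");
+    assert_eq!(s.get_mut(1..3).map(|m| &*m), Some("bc"));
+    assert_eq!(unsafe { s.get_unchecked_mut(1..3) }, "bc");
+}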
+
+#[test]
+fn test_str_slice_rangetoinclusive_ok() {
+ let s = "abcαβγ";
+ assert_eq!(&s[..=2], "abc");
+ assert_eq!(&s[..=4], "abcα");
+}
+
+#[test]
+#[should_panic]
+fn test_str_slice_rangetoinclusive_notok() {
+ let s = "abcαβγ";
+ let _ = &s[..=3];
+}
+
+#[test]
+fn test_str_slicemut_rangetoinclusive_ok() {
+ let mut s = "abcαβγ".to_owned();
+ let s: &mut str = &mut s;
+ assert_eq!(&mut s[..=2], "abc");
+ assert_eq!(&mut s[..=4], "abcα");
+}
+
+#[test]
+#[should_panic]
+fn test_str_slicemut_rangetoinclusive_notok() {
+ let mut s = "abcαβγ".to_owned();
+ let s: &mut str = &mut s;
+ let _ = &mut s[..=3];
+}
+
+#[test]
+fn test_is_char_boundary() {
+ let s = "ศไทย中华Việt Nam β-release 🐱123";
+ assert!(s.is_char_boundary(0));
+ assert!(s.is_char_boundary(s.len()));
+ assert!(!s.is_char_boundary(s.len() + 1));
+ for (i, ch) in s.char_indices() {
+ // ensure character locations are boundaries and continuation bytes are not
+ assert!(s.is_char_boundary(i), "{} is a char boundary in {:?}", i, s);
+ for j in 1..ch.len_utf8() {
+ assert!(
+ !s.is_char_boundary(i + j),
+ "{} should not be a char boundary in {:?}",
+ i + j,
+ s
+ );
+ }
+ }
+}
+
+#[test]
+fn test_trim_start_matches() {
+ let v: &[char] = &[];
+ assert_eq!(" *** foo *** ".trim_start_matches(v), " *** foo *** ");
+ let chars: &[char] = &['*', ' '];
+ assert_eq!(" *** foo *** ".trim_start_matches(chars), "foo *** ");
+ assert_eq!(" *** *** ".trim_start_matches(chars), "");
+ assert_eq!("foo *** ".trim_start_matches(chars), "foo *** ");
+
+ assert_eq!("11foo1bar11".trim_start_matches('1'), "foo1bar11");
+ let chars: &[char] = &['1', '2'];
+ assert_eq!("12foo1bar12".trim_start_matches(chars), "foo1bar12");
+ assert_eq!("123foo1bar123".trim_start_matches(|c: char| c.is_numeric()), "foo1bar123");
+}
+
+#[test]
+fn test_trim_end_matches() {
+ let v: &[char] = &[];
+ assert_eq!(" *** foo *** ".trim_end_matches(v), " *** foo *** ");
+ let chars: &[char] = &['*', ' '];
+ assert_eq!(" *** foo *** ".trim_end_matches(chars), " *** foo");
+ assert_eq!(" *** *** ".trim_end_matches(chars), "");
+ assert_eq!(" *** foo".trim_end_matches(chars), " *** foo");
+
+ assert_eq!("11foo1bar11".trim_end_matches('1'), "11foo1bar");
+ let chars: &[char] = &['1', '2'];
+ assert_eq!("12foo1bar12".trim_end_matches(chars), "12foo1bar");
+ assert_eq!("123foo1bar123".trim_end_matches(|c: char| c.is_numeric()), "123foo1bar");
+}
+
+#[test]
+fn test_trim_matches() {
+ let v: &[char] = &[];
+ assert_eq!(" *** foo *** ".trim_matches(v), " *** foo *** ");
+ let chars: &[char] = &['*', ' '];
+ assert_eq!(" *** foo *** ".trim_matches(chars), "foo");
+ assert_eq!(" *** *** ".trim_matches(chars), "");
+ assert_eq!("foo".trim_matches(chars), "foo");
+
+ assert_eq!("11foo1bar11".trim_matches('1'), "foo1bar");
+ let chars: &[char] = &['1', '2'];
+ assert_eq!("12foo1bar12".trim_matches(chars), "foo1bar");
+ assert_eq!("123foo1bar123".trim_matches(|c: char| c.is_numeric()), "foo1bar");
+}
+
+#[test]
+fn test_trim_start() {
+ assert_eq!("".trim_start(), "");
+ assert_eq!("a".trim_start(), "a");
+ assert_eq!(" ".trim_start(), "");
+ assert_eq!(" blah".trim_start(), "blah");
+ assert_eq!(" \u{3000} wut".trim_start(), "wut");
+ assert_eq!("hey ".trim_start(), "hey ");
+}
+
+#[test]
+fn test_trim_end() {
+ assert_eq!("".trim_end(), "");
+ assert_eq!("a".trim_end(), "a");
+ assert_eq!(" ".trim_end(), "");
+ assert_eq!("blah ".trim_end(), "blah");
+ assert_eq!("wut \u{3000} ".trim_end(), "wut");
+ assert_eq!(" hey".trim_end(), " hey");
+}
+
+#[test]
+fn test_trim() {
+ assert_eq!("".trim(), "");
+ assert_eq!("a".trim(), "a");
+ assert_eq!(" ".trim(), "");
+ assert_eq!(" blah ".trim(), "blah");
+ assert_eq!("\nwut \u{3000} ".trim(), "wut");
+ assert_eq!(" hey dude ".trim(), "hey dude");
+}
+
+#[test]
+fn test_is_whitespace() {
+ assert!("".chars().all(|c| c.is_whitespace()));
+ assert!(" ".chars().all(|c| c.is_whitespace()));
+ assert!("\u{2009}".chars().all(|c| c.is_whitespace())); // Thin space
+ assert!(" \n\t ".chars().all(|c| c.is_whitespace()));
+ assert!(!" _ ".chars().all(|c| c.is_whitespace()));
+}
+
+#[test]
+fn test_is_utf8() {
+ // deny overlong encodings
+ assert!(from_utf8(&[0xc0, 0x80]).is_err());
+ assert!(from_utf8(&[0xc0, 0xae]).is_err());
+ assert!(from_utf8(&[0xe0, 0x80, 0x80]).is_err());
+ assert!(from_utf8(&[0xe0, 0x80, 0xaf]).is_err());
+ assert!(from_utf8(&[0xe0, 0x81, 0x81]).is_err());
+ assert!(from_utf8(&[0xf0, 0x82, 0x82, 0xac]).is_err());
+ assert!(from_utf8(&[0xf4, 0x90, 0x80, 0x80]).is_err());
+
+ // deny surrogates
+ assert!(from_utf8(&[0xED, 0xA0, 0x80]).is_err());
+ assert!(from_utf8(&[0xED, 0xBF, 0xBF]).is_err());
+
+ assert!(from_utf8(&[0xC2, 0x80]).is_ok());
+ assert!(from_utf8(&[0xDF, 0xBF]).is_ok());
+ assert!(from_utf8(&[0xE0, 0xA0, 0x80]).is_ok());
+ assert!(from_utf8(&[0xED, 0x9F, 0xBF]).is_ok());
+ assert!(from_utf8(&[0xEE, 0x80, 0x80]).is_ok());
+ assert!(from_utf8(&[0xEF, 0xBF, 0xBF]).is_ok());
+ assert!(from_utf8(&[0xF0, 0x90, 0x80, 0x80]).is_ok());
+ assert!(from_utf8(&[0xF4, 0x8F, 0xBF, 0xBF]).is_ok());
+}
+
+#[test]
+fn test_const_is_utf8() {
+ const _: () = {
+ // deny overlong encodings
+ assert!(from_utf8(&[0xc0, 0x80]).is_err());
+ assert!(from_utf8(&[0xc0, 0xae]).is_err());
+ assert!(from_utf8(&[0xe0, 0x80, 0x80]).is_err());
+ assert!(from_utf8(&[0xe0, 0x80, 0xaf]).is_err());
+ assert!(from_utf8(&[0xe0, 0x81, 0x81]).is_err());
+ assert!(from_utf8(&[0xf0, 0x82, 0x82, 0xac]).is_err());
+ assert!(from_utf8(&[0xf4, 0x90, 0x80, 0x80]).is_err());
+
+ // deny surrogates
+ assert!(from_utf8(&[0xED, 0xA0, 0x80]).is_err());
+ assert!(from_utf8(&[0xED, 0xBF, 0xBF]).is_err());
+
+ assert!(from_utf8(&[0xC2, 0x80]).is_ok());
+ assert!(from_utf8(&[0xDF, 0xBF]).is_ok());
+ assert!(from_utf8(&[0xE0, 0xA0, 0x80]).is_ok());
+ assert!(from_utf8(&[0xED, 0x9F, 0xBF]).is_ok());
+ assert!(from_utf8(&[0xEE, 0x80, 0x80]).is_ok());
+ assert!(from_utf8(&[0xEF, 0xBF, 0xBF]).is_ok());
+ assert!(from_utf8(&[0xF0, 0x90, 0x80, 0x80]).is_ok());
+ assert!(from_utf8(&[0xF4, 0x8F, 0xBF, 0xBF]).is_ok());
+ };
+}
+
+#[test]
+fn from_utf8_mostly_ascii() {
+ // deny invalid bytes embedded in long stretches of ascii
+ for i in 32..64 {
+ let mut data = [0; 128];
+ data[i] = 0xC0;
+ assert!(from_utf8(&data).is_err());
+ data[i] = 0xC2;
+ assert!(from_utf8(&data).is_err());
+ }
+}
+
+#[test]
+fn const_from_utf8_mostly_ascii() {
+ const _: () = {
+ // deny invalid bytes embedded in long stretches of ascii
+ let mut i = 32;
+ while i < 64 {
+ let mut data = [0; 128];
+ data[i] = 0xC0;
+ assert!(from_utf8(&data).is_err());
+ data[i] = 0xC2;
+ assert!(from_utf8(&data).is_err());
+
+ i = i + 1;
+ }
+ };
+}
+
+#[test]
+fn from_utf8_error() {
+ macro_rules! test {
+ ($input: expr, $expected_valid_up_to:pat, $expected_error_len:pat) => {
+ let error = from_utf8($input).unwrap_err();
+ assert_matches!(error.valid_up_to(), $expected_valid_up_to);
+ assert_matches!(error.error_len(), $expected_error_len);
+
+ const _: () = {
+ match from_utf8($input) {
+ Err(error) => {
+ let valid_up_to = error.valid_up_to();
+ let error_len = error.error_len();
+
+ assert!(matches!(valid_up_to, $expected_valid_up_to));
+ assert!(matches!(error_len, $expected_error_len));
+ }
+ Ok(_) => unreachable!(),
+ }
+ };
+ };
+ }
+ test!(b"A\xC3\xA9 \xFF ", 4, Some(1));
+ test!(b"A\xC3\xA9 \x80 ", 4, Some(1));
+ test!(b"A\xC3\xA9 \xC1 ", 4, Some(1));
+ test!(b"A\xC3\xA9 \xC1", 4, Some(1));
+ test!(b"A\xC3\xA9 \xC2", 4, None);
+ test!(b"A\xC3\xA9 \xC2 ", 4, Some(1));
+ test!(b"A\xC3\xA9 \xC2\xC0", 4, Some(1));
+ test!(b"A\xC3\xA9 \xE0", 4, None);
+ test!(b"A\xC3\xA9 \xE0\x9F", 4, Some(1));
+ test!(b"A\xC3\xA9 \xE0\xA0", 4, None);
+ test!(b"A\xC3\xA9 \xE0\xA0\xC0", 4, Some(2));
+ test!(b"A\xC3\xA9 \xE0\xA0 ", 4, Some(2));
+ test!(b"A\xC3\xA9 \xED\xA0\x80 ", 4, Some(1));
+ test!(b"A\xC3\xA9 \xF1", 4, None);
+ test!(b"A\xC3\xA9 \xF1\x80", 4, None);
+ test!(b"A\xC3\xA9 \xF1\x80\x80", 4, None);
+ test!(b"A\xC3\xA9 \xF1 ", 4, Some(1));
+ test!(b"A\xC3\xA9 \xF1\x80 ", 4, Some(2));
+ test!(b"A\xC3\xA9 \xF1\x80\x80 ", 4, Some(3));
+}
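+
+// A small, standalone illustration (not from the upstream macro above) of what the
+// two accessors checked there report: `valid_up_to` is the length of the leading
+// well-formed prefix, and `error_len` is `Some(n)` for an invalid sequence of `n`
+// bytes or `None` when the input simply ends in the middle of a sequence.
+#[test]
+fn from_utf8_error_accessors_illustration() {
+    // "é" is C3 A9; FF can never start a sequence, so it is a one-byte error.
+    let error = from_utf8(b"ab\xC3\xA9\xFF").unwrap_err();
+    assert_eq!(error.valid_up_to(), 4);
+    assert_eq!(error.error_len(), Some(1));
+
+    // A lone leading byte at the end of the input is incomplete, not invalid.
+    let error = from_utf8(b"ab\xC3").unwrap_err();
+    assert_eq!(error.valid_up_to(), 2);
+    assert_eq!(error.error_len(), None);
+}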
+
+#[test]
+fn test_as_bytes() {
+ // no null
+ let v = [
+ 224, 184, 168, 224, 185, 132, 224, 184, 151, 224, 184, 162, 228, 184, 173, 229, 141, 142,
+ 86, 105, 225, 187, 135, 116, 32, 78, 97, 109,
+ ];
+ let b: &[u8] = &[];
+ assert_eq!("".as_bytes(), b);
+ assert_eq!("abc".as_bytes(), b"abc");
+ assert_eq!("ศไทย中华Việt Nam".as_bytes(), v);
+}
+
+#[test]
+#[should_panic]
+fn test_as_bytes_fail() {
+ // Don't double free. (I'm not sure if this exercises the
+ // original problem code path anymore.)
+ let s = String::from("");
+ let _bytes = s.as_bytes();
+ panic!();
+}
+
+#[test]
+fn test_as_ptr() {
+ let buf = "hello".as_ptr();
+ unsafe {
+ assert_eq!(*buf.offset(0), b'h');
+ assert_eq!(*buf.offset(1), b'e');
+ assert_eq!(*buf.offset(2), b'l');
+ assert_eq!(*buf.offset(3), b'l');
+ assert_eq!(*buf.offset(4), b'o');
+ }
+}
+
+#[test]
+fn vec_str_conversions() {
+ let s1: String = String::from("All mimsy were the borogoves");
+
+ let v: Vec<u8> = s1.as_bytes().to_vec();
+ let s2: String = String::from(from_utf8(&v).unwrap());
+ let mut i = 0;
+ let n1 = s1.len();
+ let n2 = v.len();
+ assert_eq!(n1, n2);
+ while i < n1 {
+ let a: u8 = s1.as_bytes()[i];
+ let b: u8 = s2.as_bytes()[i];
+ assert_eq!(a, b);
+ i += 1;
+ }
+}
+
+#[test]
+fn test_contains() {
+ assert!("abcde".contains("bcd"));
+ assert!("abcde".contains("abcd"));
+ assert!("abcde".contains("bcde"));
+ assert!("abcde".contains(""));
+ assert!("".contains(""));
+ assert!(!"abcde".contains("def"));
+ assert!(!"".contains("a"));
+
+ let data = "ประเทศไทย中华Việt Nam";
+ assert!(data.contains("ประเ"));
+ assert!(data.contains("ะเ"));
+ assert!(data.contains("中华"));
+ assert!(!data.contains("ไท华"));
+}
+
+#[test]
+fn test_contains_char() {
+ assert!("abc".contains('b'));
+ assert!("a".contains('a'));
+ assert!(!"abc".contains('d'));
+ assert!(!"".contains('a'));
+}
+
+#[test]
+fn test_split_at() {
+ let s = "ศไทย中华Việt Nam";
+ for (index, _) in s.char_indices() {
+ let (a, b) = s.split_at(index);
+ assert_eq!(&s[..a.len()], a);
+ assert_eq!(&s[a.len()..], b);
+ }
+ let (a, b) = s.split_at(s.len());
+ assert_eq!(a, s);
+ assert_eq!(b, "");
+}
+
+#[test]
+fn test_split_at_mut() {
+ let mut s = "Hello World".to_string();
+ {
+ let (a, b) = s.split_at_mut(5);
+ a.make_ascii_uppercase();
+ b.make_ascii_lowercase();
+ }
+ assert_eq!(s, "HELLO world");
+}
+
+#[test]
+#[should_panic]
+fn test_split_at_boundscheck() {
+ let s = "ศไทย中华Việt Nam";
+ let _ = s.split_at(1);
+}
+
+#[test]
+fn test_escape_unicode() {
+ assert_eq!("abc".escape_unicode().to_string(), "\\u{61}\\u{62}\\u{63}");
+ assert_eq!("a c".escape_unicode().to_string(), "\\u{61}\\u{20}\\u{63}");
+ assert_eq!("\r\n\t".escape_unicode().to_string(), "\\u{d}\\u{a}\\u{9}");
+ assert_eq!("'\"\\".escape_unicode().to_string(), "\\u{27}\\u{22}\\u{5c}");
+ assert_eq!("\x00\x01\u{fe}\u{ff}".escape_unicode().to_string(), "\\u{0}\\u{1}\\u{fe}\\u{ff}");
+ assert_eq!("\u{100}\u{ffff}".escape_unicode().to_string(), "\\u{100}\\u{ffff}");
+ assert_eq!("\u{10000}\u{10ffff}".escape_unicode().to_string(), "\\u{10000}\\u{10ffff}");
+ assert_eq!("ab\u{fb00}".escape_unicode().to_string(), "\\u{61}\\u{62}\\u{fb00}");
+ assert_eq!("\u{1d4ea}\r".escape_unicode().to_string(), "\\u{1d4ea}\\u{d}");
+}
+
+#[test]
+fn test_escape_debug() {
+ // Note that there are subtleties with the number of backslashes
+ // on the left- and right-hand sides. In particular, Unicode code points
+ // are usually escaped with two backslashes on the right-hand side, as
+ // they are escaped. However, when the character is unescaped (e.g., for
+ // printable characters), only a single backslash appears (as the character
+ // itself appears in the debug string).
+ assert_eq!("abc".escape_debug().to_string(), "abc");
+ assert_eq!("a c".escape_debug().to_string(), "a c");
+ assert_eq!("éèê".escape_debug().to_string(), "éèê");
+ assert_eq!("\0\r\n\t".escape_debug().to_string(), "\\0\\r\\n\\t");
+ assert_eq!("'\"\\".escape_debug().to_string(), "\\'\\\"\\\\");
+ assert_eq!("\u{7f}\u{ff}".escape_debug().to_string(), "\\u{7f}\u{ff}");
+ assert_eq!("\u{100}\u{ffff}".escape_debug().to_string(), "\u{100}\\u{ffff}");
+ assert_eq!("\u{10000}\u{10ffff}".escape_debug().to_string(), "\u{10000}\\u{10ffff}");
+ assert_eq!("ab\u{200b}".escape_debug().to_string(), "ab\\u{200b}");
+ assert_eq!("\u{10d4ea}\r".escape_debug().to_string(), "\\u{10d4ea}\\r");
+ assert_eq!(
+ "\u{301}a\u{301}bé\u{e000}".escape_debug().to_string(),
+ "\\u{301}a\u{301}bé\\u{e000}"
+ );
+}
+
+#[test]
+fn test_escape_default() {
+ assert_eq!("abc".escape_default().to_string(), "abc");
+ assert_eq!("a c".escape_default().to_string(), "a c");
+ assert_eq!("éèê".escape_default().to_string(), "\\u{e9}\\u{e8}\\u{ea}");
+ assert_eq!("\r\n\t".escape_default().to_string(), "\\r\\n\\t");
+ assert_eq!("'\"\\".escape_default().to_string(), "\\'\\\"\\\\");
+ assert_eq!("\u{7f}\u{ff}".escape_default().to_string(), "\\u{7f}\\u{ff}");
+ assert_eq!("\u{100}\u{ffff}".escape_default().to_string(), "\\u{100}\\u{ffff}");
+ assert_eq!("\u{10000}\u{10ffff}".escape_default().to_string(), "\\u{10000}\\u{10ffff}");
+ assert_eq!("ab\u{200b}".escape_default().to_string(), "ab\\u{200b}");
+ assert_eq!("\u{10d4ea}\r".escape_default().to_string(), "\\u{10d4ea}\\r");
+}
+
+#[test]
+fn test_total_ord() {
+ assert_eq!("1234".cmp("123"), Greater);
+ assert_eq!("123".cmp("1234"), Less);
+ assert_eq!("1234".cmp("1234"), Equal);
+ assert_eq!("12345555".cmp("123456"), Less);
+ assert_eq!("22".cmp("1234"), Greater);
+}
+
+#[test]
+fn test_iterator() {
+ let s = "ศไทย中华Việt Nam";
+ let v = ['ศ', 'ไ', 'ท', 'ย', '中', '华', 'V', 'i', 'ệ', 't', ' ', 'N', 'a', 'm'];
+
+ let mut pos = 0;
+ let it = s.chars();
+
+ for c in it {
+ assert_eq!(c, v[pos]);
+ pos += 1;
+ }
+ assert_eq!(pos, v.len());
+ assert_eq!(s.chars().count(), v.len());
+}
+
+#[test]
+fn test_rev_iterator() {
+ let s = "ศไทย中华Việt Nam";
+ let v = ['m', 'a', 'N', ' ', 't', 'ệ', 'i', 'V', '华', '中', 'ย', 'ท', 'ไ', 'ศ'];
+
+ let mut pos = 0;
+ let it = s.chars().rev();
+
+ for c in it {
+ assert_eq!(c, v[pos]);
+ pos += 1;
+ }
+ assert_eq!(pos, v.len());
+}
+
+#[test]
+fn test_to_lowercase_rev_iterator() {
+ let s = "AÖßÜ💩ΣΤΙΓΜΑΣDžfiİ";
+ let v = ['\u{307}', 'i', 'fi', 'dž', 'σ', 'α', 'μ', 'γ', 'ι', 'τ', 'σ', '💩', 'ü', 'ß', 'ö', 'a'];
+
+ let mut pos = 0;
+ let it = s.chars().flat_map(|c| c.to_lowercase()).rev();
+
+ for c in it {
+ assert_eq!(c, v[pos]);
+ pos += 1;
+ }
+ assert_eq!(pos, v.len());
+}
+
+#[test]
+fn test_to_uppercase_rev_iterator() {
+ let s = "aößü💩στιγμαςDžfiᾀ";
+ let v =
+ ['Ι', 'Ἀ', 'I', 'F', 'DŽ', 'Σ', 'Α', 'Μ', 'Γ', 'Ι', 'Τ', 'Σ', '💩', 'Ü', 'S', 'S', 'Ö', 'A'];
+
+ let mut pos = 0;
+ let it = s.chars().flat_map(|c| c.to_uppercase()).rev();
+
+ for c in it {
+ assert_eq!(c, v[pos]);
+ pos += 1;
+ }
+ assert_eq!(pos, v.len());
+}
+
+#[test]
+#[cfg_attr(miri, ignore)] // Miri is too slow
+fn test_chars_decoding() {
+ let mut bytes = [0; 4];
+ for c in (0..0x110000).filter_map(std::char::from_u32) {
+ let s = c.encode_utf8(&mut bytes);
+ if Some(c) != s.chars().next() {
+ panic!("character {:x}={} does not decode correctly", c as u32, c);
+ }
+ }
+}
+
+#[test]
+#[cfg_attr(miri, ignore)] // Miri is too slow
+fn test_chars_rev_decoding() {
+ let mut bytes = [0; 4];
+ for c in (0..0x110000).filter_map(std::char::from_u32) {
+ let s = c.encode_utf8(&mut bytes);
+ if Some(c) != s.chars().rev().next() {
+ panic!("character {:x}={} does not decode correctly", c as u32, c);
+ }
+ }
+}
+
+#[test]
+fn test_iterator_clone() {
+ let s = "ศไทย中华Việt Nam";
+ let mut it = s.chars();
+ it.next();
+ assert!(it.clone().zip(it).all(|(x, y)| x == y));
+}
+
+#[test]
+fn test_iterator_last() {
+ let s = "ศไทย中华Việt Nam";
+ let mut it = s.chars();
+ it.next();
+ assert_eq!(it.last(), Some('m'));
+}
+
+#[test]
+fn test_chars_debug() {
+ let s = "ศไทย中华Việt Nam";
+ let c = s.chars();
+ assert_eq!(
+ format!("{c:?}"),
+ r#"Chars(['ศ', 'ไ', 'ท', 'ย', '中', '华', 'V', 'i', 'ệ', 't', ' ', 'N', 'a', 'm'])"#
+ );
+}
+
+#[test]
+fn test_bytesator() {
+ let s = "ศไทย中华Việt Nam";
+ let v = [
+ 224, 184, 168, 224, 185, 132, 224, 184, 151, 224, 184, 162, 228, 184, 173, 229, 141, 142,
+ 86, 105, 225, 187, 135, 116, 32, 78, 97, 109,
+ ];
+ let mut pos = 0;
+
+ for b in s.bytes() {
+ assert_eq!(b, v[pos]);
+ pos += 1;
+ }
+}
+
+#[test]
+fn test_bytes_revator() {
+ let s = "ศไทย中华Việt Nam";
+ let v = [
+ 224, 184, 168, 224, 185, 132, 224, 184, 151, 224, 184, 162, 228, 184, 173, 229, 141, 142,
+ 86, 105, 225, 187, 135, 116, 32, 78, 97, 109,
+ ];
+ let mut pos = v.len();
+
+ for b in s.bytes().rev() {
+ pos -= 1;
+ assert_eq!(b, v[pos]);
+ }
+}
+
+#[test]
+fn test_bytesator_nth() {
+ let s = "ศไทย中华Việt Nam";
+ let v = [
+ 224, 184, 168, 224, 185, 132, 224, 184, 151, 224, 184, 162, 228, 184, 173, 229, 141, 142,
+ 86, 105, 225, 187, 135, 116, 32, 78, 97, 109,
+ ];
+
+ let mut b = s.bytes();
+ assert_eq!(b.nth(2).unwrap(), v[2]);
+ assert_eq!(b.nth(10).unwrap(), v[10]);
+ assert_eq!(b.nth(200), None);
+}
+
+#[test]
+fn test_bytesator_count() {
+ let s = "ศไทย中华Việt Nam";
+
+ let b = s.bytes();
+ assert_eq!(b.count(), 28)
+}
+
+#[test]
+fn test_bytesator_last() {
+ let s = "ศไทย中华Việt Nam";
+
+ let b = s.bytes();
+ assert_eq!(b.last().unwrap(), 109)
+}
+
+#[test]
+fn test_char_indicesator() {
+ let s = "ศไทย中华Việt Nam";
+ let p = [0, 3, 6, 9, 12, 15, 18, 19, 20, 23, 24, 25, 26, 27];
+ let v = ['ศ', 'ไ', 'ท', 'ย', '中', '华', 'V', 'i', 'ệ', 't', ' ', 'N', 'a', 'm'];
+
+ let mut pos = 0;
+ let it = s.char_indices();
+
+ for c in it {
+ assert_eq!(c, (p[pos], v[pos]));
+ pos += 1;
+ }
+ assert_eq!(pos, v.len());
+ assert_eq!(pos, p.len());
+}
+
+#[test]
+fn test_char_indices_revator() {
+ let s = "ศไทย中华Việt Nam";
+ let p = [27, 26, 25, 24, 23, 20, 19, 18, 15, 12, 9, 6, 3, 0];
+ let v = ['m', 'a', 'N', ' ', 't', 'ệ', 'i', 'V', '华', '中', 'ย', 'ท', 'ไ', 'ศ'];
+
+ let mut pos = 0;
+ let it = s.char_indices().rev();
+
+ for c in it {
+ assert_eq!(c, (p[pos], v[pos]));
+ pos += 1;
+ }
+ assert_eq!(pos, v.len());
+ assert_eq!(pos, p.len());
+}
+
+#[test]
+fn test_char_indices_last() {
+ let s = "ศไทย中华Việt Nam";
+ let mut it = s.char_indices();
+ it.next();
+ assert_eq!(it.last(), Some((27, 'm')));
+}
+
+#[test]
+fn test_splitn_char_iterator() {
+ let data = "\nMäry häd ä little lämb\nLittle lämb\n";
+
+ let split: Vec<&str> = data.splitn(4, ' ').collect();
+ assert_eq!(split, ["\nMäry", "häd", "ä", "little lämb\nLittle lämb\n"]);
+
+ let split: Vec<&str> = data.splitn(4, |c: char| c == ' ').collect();
+ assert_eq!(split, ["\nMäry", "häd", "ä", "little lämb\nLittle lämb\n"]);
+
+ // Unicode
+ let split: Vec<&str> = data.splitn(4, 'ä').collect();
+ assert_eq!(split, ["\nM", "ry h", "d ", " little lämb\nLittle lämb\n"]);
+
+ let split: Vec<&str> = data.splitn(4, |c: char| c == 'ä').collect();
+ assert_eq!(split, ["\nM", "ry h", "d ", " little lämb\nLittle lämb\n"]);
+}
+
+#[test]
+fn test_split_char_iterator_no_trailing() {
+ let data = "\nMäry häd ä little lämb\nLittle lämb\n";
+
+ let split: Vec<&str> = data.split('\n').collect();
+ assert_eq!(split, ["", "Märy häd ä little lämb", "Little lämb", ""]);
+
+ let split: Vec<&str> = data.split_terminator('\n').collect();
+ assert_eq!(split, ["", "Märy häd ä little lämb", "Little lämb"]);
+}
+
+#[test]
+fn test_split_char_iterator_inclusive() {
+ let data = "\nMäry häd ä little lämb\nLittle lämb\n";
+
+ let split: Vec<&str> = data.split_inclusive('\n').collect();
+ assert_eq!(split, ["\n", "Märy häd ä little lämb\n", "Little lämb\n"]);
+
+ let uppercase_separated = "SheePSharKTurtlECaT";
+ let mut first_char = true;
+ let split: Vec<&str> = uppercase_separated
+ .split_inclusive(|c: char| {
+ let split = !first_char && c.is_uppercase();
+ first_char = split;
+ split
+ })
+ .collect();
+ assert_eq!(split, ["SheeP", "SharK", "TurtlE", "CaT"]);
+}
+
+#[test]
+fn test_split_char_iterator_inclusive_rev() {
+ let data = "\nMäry häd ä little lämb\nLittle lämb\n";
+
+ let split: Vec<&str> = data.split_inclusive('\n').rev().collect();
+ assert_eq!(split, ["Little lämb\n", "Märy häd ä little lämb\n", "\n"]);
+
+ // Note that the predicate is stateful and thus dependent on the
+ // iteration order: a different predicate is needed for the reverse
+ // iterator than for the forward iterator.
+ // It is not clear whether anything can be done about this.
+ let uppercase_separated = "SheePSharKTurtlECaT";
+ let mut term_char = true;
+ let split: Vec<&str> = uppercase_separated
+ .split_inclusive(|c: char| {
+ let split = term_char && c.is_uppercase();
+ term_char = c.is_uppercase();
+ split
+ })
+ .rev()
+ .collect();
+ assert_eq!(split, ["CaT", "TurtlE", "SharK", "SheeP"]);
+}
+
+#[test]
+fn test_rsplit() {
+ let data = "\nMäry häd ä little lämb\nLittle lämb\n";
+
+ let split: Vec<&str> = data.rsplit(' ').collect();
+ assert_eq!(split, ["lämb\n", "lämb\nLittle", "little", "ä", "häd", "\nMäry"]);
+
+ let split: Vec<&str> = data.rsplit("lämb").collect();
+ assert_eq!(split, ["\n", "\nLittle ", "\nMäry häd ä little "]);
+
+ let split: Vec<&str> = data.rsplit(|c: char| c == 'ä').collect();
+ assert_eq!(split, ["mb\n", "mb\nLittle l", " little l", "d ", "ry h", "\nM"]);
+}
+
+#[test]
+fn test_rsplitn() {
+ let data = "\nMäry häd ä little lämb\nLittle lämb\n";
+
+ let split: Vec<&str> = data.rsplitn(2, ' ').collect();
+ assert_eq!(split, ["lämb\n", "\nMäry häd ä little lämb\nLittle"]);
+
+ let split: Vec<&str> = data.rsplitn(2, "lämb").collect();
+ assert_eq!(split, ["\n", "\nMäry häd ä little lämb\nLittle "]);
+
+ let split: Vec<&str> = data.rsplitn(2, |c: char| c == 'ä').collect();
+ assert_eq!(split, ["mb\n", "\nMäry häd ä little lämb\nLittle l"]);
+}
+
+#[test]
+fn test_split_once() {
+ assert_eq!("".split_once("->"), None);
+ assert_eq!("-".split_once("->"), None);
+ assert_eq!("->".split_once("->"), Some(("", "")));
+ assert_eq!("a->".split_once("->"), Some(("a", "")));
+ assert_eq!("->b".split_once("->"), Some(("", "b")));
+ assert_eq!("a->b".split_once("->"), Some(("a", "b")));
+ assert_eq!("a->b->c".split_once("->"), Some(("a", "b->c")));
+ assert_eq!("---".split_once("--"), Some(("", "-")));
+}
+
+#[test]
+fn test_rsplit_once() {
+ assert_eq!("".rsplit_once("->"), None);
+ assert_eq!("-".rsplit_once("->"), None);
+ assert_eq!("->".rsplit_once("->"), Some(("", "")));
+ assert_eq!("a->".rsplit_once("->"), Some(("a", "")));
+ assert_eq!("->b".rsplit_once("->"), Some(("", "b")));
+ assert_eq!("a->b".rsplit_once("->"), Some(("a", "b")));
+ assert_eq!("a->b->c".rsplit_once("->"), Some(("a->b", "c")));
+ assert_eq!("---".rsplit_once("--"), Some(("-", "")));
+}
+
+#[test]
+fn test_split_whitespace() {
+ let data = "\n \tMäry häd\tä little lämb\nLittle lämb\n";
+ let words: Vec<&str> = data.split_whitespace().collect();
+ assert_eq!(words, ["Märy", "häd", "ä", "little", "lämb", "Little", "lämb"])
+}
+
+#[test]
+fn test_lines() {
+ let data = "\nMäry häd ä little lämb\n\r\nLittle lämb\n";
+ let lines: Vec<&str> = data.lines().collect();
+ assert_eq!(lines, ["", "Märy häd ä little lämb", "", "Little lämb"]);
+
+ let data = "\r\nMäry häd ä little lämb\n\nLittle lämb"; // no trailing \n
+ let lines: Vec<&str> = data.lines().collect();
+ assert_eq!(lines, ["", "Märy häd ä little lämb", "", "Little lämb"]);
+}
+
+#[test]
+fn test_splitator() {
+ fn t(s: &str, sep: &str, u: &[&str]) {
+ let v: Vec<&str> = s.split(sep).collect();
+ assert_eq!(v, u);
+ }
+ t("--1233345--", "12345", &["--1233345--"]);
+ t("abc::hello::there", "::", &["abc", "hello", "there"]);
+ t("::hello::there", "::", &["", "hello", "there"]);
+ t("hello::there::", "::", &["hello", "there", ""]);
+ t("::hello::there::", "::", &["", "hello", "there", ""]);
+ t("ประเทศไทย中华Việt Nam", "中华", &["ประเทศไทย", "Việt Nam"]);
+ t("zzXXXzzYYYzz", "zz", &["", "XXX", "YYY", ""]);
+ t("zzXXXzYYYz", "XXX", &["zz", "zYYYz"]);
+ t(".XXX.YYY.", ".", &["", "XXX", "YYY", ""]);
+ t("", ".", &[""]);
+ t("zz", "zz", &["", ""]);
+ t("ok", "z", &["ok"]);
+ t("zzz", "zz", &["", "z"]);
+ t("zzzzz", "zz", &["", "", "z"]);
+}
+
+#[test]
+fn test_str_default() {
+ use std::default::Default;
+
+ fn t<S: Default + AsRef<str>>() {
+ let s: S = Default::default();
+ assert_eq!(s.as_ref(), "");
+ }
+
+ t::<&str>();
+ t::<String>();
+ t::<&mut str>();
+}
+
+#[test]
+fn test_str_container() {
+ fn sum_len(v: &[&str]) -> usize {
+ v.iter().map(|x| x.len()).sum()
+ }
+
+ let s = "01234";
+ assert_eq!(5, sum_len(&["012", "", "34"]));
+ assert_eq!(5, sum_len(&["01", "2", "34", ""]));
+ assert_eq!(5, sum_len(&[s]));
+}
+
+#[test]
+fn test_str_from_utf8() {
+ let xs = b"hello";
+ assert_eq!(from_utf8(xs), Ok("hello"));
+
+ let xs = "ศไทย中华Việt Nam".as_bytes();
+ assert_eq!(from_utf8(xs), Ok("ศไทย中华Việt Nam"));
+
+ let xs = b"hello\xFF";
+ assert!(from_utf8(xs).is_err());
+}
+
+#[test]
+fn test_pattern_deref_forward() {
+ let data = "aabcdaa";
+ assert!(data.contains("bcd"));
+ assert!(data.contains(&"bcd"));
+ assert!(data.contains(&"bcd".to_string()));
+}
+
+#[test]
+fn test_empty_match_indices() {
+ let data = "aä中!";
+ let vec: Vec<_> = data.match_indices("").collect();
+ assert_eq!(vec, [(0, ""), (1, ""), (3, ""), (6, ""), (7, "")]);
+}
+
+#[test]
+fn test_bool_from_str() {
+ assert_eq!("true".parse().ok(), Some(true));
+ assert_eq!("false".parse().ok(), Some(false));
+ assert_eq!("not even a boolean".parse::<bool>().ok(), None);
+}
+
+fn check_contains_all_substrings(s: &str) {
+ assert!(s.contains(""));
+ for i in 0..s.len() {
+ for j in i + 1..=s.len() {
+ assert!(s.contains(&s[i..j]));
+ }
+ }
+}
+
+#[test]
+#[cfg_attr(miri, ignore)] // Miri is too slow
+fn strslice_issue_16589() {
+ assert!("bananas".contains("nana"));
+
+ // Prior to the fix for #16589, x.contains("abcdabcd") returned false.
+ // Test all substrings for good measure.
+ check_contains_all_substrings("012345678901234567890123456789bcdabcdabcd");
+}
+
+#[test]
+fn strslice_issue_16878() {
+ assert!(!"1234567ah012345678901ah".contains("hah"));
+ assert!(!"00abc01234567890123456789abc".contains("bcabc"));
+}
+
+#[test]
+#[cfg_attr(miri, ignore)] // Miri is too slow
+fn test_strslice_contains() {
+ let x = "There are moments, Jeeves, when one asks oneself, 'Do trousers matter?'";
+ check_contains_all_substrings(x);
+}
+
+#[test]
+fn test_rsplitn_char_iterator() {
+ let data = "\nMäry häd ä little lämb\nLittle lämb\n";
+
+ let mut split: Vec<&str> = data.rsplitn(4, ' ').collect();
+ split.reverse();
+ assert_eq!(split, ["\nMäry häd ä", "little", "lämb\nLittle", "lämb\n"]);
+
+ let mut split: Vec<&str> = data.rsplitn(4, |c: char| c == ' ').collect();
+ split.reverse();
+ assert_eq!(split, ["\nMäry häd ä", "little", "lämb\nLittle", "lämb\n"]);
+
+ // Unicode
+ let mut split: Vec<&str> = data.rsplitn(4, 'ä').collect();
+ split.reverse();
+ assert_eq!(split, ["\nMäry häd ", " little l", "mb\nLittle l", "mb\n"]);
+
+ let mut split: Vec<&str> = data.rsplitn(4, |c: char| c == 'ä').collect();
+ split.reverse();
+ assert_eq!(split, ["\nMäry häd ", " little l", "mb\nLittle l", "mb\n"]);
+}
+
+#[test]
+fn test_split_char_iterator() {
+ let data = "\nMäry häd ä little lämb\nLittle lämb\n";
+
+ let split: Vec<&str> = data.split(' ').collect();
+ assert_eq!(split, ["\nMäry", "häd", "ä", "little", "lämb\nLittle", "lämb\n"]);
+
+ let mut rsplit: Vec<&str> = data.split(' ').rev().collect();
+ rsplit.reverse();
+ assert_eq!(rsplit, ["\nMäry", "häd", "ä", "little", "lämb\nLittle", "lämb\n"]);
+
+ let split: Vec<&str> = data.split(|c: char| c == ' ').collect();
+ assert_eq!(split, ["\nMäry", "häd", "ä", "little", "lämb\nLittle", "lämb\n"]);
+
+ let mut rsplit: Vec<&str> = data.split(|c: char| c == ' ').rev().collect();
+ rsplit.reverse();
+ assert_eq!(rsplit, ["\nMäry", "häd", "ä", "little", "lämb\nLittle", "lämb\n"]);
+
+ // Unicode
+ let split: Vec<&str> = data.split('ä').collect();
+ assert_eq!(split, ["\nM", "ry h", "d ", " little l", "mb\nLittle l", "mb\n"]);
+
+ let mut rsplit: Vec<&str> = data.split('ä').rev().collect();
+ rsplit.reverse();
+ assert_eq!(rsplit, ["\nM", "ry h", "d ", " little l", "mb\nLittle l", "mb\n"]);
+
+ let split: Vec<&str> = data.split(|c: char| c == 'ä').collect();
+ assert_eq!(split, ["\nM", "ry h", "d ", " little l", "mb\nLittle l", "mb\n"]);
+
+ let mut rsplit: Vec<&str> = data.split(|c: char| c == 'ä').rev().collect();
+ rsplit.reverse();
+ assert_eq!(rsplit, ["\nM", "ry h", "d ", " little l", "mb\nLittle l", "mb\n"]);
+}
+
+#[test]
+fn test_rev_split_char_iterator_no_trailing() {
+ let data = "\nMäry häd ä little lämb\nLittle lämb\n";
+
+ let mut split: Vec<&str> = data.split('\n').rev().collect();
+ split.reverse();
+ assert_eq!(split, ["", "Märy häd ä little lämb", "Little lämb", ""]);
+
+ let mut split: Vec<&str> = data.split_terminator('\n').rev().collect();
+ split.reverse();
+ assert_eq!(split, ["", "Märy häd ä little lämb", "Little lämb"]);
+}
+
+#[test]
+fn test_utf16_code_units() {
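+ // 'é' (U+00E9) is in the BMP and encodes to the single code unit 0xE9;
+ // U+1F4A9 is outside the BMP and encodes to the surrogate pair 0xD83D 0xDCA9.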
+ assert_eq!("é\u{1F4A9}".encode_utf16().collect::<Vec<u16>>(), [0xE9, 0xD83D, 0xDCA9])
+}
+
+#[test]
+fn starts_with_in_unicode() {
+ assert!(!"├── Cargo.toml".starts_with("# "));
+}
+
+#[test]
+fn starts_short_long() {
+ assert!(!"".starts_with("##"));
+ assert!(!"##".starts_with("####"));
+ assert!("####".starts_with("##"));
+ assert!(!"##ä".starts_with("####"));
+ assert!("####ä".starts_with("##"));
+ assert!(!"##".starts_with("####ä"));
+ assert!("##ä##".starts_with("##ä"));
+
+ assert!("".starts_with(""));
+ assert!("ä".starts_with(""));
+ assert!("#ä".starts_with(""));
+ assert!("##ä".starts_with(""));
+ assert!("ä###".starts_with(""));
+ assert!("#ä##".starts_with(""));
+ assert!("##ä#".starts_with(""));
+}
+
+#[test]
+fn contains_weird_cases() {
+ assert!("* \t".contains(' '));
+ assert!(!"* \t".contains('?'));
+ assert!(!"* \t".contains('\u{1F4A9}'));
+}
+
+#[test]
+fn trim_ws() {
+ assert_eq!(" \t a \t ".trim_start_matches(|c: char| c.is_whitespace()), "a \t ");
+ assert_eq!(" \t a \t ".trim_end_matches(|c: char| c.is_whitespace()), " \t a");
+ assert_eq!(" \t a \t ".trim_start_matches(|c: char| c.is_whitespace()), "a \t ");
+ assert_eq!(" \t a \t ".trim_end_matches(|c: char| c.is_whitespace()), " \t a");
+ assert_eq!(" \t a \t ".trim_matches(|c: char| c.is_whitespace()), "a");
+ assert_eq!(" \t \t ".trim_start_matches(|c: char| c.is_whitespace()), "");
+ assert_eq!(" \t \t ".trim_end_matches(|c: char| c.is_whitespace()), "");
+ assert_eq!(" \t \t ".trim_start_matches(|c: char| c.is_whitespace()), "");
+ assert_eq!(" \t \t ".trim_end_matches(|c: char| c.is_whitespace()), "");
+ assert_eq!(" \t \t ".trim_matches(|c: char| c.is_whitespace()), "");
+}
+
+#[test]
+fn to_lowercase() {
+ assert_eq!("".to_lowercase(), "");
+ assert_eq!("AÉDžaé ".to_lowercase(), "aédžaé ");
+
+ // https://github.com/rust-lang/rust/issues/26035
+ assert_eq!("ΑΣ".to_lowercase(), "ας");
+ assert_eq!("Α'Σ".to_lowercase(), "α'ς");
+ assert_eq!("Α''Σ".to_lowercase(), "α''ς");
+
+ assert_eq!("ΑΣ Α".to_lowercase(), "ας α");
+ assert_eq!("Α'Σ Α".to_lowercase(), "α'ς α");
+ assert_eq!("Α''Σ Α".to_lowercase(), "α''ς α");
+
+ assert_eq!("ΑΣ' Α".to_lowercase(), "ας' α");
+ assert_eq!("ΑΣ'' Α".to_lowercase(), "ας'' α");
+
+ assert_eq!("Α'Σ' Α".to_lowercase(), "α'ς' α");
+ assert_eq!("Α''Σ'' Α".to_lowercase(), "α''ς'' α");
+
+ assert_eq!("Α Σ".to_lowercase(), "α σ");
+ assert_eq!("Α 'Σ".to_lowercase(), "α 'σ");
+ assert_eq!("Α ''Σ".to_lowercase(), "α ''σ");
+
+ assert_eq!("Σ".to_lowercase(), "σ");
+ assert_eq!("'Σ".to_lowercase(), "'σ");
+ assert_eq!("''Σ".to_lowercase(), "''σ");
+
+ assert_eq!("ΑΣΑ".to_lowercase(), "ασα");
+ assert_eq!("ΑΣ'Α".to_lowercase(), "ασ'α");
+ assert_eq!("ΑΣ''Α".to_lowercase(), "ασ''α");
+
+ // A really long string whose lowercase form is even longer. This tests
+ // that implementations don't assume an incorrect upper bound on
+ // allocations.
+ let upper = str::repeat("İ", 512);
+ let lower = str::repeat("i̇", 512);
+ assert_eq!(upper.to_lowercase(), lower);
+
+ // A really long ASCII-only string. This tests that the ASCII hot path
+ // functions correctly.
+ let upper = str::repeat("A", 511);
+ let lower = str::repeat("a", 511);
+ assert_eq!(upper.to_lowercase(), lower);
+}
+
+#[test]
+fn to_uppercase() {
+ assert_eq!("".to_uppercase(), "");
+ assert_eq!("aéDžßfiᾀ".to_uppercase(), "AÉDŽSSFIἈΙ");
+}
+
+#[test]
+fn test_into_string() {
+ // The only way to acquire a Box<str> in the first place is through a String, so just
+ // test that we can round-trip between Box<str> and String.
+ let string = String::from("Some text goes here");
+ assert_eq!(string.clone().into_boxed_str().into_string(), string);
+}
+
+#[test]
+fn test_box_slice_clone() {
+ let data = String::from("hello HELLO hello HELLO yes YES 5 中ä华!!!");
+ let data2 = data.clone().into_boxed_str().clone().into_string();
+
+ assert_eq!(data, data2);
+}
+
+#[test]
+fn test_cow_from() {
+ let borrowed = "borrowed";
+ let owned = String::from("owned");
+ match (Cow::from(owned.clone()), Cow::from(borrowed)) {
+ (Cow::Owned(o), Cow::Borrowed(b)) => assert!(o == owned && b == borrowed),
+ _ => panic!("invalid `Cow::from`"),
+ }
+}
+
+#[test]
+fn test_repeat() {
+ assert_eq!("".repeat(3), "");
+ assert_eq!("abc".repeat(0), "");
+ assert_eq!("α".repeat(3), "ααα");
+}
+
+mod pattern {
+ use std::str::pattern::SearchStep::{self, Done, Match, Reject};
+ use std::str::pattern::{Pattern, ReverseSearcher, Searcher};
+
+ macro_rules! make_test {
+ ($name:ident, $p:expr, $h:expr, [$($e:expr,)*]) => {
+ #[allow(unused_imports)]
+ mod $name {
+ use std::str::pattern::SearchStep::{Match, Reject};
+ use super::{cmp_search_to_vec};
+ #[test]
+ fn fwd() {
+ cmp_search_to_vec(false, $p, $h, vec![$($e),*]);
+ }
+ #[test]
+ fn bwd() {
+ cmp_search_to_vec(true, $p, $h, vec![$($e),*]);
+ }
+ }
+ }
+ }
+
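+ // Runs `pat`'s searcher over `haystack` (forward, or backward when `rev`),
+ // collects every `SearchStep` until `Done`, and asserts that the collected
+ // steps equal `right`. It also sanity-checks that the expected steps tile
+ // the haystack contiguously from start to end.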
+ fn cmp_search_to_vec<'a>(
+ rev: bool,
+ pat: impl Pattern<'a, Searcher: ReverseSearcher<'a>>,
+ haystack: &'a str,
+ right: Vec<SearchStep>,
+ ) {
+ let mut searcher = pat.into_searcher(haystack);
+ let mut v = vec![];
+ loop {
+ match if !rev { searcher.next() } else { searcher.next_back() } {
+ Match(a, b) => v.push(Match(a, b)),
+ Reject(a, b) => v.push(Reject(a, b)),
+ Done => break,
+ }
+ }
+ if rev {
+ v.reverse();
+ }
+
+ let mut first_index = 0;
+ let mut err = None;
+
+ for (i, e) in right.iter().enumerate() {
+ match *e {
+ Match(a, b) | Reject(a, b) if a <= b && a == first_index => {
+ first_index = b;
+ }
+ _ => {
+ err = Some(i);
+ break;
+ }
+ }
+ }
+
+ if let Some(err) = err {
+ panic!("Input skipped range at {err}");
+ }
+
+ if first_index != haystack.len() {
+ panic!("Did not cover whole input");
+ }
+
+ assert_eq!(v, right);
+ }
+
+ make_test!(
+ str_searcher_ascii_haystack,
+ "bb",
+ "abbcbbd",
+ [Reject(0, 1), Match(1, 3), Reject(3, 4), Match(4, 6), Reject(6, 7),]
+ );
+ make_test!(
+ str_searcher_ascii_haystack_seq,
+ "bb",
+ "abbcbbbbd",
+ [Reject(0, 1), Match(1, 3), Reject(3, 4), Match(4, 6), Match(6, 8), Reject(8, 9),]
+ );
+ make_test!(
+ str_searcher_empty_needle_ascii_haystack,
+ "",
+ "abbcbbd",
+ [
+ Match(0, 0),
+ Reject(0, 1),
+ Match(1, 1),
+ Reject(1, 2),
+ Match(2, 2),
+ Reject(2, 3),
+ Match(3, 3),
+ Reject(3, 4),
+ Match(4, 4),
+ Reject(4, 5),
+ Match(5, 5),
+ Reject(5, 6),
+ Match(6, 6),
+ Reject(6, 7),
+ Match(7, 7),
+ ]
+ );
+ make_test!(
+ str_searcher_multibyte_haystack,
+ " ",
+ "├──",
+ [Reject(0, 3), Reject(3, 6), Reject(6, 9),]
+ );
+ make_test!(
+ str_searcher_empty_needle_multibyte_haystack,
+ "",
+ "├──",
+ [
+ Match(0, 0),
+ Reject(0, 3),
+ Match(3, 3),
+ Reject(3, 6),
+ Match(6, 6),
+ Reject(6, 9),
+ Match(9, 9),
+ ]
+ );
+ make_test!(str_searcher_empty_needle_empty_haystack, "", "", [Match(0, 0),]);
+ make_test!(str_searcher_nonempty_needle_empty_haystack, "├", "", []);
+ make_test!(
+ char_searcher_ascii_haystack,
+ 'b',
+ "abbcbbd",
+ [
+ Reject(0, 1),
+ Match(1, 2),
+ Match(2, 3),
+ Reject(3, 4),
+ Match(4, 5),
+ Match(5, 6),
+ Reject(6, 7),
+ ]
+ );
+ make_test!(
+ char_searcher_multibyte_haystack,
+ ' ',
+ "├──",
+ [Reject(0, 3), Reject(3, 6), Reject(6, 9),]
+ );
+ make_test!(
+ char_searcher_short_haystack,
+ '\u{1F4A9}',
+ "* \t",
+ [Reject(0, 1), Reject(1, 2), Reject(2, 3),]
+ );
+
+ // See #85462
+ #[test]
+ fn str_searcher_empty_needle_after_done() {
+ // Empty needle and haystack
+ {
+ let mut searcher = "".into_searcher("");
+
+ assert_eq!(searcher.next(), SearchStep::Match(0, 0));
+ assert_eq!(searcher.next(), SearchStep::Done);
+ assert_eq!(searcher.next(), SearchStep::Done);
+ assert_eq!(searcher.next(), SearchStep::Done);
+
+ let mut searcher = "".into_searcher("");
+
+ assert_eq!(searcher.next_back(), SearchStep::Match(0, 0));
+ assert_eq!(searcher.next_back(), SearchStep::Done);
+ assert_eq!(searcher.next_back(), SearchStep::Done);
+ assert_eq!(searcher.next_back(), SearchStep::Done);
+ }
+ // Empty needle and non-empty haystack
+ {
+ let mut searcher = "".into_searcher("a");
+
+ assert_eq!(searcher.next(), SearchStep::Match(0, 0));
+ assert_eq!(searcher.next(), SearchStep::Reject(0, 1));
+ assert_eq!(searcher.next(), SearchStep::Match(1, 1));
+ assert_eq!(searcher.next(), SearchStep::Done);
+ assert_eq!(searcher.next(), SearchStep::Done);
+ assert_eq!(searcher.next(), SearchStep::Done);
+
+ let mut searcher = "".into_searcher("a");
+
+ assert_eq!(searcher.next_back(), SearchStep::Match(1, 1));
+ assert_eq!(searcher.next_back(), SearchStep::Reject(0, 1));
+ assert_eq!(searcher.next_back(), SearchStep::Match(0, 0));
+ assert_eq!(searcher.next_back(), SearchStep::Done);
+ assert_eq!(searcher.next_back(), SearchStep::Done);
+ assert_eq!(searcher.next_back(), SearchStep::Done);
+ }
+ }
+}
+
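+// Generates a #[test] in which, for each `(args) -> [expected]` case, the
+// forward iterator is collected and compared against the expected output;
+// when a backward constructor is also given, its output (reversed) must
+// match as well.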
+macro_rules! generate_iterator_test {
+ {
+ $name:ident {
+ $(
+ ($($arg:expr),*) -> [$($t:tt)*];
+ )*
+ }
+ with $fwd:expr, $bwd:expr;
+ } => {
+ #[test]
+ fn $name() {
+ $(
+ {
+ let res = vec![$($t)*];
+
+ let fwd_vec: Vec<_> = ($fwd)($($arg),*).collect();
+ assert_eq!(fwd_vec, res);
+
+ let mut bwd_vec: Vec<_> = ($bwd)($($arg),*).collect();
+ bwd_vec.reverse();
+ assert_eq!(bwd_vec, res);
+ }
+ )*
+ }
+ };
+ {
+ $name:ident {
+ $(
+ ($($arg:expr),*) -> [$($t:tt)*];
+ )*
+ }
+ with $fwd:expr;
+ } => {
+ #[test]
+ fn $name() {
+ $(
+ {
+ let res = vec![$($t)*];
+
+ let fwd_vec: Vec<_> = ($fwd)($($arg),*).collect();
+ assert_eq!(fwd_vec, res);
+ }
+ )*
+ }
+ }
+}
+
+generate_iterator_test! {
+ double_ended_split {
+ ("foo.bar.baz", '.') -> ["foo", "bar", "baz"];
+ ("foo::bar::baz", "::") -> ["foo", "bar", "baz"];
+ }
+ with str::split, str::rsplit;
+}
+
+generate_iterator_test! {
+ double_ended_split_terminator {
+ ("foo;bar;baz;", ';') -> ["foo", "bar", "baz"];
+ }
+ with str::split_terminator, str::rsplit_terminator;
+}
+
+generate_iterator_test! {
+ double_ended_matches {
+ ("a1b2c3", char::is_numeric) -> ["1", "2", "3"];
+ }
+ with str::matches, str::rmatches;
+}
+
+generate_iterator_test! {
+ double_ended_match_indices {
+ ("a1b2c3", char::is_numeric) -> [(1, "1"), (3, "2"), (5, "3")];
+ }
+ with str::match_indices, str::rmatch_indices;
+}
+
+generate_iterator_test! {
+ not_double_ended_splitn {
+ ("foo::bar::baz", 2, "::") -> ["foo", "bar::baz"];
+ }
+ with str::splitn;
+}
+
+generate_iterator_test! {
+ not_double_ended_rsplitn {
+ ("foo::bar::baz", 2, "::") -> ["baz", "foo::bar"];
+ }
+ with str::rsplitn;
+}
+
+#[test]
+fn different_str_pattern_forwarding_lifetimes() {
+ use std::str::pattern::Pattern;
+
+ fn foo<'a, P>(p: P)
+ where
+ for<'b> &'b P: Pattern<'a>,
+ {
+ for _ in 0..3 {
+ "asdf".find(&p);
+ }
+ }
+
+ foo::<&str>("x");
+}
+
+#[test]
+fn test_str_multiline() {
+ let a: String = "this \
+is a test"
+ .to_string();
+ let b: String = "this \
+ is \
+ another \
+ test"
+ .to_string();
+ assert_eq!(a, "this is a test".to_string());
+ assert_eq!(b, "this is another test".to_string());
+}
+
+#[test]
+fn test_str_escapes() {
+ let x = "\\\\\
+ ";
+ assert_eq!(x, r"\\"); // extraneous whitespace stripped
+}
+
+#[test]
+fn const_str_ptr() {
+ const A: [u8; 2] = ['h' as u8, 'i' as u8];
+ const B: &'static [u8; 2] = &A;
+ const C: *const u8 = B as *const u8;
+
+ // Miri does not deduplicate consts (https://github.com/rust-lang/miri/issues/131)
+ #[cfg(not(miri))]
+ {
+ let foo = &A as *const u8;
+ assert_eq!(foo, C);
+ }
+
+ unsafe {
+ assert_eq!(from_utf8_unchecked(&A), "hi");
+ assert_eq!(*C, A[0]);
+ assert_eq!(*(&B[0] as *const u8), A[0]);
+ }
+}
+
+#[test]
+fn utf8() {
+ let yen: char = '¥'; // 0xa5
+ let c_cedilla: char = 'ç'; // 0xe7
+ let thorn: char = 'þ'; // 0xfe
+ let y_diaeresis: char = 'ÿ'; // 0xff
+ let pi: char = 'Π'; // 0x3a0
+
+ assert_eq!(yen as isize, 0xa5);
+ assert_eq!(c_cedilla as isize, 0xe7);
+ assert_eq!(thorn as isize, 0xfe);
+ assert_eq!(y_diaeresis as isize, 0xff);
+ assert_eq!(pi as isize, 0x3a0);
+
+ assert_eq!(pi as isize, '\u{3a0}' as isize);
+ assert_eq!('\x0a' as isize, '\n' as isize);
+
+ let bhutan: String = "འབྲུག་ཡུལ།".to_string();
+ let japan: String = "日本".to_string();
+ let uzbekistan: String = "Ўзбекистон".to_string();
+ let austria: String = "Österreich".to_string();
+
+ let bhutan_e: String =
+ "\u{f60}\u{f56}\u{fb2}\u{f74}\u{f42}\u{f0b}\u{f61}\u{f74}\u{f63}\u{f0d}".to_string();
+ let japan_e: String = "\u{65e5}\u{672c}".to_string();
+ let uzbekistan_e: String =
+ "\u{40e}\u{437}\u{431}\u{435}\u{43a}\u{438}\u{441}\u{442}\u{43e}\u{43d}".to_string();
+ let austria_e: String = "\u{d6}sterreich".to_string();
+
+ let oo: char = 'Ö';
+ assert_eq!(oo as isize, 0xd6);
+
+ fn check_str_eq(a: String, b: String) {
+ let mut i: isize = 0;
+ for ab in a.bytes() {
+ println!("{i}");
+ println!("{ab}");
+ let bb: u8 = b.as_bytes()[i as usize];
+ println!("{bb}");
+ assert_eq!(ab, bb);
+ i += 1;
+ }
+ }
+
+ check_str_eq(bhutan, bhutan_e);
+ check_str_eq(japan, japan_e);
+ check_str_eq(uzbekistan, uzbekistan_e);
+ check_str_eq(austria, austria_e);
+}
+
+#[test]
+fn utf8_chars() {
+ // Chars of 1, 2, 3, and 4 bytes
+ let chs: Vec<char> = vec!['e', 'é', '€', '\u{10000}'];
+ let s: String = chs.iter().cloned().collect();
+ let schs: Vec<char> = s.chars().collect();
+
+ assert_eq!(s.len(), 10);
+ assert_eq!(s.chars().count(), 4);
+ assert_eq!(schs.len(), 4);
+ assert_eq!(schs.iter().cloned().collect::<String>(), s);
+
+ assert!((from_utf8(s.as_bytes()).is_ok()));
+ // invalid prefix
+ assert!((!from_utf8(&[0x80]).is_ok()));
+ // invalid 2 byte prefix
+ assert!((!from_utf8(&[0xc0]).is_ok()));
+ assert!((!from_utf8(&[0xc0, 0x10]).is_ok()));
+ // invalid 3 byte prefix
+ assert!((!from_utf8(&[0xe0]).is_ok()));
+ assert!((!from_utf8(&[0xe0, 0x10]).is_ok()));
+ assert!((!from_utf8(&[0xe0, 0xff, 0x10]).is_ok()));
+ // invalid 4 byte prefix
+ assert!((!from_utf8(&[0xf0]).is_ok()));
+ assert!((!from_utf8(&[0xf0, 0x10]).is_ok()));
+ assert!((!from_utf8(&[0xf0, 0xff, 0x10]).is_ok()));
+ assert!((!from_utf8(&[0xf0, 0xff, 0xff, 0x10]).is_ok()));
+}
+
+#[test]
+fn utf8_char_counts() {
+ let strs = [("e", 1), ("é", 1), ("€", 1), ("\u{10000}", 1), ("eé€\u{10000}", 4)];
+ let spread = if cfg!(miri) { 4 } else { 8 };
+ let mut reps = [8, 64, 256, 512]
+ .iter()
+ .copied()
+ .flat_map(|n| n - spread..=n + spread)
+ .collect::<Vec<usize>>();
+ if cfg!(not(miri)) {
+ reps.extend([1024, 1 << 16].iter().copied().flat_map(|n| n - spread..=n + spread));
+ }
+ let counts = if cfg!(miri) { 0..1 } else { 0..8 };
+ let padding = counts.map(|len| " ".repeat(len)).collect::<Vec<String>>();
+
+ for repeat in reps {
+ for (tmpl_str, tmpl_char_count) in strs {
+ for pad_start in &padding {
+ for pad_end in &padding {
+ // Create a string with padding...
+ let with_padding =
+ format!("{}{}{}", pad_start, tmpl_str.repeat(repeat), pad_end);
+ // ...and then skip past that padding. This should ensure
+ // that we test several different alignments for both head
+ // and tail.
+ let si = pad_start.len();
+ let ei = with_padding.len() - pad_end.len();
+ let target = &with_padding[si..ei];
+
+ assert!(!target.starts_with(" ") && !target.ends_with(" "));
+ let expected_count = tmpl_char_count * repeat;
+ assert_eq!(
+ expected_count,
+ target.chars().count(),
+ "wrong count for `{:?}.repeat({})` (padding: `{:?}`)",
+ tmpl_str,
+ repeat,
+ (pad_start.len(), pad_end.len()),
+ );
+ }
+ }
+ }
+ }
+}
+
+#[test]
+fn floor_char_boundary() {
+ fn check_many(s: &str, arg: impl IntoIterator<Item = usize>, ret: usize) {
+ for idx in arg {
+ assert_eq!(
+ s.floor_char_boundary(idx),
+ ret,
+ "{:?}.floor_char_boundary({:?}) != {:?}",
+ s,
+ idx,
+ ret
+ );
+ }
+ }
+
+ // edge case
+ check_many("", [0, 1, isize::MAX as usize, usize::MAX], 0);
+
+ // basic check
+ check_many("x", [0], 0);
+ check_many("x", [1, isize::MAX as usize, usize::MAX], 1);
+
+ // 1-byte chars
+ check_many("jp", [0], 0);
+ check_many("jp", [1], 1);
+ check_many("jp", 2..4, 2);
+
+ // 2-byte chars
+ check_many("ĵƥ", 0..2, 0);
+ check_many("ĵƥ", 2..4, 2);
+ check_many("ĵƥ", 4..6, 4);
+
+ // 3-byte chars
+ check_many("日本", 0..3, 0);
+ check_many("日本", 3..6, 3);
+ check_many("日本", 6..8, 6);
+
+ // 4-byte chars
+ check_many("🇯🇵", 0..4, 0);
+ check_many("🇯🇵", 4..8, 4);
+ check_many("🇯🇵", 8..10, 8);
+}
+
+#[test]
+fn ceil_char_boundary() {
+ fn check_many(s: &str, arg: impl IntoIterator<Item = usize>, ret: usize) {
+ for idx in arg {
+ assert_eq!(
+ s.ceil_char_boundary(idx),
+ ret,
+ "{:?}.ceil_char_boundary({:?}) != {:?}",
+ s,
+ idx,
+ ret
+ );
+ }
+ }
+
+ // edge case
+ check_many("", [0], 0);
+
+ // basic check
+ check_many("x", [0], 0);
+ check_many("x", [1], 1);
+
+ // 1-byte chars
+ check_many("jp", [0], 0);
+ check_many("jp", [1], 1);
+ check_many("jp", [2], 2);
+
+ // 2-byte chars
+ check_many("ĵƥ", 0..=0, 0);
+ check_many("ĵƥ", 1..=2, 2);
+ check_many("ĵƥ", 3..=4, 4);
+
+ // 3-byte chars
+ check_many("日本", 0..=0, 0);
+ check_many("日本", 1..=3, 3);
+ check_many("日本", 4..=6, 6);
+
+ // 4-byte chars
+ check_many("🇯🇵", 0..=0, 0);
+ check_many("🇯🇵", 1..=4, 4);
+ check_many("🇯🇵", 5..=8, 8);
+}
+
+#[test]
+#[should_panic]
+fn ceil_char_boundary_above_len_panic() {
+ let _ = "x".ceil_char_boundary(2);
+}
diff --git a/library/alloc/tests/string.rs b/library/alloc/tests/string.rs
new file mode 100644
index 000000000..b6836fdc8
--- /dev/null
+++ b/library/alloc/tests/string.rs
@@ -0,0 +1,876 @@
+use std::assert_matches::assert_matches;
+use std::borrow::Cow;
+use std::cell::Cell;
+use std::collections::TryReserveErrorKind::*;
+use std::ops::Bound;
+use std::ops::Bound::*;
+use std::ops::RangeBounds;
+use std::panic;
+use std::str;
+
+pub trait IntoCow<'a, B: ?Sized>
+where
+ B: ToOwned,
+{
+ fn into_cow(self) -> Cow<'a, B>;
+}
+
+impl<'a> IntoCow<'a, str> for String {
+ fn into_cow(self) -> Cow<'a, str> {
+ Cow::Owned(self)
+ }
+}
+
+impl<'a> IntoCow<'a, str> for &'a str {
+ fn into_cow(self) -> Cow<'a, str> {
+ Cow::Borrowed(self)
+ }
+}
+
+#[test]
+fn test_from_str() {
+ let owned: Option<std::string::String> = "string".parse().ok();
+ assert_eq!(owned.as_ref().map(|s| &**s), Some("string"));
+}
+
+#[test]
+fn test_from_cow_str() {
+ assert_eq!(String::from(Cow::Borrowed("string")), "string");
+ assert_eq!(String::from(Cow::Owned(String::from("string"))), "string");
+}
+
+#[test]
+fn test_unsized_to_string() {
+ let s: &str = "abc";
+ let _: String = (*s).to_string();
+}
+
+#[test]
+fn test_from_utf8() {
+ let xs = b"hello".to_vec();
+ assert_eq!(String::from_utf8(xs).unwrap(), String::from("hello"));
+
+ let xs = "ศไทย中华Việt Nam".as_bytes().to_vec();
+ assert_eq!(String::from_utf8(xs).unwrap(), String::from("ศไทย中华Việt Nam"));
+
+ let xs = b"hello\xFF".to_vec();
+ let err = String::from_utf8(xs).unwrap_err();
+ assert_eq!(err.as_bytes(), b"hello\xff");
+ let err_clone = err.clone();
+ assert_eq!(err, err_clone);
+ assert_eq!(err.into_bytes(), b"hello\xff".to_vec());
+ assert_eq!(err_clone.utf8_error().valid_up_to(), 5);
+}
+
+#[test]
+fn test_from_utf8_lossy() {
+ let xs = b"hello";
+ let ys: Cow<'_, str> = "hello".into_cow();
+ assert_eq!(String::from_utf8_lossy(xs), ys);
+
+ let xs = "ศไทย中华Việt Nam".as_bytes();
+ let ys: Cow<'_, str> = "ศไทย中华Việt Nam".into_cow();
+ assert_eq!(String::from_utf8_lossy(xs), ys);
+
+ let xs = b"Hello\xC2 There\xFF Goodbye";
+ assert_eq!(
+ String::from_utf8_lossy(xs),
+ String::from("Hello\u{FFFD} There\u{FFFD} Goodbye").into_cow()
+ );
+
+ let xs = b"Hello\xC0\x80 There\xE6\x83 Goodbye";
+ assert_eq!(
+ String::from_utf8_lossy(xs),
+ String::from("Hello\u{FFFD}\u{FFFD} There\u{FFFD} Goodbye").into_cow()
+ );
+
+ let xs = b"\xF5foo\xF5\x80bar";
+ assert_eq!(
+ String::from_utf8_lossy(xs),
+ String::from("\u{FFFD}foo\u{FFFD}\u{FFFD}bar").into_cow()
+ );
+
+ let xs = b"\xF1foo\xF1\x80bar\xF1\x80\x80baz";
+ assert_eq!(
+ String::from_utf8_lossy(xs),
+ String::from("\u{FFFD}foo\u{FFFD}bar\u{FFFD}baz").into_cow()
+ );
+
+ let xs = b"\xF4foo\xF4\x80bar\xF4\xBFbaz";
+ assert_eq!(
+ String::from_utf8_lossy(xs),
+ String::from("\u{FFFD}foo\u{FFFD}bar\u{FFFD}\u{FFFD}baz").into_cow()
+ );
+
+ let xs = b"\xF0\x80\x80\x80foo\xF0\x90\x80\x80bar";
+ assert_eq!(
+ String::from_utf8_lossy(xs),
+ String::from("\u{FFFD}\u{FFFD}\u{FFFD}\u{FFFD}foo\u{10000}bar").into_cow()
+ );
+
+ // surrogates
+ let xs = b"\xED\xA0\x80foo\xED\xBF\xBFbar";
+ assert_eq!(
+ String::from_utf8_lossy(xs),
+ String::from("\u{FFFD}\u{FFFD}\u{FFFD}foo\u{FFFD}\u{FFFD}\u{FFFD}bar").into_cow()
+ );
+}
+
+#[test]
+fn test_from_utf16() {
+ let pairs = [
+ (
+ String::from("𐍅𐌿𐌻𐍆𐌹𐌻𐌰\n"),
+ vec![
+ 0xd800, 0xdf45, 0xd800, 0xdf3f, 0xd800, 0xdf3b, 0xd800, 0xdf46, 0xd800, 0xdf39,
+ 0xd800, 0xdf3b, 0xd800, 0xdf30, 0x000a,
+ ],
+ ),
+ (
+ String::from("𐐒𐑉𐐮𐑀𐐲𐑋 𐐏𐐲𐑍\n"),
+ vec![
+ 0xd801, 0xdc12, 0xd801, 0xdc49, 0xd801, 0xdc2e, 0xd801, 0xdc40, 0xd801, 0xdc32,
+ 0xd801, 0xdc4b, 0x0020, 0xd801, 0xdc0f, 0xd801, 0xdc32, 0xd801, 0xdc4d, 0x000a,
+ ],
+ ),
+ (
+ String::from("𐌀𐌖𐌋𐌄𐌑𐌉·𐌌𐌄𐌕𐌄𐌋𐌉𐌑\n"),
+ vec![
+ 0xd800, 0xdf00, 0xd800, 0xdf16, 0xd800, 0xdf0b, 0xd800, 0xdf04, 0xd800, 0xdf11,
+ 0xd800, 0xdf09, 0x00b7, 0xd800, 0xdf0c, 0xd800, 0xdf04, 0xd800, 0xdf15, 0xd800,
+ 0xdf04, 0xd800, 0xdf0b, 0xd800, 0xdf09, 0xd800, 0xdf11, 0x000a,
+ ],
+ ),
+ (
+ String::from("𐒋𐒘𐒈𐒑𐒛𐒒 𐒕𐒓 𐒈𐒚𐒍 𐒏𐒜𐒒𐒖𐒆 𐒕𐒆\n"),
+ vec![
+ 0xd801, 0xdc8b, 0xd801, 0xdc98, 0xd801, 0xdc88, 0xd801, 0xdc91, 0xd801, 0xdc9b,
+ 0xd801, 0xdc92, 0x0020, 0xd801, 0xdc95, 0xd801, 0xdc93, 0x0020, 0xd801, 0xdc88,
+ 0xd801, 0xdc9a, 0xd801, 0xdc8d, 0x0020, 0xd801, 0xdc8f, 0xd801, 0xdc9c, 0xd801,
+ 0xdc92, 0xd801, 0xdc96, 0xd801, 0xdc86, 0x0020, 0xd801, 0xdc95, 0xd801, 0xdc86,
+ 0x000a,
+ ],
+ ),
+ // Issue #12318, even-numbered non-BMP planes
+ (String::from("\u{20000}"), vec![0xD840, 0xDC00]),
+ ];
+
+ for p in &pairs {
+ let (s, u) = (*p).clone();
+ let s_as_utf16 = s.encode_utf16().collect::<Vec<u16>>();
+ let u_as_string = String::from_utf16(&u).unwrap();
+
+ assert!(core::char::decode_utf16(u.iter().cloned()).all(|r| r.is_ok()));
+ assert_eq!(s_as_utf16, u);
+
+ assert_eq!(u_as_string, s);
+ assert_eq!(String::from_utf16_lossy(&u), s);
+
+ assert_eq!(String::from_utf16(&s_as_utf16).unwrap(), s);
+ assert_eq!(u_as_string.encode_utf16().collect::<Vec<u16>>(), u);
+ }
+}
+
+#[test]
+fn test_utf16_invalid() {
+ // The fully valid cases are tested above.
+ // lead + eof
+ assert!(String::from_utf16(&[0xD800]).is_err());
+ // lead + lead
+ assert!(String::from_utf16(&[0xD800, 0xD800]).is_err());
+
+ // isolated trail
+ assert!(String::from_utf16(&[0x0061, 0xDC00]).is_err());
+
+ // general
+ assert!(String::from_utf16(&[0xD800, 0xd801, 0xdc8b, 0xD800]).is_err());
+}
+
+#[test]
+fn test_from_utf16_lossy() {
+ // The fully valid cases are tested above.
+ // lead + eof
+ assert_eq!(String::from_utf16_lossy(&[0xD800]), String::from("\u{FFFD}"));
+ // lead + lead
+ assert_eq!(String::from_utf16_lossy(&[0xD800, 0xD800]), String::from("\u{FFFD}\u{FFFD}"));
+
+ // isolated trail
+ assert_eq!(String::from_utf16_lossy(&[0x0061, 0xDC00]), String::from("a\u{FFFD}"));
+
+ // general
+ assert_eq!(
+ String::from_utf16_lossy(&[0xD800, 0xd801, 0xdc8b, 0xD800]),
+ String::from("\u{FFFD}𐒋\u{FFFD}")
+ );
+}
+
+#[test]
+fn test_push_bytes() {
+ let mut s = String::from("ABC");
+ unsafe {
+ let mv = s.as_mut_vec();
+ mv.extend_from_slice(&[b'D']);
+ }
+ assert_eq!(s, "ABCD");
+}
+
+#[test]
+fn test_push_str() {
+ let mut s = String::new();
+ s.push_str("");
+ assert_eq!(&s[0..], "");
+ s.push_str("abc");
+ assert_eq!(&s[0..], "abc");
+ s.push_str("ประเทศไทย中华Việt Nam");
+ assert_eq!(&s[0..], "abcประเทศไทย中华Việt Nam");
+}
+
+#[test]
+fn test_add_assign() {
+ let mut s = String::new();
+ s += "";
+ assert_eq!(s.as_str(), "");
+ s += "abc";
+ assert_eq!(s.as_str(), "abc");
+ s += "ประเทศไทย中华Việt Nam";
+ assert_eq!(s.as_str(), "abcประเทศไทย中华Việt Nam");
+}
+
+#[test]
+fn test_push() {
+ let mut data = String::from("ประเทศไทย中");
+ data.push('华');
+ data.push('b'); // 1 byte
+ data.push('¢'); // 2 byte
+ data.push('€'); // 3 byte
+ data.push('𤭢'); // 4 byte
+ assert_eq!(data, "ประเทศไทย中华b¢€𤭢");
+}
+
+#[test]
+fn test_pop() {
+ let mut data = String::from("ประเทศไทย中华b¢€𤭢");
+ assert_eq!(data.pop().unwrap(), '𤭢'); // 4 bytes
+ assert_eq!(data.pop().unwrap(), '€'); // 3 bytes
+ assert_eq!(data.pop().unwrap(), '¢'); // 2 bytes
+ assert_eq!(data.pop().unwrap(), 'b'); // 1 byte
+ assert_eq!(data.pop().unwrap(), '华');
+ assert_eq!(data, "ประเทศไทย中");
+}
+
+#[test]
+fn test_split_off_empty() {
+ let orig = "Hello, world!";
+ let mut split = String::from(orig);
+ let empty: String = split.split_off(orig.len());
+ assert!(empty.is_empty());
+}
+
+#[test]
+#[should_panic]
+fn test_split_off_past_end() {
+ let orig = "Hello, world!";
+ let mut split = String::from(orig);
+ let _ = split.split_off(orig.len() + 1);
+}
+
+#[test]
+#[should_panic]
+fn test_split_off_mid_char() {
+ let mut shan = String::from("山");
+ let _broken_mountain = shan.split_off(1);
+}
+
+#[test]
+fn test_split_off_ascii() {
+ let mut ab = String::from("ABCD");
+ let orig_capacity = ab.capacity();
+ let cd = ab.split_off(2);
+ assert_eq!(ab, "AB");
+ assert_eq!(cd, "CD");
+ assert_eq!(ab.capacity(), orig_capacity);
+}
+
+#[test]
+fn test_split_off_unicode() {
+ let mut nihon = String::from("日本語");
+ let orig_capacity = nihon.capacity();
+ let go = nihon.split_off("日本".len());
+ assert_eq!(nihon, "日本");
+ assert_eq!(go, "語");
+ assert_eq!(nihon.capacity(), orig_capacity);
+}
+
+#[test]
+fn test_str_truncate() {
+ let mut s = String::from("12345");
+ s.truncate(5);
+ assert_eq!(s, "12345");
+ s.truncate(3);
+ assert_eq!(s, "123");
+ s.truncate(0);
+ assert_eq!(s, "");
+
+ let mut s = String::from("12345");
+ let p = s.as_ptr();
+ s.truncate(3);
+ s.push_str("6");
+ let p_ = s.as_ptr();
+ assert_eq!(p_, p);
+}
+
+#[test]
+fn test_str_truncate_invalid_len() {
+ let mut s = String::from("12345");
+ s.truncate(6);
+ assert_eq!(s, "12345");
+}
+
+#[test]
+#[should_panic]
+fn test_str_truncate_split_codepoint() {
+ let mut s = String::from("\u{FC}"); // ü
+ s.truncate(1);
+}
+
+#[test]
+fn test_str_clear() {
+ let mut s = String::from("12345");
+ s.clear();
+ assert_eq!(s.len(), 0);
+ assert_eq!(s, "");
+}
+
+#[test]
+fn test_str_add() {
+ let a = String::from("12345");
+ let b = a + "2";
+ let b = b + "2";
+ assert_eq!(b.len(), 7);
+ assert_eq!(b, "1234522");
+}
+
+#[test]
+fn remove() {
+ let mut s = "ศไทย中华Việt Nam; foobar".to_string();
+ assert_eq!(s.remove(0), 'ศ');
+ assert_eq!(s.len(), 33);
+ assert_eq!(s, "ไทย中华Việt Nam; foobar");
+ assert_eq!(s.remove(17), 'ệ');
+ assert_eq!(s, "ไทย中华Vit Nam; foobar");
+}
+
+#[test]
+#[should_panic]
+fn remove_bad() {
+ "ศ".to_string().remove(1);
+}
+
+#[test]
+fn test_remove_matches() {
+ let mut s = "abc".to_string();
+
+ s.remove_matches('b');
+ assert_eq!(s, "ac");
+ s.remove_matches('b');
+ assert_eq!(s, "ac");
+
+ let mut s = "abcb".to_string();
+
+ s.remove_matches('b');
+ assert_eq!(s, "ac");
+
+ let mut s = "ศไทย中华Việt Nam; foobarศ".to_string();
+ s.remove_matches('ศ');
+ assert_eq!(s, "ไทย中华Việt Nam; foobar");
+
+ let mut s = "".to_string();
+ s.remove_matches("");
+ assert_eq!(s, "");
+
+ let mut s = "aaaaa".to_string();
+ s.remove_matches('a');
+ assert_eq!(s, "");
+}
+
+#[test]
+fn test_retain() {
+ let mut s = String::from("α_β_γ");
+
+ s.retain(|_| true);
+ assert_eq!(s, "α_β_γ");
+
+ s.retain(|c| c != '_');
+ assert_eq!(s, "αβγ");
+
+ s.retain(|c| c != 'β');
+ assert_eq!(s, "αγ");
+
+ s.retain(|c| c == 'α');
+ assert_eq!(s, "α");
+
+ s.retain(|_| false);
+ assert_eq!(s, "");
+
+ let mut s = String::from("0è0");
+ let _ = panic::catch_unwind(panic::AssertUnwindSafe(|| {
+ let mut count = 0;
+ s.retain(|_| {
+ count += 1;
+ match count {
+ 1 => false,
+ 2 => true,
+ _ => panic!(),
+ }
+ });
+ }));
+ assert!(std::str::from_utf8(s.as_bytes()).is_ok());
+}
+
+#[test]
+fn insert() {
+ let mut s = "foobar".to_string();
+ s.insert(0, 'ệ');
+ assert_eq!(s, "ệfoobar");
+ s.insert(6, 'ย');
+ assert_eq!(s, "ệfooยbar");
+}
+
+#[test]
+#[should_panic]
+fn insert_bad1() {
+ "".to_string().insert(1, 't');
+}
+#[test]
+#[should_panic]
+fn insert_bad2() {
+ "ệ".to_string().insert(1, 't');
+}
+
+#[test]
+fn test_slicing() {
+ let s = "foobar".to_string();
+ assert_eq!("foobar", &s[..]);
+ assert_eq!("foo", &s[..3]);
+ assert_eq!("bar", &s[3..]);
+ assert_eq!("oob", &s[1..4]);
+}
+
+#[test]
+fn test_simple_types() {
+ assert_eq!(1.to_string(), "1");
+ assert_eq!((-1).to_string(), "-1");
+ assert_eq!(200.to_string(), "200");
+ assert_eq!(2.to_string(), "2");
+ assert_eq!(true.to_string(), "true");
+ assert_eq!(false.to_string(), "false");
+ assert_eq!(("hi".to_string()).to_string(), "hi");
+}
+
+#[test]
+fn test_vectors() {
+ let x: Vec<i32> = vec![];
+ assert_eq!(format!("{x:?}"), "[]");
+ assert_eq!(format!("{:?}", vec![1]), "[1]");
+ assert_eq!(format!("{:?}", vec![1, 2, 3]), "[1, 2, 3]");
+ assert!(format!("{:?}", vec![vec![], vec![1], vec![1, 1]]) == "[[], [1], [1, 1]]");
+}
+
+#[test]
+fn test_from_iterator() {
+ let s = "ศไทย中华Việt Nam".to_string();
+ let t = "ศไทย中华";
+ let u = "Việt Nam";
+
+ let a: String = s.chars().collect();
+ assert_eq!(s, a);
+
+ let mut b = t.to_string();
+ b.extend(u.chars());
+ assert_eq!(s, b);
+
+ let c: String = [t, u].into_iter().collect();
+ assert_eq!(s, c);
+
+ let mut d = t.to_string();
+ d.extend(vec![u]);
+ assert_eq!(s, d);
+}
+
+#[test]
+fn test_drain() {
+ let mut s = String::from("αβγ");
+ assert_eq!(s.drain(2..4).collect::<String>(), "β");
+ assert_eq!(s, "αγ");
+
+ let mut t = String::from("abcd");
+ t.drain(..0);
+ assert_eq!(t, "abcd");
+ t.drain(..1);
+ assert_eq!(t, "bcd");
+ t.drain(3..);
+ assert_eq!(t, "bcd");
+ t.drain(..);
+ assert_eq!(t, "");
+}
+
+#[test]
+#[should_panic]
+fn test_drain_start_overflow() {
+ let mut s = String::from("abc");
+ s.drain((Excluded(usize::MAX), Included(0)));
+}
+
+#[test]
+#[should_panic]
+fn test_drain_end_overflow() {
+ let mut s = String::from("abc");
+ s.drain((Included(0), Included(usize::MAX)));
+}
+
+#[test]
+fn test_replace_range() {
+ let mut s = "Hello, world!".to_owned();
+ s.replace_range(7..12, "世界");
+ assert_eq!(s, "Hello, 世界!");
+}
+
+#[test]
+#[should_panic]
+fn test_replace_range_char_boundary() {
+ let mut s = "Hello, 世界!".to_owned();
+ s.replace_range(..8, "");
+}
+
+#[test]
+fn test_replace_range_inclusive_range() {
+ let mut v = String::from("12345");
+ v.replace_range(2..=3, "789");
+ assert_eq!(v, "127895");
+ v.replace_range(1..=2, "A");
+ assert_eq!(v, "1A895");
+}
+
+#[test]
+#[should_panic]
+fn test_replace_range_out_of_bounds() {
+ let mut s = String::from("12345");
+ s.replace_range(5..6, "789");
+}
+
+#[test]
+#[should_panic]
+fn test_replace_range_inclusive_out_of_bounds() {
+ let mut s = String::from("12345");
+ s.replace_range(5..=5, "789");
+}
+
+#[test]
+#[should_panic]
+fn test_replace_range_start_overflow() {
+ let mut s = String::from("123");
+ s.replace_range((Excluded(usize::MAX), Included(0)), "");
+}
+
+#[test]
+#[should_panic]
+fn test_replace_range_end_overflow() {
+ let mut s = String::from("456");
+ s.replace_range((Included(0), Included(usize::MAX)), "");
+}
+
+#[test]
+fn test_replace_range_empty() {
+ let mut s = String::from("12345");
+ s.replace_range(1..2, "");
+ assert_eq!(s, "1345");
+}
+
+#[test]
+fn test_replace_range_unbounded() {
+ let mut s = String::from("12345");
+ s.replace_range(.., "");
+ assert_eq!(s, "");
+}
+
+#[test]
+fn test_replace_range_evil_start_bound() {
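+ // EvilRange reports a different start bound the second time it is queried,
+ // mimicking a misbehaving RangeBounds impl; replace_range must still leave
+ // the String as valid UTF-8.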
+ struct EvilRange(Cell<bool>);
+
+ impl RangeBounds<usize> for EvilRange {
+ fn start_bound(&self) -> Bound<&usize> {
+ Bound::Included(if self.0.get() {
+ &1
+ } else {
+ self.0.set(true);
+ &0
+ })
+ }
+ fn end_bound(&self) -> Bound<&usize> {
+ Bound::Unbounded
+ }
+ }
+
+ let mut s = String::from("🦀");
+ s.replace_range(EvilRange(Cell::new(false)), "");
+ assert_eq!(Ok(""), str::from_utf8(s.as_bytes()));
+}
+
+#[test]
+fn test_replace_range_evil_end_bound() {
+ struct EvilRange(Cell<bool>);
+
+ impl RangeBounds<usize> for EvilRange {
+ fn start_bound(&self) -> Bound<&usize> {
+ Bound::Included(&0)
+ }
+ fn end_bound(&self) -> Bound<&usize> {
+ Bound::Excluded(if self.0.get() {
+ &3
+ } else {
+ self.0.set(true);
+ &4
+ })
+ }
+ }
+
+ let mut s = String::from("🦀");
+ s.replace_range(EvilRange(Cell::new(false)), "");
+ assert_eq!(Ok(""), str::from_utf8(s.as_bytes()));
+}
+
+#[test]
+fn test_extend_ref() {
+ let mut a = "foo".to_string();
+ a.extend(&['b', 'a', 'r']);
+
+ assert_eq!(&a, "foobar");
+}
+
+#[test]
+fn test_into_boxed_str() {
+ let xs = String::from("hello my name is bob");
+ let ys = xs.into_boxed_str();
+ assert_eq!(&*ys, "hello my name is bob");
+}
+
+#[test]
+fn test_reserve_exact() {
+ // This is all the same as test_reserve
+
+ let mut s = String::new();
+ assert_eq!(s.capacity(), 0);
+
+ s.reserve_exact(2);
+ assert!(s.capacity() >= 2);
+
+ for _i in 0..16 {
+ s.push('0');
+ }
+
+ assert!(s.capacity() >= 16);
+ s.reserve_exact(16);
+ assert!(s.capacity() >= 32);
+
+ s.push('0');
+
+ s.reserve_exact(16);
+ assert!(s.capacity() >= 33)
+}
+
+#[test]
+#[cfg_attr(miri, ignore)] // Miri does not support signalling OOM
+#[cfg_attr(target_os = "android", ignore)] // Android used in CI has a broken dlmalloc
+fn test_try_reserve() {
+ // These are the interesting cases:
+ // * exactly isize::MAX should never trigger a CapacityOverflow (can be OOM)
+ // * > isize::MAX should always fail
+ // * On 16/32-bit it should fail with CapacityOverflow
+ // * On 64-bit it should fail with OOM
+ // * overflow may trigger when adding `len` to `cap` (in number of elements)
+ // * overflow may trigger when multiplying `new_cap` by size_of::<T> (to get bytes)
+
+ const MAX_CAP: usize = isize::MAX as usize;
+ const MAX_USIZE: usize = usize::MAX;
+
+ // On 16/32-bit, we check that allocations don't exceed isize::MAX;
+ // on 64-bit, we assume the OS will give an OOM for such a ridiculous size.
+ // Any platform that succeeds for these requests is technically broken with
+ // ptr::offset because LLVM is the worst.
+ let guards_against_isize = usize::BITS < 64;
+
+ {
+ // Note: basic stuff is checked by test_reserve
+ let mut empty_string: String = String::new();
+
+ // Check isize::MAX doesn't count as an overflow
+ if let Err(CapacityOverflow) = empty_string.try_reserve(MAX_CAP).map_err(|e| e.kind()) {
+ panic!("isize::MAX shouldn't trigger an overflow!");
+ }
+ // Play it again, frank! (just to be sure)
+ if let Err(CapacityOverflow) = empty_string.try_reserve(MAX_CAP).map_err(|e| e.kind()) {
+ panic!("isize::MAX shouldn't trigger an overflow!");
+ }
+
+ if guards_against_isize {
+ // Check isize::MAX + 1 does count as overflow
+ assert_matches!(
+ empty_string.try_reserve(MAX_CAP + 1).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "isize::MAX + 1 should trigger an overflow!"
+ );
+
+ // Check usize::MAX does count as overflow
+ assert_matches!(
+ empty_string.try_reserve(MAX_USIZE).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "usize::MAX should trigger an overflow!"
+ );
+ } else {
+ // Check isize::MAX + 1 is an OOM
+ assert_matches!(
+ empty_string.try_reserve(MAX_CAP + 1).map_err(|e| e.kind()),
+ Err(AllocError { .. }),
+ "isize::MAX + 1 should trigger an OOM!"
+ );
+
+ // Check usize::MAX is an OOM
+ assert_matches!(
+ empty_string.try_reserve(MAX_USIZE).map_err(|e| e.kind()),
+ Err(AllocError { .. }),
+ "usize::MAX should trigger an OOM!"
+ );
+ }
+ }
+
+ {
+ // Same basic idea, but with non-zero len
+ let mut ten_bytes: String = String::from("0123456789");
+
+ if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10).map_err(|e| e.kind()) {
+ panic!("isize::MAX shouldn't trigger an overflow!");
+ }
+ if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10).map_err(|e| e.kind()) {
+ panic!("isize::MAX shouldn't trigger an overflow!");
+ }
+ if guards_against_isize {
+ assert_matches!(
+ ten_bytes.try_reserve(MAX_CAP - 9).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "isize::MAX + 1 should trigger an overflow!"
+ );
+ } else {
+ assert_matches!(
+ ten_bytes.try_reserve(MAX_CAP - 9).map_err(|e| e.kind()),
+ Err(AllocError { .. }),
+ "isize::MAX + 1 should trigger an OOM!"
+ );
+ }
+ // Should always overflow in the add-to-len
+ assert_matches!(
+ ten_bytes.try_reserve(MAX_USIZE).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "usize::MAX should trigger an overflow!"
+ );
+ }
+}
+
+#[test]
+#[cfg_attr(miri, ignore)] // Miri does not support signalling OOM
+#[cfg_attr(target_os = "android", ignore)] // Android used in CI has a broken dlmalloc
+fn test_try_reserve_exact() {
+ // This is exactly the same as test_try_reserve with the method changed.
+ // See that test for comments.
+
+ const MAX_CAP: usize = isize::MAX as usize;
+ const MAX_USIZE: usize = usize::MAX;
+
+ let guards_against_isize = usize::BITS < 64;
+
+ {
+ let mut empty_string: String = String::new();
+
+ if let Err(CapacityOverflow) = empty_string.try_reserve_exact(MAX_CAP).map_err(|e| e.kind())
+ {
+ panic!("isize::MAX shouldn't trigger an overflow!");
+ }
+ if let Err(CapacityOverflow) = empty_string.try_reserve_exact(MAX_CAP).map_err(|e| e.kind())
+ {
+ panic!("isize::MAX shouldn't trigger an overflow!");
+ }
+
+ if guards_against_isize {
+ assert_matches!(
+ empty_string.try_reserve_exact(MAX_CAP + 1).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "isize::MAX + 1 should trigger an overflow!"
+ );
+
+ assert_matches!(
+ empty_string.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "usize::MAX should trigger an overflow!"
+ );
+ } else {
+ assert_matches!(
+ empty_string.try_reserve_exact(MAX_CAP + 1).map_err(|e| e.kind()),
+ Err(AllocError { .. }),
+ "isize::MAX + 1 should trigger an OOM!"
+ );
+
+ assert_matches!(
+ empty_string.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind()),
+ Err(AllocError { .. }),
+ "usize::MAX should trigger an OOM!"
+ );
+ }
+ }
+
+ {
+ let mut ten_bytes: String = String::from("0123456789");
+
+ if let Err(CapacityOverflow) =
+ ten_bytes.try_reserve_exact(MAX_CAP - 10).map_err(|e| e.kind())
+ {
+ panic!("isize::MAX shouldn't trigger an overflow!");
+ }
+ if let Err(CapacityOverflow) =
+ ten_bytes.try_reserve_exact(MAX_CAP - 10).map_err(|e| e.kind())
+ {
+ panic!("isize::MAX shouldn't trigger an overflow!");
+ }
+ if guards_against_isize {
+ assert_matches!(
+ ten_bytes.try_reserve_exact(MAX_CAP - 9).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "isize::MAX + 1 should trigger an overflow!"
+ );
+ } else {
+ assert_matches!(
+ ten_bytes.try_reserve_exact(MAX_CAP - 9).map_err(|e| e.kind()),
+ Err(AllocError { .. }),
+ "isize::MAX + 1 should trigger an OOM!"
+ );
+ }
+ assert_matches!(
+ ten_bytes.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "usize::MAX should trigger an overflow!"
+ );
+ }
+}
+
+#[test]
+fn test_from_char() {
+ assert_eq!(String::from('a'), 'a'.to_string());
+ let s: String = 'x'.into();
+ assert_eq!(s, 'x'.to_string());
+}
+
+#[test]
+fn test_str_concat() {
+ let a: String = "hello".to_string();
+ let b: String = "world".to_string();
+ let s: String = format!("{a}{b}");
+ assert_eq!(s.as_bytes()[9], 'd' as u8);
+}
diff --git a/library/alloc/tests/thin_box.rs b/library/alloc/tests/thin_box.rs
new file mode 100644
index 000000000..368aa564f
--- /dev/null
+++ b/library/alloc/tests/thin_box.rs
@@ -0,0 +1,262 @@
+use core::fmt::Debug;
+use core::mem::size_of;
+use std::boxed::ThinBox;
+
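+// A ThinBox keeps the pointee's metadata in the allocation rather than in the
+// handle, so it is a single non-null pointer and Option<ThinBox<T>> can use
+// the null niche to stay pointer-sized.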
+#[test]
+fn want_niche_optimization() {
+ fn uses_niche<T: ?Sized>() -> bool {
+ size_of::<*const ()>() == size_of::<Option<ThinBox<T>>>()
+ }
+
+ trait Tr {}
+ assert!(uses_niche::<dyn Tr>());
+ assert!(uses_niche::<[i32]>());
+ assert!(uses_niche::<i32>());
+}
+
+#[test]
+fn want_thin() {
+ fn is_thin<T: ?Sized>() -> bool {
+ size_of::<*const ()>() == size_of::<ThinBox<T>>()
+ }
+
+ trait Tr {}
+ assert!(is_thin::<dyn Tr>());
+ assert!(is_thin::<[i32]>());
+ assert!(is_thin::<i32>());
+}
+
+#[allow(dead_code)]
+fn assert_covariance() {
+ fn thin_box<'new>(b: ThinBox<[&'static str]>) -> ThinBox<[&'new str]> {
+ b
+ }
+}
+
+#[track_caller]
+fn verify_aligned<T>(ptr: *const T) {
+ // Use `black_box` to attempt to obscure the fact that we're calling this
+ // function on pointers that come from box/references, which the compiler
+ // would otherwise realize is impossible (because it would mean we've
+ // already executed UB).
+ //
+ // That is, we'd *like* it to be possible for the asserts in this function
+ // to detect brokenness in the ThinBox impl.
+ //
+ // It would probably be better if we instead had these as debug_asserts
+ // inside `ThinBox`, prior to the point where we do the UB. Anyway, in
+ // practice these checks are mostly just smoke-detectors for an extremely
+ // broken `ThinBox` impl, since it's an extremely subtle piece of code.
+ let ptr = core::hint::black_box(ptr);
+ let align = core::mem::align_of::<T>();
+ assert!(
+ (ptr.addr() & (align - 1)) == 0 && !ptr.is_null(),
+ "misaligned ThinBox data; valid pointers to `{}` should be aligned to {align}: {ptr:p}",
+ core::any::type_name::<T>(),
+ );
+}
+
+#[track_caller]
+fn check_thin_sized<T: Debug + PartialEq + Clone>(make: impl FnOnce() -> T) {
+ let value = make();
+ let boxed = ThinBox::new(value.clone());
+ let val = &*boxed;
+ verify_aligned(val as *const T);
+ assert_eq!(val, &value);
+}
+
+#[track_caller]
+fn check_thin_dyn<T: Debug + PartialEq + Clone>(make: impl FnOnce() -> T) {
+ let value = make();
+ let wanted_debug = format!("{value:?}");
+ let boxed: ThinBox<dyn Debug> = ThinBox::new_unsize(value.clone());
+ let val = &*boxed;
+ // wide reference -> wide pointer -> thin pointer
+ verify_aligned(val as *const dyn Debug as *const T);
+ let got_debug = format!("{val:?}");
+ assert_eq!(wanted_debug, got_debug);
+}
+
+macro_rules! define_test {
+ (
+ @test_name: $testname:ident;
+
+ $(#[$m:meta])*
+ struct $Type:ident($inner:ty);
+
+ $($test_stmts:tt)*
+ ) => {
+ #[test]
+ fn $testname() {
+ use core::sync::atomic::{AtomicIsize, Ordering};
+ // Define the type, and implement new/clone/drop in such a way that
+ // the number of live instances will be counted.
+ $(#[$m])*
+ #[derive(Debug, PartialEq)]
+ struct $Type {
+ _priv: $inner,
+ }
+
+ impl Clone for $Type {
+ fn clone(&self) -> Self {
+ verify_aligned(self);
+ Self::new(self._priv.clone())
+ }
+ }
+
+ impl Drop for $Type {
+ fn drop(&mut self) {
+ verify_aligned(self);
+ Self::modify_live(-1);
+ }
+ }
+
+ impl $Type {
+ fn new(i: $inner) -> Self {
+ Self::modify_live(1);
+ Self { _priv: i }
+ }
+
+ fn modify_live(n: isize) -> isize {
+ static COUNTER: AtomicIsize = AtomicIsize::new(0);
+ COUNTER.fetch_add(n, Ordering::Relaxed) + n
+ }
+
+ fn live_objects() -> isize {
+ Self::modify_live(0)
+ }
+ }
+ // Run the test statements
+ let _: () = { $($test_stmts)* };
+ // Check that we didn't leak anything, or call drop too many times.
+ assert_eq!(
+ $Type::live_objects(), 0,
+ "Wrong number of drops of {}, `initializations - drops` should be 0.",
+ stringify!($Type),
+ );
+ }
+ };
+}
+
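+// Each `define_test!` invocation below generates one `#[test]` that wraps the
+// given payload in the instance-counting struct, round-trips it through
+// `ThinBox` via `check_thin_sized` and `check_thin_dyn`, and finally asserts
+// that the live-object counter is back at zero (nothing leaked or double-dropped).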
+define_test! {
+ @test_name: align1zst;
+ struct Align1Zst(());
+
+ check_thin_sized(|| Align1Zst::new(()));
+ check_thin_dyn(|| Align1Zst::new(()));
+}
+
+define_test! {
+ @test_name: align1small;
+ struct Align1Small(u8);
+
+ check_thin_sized(|| Align1Small::new(50));
+ check_thin_dyn(|| Align1Small::new(50));
+}
+
+define_test! {
+ @test_name: align1_size_not_pow2;
+    struct Align1NotPow2Size([u8; 79]);
+
+    check_thin_sized(|| Align1NotPow2Size::new([100; 79]));
+    check_thin_dyn(|| Align1NotPow2Size::new([100; 79]));
+}
+
+define_test! {
+ @test_name: align1big;
+ struct Align1Big([u8; 256]);
+
+ check_thin_sized(|| Align1Big::new([5u8; 256]));
+ check_thin_dyn(|| Align1Big::new([5u8; 256]));
+}
+
+// Note: `#[repr(align(2))]` is worth testing because
+// - it can have misaligned pointers, unlike align(1), and
+// - its alignment is still expected to be less than the alignment of a vtable.
+define_test! {
+ @test_name: align2zst;
+ #[repr(align(2))]
+ struct Align2Zst(());
+
+ check_thin_sized(|| Align2Zst::new(()));
+ check_thin_dyn(|| Align2Zst::new(()));
+}
+
+define_test! {
+ @test_name: align2small;
+ #[repr(align(2))]
+ struct Align2Small(u8);
+
+ check_thin_sized(|| Align2Small::new(60));
+ check_thin_dyn(|| Align2Small::new(60));
+}
+
+define_test! {
+ @test_name: align2full;
+ #[repr(align(2))]
+ struct Align2Full([u8; 2]);
+ check_thin_sized(|| Align2Full::new([3u8; 2]));
+ check_thin_dyn(|| Align2Full::new([3u8; 2]));
+}
+
+define_test! {
+ @test_name: align2_size_not_pow2;
+ #[repr(align(2))]
+ struct Align2NotPower2Size([u8; 6]);
+
+ check_thin_sized(|| Align2NotPower2Size::new([3; 6]));
+ check_thin_dyn(|| Align2NotPower2Size::new([3; 6]));
+}
+
+define_test! {
+ @test_name: align2big;
+ #[repr(align(2))]
+ struct Align2Big([u8; 256]);
+
+ check_thin_sized(|| Align2Big::new([5u8; 256]));
+ check_thin_dyn(|| Align2Big::new([5u8; 256]));
+}
+
+define_test! {
+ @test_name: align64zst;
+ #[repr(align(64))]
+ struct Align64Zst(());
+
+ check_thin_sized(|| Align64Zst::new(()));
+ check_thin_dyn(|| Align64Zst::new(()));
+}
+
+define_test! {
+ @test_name: align64small;
+ #[repr(align(64))]
+ struct Align64Small(u8);
+
+ check_thin_sized(|| Align64Small::new(50));
+ check_thin_dyn(|| Align64Small::new(50));
+}
+
+define_test! {
+ @test_name: align64med;
+ #[repr(align(64))]
+ struct Align64Med([u8; 64]);
+ check_thin_sized(|| Align64Med::new([10; 64]));
+ check_thin_dyn(|| Align64Med::new([10; 64]));
+}
+
+define_test! {
+ @test_name: align64_size_not_pow2;
+ #[repr(align(64))]
+ struct Align64NotPow2Size([u8; 192]);
+
+ check_thin_sized(|| Align64NotPow2Size::new([10; 192]));
+ check_thin_dyn(|| Align64NotPow2Size::new([10; 192]));
+}
+
+define_test! {
+ @test_name: align64big;
+ #[repr(align(64))]
+ struct Align64Big([u8; 256]);
+
+ check_thin_sized(|| Align64Big::new([10; 256]));
+ check_thin_dyn(|| Align64Big::new([10; 256]));
+}
diff --git a/library/alloc/tests/vec.rs b/library/alloc/tests/vec.rs
new file mode 100644
index 000000000..b797e2375
--- /dev/null
+++ b/library/alloc/tests/vec.rs
@@ -0,0 +1,2436 @@
+use core::alloc::{Allocator, Layout};
+use core::iter::IntoIterator;
+use core::ptr::NonNull;
+use std::alloc::System;
+use std::assert_matches::assert_matches;
+use std::borrow::Cow;
+use std::cell::Cell;
+use std::collections::TryReserveErrorKind::*;
+use std::fmt::Debug;
+use std::iter::InPlaceIterable;
+use std::mem::{size_of, swap};
+use std::ops::Bound::*;
+use std::panic::{catch_unwind, AssertUnwindSafe};
+use std::rc::Rc;
+use std::sync::atomic::{AtomicU32, Ordering};
+use std::vec::{Drain, IntoIter};
+
+struct DropCounter<'a> {
+ count: &'a mut u32,
+}
+
+impl Drop for DropCounter<'_> {
+ fn drop(&mut self) {
+ *self.count += 1;
+ }
+}
+
+#[test]
+fn test_small_vec_struct() {
+ assert_eq!(size_of::<Vec<u8>>(), size_of::<usize>() * 3);
+}
+
+#[test]
+fn test_double_drop() {
+ struct TwoVec<T> {
+ x: Vec<T>,
+ y: Vec<T>,
+ }
+
+ let (mut count_x, mut count_y) = (0, 0);
+ {
+ let mut tv = TwoVec { x: Vec::new(), y: Vec::new() };
+ tv.x.push(DropCounter { count: &mut count_x });
+ tv.y.push(DropCounter { count: &mut count_y });
+
+ // If Vec had a drop flag, here is where it would be zeroed.
+ // Instead, it should rely on its internal state to prevent
+ // doing anything significant when dropped multiple times.
+ drop(tv.x);
+
+ // Here tv goes out of scope, tv.y should be dropped, but not tv.x.
+ }
+
+ assert_eq!(count_x, 1);
+ assert_eq!(count_y, 1);
+}
+
+#[test]
+fn test_reserve() {
+ let mut v = Vec::new();
+ assert_eq!(v.capacity(), 0);
+
+ v.reserve(2);
+ assert!(v.capacity() >= 2);
+
+ for i in 0..16 {
+ v.push(i);
+ }
+
+ assert!(v.capacity() >= 16);
+ v.reserve(16);
+ assert!(v.capacity() >= 32);
+
+ v.push(16);
+
+ v.reserve(16);
+ assert!(v.capacity() >= 33)
+}
+
+#[test]
+fn test_zst_capacity() {
+ assert_eq!(Vec::<()>::new().capacity(), usize::MAX);
+}
+
+#[test]
+fn test_indexing() {
+ let v: Vec<isize> = vec![10, 20];
+ assert_eq!(v[0], 10);
+ assert_eq!(v[1], 20);
+ let mut x: usize = 0;
+ assert_eq!(v[x], 10);
+ assert_eq!(v[x + 1], 20);
+ x = x + 1;
+ assert_eq!(v[x], 20);
+ assert_eq!(v[x - 1], 10);
+}
+
+#[test]
+fn test_debug_fmt() {
+ let vec1: Vec<isize> = vec![];
+ assert_eq!("[]", format!("{:?}", vec1));
+
+ let vec2 = vec![0, 1];
+ assert_eq!("[0, 1]", format!("{:?}", vec2));
+
+ let slice: &[isize] = &[4, 5];
+ assert_eq!("[4, 5]", format!("{slice:?}"));
+}
+
+#[test]
+fn test_push() {
+ let mut v = vec![];
+ v.push(1);
+ assert_eq!(v, [1]);
+ v.push(2);
+ assert_eq!(v, [1, 2]);
+ v.push(3);
+ assert_eq!(v, [1, 2, 3]);
+}
+
+#[test]
+fn test_extend() {
+ let mut v = Vec::new();
+ let mut w = Vec::new();
+
+ v.extend(w.clone());
+ assert_eq!(v, &[]);
+
+ v.extend(0..3);
+ for i in 0..3 {
+ w.push(i)
+ }
+
+ assert_eq!(v, w);
+
+ v.extend(3..10);
+ for i in 3..10 {
+ w.push(i)
+ }
+
+ assert_eq!(v, w);
+
+ v.extend(w.clone()); // specializes to `append`
+ assert!(v.iter().eq(w.iter().chain(w.iter())));
+
+ // Zero sized types
+ #[derive(PartialEq, Debug)]
+ struct Foo;
+
+ let mut a = Vec::new();
+ let b = vec![Foo, Foo];
+
+ a.extend(b);
+ assert_eq!(a, &[Foo, Foo]);
+
+ // Double drop
+ let mut count_x = 0;
+ {
+ let mut x = Vec::new();
+ let y = vec![DropCounter { count: &mut count_x }];
+ x.extend(y);
+ }
+ assert_eq!(count_x, 1);
+}
+
+#[test]
+fn test_extend_from_slice() {
+ let a: Vec<isize> = vec![1, 2, 3, 4, 5];
+ let b: Vec<isize> = vec![6, 7, 8, 9, 0];
+
+ let mut v: Vec<isize> = a;
+
+ v.extend_from_slice(&b);
+
+ assert_eq!(v, [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]);
+}
+
+#[test]
+fn test_extend_ref() {
+ let mut v = vec![1, 2];
+ v.extend(&[3, 4, 5]);
+
+ assert_eq!(v.len(), 5);
+ assert_eq!(v, [1, 2, 3, 4, 5]);
+
+ let w = vec![6, 7];
+ v.extend(&w);
+
+ assert_eq!(v.len(), 7);
+ assert_eq!(v, [1, 2, 3, 4, 5, 6, 7]);
+}
+
+#[test]
+fn test_slice_from_ref() {
+ let values = vec![1, 2, 3, 4, 5];
+ let slice = &values[1..3];
+
+ assert_eq!(slice, [2, 3]);
+}
+
+#[test]
+fn test_slice_from_mut() {
+ let mut values = vec![1, 2, 3, 4, 5];
+ {
+ let slice = &mut values[2..];
+ assert!(slice == [3, 4, 5]);
+ for p in slice {
+ *p += 2;
+ }
+ }
+
+ assert!(values == [1, 2, 5, 6, 7]);
+}
+
+#[test]
+fn test_slice_to_mut() {
+ let mut values = vec![1, 2, 3, 4, 5];
+ {
+ let slice = &mut values[..2];
+ assert!(slice == [1, 2]);
+ for p in slice {
+ *p += 1;
+ }
+ }
+
+ assert!(values == [2, 3, 3, 4, 5]);
+}
+
+#[test]
+fn test_split_at_mut() {
+ let mut values = vec![1, 2, 3, 4, 5];
+ {
+ let (left, right) = values.split_at_mut(2);
+ {
+ let left: &[_] = left;
+ assert!(&left[..left.len()] == &[1, 2]);
+ }
+ for p in left {
+ *p += 1;
+ }
+
+ {
+ let right: &[_] = right;
+ assert!(&right[..right.len()] == &[3, 4, 5]);
+ }
+ for p in right {
+ *p += 2;
+ }
+ }
+
+ assert_eq!(values, [2, 3, 5, 6, 7]);
+}
+
+#[test]
+fn test_clone() {
+ let v: Vec<i32> = vec![];
+ let w = vec![1, 2, 3];
+
+ assert_eq!(v, v.clone());
+
+ let z = w.clone();
+ assert_eq!(w, z);
+ // they should be disjoint in memory.
+ assert!(w.as_ptr() != z.as_ptr())
+}
+
+#[test]
+fn test_clone_from() {
+ let mut v = vec![];
+ let three: Vec<Box<_>> = vec![Box::new(1), Box::new(2), Box::new(3)];
+ let two: Vec<Box<_>> = vec![Box::new(4), Box::new(5)];
+ // zero, long
+ v.clone_from(&three);
+ assert_eq!(v, three);
+
+ // equal
+ v.clone_from(&three);
+ assert_eq!(v, three);
+
+ // long, short
+ v.clone_from(&two);
+ assert_eq!(v, two);
+
+ // short, long
+ v.clone_from(&three);
+ assert_eq!(v, three)
+}
+
+#[test]
+fn test_retain() {
+ let mut vec = vec![1, 2, 3, 4];
+ vec.retain(|&x| x % 2 == 0);
+ assert_eq!(vec, [2, 4]);
+}
+
+#[test]
+fn test_retain_pred_panic_with_hole() {
+ let v = (0..5).map(Rc::new).collect::<Vec<_>>();
+ catch_unwind(AssertUnwindSafe(|| {
+ let mut v = v.clone();
+ v.retain(|r| match **r {
+ 0 => true,
+ 1 => false,
+ 2 => true,
+ _ => panic!(),
+ });
+ }))
+ .unwrap_err();
+ // Everything is dropped when predicate panicked.
+ assert!(v.iter().all(|r| Rc::strong_count(r) == 1));
+}
+
+#[test]
+fn test_retain_pred_panic_no_hole() {
+ let v = (0..5).map(Rc::new).collect::<Vec<_>>();
+ catch_unwind(AssertUnwindSafe(|| {
+ let mut v = v.clone();
+ v.retain(|r| match **r {
+ 0 | 1 | 2 => true,
+ _ => panic!(),
+ });
+ }))
+ .unwrap_err();
+ // Everything is dropped when predicate panicked.
+ assert!(v.iter().all(|r| Rc::strong_count(r) == 1));
+}
+
+#[test]
+fn test_retain_drop_panic() {
+ struct Wrap(Rc<i32>);
+
+ impl Drop for Wrap {
+ fn drop(&mut self) {
+ if *self.0 == 3 {
+ panic!();
+ }
+ }
+ }
+
+ let v = (0..5).map(|x| Rc::new(x)).collect::<Vec<_>>();
+ catch_unwind(AssertUnwindSafe(|| {
+ let mut v = v.iter().map(|r| Wrap(r.clone())).collect::<Vec<_>>();
+ v.retain(|w| match *w.0 {
+ 0 => true,
+ 1 => false,
+ 2 => true,
+ 3 => false, // Drop panic.
+ _ => true,
+ });
+ }))
+ .unwrap_err();
+ // Other elements are dropped when `drop` of one element panicked.
+ // The panicked wrapper also has its Rc dropped.
+ assert!(v.iter().all(|r| Rc::strong_count(r) == 1));
+}
+
+#[test]
+fn test_dedup() {
+ fn case(a: Vec<i32>, b: Vec<i32>) {
+ let mut v = a;
+ v.dedup();
+ assert_eq!(v, b);
+ }
+ case(vec![], vec![]);
+ case(vec![1], vec![1]);
+ case(vec![1, 1], vec![1]);
+ case(vec![1, 2, 3], vec![1, 2, 3]);
+ case(vec![1, 1, 2, 3], vec![1, 2, 3]);
+ case(vec![1, 2, 2, 3], vec![1, 2, 3]);
+ case(vec![1, 2, 3, 3], vec![1, 2, 3]);
+ case(vec![1, 1, 2, 2, 2, 3, 3], vec![1, 2, 3]);
+}
+
+#[test]
+fn test_dedup_by_key() {
+ fn case(a: Vec<i32>, b: Vec<i32>) {
+ let mut v = a;
+ v.dedup_by_key(|i| *i / 10);
+ assert_eq!(v, b);
+ }
+ case(vec![], vec![]);
+ case(vec![10], vec![10]);
+ case(vec![10, 11], vec![10]);
+ case(vec![10, 20, 30], vec![10, 20, 30]);
+ case(vec![10, 11, 20, 30], vec![10, 20, 30]);
+ case(vec![10, 20, 21, 30], vec![10, 20, 30]);
+ case(vec![10, 20, 30, 31], vec![10, 20, 30]);
+ case(vec![10, 11, 20, 21, 22, 30, 31], vec![10, 20, 30]);
+}
+
+#[test]
+fn test_dedup_by() {
+ let mut vec = vec!["foo", "bar", "Bar", "baz", "bar"];
+ vec.dedup_by(|a, b| a.eq_ignore_ascii_case(b));
+
+ assert_eq!(vec, ["foo", "bar", "baz", "bar"]);
+
+ let mut vec = vec![("foo", 1), ("foo", 2), ("bar", 3), ("bar", 4), ("bar", 5)];
+ vec.dedup_by(|a, b| {
+ a.0 == b.0 && {
+ b.1 += a.1;
+ true
+ }
+ });
+
+ assert_eq!(vec, [("foo", 3), ("bar", 12)]);
+}
+
+#[test]
+fn test_dedup_unique() {
+ let mut v0: Vec<Box<_>> = vec![Box::new(1), Box::new(1), Box::new(2), Box::new(3)];
+ v0.dedup();
+ let mut v1: Vec<Box<_>> = vec![Box::new(1), Box::new(2), Box::new(2), Box::new(3)];
+ v1.dedup();
+ let mut v2: Vec<Box<_>> = vec![Box::new(1), Box::new(2), Box::new(3), Box::new(3)];
+ v2.dedup();
+    // If the boxed pointers were leaked or otherwise misused, valgrind
+    // and/or the runtime should raise errors.
+}
+
+#[test]
+fn zero_sized_values() {
+ let mut v = Vec::new();
+ assert_eq!(v.len(), 0);
+ v.push(());
+ assert_eq!(v.len(), 1);
+ v.push(());
+ assert_eq!(v.len(), 2);
+ assert_eq!(v.pop(), Some(()));
+ assert_eq!(v.pop(), Some(()));
+ assert_eq!(v.pop(), None);
+
+ assert_eq!(v.iter().count(), 0);
+ v.push(());
+ assert_eq!(v.iter().count(), 1);
+ v.push(());
+ assert_eq!(v.iter().count(), 2);
+
+ for &() in &v {}
+
+ assert_eq!(v.iter_mut().count(), 2);
+ v.push(());
+ assert_eq!(v.iter_mut().count(), 3);
+ v.push(());
+ assert_eq!(v.iter_mut().count(), 4);
+
+ for &mut () in &mut v {}
+ unsafe {
+ v.set_len(0);
+ }
+ assert_eq!(v.iter_mut().count(), 0);
+}
+
+#[test]
+fn test_partition() {
+ assert_eq!([].into_iter().partition(|x: &i32| *x < 3), (vec![], vec![]));
+ assert_eq!([1, 2, 3].into_iter().partition(|x| *x < 4), (vec![1, 2, 3], vec![]));
+ assert_eq!([1, 2, 3].into_iter().partition(|x| *x < 2), (vec![1], vec![2, 3]));
+ assert_eq!([1, 2, 3].into_iter().partition(|x| *x < 0), (vec![], vec![1, 2, 3]));
+}
+
+#[test]
+fn test_zip_unzip() {
+ let z1 = vec![(1, 4), (2, 5), (3, 6)];
+
+ let (left, right): (Vec<_>, Vec<_>) = z1.iter().cloned().unzip();
+
+ assert_eq!((1, 4), (left[0], right[0]));
+ assert_eq!((2, 5), (left[1], right[1]));
+ assert_eq!((3, 6), (left[2], right[2]));
+}
+
+#[test]
+fn test_cmp() {
+ let x: &[isize] = &[1, 2, 3, 4, 5];
+ let cmp: &[isize] = &[1, 2, 3, 4, 5];
+ assert_eq!(&x[..], cmp);
+ let cmp: &[isize] = &[3, 4, 5];
+ assert_eq!(&x[2..], cmp);
+ let cmp: &[isize] = &[1, 2, 3];
+ assert_eq!(&x[..3], cmp);
+ let cmp: &[isize] = &[2, 3, 4];
+ assert_eq!(&x[1..4], cmp);
+
+ let x: Vec<isize> = vec![1, 2, 3, 4, 5];
+ let cmp: &[isize] = &[1, 2, 3, 4, 5];
+ assert_eq!(&x[..], cmp);
+ let cmp: &[isize] = &[3, 4, 5];
+ assert_eq!(&x[2..], cmp);
+ let cmp: &[isize] = &[1, 2, 3];
+ assert_eq!(&x[..3], cmp);
+ let cmp: &[isize] = &[2, 3, 4];
+ assert_eq!(&x[1..4], cmp);
+}
+
+#[test]
+fn test_vec_truncate_drop() {
+ static mut DROPS: u32 = 0;
+ struct Elem(i32);
+ impl Drop for Elem {
+ fn drop(&mut self) {
+ unsafe {
+ DROPS += 1;
+ }
+ }
+ }
+
+ let mut v = vec![Elem(1), Elem(2), Elem(3), Elem(4), Elem(5)];
+ assert_eq!(unsafe { DROPS }, 0);
+ v.truncate(3);
+ assert_eq!(unsafe { DROPS }, 2);
+ v.truncate(0);
+ assert_eq!(unsafe { DROPS }, 5);
+}
+
+#[test]
+#[should_panic]
+fn test_vec_truncate_fail() {
+ struct BadElem(i32);
+ impl Drop for BadElem {
+ fn drop(&mut self) {
+ let BadElem(ref mut x) = *self;
+ if *x == 0xbadbeef {
+ panic!("BadElem panic: 0xbadbeef")
+ }
+ }
+ }
+
+ let mut v = vec![BadElem(1), BadElem(2), BadElem(0xbadbeef), BadElem(4)];
+ v.truncate(0);
+}
+
+#[test]
+fn test_index() {
+ let vec = vec![1, 2, 3];
+ assert!(vec[1] == 2);
+}
+
+#[test]
+#[should_panic]
+fn test_index_out_of_bounds() {
+ let vec = vec![1, 2, 3];
+ let _ = vec[3];
+}
+
+#[test]
+#[should_panic]
+fn test_slice_out_of_bounds_1() {
+ let x = vec![1, 2, 3, 4, 5];
+ let _ = &x[!0..];
+}
+
+#[test]
+#[should_panic]
+fn test_slice_out_of_bounds_2() {
+ let x = vec![1, 2, 3, 4, 5];
+ let _ = &x[..6];
+}
+
+#[test]
+#[should_panic]
+fn test_slice_out_of_bounds_3() {
+ let x = vec![1, 2, 3, 4, 5];
+ let _ = &x[!0..4];
+}
+
+#[test]
+#[should_panic]
+fn test_slice_out_of_bounds_4() {
+ let x = vec![1, 2, 3, 4, 5];
+ let _ = &x[1..6];
+}
+
+#[test]
+#[should_panic]
+fn test_slice_out_of_bounds_5() {
+ let x = vec![1, 2, 3, 4, 5];
+ let _ = &x[3..2];
+}
+
+#[test]
+#[should_panic]
+fn test_swap_remove_empty() {
+ let mut vec = Vec::<i32>::new();
+ vec.swap_remove(0);
+}
+
+#[test]
+fn test_move_items() {
+ let vec = vec![1, 2, 3];
+ let mut vec2 = vec![];
+ for i in vec {
+ vec2.push(i);
+ }
+ assert_eq!(vec2, [1, 2, 3]);
+}
+
+#[test]
+fn test_move_items_reverse() {
+ let vec = vec![1, 2, 3];
+ let mut vec2 = vec![];
+ for i in vec.into_iter().rev() {
+ vec2.push(i);
+ }
+ assert_eq!(vec2, [3, 2, 1]);
+}
+
+#[test]
+fn test_move_items_zero_sized() {
+ let vec = vec![(), (), ()];
+ let mut vec2 = vec![];
+ for i in vec {
+ vec2.push(i);
+ }
+ assert_eq!(vec2, [(), (), ()]);
+}
+
+#[test]
+fn test_drain_empty_vec() {
+ let mut vec: Vec<i32> = vec![];
+ let mut vec2: Vec<i32> = vec![];
+ for i in vec.drain(..) {
+ vec2.push(i);
+ }
+ assert!(vec.is_empty());
+ assert!(vec2.is_empty());
+}
+
+#[test]
+fn test_drain_items() {
+ let mut vec = vec![1, 2, 3];
+ let mut vec2 = vec![];
+ for i in vec.drain(..) {
+ vec2.push(i);
+ }
+ assert_eq!(vec, []);
+ assert_eq!(vec2, [1, 2, 3]);
+}
+
+#[test]
+fn test_drain_items_reverse() {
+ let mut vec = vec![1, 2, 3];
+ let mut vec2 = vec![];
+ for i in vec.drain(..).rev() {
+ vec2.push(i);
+ }
+ assert_eq!(vec, []);
+ assert_eq!(vec2, [3, 2, 1]);
+}
+
+#[test]
+fn test_drain_items_zero_sized() {
+ let mut vec = vec![(), (), ()];
+ let mut vec2 = vec![];
+ for i in vec.drain(..) {
+ vec2.push(i);
+ }
+ assert_eq!(vec, []);
+ assert_eq!(vec2, [(), (), ()]);
+}
+
+#[test]
+#[should_panic]
+fn test_drain_out_of_bounds() {
+ let mut v = vec![1, 2, 3, 4, 5];
+ v.drain(5..6);
+}
+
+#[test]
+fn test_drain_range() {
+ let mut v = vec![1, 2, 3, 4, 5];
+ for _ in v.drain(4..) {}
+ assert_eq!(v, &[1, 2, 3, 4]);
+
+ let mut v: Vec<_> = (1..6).map(|x| x.to_string()).collect();
+ for _ in v.drain(1..4) {}
+ assert_eq!(v, &[1.to_string(), 5.to_string()]);
+
+ let mut v: Vec<_> = (1..6).map(|x| x.to_string()).collect();
+ for _ in v.drain(1..4).rev() {}
+ assert_eq!(v, &[1.to_string(), 5.to_string()]);
+
+ let mut v: Vec<_> = vec![(); 5];
+ for _ in v.drain(1..4).rev() {}
+ assert_eq!(v, &[(), ()]);
+}
+
+#[test]
+fn test_drain_inclusive_range() {
+ let mut v = vec!['a', 'b', 'c', 'd', 'e'];
+ for _ in v.drain(1..=3) {}
+ assert_eq!(v, &['a', 'e']);
+
+ let mut v: Vec<_> = (0..=5).map(|x| x.to_string()).collect();
+ for _ in v.drain(1..=5) {}
+ assert_eq!(v, &["0".to_string()]);
+
+ let mut v: Vec<String> = (0..=5).map(|x| x.to_string()).collect();
+ for _ in v.drain(0..=5) {}
+ assert_eq!(v, Vec::<String>::new());
+
+ let mut v: Vec<_> = (0..=5).map(|x| x.to_string()).collect();
+ for _ in v.drain(0..=3) {}
+ assert_eq!(v, &["4".to_string(), "5".to_string()]);
+
+ let mut v: Vec<_> = (0..=1).map(|x| x.to_string()).collect();
+ for _ in v.drain(..=0) {}
+ assert_eq!(v, &["1".to_string()]);
+}
+
+#[test]
+fn test_drain_max_vec_size() {
+ let mut v = Vec::<()>::with_capacity(usize::MAX);
+ unsafe {
+ v.set_len(usize::MAX);
+ }
+ for _ in v.drain(usize::MAX - 1..) {}
+ assert_eq!(v.len(), usize::MAX - 1);
+
+ let mut v = Vec::<()>::with_capacity(usize::MAX);
+ unsafe {
+ v.set_len(usize::MAX);
+ }
+ for _ in v.drain(usize::MAX - 1..=usize::MAX - 1) {}
+ assert_eq!(v.len(), usize::MAX - 1);
+}
+
+#[test]
+#[should_panic]
+fn test_drain_index_overflow() {
+ let mut v = Vec::<()>::with_capacity(usize::MAX);
+ unsafe {
+ v.set_len(usize::MAX);
+ }
+ v.drain(0..=usize::MAX);
+}
+
+#[test]
+#[should_panic]
+fn test_drain_inclusive_out_of_bounds() {
+ let mut v = vec![1, 2, 3, 4, 5];
+ v.drain(5..=5);
+}
+
+#[test]
+#[should_panic]
+fn test_drain_start_overflow() {
+ let mut v = vec![1, 2, 3];
+ v.drain((Excluded(usize::MAX), Included(0)));
+}
+
+#[test]
+#[should_panic]
+fn test_drain_end_overflow() {
+ let mut v = vec![1, 2, 3];
+ v.drain((Included(0), Included(usize::MAX)));
+}
+
+#[test]
+fn test_drain_leak() {
+ static mut DROPS: i32 = 0;
+
+ #[derive(Debug, PartialEq)]
+ struct D(u32, bool);
+
+ impl Drop for D {
+ fn drop(&mut self) {
+ unsafe {
+ DROPS += 1;
+ }
+
+ if self.1 {
+ panic!("panic in `drop`");
+ }
+ }
+ }
+
+ let mut v = vec![
+ D(0, false),
+ D(1, false),
+ D(2, false),
+ D(3, false),
+ D(4, true),
+ D(5, false),
+ D(6, false),
+ ];
+
+ catch_unwind(AssertUnwindSafe(|| {
+ v.drain(2..=5);
+ }))
+ .ok();
+
+ assert_eq!(unsafe { DROPS }, 4);
+ assert_eq!(v, vec![D(0, false), D(1, false), D(6, false),]);
+}
+
+#[test]
+fn test_splice() {
+ let mut v = vec![1, 2, 3, 4, 5];
+ let a = [10, 11, 12];
+ v.splice(2..4, a);
+ assert_eq!(v, &[1, 2, 10, 11, 12, 5]);
+ v.splice(1..3, Some(20));
+ assert_eq!(v, &[1, 20, 11, 12, 5]);
+}
+
+#[test]
+fn test_splice_inclusive_range() {
+ let mut v = vec![1, 2, 3, 4, 5];
+ let a = [10, 11, 12];
+ let t1: Vec<_> = v.splice(2..=3, a).collect();
+ assert_eq!(v, &[1, 2, 10, 11, 12, 5]);
+ assert_eq!(t1, &[3, 4]);
+ let t2: Vec<_> = v.splice(1..=2, Some(20)).collect();
+ assert_eq!(v, &[1, 20, 11, 12, 5]);
+ assert_eq!(t2, &[2, 10]);
+}
+
+#[test]
+#[should_panic]
+fn test_splice_out_of_bounds() {
+ let mut v = vec![1, 2, 3, 4, 5];
+ let a = [10, 11, 12];
+ v.splice(5..6, a);
+}
+
+#[test]
+#[should_panic]
+fn test_splice_inclusive_out_of_bounds() {
+ let mut v = vec![1, 2, 3, 4, 5];
+ let a = [10, 11, 12];
+ v.splice(5..=5, a);
+}
+
+#[test]
+fn test_splice_items_zero_sized() {
+ let mut vec = vec![(), (), ()];
+ let vec2 = vec![];
+ let t: Vec<_> = vec.splice(1..2, vec2.iter().cloned()).collect();
+ assert_eq!(vec, &[(), ()]);
+ assert_eq!(t, &[()]);
+}
+
+#[test]
+fn test_splice_unbounded() {
+ let mut vec = vec![1, 2, 3, 4, 5];
+ let t: Vec<_> = vec.splice(.., None).collect();
+ assert_eq!(vec, &[]);
+ assert_eq!(t, &[1, 2, 3, 4, 5]);
+}
+
+#[test]
+fn test_splice_forget() {
+ let mut v = vec![1, 2, 3, 4, 5];
+ let a = [10, 11, 12];
+ std::mem::forget(v.splice(2..4, a));
+ assert_eq!(v, &[1, 2]);
+}
+
+#[test]
+fn test_into_boxed_slice() {
+ let xs = vec![1, 2, 3];
+ let ys = xs.into_boxed_slice();
+ assert_eq!(&*ys, [1, 2, 3]);
+}
+
+#[test]
+fn test_append() {
+ let mut vec = vec![1, 2, 3];
+ let mut vec2 = vec![4, 5, 6];
+ vec.append(&mut vec2);
+ assert_eq!(vec, [1, 2, 3, 4, 5, 6]);
+ assert_eq!(vec2, []);
+}
+
+#[test]
+fn test_split_off() {
+ let mut vec = vec![1, 2, 3, 4, 5, 6];
+ let orig_capacity = vec.capacity();
+ let vec2 = vec.split_off(4);
+ assert_eq!(vec, [1, 2, 3, 4]);
+ assert_eq!(vec2, [5, 6]);
+ assert_eq!(vec.capacity(), orig_capacity);
+}
+
+#[test]
+fn test_split_off_take_all() {
+ let mut vec = vec![1, 2, 3, 4, 5, 6];
+ let orig_ptr = vec.as_ptr();
+ let orig_capacity = vec.capacity();
+ let vec2 = vec.split_off(0);
+ assert_eq!(vec, []);
+ assert_eq!(vec2, [1, 2, 3, 4, 5, 6]);
+ assert_eq!(vec.capacity(), orig_capacity);
+ assert_eq!(vec2.as_ptr(), orig_ptr);
+}
+
+#[test]
+fn test_into_iter_as_slice() {
+ let vec = vec!['a', 'b', 'c'];
+ let mut into_iter = vec.into_iter();
+ assert_eq!(into_iter.as_slice(), &['a', 'b', 'c']);
+ let _ = into_iter.next().unwrap();
+ assert_eq!(into_iter.as_slice(), &['b', 'c']);
+ let _ = into_iter.next().unwrap();
+ let _ = into_iter.next().unwrap();
+ assert_eq!(into_iter.as_slice(), &[]);
+}
+
+#[test]
+fn test_into_iter_as_mut_slice() {
+ let vec = vec!['a', 'b', 'c'];
+ let mut into_iter = vec.into_iter();
+ assert_eq!(into_iter.as_slice(), &['a', 'b', 'c']);
+ into_iter.as_mut_slice()[0] = 'x';
+ into_iter.as_mut_slice()[1] = 'y';
+ assert_eq!(into_iter.next().unwrap(), 'x');
+ assert_eq!(into_iter.as_slice(), &['y', 'c']);
+}
+
+#[test]
+fn test_into_iter_debug() {
+ let vec = vec!['a', 'b', 'c'];
+ let into_iter = vec.into_iter();
+ let debug = format!("{into_iter:?}");
+ assert_eq!(debug, "IntoIter(['a', 'b', 'c'])");
+}
+
+#[test]
+fn test_into_iter_count() {
+ assert_eq!([1, 2, 3].into_iter().count(), 3);
+}
+
+#[test]
+fn test_into_iter_next_chunk() {
+ let mut iter = b"lorem".to_vec().into_iter();
+
+ assert_eq!(iter.next_chunk().unwrap(), [b'l', b'o']); // N is inferred as 2
+ assert_eq!(iter.next_chunk().unwrap(), [b'r', b'e', b'm']); // N is inferred as 3
+ assert_eq!(iter.next_chunk::<4>().unwrap_err().as_slice(), &[]); // N is explicitly 4
+}
+
+#[test]
+fn test_into_iter_clone() {
+ fn iter_equal<I: Iterator<Item = i32>>(it: I, slice: &[i32]) {
+ let v: Vec<i32> = it.collect();
+ assert_eq!(&v[..], slice);
+ }
+ let mut it = [1, 2, 3].into_iter();
+ iter_equal(it.clone(), &[1, 2, 3]);
+ assert_eq!(it.next(), Some(1));
+ let mut it = it.rev();
+ iter_equal(it.clone(), &[3, 2]);
+ assert_eq!(it.next(), Some(3));
+ iter_equal(it.clone(), &[2]);
+ assert_eq!(it.next(), Some(2));
+ iter_equal(it.clone(), &[]);
+ assert_eq!(it.next(), None);
+}
+
+#[test]
+fn test_into_iter_leak() {
+ static mut DROPS: i32 = 0;
+
+ struct D(bool);
+
+ impl Drop for D {
+ fn drop(&mut self) {
+ unsafe {
+ DROPS += 1;
+ }
+
+ if self.0 {
+ panic!("panic in `drop`");
+ }
+ }
+ }
+
+ let v = vec![D(false), D(true), D(false)];
+
+ catch_unwind(move || drop(v.into_iter())).ok();
+
+ assert_eq!(unsafe { DROPS }, 3);
+}
+
+#[test]
+fn test_into_iter_advance_by() {
+ let mut i = [1, 2, 3, 4, 5].into_iter();
+ i.advance_by(0).unwrap();
+ i.advance_back_by(0).unwrap();
+ assert_eq!(i.as_slice(), [1, 2, 3, 4, 5]);
+
+ i.advance_by(1).unwrap();
+ i.advance_back_by(1).unwrap();
+ assert_eq!(i.as_slice(), [2, 3, 4]);
+
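+    // Only `[2, 3, 4]` remain at this point; the `Err(n)` values below report
+    // how many steps were actually taken before the iterator ran out.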
+ assert_eq!(i.advance_back_by(usize::MAX), Err(3));
+
+ assert_eq!(i.advance_by(usize::MAX), Err(0));
+
+ i.advance_by(0).unwrap();
+ i.advance_back_by(0).unwrap();
+
+ assert_eq!(i.len(), 0);
+}
+
+#[test]
+fn test_into_iter_drop_allocator() {
+ struct ReferenceCountedAllocator<'a>(DropCounter<'a>);
+
+ unsafe impl Allocator for ReferenceCountedAllocator<'_> {
+ fn allocate(&self, layout: Layout) -> Result<NonNull<[u8]>, core::alloc::AllocError> {
+ System.allocate(layout)
+ }
+
+ unsafe fn deallocate(&self, ptr: NonNull<u8>, layout: Layout) {
+ System.deallocate(ptr, layout)
+ }
+ }
+
+ let mut drop_count = 0;
+
+ let allocator = ReferenceCountedAllocator(DropCounter { count: &mut drop_count });
+ let _ = Vec::<u32, _>::new_in(allocator);
+ assert_eq!(drop_count, 1);
+
+ let allocator = ReferenceCountedAllocator(DropCounter { count: &mut drop_count });
+ let _ = Vec::<u32, _>::new_in(allocator).into_iter();
+ assert_eq!(drop_count, 2);
+}
+
+#[test]
+fn test_from_iter_specialization() {
+ let src: Vec<usize> = vec![0usize; 1];
+ let srcptr = src.as_ptr();
+ let sink = src.into_iter().collect::<Vec<_>>();
+ let sinkptr = sink.as_ptr();
+ assert_eq!(srcptr, sinkptr);
+}
+
+#[test]
+fn test_from_iter_partially_drained_in_place_specialization() {
+ let src: Vec<usize> = vec![0usize; 10];
+ let srcptr = src.as_ptr();
+ let mut iter = src.into_iter();
+ iter.next();
+ iter.next();
+ let sink = iter.collect::<Vec<_>>();
+ let sinkptr = sink.as_ptr();
+ assert_eq!(srcptr, sinkptr);
+}
+
+#[test]
+fn test_from_iter_specialization_with_iterator_adapters() {
+ fn assert_in_place_trait<T: InPlaceIterable>(_: &T) {}
+ let src: Vec<usize> = vec![0usize; 256];
+ let srcptr = src.as_ptr();
+ let iter = src
+ .into_iter()
+ .enumerate()
+ .map(|i| i.0 + i.1)
+ .zip(std::iter::repeat(1usize))
+ .map(|(a, b)| a + b)
+ .map_while(Option::Some)
+ .skip(1)
+ .map(|e| if e != usize::MAX { Ok(std::num::NonZeroUsize::new(e)) } else { Err(()) });
+ assert_in_place_trait(&iter);
+ let sink = iter.collect::<Result<Vec<_>, _>>().unwrap();
+ let sinkptr = sink.as_ptr();
+ assert_eq!(srcptr, sinkptr as *const usize);
+}
+
+#[test]
+fn test_from_iter_specialization_head_tail_drop() {
+ let drop_count: Vec<_> = (0..=2).map(|_| Rc::new(())).collect();
+ let src: Vec<_> = drop_count.iter().cloned().collect();
+ let srcptr = src.as_ptr();
+ let iter = src.into_iter();
+ let sink: Vec<_> = iter.skip(1).take(1).collect();
+ let sinkptr = sink.as_ptr();
+ assert_eq!(srcptr, sinkptr, "specialization was applied");
+ assert_eq!(Rc::strong_count(&drop_count[0]), 1, "front was dropped");
+ assert_eq!(Rc::strong_count(&drop_count[1]), 2, "one element was collected");
+ assert_eq!(Rc::strong_count(&drop_count[2]), 1, "tail was dropped");
+ assert_eq!(sink.len(), 1);
+}
+
+#[test]
+fn test_from_iter_specialization_panic_during_iteration_drops() {
+ let drop_count: Vec<_> = (0..=2).map(|_| Rc::new(())).collect();
+ let src: Vec<_> = drop_count.iter().cloned().collect();
+ let iter = src.into_iter();
+
+ let _ = std::panic::catch_unwind(AssertUnwindSafe(|| {
+ let _ = iter
+ .enumerate()
+ .filter_map(|(i, e)| {
+ if i == 1 {
+ std::panic!("aborting iteration");
+ }
+ Some(e)
+ })
+ .collect::<Vec<_>>();
+ }));
+
+ assert!(
+ drop_count.iter().map(Rc::strong_count).all(|count| count == 1),
+ "all items were dropped once"
+ );
+}
+
+#[test]
+fn test_from_iter_specialization_panic_during_drop_leaks() {
+ static mut DROP_COUNTER: usize = 0;
+
+ #[derive(Debug)]
+ enum Droppable {
+ DroppedTwice(Box<i32>),
+ PanicOnDrop,
+ }
+
+ impl Drop for Droppable {
+ fn drop(&mut self) {
+ match self {
+ Droppable::DroppedTwice(_) => {
+ unsafe {
+ DROP_COUNTER += 1;
+ }
+ println!("Dropping!")
+ }
+ Droppable::PanicOnDrop => {
+ if !std::thread::panicking() {
+ panic!();
+ }
+ }
+ }
+ }
+ }
+
+ let mut to_free: *mut Droppable = core::ptr::null_mut();
+ let mut cap = 0;
+
+ let _ = std::panic::catch_unwind(AssertUnwindSafe(|| {
+ let mut v = vec![Droppable::DroppedTwice(Box::new(123)), Droppable::PanicOnDrop];
+ to_free = v.as_mut_ptr();
+ cap = v.capacity();
+ let _ = v.into_iter().take(0).collect::<Vec<_>>();
+ }));
+
+ assert_eq!(unsafe { DROP_COUNTER }, 1);
+ // clean up the leak to keep miri happy
+ unsafe {
+ drop(Vec::from_raw_parts(to_free, 0, cap));
+ }
+}
+
+// regression test for issue #85322. Peekable previously implemented InPlaceIterable,
+// but due to an interaction with IntoIter's current Clone implementation it failed to uphold
+// the contract.
+#[test]
+fn test_collect_after_iterator_clone() {
+ let v = vec![0; 5];
+ let mut i = v.into_iter().map(|i| i + 1).peekable();
+ i.peek();
+ let v = i.clone().collect::<Vec<_>>();
+ assert_eq!(v, [1, 1, 1, 1, 1]);
+ assert!(v.len() <= v.capacity());
+}
+#[test]
+fn test_cow_from() {
+ let borrowed: &[_] = &["borrowed", "(slice)"];
+ let owned = vec!["owned", "(vec)"];
+ match (Cow::from(owned.clone()), Cow::from(borrowed)) {
+ (Cow::Owned(o), Cow::Borrowed(b)) => assert!(o == owned && b == borrowed),
+ _ => panic!("invalid `Cow::from`"),
+ }
+}
+
+#[test]
+fn test_from_cow() {
+ let borrowed: &[_] = &["borrowed", "(slice)"];
+ let owned = vec!["owned", "(vec)"];
+ assert_eq!(Vec::from(Cow::Borrowed(borrowed)), vec!["borrowed", "(slice)"]);
+ assert_eq!(Vec::from(Cow::Owned(owned)), vec!["owned", "(vec)"]);
+}
+
+#[allow(dead_code)]
+fn assert_covariance() {
+ fn drain<'new>(d: Drain<'static, &'static str>) -> Drain<'new, &'new str> {
+ d
+ }
+ fn into_iter<'new>(i: IntoIter<&'static str>) -> IntoIter<&'new str> {
+ i
+ }
+}
+
+#[test]
+fn from_into_inner() {
+ let vec = vec![1, 2, 3];
+ let ptr = vec.as_ptr();
+ let vec = vec.into_iter().collect::<Vec<_>>();
+ assert_eq!(vec, [1, 2, 3]);
+ assert_eq!(vec.as_ptr(), ptr);
+
+ let ptr = &vec[1] as *const _;
+ let mut it = vec.into_iter();
+ it.next().unwrap();
+ let vec = it.collect::<Vec<_>>();
+ assert_eq!(vec, [2, 3]);
+ assert!(ptr != vec.as_ptr());
+}
+
+#[test]
+fn overaligned_allocations() {
+ #[repr(align(256))]
+ struct Foo(usize);
+ let mut v = vec![Foo(273)];
+ for i in 0..0x1000 {
+ v.reserve_exact(i);
+ assert!(v[0].0 == 273);
+ assert!(v.as_ptr() as usize & 0xff == 0);
+ v.shrink_to_fit();
+ assert!(v[0].0 == 273);
+ assert!(v.as_ptr() as usize & 0xff == 0);
+ }
+}
+
+#[test]
+fn drain_filter_empty() {
+ let mut vec: Vec<i32> = vec![];
+
+ {
+ let mut iter = vec.drain_filter(|_| true);
+ assert_eq!(iter.size_hint(), (0, Some(0)));
+ assert_eq!(iter.next(), None);
+ assert_eq!(iter.size_hint(), (0, Some(0)));
+ assert_eq!(iter.next(), None);
+ assert_eq!(iter.size_hint(), (0, Some(0)));
+ }
+ assert_eq!(vec.len(), 0);
+ assert_eq!(vec, vec![]);
+}
+
+#[test]
+fn drain_filter_zst() {
+ let mut vec = vec![(), (), (), (), ()];
+ let initial_len = vec.len();
+ let mut count = 0;
+ {
+ let mut iter = vec.drain_filter(|_| true);
+ assert_eq!(iter.size_hint(), (0, Some(initial_len)));
+ while let Some(_) = iter.next() {
+ count += 1;
+ assert_eq!(iter.size_hint(), (0, Some(initial_len - count)));
+ }
+ assert_eq!(iter.size_hint(), (0, Some(0)));
+ assert_eq!(iter.next(), None);
+ assert_eq!(iter.size_hint(), (0, Some(0)));
+ }
+
+ assert_eq!(count, initial_len);
+ assert_eq!(vec.len(), 0);
+ assert_eq!(vec, vec![]);
+}
+
+#[test]
+fn drain_filter_false() {
+ let mut vec = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+
+ let initial_len = vec.len();
+ let mut count = 0;
+ {
+ let mut iter = vec.drain_filter(|_| false);
+ assert_eq!(iter.size_hint(), (0, Some(initial_len)));
+ for _ in iter.by_ref() {
+ count += 1;
+ }
+ assert_eq!(iter.size_hint(), (0, Some(0)));
+ assert_eq!(iter.next(), None);
+ assert_eq!(iter.size_hint(), (0, Some(0)));
+ }
+
+ assert_eq!(count, 0);
+ assert_eq!(vec.len(), initial_len);
+ assert_eq!(vec, vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10]);
+}
+
+#[test]
+fn drain_filter_true() {
+ let mut vec = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+
+ let initial_len = vec.len();
+ let mut count = 0;
+ {
+ let mut iter = vec.drain_filter(|_| true);
+ assert_eq!(iter.size_hint(), (0, Some(initial_len)));
+ while let Some(_) = iter.next() {
+ count += 1;
+ assert_eq!(iter.size_hint(), (0, Some(initial_len - count)));
+ }
+ assert_eq!(iter.size_hint(), (0, Some(0)));
+ assert_eq!(iter.next(), None);
+ assert_eq!(iter.size_hint(), (0, Some(0)));
+ }
+
+ assert_eq!(count, initial_len);
+ assert_eq!(vec.len(), 0);
+ assert_eq!(vec, vec![]);
+}
+
+#[test]
+fn drain_filter_complex() {
+ {
+ // [+xxx++++++xxxxx++++x+x++]
+ let mut vec = vec![
+ 1, 2, 4, 6, 7, 9, 11, 13, 15, 17, 18, 20, 22, 24, 26, 27, 29, 31, 33, 34, 35, 36, 37,
+ 39,
+ ];
+
+ let removed = vec.drain_filter(|x| *x % 2 == 0).collect::<Vec<_>>();
+ assert_eq!(removed.len(), 10);
+ assert_eq!(removed, vec![2, 4, 6, 18, 20, 22, 24, 26, 34, 36]);
+
+ assert_eq!(vec.len(), 14);
+ assert_eq!(vec, vec![1, 7, 9, 11, 13, 15, 17, 27, 29, 31, 33, 35, 37, 39]);
+ }
+
+ {
+ // [xxx++++++xxxxx++++x+x++]
+ let mut vec = vec![
+ 2, 4, 6, 7, 9, 11, 13, 15, 17, 18, 20, 22, 24, 26, 27, 29, 31, 33, 34, 35, 36, 37, 39,
+ ];
+
+ let removed = vec.drain_filter(|x| *x % 2 == 0).collect::<Vec<_>>();
+ assert_eq!(removed.len(), 10);
+ assert_eq!(removed, vec![2, 4, 6, 18, 20, 22, 24, 26, 34, 36]);
+
+ assert_eq!(vec.len(), 13);
+ assert_eq!(vec, vec![7, 9, 11, 13, 15, 17, 27, 29, 31, 33, 35, 37, 39]);
+ }
+
+ {
+ // [xxx++++++xxxxx++++x+x]
+ let mut vec =
+ vec![2, 4, 6, 7, 9, 11, 13, 15, 17, 18, 20, 22, 24, 26, 27, 29, 31, 33, 34, 35, 36];
+
+ let removed = vec.drain_filter(|x| *x % 2 == 0).collect::<Vec<_>>();
+ assert_eq!(removed.len(), 10);
+ assert_eq!(removed, vec![2, 4, 6, 18, 20, 22, 24, 26, 34, 36]);
+
+ assert_eq!(vec.len(), 11);
+ assert_eq!(vec, vec![7, 9, 11, 13, 15, 17, 27, 29, 31, 33, 35]);
+ }
+
+ {
+ // [xxxxxxxxxx+++++++++++]
+ let mut vec = vec![2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 1, 3, 5, 7, 9, 11, 13, 15, 17, 19];
+
+ let removed = vec.drain_filter(|x| *x % 2 == 0).collect::<Vec<_>>();
+ assert_eq!(removed.len(), 10);
+ assert_eq!(removed, vec![2, 4, 6, 8, 10, 12, 14, 16, 18, 20]);
+
+ assert_eq!(vec.len(), 10);
+ assert_eq!(vec, vec![1, 3, 5, 7, 9, 11, 13, 15, 17, 19]);
+ }
+
+ {
+ // [+++++++++++xxxxxxxxxx]
+ let mut vec = vec![1, 3, 5, 7, 9, 11, 13, 15, 17, 19, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20];
+
+ let removed = vec.drain_filter(|x| *x % 2 == 0).collect::<Vec<_>>();
+ assert_eq!(removed.len(), 10);
+ assert_eq!(removed, vec![2, 4, 6, 8, 10, 12, 14, 16, 18, 20]);
+
+ assert_eq!(vec.len(), 10);
+ assert_eq!(vec, vec![1, 3, 5, 7, 9, 11, 13, 15, 17, 19]);
+ }
+}
+
+// FIXME: re-enable emscripten once it can unwind again
+#[test]
+#[cfg(not(target_os = "emscripten"))]
+fn drain_filter_consumed_panic() {
+ use std::rc::Rc;
+ use std::sync::Mutex;
+
+ struct Check {
+ index: usize,
+ drop_counts: Rc<Mutex<Vec<usize>>>,
+ }
+
+ impl Drop for Check {
+ fn drop(&mut self) {
+ self.drop_counts.lock().unwrap()[self.index] += 1;
+ println!("drop: {}", self.index);
+ }
+ }
+
+ let check_count = 10;
+ let drop_counts = Rc::new(Mutex::new(vec![0_usize; check_count]));
+ let mut data: Vec<Check> = (0..check_count)
+ .map(|index| Check { index, drop_counts: Rc::clone(&drop_counts) })
+ .collect();
+
+ let _ = std::panic::catch_unwind(move || {
+ let filter = |c: &mut Check| {
+ if c.index == 2 {
+ panic!("panic at index: {}", c.index);
+ }
+            // Verify that even if the filter could panic again on another element,
+            // it would not cause a double panic and all elements of the vec
+            // would still be dropped exactly once.
+ if c.index == 4 {
+ panic!("panic at index: {}", c.index);
+ }
+ c.index < 6
+ };
+ let drain = data.drain_filter(filter);
+
+ // NOTE: The DrainFilter is explicitly consumed
+ drain.for_each(drop);
+ });
+
+ let drop_counts = drop_counts.lock().unwrap();
+ assert_eq!(check_count, drop_counts.len());
+
+ for (index, count) in drop_counts.iter().cloned().enumerate() {
+ assert_eq!(1, count, "unexpected drop count at index: {} (count: {})", index, count);
+ }
+}
+
+// FIXME: Re-enable emscripten once it can catch panics
+#[test]
+#[cfg(not(target_os = "emscripten"))]
+fn drain_filter_unconsumed_panic() {
+ use std::rc::Rc;
+ use std::sync::Mutex;
+
+ struct Check {
+ index: usize,
+ drop_counts: Rc<Mutex<Vec<usize>>>,
+ }
+
+ impl Drop for Check {
+ fn drop(&mut self) {
+ self.drop_counts.lock().unwrap()[self.index] += 1;
+ println!("drop: {}", self.index);
+ }
+ }
+
+ let check_count = 10;
+ let drop_counts = Rc::new(Mutex::new(vec![0_usize; check_count]));
+ let mut data: Vec<Check> = (0..check_count)
+ .map(|index| Check { index, drop_counts: Rc::clone(&drop_counts) })
+ .collect();
+
+ let _ = std::panic::catch_unwind(move || {
+ let filter = |c: &mut Check| {
+ if c.index == 2 {
+ panic!("panic at index: {}", c.index);
+ }
+            // Verify that even if the filter could panic again on another element,
+            // it would not cause a double panic and all elements of the vec
+            // would still be dropped exactly once.
+ if c.index == 4 {
+ panic!("panic at index: {}", c.index);
+ }
+ c.index < 6
+ };
+ let _drain = data.drain_filter(filter);
+
+ // NOTE: The DrainFilter is dropped without being consumed
+ });
+
+ let drop_counts = drop_counts.lock().unwrap();
+ assert_eq!(check_count, drop_counts.len());
+
+ for (index, count) in drop_counts.iter().cloned().enumerate() {
+ assert_eq!(1, count, "unexpected drop count at index: {} (count: {})", index, count);
+ }
+}
+
+#[test]
+fn drain_filter_unconsumed() {
+ let mut vec = vec![1, 2, 3, 4];
+ let drain = vec.drain_filter(|&mut x| x % 2 != 0);
+ drop(drain);
+ assert_eq!(vec, [2, 4]);
+}
+
+#[test]
+fn test_reserve_exact() {
+ // This is all the same as test_reserve
+
+ let mut v = Vec::new();
+ assert_eq!(v.capacity(), 0);
+
+ v.reserve_exact(2);
+ assert!(v.capacity() >= 2);
+
+ for i in 0..16 {
+ v.push(i);
+ }
+
+ assert!(v.capacity() >= 16);
+ v.reserve_exact(16);
+ assert!(v.capacity() >= 32);
+
+ v.push(16);
+
+ v.reserve_exact(16);
+ assert!(v.capacity() >= 33)
+}
+
+#[test]
+#[cfg_attr(miri, ignore)] // Miri does not support signalling OOM
+#[cfg_attr(target_os = "android", ignore)] // Android used in CI has a broken dlmalloc
+fn test_try_reserve() {
+ // These are the interesting cases:
+ // * exactly isize::MAX should never trigger a CapacityOverflow (can be OOM)
+ // * > isize::MAX should always fail
+ // * On 16/32-bit should CapacityOverflow
+ // * On 64-bit should OOM
+ // * overflow may trigger when adding `len` to `cap` (in number of elements)
+ // * overflow may trigger when multiplying `new_cap` by size_of::<T> (to get bytes)
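+    //
+    // Concretely: for the `Vec<u32>` case below, requesting `usize::MAX - 20`
+    // additional elements already overflows when the element count is converted
+    // to a byte count (multiplied by 4), before any allocator is consulted.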
+
+ const MAX_CAP: usize = isize::MAX as usize;
+ const MAX_USIZE: usize = usize::MAX;
+
+    // On 16/32-bit, we check that allocations don't exceed isize::MAX;
+    // on 64-bit, we assume the OS will report OOM for such a ridiculous size.
+    // Any platform that actually satisfied these requests would be broken
+    // anyway, since `ptr::offset` cannot address offsets beyond isize::MAX.
+ let guards_against_isize = usize::BITS < 64;
+
+ {
+ // Note: basic stuff is checked by test_reserve
+ let mut empty_bytes: Vec<u8> = Vec::new();
+
+ // Check isize::MAX doesn't count as an overflow
+ if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_CAP).map_err(|e| e.kind()) {
+ panic!("isize::MAX shouldn't trigger an overflow!");
+ }
+        // Play it again, Frank! (just to be sure)
+ if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_CAP).map_err(|e| e.kind()) {
+ panic!("isize::MAX shouldn't trigger an overflow!");
+ }
+
+ if guards_against_isize {
+ // Check isize::MAX + 1 does count as overflow
+ assert_matches!(
+ empty_bytes.try_reserve(MAX_CAP + 1).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "isize::MAX + 1 should trigger an overflow!"
+ );
+
+ // Check usize::MAX does count as overflow
+ assert_matches!(
+ empty_bytes.try_reserve(MAX_USIZE).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "usize::MAX should trigger an overflow!"
+ );
+ } else {
+ // Check isize::MAX + 1 is an OOM
+ assert_matches!(
+ empty_bytes.try_reserve(MAX_CAP + 1).map_err(|e| e.kind()),
+ Err(AllocError { .. }),
+ "isize::MAX + 1 should trigger an OOM!"
+ );
+
+ // Check usize::MAX is an OOM
+ assert_matches!(
+ empty_bytes.try_reserve(MAX_USIZE).map_err(|e| e.kind()),
+ Err(AllocError { .. }),
+ "usize::MAX should trigger an OOM!"
+ );
+ }
+ }
+
+ {
+ // Same basic idea, but with non-zero len
+ let mut ten_bytes: Vec<u8> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+
+ if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10).map_err(|e| e.kind()) {
+ panic!("isize::MAX shouldn't trigger an overflow!");
+ }
+ if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10).map_err(|e| e.kind()) {
+ panic!("isize::MAX shouldn't trigger an overflow!");
+ }
+ if guards_against_isize {
+ assert_matches!(
+ ten_bytes.try_reserve(MAX_CAP - 9).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "isize::MAX + 1 should trigger an overflow!"
+ );
+ } else {
+ assert_matches!(
+ ten_bytes.try_reserve(MAX_CAP - 9).map_err(|e| e.kind()),
+ Err(AllocError { .. }),
+ "isize::MAX + 1 should trigger an OOM!"
+ );
+ }
+ // Should always overflow in the add-to-len
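+        // (`len` is 10 here, and `10 + usize::MAX` does not fit in a `usize`.)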
+ assert_matches!(
+ ten_bytes.try_reserve(MAX_USIZE).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "usize::MAX should trigger an overflow!"
+ );
+ }
+
+ {
+ // Same basic idea, but with interesting type size
+ let mut ten_u32s: Vec<u32> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+
+ if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_CAP / 4 - 10).map_err(|e| e.kind())
+ {
+ panic!("isize::MAX shouldn't trigger an overflow!");
+ }
+ if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_CAP / 4 - 10).map_err(|e| e.kind())
+ {
+ panic!("isize::MAX shouldn't trigger an overflow!");
+ }
+ if guards_against_isize {
+ assert_matches!(
+ ten_u32s.try_reserve(MAX_CAP / 4 - 9).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "isize::MAX + 1 should trigger an overflow!"
+ );
+ } else {
+ assert_matches!(
+ ten_u32s.try_reserve(MAX_CAP / 4 - 9).map_err(|e| e.kind()),
+ Err(AllocError { .. }),
+ "isize::MAX + 1 should trigger an OOM!"
+ );
+ }
+ // Should fail in the mul-by-size
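+        // (`usize::MAX - 20` elements of 4 bytes each overflows the byte count.)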
+ assert_matches!(
+ ten_u32s.try_reserve(MAX_USIZE - 20).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "usize::MAX should trigger an overflow!"
+ );
+ }
+}
+
+#[test]
+#[cfg_attr(miri, ignore)] // Miri does not support signalling OOM
+#[cfg_attr(target_os = "android", ignore)] // Android used in CI has a broken dlmalloc
+fn test_try_reserve_exact() {
+ // This is exactly the same as test_try_reserve with the method changed.
+ // See that test for comments.
+
+ const MAX_CAP: usize = isize::MAX as usize;
+ const MAX_USIZE: usize = usize::MAX;
+
+ let guards_against_isize = size_of::<usize>() < 8;
+
+ {
+ let mut empty_bytes: Vec<u8> = Vec::new();
+
+ if let Err(CapacityOverflow) = empty_bytes.try_reserve_exact(MAX_CAP).map_err(|e| e.kind())
+ {
+ panic!("isize::MAX shouldn't trigger an overflow!");
+ }
+ if let Err(CapacityOverflow) = empty_bytes.try_reserve_exact(MAX_CAP).map_err(|e| e.kind())
+ {
+ panic!("isize::MAX shouldn't trigger an overflow!");
+ }
+
+ if guards_against_isize {
+ assert_matches!(
+ empty_bytes.try_reserve_exact(MAX_CAP + 1).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "isize::MAX + 1 should trigger an overflow!"
+ );
+
+ assert_matches!(
+ empty_bytes.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "usize::MAX should trigger an overflow!"
+ );
+ } else {
+ assert_matches!(
+ empty_bytes.try_reserve_exact(MAX_CAP + 1).map_err(|e| e.kind()),
+ Err(AllocError { .. }),
+ "isize::MAX + 1 should trigger an OOM!"
+ );
+
+ assert_matches!(
+ empty_bytes.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind()),
+ Err(AllocError { .. }),
+ "usize::MAX should trigger an OOM!"
+ );
+ }
+ }
+
+ {
+ let mut ten_bytes: Vec<u8> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+
+ if let Err(CapacityOverflow) =
+ ten_bytes.try_reserve_exact(MAX_CAP - 10).map_err(|e| e.kind())
+ {
+ panic!("isize::MAX shouldn't trigger an overflow!");
+ }
+ if let Err(CapacityOverflow) =
+ ten_bytes.try_reserve_exact(MAX_CAP - 10).map_err(|e| e.kind())
+ {
+ panic!("isize::MAX shouldn't trigger an overflow!");
+ }
+ if guards_against_isize {
+ assert_matches!(
+ ten_bytes.try_reserve_exact(MAX_CAP - 9).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "isize::MAX + 1 should trigger an overflow!"
+ );
+ } else {
+ assert_matches!(
+ ten_bytes.try_reserve_exact(MAX_CAP - 9).map_err(|e| e.kind()),
+ Err(AllocError { .. }),
+ "isize::MAX + 1 should trigger an OOM!"
+ );
+ }
+ assert_matches!(
+ ten_bytes.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "usize::MAX should trigger an overflow!"
+ );
+ }
+
+ {
+ let mut ten_u32s: Vec<u32> = vec![1, 2, 3, 4, 5, 6, 7, 8, 9, 10];
+
+ if let Err(CapacityOverflow) =
+ ten_u32s.try_reserve_exact(MAX_CAP / 4 - 10).map_err(|e| e.kind())
+ {
+ panic!("isize::MAX shouldn't trigger an overflow!");
+ }
+ if let Err(CapacityOverflow) =
+ ten_u32s.try_reserve_exact(MAX_CAP / 4 - 10).map_err(|e| e.kind())
+ {
+ panic!("isize::MAX shouldn't trigger an overflow!");
+ }
+ if guards_against_isize {
+ assert_matches!(
+ ten_u32s.try_reserve_exact(MAX_CAP / 4 - 9).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "isize::MAX + 1 should trigger an overflow!"
+ );
+ } else {
+ assert_matches!(
+ ten_u32s.try_reserve_exact(MAX_CAP / 4 - 9).map_err(|e| e.kind()),
+ Err(AllocError { .. }),
+ "isize::MAX + 1 should trigger an OOM!"
+ );
+ }
+ assert_matches!(
+ ten_u32s.try_reserve_exact(MAX_USIZE - 20).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "usize::MAX should trigger an overflow!"
+ );
+ }
+}
+
+#[test]
+fn test_stable_pointers() {
+ /// Pull an element from the iterator, then drop it.
+ /// Useful to cover both the `next` and `drop` paths of an iterator.
+ fn next_then_drop<I: Iterator>(mut i: I) {
+ i.next().unwrap();
+ drop(i);
+ }
+
+ // Test that, if we reserved enough space, adding and removing elements does not
+ // invalidate references into the vector (such as `v0`). This test also
+ // runs in Miri, which would detect such problems.
+ // Note that this test does *not* constitute a stable guarantee that all these functions do not
+ // reallocate! Only what is explicitly documented at
+ // <https://doc.rust-lang.org/nightly/std/vec/struct.Vec.html#guarantees> is stably guaranteed.
+ let mut v = Vec::with_capacity(128);
+ v.push(13);
+
+ // Laundering the lifetime -- we take care that `v` does not reallocate, so that's okay.
+ let v0 = &mut v[0];
+ let v0 = unsafe { &mut *(v0 as *mut _) };
+ // Now do a bunch of things and occasionally use `v0` again to assert it is still valid.
+
+ // Pushing/inserting and popping/removing
+ v.push(1);
+ v.push(2);
+ v.insert(1, 1);
+ assert_eq!(*v0, 13);
+ v.remove(1);
+ v.pop().unwrap();
+ assert_eq!(*v0, 13);
+ v.push(1);
+ v.swap_remove(1);
+ assert_eq!(v.len(), 2);
+ v.swap_remove(1); // swap_remove the last element
+ assert_eq!(*v0, 13);
+
+ // Appending
+ v.append(&mut vec![27, 19]);
+ assert_eq!(*v0, 13);
+
+ // Extending
+ v.extend_from_slice(&[1, 2]);
+ v.extend(&[1, 2]); // `slice::Iter` (with `T: Copy`) specialization
+ v.extend(vec![2, 3]); // `vec::IntoIter` specialization
+ v.extend(std::iter::once(3)); // `TrustedLen` specialization
+ v.extend(std::iter::empty::<i32>()); // `TrustedLen` specialization with empty iterator
+ v.extend(std::iter::once(3).filter(|_| true)); // base case
+ v.extend(std::iter::once(&3)); // `cloned` specialization
+ assert_eq!(*v0, 13);
+
+ // Truncation
+ v.truncate(2);
+ assert_eq!(*v0, 13);
+
+ // Resizing
+ v.resize_with(v.len() + 10, || 42);
+ assert_eq!(*v0, 13);
+ v.resize_with(2, || panic!());
+ assert_eq!(*v0, 13);
+
+ // No-op reservation
+ v.reserve(32);
+ v.reserve_exact(32);
+ assert_eq!(*v0, 13);
+
+ // Partial draining
+ v.resize_with(10, || 42);
+ next_then_drop(v.drain(5..));
+ assert_eq!(*v0, 13);
+
+ // Splicing
+ v.resize_with(10, || 42);
+ next_then_drop(v.splice(5.., vec![1, 2, 3, 4, 5])); // empty tail after range
+ assert_eq!(*v0, 13);
+ next_then_drop(v.splice(5..8, vec![1])); // replacement is smaller than original range
+ assert_eq!(*v0, 13);
+ next_then_drop(v.splice(5..6, [1; 10].into_iter().filter(|_| true))); // lower bound not exact
+ assert_eq!(*v0, 13);
+
+ // spare_capacity_mut
+ v.spare_capacity_mut();
+ assert_eq!(*v0, 13);
+
+ // Smoke test that would fire even outside Miri if an actual relocation happened.
+ *v0 -= 13;
+ assert_eq!(v[0], 0);
+}
+
+// https://github.com/rust-lang/rust/pull/49496 introduced specialization based on:
+//
+// ```
+// unsafe impl<T: ?Sized> IsZero for *mut T {
+// fn is_zero(&self) -> bool {
+// (*self).is_null()
+// }
+// }
+// ```
+//
+// … to call `RawVec::with_capacity_zeroed` for creating `Vec<*mut T>`,
+// which is incorrect for fat pointers since `<*mut T>::is_null` only looks at the data component.
+// That is, a fat pointer can be “null” without being made entirely of zero bits.
+#[test]
+fn vec_macro_repeating_null_raw_fat_pointer() {
+ let raw_dyn = &mut (|| ()) as &mut dyn Fn() as *mut dyn Fn();
+ let vtable = dbg!(ptr_metadata(raw_dyn));
+ let null_raw_dyn = ptr_from_raw_parts(std::ptr::null_mut(), vtable);
+ assert!(null_raw_dyn.is_null());
+
+ let vec = vec![null_raw_dyn; 1];
+ dbg!(ptr_metadata(vec[0]));
+ assert!(vec[0] == null_raw_dyn);
+
+ // Polyfill for https://github.com/rust-lang/rfcs/pull/2580
+
+ fn ptr_metadata(ptr: *mut dyn Fn()) -> *mut () {
+ unsafe { std::mem::transmute::<*mut dyn Fn(), DynRepr>(ptr).vtable }
+ }
+
+ fn ptr_from_raw_parts(data: *mut (), vtable: *mut ()) -> *mut dyn Fn() {
+ unsafe { std::mem::transmute::<DynRepr, *mut dyn Fn()>(DynRepr { data, vtable }) }
+ }
+
+ #[repr(C)]
+ struct DynRepr {
+ data: *mut (),
+ vtable: *mut (),
+ }
+}
+
+// This test will likely fail if you change the capacities used in
+// `RawVec::grow_amortized`.
+#[test]
+fn test_push_growth_strategy() {
+ // If the element size is 1, we jump from 0 to 8, then double.
+ {
+ let mut v1: Vec<u8> = vec![];
+ assert_eq!(v1.capacity(), 0);
+
+ for _ in 0..8 {
+ v1.push(0);
+ assert_eq!(v1.capacity(), 8);
+ }
+
+ for _ in 8..16 {
+ v1.push(0);
+ assert_eq!(v1.capacity(), 16);
+ }
+
+ for _ in 16..32 {
+ v1.push(0);
+ assert_eq!(v1.capacity(), 32);
+ }
+
+ for _ in 32..64 {
+ v1.push(0);
+ assert_eq!(v1.capacity(), 64);
+ }
+ }
+
+ // If the element size is 2..=1024, we jump from 0 to 4, then double.
+ {
+ let mut v2: Vec<u16> = vec![];
+ let mut v1024: Vec<[u8; 1024]> = vec![];
+ assert_eq!(v2.capacity(), 0);
+ assert_eq!(v1024.capacity(), 0);
+
+ for _ in 0..4 {
+ v2.push(0);
+ v1024.push([0; 1024]);
+ assert_eq!(v2.capacity(), 4);
+ assert_eq!(v1024.capacity(), 4);
+ }
+
+ for _ in 4..8 {
+ v2.push(0);
+ v1024.push([0; 1024]);
+ assert_eq!(v2.capacity(), 8);
+ assert_eq!(v1024.capacity(), 8);
+ }
+
+ for _ in 8..16 {
+ v2.push(0);
+ v1024.push([0; 1024]);
+ assert_eq!(v2.capacity(), 16);
+ assert_eq!(v1024.capacity(), 16);
+ }
+
+ for _ in 16..32 {
+ v2.push(0);
+ v1024.push([0; 1024]);
+ assert_eq!(v2.capacity(), 32);
+ assert_eq!(v1024.capacity(), 32);
+ }
+
+ for _ in 32..64 {
+ v2.push(0);
+ v1024.push([0; 1024]);
+ assert_eq!(v2.capacity(), 64);
+ assert_eq!(v1024.capacity(), 64);
+ }
+ }
+
+ // If the element size is > 1024, we jump from 0 to 1, then double.
+ {
+ let mut v1025: Vec<[u8; 1025]> = vec![];
+ assert_eq!(v1025.capacity(), 0);
+
+ for _ in 0..1 {
+ v1025.push([0; 1025]);
+ assert_eq!(v1025.capacity(), 1);
+ }
+
+ for _ in 1..2 {
+ v1025.push([0; 1025]);
+ assert_eq!(v1025.capacity(), 2);
+ }
+
+ for _ in 2..4 {
+ v1025.push([0; 1025]);
+ assert_eq!(v1025.capacity(), 4);
+ }
+
+ for _ in 4..8 {
+ v1025.push([0; 1025]);
+ assert_eq!(v1025.capacity(), 8);
+ }
+
+ for _ in 8..16 {
+ v1025.push([0; 1025]);
+ assert_eq!(v1025.capacity(), 16);
+ }
+
+ for _ in 16..32 {
+ v1025.push([0; 1025]);
+ assert_eq!(v1025.capacity(), 32);
+ }
+
+ for _ in 32..64 {
+ v1025.push([0; 1025]);
+ assert_eq!(v1025.capacity(), 64);
+ }
+ }
+}
+
+macro_rules! generate_assert_eq_vec_and_prim {
+ ($name:ident<$B:ident>($type:ty)) => {
+ fn $name<A: PartialEq<$B> + Debug, $B: Debug>(a: Vec<A>, b: $type) {
+ assert!(a == b);
+ assert_eq!(a, b);
+ }
+ };
+}
+
+generate_assert_eq_vec_and_prim! { assert_eq_vec_and_slice <B>(&[B]) }
+generate_assert_eq_vec_and_prim! { assert_eq_vec_and_array_3<B>([B; 3]) }
+
+#[test]
+fn partialeq_vec_and_prim() {
+ assert_eq_vec_and_slice(vec![1, 2, 3], &[1, 2, 3]);
+ assert_eq_vec_and_array_3(vec![1, 2, 3], [1, 2, 3]);
+}
+
+macro_rules! assert_partial_eq_valid {
+ ($a2:expr, $a3:expr; $b2:expr, $b3: expr) => {
+ assert!($a2 == $b2);
+ assert!($a2 != $b3);
+ assert!($a3 != $b2);
+ assert!($a3 == $b3);
+ assert_eq!($a2, $b2);
+ assert_ne!($a2, $b3);
+ assert_ne!($a3, $b2);
+ assert_eq!($a3, $b3);
+ };
+}
+
+#[test]
+fn partialeq_vec_full() {
+ let vec2: Vec<_> = vec![1, 2];
+ let vec3: Vec<_> = vec![1, 2, 3];
+ let slice2: &[_] = &[1, 2];
+ let slice3: &[_] = &[1, 2, 3];
+ let slicemut2: &[_] = &mut [1, 2];
+ let slicemut3: &[_] = &mut [1, 2, 3];
+ let array2: [_; 2] = [1, 2];
+ let array3: [_; 3] = [1, 2, 3];
+ let arrayref2: &[_; 2] = &[1, 2];
+ let arrayref3: &[_; 3] = &[1, 2, 3];
+
+ assert_partial_eq_valid!(vec2,vec3; vec2,vec3);
+ assert_partial_eq_valid!(vec2,vec3; slice2,slice3);
+ assert_partial_eq_valid!(vec2,vec3; slicemut2,slicemut3);
+ assert_partial_eq_valid!(slice2,slice3; vec2,vec3);
+ assert_partial_eq_valid!(slicemut2,slicemut3; vec2,vec3);
+ assert_partial_eq_valid!(vec2,vec3; array2,array3);
+ assert_partial_eq_valid!(vec2,vec3; arrayref2,arrayref3);
+ assert_partial_eq_valid!(vec2,vec3; arrayref2[..],arrayref3[..]);
+}
+
+#[test]
+fn test_vec_cycle() {
+ #[derive(Debug)]
+ struct C<'a> {
+ v: Vec<Cell<Option<&'a C<'a>>>>,
+ }
+
+ impl<'a> C<'a> {
+ fn new() -> C<'a> {
+ C { v: Vec::new() }
+ }
+ }
+
+ let mut c1 = C::new();
+ let mut c2 = C::new();
+ let mut c3 = C::new();
+
+ // Push
+ c1.v.push(Cell::new(None));
+ c1.v.push(Cell::new(None));
+
+ c2.v.push(Cell::new(None));
+ c2.v.push(Cell::new(None));
+
+ c3.v.push(Cell::new(None));
+ c3.v.push(Cell::new(None));
+
+ // Set
+ c1.v[0].set(Some(&c2));
+ c1.v[1].set(Some(&c3));
+
+ c2.v[0].set(Some(&c2));
+ c2.v[1].set(Some(&c3));
+
+ c3.v[0].set(Some(&c1));
+ c3.v[1].set(Some(&c2));
+}
+
+#[test]
+fn test_vec_cycle_wrapped() {
+ struct Refs<'a> {
+ v: Vec<Cell<Option<&'a C<'a>>>>,
+ }
+
+ struct C<'a> {
+ refs: Refs<'a>,
+ }
+
+ impl<'a> Refs<'a> {
+ fn new() -> Refs<'a> {
+ Refs { v: Vec::new() }
+ }
+ }
+
+ impl<'a> C<'a> {
+ fn new() -> C<'a> {
+ C { refs: Refs::new() }
+ }
+ }
+
+ let mut c1 = C::new();
+ let mut c2 = C::new();
+ let mut c3 = C::new();
+
+ c1.refs.v.push(Cell::new(None));
+ c1.refs.v.push(Cell::new(None));
+ c2.refs.v.push(Cell::new(None));
+ c2.refs.v.push(Cell::new(None));
+ c3.refs.v.push(Cell::new(None));
+ c3.refs.v.push(Cell::new(None));
+
+ c1.refs.v[0].set(Some(&c2));
+ c1.refs.v[1].set(Some(&c3));
+ c2.refs.v[0].set(Some(&c2));
+ c2.refs.v[1].set(Some(&c3));
+ c3.refs.v[0].set(Some(&c1));
+ c3.refs.v[1].set(Some(&c2));
+}
+
+#[test]
+fn test_zero_sized_capacity() {
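+ // A Vec of zero-sized elements never allocates, so its capacity is reported
+ // as `usize::MAX` regardless of the requested length.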
+ for len in [0, 1, 2, 4, 8, 16, 32, 64, 128, 256] {
+ let v = Vec::<()>::with_capacity(len);
+ assert_eq!(v.len(), 0);
+ assert_eq!(v.capacity(), usize::MAX);
+ }
+}
+
+#[test]
+fn test_zero_sized_vec_push() {
+ const N: usize = 8;
+
+ for len in 0..N {
+ let mut tester = Vec::with_capacity(len);
+ assert_eq!(tester.len(), 0);
+ assert!(tester.capacity() >= len);
+ for _ in 0..len {
+ tester.push(());
+ }
+ assert_eq!(tester.len(), len);
+ assert_eq!(tester.iter().count(), len);
+ tester.clear();
+ }
+}
+
+#[test]
+fn test_vec_macro_repeat() {
+ assert_eq!(vec![1; 3], vec![1, 1, 1]);
+ assert_eq!(vec![1; 2], vec![1, 1]);
+ assert_eq!(vec![1; 1], vec![1]);
+ assert_eq!(vec![1; 0], vec![]);
+
+ // from_elem syntax (see RFC 832)
+ let el = Box::new(1);
+ let n = 3;
+ assert_eq!(vec![el; n], vec![Box::new(1), Box::new(1), Box::new(1)]);
+}
+
+#[test]
+fn test_vec_swap() {
+ let mut a: Vec<isize> = vec![0, 1, 2, 3, 4, 5, 6];
+ a.swap(2, 4);
+ assert_eq!(a[2], 4);
+ assert_eq!(a[4], 2);
+ let mut n = 42;
+ swap(&mut n, &mut a[0]);
+ assert_eq!(a[0], 42);
+ assert_eq!(n, 0);
+}
+
+#[test]
+fn test_extend_from_within_spec() {
+ #[derive(Copy)]
+ struct CopyOnly;
+
+ impl Clone for CopyOnly {
+ fn clone(&self) -> Self {
+ panic!("extend_from_within must use specialization on copy");
+ }
+ }
+
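+ // If the `Copy` specialization were not used, `clone` above would panic and
+ // fail the test.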
+ vec![CopyOnly, CopyOnly].extend_from_within(..);
+}
+
+#[test]
+fn test_extend_from_within_clone() {
+ let mut v = vec![String::from("sssss"), String::from("12334567890"), String::from("c")];
+ v.extend_from_within(1..);
+
+ assert_eq!(v, ["sssss", "12334567890", "c", "12334567890", "c"]);
+}
+
+#[test]
+fn test_extend_from_within_complete_range() {
+ let mut v = vec![0, 1, 2, 3];
+ v.extend_from_within(..);
+
+ assert_eq!(v, [0, 1, 2, 3, 0, 1, 2, 3]);
+}
+
+#[test]
+fn test_extend_from_within_empty_range() {
+ let mut v = vec![0, 1, 2, 3];
+ v.extend_from_within(1..1);
+
+ assert_eq!(v, [0, 1, 2, 3]);
+}
+
+#[test]
+#[should_panic]
+fn test_extend_from_within_out_of_range() {
+ let mut v = vec![0, 1];
+ v.extend_from_within(..3);
+}
+
+#[test]
+fn test_extend_from_within_zst() {
+ let mut v = vec![(); 8];
+ v.extend_from_within(3..7);
+
+ assert_eq!(v, [(); 12]);
+}
+
+#[test]
+fn test_extend_from_within_empty_vec() {
+ let mut v = Vec::<i32>::new();
+ v.extend_from_within(..);
+
+ assert_eq!(v, []);
+}
+
+#[test]
+fn test_extend_from_within() {
+ let mut v = vec![String::from("a"), String::from("b"), String::from("c")];
+ v.extend_from_within(1..=2);
+ v.extend_from_within(..=1);
+
+ assert_eq!(v, ["a", "b", "c", "b", "c", "a", "b"]);
+}
+
+#[test]
+fn test_vec_dedup_by() {
+ let mut vec: Vec<i32> = vec![1, -1, 2, 3, 1, -5, 5, -2, 2];
+
+ vec.dedup_by(|a, b| a.abs() == b.abs());
+
+ assert_eq!(vec, [1, 2, 3, 1, -5, -2]);
+}
+
+#[test]
+fn test_vec_dedup_empty() {
+ let mut vec: Vec<i32> = Vec::new();
+
+ vec.dedup();
+
+ assert_eq!(vec, []);
+}
+
+#[test]
+fn test_vec_dedup_one() {
+ let mut vec = vec![12i32];
+
+ vec.dedup();
+
+ assert_eq!(vec, [12]);
+}
+
+#[test]
+fn test_vec_dedup_multiple_ident() {
+ let mut vec = vec![12, 12, 12, 12, 12, 11, 11, 11, 11, 11, 11];
+
+ vec.dedup();
+
+ assert_eq!(vec, [12, 11]);
+}
+
+#[test]
+fn test_vec_dedup_partialeq() {
+ #[derive(Debug)]
+ struct Foo(i32, i32);
+
+ impl PartialEq for Foo {
+ fn eq(&self, other: &Foo) -> bool {
+ self.0 == other.0
+ }
+ }
+
+ let mut vec = vec![Foo(0, 1), Foo(0, 5), Foo(1, 7), Foo(1, 9)];
+
+ vec.dedup();
+ assert_eq!(vec, [Foo(0, 1), Foo(1, 7)]);
+}
+
+#[test]
+fn test_vec_dedup() {
+ let mut vec: Vec<bool> = Vec::with_capacity(8);
+ let mut template = vec.clone();
+
+ for x in 0u8..255u8 {
+ vec.clear();
+ template.clear();
+
+ let iter = (0..8).map(move |bit| (x >> bit) & 1 == 1);
+ vec.extend(iter);
+ template.extend_from_slice(&vec);
+
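+ // `partition_dedup` on the slice serves as the reference result that
+ // `Vec::dedup` must match for every 8-bit pattern.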
+ let (dedup, _) = template.partition_dedup();
+ vec.dedup();
+
+ assert_eq!(vec, dedup);
+ }
+}
+
+#[test]
+fn test_vec_dedup_panicking() {
+ #[derive(Debug)]
+ struct Panic<'a> {
+ drop_counter: &'a Cell<u32>,
+ value: bool,
+ index: usize,
+ }
+
+ impl<'a> PartialEq for Panic<'a> {
+ fn eq(&self, other: &Self) -> bool {
+ self.value == other.value
+ }
+ }
+
+ impl<'a> Drop for Panic<'a> {
+ fn drop(&mut self) {
+ self.drop_counter.set(self.drop_counter.get() + 1);
+ if !std::thread::panicking() {
+ assert!(self.index != 4);
+ }
+ }
+ }
+
+ let drop_counter = &Cell::new(0);
+ let expected = [
+ Panic { drop_counter, value: false, index: 0 },
+ Panic { drop_counter, value: false, index: 5 },
+ Panic { drop_counter, value: true, index: 6 },
+ Panic { drop_counter, value: true, index: 7 },
+ ];
+ let mut vec = vec![
+ Panic { drop_counter, value: false, index: 0 },
+ // these elements get deduplicated
+ Panic { drop_counter, value: false, index: 1 },
+ Panic { drop_counter, value: false, index: 2 },
+ Panic { drop_counter, value: false, index: 3 },
+ Panic { drop_counter, value: false, index: 4 },
+ // here it panics while dropping the item with index==4
+ Panic { drop_counter, value: false, index: 5 },
+ Panic { drop_counter, value: true, index: 6 },
+ Panic { drop_counter, value: true, index: 7 },
+ ];
+
+ let _ = catch_unwind(AssertUnwindSafe(|| vec.dedup())).unwrap_err();
+
+ assert_eq!(drop_counter.get(), 4);
+
+ let ok = vec.iter().zip(expected.iter()).all(|(x, y)| x.index == y.index);
+
+ if !ok {
+ panic!("expected: {expected:?}\ngot: {vec:?}\n");
+ }
+}
+
+// Regression test for issue #82533
+#[test]
+fn test_extend_from_within_panicking_clone() {
+ struct Panic<'dc> {
+ drop_count: &'dc AtomicU32,
+ aaaaa: bool,
+ }
+
+ impl Clone for Panic<'_> {
+ fn clone(&self) -> Self {
+ if self.aaaaa {
+ panic!("panic! at the clone");
+ }
+
+ Self { ..*self }
+ }
+ }
+
+ impl Drop for Panic<'_> {
+ fn drop(&mut self) {
+ self.drop_count.fetch_add(1, Ordering::SeqCst);
+ }
+ }
+
+ let count = core::sync::atomic::AtomicU32::new(0);
+ let mut vec = vec![
+ Panic { drop_count: &count, aaaaa: false },
+ Panic { drop_count: &count, aaaaa: true },
+ Panic { drop_count: &count, aaaaa: false },
+ ];
+
+ // This should clone & append one Panic{..} at the end, and then panic while
+ // cloning the second Panic{..}. This means that `Panic::drop` should be called
+ // 4 times (3 for the items already in the vector, 1 for the just-appended one).
+ //
+ // Previously the just-appended item was leaked, making drop_count = 3 instead of 4.
+ std::panic::catch_unwind(move || vec.extend_from_within(..)).unwrap_err();
+
+ assert_eq!(count.load(Ordering::SeqCst), 4);
+}
+
+#[test]
+#[should_panic = "vec len overflow"]
+fn test_into_flattened_size_overflow() {
+ let v = vec![[(); usize::MAX]; 2];
+ let _ = v.into_flattened();
+}
diff --git a/library/alloc/tests/vec_deque.rs b/library/alloc/tests/vec_deque.rs
new file mode 100644
index 000000000..89cc7f905
--- /dev/null
+++ b/library/alloc/tests/vec_deque.rs
@@ -0,0 +1,1786 @@
+use std::assert_matches::assert_matches;
+use std::collections::TryReserveErrorKind::*;
+use std::collections::{vec_deque::Drain, VecDeque};
+use std::fmt::Debug;
+use std::mem::size_of;
+use std::ops::Bound::*;
+use std::panic::{catch_unwind, AssertUnwindSafe};
+
+use crate::hash;
+
+use Taggy::*;
+use Taggypar::*;
+
+#[test]
+fn test_simple() {
+ let mut d = VecDeque::new();
+ assert_eq!(d.len(), 0);
+ d.push_front(17);
+ d.push_front(42);
+ d.push_back(137);
+ assert_eq!(d.len(), 3);
+ d.push_back(137);
+ assert_eq!(d.len(), 4);
+ assert_eq!(*d.front().unwrap(), 42);
+ assert_eq!(*d.back().unwrap(), 137);
+ let mut i = d.pop_front();
+ assert_eq!(i, Some(42));
+ i = d.pop_back();
+ assert_eq!(i, Some(137));
+ i = d.pop_back();
+ assert_eq!(i, Some(137));
+ i = d.pop_back();
+ assert_eq!(i, Some(17));
+ assert_eq!(d.len(), 0);
+ d.push_back(3);
+ assert_eq!(d.len(), 1);
+ d.push_front(2);
+ assert_eq!(d.len(), 2);
+ d.push_back(4);
+ assert_eq!(d.len(), 3);
+ d.push_front(1);
+ assert_eq!(d.len(), 4);
+ assert_eq!(d[0], 1);
+ assert_eq!(d[1], 2);
+ assert_eq!(d[2], 3);
+ assert_eq!(d[3], 4);
+}
+
+fn test_parameterized<T: Clone + PartialEq + Debug>(a: T, b: T, c: T, d: T) {
+ let mut deq = VecDeque::new();
+ assert_eq!(deq.len(), 0);
+ deq.push_front(a.clone());
+ deq.push_front(b.clone());
+ deq.push_back(c.clone());
+ assert_eq!(deq.len(), 3);
+ deq.push_back(d.clone());
+ assert_eq!(deq.len(), 4);
+ assert_eq!((*deq.front().unwrap()).clone(), b.clone());
+ assert_eq!((*deq.back().unwrap()).clone(), d.clone());
+ assert_eq!(deq.pop_front().unwrap(), b.clone());
+ assert_eq!(deq.pop_back().unwrap(), d.clone());
+ assert_eq!(deq.pop_back().unwrap(), c.clone());
+ assert_eq!(deq.pop_back().unwrap(), a.clone());
+ assert_eq!(deq.len(), 0);
+ deq.push_back(c.clone());
+ assert_eq!(deq.len(), 1);
+ deq.push_front(b.clone());
+ assert_eq!(deq.len(), 2);
+ deq.push_back(d.clone());
+ assert_eq!(deq.len(), 3);
+ deq.push_front(a.clone());
+ assert_eq!(deq.len(), 4);
+ assert_eq!(deq[0].clone(), a.clone());
+ assert_eq!(deq[1].clone(), b.clone());
+ assert_eq!(deq[2].clone(), c.clone());
+ assert_eq!(deq[3].clone(), d.clone());
+}
+
+#[test]
+fn test_push_front_grow() {
+ let mut deq = VecDeque::new();
+ for i in 0..66 {
+ deq.push_front(i);
+ }
+ assert_eq!(deq.len(), 66);
+
+ for i in 0..66 {
+ assert_eq!(deq[i], 65 - i);
+ }
+
+ let mut deq = VecDeque::new();
+ for i in 0..66 {
+ deq.push_back(i);
+ }
+
+ for i in 0..66 {
+ assert_eq!(deq[i], i);
+ }
+}
+
+#[test]
+fn test_index() {
+ let mut deq = VecDeque::new();
+ for i in 1..4 {
+ deq.push_front(i);
+ }
+ assert_eq!(deq[1], 2);
+}
+
+#[test]
+#[should_panic]
+fn test_index_out_of_bounds() {
+ let mut deq = VecDeque::new();
+ for i in 1..4 {
+ deq.push_front(i);
+ }
+ deq[3];
+}
+
+#[test]
+#[should_panic]
+fn test_range_start_overflow() {
+ let deq = VecDeque::from(vec![1, 2, 3]);
+ deq.range((Included(0), Included(usize::MAX)));
+}
+
+#[test]
+#[should_panic]
+fn test_range_end_overflow() {
+ let deq = VecDeque::from(vec![1, 2, 3]);
+ deq.range((Excluded(usize::MAX), Included(0)));
+}
+
+#[derive(Clone, PartialEq, Debug)]
+enum Taggy {
+ One(i32),
+ Two(i32, i32),
+ Three(i32, i32, i32),
+}
+
+#[derive(Clone, PartialEq, Debug)]
+enum Taggypar<T> {
+ Onepar(T),
+ Twopar(T, T),
+ Threepar(T, T, T),
+}
+
+#[derive(Clone, PartialEq, Debug)]
+struct RecCy {
+ x: i32,
+ y: i32,
+ t: Taggy,
+}
+
+#[test]
+fn test_param_int() {
+ test_parameterized::<i32>(5, 72, 64, 175);
+}
+
+#[test]
+fn test_param_taggy() {
+ test_parameterized::<Taggy>(One(1), Two(1, 2), Three(1, 2, 3), Two(17, 42));
+}
+
+#[test]
+fn test_param_taggypar() {
+ test_parameterized::<Taggypar<i32>>(
+ Onepar::<i32>(1),
+ Twopar::<i32>(1, 2),
+ Threepar::<i32>(1, 2, 3),
+ Twopar::<i32>(17, 42),
+ );
+}
+
+#[test]
+fn test_param_reccy() {
+ let reccy1 = RecCy { x: 1, y: 2, t: One(1) };
+ let reccy2 = RecCy { x: 345, y: 2, t: Two(1, 2) };
+ let reccy3 = RecCy { x: 1, y: 777, t: Three(1, 2, 3) };
+ let reccy4 = RecCy { x: 19, y: 252, t: Two(17, 42) };
+ test_parameterized::<RecCy>(reccy1, reccy2, reccy3, reccy4);
+}
+
+#[test]
+fn test_with_capacity() {
+ let mut d = VecDeque::with_capacity(0);
+ d.push_back(1);
+ assert_eq!(d.len(), 1);
+ let mut d = VecDeque::with_capacity(50);
+ d.push_back(1);
+ assert_eq!(d.len(), 1);
+}
+
+#[test]
+fn test_with_capacity_non_power_two() {
+ let mut d3 = VecDeque::with_capacity(3);
+ d3.push_back(1);
+
+ // X = None, | = lo
+ // [|1, X, X]
+ assert_eq!(d3.pop_front(), Some(1));
+ // [X, |X, X]
+ assert_eq!(d3.front(), None);
+
+ // [X, |3, X]
+ d3.push_back(3);
+ // [X, |3, 6]
+ d3.push_back(6);
+ // [X, X, |6]
+ assert_eq!(d3.pop_front(), Some(3));
+
+ // Pushing `lo` past the halfway point to trigger
+ // the 'B' scenario for growth.
+ // [9, X, |6]
+ d3.push_back(9);
+ // [9, 12, |6]
+ d3.push_back(12);
+
+ d3.push_back(15);
+ // There used to be a bug here about how the
+ // VecDeque made growth assumptions about the
+ // underlying Vec which didn't hold and led
+ // to corruption.
+ // (Vec grows to next power of two)
+ // good- [9, 12, 15, X, X, X, X, |6]
+ // bug- [15, 12, X, X, X, |6, X, X]
+ assert_eq!(d3.pop_front(), Some(6));
+
+ // This leaves us in the following state, which
+ // would be a failure case.
+ // bug- [15, 12, X, X, X, X, |X, X]
+ assert_eq!(d3.front(), Some(&9));
+}
+
+#[test]
+fn test_reserve_exact() {
+ let mut d = VecDeque::new();
+ d.push_back(0);
+ d.reserve_exact(50);
+ assert!(d.capacity() >= 51);
+}
+
+#[test]
+fn test_reserve() {
+ let mut d = VecDeque::new();
+ d.push_back(0);
+ d.reserve(50);
+ assert!(d.capacity() >= 51);
+}
+
+#[test]
+fn test_swap() {
+ let mut d: VecDeque<_> = (0..5).collect();
+ d.pop_front();
+ d.swap(0, 3);
+ assert_eq!(d.iter().cloned().collect::<Vec<_>>(), [4, 2, 3, 1]);
+}
+
+#[test]
+fn test_iter() {
+ let mut d = VecDeque::new();
+ assert_eq!(d.iter().next(), None);
+ assert_eq!(d.iter().size_hint(), (0, Some(0)));
+
+ for i in 0..5 {
+ d.push_back(i);
+ }
+ {
+ let b: &[_] = &[&0, &1, &2, &3, &4];
+ assert_eq!(d.iter().collect::<Vec<_>>(), b);
+ }
+
+ for i in 6..9 {
+ d.push_front(i);
+ }
+ {
+ let b: &[_] = &[&8, &7, &6, &0, &1, &2, &3, &4];
+ assert_eq!(d.iter().collect::<Vec<_>>(), b);
+ }
+
+ let mut it = d.iter();
+ let mut len = d.len();
+ loop {
+ match it.next() {
+ None => break,
+ _ => {
+ len -= 1;
+ assert_eq!(it.size_hint(), (len, Some(len)))
+ }
+ }
+ }
+}
+
+#[test]
+fn test_rev_iter() {
+ let mut d = VecDeque::new();
+ assert_eq!(d.iter().rev().next(), None);
+
+ for i in 0..5 {
+ d.push_back(i);
+ }
+ {
+ let b: &[_] = &[&4, &3, &2, &1, &0];
+ assert_eq!(d.iter().rev().collect::<Vec<_>>(), b);
+ }
+
+ for i in 6..9 {
+ d.push_front(i);
+ }
+ let b: &[_] = &[&4, &3, &2, &1, &0, &6, &7, &8];
+ assert_eq!(d.iter().rev().collect::<Vec<_>>(), b);
+}
+
+#[test]
+fn test_mut_rev_iter_wrap() {
+ let mut d = VecDeque::with_capacity(3);
+ assert!(d.iter_mut().rev().next().is_none());
+
+ d.push_back(1);
+ d.push_back(2);
+ d.push_back(3);
+ assert_eq!(d.pop_front(), Some(1));
+ d.push_back(4);
+
+ assert_eq!(d.iter_mut().rev().map(|x| *x).collect::<Vec<_>>(), vec![4, 3, 2]);
+}
+
+#[test]
+fn test_mut_iter() {
+ let mut d = VecDeque::new();
+ assert!(d.iter_mut().next().is_none());
+
+ for i in 0..3 {
+ d.push_front(i);
+ }
+
+ for (i, elt) in d.iter_mut().enumerate() {
+ assert_eq!(*elt, 2 - i);
+ *elt = i;
+ }
+
+ {
+ let mut it = d.iter_mut();
+ assert_eq!(*it.next().unwrap(), 0);
+ assert_eq!(*it.next().unwrap(), 1);
+ assert_eq!(*it.next().unwrap(), 2);
+ assert!(it.next().is_none());
+ }
+}
+
+#[test]
+fn test_mut_rev_iter() {
+ let mut d = VecDeque::new();
+ assert!(d.iter_mut().rev().next().is_none());
+
+ for i in 0..3 {
+ d.push_front(i);
+ }
+
+ for (i, elt) in d.iter_mut().rev().enumerate() {
+ assert_eq!(*elt, i);
+ *elt = i;
+ }
+
+ {
+ let mut it = d.iter_mut().rev();
+ assert_eq!(*it.next().unwrap(), 0);
+ assert_eq!(*it.next().unwrap(), 1);
+ assert_eq!(*it.next().unwrap(), 2);
+ assert!(it.next().is_none());
+ }
+}
+
+#[test]
+fn test_into_iter() {
+ // Empty iter
+ {
+ let d: VecDeque<i32> = VecDeque::new();
+ let mut iter = d.into_iter();
+
+ assert_eq!(iter.size_hint(), (0, Some(0)));
+ assert_eq!(iter.next(), None);
+ assert_eq!(iter.size_hint(), (0, Some(0)));
+ }
+
+ // simple iter
+ {
+ let mut d = VecDeque::new();
+ for i in 0..5 {
+ d.push_back(i);
+ }
+
+ let b = vec![0, 1, 2, 3, 4];
+ assert_eq!(d.into_iter().collect::<Vec<_>>(), b);
+ }
+
+ // wrapped iter
+ {
+ let mut d = VecDeque::new();
+ for i in 0..5 {
+ d.push_back(i);
+ }
+ for i in 6..9 {
+ d.push_front(i);
+ }
+
+ let b = vec![8, 7, 6, 0, 1, 2, 3, 4];
+ assert_eq!(d.into_iter().collect::<Vec<_>>(), b);
+ }
+
+ // partially used
+ {
+ let mut d = VecDeque::new();
+ for i in 0..5 {
+ d.push_back(i);
+ }
+ for i in 6..9 {
+ d.push_front(i);
+ }
+
+ let mut it = d.into_iter();
+ assert_eq!(it.size_hint(), (8, Some(8)));
+ assert_eq!(it.next(), Some(8));
+ assert_eq!(it.size_hint(), (7, Some(7)));
+ assert_eq!(it.next_back(), Some(4));
+ assert_eq!(it.size_hint(), (6, Some(6)));
+ assert_eq!(it.next(), Some(7));
+ assert_eq!(it.size_hint(), (5, Some(5)));
+ }
+}
+
+#[test]
+fn test_drain() {
+ // Empty iter
+ {
+ let mut d: VecDeque<i32> = VecDeque::new();
+
+ {
+ let mut iter = d.drain(..);
+
+ assert_eq!(iter.size_hint(), (0, Some(0)));
+ assert_eq!(iter.next(), None);
+ assert_eq!(iter.size_hint(), (0, Some(0)));
+ }
+
+ assert!(d.is_empty());
+ }
+
+ // simple iter
+ {
+ let mut d = VecDeque::new();
+ for i in 0..5 {
+ d.push_back(i);
+ }
+
+ assert_eq!(d.drain(..).collect::<Vec<_>>(), [0, 1, 2, 3, 4]);
+ assert!(d.is_empty());
+ }
+
+ // wrapped iter
+ {
+ let mut d = VecDeque::new();
+ for i in 0..5 {
+ d.push_back(i);
+ }
+ for i in 6..9 {
+ d.push_front(i);
+ }
+
+ assert_eq!(d.drain(..).collect::<Vec<_>>(), [8, 7, 6, 0, 1, 2, 3, 4]);
+ assert!(d.is_empty());
+ }
+
+ // partially used
+ {
+ let mut d: VecDeque<_> = VecDeque::new();
+ for i in 0..5 {
+ d.push_back(i);
+ }
+ for i in 6..9 {
+ d.push_front(i);
+ }
+
+ {
+ let mut it = d.drain(..);
+ assert_eq!(it.size_hint(), (8, Some(8)));
+ assert_eq!(it.next(), Some(8));
+ assert_eq!(it.size_hint(), (7, Some(7)));
+ assert_eq!(it.next_back(), Some(4));
+ assert_eq!(it.size_hint(), (6, Some(6)));
+ assert_eq!(it.next(), Some(7));
+ assert_eq!(it.size_hint(), (5, Some(5)));
+ }
+ assert!(d.is_empty());
+ }
+}
+
+#[test]
+fn test_from_iter() {
+ let v = vec![1, 2, 3, 4, 5, 6, 7];
+ let deq: VecDeque<_> = v.iter().cloned().collect();
+ let u: Vec<_> = deq.iter().cloned().collect();
+ assert_eq!(u, v);
+
+ let seq = (0..).step_by(2).take(256);
+ let deq: VecDeque<_> = seq.collect();
+ for (i, &x) in deq.iter().enumerate() {
+ assert_eq!(2 * i, x);
+ }
+ assert_eq!(deq.len(), 256);
+}
+
+#[test]
+fn test_clone() {
+ let mut d = VecDeque::new();
+ d.push_front(17);
+ d.push_front(42);
+ d.push_back(137);
+ d.push_back(137);
+ assert_eq!(d.len(), 4);
+ let mut e = d.clone();
+ assert_eq!(e.len(), 4);
+ while !d.is_empty() {
+ assert_eq!(d.pop_back(), e.pop_back());
+ }
+ assert_eq!(d.len(), 0);
+ assert_eq!(e.len(), 0);
+}
+
+#[test]
+fn test_eq() {
+ let mut d = VecDeque::new();
+ assert!(d == VecDeque::with_capacity(0));
+ d.push_front(137);
+ d.push_front(17);
+ d.push_front(42);
+ d.push_back(137);
+ let mut e = VecDeque::with_capacity(0);
+ e.push_back(42);
+ e.push_back(17);
+ e.push_back(137);
+ e.push_back(137);
+ assert!(&e == &d);
+ e.pop_back();
+ e.push_back(0);
+ assert!(e != d);
+ e.clear();
+ assert!(e == VecDeque::new());
+}
+
+#[test]
+fn test_partial_eq_array() {
+ let d = VecDeque::<char>::new();
+ assert!(d == []);
+
+ let mut d = VecDeque::new();
+ d.push_front('a');
+ assert!(d == ['a']);
+
+ let mut d = VecDeque::new();
+ d.push_back('a');
+ assert!(d == ['a']);
+
+ let mut d = VecDeque::new();
+ d.push_back('a');
+ d.push_back('b');
+ assert!(d == ['a', 'b']);
+}
+
+#[test]
+fn test_hash() {
+ let mut x = VecDeque::new();
+ let mut y = VecDeque::new();
+
+ x.push_back(1);
+ x.push_back(2);
+ x.push_back(3);
+
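+ // Give y the same contents as x but a different internal offset (extra push
+ // plus pop_front); the hash must not depend on the ring-buffer layout.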
+ y.push_back(0);
+ y.push_back(1);
+ y.pop_front();
+ y.push_back(2);
+ y.push_back(3);
+
+ assert!(hash(&x) == hash(&y));
+}
+
+#[test]
+fn test_hash_after_rotation() {
+ // test that two deques hash equal even if elements are laid out differently
+ let len = 28;
+ let mut ring: VecDeque<i32> = (0..len as i32).collect();
+ let orig = ring.clone();
+ for _ in 0..ring.capacity() {
+ // shift values 1 step to the right by pop, sub one, push
+ ring.pop_front();
+ for elt in &mut ring {
+ *elt -= 1;
+ }
+ ring.push_back(len - 1);
+ assert_eq!(hash(&orig), hash(&ring));
+ assert_eq!(orig, ring);
+ assert_eq!(ring, orig);
+ }
+}
+
+#[test]
+fn test_eq_after_rotation() {
+ // test that two deques are equal even if elements are laid out differently
+ let len = 28;
+ let mut ring: VecDeque<i32> = (0..len as i32).collect();
+ let mut shifted = ring.clone();
+ for _ in 0..10 {
+ // shift values 1 step to the right by pop, sub one, push
+ ring.pop_front();
+ for elt in &mut ring {
+ *elt -= 1;
+ }
+ ring.push_back(len - 1);
+ }
+
+ // try every shift
+ for _ in 0..shifted.capacity() {
+ shifted.pop_front();
+ for elt in &mut shifted {
+ *elt -= 1;
+ }
+ shifted.push_back(len - 1);
+ assert_eq!(shifted, ring);
+ assert_eq!(ring, shifted);
+ }
+}
+
+#[test]
+fn test_ord() {
+ let x = VecDeque::new();
+ let mut y = VecDeque::new();
+ y.push_back(1);
+ y.push_back(2);
+ y.push_back(3);
+ assert!(x < y);
+ assert!(y > x);
+ assert!(x <= x);
+ assert!(x >= x);
+}
+
+#[test]
+fn test_show() {
+ let ringbuf: VecDeque<_> = (0..10).collect();
+ assert_eq!(format!("{ringbuf:?}"), "[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]");
+
+ let ringbuf: VecDeque<_> = vec!["just", "one", "test", "more"].iter().cloned().collect();
+ assert_eq!(format!("{ringbuf:?}"), "[\"just\", \"one\", \"test\", \"more\"]");
+}
+
+#[test]
+fn test_drop() {
+ static mut DROPS: i32 = 0;
+ struct Elem;
+ impl Drop for Elem {
+ fn drop(&mut self) {
+ unsafe {
+ DROPS += 1;
+ }
+ }
+ }
+
+ let mut ring = VecDeque::new();
+ ring.push_back(Elem);
+ ring.push_front(Elem);
+ ring.push_back(Elem);
+ ring.push_front(Elem);
+ drop(ring);
+
+ assert_eq!(unsafe { DROPS }, 4);
+}
+
+#[test]
+fn test_drop_with_pop() {
+ static mut DROPS: i32 = 0;
+ struct Elem;
+ impl Drop for Elem {
+ fn drop(&mut self) {
+ unsafe {
+ DROPS += 1;
+ }
+ }
+ }
+
+ let mut ring = VecDeque::new();
+ ring.push_back(Elem);
+ ring.push_front(Elem);
+ ring.push_back(Elem);
+ ring.push_front(Elem);
+
+ drop(ring.pop_back());
+ drop(ring.pop_front());
+ assert_eq!(unsafe { DROPS }, 2);
+
+ drop(ring);
+ assert_eq!(unsafe { DROPS }, 4);
+}
+
+#[test]
+fn test_drop_clear() {
+ static mut DROPS: i32 = 0;
+ struct Elem;
+ impl Drop for Elem {
+ fn drop(&mut self) {
+ unsafe {
+ DROPS += 1;
+ }
+ }
+ }
+
+ let mut ring = VecDeque::new();
+ ring.push_back(Elem);
+ ring.push_front(Elem);
+ ring.push_back(Elem);
+ ring.push_front(Elem);
+ ring.clear();
+ assert_eq!(unsafe { DROPS }, 4);
+
+ drop(ring);
+ assert_eq!(unsafe { DROPS }, 4);
+}
+
+#[test]
+fn test_drop_panic() {
+ static mut DROPS: i32 = 0;
+
+ struct D(bool);
+
+ impl Drop for D {
+ fn drop(&mut self) {
+ unsafe {
+ DROPS += 1;
+ }
+
+ if self.0 {
+ panic!("panic in `drop`");
+ }
+ }
+ }
+
+ let mut q = VecDeque::new();
+ q.push_back(D(false));
+ q.push_back(D(false));
+ q.push_back(D(false));
+ q.push_back(D(false));
+ q.push_back(D(false));
+ q.push_front(D(false));
+ q.push_front(D(false));
+ q.push_front(D(true));
+
+ catch_unwind(move || drop(q)).ok();
+
+ assert_eq!(unsafe { DROPS }, 8);
+}
+
+#[test]
+fn test_reserve_grow() {
+ // test growth path A
+ // [T o o H] -> [T o o H . . . . ]
+ let mut ring = VecDeque::with_capacity(4);
+ for i in 0..3 {
+ ring.push_back(i);
+ }
+ ring.reserve(7);
+ for i in 0..3 {
+ assert_eq!(ring.pop_front(), Some(i));
+ }
+
+ // test growth path B
+ // [H T o o] -> [. T o o H . . . ]
+ let mut ring = VecDeque::with_capacity(4);
+ for i in 0..1 {
+ ring.push_back(i);
+ assert_eq!(ring.pop_front(), Some(i));
+ }
+ for i in 0..3 {
+ ring.push_back(i);
+ }
+ ring.reserve(7);
+ for i in 0..3 {
+ assert_eq!(ring.pop_front(), Some(i));
+ }
+
+ // test growth path C
+ // [o o H T] -> [o o H . . . . T ]
+ let mut ring = VecDeque::with_capacity(4);
+ for i in 0..3 {
+ ring.push_back(i);
+ assert_eq!(ring.pop_front(), Some(i));
+ }
+ for i in 0..3 {
+ ring.push_back(i);
+ }
+ ring.reserve(7);
+ for i in 0..3 {
+ assert_eq!(ring.pop_front(), Some(i));
+ }
+}
+
+#[test]
+fn test_get() {
+ let mut ring = VecDeque::new();
+ ring.push_back(0);
+ assert_eq!(ring.get(0), Some(&0));
+ assert_eq!(ring.get(1), None);
+
+ ring.push_back(1);
+ assert_eq!(ring.get(0), Some(&0));
+ assert_eq!(ring.get(1), Some(&1));
+ assert_eq!(ring.get(2), None);
+
+ ring.push_back(2);
+ assert_eq!(ring.get(0), Some(&0));
+ assert_eq!(ring.get(1), Some(&1));
+ assert_eq!(ring.get(2), Some(&2));
+ assert_eq!(ring.get(3), None);
+
+ assert_eq!(ring.pop_front(), Some(0));
+ assert_eq!(ring.get(0), Some(&1));
+ assert_eq!(ring.get(1), Some(&2));
+ assert_eq!(ring.get(2), None);
+
+ assert_eq!(ring.pop_front(), Some(1));
+ assert_eq!(ring.get(0), Some(&2));
+ assert_eq!(ring.get(1), None);
+
+ assert_eq!(ring.pop_front(), Some(2));
+ assert_eq!(ring.get(0), None);
+ assert_eq!(ring.get(1), None);
+}
+
+#[test]
+fn test_get_mut() {
+ let mut ring = VecDeque::new();
+ for i in 0..3 {
+ ring.push_back(i);
+ }
+
+ match ring.get_mut(1) {
+ Some(x) => *x = -1,
+ None => (),
+ };
+
+ assert_eq!(ring.get_mut(0), Some(&mut 0));
+ assert_eq!(ring.get_mut(1), Some(&mut -1));
+ assert_eq!(ring.get_mut(2), Some(&mut 2));
+ assert_eq!(ring.get_mut(3), None);
+
+ assert_eq!(ring.pop_front(), Some(0));
+ assert_eq!(ring.get_mut(0), Some(&mut -1));
+ assert_eq!(ring.get_mut(1), Some(&mut 2));
+ assert_eq!(ring.get_mut(2), None);
+}
+
+#[test]
+fn test_front() {
+ let mut ring = VecDeque::new();
+ ring.push_back(10);
+ ring.push_back(20);
+ assert_eq!(ring.front(), Some(&10));
+ ring.pop_front();
+ assert_eq!(ring.front(), Some(&20));
+ ring.pop_front();
+ assert_eq!(ring.front(), None);
+}
+
+#[test]
+fn test_as_slices() {
+ let mut ring: VecDeque<i32> = VecDeque::with_capacity(127);
+ let cap = ring.capacity() as i32;
+ let first = cap / 2;
+ let last = cap - first;
+ for i in 0..first {
+ ring.push_back(i);
+
+ let (left, right) = ring.as_slices();
+ let expected: Vec<_> = (0..=i).collect();
+ assert_eq!(left, &expected[..]);
+ assert_eq!(right, []);
+ }
+
+ for j in -last..0 {
+ ring.push_front(j);
+ let (left, right) = ring.as_slices();
+ let expected_left: Vec<_> = (-last..=j).rev().collect();
+ let expected_right: Vec<_> = (0..first).collect();
+ assert_eq!(left, &expected_left[..]);
+ assert_eq!(right, &expected_right[..]);
+ }
+
+ assert_eq!(ring.len() as i32, cap);
+ assert_eq!(ring.capacity() as i32, cap);
+}
+
+#[test]
+fn test_as_mut_slices() {
+ let mut ring: VecDeque<i32> = VecDeque::with_capacity(127);
+ let cap = ring.capacity() as i32;
+ let first = cap / 2;
+ let last = cap - first;
+ for i in 0..first {
+ ring.push_back(i);
+
+ let (left, right) = ring.as_mut_slices();
+ let expected: Vec<_> = (0..=i).collect();
+ assert_eq!(left, &expected[..]);
+ assert_eq!(right, []);
+ }
+
+ for j in -last..0 {
+ ring.push_front(j);
+ let (left, right) = ring.as_mut_slices();
+ let expected_left: Vec<_> = (-last..=j).rev().collect();
+ let expected_right: Vec<_> = (0..first).collect();
+ assert_eq!(left, &expected_left[..]);
+ assert_eq!(right, &expected_right[..]);
+ }
+
+ assert_eq!(ring.len() as i32, cap);
+ assert_eq!(ring.capacity() as i32, cap);
+}
+
+#[test]
+fn test_append() {
+ let mut a: VecDeque<_> = [1, 2, 3].into_iter().collect();
+ let mut b: VecDeque<_> = [4, 5, 6].into_iter().collect();
+
+ // normal append
+ a.append(&mut b);
+ assert_eq!(a.iter().cloned().collect::<Vec<_>>(), [1, 2, 3, 4, 5, 6]);
+ assert_eq!(b.iter().cloned().collect::<Vec<_>>(), []);
+
+ // append nothing to something
+ a.append(&mut b);
+ assert_eq!(a.iter().cloned().collect::<Vec<_>>(), [1, 2, 3, 4, 5, 6]);
+ assert_eq!(b.iter().cloned().collect::<Vec<_>>(), []);
+
+ // append something to nothing
+ b.append(&mut a);
+ assert_eq!(b.iter().cloned().collect::<Vec<_>>(), [1, 2, 3, 4, 5, 6]);
+ assert_eq!(a.iter().cloned().collect::<Vec<_>>(), []);
+}
+
+#[test]
+fn test_append_permutations() {
+ fn construct_vec_deque(
+ push_back: usize,
+ pop_back: usize,
+ push_front: usize,
+ pop_front: usize,
+ ) -> VecDeque<usize> {
+ let mut out = VecDeque::new();
+ for a in 0..push_back {
+ out.push_back(a);
+ }
+ for b in 0..push_front {
+ out.push_front(push_back + b);
+ }
+ for _ in 0..pop_back {
+ out.pop_back();
+ }
+ for _ in 0..pop_front {
+ out.pop_front();
+ }
+ out
+ }
+
+ // Miri is too slow
+ let max = if cfg!(miri) { 3 } else { 5 };
+
+ // Many different permutations of both the `VecDeque` getting appended to
+ // and the one getting appended are generated to check `append`.
+ // This ensures all 6 code paths of `append` are tested.
+ for src_push_back in 0..max {
+ for src_push_front in 0..max {
+ // doesn't pop more values than are pushed
+ for src_pop_back in 0..(src_push_back + src_push_front) {
+ for src_pop_front in 0..(src_push_back + src_push_front - src_pop_back) {
+ let src = construct_vec_deque(
+ src_push_back,
+ src_pop_back,
+ src_push_front,
+ src_pop_front,
+ );
+
+ for dst_push_back in 0..max {
+ for dst_push_front in 0..max {
+ for dst_pop_back in 0..(dst_push_back + dst_push_front) {
+ for dst_pop_front in
+ 0..(dst_push_back + dst_push_front - dst_pop_back)
+ {
+ let mut dst = construct_vec_deque(
+ dst_push_back,
+ dst_pop_back,
+ dst_push_front,
+ dst_pop_front,
+ );
+ let mut src = src.clone();
+
+ // Assert that appending `src` to `dst` gives the same order
+ // of values as iterating over both in sequence.
+ let correct = dst
+ .iter()
+ .chain(src.iter())
+ .cloned()
+ .collect::<Vec<usize>>();
+ dst.append(&mut src);
+ assert_eq!(dst, correct);
+ assert!(src.is_empty());
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+struct DropCounter<'a> {
+ count: &'a mut u32,
+}
+
+impl Drop for DropCounter<'_> {
+ fn drop(&mut self) {
+ *self.count += 1;
+ }
+}
+
+#[test]
+fn test_append_double_drop() {
+ let (mut count_a, mut count_b) = (0, 0);
+ {
+ let mut a = VecDeque::new();
+ let mut b = VecDeque::new();
+ a.push_back(DropCounter { count: &mut count_a });
+ b.push_back(DropCounter { count: &mut count_b });
+
+ a.append(&mut b);
+ }
+ assert_eq!(count_a, 1);
+ assert_eq!(count_b, 1);
+}
+
+#[test]
+fn test_retain() {
+ let mut buf = VecDeque::new();
+ buf.extend(1..5);
+ buf.retain(|&x| x % 2 == 0);
+ let v: Vec<_> = buf.into_iter().collect();
+ assert_eq!(&v[..], &[2, 4]);
+}
+
+#[test]
+fn test_extend_ref() {
+ let mut v = VecDeque::new();
+ v.push_back(1);
+ v.extend(&[2, 3, 4]);
+
+ assert_eq!(v.len(), 4);
+ assert_eq!(v[0], 1);
+ assert_eq!(v[1], 2);
+ assert_eq!(v[2], 3);
+ assert_eq!(v[3], 4);
+
+ let mut w = VecDeque::new();
+ w.push_back(5);
+ w.push_back(6);
+ v.extend(&w);
+
+ assert_eq!(v.len(), 6);
+ assert_eq!(v[0], 1);
+ assert_eq!(v[1], 2);
+ assert_eq!(v[2], 3);
+ assert_eq!(v[3], 4);
+ assert_eq!(v[4], 5);
+ assert_eq!(v[5], 6);
+}
+
+#[test]
+fn test_contains() {
+ let mut v = VecDeque::new();
+ v.extend(&[2, 3, 4]);
+
+ assert!(v.contains(&3));
+ assert!(!v.contains(&1));
+
+ v.clear();
+
+ assert!(!v.contains(&3));
+}
+
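+// Compile-time-only check that `Drain` is covariant over its lifetime and element
+// type; the function is never called, hence `#[allow(dead_code)]`.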
+#[allow(dead_code)]
+fn assert_covariance() {
+ fn drain<'new>(d: Drain<'static, &'static str>) -> Drain<'new, &'new str> {
+ d
+ }
+}
+
+#[test]
+fn test_is_empty() {
+ let mut v = VecDeque::<i32>::new();
+ assert!(v.is_empty());
+ assert!(v.iter().is_empty());
+ assert!(v.iter_mut().is_empty());
+ v.extend(&[2, 3, 4]);
+ assert!(!v.is_empty());
+ assert!(!v.iter().is_empty());
+ assert!(!v.iter_mut().is_empty());
+ while let Some(_) = v.pop_front() {
+ assert_eq!(v.is_empty(), v.len() == 0);
+ assert_eq!(v.iter().is_empty(), v.iter().len() == 0);
+ assert_eq!(v.iter_mut().is_empty(), v.iter_mut().len() == 0);
+ }
+ assert!(v.is_empty());
+ assert!(v.iter().is_empty());
+ assert!(v.iter_mut().is_empty());
+ assert!(v.into_iter().is_empty());
+}
+
+#[test]
+fn test_reserve_exact_2() {
+ // This is all the same as test_reserve
+
+ let mut v = VecDeque::new();
+
+ v.reserve_exact(2);
+ assert!(v.capacity() >= 2);
+
+ for i in 0..16 {
+ v.push_back(i);
+ }
+
+ assert!(v.capacity() >= 16);
+ v.reserve_exact(16);
+ assert!(v.capacity() >= 32);
+
+ v.push_back(16);
+
+ v.reserve_exact(16);
+ assert!(v.capacity() >= 48)
+}
+
+#[test]
+#[cfg_attr(miri, ignore)] // Miri does not support signalling OOM
+#[cfg_attr(target_os = "android", ignore)] // Android used in CI has a broken dlmalloc
+fn test_try_reserve() {
+ // These are the interesting cases:
+ // * exactly isize::MAX should never trigger a CapacityOverflow (can be OOM)
+ // * > isize::MAX should always fail
+ // * On 16/32-bit should CapacityOverflow
+ // * On 64-bit should OOM
+ // * overflow may trigger when adding `len` to `cap` (in number of elements)
+ // * overflow may trigger when multiplying `new_cap` by size_of::<T> (to get bytes)
+
+ const MAX_CAP: usize = (isize::MAX as usize + 1) / 2 - 1;
+ const MAX_USIZE: usize = usize::MAX;
+
+ // On 16/32-bit, we check that allocations don't exceed isize::MAX,
+ // on 64-bit, we assume the OS will give an OOM for such a ridiculous size.
+ // Any platform that succeeds for these requests is technically broken with
+ // ptr::offset because LLVM is the worst.
+ let guards_against_isize = size_of::<usize>() < 8;
+
+ {
+ // Note: basic stuff is checked by test_reserve
+ let mut empty_bytes: VecDeque<u8> = VecDeque::new();
+
+ // Check isize::MAX doesn't count as an overflow
+ if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_CAP).map_err(|e| e.kind()) {
+ panic!("isize::MAX shouldn't trigger an overflow!");
+ }
+ // Play it again, frank! (just to be sure)
+ if let Err(CapacityOverflow) = empty_bytes.try_reserve(MAX_CAP).map_err(|e| e.kind()) {
+ panic!("isize::MAX shouldn't trigger an overflow!");
+ }
+
+ if guards_against_isize {
+ // Check isize::MAX + 1 does count as overflow
+ assert_matches!(
+ empty_bytes.try_reserve(MAX_CAP + 1).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "isize::MAX + 1 should trigger an overflow!"
+ );
+
+ // Check usize::MAX does count as overflow
+ assert_matches!(
+ empty_bytes.try_reserve(MAX_USIZE).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "usize::MAX should trigger an overflow!"
+ );
+ } else {
+ // Check isize::MAX is an OOM
+ // VecDeque starts with capacity 7, always adds 1 to the capacity
+ // and also rounds the number up to the next power of 2, so this is the
+ // furthest we can go without triggering CapacityOverflow
+ assert_matches!(
+ empty_bytes.try_reserve(MAX_CAP).map_err(|e| e.kind()),
+ Err(AllocError { .. }),
+ "isize::MAX + 1 should trigger an OOM!"
+ );
+ }
+ }
+
+ {
+ // Same basic idea, but with non-zero len
+ let mut ten_bytes: VecDeque<u8> = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10].into_iter().collect();
+
+ if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10).map_err(|e| e.kind()) {
+ panic!("isize::MAX shouldn't trigger an overflow!");
+ }
+ if let Err(CapacityOverflow) = ten_bytes.try_reserve(MAX_CAP - 10).map_err(|e| e.kind()) {
+ panic!("isize::MAX shouldn't trigger an overflow!");
+ }
+ if guards_against_isize {
+ assert_matches!(
+ ten_bytes.try_reserve(MAX_CAP - 9).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "isize::MAX + 1 should trigger an overflow!"
+ );
+ } else {
+ assert_matches!(
+ ten_bytes.try_reserve(MAX_CAP - 9).map_err(|e| e.kind()),
+ Err(AllocError { .. }),
+ "isize::MAX + 1 should trigger an OOM!"
+ );
+ }
+ // Should always overflow in the add-to-len
+ assert_matches!(
+ ten_bytes.try_reserve(MAX_USIZE).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "usize::MAX should trigger an overflow!"
+ );
+ }
+
+ {
+ // Same basic idea, but with interesting type size
+ let mut ten_u32s: VecDeque<u32> = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10].into_iter().collect();
+
+ if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_CAP / 4 - 10).map_err(|e| e.kind())
+ {
+ panic!("isize::MAX shouldn't trigger an overflow!");
+ }
+ if let Err(CapacityOverflow) = ten_u32s.try_reserve(MAX_CAP / 4 - 10).map_err(|e| e.kind())
+ {
+ panic!("isize::MAX shouldn't trigger an overflow!");
+ }
+ if guards_against_isize {
+ assert_matches!(
+ ten_u32s.try_reserve(MAX_CAP / 4 - 9).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "isize::MAX + 1 should trigger an overflow!"
+ );
+ } else {
+ assert_matches!(
+ ten_u32s.try_reserve(MAX_CAP / 4 - 9).map_err(|e| e.kind()),
+ Err(AllocError { .. }),
+ "isize::MAX + 1 should trigger an OOM!"
+ );
+ }
+ // Should fail in the mul-by-size
+ assert_matches!(
+ ten_u32s.try_reserve(MAX_USIZE - 20).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "usize::MAX should trigger an overflow!"
+ );
+ }
+}
+
+#[test]
+#[cfg_attr(miri, ignore)] // Miri does not support signalling OOM
+#[cfg_attr(target_os = "android", ignore)] // Android used in CI has a broken dlmalloc
+fn test_try_reserve_exact() {
+ // This is exactly the same as test_try_reserve with the method changed.
+ // See that test for comments.
+
+ const MAX_CAP: usize = (isize::MAX as usize + 1) / 2 - 1;
+ const MAX_USIZE: usize = usize::MAX;
+
+ let guards_against_isize = size_of::<usize>() < 8;
+
+ {
+ let mut empty_bytes: VecDeque<u8> = VecDeque::new();
+
+ if let Err(CapacityOverflow) = empty_bytes.try_reserve_exact(MAX_CAP).map_err(|e| e.kind())
+ {
+ panic!("isize::MAX shouldn't trigger an overflow!");
+ }
+ if let Err(CapacityOverflow) = empty_bytes.try_reserve_exact(MAX_CAP).map_err(|e| e.kind())
+ {
+ panic!("isize::MAX shouldn't trigger an overflow!");
+ }
+
+ if guards_against_isize {
+ assert_matches!(
+ empty_bytes.try_reserve_exact(MAX_CAP + 1).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "isize::MAX + 1 should trigger an overflow!"
+ );
+
+ assert_matches!(
+ empty_bytes.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "usize::MAX should trigger an overflow!"
+ );
+ } else {
+ // Check isize::MAX is an OOM
+ // VecDeque starts with capacity 7, always adds 1 to the capacity
+ // and also rounds the number up to the next power of 2, so this is the
+ // furthest we can go without triggering CapacityOverflow
+ assert_matches!(
+ empty_bytes.try_reserve_exact(MAX_CAP).map_err(|e| e.kind()),
+ Err(AllocError { .. }),
+ "isize::MAX + 1 should trigger an OOM!"
+ );
+ }
+ }
+
+ {
+ let mut ten_bytes: VecDeque<u8> = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10].into_iter().collect();
+
+ if let Err(CapacityOverflow) =
+ ten_bytes.try_reserve_exact(MAX_CAP - 10).map_err(|e| e.kind())
+ {
+ panic!("isize::MAX shouldn't trigger an overflow!");
+ }
+ if let Err(CapacityOverflow) =
+ ten_bytes.try_reserve_exact(MAX_CAP - 10).map_err(|e| e.kind())
+ {
+ panic!("isize::MAX shouldn't trigger an overflow!");
+ }
+ if guards_against_isize {
+ assert_matches!(
+ ten_bytes.try_reserve_exact(MAX_CAP - 9).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "isize::MAX + 1 should trigger an overflow!"
+ );
+ } else {
+ assert_matches!(
+ ten_bytes.try_reserve_exact(MAX_CAP - 9).map_err(|e| e.kind()),
+ Err(AllocError { .. }),
+ "isize::MAX + 1 should trigger an OOM!"
+ );
+ }
+ assert_matches!(
+ ten_bytes.try_reserve_exact(MAX_USIZE).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "usize::MAX should trigger an overflow!"
+ );
+ }
+
+ {
+ let mut ten_u32s: VecDeque<u32> = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10].into_iter().collect();
+
+ if let Err(CapacityOverflow) =
+ ten_u32s.try_reserve_exact(MAX_CAP / 4 - 10).map_err(|e| e.kind())
+ {
+ panic!("isize::MAX shouldn't trigger an overflow!");
+ }
+ if let Err(CapacityOverflow) =
+ ten_u32s.try_reserve_exact(MAX_CAP / 4 - 10).map_err(|e| e.kind())
+ {
+ panic!("isize::MAX shouldn't trigger an overflow!");
+ }
+ if guards_against_isize {
+ assert_matches!(
+ ten_u32s.try_reserve_exact(MAX_CAP / 4 - 9).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "isize::MAX + 1 should trigger an overflow!"
+ );
+ } else {
+ assert_matches!(
+ ten_u32s.try_reserve_exact(MAX_CAP / 4 - 9).map_err(|e| e.kind()),
+ Err(AllocError { .. }),
+ "isize::MAX + 1 should trigger an OOM!"
+ );
+ }
+ assert_matches!(
+ ten_u32s.try_reserve_exact(MAX_USIZE - 20).map_err(|e| e.kind()),
+ Err(CapacityOverflow),
+ "usize::MAX should trigger an overflow!"
+ );
+ }
+}
+
+#[test]
+fn test_rotate_nop() {
+ let mut v: VecDeque<_> = (0..10).collect();
+ assert_unchanged(&v);
+
+ v.rotate_left(0);
+ assert_unchanged(&v);
+
+ v.rotate_left(10);
+ assert_unchanged(&v);
+
+ v.rotate_right(0);
+ assert_unchanged(&v);
+
+ v.rotate_right(10);
+ assert_unchanged(&v);
+
+ v.rotate_left(3);
+ v.rotate_right(3);
+ assert_unchanged(&v);
+
+ v.rotate_right(3);
+ v.rotate_left(3);
+ assert_unchanged(&v);
+
+ v.rotate_left(6);
+ v.rotate_right(6);
+ assert_unchanged(&v);
+
+ v.rotate_right(6);
+ v.rotate_left(6);
+ assert_unchanged(&v);
+
+ v.rotate_left(3);
+ v.rotate_left(7);
+ assert_unchanged(&v);
+
+ v.rotate_right(4);
+ v.rotate_right(6);
+ assert_unchanged(&v);
+
+ v.rotate_left(1);
+ v.rotate_left(2);
+ v.rotate_left(3);
+ v.rotate_left(4);
+ assert_unchanged(&v);
+
+ v.rotate_right(1);
+ v.rotate_right(2);
+ v.rotate_right(3);
+ v.rotate_right(4);
+ assert_unchanged(&v);
+
+ fn assert_unchanged(v: &VecDeque<i32>) {
+ assert_eq!(v, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
+ }
+}
+
+#[test]
+fn test_rotate_left_parts() {
+ let mut v: VecDeque<_> = (1..=7).collect();
+ v.rotate_left(2);
+ assert_eq!(v.as_slices(), (&[3, 4, 5, 6, 7, 1][..], &[2][..]));
+ v.rotate_left(2);
+ assert_eq!(v.as_slices(), (&[5, 6, 7, 1][..], &[2, 3, 4][..]));
+ v.rotate_left(2);
+ assert_eq!(v.as_slices(), (&[7, 1][..], &[2, 3, 4, 5, 6][..]));
+ v.rotate_left(2);
+ assert_eq!(v.as_slices(), (&[2, 3, 4, 5, 6, 7, 1][..], &[][..]));
+ v.rotate_left(2);
+ assert_eq!(v.as_slices(), (&[4, 5, 6, 7, 1, 2][..], &[3][..]));
+ v.rotate_left(2);
+ assert_eq!(v.as_slices(), (&[6, 7, 1, 2][..], &[3, 4, 5][..]));
+ v.rotate_left(2);
+ assert_eq!(v.as_slices(), (&[1, 2][..], &[3, 4, 5, 6, 7][..]));
+}
+
+#[test]
+fn test_rotate_right_parts() {
+ let mut v: VecDeque<_> = (1..=7).collect();
+ v.rotate_right(2);
+ assert_eq!(v.as_slices(), (&[6, 7][..], &[1, 2, 3, 4, 5][..]));
+ v.rotate_right(2);
+ assert_eq!(v.as_slices(), (&[4, 5, 6, 7][..], &[1, 2, 3][..]));
+ v.rotate_right(2);
+ assert_eq!(v.as_slices(), (&[2, 3, 4, 5, 6, 7][..], &[1][..]));
+ v.rotate_right(2);
+ assert_eq!(v.as_slices(), (&[7, 1, 2, 3, 4, 5, 6][..], &[][..]));
+ v.rotate_right(2);
+ assert_eq!(v.as_slices(), (&[5, 6][..], &[7, 1, 2, 3, 4][..]));
+ v.rotate_right(2);
+ assert_eq!(v.as_slices(), (&[3, 4, 5, 6][..], &[7, 1, 2][..]));
+ v.rotate_right(2);
+ assert_eq!(v.as_slices(), (&[1, 2, 3, 4, 5, 6][..], &[7][..]));
+}
+
+#[test]
+fn test_rotate_left_random() {
+ let shifts = [
+ 6, 1, 0, 11, 12, 1, 11, 7, 9, 3, 6, 1, 4, 0, 5, 1, 3, 1, 12, 8, 3, 1, 11, 11, 9, 4, 12, 3,
+ 12, 9, 11, 1, 7, 9, 7, 2,
+ ];
+ let n = 12;
+ let mut v: VecDeque<_> = (0..n).collect();
+ let mut total_shift = 0;
+ for shift in shifts.iter().cloned() {
+ v.rotate_left(shift);
+ total_shift += shift;
+ for i in 0..n {
+ assert_eq!(v[i], (i + total_shift) % n);
+ }
+ }
+}
+
+#[test]
+fn test_rotate_right_random() {
+ let shifts = [
+ 6, 1, 0, 11, 12, 1, 11, 7, 9, 3, 6, 1, 4, 0, 5, 1, 3, 1, 12, 8, 3, 1, 11, 11, 9, 4, 12, 3,
+ 12, 9, 11, 1, 7, 9, 7, 2,
+ ];
+ let n = 12;
+ let mut v: VecDeque<_> = (0..n).collect();
+ let mut total_shift = 0;
+ for shift in shifts.iter().cloned() {
+ v.rotate_right(shift);
+ total_shift += shift;
+ for i in 0..n {
+ assert_eq!(v[(i + total_shift) % n], i);
+ }
+ }
+}
+
+#[test]
+fn test_try_fold_empty() {
+ assert_eq!(Some(0), VecDeque::<u32>::new().iter().try_fold(0, |_, _| None));
+}
+
+#[test]
+fn test_try_fold_none() {
+ let v: VecDeque<u32> = (0..12).collect();
+ assert_eq!(None, v.into_iter().try_fold(0, |a, b| if b < 11 { Some(a + b) } else { None }));
+}
+
+#[test]
+fn test_try_fold_ok() {
+ let v: VecDeque<u32> = (0..12).collect();
+ assert_eq!(Ok::<_, ()>(66), v.into_iter().try_fold(0, |a, b| Ok(a + b)));
+}
+
+#[test]
+fn test_try_fold_unit() {
+ let v: VecDeque<()> = std::iter::repeat(()).take(42).collect();
+ assert_eq!(Some(()), v.into_iter().try_fold((), |(), ()| Some(())));
+}
+
+#[test]
+fn test_try_fold_unit_none() {
+ let v: std::collections::VecDeque<()> = [(); 10].iter().cloned().collect();
+ let mut iter = v.into_iter();
+ assert!(iter.try_fold((), |_, _| None).is_none());
+ assert_eq!(iter.len(), 9);
+}
+
+#[test]
+fn test_try_fold_rotated() {
+ let mut v: VecDeque<_> = (0..12).collect();
+ for n in 0..10 {
+ if n & 1 == 0 {
+ v.rotate_left(n);
+ } else {
+ v.rotate_right(n);
+ }
+ assert_eq!(Ok::<_, ()>(66), v.iter().try_fold(0, |a, b| Ok(a + b)));
+ }
+}
+
+#[test]
+fn test_try_fold_moves_iter() {
+ let v: VecDeque<_> = [10, 20, 30, 40, 100, 60, 70, 80, 90].iter().collect();
+ let mut iter = v.into_iter();
+ assert_eq!(iter.try_fold(0_i8, |acc, &x| acc.checked_add(x)), None);
+ assert_eq!(iter.next(), Some(&60));
+}
+
+#[test]
+fn test_try_fold_exhaust_wrap() {
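+ // Exhausting the iterator of a wrapped deque via `try_fold` must leave it empty.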
+ let mut v = VecDeque::with_capacity(7);
+ v.push_back(1);
+ v.push_back(1);
+ v.push_back(1);
+ v.pop_front();
+ v.pop_front();
+ let mut iter = v.iter();
+ let _ = iter.try_fold(0, |_, _| Some(1));
+ assert!(iter.is_empty());
+}
+
+#[test]
+fn test_try_fold_wraparound() {
+ let mut v = VecDeque::with_capacity(8);
+ v.push_back(7);
+ v.push_back(8);
+ v.push_back(9);
+ v.push_front(2);
+ v.push_front(1);
+ let mut iter = v.iter();
+ let _ = iter.find(|&&x| x == 2);
+ assert_eq!(Some(&7), iter.next());
+}
+
+#[test]
+fn test_try_rfold_rotated() {
+ let mut v: VecDeque<_> = (0..12).collect();
+ for n in 0..10 {
+ if n & 1 == 0 {
+ v.rotate_left(n);
+ } else {
+ v.rotate_right(n);
+ }
+ assert_eq!(Ok::<_, ()>(66), v.iter().try_rfold(0, |a, b| Ok(a + b)));
+ }
+}
+
+#[test]
+fn test_try_rfold_moves_iter() {
+ let v: VecDeque<_> = [10, 20, 30, 40, 100, 60, 70, 80, 90].iter().collect();
+ let mut iter = v.into_iter();
+ assert_eq!(iter.try_rfold(0_i8, |acc, &x| acc.checked_add(x)), None);
+ assert_eq!(iter.next_back(), Some(&70));
+}
+
+#[test]
+fn truncate_leak() {
+ static mut DROPS: i32 = 0;
+
+ struct D(bool);
+
+ impl Drop for D {
+ fn drop(&mut self) {
+ unsafe {
+ DROPS += 1;
+ }
+
+ if self.0 {
+ panic!("panic in `drop`");
+ }
+ }
+ }
+
+ let mut q = VecDeque::new();
+ q.push_back(D(false));
+ q.push_back(D(false));
+ q.push_back(D(false));
+ q.push_back(D(false));
+ q.push_back(D(false));
+ q.push_front(D(true));
+ q.push_front(D(false));
+ q.push_front(D(false));
+
+ catch_unwind(AssertUnwindSafe(|| q.truncate(1))).ok();
+
+ assert_eq!(unsafe { DROPS }, 7);
+}
+
+#[test]
+fn test_drain_leak() {
+ static mut DROPS: i32 = 0;
+
+ #[derive(Debug, PartialEq)]
+ struct D(u32, bool);
+
+ impl Drop for D {
+ fn drop(&mut self) {
+ unsafe {
+ DROPS += 1;
+ }
+
+ if self.1 {
+ panic!("panic in `drop`");
+ }
+ }
+ }
+
+ let mut v = VecDeque::new();
+ v.push_back(D(4, false));
+ v.push_back(D(5, false));
+ v.push_back(D(6, false));
+ v.push_front(D(3, false));
+ v.push_front(D(2, true));
+ v.push_front(D(1, false));
+ v.push_front(D(0, false));
+
+ catch_unwind(AssertUnwindSafe(|| {
+ v.drain(1..=4);
+ }))
+ .ok();
+
+ assert_eq!(unsafe { DROPS }, 4);
+ assert_eq!(v.len(), 3);
+ drop(v);
+ assert_eq!(unsafe { DROPS }, 7);
+}
+
+#[test]
+fn test_binary_search() {
+ // Contiguous (front only) search:
+ let deque: VecDeque<_> = vec![1, 2, 3, 5, 6].into();
+ assert!(deque.as_slices().1.is_empty());
+ assert_eq!(deque.binary_search(&3), Ok(2));
+ assert_eq!(deque.binary_search(&4), Err(3));
+
+ // Split search (both front & back non-empty):
+ let mut deque: VecDeque<_> = vec![5, 6].into();
+ deque.push_front(3);
+ deque.push_front(2);
+ deque.push_front(1);
+ deque.push_back(10);
+ assert!(!deque.as_slices().0.is_empty());
+ assert!(!deque.as_slices().1.is_empty());
+ assert_eq!(deque.binary_search(&0), Err(0));
+ assert_eq!(deque.binary_search(&1), Ok(0));
+ assert_eq!(deque.binary_search(&5), Ok(3));
+ assert_eq!(deque.binary_search(&7), Err(5));
+ assert_eq!(deque.binary_search(&20), Err(6));
+}
+
+#[test]
+fn test_binary_search_by() {
+ let deque: VecDeque<_> = vec![(1,), (2,), (3,), (5,), (6,)].into();
+
+ assert_eq!(deque.binary_search_by(|&(v,)| v.cmp(&3)), Ok(2));
+ assert_eq!(deque.binary_search_by(|&(v,)| v.cmp(&4)), Err(3));
+}
+
+#[test]
+fn test_binary_search_by_key() {
+ let deque: VecDeque<_> = vec![(1,), (2,), (3,), (5,), (6,)].into();
+
+ assert_eq!(deque.binary_search_by_key(&3, |&(v,)| v), Ok(2));
+ assert_eq!(deque.binary_search_by_key(&4, |&(v,)| v), Err(3));
+}
+
+#[test]
+fn test_partition_point() {
+ // Contiguous (front only) search:
+ let deque: VecDeque<_> = vec![1, 2, 3, 5, 6].into();
+ assert!(deque.as_slices().1.is_empty());
+ assert_eq!(deque.partition_point(|&v| v <= 3), 3);
+
+ // Split search (both front & back non-empty):
+ let mut deque: VecDeque<_> = vec![5, 6].into();
+ deque.push_front(3);
+ deque.push_front(2);
+ deque.push_front(1);
+ deque.push_back(10);
+ assert!(!deque.as_slices().0.is_empty());
+ assert!(!deque.as_slices().1.is_empty());
+ assert_eq!(deque.partition_point(|&v| v <= 5), 4);
+}
+
+#[test]
+fn test_zero_sized_push() {
+ const N: usize = 8;
+
+ // Zero sized type
+ struct Zst;
+
+ // Test that for all possible sequences of push_front / push_back,
+ // we end up with a deque of the correct size
+
+ for len in 0..N {
+ let mut tester = VecDeque::with_capacity(len);
+ assert_eq!(tester.len(), 0);
+ assert!(tester.capacity() >= len);
+ for case in 0..(1 << len) {
+ assert_eq!(tester.len(), 0);
+ for bit in 0..len {
+ if case & (1 << bit) != 0 {
+ tester.push_front(Zst);
+ } else {
+ tester.push_back(Zst);
+ }
+ }
+ assert_eq!(tester.len(), len);
+ assert_eq!(tester.iter().count(), len);
+ tester.clear();
+ }
+ }
+}
+
+#[test]
+fn test_from_zero_sized_vec() {
+ let v = vec![(); 100];
+ let queue = VecDeque::from(v);
+ assert_eq!(queue.len(), 100);
+}